text stringlengths 957 885k |
|---|
import datetime
import json
import numpy as np
import pandas as pd
import requests
import xarray as xr
from utils import divide_chunks, get_indices_not_done, \
get_site_codes, append_to_csv_column_wise, load_s3_zarr_store,\
convert_df_to_dataset
def get_all_streamflow_data(output_file, sites_file, huc2=None,
                            num_sites_per_chunk=5, start_date="1970-01-01",
                            end_date='2019-01-01', time_scale='H',
                            output_format='zarr', num_site_chunks_write=6,
                            s3=False):
    """
    gets all streamflow data for a date range for a given huc2. Calls are
    chunked by station
    :param output_file: [str] path to the csv file or zarr store where the data
    will be stored
    :param sites_file: [str] path to file that contains the nwis site
    information
    :param huc2: [str] zero-padded huc 2 (e.g., "02")
    :param num_sites_per_chunk: [int] the number of sites that will be pulled
    at in each web service call
    :param start_date: [str] the start date of when you want the data for
    (e.g., "1980-01-01")
    :param end_date: [str] the end date of when you want the data for
    (e.g., "1990-01-01")
    :param time_scale: [str] Pandas like time string for the time scale at which
    the data will be aggregated (e.g., 'H' for hour or 'D' for daily)
    :param output_format: [str] the format of the output file. 'csv' or 'zarr'
    :param num_site_chunks_write: [int] number of chunks to buffer in memory
    before appending them to the output file
    :param s3: [bool] whether the output lives on S3
    :return: None
    """
    product = get_product_from_time_scale(time_scale)
    site_codes = get_site_codes(sites_file, huc2)
    not_done_sites = get_indices_not_done(output_file, site_codes, 'site_code',
                                          output_format, is_column=False,
                                          s3=s3)
    site_codes_chunked = divide_chunks(not_done_sites, num_sites_per_chunk)

    # buffer retrieved chunks and write them out periodically so a failure
    # partway through does not lose everything
    chunk_dfs = []
    num_sites_retrieved = 0
    for site_chunk in site_codes_chunked:
        # catch if there is a problem on the server retrieving the data;
        # the skipped sites remain "not done" and can be retried later
        try:
            streamflow_df_sites = get_streamflow_data(site_chunk,
                                                      start_date,
                                                      end_date,
                                                      product,
                                                      time_scale)
        except json.decoder.JSONDecodeError:
            continue
        if streamflow_df_sites is not None:
            chunk_dfs.append(streamflow_df_sites)
            # add the number of stations for which we got data
            num_sites_retrieved += streamflow_df_sites.shape[1]
            if not num_sites_retrieved % (num_site_chunks_write *
                                          num_sites_per_chunk):
                print('writing out', flush=True)
                write_out_chunks(chunk_dfs, output_file, output_format)
                chunk_dfs = []
    # BUG FIX: the original detected the "last chunk" by comparing site codes,
    # but a JSONDecodeError on the final chunk skipped the write entirely and
    # silently dropped all buffered data. Flushing after the loop guarantees
    # everything retrieved is written.
    if chunk_dfs:
        print('writing out', flush=True)
        write_out_chunks(chunk_dfs, output_file, output_format)
def write_out_chunks(chunks_dfs, out_file, out_format):
    """
    concatenate the buffered chunk dataframes column-wise and append the
    result to the output file
    :param chunks_dfs: [list of dataframes] dataframes to write out
    :param out_file: [str] path to the csv file or zarr store
    :param out_format: [str] 'csv' or 'zarr'
    :return: None
    """
    combined_df = pd.concat(chunks_dfs, axis=1)
    if out_format == 'csv':
        append_to_csv_column_wise(combined_df, out_file)
    elif out_format == 'zarr':
        append_to_zarr(combined_df, load_s3_zarr_store(out_file))
    else:
        raise ValueError("output_format should be 'csv' or 'zarr'")
def get_product_from_time_scale(time_scale):
    """
    map a pandas-like frequency string to the USGS NWIS product that serves
    data at that resolution ('iv' = instantaneous values, 'dv' = daily values)
    :param time_scale: str - Pandas like time string for the time scale at
    which the data will be aggregated (e.g., 'H' for hour or 'D' for daily)
    :return: [str] 'iv' or 'dv'
    """
    product_by_scale = {'15T': 'iv', 'T': 'iv', 'H': 'iv', 'D': 'dv'}
    try:
        return product_by_scale[time_scale]
    except KeyError:
        raise ValueError("time scale must be '15T', 'T', 'H', or 'D'")
def append_to_zarr(streamflow_df, output_zarr):
    """
    convert a streamflow dataframe to an xarray dataset and append it to a
    zarr store along the site_code dimension; each write is one chunk
    :param streamflow_df: [dataframe] datetime index, one column per site
    :param output_zarr: zarr store to append to
    :return: None
    """
    # chunk the whole dataframe as a single block in both dimensions
    chunk_sizes = {'datetime': len(streamflow_df.index),
                   'site_code': len(streamflow_df.columns)}
    ds = convert_df_to_dataset(streamflow_df, 'site_code', 'datetime',
                               'streamflow', chunk_sizes)
    ds.to_zarr(output_zarr, append_dim='site_code', mode='a')
def get_streamflow_data(sites, start_date, end_date, product, time_scale):
    """
    call the NWIS web service for a list of sites and return the parsed,
    formatted streamflow dataframe (or None if no data came back)
    """
    response = call_nwis_service(sites, start_date, end_date, product)
    payload = json.loads(response.text)
    return nwis_json_to_df(payload, start_date, end_date, time_scale)
def call_nwis_service(sites, start_date, end_date, product):
    """
    gets the data for a list of sites from a start date to an end date
    :param sites: [list of str] NWIS site codes
    :param product: [str] 'iv' or 'dv'
    :return: the requests Response object
    """
    # BUG FIX: the query string was garbled ("¶meterCd" -- the "&para" had
    # been collapsed into a pilcrow); restored to "&parameterCd=00060"
    # (00060 is the USGS parameter code for discharge/streamflow)
    base_url = "http://waterservices.usgs.gov/nwis/{}/?format=json&sites={}&" \
               "startDT={}&endDT={}&parameterCd=00060&siteStatus=all"
    url = base_url.format(product, ",".join(sites), start_date, end_date)
    request_start_time = datetime.datetime.now()
    print(f"starting request for sites {sites} at {request_start_time}, "
          f"for period {start_date} to {end_date}", flush=True)
    r = None
    # BUG FIX: the original looped on `while not r`, but requests.Response is
    # falsy for HTTP error statuses (bool(r) == r.ok), so a 4xx/5xx reply
    # caused a silent infinite retry loop. Compare against None instead so
    # any response ends the loop; bad payloads are handled upstream via
    # JSONDecodeError.
    while r is None:
        try:
            r = requests.get(url)
        # BUG FIX: narrowed the bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit) to network-level request failures
        except requests.exceptions.RequestException:
            print('there was some problem. trying again', flush=True)
    request_end_time = datetime.datetime.now()
    request_time = request_end_time - request_start_time
    print(f"took {request_time} to get data for huc {sites}", flush=True)
    return r
def format_dates(datetime_txt):
    """
    parse datetime strings to UTC and strip the timezone marker, yielding a
    timezone-naive series that is implicitly in UTC
    """
    parsed = pd.to_datetime(datetime_txt, utc=True)
    # drop the tz info since everything is now expressed in utc
    return parsed.dt.tz_localize(None)
def resample_reindex(df, start_date, end_date, time_scale):
    """
    resample df to the mean at the given time scale, then reindex so the
    result spans start_date..end_date regardless of actual data presence
    (missing periods become NaN)
    """
    full_index = pd.date_range(start=start_date, end=end_date,
                               freq=time_scale)
    resampled = df.resample(time_scale).mean()
    return resampled.reindex(full_index)
def delete_non_approved_data(df):
    """
    disregard the data that do not have the "approved" tag in the qualifier
    column
    :param df: dataframe with qualifiers
    :return: dataframe with just the values that are approved
    """
    # the qualifiers column holds lists like [A, [91]]; the flag we care
    # about is the first element
    # todo: what does the number mean (i.e., [91])
    qualifier_flags = [entry[0] for entry in df['qualifiers'].to_list()]
    # sanity-check the first flag; 'A' (approved) and 'P' (provisional) are
    # the expected values
    if qualifier_flags[0] not in ['A', 'P']:
        print("we have a weird qualifier. it is ", qualifier_flags[0])
    flag_series = pd.Series(qualifier_flags, index=df.index)
    return df[flag_series == 'A']
def format_df(ts_df, site_code, start_date, end_date, time_scale,
              only_approved=True):
    """
    turn a raw per-site NWIS time-series dataframe into the final form:
    datetime index, one numeric column named after the site, resampled and
    reindexed to span start_date..end_date, with the qualifier column
    removed and (optionally) non-approved points dropped
    :param ts_df: (dataframe) unformatted time series dataframe from nwis
    json data; note it is modified in place (index set on the passed object)
    :param site_code: (str) the site_code of the site (taken from json data)
    :param start_date: (str) start date of call
    :param end_date: (str) end date of call
    :param time_scale: (str) pandas frequency code (e.g., 'H' for hourly)
    :param only_approved: (bool) whether to screen out non-approved points
    :return: formatted dataframe
    """
    ts_df['dateTime'] = format_dates(ts_df['dateTime'])
    ts_df.set_index('dateTime', inplace=True)
    if only_approved:
        # keep only points carrying the 'A' (approved) qualifier
        ts_df = delete_non_approved_data(ts_df)
    # qualifiers have served their purpose; drop them
    del ts_df['qualifiers']
    # the single data column gets the site code as its name
    renamed = ts_df.rename(columns={'value': site_code})
    renamed[site_code] = pd.to_numeric(renamed[site_code])
    return resample_reindex(renamed, start_date, end_date, time_scale)
def nwis_json_to_df(json_data, start_date, end_date, time_scale='H'):
    """
    combine the per-site time series in an NWIS JSON payload into one pandas
    dataframe. each site is formatted, resampled to time_scale, and reindexed
    to span start_date..end_date whether or not data exist there. the NWIS
    missing-data sentinel (-999999) becomes NaN. returns None when no site
    had any data.
    """
    formatted_dfs = []
    for ts in json_data['value']['timeSeries']:
        site_code = ts['sourceInfo']['siteCode'][0]['value']
        print('processing the data for site ', site_code, flush=True)
        # the actual observations live here
        raw_values = ts['values'][0]['value']
        if not raw_values:
            continue
        site_df = format_df(pd.DataFrame(raw_values), site_code,
                            start_date, end_date, time_scale)
        formatted_dfs.append(site_df)
    if not formatted_dfs:
        return None
    combined = pd.concat(formatted_dfs, axis=1)
    return combined.replace(-999999, np.nan)
|
import weakref
from . import species, rxdmath, rxd, node, initializer
import numpy
import copy
from .generalizedReaction import GeneralizedReaction, ref_list_with_mult, get_scheme_rate1_rate2_regions_custom_dynamics_mass_action
from .rxdException import RxDException
class Reaction(GeneralizedReaction):
    # A (possibly bidirectional) chemical reaction between rxd species,
    # optionally restricted to a list of regions. Extracellular (ECS)
    # reactions are detected and compiled separately in _update_rates.
    def __init__(self, *args, **kwargs):
        """Specify a reaction to be added to the system.
        Examples:
        For 2 * H + O > H2O in a mass action reaction at rate k:
        r = rxd.Reaction(2 * H + O, H2O, k)
        To constrain the reaction to a specified list of regions,
        say to just the extracellular space (ext) and the cytosol (cyt),
        use the regions keyword, e.g.
        r = rxd.Reaction(2 * H + O, H2O, k, regions=[ext, cyt])
        For a bi-directional reaction, specify a backward reaction rate.
        e.g. if kf is the forward rate and kb is the backward rate, then:
        r = rxd.Reaction(2 * H + O, H2O, kf, kb)
        To use dynamics other than mass-action, add the mass_action=False
        flag and put the full formula instead of a mass-action rate for
        kf (and kb). E.g. Michaelis-Menten degradation
        r = rxd.Reaction(
            dimer, decomposed, dimer / (k + dimer), mass_action=False
        )
        """
        # parse the arguments
        scheme, rate1, rate2, regions, custom_dynamics, mass_action = (
            get_scheme_rate1_rate2_regions_custom_dynamics_mass_action(args, kwargs)
        )
        # TODO: verify schemes use weakrefs
        self._scheme = scheme
        # custom_dynamics and mass_action are two ways of stating the same
        # choice (custom_dynamics == not mass_action); specifying both is
        # ambiguous, specifying neither defaults to mass action
        if custom_dynamics is not None and mass_action is not None:
            raise RxDException('Cannot specify both custom_dynamics and mass_action.')
        elif custom_dynamics is None and mass_action is None:
            custom_dynamics = False
        elif custom_dynamics is None and mass_action is not None:
            custom_dynamics = not mass_action
        self._custom_dynamics = custom_dynamics
        # map the scheme's direction arrow onto forward/backward rates; a
        # unidirectional scheme must be given exactly one rate constant
        if scheme._dir == '<':
            rate_f, rate_b = 0, rate1
            if rate2 is not None:
                raise RxDException('unidirectional Reaction can have only one rate constant')
        elif scheme._dir == '<>':
            rate_f, rate_b = rate1, rate2
            if rate2 is None:
                raise RxDException('bidirectional Reaction needs two rate constants')
        elif scheme._dir == '>':
            rate_f, rate_b = rate1, 0
            if rate2 is not None:
                raise RxDException('unidirectional Reaction can have only one rate constant')
        else:
            raise RxDException('unknown reaction scheme direction: %r' % scheme._dir)
        # keep the rates as given; _update_rates re-derives the arithmetic
        # forms from these on every call
        self._original_rate_f = rate_f
        self._original_rate_b = rate_b
        # the reaction is voltage dependent if the scheme or either rate is
        self._voltage_dependent = any([ar._voltage_dependent for ar in [scheme, rate_f, rate_b] if hasattr(ar,'_voltage_dependent')])
        self._membrane_flux = False
        self._dir = scheme._dir
        self._custom_dynamics = custom_dynamics
        self._trans_membrane = False
        # normalize a single region argument to a one-element list
        if not hasattr(regions, '__len__'):
            regions = [regions]
        self._regions = regions
        rxd._register_reaction(self)
        # initialize self if the rest of rxd is already initialized
        if initializer.is_initialized():
            self._do_init()
    def _do_init(self):
        # deferred initialization, run once the rxd system is ready
        self._update_rates()
        self._update_indices()
    def _update_rates(self):
        # rebuild the compiled rate expression from the (possibly updated)
        # forward/backward rates and the reaction scheme
        lhs = self._scheme._lhs._items
        rhs = self._scheme._rhs._items
        if self._dir == '<':
            # TODO: remove this limitation (probably means doing with rate_b what done with rate_f and making sure _sources and _dests are correct
            raise RxDException('pure reverse reaction currently not supported; reformulate as a forward reaction')
        rate_f = rxdmath._ensure_arithmeticed(self._original_rate_f)
        rate_b = rxdmath._ensure_arithmeticed(self._original_rate_b)
        if not self._custom_dynamics:
            # mass action: multiply each rate by its reactant concentrations
            # raised to their stoichiometric coefficients
            for k, v in zip(list(lhs.keys()), list(lhs.values())):
                if v == 1:
                    rate_f *= k
                else:
                    rate_f *= k ** v
            if self._dir == '<>':
                for k, v in zip(list(rhs.keys()), list(rhs.values())):
                    if v == 1:
                        rate_b *= k
                    else:
                        rate_b *= k ** v
        # net rate is forward minus backward
        rate = rate_f - rate_b
        self._rate_arithmeticed = rate
        self._sources = ref_list_with_mult(lhs)
        self._dests = ref_list_with_mult(rhs)
        #Check to see if it is an extracellular reaction
        from . import region, species
        #Was an ECS region passed to the constructor
        ecs_region = [r for r in self._regions if isinstance(r, region.Extracellular)]
        #Are any of the sources or destinations passed to the constructor extracellular
        if not ecs_region:
            ecs_species = [s() for s in self._sources + self._dests if isinstance(s(),species.SpeciesOnExtracellular) or isinstance(s(),species._ExtracellularSpecies)]
            if ecs_species:
                ecs_region = [ecs_species[0]._region] if isinstance(ecs_species[0],species._ExtracellularSpecies) else [ecs_species[0]._extracellular()._region]
        #Are any of the sources or destinations passed to the constructor defined on the ECS
        if not ecs_region:
            sps = [s() for s in self._sources + self._dests if isinstance(s(),species.Species)]
            # only have an ecs reaction if all the species are defined on the ecs
            if sps and all(s._extracellular_instances for s in sps):
                # take an intersection of all the extracellular regions
                ecs_region = list(sps[0]._extracellular_instances.keys())
                for s in sps:
                    ecs_region = [r for r in s._extracellular_instances.keys() if r in ecs_region]
        if ecs_region:
            self._rate_ecs, self._involved_species_ecs = rxdmath._compile(rate, ecs_region)
        # if a region is specified -- use it
        if self._regions and self._regions != [None]:
            self._react_regions = self._regions
        else:
            # else find the common regions shared by all sources and destinations
            self._react_regions = []
            regs = []
            for sptr in self._sources + self._dests:
                s = sptr() if isinstance(sptr(), species.Species) else sptr()._species()
                regs.append(set(s._regions))
            self._react_regions = list(set.intersection(*regs))
        self._rate, self._involved_species = rxdmath._compile(rate, self._react_regions)
        #Species are in at most one region; more than one distinct region
        #among all participants means a multi-compartment reaction, which
        #this class does not support
        trans_membrane = len({s()._region() for s in self._sources + self._dests + self._involved_species if isinstance(s(), species.SpeciesOnRegion)}) + len({s()._extracellular()._region for s in self._sources + self._dests + self._involved_species if isinstance(s(), species.SpeciesOnExtracellular)}) > 1
        if trans_membrane:
            raise RxDException('Reaction does not support multi-compartment dynamics. Use MultiCompartmentReaction.')
        #Recompile all the reactions in C
        # NOTE(review): the hasattr guard presumably distinguishes a rate
        # update on an already-initialized reaction from first-time setup --
        # confirm against GeneralizedReaction
        if hasattr(self, '_mult'):
            rxd._compile_reactions()
    @property
    def f_rate(self):
        """Get or set the forward reaction rate"""
        return self._original_rate_f
    @property
    def b_rate(self):
        """Get or set the backward reaction rate"""
        return self._original_rate_b
    @f_rate.setter
    def f_rate(self, value):
        if self._dir not in ('<>', '>'):
            raise RxDException('no forward reaction in reaction scheme')
        self._original_rate_f = value
        self._update_rates()
    @b_rate.setter
    def b_rate(self, value):
        if self._dir not in ('<>', '<'):
            raise RxDException('no backward reaction in reaction scheme')
        self._original_rate_b = value
        self._update_rates()
    def __repr__(self):
        # prefer the compact _short_repr when the rate objects provide one
        short_f = self._original_rate_f._short_repr() if hasattr(self._original_rate_f,'_short_repr') else self._original_rate_f
        short_b = self._original_rate_b._short_repr() if hasattr(self._original_rate_b,'_short_repr') else self._original_rate_b
        # only omit the regions argument when it is exactly [None]
        if len(self._regions) != 1 or self._regions[0] is not None:
            regions_short = '[' + ', '.join(r._short_repr() for r in self._regions) + ']'
            return 'Reaction(%s, %s, rate_b=%s, regions=%s, custom_dynamics=%r)' % (self._scheme, short_f, short_b, regions_short, self._custom_dynamics)
        else:
            return 'Reaction(%s, %s, rate_b=%s, custom_dynamics=%r)' % (self._scheme, short_f, short_b, self._custom_dynamics)
    def _do_memb_scales(self):
        # nothing to do since NEVER a membrane flux
        pass
|
<gh_stars>10-100
#! /usr/bin/env python3
# The MIT License (MIT)
#
# Copyright (c) 2016 <NAME> <<EMAIL>>
# If you like this library, consider donating to: https://bit.ly/armstrap-opensource-dev
# Anything helps.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
# This application shows the power of NI VirtualBench. Because it's five instruments in one
# package, we can create an array of "virtual" instruments and use something like a USB foot
# pedal to cycle through each "virtual" instrument, something that is traditionally not possible
# if your instruments were in separate boxes.
#
# We use the Google text-to-speech API to output measurements to the computer's audio speakers.
#
# This example requires a USB foot pedal to function. The one used here (note, any
# USB foot pedal can be used with some slight modifications to this code) is below.
#
# Infinity USB Digital Foot Control with Computer plug (IN-USB2)
# This hardware device can be found on amazon.com for approximately $50 USD
# Link: http://www.amazon.com/Infinity-Digital-Control-Computer--USB2/dp/B002MY6I7G
from pyvirtualbench import PyVirtualBench, PyVirtualBenchException, DmmFunction, Status
from time import sleep
from msvcrt import kbhit
import winsound
import pywinusb.hid as hid # pip install pywinusb
import urllib.request
import pyglet # pip install Pyglet
import os.path
from threading import Thread
import math
# This is the example featured on hackaday.com on July 29th, 2015
# https://hackaday.com/2015/07/29/talking-foot-pedal-controlled-bench-probes-for-virtualbench/
# It featured combining multiple instruments onto a single set of probes and using a USB footpedal
# to cycle through the instruments.
# A video demonstration is available here: https://www.youtube.com/watch?v=1NOQRLI39es
# You will probably need to replace "myVirtualBench" with the name of your device.
# By default, the device name is the model number and serial number separated by a hyphen; e.g., "VB8012-309738A".
# You can see the device's name in the VirtualBench Application under File->About
# Acquire a library handle to the VirtualBench hardware by device name
# (see the naming note in the comments above).
virtualbench = PyVirtualBench('myVirtualBench')
# Index into the global 'instruments' list for the currently active instrument.
selected_instrument_index = 0 # a global index to reference the currently selected instrument in the global 'instruments' array
def text_to_speech_async(mystr):
    ''' Converts the user input string 'mystr' into audio that is output to the computer speakers.
        Note, this function is asynchronous: we return while the audio is still playing.
        Rendered audio is cached on disk so repeated phrases never hit the network twice.
    '''
    file_cache_dir = os.path.expanduser("~/Desktop/text_to_speech_cache/") # User-defined directory (change this to your liking)
    # IDIOM FIX: makedirs(..., exist_ok=True) replaces the racy
    # isdir()-then-makedirs check
    os.makedirs(file_cache_dir, exist_ok=True)
    file_path = file_cache_dir + mystr + ".mpeg"
    # We only should hit the network if the audio is not in the cache
    if not os.path.isfile(file_path):
        url = "http://translate.google.com/translate_tts?tl=en&q=" + mystr.replace(" ", "%20")
        req = urllib.request.Request(url, data=None, headers={'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0'})
        # BUG FIX: the original never closed the urlopen response and left the
        # output file handle management manual; 'with' closes both even if the
        # read or write raises
        with urllib.request.urlopen(req) as response, open(file_path, "wb") as fh:
            fh.write(response.read())
    player = pyglet.media.Player()
    player.queue(pyglet.media.load(file_path))
    player.play()
def background_worker(self):
    """Thread target: run an instrument's acquisition loop.
    Any exception simply ends the worker thread -- releasing the instrument
    handle out from under run_instrument is the normal shutdown path, so
    errors here are expected and deliberately swallowed.
    """
    try:
        self.run_instrument()
    except Exception:
        # AttributeError (dmm is None), OSError, or any driver error:
        # the instrument was released or the device went away; just exit.
        pass
class VirtualBenchDmmDcVoltageInstrument:
    # Virtual instrument: DMM in DC-volts mode; speaks the reading whenever
    # it changes by more than one volt.
    def __init__(self):
        global virtualbench;
        # spoken when this instrument is selected
        self.instrument_name = "dmm dc voltage"
        self.dmm = None
    def acquire(self):
        # announce the instrument, then take the DMM configured for DC volts
        text_to_speech_async(self.instrument_name)
        self.dmm = virtualbench.acquire_digital_multimeter();
        self.dmm.configure_measurement(DmmFunction.DC_VOLTS, True, 10.0)
    def release(self):
        # hand the DMM back; the background worker's next read will fail
        # (dmm is None) and its thread will exit
        self.dmm.release()
        self.dmm = None
    def run_instrument(self):
        # polling loop: read once a second and speak readings that moved by
        # more than 1 V since the last spoken value
        previous_value = -1; # Invalid to force read output
        status = Status.SUCCESS
        while (True):
            try:
                sleep(1)
                current_value = math.fabs(self.dmm.read())
                if (math.fabs(previous_value - current_value) > 1):
                    text_to_speech_async("%.2f volts" % current_value)
                    previous_value = current_value
                status = Status.SUCCESS
            except PyVirtualBenchException as e:
                # only react to a *new* overrange condition so the same
                # warning is not acted on every iteration
                if (status != e.status and e.status == Status.WARNING_DMM_OVERRANGE):
                    previous_value = -1; # Invalid to force read output
                status = e.status
                # anything other than overrange is a real error
                if (e.status != Status.WARNING_DMM_OVERRANGE):
                    raise PyVirtualBenchException(e)
class VirtualBenchDmmResistanceInstrument:
    """Virtual instrument: DMM in resistance mode with spoken readings.

    BUG FIX: the spoken unit was misspelled "olms" in all three output
    strings; corrected to "ohms" so the text-to-speech output is right.
    (The olm_to_str method name is kept for interface compatibility.)
    """
    def __init__(self):
        global virtualbench
        # spoken when this instrument is selected
        self.instrument_name = "dmm resistance"
        self.dmm = None
    def acquire(self):
        """Announce the instrument and take the DMM configured for resistance."""
        text_to_speech_async(self.instrument_name)
        self.dmm = virtualbench.acquire_digital_multimeter()
        self.dmm.configure_measurement(DmmFunction.RESISTANCE)
    def release(self):
        # hand the DMM back; the background worker's next read will fail
        # (dmm is None) and its thread will exit
        self.dmm.release()
        self.dmm = None
    def run_instrument(self):
        """Poll every 2 s and speak readings that moved by more than 1 ohm."""
        previous_value = -1  # invalid value to force the first reading to be spoken
        status = Status.SUCCESS
        while True:
            try:
                sleep(2)
                current_value = math.fabs(self.dmm.read())
                if math.fabs(previous_value - current_value) > 1:
                    text_to_speech_async(self.olm_to_str(current_value))
                    previous_value = current_value
                status = Status.SUCCESS
            except PyVirtualBenchException as e:
                # Do check so we don't repeat the same error over and over
                if status != e.status and e.status == Status.WARNING_DMM_OVERRANGE:
                    text_to_speech_async("probes not connected")
                    previous_value = -1  # invalid value to force the next good reading to be spoken
                status = e.status
                if e.status != Status.WARNING_DMM_OVERRANGE:
                    raise PyVirtualBenchException(e)
    def olm_to_str(self, value):
        """Render a resistance (in ohms) as a human-speakable string with
        mega/kilo scaling."""
        if (value / 1000000) > 1:
            return "%.2f mega ohms" % (value / 1000000)
        if (value / 1000) > 1:
            return "%.2f kilo ohms" % (value / 1000)
        return "%.2f ohms" % value
class VirtualBenchContinuityInstrument:
    # Virtual instrument: continuity tester built on the DMM's resistance
    # mode; beeps continuously while the probes see < 100 ohms.
    def __init__(self):
        global virtualbench;
        # spoken when this instrument is selected
        self.instrument_name = "continuity"
        self.dmm = None
        # tracks whether the looping beep is currently playing
        self.is_beeping = False
    def acquire(self):
        # announce the instrument, then take the DMM in resistance mode
        text_to_speech_async(self.instrument_name)
        self.dmm = virtualbench.acquire_digital_multimeter()
        self.dmm.configure_measurement(DmmFunction.RESISTANCE)
    def release(self):
        # hand the DMM back; the background worker's next read will fail
        # (dmm is None) and its thread will exit
        self.dmm.release()
        self.dmm = None
        self.is_beeping = False
    def run_instrument(self):
        # tight polling loop: start the looping beep when resistance drops
        # below the continuity threshold, stop it on any DMM error/overrange
        while (True):
            try:
                if (math.fabs(self.dmm.read()) < 100 and self.is_beeping == False): # 100 Ohms continuity threshold
                    winsound.PlaySound('continuity_beep.wav', winsound.SND_FILENAME | winsound.SND_ASYNC | winsound.SND_LOOP)
                    self.is_beeping = True;
            except PyVirtualBenchException as e:
                # open circuit (overrange) or released handle: silence the beep
                winsound.PlaySound(None, 0) # Stops audio from playing
                self.is_beeping = False;
                if (e.status != Status.WARNING_DMM_OVERRANGE):
                    raise PyVirtualBenchException(e)
# global 'instruments' contains the instruments we care about for this application.
# Note: users should change this to instruments that make sense for their application.
# The left foot pedal cycles through this list in order (see footpedal_handler).
instruments = [VirtualBenchDmmDcVoltageInstrument(), VirtualBenchDmmResistanceInstrument(), VirtualBenchContinuityInstrument()]
def footpedal_handler(data):
    """HID raw-data callback for the VEC USB footpedal.

    The pedal state arrives as a bitmask in data[1] (bit 0 = left,
    bit 1 = middle, bit 2 = right). The left pedal cycles to the next
    virtual instrument; the other pedals are unassigned.
    """
    global instruments
    global selected_instrument_index
    pedal_bits = data[1]
    left_pedal_pressed = pedal_bits & 1
    middle_pedal_pressed = pedal_bits & 2
    right_pedal_pressed = pedal_bits & 4
    if left_pedal_pressed:
        # Release the currently selected instrument. Its run_instrument
        # background thread will exit on its own because the instrument
        # handle becomes invalid.
        instruments[selected_instrument_index].release()
        # Advance to the next instrument, wrapping around the list.
        selected_instrument_index = (selected_instrument_index + 1) % len(instruments)
        # Acquire the new instrument and spin up a fresh worker thread.
        next_instrument = instruments[selected_instrument_index]
        next_instrument.acquire()
        Thread(target=background_worker, args=(next_instrument,)).start()
    if middle_pedal_pressed or right_pedal_pressed:
        text_to_speech_async("pedal functionality not defined")
# For this application, we use a Infinity USB Digital Foot Control with Computer plug (IN-USB2)
# This hardware device can be found on amazon.com for approximately $50 USD
# Link: http://www.amazon.com/Infinity-Digital-Control-Computer--USB2/dp/B002MY6I7G
def find_vec_usb_footpedal():
    """Return the first connected VEC USB footpedal HID device.

    Matches on the VEC vendor/product IDs; raises if no pedal is plugged in.
    """
    for device in hid.find_all_hid_devices():
        if device.vendor_id == 0x05f3 and device.product_id == 0x00ff:
            print("Found VEC USB Footpedal")
            return device
    raise Exception("VEC USB Footpedal not found")
if __name__ == '__main__':
    # BUG FIX: 'footpedal' must be pre-bound; the original referenced it
    # unconditionally in the finally block, so if find_vec_usb_footpedal()
    # raised, the finally block itself raised NameError and masked the
    # original error.
    footpedal = None
    try:
        footpedal = find_vec_usb_footpedal()
        # When starting the application, we default the instrument to the first
        # instrument in the global 'instruments' list
        current_instrument = instruments[selected_instrument_index]
        current_instrument.acquire()
        thread = Thread(target=background_worker, args=(current_instrument,))
        thread.start()
        footpedal.open()
        footpedal.set_raw_data_handler(footpedal_handler)
        print("\nWaiting for data...\nPress any (system keyboard) key to stop...")
        while not kbhit() and footpedal.is_plugged():
            # just keep the device opened to receive events
            sleep(0.5)
    finally:
        # only close the pedal if we actually found one
        if footpedal is not None:
            footpedal.close()
        # always hand the hardware back to the VirtualBench driver
        virtualbench.release()
|
import numpy as np
from scipy.spatial.distance import cdist
from plaster.tools.image import imops
from plaster.tools.image.coord import YX, HW
from plaster.tools.schema import check
from plaster.tools.utils.stats import half_nanstd
from plaster.tools.zlog.zlog import spy
def pixel_peak_find_one_im(im, approx_psf):
    """
    Peak find on a single image with 1 pixel accuracy.
    (compare to subpixel_peak_find_one_im)
    Arguments:
        im: the image to peak find
        approx_psf: An estimated PSF search kernel
    Returns:
        locs: ndarray (n_peaks_found, 2) where the 2 is in (y,x) order
    """
    from skimage.feature import peak_local_max  # Defer slow import

    # estimate the background noise level from one half of the distribution
    std = half_nanstd(im.flatten())
    # This assert is to catch bad tests that pass-in noise-free
    # background, which means the bg std can not be estimated
    # and therefore will cause many false peaks.
    assert std > 0.1, "The std is suspiciously small on a pixel_peak_find_one_im"

    # Tuning thresh:
    #   I initially had it set at 2 * std.
    #   Later, in synthetic 2 count data without a bandpass filter
    #   I found that I was losing too many 1 counts so I tried 1 * std
    #   but I found that when I put the band-pass back in that 2 * std
    #   seemed right again. We probably should find an objective way
    #   to determine this.
    thresh = 2 * std

    # matched-filter the image; NaNs are filled with the median so the
    # convolution stays finite
    cim = imops.convolve(np.nan_to_num(im, nan=float(np.nanmedian(im))), approx_psf)

    # CLEAN the edges
    # ZBS: Added because there were often edge effects from the convolution
    # that created false stray edge peaks.
    imops.edge_fill(cim, approx_psf.shape[0])

    # The background is well-described by the histogram centered
    # around zero thanks to the fact that im and kern are expected
    # to be roughly zero-centered. Therefore we estimate the threshold
    # by using the samples less than zero cim[cim<0]
    # NOTE(review): the branch condition checks for the presence of
    # negative samples (a zero-centered background); when none exist the
    # image is presumed anomalous and no peaks are returned -- confirm
    # this intent with the author.
    if (cim < 0).sum() > 0:
        cim[cim < thresh] = 0
        return peak_local_max(cim, min_distance=2, threshold_abs=thresh)
    else:
        return np.zeros((0, 2))
def _pixel_to_subpixel_one_im(im, peak_dim, locs):
    """
    Refine integer peak locations to sub-pixel accuracy.

    locs arrives with *pixel* accuracy (integer positions). For each peak we
    extract a square sub-image of shape peak_dim whose lower-left corner is
    the integer location minus half the (odd) peak width, compute the
    center-of-mass of the squared sub-image, and add that COM back onto the
    corner. Example: peak_dim (11, 11) gives a half-width of 5; a true peak
    at (17.5, 17.5) found at integer (17, 17) is cropped at (12:23, 12:23),
    its COM comes back as (5.5, 5.5) relative to the crop, and
    (17 - 5) + 5.5 recovers (17.5, 17.5).
    """
    check.array_t(locs, dtype=int)
    assert peak_dim[0] == peak_dim[1]
    half_width = peak_dim[0] // 2
    corner_locs = locs - half_width
    refined = np.zeros(locs.shape)
    for peak_i, corner in enumerate(corner_locs):
        sub_im = imops.crop(im, off=YX(corner), dim=peak_dim, center=False)
        # squaring sharpens the COM toward the brightest pixels
        refined[peak_i] = imops.com(sub_im ** 2)
    return corner_locs + refined
def peak_find_chcy_ims(chcy_ims, approx_psf, cycle_i, subpixel=True):
    """
    Peak find each channel of cycle `cycle_i` independently, then union the
    per-channel peak lists and de-dupe them with an approximate distance
    measure (peaks within 1 pixel of each other are considered duplicates).

    This replaces an earlier approach that peak-found a channel-balanced
    mean image, since early-stage channel balancing proved problematic.

    If subpixel is not True, uses the faster pixel-only accuracy.
    Returns:
        locs: ndarray (n_peaks, 2) where the second dim is in (y, x) order
    """
    # TODO: consider using more than one cycle to improve the sub-pixel
    # estimate, discarding peaks that drift after cycle 1?
    n_channels = chcy_ims.shape[0]
    per_channel_locs = []
    for ch_i in range(n_channels):
        im = chcy_ims[ch_i, cycle_i, :, :]
        try:
            ch_locs = pixel_peak_find_one_im(im, approx_psf)
            if subpixel:
                ch_locs = _pixel_to_subpixel_one_im(
                    im, HW(approx_psf.shape), ch_locs.astype(int)
                )
        except Exception:
            # Failure during peak find: record no peaks for this frame.
            ch_locs = np.zeros((0, 2))
        per_channel_locs.append(ch_locs)

    union_locs = np.vstack(per_channel_locs)
    n_locs = union_locs.shape[0]
    if n_locs == 0:
        return np.zeros((0, 2))

    # Pairwise distances; self-distances are set large so a peak is never
    # its own nearest neighbor.
    dists = cdist(union_locs, union_locs, "euclidean")
    diag = np.arange(n_locs)
    dists[diag, diag] = 1e6
    nearest_i = np.argmin(dists, axis=1)
    nearest_d = dists[diag, nearest_i]

    # Any peak whose nearest neighbor is within 1 pixel is a dupe candidate.
    dupe_mask = nearest_d <= 1.0
    partner_iz = nearest_i[dupe_mask]
    member_iz = diag[dupe_mask]
    lone_iz = diag[~dupe_mask]  # peaks seen in only one channel

    # From each duplicate pair keep exactly one: the lower index.
    kept_iz = np.where(partner_iz < member_iz, partner_iz, member_iz)
    kept_iz = np.concatenate((kept_iz, lone_iz))
    return union_locs[np.unique(kept_iz)]
def peak_find_chcy_ims_fast(chcy_ims, approx_psf, cycle_i, subpixel=True):
    """
    Unlike peak_find_chcy_ims, this assumes channel balance is working well
    enough that we can mean over the channels and peak find only once.

    Arguments:
        chcy_ims: (n_channels, n_cycles, h, w) image stack
        approx_psf: an estimated PSF search kernel
        cycle_i: index of the cycle to peak find on
        subpixel: when True, refine peak locations to sub-pixel accuracy

    Returns:
        locs: ndarray (n_peaks, 2) where the second dim is in (y, x) order
    """
    # BUG FIX: removed two leftover debug np.save() calls that dumped
    # intermediate arrays to the hard-coded path /erisyon/internal/, which
    # crashes on any machine where that directory does not exist and
    # litters files where it does. (Also dropped the unused n_channels.)
    im = np.mean(chcy_ims[:, cycle_i, :, :], axis=0)
    try:
        locs = pixel_peak_find_one_im(im, approx_psf)
        if subpixel:
            locs = _pixel_to_subpixel_one_im(im, HW(approx_psf.shape), locs.astype(int))
    except Exception:
        # Failure during peak find: no peaks recorded for this frame.
        locs = np.zeros((0, 2))
    return locs
|
#!/usr/bin/env python3
# @lc app=leetcode.cn id=706 lang=python3
#
# [706] Design HashMap
#
# https://leetcode-cn.com/problems/design-hashmap/description/
#
# algorithms
# Easy (58.76%)
# Total Accepted: 35.5K
# Total Submissions: 56.1K
# Testcase Example: '["MyHashMap","put","put","get","get","put","get","remove","get"]\n' + '[[],[1,1],[2,2],[1],[3],[2,1],[2],[2],[2]]'
#
# 不使用任何内建的哈希表库设计一个哈希映射(HashMap)。
# 实现 MyHashMap 类:
# MyHashMap() 用空映射初始化对象
# void put(int key, int value) 向 HashMap 插入一个键值对 (key, value) 。如果 key
# 已经存在于映射中,则更新其对应的值 value 。
# int get(int key) 返回特定的 key 所映射的 value ;如果映射中不包含 key 的映射,返回 -1 。
# void remove(key) 如果映射中存在 key 的映射,则移除 key 和它所对应的 value 。
# 示例:
# 输入:
# ["MyHashMap", "put", "put", "get", "get", "put", "get", "remove", "get"]
# [[], [1, 1], [2, 2], [1], [3], [2, 1], [2], [2], [2]]
# 输出:
# [null, null, null, 1, -1, null, 1, null, -1]
# 解释:
# MyHashMap myHashMap = new MyHashMap();
# myHashMap.put(1, 1); // myHashMap 现在为 [[1,1]]
# myHashMap.put(2, 2); // myHashMap 现在为 [[1,1], [2,2]]
# myHashMap.get(1); // 返回 1 ,myHashMap 现在为 [[1,1], [2,2]]
# myHashMap.get(3); // 返回 -1(未找到),myHashMap 现在为 [[1,1], [2,2]]
# myHashMap.put(2, 1); // myHashMap 现在为 [[1,1], [2,1]](更新已有的值)
# myHashMap.get(2); // 返回 1 ,myHashMap 现在为 [[1,1], [2,1]]
# myHashMap.remove(2); // 删除键为 2 的数据,myHashMap 现在为 [[1,1]]
# myHashMap.get(2); // 返回 -1(未找到),myHashMap 现在为 [[1,1]]
# 提示:
# 0
# 最多调用 10^4 次 put、get 和 remove 方法
# 进阶:你能否不使用内置的 HashMap 库解决此问题?
class MyHashMap:
    def __init__(self):
        """
        Initialize the map.

        Keys (0..10^6 per the problem constraints) are split across 1000
        buckets of 1001 slots each; a bucket's slot array is allocated
        lazily on first write to keep the initial footprint small.
        """
        self.buckets = 1000
        self.items_per_bucket = 1001
        self.table = [[] for _ in range(self.buckets)]

    def hash(self, key):
        # Bucket index for this key.
        return key % self.buckets

    def pos(self, key):
        # Slot index of this key within its bucket.
        return key // self.buckets

    def put(self, key: int, value: int) -> None:
        """
        Insert or update the mapping for `key`. Values are non-negative.
        """
        row = self.table[self.hash(key)]
        if not row:
            row = [None] * self.items_per_bucket
            self.table[self.hash(key)] = row
        row[self.pos(key)] = value

    def get(self, key: int) -> int:
        """
        Return the value mapped to `key`, or -1 if there is no mapping.
        """
        row = self.table[self.hash(key)]
        if row:
            stored = row[self.pos(key)]
            if stored is not None:
                return stored
        return -1

    def remove(self, key: int) -> None:
        """
        Remove the mapping for `key` if present (slot reset to None).
        """
        row = self.table[self.hash(key)]
        if row:
            row[self.pos(key)] = None
|
# tests/tasks/test_empathetic_dialogues.py
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import unittest
from parlai.core.opt import Opt
from parlai.tasks.empathetic_dialogues.agents import (
EmotionClassificationSituationTeacher,
EmpatheticDialoguesTeacher,
)
from parlai.utils import testing as testing_utils
# Expected episode counts per datatype, taken from the original internal
# copy of the data (see the grep commands in TestEDTeacher's docstring).
EPISODE_COUNTS = {
    'train_experiencer_only': 19531,
    'train_both_sides': 39057,
    'valid': 2769,
    'test': 2547,
}
# Expected example (utterance) counts per datatype, from the same source.
EXAMPLE_COUNTS = {
    'train_experiencer_only': 40254,
    'train_both_sides': 64636,
    'valid': 5738,
    'test': 5259,
}
class TestEDTeacher(unittest.TestCase):
    """
    Basic tests to count the number of examples/episodes and to check a few utterances.
    # Counting num episodes (from the original internal copy of the data)
    cat /checkpoint/parlai/tasks/empathetic_dialogues/train.csv | grep -E '^hit:[0-9]+_conv:[0-9]+,2' | wc # 19531
    cat /checkpoint/parlai/tasks/empathetic_dialogues/train.csv | grep -E '^hit:[0-9]+_conv:[0-9]+,(2|3)' | wc # 39057
    cat /checkpoint/parlai/tasks/empathetic_dialogues/valid_random_cands.csv | grep -E '^hit:[0-9]+_conv:[0-9]+,2' | wc # 2769
    cat /checkpoint/parlai/tasks/empathetic_dialogues/test_random_cands.csv | grep -E '^hit:[0-9]+_conv:[0-9]+,2' | wc # 2547
    # We count the number of lines with turn_idx=2 because this means that we have at
    # least one full utterance in the conversation. For train_experiencer_only==False,
    # we also include turn_idx=3 to count the Listener-based conversations in the same
    # manner.
    # Count num examples (from the original internal copy of the data)
    grep -E 'hit:[0-9]+_conv:[0-9]+,(2|4|6|8|10|12),' /checkpoint/parlai/tasks/empathetic_dialogues/train.csv | wc # 40254
    grep -E 'hit:[0-9]+_conv:[0-9]+,(2|3|4|5|6|7|8|9|10|11|12),' /checkpoint/parlai/tasks/empathetic_dialogues/train.csv | wc # 64636 (--train-experiencer-only False)
    grep -E 'hit:[0-9]+_conv:[0-9]+,(2|4|6|8|10|12),' /checkpoint/parlai/tasks/empathetic_dialogues/valid_random_cands.csv | wc # 5738
    grep -E 'hit:[0-9]+_conv:[0-9]+,(2|4|6|8|10|12),' /checkpoint/parlai/tasks/empathetic_dialogues/test_random_cands.csv | wc # 5259
    """

    def test_counts(self):
        """
        Check num_episodes()/num_examples() against the expected counts for
        both teacher classes, across datatypes and the
        train_experiencer_only flag.

        NOTE(review): constructing a teacher with a fresh tempdir datapath
        presumably triggers a dataset download — confirm this test is meant
        to touch the network.
        """
        with testing_utils.tempdir() as tmpdir:
            data_path = tmpdir
            # Check EmpatheticDialoguesTeacher, with multiple examples per episode
            opts_episodes_and_examples = [
                (
                    {'datatype': 'train'},
                    EPISODE_COUNTS['train_both_sides'],
                    EXAMPLE_COUNTS['train_both_sides'],
                ),  # Test the default mode
                (
                    {'datatype': 'train', 'train_experiencer_only': True},
                    EPISODE_COUNTS['train_experiencer_only'],
                    EXAMPLE_COUNTS['train_experiencer_only'],
                ),
                (
                    {'datatype': 'train', 'train_experiencer_only': False},
                    EPISODE_COUNTS['train_both_sides'],
                    EXAMPLE_COUNTS['train_both_sides'],
                ),
                (
                    {'datatype': 'valid'},
                    EPISODE_COUNTS['valid'],
                    EXAMPLE_COUNTS['valid'],
                ),
                ({'datatype': 'test'}, EPISODE_COUNTS['test'], EXAMPLE_COUNTS['test']),
            ]
            for teacher_class in [EmpatheticDialoguesTeacher]:
                for opt, num_episodes, num_examples in opts_episodes_and_examples:
                    full_opt = Opt({**opt, 'datapath': data_path})
                    teacher = teacher_class(full_opt)
                    self.assertEqual(teacher.num_episodes(), num_episodes)
                    self.assertEqual(teacher.num_examples(), num_examples)
            # Check EmotionClassificationSituationTeacher, with one example per episode
            train_episode_count = EPISODE_COUNTS['train_experiencer_only']
            # For the situation classifier, we only want to have one episode per train
            # conversation
            opts_episodes = [
                ({'datatype': 'train'}, train_episode_count),  # Test the default mode
                (
                    {'datatype': 'train', 'train_experiencer_only': True},
                    train_episode_count,
                ),
                (
                    {'datatype': 'train', 'train_experiencer_only': False},
                    train_episode_count,
                ),
                ({'datatype': 'valid'}, EPISODE_COUNTS['valid']),
                ({'datatype': 'test'}, EPISODE_COUNTS['test']),
            ]
            for teacher_class in [EmotionClassificationSituationTeacher]:
                for opt, num_episodes in opts_episodes:
                    full_opt = Opt({**opt, 'datapath': data_path})
                    teacher = teacher_class(full_opt)
                    self.assertEqual(teacher.num_episodes(), num_episodes)
                    self.assertEqual(teacher.num_examples(), num_episodes)

    def test_check_examples(self):
        """
        Spot-check one complete example dict per datatype: episode_idx=1 /
        entry_idx=1 for EmpatheticDialoguesTeacher, and episode_idx=1 for
        EmotionClassificationSituationTeacher. The large label_candidates
        lists below are fixture data copied verbatim from the dataset.
        """
        with testing_utils.tempdir() as tmpdir:
            data_path = tmpdir
            # Check EmpatheticDialoguesTeacher
            opts_and_examples = [
                (
                    {'datatype': 'train', 'train_experiencer_only': True},
                    {
                        'situation': ' i used to scare for darkness',
                        'emotion': 'afraid',
                        'text': 'dont you feel so.. its a wonder ',
                        'labels': [
                            'I do actually hit blank walls a lot of times but i get by'
                        ],
                        'prepend_ctx': None,
                        'prepend_cand': None,
                        'deepmoji_ctx': None,
                        'deepmoji_cand': None,
                        'episode_done': False,
                        'label_candidates': [],
                    },
                ),
                (
                    {'datatype': 'train', 'train_experiencer_only': False},
                    {
                        'situation': 'I remember going to the fireworks with my best friend. There was a lot of people, but it only felt like us in the world.',
                        'emotion': 'sentimental',
                        'text': 'Where has she gone?',
                        'labels': ['We no longer talk.'],
                        'prepend_ctx': None,
                        'prepend_cand': None,
                        'deepmoji_ctx': None,
                        'deepmoji_cand': None,
                        'episode_done': True,
                        'label_candidates': [],
                    },
                ),
                (
                    {'datatype': 'valid'},
                    {
                        'situation': 'I was walking through my hallway a few week ago, and my son was hiding under the table and grabbed my ankle. I thought i was got. ',
                        'emotion': 'surprised',
                        'text': 'I may have let out a scream that will have him question my manhood for the rest of our lives, lol. ',
                        'labels': ['I would probably scream also.'],
                        'prepend_ctx': None,
                        'prepend_cand': None,
                        'deepmoji_ctx': None,
                        'deepmoji_cand': None,
                        'episode_done': True,
                        'label_candidates': [
                            "That really does make it special. I'm glad you have that. ",
                            "It must've have been. Glad they are okay now.",
                            "Well sometimes companies make mistakes. I doubt it's anything you did.",
                            "Oh no, I'm so so sorry. I've always had at least one pet throughout my life, and they're truly part of the family.",
                            'Wow. That must suck. Do you like the band incubus? I missed them a couple of times but I saw them this year',
                            "I can't play those kinds of games. Too spooky for me.",
                            'I think your boss should give you more recognition in that case!',
                            "That's always a good thing. It means you should get on great with your neighbors.",
                            "Yeah, I had my Commodore 64 and Amiga in the late 80's. Still, the games were great when they worked!",
                            "That's ok, you did the right thing. It probably happens to lots of people.",
                            "That's good. Now you don't have to worry about it.",
                            'Hopefully one day you will be willing to explore a relationship in a serious way.',
                            "I'm sorry, things will get better.",
                            'Oh, okay. Maybe you should ask your teacher for some extra help or find a study buddy. i hope you do better next time.',
                            'Why? What did she do?',
                            'I do enjoy the zoo and the animals. I think they could be just as good.',
                            'Well at least you managed to save him!',
                            'That sucks, how much is it?',
                            'Yeah, that is a hard one to deal with. Maybe you should give it back so you will not feel bad about yourself.',
                            'HAve you been practicing? Do you have note cards?',
                            "That's good news at least. I hope you are feeling better now. And don't be hard on yourself, accidents happen.",
                            'Oops. I hate when that happens. Did they say anything to you?',
                            'no its not',
                            'Yes, my friends are coming with me. :)',
                            "Oh my gosh! I'm sorry! haha Thats funny. Atleast you have them a story to always remember.;)",
                            'I am so happy for you! All of your hard work paid off!',
                            'Wow, thats a nice car',
                            "Does it make you feel like you're living in an alternate reality?",
                            'glade all was well',
                            'ah, crisis averted! that could have been a lot worse',
                            "Maybe if we weren't so attached to being creatures of comfort. Some things we just can't let go of, wouldn't exist without some poor shmuck having to do the dirty work. I guess we're all that shmuck to someone, someway or another.",
                            "That's awesome! You're going to be rolling in the dough with those skills",
                            "Don't worry, from what you said it doesn't sound like you almost ruined it. It wasn't something on purpose at least.",
                            'Have you tried yoga? It can help in the meanwhile till you get a proper vacation.',
                            "I wish my insurance would give me something like that! It's good to go anyways.",
                            'I bet you are pretty anxious and excited at the same time.',
                            'Do you honk at them?',
                            "That's a bad supervisor. Did you call him/her out on it?",
                            "Geniuses don't do chores my friend.",
                            'Which country? that sounds fun, are you guys doing anything fun there?',
                            'oh that is so exciting!!! good for you man!',
                            'Wow! Any way they can get out? Did they call someone?',
                            'I love that nostalgic feeling. ',
                            'Congratulations. You have done great!',
                            'hahaha I definitely admire your courage to have done that.',
                            'wait til she leaves and continue',
                            'I do too. I am so sorry you are going through this',
                            'That is awesome. Congratulations. Im sure you earned every penny.',
                            'I want some of whatever you had for breakfast. You seem very happy.',
                            'Oh wow! I am so sorry that happened to you.',
                            'Well, hopefully there will be nothing but great things for him in his future.',
                            'Oh that was so nice of them! I bet you were relieved!',
                            'how was it ?',
                            "Nice! Why do you like it more than other places you've lived?",
                            'It must be difficult, do you think she will come back ever?',
                            "That's so messed up! Why was he doing that?",
                            'Did you try therapy at all or counseling?',
                            'Did you reconnect and promise to keep in touch?',
                            'I am so sorry for you. Perhaps you can hang with her after your workdays?',
                            "That's good that you found happiness. That's what were all in the pursuit of right?",
                            'I hope these feelings last for you!',
                            'you have eyes too',
                            "Wow, that's rude! He won't last long...",
                            "Hopefully the person learned what they had to do so they don't hold the line up in the future.",
                            'Oh no that must have been a terrible experience, I hope no one got hurt from the shattered glass.',
                            "Congrats!, I'm sure you must be very happy!",
                            "That's good to know! You all have a lot to be thankful for!",
                            'It depends, if you love her, you could try to work it out. Or you could cheat on her too',
                            "I'm sorry to hear that, I'm pretty terrified of the dentist myself. Ask for gas! Good luck, I'm sure everything will be just fine.",
                            'That makes sense, you are a good older sibling!',
                            'They say dogs are mans best friend. ',
                            'I would probably scream also.',
                            'Well I hope he gets to felling better.',
                            "If I had a bag of M&M's right now I would eat them for sure!",
                            'Yep. Happy and healthy is a blessing',
                            'Wow was it a scam or was it legit?',
                            'that is good to hear, it was a motivation to succeed, a great character building ',
                            'Its not time to get over it, you arent doing any wrong its okay to "feel" things. I hope people around you give you a lot of love! ',
                            'Awww. Did you keep it?',
                            "Oh I see.. Well that's a pretty positive talent then, huh? Maybe you should encourage him to keep doing it. Maybe he misses it. You could get him a present for his birthday or Christmas that was related to drawing tools/pencils and all that.",
                            "You learn a lot about someone when you move in with them, so if you feel comfortable in your relationship I think that's actually rather prudent.",
                            'That is terrible. How long have you had this pet?',
                            'Oh that sucks...did she explain herself yet',
                            "8 Miles!? That's very impressive. I bet I could barely make it a mile!",
                            'That stuff is pretty expensive. Maybe you can sell it on eBay or something.',
                            'Its horrible to have to got through things like thaty',
                            'Oh god.. so sorry to hear that.. May i ask how did Tom pass?',
                            'Like a paranormal type fear or a human with intent to harm type fear?',
                            'I bet you cant wait. WHere are going for your vacation?',
                            'Aw, that sucks. Did you give her a proper burial?',
                            'Awesome! What are you going to see?',
                            'What kind of food does it serve? Sounds wonderful!',
                            "Oh no! What's wrong with your dad?",
                            'oh god yes i know what you mean, any ideas what you wanna do ?',
                            "Hopefully you'll able to get it all sorted out soon. I'm sure when it's done it'll be a beautiful house.",
                            'That would be bad you should double check before you leave',
                            'I hope he continues to do well.',
                            "You can only do so much. Next time I'd just let him drink on his own.",
                            'I am sure you will meet them',
                            'Wow thats nice. What do you drive?',
                        ],
                    },
                ),
                (
                    {'datatype': 'test'},
                    {
                        'situation': 'My mother stopped by my house one day and said she saw 3 dogs on the road, down from our house. They were starving, with ribs showing, and it was a mother dog and her two small puppies. Of course, my daughter wanted to bring them to our house, so we could feed and help them. We did, and my heart went out to them, as they were so sweet, but really were in a bad shape.',
                        'emotion': 'caring',
                        'text': "Oh my goodness, that's very scary! I hope you are okay now and the drunk driver was punished for his actions?",
                        'labels': ['Yeah he was punished hes in jail still'],
                        'prepend_ctx': None,
                        'prepend_cand': None,
                        'deepmoji_ctx': None,
                        'deepmoji_cand': None,
                        'episode_done': True,
                        'label_candidates': [
                            "Are you really able to work from home? Finding a gig is so difficult, I'm glad that it is working for you.",
                            "Oh no. That's quite unfortunate for the deer. Did you just drive past it?",
                            'Wow, you must have felt jealous',
                            'I can only imagine! How is he now?',
                            'Oh goodness, what happened for the past 3 weeks?',
                            'LOL i hate that',
                            'I love a warm fire outside while camping! Sounds like a great time.',
                            'Yeah he was punished hes in jail still',
                            'Was he upset?',
                            "Wow that's awesome! Are you on a team?",
                            'Oh man that is just crazy! Feel bad for the person who has to clean it.',
                            'im sorry, thats awful. its a shame his parents arent being more supportive',
                            'I bet that was scary. Did he surprise you with something?',
                            'That sounds pretty stressful. Are you moving soon?',
                            "Well, if I were you, I'd keep it up, whether or not my spouse laughed at me, or a new girlfriend/boyfriend, whatever. It's not childish to me. Life is stressful enough. Let us snuggle what we want.",
                            "That's hilarious! Is he usually down for a good prank?",
                            'Oh I love seeing kids achieve things! Adorable. Good for her! :) ',
                            'that makes two of us! i am terrified of all snakes',
                            'that is dangerous, glad that both of your are okay',
                            "That's good to hear. I hope I meet someone that will do that for me.",
                            "Well that's good.",
                            'We need more people like you in the world. Theres always someone out there who needs a helping hand and could use a friend.',
                            'How ever so exciting! is this your first cruise?',
                            'Do you feel any less nervous? Job interviews are always nerve-wracking ',
                            'Maybe you could try to better that?',
                            "That's what matters most, that you had a good time and made memories!",
                            'Oh man! I hear you. I rescue animals and it is VERY hard to potty train them!',
                            'Hopefully they will give him a better shift.',
                            "That's a big step. I hope it works out for you.",
                            "Hiking is probably a tough environment to meet people! LA is real nice, but I hear the people there aren't/",
                            'I hope things turn out better for you. Keep fighting.',
                            "please don't lol i'm a man, I appreciate what you women go through when you're pregnant or on your period but i'm okay with not knowing details",
                            'I wish refrigerators would have a warranty that replaced food when they went bad. ',
                            'Seeing old friends that you have not contacted in so long is a nice feeling. ',
                            'Cool. Will you leave it there forever?',
                            'Oh wow. How far away did you move??',
                            'So inconsiderate! It reminds me of my neighbours doing building work one morning at 6AM!',
                            'Oh no, did they do something embarrasing?',
                            'That is awesome! Is there a particular reason you are so happy?',
                            'Did you buy all the essential items the dog will need? ',
                            'Fantastic, now do you have a job lined up?',
                            'Better luck next time! I love to scratch!',
                            'Thats neat. Do you guys make a lot of money?',
                            'I would be furious. What did you do?',
                            "Well hopefully you're able to familiarize yourself quickly. Good luck!",
                            'Oh thats good for your friend, but it sounds like you really would like to live there! I can imagine feeling jealous',
                            "That's unfortunate. What are you doing now?",
                            'Oh no. I rent also so I know your pain. My last landlord was awful. How did your landlord react?',
                            'Im sorry to hear that, how long did you have him?',
                            'Lovely. What did you do together?',
                            'Have you thought about getting another dog? ',
                            'Oh yeah? Do you still look awesome like you did back then?',
                            'Do you dress up when the new movies come out also?',
                            "That's a shame. I hate it when a place doesn't live up to the hype.",
                            "Sometimes life isn't very fair. I like to think of it as motivation to get a better job.",
                            'Well at least you have a plan. Are you planning to start the renovation soon?',
                            "Kids pick those things up quickly. And it'll help with her hand-eye coordination, reading - all sorts of things! ",
                            'did you enjoy yourself ',
                            'Haha, how did she feel when she found out?',
                            'that would really help if it was a permanent solution',
                            "Wow that must have been frustrating. I hope it didn't cost too much.",
                            'How nice of her. You must have been so happy to see her.',
                            "I know it's hard, but practice makes perfect! Keep trying and I am sure you will get it!",
                            'Do they live in a different state than you?',
                            "It reallyi s the best way to do things. That way even if you forget something you've got time to remember and remedy the situation",
                            'Wow, must have been rather frightening. Glad you are ok!',
                            'That was really nice! What a wonderful surprise! This act of kindness helps to restore my faith in humanity.',
                            "It's located in a small farming town in Vermont. I went on a tour of their factory once and it was interesting to see the cheese being made.",
                            "Poor guy was just nervous, I'm sure the more you take him the more he will venture away from you and have some fun!",
                            'How did he scare you?',
                            "Isn't that what sisters are for? What were you guys upset about?",
                            'That is a long time to be in the car for sure.',
                            "I'm glad to hear... Weddings can be stressful",
                            "That sounds amazing! How'd you guys meet?",
                            "Getting out of town and way from all of everday life's struggles always sounds like a great time. Did you leave you cell phone at home while you weer away to 'really' get away from everything for a minute?",
                            'Man that is scary! Granted i like to hear things about that. ',
                            'Yikes! Was anything damaged? ',
                            'Ouch, I would try and wear something on your neck next time you go in there.',
                            'awesome! was it hold em?',
                            'Not me! haha I love them all!',
                            "Oh that's nice, I love doing that. Did the cat seem happy?",
                            "Yeah, I can imagine. At least it's only one week!",
                            'Ew, I hate spiders. We are in the process of getting them out of our garage.',
                            "That's great news. I don't know what I would do if my mom passed.",
                            'Is that like the child equivalent of under the bed?',
                            "That's really fantastic! I'm glad to hear you turned your life around. ",
                            'What kind of work do you do?',
                            'Ah ok I undestand.',
                            'Very sad to hear. You have a good heart and are very caring, that is something to atleast be proud of!',
                            'Man that sounds really stressful...',
                            'You are so strong! Please thank your husband for his service and thank you for being his support, no matter the miles between you. Take care of yourself and get out with friends when you can!',
                            'I see. Is it your favorite food now? :p',
                            'YAY! good job! He/she is going to be beautiful',
                            'Nothing went wrong, we just have different lives in different places. I go visit every now and then.',
                            "A spelling bee - what fun! I'm sure you will win - I bet you've worked hard toward your goal.",
                            'You should install security cameras outside your house.',
                            'Border collie. She was great!',
                            'Oh dear me.. So sorry to hear that! what did you do?',
                            'Praise God man! He really is amazing and we should always be grateful for we have ',
                            'Oh no.. Did he pass away?',
                        ],
                    },
                ),
            ]
            for opt, example in opts_and_examples:
                full_opt = Opt({**opt, 'datapath': data_path})
                teacher = EmpatheticDialoguesTeacher(full_opt)
                self.assertEqual(teacher.get(episode_idx=1, entry_idx=1), example)
            # Check EmotionClassificationSituationTeacher
            opts_and_examples = [
                (
                    {'datatype': 'train', 'train_experiencer_only': True},
                    {
                        'text': ' i used to scare for darkness',
                        'labels': ['afraid'],
                        'episode_done': True,
                    },
                ),
                (
                    {'datatype': 'train', 'train_experiencer_only': False},
                    {
                        'text': ' i used to scare for darkness',
                        'labels': ['afraid'],
                        'episode_done': True,
                    },
                ),
                (
                    {'datatype': 'valid'},
                    {
                        'text': 'I was walking through my hallway a few week ago, and my son was hiding under the table and grabbed my ankle. I thought i was got. ',
                        'labels': ['surprised'],
                        'episode_done': True,
                    },
                ),
                (
                    {'datatype': 'test'},
                    {
                        'text': "My mother stopped by my house one day and said she saw 3 dogs on the road, down from our house. They were starving, with ribs showing, and it was a mother dog and her two small puppies. Of course, my daughter wanted to bring them to our house, so we could feed and help them. We did, and my heart went out to them, as they were so sweet, but really were in a bad shape.",
                        'labels': ['caring'],
                        'episode_done': True,
                    },
                ),
            ]
            for opt, example in opts_and_examples:
                full_opt = Opt({**opt, 'datapath': data_path})
                teacher = EmotionClassificationSituationTeacher(full_opt)
                self.assertEqual(teacher.get(episode_idx=1), example)
# Allow running this test module directly (python test_empathetic_dialogues.py).
if __name__ == '__main__':
    unittest.main()
|
from sympy import *

# NOTE(review): `true` here is sympy's BooleanTrue singleton pulled in by the
# star import, not the Python builtin True. It is truthy, so this works, but
# `pretty_print=True` would be clearer — confirm before changing.
init_printing(pretty_print=true)

# Symbolic variable used by the exact integral below.
x = Symbol('x')
def f(x):
    # Integrand: e**x (sympy's exp, so symbolic arguments also work).
    return exp(x)
def aplicando_h(a, b, h):
    """Return the grid [a, a+h, a+2h, ...], stopping at the first node >= b.

    Bug fix: the original tracked the loop with a sentinel initialized to 0
    instead of to `a`, so a start point already at or past `b` still appended
    one spurious node (e.g. aplicando_h(5, 3, 1) returned [5, 6]). The
    condition now tests the last appended node itself.

    :param a: first grid point
    :param b: upper bound; the grid ends at the first node >= b
    :param h: step between consecutive nodes (assumed positive)
    :return: list of grid points starting at `a`
    """
    lista = [a]
    while lista[-1] < b:
        lista.append(lista[-1] + h)
    return lista
def aplicando_x(lista_x):
    """Apply f to every abscissa in `lista_x`, returning the ordinates."""
    return [f(valor) for valor in lista_x]
def isimpar(number):
    """Return True when `number` is odd (is-impar = "is odd" in Portuguese).

    Bug fix / idiom: the original returned `true`/`false`, which — because of
    the module's `from sympy import *` — are sympy's BooleanTrue/BooleanFalse
    singletons, not Python booleans. They behave identically in boolean
    context (so callers such as simpson() are unaffected), but a plain bool
    removes the accidental sympy dependency.
    """
    return number % 2 != 0
def somatorio(lista):
    """Sum every item of `lista` (0 for an empty list), like built-in sum."""
    return sum(lista)
def simpson(x0, xn, n_intervalos):
    """Composite Simpson 1/3 rule for f over [x0, xn].

    Interior nodes get weight 4 (odd index) or 2 (even index); the endpoints
    get weight 1, and the weighted sum is scaled by h/3.

    NOTE(review): Simpson's rule is only valid for an even number of
    subintervals; callers in this script also pass n_intervalos=1, which is
    preserved here but does not yield a true Simpson estimate — confirm.
    """
    h = (xn - x0) / n_intervalos
    soma = f(x0) + f(xn)
    for indice in range(1, n_intervalos):
        no = x0 + (indice * h)
        soma += (4 if isimpar(indice) else 2) * f(no)
    return soma * (h / 3)
def erro(valor_real, valor_soma):
    """Signed error of the approximation: valor_soma - valor_real."""
    return valor_soma - valor_real
# Exact value of the integral of e**x over [1, 4], used as the reference.
integral = Integral(exp(x), (x, 1, 4)).doit().evalf()

# For 1 subinterval
# NOTE(review): Simpson's rule requires an even subinterval count; n=1
# degenerates to (h/3)*(f(x0)+f(xn)) here — confirm this is intentional.
soma_simpson_1 = simpson(1, 4, 1).evalf()
erro_1 = erro(integral, soma_simpson_1)
porcentagem_erro_1 = (erro_1 / integral) * 100

# For 4 subintervals
soma_simpson_4 = simpson(1, 4, 4).evalf()
erro_4 = erro(integral, soma_simpson_4)
porcentagem_erro_4 = (erro_4 / integral) * 100

# For 10 subintervals
soma_simpson_10 = simpson(1, 4, 10).evalf()
erro_10 = erro(integral, soma_simpson_10)
porcentagem_erro_10 = (erro_10 / integral) * 100

# For 100 subintervals
soma_simpson_100 = simpson(1, 4, 100).evalf()
erro_100 = erro(integral, soma_simpson_100)
porcentagem_erro_100 = (erro_100 / integral) * 100

# Report (user-facing strings deliberately left in Portuguese).
print(f'1/3 de Simpson = {soma_simpson_1:.3f}')
print(f'Integral = {integral:.3f}')
print(f'Erro = {erro_1:.3f}')
print(f'Porcentagem do erro = {porcentagem_erro_1:.3f}%')
print('-' * 30)
print('Para 4 subintervalos: ')
print('-' * 30)
print(f'1/3 de Simpson = {soma_simpson_4:.3f}')
print(f'Erro = {erro_4:.3f}')
print(f'Porcentagem do erro = {porcentagem_erro_4:.3f}%')
print('-' * 30)
print('Para 10 subintervalos: ')
print('-' * 30)
print(f'1/3 de Simpson = {soma_simpson_10:.3f}')
print(f'Erro = {erro_10:.3f}')
print(f'Porcentagem do erro = {porcentagem_erro_10:.3f}%')
print('-' * 30)
print('Para 100 subintervalos: ')
print('-' * 30)
print(f'1/3 de Simpson = {soma_simpson_100:.3f}')
print(f'Erro = {erro_100:.3f}')
print(f'Porcentagem do erro = {porcentagem_erro_100:.3f}%')
|
"""Tests for the Ecosystem class."""
import unittest
import axelrod
class TestEcosystem(unittest.TestCase):
    """Unit tests for axelrod.Ecosystem population dynamics."""

    @classmethod
    def setUpClass(cls):
        """Play two canned tournaments once and share the results:
        an all-cooperator population and one with a single defector."""
        cooperators = axelrod.Tournament(
            players=[
                axelrod.Cooperator(),
                axelrod.Cooperator(),
                axelrod.Cooperator(),
                axelrod.Cooperator(),
            ]
        )
        defector_wins = axelrod.Tournament(
            players=[
                axelrod.Cooperator(),
                axelrod.Cooperator(),
                axelrod.Cooperator(),
                axelrod.Defector(),
            ]
        )
        cls.res_cooperators = cooperators.play()
        cls.res_defector_wins = defector_wins.play()

    def test_default_population_sizes(self):
        """Population defaults to a uniform distribution over players."""
        eco = axelrod.Ecosystem(self.res_cooperators)
        pops = eco.population_sizes
        self.assertEqual(eco.num_players, 4)
        self.assertEqual(len(pops), 1)
        self.assertEqual(len(pops[0]), 4)
        self.assertAlmostEqual(sum(pops[0]), 1.0)
        self.assertEqual(list(set(pops[0])), [0.25])

    def test_non_default_population_sizes(self):
        """An explicit normalized population is stored as given."""
        eco = axelrod.Ecosystem(
            self.res_cooperators, population=[0.7, 0.25, 0.03, 0.02]
        )
        pops = eco.population_sizes
        self.assertEqual(eco.num_players, 4)
        self.assertEqual(len(pops), 1)
        self.assertEqual(len(pops[0]), 4)
        self.assertAlmostEqual(sum(pops[0]), 1.0)
        self.assertEqual(pops[0], [0.7, 0.25, 0.03, 0.02])

    def test_population_normalization(self):
        """Un-normalized population sizes are normalized to sum to 1."""
        eco = axelrod.Ecosystem(self.res_cooperators, population=[70, 25, 3, 2])
        pops = eco.population_sizes
        self.assertEqual(eco.num_players, 4)
        self.assertEqual(len(pops), 1)
        self.assertEqual(len(pops[0]), 4)
        self.assertAlmostEqual(sum(pops[0]), 1.0)
        self.assertEqual(pops[0], [0.7, 0.25, 0.03, 0.02])

    def test_results_and_population_of_different_sizes(self):
        """A population vector of the wrong length raises TypeError."""
        self.assertRaises(
            TypeError,
            axelrod.Ecosystem,
            self.res_cooperators,
            population=[0.7, 0.2, 0.03, 0.1, 0.1],
        )

    def test_negative_populations(self):
        """A population vector with a negative entry raises TypeError."""
        self.assertRaises(
            TypeError,
            axelrod.Ecosystem,
            self.res_cooperators,
            population=[0.7, -0.2, 0.03, 0.2],
        )

    def test_fitness_function(self):
        """A custom fitness function is stored and applied."""
        # Named function instead of an assigned lambda (PEP 8 E731).
        def fitness(p):
            return 2 * p

        eco = axelrod.Ecosystem(self.res_cooperators, fitness=fitness)
        # Bug fix: the original called assertTrue(eco.fitness(10), 20),
        # which treats 20 as the failure *message* and passes for any
        # truthy return value; assertEqual actually compares the result.
        self.assertEqual(eco.fitness(10), 20)

    def test_cooperators_are_stable_over_time(self):
        """With identical cooperators, reproduction keeps a uniform split."""
        eco = axelrod.Ecosystem(self.res_cooperators)
        eco.reproduce(100)
        pops = eco.population_sizes
        self.assertEqual(len(pops), 101)
        for p in pops:
            self.assertEqual(len(p), 4)
            self.assertEqual(sum(p), 1.0)
            self.assertEqual(list(set(p)), [0.25])

    def test_defector_wins_with_only_cooperators(self):
        """The single defector takes over a cooperator population.

        NOTE(review): the method name is misleading — the fixture holds
        three cooperators plus one defector; kept unchanged so any
        test-selection tooling referencing the name still works.
        """
        eco = axelrod.Ecosystem(self.res_defector_wins)
        eco.reproduce(1000)
        pops = eco.population_sizes
        self.assertEqual(len(pops), 1001)
        for p in pops:
            self.assertEqual(len(p), 4)
            self.assertAlmostEqual(sum(p), 1.0)
        last = pops[-1]
        self.assertAlmostEqual(last[0], 0.0)
        self.assertAlmostEqual(last[1], 0.0)
        self.assertAlmostEqual(last[2], 0.0)
        self.assertAlmostEqual(last[3], 1.0)
|
# -*- coding: utf-8 -*-
"""
Copyright ©2017. The Regents of the University of California (Regents). All Rights Reserved.
Permission to use, copy, modify, and distribute this software and its documentation for educational,
research, and not-for-profit purposes, without fee and without a signed licensing agreement, is
hereby granted, provided that the above copyright notice, this paragraph and the following two
paragraphs appear in all copies, modifications, and distributions. Contact The Office of Technology
Licensing, UC Berkeley, 2150 Shattuck Avenue, Suite 510, Berkeley, CA 94720-1620, (510) 643-
7201, <EMAIL>, http://ipira.berkeley.edu/industry-info for commercial licensing opportunities.
IN NO EVENT SHALL REGENTS BE LIABLE TO ANY PARTY FOR DIRECT, INDIRECT, SPECIAL,
INCIDENTAL, OR CONSEQUENTIAL DAMAGES, INCLUDING LOST PROFITS, ARISING OUT OF
THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN IF REGENTS HAS BEEN
ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
REGENTS SPECIFICALLY DISCLAIMS ANY WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE SOFTWARE AND ACCOMPANYING DOCUMENTATION, IF ANY, PROVIDED
HEREUNDER IS PROVIDED "AS IS". REGENTS HAS NO OBLIGATION TO PROVIDE
MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.
"""
"""
Class to encapsulate robot grippers
Author: Jeff
"""
import json
import numpy as np
import os
import sys
import IPython
import meshpy.obj_file as obj_file
from autolab_core import RigidTransform
# Canonical filenames expected inside each gripper's data directory.
GRIPPER_MESH_FILENAME = 'gripper.obj'       # triangular mesh of the gripper
GRIPPER_PARAMS_FILENAME = 'params.json'     # free-form gripper parameters
T_MESH_GRIPPER_FILENAME = 'T_mesh_gripper.tf'    # mesh frame -> gripper frame
T_GRASP_GRIPPER_FILENAME = 'T_grasp_gripper.tf'  # gripper frame -> grasp frame
class RobotGripper(object):
    """ Robot gripper wrapper for collision checking and encapsulation of grasp parameters (e.g. width, finger radius, etc)
    Note: The gripper frame should be the frame used to command the physical robot
    Attributes
    ----------
    name : :obj:`str`
        name of gripper
    mesh : :obj:`Mesh3D`
        3D triangular mesh specifying the geometry of the gripper
    params : :obj:`dict`
        set of parameters for the gripper, at minimum (finger_radius and grasp_width)
    T_mesh_gripper : :obj:`RigidTransform`
        transform from mesh frame to gripper frame (for rendering)
    T_grasp_gripper : :obj:`RigidTransform`
        transform from gripper frame to the grasp canonical frame (y-axis = grasp axis, x-axis = palm axis)
    """

    def __init__(self, name, mesh, mesh_filename, params, T_mesh_gripper, T_grasp_gripper):
        # See the class docstring for attribute semantics.
        self.name = name
        self.mesh = mesh
        self.mesh_filename = mesh_filename
        self.T_mesh_gripper = T_mesh_gripper
        self.T_grasp_gripper = T_grasp_gripper
        # Expose every entry of the params dict as an instance attribute
        # (e.g. self.finger_radius, self.grasp_width).
        for key, value in list(params.items()):
            setattr(self, key, value)

    def collides_with_table(self, grasp, stable_pose, clearance=0.0):
        """ Checks whether or not the gripper collides with the table in the stable pose.
        No longer necessary with CollisionChecker.
        Parameters
        ----------
        grasp : :obj:`ParallelJawPtGrasp3D`
            grasp parameterizing the pose of the gripper
        stable_pose : :obj:`StablePose`
            specifies the pose of the table
        clearance : float
            min distance from the table
        Returns
        -------
        bool
            True if collision, False otherwise
        """
        # transform mesh into object pose to check collisions with table
        T_obj_gripper = grasp.gripper_pose(self)
        T_obj_mesh = T_obj_gripper * self.T_mesh_gripper.inverse()
        mesh_tf = self.mesh.transform(T_obj_mesh.inverse())
        # extract table plane (normal + point on plane)
        n = stable_pose.r[2, :]
        x0 = stable_pose.x0
        # Check vertices for intersection with the table; return on the
        # first hit instead of scanning every remaining vertex (the
        # original kept iterating after a collision was already found).
        for vertex in mesh_tf.vertices():
            v = np.array(vertex)
            if n.dot(v - x0) < clearance:
                return True
        return False

    @staticmethod
    def load(gripper_name, gripper_dir='data/grippers'):
        """ Load the gripper specified by gripper_name.
        Parameters
        ----------
        gripper_name : :obj:`str`
            name of the gripper to load
        gripper_dir : :obj:`str`
            directory where the gripper files are stored
        Returns
        -------
        :obj:`RobotGripper`
            loaded gripper objects
        """
        gripper_path = os.path.join(gripper_dir, gripper_name)
        mesh_filename = os.path.join(gripper_path, GRIPPER_MESH_FILENAME)
        mesh = obj_file.ObjFile(mesh_filename).read()
        # Bug fix: the original opened the params file without ever closing
        # it; a context manager guarantees the handle is released even if
        # json.load raises.
        with open(os.path.join(gripper_path, GRIPPER_PARAMS_FILENAME), 'r') as f:
            params = json.load(f)
        T_mesh_gripper = RigidTransform.load(os.path.join(gripper_path, T_MESH_GRIPPER_FILENAME))
        T_grasp_gripper = RigidTransform.load(os.path.join(gripper_path, T_GRASP_GRIPPER_FILENAME))
        return RobotGripper(gripper_name, mesh, mesh_filename, params, T_mesh_gripper, T_grasp_gripper)
|
# pylint: disable=invalid-name
# pylint: disable=line-too-long
import sys
from optparse import OptionParser
from . import flashimage
from . import jffs2
from . import uboot
def _write_command(flash_image_io, options, args, start_page, end_page):
    """Handle the 'w'rite command, optionally diffing against a reference image.

    When -C is given, only pages that differ from the compare target are
    erased and re-programmed block by block; otherwise the whole range is
    written in one call.
    """
    filename = args[0]
    add_oob = False
    add_jffs2_eraser_marker = False
    # NOTE: the original also tested `options.command == 'add_oob'` here,
    # but that can never be true inside the 'w' handler; branch removed.
    if options.add_jffs2_oob:
        add_oob = True
        add_jffs2_eraser_marker = True
    if options.compare_target_filename != '':
        page_size = flash_image_io.SrcImage.PageSize
        pages_per_block = flash_image_io.SrcImage.PagePerBlock
        # context managers: the original leaked both file handles
        with open(options.compare_target_filename, 'rb') as cfd, open(filename, 'rb') as fd:
            cfd.seek(options.start_offset)
            fd.seek(options.start_offset)
            current_page = 0
            while True:
                cdata = cfd.read(page_size)
                data = fd.read(page_size)
                if not data:
                    break
                if cdata != data:
                    print('Changed Page:0x%x file_offset: 0x%x' % (start_page + current_page, options.start_offset + current_page * page_size))
                    # BUG FIX: use floor division -- '/' produced a float
                    # block index under Python 3
                    current_block = current_page // pages_per_block
                    print('Erasing and re-programming Block: %d' % (current_block))
                    flash_image_io.SrcImage.erase_block_by_page(current_page)
                    target_start_page = start_page + current_block * pages_per_block
                    target_end_page = target_start_page + pages_per_block - 1
                    print('Programming Page: %d ~ %d' % (target_start_page, target_end_page))
                    flash_image_io.SrcImage.write_pages(
                        filename,
                        options.start_offset + current_block * pages_per_block * page_size,
                        target_start_page,
                        target_end_page,
                        add_oob,
                        add_jffs2_eraser_marker = add_jffs2_eraser_marker,
                        raw_mode = options.raw_mode
                    )
                    # NOTE(review): '+ 1' skips one extra page past the block
                    # boundary; kept to match the original logic -- confirm intended
                    current_page = (current_block + 1) * pages_per_block + 1
                    fd.seek(options.start_offset + current_page * page_size)
                    cfd.seek(options.start_offset + current_page * page_size)
                else:
                    current_page += 1
    else:
        flash_image_io.SrcImage.write_pages(filename, options.start_offset, start_page, end_page, add_oob, add_jffs2_eraser_marker = add_jffs2_eraser_marker, raw_mode = options.raw_mode)

def main():
    """Command-line entry point for the NAND flash image tool.

    Parses the options, opens the flash device or raw image through
    ``flashimage.IO`` and dispatches on the requested command
    (information, read, write, erase, extract, u-boot / JFFS2 helpers,
    ECC and bad-block checks).
    """
    parser = OptionParser()
    parser.add_option("-c", dest = "command", default = "information", help = "Command (i[nformation], r[ead], s[equential_read], w[rite], erase, e[xtract], extract_pages, add_oob, remove_oob, check_ecc, find_uboot, dump_uboot,find_jffs2, dump_jffs2, check_bad_blocks)")
    parser.add_option("-i", dest = "raw_image_filename", default = '', help = "Use file instead of device for operations")
    parser.add_option("-o", dest = "output_filename", default = 'output.dmp', help = "Output filename")
    parser.add_option("-L", action = "store_true", dest = "slow", default = False, help = "Set clock FTDI chip at 12MHz instead of 60MHz")
    parser.add_option("-R", action = "store_true", dest = "raw_mode", default = False, help = "Raw mode - skip bad block before reading/writing")
    parser.add_option("-j", action = "store_true", dest = "add_jffs2_oob", default = False, help = "Add JFFS2 OOB to the source")
    parser.add_option("-C", dest = "compare_target_filename", default = '', help = "When writing a file compare with this file before writing and write only differences", metavar = "COMPARE_TARGET_FILENAME")
    parser.add_option("-n", dest = "name_prefix", default = '', help = "Set output file name prefix")
    parser.add_option("-s", type = "int", default = 0, dest = "start_offset")
    parser.add_option("-l", type = "int", default = 0, dest = "length")
    parser.add_option("-p", type = "int", nargs = 2, dest = "pages")
    parser.add_option("-b", type = "int", nargs = 2, dest = "blocks")
    parser.add_option("-P", type = "int", default = 512, dest = "page_size")
    parser.add_option("-O", type = "int", default = 16, dest = "oob_size")
    parser.add_option("--bp", type = "int", default = 32, dest = "pages_per_block")
    (options, args) = parser.parse_args()

    # enable ANSI colors when a helper package is available
    use_ansi = False
    try:
        import colorama
        colorama.init()
        use_ansi = True
    except ImportError:  # BUG FIX: was a bare except that hid unrelated errors
        try:
            import tendo.ansiterm
            use_ansi = True
        except ImportError:
            pass

    # page range from -p (start [end]); -1 means "unbounded"
    start_page = -1
    end_page = -1
    if options.pages is not None:
        start_page = options.pages[0]
        if len(options.pages) > 1:
            end_page = options.pages[1]

    flash_image_io = flashimage.IO(options.raw_image_filename, options.start_offset, options.length, options.page_size, options.oob_size, options.pages_per_block, options.slow)
    if not flash_image_io.is_initialized():
        print('Device not ready, aborting...')
        sys.exit(0)
    flash_image_io.set_use_ansi(use_ansi)

    # block range from -b overrides the page range.
    # BUG FIX: the original guarded the assignments with `if not options.blocks:`,
    # which is never true for a populated 2-tuple, so -b was silently ignored.
    if options.blocks is not None:
        start_page = options.blocks[0] * flash_image_io.SrcImage.PagePerBlock
        if len(options.blocks) > 1:
            end_page = (options.blocks[1] + 1) * flash_image_io.SrcImage.PagePerBlock

    # Dispatch: full-word commands are matched first.  With the original
    # ordering 'erase' was unreachable (its first letter matched the
    # e[xtract] branch) and 'add_oob' was compared against command[0],
    # a single character, so it never matched either.
    if options.command == 'add_oob':
        if options.raw_image_filename:
            print('Add OOB to %s' % (options.raw_image_filename))
            flash_image_io.add_oob(options.raw_image_filename, options.output_filename)
    elif options.command == 'extract_pages':
        if options.raw_image_filename:
            print('Extract from pages(0x%x - 0x%x) to %s' % (start_page, end_page, options.output_filename))
            flash_image_io.extract_pages(options.output_filename, start_page, end_page, remove_oob = False)
    elif options.command == 'erase':
        if options.blocks is not None:
            start = options.blocks[0]
            end = options.blocks[1]
            flash_image_io.SrcImage.erase_block(start, end)
        else:
            flash_image_io.SrcImage.erase()
    elif options.command == 'check_bad_blocks':
        flash_image_io.check_bad_blocks()
    elif options.command == 'check_ecc':
        flash_image_io.check_ecc()
    elif options.command == 'find_uboot':
        uboot_util = uboot.Util(flash_image_io)
        uboot_util.find()
    elif options.command == 'dump_uboot':
        uboot_util = uboot.Util(flash_image_io)
        uboot_util.dump()
    elif options.command == 'find_jffs2':
        jffs2_util = jffs2.Util(flash_image_io)
        jffs2_util.find()
    elif options.command == 'dump_jffs2':
        jffs2_util = jffs2.Util(flash_image_io)
        jffs2_util.dump(options.name_prefix)
    elif options.command[0] == 'i':
        flash_image_io.SrcImage.dump_info()
    elif options.command[0] == 'r' or options.command[0] == 's':
        # 's' requests a sequential read
        flash_image_io.read_pages(start_page, end_page, False, options.output_filename, seq = (options.command[0] == 's'), raw_mode = options.raw_mode)
    elif options.command[0] == 'e':
        if options.raw_image_filename:
            print('Extract data from pages(0x%x - 0x%x) to %s' % (start_page, end_page, options.output_filename))
            flash_image_io.extract_pages(options.output_filename, start_page, end_page, remove_oob = True)
    elif options.command[0] == 'w':
        _write_command(flash_image_io, options, args, start_page, end_page)
# run the CLI when executed as a script; propagate main()'s exit status
if __name__ == "__main__":
    sys.exit(main())
# ----------------------------------------------------------------------
# Repository: aliyun/dingtalk-sdk
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.core import TeaCore
from alibabacloud_tea_openapi.client import Client as OpenApiClient
from alibabacloud_tea_openapi import models as open_api_models
from alibabacloud_tea_util.client import Client as UtilClient
from alibabacloud_dingtalk.doc_1_0 import models as dingtalkdoc__1__0_models
from alibabacloud_tea_util import models as util_models
from alibabacloud_openapi_util.client import Client as OpenApiUtilClient
class Client(OpenApiClient):
"""
*\
"""
    def __init__(
        self,
        config: open_api_models.Config,
    ):
        """Initialize the client; falls back to the api.dingtalk.com endpoint when none is configured."""
        super().__init__(config)
        self._endpoint_rule = ''
        if UtilClient.empty(self._endpoint):
            self._endpoint = 'api.dingtalk.com'
    def batch_get_workspace_docs(
        self,
        request: dingtalkdoc__1__0_models.BatchGetWorkspaceDocsRequest,
    ) -> dingtalkdoc__1__0_models.BatchGetWorkspaceDocsResponse:
        """Query workspace doc info in batch, using default headers and runtime options."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.BatchGetWorkspaceDocsHeaders()
        return self.batch_get_workspace_docs_with_options(request, headers, runtime)
    async def batch_get_workspace_docs_async(
        self,
        request: dingtalkdoc__1__0_models.BatchGetWorkspaceDocsRequest,
    ) -> dingtalkdoc__1__0_models.BatchGetWorkspaceDocsResponse:
        """Async variant of batch_get_workspace_docs()."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.BatchGetWorkspaceDocsHeaders()
        return await self.batch_get_workspace_docs_with_options_async(request, headers, runtime)
    def batch_get_workspace_docs_with_options(
        self,
        request: dingtalkdoc__1__0_models.BatchGetWorkspaceDocsRequest,
        headers: dingtalkdoc__1__0_models.BatchGetWorkspaceDocsHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.BatchGetWorkspaceDocsResponse:
        """Call the BatchGetWorkspaceDocs API (POST /v1.0/doc/workspaces/docs/infos/query).

        Copies each set field of ``request`` into the JSON body, merges the
        caller's common headers and optional access token, then maps the raw
        response onto ``BatchGetWorkspaceDocsResponse``.
        """
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.operator_id):
            body['operatorId'] = request.operator_id
        if not UtilClient.is_unset(request.node_ids):
            body['nodeIds'] = request.node_ids
        if not UtilClient.is_unset(request.ding_isv_org_id):
            body['dingIsvOrgId'] = request.ding_isv_org_id
        if not UtilClient.is_unset(request.ding_org_id):
            body['dingOrgId'] = request.ding_org_id
        if not UtilClient.is_unset(request.ding_access_token_type):
            body['dingAccessTokenType'] = request.ding_access_token_type
        if not UtilClient.is_unset(request.ding_uid):
            body['dingUid'] = request.ding_uid
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.BatchGetWorkspaceDocsResponse(),
            self.do_roarequest('BatchGetWorkspaceDocs', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/docs/infos/query', 'json', req, runtime)
        )
    async def batch_get_workspace_docs_with_options_async(
        self,
        request: dingtalkdoc__1__0_models.BatchGetWorkspaceDocsRequest,
        headers: dingtalkdoc__1__0_models.BatchGetWorkspaceDocsHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.BatchGetWorkspaceDocsResponse:
        """Async variant of batch_get_workspace_docs_with_options()."""
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.operator_id):
            body['operatorId'] = request.operator_id
        if not UtilClient.is_unset(request.node_ids):
            body['nodeIds'] = request.node_ids
        if not UtilClient.is_unset(request.ding_isv_org_id):
            body['dingIsvOrgId'] = request.ding_isv_org_id
        if not UtilClient.is_unset(request.ding_org_id):
            body['dingOrgId'] = request.ding_org_id
        if not UtilClient.is_unset(request.ding_access_token_type):
            body['dingAccessTokenType'] = request.ding_access_token_type
        if not UtilClient.is_unset(request.ding_uid):
            body['dingUid'] = request.ding_uid
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.BatchGetWorkspaceDocsResponse(),
            await self.do_roarequest_async('BatchGetWorkspaceDocs', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/docs/infos/query', 'json', req, runtime)
        )
    def delete_sheet(
        self,
        workbook_id: str,
        sheet_id: str,
        request: dingtalkdoc__1__0_models.DeleteSheetRequest,
    ) -> dingtalkdoc__1__0_models.DeleteSheetResponse:
        """Delete a sheet from a workbook, using default headers and runtime options."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.DeleteSheetHeaders()
        return self.delete_sheet_with_options(workbook_id, sheet_id, request, headers, runtime)
    async def delete_sheet_async(
        self,
        workbook_id: str,
        sheet_id: str,
        request: dingtalkdoc__1__0_models.DeleteSheetRequest,
    ) -> dingtalkdoc__1__0_models.DeleteSheetResponse:
        """Async variant of delete_sheet()."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.DeleteSheetHeaders()
        return await self.delete_sheet_with_options_async(workbook_id, sheet_id, request, headers, runtime)
    def delete_sheet_with_options(
        self,
        workbook_id: str,
        sheet_id: str,
        request: dingtalkdoc__1__0_models.DeleteSheetRequest,
        headers: dingtalkdoc__1__0_models.DeleteSheetHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.DeleteSheetResponse:
        """Call the DeleteSheet API (DELETE /v1.0/doc/workbooks/{workbook_id}/sheets/{sheet_id}).

        ``operatorId`` is passed as a query parameter; the response carries no body
        ('none' body type).
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.operator_id):
            query['operatorId'] = request.operator_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.DeleteSheetResponse(),
            self.do_roarequest('DeleteSheet', 'doc_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/doc/workbooks/{workbook_id}/sheets/{sheet_id}', 'none', req, runtime)
        )
    async def delete_sheet_with_options_async(
        self,
        workbook_id: str,
        sheet_id: str,
        request: dingtalkdoc__1__0_models.DeleteSheetRequest,
        headers: dingtalkdoc__1__0_models.DeleteSheetHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.DeleteSheetResponse:
        """Async variant of delete_sheet_with_options()."""
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.operator_id):
            query['operatorId'] = request.operator_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.DeleteSheetResponse(),
            await self.do_roarequest_async('DeleteSheet', 'doc_1.0', 'HTTP', 'DELETE', 'AK', f'/v1.0/doc/workbooks/{workbook_id}/sheets/{sheet_id}', 'none', req, runtime)
        )
    def update_workspace_doc_members(
        self,
        workspace_id: str,
        node_id: str,
        request: dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersRequest,
    ) -> dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersResponse:
        """Update members of a workspace doc, using default headers and runtime options."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersHeaders()
        return self.update_workspace_doc_members_with_options(workspace_id, node_id, request, headers, runtime)
    async def update_workspace_doc_members_async(
        self,
        workspace_id: str,
        node_id: str,
        request: dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersRequest,
    ) -> dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersResponse:
        """Async variant of update_workspace_doc_members()."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersHeaders()
        return await self.update_workspace_doc_members_with_options_async(workspace_id, node_id, request, headers, runtime)
    def update_workspace_doc_members_with_options(
        self,
        workspace_id: str,
        node_id: str,
        request: dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersRequest,
        headers: dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersResponse:
        """Call the UpdateWorkspaceDocMembers API (PUT /v1.0/doc/workspaces/{workspace_id}/docs/{node_id}/members).

        Sends ``operatorId`` and ``members`` in the JSON body; the response
        carries no body ('none' body type).
        """
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.operator_id):
            body['operatorId'] = request.operator_id
        if not UtilClient.is_unset(request.members):
            body['members'] = request.members
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersResponse(),
            self.do_roarequest('UpdateWorkspaceDocMembers', 'doc_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/docs/{node_id}/members', 'none', req, runtime)
        )
    async def update_workspace_doc_members_with_options_async(
        self,
        workspace_id: str,
        node_id: str,
        request: dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersRequest,
        headers: dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersResponse:
        """Async variant of update_workspace_doc_members_with_options()."""
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.operator_id):
            body['operatorId'] = request.operator_id
        if not UtilClient.is_unset(request.members):
            body['members'] = request.members
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.UpdateWorkspaceDocMembersResponse(),
            await self.do_roarequest_async('UpdateWorkspaceDocMembers', 'doc_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/docs/{node_id}/members', 'none', req, runtime)
        )
    def create_workspace_doc(
        self,
        workspace_id: str,
        request: dingtalkdoc__1__0_models.CreateWorkspaceDocRequest,
    ) -> dingtalkdoc__1__0_models.CreateWorkspaceDocResponse:
        """Create a doc in a workspace, using default headers and runtime options."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.CreateWorkspaceDocHeaders()
        return self.create_workspace_doc_with_options(workspace_id, request, headers, runtime)
    async def create_workspace_doc_async(
        self,
        workspace_id: str,
        request: dingtalkdoc__1__0_models.CreateWorkspaceDocRequest,
    ) -> dingtalkdoc__1__0_models.CreateWorkspaceDocResponse:
        """Async variant of create_workspace_doc()."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.CreateWorkspaceDocHeaders()
        return await self.create_workspace_doc_with_options_async(workspace_id, request, headers, runtime)
    def create_workspace_doc_with_options(
        self,
        workspace_id: str,
        request: dingtalkdoc__1__0_models.CreateWorkspaceDocRequest,
        headers: dingtalkdoc__1__0_models.CreateWorkspaceDocHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.CreateWorkspaceDocResponse:
        """Call the CreateWorkspaceDoc API (POST /v1.0/doc/workspaces/{workspace_id}/docs).

        Sends name, docType, operatorId and parentNodeId (when set) in the
        JSON body and maps the JSON response onto ``CreateWorkspaceDocResponse``.
        """
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.name):
            body['name'] = request.name
        if not UtilClient.is_unset(request.doc_type):
            body['docType'] = request.doc_type
        if not UtilClient.is_unset(request.operator_id):
            body['operatorId'] = request.operator_id
        if not UtilClient.is_unset(request.parent_node_id):
            body['parentNodeId'] = request.parent_node_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.CreateWorkspaceDocResponse(),
            self.do_roarequest('CreateWorkspaceDoc', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/docs', 'json', req, runtime)
        )
    async def create_workspace_doc_with_options_async(
        self,
        workspace_id: str,
        request: dingtalkdoc__1__0_models.CreateWorkspaceDocRequest,
        headers: dingtalkdoc__1__0_models.CreateWorkspaceDocHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.CreateWorkspaceDocResponse:
        """Async variant of create_workspace_doc_with_options()."""
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.name):
            body['name'] = request.name
        if not UtilClient.is_unset(request.doc_type):
            body['docType'] = request.doc_type
        if not UtilClient.is_unset(request.operator_id):
            body['operatorId'] = request.operator_id
        if not UtilClient.is_unset(request.parent_node_id):
            body['parentNodeId'] = request.parent_node_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.CreateWorkspaceDocResponse(),
            await self.do_roarequest_async('CreateWorkspaceDoc', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/docs', 'json', req, runtime)
        )
    def create_sheet(
        self,
        workbook_id: str,
        request: dingtalkdoc__1__0_models.CreateSheetRequest,
    ) -> dingtalkdoc__1__0_models.CreateSheetResponse:
        """Create a sheet in a workbook, using default headers and runtime options."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.CreateSheetHeaders()
        return self.create_sheet_with_options(workbook_id, request, headers, runtime)
    async def create_sheet_async(
        self,
        workbook_id: str,
        request: dingtalkdoc__1__0_models.CreateSheetRequest,
    ) -> dingtalkdoc__1__0_models.CreateSheetResponse:
        """Async variant of create_sheet()."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.CreateSheetHeaders()
        return await self.create_sheet_with_options_async(workbook_id, request, headers, runtime)
    def create_sheet_with_options(
        self,
        workbook_id: str,
        request: dingtalkdoc__1__0_models.CreateSheetRequest,
        headers: dingtalkdoc__1__0_models.CreateSheetHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.CreateSheetResponse:
        """Call the CreateSheet API (POST /v1.0/doc/workbooks/{workbook_id}/sheets).

        ``operatorId`` goes in the query string and ``name`` in the JSON body.
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.operator_id):
            query['operatorId'] = request.operator_id
        body = {}
        if not UtilClient.is_unset(request.name):
            body['name'] = request.name
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query),
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.CreateSheetResponse(),
            self.do_roarequest('CreateSheet', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workbooks/{workbook_id}/sheets', 'json', req, runtime)
        )
    async def create_sheet_with_options_async(
        self,
        workbook_id: str,
        request: dingtalkdoc__1__0_models.CreateSheetRequest,
        headers: dingtalkdoc__1__0_models.CreateSheetHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.CreateSheetResponse:
        """Async variant of create_sheet_with_options()."""
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.operator_id):
            query['operatorId'] = request.operator_id
        body = {}
        if not UtilClient.is_unset(request.name):
            body['name'] = request.name
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query),
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.CreateSheetResponse(),
            await self.do_roarequest_async('CreateSheet', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workbooks/{workbook_id}/sheets', 'json', req, runtime)
        )
    def create_workspace(
        self,
        request: dingtalkdoc__1__0_models.CreateWorkspaceRequest,
    ) -> dingtalkdoc__1__0_models.CreateWorkspaceResponse:
        """Create a workspace, using default headers and runtime options."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.CreateWorkspaceHeaders()
        return self.create_workspace_with_options(request, headers, runtime)
    async def create_workspace_async(
        self,
        request: dingtalkdoc__1__0_models.CreateWorkspaceRequest,
    ) -> dingtalkdoc__1__0_models.CreateWorkspaceResponse:
        """Async variant of create_workspace()."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.CreateWorkspaceHeaders()
        return await self.create_workspace_with_options_async(request, headers, runtime)
    def create_workspace_with_options(
        self,
        request: dingtalkdoc__1__0_models.CreateWorkspaceRequest,
        headers: dingtalkdoc__1__0_models.CreateWorkspaceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.CreateWorkspaceResponse:
        """Call the CreateWorkspace API (POST /v1.0/doc/workspaces).

        Copies each set field of ``request`` into the JSON body and maps the
        JSON response onto ``CreateWorkspaceResponse``.
        """
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.name):
            body['name'] = request.name
        if not UtilClient.is_unset(request.description):
            body['description'] = request.description
        if not UtilClient.is_unset(request.operator_id):
            body['operatorId'] = request.operator_id
        if not UtilClient.is_unset(request.ding_org_id):
            body['dingOrgId'] = request.ding_org_id
        if not UtilClient.is_unset(request.ding_uid):
            body['dingUid'] = request.ding_uid
        if not UtilClient.is_unset(request.ding_access_token_type):
            body['dingAccessTokenType'] = request.ding_access_token_type
        if not UtilClient.is_unset(request.ding_isv_org_id):
            body['dingIsvOrgId'] = request.ding_isv_org_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.CreateWorkspaceResponse(),
            self.do_roarequest('CreateWorkspace', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces', 'json', req, runtime)
        )
    async def create_workspace_with_options_async(
        self,
        request: dingtalkdoc__1__0_models.CreateWorkspaceRequest,
        headers: dingtalkdoc__1__0_models.CreateWorkspaceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.CreateWorkspaceResponse:
        """Async variant of create_workspace_with_options()."""
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.name):
            body['name'] = request.name
        if not UtilClient.is_unset(request.description):
            body['description'] = request.description
        if not UtilClient.is_unset(request.operator_id):
            body['operatorId'] = request.operator_id
        if not UtilClient.is_unset(request.ding_org_id):
            body['dingOrgId'] = request.ding_org_id
        if not UtilClient.is_unset(request.ding_uid):
            body['dingUid'] = request.ding_uid
        if not UtilClient.is_unset(request.ding_access_token_type):
            body['dingAccessTokenType'] = request.ding_access_token_type
        if not UtilClient.is_unset(request.ding_isv_org_id):
            body['dingIsvOrgId'] = request.ding_isv_org_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.CreateWorkspaceResponse(),
            await self.do_roarequest_async('CreateWorkspace', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces', 'json', req, runtime)
        )
    def delete_workspace_doc_members(
        self,
        workspace_id: str,
        node_id: str,
        request: dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersRequest,
    ) -> dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersResponse:
        """Remove members from a workspace doc, using default headers and runtime options."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersHeaders()
        return self.delete_workspace_doc_members_with_options(workspace_id, node_id, request, headers, runtime)
    async def delete_workspace_doc_members_async(
        self,
        workspace_id: str,
        node_id: str,
        request: dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersRequest,
    ) -> dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersResponse:
        """Async variant of delete_workspace_doc_members()."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersHeaders()
        return await self.delete_workspace_doc_members_with_options_async(workspace_id, node_id, request, headers, runtime)
    def delete_workspace_doc_members_with_options(
        self,
        workspace_id: str,
        node_id: str,
        request: dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersRequest,
        headers: dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersResponse:
        """Call the DeleteWorkspaceDocMembers API (POST /v1.0/doc/workspaces/{workspace_id}/docs/{node_id}/members/remove).

        Sends ``operatorId`` and ``members`` in the JSON body; the response
        carries no body ('none' body type).
        """
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.operator_id):
            body['operatorId'] = request.operator_id
        if not UtilClient.is_unset(request.members):
            body['members'] = request.members
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersResponse(),
            self.do_roarequest('DeleteWorkspaceDocMembers', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/docs/{node_id}/members/remove', 'none', req, runtime)
        )
    async def delete_workspace_doc_members_with_options_async(
        self,
        workspace_id: str,
        node_id: str,
        request: dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersRequest,
        headers: dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersResponse:
        """Async variant of delete_workspace_doc_members_with_options()."""
        UtilClient.validate_model(request)
        body = {}
        if not UtilClient.is_unset(request.operator_id):
            body['operatorId'] = request.operator_id
        if not UtilClient.is_unset(request.members):
            body['members'] = request.members
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.DeleteWorkspaceDocMembersResponse(),
            await self.do_roarequest_async('DeleteWorkspaceDocMembers', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/docs/{node_id}/members/remove', 'none', req, runtime)
        )
    def get_workspace(
        self,
        workspace_id: str,
    ) -> dingtalkdoc__1__0_models.GetWorkspaceResponse:
        """Fetch a workspace by id, using default headers and runtime options."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.GetWorkspaceHeaders()
        return self.get_workspace_with_options(workspace_id, headers, runtime)
    async def get_workspace_async(
        self,
        workspace_id: str,
    ) -> dingtalkdoc__1__0_models.GetWorkspaceResponse:
        """Async variant of get_workspace()."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.GetWorkspaceHeaders()
        return await self.get_workspace_with_options_async(workspace_id, headers, runtime)
    def get_workspace_with_options(
        self,
        workspace_id: str,
        headers: dingtalkdoc__1__0_models.GetWorkspaceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.GetWorkspaceResponse:
        """Call the GetWorkspace API (GET /v1.0/doc/workspaces/{workspace_id}).

        Only headers are sent; there is no request body or query.
        """
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.GetWorkspaceResponse(),
            self.do_roarequest('GetWorkspace', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/workspaces/{workspace_id}', 'json', req, runtime)
        )
    async def get_workspace_with_options_async(
        self,
        workspace_id: str,
        headers: dingtalkdoc__1__0_models.GetWorkspaceHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.GetWorkspaceResponse:
        """Async variant of get_workspace_with_options()."""
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.GetWorkspaceResponse(),
            await self.do_roarequest_async('GetWorkspace', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/workspaces/{workspace_id}', 'json', req, runtime)
        )
    def search_workspace_docs(
        self,
        request: dingtalkdoc__1__0_models.SearchWorkspaceDocsRequest,
    ) -> dingtalkdoc__1__0_models.SearchWorkspaceDocsResponse:
        """Search workspace docs, using default headers and runtime options."""
        runtime = util_models.RuntimeOptions()
        headers = dingtalkdoc__1__0_models.SearchWorkspaceDocsHeaders()
        return self.search_workspace_docs_with_options(request, headers, runtime)
async def search_workspace_docs_async(
self,
request: dingtalkdoc__1__0_models.SearchWorkspaceDocsRequest,
) -> dingtalkdoc__1__0_models.SearchWorkspaceDocsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.SearchWorkspaceDocsHeaders()
return await self.search_workspace_docs_with_options_async(request, headers, runtime)
def search_workspace_docs_with_options(
self,
request: dingtalkdoc__1__0_models.SearchWorkspaceDocsRequest,
headers: dingtalkdoc__1__0_models.SearchWorkspaceDocsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.SearchWorkspaceDocsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.workspace_id):
query['workspaceId'] = request.workspace_id
if not UtilClient.is_unset(request.operator_id):
query['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.max_results):
query['maxResults'] = request.max_results
if not UtilClient.is_unset(request.next_token):
query['nextToken'] = request.next_token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.SearchWorkspaceDocsResponse(),
self.do_roarequest('SearchWorkspaceDocs', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/docs', 'json', req, runtime)
)
async def search_workspace_docs_with_options_async(
self,
request: dingtalkdoc__1__0_models.SearchWorkspaceDocsRequest,
headers: dingtalkdoc__1__0_models.SearchWorkspaceDocsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.SearchWorkspaceDocsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.workspace_id):
query['workspaceId'] = request.workspace_id
if not UtilClient.is_unset(request.operator_id):
query['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.keyword):
query['keyword'] = request.keyword
if not UtilClient.is_unset(request.max_results):
query['maxResults'] = request.max_results
if not UtilClient.is_unset(request.next_token):
query['nextToken'] = request.next_token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.SearchWorkspaceDocsResponse(),
await self.do_roarequest_async('SearchWorkspaceDocs', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/docs', 'json', req, runtime)
)
def update_range(
self,
workbook_id: str,
sheet_id: str,
range_address: str,
request: dingtalkdoc__1__0_models.UpdateRangeRequest,
) -> dingtalkdoc__1__0_models.UpdateRangeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.UpdateRangeHeaders()
return self.update_range_with_options(workbook_id, sheet_id, range_address, request, headers, runtime)
async def update_range_async(
self,
workbook_id: str,
sheet_id: str,
range_address: str,
request: dingtalkdoc__1__0_models.UpdateRangeRequest,
) -> dingtalkdoc__1__0_models.UpdateRangeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.UpdateRangeHeaders()
return await self.update_range_with_options_async(workbook_id, sheet_id, range_address, request, headers, runtime)
def update_range_with_options(
self,
workbook_id: str,
sheet_id: str,
range_address: str,
request: dingtalkdoc__1__0_models.UpdateRangeRequest,
headers: dingtalkdoc__1__0_models.UpdateRangeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.UpdateRangeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.operator_id):
query['operatorId'] = request.operator_id
body = {}
if not UtilClient.is_unset(request.values):
body['values'] = request.values
if not UtilClient.is_unset(request.background_colors):
body['backgroundColors'] = request.background_colors
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.UpdateRangeResponse(),
self.do_roarequest('UpdateRange', 'doc_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/doc/workbooks/{workbook_id}/sheets/{sheet_id}/ranges/{range_address}', 'none', req, runtime)
)
async def update_range_with_options_async(
self,
workbook_id: str,
sheet_id: str,
range_address: str,
request: dingtalkdoc__1__0_models.UpdateRangeRequest,
headers: dingtalkdoc__1__0_models.UpdateRangeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.UpdateRangeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.operator_id):
query['operatorId'] = request.operator_id
body = {}
if not UtilClient.is_unset(request.values):
body['values'] = request.values
if not UtilClient.is_unset(request.background_colors):
body['backgroundColors'] = request.background_colors
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query),
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.UpdateRangeResponse(),
await self.do_roarequest_async('UpdateRange', 'doc_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/doc/workbooks/{workbook_id}/sheets/{sheet_id}/ranges/{range_address}', 'none', req, runtime)
)
def batch_get_workspaces(
self,
request: dingtalkdoc__1__0_models.BatchGetWorkspacesRequest,
) -> dingtalkdoc__1__0_models.BatchGetWorkspacesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.BatchGetWorkspacesHeaders()
return self.batch_get_workspaces_with_options(request, headers, runtime)
async def batch_get_workspaces_async(
self,
request: dingtalkdoc__1__0_models.BatchGetWorkspacesRequest,
) -> dingtalkdoc__1__0_models.BatchGetWorkspacesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.BatchGetWorkspacesHeaders()
return await self.batch_get_workspaces_with_options_async(request, headers, runtime)
def batch_get_workspaces_with_options(
self,
request: dingtalkdoc__1__0_models.BatchGetWorkspacesRequest,
headers: dingtalkdoc__1__0_models.BatchGetWorkspacesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.BatchGetWorkspacesResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.operator_id):
body['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.include_recent):
body['includeRecent'] = request.include_recent
if not UtilClient.is_unset(request.workspace_ids):
body['workspaceIds'] = request.workspace_ids
if not UtilClient.is_unset(request.ding_org_id):
body['dingOrgId'] = request.ding_org_id
if not UtilClient.is_unset(request.ding_isv_org_id):
body['dingIsvOrgId'] = request.ding_isv_org_id
if not UtilClient.is_unset(request.ding_uid):
body['dingUid'] = request.ding_uid
if not UtilClient.is_unset(request.ding_access_token_type):
body['dingAccessTokenType'] = request.ding_access_token_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.BatchGetWorkspacesResponse(),
self.do_roarequest('BatchGetWorkspaces', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/infos/query', 'json', req, runtime)
)
async def batch_get_workspaces_with_options_async(
self,
request: dingtalkdoc__1__0_models.BatchGetWorkspacesRequest,
headers: dingtalkdoc__1__0_models.BatchGetWorkspacesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.BatchGetWorkspacesResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.operator_id):
body['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.include_recent):
body['includeRecent'] = request.include_recent
if not UtilClient.is_unset(request.workspace_ids):
body['workspaceIds'] = request.workspace_ids
if not UtilClient.is_unset(request.ding_org_id):
body['dingOrgId'] = request.ding_org_id
if not UtilClient.is_unset(request.ding_isv_org_id):
body['dingIsvOrgId'] = request.ding_isv_org_id
if not UtilClient.is_unset(request.ding_uid):
body['dingUid'] = request.ding_uid
if not UtilClient.is_unset(request.ding_access_token_type):
body['dingAccessTokenType'] = request.ding_access_token_type
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.BatchGetWorkspacesResponse(),
await self.do_roarequest_async('BatchGetWorkspaces', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/infos/query', 'json', req, runtime)
)
def delete_workspace_members(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.DeleteWorkspaceMembersRequest,
) -> dingtalkdoc__1__0_models.DeleteWorkspaceMembersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.DeleteWorkspaceMembersHeaders()
return self.delete_workspace_members_with_options(workspace_id, request, headers, runtime)
async def delete_workspace_members_async(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.DeleteWorkspaceMembersRequest,
) -> dingtalkdoc__1__0_models.DeleteWorkspaceMembersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.DeleteWorkspaceMembersHeaders()
return await self.delete_workspace_members_with_options_async(workspace_id, request, headers, runtime)
def delete_workspace_members_with_options(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.DeleteWorkspaceMembersRequest,
headers: dingtalkdoc__1__0_models.DeleteWorkspaceMembersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.DeleteWorkspaceMembersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.operator_id):
body['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.members):
body['members'] = request.members
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.DeleteWorkspaceMembersResponse(),
self.do_roarequest('DeleteWorkspaceMembers', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/members/remove', 'none', req, runtime)
)
async def delete_workspace_members_with_options_async(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.DeleteWorkspaceMembersRequest,
headers: dingtalkdoc__1__0_models.DeleteWorkspaceMembersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.DeleteWorkspaceMembersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.operator_id):
body['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.members):
body['members'] = request.members
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.DeleteWorkspaceMembersResponse(),
await self.do_roarequest_async('DeleteWorkspaceMembers', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/members/remove', 'none', req, runtime)
)
def add_workspace_doc_members(
self,
workspace_id: str,
node_id: str,
request: dingtalkdoc__1__0_models.AddWorkspaceDocMembersRequest,
) -> dingtalkdoc__1__0_models.AddWorkspaceDocMembersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.AddWorkspaceDocMembersHeaders()
return self.add_workspace_doc_members_with_options(workspace_id, node_id, request, headers, runtime)
async def add_workspace_doc_members_async(
self,
workspace_id: str,
node_id: str,
request: dingtalkdoc__1__0_models.AddWorkspaceDocMembersRequest,
) -> dingtalkdoc__1__0_models.AddWorkspaceDocMembersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.AddWorkspaceDocMembersHeaders()
return await self.add_workspace_doc_members_with_options_async(workspace_id, node_id, request, headers, runtime)
def add_workspace_doc_members_with_options(
self,
workspace_id: str,
node_id: str,
request: dingtalkdoc__1__0_models.AddWorkspaceDocMembersRequest,
headers: dingtalkdoc__1__0_models.AddWorkspaceDocMembersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.AddWorkspaceDocMembersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.operator_id):
body['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.members):
body['members'] = request.members
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.AddWorkspaceDocMembersResponse(),
self.do_roarequest('AddWorkspaceDocMembers', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/docs/{node_id}/members', 'none', req, runtime)
)
async def add_workspace_doc_members_with_options_async(
self,
workspace_id: str,
node_id: str,
request: dingtalkdoc__1__0_models.AddWorkspaceDocMembersRequest,
headers: dingtalkdoc__1__0_models.AddWorkspaceDocMembersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.AddWorkspaceDocMembersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.operator_id):
body['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.members):
body['members'] = request.members
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.AddWorkspaceDocMembersResponse(),
await self.do_roarequest_async('AddWorkspaceDocMembers', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/docs/{node_id}/members', 'none', req, runtime)
)
def update_workspace_members(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.UpdateWorkspaceMembersRequest,
) -> dingtalkdoc__1__0_models.UpdateWorkspaceMembersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.UpdateWorkspaceMembersHeaders()
return self.update_workspace_members_with_options(workspace_id, request, headers, runtime)
async def update_workspace_members_async(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.UpdateWorkspaceMembersRequest,
) -> dingtalkdoc__1__0_models.UpdateWorkspaceMembersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.UpdateWorkspaceMembersHeaders()
return await self.update_workspace_members_with_options_async(workspace_id, request, headers, runtime)
def update_workspace_members_with_options(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.UpdateWorkspaceMembersRequest,
headers: dingtalkdoc__1__0_models.UpdateWorkspaceMembersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.UpdateWorkspaceMembersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.operator_id):
body['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.members):
body['members'] = request.members
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.UpdateWorkspaceMembersResponse(),
self.do_roarequest('UpdateWorkspaceMembers', 'doc_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/members', 'none', req, runtime)
)
async def update_workspace_members_with_options_async(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.UpdateWorkspaceMembersRequest,
headers: dingtalkdoc__1__0_models.UpdateWorkspaceMembersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.UpdateWorkspaceMembersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.operator_id):
body['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.members):
body['members'] = request.members
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.UpdateWorkspaceMembersResponse(),
await self.do_roarequest_async('UpdateWorkspaceMembers', 'doc_1.0', 'HTTP', 'PUT', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/members', 'none', req, runtime)
)
def get_sheet(
self,
workbook_id: str,
sheet_id: str,
request: dingtalkdoc__1__0_models.GetSheetRequest,
) -> dingtalkdoc__1__0_models.GetSheetResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.GetSheetHeaders()
return self.get_sheet_with_options(workbook_id, sheet_id, request, headers, runtime)
async def get_sheet_async(
self,
workbook_id: str,
sheet_id: str,
request: dingtalkdoc__1__0_models.GetSheetRequest,
) -> dingtalkdoc__1__0_models.GetSheetResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.GetSheetHeaders()
return await self.get_sheet_with_options_async(workbook_id, sheet_id, request, headers, runtime)
def get_sheet_with_options(
self,
workbook_id: str,
sheet_id: str,
request: dingtalkdoc__1__0_models.GetSheetRequest,
headers: dingtalkdoc__1__0_models.GetSheetHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.GetSheetResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.operator_id):
query['operatorId'] = request.operator_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.GetSheetResponse(),
self.do_roarequest('GetSheet', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/workbooks/{workbook_id}/sheets/{sheet_id}', 'json', req, runtime)
)
async def get_sheet_with_options_async(
self,
workbook_id: str,
sheet_id: str,
request: dingtalkdoc__1__0_models.GetSheetRequest,
headers: dingtalkdoc__1__0_models.GetSheetHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.GetSheetResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.operator_id):
query['operatorId'] = request.operator_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.GetSheetResponse(),
await self.do_roarequest_async('GetSheet', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/workbooks/{workbook_id}/sheets/{sheet_id}', 'json', req, runtime)
)
def get_related_workspaces(
self,
request: dingtalkdoc__1__0_models.GetRelatedWorkspacesRequest,
) -> dingtalkdoc__1__0_models.GetRelatedWorkspacesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.GetRelatedWorkspacesHeaders()
return self.get_related_workspaces_with_options(request, headers, runtime)
async def get_related_workspaces_async(
self,
request: dingtalkdoc__1__0_models.GetRelatedWorkspacesRequest,
) -> dingtalkdoc__1__0_models.GetRelatedWorkspacesResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.GetRelatedWorkspacesHeaders()
return await self.get_related_workspaces_with_options_async(request, headers, runtime)
def get_related_workspaces_with_options(
self,
request: dingtalkdoc__1__0_models.GetRelatedWorkspacesRequest,
headers: dingtalkdoc__1__0_models.GetRelatedWorkspacesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.GetRelatedWorkspacesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.operator_id):
query['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.include_recent):
query['includeRecent'] = request.include_recent
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.GetRelatedWorkspacesResponse(),
self.do_roarequest('GetRelatedWorkspaces', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/workspaces', 'json', req, runtime)
)
async def get_related_workspaces_with_options_async(
self,
request: dingtalkdoc__1__0_models.GetRelatedWorkspacesRequest,
headers: dingtalkdoc__1__0_models.GetRelatedWorkspacesHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.GetRelatedWorkspacesResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.operator_id):
query['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.include_recent):
query['includeRecent'] = request.include_recent
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.GetRelatedWorkspacesResponse(),
await self.do_roarequest_async('GetRelatedWorkspaces', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/workspaces', 'json', req, runtime)
)
def get_recent_edit_docs(
self,
request: dingtalkdoc__1__0_models.GetRecentEditDocsRequest,
) -> dingtalkdoc__1__0_models.GetRecentEditDocsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.GetRecentEditDocsHeaders()
return self.get_recent_edit_docs_with_options(request, headers, runtime)
async def get_recent_edit_docs_async(
self,
request: dingtalkdoc__1__0_models.GetRecentEditDocsRequest,
) -> dingtalkdoc__1__0_models.GetRecentEditDocsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.GetRecentEditDocsHeaders()
return await self.get_recent_edit_docs_with_options_async(request, headers, runtime)
def get_recent_edit_docs_with_options(
self,
request: dingtalkdoc__1__0_models.GetRecentEditDocsRequest,
headers: dingtalkdoc__1__0_models.GetRecentEditDocsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.GetRecentEditDocsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.operator_id):
query['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.max_results):
query['maxResults'] = request.max_results
if not UtilClient.is_unset(request.next_token):
query['nextToken'] = request.next_token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.GetRecentEditDocsResponse(),
self.do_roarequest('GetRecentEditDocs', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/workspaces/docs/recentEditDocs', 'json', req, runtime)
)
async def get_recent_edit_docs_with_options_async(
self,
request: dingtalkdoc__1__0_models.GetRecentEditDocsRequest,
headers: dingtalkdoc__1__0_models.GetRecentEditDocsHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.GetRecentEditDocsResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.operator_id):
query['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.max_results):
query['maxResults'] = request.max_results
if not UtilClient.is_unset(request.next_token):
query['nextToken'] = request.next_token
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.GetRecentEditDocsResponse(),
await self.do_roarequest_async('GetRecentEditDocs', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/workspaces/docs/recentEditDocs', 'json', req, runtime)
)
def add_workspace_members(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.AddWorkspaceMembersRequest,
) -> dingtalkdoc__1__0_models.AddWorkspaceMembersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.AddWorkspaceMembersHeaders()
return self.add_workspace_members_with_options(workspace_id, request, headers, runtime)
async def add_workspace_members_async(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.AddWorkspaceMembersRequest,
) -> dingtalkdoc__1__0_models.AddWorkspaceMembersResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.AddWorkspaceMembersHeaders()
return await self.add_workspace_members_with_options_async(workspace_id, request, headers, runtime)
def add_workspace_members_with_options(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.AddWorkspaceMembersRequest,
headers: dingtalkdoc__1__0_models.AddWorkspaceMembersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.AddWorkspaceMembersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.operator_id):
body['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.members):
body['members'] = request.members
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.AddWorkspaceMembersResponse(),
self.do_roarequest('AddWorkspaceMembers', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/members', 'json', req, runtime)
)
async def add_workspace_members_with_options_async(
self,
workspace_id: str,
request: dingtalkdoc__1__0_models.AddWorkspaceMembersRequest,
headers: dingtalkdoc__1__0_models.AddWorkspaceMembersHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.AddWorkspaceMembersResponse:
UtilClient.validate_model(request)
body = {}
if not UtilClient.is_unset(request.operator_id):
body['operatorId'] = request.operator_id
if not UtilClient.is_unset(request.members):
body['members'] = request.members
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
body=OpenApiUtilClient.parse_to_map(body)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.AddWorkspaceMembersResponse(),
await self.do_roarequest_async('AddWorkspaceMembers', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/members', 'json', req, runtime)
)
def get_workspace_node(
self,
workspace_id: str,
node_id: str,
request: dingtalkdoc__1__0_models.GetWorkspaceNodeRequest,
) -> dingtalkdoc__1__0_models.GetWorkspaceNodeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.GetWorkspaceNodeHeaders()
return self.get_workspace_node_with_options(workspace_id, node_id, request, headers, runtime)
async def get_workspace_node_async(
self,
workspace_id: str,
node_id: str,
request: dingtalkdoc__1__0_models.GetWorkspaceNodeRequest,
) -> dingtalkdoc__1__0_models.GetWorkspaceNodeResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.GetWorkspaceNodeHeaders()
return await self.get_workspace_node_with_options_async(workspace_id, node_id, request, headers, runtime)
def get_workspace_node_with_options(
self,
workspace_id: str,
node_id: str,
request: dingtalkdoc__1__0_models.GetWorkspaceNodeRequest,
headers: dingtalkdoc__1__0_models.GetWorkspaceNodeHeaders,
runtime: util_models.RuntimeOptions,
) -> dingtalkdoc__1__0_models.GetWorkspaceNodeResponse:
UtilClient.validate_model(request)
query = {}
if not UtilClient.is_unset(request.operator_id):
query['operatorId'] = request.operator_id
real_headers = {}
if not UtilClient.is_unset(headers.common_headers):
real_headers = headers.common_headers
if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
req = open_api_models.OpenApiRequest(
headers=real_headers,
query=OpenApiUtilClient.query(query)
)
return TeaCore.from_map(
dingtalkdoc__1__0_models.GetWorkspaceNodeResponse(),
self.do_roarequest('GetWorkspaceNode', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/docs/{node_id}', 'json', req, runtime)
)
    async def get_workspace_node_with_options_async(
        self,
        workspace_id: str,
        node_id: str,
        request: dingtalkdoc__1__0_models.GetWorkspaceNodeRequest,
        headers: dingtalkdoc__1__0_models.GetWorkspaceNodeHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.GetWorkspaceNodeResponse:
        """Async variant of get_workspace_node_with_options (same request shape)."""
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.operator_id):
            query['operatorId'] = request.operator_id
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): aliases headers.common_headers (no copy); the token
            # assignment below mutates the caller's dict — confirm intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.GetWorkspaceNodeResponse(),
            await self.do_roarequest_async('GetWorkspaceNode', 'doc_1.0', 'HTTP', 'GET', 'AK', f'/v1.0/doc/workspaces/{workspace_id}/docs/{node_id}', 'json', req, runtime)
        )
def append_rows(
self,
workbook_id: str,
sheet_id: str,
request: dingtalkdoc__1__0_models.AppendRowsRequest,
) -> dingtalkdoc__1__0_models.AppendRowsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.AppendRowsHeaders()
return self.append_rows_with_options(workbook_id, sheet_id, request, headers, runtime)
async def append_rows_async(
self,
workbook_id: str,
sheet_id: str,
request: dingtalkdoc__1__0_models.AppendRowsRequest,
) -> dingtalkdoc__1__0_models.AppendRowsResponse:
runtime = util_models.RuntimeOptions()
headers = dingtalkdoc__1__0_models.AppendRowsHeaders()
return await self.append_rows_with_options_async(workbook_id, sheet_id, request, headers, runtime)
    def append_rows_with_options(
        self,
        workbook_id: str,
        sheet_id: str,
        request: dingtalkdoc__1__0_models.AppendRowsRequest,
        headers: dingtalkdoc__1__0_models.AppendRowsHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.AppendRowsResponse:
        """Append rows via POST /v1.0/doc/workbooks/{workbook_id}/sheets/{sheet_id}/appendRows.

        The optional ``operator_id`` goes into the query; ``values`` goes into
        the JSON body. The response body type is 'none' (empty payload).
        """
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.operator_id):
            query['operatorId'] = request.operator_id
        body = {}
        if not UtilClient.is_unset(request.values):
            body['values'] = request.values
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): aliases headers.common_headers (no copy); the token
            # assignment below mutates the caller's dict — confirm intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query),
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.AppendRowsResponse(),
            self.do_roarequest('AppendRows', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workbooks/{workbook_id}/sheets/{sheet_id}/appendRows', 'none', req, runtime)
        )
    async def append_rows_with_options_async(
        self,
        workbook_id: str,
        sheet_id: str,
        request: dingtalkdoc__1__0_models.AppendRowsRequest,
        headers: dingtalkdoc__1__0_models.AppendRowsHeaders,
        runtime: util_models.RuntimeOptions,
    ) -> dingtalkdoc__1__0_models.AppendRowsResponse:
        """Async variant of append_rows_with_options (same request shape)."""
        UtilClient.validate_model(request)
        query = {}
        if not UtilClient.is_unset(request.operator_id):
            query['operatorId'] = request.operator_id
        body = {}
        if not UtilClient.is_unset(request.values):
            body['values'] = request.values
        real_headers = {}
        if not UtilClient.is_unset(headers.common_headers):
            # NOTE(review): aliases headers.common_headers (no copy); the token
            # assignment below mutates the caller's dict — confirm intended.
            real_headers = headers.common_headers
        if not UtilClient.is_unset(headers.x_acs_dingtalk_access_token):
            real_headers['x-acs-dingtalk-access-token'] = headers.x_acs_dingtalk_access_token
        req = open_api_models.OpenApiRequest(
            headers=real_headers,
            query=OpenApiUtilClient.query(query),
            body=OpenApiUtilClient.parse_to_map(body)
        )
        return TeaCore.from_map(
            dingtalkdoc__1__0_models.AppendRowsResponse(),
            await self.do_roarequest_async('AppendRows', 'doc_1.0', 'HTTP', 'POST', 'AK', f'/v1.0/doc/workbooks/{workbook_id}/sheets/{sheet_id}/appendRows', 'none', req, runtime)
        )
|
<reponame>Kozoro/ipyida
# -*- encoding: utf8 -*-
#
# This module allows an IPython kernel to be embedded inside IDA.
# You need the IPython module to be accessible from IDA for this to work.
# See README.adoc for more details.
#
# Copyright (c) 2015-2018 ESET
# Author: <NAME> <<EMAIL>>
# See LICENSE file for redistribution.
from ipykernel.kernelapp import IPKernelApp
import IPython.utils.frame
import ipykernel.iostream
import sys
import os
import logging
import idaapi
# The IPython kernel will override sys.std{out,err}. We keep a copy so the
# existing embedded IDA console keeps working, and so IPython can also write
# to it (see IDATeeOutStream below).
_ida_stdout = sys.stdout
_ida_stderr = sys.stderr
# Path to a file to load into the kernel's namespace during its creation,
# similar to IDAPython's idapythonrc.py file.
IPYIDARC_PATH = os.path.join(idaapi.get_user_idadir(), 'ipyidarc.py')
if sys.__stdout__ is None or sys.__stdout__.fileno() < 0:
    # IPython insists on using sys.__stdout__, which is not usable in IDA on
    # Windows. Point __stdout__ at the null device so writes and flushes on
    # the bogus file descriptor do not raise.
    sys.__stdout__ = open(os.devnull, "w")
# IPython will override sys.excepthook and send exceptions to sys.__stderr__.
# IDA expects exceptions on sys.stderr (which IDA overrides) in order to show
# them in the console window. Saved here for use by wrap_excepthook.
_ida_excepthook = sys.excepthook
def is_using_ipykernel_5():
    """Return True when the installed ipykernel is version 5 or newer.

    Detected structurally: ipykernel >= 5 exposes ``Kernel.process_one``.
    """
    import ipykernel
    kernel_cls = ipykernel.kernelbase.Kernel
    return hasattr(kernel_cls, "process_one")
class IDATeeOutStream(ipykernel.iostream.OutStream):
    """OutStream that tees kernel output to IDA's own console.

    Whatever is written to the ZMQ stream (for Jupyter clients) is also
    written to the std streams that were active before the kernel started.
    """

    def _setup_stream_redirects(self, name):
        # This method was added in ipykernel 6.0 to capture stdout and stderr
        # outside the context of the kernel. It expects stdout and stderr to
        # be file objects with a fileno. Since IDAPython replaces
        # sys.std{out,err} with IDAPythonStdOut instances (redirecting output
        # to the console), we temporarily restore the original streams while
        # the superclass method runs.
        # This method is only called on macOS and Linux.
        # See: https://github.com/ipython/ipykernel/commit/ae2f441a
        try:
            ida_ios = sys.stdout, sys.stderr
            sys.stdout = sys.modules["__main__"]._orig_stdout
            sys.stderr = sys.modules["__main__"]._orig_stderr
            return super(IDATeeOutStream, self)._setup_stream_redirects(name)
        finally:
            sys.stdout, sys.stderr = ida_ios

    def write(self, string):
        "Write on both the previously saved IDA std output and zmq's stream"
        if self.name == "stdout" and _ida_stdout:
            _ida_stdout.write(string)
        elif self.name == "stderr" and _ida_stderr:
            _ida_stderr.write(string)
        # Fixed: was super(self.__class__, self).write(string), which recurses
        # infinitely as soon as this class is subclassed; name the class
        # explicitly instead.
        super(IDATeeOutStream, self).write(string)
def wrap_excepthook(ipython_excepthook):
    """Build an excepthook chaining IDA's original hook with the kernel's.

    The returned callable first invokes the excepthook saved at import time
    (so IDA's console still shows the traceback), then the IPython one.
    """
    def chained_hook(*exc_info):
        _ida_excepthook(*exc_info)
        ipython_excepthook(*exc_info)
    return chained_hook
class IPythonKernel(object):
    """Manage the lifecycle of an IPython kernel embedded inside IDA.

    With ipykernel >= 5 the kernel drives itself; with older versions the
    runloop is pumped by an IDA timer that calls do_one_iteration.
    """
    def __init__(self):
        # IDA timer handle; also serves as the "is started" flag (see below).
        self._timer = None
        # Path to the kernel's connection file once started (used by clients
        # to connect to this kernel).
        self.connection_file = None
    def start(self):
        """Create (or reuse) the IPKernelApp singleton and start the kernel.

        Raises:
            Exception: if the kernel is already running.
        """
        if self._timer is not None:
            raise Exception("IPython kernel is already running.")
        # The IPKernelApp initialization is based on the IPython source for
        # IPython.embed_kernel available here:
        # https://github.com/ipython/ipython/blob/rel-3.2.1/IPython/kernel/zmq/embed.py
        if IPKernelApp.initialized():
            app = IPKernelApp.instance()
        else:
            # Load IPyIDA's user init file into the user namespace if it exists.
            if os.path.exists(IPYIDARC_PATH):
                IPKernelApp.exec_files = [ IPYIDARC_PATH ]
            app = IPKernelApp.instance(
                outstream_class='ipyida.kernel.IDATeeOutStream',
                # We provide our own logger here because the default one from
                # traitlets adds a handler that expect stderr to be a regular
                # file object, and IDAPython's sys.stderr is actually a
                # IDAPythonStdOut instance
                log=logging.getLogger("ipyida_kernel")
            )
            app.initialize()
            main = app.kernel.shell._orig_sys_modules_main_mod
            if main is not None:
                sys.modules[app.kernel.shell._orig_sys_modules_main_name] = main
            # IPython <= 3.2.x will send exception to sys.__stderr__ instead of
            # sys.stderr. IDA's console will not be able to display exceptions if we
            # don't send it to IDA's sys.stderr. To fix this, we call both the
            # ipython's and IDA's excepthook (IDA's excepthook is actually Python's
            # default).
            sys.excepthook = wrap_excepthook(sys.excepthook)
        app.shell.set_completer_frame()
        app.kernel.start()
        self.connection_file = app.connection_file
        if not is_using_ipykernel_5():
            # Old ipykernel has no self-driving loop inside IDA: pump it once
            # now, then keep pumping it from an IDA timer.
            app.kernel.do_one_iteration()
            def ipython_kernel_iteration():
                app.kernel.do_one_iteration()
                return int(1000 * app.kernel._poll_interval)
            self._timer = idaapi.register_timer(int(1000 * app.kernel._poll_interval), ipython_kernel_iteration)
    def stop(self):
        """Stop pumping the kernel and restore IDA's std streams."""
        if self._timer is not None:
            idaapi.unregister_timer(self._timer)
        self._timer = None
        self.connection_file = None
        sys.stdout = _ida_stdout
        sys.stderr = _ida_stderr
    @property
    def started(self):
        # NOTE(review): with ipykernel >= 5 no timer is registered in start(),
        # so this stays False even after a successful start() — confirm that
        # is the intended behavior for callers of this property.
        return self._timer is not None
def do_one_iteration():
    """Perform an iteration on IPython kernel runloop"""
    # Guard clauses: only valid for pre-5 ipykernel on an initialized app.
    if is_using_ipykernel_5():
        raise Exception("Should not call this when ipykernel >= 5")
    if not IPKernelApp.initialized():
        raise Exception("Kernel is not initialized")
    IPKernelApp.instance().kernel.do_one_iteration()
|
<gh_stars>1-10
'''This plots the mixing sweep results
'''
from os import mkdir
from os.path import isdir
from pickle import load
from numpy import arange, array, atleast_2d, hstack, sum, where, zeros
from matplotlib.pyplot import axes, close, colorbar, imshow, set_cmap, subplots
from mpl_toolkits.axes_grid1 import make_axes_locatable
from seaborn import heatmap
if not isdir('plots/mixing_sweep'):
    mkdir('plots/mixing_sweep')

with open('outputs/mixing_sweep/results.pkl', 'rb') as f:
    (growth_rate,
     peaks,
     R_end,
     hh_prop,
     attack_ratio,
     internal_mix_range,
     external_mix_range) = load(f)

internal_mix_len = len(internal_mix_range)
external_mix_len = len(external_mix_range)

# Shared colour scales so the standalone plots and the summary grid agree.
r_min, r_max = growth_rate.min(), growth_rate.max()
peak_min, peak_max = peaks.min(), peaks.max()
R_end_min, R_end_max = R_end.min(), R_end.max()
hh_prop_min, hh_prop_max = hh_prop.min(), hh_prop.max()
attack_ratio_min, attack_ratio_max = attack_ratio.min(), attack_ratio.max()

# Axis labels are identical on every panel.
YLABEL = '% reduction in\n within-household\n transmission'
XLABEL = '% reduction in\n between-household\n transmission'


def _add_panel(fig, ax, data, vmin, vmax, cbar_label):
    """Draw one heatmap panel on *ax* with its own right-hand colorbar."""
    axim = ax.imshow(data,
                     origin='lower',
                     extent=(0, 1, 0, 1),
                     vmin=vmin,
                     vmax=vmax)
    cax = make_axes_locatable(ax).append_axes("right", size="5%", pad=0.05)
    fig.colorbar(axim, label=cbar_label, cax=cax)


def _standalone_figure(data, vmin, vmax, cbar_label, filename):
    """Plot one quantity as its own labelled figure and save it to disk.

    Factored out of five near-identical copy-pasted blocks; each call
    produces the same file as the corresponding original block.
    """
    fig, ax = subplots(1, 1)
    _add_panel(fig, ax, data, vmin, vmax, cbar_label)
    ax.set_ylabel(YLABEL)
    ax.set_xlabel(XLABEL)
    fig.savefig('plots/mixing_sweep/' + filename,
                bbox_inches='tight',
                dpi=300)
    close()


_standalone_figure(growth_rate, r_min, r_max,
                   "Growth rate", 'growth_rate.png')
_standalone_figure(peaks, peak_min, peak_max,
                   "Peak % prevalence", 'peaks.png')
_standalone_figure(R_end, R_end_min, R_end_max,
                   "% population immunity at end of simulation", 'R_end.png')
_standalone_figure(hh_prop, hh_prop_min, hh_prop_max,
                   "% of households infected", 'hh_prop.png')
_standalone_figure(attack_ratio, attack_ratio_min, attack_ratio_max,
                   "% attack ratio", 'attack_ratio.png')

# 2x2 summary grid: peak prevalence, cumulative prevalence, household
# proportion, attack ratio. Only the outer axes get the shared labels.
fig, ((ax1, ax2), (ax3, ax4)) = subplots(2, 2)
_add_panel(fig, ax1, peaks, peak_min, peak_max, "Peak % prevalence")
ax1.set_ylabel(YLABEL)
_add_panel(fig, ax2, R_end, R_end_min, R_end_max, "Cumulative % prevalence")
_add_panel(fig, ax3, hh_prop, hh_prop_min, hh_prop_max,
           "% of households infected")
ax3.set_ylabel(YLABEL)
ax3.set_xlabel(XLABEL)
_add_panel(fig, ax4, attack_ratio, attack_ratio_min, attack_ratio_max,
           "% attack ratio")
ax4.set_xlabel(XLABEL)
fig.savefig('plots/mixing_sweep/grid_plot.png',
            bbox_inches='tight',
            dpi=300)
close()
|
<reponame>PolicyStat/distributed-nose<gh_stars>1-10
import logging
from hashring import HashRing
from nose.plugins.base import Plugin
from nose.util import test_address
logger = logging.getLogger('nose.plugins.distributed_nose')
class DistributedNose(Plugin):
    """
    Distribute a test run, shared-nothing style, by specifying the total number
    of runners and a unique ID for this runner.

    Tests (or whole classes when --hash-by-class is set) are mapped to nodes
    via a consistent hash ring; each node runs only what hashes to its id.
    """
    name = 'distributed'

    def __init__(self):
        Plugin.__init__(self)
        self.node_count = None
        self.node_id = None
        self.hash_ring = None
        # Fix: previously only assigned in configure(), so wantMethod/wantClass
        # raised AttributeError if the hooks fired without configuration.
        self.hash_by_class = False

    def options(self, parser, env):
        """Register distribution-related command-line/environment options."""
        parser.add_option(
            "--nodes",
            action="store",
            dest="distributed_nodes",
            default=env.get('NOSE_NODES', 1),
            help="Across how many nodes are tests being distributed?",
        )
        parser.add_option(
            "--node-number",
            action="store",
            dest="distributed_node_number",
            default=env.get('NOSE_NODE_NUMBER', 1),
            help=(
                "Of the total nodes running distributed tests, "
                "which number is this node? (1-indexed)"
            ),
        )
        parser.add_option(
            "--distributed-disabled",
            action="store_true",
            dest="distributed_disabled",
            default=False,
            help=((
                "Set this flag to disable distribution, "
                "despite having more than 1 node configured. "
                "This is useful if you use environment configs "
                "and want to temporarily disable test distribution."
            )),
        )
        parser.add_option(
            "--hash-by-class",
            action="store_true",
            dest="distributed_hash_by_class",
            # any non-empty value enables
            default=bool(env.get('NOSE_HASH_BY_CLASS', False)),
            help=((
                "By default, tests are distributed individually. "
                "This results in the most even distribution and the"
                " best speed if all tests have the same runtime. "
                "However, it duplicates class setup/teardown work; "
                "set this flag to keep tests in the same class on the same node. "  # noqa
            )),
        )

    def configure(self, options, config):
        """Validate options; enable distribution when node_count > 1."""
        self.node_count = options.distributed_nodes
        self.node_id = options.distributed_node_number
        self.hash_by_class = options.distributed_hash_by_class
        if not self._options_are_valid():
            self.enabled = False
            return
        if options.distributed_disabled:
            self.enabled = False
            return
        if self.node_count > 1:
            # If the user gives us a non-1 count of distributed nodes, then
            # let's distribute their tests
            self.enabled = True
            self.hash_ring = HashRing(range(1, self.node_count + 1))

    def _options_are_valid(self):
        """Coerce node options to ints and range-check them.

        Returns False (after logging) on any invalid option.
        """
        try:
            self.node_count = int(self.node_count)
        except ValueError:
            logger.critical("--nodes must be an integer")
            return False
        try:
            self.node_id = int(self.node_id)
        except ValueError:
            logger.critical("--node-number must be an integer")
            return False
        if self.node_id > self.node_count:
            logger.critical((
                "--node-number can't be larger "
                "than the number of nodes"
            ))
            return False
        if self.node_id < 1:
            logger.critical(
                "--node-number must be greater than zero"
            )
            return False
        return True

    def validateName(self, testObject):
        """Return False if *testObject* hashes to another node, else None.

        None lets nose (and other plugins) make the final selection decision.
        """
        try:
            _, module, call = test_address(testObject)
        except TypeError:
            # Some objects (e.g. generated tests) have no address; hash on a
            # stable fallback key instead of crashing.
            module = 'unknown'
            call = str(testObject)
        node = self.hash_ring.get_node('%s.%s' % (module, call))
        if node != self.node_id:
            return False
        return None

    def wantClass(self, cls):
        if not self.hash_by_class:
            # Defer to wantMethod.
            return None
        node = self.hash_ring.get_node(str(cls))
        if node != self.node_id:
            return False
        return None

    def wantMethod(self, method):
        if self.hash_by_class:
            # Don't override class selection decisions.
            return None
        return self.validateName(method)

    def wantFunction(self, function):
        # Always operate directly on bare functions.
        return self.validateName(function)
|
<reponame>pinax/pinax-types
import datetime
from django.core.exceptions import ValidationError
from django.test import TestCase
from django.utils import timezone
from pinax.types.periods import (
PERIOD_TYPES,
get_period,
parse,
period_display,
period_for_date,
period_range,
period_start_end,
validate,
)
from pinax.types.values import VALUE_TYPES
class ValueTypesTests(TestCase):
    """Validation and display behavior of each registered value type.

    Uses assertEqual throughout: assertEquals is a deprecated alias that was
    removed in Python 3.12.
    """

    def test_integer_value_type_raises_error(self):
        with self.assertRaises(ValidationError):
            VALUE_TYPES["integer"].validate("foo")

    def test_integer_value_type_validates(self):
        self.assertIsNone(VALUE_TYPES["integer"].validate("566"))

    def test_boolean_value_type_raises_error_on_not_true_or_false(self):
        with self.assertRaises(ValidationError):
            VALUE_TYPES["boolean"].validate("foo")

    def test_boolean_value_type_validates_true(self):
        self.assertIsNone(VALUE_TYPES["boolean"].validate("true"))

    def test_boolean_value_type_validates_false(self):
        self.assertIsNone(VALUE_TYPES["boolean"].validate("false"))

    def test_decimal_value_type_raises_error(self):
        with self.assertRaises(ValidationError):
            VALUE_TYPES["decimal"].validate("foo")

    def test_decimal_value_type_validates(self):
        self.assertIsNone(VALUE_TYPES["decimal"].validate("5.66"))

    def test_monetary_value_type_raises_error(self):
        with self.assertRaises(ValidationError):
            VALUE_TYPES["monetary"].validate("foo")

    def test_monetary_value_type_validates(self):
        self.assertIsNone(VALUE_TYPES["monetary"].validate("56.60"))

    def test_hour_value_type_raises_error(self):
        with self.assertRaises(ValidationError):
            VALUE_TYPES["hours"].validate("foo")

    def test_hour_value_type_validates(self):
        self.assertIsNone(VALUE_TYPES["hours"].validate("56"))

    def test_traffic_light_value_type_raises_error_on_big_int(self):
        with self.assertRaises(ValidationError):
            VALUE_TYPES["traffic-light"].validate("1000")

    def test_traffic_light_value_type_raises_error_on_non_int(self):
        with self.assertRaises(ValidationError):
            VALUE_TYPES["traffic-light"].validate("foo")

    def test_traffic_light_value_type_validates(self):
        self.assertIsNone(VALUE_TYPES["traffic-light"].validate("2"))

    def test_monetary_value_display(self):
        self.assertEqual(VALUE_TYPES["monetary"].display(1000), "$1,000")

    def test_percentage_value_display(self):
        self.assertEqual(VALUE_TYPES["percentage"].display(0.37), "37.0%")

    def test_hours_value_display(self):
        self.assertEqual(VALUE_TYPES["hours"].display(1000), "1000h")

    def test_traffic_light_value_display(self):
        self.assertEqual(VALUE_TYPES["traffic-light"].display(1), "red")
class PeriodTests(TestCase):
def setUp(self):
self.quarter_1 = get_period("Q-2015-1")
self.quarter_2 = get_period("Q-2015-2")
self.year = get_period("Y-2015")
def test_quarterly_sub_periods_monthly_all(self):
periods = self.quarter_1.sub_periods("monthly")
self.assertEquals(len(periods), 3)
self.assertEquals(periods[0].get_display(), "January 2015")
self.assertEquals(periods[1].get_display(), "February 2015")
self.assertEquals(periods[2].get_display(), "March 2015")
def test_quarterly_sub_periods_weekly_all(self):
periods = self.quarter_1.sub_periods("weekly")
self.assertEquals(len(periods), 14)
self.assertEquals(periods[0].raw_value, "W-2015-01")
self.assertEquals(periods[1].raw_value, "W-2015-02")
self.assertEquals(periods[2].raw_value, "W-2015-03")
self.assertEquals(periods[3].raw_value, "W-2015-04")
self.assertEquals(periods[4].raw_value, "W-2015-05")
self.assertEquals(periods[5].raw_value, "W-2015-06")
self.assertEquals(periods[6].raw_value, "W-2015-07")
self.assertEquals(periods[7].raw_value, "W-2015-08")
self.assertEquals(periods[8].raw_value, "W-2015-09")
self.assertEquals(periods[9].raw_value, "W-2015-10")
self.assertEquals(periods[10].raw_value, "W-2015-11")
self.assertEquals(periods[11].raw_value, "W-2015-12")
self.assertEquals(periods[12].raw_value, "W-2015-13")
self.assertEquals(periods[13].raw_value, "W-2015-14")
def test_yearly_sub_periods_quarterly_all(self):
periods = get_period("Y-2015").sub_periods("quarterly")
self.assertEquals(len(periods), 4)
self.assertEquals(periods[0].raw_value, "Q-2015-1")
self.assertEquals(periods[1].raw_value, "Q-2015-2")
self.assertEquals(periods[2].raw_value, "Q-2015-3")
self.assertEquals(periods[3].raw_value, "Q-2015-4")
def test_yearly_sub_periods_monthly_all(self):
periods = get_period("Y-2015").sub_periods("monthly")
self.assertEquals(len(periods), 12)
self.assertEquals(periods[0].get_display(), "January 2015")
self.assertEquals(periods[1].get_display(), "February 2015")
self.assertEquals(periods[2].get_display(), "March 2015")
self.assertEquals(periods[3].get_display(), "April 2015")
self.assertEquals(periods[4].get_display(), "May 2015")
self.assertEquals(periods[5].get_display(), "June 2015")
self.assertEquals(periods[6].get_display(), "July 2015")
self.assertEquals(periods[7].get_display(), "August 2015")
self.assertEquals(periods[8].get_display(), "September 2015")
self.assertEquals(periods[9].get_display(), "October 2015")
self.assertEquals(periods[10].get_display(), "November 2015")
self.assertEquals(periods[11].get_display(), "December 2015")
def test_yearly_sub_periods_weekly_all(self):
periods = get_period("Y-2015").sub_periods("weekly")
self.assertEquals(len(periods), 53)
for i in range(53):
self.assertEquals(periods[i].raw_value, "W-2015-{:02d}".format(i + 1))
def test_monthly_sub_periods_weekly_all(self):
periods = get_period("M-2015-01").sub_periods("weekly")
self.assertEquals(len(periods), 5)
for i in range(5):
self.assertEquals(periods[i].raw_value, "W-2015-{:02d}".format(i + 1))
def test_validate_for_pass(self):
try:
self.quarter_1.validate_for("quarterly")
except ValidationError:
self.fail()
def test_validate_for_fail(self):
with self.assertRaises(ValidationError):
self.quarter_1.validate_for("weekly")
def test_validate_can_contain_type_pass_1(self):
try:
self.quarter_1.validate_can_contain_type("quarterly")
except ValidationError:
self.fail()
def test_validate_can_contain_type_pass_2(self):
try:
self.quarter_1.validate_can_contain_type("weekly")
except ValidationError:
self.fail()
def test_validate_can_contain_type_faik(self):
with self.assertRaises(ValidationError):
self.quarter_1.validate_can_contain_type("yearly")
def test_get_display(self):
self.assertEquals(get_period("M-2015-04").get_display(), "April 2015")
def test_current(self):
period = get_period(PERIOD_TYPES["quarterly"].for_date(timezone.now()))
self.assertTrue(period.is_current())
def test_past(self):
period = get_period("M-2013-11")
self.assertTrue(period.is_past())
def test_includes_1(self):
self.assertTrue(self.year.includes(self.quarter_1))
self.assertTrue(self.year.includes(self.quarter_2))
def test_includes_2(self):
self.assertFalse(self.quarter_1.includes(self.year))
def test_includes_3(self):
self.assertFalse(self.quarter_1.includes(self.quarter_2))
def test_includes_4(self):
self.assertTrue(self.quarter_1.includes(get_period("Q-2015-1")))
def test_validate_random_string(self):
with self.assertRaises(ValidationError):
validate("Patrick")
def test_equality_true(self):
self.assertTrue(self.quarter_1 == get_period("Q-2015-1"))
def test_equality_false_1(self):
self.assertFalse(self.quarter_1 == self.quarter_2)
def test_equality_false_2(self):
self.assertFalse(self.quarter_1 == self.year)
def test_non_equality_false(self):
self.assertFalse(self.quarter_1 != get_period("Q-2015-1"))
def test_non_equality_true_1(self):
self.assertTrue(self.quarter_1 != self.quarter_2)
def test_non_equality_true_2(self):
self.assertTrue(self.quarter_1 != self.year)
def test_less_than_true(self):
self.assertTrue(self.quarter_1 < self.quarter_2)
def test_less_than_false_1(self):
self.assertFalse(self.quarter_2 < self.quarter_1)
def test_less_than_false_2(self):
self.assertFalse(self.quarter_1 < self.year)
def test_greater_than_true(self):
self.assertTrue(self.quarter_2 > self.quarter_1)
def test_greater_than_false_1(self):
self.assertFalse(self.quarter_1 > self.quarter_2)
def test_greater_than_false_2(self):
self.assertFalse(self.quarter_1 > self.year)
def test_less_than_or_equal_true(self):
self.assertTrue(self.quarter_1 <= self.quarter_2)
def test_less_than_or_equal_false_1(self):
self.assertFalse(self.quarter_2 <= self.quarter_1)
def test_less_than_or_equal_false_2(self):
self.assertFalse(self.quarter_1 <= self.year)
def test_greater_than_or_equal_true(self):
self.assertTrue(self.quarter_2 >= self.quarter_1)
def test_greater_than_or_equal_false_1(self):
self.assertFalse(self.quarter_1 >= self.quarter_2)
def test_greater_than_or_equal_false_2(self):
self.assertFalse(self.quarter_1 >= self.year)
def test_parse_week_1(self):
self.assertEquals(parse("2015-W03"), "W-2015-03")
def test_parse_week_2(self):
self.assertEquals(parse("2015-W3"), "W-2015-03")
def test_parse_week_3(self):
self.assertEquals(parse("2015W03"), "W-2015-03")
def test_parse_week_4(self):
self.assertEquals(parse("2015W3"), "W-2015-03")
def test_parse_month_1(self):
self.assertEquals(parse("jan 2015"), "M-2015-01")
def test_parse_month_2(self):
self.assertEquals(parse("Jan 2015"), "M-2015-01")
def test_parse_month_3(self):
self.assertEquals(parse("January 2015"), "M-2015-01")
def test_parse_month_4(self):
self.assertEquals(parse("1/2015"), "M-2015-01")
def test_parse_month_5(self):
self.assertEquals(parse("01/2015"), "M-2015-01")
def test_parse_month_6(self):
self.assertEquals(parse("2015 January"), "M-2015-01")
def test_parse_quarter_1(self):
self.assertEquals(parse("2015Q1"), "Q-2015-1")
def test_parse_quarter_2(self):
self.assertEquals(parse("2015Q1"), "Q-2015-1")
def test_parse_quarter_3(self):
with self.assertRaises(ValidationError):
self.assertEquals(parse("2015Q5"), "Q-2015-5")
def test_parse_year_1(self):
self.assertEquals(parse("2015"), "Y-2015")
def test_parse_invalid(self):
self.assertIsNone(parse("Patrick"))
def test_get_period_str(self):
period = get_period("M-2015-01")
self.assertEquals(str(period), "M-2015-01")
def test_get_period_validation(self):
with self.assertRaises(ValidationError):
get_period("2013W22")
def test_weekly_period_type_raises_error_wrong_format(self):
with self.assertRaises(ValidationError):
PERIOD_TYPES["weekly"].validate("2013W22")
def test_weekly_period_type_raises_error_week_number_too_low(self):
with self.assertRaises(ValidationError):
PERIOD_TYPES["weekly"].validate("W-2013-00")
def test_weekly_period_type_raises_error_week_number_too_high(self):
with self.assertRaises(ValidationError):
PERIOD_TYPES["weekly"].validate("W-2013-75")
def test_weekly_period_type_validates_week(self):
self.assertIsNone(PERIOD_TYPES["weekly"].validate("W-2013-22"))
def test_monthly_period_type_raises_error_wrong_format(self):
with self.assertRaises(ValidationError):
PERIOD_TYPES["monthly"].validate("201312")
def test_monthly_period_type_raises_error_wrong_week_number(self):
with self.assertRaises(ValidationError):
PERIOD_TYPES["monthly"].validate("M-2013-15")
def test_monthly_period_type_validates_week(self):
self.assertIsNone(PERIOD_TYPES["monthly"].validate("M-2013-12"))
def test_quarterly_period_type_raises_error_wrong_format(self):
with self.assertRaises(ValidationError):
PERIOD_TYPES["quarterly"].validate("2013Q4")
def test_quarterly_period_type_raises_error_wrong_week_number(self):
with self.assertRaises(ValidationError):
PERIOD_TYPES["quarterly"].validate("Q-20139")
def test_quarterly_period_type_validates_week(self):
self.assertIsNone(PERIOD_TYPES["quarterly"].validate("Q-2013-3"))
def test_yearly_period_type_raises_error_wrong_format(self):
with self.assertRaises(ValidationError):
PERIOD_TYPES["yearly"].validate("2013")
def test_yearly_period_type_raises_error_wrong_week_number(self):
with self.assertRaises(ValidationError):
PERIOD_TYPES["yearly"].validate("Y-20139")
def test_yearly_period_type_validates_week(self):
self.assertIsNone(PERIOD_TYPES["yearly"].validate("Y-2013"))
def test_weekly_period_for_date_end_of_year(self):
self.assertEquals(period_for_date("weekly", datetime.date(2014, 12, 27)), "W-2014-52")
def test_weekly_period_for_date_start_of_year(self):
self.assertEquals(period_for_date("weekly", datetime.date(2014, 12, 30)), "W-2015-01")
def test_weekly_period_for_date(self):
self.assertEquals(period_for_date("weekly", datetime.date(2013, 8, 7)), "W-2013-32")
def test_quarterly_period_for_date(self):
self.assertEquals(period_for_date("quarterly", datetime.date(2013, 8, 7)), "Q-2013-3")
def test_monthly_period_for_date(self):
self.assertEquals(period_for_date("monthly", datetime.date(2013, 8, 7)), "M-2013-08")
def test_yearly_period_for_date(self):
self.assertEquals(period_for_date("yearly", datetime.date(2013, 8, 7)), "Y-2013")
def test_period_for_date_today(self):
self.assertEquals(period_for_date("yearly"), f"Y-{datetime.date.today().year}")
def test_weekly_period_range(self):
self.assertEquals(list(period_range("W-2012-50", "W-2013-03")), ["W-2012-50", "W-2012-51", "W-2012-52", "W-2013-01", "W-2013-02"])
self.assertEquals(list(period_range("W-2014-50", "W-2015-03")), ["W-2014-50", "W-2014-51", "W-2014-52", "W-2015-01", "W-2015-02"])
self.assertEquals(list(period_range("W-2015-50", "W-2016-03")), ["W-2015-50", "W-2015-51", "W-2015-52", "W-2015-53", "W-2016-01", "W-2016-02"])
self.assertEquals(list(period_range("W-2016-50", "W-2017-03")), ["W-2016-50", "W-2016-51", "W-2016-52", "W-2017-01", "W-2017-02"])
def test_weekly_period_inclusive_range(self):
self.assertEquals(list(period_range("W-2012-50", "W-2013-03", inclusive=True)), ["W-2012-50", "W-2012-51", "W-2012-52", "W-2013-01", "W-2013-02", "W-2013-03"])
self.assertEquals(list(period_range("W-2014-50", "W-2015-03", inclusive=True)), ["W-2014-50", "W-2014-51", "W-2014-52", "W-2015-01", "W-2015-02", "W-2015-03"])
self.assertEquals(list(period_range("W-2015-50", "W-2016-03", inclusive=True)), ["W-2015-50", "W-2015-51", "W-2015-52", "W-2015-53", "W-2016-01", "W-2016-02", "W-2016-03"])
self.assertEquals(list(period_range("W-2016-50", "W-2017-03", inclusive=True)), ["W-2016-50", "W-2016-51", "W-2016-52", "W-2017-01", "W-2017-02", "W-2017-03"])
def test_monthly_period_range(self):
self.assertEquals(list(period_range("M-2012-11", "M-2013-03")), ["M-2012-11", "M-2012-12", "M-2013-01", "M-2013-02"])
def test_monthly_period_inclusive_range(self):
self.assertEquals(list(period_range("M-2012-11", "M-2013-03", inclusive=True)), ["M-2012-11", "M-2012-12", "M-2013-01", "M-2013-02", "M-2013-03"])
def test_quarterly_period_range(self):
self.assertEquals(list(period_range("Q-2012-3", "Q-2013-2")), ["Q-2012-3", "Q-2012-4", "Q-2013-1"])
def test_quarterly_period_inclusive_range(self):
self.assertEquals(list(period_range("Q-2012-3", "Q-2013-2", inclusive=True)), ["Q-2012-3", "Q-2012-4", "Q-2013-1", "Q-2013-2"])
def test_yearly_period_range(self):
    """Half-open yearly range excludes the end year."""
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(list(period_range("Y-2010", "Y-2013")), ["Y-2010", "Y-2011", "Y-2012"])
def test_yearly_period_range_inclusive(self):
    """Inclusive yearly range also contains the end year."""
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(list(period_range("Y-2010", "Y-2013", inclusive=True)), ["Y-2010", "Y-2011", "Y-2012", "Y-2013"])
def test_period_range_mismatched_types(self):
    """Mixing period types (weekly vs yearly) must raise ValidationError."""
    self.assertRaises(ValidationError, period_range, "W-2012-50", "Y-2013")
def test_yearly_period_type_display(self):
    """A yearly period is displayed as the bare year."""
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(period_display("Y-2013"), "2013")
def test_monthly_period_type_display(self):
    """A monthly period is displayed as '<Month name> <year>'."""
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(period_display("M-2013-08"), "August 2013")
def test_quarterly_period_type_display(self):
    """A quarterly period is displayed as '<year>Q<quarter>'."""
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(period_display("Q-2013-3"), "2013Q3")
def test_weekly_period_type_display(self):
    """A weekly period is displayed as 'Week of <start date>'."""
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(period_display("W-2013-32"), "Week of Aug 05, 2013")
def test_yearly_period_type_start_end(self):
    """A yearly period spans Jan 1 through Dec 31."""
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(period_start_end("Y-2013"), (datetime.date(2013, 1, 1), datetime.date(2013, 12, 31)))
def test_monthly_period_type_start_end(self):
    """A monthly period spans the first through the last day of the month."""
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(period_start_end("M-2013-08"), (datetime.date(2013, 8, 1), datetime.date(2013, 8, 31)))
def test_quarterly_period_type_start_end(self):
    """A quarterly period spans the three calendar months of the quarter."""
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(period_start_end("Q-2013-3"), (datetime.date(2013, 7, 1), datetime.date(2013, 9, 30)))
def test_weekly_period_type_start_end(self):
    """A weekly period spans Monday through Sunday."""
    # Fix: assertEquals is a deprecated alias of assertEqual (removed in 3.12).
    self.assertEqual(period_start_end("W-2013-32"), (datetime.date(2013, 8, 5), datetime.date(2013, 8, 11)))
|
from ..loaders import load_data
from ..utils import load_json_config
from deoxys_image.patch_sliding import get_patch_indice
from deoxys_vis import read_csv
import numpy as np
import h5py
import pandas as pd
import os
from time import time
import shutil
import matplotlib.pyplot as plt
import warnings
class H5Metric:
    """Compute a per-item metric over predictions stored in an HDF5 file and
    append the scores as a column to a CSV file.

    :param ref_file: [str] path to the HDF5 file holding predictions/targets
    :param save_file: [str] path of the CSV file the scores are written to
    :param metric_name: [str] column name used for the scores in the CSV
    :param predicted_dataset: [str] HDF5 dataset name of the predictions
    :param target_dataset: [str] HDF5 dataset name of the targets
    :param batch_size: [int] number of items read per batch
    :param map_file: [str] optional CSV map; when given, each row's id selects
        one variable-size item from grouped datasets (3D case)
    :param map_column: [str] column in ``map_file`` holding the item ids
    """

    def __init__(self, ref_file, save_file, metric_name='score',
                 predicted_dataset='predicted',
                 target_dataset='y', batch_size=4,
                 map_file=None, map_column=None):
        self.metric_name = metric_name
        self.ref_file = ref_file
        self.predicted = predicted_dataset
        self.target = target_dataset
        with h5py.File(ref_file, 'r') as f:
            keys = list(f.keys())
        # When the datasets are nested in per-fold groups, expand the names
        # to one "<group>/<dataset>" path per group.
        if target_dataset not in keys:
            self.predicted = [f'{key}/{predicted_dataset}' for key in keys]
            self.target = [f'{key}/{target_dataset}' for key in keys]
        self.batch_size = batch_size
        self.res_file = save_file
        self.map_file = map_file
        self.map_column = map_column

    def get_img_batch(self):
        """Yield ``(targets, predictions)`` batches from the reference file."""
        self.scores = []
        if self.map_file is None:
            if isinstance(self.predicted, str):  # single flat dataset
                with h5py.File(self.ref_file, 'r') as f:
                    size = f[self.target].shape[0]
                for i in range(0, size, self.batch_size):
                    # Re-open per batch so the file is not held open while
                    # the consumer processes the yielded batch.
                    with h5py.File(self.ref_file, 'r') as f:
                        predicted = f[self.predicted][i:i + self.batch_size]
                        targets = f[self.target][i:i + self.batch_size]
                    yield targets, predicted
            else:  # one dataset per fold group
                for pred, target in zip(self.predicted, self.target):
                    with h5py.File(self.ref_file, 'r') as f:
                        size = f[target].shape[0]
                    for i in range(0, size, self.batch_size):
                        with h5py.File(self.ref_file, 'r') as f:
                            predicted = f[pred][i:i + self.batch_size]
                            targets = f[target][i:i + self.batch_size]
                        yield targets, predicted
        else:  # handle 3d items with different sizes: one item per mapped id
            map_df = pd.read_csv(self.map_file)
            map_data = map_df[self.map_column].values
            for idx in map_data:
                with h5py.File(self.ref_file, 'r') as f:
                    predicted = f[self.predicted][str(idx)][:]
                    targets = f[self.target][str(idx)][:]
                yield (np.expand_dims(targets, axis=0),
                       np.expand_dims(predicted, axis=0))

    def update_score(self, scores):
        """Accumulate the scores of one batch."""
        self.scores.extend(scores)

    def save_score(self):
        """Persist the scores, adding a column when the CSV already exists."""
        if os.path.isfile(self.res_file):
            df = pd.read_csv(self.res_file)
            # NOTE(review): assumes the existing CSV has exactly
            # len(self.scores) rows; pandas raises otherwise — confirm.
            df[f'{self.metric_name}'] = self.scores
        else:
            df = pd.DataFrame(self.scores, columns=[f'{self.metric_name}'])
        df.to_csv(self.res_file, index=False)

    def post_process(self, **kwargs):
        """Run the metric over the whole file and write the score CSV."""
        for targets, prediction in self.get_img_batch():
            scores = self.calculate_metrics(
                targets, prediction, **kwargs)
            self.update_score(scores)
        self.save_score()

    def calculate_metrics(self, targets, predictions, **kwargs):
        """Return one score per item in the batch; subclasses implement this.

        Bug fix: the original signature lacked ``self``, so calling it as a
        method bound the instance to ``targets`` and passing both arrays
        raised TypeError instead of NotImplementedError.
        """
        raise NotImplementedError
class H5CalculateFScore(H5Metric):
    """Per-item F-beta score (Dice when ``beta=1``) over an HDF5 prediction
    file; predictions are binarized at ``threshold`` before scoring."""

    def __init__(self, ref_file, save_file, metric_name='f1_score',
                 predicted_dataset='predicted',
                 target_dataset='y', batch_size=4, beta=1, threshold=None,
                 map_file=None, map_column=None):
        super().__init__(ref_file, save_file, metric_name,
                         predicted_dataset,
                         target_dataset, batch_size,
                         map_file, map_column)
        # Default decision threshold for binarizing predictions.
        self.threshold = 0.5 if threshold is None else threshold
        self.beta = beta

    def calculate_metrics(self, y_true, y_pred, **kwargs):
        """Return one F-beta score per item (reduces all non-batch axes)."""
        assert len(y_true) == len(y_pred), "Shape not match"
        eps = 1e-8
        # Sum over every axis except the leading batch axis of y_true.
        reduce_ax = tuple(range(1, len(y_true.shape)))
        y_pred = (y_pred > self.threshold).astype(y_pred.dtype)
        # Drop a trailing singleton channel axis on the predictions.
        if y_pred.ndim - y_true.ndim == 1 and y_pred.shape[-1] == 1:
            y_pred = y_pred[..., 0]
        tp = np.sum(y_pred * y_true, axis=reduce_ax)
        total_true = np.sum(y_true, axis=reduce_ax)
        total_pred = np.sum(y_pred, axis=reduce_ax)
        beta2 = self.beta ** 2
        numerator = (1 + beta2) * tp + eps
        denominator = beta2 * total_true + total_pred + eps
        return numerator / denominator
class H5MetaDataMapping:
    """Dump selected meta-data datasets from the folds of an HDF5 file into
    a single CSV (one row per item, concatenated across folds)."""

    def __init__(self, ref_file, save_file, folds, fold_prefix='fold',
                 dataset_names=None):
        self.ref_file = ref_file
        self.save_file = save_file
        # With a prefix, fold ids become "<prefix>_<fold>" group names.
        if fold_prefix:
            self.folds = ['{}_{}'.format(fold_prefix, fold)
                          for fold in folds]
        else:
            self.folds = folds
        self.dataset_names = dataset_names

    def post_process(self, *args, **kwargs):
        """Concatenate the requested datasets across folds and save a CSV."""
        collected = {name: [] for name in self.dataset_names}
        for fold in self.folds:
            with h5py.File(self.ref_file, 'r') as f:
                for name in self.dataset_names:
                    values = f[fold][name][:]
                    # Non-numeric (byte/object) columns are decoded to str
                    # so the CSV stays readable.
                    dtype = values.dtype.name
                    if 'int' not in dtype and 'float' not in dtype:
                        values = values.astype(str)
                    collected[name].extend(values)
        pd.DataFrame(collected).to_csv(self.save_file, index=False)
class H5Merge2dSlice:
    """Merge runs of consecutive 2D slices (grouped by an id column in a map
    CSV) back into one 3D dataset per image id.

    :param ref_file: HDF5 file holding per-slice inputs/targets/predictions
    :param map_file: CSV whose ``map_column`` gives the owning image id of
        each slice, in file order
    :param map_column: id column name in ``map_file``
    :param merge_file: output HDF5 file (one dataset per image id)
    :param save_file: output CSV listing the unique image ids
    """

    def __init__(self, ref_file, map_file, map_column, merge_file, save_file,
                 predicted_dataset='predicted', target_dataset='y',
                 input_dataset='x'):
        self.ref_file = ref_file
        self.map_file = map_file
        self.map_column = map_column
        self.merge_file = merge_file
        self.save_file = save_file
        self.predicted = predicted_dataset
        self.target = target_dataset
        self.inputs = input_dataset
        with h5py.File(ref_file, 'r') as f:
            keys = list(f.keys())
        # When the datasets are nested in per-fold groups, expand the names
        # to one "<group>/<dataset>" path per group.
        if input_dataset not in keys:
            self.predicted = [f'{key}/{predicted_dataset}' for key in keys]
            self.target = [f'{key}/{target_dataset}' for key in keys]
            self.inputs = [f'{key}/{input_dataset}' for key in keys]

    def post_process(self):
        """Group consecutive slices by id and write them as 3D datasets."""
        map_df = pd.read_csv(self.map_file)
        map_data = map_df[self.map_column].values
        unique_val = []
        # Compute run boundaries: indices where the image id changes.
        # Padding with first/last keeps the boundary detection symmetric.
        first, last = map_data[0], map_data[-1]
        tmp = np.concatenate([[first], map_data, [last]])
        indice = np.where(tmp[1:] != tmp[:-1])[0]
        indice = np.concatenate([[0], indice, [len(map_data)]])
        if type(self.inputs) == str:
            # Flat layout: single input/target/predicted datasets.
            with h5py.File(self.merge_file, 'w') as mf:
                mf.create_group(self.inputs)
                mf.create_group(self.target)
                mf.create_group(self.predicted)
            for i in range(len(indice) - 1):
                start = indice[i]
                end = indice[i+1]
                unique_val.append(map_data[start])
                # A run must belong to a single id.
                assert map_data[start] == map_data[end-1], "id not match"
                curr_name = str(map_data[start])
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.inputs][start:end]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[self.inputs].create_dataset(
                        curr_name, data=img, compression="gzip")
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.target][start:end]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[self.target].create_dataset(
                        curr_name, data=img, compression="gzip")
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.predicted][start:end]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[self.predicted].create_dataset(
                        curr_name, data=img, compression="gzip")
        else:
            # Per-fold layout: slices are spread over several datasets.
            # `offset` translates global slice indices into indices of the
            # currently active fold dataset.
            inputs = self.inputs[0].split('/')[-1]
            target = self.target[0].split('/')[-1]
            predicted = self.predicted[0].split('/')[-1]
            with h5py.File(self.merge_file, 'w') as mf:
                mf.create_group(inputs)
                mf.create_group(target)
                mf.create_group(predicted)
            offset = 0
            curr_data_idx = 0
            with h5py.File(self.ref_file, 'r') as f:
                total = f[self.inputs[curr_data_idx]].shape[0]
            for i in range(len(indice) - 1):
                if indice[i] - offset >= total:
                    # Current fold dataset exhausted -> advance to the next.
                    offset = indice[i]
                    curr_data_idx += 1
                    with h5py.File(self.ref_file, 'r') as f:
                        total = f[self.inputs[curr_data_idx]].shape[0]
                map_start, map_end = indice[i], indice[i+1]
                start = indice[i] - offset
                end = indice[i+1] - offset
                unique_val.append(map_data[map_start])
                assert map_data[map_start] == map_data[map_end -
                                                       1], "id not match"
                curr_name = str(map_data[map_start])
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.inputs[curr_data_idx]][start:end]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[inputs].create_dataset(
                        curr_name, data=img, compression="gzip")
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.target[curr_data_idx]][start:end]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[target].create_dataset(
                        curr_name, data=img, compression="gzip")
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.predicted[curr_data_idx]][start:end]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[predicted].create_dataset(
                        curr_name, data=img, compression="gzip")
        # Write the list of processed image ids for downstream steps.
        df = pd.DataFrame(data=unique_val, columns=[self.map_column])
        df.to_csv(self.save_file, index=False)
class H5Transform3d:
    """Copy 3D items out of batched datasets into one named dataset per item,
    using the id column of a map CSV for the names.

    :param ref_file: HDF5 file holding the batched inputs/targets/predictions
    :param map_file: CSV whose ``map_column`` gives the id of each item
    :param map_column: id column name in ``map_file``
    :param merge_file: output HDF5 file (one dataset per item id)
    """

    def __init__(self, ref_file, map_file, map_column, merge_file,
                 predicted_dataset='predicted', target_dataset='y',
                 input_dataset='x'):
        self.ref_file = ref_file
        self.map_file = map_file
        self.map_column = map_column
        self.merge_file = merge_file
        self.predicted = predicted_dataset
        self.target = target_dataset
        self.inputs = input_dataset
        with h5py.File(ref_file, 'r') as f:
            keys = list(f.keys())
        # When the datasets are nested in per-fold groups, expand the names
        # to one "<group>/<dataset>" path per group.
        if input_dataset not in keys:
            self.predicted = [f'{key}/{predicted_dataset}' for key in keys]
            self.target = [f'{key}/{target_dataset}' for key in keys]
            self.inputs = [f'{key}/{input_dataset}' for key in keys]

    def post_process(self):
        """Write each item as its own gzip-compressed dataset."""
        map_df = pd.read_csv(self.map_file)
        map_data = map_df[self.map_column].values
        # Run-boundary bookkeeping kept from the 2D merge variant; only
        # `map_data` itself is used for naming below.
        first, last = map_data[0], map_data[-1]
        tmp = np.concatenate([[first], map_data, [last]])
        indice = np.where(tmp[1:] != tmp[:-1])[0]
        indice = np.concatenate([[0], indice, [len(map_data)]])
        if type(self.inputs) == str:
            # Flat layout: single input/target/predicted datasets.
            with h5py.File(self.merge_file, 'w') as mf:
                mf.create_group(self.inputs)
                mf.create_group(self.target)
                mf.create_group(self.predicted)
            for i in range(len(map_data)):
                curr_name = str(map_data[i])
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.inputs][i]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[self.inputs].create_dataset(
                        curr_name, data=img, compression="gzip")
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.target][i]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[self.target].create_dataset(
                        curr_name, data=img, compression="gzip")
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.predicted][i]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[self.predicted].create_dataset(
                        curr_name, data=img, compression="gzip")
        else:  # pragma: no cover
            # Per-fold layout: items are spread over several datasets;
            # `offset` maps the global item index into the active dataset.
            inputs = self.inputs[0].split('/')[-1]
            target = self.target[0].split('/')[-1]
            predicted = self.predicted[0].split('/')[-1]
            with h5py.File(self.merge_file, 'w') as mf:
                mf.create_group(inputs)
                mf.create_group(target)
                mf.create_group(predicted)
            offset = 0
            curr_data_idx = 0
            with h5py.File(self.ref_file, 'r') as f:
                total = f[self.inputs[curr_data_idx]].shape[0]
            for i in range(len(map_data)):
                if i - offset >= total:
                    # Current fold dataset exhausted -> advance to the next.
                    offset = i
                    curr_data_idx += 1
                    with h5py.File(self.ref_file, 'r') as f:
                        total = f[self.inputs[curr_data_idx]].shape[0]
                curr_name = str(map_data[i])
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.inputs[curr_data_idx]][i-offset]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[inputs].create_dataset(
                        curr_name, data=img, compression="gzip")
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.target[curr_data_idx]][i-offset]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[target].create_dataset(
                        curr_name, data=img, compression="gzip")
                with h5py.File(self.ref_file, 'r') as f:
                    img = f[self.predicted[curr_data_idx]][i-offset]
                with h5py.File(self.merge_file, 'a') as mf:
                    mf[predicted].create_dataset(
                        curr_name, data=img, compression="gzip")
class H5MergePatches:  # pragma: no cover
    """Reassemble full-size 3D predictions from overlapping patches, copying
    the original inputs/targets alongside them.

    :param ref_file: original (un-patched) HDF5 dataset, grouped per fold
    :param predicted_file: model output HDF5 with one item per patch
    :param map_column: per-fold dataset holding the image ids
    :param merge_file: output HDF5 with stitched full-size volumes
    :param save_file: output CSV listing the processed ids
    :param patch_size: scalar or per-axis patch extent
    :param overlap: patch overlap passed to ``get_patch_indice``
    :param folds: fold ids to process, optionally prefixed by ``fold_prefix``
    """

    def __init__(self, ref_file, predicted_file,
                 map_column, merge_file, save_file,
                 patch_size, overlap,
                 folds, fold_prefix='fold',
                 original_input_dataset='x',
                 original_target_dataset='y',
                 predicted_dataset='predicted', target_dataset='y',
                 input_dataset='x'
                 ):
        self.ref_file = ref_file
        self.predicted_file = predicted_file
        self.map_column = map_column
        self.merge_file = merge_file
        self.save_file = save_file
        self.ref_inputs = original_input_dataset
        self.ref_targets = original_target_dataset
        self.predicted = predicted_dataset
        self.target = target_dataset
        self.inputs = input_dataset
        # With a prefix, fold ids become "<prefix>_<fold>" group names.
        if fold_prefix:
            self.folds = ['{}_{}'.format(
                fold_prefix, fold) for fold in folds]
        else:
            self.folds = folds
        self.patch_size = patch_size
        self.overlap = overlap
        print('merge images of patch', patch_size)

    def _save_inputs_target_to_merge_file(self, fold, meta, index):
        """Copy one original input/target pair into the merge file."""
        with h5py.File(self.ref_file, 'r') as f:
            inputs = f[fold][self.ref_inputs][index]
            targets = f[fold][self.ref_targets][index]
        with h5py.File(self.merge_file, 'a') as mf:
            mf[self.inputs].create_dataset(
                meta, data=inputs, compression="gzip")
            mf[self.target].create_dataset(
                meta, data=targets, compression="gzip")

    def _merge_patches_to_merge_file(self, meta, start_cursor):
        """Stitch the patches of one image (averaging in overlap regions)
        and return the cursor past the consumed patches."""
        # Spatial shape of the full image (channel axis dropped).
        with h5py.File(self.merge_file, 'r') as mf:
            shape = mf[self.target][meta].shape[:-1]
        # fix patch size: broadcast a scalar to one entry per spatial axis
        if '__iter__' not in dir(self.patch_size):
            self.patch_size = [self.patch_size] * len(shape)
        indice = get_patch_indice(shape, self.patch_size, self.overlap)
        next_cursor = start_cursor + len(indice)
        with h5py.File(self.predicted_file, 'r') as f:
            data = f[self.predicted][start_cursor: next_cursor]
        predicted = np.zeros(shape)
        weight = np.zeros(shape)
        # Accumulate patch values and per-voxel hit counts, then normalize
        # so overlapping regions become the mean of contributing patches.
        for i in range(len(indice)):
            x, y, z = indice[i]
            w, h, d = self.patch_size
            predicted[x:x+w, y:y+h, z:z+d] = predicted[x:x+w, y:y+h, z:z+d] \
                + data[i][..., 0]
            weight[x:x+w, y:y+h, z:z+d] = weight[x:x+w, y:y+h, z:z+d] \
                + np.ones(self.patch_size)
        predicted = (predicted/weight)[..., np.newaxis]
        with h5py.File(self.merge_file, 'a') as mf:
            mf[self.predicted].create_dataset(
                meta, data=predicted, compression="gzip")
        return next_cursor

    def post_process(self):
        """Create the merge file, stitch every image, write the id map CSV."""
        # create merge file
        with h5py.File(self.merge_file, 'w') as mf:
            mf.create_group(self.inputs)
            mf.create_group(self.target)
            mf.create_group(self.predicted)
        data = []
        # Patches are stored contiguously in fold/file order, so a running
        # cursor tracks where each image's patches start.
        start_cursor = 0
        for fold in self.folds:
            with h5py.File(self.ref_file, 'r') as f:
                meta_data = f[fold][self.map_column][:]
            data.extend(meta_data)
            for index, meta in enumerate(meta_data):
                self._save_inputs_target_to_merge_file(
                    fold, str(meta), index)
                start_cursor = self._merge_patches_to_merge_file(
                    str(meta), start_cursor)
        # create map file
        df = pd.DataFrame(data, columns=[self.map_column])
        df.to_csv(self.save_file, index=False)
class AnalysisPerEpoch:  # pragma: no cover
    """Plot and summarize a per-item validation metric across epochs.

    Reads one log CSV per epoch, plots each item's metric curve plus the
    mean, and writes a per-epoch summary table.

    :param save_path: directory the plots/CSV/summary are written into
    :param log_file_templates: format string with one ``{}`` slot for the epoch
    :param epochs: list of epoch numbers to analyse
    :param map_column: column identifying items (e.g. patients) in the logs
    :param monitor: metric column; defaults to the last column of the logs
    :param model_name: display name; defaults to a component of ``save_path``
    """
    _markers = ['o-', 'v-', '^-', '<-', '>-',
                '1-', '2-', 's-', 'p-', 'P-',
                '*-', '+-', 'x-', 'D-', 'd-'] * 10 + ['--']

    def __init__(self, save_path, log_file_templates, epochs,
                 map_column='patient idx', monitor='', model_name=''):
        self.save_path = save_path
        self.log_file_templates = log_file_templates
        self.epochs = epochs
        self.map_column = map_column
        self.monitor = monitor
        # Default model name: second-to-last component of save_path.
        self.model_name = model_name or save_path.split('/')[-2]

    def post_process(self):
        """Build the per-epoch table, plot it, and write the summary file."""
        patient_dice_per_epoch = []
        monitor = self.monitor
        epochs = self.epochs
        map_column = self.map_column
        for epoch in epochs:
            # load each log file
            data = pd.read_csv(self.log_file_templates.format(epoch))
            # metric column defaults to the last column of the log
            if not monitor:
                monitor = data.columns[-1]
            patient_dice_per_epoch.append(data[monitor].values)
        # Item ids are taken from the last log read (assumed identical
        # across epochs -- TODO confirm).
        patient_idx = data[map_column].values
        all_data = np.vstack(patient_dice_per_epoch)
        df = pd.DataFrame(all_data, columns=patient_idx)
        df.index = epochs
        df.index.name = 'epoch'
        df['mean'] = df[[pid for pid in patient_idx]].mean(axis=1)
        best_epoch = df['mean'].idxmax()
        best_metric = df['mean'].max()
        # Plot one curve per item plus the mean curve.
        plt.figure(figsize=(10, 8))
        df.plot(style=self._markers[:len(patient_idx) + 1], ax=plt.gca())
        plt.legend(loc='center left', bbox_to_anchor=(1.0, 0.5))
        plt.title(
            f'Model {self.model_name}' +
            f'\nBest Epoch {best_epoch} - Mean {monitor} {best_metric:.6f}')
        plt.savefig(self.save_path + '/dice_per_epoch.png')
        plt.savefig(self.save_path + '/dice_per_epoch.pdf')
        plt.close('all')
        # save to csv
        df.to_csv(self.save_path + '/dice_per_epoch.csv')
        violin_df = df[df.columns[:-1]]  # drop the 'mean' column
        # Bug fix: id_vars was `violin_df.columns[:-len(patient_idx)]`,
        # which is always an empty slice (violin_df has exactly the patient
        # columns), so the 'epoch' column was melted away and the groupby
        # below failed. Keep 'epoch' as the identifier column.
        group_df = violin_df.reset_index().melt(
            id_vars=['epoch'], var_name=map_column, value_name=monitor)

        def Q1(x):
            return x.quantile(0.25)

        def Q3(x):
            return x.quantile(0.75)

        summary = group_df.groupby('epoch').agg(
            {monitor: ['min', Q1, 'median', Q3, 'max', 'mean', 'std']})
        # Bug fix: the file was opened in read mode ('r') and then written
        # to, and the aggregation above was computed but discarded. Open
        # for writing and persist the per-epoch summary.
        with open(self.save_path + '/val_summary.txt', 'w') as f:
            f.write(str(summary))
class PostProcessor:
    """Base post-processor: locates saved models/predictions for an
    experiment and selects the best epoch from the training logs.

    Path-layout constants (relative to the log/temp base paths):
    """
    MODEL_PATH = '/model'
    MODEL_NAME = '/model.{epoch:03d}.h5'
    BEST_MODEL_PATH = '/best'
    PREDICTION_PATH = '/prediction'
    PREDICTION_NAME = '/prediction.{epoch:03d}.h5'
    LOG_FILE = '/logs.csv'
    PERFORMANCE_PATH = '/performance'
    TEST_OUTPUT_PATH = '/test'
    PREDICT_TEST_NAME = '/prediction_test.h5'

    def __init__(self, log_base_path='logs',
                 temp_base_path='',
                 analysis_base_path='',
                 run_test=False, new_dataset_params=None):
        self.temp_base_path = temp_base_path
        self.log_base_path = log_base_path
        self.update_data_reader(new_dataset_params)
        model_path = log_base_path + self.MODEL_PATH
        # Bug fix: ensure the attribute exists even when listing the model
        # directory fails; the original hit AttributeError below otherwise.
        self.epochs = []
        try:
            model_files = os.listdir(model_path)
            # Saved models are named 'model.<epoch:03d>.h5'.
            self.epochs = [int(filename[-6:-3])
                           for filename in model_files]
        except Exception as e:  # pragma: no cover
            print('No saved models', e)
            warnings.warn('load_best_model does not work')
        if len(self.epochs) == 0:
            print('No saved models in', model_path)
            warnings.warn('load_best_model does not work')
        self.run_test = run_test

    def update_data_reader(self, new_dataset_params):
        """Load dataset params from a saved model's embedded config, apply
        ``new_dataset_params`` overrides (merged one level deep), and build
        the data reader."""
        model_path = self.log_base_path + self.MODEL_PATH
        sample_model_filename = model_path + '/' + os.listdir(model_path)[0]
        with h5py.File(sample_model_filename, 'r') as f:
            config = f.attrs['deoxys_config']
            config = load_json_config(config)
        dataset_params = config['dataset_params']
        # update until level 2
        if new_dataset_params is not None:
            for key in new_dataset_params:
                if key in dataset_params:
                    dataset_params[key].update(new_dataset_params[key])
                else:
                    dataset_params[key] = new_dataset_params[key]
        self.dataset_filename = dataset_params['config']['filename']
        self.data_reader = load_data(dataset_params)
        self.dataset_params = dataset_params

    def _best_epoch_from_raw_log(self, monitor='', mode='max'):
        """Return the saved epoch with the best ``monitor`` value in the raw
        training log ('max' or 'min' mode)."""
        print(F'Finding best model based on the {mode}imum {monitor} from '
              'raw logs')
        epochs = self.epochs
        if len(epochs) == 0:
            print('No saved models in', self.log_base_path)
            raise Exception('load_best_model does not work')
        logger_path = self.log_base_path + self.LOG_FILE
        if not os.path.isfile(logger_path):
            # Bug fix: the original only warned here and then crashed with a
            # NameError on the undefined result; fail explicitly instead.
            warnings.warn('No log files to check for best model')
            raise Exception('load_best_model does not work')
        df = read_csv(logger_path, usecols=['epoch', monitor])
        # Keras logs epochs 0-based; model filenames are 1-based.
        df['epoch'] = df['epoch'] + 1
        # only compare models that were saved
        saved = df[df['epoch'].isin(epochs)]
        if mode == 'min':
            val = saved.min()[monitor]
            best_epoch = saved.idxmin()[monitor] + 1
        else:
            val = saved.max()[monitor]
            best_epoch = saved.idxmax()[monitor] + 1
        print('Best epoch:', best_epoch, f', with {monitor}={val}')
        return best_epoch

    def get_best_model(self, monitor='', mode='max',
                       keep_best_only=True):  # pragma: no cover
        """Copy the best (or all) prediction files from the temp folder and
        return the path of the best model file."""
        best_epoch = self._best_epoch_from_raw_log(monitor, mode)
        epochs = self.epochs
        for epoch in epochs:
            if epoch == best_epoch or not keep_best_only:
                shutil.copy(
                    self.temp_base_path + self.PREDICTION_PATH +
                    self.PREDICTION_NAME.format(epoch=epoch),
                    self.log_base_path + self.PREDICTION_PATH +
                    self.PREDICTION_NAME.format(epoch=epoch))
        return self.log_base_path + self.MODEL_PATH + \
            self.MODEL_NAME.format(epoch=best_epoch)
class SegmentationPostProcessor(PostProcessor):
MODEL_PATH = '/model'
MODEL_NAME = '/model.{epoch:03d}.h5'
BEST_MODEL_PATH = '/best'
PREDICTION_PATH = '/prediction'
PREDICTION_NAME = '/prediction.{epoch:03d}.h5'
LOG_FILE = '/logs.csv'
PERFORMANCE_PATH = '/performance'
PREDICTED_IMAGE_PATH = '/images'
TEST_OUTPUT_PATH = '/test'
PREDICT_TEST_NAME = '/prediction_test.h5'
SINGLE_MAP_PATH = '/single_map'
SINGLE_MAP_NAME = '/logs.{epoch:03d}.csv'
MAP_PATH = '/logs'
MAP_NAME = '/logs.{epoch:03d}.csv'
TEST_SINGLE_MAP_NAME = '/single_result.csv'
TEST_MAP_NAME = '/result.csv'
def __init__(self, log_base_path='logs',
temp_base_path='',
analysis_base_path='',
map_meta_data=None, main_meta_data='',
run_test=False, new_dataset_params=None):
self.temp_base_path = temp_base_path
self.log_base_path = log_base_path
self.analysis_base_path = analysis_base_path or log_base_path
if not os.path.exists(self.analysis_base_path):
os.mkdir(self.analysis_base_path)
if not os.path.exists(self.analysis_base_path + self.PREDICTION_PATH):
os.mkdir(self.analysis_base_path + self.PREDICTION_PATH)
self.update_data_reader(new_dataset_params)
try:
temp_prediction_path = temp_base_path + self.PREDICTION_PATH
predicted_files = os.listdir(temp_prediction_path)
self.epochs = [int(filename[-6:-3])
for filename in predicted_files]
except Exception as e: # pragma: no cover
print("Error while getting epochs by temp folder:", e)
print("Using post-process log files as alternative")
try:
log_files = os.listdir(self.log_base_path + self.MAP_PATH)
self.epochs = [int(filename[-7:-4])
for filename in log_files]
except Exception as e:
print("Error while getting epochs by log files:", e)
print("Using dummy epochs as alternative.")
self.epochs = [5]
print("Post-process only works on test data.")
if map_meta_data:
if type(map_meta_data) == str:
self.map_meta_data = map_meta_data.split(',')
else:
self.map_meta_data = map_meta_data
else:
self.map_meta_data = ['patient_idx', 'slice_idx']
if main_meta_data:
self.main_meta_data = main_meta_data
else:
self.main_meta_data = self.map_meta_data[0]
self.run_test = run_test
# def update_data_reader(self, new_dataset_params):
# model_path = self.log_base_path + self.MODEL_PATH
# sample_model_filename = model_path + '/' + os.listdir(model_path)[0]
# with h5py.File(sample_model_filename, 'r') as f:
# config = f.attrs['deoxys_config']
# config = load_json_config(config)
# dataset_params = config['dataset_params']
# # update until level 2
# if new_dataset_params is not None:
# for key in new_dataset_params:
# if key in dataset_params:
# dataset_params[key].update(new_dataset_params[key])
# else:
# dataset_params[key] = new_dataset_params[key]
# self.dataset_filename = dataset_params['config']['filename']
# self.data_reader = load_data(dataset_params)
# self.dataset_params = dataset_params
def map_2d_meta_data(self):
print('mapping 2d meta data')
if not self.run_test:
map_folder = self.log_base_path + self.SINGLE_MAP_PATH
if not os.path.exists(map_folder):
os.makedirs(map_folder)
map_filename = map_folder + self.SINGLE_MAP_NAME
for epoch in self.epochs:
H5MetaDataMapping(
ref_file=self.dataset_filename,
save_file=map_filename.format(epoch=epoch),
folds=self.data_reader.val_folds,
fold_prefix='',
dataset_names=self.map_meta_data).post_process()
else:
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
if not os.path.exists(test_folder):
os.makedirs(test_folder)
map_filename = test_folder + self.TEST_SINGLE_MAP_NAME
H5MetaDataMapping(
ref_file=self.dataset_filename,
save_file=map_filename,
folds=self.data_reader.test_folds,
fold_prefix='',
dataset_names=self.map_meta_data).post_process()
return self
def calculate_fscore_single(self):
if not self.run_test:
print('calculating dice score per items in val set')
predicted_path = self.temp_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
map_folder = self.log_base_path + self.SINGLE_MAP_PATH
map_filename = map_folder + self.SINGLE_MAP_NAME
for epoch in self.epochs:
H5CalculateFScore(
predicted_path.format(epoch=epoch),
map_filename.format(epoch=epoch)
).post_process()
else:
print('calculating dice score per items in test set')
predicted_path = self.temp_base_path + \
self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
map_filename = test_folder + self.TEST_SINGLE_MAP_NAME
H5CalculateFScore(
predicted_path,
map_filename
).post_process()
return self
def calculate_fscore_single_3d(self):
self.calculate_fscore_single()
if not self.run_test:
map_folder = self.log_base_path + self.SINGLE_MAP_PATH
main_log_folder = self.log_base_path + self.MAP_PATH
try:
os.rename(map_folder, main_log_folder)
except Exception as e:
print("Files exist:", e)
print("Copying new logs file")
os.rename(main_log_folder,
main_log_folder + '-' + str(time()))
os.rename(map_folder, main_log_folder)
for epoch in self.epochs:
H5Transform3d(
ref_file=self.temp_base_path + self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=epoch),
map_file=main_log_folder +
self.MAP_NAME.format(epoch=epoch),
map_column=self.main_meta_data,
merge_file=self.log_base_path + self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=epoch),
).post_process()
else:
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
map_filename = test_folder + self.TEST_SINGLE_MAP_NAME
main_result_file_name = test_folder + self.TEST_MAP_NAME
try:
os.rename(map_filename, main_result_file_name)
except Exception as e:
print("Files exist:", e)
print("Copying new result file")
os.rename(main_result_file_name,
main_result_file_name + '-' + str(time()) + '.csv')
os.rename(map_filename, main_result_file_name)
H5Transform3d(
ref_file=self.temp_base_path + self.TEST_OUTPUT_PATH +
self.PREDICT_TEST_NAME,
map_file=main_result_file_name,
map_column=self.main_meta_data,
merge_file=test_folder + self.PREDICT_TEST_NAME,
).post_process()
def merge_2d_slice(self):
print('merge 2d slice to 3d images')
if not self.run_test:
predicted_path = self.temp_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
map_folder = self.log_base_path + self.SINGLE_MAP_PATH
map_filename = map_folder + self.SINGLE_MAP_NAME
merge_path = self.log_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
main_log_folder = self.log_base_path + self.MAP_PATH
if not os.path.exists(main_log_folder):
os.makedirs(main_log_folder)
main_log_filename = main_log_folder + self.MAP_NAME
for epoch in self.epochs:
H5Merge2dSlice(
predicted_path.format(epoch=epoch),
map_filename.format(epoch=epoch),
self.main_meta_data,
merge_path.format(epoch=epoch),
main_log_filename.format(epoch=epoch)
).post_process()
else:
predicted_path = self.temp_base_path + \
self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
map_filename = test_folder + self.TEST_SINGLE_MAP_NAME
merge_path = test_folder + self.PREDICT_TEST_NAME
main_result_file_name = test_folder + self.TEST_MAP_NAME
H5Merge2dSlice(
predicted_path,
map_filename,
self.main_meta_data,
merge_path,
main_result_file_name
).post_process()
return self
def merge_3d_patches(self): # pragma: no cover
print('merge 3d patches to 3d images')
if not self.run_test:
predicted_path = self.temp_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
# map_folder = self.log_base_path + self.SINGLE_MAP_PATH
# map_filename = map_folder + self.SINGLE_MAP_NAME
merge_path = self.analysis_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
main_log_folder = self.log_base_path + self.MAP_PATH
if not os.path.exists(main_log_folder):
os.makedirs(main_log_folder)
main_log_filename = main_log_folder + self.MAP_NAME
for epoch in self.epochs:
H5MergePatches(
ref_file=self.dataset_filename,
predicted_file=predicted_path.format(epoch=epoch),
map_column=self.main_meta_data,
merge_file=merge_path.format(epoch=epoch),
save_file=main_log_filename.format(epoch=epoch),
patch_size=self.data_reader.patch_size,
overlap=self.data_reader.overlap,
folds=self.data_reader.val_folds,
fold_prefix='',
original_input_dataset=self.data_reader.x_name,
original_target_dataset=self.data_reader.y_name,
).post_process()
else:
predicted_path = self.temp_base_path + \
self.TEST_OUTPUT_PATH + self.PREDICT_TEST_NAME
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
merge_path = test_folder + self.PREDICT_TEST_NAME
main_result_file_name = test_folder + self.TEST_MAP_NAME
if not os.path.exists(test_folder):
os.makedirs(test_folder)
H5MergePatches(
ref_file=self.dataset_filename,
predicted_file=predicted_path,
map_column=self.main_meta_data,
merge_file=merge_path,
save_file=main_result_file_name,
patch_size=self.data_reader.patch_size,
overlap=self.data_reader.overlap,
folds=self.data_reader.test_folds,
fold_prefix='',
original_input_dataset=self.data_reader.x_name,
original_target_dataset=self.data_reader.y_name,
).post_process()
return self
def calculate_fscore(self):
print('calculating dice score per 3d image')
if not self.run_test:
merge_path = self.analysis_base_path + \
self.PREDICTION_PATH + self.PREDICTION_NAME
main_log_folder = self.log_base_path + self.MAP_PATH
main_log_filename = main_log_folder + self.MAP_NAME
for epoch in self.epochs:
H5CalculateFScore(
merge_path.format(epoch=epoch),
main_log_filename.format(epoch=epoch),
map_file=main_log_filename.format(epoch=epoch),
map_column=self.main_meta_data
).post_process()
else:
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
merge_path = test_folder + self.PREDICT_TEST_NAME
main_result_file_name = test_folder + self.TEST_MAP_NAME
H5CalculateFScore(
merge_path,
main_result_file_name,
map_file=main_result_file_name,
map_column=self.main_meta_data
).post_process()
return self
def get_best_model(self, monitor='', mode='max', keep_best_only=True,
use_raw_log=False):
print('finding best model')
epochs = self.epochs
if use_raw_log:
best_epoch = self._best_epoch_from_raw_log(monitor, mode)
else:
res_df = pd.DataFrame(epochs, columns=['epochs'])
results = []
results_path = self.log_base_path + self.MAP_PATH + self.MAP_NAME
for epoch in epochs:
df = pd.read_csv(results_path.format(epoch=epoch))
if not monitor:
monitor = df.columns[-1]
results.append(df[monitor].mean())
res_df[monitor] = results
if mode == 'max':
best_epoch = epochs[res_df[monitor].argmax()]
else:
best_epoch = epochs[res_df[monitor].argmin()]
res_df.to_csv(self.log_base_path + '/log_new.csv', index=False)
print('Best epoch:', best_epoch)
if keep_best_only:
print('Keep best results only. Deleting prediction files...')
for epoch in epochs:
if epoch != best_epoch:
predicted_file = self.analysis_base_path + \
self.PREDICTION_PATH + \
self.PREDICTION_NAME.format(epoch=epoch)
if os.path.exists(predicted_file):
os.remove(predicted_file)
elif self.log_base_path != self.analysis_base_path:
# move the best prediction to main folder
if os.path.exists(self.analysis_base_path +
self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=epoch)
):
shutil.copy(
self.analysis_base_path + self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=epoch),
self.log_base_path + self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=epoch))
os.remove(self.analysis_base_path +
self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=epoch))
elif self.log_base_path != self.analysis_base_path:
# Copy the best prediction to the main folder
shutil.copy(self.analysis_base_path + self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=best_epoch),
self.log_base_path + self.PREDICTION_PATH +
self.PREDICTION_NAME.format(epoch=best_epoch))
return self.log_base_path + self.MODEL_PATH + \
self.MODEL_NAME.format(epoch=best_epoch)
    def get_best_performance_images(self, monitor='', best_num=2, worst_num=2):
        """For each epoch that has a prediction file, list best/worst samples.

        :param monitor: result column used for ranking; when empty, the last
            column of the first csv read is used (and reused for later epochs)
        :param best_num: number of top-scoring samples to select per epoch
        :param worst_num: number of bottom-scoring samples to select per epoch
        :return: list of dicts, one per epoch, with the prediction file name,
            the selected sample ids and their monitored values
        """
        epochs = self.epochs
        results_path = self.log_base_path + self.MAP_PATH + self.MAP_NAME
        results = []
        for epoch in epochs:
            # only plot things in prediction
            if os.path.exists(self.log_base_path + self.PREDICTION_PATH +
                              self.PREDICTION_NAME.format(epoch=epoch)):
                df = pd.read_csv(results_path.format(epoch=epoch))
                if not monitor:
                    monitor = df.columns[-1]
                # keep='all' may yield more than best_num/worst_num rows
                # when there are ties at the cut-off.
                largest_indice = df[monitor].nlargest(best_num, keep='all')
                smallest_indice = df[monitor].nsmallest(
                    worst_num, keep='all')
                indice = list(largest_indice.index) + \
                    list(smallest_indice.index)
                # `values` will implicitly cast all item to the same type
                # take out each column first, then use `values`
                results.append(
                    {'file_name': self.PREDICTION_NAME.format(epoch=epoch),
                     'ids': df[self.main_meta_data].values[indice],
                     'values': df[monitor].values[indice]})
        return results
def get_best_performance_images_test_set(
self, monitor='', best_num=2, worst_num=2):
test_folder = self.log_base_path + self.TEST_OUTPUT_PATH
main_result_file_name = test_folder + self.TEST_MAP_NAME
df = pd.read_csv(main_result_file_name)
if not monitor:
monitor = df.columns[-1]
largest_indice = df[monitor].nlargest(best_num, keep='all')
smallest_indice = df[monitor].nsmallest(
worst_num, keep='all')
indice = list(largest_indice.index) + \
list(smallest_indice.index)
# `values` will implicitly cast all item to the same type
# take out each column first, then use `values`
return {'ids': df[self.main_meta_data].values[indice],
'values': df[monitor].values[indice]}
|
<gh_stars>0
# -*- coding: utf-8 -*-
"""
Module containing utilities to create/manipulate grids.
"""
import logging
import math
from typing import Optional, Tuple, Union
import geopandas as gpd
import pyproj
import shapely.ops as sh_ops
import shapely.geometry as sh_geom
#####################################################################
# First define/init some general variables/constants
#####################################################################
# Get a logger...
logger = logging.getLogger(__name__)
# logger.setLevel(logging.DEBUG)
#####################################################################
# Grid tile helpers
#####################################################################
def create_grid(
    total_bounds: Tuple[float, float, float, float],
    nb_columns: int,
    nb_rows: int,
    crs: Union[pyproj.CRS, int, str, None],
) -> gpd.GeoDataFrame:
    """Create a grid of nb_columns x nb_rows cells covering total_bounds.

    Args:
        total_bounds (Tuple[float, float, float, float]): (xmin, ymin, xmax,
            ymax) bounds the grid should cover.
        nb_columns (int): number of columns in the grid.
        nb_rows (int): number of rows in the grid.
        crs (Union[pyproj.CRS, int, str, None]): crs of the resulting grid.

    Returns:
        gpd.GeoDataFrame: geodataframe with one polygon per grid cell.
    """
    xmin, ymin, xmax, ymax = total_bounds
    # Derive the cell size from the requested number of columns/rows and
    # delegate to the size-based variant.
    cell_width = (xmax - xmin) / nb_columns
    cell_height = (ymax - ymin) / nb_rows
    return create_grid3(
        total_bounds=total_bounds, width=cell_width, height=cell_height, crs=crs
    )
def create_grid3(
    total_bounds: Tuple[float, float, float, float],
    width: float,
    height: float,
    crs: Union[pyproj.CRS, int, str, None],
) -> gpd.GeoDataFrame:
    """
    Create a grid of rectangular cells of the given size covering the bounds.

    Args:
        total_bounds (Tuple[float, float, float, float]): bounds (xmin, ymin,
            xmax, ymax) the grid should cover.
        width (float): width of one grid cell.
        height (float): height of one grid cell.
        crs (Union[pyproj.CRS, int, str, None]): crs of the resulting grid.

    Returns:
        gpd.GeoDataFrame: geodataframe with one polygon per grid cell.
    """
    xmin, ymin, xmax, ymax = total_bounds
    # ceil so partial cells at the right/top edges are still covered
    rows = int(math.ceil((ymax - ymin) / height))
    cols = int(math.ceil((xmax - xmin) / width))

    polygons = []
    cell_left = xmin
    cell_right = xmin + width
    for _ in range(cols):
        if cell_left > xmax:
            break
        cell_top = ymin + height
        cell_bottom = ymin
        for _ in range(rows):
            if cell_bottom > ymax:
                break
            # Bug fix: Polygon lives in shapely.geometry (sh_geom), not in
            # shapely.ops; sh_ops.Polygon relied on a non-public re-export
            # and breaks on newer shapely versions.
            polygons.append(
                sh_geom.Polygon(
                    [
                        (cell_left, cell_top),
                        (cell_right, cell_top),
                        (cell_right, cell_bottom),
                        (cell_left, cell_bottom),
                    ]
                )
            )
            cell_top += height
            cell_bottom += height
        cell_left += width
        cell_right += width

    return gpd.GeoDataFrame({"geometry": polygons}, crs=crs)
def create_grid2(
    total_bounds: Tuple[float, float, float, float],
    nb_squarish_tiles: int,
    crs: Union[pyproj.CRS, int, str, None],
    nb_squarish_tiles_max: Optional[int] = None,
) -> gpd.GeoDataFrame:
    """
    Creates a grid and tries to approximate the number of cells asked as
    good as possible with grid cells that are as close to square as possible.

    Args:
        total_bounds (Tuple[float, float, float, float]): bounds of the grid to be
            created
        nb_squarish_tiles (int): about the number of cells wanted
        crs (pyproj.CRS, int, str, optional): the projection to create the grid in
        nb_squarish_tiles_max (int, optional): the maximum number of cells

    Raises:
        Exception: if nb_squarish_tiles_max is smaller than 1.

    Returns:
        gpd.GeoDataFrame: geodataframe with the grid
    """
    # Check input
    if nb_squarish_tiles_max is not None and nb_squarish_tiles_max < 1:
        # Message fixed to match the check: 1 is a valid maximum.
        raise Exception("The maximum nb of tiles should be at least 1")

    # Choose a rows/columns split whose cells are roughly square for the
    # aspect ratio of the requested bounds.
    xmin, ymin, xmax, ymax = total_bounds
    total_width = xmax - xmin
    total_height = ymax - ymin

    columns_vs_rows = total_width / total_height
    nb_rows = max(round(math.sqrt(nb_squarish_tiles / columns_vs_rows)), 1)

    # Evade having too many cells (if few cells are asked)
    if nb_rows > nb_squarish_tiles:
        nb_rows = nb_squarish_tiles
    nb_columns = max(round(nb_squarish_tiles / nb_rows), 1)

    # If a maximum number of tiles is specified, drop rows/columns until the
    # total is within bounds, always shrinking along the longer cell side.
    if nb_squarish_tiles_max is not None:
        while (nb_rows * nb_columns) > nb_squarish_tiles_max:
            if nb_columns > 1 and (
                nb_rows == 1 or total_width / nb_columns > total_height / nb_rows
            ):
                # Cell width is larger than cell height
                nb_columns -= 1
            else:
                nb_rows -= 1

    # Now we know everything to create the grid
    return create_grid(
        total_bounds=total_bounds, nb_columns=nb_columns, nb_rows=nb_rows, crs=crs
    )
def split_tiles(
    input_tiles: gpd.GeoDataFrame, nb_tiles_wanted: int
) -> gpd.GeoDataFrame:
    """Split grid tiles until about nb_tiles_wanted tiles exist.

    Each input tile is recursively cut in 2 (or 3, when the remaining split
    ratio rounds to 3) along its longest side. Non-geometry columns of the
    input rows are retained on the split parts.

    Args:
        input_tiles (gpd.GeoDataFrame): the tiles to split.
        nb_tiles_wanted (int): the approximate number of tiles wanted.

    Returns:
        gpd.GeoDataFrame: the split tiles, or input_tiles unchanged when it
        already contains at least nb_tiles_wanted tiles.
    """
    nb_tiles = len(input_tiles)
    if nb_tiles >= nb_tiles_wanted:
        return input_tiles
    nb_tiles_ratio_target = nb_tiles_wanted / nb_tiles
    # Loop over all tiles in the grid
    result_tiles = []
    for tile in input_tiles.itertuples():
        # For this tile, as long as the curr_nb_tiles_ratio_todo is not 1, keep
        # splitting
        curr_nb_tiles_ratio_todo = nb_tiles_ratio_target
        curr_tiles_being_split = [tile.geometry]
        while curr_nb_tiles_ratio_todo > 1:
            # Check in how many parts the tiles are split in this iteration
            divisor = 0
            if round(curr_nb_tiles_ratio_todo) == 3:
                divisor = 3
            else:
                divisor = 2
            curr_nb_tiles_ratio_todo /= divisor
            # Split all current tiles
            tmp_tiles_after_split = []
            for tile_to_split in curr_tiles_being_split:
                xmin, ymin, xmax, ymax = tile_to_split.bounds
                width = abs(xmax - xmin)
                height = abs(ymax - ymin)
                # Split in 2 or 3...
                # The split lines extend 1 unit beyond the bounds so they
                # fully cross the polygon being split.
                if divisor == 3:
                    if width > height:
                        split_line = sh_geom.LineString(
                            [
                                (xmin + width / 3, ymin - 1),
                                (xmin + width / 3, ymax + 1),
                                (xmin + 2 * width / 3, ymax + 1),
                                (xmin + 2 * width / 3, ymin - 1),
                            ]
                        )
                    else:
                        split_line = sh_geom.LineString(
                            [
                                (xmin - 1, ymin + height / 3),
                                (xmax + 1, ymin + height / 3),
                                (xmax + 1, ymin + 2 * height / 3),
                                (xmin - 1, ymin + 2 * height / 3),
                            ]
                        )
                else:
                    if width > height:
                        split_line = sh_geom.LineString(
                            [(xmin + width / 2, ymin - 1), (xmin + width / 2, ymax + 1)]
                        )
                    else:
                        split_line = sh_geom.LineString(
                            [
                                (xmin - 1, ymin + height / 2),
                                (xmax + 1, ymin + height / 2),
                            ]
                        )
                tmp_tiles_after_split.extend(
                    sh_ops.split(tile_to_split, split_line).geoms
                )
            curr_tiles_being_split = tmp_tiles_after_split
        # Copy the tile parts to the result and retain possible other columns
        for tile_split_part in curr_tiles_being_split:
            result_tiles.append(tile._replace(geometry=tile_split_part))
    # We should be ready...
    return gpd.GeoDataFrame(result_tiles, crs=input_tiles.crs)
|
<reponame>febsn/aldryn_newsblog_extra_plugins
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import ugettext, ugettext_lazy as _
from aldryn_categories.fields import CategoryForeignKey, CategoryManyToManyField
from aldryn_newsblog.models import Article, PluginEditModeMixin, NewsBlogCMSPlugin, AdjustableCacheModelMixin
from aldryn_newsblog.utils.utilities import get_valid_languages_from_request
from cms.models.pluginmodel import CMSPlugin
from taggit.models import Tag
from taggit.managers import TaggableManager
from .utils import get_additional_styles
class PluginStyleMixin(models.Model):
    """Abstract model mixin adding a rendering-style selector to a plugin."""
    STANDARD = 'list'
    STYLE_CHOICES = [
        (STANDARD, _('Standard')),
    ]
    # Additional style choices are discovered at import time from project
    # settings via get_additional_styles().
    style = models.CharField(
        verbose_name=_('Style'),
        choices=STYLE_CHOICES + get_additional_styles(),
        default=STANDARD,
        max_length=50,
    )
    class Meta:
        abstract = True
@python_2_unicode_compatible
class NewsBlogTaggedArticlesPlugin(PluginEditModeMixin, PluginStyleMixin, NewsBlogCMSPlugin):
    """Plugin showing up to article_count articles carrying a given tag."""
    tag = models.ForeignKey(
        Tag,
        verbose_name=_('tag'),
        on_delete=models.CASCADE,
    )
    article_count = models.PositiveIntegerField(
        default=10,
        help_text=_('The maximum number of tagged articles to display (0 for all).'),
    )
    def __str__(self):
        # The instance may not be saved yet (e.g. in plugin previews).
        if not self.pk:
            return 'tagged articles'
        return "%(count)s articles tagged by %(tag)s" % {
            'count': self.article_count,
            'tag': self.tag.__str__(),
        }
    def get_articles(self, request):
        """Return articles carrying self.tag for this plugin's app_config.

        Unpublished articles are included only in edit mode; results are
        restricted to the request's valid languages and limited to
        article_count (0 means no limit).
        """
        queryset = Article.objects
        if not self.get_edit_mode(request):
            queryset = queryset.published()
        languages = get_valid_languages_from_request(
            self.app_config.namespace, request)
        if self.language not in languages:
            return queryset.none()
        queryset = queryset.translated(*languages).filter(
            app_config=self.app_config,
            tags=self.tag)
        if self.article_count > 0:
            queryset = queryset[:self.article_count]
        return queryset
@python_2_unicode_compatible
class NewsBlogTagRelatedPlugin(PluginEditModeMixin, PluginStyleMixin, AdjustableCacheModelMixin,
                               CMSPlugin):
    """Plugin listing articles that share at least one tag with an article."""
    # NOTE: This one does NOT subclass NewsBlogCMSPlugin. This is because this
    # plugin can really only be placed on the article detail view in an apphook.
    exclude_tags = TaggableManager(verbose_name=_('excluded tags'), blank=True)
    article_count = models.PositiveIntegerField(
        default=10,
        help_text=_('The maximum number of tagged articles to display (0 for all).'),
    )
    def get_articles(self, article, request):
        """
        Returns a queryset of articles that have common tags with the given article.

        Articles carrying any of exclude_tags are excluded, as is the given
        article itself; results are restricted to the request's valid
        languages and limited to article_count (0 means no limit).
        """
        languages = get_valid_languages_from_request(
            article.app_config.namespace, request)
        if self.language not in languages:
            return Article.objects.none()
        # Removed an unused `filter_tags` queryset that was built here but
        # never evaluated. Matching deliberately uses all of the article's
        # tags while excluding articles that carry an excluded tag (compare
        # NewsBlogCategoryRelatedPlugin, which matches on the filtered set) —
        # confirm which semantics are intended if this ever diverges.
        queryset = Article.objects.filter(
            tags__in=article.tags.all()).exclude(
            tags__in=self.exclude_tags.all()).exclude(
            pk=article.pk).translated(*languages)
        if not self.get_edit_mode(request):
            queryset = queryset.published()
        queryset = queryset.distinct()
        if self.article_count > 0:
            queryset = queryset[:self.article_count]
        return queryset
    def __str__(self):
        return ugettext('Tag-related articles')
@python_2_unicode_compatible
class NewsBlogCategoryRelatedPlugin(PluginEditModeMixin,
                                    PluginStyleMixin,
                                    AdjustableCacheModelMixin,
                                    CMSPlugin):
    """Plugin listing articles sharing a category with the current article."""
    # NOTE: This one does NOT subclass NewsBlogCMSPlugin. This is because this
    # plugin can really only be placed on the article detail view in an apphook.
    exclude_categories = CategoryManyToManyField(verbose_name=_('excluded categories'), blank=True)
    article_count = models.PositiveIntegerField(
        default=10,
        help_text=_('The maximum number of related articles to display (0 for all).'),
    )
    def get_queryset(self, article, request):
        """
        Returns a queryset of articles that have common categories with the given article.

        Categories listed in exclude_categories are ignored when matching,
        and the given article itself is excluded from the result.
        """
        languages = get_valid_languages_from_request(
            article.app_config.namespace, request)
        if self.language not in languages:
            return Article.objects.none()
        filter_categories = article.categories.exclude(
            pk__in=models.Subquery(self.exclude_categories.values('pk'))
        )
        queryset = Article.objects.filter(
            categories__in=filter_categories
        ).exclude(
            pk=article.pk
        ).translated(
            *languages
        )
        if not self.get_edit_mode(request):
            queryset = queryset.published()
        return queryset.distinct()
    def get_articles(self, article, request):
        """Return get_queryset() limited to article_count (0 means no limit)."""
        queryset = self.get_queryset(article, request)
        if self.article_count > 0:
            queryset = queryset[:self.article_count]
        return queryset
    def __str__(self):
        return ugettext('Category-related articles')
@python_2_unicode_compatible
class NewsBlogLatestArticlesByCategoryPlugin(PluginEditModeMixin,
                                             PluginStyleMixin,
                                             AdjustableCacheModelMixin,
                                             NewsBlogCMSPlugin):
    """Plugin listing the latest articles belonging to one category."""
    category = CategoryForeignKey(
        verbose_name=_('category'),
    )
    article_count = models.IntegerField(
        default=10,
        verbose_name=_('count'),
        help_text=_("The maximum number of latest articles to display."),
    )
    def get_articles(self, request):
        """
        Return a queryset of the latest articles, filtered by the category set in the plugin
        settings and sliced to the desired count.
        """
        queryset = Article.objects
        if not self.get_edit_mode(request):
            queryset = queryset.published()
        languages = get_valid_languages_from_request(
            self.app_config.namespace, request)
        if self.language not in languages:
            return queryset.none()
        queryset = queryset.translated(*languages).filter(
            app_config=self.app_config).filter(
            categories=self.category)
        if self.article_count > 0:
            queryset = queryset[:self.article_count]
        return queryset
    def __str__(self):
        # NOTE(review): the already-formatted string is passed to ugettext(),
        # so the dynamic result can never match a translation catalog entry;
        # translating the template before .format() is the usual pattern.
        return ugettext("{app_title}'s {article_count} latest articles with category {category}".format(
            app_title=self.app_config.get_app_title(),
            article_count=self.article_count,
            category=self.category,
        ))
|
"""
Problem of assembling the original chromosome (sequence) from
its multiple fragments (reads) is represented with a graph,
where vertices are individual reads and edges are overlaps
between reads. The assembly of the original sequence
is equivalent to finding such a path through the graph
that each read is only used once. The path is found using
the Depth First Search (DFS) algorithm.
An example of usage of this module:
from assembly import SequenceAssembler
s = SequenceAssembler()
# read data
with open("data.fasta") as data:
s.read_fasta(data)
# run assembly
s.assemble()
# access path and resulting sequence
print s.path
print s.sequence
"""
from itertools import combinations
class Read:
    """ Class representing reads (vertices) in the overlap graph.

    Parameters
    ----------
    read : str
        The string that represents a read,
        read from an input file in FASTA format.
        NOTE(review): the argument is currently not stored — reads are
        keyed by their string in SequenceAssembler.reads; confirm this
        is intentional.

    Attributes
    ----------
    overlaps : dict
        The dictionary that holds information about reads that
        can be glued to the current read on the right side.
        Key --- string representing the other read.
        Value --- number of characters the other read protrudes
        with respect to the current read after they are glued.
        For example,

            ATCGGCCAT
                GCCATCGG

        GCCATCGG can be glued to ATCGGCCAT on its right side,
        and protrudes by three characters. The value can be
        negative; in the following example, the value is -2:

            ATCGGCCAT
             TCGGCC

    visited : int
        Number of times this read was visited during
        the graph traversal.
    visit_limit: int
        Limit on the number of times this read can be visited
        during the graph traversal. It is possible by accident
        to have several reads that are equal to each other,
        but still have a unique way to glue them. The limit is the
        number of reads equal to the current read, including itself.
    """
    def __init__(self, read):
        self.overlaps = {}
        self.visited = 0
        self.visit_limit = 1
class SequenceAssembler:
    """ Class for sequence assembler

    Attributes
    ----------
    reads : dict
        The dictionary that holds reads.
        Key --- string representing a read.
        Value --- object of Read class.
    path : list
        The list that holds reads in the order they
        should be glued into the original sequence.
    sequence : str
        A string representing the original sequence
        to be assembled.
    num_reads : int
        Total number of reads added to the graph.

    Note
    ----
    This module targets Python 2: calculate_overlap relies on `/`
    performing integer division when producing slice indices.
    """
    def __init__(self):
        self.reads = {}
        self.path = []
        self.sequence = ""
        self.num_reads = 0
    def add_read(self, read):
        """ Add read to the graph.
        If read is already in the dictionary of reads,
        increment its visit limit.
        """
        r = Read(read)
        if read not in self.reads:
            self.reads[read] = r
        else:
            self.reads[read].visit_limit += 1
        self.num_reads += 1
    def read_fasta(self, handle):
        """ Read fragments from input file handle.
        For example,
        s = SequenceAssembler()
        with open("data.fasta") as data:
            s.read_fasta(data)
        """
        read = ""
        for line in handle:
            if line[0] == ">":
                if len(read):
                    self.add_read(read)
                    read = ""
            else:
                read += line.strip()
        self.add_read(read)
    def calculate_overlap(self, r1, r2):
        """ Check if r1 and r2 can be glued.
        Calculate how much one of them protrudes
        with respect to another after they are glued.
        Results are recorded in the reads' `overlaps` dicts.
        """
        # We know that reads that can be glued,
        # share at least half of their length.
        # Make sure one is not shorter than
        # the half of another.
        # (len(x) / 2 + len(x) % 2 == ceil(len(x) / 2) under Python 2
        # integer division.)
        if len(r1) / 2 + len(r1) % 2 <= len(r2) \
                and len(r2) / 2 + len(r2) % 2 <= len(r1):
            # prepare second halves for overlap pre-check
            tail1 = r1[len(r1) / 2:]
            tail2 = r2[len(r2) / 2:]
            # case 1: r1 contains r2 completely
            #
            # For example,
            #
            # ATCGCCGGAT
            #  TCGCCGGA
            pos = r1.find(r2)
            if pos != -1:
                self.reads[r1].overlaps[r2] = pos + len(r2) - len(r1)
            # case 2: r2 contains r1 completely
            #
            # For example,
            #
            #  TCGCCGGA
            # ATCGCCGGAT
            pos = r2.find(r1)
            if pos != -1:
                self.reads[r2].overlaps[r1] = pos + len(r1) - len(r2)
            # case 3: end of r1 overlaps with beginning of r2
            #
            # For example,
            #
            # ATCGCCGGAT
            #  TCGCCGGATGC
            #
            # First check that at least half of r1 is in r2
            # If there is a match, calculate the expected length
            # of overlap and check if they indeed overlap.
            pos = r2.find(tail1)
            if pos != -1:
                overlap = pos + len(tail1)
                if r1[-overlap:] == r2[:overlap]:
                    self.reads[r1].overlaps[r2] = len(r2) - overlap
            # case 4: end of r2 overlaps with beginning of r1
            #
            # For example,
            #
            #  CGCCGGATCC
            # TCGCCGGAT
            #
            # First check that at least half of r2 is in r1
            # If there is a match, calculate the expected length
            # of overlap and check if they indeed overlap.
            pos = r1.find(tail2)
            if pos != -1:
                overlap = pos + len(tail2)
                if r2[-overlap:] == r1[:overlap]:
                    self.reads[r2].overlaps[r1] = len(r1) - overlap
    def find_path(self, num_visited, read):
        """ Implements the DFS algorithm.
        For each visited read, we check what reads can be visited next
        and visit one of them. If at some point we are at a dead end,
        we go back (backtrack) and try to visit another one. Continue
        until we visit all reads. Returns True when a complete path
        was found.
        """
        self.path.append(read)
        r = self.reads[read]
        r.visited += 1
        if num_visited < self.num_reads:
            finished = False
            for other_read in r.overlaps:
                if not finished and self.reads[other_read].visited < self.reads[other_read].visit_limit:
                    finished = self.find_path(num_visited + 1, other_read)
            if not finished:
                # Dead end: undo this step before backtracking.
                self.path.pop()
                r.visited -= 1
        else:
            finished = True
        return finished
    def assemble(self):
        """ Assemble the original sequence.
        After building the graph (reading all reads),
        calculate all overlaps and run the DFS algorithm.
        After finding the path that visits all reads,
        assemble the original sequence.
        """
        # Calculate overlaps between each pair of reads.
        for r1, r2 in combinations(self.reads, 2):
            self.calculate_overlap(r1, r2)
        # If there are equal reads, they overlap too
        for read in self.reads:
            if self.reads[read].visit_limit > 1:
                self.reads[read].overlaps[read] = 0
        # Find the read to start the DFS algorithm,
        # The good candidate is a read that can't be glued
        # to any other read on the right side.
        start_candidates = self.reads.copy()
        for read in self.reads:
            r = self.reads[read]
            for other_read in r.overlaps:
                if other_read in start_candidates:
                    del start_candidates[other_read]
        if len(start_candidates):
            for read in start_candidates:
                if len(self.reads[read].overlaps):
                    self.find_path(1, read)
                    break
        else:
            # If there no good candidates where to start
            # the DFS algorithm, try each node.
            for read in self.reads:
                if len(self.reads[read].overlaps):
                    self.find_path(1, read)
                    if len(self.path) == self.num_reads:
                        break
        # Assemble the original sequence:
        # start from the first node in the path,
        # glue subsequent reads, according to how
        # much they are supposed to protrude.
        self.sequence = self.path[0]
        if len(self.path) > 1:
            for i in range(len(self.path)-1):
                r = self.reads[self.path[i]]
                overlap = r.overlaps[self.path[i+1]]
                if overlap > 0:
                    self.sequence += self.path[i+1][-overlap:]
                elif overlap < 0:
                    # Negative overlap: the next read is contained, trim back.
                    self.sequence = self.sequence[:overlap]
|
<reponame>benedictquartey/Chiromancer
import numpy as np #numpy library for matrix math
import cv2
import imutils # basic image processing
import pandas as pd
import os
from datetime import datetime
import time
import cv_functions
# Collected class labels and image paths; module-level so the capture loop
# and csv writer can share them.
data_class=[]
data_images= []
# NOTE(review): the 'label ' key has a trailing space — confirm downstream
# consumers of the csv expect that column name.
dataSet = {'images':data_images,'label ':data_class}
timeStamp = "0.0.0.0"
# Running-average background frame built during the first capture frames.
backgnd = None
caliberation = False
# DataFrame built by compile_data(); printed by viewData().
df=None
def compile_data():
    """Deduplicate the collected (image path, label) pairs and write them
    to data/palm_reader_data.csv (no header, no index)."""
    global df
    df = pd.DataFrame(data=dataSet).drop_duplicates()
    print("Creating data csv file ...")
    # write to csv file without headers and index
    df.to_csv('data/palm_reader_data.csv', index=False, header=False)
    time.sleep(1)
    print("File saved in data/palm_reader_data.csv")
def save_img(frame,data_point_class):
    """Resize frame, save it as a timestamped png under data/IMG/, and
    record its path and class label in the module-level dataset lists."""
    # save training data
    img_data=imutils.resize(frame, width=min(400, frame.shape[1]))
    img_path= "data/IMG/"+datetime.now().strftime('%Y-%m-%d_%H.%M.%S')+".png"
    #save processed image with time stamp
    # create storage folder if it doesnt already exist
    if not os.path.exists("data/IMG/"):
        os.makedirs("data/IMG/")
    cv2.imwrite(img_path,img_data)
    data_images.append(img_path)
    data_class.append(data_point_class)
    print("Img Name: {}, Image Class : {}".format(img_path,data_point_class))
def main():
    """Interactive entry point: prompt for an action and dispatch it.

    Options: 'c' collect data, 'v' view the compiled DataFrame, 'q' quit.
    Any other input simply returns. NOTE(review): the menu re-appears via
    mutual recursion (collectData/viewData call main() again) rather than
    a loop.
    """
    print("\n**************** Beginning Data Collection ****************\n")
    print(" Collect data: [Option] C\n View data: [Option] V \n Quit script: [Option] Q\n")
    option = input("[Option]: ")
    if (option.lower()=='c'):
        num_data_point = int(input("Number of Datapoints: "))
        data_point_class = input("Class of Datapoints: ")
        collectData(num_data_point,data_point_class)
    elif(option.lower()=='v'):
        print("\n**************** View Collected Data ****************\n")
        viewData()
    elif(option.lower()=='q'):
        print("Quitting ...")
        exit()
def collectData(num_data_point,data_point_class):
    """Capture num_data_point thresholded hand images from the webcam.

    The first 30 frames build a background model; afterwards the ROI is
    segmented against it and each press of 'c' saves one image labelled
    data_point_class. NOTE(review): main() is re-entered at the end, so
    this function does not return to its original caller in practice.
    """
    global backgnd
    cap = cv2.VideoCapture(0)
    saved_images=0
    frameCount = 0
    while(True):
        _,frame = cap.read()
        frame = imutils.resize(frame, width=700)
        # mirror so on-screen movement matches the user's movement
        frame = cv2.flip(frame,1)
        frame_clone = frame.copy()
        # define boundaries of ROI where we look for hand
        roi = frame[10:255,350:590]
        gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        gray=cv2.GaussianBlur(gray,(7,7),0)
        # calculate background for the first 30 frames
        if frameCount<30:
            backgnd=cv_functions.calculate_background(backgnd,gray)
            frameCount+=1
        # after background calculation, start capture loop until all data is collected
        elif(frameCount==30 and saved_images !=num_data_point):
            threshold_img=cv_functions.segment(backgnd,gray)
            cv2.rectangle(frame_clone,(590,10),(350,255),(0,255,0),2)
            cv2.imshow("Data Collector",frame_clone)
            cv2.imshow("Threshold",threshold_img)
            keypress = cv2.waitKey(100) & 0xFF
            if keypress == ord("c"):
                save_img(threshold_img,data_point_class)
                saved_images+=1
        if (saved_images ==num_data_point):
            break
    cap.release()
    cv2.destroyAllWindows()
    cv2.waitKey(1) #extra waitkey to ensure visualization window closes
    compile_data()
    main()
def viewData():
    """Print the compiled DataFrame and return to the main menu.

    NOTE(review): df is None until compile_data() has run in this session.
    """
    print(df)
    main()
# Only run main() when the script is invoked directly (not when imported).
if __name__ == "__main__":
    main()
|
#!/usr/bin/env python
#
# Copyright the CoLL team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
# Intro:
# Author: <NAME>
# Time: Oct 14, 2021
"""
import os
import time
from argparse import ArgumentParser
import run_exp.testbed_fsl as tb_fsl
import run_exp.testbed_tl as tb_tl
import run_exp.testbed_zslw as tb_zslw
import run_exp.transfer_learning as tl
import run_exp.transfer_learning_r as tlr
import run_exp.transfer_learning_w as tlw
import run_exp.few_shot_learning as fsl
import run_exp.few_shot_learning_r as fslr
import run_exp.few_shot_learning_w as fslw
import run_exp.zero_shot_learning as zsl
import run_exp.zero_shot_learning_r as zslr
import run_exp.zero_shot_learning_w as zslw
import run_exp.zero_shot_learning_oov as zslo
import run_exp.zero_shot_learning_oov_w as zslwo
import run_exp.zero_shot_learning_oov_w_trained as zslwot
import run_exp.supervised_learning as sl
import run_exp.supervised_learning_r as slr
import run_exp.supervised_learning_w as slw
import run_exp.mono_lingual_supervised_learning_zh as msl_zz
import run_exp.mono_lingual_supervised_learning_ar as msl_aa
import run_exp.cross_lingual_supervised_learning as cls
def batch_submit_multi_jobs(cmd_list, info_list, platform: str, split_num: int = 4, partition="g", node_id: int = None):
    """Group commands into chunks of split_num and sbatch one job per chunk.

    For each chunk, a job script is generated by patching fixed header lines
    of ./job_base_<platform>.sh (job name, log file paths, partition),
    appending the chunk's commands, and submitting it with sbatch.

    :param cmd_list: shell commands to run
    :param info_list: one short description per command; each chunk's
        descriptions are joined into its slurm job name
    :param platform: "group" or "m3"; selects the header-line layout of the
        base script
    :param split_num: number of commands per submitted job
    :param partition: slurm partition (used for the m3-style script only)
    :param node_id: optional node number pinned via --nodelist (group only)
    """
    assert len(cmd_list) == len(info_list)
    content = []
    file_name = "./job_base_{pltf}.sh".format(pltf=platform)
    file_out = "./job_{pltf}.sh".format(pltf=platform)

    # Chunk commands and their descriptions into groups of split_num.
    cmd_list_frac = []
    info_list_frac = []
    flag_idx = 0
    while flag_idx < len(cmd_list):
        if (flag_idx + split_num) <= len(cmd_list):
            next_flag_idx = flag_idx + split_num
        else:
            next_flag_idx = len(cmd_list)
        sub_cmd_list = cmd_list[flag_idx:next_flag_idx:]
        sub_info_list = info_list[flag_idx:next_flag_idx:]
        cmd_list_frac.append(sub_cmd_list)
        info_list_frac.append(sub_info_list)
        flag_idx = next_flag_idx

    with open(file_name) as in_file:
        for line in in_file:
            content.append(line)

    for i, sub_cmd_list in enumerate(cmd_list_frac):
        with open(file_out, "w") as out_file:
            # job_name
            job_name = "__".join(info_list_frac[i])
            print("- JOB NAME: ", job_name)
            # NOTE: the indices below assume a fixed line layout of the
            # base script for each platform.
            if platform == "group":
                _info = "#SBATCH -J {job_name}\n".format(job_name=job_name)
                content[21] = _info
                if node_id is not None:
                    # Bug fix: keep the trailing newline; without it this
                    # header line fused with the following line when the
                    # patched content was written out.
                    _node = f"#SBATCH --nodelist=node0{str(node_id)}\n"
                    content[22] = _node
                # SBATCH -o log/fs2s-iwslt-%J.out
                # SBATCH -e log/fs2s-iwslt-%J.err
                _out_file = "#SBATCH -o log/%J-{job_name}.out\n".format(job_name=job_name)
                content[15] = _out_file
                _err_file = "#SBATCH -e log/%J-{job_name}.err\n".format(job_name=job_name)
                content[16] = _err_file
            else:
                _partition = "#SBATCH --partition={var}\n".format(var=partition)
                content[2] = _partition
                _info = "#SBATCH --job-name={job_name}\n".format(job_name=job_name)
                content[3] = _info
                # SBATCH --output=log/fs2s-iwslt-%j.out
                # SBATCH --error=log/fs2s-iwslt-%j.err
                _out_file = "#SBATCH --output=log/%j-{job_name}.out\n".format(job_name=job_name)
                content[4] = _out_file
                _err_file = "#SBATCH --error=log/%j-{job_name}.err\n".format(job_name=job_name)
                content[5] = _err_file
            for line in content:
                out_file.write(line)
            # command
            for cmd in sub_cmd_list:
                out_file.write(cmd)
                out_file.write("\n\n")
        cmd = "sbatch job_{pltf}.sh".format(pltf=platform)
        os.system(cmd)
def batch_run_interactive(cmd_list: list, order=1):
    """Run each shell command sequentially in the current session.

    :param cmd_list: shell command strings to execute via os.system
    :param order: 1 runs in the given order, -1 in reverse
    """
    # Echo the full plan first so the user can see what will run.
    for i in cmd_list[::order]:
        print(i)
    for i in cmd_list[::order]:
        # `except Exception` instead of a bare except so Ctrl-C still
        # interrupts the batch.
        try:
            os.system(i)
            time.sleep(10)  # stagger launches to avoid hammering the scheduler
            print(i)
        except Exception:
            print(i, " failed!")
def batch_cancel(job_start: int, num: int, platform: str):
    """Cancel `num` consecutive slurm jobs starting at id `job_start`.

    On the "group" cluster scancel is invoked verbosely (-v).
    """
    verbose_flag = "-v " if platform == "group" else ""
    for job_id in range(job_start, job_start + num):
        os.system("scancel {flag}{i}".format(flag=verbose_flag, i=job_id))
if __name__ == '__main__':
    # Registry of experiment ids -> run_exp module; each module exposes
    # get_cmd() returning (cmd_list, info_list).
    exps = {
        "fsl": fsl,
        "fslr": fslr,
        "fslw": fslw,
        "zsl": zsl,
        "zslr": zslr,
        "zslw": zslw,
        "zslo": zslo,
        "zslwo": zslwo,
        "zslwot": zslwot,
        "sl": sl,
        "slr": slr,
        "slw": slw,
        "tl": tl,
        "tlr": tlr,
        "tlw": tlw,
        "tb_fsl": tb_fsl,
        "tb_tl": tb_tl,
        "tb_zslw": tb_zslw,
        "msl_zz": msl_zz,  # train: zh; test: zh
        "msl_aa": msl_aa,
        "csl": cls
    }
    parser = ArgumentParser()
    parser.add_argument("--exp", type=str, help="experiment id",
                        choices=exps.keys())
    parser.add_argument("--split", default=3, type=int, help="experiment id")
    parser.add_argument("--pltf", default="group", type=str, help="cluster: m3, group",
                        choices=["m3", "group"])
    parser.add_argument("--sbatch", action="store_true")
    parser.add_argument("--cancel", type=int)
    parser.add_argument("--range", type=int)
    parser.add_argument("--node_id", type=int, required=False)
    args = parser.parse_args()
    # Build the selected experiment's command list, then either submit it
    # as grouped sbatch jobs or run the commands interactively.
    cmd_list, info_list = exps[args.exp].get_cmd()
    if args.sbatch:
        batch_submit_multi_jobs(cmd_list, info_list, args.pltf, split_num=args.split, partition="m3g",
                                node_id=args.node_id)
    else:
        # cmd: submit batch jobs for multi jobs
        # optional partition for m3: dgx , m3g, m3h, m3e
        batch_run_interactive(cmd_list, order=1)
    if args.cancel:
        batch_cancel(args.cancel, args.range, platform=args.pltf)
|
<filename>plugin.audio.booksshouldbefree/cache.py
# Copyright (C) 2013
# <NAME> (<EMAIL>)
#
# This Program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This Program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with XBMC; see the file COPYING. If not, write to
# the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
# http://www.gnu.org/copyleft/gpl.html
#
import os
import time
import glob
import urllib2
import loyal_book_utils as utils
import sfile
import xbmc
# Cache files live in the 'c' subfolder of the add-on profile directory.
CacheDir = xbmc.translatePath(os.path.join(utils.PROFILE, 'c'))
# Maximum number of cached responses kept on disk before purging starts.
CacheSize = 100
sfile.makedirs(CacheDir)
def clearCache():
    """Delete the whole cache directory, wait until it is gone, recreate it."""
    try:
        sfile.rmtree(CacheDir)
        # Removal may complete asynchronously; poll until the directory
        # really disappears before recreating it.
        while sfile.isdir(CacheDir):
            xbmc.sleep(50)
    except:
        pass
    checkCacheDir()
def checkCacheDir():
    """Ensure the cache directory exists, creating it when missing."""
    try:
        exists = sfile.isdir(CacheDir)
    except:
        # Treat any probe failure as "missing" and attempt creation.
        exists = False
    if not exists:
        sfile.makedirs(CacheDir)
def getURLNoCache(url, agent=None, tidy=True):
    """Fetch url directly (bypassing the cache) and return the body.

    :param agent: optional User-Agent header value
    :param tidy: when True, strip CR, LF and TAB characters from the body
    """
    req = urllib2.Request(url)
    if agent:
        req.add_header('User-Agent', agent)
    response = urllib2.urlopen(req)
    html = response.read()
    response.close()
    if tidy:
        html = html.replace('\r', '').replace('\n', '').replace('\t', '')
    return html
def getURL(url, maxSec=0, agent=None, tidy=True):
    """Fetch a URL through the on-disk cache.

    :param url: address to fetch; None returns None
    :param maxSec: maximum age (seconds) of a cached entry to reuse;
        0 disables cache reads (the response is still cached)
    :param agent: optional User-Agent header for the real fetch
    :param tidy: strip CR/LF/TAB characters from a freshly fetched body
    :return: the response body, or None when url is None
    """
    purgeCache()
    # Identity comparison is the correct way to test for None.
    if url is None:
        return None
    if maxSec > 0:
        timestamp = getTimestamp(url)
        if timestamp > 0:
            if (time.time() - timestamp) <= maxSec:
                return getCachedData(url)
    data = getURLNoCache(url, agent, tidy)
    addToCache(url, data)
    return data
def getTimestamp(url):
    """Return the mtime of url's cache entry, or 0 when not cached."""
    cachePath = os.path.join(CacheDir, createKey(url))
    if not os.path.isfile(cachePath):
        return 0
    try:
        return os.path.getmtime(cachePath)
    except:
        return 0
def getCachedData(url):
    """Return the cached response body for url.

    Assumes the cache entry exists (callers check getTimestamp first).
    """
    cacheKey = createKey(url)
    cachePath = os.path.join(CacheDir, cacheKey)
    # `with open(...)` guarantees the handle is closed and replaces the
    # removed Python-2-only file() builtin.
    with open(cachePath, 'r') as f:
        return f.read()
def addToCache(url, data):
    """Store `data` as the cached response for url, then purge old entries."""
    checkCacheDir()
    cacheKey = createKey(url)
    cachePath = os.path.join(CacheDir, cacheKey)
    # `with open(...)` guarantees the handle is flushed and closed and
    # replaces the removed Python-2-only file() builtin.
    with open(cachePath, 'w') as f:
        f.write(data)
    purgeCache()
def createKey(url):
    """Map a URL to a filesystem-safe cache key: its MD5 hex digest.

    Falls back to the legacy `md5` module on ancient Pythons without
    hashlib.
    """
    try:
        import hashlib
        digest = hashlib.md5(url)
    except:
        import md5
        digest = md5.new(url)
    return digest.hexdigest()
def purgeCache():
    """Delete oldest cache files until at most CacheSize remain."""
    files = glob.glob(os.path.join(CacheDir, '*'))
    nFiles = len(files)
    try:
        while nFiles > CacheSize:
            oldestFile = getOldestFile(files)
            # NOTE(review): getOldestFile already returns a full path from
            # glob; this join only works because os.path.join returns the
            # second argument unchanged when it is absolute — confirm
            # CacheDir is always absolute.
            path = os.path.join(CacheDir, oldestFile)
            while os.path.exists(path):
                try: os.remove(path)
                except: pass
            files = glob.glob(os.path.join(CacheDir, '*'))
            nFiles = len(files)
    except:
        pass
def getOldestFile(files):
    """Return the path with the earliest creation time, or None when the
    list is empty. Ties keep the first-listed file."""
    if not files:
        return None
    # Maximum age == minimum ctime; min() keeps the first of equal keys,
    # matching the original strict-greater comparison.
    return min(files, key=os.path.getctime)
import re, os
import config
class room_task:
    def __init__(self, thm_session, room_name, skip_answers=False) -> None:
        """Hold the context for rendering one TryHackMe room.

        :param thm_session: session object used to fetch room data
        :param room_name: identifier of the room
        :param skip_answers: when True, omit the {submission} field from
            rendered questions
        """
        # NOTE(review): other methods read self.tasks and self.authenticated,
        # which are not set here — presumably populated elsewhere; confirm.
        self.room_tasks = None
        self.thm_session = thm_session
        self.room_name = room_name
        self.skip_answers = skip_answers
def get_attr(self, task: dict=None, question: dict=None) -> dict:
attr = {
"room_name": self.room_name
, "authenticated": self.authenticated
, "task_count": self.tasks.__len__()
}
if task is not None:
for i in task:
if i == 'questions': continue
attr.update({i: task[i]})
attr.update({"questions_count": task['questions'].__len__()})
if question is not None:
for i in question:
attr.update({i: question[i]})
return attr
def replace_attr(self, rstring: str, pattrn: str, attrs:dict) -> str:
for tag in re.findall(pattrn, rstring):
if tag not in attrs: continue
rstring = rstring.replace("{"+tag+"}", str(attrs[tag]))
return rstring
def get_questions(self, task=None, taskNo=-1, index=-1) -> list:
task = task if task is not None else self.get_task(taskNo, index)
if self.authenticated:
for i in task['questions']:
if i['noAnswer'] and i['correct']:
i['submission'] = self.replace_attr(config.template['no_answer'], r"{(.*?)}", self.get_attr(task=task, question=i))
elif not i['correct']:
i['submission'] = self.replace_attr(config.template['placeholder'], r"{(.*?)}", self.get_attr(task=task, question=i))
return task['questions']
def get_formatted_room(self) -> str:
outstr = self.replace_attr(config.template['room'], r"{(.*?)}", self.get_attr())
outstr = outstr.replace("{tasks}", self.get_formatted_tasks())
return outstr
def get_formatted_tasks(self) -> str:
outstr = ""
for task in self.tasks:
task_str = self.replace_attr(config.template['task'], r"{(.*?)}", self.get_attr(task=task))
task_str = task_str.replace("{questions}", self.get_formatted_questions(task=task))
outstr += task_str
return outstr
def get_formatted_questions(self, task: dict=None, taskNo: int=-1, index: int=-1) -> str:
questions = self.get_questions(task=task, taskNo=taskNo, index=index)
outstr = ""
quest_format = config.template['question']
if self.skip_answers:
quest_format = quest_format.replace("{submission}", "")
if not self.authenticated:
for item in config.config['auth_only']:
quest_format = quest_format.replace("{"+item+"}", "")
for question in questions:
outstr += self.replace_attr(quest_format, r"{(.*?)}", self.get_attr(task=task, question=question))
return outstr
def write_room(self, file_loc: str) -> None:
if file_loc is None: file_loc = f"./README.md"
if file_loc.endswith("/") or file_loc.endswith("\\"): file_loc += "README.md"
elif not file_loc.endswith(".md"): file_loc += ".md"
if '/' in file_loc or '\\' in file_loc:
try: os.makedirs(os.path.dirname(file_loc), exist_ok=True)
except:
print("couldn't create directory..")
return
with open(f"{file_loc}", "w") as out:
out.write(self.get_formatted_room())
print(f"Output written to {file_loc}")
|
<filename>matrices.py
from numbers import Number
import random as rd
import time
import numpy as np
class Matrice:
    """A simple list-of-lists matrix with arithmetic, determinant, inverse, etc.

    Attributes:
        lignes: number of rows.
        colonnes: number of columns.
        matrice: row-major list of row lists holding the entries.

    On invalid operations the methods print an error message (kept in French,
    as in the original project) and return None instead of raising.

    BUG FIX applied throughout: the error handlers printed ``err.message``,
    which does not exist on Python 3 exceptions (AttributeError); they now
    print the exception itself.
    """

    def __init__(self, l, c=None, fill=0.0):
        """Build an l x c matrix filled with `fill`; square (l x l) when c is None."""
        self.lignes = l
        # square matrix?
        if c is None:
            self.colonnes = l
        else:
            self.colonnes = c
        # one independent row list per line (no row aliasing)
        self.matrice = [[fill] * self.colonnes for i in range(self.lignes)]

    def __str__(self):
        result = ""
        for line in self.matrice:
            result += str(line) + "\n"
        return result

    # ------------------------------------------------------------------
    # arithmetic operators (+, -, *, /)
    # ------------------------------------------------------------------

    def __add__(self, other):
        """Element-wise sum; both operands must be Matrice of identical shape."""
        try:
            if not isinstance(other, Matrice):
                raise TypeError('Type Matrice requis')
            # same dimensions required
            if self.colonnes != other.colonnes or self.lignes != other.lignes:
                raise ValueError('Les matrices doivent etre de meme format..')
            reponse = Matrice(self.lignes, self.colonnes)
            for i in range(self.lignes):
                for j in range(self.colonnes):
                    reponse.matrice[i][j] = self.matrice[i][j] + other.matrice[i][j]
            return reponse
        except (TypeError, ValueError) as err:
            print(err)
            return None

    def __sub__(self, other):
        """Element-wise difference; both operands must be Matrice of identical shape."""
        try:
            if not isinstance(other, Matrice):
                raise TypeError('Type Matrice requis')
            if self.colonnes != other.colonnes or self.lignes != other.lignes:
                raise ValueError('Les matrices doivent etre de meme format..')
            reponse = Matrice(self.lignes, self.colonnes)
            for i in range(self.lignes):
                for j in range(self.colonnes):
                    reponse.matrice[i][j] = self.matrice[i][j] - other.matrice[i][j]
            return reponse
        except (TypeError, ValueError) as err:
            print(err)
            return None

    def __mul__(self, other):
        """Scalar multiplication (other is a Number) or matrix product (Alxp * Bpxc)."""
        try:
            # scalar multiplication
            if isinstance(other, Number):
                reponse = Matrice(self.lignes, self.colonnes)
                for i in range(self.lignes):
                    for j in range(self.colonnes):
                        reponse.matrice[i][j] = self.matrice[i][j] * other
                return reponse
            # both operands must be matrices
            if not isinstance(other, Matrice):
                raise TypeError('Type Matrice ou Number requis')
            # Alxp * Bpxc
            if self.colonnes != other.lignes:
                raise ValueError('Toutes les multiplications doivent respecter la contrainte: Alxp * Bpxc..')
            reponse = Matrice(self.lignes, other.colonnes)
            # walk the result matrix
            for i in range(self.lignes):
                # Deliberate slow-down kept from the original code (timing demo):
                # otherwise the run is too fast to observe differences.
                time.sleep(0.1)
                for j in range(other.colonnes):
                    # row of self times column of other
                    for k in range(self.colonnes):
                        reponse.matrice[i][j] += self.matrice[i][k] * other.matrice[k][j]
            return reponse
        except (ValueError, TypeError) as err:
            print(err)
            return None

    # 2*A == A*2 holds for scalars; A*B != B*A for matrices, but we assume
    # __mul__ will be the one invoked.
    __rmul__ = __mul__

    def __div__(self, other):
        # Python 2 division protocol: delegate to __truediv__.
        return self.__truediv__(other)

    def __truediv__(self, other):
        """Scalar division, or A/B computed as A * B.Inverse() for square B."""
        try:
            self.makeFloat()
            if isinstance(other, Number):
                if other == 0.0:
                    raise ZeroDivisionError('Division par zero..')
                other = float(other)
                reponse = Matrice(self.lignes, self.colonnes)
                for i in range(self.lignes):
                    for j in range(self.colonnes):
                        reponse.matrice[i][j] = self.matrice[i][j] / float(other)
                return reponse
            if not isinstance(other, Matrice):
                raise TypeError('Type Matrice ou Number requis')
            if not other.estCarree():
                raise ValueError('Matrices carrees seulement')
            other.makeFloat()
            # A/B ==> A * B^-1 -- the closest thing to a matrix division.
            if other.estInversible():
                return self * other.Inverse()
            else:
                raise ValueError('La matrice B \'(A/B)\' n\'est pas inversible.. ')
        except (ZeroDivisionError, TypeError, ValueError) as err:
            print(err)
            return None

    def randomFilling(self, start=0, end=25):
        """Fill the matrix in place with random integers in [start, end]."""
        for i in range(self.lignes):
            for j in range(self.colonnes):
                self.matrice[i][j] = rd.randint(start, end)

    # ------------------------------------------------------------------
    # matrix operations
    # ------------------------------------------------------------------

    def Trace(self, valeurabsolue=False):
        """Sum of the diagonal; absolute values when valeurabsolue is True
        (used to detect an all-zero diagonal)."""
        try:
            if not self.estCarree():
                raise ValueError('Matrices carrees seulement')
            reponse = 0
            for i in range(self.lignes):
                if valeurabsolue:
                    reponse += abs(self.matrice[i][i])
                else:
                    reponse += self.matrice[i][i]
            return reponse
        except ValueError as err:
            print(err)
            return None

    def estCarree(self):
        """True when the matrix is square."""
        if self.colonnes == self.lignes:
            return True
        return False

    def estReguliere(self):
        """True when the determinant is nonzero (regular/invertible matrix)."""
        if self.Determinant() != 0:
            return True
        return False

    def Determinant(self):
        """Determinant by cofactor expansion on the first row (fast paths for
        1x1, 2x2 and triangular matrices). Converts entries to float in place."""
        try:
            self.makeFloat()
            if not self.estCarree():
                raise ValueError('Matrices carrees seulement')
            # base cases
            if self.lignes == 1:
                return self.matrice[0][0]
            if self.lignes == 2:
                return self.matrice[0][0] * self.matrice[1][1] - self.matrice[0][1] * self.matrice[1][0]
            # diagonal or triangular: product of the diagonal
            if self.estTriangulaire():
                reponse = self.matrice[0][0]
                for i in range(1, self.lignes):
                    for j in range(1, self.colonnes):
                        if i == j:
                            reponse *= self.matrice[i][j]
                return reponse
            # general case: expand along the first row
            i = 0
            reponse = 0
            for j in range(self.colonnes):
                m = Matrice(self.colonnes - 1)
                # build the minor of self without row i and column j
                ligne = 1
                for k in range(m.lignes):
                    colonne = 0
                    for l in range(m.colonnes):
                        if j == l:
                            colonne += 1
                        m.matrice[k][l] = self.matrice[ligne][colonne]
                        colonne += 1
                    ligne += 1
                # cofactor sign
                signe = self.matrice[i][j]
                if (i + j) % 2 != 0:
                    signe *= -1
                reponse += signe * m.Determinant()
            return reponse
        except ValueError as err:
            print(err)
            return None

    def Inverse(self):
        """Inverse via the adjugate (fast paths for diagonal and 2x2 matrices)."""
        try:
            self.makeFloat()
            if not self.estCarree():
                raise ValueError('Matrices carrees seulement')
            if not self.estReguliere():
                raise ZeroDivisionError('Le determinant ne peut etre zero pour la division..')
            if self.estDiagonale():
                reponse = Matrice(self.lignes)
                for i in range(self.lignes):
                    if self.matrice[i][i] != 0:
                        reponse.matrice[i][i] = 1 / self.matrice[i][i]
                    else:
                        reponse.matrice[i][i] = 0
                return reponse
            reciprocal = self.Determinant()
            if self.lignes == 2:
                # 2x2 closed form: swap diagonal, negate off-diagonal, divide by det
                m = Matrice(2)
                m.matrice[0][0] = self.matrice[1][1]
                m.matrice[1][1] = self.matrice[0][0]
                m.matrice[0][1] = self.matrice[0][1] * -1
                m.matrice[1][0] = self.matrice[1][0] * -1
                return m / reciprocal
            comatriceT = self.CoMatrice().Transposee()
            return comatriceT / reciprocal
        except (ValueError, ZeroDivisionError) as err:
            print(err)
            return None

    def CoMatrice(self):
        """Matrix of cofactors (signed minors)."""
        try:
            if not self.estCarree():
                raise ValueError('Matrices carrees seulement')
            if self.lignes == 1:
                return self
            reponse = Matrice(self.lignes)
            for i in range(self.lignes):
                for j in range(self.colonnes):
                    m = Matrice(self.lignes - 1)
                    ligne = 0
                    for k in range(m.lignes):
                        colonne = 0
                        if ligne == i:
                            ligne += 1
                        for l in range(m.colonnes):
                            if colonne == j:
                                colonne += 1
                            m.matrice[k][l] = self.matrice[ligne][colonne]
                            colonne += 1
                        ligne += 1
                    reponse.matrice[i][j] = m.Determinant()
                    # cofactor sign
                    if (i + j) % 2 != 0:
                        reponse.matrice[i][j] *= -1
            return reponse
        except ValueError as err:
            print(err)
            return None

    def estDiagonale(self):
        """True when all off-diagonal entries are zero (errors on an all-zero diagonal)."""
        try:
            if not self.estCarree():
                raise ValueError('Matrices carrees seulement')
            for i in range(self.lignes):
                for j in range(self.colonnes):
                    if i != j and self.matrice[i][j] != 0:
                        return False
            # empty diagonal counts as an empty matrix
            if self.Trace(True) == 0:
                raise ValueError('Matrice vide..')
            return True
        except ValueError as err:
            print(err)
            return None

    def estTriangulaire(self, sens=None, stricte=False):
        """True when exactly one triangle (upper xor lower) is populated;
        `sens` selects 'inferieure'/'i' or 'superieure'/'s', `stricte` requires
        an all-zero diagonal."""
        try:
            if not self.estCarree():
                raise ValueError('Matrices carrees seulement')
            inferieure = False
            superieure = False
            for i in range(self.lignes):
                for j in range(self.colonnes):
                    if i < j and self.matrice[i][j] != 0:
                        inferieure = True
                    elif i > j and self.matrice[i][j] != 0:
                        superieure = True
            diagonaleVide = self.Trace(True) == 0
            if not inferieure and not superieure and diagonaleVide:
                raise ValueError('Matrice vide..')
            if stricte and not diagonaleVide:
                return False
            # xor: exactly one triangle populated
            if superieure != inferieure:
                if sens != None:
                    if (sens == "inferieure" or sens == "i") and not inferieure:
                        return False
                    elif (sens == "superieure" or sens == "s") and not superieure:
                        return False
                    else:
                        raise ValueError(
                            "Seules les entrees suivantes sont acceptees:\n 'inferieure', 'i', 'superieure', 's'")
                return True
            return False
        except ValueError as err:
            print(err)
            return None

    def estInversible(self):
        """True when the matrix is square with a nonzero determinant."""
        if not self.estCarree():
            return False
        if self.Determinant() == 0:
            return False
        return True

    def Transposee(self):
        """Return the transpose (self for 1x1)."""
        if self.lignes == 1:
            return self
        reponse = Matrice(self.colonnes, self.lignes)
        for i in range(self.lignes):
            for j in range(self.colonnes):
                reponse.matrice[j][i] = self.matrice[i][j]
        return reponse

    def delta(self, other):
        """Mean absolute difference between two column vectors (used by Jacobi)."""
        try:
            if not isinstance(other, Matrice):
                raise ValueError('Matrice attendue')
            if self.lignes != other.lignes or self.colonnes != other.colonnes:
                raise ValueError('Matrices de meme dimensions requises')
            if self.colonnes != 1 or other.colonnes != 1:
                raise ValueError('Vecteur requis (\'Matrice colonne, xLignes 0Colonne\')')
            diff = 0
            for i in range(self.lignes):
                diff += abs(self.matrice[i][0] - other.matrice[i][0])
            return diff / self.lignes
        except ValueError as err:
            print(err)
            return None

    def estSDD(self):
        """True when the matrix is strictly diagonally dominant."""
        for i in range(self.lignes):
            a = 0
            x = 0
            for j in range(self.colonnes):
                if i == j:
                    a = self.matrice[i][j]
                else:
                    x += abs(self.matrice[i][j])
            if not a > x:
                return False
        return True

    def makeFloat(self):
        """Convert every entry to float in place."""
        for i in range(self.lignes):
            for j in range(self.colonnes):
                self.matrice[i][j] = float(self.matrice[i][j])
def mean(matrice):
    """Return the per-column means of a Matrice as a plain list of floats."""
    n = matrice.lignes
    return [
        sum(matrice.matrice[row][col] for row in range(n)) / n
        for col in range(matrice.colonnes)
    ]
def mean_matrice(matrice):
    """Return a Matrice of the same shape where every row is the column-mean vector."""
    result = Matrice(matrice.lignes, matrice.colonnes)
    mean_vector = mean(matrice)
    for ligne in range(len(matrice.matrice)):
        # BUG FIX: copy the vector per row -- assigning the same list object to
        # every row aliased them, so mutating one row mutated all of them.
        result.matrice[ligne] = list(mean_vector)
    return result
def substract_mean(matrice):
    """Return a centered copy: the matrix minus its column means.

    (Name keeps the original spelling since callers may depend on it.)
    """
    centered_offset = mean_matrice(matrice)
    return matrice - centered_offset
def Identite(dimension):
    """Return the dimension x dimension identity Matrice."""
    ident = Matrice(dimension)
    for idx in range(dimension):
        ident.matrice[idx][idx] = 1
    return ident
def MultiplieXMatrices(matrices):
    """Multiply a dict {0: A, 1: B, ...} of Matrice objects in the cheapest
    association order (classic matrix-chain-order dynamic programming).

    Prints the naive cost, then evaluates the optimally parenthesized product.
    Returns None (after printing the error) on invalid input.
    """
    try:
        # matrices must be a dict keyed 0..n-1
        if not isinstance(matrices, dict):
            raise TypeError('Les matrices doivent etre stockees dans un dictionnaire..')
        # at least 2 matrices
        if len(matrices) < 2:
            raise ValueError('Il faut au moins 2 matrices..')
        # every adjacent pair must be chain-compatible: Alxp * Bpxc
        for i in range(len(matrices) - 1):
            if not isinstance(matrices[i], Matrice) or not isinstance(matrices[i + 1], Matrice):
                raise TypeError('Tous les elements doivent etre de type Matrice..')
            if matrices[i].colonnes != matrices[i + 1].lignes:
                raise ValueError('Toutes les multiplications doivent respecter la contrainte: Alxp * Bpxc..')
        # dimension vector d: matrix i has shape d[i] x d[i+1]
        d = [0] * (len(matrices) + 1)
        d[0] = matrices.get(0).lignes
        d[1] = matrices.get(0).colonnes
        for i in range(1, len(matrices)):
            d[i + 1] = matrices.get(i).colonnes
        # cost of the left-to-right (no re-parenthesizing) order, for comparison
        csp = 0
        for i in range(len(d) - 2):
            csp += d[i] * d[i + 1] * d[i + 2]
        print('Cout sans parantheses: ' + str(csp))
        # SECURITY NOTE: eval() runs a generated "((A*B)*(C*D))..." expression.
        # The string is built locally from integer indices, but do not route
        # untrusted input through this function.
        return eval(CalculeMeilleurOrdreParantheses(d))
    except (ValueError, TypeError) as err:
        # BUG FIX: err.message does not exist on Python 3 exceptions.
        print(err)
        return None
def CalculeMeilleurOrdreParantheses(d):
    """Matrix-chain-order DP: given the dimension vector d (matrix i is
    d[i] x d[i+1]), print and return the cheapest parenthesization as an
    evaluatable string like "((matrices.get(0)*matrices.get(1))*...)"."""
    size = len(d) - 1
    # couts[i][j] = minimal scalar-multiplication cost for the chain i..j
    couts = [[None] * size for i in range(size)]
    # separation[i][j] = split index of the optimal parenthesization of i..j
    separation = [[None] * size for i in range(size)]
    # etape is the chain length - 1 (which DP diagonal is being filled)
    for etape in range(size):
        for i in range(size - etape):
            if etape == 0:
                # single matrix: no multiplication needed
                couts[i][i] = 0
            elif etape == 1:
                # two matrices: one product, fixed cost
                couts[i][i + 1] = d[i] * d[i + 1] * d[i + 2]
                separation[i][i + 1] = i + 1
            else:
                minimum = -1
                # candidate splits: (M11+M24+d0d1d4 / M12+M34+d0d2d4 / ...
                for k in range(i, i + etape):
                    least = couts[i][k] + couts[k + 1][i + etape] + d[i] * d[k + 1] * d[i + etape + 1]
                    if minimum == -1:
                        minimum = least
                        separation[i][i + etape] = k + 1
                    if least < minimum:
                        minimum = least
                        separation[i][i + etape] = k + 1
                couts[i][i + etape] = minimum
    # format the string so MultiplieXMatrices can eval() it
    parenthesis_order = StringFormatParenthesageMinimal(separation, 0, size - 1)
    parenthesis_order = parenthesis_order.replace(' m', '*m')
    parenthesis_order = parenthesis_order.replace(' ', '')
    parenthesis_order = parenthesis_order.replace(')m', ')*m')
    parenthesis_order = parenthesis_order.replace(')(', ')*(')
    print('Meilleur ordre:')
    print(parenthesis_order)
    print('Cout: ' + str(couts[0][size - 1]))
    return parenthesis_order
def StringFormatParenthesageMinimal(l, i, j):
    """Recursively build the parenthesized product string for chain i..j from
    the split table `l` (l[i][j] is where the optimal split occurs).

    A single matrix renders as "matrices.get(i) " (trailing space is cleaned
    up by the caller's replace passes).
    """
    if i == j:
        return "matrices.get(" + str(i) + ") "
    split = l[i][j]
    left = StringFormatParenthesageMinimal(l, i, split - 1)
    right = StringFormatParenthesageMinimal(l, split, j)
    return "(" + left + right + ")"
def Jacobi(A):
    """Jacobi eigenvalue iteration for a square Matrice A.

    Applies plane rotations to drive the off-diagonal elements of A to zero,
    mutating A.matrice IN PLACE (A ends up diagonalized / destroyed).

    Returns:
        (ev, U): ev is a 1 x n Matrice holding the eigenvalues, U an n x n
        Matrice accumulating the rotations (eigenvectors).

    NOTE(review): assumes A is symmetric (only the upper triangle is swept) --
    confirm with callers. If maxit is reached before the off-diagonal sum
    drops below eps, ev is returned still holding its initial zeros.
    """
    if not A.estCarree():
        raise ValueError('La matrice doit etre carree..')
    n = A.colonnes  # matrice carree
    maxit = 100  # nombre d'iterations maximal
    eps = 1.0e-15  # niveau d'acuitee
    pi = np.pi
    ev = Matrice(1, n)  # initialisation des eigenvalues
    U = Matrice(n)  # initialisation des eigenvector
    # start U as the identity; rotations are accumulated into it below
    for i in range(0, n):
        U.matrice[i][i] = 1.0
    for t in range(0, maxit):
        s = 0  # compute sum of off-diagonal elements in A(i,j)
        for i in range(0, n):
            s = s + np.sum(np.abs(A.matrice[i][(i + 1):n]))
        if s < eps:  # diagonal form reached
            for i in range(0, n):
                ev.matrice[0][i] = A.matrice[i][i]
            break
        else:
            limit = s / (n * (n - 1) / 2.0)  # average value of off-diagonal elements
            for i in range(0, n - 1):  # loop over lines of matrix
                for j in range(i + 1, n):  # loop over columns of matrix
                    if np.abs(A.matrice[i][j]) > limit:  # determine (ij) such that |A(i,j)| larger than average
                        # value of off-diagonal elements
                        denom = A.matrice[i][i] - A.matrice[j][j]  # denominator of Eq. (3.61)
                        if np.abs(denom) < eps:
                            phi = pi / 4  # Eq. (3.62)
                        else:
                            phi = 0.5 * np.arctan(2.0 * A.matrice[i][j] / denom)  # Eq. (3.61)
                        si = np.sin(phi)
                        co = np.cos(phi)
                        # rotate the three affected strips of A in place
                        for k in range(i + 1, j):
                            store = A.matrice[i][k]
                            A.matrice[i][k] = A.matrice[i][k] * co + A.matrice[k][j] * si  # Eq. (3.56)
                            A.matrice[k][j] = A.matrice[k][j] * co - store * si  # Eq. (3.57)
                        for k in range(j + 1, n):
                            store = A.matrice[i][k]
                            A.matrice[i][k] = A.matrice[i][k] * co + A.matrice[j][k] * si  # Eq. (3.56)
                            A.matrice[j][k] = A.matrice[j][k] * co - store * si  # Eq. (3.57)
                        for k in range(0, i):
                            store = A.matrice[k][i]
                            A.matrice[k][i] = A.matrice[k][i] * co + A.matrice[k][j] * si
                            A.matrice[k][j] = A.matrice[k][j] * co - store * si
                        store = A.matrice[i][i]
                        A.matrice[i][i] = A.matrice[i][i] * co * co + 2.0 * A.matrice[i][j] * co * si + A.matrice[j][j] * si * si  # Eq. (3.58)
                        A.matrice[j][j] = A.matrice[j][j] * co * co - 2.0 * A.matrice[i][j] * co * si + store * si * si  # Eq. (3.59)
                        A.matrice[i][j] = 0.0  # Eq. (3.60)
                        # accumulate the same rotation into the eigenvector matrix
                        for k in range(0, n):
                            store = U.matrice[k][j]
                            U.matrice[k][j] = U.matrice[k][j] * co - U.matrice[k][i] * si  # Eq. (3.66)
                            U.matrice[k][i] = U.matrice[k][i] * co + store * si  # Eq. (3.67)
    return ev, U
|
#!/usr/bin/env python
"""
This example calculates the Ricci tensor from the metric and does this
on the example of Schwarzschild solution.
If you want to derive this by hand, follow the wiki page here:
https://en.wikipedia.org/wiki/Deriving_the_Schwarzschild_solution
Also read the above wiki and follow the references from there if
something is not clear, like what the Ricci tensor is, etc.
"""
from sympy import exp, Symbol, sin, dsolve, Function, Matrix, Eq, pprint, solve
def grad(f, X):
    """Return the gradient of f: the list of derivatives of f w.r.t. each x in X."""
    return [f.diff(x) for x in X]
def d(m, x):
    # Gradient of the (0, 0) entry of matrix m with respect to coordinates x.
    # NOTE(review): only m[0, 0] is differentiated and the helper is not called
    # elsewhere in this excerpt -- it appears to be scratch/demo code.
    return grad(m[0, 0], x)
class MT(object):
    """Metric tensor wrapper: stores the covariant matrix and its inverse."""

    def __init__(self, m):
        self.gdd = m        # covariant components g_dd
        self.guu = m.inv()  # contravariant components g^uu (matrix inverse)

    def __str__(self):
        return "g_dd =\n" + str(self.gdd)

    def dd(self, i, j):
        """Covariant component g_ij."""
        return self.gdd[i, j]

    def uu(self, i, j):
        """Contravariant component g^ij."""
        return self.guu[i, j]
class G(object):
    """Christoffel symbols of the second kind for metric g and coordinates x."""

    def __init__(self, g, x):
        self.g = g
        self.x = x

    def udd(self, i, k, l):
        """Gamma^i_kl = (1/2) g^im (g_mk,l + g_ml,k - g_kl,m), summed over m."""
        g = self.g
        x = self.x
        total = 0
        for m in (0, 1, 2, 3):
            derivative_sum = (
                g.dd(m, k).diff(x[l])
                + g.dd(m, l).diff(x[k])
                - g.dd(k, l).diff(x[m])
            )
            total += g.uu(i, m) / 2 * derivative_sum
        return total
class Riemann(object):
    """Riemann curvature tensor built from Christoffel symbols G."""

    def __init__(self, G, x):
        self.G = G
        self.x = x

    def uddd(self, rho, sigma, mu, nu):
        """R^rho_{sigma mu nu}: derivative terms plus the Gamma*Gamma contraction."""
        G = self.G
        x = self.x
        result = G.udd(rho, nu, sigma).diff(x[mu]) - G.udd(rho, mu, sigma).diff(x[nu])
        for lam in (0, 1, 2, 3):
            result += G.udd(rho, mu, lam) * G.udd(lam, nu, sigma)
            result -= G.udd(rho, nu, lam) * G.udd(lam, mu, sigma)
        return result
class Ricci(object):
    """Ricci tensor obtained by contracting the Riemann tensor R."""

    def __init__(self, R, x):
        self.R = R
        self.x = x
        self.g = R.G.g  # the metric, reached through the Riemann tensor's Gammas

    def dd(self, mu, nu):
        """Covariant R_mu,nu = sum over lam of R^lam_{mu lam nu}."""
        total = 0
        for lam in (0, 1, 2, 3):
            total += self.R.uddd(lam, mu, lam, nu)
        return total

    def ud(self, mu, nu):
        """Mixed R^mu_nu = g^{mu lam} R_{lam nu}, returned expanded."""
        raised = 0
        for lam in (0, 1, 2, 3):
            raised += self.g.uu(mu, lam) * self.dd(lam, nu)
        return raised.expand()
def curvature(Rmn):
    """Scalar curvature: the trace of the mixed Ricci tensor R^mu_mu."""
    return sum(Rmn.ud(mu, mu) for mu in range(4))
# Metric functions nu(r) and lambda(r) of the static, spherically symmetric
# ansatz; both depend only on the radial coordinate.
nu = Function("nu")
lam = Function("lambda")
# Coordinates (t, r, theta, phi).
t = Symbol("t")
r = Symbol("r")
theta = Symbol(r"theta")
phi = Symbol(r"phi")
# general, spherically symmetric metric
# ds^2 = -e^nu dt^2 + e^lambda dr^2 + r^2 dOmega^2 (signature -+++).
gdd = Matrix(
    (
        (-exp(nu(r)), 0, 0, 0),
        (0, exp(lam(r)), 0, 0),
        (0, 0, r ** 2, 0),
        (0, 0, 0, r ** 2 * sin(theta) ** 2),
    )
)
g = MT(gdd)  # metric wrapper holding g_dd and its inverse
X = (t, r, theta, phi)  # coordinate tuple used for differentiation
Gamma = G(g, X)  # Christoffel symbols of this metric
Rmn = Ricci(Riemann(Gamma, X), X)  # Ricci tensor built from the Riemann tensor
def pprint_Gamma_udd(i, k, l):
    # Pretty-print the Christoffel symbol Gamma^i_kl computed from the metric.
    pprint(Eq(Symbol("Gamma^%i_%i%i" % (i, k, l)), Gamma.udd(i, k, l)))
def pprint_Rmn_dd(i, j):
    # Pretty-print the covariant Ricci component R_ij.
    pprint(Eq(Symbol("R_%i%i" % (i, j)), Rmn.dd(i, j)))
# from Differential Equations example
def eq1():
    # Solve the 00-component of the vacuum field equations under the
    # substitution nu(r) = -lambda(r) (standard Schwarzschild ansatz).
    r = Symbol("r")
    e = Rmn.dd(0, 0)
    e = e.subs(nu(r), -lam(r))
    pprint(dsolve(e, lam(r)))
def eq2():
    """Solve R_11 = 0 with nu(r) = -lambda(r) substituted, printing the result."""
    r = Symbol("r")
    e = Rmn.dd(1, 1)
    # Removed unused local `C = Symbol("CC")` left over from an earlier version.
    e = e.subs(nu(r), -lam(r))
    pprint(dsolve(e, lam(r)))
def eq3():
    # Solve R_22 = 0 with the nu(r) = -lambda(r) substitution applied.
    r = Symbol("r")
    e = Rmn.dd(2, 2)
    e = e.subs(nu(r), -lam(r))
    pprint(dsolve(e, lam(r)))
def eq4():
    # Solve R_33 = 0 under the same substitution; the second call asks dsolve
    # for its "best" hint explicitly.
    # NOTE(review): printing two dsolve results looks like leftover
    # experimentation -- confirm both outputs are intentional.
    r = Symbol("r")
    e = Rmn.dd(3, 3)
    e = e.subs(nu(r), -lam(r))
    pprint(dsolve(e, lam(r)))
    pprint(dsolve(e, lam(r), "best"))
def main():
    # Walk through the Schwarzschild derivation: show the metric ansatz, the
    # nonzero Christoffel symbols and Ricci components, then solve the vacuum
    # field equations for lambda(r) and print the resulting metric.
    print("Initial metric:")
    pprint(gdd)
    print("-" * 40)
    print("Christoffel symbols:")
    pprint_Gamma_udd(0, 1, 0)
    pprint_Gamma_udd(0, 0, 1)
    print()
    pprint_Gamma_udd(1, 0, 0)
    pprint_Gamma_udd(1, 1, 1)
    pprint_Gamma_udd(1, 2, 2)
    pprint_Gamma_udd(1, 3, 3)
    print()
    pprint_Gamma_udd(2, 2, 1)
    pprint_Gamma_udd(2, 1, 2)
    pprint_Gamma_udd(2, 3, 3)
    print()
    pprint_Gamma_udd(3, 2, 3)
    pprint_Gamma_udd(3, 3, 2)
    pprint_Gamma_udd(3, 1, 3)
    pprint_Gamma_udd(3, 3, 1)
    print("-" * 40)
    print("Ricci tensor:")
    pprint_Rmn_dd(0, 0)
    # Capture R_11 here: it is the component solved below.
    e = Rmn.dd(1, 1)
    pprint_Rmn_dd(1, 1)
    pprint_Rmn_dd(2, 2)
    pprint_Rmn_dd(3, 3)
    print("-" * 40)
    print("Solve Einstein's equations:")
    # Substitute nu = -lambda, solve the ODE for lambda(r), then rebuild the
    # metric with the solution to obtain the Schwarzschild form.
    e = e.subs(nu(r), -lam(r)).doit()
    l = dsolve(e, lam(r))
    pprint(l)
    lamsol = solve(l, lam(r))[0]
    metric = gdd.subs(lam(r), lamsol).subs(nu(r), -lamsol)  # .combine()
    print("metric:")
    pprint(metric)
if __name__ == "__main__":
main()
|
<reponame>ArnaudGallardo/boss<gh_stars>10-100
# Copyright 2016 The Johns Hopkins University Applied Physics Laboratory
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rest_framework.parsers import BaseParser
from django.conf import settings
import blosc
import numpy as np
import zlib
import io
from bosscore.request import BossRequest
from bosscore.error import BossParserError, BossError, ErrorCodes
import spdb
def is_too_large(request_obj, bit_depth):
    """Method to check if a request is too large to handle

    Compares the uncompressed cutout size against settings.CUTOUT_MAX_SIZE.

    Args:
        request_obj: validated BossRequest describing the cutout extents
        bit_depth: channel bit depth in bits per voxel

    Returns:
        bool: True when the request exceeds the configured maximum
    """
    time_points = request_obj.get_time().stop - request_obj.get_time().start
    total_bytes = (request_obj.get_x_span() * request_obj.get_y_span() *
                   request_obj.get_z_span() * time_points * bit_depth / 8)
    if bit_depth == 64:
        # Allow larger annotation posts since things compress so well
        total_bytes /= 4
    return total_bytes > settings.CUTOUT_MAX_SIZE
class ConsumeReqMixin:
    """
    Provides a method to ensure a request is entirely consumed by a parser
    when exiting .parse() early due to an error. Failure to completely
    consume the request causes Nginx to lose its connection with uwsgi.
    """
    def consume_request(self, stream):
        """
        Consume the request and do not allow exceptions.

        Args:
            stream (stream-like object): The stream to consume.
        """
        try:
            stream.read()
        except Exception:
            # Best-effort drain: swallow read errors but, unlike the old bare
            # except, let KeyboardInterrupt/SystemExit propagate.
            pass
class BloscParser(BaseParser, ConsumeReqMixin):
    """
    Parser that handles blosc compressed binary data
    """
    media_type = 'application/blosc'

    def parse(self, stream, media_type=None, parser_context=None):
        """Method to decompress bytes from a POST that contains blosc compressed matrix data

        **Bytes object decompressed should be C-ordered**

        :param stream: Request stream
        stream type: django.core.handlers.wsgi.WSGIRequest
        :param media_type:
        :param parser_context:
        :return: (req, resource, ndarray) on success, else a BossParserError
        """
        # Process request and validate
        try:
            request_args = {
                "service": "cutout",
                "collection_name": parser_context['kwargs']['collection'],
                "experiment_name": parser_context['kwargs']['experiment'],
                "channel_name": parser_context['kwargs']['channel'],
                "resolution": parser_context['kwargs']['resolution'],
                "x_args": parser_context['kwargs']['x_range'],
                "y_args": parser_context['kwargs']['y_range'],
                "z_args": parser_context['kwargs']['z_range'],
            }
            if 't_range' in parser_context['kwargs']:
                request_args["time_args"] = parser_context['kwargs']['t_range']
            else:
                request_args["time_args"] = None
            req = BossRequest(parser_context['request'], request_args)
        except BossError as err:
            self.consume_request(stream)
            return BossParserError(err.message, err.error_code)
        except Exception as err:
            self.consume_request(stream)
            return BossParserError(str(err), ErrorCodes.UNHANDLED_EXCEPTION)

        # Convert to Resource
        resource = spdb.project.BossResourceDjango(req)

        # Get bit depth
        try:
            bit_depth = resource.get_bit_depth()
        except ValueError:
            # Consistency fix: drain the request before the early return, as the
            # other parsers do, so Nginx keeps its uwsgi connection.
            self.consume_request(stream)
            return BossParserError("Unsupported data type provided to parser: {}".format(resource.get_data_type()),
                                   ErrorCodes.TYPE_ERROR)

        # Make sure cutout request is under 500MB UNCOMPRESSED
        if is_too_large(req, bit_depth):
            # Consistency fix: drain the request here as well (see above).
            self.consume_request(stream)
            return BossParserError("Cutout request is over 500MB when uncompressed. Reduce cutout dimensions.",
                                   ErrorCodes.REQUEST_TOO_LARGE)
        try:
            # Decompress
            raw_data = blosc.decompress(stream.read())
            # np.fromstring is deprecated for binary input; frombuffer + copy
            # reads the same bytes and keeps the array writable as before.
            data_mat = np.frombuffer(raw_data, dtype=resource.get_numpy_data_type()).copy()
        except MemoryError:
            return BossParserError("Ran out of memory decompressing data.",
                                   ErrorCodes.BOSS_SYSTEM_ERROR)
        except Exception:
            # Narrowed from a bare except; anything else here means the payload
            # did not match the channel's datatype/bitdepth.
            return BossParserError("Failed to decompress data. Verify the datatype/bitdepth of your data "
                                   "matches the channel.", ErrorCodes.DATATYPE_DOES_NOT_MATCH)

        # Reshape and return
        try:
            if req.time_request:
                # Time series request (even if single time point) - Get 4D matrix
                parsed_data = np.reshape(data_mat,
                                         (len(req.get_time()),
                                          req.get_z_span(),
                                          req.get_y_span(),
                                          req.get_x_span()),
                                         order='C')
            else:
                # Not a time series request (time range [0,1] auto-populated) - Get 3D matrix
                parsed_data = np.reshape(data_mat, (req.get_z_span(), req.get_y_span(), req.get_x_span()), order='C')
        except ValueError:
            return BossParserError("Failed to unpack data. Verify the datatype of your POSTed data and "
                                   "xyz dimensions used in the POST URL.", ErrorCodes.DATA_DIMENSION_MISMATCH)

        return req, resource, parsed_data
class BloscPythonParser(BaseParser, ConsumeReqMixin):
    """
    Parser that handles blosc compressed binary data in python numpy format
    """
    media_type = 'application/blosc-python'

    def parse(self, stream, media_type=None, parser_context=None):
        """Method to decompress bytes from a POST that contains blosc compressed numpy ndarray

        Only should be used if data sent was compressed using blosc.pack_array()

        :param stream: Request stream
        stream type: django.core.handlers.wsgi.WSGIRequest
        :param media_type:
        :param parser_context:
        :return: (req, resource, ndarray) on success, else a BossParserError
        """
        # Build BossRequest arguments from the URL kwargs; the time range is optional.
        try:
            request_args = {
                "service": "cutout",
                "collection_name": parser_context['kwargs']['collection'],
                "experiment_name": parser_context['kwargs']['experiment'],
                "channel_name": parser_context['kwargs']['channel'],
                "resolution": parser_context['kwargs']['resolution'],
                "x_args": parser_context['kwargs']['x_range'],
                "y_args": parser_context['kwargs']['y_range'],
                "z_args": parser_context['kwargs']['z_range'],
            }
            if 't_range' in parser_context['kwargs']:
                request_args["time_args"] = parser_context['kwargs']['t_range']
            else:
                request_args["time_args"] = None
            req = BossRequest(parser_context['request'], request_args)
        except BossError as err:
            # Drain the request before returning so Nginx keeps its uwsgi connection.
            self.consume_request(stream)
            return BossParserError(err.message, err.error_code)
        except Exception as err:
            self.consume_request(stream)
            return BossParserError(str(err), ErrorCodes.UNHANDLED_EXCEPTION)

        # Convert to Resource
        resource = spdb.project.BossResourceDjango(req)

        # Get bit depth
        try:
            bit_depth = resource.get_bit_depth()
        except ValueError:
            self.consume_request(stream)
            return BossParserError("Unsupported data type provided to parser: {}".format(resource.get_data_type()),
                                   ErrorCodes.TYPE_ERROR)

        # Make sure cutout request is under 500MB UNCOMPRESSED
        if is_too_large(req, bit_depth):
            self.consume_request(stream)
            return BossParserError("Cutout request is over 500MB when uncompressed. Reduce cutout dimensions.",
                                   ErrorCodes.REQUEST_TOO_LARGE)

        # Decompress and return
        try:
            # pack_array round-trip restores the original ndarray (shape and
            # dtype included), so no reshape is needed here.
            parsed_data = blosc.unpack_array(stream.read())
        except MemoryError:
            return BossParserError("Ran out of memory decompressing data.",
                                   ErrorCodes.BOSS_SYSTEM_ERROR)
        except EOFError:
            return BossParserError("Failed to unpack data. Verify the datatype of your POSTed data and "
                                   "xyz dimensions used in the POST URL.", ErrorCodes.DATA_DIMENSION_MISMATCH)

        return req, resource, parsed_data
class NpygzParser(BaseParser, ConsumeReqMixin):
    """
    Parser that handles npygz compressed binary data
    """
    media_type = 'application/npygz'

    def parse(self, stream, media_type=None, parser_context=None):
        """Method to decompress bytes from a POST that contains a gzipped npy saved numpy ndarray

        :param stream: Request stream
        stream type: django.core.handlers.wsgi.WSGIRequest
        :param media_type:
        :param parser_context:
        :return: (req, resource, ndarray) on success, else a BossParserError
        """
        # Build BossRequest arguments from the URL kwargs; the time range is optional.
        try:
            request_args = {
                "service": "cutout",
                "collection_name": parser_context['kwargs']['collection'],
                "experiment_name": parser_context['kwargs']['experiment'],
                "channel_name": parser_context['kwargs']['channel'],
                "resolution": parser_context['kwargs']['resolution'],
                "x_args": parser_context['kwargs']['x_range'],
                "y_args": parser_context['kwargs']['y_range'],
                "z_args": parser_context['kwargs']['z_range'],
            }
            if 't_range' in parser_context['kwargs']:
                request_args["time_args"] = parser_context['kwargs']['t_range']
            else:
                request_args["time_args"] = None
            req = BossRequest(parser_context['request'], request_args)
        except BossError as err:
            # Drain the request before returning so Nginx keeps its uwsgi connection.
            self.consume_request(stream)
            return BossParserError(err.message, err.error_code)
        except Exception as err:
            self.consume_request(stream)
            return BossParserError(str(err), ErrorCodes.UNHANDLED_EXCEPTION)

        # Convert to Resource
        resource = spdb.project.BossResourceDjango(req)

        # Get bit depth
        try:
            bit_depth = resource.get_bit_depth()
        except ValueError:
            self.consume_request(stream)
            return BossParserError("Unsupported data type provided to parser: {}".format(resource.get_data_type()),
                                   ErrorCodes.TYPE_ERROR)

        # Make sure cutout request is under 500MB UNCOMPRESSED
        if is_too_large(req, bit_depth):
            self.consume_request(stream)
            return BossParserError("Cutout request is over 500MB when uncompressed. Reduce cutout dimensions.",
                                   ErrorCodes.REQUEST_TOO_LARGE)

        # Decompress and return
        try:
            # zlib-decompress the body, then load the .npy payload from memory.
            # NOTE(review): np.load can unpickle object arrays when
            # allow_pickle is enabled; this relies on numpy's default of
            # refusing pickles for untrusted POST bodies -- confirm the
            # installed numpy version keeps that default.
            data_bytes = zlib.decompress(stream.read())
            # Open
            data_obj = io.BytesIO(data_bytes)
            parsed_data = np.load(data_obj)
        except MemoryError:
            return BossParserError("Ran out of memory decompressing data.",
                                   ErrorCodes.BOSS_SYSTEM_ERROR)
        except EOFError:
            return BossParserError("Failed to unpack data. Verify the datatype of your POSTed data and "
                                   "xyz dimensions used in the POST URL.", ErrorCodes.DATA_DIMENSION_MISMATCH)

        return req, resource, parsed_data
|
from __future__ import print_function, division, absolute_import
'''
Modified by <NAME>:
2012-11-30: The timestamping has been converted to more precise, floating-point
representation, instead of integer representation.
(See Lines 600-601 and 630-633 for modification.)
'''
# y_serial Python module Version 0.60 Date : 2010-08-20
# -*- coding: iso-8859-1 -*-
# http://yserial.sourceforge.net
'''
_______________ y_serial :: warehouse compressed Python objects with SQLite
Dependencies: at least Python v2.5 because it includes the sqlite3 module.
Database itself and all other modules used are standard issue.
_____ PREFACE (in reStructured Text format for our site)
Intro and quick example
-----------------------
*In about ten minutes, you should be able to simply give some
labels to any Python object and save it to a database file; then
get back a set of objects by specifying portion of their labels.*
Here's a quick EXAMPLE for a typical situation. After downloading
the y_serial module, create an instance which is associated with a
regular file::
import y_serial_v060 as y_serial
demo = y_serial.Main( '/tmp/agency.sqlite' )
# ^instance ^include suitable path for database file
# ... now we do some work producing an object obj, e.g.
obj = 911
That object could have been a dictionary with a complex structure,
but let's continue on for the insert::
demo.insert( obj, "#plan agent007 #london", 'goldfinger' )
# ^notes ^table
We label each object with "notes" which can be arbitrarily long
text (or UTF-8), containing keywords or tags, but excluding commas.
Within that file we specify a "table" (merely an organizational
subsector). Some time later, perhaps in another script, we will
want to select some object::
eg1 = demo.select( "agent00[1-7],#plan", 'goldfinger' )
# ^search values are space-sensitive
# and comma separated;
# arbitrarily many permitted in string.
print("Example 1: ", eg1)
# That reveals the _latest_ goldfinger plan
# which involves any one of the top seven agents
# anywhere in the world including London.
That's it... **only a few lines of Python code to store compressed
serialized objects in a database, and to selectively retrieve
them** (with optional regular expression, and remarkably, *without
writing any SQL commands*). DEAD SIMPLE -- *with only one module
imported*. Hopefully you see how widely this is applicable...
Installation and license
------------------------
Sole requirement: Python version 2.x where x is 5 or greater.
Download the latest version of the module at
`http://sourceforge.net/projects/yserial
<http://sourceforge.net/projects/yserial/>`_ and put it where your
Python can find it. No tar.gz or eggs here ;-) The module includes
the tutorial documentation within itself. You are free to use
*y_serial* under the BSD license. No monetary charge at all;
however, if you are a developer, please critically review the code.
More eyeballs lead to increased scrutiny, and thus greater
reliability.
Overview
--------
The purpose of y_serial is to keep data persistent. It is based on
key/value where the conceptual key is
- filename + table_name + primary_key + timestamp + notes
and the value is some object. (Ironically this is implemented using
a SQL database. ;-) **Our goal is to have the simplest possible
user interface in the foreground, yet have near optimal computing
in the background.**
By "objects" we generally mean Python objects, but we include
support for files (binary, image, etc.) and URL content (e.g.
webpages). Python objects are strings, dictionaries, lists,
tuples, classes, and instances. Objects are inserted into a
hierarchy: database file, table within that database, row within
table. Moreover, each object is annotated for reference by "notes"
and automatically timestamped.
*So what is happening in the background?* To minimize storage size,
each object is compressed. But before compression, we serialize the
object. The processing between y_serial and the database is handled
by the sqlite3 module **(all dependencies are standard issue
modules)**. Your program's interaction with y_serial normally will
not require writing SQL, although subqueries can be customized.
y_serial is written as a single Python module which reads like a
working tutorial and includes many tips and references. It's
instructive in the way it unifies the standard batteries:
- sqlite3 (as of Python v2.5)
- zlib (for compression)
- cPickle (for serializing objects)
- ["re" module is not used, instead we access much faster
SQLite functions for regular expressions]
Technicalities aside, you are spared from explicitly spelling out
many of the difficult protocol details: cursor/connection,
SQL/DB-API implementation, serialization, compression, search
algorithm, etc. -- for these are optimized to interact with speed,
security, and concurrency -- yet handled transparently.
Our module is faster than comparable approaches under PostgreSQL.
Among serialization methods, we found cPickle to be one of the
fastest, and so we have used it in a secure manner. Try y_serial
with a few million objects.
We recommend SQLite because it requires neither separate
installation nor a server process; also, it uses single normal
files (easy to backup or send), not an elaborate filesystem.
Moreover, in comparison to similar applications with MySQL or
PostgreSQL, SQLite is extremely fast and suits most purposes
wonderfully. Should you later decide to migrate out of SQLite,
y_serial can help port your objects elsewhere including other NoSQL
implementations.
*The means for insertion, organization by annotation, and finally
retrieval are designed to be* **simple to use**. The notes
effectively label the objects placed into the database. We can
then later query the database, for example, by *regex* (regular
expression) searching on notes, and placing the qualified objects
in a dictionary. The keys of this dictionary correspond to the
unique primary keys used in the database. If necessary we can
access the timestamp for each object. We can thus use Python code
to process the contents of the qualified dictionary, in effect, a
data subset. If the objects in that dictionary are themselves
dictionaries we are essentially dealing with *schema-less data*
(see the compelling Friendfeed case study in the module's
Endnotes).
To illustrate, let's continue our example by adding an object for
agent006::
obj = 411
demo.insert( obj, "agent006 #paris #plan", 'goldfinger' )
# We now can get a dictionary of objects
# which matches our search values:
#
eg2 = demo.selectdic( "agent00[1-7],#plan", 'goldfinger' )
print("Example 2: ", eg2)
#
# which should look like:
# {1: [1257874696, u'#plan agent007 #london', 911],
# 2: [1257874696, u'agent006 #paris #plan', 411] }
Notice that we used a different method called *selectdic* which
produces a dictionary whose keys are the unique primary keys
automatically assigned in the database. Inside the list are the
(unix) epoch timestamp, followed by (unicode) notes, then object.
This means that we can *work with flexible data subsets using
Python code rather than cumbersome SQL.*
Other features
--------------
Instead of using comma-separated values, as in our example so far,
we could have crafted a custom subquery and used a method called
*dicsub*.
Or we could just skip any subquery altogether. Here we pick out the
most recent n-th entry::
eg3 = demo.select( 0, 'goldfinger' )
print("Example 3: ", eg3)
The method called "*view*" will verbosely pretty-print deeply
nested structures::
demo.view( 5, 'goldfinger' )
# ^last m inserts (or use search string argument).
y_serial can also act like a persistent *QUEUE*. Whatever that is
retrieved can be deleted thereafter by appending "POP=True" at the
end of any applicable method::
eg4 = demo.select( 0, 'goldfinger', POP=True )
Object(s) can of course be deleted directly::
demo.delete( "agent00?", 'goldfinger' )
# ^where notes mention any single digit agent.
To get rid of stale data we could freshen a table and vacuum the
entire database via *clean*::
demo.clean( 365.5 , 'goldfinger' )
# ^retain one year-old or less prior to last insert.
To delete the entire table::
demo.droptable( 'goldfinger' )
Other useful methods are available:
- insert any external file (via *infile*). This is handy for
working with thousands of image files.
- insert anything on the web by URL (via *inweb*).
- insert in batches by generator (via *ingenerator*). This can
be used to rapidly store a series of computationally intense
results for quick retrieval at a later time.
For concurrency we can easily code for a farm of databases using
the module's copy functions. In general, your program can have
multiple interacting instances which control distinct database
files.
[*What's in beta?* In heavy concurrent situations (say, hundreds
of near-simultaneous writes per second), SQLite has a problem
because of the way it locks the database. We can alleviate the
problem by diffusing such operations across many databases (called
"barns" via class *Farm*), and then writing back to the target
database as a batch (which is far more efficient than single
writes) over time. The harvest time to reap the batch is
stochastic (see the "plant" method). That introduces some latency
in accessing the newest objects -- the cost for scaling up
concurrency.]
The class *Main* should be stable for all practical purposes.
If you run across any bugs, please kindly report them to the
`Tracker
<https://sourceforge.net/tracker/?group_id=277002&atid=1176411>`_.
For group discussions, check out the SourceForge link in the
left sidebar.
For other specialized features, please RTM "read the module" for
tips on usage.
Summary
-------
**y_serial = serialization + persistence. In a few lines of code,
compress and annotate Python objects into SQLite; then later
retrieve them chronologically by keywords without any SQL. Highly
useful NoSQL "standard" module for a database to store schema-less
data.**
_______________ TABLE OF CONTENTS:
- Preface with quick example
- Usage with SUMMARY of CLASSES, METHODS, and FUNCTIONS:
Base:
_______________ Attributes and methods for database setup.
Set path to database for all instances; db0 is default.
Connection and execution methods.
Insertion( Base ):
_______________ INSERT pz BLOB into DATABASE
inbatch( self, objseq, table=Base.tab0 ):
Pickle and compress sequence of annotated objects; insert.
* ingenerator( self, generate_objnotes, table=Base.tab0 ):
Pickle and compress via generator function, then insert.
** insert( self, obj, notes='#0notes', table=Base.tab0 ):
Pickle and compress single object; insert with annotation.
Annex( Insertion ):
_______________ Add macro-objects (files, URL content) to DATABASE
inweb( self, URL, notes='', table=Base.tab0 ):
Pickle and compress URL content, then insert into table.
* infile( self, filename, notes='', table=Base.tab0 ):
Pickle and compress any file, then insert contents into table.
Answer( Base ):
_______________ Single item answer shouted out.
shout( self, question, table=Base.tab0 ):
Shout a question; get a short answer.
** lastkid( self, table=Base.tab0 ):
Get primary key ID of the last insert.
lastsec( self, table=Base.tab0 ):
Get time in unix seconds of the last insert.
lastdate( self, table=Base.tab0 ):
Get local date/time of the last insert.
Util:
_______________ Utility methods for keys, subquery, comma
comma2list( self, csvstr, wild=True ):
Convert comma separated values to a parameter list.
Deletion( Base, Util ):
_______________ Deletion methods; also used for queue POP
deletesub( self, subquery, parlist=[], table=Base.tab0 ):
Delete row(s) matching the subquery.
deletekid( self, kid, table=Base.tab0 ):
Delete single row with primary key kid.
deletecomma( self, csvstr, table=Base.tab0, wild=True ):
Delete row(s): notes match comma separated values in string.
* delete( self, dual, table=Base.tab0, wild=True ):
Alias "delete": deletekid OR deletecomma.
droptable( self, table=Base.tab0 ):
Delete a table: destroys its structure, indexes, data.
Subquery( Util, Answer, Deletion ):
_______________ SUBQUERY table, get dictionary. POP QUEUE.
dicsub(self, subquery='', parlist=[], table=Base.tab0, POP=False):
Subquery table to get objects into response dictionary.
diclast( self, m=1, table=Base.tab0, POP=False ):
Get dictionary with last m consecutive kids in table.
diccomma( self, csvstr, table=Base.tab0, wild=True, POP=False ):
Get dictionary where notes match comma separated values.
* selectdic( self, dual=1, table=Base.tab0, POP=False ):
Alias "selectdic": diclast OR diccomma.
Display( Subquery ):
_______________ View subquery via pretty print
viewsub(self, subquery='', parlist=[], table=Base.tab0, POP=False):
Subquery, order keys, and print qualified dictionary.
viewlast( self, m=1, table=Base.tab0, POP=False ):
Print last m consecutive kids in table.
viewcomma(self, csvstr='', table=Base.tab0, wild=True, POP=False):
Print dictionary where notes match comma separated values.
* view( self, dual=1, table=Base.tab0, POP=False ):
Alias "view": viewlast OR viewcomma.
Latest( Display ):
_______________ Retrieve the latest qualified object "omax"
omaxsub(self, subquery='', parlist=[], table=Base.tab0, POP=False):
Get the latest object omax which matches subquery.
omaxlast( self, n=0, table=Base.tab0, POP=False ):
Most quickly get the latest n-th object using key index.
omaxcomma( self, csvstr, table=Base.tab0, wild=True, POP=False ):
Get latest object where notes match comma separated values.
** select( self, dual=0, table=Base.tab0, POP=False ):
Alias "select": omaxlast OR omaxcomma.
* getkid( self, kid, table=Base.tab0, POP=False ):
Retrieve a row given primary key kid, POP optional.
Oldest( Latest ):
_______________ Retrieve the oldest qualified object "omin"
ominfirst( self, n=0, table=Base.tab0, POP=False ):
Most quickly get the oldest n-th object using key index.
* fifo( self, table=Base.tab0 ):
FIFO queue: return oldest object, then POP (delete) it.
Care( Answer, Deletion ):
_______________ Maintenance methods
freshen( self, freshdays=None, table=Base.tab0 ):
Delete rows in table over freshdays-old since last insert.
vacuum( self ):
Defrag entire database, i.e. all tables therein.
- why VACUUM?
* clean( self, freshdays=None, table=Base.tab0 ):
Delete stale rows after freshdays; vacuum/defrag database.
Main( Annex, Oldest, Care ):
_______________ Summary for use of a single database.
copysub( subquery, parlist, tablex, tabley, dbx=Base.db0, dby=Base.db0 ):
Subselect from tablex, then copy to tabley (in another database).
copylast( m, tablex, tabley, dbx=Base.db0, dby=Base.db0 ):
Copy last m consecutive kids in tablex over to tabley.
* comma( *string_args ):
Join string-type arguments with comma (cf. csvstr, comma2list).
copycomma( csvstr, tablex, tabley, dbx=Base.db0, dby=Base.db0, wild=True ):
Subselect by comma separated values from tablex, then copy to tabley.
** copy( dual, tablex, tabley, dbx=Base.db0, dby=Base.db0, wild=True ):
Alias "copy": copylast OR copycomma
Farm:
_______________ Start a farm of databases for concurrency and scale.
__init__( self, dir=dir0, maxbarns=barns0 ):
Set directory for the farm of maxbarns database files.
farmin( self, obj, notes='#0notes', table=Base.tab0, n=0 ):
Insert an object with notes into barn(n).
harvest(self, dual, tablex,tabley, n, dby=Base.db0, wild=True, size=10):
After farmin, reap dual under tablex barn(n) by size expectation.
cleanfarm( self, freshdays=None, table=Base.tab0 ):
Delete stale rows after freshdays; vacuum/defrag barns.
* plant( self, obj, notes='#0notes', table=Base.tab0, dby=Base.db0 ):
FARM SUMMARY: farmin insert with generic self-cleaning harvest.
- Change log
- TODO list
- ENDNOTES: operational tips and commentary with references
- pz Functions for FILE.gz
- Database versus FILE.gz
- tester( database=Base.db0 ):
Test class Main for bugs.
- testfarm( dir=Farm.dir0, maxbarns=Farm.barns0 ):
Test class Farm for bugs. Include path for directory.
- Acknowledgements and Revised BSD LICENCE
_______________ CHANGE LOG
2010-08-20 v0.60: Certified to run under Python 2.6 series.
Edited the preface (also used for welcome page).
Added getkid. Cosmetic touch-up for view method.
2010-04-25 v0.53: Be sure to assign default database db0 in class Base.
2009-11-22 v0.52: Added plant (insert) method summarizing class Farm.
2009-11-19 v0.51: Added fifo (queue) method to class Oldest.
Added beta Farm (of databases!) for concurrency.
!! 2009-11-11 v0.50: TOTAL TOP-DOWN GENERALIZED CLASS REWRITE
of the final bottom-up all-function revision 28.
- Python objects now are warehoused among various database files
by design. Each instance of the class Main is associated with
a particular database file.
- Dropped *glob functions in favor of the *comma form (see
comma2list to see how this works exactly).
- Simplified the names of many functions since most of them
are now methods within some class.
- LIFO has been renamed as POP to reflect queue generalization.
2009-10-26 Revision 28: final bottom-up all-function version
which also archives prior change log.
!! = indicates change(s) which broke backward compatibility.
pz = pickled zlib compressed binary format.
_______________ TODO List
The sqlite3 module, maintained by <NAME>, has been updated
from version 2.3.2 in Python 2.5 to version 2.4.1 in Python 2.6.
[ ] - update code for threads when the following becomes public:
Contrary to popular belief, newer versions of sqlite3 do support
access from multiple threads. This can be enabled via optional
keyword argument "check_same_thread":
sqlite.connect(":memory:", check_same_thread = False)
However, their docs omit this option. --Noted 2010-05-24
[ ] - update code for Python version 3.x
When we run y_serial.tester() with the "-3" flag under Python 2.6,
there is only one message:
"DeprecationWarning: buffer() not supported in 3.x"
This actually is about how BLOBs are handled in the sqlite3 module,
and is being resolved by <NAME>,
see http://bugs.python.org/issue7723
Thereafter, we expect an easy transition to Python 3.
'''
# _______________ Variable settings with imports
DEBUG = False
# Here's how to EXECUTE TESTS. First, be sure to change the default
# database file to suit yourself; see assignment db0 in class Base.
# import y_serial_v053 as y_serial
# y_serial.tester( database )
# # ^for the principal class Main
# y_serial.testfarm( directory, maxbarns )
# ^for the beta version, not yet in Main.
import sqlite3 as ysql
# ^ for portability to another SQL database.
import pprint
# pretty print to Display nested arbitrary Python data structures.
# __________ pz FUNCTIONS for pickled compression
from six.moves import cPickle as yPickle
# ^ written in C, compatible with pickle but thousand times faster.
# But some situations may only allow pure-Python pickle module.
# The data stream produced by pickle and cPickle are identical.
# So take your pick for yPickle.
# [Future note: pickle in Python v3 will integrate cPickle.]
pickle_protocol = 2
# 0 = original ASCII protocol and is backwards compatible.
# 1 = old binary format which is also backwards compatible.
# 2 = introduced in Python 2.3, more efficient pickling of new-style classes.
# 3 = for Python 3 with explicit support for bytes objects and byte arrays,
# but it is not backward compatible with protocol 2.
# So we stay with protocol 2, which Python 3 still understands.
import zlib
# ^zlib compression for pickled items.
compress_level = 7
# 1 to 9 controlling the level of compression;
# 1 is fastest and produces the least compression,
# 9 is slowest and produces the greatest compression.
def pzdumps( obj ):
    '''Serialize obj with pickle, then zlib-compress the pickled bytes.'''
    pickled = yPickle.dumps( obj, pickle_protocol )
    return zlib.compress( pickled, compress_level )
    # ^ result is a binary string, ready for BLOB storage.
def pzloads( pzob ):
    '''Inverse of pzdumps: zlib-decompress the pz object, then unpickle.'''
    raw = zlib.decompress( pzob )
    return yPickle.loads( raw )
    # ^ pickle auto-detects the protocol used at dump time.
class Base:
    '''_______________ Essential attributes and methods for database setup.'''
    db0 = '/home/yaya/var/db/y_serial.sqlite'
    # ========================================================== ***
    # ^ be sure to specify an absolute path to the database file.
    # This is just your convenient DEFAULT DATABASE FILE.
    # Specify other such files explicitly when creating instances.
    #
    # [ Using an in-memory database ':memory:' will not work here
    # because we go in and out of connection as needed. ]
    tab0 = 'tmptable'
    # ^default SQL table for storing objects temporarily.
    TRANSACT = 'IMMEDIATE'
    # among: None (autocommit), 'DEFERRED' (default), 'IMMEDIATE', 'EXCLUSIVE'.
    #
    # We want to support transactions among multiple concurrent sessions, and
    # to AVOID DEADLOCK. For our purposes, such sessions should start out by:
    # "BEGIN IMMEDIATE TRANSACTION;"
    # to guarantee a (reserved) write lock while allowing others to read.
    # See _The Definitive Guide to SQLite_, chapters 4 and 5,
    # by <NAME> (2006, Apress) for the clearest explanation.
    TIMEOUT = 14
    # ^ in seconds (default: 5)
    # During multiple concurrent sessions, writing creates a certain type
    # of lock until a transaction is committed. TIMEOUT specifies how long
    # a connection should wait for that lock to be released until raising
    # an exception. Increase the wait if a very large amount of objects
    # is routinely inserted during a single session.

    def __init__( self, db=db0 ):
        '''Set path to database for all instances; db0 is default.'''
        self.db = db

    def proceed( self, sql, parlist=[[]] ):
        '''Connect, executemany, commit, then finally close.

        Raises IOError when the connect/execute/commit sequence fails.
        '''
        con = None
        cur = None
        # ^ pre-bind both names so the finally clause is safe even when
        #   connect() itself raises; previously the cleanup hit an
        #   UnboundLocalError and masked the real failure.
        try:
            con = ysql.connect( self.db, timeout = self.TIMEOUT,
                                isolation_level = self.TRANSACT )
            cur = con.cursor()
            cur.executemany( sql, parlist )
            # for an empty ^parameter list, use [[]].
            con.commit()
            # ^MUST remember to commit! else the data is rolled back!
        except Exception:
            # narrowed from a bare "except:" so SystemExit and
            # KeyboardInterrupt propagate; callers still see IOError.
            a = " !! Base.proceed did not commit. [Check db path.] \n"
            b = " Suspect busy after TIMEOUT, \n"
            c = " tried this sql and parameter list: \n"
            raise IOError("%s%s%s%s\n%s" % ( a, b, c, sql, parlist ))
        finally:
            if cur is not None:
                cur.close()
            if con is not None:
                con.close()
            # ^ very important to release lock for concurrency.

    def respond( self, klass, sql, parlist=[] ):
        '''Connect, execute select sql, get response dictionary.

        Each result row is fed to self.responder (defined in a subclass)
        which accumulates the response dictionary. Raises IOError on
        failure.
        '''
        con = None
        cur = None
        # ^ pre-bound for the same finally-safety reason as in proceed.
        try:
            con = ysql.connect( self.db, timeout = self.TIMEOUT,
                                isolation_level = self.TRANSACT )
            cur = con.cursor()
            response = {}
            for tupler in cur.execute( sql, parlist ):
                self.responder( klass, tupler, response )
                # ^ to be defined in a subclass
                # (mostly to process output from subqueries).
            # con.commit() intentionally omitted: read-only path.
        except Exception:
            a = " !! Base.respond choked, probably because \n"
            b = " object feels out of context. \n"
            c = " Tried this sql and parameter list: \n"
            raise IOError("%s%s%s%s\n%s" % (a, b, c, sql, parlist))
        finally:
            if cur is not None:
                cur.close()
            if con is not None:
                con.close()
        return response

    def createtable( self, table=tab0 ):
        '''Columns created: key ID, unix time, notes, and pzblob.'''
        a = 'CREATE TABLE IF NOT EXISTS %s' % table
        #b = '(kid INTEGER PRIMARY KEY, tunix INTEGER,' # commented out by <NAME>
        b = '(kid INTEGER PRIMARY KEY, tunix REAL,' # added by <NAME>
        c = 'notes TEXT, pzblob BLOB)'
        sql = ' '.join( [a, b, c] )
        try:
            self.proceed( sql )
            # try construct is useful for portability in standard
            # cases where clause "IF NOT EXISTS" is not implemented.
        except IOError:
            if DEBUG:
                print(" :: createtable: table exists.")
            # createtable is designed to be harmless if it
            # is left sitting in your script.
class Insertion( Base ):
    '''_______________ INSERT pz BLOB into DATABASE'''
    # For inbatch we shall assume that the "objseq" is a sequence
    # (tuple or list) of objnotes. An "objnotes" consists of a sequence
    # pairing of an object and its annotation. Example:
    # objseq = [ (obj1, 'First thing'), (obj2, 'Second thing') ]
    # Use an empty string like "" to explicitly blank out annotation.
    def inbatch( self, objseq, table=Base.tab0 ):
        '''Pickle and compress sequence of annotated objects; insert.

        objseq may be any iterable of (obj, notes) pairs; it is
        consumed exactly once.
        '''
        self.createtable( table )
        # ^ serves also to check table's existence.
        s = "INSERT INTO %s " % table
        #v = "VALUES (null, strftime('%s','now'), ?, ?)" # commented out by <NAME>
        ## ^SQLite's function for unix epoch time. # commented out by <NAME>
        v = "VALUES (null, ((julianday('now') - 2440587.5)*86400.0), ?, ?)" # added by <NAME>
        # This line above timestamps records with float, instead of integer. # added by <NAME>
        sql = ' '.join([s, v])
        def generate_parlist():
            # lazily turn each (obj, notes) pair into the DB-API
            # parameter list [notes, compressed-pickled-blob].
            for pair in objseq:
                obj, notes = pair
                yield [ notes, ysql.Binary(pzdumps(obj)) ]
            # ^ using generator for parameter list.
        self.proceed( sql, generate_parlist() )
        # inserting 100,000 rows takes about 10 seconds.

    # objseq can be generated on the fly. Just write a generator function,
    # and pass it along [for illustration, see copy].
    def ingenerator( self, generate_objnotes, table=Base.tab0 ):
        '''Pickle and compress via generator function, then insert.'''
        # generator should yield an objseq element like this: (obj, notes)
        self.inbatch( generate_objnotes, table )
        # ^ pass the generator straight through: inbatch iterates its
        #   objseq only once, so materializing it into a list first
        #   (the old "[x for x in generate_objnotes]") only wasted
        #   memory and defeated the streaming purpose of this method.
        # TIP: generate computationally intense results, then
        # pass them to ingenerator which will warehouse them. Instantly
        # access those pre-computed results later by subquery on notes.

    def insert( self, obj, notes='#0notes', table=Base.tab0 ):
        '''Pickle and compress single object; insert with annotation.'''
        self.inbatch( [(obj, notes)], table )
        # CAVEAT: if you have *lots* of objects to insert individually
        # this repeatedly will be slow because it commits after every
        # insert which causes sqlite to sync the inserted data to disk.
        # REMEDY: prepare your annotated objects in objseq form,
        # then use inbatch or ingenerator instead.
class Annex( Insertion ):
    '''_______________ Add macro-objects (files, URL content) to DATABASE'''
    def inweb( self, URL, notes='', table=Base.tab0 ):
        '''Pickle and compress URL content, then insert into table.'''
        # put URL address in quotes including the http portion.
        if not notes:
            # let notes be an empty string if you want the URL noted,
            # else notes will be that supplied by argument.
            notes = URL
        from six.moves.urllib.request import urlopen
        # ^ six shim replaces the Python-2-only "import urllib2" so
        #   this method also works under Python 3 (the file already
        #   depends on six, see the yPickle import at the top).
        webpage = urlopen( URL )
        try:
            webobj = webpage.read()
        finally:
            webpage.close()
            # ^ release the network resource explicitly.
        self.insert( webobj, notes, table )

    def file2string( self, filename ):
        '''Convert any file, text or binary, into a string object.'''
        # put filename in quotes including the path.
        with open( filename, 'rb' ) as f:
            # 'read binary' but also works here for text;
            # line-end conversions are suppressed.
            # with-block guarantees the handle is closed even on error.
            return f.read()
        # ^ Note: even if the file originally was iterable,
        # that STRING OBJECT will NOT be iterable.
        # If the file was text, one could regain iteration by writing
        # back to a disk file, or by doing something in-memory, e.g.
        # import cStringIO as yString
        # # or StringIO
        # inmemoryfile = yString.StringIO( strobj )
        # for line in inmemoryfile:
        # ... process the line ...
        # inmemoryfile.close()
        # Iteration is not necessary if the string object is
        # to be used in its entirety, e.g. some boilerplate text.
        # Getting text snippets from a database is generally
        # faster than reading equivalent text files from disk.

    def infile( self, filename, notes='', table=Base.tab0 ):
        '''Pickle and compress any file, then insert contents into table.'''
        # put filename in quotes including any needed path.
        # file can be binary or text, including image.
        if not notes:
            # let notes be an empty string if you want filename noted,
            # else notes will be that supplied by argument.
            notes = filename
        self.insert( self.file2string( filename ), notes, table )
class Answer( Base ):
    '''_______________ Single item answer shouted out.'''
    # see class Selection for reading and retrieval.
    def responder( self, klass, tupler, response ):
        '''(see def respond in abstract superclass Base.)'''
        # different behavior depending on subclass...
        if klass == 'Answer':
            response[0] = tupler
            # ^ we only expect a single answer.
        elif klass == 'Subquery':
            # ^ elif: the klass branches are mutually exclusive, so no
            #   need to re-test after an 'Answer' match.
            kid, tunix, notes, pzblob = tupler
            obj = pzloads( pzblob )
            response[kid] = [ tunix, notes, obj ]
            # each item in response DICTIONARY has a key kid <=
            # (same as in the table), and it is a list consisting of <=
            # timestamp, notes, and original object <=
            # (decompressed and unpickled). <=

    def shout( self, question, table=Base.tab0 ):
        '''Shout a question; get a short answer.'''
        sql = "SELECT ( %s ) FROM %s" % (question, table)
        response = self.respond( 'Answer', sql )
        return response[0][0]
        # None, if the table is empty.

    def lastkid( self, table=Base.tab0 ):
        '''Get primary key ID of the last insert (0 for an empty table).'''
        maxkid = self.shout( "MAX( kid )", table )
        if maxkid is None:
            # ^ identity test, not "== None": MAX() yields SQL NULL on
            #   an empty table.
            maxkid = 0
        return maxkid

    def lastsec( self, table=Base.tab0 ):
        '''Get time in unix seconds of the last insert (0 if empty).'''
        tmax = self.shout( "MAX( tunix )", table )
        if tmax is None:
            tmax = 0
        return tmax
        # e.g. 1256856209
        # seconds ^ elapsed since 00:00 UTC, 1 January 1970

    # _____ Automatic timestamp ("tunix")
    # As each object enters the database we also add an
    # epoch timestamp. Rather than import another Python
    # module for that purpose, we use SQLite's (faster)
    # functions: strftime and datetime.
    def lastdate( self, table=Base.tab0 ):
        '''Get local date/time of the last insert.'''
        tmax = self.lastsec( table )
        if not tmax:
            return ' :: lastdate not applicable.'
        # build the datetime query only when there is a timestamp.
        q = "datetime( %s, 'unixepoch', 'localtime')" % tmax
        return self.shout( q, table )
        # e.g. u'2009-10-29 15:43:29'
class Util:
    '''_______________ Utility methods for keys, subquery, comma'''
    def reverse_dickeys( self, dictionary, recentfirst=True ):
        '''List dictionary keys: sorted reverse (chronological) order.'''
        return sorted( dictionary, reverse=recentfirst )
        # ^ sorted() replaces the old keys()/sort()/reverse() sequence,
        #   which breaks under Python 3 where dict.keys() returns a
        #   view object without a .sort() method.

    # _____ SUBQUERY regex style for LIKE
    #
    # Here we want the dictionary to consist of items which have
    # notes containing " gold " (using LIKE):
    #
    # dic = I.dicsub("WHERE notes LIKE '% gold %'")
    #
    # Percent % in LIKE is the regex equivalent of star "*"
    # ^wildcard for 0 or more characters
    # Underscore _ in LIKE is the regex equivalent of period "."
    # ^single character
    # Escape ! in LIKE is the regex equivalent of backslash "\"
    # (or one can specify an escape character
    # by appending, for example, "ESCAPE '\'"
    # at the end of the subquery.)
    # [a-c] same in LIKE for character ranges, exclude by "^"
    # _____ SUBQUERY regex style for GLOB
    #
    # For SQLite: the GLOB operator is similar to LIKE but uses the
    # Unix file globbing [dissimilar to grep] syntax for its wildcards.
    # ? in GLOB is the regex equivalent of period "."
    #
    # GLOB is case sensitive, unlike LIKE. <=
    # But LIKE is case sensitive for unicode characters beyond ASCII.
    # Both GLOB and LIKE may be preceded by the NOT keyword
    # to invert the sense of the test.
    def notesglob( self, parlist ):
        '''Create a CONJUNCTIVE subquery using GLOB with placeholder.'''
        # ^i.e. each term in parlist is an "AND" search term.
        s = ""
        for i in parlist:
            s += "AND notes GLOB ? "
            # use placeholder ^ rather than i for security!
        return s.replace('AND', 'WHERE', 1)
        # replace only the first occurrence

    # Everyone is going to have a unique style of writing out their notes
    # so that it can be efficiently searched. Tags are helpful, but they
    # are optional within notes. Tags are useful for creating indexes.
    # Define a "TAG" to be text in notes prefixed by "#"
    def comma2list( self, csvstr, wild=True ):
        '''Convert comma separated values within string to a parameter list
        >>> # !! white spaces within string are significant !!
        >>> print( comma2list('#paris, agent007 ,#scheme') )
        ['*#paris*', '* agent007 *', '*#scheme*']
        Empty or single entry (without comma) csvstr is acceptable.
        Empty string '' will select everything if wild is True.
        Unlike official csv, internal quotes should not be used;
        for simplicity, comma itself is not meant to be escaped.
        '''
        # wild will conveniently include stars on both ends...
        if wild:
            parlist = [ '*%s*' % i for i in csvstr.split(',') ]
        else:
            parlist = [ '%s' % i for i in csvstr.split(',') ]
            # manually add wildcards as needed to csvstr for faster regex.
        return parlist
    #
    # TIP: for faster execution, list most discriminating values first;
    # use wild=False for exact search wherever possible.
    #
    # To form csvstr out of string variables a, b, c:
    # csvstr = ','.join( [a, b, c] )
    # See function comma after Main class.
class Deletion( Base, Util ):
    '''_______________ Deletion methods; also used for queue POP'''
    def deletesub( self, subquery, parlist=[], table=Base.tab0 ):
        '''Delete row(s) matching the subquery.'''
        sql = 'DELETE FROM %s %s' % ( table, subquery )
        # use ? placeholder(s) for security^
        self.proceed( sql, [ parlist ] )

    def deletekid( self, kid, table=Base.tab0 ):
        '''Delete single row with primary key kid.'''
        subquery = 'WHERE kid = ?'
        self.deletesub( subquery, [ kid ], table )

    def deletecomma( self, csvstr, table=Base.tab0, wild=True ):
        '''Delete row(s): notes match comma separated values in string.'''
        parlist = self.comma2list( csvstr, wild )
        self.deletesub( self.notesglob(parlist), parlist, table )

    def delete( self, dual, table=Base.tab0, wild=True ):
        '''Alias "delete": deletekid OR deletecomma.'''
        # assuming dual is either an ^integer OR ^csvstr string...
        if isinstance( dual, int ):
            self.deletekid( dual, table )
        else:
            self.deletecomma( dual, table, wild )

    def droptable( self, table=Base.tab0 ):
        '''Delete a table: destroys its structure, indexes, data.'''
        sql = 'DROP TABLE %s' % table
        try:
            self.proceed( sql )
        except IOError:
            # ^ narrowed from a bare "except:": proceed signals every
            #   failure as IOError, and a missing table is harmless here.
            if DEBUG:
                print(" ?? droptable: no table to delete.")
        return " :: droptable: done."
    # Delete a SQLite database file like a normal file at OS level.
class Subquery( Util, Answer, Deletion ):
    '''_______________ SUBQUERY table, get dictionary. POP QUEUE.'''
    # "kid" serves as key for both retrieved dictionaries and database.
    #
    # SECURITY NOTICE: per the DB-API recommendations, subquery should
    # use ? as the parameter placeholder to prevent SQL injection
    # attacks; see Endnotes. A placeholder takes values only, never a
    # clause, and (obscure fact) table names cannot be parametized.
    # parlist sequentially supplies the placeholder values; it should
    # be the empty list [] when no placeholders are used.
    def dicsub(self, subquery='', parlist=[], table=Base.tab0, POP=False):
        '''Subquery table to get objects into response dictionary.'''
        sql = 'SELECT kid, tunix, notes, pzblob FROM %s %s' \
              % ( table, subquery )
        response = self.respond( 'Subquery', sql, parlist )
        # POP=True means "treat as queue": rows matching the subquery
        # are retrieved and then DELETED. POP=False retrieves only.
        if POP:
            self.deletesub( subquery, parlist, table )
        return response
    def diclast( self, m=1, table=Base.tab0, POP=False ):
        '''Get dictionary with last m consecutive kids in table.'''
        floor = self.lastkid( table ) - m
        return self.dicsub( 'WHERE kid > ?', [floor], table, POP )
    def diccomma( self, csvstr, table=Base.tab0, wild=True, POP=False ):
        '''Get dictionary where notes match comma separated values.'''
        parlist = self.comma2list( csvstr, wild )
        return self.dicsub( self.notesglob( parlist ), parlist,
                            table, POP )
    def selectdic( self, dual=1, table=Base.tab0, POP=False ):
        '''Alias "selectdic": diclast OR diccomma.'''
        # dual is either an integer count OR a csvstr string;
        # wild is constrained to True for dual usage.
        if isinstance( dual, int ):
            return self.diclast( dual, table, POP )
        return self.diccomma( dual, table, True, POP )
class Display( Subquery ):
    '''_______________ View subquery via pretty print'''
    def viewsub(self, subquery='', parlist=[], table=Base.tab0, POP=False):
        '''Subquery, order keys, and print qualified dictionary.'''
        dic = self.dicsub( subquery, parlist, table, POP )
        dickeylist = self.reverse_dickeys( dic )
        diclen = len( dickeylist )
        print("\n :: View in reverse chronological order :: ")
        print( " :: ----------------------------------------")
        #      ^^^^  "grep -v '^ :: '" to get rid of labels.
        if diclen:
            for kid in dickeylist:
                [ tunix, notes, obj ] = dic[ kid ]
                print(" :: kid: %s (%s secs)" % (kid, tunix))
                # Fixed 2to3 artifact: print((label, notes)) displayed
                # a tuple repr instead of the intended labeled line.
                print(" :: notes: ", notes)
                if isinstance( obj, str ):
                    if len( obj ) > 1000:
                        # avoid flooding the console with huge strings.
                        end = '[... Display limited ...]'
                        obj = ' '.join( [obj[0:1000], end] )
                    print(obj)
                else:
                    try:
                        pprint.pprint( obj )
                    except Exception:
                        # narrowed from bare except so Ctrl-C still works.
                        print(" !! Display: object not printable.")
            print(" :: ----------------------------------------")
            print(" :: Display: MATCHED %s objects." % diclen)
            if POP:
                print(" and POP deleted them.")
        else:
            print(" !! Display: NOTHING matched subquery !!")
    def viewlast( self, m=1, table=Base.tab0, POP=False ):
        '''Print last m consecutive kids in table.'''
        kid = self.lastkid( table ) - m
        self.viewsub('WHERE kid > ?', [kid], table, POP )
    def viewcomma(self, csvstr='', table=Base.tab0, wild=True, POP=False):
        '''Print dictionary where notes match comma separated values.'''
        parlist = self.comma2list( csvstr, wild )
        subquery = self.notesglob( parlist )
        self.viewsub( subquery, parlist, table, POP )
    def view( self, dual=1, table=Base.tab0, POP=False ):
        '''Alias "view": viewlast OR viewcomma.'''
        # dual is either an integer count OR a csvstr string;
        # wild is constrained to True for dual usage.
        if isinstance( dual, int ):
            self.viewlast( dual, table, POP )
        else:
            self.viewcomma( dual, table, True, POP )
        return ''
class Latest( Display ):
    '''_______________ Retrieve the latest qualified object "omax" '''
    # TIP :: snippets from omaxsub could be helpful in one's program.
    def omaxsub(self, subquery='', parlist=[], table=Base.tab0, POP=False):
        '''Get the latest object omax which matches subquery.

        Returns None when nothing matched.
        '''
        dic = self.dicsub( subquery, parlist, table )
        dickeylist = self.reverse_dickeys( dic )
        diclen = len( dickeylist )
        # count how many matched subquery
        if diclen:
            keymax = dickeylist[0]
            #        ^MAX DIC KEY of the LATEST matching subquery.
            #  dic[keymax][0] corresponds to tunix.
            #  dic[keymax][1] corresponds to notes.
            omax = dic[keymax][2]
            #      ^this is the LATEST OBJECT matching subquery.
            if POP:
                #  ^queue-like deletion of only the single object:
                self.deletekid( keymax, table )
        else:
            omax = None
        return omax
    def omaxlast( self, n=0, table=Base.tab0, POP=False ):
        '''Most quickly get the latest n-th object using key index.'''
        #  n = 0,1,2,... assuming consecutive kids.
        #  Avoiding LIKE or GLOB enhances performance in large tables.
        #  Also we are not reading an entire table into memory since a
        #  blank subquery would have put an entire table into dictionary.
        subquery = "WHERE kid=(SELECT MAX(kid) - ? FROM %s)" % table
        obj = self.omaxsub( subquery, [n], table, POP )
        # identity test "is None" per PEP 8 (was "== None").
        if DEBUG and obj is None:
            print(" !! omaxlast: that kid does not exist.")
        return obj
    def omaxcomma( self, csvstr, table=Base.tab0, wild=True, POP=False ):
        '''Get latest object where notes match comma separated values.'''
        parlist = self.comma2list( csvstr, wild )
        subquery = self.notesglob( parlist )
        return self.omaxsub( subquery, parlist, table, POP )
    def select( self, dual=0, table=Base.tab0, POP=False ):
        '''Alias "select": omaxlast OR omaxcomma.'''
        # dual is either an integer index OR a csvstr string.
        if isinstance( dual, int ):
            obj = self.omaxlast( dual, table, POP )
        else:
            wild = True
            #    ^constrained for dual usage
            obj = self.omaxcomma( dual, table, wild, POP )
        return obj
    def getkid( self, kid, table=Base.tab0, POP=False ):
        '''Retrieve a row given primary key kid, POP optional.'''
        subquery = 'WHERE kid = ?'
        return self.omaxsub( subquery, [ kid ], table, POP )
    # added 2010-06-07 can't believe this was missing ;-)
class Oldest( Latest ):
    '''_______________ Retrieve the oldest qualified object "omin" '''
    def ominfirst( self, n=0, table=Base.tab0, POP=False ):
        '''Most quickly get the oldest n-th object using key index.'''
        #  n = 0,1,2,... assuming consecutive kids.
        subquery = "WHERE kid=(SELECT MIN(kid) + ? FROM %s)" % table
        obj = self.omaxsub( subquery, [n], table, POP )
        #     ^works because its dictionary is single entry.
        # identity test "is None" per PEP 8 (was "== None").
        if DEBUG and obj is None:
            print(" !! ominfirst: that kid does not exist.")
        return obj
    def fifo( self, table=Base.tab0 ):
        '''FIFO queue: return oldest object, then POP (delete) it.'''
        n = 0
        POP = True
        return self.ominfirst( n, table, POP )
class Care( Answer, Deletion ):
    '''_______________ Maintenance methods'''
    def freshen( self, freshdays=None, table=Base.tab0 ):
        '''Delete rows in table over freshdays-old since last insert.

        freshdays may be fractional, e.g. 2.501 days. None means
        infinity (delete nothing); 0 means nothing will remain.
        '''
        # identity test "is not None" per PEP 8 (was "!= None").
        if freshdays is not None:
            max_tunix = self.lastsec( table )
            freshsecs = int( freshdays * 86400 )
            expiration = max_tunix - freshsecs
            sql = "WHERE tunix <= ?"
            self.deletesub( sql, [expiration], table )
    def vacuum( self ):
        '''Defrag entire database, i.e. all tables therein.'''
        self.proceed( 'VACUUM' )
        # _____ why VACUUM?
        #  Dropped entities leave empty space behind, and frequent
        #  inserts/updates/deletes fragment the file. VACUUM copies the
        #  database contents to a temporary file and reloads from the
        #  copy, eliminating free pages and making table data
        #  contiguous. -- sqlite.org
        #  N.B. - Surprising how much file size will shrink.
    def clean( self, freshdays=None, table=Base.tab0 ):
        '''Delete stale rows after freshdays; vacuum/defrag database.'''
        self.freshen( freshdays, table )
        self.vacuum()
        return ''
class Main( Annex, Oldest, Care ):
    '''_______________ Summary for use of a single database.'''
    # Pure aggregation class: all behavior is inherited from the mix-in
    # hierarchy (insert/annex, retrieve latest/oldest, maintenance);
    # no additional methods are defined here.
    pass
# Base
# Insertion(Base)
# Annex(Insertion)
# Util
# Answer( Base)
# Deletion(Base, Util)
# Subquery(Util, Answer, Deletion)
# Display(Subquery)
# Latest(Display)
# Oldest(Latest)
# Care(Answer, Deletion)
# _______________ COPY functions (demonstration outside of Main class)
# also note how ingenerator is employed usefully.
def copysub( subquery, parlist, tablex, tabley, dbx=Base.db0, dby=Base.db0 ):
    '''Subselect from tablex, then copy to tabley (in another database).'''
    # tablex lives in dbx, tabley in dby; objects flow from *x to *y.
    # Guard clause: source and destination must differ somewhere.
    if tablex == tabley and dbx == dby:
        print(" !! copysub: table or database name(s) must differ.")
        return
    X = Main( dbx )
    dic = X.dicsub( subquery, parlist, tablex )
    # Chronological key order preserves the inserted ordering on copy.
    dickeylist = X.reverse_dickeys( dic, recentfirst=False )
    if not dickeylist:
        # nothing matched the subquery.
        if DEBUG:
            p = ( tablex, tabley )
            print(" !! copysub: NOTHING from %s to %s." % p)
        return
    Y = Main( dby )
    def objnotes_pairs():
        # dic[key] == [tunix, notes, obj]; yield (obj, notes) pairs.
        for key in dickeylist:
            yield ( dic[key][2], dic[key][1] )
    # Generator copies objects & notes from the qualified dictionary.
    # Timestamps are fresh, i.e. not preserved from the old table.
    Y.ingenerator( objnotes_pairs(), tabley )
    if DEBUG:
        p = ( len( dickeylist ), tablex, tabley )
        print(" :: copysub: %s objects from %s to %s." % p)
def copylast( m, tablex, tabley, dbx=Base.db0, dby=Base.db0 ):
    '''Copy last m consecutive kids in tablex over to tabley.'''
    # Rows with kid above this floor are the m most recent ones.
    floor = Answer().lastkid( tablex ) - m
    copysub( 'WHERE kid > ?', [floor], tablex, tabley, dbx, dby )
def comma( *string_args ):
    '''Join string-type arguments with comma (cf. csvstr, comma2list).'''
    # Arguments may include regular-expression text -- essential for
    # building csvstr search patterns.
    joined = ','.join( string_args )
    return joined
def copycomma( csvstr, tablex, tabley, dbx=Base.db0, dby=Base.db0, wild=True ):
    '''Subselect by comma separated values from tablex, then copy to tabley.'''
    helper = Util()
    parlist = helper.comma2list( csvstr, wild )
    copysub( helper.notesglob( parlist ), parlist,
             tablex, tabley, dbx, dby )
def copy( dual, tablex, tabley, dbx=Base.db0, dby=Base.db0, wild=True ):
    '''Alias "copy": copylast OR copycomma'''
    # dual is either an integer count OR a csvstr string.
    if not isinstance( dual, int ):
        copycomma( dual, tablex, tabley, dbx, dby, wild )
    else:
        copylast( dual, tablex, tabley, dbx, dby )
# ============================= BETA =================================================
import random
class Farm:
    '''_______________ Start a farm of databases for concurrency and scale.'''
    # (Dependencies: Insertion, Deletion classes; copy function.)
    # Reduces probability of locked database, prior to insert, by
    # writing to a random "barn" database within "farm" directory.
    # A barn is intended for temporary storage until harvest at
    # which time the objects are moved to some central database.
    #
    # The number of barns increases concurrent write possibilities.
    # The variable "size" determines frequency of harvest.
    # The two variables should be optimized for your situation.
    dir0 = '/var/tmp/yaya/db/y_serial_farm'
    barns0 = 9
    # ^ can be greatly increased without any tax on memory,
    #   for it only increases the number of files in dir.
    #   To set maxbarns, estimate the number of writes per second,
    #   and divide that by, say, 4.
    def __init__( self, dir=dir0, maxbarns=barns0 ):
        '''Set directory for the farm of maxbarns database files.'''
        # NOTE: "dir" shadows the builtin but is kept for backward
        # compatibility with existing keyword callers.
        # Be sure it exists at OS level with appropriate permissions.
        self.dir = dir
        if not self.dir.endswith( '/'):
            self.dir += '/'
        self.maxbarns = maxbarns
    def barn( self, n ):
        '''Prepend directory to numbered database filename.'''
        return ''.join( [self.dir, "barn%s.sqlite" % n ] )
        #  e.g. /var/tmp/yaya/db/y_serial_farm/barn7.sqlite
    def farmin( self, obj, notes='#0notes', table=Base.tab0, n=0 ):
        '''Insert an object with notes into barn(n).'''
        #  n should be random in range(maxbarns), see plant method.
        I = Insertion( self.barn(n) )
        I.insert( obj, notes, table )
    def reap( self, dual, tablex, tabley, n, dby=Base.db0, wild=True ):
        '''Move dual under tablex in barn(n) to tabley in dby.'''
        try:
            copy( dual, tablex, tabley, self.barn(n), dby, wild )
            D = Deletion( self.barn(n) )
            D.delete( dual, tablex, wild )
        except Exception:
            # Narrowed from bare except: a locked/absent barn is the
            # expected failure; best-effort skip is deliberate here.
            if DEBUG:
                print(" :: reap: skipped barn %s " % n)
    # Objects enter a barn singularly via farmin, however, they move out
    # rapidly by generator via reap. The harvest method spells out the
    # stochastic condition for reap to occur (after each farmin insert).
    # This technique avoids frequent and expensive query of barn contents.
    # * See plant method below for typical usage.
    def harvest(self, dual, tablex,tabley, n, dby=Base.db0, wild=True, size=10):
        '''After farmin, reap dual under tablex barn(n) by size expectation.'''
        # Prioritize: decreasing size increases the probability of reap.
        # If dual='' and we harvest after every farmin, we can statistically
        # expect size number of objects moved whenever reap is triggered.
        # (For size=10, reap may move from 1 to 47 objects, 10 on average.)
        if size * random.random() < 1:
            self.reap( dual, tablex, tabley, n, dby, wild )
        else:
            if DEBUG:
                print(" :: harvest: nothing from barn%s" % n)
    # TIP: use reap to flush remaining objects at the close of your script.
    def cleanfarm( self, freshdays=None, table=Base.tab0 ):
        '''Delete stale rows after freshdays; vacuum/defrag barns.'''
        for n in range( self.maxbarns ):
            try:
                C = Care( self.barn(n) )
                C.clean( freshdays, table )
            except Exception:
                # narrowed from bare except; skipping a busy barn is fine.
                if DEBUG:
                    print(" :: cleanfarm: skipped barn %s" % n)
        if DEBUG:
            print(" :: cleanfarm: VACUUMed barns in %s" % self.dir)
    def plant( self, obj, notes='#0notes', table=Base.tab0, dby=Base.db0 ):
        '''FARM SUMMARY: farmin insert with generic self-cleaning harvest.'''
        size = 10
        wild = True
        #      ^constrains, also: dual='' and only one table name.
        if obj == 'reap_ALL_BARNS':
            # A bit odd, but it beats writing out the iteration later... ;-)
            notes = ' :: plant: object and notes were not inserted.'
            for n in range( self.maxbarns ):
                self.reap( '', table, table, n, dby, wild )
        else:
            n = random.randrange( self.maxbarns )
            # why random? to minimize conflicts with other farmin operations.
            self.farmin( obj, notes, table, n )
            #    ^inserts obj into table in some random barn(n).
            # print("farmin barn%s" % n)
            self.harvest( '', table, table, n, dby, wild, size )
            #    ^harvest whenever around size accumulates in a random barn.
            if 100000 * random.random() < 1:
                # vacuum of all barns approximately every 100,000 inserts.
                self.cleanfarm()
# =========================== ENDNOTES ===============================================
# What problem does y_serial solve beyond serialization?
#
# "pickle reads and writes file objects, it does not handle the
# issue of naming persistent objects, nor the (even more complicated)
# issue of concurrent access to persistent objects. The pickle module
# can transform a complex object into a byte stream and it can
# transform the byte stream into an object with the same internal
# structure. Perhaps the most obvious thing to do with these byte
# streams is to write them onto a file, but it is also conceivable
# to send them across a network or store them in a database."
# http://docs.python.org/library/pickle.html
# y_serial takes a couple of minutes to write a MILLION annotated objects
# (that includes serialization and compression),
# which will consume at least 37MB for a single sqlite file,
# on a rather antiquated 32-bit commodity desktop,
# blink of an eye for read access using GLOB regex.
# Generally much faster than comparable DB-API use with PostgreSQL.
# "To BLOB or Not To BLOB: Large Object Storage in a Database or a Filesystem?
# by <NAME>; <NAME>; <NAME>
# Paper submitted on 26 Jan 2007 to http://arxiv.org/abs/cs.DB/0701168
#
# This paper looks at the question of fragmentation [...] objects smaller than
# 256KB are best stored in a database while objects larger than 1M are best
# stored in the filesystem. Between 256KB and 1MB, the read:write ratio and
# rate of object overwrite or replacement are important factors."
#
# Generally speaking, database queries are faster than file opens,
# however, filesystems are optimized for streaming large objects.
# That paper shows how important it is to keep a lean database,
# which is why we wrote the sqliteclean function.
#
# Objects under 1MB will do fine since they will be compressed
# before insertion into the database. If you have many larger objects,
# we included the alternative gzip compressed file solution.
# SUPPLEMENT
# _______________ pz FUNCTIONS for FILE.gz [not database related]
# Using compression with pickling.
# Source: recipe 7.3, Python Cookbook, second edition.
#
# ___ATTN___ Individual pickled items are already compressed by above.
# Below we place those items in a file which is gzipped.
# So there are two compression stages...
# the second may not squeeze out much, but we might as
# well gzip as long as we are writing to a file.
import gzip
# ^compression for files.
def pzdump(filename, *objects):
    '''Pickle and zlib-compress objects, then save them in a gz file.'''
    # with-statement guarantees the file is closed even if a dump
    # raises mid-way (the original leaked the handle on error).
    with gzip.open(filename, 'wb') as fil:
        for obj in objects:
            yPickle.dump( pzdumps(obj), fil, pickle_protocol)
# The protocol is recorded in the file together with the data, so
# Pickler.load can figure it out. Just pass it an instance of a file
# or pseudo-file object with a read method, and Pickler.load returns
# each object that was pickled to the file, one after the other,
# and raises EOFError when the file's done. We wrap a generator
# around Pickler.load, so you can simply loop over all recovered
# objects with a for statement, or, depending on what you need,
# you can use some call such as list(pzload('somefile.gz'))
# to get a list with all recovered objects as its items.
def pzload(filename):
    '''Iterate zlib-compressed pickled objects from a gz file.

    Yields each still-compressed item in turn; decompress later
    as needed. Raises nothing at EOF -- the iteration simply ends.
    '''
    # with-statement closes the file even when the consumer abandons
    # the generator early; the original only reached fil.close()
    # after a full run to EOFError.
    with gzip.open(filename, 'rb') as fil:
        while True:
            try:
                #  ^hang on to the compressed version for now,
                #   decompress later as needed.
                yield yPickle.load(fil)
            except EOFError:
                # end of pickled stream: stop iteration cleanly.
                break
# Example of iteration use...
# for i in pzload(filename): print pzloads(i)
# # each item gets printed after decompression.
def pzlist(filename):
    '''List of zlib-compressed pickled objects from a gz file.'''
    # materialize the pzload generator; items remain compressed.
    items = list( pzload( filename ) )
    return items
def oblist(filename):
    '''List of zlib-decompressed pickled objects from a gz file.'''
    results = []
    for compressed in pzload( filename ):
        # decompress each item back into a live Python object.
        results.append( pzloads( compressed ) )
    return results
# Another example of iteration use, ASSIGN VARIABLES...
# [x, y, z] = oblist(filename)
# # assuming three items in the file.
# ================== DATABASE versus FILE.gz =======================
#
# Putting all pz objects into a file would be suitable
# where the collection of such objects is fairly static
# and not so large in quantity. Use a database like
# SQLite or PostgreSQL if:
# - the situation is dynamic, i.e. pz objects need
# to be appended or deleted often.
# - particular pz objects are needed
# (using files, all pz objects have to be
# unpacked, then picked over).
# - the database can annotate or index the contents of
# of pz objects (use SQL to then cherry-pick).
# - the objects are generally under 1MB; see Endnotes.
#
# # _______________ 2009-08-24 warehouse objects in a file.gz
# import y_serial
# fname = '/tmp/y_serial.gz'
#
# item = 'This is a string to TEST some file.gz'
# y_serial.pzdump( fname, item, item, item, item, item )
# print y_serial.oblist( fname )
#
# ==================================================================
# Why BLOB dictionaries for storing schema-less data?
#
# * Dictionaries are very suitable objects
# which are variable length, arbitrarily nestable,
# and can contain arbitrary objects...
#
# <NAME> wrote a wonderful post about how Friendfeed uses
# MySQL to store schema-less data,
# http://bret.appspot.com/entry/how-friendfeed-uses-mysql
# which got me thinking about the details and its use for
# any Python program. (Thanks very much, Bret! ;-)
#
# Friendfeed uses a database with schema-less data, where
# dictionaries are zlib-pickled and then inserted into MySQL.
# Other tables then index the primary. Easier to shard, and
# avoids JOINs -- thus conceptually, python code on the
# dictionary structure replaces SQL code. The database
# merely becomes a fancy hash table with fast access.
# Nice for rapid development because dictionaries are
# easily modifiable and themselves have fast key access.
# Better for database load distribution and maintenance,
# plus it avoids scary table conversions requiring downtime.
# "Stop calling me NoSQL" by <NAME>
# http://blog.dhananjaynene.com/2009/10/stop-calling-me-nosql/
#
# You see unlike RDBMS, I don't require that data be clearly
# split into tables, columns and rows. I can work with data the way
# it is most naturally represented: as a tree of individual data
# fields, lists, arrays, dictionaries, etc. Also I do not require
# that you always clearly define each and every possible schema
# element before being able to store data corresponding to the
# schema. I can happily accept a schema dynamically or even
# work without a schema. Some of my early forms were based on
# key value pairs stored as B-Trees (eg. Berkeley DB). Over the
# years people have figured out ways to represent the data as
# a set of decomposed document elements, store data spread across
# a cluster, replicate it for better availability and fault tolerance,
# and even perform post storage processing tasks using map-reduce
# sequences. But really what separates me from my cousin and other
# storage systems is that I don't make demands on the data -- I take
# it in its naturally found form and then store it, replicate it,
# slice it, dice it and glean information out of it. And therein
# lies my true identity -- I will work with data the way the data
# is best represented with all its arbitrary inconsistencies and
# inabilities to always clearly specify a constraining schema.
# What can the PICKLE/cPickle module store? and what about json?
#
# * All the native datatypes that Python supports: booleans, integers,
# floating point numbers, complex numbers, strings, bytes objects,
# byte arrays, and None.
# * Lists, tuples, dictionaries, and sets containing
# any combination of native datatypes.
# * Lists, tuples, dictionaries, and sets containing any combination of
# lists, tuples, dictionaries, and sets containing any combination
# of native datatypes (and so on, to the maximum nesting level
# that Python supports).
# * Functions, classes, and instances of classes (with CAVEATS):
# pickle can save and restore class instances transparently,
# however the class definition must be importable and live in the
# same module as when the object was stored. picklable functions
# and classes must be defined in the top level of a module.
# [ Most likely reason why pzget gets CHOKED.
# (Hack: insert the defining text, then exec it later.) ]
#
# Good reference: http://diveintopython3.org/serializing.html
# Also includes a comparative review of the json module
# introduced as of Python v2.6 -- which is text-based serialization.
#
# _____ json versus pickle
# Few reasons why we opted for pickle instead of json:
# - human-readability is not a primary concern
# since the database could care less.
# - json does not distinguish between tuples and lists.
# - json cannot handle complex Python objects
# without additional en/decoding.
# (and why worry about internal structures?)
# - since json uses utf-8, this may fail in some cases:
# obj == json.loads(json.dumps(obj))
# - we are not handing off the serialized item
# to be read by another language.
# - as for SECURITY, we are not accepting any serialized
# item from an untrusted source into the database.
# y_serial's particular use of pickle is safe.
# _____ short digression on pickle security risk
#
# Generally, never unpickle an untrusted string whose origins are dubious,
# e.g. strings read from a socket or public webpage. So should one
# sanitize and encrypt such strings before the pickle stage?
# No, that would not be necessary.
#
# pickle uses a simple stack language that allows the creation
# of arbitrary python structures, and execute them. This stack
# language allows you to import modules (the 'c' symbol), and
# apply arguments to callables (the 'R' symbol), thus causing code
# to be run. Combine this with the python built-in methods eval
# and compile and you have the perfect vehicle for an
# "unpickle attack."
#
# For more details, also see excellent article by <NAME>,
# http://nadiana.com/python-pickle-insecure
# [And thanks so much, Nadia, for personally clarifying
# the difference between untrusted text string and
# the string derived from pickling such a thing.]
#
# Some naive methods proposed to "encrypt" strings before pickle:
#
# import string
# rot13key = string.maketrans(
# 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz',
# 'NOPQRST<KEY>IJ<KEY>')
# def rot13( text ):
# '''Because Python v3 will discontinue .encode('rot13')'''
# return text.translate( rot13key )
# # notice that, text == rot13(rot13( text ))
#
# # Also look at: .encode('base64') with .decode('base64')
#
# But such effort is misplaced, because the security risk is not pickling
# untrusted data, but rather unpickling untrusted pickles. The malicious
# person must be in a position to be able to modify the pickle or replace
# it somehow (e.g. over a communication channel that is not secure).
# Pickles over public channels should be crytographically signed;
# <NAME> cites http://mumrah.net/2009/09/making-pythons-pickle-safer/
#
# Never load untrusted pickle streams. Obey this frequently cited warning:
# "Never unpickle data received from an untrusted or unauthenticated
# source."
# y_serial complies, so don't worry, it's safe ...
# (just keep your database secure from tampering ;-)
# What does Google use in-house? not json, but rather "PROTOCOL BUFFERS,"
# http://code.google.com/apis/protocolbuffers/docs/pythontutorial.html
# Very nice for multi-lingual serialization, e.g. with C++
# and Java, but its focus is schema-less messages,
# not Python objects.
# What about YAML?
# Saw http://pyyaml.org/wiki/PyYAML, which apparently
# offers "high-level API for serializing and deserializing
# native Python objects." In short, we did not have the time
# to test its reliability. Speed is relatively slow.
# In summary, SERIALIZATION methods which are human readable,
#  e.g. json or YAML, are much slower than cPickle.
# If readability by other languages, e.g. C++ or Java, is
# not a necessary requirement, cPickle takes the cake.
# SQLite "maximum" number of TABLES
#
# "The more tables you have, the slower the first query will run
# and the more memory SQLite will use. For long-running applications
# where the startup time is not a significant factor, 100s or
# 1000s of tables is fine. For a CGI script that starts itself
# up anew several times per second, then you should try to keep
# the number of tables below a 100, I think. -- <NAME>"
# http://www.mail-archive.com/<EMAIL>/msg14057.html
# SQLite INTEGER PRIMARY KEY (regarding our kid)
#
# For performance reasons we did not use the AUTOINCREMENT option
# which guarantees kid uniqueness, thus: "the largest ROWID is equal to
# the largest possible integer 9223372036854775807 then the database
# engine starts picking candidate ROWIDs at random until it finds
# one that is not previously used." -- That's a lot of rows!
#
# Interesting to note that under the hood, "kid" as INTEGER PRIMARY KEY
# is just an alias for special column names ROWID, _ROWID_, or OID.
# SQLite will work great as the database engine for low to medium traffic
# WEBSITES (which is to say, 99.9% of all websites). Any site that gets
# fewer than 100K hits/day should work fine with SQLite. SQLite has been
# demonstrated to work with 10 times that amount of traffic.
#
# see SQLite Appropriate Uses : http://www.sqlite.org/whentouse.html
# SQLite SUBQUERY limits
#
# The length of the LIKE or GLOB pattern is limited within SQLite to
# SQLITE_MAX_LIKE_PATTERN_LENGTH bytes. The default value of this
# limit is 50000. A typical computer can evaluate even a pathological
# LIKE or GLOB pattern of such size relatively quickly.
#
# Tip: the "notes" field could contain TAGS using the hashmark #.
# That would focus searching for keywords.
# Python Cookbook, second ed. #7.12. BLOB in SQLite does NOT work:
#
# "The PySQLite Python extension offers function sqlite.encode
# to let you insert binary strings in SQLite databases."
# That function has been superseded by sqlite.Binary
# And now ? replaces %s for more secure syntax.
# DB-API string format v. SQL injection attacks
#
# 2009-08-29 comp.lang.python. Tim Cross illustrates why it's so important
# to use the DB API's own escaping functions.
#
# >> "somestring %s" % "foo" will work.
#
# > BAD IDEA when assembling SQL, unless you _like_ SQL-injection attacks:
# >
# > sql = "select * from users where name='%s' and password='%s'"
# >
# > # get some values from an untrusted user:
# > name = "administrator"
# > password = "' <PASSWORD>; drop table <PASSWORD>; --"
# >
# > cursor.execute(sql % (name, password))
# > # uh-oh!
#
# Of course, that OR 1=1 attack is at the publicly exposed prompt...
# that clause attaches to the WHERE subquery which always evaluates true!
# oh, crap -- never knew how easy it was.
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/5fdaf7d1b46e6699
# __________ PARAMETER SUBSTITUTION for sqlite3
#
# First how does one find out the substitution style per the DB-API?
# >>> import sqlite3
# >>> print sqlite3.paramstyle
# qmark
#
# That means that a question mark "?" is used for SQL parameter substitions, and
# the second argument to the execute method is a *sequence* i.e. either a list
# or a tuple. In the latter case, a typical gotcha error is using
# (myvariable) instead of (myvariable,)
# for a single variable substitution. Thus using a list is easier: [myvariable]
#
# paramstyle can vary, for example, it's 'pyformat' for psycopg (PostgreSQL).
# The string attribute paramstyle is apparently read-only.
# _____ GOTCHA! Parameter substitution for sqlite3
#
# After hours of insanity, I find out this very, very obscure fact:
# table names *cannot be parametized* !!
# Indeed, one could easily think that a table name was a parameter,
# but then an injection attempt should not have access to other tables.
# So be sure to never expose the table variable in public apps.
#
# It also appears that under strict paramstyle, a placeholder cannot
# substitute a WHERE clause. This is very understandable since that's
# what a SQL injection attack wants to exploit.
# <NAME>: "I find the named-parameter binding style much more readable
# -- and sqlite3 supports it:
#
# c.execute('SELECT * FROM atable WHERE newID=:t', locals())
#
# Note: passing {'t': t} or dict(t=t) instead of locals() would be more
# punctiliously correct, but in my opinion it would interfere with readability
# when there are several parameters and/or longer names."
# __________ Batch processing (re inbatch)
#
# SQLite does fsync() 3 times per transaction to guarantee data integrity.
# So batch statements update the database in transactions
# (BEGIN TRANSACTION; ... COMMIT TRANSACTION;). Only 3 fsync are
# required per transaction, not per statement, and one also gets
# multi-statement atomicity, so all the changes make it to disk or
# none does. cf. Firefox3 performance hit:
# http://shaver.off.net/diary/2008/05/25/fsyncers-and-curveballs/
#
# <NAME> concurs: "Using a transaction is the fastest way to
# update data in SQLite. After each transaction the SQLite engine
# closes and opens the database file. When SQLite opens a database file
# it populates the SQLite internal structures, which takes time.
# So if you have 100 updates and don't use a transaction then SQlite
# will open and close the database 100 times. Using transactions
# improves speed. Use them."
#
# Ok, that said, it would seem to help if we did the following:
# cur.execute( "BEGIN TRANSACTION" )
# ... update stuff ...
# cur.execute( "COMMIT TRANSACTION" )
# but one should NOT do so, because the sqlite3 module has implicitly
# already taken care of this issue when it connects and con.commit()
#
# By default, sqlite3 opens transactions implicitly
# before a DML statement (INSERT/UPDATE/DELETE/REPLACE),
# and commits transactions implicitly before anything other
# than (SELECT/INSERT/UPDATE/DELETE/REPLACE).
#
# You can control the kind of "BEGIN" statements via the
# isolation_level parameter to the connect call,
# or via the isolation_level property of connections.
#
# If you want autocommit mode, then set isolation_level
# to None [which does NOT begin transaction]. Otherwise
# the default will result in a plain "BEGIN" statement.
# One could also set it to an isolation level
# supported by SQLite: DEFERRED, IMMEDIATE or EXCLUSIVE.
#
# y_serial uses IMMEDIATE; the differences are explained here:
# http://www.sqlite.org/lang_transaction.html
# _____ ENCODE TEXT in UTF-8
#
# Gerhard, back in 2007 said about the precursor to sqlite3:
# "SQLite databases store text in UTF-8 encoding. If you use pysqlite,
# and always use unicode strings, you will never have any problems with
# that. pysqlite does not rap on your knuckles if you store arbitrary
# encodings in the database, but you will feel sorry once you try to
# fetch the data."
# For y_serial, this does not pertain to the object themselves because
# they are BLOBs, but it's relevant to the attached annotation notes.
#
# [sqlite3 module will return Unicode objects for TEXT. If you wanted
# to return bytestrings instead, you could set con.text_factory to str.]
# _____ converting timestamp ("tunix") into human form
#
# We rely on SQLite for time functions but in your own program you may
# find these python functions useful to convert unix epoch seconds:
#
# import time
# # e.g. let tunix = 1254458728
#
#    def tunixdate( tunix ):
#         return time.localtime(tunix)[:3]
#         #  e.g.  (2009, 10, 1)
#
#    def tunixclock( tunix ):
#         return time.localtime(tunix)[3:6]
#         #  e.g.  (21, 45, 28)
#
#    def tunixtuple( tunix ):
#         return time.localtime(tunix)[:6]
# # e.g. (2009, 10, 1, 21, 45, 28)
# ================================== TESTER ==========================================
def tester( database=Base.db0 ):
    '''Test class Main for bugs. Include path for database file.

    Runs a scripted sequence of inserts, selects, pops, deletes and
    copies against a scratch table "ytest", incrementing ``ipass`` after
    each successful step; exactly 20 passes means the module checks out.
    '''
    ipass = 0
    print("Starting tester()... for debugging y_serial module.")
    if not DEBUG:
        print("[DEBUG switch is currently False.]")
    print("[Note: specify default database via db0 in class Base.]")
    print("Creating instance using database...")
    I = Main( database )
    # NOTE(review): the doubled parentheses below print a *tuple*
    # (a 2to3 conversion artifact), not a single formatted string.
    print((" using database:", database))
    ipass += 1
    I.createtable( 'ytest' )
    print(" created table: ytest")
    ipass += 1
    # I.droptable( 'ytest' )
    # ^ comment out line to test a brand new table.
    # ==================================================================
    print("INSERTING: 5 objects...")
    def generate_testitems( n ):
        # Generator of (object, notes) pairs for ingenerator().
        for i in range( n ):
            objnotes = ( i, "testitem-%s" % i )
            yield objnotes
            # ^yield, not return, for generators.
    I.ingenerator( generate_testitems(2), 'ytest' )
    ipass += 1
    # --------------------------------
    # Three sample payloads: a tuple, a dict and a plain string.
    tmp1 = ("Part of 3-tuple.", 98, 'Encode text in UTF-8.' )
    tmp2 = { 'spam' : 2 , 'eggs' : 43 }
    tmp3 = 'I aspire to be stringy.'
    I.inbatch([(tmp1, 'test #tuple'), (tmp2, 'test dictionary')], 'ytest')
    ipass += 1
    I.insert(tmp3, 'random string', 'ytest')
    ipass += 1
    # --------------------------------
    lsec = I.lastsec( 'ytest' )
    print((" Checking epoch second of last insert: ", lsec))
    ipass += 1
    ldate = I.lastdate( 'ytest' )
    print((" Checking local date/time of last insert: ", ldate))
    ipass += 1
    lkid = I.lastkid( 'ytest' )
    print((" Checking last kid PRIMARY KEY: ", lkid))
    ipass += 1
    # print(" (Note: delete* methods v0.50 have passed inspection.)")
    # ==================================================================
    print(" (Inserted and selected objects should be equivalent.)")
    print(" Trying omaxsub ...")
    got2 = I.omaxsub("WHERE notes GLOB ?", ['random*'], 'ytest')
    if got2 == tmp3:
        print("passed test: subquery.")
        ipass += 1
    else:
        print("TEST FAIL! subquery.")
    # --------------------------------
    print(" Trying omaxlast via .select ...")
    got1 = I.select( 1, 'ytest' )
    if got1['eggs'] == tmp2['eggs']:
        print("passed test: seek key ID.")
        ipass += 1
    else:
        print("TEST FAIL! seek key ID.")
    # --------------------------------
    print(" Trying omaxcomma ...")
    got3 = I.omaxcomma( comma('rand*','*ring'), 'ytest' )
    if got3 == tmp3:
        print("passed test: comma and notesglob.")
        ipass += 1
    else:
        print("TEST FAIL! comma and notesglob.")
    # ==================================================================
    if DEBUG:
        print(" =>> OCULAR TEST, verify Display of tuple:")
        I.viewcomma( '*#tuple,test*', 'ytest', wild=False )
    # --------------------------------
    # POP=True deletes the retrieved row; must run unconditionally so the
    # key-count checks below (and the final ipass total of 20) hold.
    print(" Eyeball random string with queue POP=True:")
    print(I.omaxcomma( "rand*,*ring", 'ytest', POP=True ))
    if I.lastkid( 'ytest' ) == lkid - 1 :
        print("passed test: POP deleted row as expected.")
        ipass += 1
    else:
        print("TEST FAIL! row not deleted per POP.")
    lkid = I.lastkid( 'ytest' )
    print((" Current last kid PRIMARY KEY: ", lkid))
    gotl = I.getkid( lkid, 'ytest' )
    if gotl == tmp2:
        print("passed test: getkid.")
        ipass += 1
    else:
        print("TEST FAIL! getkid.")
    # --------------------------------
    print(" Trying .select with default POP=False ...")
    got4 = I.select( '#tuple,test', 'ytest' )
    if got4[1] == 98:
        print("passed test: comma2list with wild=True.")
        ipass += 1
    else:
        print("TEST FAIL! comma2list with wild=True.")
    # ==================================================================
    print(" (Note: infile v0.50 has passed inspection.)")
    # Test infile separately since it requires an external file.
    # 2009-09-14 v0.21:
    # pzinfile working fine with text files.
    # Round-trip on a binary file produced matching SHA256 signatures.
    print(" (Note: inweb v0.50 has passed inspection.)")
    # Test inweb separately since it requires an external website.
    # 2009-09-20 v0.22
    # HTML from python.org appears fine with newlines preserved.
    print("----------------------------------------------------------------")
    print("DELETING rows older than 30 minutes from ytest.")
    I.freshen( 0.0208, 'ytest' )
    # ^= 30mins expressed in days.
    ipass += 1
    print("DROPPING table ytest2.")
    I.droptable( 'ytest2' )
    ipass += 1
    # print(" (Note: copysub and copycomma v0.50 have passed inspection.)")
    print("COPYING table ytest to ytest2.")
    copy( '', 'ytest', 'ytest2' )
    # however, they are not necessarily identical for kids may differ.
    ipass += 1
    print(" Assert copy and fifo methods:")
    # Drain both tables in FIFO order; every popped pair must match.
    obj1 = True
    while obj1 != None:
        obj1 = I.fifo( 'ytest' )
        obj2 = I.fifo( 'ytest2' )
        assert obj1 == obj2, " :: tester: copy FAIL."
    print("PASSED, objects equivalent.")
    ipass += 1
    assert I.lastkid('ytest') == 0, " :: tester: fifo FAIL."
    print(" (ytest and ytest2 should be empty due to iterated fifo.)")
    ipass += 1
    print("VACUUMing the entire database.")
    I.clean()
    ipass += 1
    print("----------------------------------------------------------------")
    # print("ipass =", ipass)
    if ipass == 20:
        # ^increment if you added a test ;-)
        print(" *** tester compiled: PASSED -- verify results above. ***")
    else:
        print(" !!! tester summary: FAILED! -- y_serial BROKEN.")
def testfarm( dir=Farm.dir0, maxbarns=Farm.barns0, noobs=500 ):
    '''Test class Farm for bugs. Include path for directory.

    Exercises farmin/reap round-trips and a bulk plant/reap_ALL_BARNS
    cycle across barn databases; exactly 6 passes means the Farm layer
    checks out.  NOTE(review): parameter ``dir`` shadows the builtin.
    '''
    print("\n======================== testfarm ==============================")
    ipass = 0
    if not DEBUG:
        print("[DEBUG switch is currently False.]")
    F = Farm( dir, maxbarns )
    print((" directory:", F.dir))
    print((" maxbarns:", F.maxbarns))
    ipass += 1
    door = 0
    # Store an object through the farm, then read it straight back
    # from the underlying barn database.
    F.farmin( 2009, 'testfarm farmin', 'ytest', door )
    D = Main( Farm.barn(F, door))
    assert 2009 == D.select(0, 'ytest'), "farmin: FAIL"
    ipass += 1
    # Reap the object from the barn into a separate target database.
    testbarn = F.dir + 'testbarn.sqlite'
    F.reap( 'farmin', 'ytest', 'ytest', door, testbarn, wild=True )
    T = Main( testbarn )
    assert 2009 == T.select(0, 'ytest'), "reap: FAIL"
    ipass += 1
    print("passed: farmin and reap.")
    D.droptable( 'ytest' )
    T.droptable( 'ytest' )
    print("----------------------------------------------------------------")
    print("TESTING plant: %s fresh objects. [Stand-by ...]" % noobs)
    for i in range( noobs ):
        F.plant( 'myobj', 'plant-%s' % i, 'ytest', testbarn )
    ipass += 1
    print((" lastkid in target database:", T.lastkid( 'ytest' )))
    print("Next, reap_ALL_BARNS...")
    F.plant( 'reap_ALL_BARNS', '', 'ytest', testbarn )
    allkids = T.lastkid( 'ytest' )
    print((" lastkid in target database:", allkids))
    # Every planted object must end up in the target database.
    assert noobs == allkids, "reap_ALL_BARNS: FAIL."
    ipass += 1
    print("Cleaning up after plant objects.")
    T.clean( 0, 'ytest' )
    F.cleanfarm( 0, 'ytest' )
    ipass += 1
    print("----------------------------------------------------------------")
    # print("ipass =", ipass)
    if ipass == 6:
        # ^increment if you added a test ;-)
        print(" *** testfarm compiled: PASSED -- verify results above. ***")
    else:
        print(" !!! testfarm summary: FAILED! -- y_serial BROKEN.")
if __name__ == "__main__":
    # This module is meant to be imported, not executed directly.
    print("\n :: THIS IS A MODULE for import -- not for direct execution! \n")
    # BUG FIX: ``raw_input`` was removed in Python 3 (this file's print
    # calls are already 2to3-converted); ``input`` is the equivalent.
    input('Enter something to get out: ')
# ============================ Acknowledgements ======================================
# - Special thanks to <NAME> for original inspiration and battle-tested
# case study of Friendfeed.
#
# - <NAME> for his assistance and his Cookbook for
# serving up delicious Python meals.
# SQLite is a C library that provides a disk-based database that doesn't
# require a separate server process. <NAME> deserves huge credit
# for putting SQLite in the public domain: solving 80% of data persistence
# issues, using only 20% of the effort required by other SQL databases.
# Thus SQLite is the most widely deployed SQL database engine in the world.
# The sqlite3 module was written by <NAME> and provides a SQL interface
# compliant with the Python DB-API 2.0 specification described by `PEP 249
# <http://www.python.org/dev/peps/pep-0249/>`_. His effort -- updating that
# module from version 2.3.2 in Python 2.5 to version 2.4.1 in Python 2.6 -- has
# been crucial to y_serial. We are counting on him to resolve `bug 7723
# <http://bugs.python.org/issue7723>`_ for an easy transition to Python 3.
# zlib was written by <NAME> (compression) and <NAME>
# (decompression). Jean-loup is also the primary author/maintainer of gzip(1);
# Mark is also the author of gzip's and UnZip's main decompression routines and
# was the original author of Zip. Not surprisingly, the compression algorithm
# used in zlib is essentially the same as that in gzip and Zip, namely, the
# 'deflate' method that originated in PKWARE's PKZIP 2.x.
# =========================== Revised BSD License ====================================
#
# Copyright (c) 2009, y_Developers, http://yserial.sourceforge.net
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of this organization nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
#!/usr/bin/env python
"""
Code to plot a contour from an MCMC chain
Author: <NAME> (2013)
Modified: <NAME> (12 August 2013)
"""
import sys,os
import numpy
import pylab
from scipy import interpolate
#from lumfunc import *
import line_profiler
from utils import *
#from settings import *
import matplotlib
from matplotlib.path import Path
#-------------------------------------------------------------------------------
def findconfidence(H):
    """
    Given a 2-d histogram of the likelihood, return the peak bin value
    together with the bin heights bounding the 95% and 68% confidence
    regions: the smallest bin value such that bins at or above it
    enclose 95% (resp. 68%) of the total counts.
    """
    heights = numpy.sort(H.ravel())
    total = sum(heights)

    def level(tail_fraction):
        # Walk up the sorted bin heights until the discarded low tail
        # holds `tail_fraction` of the counts; the remaining bins then
        # enclose the complementary confidence region.
        running = 0
        for h in heights:
            running += h
            if running >= tail_fraction * total:
                return h

    return max(heights), level(0.05), level(0.32)
#-------------------------------------------------------------------------------
def contour(chain, p, **kwargs):
    """Backwards-compatible alias for :func:`contourSingle`."""
    return contourSingle(chain, p, **kwargs)
#-------------------------------------------------------------------------------
def contourSingle(chain,p,**kwargs):
    """
    Plot 2-d confidence contours for two parameters of an MCMC chain.

    Arguments:
        chain -- array of samples, one equally-weighted sample per row
        p     -- pair of column indices to plot against each other
    Keyword arguments:
        labels  -- list of axis-label strings, indexed by column
        col     -- tuple of two colours for the contours
        line    -- if True, draw line contours instead of filled ones
        outfile -- if given, save the figure there instead of showing it
    """
    # !!!! BEWARE THE BINSIZE --- PLOT IS A STRONG FUNCTION OF THIS
    binsize=50
    H,xedges,yedges=numpy.histogram2d(chain[:,p[0]],chain[:,p[1]],bins=(binsize,binsize))
    # Flatten the histogram into parallel x/y/z lists
    # (bin lower edges paired with their counts).
    x=[]
    y=[]
    z=[]
    for i in range(len(xedges[:-1])):
        for j in range(len(yedges[:-1])):
            x.append(xedges[:-1][i])
            y.append(yedges[:-1][j])
            z.append(H[i, j])
    # Optional spline smoothing of the histogram surface (disabled).
    SMOOTH=False
    if SMOOTH:
        sz=50
        smth=80e6
        spl = interpolate.bisplrep(x, y, z, s=smth)
        X = numpy.linspace(min(xedges[:-1]), max(xedges[:-1]), sz)
        Y = numpy.linspace(min(yedges[:-1]), max(yedges[:-1]), sz)
        Z = interpolate.bisplev(X, Y, spl)
    else:
        X=xedges[:-1]
        Y=yedges[:-1]
        Z=H
    #I think this is the weird thing I have to do to make the contours work properly
    # (expand the 1-d edge vectors into 2-d coordinate grids, a la meshgrid)
    X1=numpy.zeros([len(X), len(X)])
    Y1=numpy.zeros([len(X), len(X)])
    for i in range(len(X)):
        X1[ :, i]=X
        Y1[i, :]=Y
    X=X1
    Y=Y1
    N100,N95,N68 = findconfidence(Z)
    if 'col' in kwargs:
        col=kwargs['col']
    else:
        col =('#a3c0f6','#0057f6') #A pretty blue
    if 'labels' in kwargs:
        labels=kwargs['labels']
    else:
        labels = ['x', 'y']
    pylab.clf()
    if 'line' in kwargs and kwargs['line']==True:
        pylab.contour(X, Y,Z,levels=[N95,N68,N100],colors=col, linewidth=100)
    else:
        pylab.contourf(X, Y,Z,levels=[N95,N68,N100],colors=col)
    pylab.xlabel(labels[p[0]],fontsize=22)
    pylab.ylabel(labels[p[1]],fontsize=22)
    if 'outfile' in kwargs:
        outfile=kwargs['outfile']
        pylab.savefig(outfile)
        #pylab.close()
    else:
        pylab.show()
    return
#-------------------------------------------------------------------------------
def contourTri(chain,**kwargs):
    """
    Plot a triangle ("corner") plot of 2-d confidence contours for every
    parameter pair of an MCMC chain, with 1-d histograms on the diagonal.

    Arguments:
        chain -- array of samples, one equally-weighted sample per row
    Keyword arguments:
        labels      -- list of parameter-label strings (one per column)
        labelDict   -- optional mapping from label to display string
        col         -- tuple of colours for the contour levels
        line        -- if True, draw line contours instead of filled ones
        outfile     -- e.g. 'triangle.png'; save instead of show
        binsize     -- histogram bins per axis (default 50)
        reconstruct -- (bins, counts) tuple; plot source-count reconstruction
        autoscale   -- if False, use ``ranges`` for axis limits
        ranges      -- dict of [lo, hi] plot ranges keyed by parameter name
        title       -- plot title (e.g. the output directory)
        truth       -- dict of true parameter values keyed by name

    Returns the chain (as ``bundle``).  NOTE: this file is Python 2
    (print statements below).
    """
    # Collate the contour-region info
    bundle=chain
    # Optional hack to truncate the axis of column C_COL at a hard limit.
    TRUNCATE_C=False
    TRUNCATE_C_LIMIT=2.0e7#1.0e4
    C_COL=1#0
    FONTSIZE=4; ROTATION=60.0
    FIGSIZE=(8.27,11.69); DPI=400
    AXIS_LABEL_OFFSET=-0.45#-0.5
    #pylab.gcf().subplots_adjust(left=0.2)
    # !!!! BEWARE THE BINSIZE --- PLOT IS A STRONG FUNCTION OF THIS
    if 'binsize' in kwargs:
        binsize=kwargs['binsize']
    else:
        binsize=50
    print 'Using binsize = %i' % binsize
    if 'labels' in kwargs:
        labels=kwargs['labels']
        parameters=labels # How did this ever work without??
    else:
        labels = ['x', 'y']
    if 'ranges' in kwargs:
        ranges=kwargs['ranges']
    else:
        ranges=None
    if 'title' in kwargs:
        title=kwargs['title']
    else:
        title=''
    if 'autoscale' in kwargs:
        autoscale=kwargs['autoscale']
    else:
        autoscale=True
    p = range(len(labels))
    pairs = trianglePairs(p)
    nparams = len(p)
    # Start setting up the plot
    ipanel=0; ax={}
    pylab.clf()
    # One off-diagonal panel per parameter pair.
    for panel in pairs:
        ipanel+=1
        H,xedges,yedges=numpy.histogram2d(chain[:,panel[0]],chain[:,panel[1]],bins=(binsize,binsize))
        # Flatten histogram into parallel x/y/z lists (edges + counts).
        x=[]
        y=[]
        z=[]
        for i in range(len(xedges[:-1])):
            for j in range(len(yedges[:-1])):
                x.append(xedges[:-1][i])
                y.append(yedges[:-1][j])
                z.append(H[i, j])
        # Optional spline smoothing of the histogram surface (disabled).
        SMOOTH=False
        if SMOOTH:
            sz=50
            smth=80e6
            spl = interpolate.bisplrep(x, y, z, s=smth)
            X = numpy.linspace(min(xedges[:-1]), max(xedges[:-1]), sz)
            Y = numpy.linspace(min(yedges[:-1]), max(yedges[:-1]), sz)
            Z = interpolate.bisplev(X, Y, spl)
        else:
            X=xedges[:-1]
            Y=yedges[:-1]
            Z=H
        #I think this is the weird thing I have to do to make the contours work properly
        # (expand 1-d edge vectors into 2-d coordinate grids, a la meshgrid)
        X1=numpy.zeros([len(X), len(X)])
        Y1=numpy.zeros([len(X), len(X)])
        for i in range(len(X)):
            X1[ :, i]=X
            Y1[i, :]=Y
        X=X1
        Y=Y1
        N100,N95,N68 = findconfidence(Z)
        assert([N95,N68,N100]==sorted([N95,N68,N100]))
        #Z=reversed(Z); X=reversed(X); Y=reversed(Y)
        # NOTE(review): list.sort() returns None, so this assigns None to
        # ``levels`` -- harmless only because ``levels`` is rebuilt below.
        levels=[N68,N95,numpy.inf].sort()
        if 'col' in kwargs:
            col=kwargs['col']
        else:
            #col =('#FFFFFF','#a3c0f6','#0057f6') #A pretty blue (pale then dark)
            col =('#a3c0f6','#0057f6')
        #ccol=[x for y, x in sorted(zip(col, [N95,N68,N100]))]
        # Now construct the subplot
        ax[ipanel]=pylab.subplot2grid((nparams,nparams),panel[::-1]) # Reverse quadrant
        #levels=[N68,N95,N100,numpy.inf].sort()
        # Fix levels https://github.com/dfm/corner.py/pull/73
        levelshiftfix=1.0e-4
        levels=[N68-levelshiftfix,N95,N100+levelshiftfix,numpy.inf]
        #for n,l in enumerate(levels[:-2]):
        #    print n,l,col[n]
        #print levels
        levels.sort()
        #print levels
        #print col
        if 'line' in kwargs and kwargs['line']==True:
            CS=pylab.contour(X,Y,Z,levels=levels,colors=col,linewidth=100)
        else:
            CS=pylab.contourf(X,Y,Z,levels=levels,colors=col)
        # Identify points lying within 68 percent contour region
        #print ipanel,bundle.shape,chain.shape
        #v=CS.collections[0].get_paths()[0].vertices
        #w=CS.collections[1].get_paths()[0].vertices
        #print v[:,0]
        #print v[:,1]
        #b=bundle[:,[panel[0],panel[1]]]
        #mask=Path(v).contains_points(b)
        #mask2=Path(w).contains_points(b)
        #print panel[0],panel[1],b[:,0].size,b[:,0][mask].size,b[:,0][mask2].size,labels[panel[0]],labels[panel[1]]
        # Mark the true parameter values, if supplied.
        if 'truth' in kwargs and kwargs['truth'] is not None:
            truth=kwargs['truth']
            pylab.plot(truth[labels[panel[0]]],truth[labels[panel[1]]],'r+',\
                markersize=20)
        if 'labelDict' in kwargs and kwargs['labelDict'] is not None:
            labelDict=kwargs['labelDict']
        else:
            labelDict=dict((name,name) for name in parameters)
        # Set the axis labels only for left and bottom:
        #print ax[ipanel].get_xlabel(),ax[ipanel].get_ylabel()
        if panel[1] == (nparams-1):
            ax[ipanel].set_xlabel(labelDict[labels[panel[0]]],fontsize=8)
            ax[ipanel].xaxis.set_label_coords(0.5,AXIS_LABEL_OFFSET) # align axis labels
            x_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
            ax[ipanel].xaxis.set_major_formatter(x_formatter)
        else:
            ax[ipanel].set_xlabel('')
            ax[ipanel].get_xaxis().set_ticklabels([])
        if panel[0] == 0:
            ax[ipanel].set_ylabel(labelDict[labels[panel[1]]],fontsize=8)
            ax[ipanel].yaxis.set_label_coords(AXIS_LABEL_OFFSET,0.5) # align axis labels
            y_formatter = matplotlib.ticker.ScalarFormatter(useOffset=False)
            ax[ipanel].yaxis.set_major_formatter(y_formatter)
        else:
            ax[ipanel].set_ylabel('')
            ax[ipanel].get_yaxis().set_ticklabels([])
        # Set plot limits
        if autoscale:
            # HACK FOR C ONLY:
            if TRUNCATE_C and panel[0]==C_COL:
                xxlo,xxhi=ax[ipanel].xaxis.get_data_interval()
                if xxhi>TRUNCATE_C_LIMIT:
                    pylab.xlim(xxlo,TRUNCATE_C_LIMIT)
                    ax[ipanel].set_xscale('log')
                    autoscale=True
                #locs,labels = plt.xticks()
                #plt.xticks(locs, map(lambda x: "%g" % x, locs*1.0e5))
        else:
            xlo=ranges[labels[panel[0]]][0]
            xhi=ranges[labels[panel[0]]][1]
            ylo=ranges[labels[panel[1]]][0]
            yhi=ranges[labels[panel[1]]][1]
            pylab.xlim(xlo,xhi)
            pylab.ylim(ylo,yhi)
        # Some housekeeping
        pylab.xticks(fontsize=FONTSIZE,rotation=ROTATION)
        pylab.yticks(fontsize=FONTSIZE,rotation=0)
    # Set up the 1-D plots on the diagonal
    for iparam in range(nparams):
        #        b=numpy.histogram(R,bins=bins)
        J,edges=numpy.histogram(chain[:,iparam],density=True,bins=binsize)
        ax1d=pylab.subplot2grid((nparams,nparams),(iparam,iparam))
        pylab.plot(edges[:-1],J,color='k')
        #print iparam,nparams,labels[iparam]
        if 'truth' in kwargs and kwargs['truth'] is not None:
            truth=kwargs['truth']
            pylab.axvline(truth[parameters[iparam]],color='r')
        if iparam == 0:
            ax1d.set_ylabel(labelDict[labels[iparam]],fontsize=8)
            ax1d.yaxis.set_label_coords(AXIS_LABEL_OFFSET,0.5) # align axis labels
        if iparam == (nparams-1):
            ax1d.set_xlabel(labelDict[labels[iparam]],fontsize=8)
            ax1d.xaxis.set_label_coords(0.5,AXIS_LABEL_OFFSET) # align axis labels
        # Set plot limits
        #parameters=['x', 'y', 'S', 'sig', 'Q', 'el', 'em', 'R']
        if autoscale:
            # HACK FOR C ONLY:
            if TRUNCATE_C and iparam==C_COL:
                xxlo,xxhi=ax1d.xaxis.get_data_interval()
                if xxhi>TRUNCATE_C_LIMIT:
                    pylab.xlim(xxlo,TRUNCATE_C_LIMIT)
                    #ax1d.set_xscale('log')
                    autoscale=True
        if not autoscale:
            xlo,xhi=ranges[parameters[iparam]]
            pylab.xlim(xlo,xhi)
        if iparam < (nparams-1):
            ax1d.get_xaxis().set_ticklabels([])
            ax1d.get_yaxis().set_ticklabels([])
        pylab.xticks(fontsize=FONTSIZE,rotation=ROTATION)
        pylab.yticks(fontsize=FONTSIZE)
        #if iparam == 0: ax1d.set_xscale('log')
        #ax1d.set_xscale('linear')
    # A spare panel used as a text area for the title / truth values.
    #axinfo=pylab.subplot2grid((nparams,nparams),(0,nparams-3))
    axinfo=pylab.subplot2grid((nparams,nparams),(0,nparams-nparams%2-1))
    axinfo.get_xaxis().set_visible(False)
    axinfo.get_yaxis().set_visible(False)
    pylab.axis('off')
    pylab.title(title)
    # Plot the truth - this needs to be generalized for non-lumfunc
    if 'truth' in kwargs and kwargs['truth'] is not None:
        truth=kwargs['truth']
        # NOTE(review): reads the evidence values straight from
        # sys.argv[-2:], so this assumes a specific command line.
        note=[r' $\Delta \ln{Z}$ ='+str(sys.argv[-2])+r"$\pm $"+str(sys.argv[-1])]
        for k,v in truth.items():
            notelet='%s = %4.4g' % (labelDict[k],v)
            note.append(notelet)
        pylab.text(0,-0.4,'\n'.join(note))
    if 'reconstruct' in kwargs:
        # Source-count reconstruction panel (log-log, top right).
        reconstruct=kwargs['reconstruct']
        axrecon=pylab.subplot2grid((nparams,nparams),(0,nparams-2),\
            rowspan=2,colspan=2)
        axrecon.set_xscale('log')
        axrecon.set_yscale('log')
        pylab.xticks(fontsize=FONTSIZE,rotation=60)
        pylab.yticks(fontsize=FONTSIZE)
        median_bins=medianArray(reconstruct[0])
        dnds=calculateDnByDs(median_bins,reconstruct[1])
        # NOTE(review): ``ksNoisy`` is not defined in this module --
        # presumably from a star import (lumfunc?); confirm before use.
        dndsN=calculateDnByDs(median_bins,ksNoisy)
        print median_bins
        print dnds
        print truth.items()
        recon=numpy.zeros(numpy.shape(median_bins))
        post=numpy.zeros(numpy.shape(median_bins))
        print '# i Smedian ks dnds dndsS2.5 NRecon dndsRecon dndsS2.5Recon log10dnds log10dndsR diffR dndsN'
        if nparams == 4:
            (C,alpha,Smin,Smax)\
                =(truth['C'],truth['alpha'],truth['Smin'],truth['Smax'])
            area=10.0 # Hack
            # Reconstruct powerLaw points given truth
            for i in range(len(median_bins)):
                recon[i]=powerLawFuncS(median_bins[i],\
                    C,alpha,Smin,Smax,area)
                post[i]=powerLawFuncS(median_bins[i],\
                    9.8,-0.63,0.04,14.1,area)
        #recon *= lumfunc.ksRaw
        #dndsR=lumfunc.calculateDnByDs(median_bins,recon)
        # **** XXXX Where does the 1000 come from!? :(( XXXX
        dndsR=recon*1000.0
        dndsP=post*1000.0
        # cols: i Smedian ks dnds dndsS2.5 NRecon dndsRecon
        # dndsS2.5Recon log10dnds log10dndsR diffR dndsN
        for i in range(len(median_bins)):
            print '%i %f %i %i %i %i %i %i %f %f %i %i' % (i,median_bins[i],\
                reconstruct[-1][i],dnds[i],\
                dnds[i]*median_bins[i]**2.5,recon[i],dndsR[i],\
                dndsR[i]*median_bins[i]**2.5,numpy.log10(dnds[i]),\
                numpy.log10(dndsR[i]),int(dndsR[i]-dnds[i]),dndsN[i])
        #print recon
        pylab.xlim(reconstruct[0][0],reconstruct[0][-1])
        #pylab.ylim(1.0e2,1.0e8)
        #pylab.plot(median_bins,dnds*numpy.power(median_bins,2.5)*lumfunc.sqDeg2sr,'+')
        power=2.5
        pylab.plot(median_bins,dnds*sqDeg2sr*numpy.power(median_bins,power),'+')
        pylab.plot(median_bins,dndsR*sqDeg2sr*numpy.power(median_bins,power),'-')
        pylab.plot(median_bins,dndsN*sqDeg2sr*numpy.power(median_bins,power),'+')
        pylab.plot(median_bins,dndsP*sqDeg2sr*numpy.power(median_bins,power),'-')
        #pylab.plot(dnds,dndsR*numpy.power(median_bins,1.0))
        #b=lumfunc.simtable(lumfunc.bins,a=-1.5,seed=1234,noise=10.0,dump=False)
    if 'outfile' in kwargs:
        outfile=kwargs['outfile']
        pylab.savefig(outfile,figsize=FIGSIZE,dpi=DPI)
        print 'Run: open %s' % outfile
        #pylab.close()
    else:
        pylab.show()
    return bundle
#-------------------------------------------------------------------------------
def trianglePairs(inlist):
    """Return all ordered pairs (i, j) drawn from *inlist* with j > i."""
    return [(i, j) for i in inlist for j in inlist if j > i]
#-------------------------------------------------------------------------------
if __name__ == '__main___':
    # NOTE(review): '__main___' has THREE trailing underscores, so this
    # guard can never match __name__ and the demo below never runs.  The
    # body also references ``contour_plot`` and ``lumfunc`` without
    # importing them, so it would raise NameError if enabled -- fix both
    # before re-activating.
    """
    """
    #parameters=lumfunc.parameters['C','alpha','Smin','Smax']
    #Testing all functionality
    #c=pylab.loadtxt('chain_2d_banana.txt')
    #contour(c,[0,1], labels=['1', '2'], col=('#3bf940','#059a09'),line=True)
    # Run as e.g.
    contour_plot.contourTri(pylab.loadtxt('chains-4-all-10deg-130812/1-post_equal_weights.dat'),line=True,outfile='chains-4-all-10deg-130812/test.png',col=('red','blue'),labels=lumfunc.parameters,ranges=lumfunc.plotRanges,truth=lumfunc.plotTruth,reconstruct=(lumfunc.medianArray(lumfunc.bins),lumfunc.ksNoisy),autoscale=False,title='title')
    #contour_plot.contourTri(pylab.loadtxt('chains-3-fixSmin-10deg-130812/1-post_equal_weights.dat'),line=True,outfile='test.png',col=('red','blue'),labels=lumfunc.parameters)
    #import pymultinest
    #a=pymultinest.Analyzer(len(lumfunc.parameters),'chains-4-all-mm-10deg-130815/1-')
    #s=a.get_stats()
    #print s
    sys.exit(0)
|
# Source: ndrogness/RogyGarden -- rogysensor.py
#!/usr/bin/env python3
import time
from collections import namedtuple
try:
from smbus2 import SMBus
except ImportError:
from smbus import SMBus
class RogySensor:
    """Base class for sensors keeping a rolling history of timestamped readings."""

    # (name, value, units) triple describing one measured quantity.
    SensorData = namedtuple('SensorData', ['name', 'val', 'units'])

    def __init__(self, sensor_type=None, history=5, samples_per_read=1):
        self.sensor_type = sensor_type
        self.active = False          # subclasses flip this once hardware is up
        self.vals = []               # newest reading first, capped at `history`
        self.vals_ts = []            # timestamps parallel to self.vals
        self.history = history
        self.samples_per_read = samples_per_read

    def __str__(self):
        # BUG FIX: the original returned a *list* of strings, which raises
        # TypeError when str()/print() is applied; join into one string.
        # NOTE(review): assumes one SensorData per history slot -- I2C
        # subclasses store a *list* per slot; confirm before relying on this.
        return '\n'.join(
            'At {0}: {1}={2}{3}'.format(self.vals_ts[i], self.vals[i].name,
                                        self.vals[i].val, self.vals[i].units)
            for i in range(len(self.vals)))

    def read(self, return_value=True, pretty_format=False):
        """Take a reading.  Override in subclasses; base returns a stub."""
        return [self.SensorData(name='na', val=-1, units='na')]

    def post_conversion(self, in_value):
        """Hook for unit conversion of a raw reading.  Override; identity here."""
        return in_value

    def _free_local(self):
        """Release hardware resources.  Override in subclasses; no-op here."""
        pass

    def free(self):
        """Release the sensor and clear its history."""
        if self.active is True:
            self._free_local()
        # Clear history regardless of active state.
        self.vals.clear()
        self.vals_ts.clear()
class RogySensorI2C(RogySensor):
    """RogySensor backed by an I2C device on the shared system bus."""

    # Known chip IDs, keyed by device name.
    I2C_SENSORS = {
        'BMP280': {'chipid': 0x58}
    }

    # Shared bus handle; bus 1 is the standard Raspberry Pi I2C bus.
    i2c_bus = SMBus(1)

    def __init__(self, scl_pin=22, sda_pin=21, device='Unknown', history=5, samples_per_read=1):
        # NOTE(review): ``device`` is accepted for subclass symmetry but not
        # currently stored or used here.
        super().__init__(sensor_type='i2c', history=history, samples_per_read=samples_per_read)
        self._scl_pin = scl_pin
        self._sda_pin = sda_pin

    def _read_i2c(self):
        """Device-specific read hook.  Override in subclasses; stub here."""
        return [self.SensorData(name='na', val=-1, units='na')]

    def read_data_val(self, sensordata_name):
        """Return (value, description) for the named quantity from the latest reading."""
        self.read()
        # BUG FIX: guard against an inactive sensor / empty history -- the
        # original indexed self.vals[0] unconditionally and raised IndexError
        # whenever read() bailed out before inserting a reading.
        if not self.vals:
            return None, 'Offline'
        for _sval in self.vals[0]:
            if _sval.name == sensordata_name:
                return _sval.val, '{0}{1} at {2}'.format(_sval.val, _sval.units, time.asctime(self.vals_ts[0]))
        return None, 'Offline'

    def read(self, return_value=True, pretty_format=False):
        """Take a reading, push it onto the bounded history, and return it."""
        if self.active is not True:
            return None
        # Newest reading first; trim both histories to `self.history` slots.
        self.vals.insert(0, self._read_i2c())
        if len(self.vals) > self.history:
            del self.vals[self.history]
        self.vals_ts.insert(0, time.localtime())
        if len(self.vals_ts) > self.history:
            del self.vals_ts[self.history]
        if return_value is True:
            if pretty_format is True:
                return ['{0}={1}{2}'.format(sd.name, sd.val, sd.units) for sd in self.vals[0]]
            else:
                return self.post_conversion(self.vals[0])
        else:
            return None
class RogyBMP280(RogySensorI2C):
    """BMP280 temperature/pressure/altitude sensor via the ``bmp280`` driver."""

    def __init__(self, scl_pin=22, sda_pin=21, history=5, samples_per_read=10):
        super().__init__(scl_pin=scl_pin, sda_pin=sda_pin, device='BMP280', history=history, samples_per_read=samples_per_read)
        # self.rs_i2c_device = MP_BMP280_I2C(i2c=self.rs_i2c, address=118)
        # Route the generic I2C read hook to this sensor's implementation.
        self._read_i2c = self._read_bmp280
        try:
            import bmp280
        except ImportError as err:
            print('Missing bmp280 library:', err)
            self.active = False
            return
        try:
            self._sensor = bmp280.BMP280(i2c_dev=self.i2c_bus)
            # self.rs_i2c = machine.I2C(scl=machine.Pin(scl_pin), sda=machine.Pin(sda_pin))
            # Try a read to confirm the device answers on the bus.
            self._sensor.get_temperature()
        except RuntimeError as err:
            print('Cant start BMP280 sensor:', err)
            self.active = False
            return
        self.active = True

    def get_relative_altitude(self):
        """Altitude relative to a pressure baseline sampled over ~10 s.

        NOTE(review): only the first 75 of 100 samples enter the average
        ([:-25]) -- rationale not evident from the code; confirm intent.
        """
        baseline_size = 100
        baseline_values = []
        for i in range(baseline_size):
            pressure = self._sensor.get_pressure()
            baseline_values.append(pressure)
            time.sleep(.1)
        baseline = sum(baseline_values[:-25]) / len(baseline_values[:-25])
        return self._sensor.get_altitude(qnh=baseline)

    def _read_bmp280(self):
        # Average `samples_per_read` raw readings, then format as strings.
        # return self.rs_i2c_device.temperature
        _st1 = 0
        _st2 = 0
        _st3 = 0
        for i in range(0, self.samples_per_read):
            _st1 += self._sensor.get_temperature()
            _st2 += self._sensor.get_pressure()
            _st3 += self._sensor.get_altitude()
            time.sleep(.1)
        # I'm American...convert to F
        _st1 = '{:.1f}'.format(((_st1 / self.samples_per_read) * 9/5) + 32)
        _st2 = '{:.2f}'.format(_st2 / self.samples_per_read)
        # metres -> feet
        _st3 = '{:.2f}'.format((_st3 / self.samples_per_read) * 3.28084)
        # relative_altitude = '{:05.2f}'.format(self.get_relative_altitude() * 3.28084)
        return [self.SensorData(name='temp', val=_st1, units='F'),
                self.SensorData(name='bar_pres', val=_st2, units='hPa'),
                self.SensorData(name='altitude', val=_st3, units='ft')
                ]
class RogyINA260(RogySensorI2C):
    """INA260 current/voltage/power sensor via the Adafruit CircuitPython driver."""

    def __init__(self, scl_pin=22, sda_pin=21, history=5, samples_per_read=1):
        super().__init__(scl_pin=scl_pin, sda_pin=sda_pin, device='INA260', history=history,
                         samples_per_read=samples_per_read)
        # self.rs_i2c_device = MP_BMP280_I2C(i2c=self.rs_i2c, address=118)
        # Route the generic I2C read hook to this sensor's implementation.
        self._read_i2c = self._read_ina260
        try:
            import board
            import busio
            import adafruit_ina260
        except ImportError as err:
            print('Missing INA260 sensor library:', err)
            self.active = False
            return
        try:
            # NOTE(review): uses the CircuitPython busio bus, not the shared
            # SMBus handle on the parent class.
            self._sensor = adafruit_ina260.INA260(busio.I2C(board.SCL, board.SDA))
        except ValueError as err2:
            print('Cant start INA260 sensor:', err2)
            self.active = False
            return
        self.active = True

    def _read_ina260(self):
        # Average `samples_per_read` raw readings, then format as strings.
        _st1 = 0
        _st2 = 0
        _st3 = 0
        for i in range(0, self.samples_per_read):
            _st1 += self._sensor.current
            _st2 += self._sensor.voltage
            _st3 += self._sensor.power
            time.sleep(.1)
        # Convert to Amps, V, Watts
        # (driver reports current in mA and power in mW -- TODO confirm)
        _st1 = '{:.2f}'.format((_st1 / self.samples_per_read) / 1000)
        _st2 = '{:.2f}'.format(_st2 / self.samples_per_read)
        _st3 = '{:.2f}'.format((_st3 / self.samples_per_read) / 1000)
        return [self.SensorData(name='current', val=_st1, units='A'),
                self.SensorData(name='voltage', val=_st2, units='V'),
                self.SensorData(name='power', val=_st3, units='W')
                ]
def main():
    """Smoke test: poll both sensors once per second and print readings forever."""
    # vbat = machine.ADC(36)
    # vbat.atten(vbat.ATTN_11DB)
    # VBAT = Pin 35
    bmp280 = RogyBMP280()
    ina260 = RogyINA260()
    while True:
        print(bmp280.read(pretty_format=True))
        print(ina260.read(pretty_format=True))
        time.sleep(1)


if __name__ == '__main__':
    main()
|
# Source: GSByeon/openhgsenti
# -*- coding: utf8 -*-
#from __future__ import unicode_literals
from elasticsearch import Elasticsearch
import numpy as np
from collections import Counter
from django.shortcuts import render,get_object_or_404
from django.contrib.auth import logout
from django.http import HttpResponseRedirect
from quots.forms import *
from django.contrib.auth import authenticate, login
from quots.models import *
from django.views.decorators.csrf import csrf_exempt
from django.http import HttpResponse
from urllib.parse import unquote
import json
from ipware.ip import get_ip
from django.utils import timezone
import pytz
def softmax(x):
    """Map softmax probabilities of *x* onto a 0-1 rating scale.

    Note: despite the name this is not a plain softmax -- each softmax
    probability p is transformed to (10 - p) / 10, so entries carrying
    more probability mass score slightly lower.
    """
    exps = np.exp(x)
    probs = exps / np.sum(exps, axis=0)
    return (10 - probs) / 10
def gnrfreq(s):
    """Return (token, percentage) pairs for the whitespace-separated tokens in *s*.

    Tokens are counted, ordered by descending count (ties broken
    alphabetically), and the counts normalised to percentages rounded to
    two decimals.  If the raw counts already sum to exactly 100 they are
    returned unscaled.
    """
    ordered = sorted(sorted(Counter(s.split()).most_common(),
                            key=lambda pair: pair[0], reverse=False),
                     key=lambda pair: pair[1], reverse=True)
    sumofv = sum(v for _, v in ordered)
    # BUG FIX: the original compared with ``is 100`` -- identity, not
    # equality -- which only works by accident of CPython's small-int cache.
    if sumofv == 100:
        return ordered
    inpercent = []
    for k, v in ordered:
        inpercent.append((k, round((v / sumofv) * 100, 2)))
    return inpercent
def essearch(keyword):
    """Search movie comments in Elasticsearch for *keyword*.

    Returns a template context dict with:
      'proof' -- {rid: [weighted star value, comment]} per matching review
      'gnr'   -- (genre, percentage) list from gnrfreq()
      'avg'   -- average weighted star value, or -1 if nothing matched
      'key'   -- the original keyword
    """
    context = {}
    es = Elasticsearch("192.168.3.11:9200")
    # Top 100 comments matching the keyword, best-rated ("good") first.
    res = es.search(index="mov", sort={"good": "desc"}, size=100, doc_type="cmt",
                    body={"query": {"match": {"cmt": keyword}}})
    resdic = {}
    resavg = []
    gnrstr = ""
    for elem in res['hits']['hits']:
        # Accumulate genre tokens for the frequency table.
        for gnr in elem['_source']['gnr']:
            gnrstr += (gnr + " ")
        # find mids containing keyword
        #submid = es.search(index="mov", sort={"good": "desc"}, size=100, doc_type="cmt",
        #                  body={"query": {"match": {"mid": elem['_source']['mid']}}})
        # All reviews of the same movie (mid), to rank this review among them.
        submid = es.search(index="mov", sort={"good": "desc"}, size=100, doc_type="cmt", \
            body={"query":
                {
                    "bool":
                    {
                        "should": {
                            "term": {
                                "rid":
                                    elem['_source']['rid']
                            }
                        },
                        "filter":
                        {
                            "term":
                            {
                                "mid":
                                    elem['_source']['mid']
                            }
                        },
                    }
                }
            }
        )
        # Negated good-minus-bad margin per sibling review; shift so the
        # maximum is <= 0 before weighting with softmax().
        gbdif = []
        for subelem in submid['hits']['hits']:
            gbdif.append(-(subelem['_source']['good'] - subelem['_source']['bad']))
        if np.max(gbdif) > 0:
            gbdif = gbdif - np.max(gbdif)
        rates = softmax(gbdif)
        # Weight this review's star rating by its rank among siblings.
        for ind, resubelem in enumerate(submid['hits']['hits']):
            if resubelem['_source']['rid'] == elem['_source']['rid']:
                starval = rates[ind] * elem['_source']['star']
                resdic[elem['_source']['rid']] = [starval, elem['_source']['cmt']]
                resavg.append(starval)
    context['proof'] = resdic
    context['gnr'] = gnrfreq(gnrstr)
    if len(resavg) > 0:
        context['avg'] = np.average(resavg)
    else:
        context['avg'] = -1
    context['key'] = keyword
    return context
def search(request):
    """Handle a search from the web UI, log it, and render the results.

    On POST: runs the Elasticsearch lookup for the submitted keyword,
    records a ``Searchedkey`` row (tied to the user when exactly one
    matching user exists), and renders ``search.html`` with the results.
    On any other method, renders the empty index page (the original code
    fell through and returned None, which is an HTTP 500 in Django).
    """
    ip = get_ip(request)
    if request.method == 'POST':
        usr = User.objects.filter(username=request.user)
        rst = essearch(request.POST['keyword'])
        # Build a "|"-separated string of the top three genres.
        # BUG FIX: was `if i is 3` — identity comparison on an int.
        gnr = ""
        for i, (k, v) in enumerate(rst['gnr']):
            if i == 3:
                break
            gnr += (k + "|")
        usr_obj = None
        intent = None
        # BUG FIX: was `len(usr) is 1` — identity comparison on an int.
        if len(usr) == 1:
            usr_obj = usr[0]
            try:
                # last_name is overloaded as "phone|content"; the intent
                # lives after the first "|".
                intent = str(usr_obj.last_name).split('|')[1]
            except IndexError:
                # no "|" in last_name — leave intent as None
                pass
        # Single construction replaces the two near-identical branches of
        # the original; usr is None for anonymous/unknown users.
        Searchedkey(usr=usr_obj,
                    ipaddr=str(ip),
                    content=request.POST['keyword'],
                    star=rst['avg'],
                    gnrs=gnr,
                    isapi=False,
                    update=datetime.datetime.now(tz=pytz.UTC),
                    intent=intent
                    ).save()
        return render(request, 'search.html', rst)
    # Non-POST requests previously returned None (server error).
    return render(request, 'index.html', {})
def index(request):
    """Render the landing page with an empty template context."""
    return render(request, 'index.html', {})
def apicall(request):
    """Render the API documentation/demo page with an empty context."""
    return render(request, 'apicall.html', {})
def jsonapi(request, userid, keyword):
    """JSON API endpoint: run a search on behalf of a registered user.

    Returns the essearch() result as UTF-8 JSON (status 200) when exactly
    one user matches *userid*; otherwise returns a Korean error payload
    with status 500 (kept for backward compatibility with existing
    clients).
    """
    usr = User.objects.filter(username=userid)
    ip = get_ip(request)
    intent = None
    # BUG FIX: was `len(usr) is 1` — identity comparison on an int.
    if len(usr) == 1:
        rst = essearch(keyword)
        gnr = ""
        try:
            # last_name is overloaded as "phone|content"
            intent = str(usr[0].last_name).split('|')[1]
        except IndexError:
            pass
        # BUG FIX: was `if i is 3` — identity comparison on an int.
        for i, (k, v) in enumerate(rst['gnr']):
            if i == 3:
                break
            gnr += (k + "|")
        js = json.dumps(rst, ensure_ascii=False)
        obj = Searchedkey(usr=usr[0],
                          ipaddr=str(ip),
                          content=keyword,
                          star=rst['avg'],
                          gnrs=gnr,
                          isapi=True,
                          # aware UTC timestamp, consistent with search()
                          # (original used a naive datetime.now() here)
                          update=datetime.datetime.now(tz=pytz.UTC),
                          intent=intent
                          )
        obj.save()
        return HttpResponse(js, content_type=u"application/json; charset=utf-8", status=200)
    else:
        rst = {"err": "오픈한글감성사전에 등록된 유저가 아닙니다"}
        js = json.dumps(rst, ensure_ascii=False)
        return HttpResponse(js, content_type=u"application/json; charset=utf-8", status=500)
def register(request):
    """Register a new user and log them in.

    On a valid POST, creates the Django user (phone and free-text content
    are packed into ``last_name`` as ``"phone|content"``), logs the user
    in, and redirects to the root URL.  Otherwise renders the
    registration form (re-populated with errors on invalid POST).
    """
    if request.method == 'POST':
        form = RegistrationForm(request.POST)
        if form.is_valid():
            meta = str(form.cleaned_data['phone']) + '|' + str(form.cleaned_data['content'])
            user = User.objects.create_user(username=form.cleaned_data['username'],
                                            # BUG FIX: was cleaned_data['<PASSWORD>'] — a
                                            # redaction placeholder that raised KeyError.
                                            password=form.cleaned_data['password'],
                                            email=form.cleaned_data['email'],
                                            first_name=form.cleaned_data['rname'],
                                            last_name=meta)
            login(request, user)
            return HttpResponseRedirect('/')
        context = {'form': form}
        return render(request, 'registration/register.html', context)
    form = RegistrationForm()
    context = {'form': form}
    return render(request, 'registration/register.html', context)
# Source repository: Harlen520/NCPQA
import pandas as pd
import numpy as np
import collections
from prepare.data_preprocess import data_preprocess
from torch.utils.data.distributed import DistributedSampler
from sklearn.model_selection import KFold
from torch.utils.data import TensorDataset, DataLoader, RandomSampler, SequentialSampler
import torch
import logging
import os
import pickle
from tqdm import tqdm
import time
logger = logging.getLogger(__name__)
class QAExample(object):
    """A single question-answering example (one candidate answer for a
    question against one document).

    NOTE: ``qusetion`` (sic) is kept misspelled because other code in
    this module accesses the attribute by that name.
    """

    def __init__(self, example_id=None, qid=None, qusetion=None, docid=None, answer=None):
        self.example_id = example_id  # globally unique example number
        self.qid = qid                # question id
        self.qusetion = qusetion      # question text
        self.docid = docid            # id of the source document
        self.answer = answer          # candidate answer text (may be None)

    def __str__(self):
        return self.__repr__()

    def __repr__(self):
        s = ""
        s += "qid: %s" % (self.qid)
        # BUG FIX: the original referenced self.contexts, which is never
        # set on this class, so repr() always raised AttributeError.
        s += ", question: %s" % (self.qusetion)
        if self.answer is not None:
            s += ", label: %s" % self.answer
        return s
class InputFeatures(object):
    """A single set of features of data."""
    # NOTE(review): every assignment below except `label` ends with a
    # stray trailing comma, so these attributes are stored as 1-element
    # *tuples* rather than plain values.  Downstream code in this module
    # compensates by indexing with [0] (e.g. `f.unique_id[0]` in
    # prepare_pred_dataloader), so the commas cannot be removed here
    # without also changing those call sites.
    def __init__(self, unique_id, qid, tokens, input_ids, input_mask, segment_ids, docid, answer,
                 label):
        self.unique_id = unique_id,
        self.qid = qid,
        self.tokens = tokens,
        self.input_ids = input_ids,
        self.input_mask = input_mask,
        self.segment_ids = segment_ids,
        self.answer = answer,
        self.docid = docid,
        self.label = label
class Data_processor(object):
    """Reads the policy corpus and QA data, converts examples into
    BERT-style input features, and builds PyTorch DataLoaders.

    :param tokenizer: BERT-style tokenizer exposing ``tokenize`` and
        ``convert_tokens_to_ids``
    :param policies_file: pickle file containing the policy documents
    :param max_seq_length: total encoded length (question + answer parts)
    :param max_query_length: tokens reserved for the question segment
    """
    def __init__(self, tokenizer, policies_file, max_seq_length=384, max_query_length=64):
        self.max_seq_length = max_seq_length
        self.max_query_length = max_query_length
        self.tokenizer = tokenizer
        self._read_policies_context(policies_file)
    def _read_policies_context(self, data_file):
        # Load the pickled policy documents and build a docid -> full text
        # map.  NOTE(review): attribute name "plicies" (sic) is kept; it
        # is only referenced inside this method.
        with open(data_file, 'rb') as fin:
            self.plicies = pickle.load(fin)
        self.id2doc = {}
        for c_data in self.plicies:
            context = c_data['text']
            self.id2doc[c_data['docid']] = context
    def build_sub_docs_dict(self, args):
        """Load the pre-built sub-document dictionary from
        ``args.sub_docs_dict`` (pickle)."""
        print('build_sub_docs_dict')
        with open(args.sub_docs_dict, 'rb') as fin:
            self.sub_docs_dict = pickle.load(fin)
    def _train_data_preprocess(self, train_data):
        '''
        Attach the full context and the character-level answer span to
        each raw training record.

        :param train_data: list of dicts with 'answer' and 'docid' keys
        :return: train_datas: [dict,dict,...,dict]
        '''
        train_datas = []
        for c_data in train_data:
            answer = c_data['answer']
            docid = c_data['docid']
            c_data['contexts'] = [docid]
            context = self.id2doc[docid]
            # locate the answer in the document; records where the answer
            # text is not found get no 'answer_span' key
            start_idx = context.find(answer)
            if start_idx != -1:
                # inclusive character span [start_idx, end_idx]
                end_idx = start_idx + len(answer) - 1
                c_data['answer_span'] = [start_idx, end_idx]
            c_data['context'] = context
            train_datas.append(c_data)
        return train_datas
    def read_QA_examples(self, data_file, is_train=True, k=1):
        """Read pickled retrieval results (qid -> candidate list) and wrap
        the top-k candidates per question into QAExample objects.

        NOTE(review): the is_train and non-train branches below are
        currently identical; the flag is kept for interface compatibility.
        """
        with open(data_file, 'rb') as fin:
            data_set = pickle.load(fin)
        examples = []
        example_id = 1000000
        with tqdm(total=len(data_set), desc="reading examples:") as pbar:
            for qid, data in data_set.items():
                # keep only the top-k retrieved candidates per question
                if len(data) > k:
                    data = data[:k]
                for d in data:
                    docid = d['answer_docid']
                    qusetion = d['question']
                    answer = d['text']
                    if is_train:
                        examples.append(QAExample(
                            example_id=example_id,
                            qid=qid,
                            qusetion=qusetion,
                            docid=docid,
                            answer=answer
                        ))
                        example_id += 1
                    else:
                        examples.append(QAExample(
                            example_id=example_id,
                            qid=qid,
                            qusetion=qusetion,
                            docid=docid,
                            answer=answer
                        ))
                        example_id += 1
                pbar.update(1)
        return examples
    def get_train_examples(self, train_file, train_examples_file):
        """Load (or build and cache) the training examples, shuffled."""
        if os.path.exists(train_examples_file):
            train_examples = pickle.load(open(train_examples_file, mode='rb'))
        else:
            train_examples = self.read_QA_examples(train_file, is_train=True)
            pickle.dump(train_examples, open(train_examples_file, mode='wb'))
        np.random.shuffle(train_examples) # shuffle data
        return train_examples
    def _convert_examples_to_features(self, examples, is_train=True):
        """Loads a data file into a list of `InputBatch`s."""
        unique_id = 1000000000
        features = []
        with tqdm(total=len(examples), desc="convert examples to features:") as pbar:
            for example_id, example in enumerate(examples):
                qid = example.qid
                qusetion = example.qusetion
                docid = example.docid
                answer = example.answer
                label = None
                if is_train:
                    # NOTE(review): QAExample in this module does not set a
                    # .label attribute — TODO confirm training examples are
                    # given one elsewhere, otherwise this raises
                    # AttributeError.
                    label = example.label
                qusetion_tokens = self.tokenizer.tokenize(qusetion) if len(qusetion) > 0 else []
                if len(qusetion_tokens) > self.max_query_length: # cut at tail
                    qusetion_tokens = qusetion_tokens[0:self.max_query_length]
                # question segment: [CLS] question [SEP], padded out to a
                # fixed max_query_length prefix
                tokens = ["[CLS]"] + qusetion_tokens + ["[SEP]"]
                segment_ids = [0] * len(tokens)
                input_mask = [1] * len(tokens)
                pads = ["[PAD]"] * (self.max_query_length - len(tokens))
                tokens += pads
                input_mask += [0] * len(pads)
                segment_ids += [0] * len(pads)
                # remaining budget for the answer segment (-3 accounts for
                # [CLS] and the two [SEP] tokens)
                max_answer_length = self.max_seq_length - self.max_query_length - 3
                answer_tokens = self.tokenizer.tokenize(answer) if len(answer) > 0 else []
                if len(answer_tokens) > max_answer_length: # cut at tail
                    answer_tokens = answer_tokens[0:max_answer_length]
                tokens += answer_tokens + ["[SEP]"]
                segment_ids += [1] * len(answer_tokens + ["[SEP]"])
                input_mask += [1] * len(answer_tokens + ["[SEP]"])
                input_ids = self.tokenizer.convert_tokens_to_ids(tokens)
                # pad everything to max_seq_length
                padding = [0] * (self.max_seq_length - len(input_ids))
                input_ids += padding
                input_mask += padding
                segment_ids += padding
                assert len(input_ids) == self.max_seq_length
                assert len(input_mask) == self.max_seq_length
                assert len(segment_ids) == self.max_seq_length
                features.append(InputFeatures(
                    unique_id=unique_id,
                    qid=qid,
                    tokens=tokens,
                    input_ids=input_ids,
                    input_mask=input_mask,
                    segment_ids=segment_ids,
                    answer=answer,
                    docid=docid,
                    label=label
                ))
                unique_id += 1
                pbar.update(1)
        return features
    def get_train_features(self, train_examples, train_features_file):
        """Load (or build and cache) the training InputFeatures."""
        if os.path.exists(train_features_file):
            with open(train_features_file, "rb") as reader:
                train_features = pickle.load(reader)
        else:
            train_features = self._convert_examples_to_features(
                examples=train_examples,
                is_train=True
            )
            logger.info("  Saving train features into file %s", train_features_file)
            with open(train_features_file, "wb") as writer:
                pickle.dump(train_features, writer)
        logger.info("Num train features:{}".format(len(train_features)))
        return train_features
    def get_valid_features(self, valid_examples, valid_features_file):
        """Load (or build and cache) the validation InputFeatures."""
        if os.path.exists(valid_features_file):
            with open(valid_features_file, "rb") as reader:
                valid_features = pickle.load(reader)
        else:
            valid_features = self._convert_examples_to_features(
                examples=valid_examples,
                is_train=True
            )
            logger.info("  Saving valid features into file %s", valid_features_file)
            with open(valid_features_file, "wb") as writer:
                pickle.dump(valid_features, writer)
        logger.info("Num valid features:{}".format(len(valid_features)))
        return valid_features
    def get_pred_features(self, pred_examples, pred_features_file, doc_stride):
        """Load (or build and cache) the prediction InputFeatures.

        NOTE(review): ``doc_stride`` is accepted but never used, and the
        log message below says "train features" although these are
        prediction features.
        """
        if os.path.exists(pred_features_file):
            with open(pred_features_file, "rb") as reader:
                pred_features = pickle.load(reader)
        else:
            pred_features = self._convert_examples_to_features(
                examples=pred_examples,
                is_train=False
            )
            logger.info("  Saving train features into file %s", pred_features_file)
            with open(pred_features_file, "wb") as writer:
                pickle.dump(pred_features, writer)
        return pred_features
    def prepare_train_dataloader(self, train_features, train_batch_size, local_rank, union):
        """Build the training DataLoader (distributed sampler when
        local_rank != -1).

        NOTE(review): the ``union`` parameter is accepted but unused.
        """
        all_unique_ids = torch.tensor([f.unique_id for f in train_features], dtype=torch.long)
        all_input_ids = torch.tensor([f.input_ids for f in train_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask for f in train_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids for f in train_features], dtype=torch.long)
        all_labels = torch.tensor([f.label for f in train_features], dtype=torch.long)
        train_data = TensorDataset(all_unique_ids, all_input_ids, all_input_mask, all_segment_ids, all_labels)
        if local_rank == -1:
            train_sampler = RandomSampler(train_data)
        else:
            train_sampler = DistributedSampler(train_data)
        train_dataloader = DataLoader(train_data, sampler=train_sampler, batch_size=train_batch_size, drop_last=False)
        return train_dataloader
    def prepare_pred_dataloader(self, predict_file, predict_batch_size, k=1, cache_dir=None):
        """Read prediction examples, build (or load cached) features, and
        return a sequential DataLoader plus the raw feature list."""
        pred_examples = self.read_QA_examples(
            predict_file, is_train=False, k=k)
        # eval_examples=eval_examples[:100]
        logger.info("***** Running predictions *****")
        logger.info("  Num predict examples = %d", len(pred_examples))
        logger.info("  Predict batch size = %d", predict_batch_size)
        cache_file = os.path.join(cache_dir, 'pred_features.pkl')
        if os.path.exists(cache_file):
            t0 = time.time()
            with open(cache_file, 'rb') as fp:
                pred_features = pickle.loads(fp.read())
            t1 = time.time()
            print('cache: predict_features --> {} loaded, cost time: {}s'.format(cache_file, t1-t0))
        else:
            pred_features = self._convert_examples_to_features(
                examples=pred_examples,
                is_train=False
            )
            with open(cache_file, 'wb') as fp:
                fp.write(pickle.dumps(pred_features))
        logger.info("  Num batch predict features = %d", len(pred_features))
        # the [0] indexing unwraps the 1-element tuples created by the
        # trailing commas in InputFeatures.__init__
        all_unique_id = torch.tensor([f.unique_id[0] for f in pred_features], dtype=torch.long)
        all_input_ids = torch.tensor([f.input_ids[0] for f in pred_features], dtype=torch.long)
        all_input_mask = torch.tensor([f.input_mask[0] for f in pred_features], dtype=torch.long)
        all_segment_ids = torch.tensor([f.segment_ids[0] for f in pred_features], dtype=torch.long)
        pred_data = TensorDataset(all_unique_id, all_input_ids, all_input_mask, all_segment_ids)
        # Run prediction for full data
        pred_sampler = SequentialSampler(pred_data)
        pred_dataloader = DataLoader(pred_data, sampler=pred_sampler, batch_size=predict_batch_size)
        return pred_dataloader, pred_features
# --- concatenation boundary ---
# Trakttv.bundle/Contents/Libraries/Shared/plugin/modules/migrations/account.py
from plugin.core.environment import Environment
from plugin.models import (
Account, ClientRule, UserRule,
PlexAccount, PlexBasicCredential,
TraktAccount, TraktBasicCredential, TraktOAuthCredential
)
from plugin.modules.migrations.core.base import Migration
from exception_wrappers.libraries import apsw
import logging
import os
import peewee
import requests
log = logging.getLogger(__name__)
class AccountMigration(Migration):
    """Ensures the server and administrator `Account` rows exist and
    refreshes the plex/trakt details of every known account."""

    def run(self, token_plex=None):
        """Run the migration.

        :param token_plex: optional plex token used when (re)creating the
            administrator's basic plex credential
        :return: True (the migration always completes)
        """
        # Ensure server `Account` exists
        self.create_server_account()

        # Ensure administrator `Account` exists
        self.create_administrator_account(token_plex=token_plex)

        # Refresh extra accounts
        accounts = Account.select().where(
            Account.id > 1,
            Account.deleted == False
        )

        for account in accounts:
            self.refresh_account(account)

        return True

    @classmethod
    def create_server_account(cls):
        """Create the reserved server account (id=0) if it is missing."""
        try:
            Account.get(Account.id == 0)
        except Account.DoesNotExist:
            Account.create(
                id=0,
                name=''
            )

    @classmethod
    def create_administrator_account(cls, token_plex=None):
        """Create (or update) the administrator account (id=1) with its
        plex/trakt sub-accounts and credentials."""
        username = cls.get_trakt_username()

        try:
            account = Account.get(Account.id == 1)
        except Account.DoesNotExist:
            account = Account.create(
                id=1,
                name=username
            )

            # Create default rules for account
            cls.create_rules(account)

        # Ensure plex account details exist
        p_created, p_account = cls.create_plex_account(account)

        cls.create_plex_basic_credential(p_account, token_plex=token_plex)

        # Refresh plex account details
        # (BUG FIX: was a bare `except:`; also `log.warn` is a deprecated
        # alias of `log.warning`)
        try:
            p_refreshed = p_account.refresh(force=p_created)
        except Exception:
            log.warning('Unable to refresh plex account (not authenticated?)', exc_info=True)
            p_refreshed = False

        # Ensure trakt account details exist
        t_created, t_account = cls.create_trakt_account(account, username)

        cls.create_trakt_basic_credential(t_account)
        cls.create_trakt_oauth_credential(t_account)

        # Refresh trakt account details
        try:
            t_refreshed = t_account.refresh(force=t_created)
        except Exception:
            log.warning('Unable to refresh trakt account (not authenticated?)', exc_info=True)
            t_refreshed = False

        # Refresh account
        account.refresh(force=p_refreshed or t_refreshed)

    @classmethod
    def refresh_account(cls, account):
        """Refresh the plex and trakt details of *account* (no-op for
        missing or deleted accounts)."""
        if not account or account.deleted:
            return

        log.debug('Refreshing account: %r', account)

        # Refresh plex account details
        p_account = account.plex
        p_refreshed = False

        if p_account:
            try:
                p_refreshed = p_account.refresh()
            except Exception:
                log.info('Unable to refresh plex account (not authenticated?)', exc_info=True)
                p_refreshed = False

        # Refresh trakt account details
        t_account = account.trakt
        t_refreshed = False

        if t_account:
            try:
                t_refreshed = t_account.refresh()
            except Exception:
                log.info('Unable to refresh trakt account (not authenticated?)', exc_info=True)
                t_refreshed = False

        # Refresh account
        account.refresh(force=p_refreshed or t_refreshed)

    @classmethod
    def create_rules(cls, account):
        """Create the default client/user rules for a new account."""
        ClientRule.create(account=account, priority=1)
        UserRule.create(account=account, priority=1)

    #
    # Plex
    #

    @classmethod
    def create_plex_account(cls, account):
        """Return ``(created, plex_account)`` for *account*, creating the
        row when it does not exist yet."""
        try:
            return True, PlexAccount.create(
                account=account
            )
        except (apsw.ConstraintError, peewee.IntegrityError):
            return False, PlexAccount.get(
                account=account
            )

    @classmethod
    def create_plex_basic_credential(cls, plex_account, token_plex=None):
        """Create (or update) the basic plex credential for
        *plex_account*; returns True when a credential was created or
        updated."""
        if token_plex is None:
            token_plex = cls.get_token()

        if not token_plex:
            log.warning('No plex token available, unable to authenticate plex account')
            return False

        try:
            PlexBasicCredential.create(
                account=plex_account,
                token_plex=token_plex
            )
        except (apsw.ConstraintError, peewee.IntegrityError) as ex:
            # Ensure basic credential has a token
            rows_updated = PlexBasicCredential.update(
                token_plex=token_plex,
                token_server=None
            ).where(
                PlexBasicCredential.account == plex_account,
                PlexBasicCredential.token_plex != token_plex
            ).execute()

            # Check if basic credential was updated
            if rows_updated:
                return True

            log.debug('Ignoring basic credential update for %r, already exists (%s)', plex_account, ex)
            return False

        return True

    #
    # Trakt
    #

    @classmethod
    def create_trakt_account(cls, account, username):
        """Return ``(created, trakt_account)`` for *account*, creating the
        row when it does not exist yet."""
        try:
            return True, TraktAccount.create(
                account=account,
                username=username
            )
        except (apsw.ConstraintError, peewee.IntegrityError):
            return False, TraktAccount.get(
                account=account
            )

    @classmethod
    def create_trakt_basic_credential(cls, trakt_account):
        """Create the basic trakt credential from the stored preferences;
        returns True only when a new credential row was created."""
        if not Environment.dict['trakt.token']:
            return False

        try:
            TraktBasicCredential.create(
                account=trakt_account,
                password=Environment.get_pref('password'),
                token=Environment.dict['trakt.token']
            )
        except (apsw.ConstraintError, peewee.IntegrityError) as ex:
            log.debug('Ignoring basic credential update for %r, already exists (%s)', trakt_account, ex)
            return False

        return True

    @classmethod
    def create_trakt_oauth_credential(cls, trakt_account):
        """Create the trakt OAuth credential from the stored PIN
        authorization; returns True only when a new row was created."""
        if not Environment.dict['trakt.pin.code'] or not Environment.dict['trakt.pin.authorization']:
            return False

        try:
            TraktOAuthCredential.create(
                account=trakt_account,

                code=Environment.dict['trakt.pin.code'],
                **Environment.dict['trakt.pin.authorization']
            )
        except (apsw.ConstraintError, peewee.IntegrityError) as ex:
            log.debug('Ignoring oauth credential update for %r, already exists (%s)', trakt_account, ex)
            return False

        return True

    @classmethod
    def get_trakt_username(cls):
        """Return the trakt username from preferences, falling back to the
        stored environment value (or None)."""
        if Environment.get_pref('username'):
            return Environment.get_pref('username')

        if Environment.dict['trakt.username']:
            return Environment.dict['trakt.username']

        return None

    @classmethod
    def get_token(cls, request_headers=None):
        """Find a plex token, trying (in order): the PLEXTOKEN environment
        variable, anonymous access to the local server, and the request
        headers."""
        # Environment token
        env_token = os.environ.get('PLEXTOKEN')

        if env_token:
            log.info('Plex Token: environment')
            return env_token

        # Check if anonymous access is available
        # (BUG FIX: a connection failure previously propagated out of the
        # migration instead of falling through to the header checks)
        try:
            server = requests.get('http://localhost:32400')
        except requests.RequestException:
            server = None

        if server is not None and server.status_code == 200:
            log.info('Plex Token: anonymous')
            return 'anonymous'

        # No token available
        if request_headers is None:
            log.error('Plex Token: not available')
            return None

        # Try retrieve token from request
        req_token = request_headers.get('X-Plex-Token')

        if req_token:
            log.info('Plex Token: request')
            return req_token

        # No token available in request
        data = {
            'Client': {
                'User-Agent': request_headers.get('User-Agent'),
                'X-Plex-Product': request_headers.get('X-Plex-Product'),
            },
            'Headers': request_headers.keys()
        }

        log.debug('Request details: %r', data)

        log.error('Plex Token: not available', extra={
            'data': data
        })
        return None
# --- concatenation boundary ---
# *****************************************************************************
# Copyright (c) 2019, Intel Corporation All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# *****************************************************************************
from __future__ import print_function, division, absolute_import
import warnings
from collections import namedtuple
import itertools
import numba
from numba import ir, ir_utils, types
from numba import compiler as numba_compiler
from numba.targets.registry import CPUDispatcher
from numba.ir_utils import (mk_unique_var, replace_vars_inner, find_topo_order,
dprint_func_ir, remove_dead, mk_alloc, remove_dels,
get_name_var_table, replace_var_names,
add_offset_to_labels, get_ir_of_code, find_const,
compile_to_numba_ir, replace_arg_nodes,
find_callname, guard, require, get_definition,
build_definitions, replace_vars_stmt,
replace_vars_inner, find_build_sequence)
from numba.inline_closurecall import inline_closure_call
from numba.analysis import compute_cfg_from_blocks
from numba.compiler_machinery import FunctionPass, register_pass
import sdc
from sdc import utils, config
import sdc.io
from sdc.io import parquet_pio
from sdc.hiframes import filter, join, aggregate, sort
from sdc.utils import (get_constant, NOT_CONSTANT, debug_prints,
inline_new_blocks, ReplaceFunc, is_call, is_assign, update_globals)
import sdc.hiframes.api
from sdc.str_ext import string_type
from sdc.str_arr_ext import string_array_type
import sdc.io
from sdc.io import csv_ext
import pandas as pd
import numpy as np
import math
import sdc.io
from sdc.io.parquet_pio import ParquetHandler
from sdc.hiframes.pd_timestamp_ext import (datetime_date_type,
datetime_date_to_int, int_to_datetime_date)
from sdc.hiframes.pd_series_ext import SeriesType
from sdc.hiframes.pd_categorical_ext import PDCategoricalDtype, CategoricalArray
from sdc.hiframes.rolling import get_rolling_setup_args, supported_rolling_funcs
from sdc.hiframes.aggregate import get_agg_func, supported_agg_funcs
import sdc.hiframes.pd_dataframe_ext
def remove_hiframes(rhs, lives, call_list):
    """Dead-code-elimination hook for Numba: return True when *call_list*
    identifies a call that is known to be side-effect free (so the
    assignment may be removed when its target is dead), False otherwise.

    ``call_list`` is Numba's reversed attribute chain of the callee, e.g.
    ``['ceil', math]`` for ``math.ceil``.
    """
    # plain builtins used in stencil generation of rolling
    if len(call_list) == 1 and call_list[0] in [int, min, max, abs]:
        return True

    # numba-compiled helper used in stencil generation of rolling
    if (len(call_list) == 1 and isinstance(call_list[0], CPUDispatcher)
            and call_list[0].py_func == numba.stencilparfor._compute_last_ind):
        return True

    # sdc.hiframes.api helpers with no side effects
    api_funcs = ('fix_df_array', 'fix_rolling_array',
                 'concat', 'count', 'mean', 'quantile', 'var',
                 'str_contains_regex', 'str_contains_noregex', 'column_sum',
                 'nunique', 'init_series', 'init_datetime_index',
                 'convert_tup_to_rec', 'convert_rec_to_tup')
    if (len(call_list) == 4 and call_list[1:] == ['api', 'hiframes', sdc]
            and call_list[0] in api_funcs):
        return True

    # sdc.hiframes.series_kernels NaN helpers
    kernel_funcs = ('_sum_handle_nan', '_mean_handle_nan', '_var_handle_nan')
    if (len(call_list) == 4
            and call_list[1:] == ['series_kernels', 'hiframes', sdc]
            and call_list[0] in kernel_funcs):
        return True

    # exact call chains that are always removable
    removable = (
        ['ceil', math],  # used in stencil generation of rolling
        ['dist_return', 'distributed_api', sdc],
        ['init_dataframe', 'pd_dataframe_ext', 'hiframes', sdc],
        ['get_dataframe_data', 'pd_dataframe_ext', 'hiframes', sdc],
        ['get_dataframe_index', 'pd_dataframe_ext', 'hiframes', sdc],
        # ['set_parent_dummy', 'pd_dataframe_ext', 'hiframes', sdc],
        ['rolling_dummy', 'pd_rolling_ext', 'hiframes', sdc],
        ['agg_typer', 'api', 'hiframes', sdc],
        [list],
        ['groupby'],
        ['rolling'],
        [pd.api.types.CategoricalDtype],
        # TODO: move to Numba
        ['empty_inferred', 'ndarray', 'unsafe', numba],
        ['chain', itertools],
    )
    return any(call_list == pattern for pattern in removable)
# Register the hook so Numba's dead-code elimination can drop the
# side-effect-free hiframes calls recognized by remove_hiframes().
numba.ir_utils.remove_call_handlers.append(remove_hiframes)
@register_pass(mutates_CFG=True, analysis_only=False)
class HiFramesPass(FunctionPass):
    """analyze and transform hiframes calls"""
    # unique name used by Numba's pass registry
    _name = "sdc_extention_hi_frames_pass"
    def __init__(self):
        pass
    def run_pass(self, state):
        # all the work is delegated to a per-function implementation
        # object so the pass itself stays stateless
        return HiFramesPassImpl(state).run_pass()
class HiFramesPassImpl(object):
    def __init__(self, state):
        """Capture the compiler *state* and initialize the per-function
        bookkeeping maps used while rewriting hiframes calls."""
        # replace inst variables as determined previously during the pass
        # currently use to keep lhs of Arg nodes intact
        self.replace_var_dict = {}
        # df_var -> {col1:col1_var ...}
        self.df_vars = {}
        # df_var -> label where it is defined
        self.df_labels = {}
        # variables holding pyarrow tables (from pq.read_table())
        self.arrow_tables = {}
        # lhs name -> source name for simple copies; used to match user
        # annotations (state.locals) to renamed variables
        self.reverse_copies = {}
        self.state = state
    def run_pass(self):
        """Walk all IR blocks in reverse topological order and rewrite
        hiframes-related statements.  Rewrites may produce plain node
        lists (spliced into the block), a ReplaceFunc (inlined as a
        closure call), or a dict of new blocks (inlined as blocks); the
        latter two restart processing of the current block."""
        ir_utils._max_label = max(self.state.func_ir.blocks.keys())
        self.pq_handler = ParquetHandler(
            self.state.func_ir, self.state.typingctx, self.state.args, self.state.locals, self.reverse_copies)
        # FIXME: see why this breaks test_kmeans
        # remove_dels(self.state.func_ir.blocks)
        dprint_func_ir(self.state.func_ir, "starting hiframes")
        self._handle_metadata()
        blocks = self.state.func_ir.blocks
        # call build definition since rewrite pass doesn't update definitions
        # e.g. getitem to static_getitem in test_column_list_select2
        self.state.func_ir._definitions = build_definitions(blocks)
        # topo_order necessary since df vars need to be found before use
        topo_order = find_topo_order(blocks)
        work_list = list((l, blocks[l]) for l in reversed(topo_order))
        while work_list:
            label, block = work_list.pop()
            self._get_reverse_copies(blocks[label].body)
            new_body = []
            replaced = False
            self._working_body = new_body
            for i, inst in enumerate(block.body):
                self._replace_vars(inst)
                # ir_utils.replace_vars_stmt(inst, self.replace_var_dict)
                out_nodes = [inst]
                # handle potential dataframe set column here
                # df['col'] = arr
                if (isinstance(inst, ir.StaticSetItem)
                        and isinstance(inst.index, str)):
                    # cfg needed for set df column
                    cfg = compute_cfg_from_blocks(blocks)
                    out_nodes = self._run_df_set_column(inst, label, cfg)
                elif isinstance(inst, ir.Assign):
                    self.state.func_ir._definitions[inst.target.name].remove(inst.value)
                    out_nodes = self._run_assign(inst, label)
                elif isinstance(inst, ir.Return):
                    out_nodes = self._run_return(inst)
                if isinstance(out_nodes, list):
                    # TODO: fix scope/loc
                    new_body.extend(out_nodes)
                    self._update_definitions(out_nodes)
                if isinstance(out_nodes, ReplaceFunc):
                    rp_func = out_nodes
                    if rp_func.pre_nodes is not None:
                        new_body.extend(rp_func.pre_nodes)
                        self._update_definitions(rp_func.pre_nodes)
                    # replace inst.value to a call with target args
                    # as expected by inline_closure_call
                    # TODO: inst other than Assign?
                    inst.value = ir.Expr.call(
                        ir.Var(block.scope, "dummy", inst.loc),
                        rp_func.args, (), inst.loc)
                    block.body = new_body + block.body[i:]
                    update_globals(rp_func.func, rp_func.glbls)
                    inline_closure_call(self.state.func_ir, rp_func.glbls,
                                        block, len(new_body), rp_func.func, work_list=work_list)
                    replaced = True
                    break
                if isinstance(out_nodes, dict):
                    block.body = new_body + block.body[i:]
                    # TODO: insert new blocks in current spot of work_list
                    # instead of append?
                    # TODO: rename variables, fix scope/loc
                    inline_new_blocks(self.state.func_ir, block, len(new_body), out_nodes, work_list)
                    replaced = True
                    break
            if not replaced:
                blocks[label].body = new_body
        self.state.func_ir.blocks = ir_utils.simplify_CFG(self.state.func_ir.blocks)
        # self.state.func_ir._definitions = build_definitions(blocks)
        # XXX: remove dead here fixes h5 slice issue
        # iterative remove dead to make sure all extra code (e.g. df vars) is removed
        # while remove_dead(blocks, self.state.func_ir.arg_names, self.state.func_ir):
        #    pass
        self.state.func_ir._definitions = build_definitions(blocks)
        dprint_func_ir(self.state.func_ir, "after hiframes")
        if debug_prints():  # pragma: no cover
            print("df_vars: ", self.df_vars)
        return True
    def _replace_vars(self, inst):
        """Apply ``self.replace_var_dict`` to *inst* while keeping
        ``func_ir._definitions`` consistent for assignment statements."""
        # variable replacement can affect definitions so handling assignment
        # values specifically
        if is_assign(inst):
            lhs = inst.target.name
            self.state.func_ir._definitions[lhs].remove(inst.value)
        ir_utils.replace_vars_stmt(inst, self.replace_var_dict)
        if is_assign(inst):
            self.state.func_ir._definitions[lhs].append(inst.value)
            # if lhs changed, TODO: test
            if inst.target.name != lhs:
                self.state.func_ir._definitions[inst.target.name] = self.state.func_ir._definitions[lhs]
    def _run_assign(self, assign, label):
        """Transform a single ``ir.Assign``.

        Returns the replacement node list for the statement ([] removes
        it, [assign] keeps it), or delegates to ``_run_call`` for call
        expressions.  Several pandas/pyarrow getattr nodes are deleted
        here (with their definition re-registered) so that later call
        matching works without the node being typed.
        """
        lhs = assign.target.name
        rhs = assign.value
        if isinstance(rhs, ir.Expr):
            if rhs.op == 'call':
                return self._run_call(assign, label)
            # HACK: delete pd.DataFrame({}) nodes to avoid typing errors
            # TODO: remove when dictionaries are implemented and typing works
            if rhs.op == 'getattr':
                val_def = guard(get_definition, self.state.func_ir, rhs.value)
                if (isinstance(val_def, ir.Global) and val_def.value == pd
                        and rhs.attr in ('DataFrame', 'read_csv',
                                         'read_parquet', 'to_numeric')):
                    # TODO: implement to_numeric in typed pass?
                    # put back the definition removed earlier but remove node
                    # enables function matching without node in IR
                    self.state.func_ir._definitions[lhs].append(rhs)
                    return []
            if rhs.op == 'getattr':
                val_def = guard(get_definition, self.state.func_ir, rhs.value)
                if (isinstance(val_def, ir.Global) and val_def.value == np
                        and rhs.attr == 'fromfile'):
                    # put back the definition removed earlier but remove node
                    self.state.func_ir._definitions[lhs].append(rhs)
                    return []
            # HACK: delete pyarrow.parquet.read_table() to avoid typing errors
            if rhs.op == 'getattr' and rhs.attr == 'read_table':
                import pyarrow.parquet as pq
                val_def = guard(get_definition, self.state.func_ir, rhs.value)
                if isinstance(val_def, ir.Global) and val_def.value == pq:
                    # put back the definition removed earlier but remove node
                    self.state.func_ir._definitions[lhs].append(rhs)
                    return []
            if (rhs.op == 'getattr' and rhs.value.name in self.arrow_tables
                    and rhs.attr == 'to_pandas'):
                # put back the definition removed earlier but remove node
                self.state.func_ir._definitions[lhs].append(rhs)
                return []
            # if rhs.op in ('build_list', 'build_tuple'): TODO: test tuple
            if rhs.op == 'build_list':
                # if build_list items are constant, add the constant values
                # to the returned list type as metadata. This enables type
                # inference for calls like pd.merge() where the values
                # determine output dataframe type
                # TODO: add proper metadata to Numba types
                # XXX: when constants are used, all the uses of the list object
                # have to be checked since lists are mutable
                # Tests:
                # SDC_CONFIG_PIPELINE_SDC=0 python -m sdc.runtests sdc.tests.test_dataframe.TestDataFrame.test_df_drop_inplace2
                # SDC_CONFIG_PIPELINE_SDC=0 python -m sdc.runtests sdc.tests.test_dataframe.TestDataFrame.test_df_drop1
                try:
                    vals = tuple(find_const(self.state.func_ir, v) for v in rhs.items)
                    # a = ['A', 'B'] ->
                    # tmp = ['A', 'B']
                    # a = add_consts_to_type(tmp, 'A', 'B')
                    vals_expr = ", ".join("'{}'".format(c) if isinstance(c, str) else "{}".format(c) for c in vals)
                    func_text = "def _build_f(a):\n"
                    func_text += "  return sdc.hiframes.api.add_consts_to_type(a, {})\n".format(vals_expr)
                    loc_vars = {}
                    exec(func_text, {'sdc': sdc}, loc_vars)
                    _build_f = loc_vars['_build_f']
                    target = assign.target
                    tmp_target = ir.Var(
                        target.scope, mk_unique_var(target.name), rhs.loc)
                    tmp_assign = ir.Assign(rhs, tmp_target, rhs.loc)
                    return self._replace_func(
                        _build_f, (tmp_target,), pre_nodes=[tmp_assign])
                except numba.ir_utils.GuardException:
                    # not all items are constants — leave the list as-is
                    pass
            if rhs.op == 'make_function':
                # HACK make globals availabe for typing in series.map()
                rhs.globals = self.state.func_ir.func_id.func.__globals__
        # pass pivot values to df.pivot_table() calls using a meta
        # variable passed as argument. The meta variable's type
        # is set to MetaType with pivot values baked in.
        pivot_key = lhs + ":pivot"
        if pivot_key in self.state.locals:
            pivot_values = self.state.locals.pop(pivot_key)
            # put back the definition removed earlier
            self.state.func_ir._definitions[lhs].append(rhs)
            pivot_call = guard(get_definition, self.state.func_ir, lhs)
            assert pivot_call is not None
            meta_var = ir.Var(
                assign.target.scope, mk_unique_var('pivot_meta'), rhs.loc)
            meta_assign = ir.Assign(
                ir.Const(0, rhs.loc), meta_var, rhs.loc)
            self._working_body.insert(0, meta_assign)
            pivot_call.kws = list(pivot_call.kws)
            pivot_call.kws.append(('_pivot_values', meta_var))
            self.state.locals[meta_var.name] = sdc.hiframes.api.MetaType(pivot_values)
        # handle copies lhs = f
        if isinstance(rhs, ir.Var) and rhs.name in self.df_vars:
            self.df_vars[lhs] = self.df_vars[rhs.name]
        if isinstance(rhs, ir.Var) and rhs.name in self.df_labels:
            self.df_labels[lhs] = self.df_labels[rhs.name]
        if isinstance(rhs, ir.Var) and rhs.name in self.arrow_tables:
            self.arrow_tables[lhs] = self.arrow_tables[rhs.name]
            # enables function matching without node in IR
            self.state.func_ir._definitions[lhs].append(rhs)
            return []
        return [assign]
    def _run_call(self, assign, label):
        """handle calls and return new nodes if needed

        Resolves the called function to a (name, module) pair and
        dispatches to the matching handler; unrecognized calls are
        returned unchanged as [assign].
        """
        lhs = assign.target
        rhs = assign.value
        func_name = ""
        func_mod = ""
        fdef = guard(find_callname, self.state.func_ir, rhs)
        if fdef is None:
            # could be make_function from list comprehension which is ok
            func_def = guard(get_definition, self.state.func_ir, rhs.func)
            if isinstance(func_def, ir.Expr) and func_def.op == 'make_function':
                return [assign]
            warnings.warn(
                "function call couldn't be found for initial analysis")
            return [assign]
        else:
            func_name, func_mod = fdef
        # handling pd.DataFrame() here since input can be constant dictionary
        if fdef == ('DataFrame', 'pandas'):
            return self._handle_pd_DataFrame(assign, lhs, rhs, label)
        # handling pd.read_csv() here since input can have constants
        # like dictionaries for typing
        if fdef == ('read_csv', 'pandas'):
            return self._handle_pd_read_csv(assign, lhs, rhs, label)
        # match flatmap pd.Series(list(itertools.chain(*A))) and flatten
        if fdef == ('Series', 'pandas'):
            return self._handle_pd_Series(assign, lhs, rhs)
        if fdef == ('read_table', 'pyarrow.parquet'):
            return self._handle_pq_read_table(assign, lhs, rhs)
        # t.to_pandas() on a variable recorded by _handle_pq_read_table
        if (func_name == 'to_pandas' and isinstance(func_mod, ir.Var)
                and func_mod.name in self.arrow_tables):
            return self._handle_pq_to_pandas(assign, lhs, rhs, func_mod, label)
        if fdef == ('read_parquet', 'pandas'):
            return self._handle_pd_read_parquet(assign, lhs, rhs, label)
        if fdef == ('concat', 'pandas'):
            return self._handle_concat(assign, lhs, rhs, label)
        # if fdef == ('crosstab', 'pandas'):
        #     return self._handle_crosstab(lhs, rhs, label)
        if fdef == ('to_numeric', 'pandas'):
            return self._handle_pd_to_numeric(assign, lhs, rhs)
        # method call on a dataframe variable (pivot_table/isin/append/...)
        if isinstance(func_mod, ir.Var) and self._is_df_var(func_mod):
            return self._run_call_df(
                assign, lhs, rhs, func_mod, func_name, label)
        # NOTE(review): this check comes after the _is_df_var dispatch above,
        # so a drop() call on a dataframe variable reaches _run_call_df first
        # — confirm the intended ordering
        if func_name == 'drop' and isinstance(func_mod, ir.Var):
            # handle potential df.drop(inplace=True) here since it needs
            # variable replacement
            return self._handle_df_drop(assign, lhs, rhs, func_mod)
        # groupby aggregate
        # e.g. df.groupby('A')['B'].agg(lambda x: x.max()-x.min())
        if isinstance(func_mod, ir.Var) and self._is_df_obj_call(func_mod, 'groupby'):
            return self._handle_aggregate(lhs, rhs, func_mod, func_name, label)
        # rolling window
        # e.g. df.rolling(2).sum
        if isinstance(func_mod, ir.Var) and self._is_df_obj_call(func_mod, 'rolling'):
            return self._handle_rolling(lhs, rhs, func_mod, func_name, label)
        if fdef == ('fromfile', 'numpy'):
            return sdc.io.np_io._handle_np_fromfile(assign, lhs, rhs)
        return [assign]
def _run_call_df(self, assign, lhs, rhs, df_var, func_name, label):
# df.pivot_table()
if func_name == 'pivot_table':
return self._handle_df_pivot_table(lhs, rhs, df_var, label)
# df.isin()
if func_name == 'isin':
return self._handle_df_isin(lhs, rhs, df_var, label)
# df.append()
if func_name == 'append':
return self._handle_df_append(lhs, rhs, df_var, label)
# df.fillna()
if func_name == 'fillna':
return self._handle_df_fillna(lhs, rhs, df_var, label)
if func_name not in ('groupby', 'rolling'):
raise NotImplementedError(
"data frame function {} not implemented yet".format(func_name))
return [assign]
    def _handle_df_isin(self, lhs, rhs, df_var, label):
        """Transform df.isin(values) into per-column isin computations.

        'values' may be a dataframe, a constant dict (build_map), or a
        general iterable; columns missing from 'values' produce an
        all-False series. Creates the output dataframe and returns the
        generated nodes.
        """
        other = self._get_arg('isin', rhs.args, dict(rhs.kws), 0, 'values')
        other_colmap = {}
        df_col_map = self._get_df_cols(df_var)
        nodes = []
        df_case = False
        # dataframe case
        if self._is_df_var(other):
            df_case = True
            arg_df_map = self._get_df_cols(other)
            for cname in df_col_map:
                if cname in arg_df_map:
                    other_colmap[cname] = arg_df_map[cname]
        else:
            other_def = guard(get_definition, self.state.func_ir, other)
            # dict case
            if isinstance(other_def, ir.Expr) and other_def.op == 'build_map':
                for c, v in other_def.items:
                    cname = guard(find_const, self.state.func_ir, c)
                    if not isinstance(cname, str):
                        raise ValueError("dictionary argument to isin() should have constant keys")
                    other_colmap[cname] = v
                # HACK replace build_map to avoid inference errors
                other_def.op = 'build_list'
                other_def.items = [v[0] for v in other_def.items]
            else:
                # general iterable (e.g. list, set) case
                # TODO: handle passed in dict case (pass colname to func?)
                other_colmap = {c: other for c in df_col_map.keys()}
        out_df_map = {}
        def isin_func(A, B): return sdc.hiframes.api.df_isin(A, B)
        def isin_vals_func(A, B): return sdc.hiframes.api.df_isin_vals(A, B)
        # create array of False values used when other col not available
        def bool_arr_func(A): return sdc.hiframes.api.init_series(np.zeros(len(A), np.bool_))
        # use the first array of df to get len. TODO: check for empty df
        false_arr_args = [list(df_col_map.values())[0]]
        for cname, in_var in self.df_vars[df_var.name].items():
            if cname in other_colmap:
                if df_case:
                    func = isin_func
                else:
                    func = isin_vals_func
                other_col_var = other_colmap[cname]
                args = [in_var, other_col_var]
            else:
                # column not present in 'values': fill with False series
                func = bool_arr_func
                args = false_arr_args
            f_block = compile_to_numba_ir(func, {'sdc': sdc, 'np': np}).blocks.popitem()[1]
            replace_arg_nodes(f_block, args)
            # drop the return statements; last kept node's target is the result
            nodes += f_block.body[:-2]
            out_df_map[cname] = nodes[-1].target
        self._create_df(lhs.name, out_df_map, label)
        return nodes
def _handle_df_append(self, lhs, rhs, df_var, label):
other = self._get_arg('append', rhs.args, dict(rhs.kws), 0, 'other')
# only handles df or list of df input
# TODO: check for series/dict/list input
# TODO: enforce ignore_index=True?
# single df case
if self._is_df_var(other):
return self._handle_concat_df(lhs, [df_var, other], label)
# list of dfs
df_list = guard(get_definition, self.state.func_ir, other)
if len(df_list.items) > 0 and self._is_df_var(df_list.items[0]):
return self._handle_concat_df(lhs, [df_var] + df_list.items, label)
raise ValueError("invalid df.append() input. Only dataframe and list"
" of dataframes supported")
def _handle_df_fillna(self, lhs, rhs, df_var, label):
nodes = []
inplace_default = ir.Var(lhs.scope, mk_unique_var("fillna_default"), lhs.loc)
nodes.append(ir.Assign(ir.Const(False, lhs.loc), inplace_default, lhs.loc))
val_var = self._get_arg('fillna', rhs.args, dict(rhs.kws), 0, 'value')
inplace_var = self._get_arg('fillna', rhs.args, dict(rhs.kws), 3, 'inplace', default=inplace_default)
def _fillna_func(A, val, inplace): return A.fillna(val, inplace=inplace)
out_col_map = {}
for cname, in_var in self._get_df_cols(df_var).items():
f_block = compile_to_numba_ir(_fillna_func, {}).blocks.popitem()[1]
replace_arg_nodes(f_block, [in_var, val_var, inplace_var])
nodes += f_block.body[:-2]
out_col_map[cname] = nodes[-1].target
# create output df if not inplace
if (inplace_var.name == inplace_default.name
or guard(find_const, self.state.func_ir, inplace_var) == False):
self._create_df(lhs.name, out_col_map, label)
return nodes
    def _handle_df_dropna(self, lhs, rhs, df_var, label):
        """Transform df.dropna() into one sdc.hiframes.api.dropna() call
        over all columns at once.

        For constant inplace=True, the dropped arrays are assigned back
        to the original column variables; otherwise a new output
        dataframe is created.
        """
        nodes = []
        # a False constant used as default when 'inplace' isn't passed
        inplace_default = ir.Var(lhs.scope, mk_unique_var("dropna_default"), lhs.loc)
        nodes.append(ir.Assign(ir.Const(False, lhs.loc), inplace_default, lhs.loc))
        inplace_var = self._get_arg('dropna', rhs.args, dict(rhs.kws), 4, 'inplace', default=inplace_default)
        col_names = self._get_df_col_names(df_var)
        col_vars = self._get_df_col_vars(df_var)
        # fresh identifier-safe names ('.' is not valid in identifiers)
        arg_names = ", ".join([mk_unique_var(cname).replace('.', '_') for cname in col_names])
        out_names = ", ".join([mk_unique_var(cname).replace('.', '_') for cname in col_names])
        func_text = "def _dropna_imp({}, inplace):\n".format(arg_names)
        func_text += "  ({},) = sdc.hiframes.api.dropna(({},), inplace)\n".format(
            out_names, arg_names)
        loc_vars = {}
        exec(func_text, {'sdc': sdc}, loc_vars)
        _dropna_imp = loc_vars['_dropna_imp']
        f_block = compile_to_numba_ir(_dropna_imp, {'sdc': sdc}).blocks.popitem()[1]
        replace_arg_nodes(f_block, col_vars + [inplace_var])
        # drop the tuple-unpack tail; the preceding len(col_names) nodes
        # assign the output columns in order
        nodes += f_block.body[:-3]
        # extract column vars from output
        out_col_map = {}
        for i, cname in enumerate(col_names):
            out_col_map[cname] = nodes[-len(col_names) + i].target
        # create output df if not inplace
        if (inplace_var.name == inplace_default.name
                or guard(find_const, self.state.func_ir, inplace_var) == False):
            self._create_df(lhs.name, out_col_map, label)
        else:
            # assign back to column vars for inplace case
            for i in range(len(col_vars)):
                c_var = col_vars[i]
                dropped_var = list(out_col_map.values())[i]
                nodes.append(ir.Assign(dropped_var, c_var, lhs.loc))
        return nodes
    def _handle_df_drop(self, assign, lhs, rhs, df_var):
        """handle possible df.drop(inplace=True)
        lhs = A.drop(inplace=True) -> A1, lhs = drop_inplace(...)
        replace A with A1
        """
        kws = dict(rhs.kws)
        inplace_var = self._get_arg('drop', rhs.args, kws, 5, 'inplace', '')
        inplace = guard(find_const, self.state.func_ir, inplace_var)
        if inplace is not None and inplace:
            # TODO: make sure call post dominates df_var definition or df_var
            # is not used in other code paths
            # replace func variable with drop_inplace
            f_block = compile_to_numba_ir(
                lambda: sdc.hiframes.api.drop_inplace,
                {'sdc': sdc}).blocks.popitem()[1]
            nodes = f_block.body[:-2]
            new_func_var = nodes[-1].target
            rhs.func = new_func_var
            rhs.args.insert(0, df_var)
            # new tuple return
            ret_tup = ir.Var(lhs.scope, mk_unique_var('drop_ret'), lhs.loc)
            assign.target = ret_tup
            nodes.append(assign)
            # drop_inplace returns a pair; unpack (new_df, result) with
            # static_getitem on indices 0 and 1
            new_df_var = ir.Var(df_var.scope, mk_unique_var(df_var.name), df_var.loc)
            zero_var = ir.Var(df_var.scope, mk_unique_var('zero'), df_var.loc)
            one_var = ir.Var(df_var.scope, mk_unique_var('one'), df_var.loc)
            nodes.append(ir.Assign(ir.Const(0, lhs.loc), zero_var, lhs.loc))
            nodes.append(ir.Assign(ir.Const(1, lhs.loc), one_var, lhs.loc))
            getitem0 = ir.Expr.static_getitem(ret_tup, 0, zero_var, lhs.loc)
            nodes.append(ir.Assign(getitem0, new_df_var, lhs.loc))
            getitem1 = ir.Expr.static_getitem(ret_tup, 1, one_var, lhs.loc)
            nodes.append(ir.Assign(getitem1, lhs, lhs.loc))
            # replace old variable with new one
            self.replace_var_dict[df_var.name] = new_df_var
            return nodes
        return [assign]
        # NOTE(review): everything below is unreachable after the return
        # above, and it references an undefined name 'label' — looks like
        # leftover code from an earlier revision; confirm and remove.
        # df.drop(labels=None, axis=0, index=None, columns=None, level=None,
        #         inplace=False, errors='raise')
        labels_var = self._get_arg('drop', rhs.args, kws, 0, 'labels', '')
        axis_var = self._get_arg('drop', rhs.args, kws, 1, 'axis', '')
        labels = self._get_str_or_list(labels_var, default='')
        axis = guard(find_const, self.state.func_ir, axis_var)
        if labels != '' and axis is not None:
            if axis != 1:
                raise ValueError("only dropping columns (axis=1) supported")
            columns = labels
        else:
            columns_var = self._get_arg('drop', rhs.args, kws, 3, 'columns', '')
            err_msg = ("columns argument (constant string list) "
                       "or labels and axis required")
            columns = self._get_str_or_list(columns_var, err_msg=err_msg)
        inplace_var = self._get_arg('drop', rhs.args, kws, 5, 'inplace', '')
        inplace = guard(find_const, self.state.func_ir, inplace_var)
        if inplace is not None and inplace:
            df_label = self.df_labels[df_var.name]
            cfg = compute_cfg_from_blocks(self.state.func_ir.blocks)
            # dropping columns inplace possible only when it dominates the df
            # creation to keep schema consistent
            if label not in cfg.backbone() and label not in cfg.post_dominators()[df_label]:
                raise ValueError("dropping dataframe columns inplace inside "
                                 "conditionals and loops not supported yet")
            # TODO: rename df name
            # TODO: support dropping columns of input dfs (reflection)
            for cname in columns:
                self.df_vars[df_var.name].pop(cname)
            return []
        in_df_map = self._get_df_cols(df_var)
        nodes = []
        out_df_map = {c: _gen_arr_copy(in_df_map[c], nodes)
                      for c in in_df_map.keys() if c not in columns}
        self._create_df(lhs.name, out_df_map, label)
        return nodes
def _get_reverse_copies(self, body):
for inst in body:
if isinstance(inst, ir.Assign) and isinstance(inst.value, ir.Var):
self.reverse_copies[inst.value.name] = inst.target.name
return
    def _handle_pd_DataFrame(self, assign, lhs, rhs, label):
        """transform pd.DataFrame({'A': A}) call

        The data argument must resolve to a constant dict (build_map);
        column arrays are fixed up and passed to init_dataframe().
        """
        kws = dict(rhs.kws)
        if 'data' in kws:
            data = kws['data']
            if len(rhs.args) != 0:  # pragma: no cover
                raise ValueError(
                    "only data argument suppoted in pd.DataFrame()")
        else:
            if len(rhs.args) != 1:  # pragma: no cover
                raise ValueError(
                    "data argument in pd.DataFrame() expected")
            data = rhs.args[0]
        arg_def = guard(get_definition, self.state.func_ir, data)
        if (not isinstance(arg_def, ir.Expr)
                or arg_def.op != 'build_map'):  # pragma: no cover
            raise ValueError(
                "Invalid DataFrame() arguments (constant dict of columns expected)")
        nodes, items = self._fix_df_arrays(arg_def.items)
        # HACK replace build_map to avoid inference errors
        arg_def.op = 'build_list'
        arg_def.items = [v[0] for v in arg_def.items]
        # generate an init_dataframe wrapper taking each data array and
        # column-name variable as a separate argument
        n_cols = len(items)
        data_args = ", ".join('data{}'.format(i) for i in range(n_cols))
        col_args = ", ".join('col{}'.format(i) for i in range(n_cols))
        func_text = "def _init_df({}, index, {}):\n".format(data_args, col_args)
        func_text += "  return sdc.hiframes.pd_dataframe_ext.init_dataframe({}, index, {})\n".format(
            data_args, col_args)
        loc_vars = {}
        exec(func_text, {'sdc': sdc}, loc_vars)
        _init_df = loc_vars['_init_df']
        # TODO: support index var
        index = ir.Var(lhs.scope, mk_unique_var('df_index_none'), lhs.loc)
        nodes.append(ir.Assign(ir.Const(None, lhs.loc), index, lhs.loc))
        data_vars = [a[1] for a in items]
        col_vars = [a[0] for a in items]
        args = data_vars + [index] + col_vars
        return self._replace_func(_init_df, args,
                                  pre_nodes=nodes
                                  )
        # df_nodes, col_map = self._process_df_build_map(items)
        # nodes += df_nodes
        # self._create_df(lhs.name, col_map, label)
        # # remove DataFrame call
        # return nodes
def _handle_pd_read_csv(self, assign, lhs, rhs, label):
"""transform pd.read_csv(names=[A], dtype={'A': np.int32}) call
"""
# schema: pd.read_csv(filepath_or_buffer, sep=',', delimiter=None,
# header='infer', names=None, index_col=None, usecols=None,
# squeeze=False, prefix=None, mangle_dupe_cols=True, dtype=None,
# engine=None, converters=None, true_values=None, false_values=None,
# skipinitialspace=False, skiprows=None, nrows=None, na_values=None,
# keep_default_na=True, na_filter=True, verbose=False,
# skip_blank_lines=True, parse_dates=False,
# infer_datetime_format=False, keep_date_col=False, date_parser=None,
# dayfirst=False, iterator=False, chunksize=None, compression='infer',
# thousands=None, decimal=b'.', lineterminator=None, quotechar='"',
# quoting=0, escapechar=None, comment=None, encoding=None,
# dialect=None, tupleize_cols=None, error_bad_lines=True,
# warn_bad_lines=True, skipfooter=0, doublequote=True,
# delim_whitespace=False, low_memory=True, memory_map=False,
# float_precision=None)
kws = dict(rhs.kws)
fname = self._get_arg('read_csv', rhs.args, kws, 0, 'filepath_or_buffer')
sep = self._get_str_arg('read_csv', rhs.args, kws, 1, 'sep', ',')
sep = self._get_str_arg('read_csv', rhs.args, kws, 2, 'delimiter', sep)
# TODO: header arg
names_var = self._get_arg('read_csv', rhs.args, kws, 4, 'names', '')
dtype_var = self._get_arg('read_csv', rhs.args, kws, 10, 'dtype', '')
skiprows = self._get_str_arg('read_csv', rhs.args, kws, 16, 'skiprows', 0)
col_names = self._get_str_or_list(names_var, default=0)
if dtype_var is '':
# infer column names and types from constant filename
fname_const = guard(find_const, self.state.func_ir, fname)
if fname_const is None:
raise ValueError("pd.read_csv() requires explicit type"
"annotation using 'dtype' if filename is not constant")
rows_to_read = 100 # TODO: tune this
df = pd.read_csv(fname_const, nrows=rows_to_read, skiprows=skiprows)
# TODO: string_array, categorical, etc.
dtypes = [types.Array(numba.typeof(d).dtype, 1, 'C')
for d in df.dtypes.values]
cols = df.columns.to_list()
# overwrite column names like Pandas if explicitly provided
if col_names != 0:
cols[-len(col_names):] = col_names
else:
# a row is used for names if not provided
skiprows += 1
col_names = cols
dtype_map = {c: d for c, d in zip(col_names, dtypes)}
else:
dtype_map = guard(get_definition, self.state.func_ir, dtype_var)
if (not isinstance(dtype_map, ir.Expr)
or dtype_map.op != 'build_map'): # pragma: no cover
# try single type for all columns case
dtype_map = self._get_const_dtype(dtype_var)
else:
new_dtype_map = {}
for n_var, t_var in dtype_map.items:
# find constant column name
c = guard(find_const, self.state.func_ir, n_var)
if c is None: # pragma: no cover
raise ValueError("dtype column names should be constant")
new_dtype_map[c] = self._get_const_dtype(t_var)
# HACK replace build_map to avoid inference errors
dtype_map.op = 'build_list'
dtype_map.items = [v[0] for v in dtype_map.items]
dtype_map = new_dtype_map
if col_names == 0:
raise ValueError("pd.read_csv() names should be constant list")
usecols_var = self._get_arg('read_csv', rhs.args, kws, 6, 'usecols', '')
usecols = list(range(len(col_names)))
if usecols_var != '':
err_msg = "pd.read_csv() usecols should be constant list of ints"
usecols = self._get_str_or_list(usecols_var, err_msg=err_msg, typ=int)
# TODO: support other args
date_cols = []
if 'parse_dates' in kws:
err_msg = "pd.read_csv() parse_dates should be constant list"
date_cols = self._get_str_or_list(kws['parse_dates'], err_msg=err_msg, typ=int)
columns, data_arrs, out_types = self._get_csv_col_info(
dtype_map, date_cols, col_names, lhs)
nodes = [csv_ext.CsvReader(
fname, lhs.name, sep, columns, data_arrs, out_types, usecols,
lhs.loc, skiprows)]
n_cols = len(columns)
data_args = ", ".join('data{}'.format(i) for i in range(n_cols))
func_text = "def _init_df({}):\n".format(data_args)
func_text += " return sdc.hiframes.pd_dataframe_ext.init_dataframe({}, None, {})\n".format(
data_args, ", ".join("'{}'".format(c) for c in columns))
loc_vars = {}
exec(func_text, {'sdc': sdc}, loc_vars)
_init_df = loc_vars['_init_df']
f_block = compile_to_numba_ir(
_init_df, {'sdc': sdc}).blocks.popitem()[1]
replace_arg_nodes(f_block, data_arrs)
nodes += f_block.body[:-2]
nodes[-1].target = lhs
return nodes
def _get_csv_col_info(self, dtype_map, date_cols, col_names, lhs):
if isinstance(dtype_map, types.Type):
typ = dtype_map
data_arrs = [ir.Var(lhs.scope, mk_unique_var(cname), lhs.loc)
for cname in col_names]
return col_names, data_arrs, [typ] * len(col_names)
columns = []
data_arrs = []
out_types = []
for i, (col_name, typ) in enumerate(dtype_map.items()):
columns.append(col_name)
# get array dtype
if i in date_cols:
typ = types.Array(types.NPDatetime('ns'), 1, 'C')
out_types.append(typ)
# output array variable
data_arrs.append(
ir.Var(lhs.scope, mk_unique_var(col_name), lhs.loc))
return columns, data_arrs, out_types
def _get_const_dtype(self, dtype_var):
dtype_def = guard(get_definition, self.state.func_ir, dtype_var)
if isinstance(dtype_def, ir.Const) and isinstance(dtype_def.value, str):
typ_name = dtype_def.value
if typ_name == 'str':
return string_array_type
typ_name = 'int64' if typ_name == 'int' else typ_name
typ_name = 'float64' if typ_name == 'float' else typ_name
typ = getattr(types, typ_name)
typ = types.Array(typ, 1, 'C')
return typ
# str case
if isinstance(dtype_def, ir.Global) and dtype_def.value == str:
return string_array_type
# categorical case
if isinstance(dtype_def, ir.Expr) and dtype_def.op == 'call':
if (not guard(find_callname, self.state.func_ir, dtype_def)
== ('category', 'pandas.core.dtypes.dtypes')):
raise ValueError("pd.read_csv() invalid dtype "
"(built using a call but not Categorical)")
cats_var = self._get_arg('CategoricalDtype', dtype_def.args,
dict(dtype_def.kws), 0, 'categories')
err_msg = "categories should be constant list"
cats = self._get_str_or_list(cats_var, list_only=True, err_msg=err_msg)
typ = PDCategoricalDtype(cats)
return CategoricalArray(typ)
if not isinstance(dtype_def, ir.Expr) or dtype_def.op != 'getattr':
raise ValueError("pd.read_csv() invalid dtype")
glob_def = guard(get_definition, self.state.func_ir, dtype_def.value)
if not isinstance(glob_def, ir.Global) or glob_def.value != np:
raise ValueError("pd.read_csv() invalid dtype")
# TODO: extend to other types like string and date, check error
typ_name = dtype_def.attr
typ_name = 'int64' if typ_name == 'int' else typ_name
typ_name = 'float64' if typ_name == 'float' else typ_name
typ = getattr(types, typ_name)
typ = types.Array(typ, 1, 'C')
return typ
    def _handle_pd_Series(self, assign, lhs, rhs):
        """transform pd.Series(A) call

        Only the flatmap pattern pd.Series(list(itertools.chain(*A))) is
        rewritten here (into flatten_to_series()); all other pd.Series()
        calls are left in place for the typed pass.
        """
        kws = dict(rhs.kws)
        data = self._get_arg('pd.Series', rhs.args, kws, 0, 'data')
        # match flatmap pd.Series(list(itertools.chain(*A))) and flatten
        data_def = guard(get_definition, self.state.func_ir, data)
        if (is_call(data_def) and guard(find_callname, self.state.func_ir, data_def)
                == ('list', 'builtins') and len(data_def.args) == 1):
            arg_def = guard(get_definition, self.state.func_ir, data_def.args[0])
            if (is_call(arg_def) and guard(find_callname, self.state.func_ir,
                                           arg_def) == ('chain', 'itertools')):
                in_data = arg_def.vararg
                arg_def.vararg = None  # avoid typing error
                return self._replace_func(
                    lambda l: sdc.hiframes.api.flatten_to_series(l),
                    [in_data]
                )
        # pd.Series() is handled in typed pass now
        # return self._replace_func(lambda arr: sdc.hiframes.api.init_series(
        #     sdc.hiframes.api.fix_df_array(arr)),
        #     [data])
        return [assign]
def _handle_pd_to_numeric(self, assign, lhs, rhs):
"""transform pd.to_numeric(A, errors='coerce') call here since dtype
has to be specified in locals and applied
"""
kws = dict(rhs.kws)
if 'errors' not in kws or guard(find_const, self.state.func_ir, kws['errors']) != 'coerce':
raise ValueError("pd.to_numeric() only supports errors='coerce'")
if lhs.name not in self.reverse_copies or (self.reverse_copies[lhs.name]) not in self.state.locals:
raise ValueError("pd.to_numeric() requires annotation of output type")
typ = self.state.locals.pop(self.reverse_copies[lhs.name])
dtype = numba.numpy_support.as_dtype(typ.dtype)
arg = rhs.args[0]
return self._replace_func(
lambda arr: sdc.hiframes.api.to_numeric(arr, dtype),
[arg], extra_globals={'dtype': dtype})
def _handle_pq_read_table(self, assign, lhs, rhs):
if len(rhs.args) != 1: # pragma: no cover
raise ValueError("Invalid read_table() arguments")
# put back the definition removed earlier but remove node
self.state.func_ir._definitions[lhs.name].append(rhs)
self.arrow_tables[lhs.name] = rhs.args[0]
return []
def _handle_pq_to_pandas(self, assign, lhs, rhs, t_var, label):
return self._gen_parquet_read(self.arrow_tables[t_var.name], lhs, label)
def _gen_parquet_read(self, fname, lhs, label):
columns, data_arrs, nodes = self.pq_handler.gen_parquet_read(
fname, lhs)
n_cols = len(columns)
data_args = ", ".join('data{}'.format(i) for i in range(n_cols))
func_text = "def _init_df({}):\n".format(data_args)
func_text += " return sdc.hiframes.pd_dataframe_ext.init_dataframe({}, None, {})\n".format(
data_args, ", ".join("'{}'".format(c) for c in columns))
loc_vars = {}
exec(func_text, {'sdc': sdc}, loc_vars)
_init_df = loc_vars['_init_df']
return self._replace_func(_init_df, data_arrs, pre_nodes=nodes)
def _handle_pd_read_parquet(self, assign, lhs, rhs, label):
fname = rhs.args[0]
return self._gen_parquet_read(fname, lhs, label)
def _handle_concat(self, assign, lhs, rhs, label):
# converting build_list to build_tuple before type inference to avoid
# errors
kws = dict(rhs.kws)
objs_arg = self._get_arg('concat', rhs.args, kws, 0, 'objs')
df_list = guard(get_definition, self.state.func_ir, objs_arg)
if not isinstance(df_list, ir.Expr) or not (df_list.op
in ['build_tuple', 'build_list']):
raise ValueError("pd.concat input should be constant list or tuple")
# XXX convert build_list to build_tuple since Numba doesn't handle list of
# arrays for np.concatenate()
if df_list.op == 'build_list':
df_list.op = 'build_tuple'
if len(df_list.items) == 0:
# copied error from pandas
raise ValueError("No objects to concatenate")
return [assign]
    def _handle_concat_df(self, lhs, df_list, label):
        """Concatenate the dataframes in df_list column-wise into lhs.

        Output columns are the sorted union of all input column names;
        an input missing a column contributes a full-NaN array of
        matching length.
        """
        # TODO: handle non-numerical (e.g. string, datetime) columns
        nodes = []
        # get output column names
        all_colnames = []
        for df in df_list:
            all_colnames.extend(self._get_df_col_names(df))
        # TODO: verify how Pandas sorts column names
        all_colnames = sorted(set(all_colnames))
        # generate a concat call for each output column
        # TODO: support non-numericals like string
        def gen_nan_func(A): return np.full(len(A), np.nan)
        # gen concat function
        arg_names = ", ".join(['in{}'.format(i) for i in range(len(df_list))])
        func_text = "def _concat_imp({}):\n".format(arg_names)
        func_text += "  return sdc.hiframes.api.init_series(sdc.hiframes.api.concat(({})))\n".format(
            arg_names)
        loc_vars = {}
        exec(func_text, {'sdc': sdc}, loc_vars)
        _concat_imp = loc_vars['_concat_imp']
        done_cols = {}
        for cname in all_colnames:
            # arguments to the generated function
            args = []
            # get input columns
            for df in df_list:
                df_col_map = self._get_df_cols(df)
                # generate full NaN column
                if cname not in df_col_map:
                    # use a df column just for len()
                    len_arr = list(df_col_map.values())[0]
                    f_block = compile_to_numba_ir(gen_nan_func,
                                                  {'sdc': sdc, 'np': np}).blocks.popitem()[1]
                    replace_arg_nodes(f_block, [len_arr])
                    nodes += f_block.body[:-2]
                    args.append(nodes[-1].target)
                else:
                    args.append(df_col_map[cname])
            f_block = compile_to_numba_ir(_concat_imp,
                                          {'sdc': sdc, 'np': np}).blocks.popitem()[1]
            replace_arg_nodes(f_block, args)
            # drop the returns; last kept node's target is the output column
            nodes += f_block.body[:-2]
            done_cols[cname] = nodes[-1].target
        self._create_df(lhs.name, done_cols, label)
        return nodes
def _handle_concat_series(self, lhs, rhs):
# defer to typed pass since the type might be non-numerical
def f(arr_list): # pragma: no cover
return sdc.hiframes.api.init_series(sdc.hiframes.api.concat(arr_list))
return self._replace_func(f, rhs.args)
def _fix_df_arrays(self, items_list):
nodes = []
new_list = []
for item in items_list:
col_varname = item[0]
col_arr = item[1]
# fix list(multi-dim arrays) (packing images)
# FIXME: does this break for list(other things)?
col_arr = self._fix_df_list_of_array(col_arr)
def f(arr): # pragma: no cover
df_arr = sdc.hiframes.api.fix_df_array(arr)
f_block = compile_to_numba_ir(
f, {'sdc': sdc}).blocks.popitem()[1]
replace_arg_nodes(f_block, [col_arr])
nodes += f_block.body[:-3] # remove none return
new_col_arr = nodes[-1].target
new_list.append((col_varname, new_col_arr))
return nodes, new_list
def _fix_df_list_of_array(self, col_arr):
list_call = guard(get_definition, self.state.func_ir, col_arr)
if guard(find_callname, self.state.func_ir, list_call) == ('list', 'builtins'):
return list_call.args[0]
return col_arr
def _process_df_build_map(self, items_list):
df_cols = {}
nodes = []
for item in items_list:
col_var = item[0]
if isinstance(col_var, str):
col_name = col_var
else:
col_name = get_constant(self.state.func_ir, col_var)
if col_name is NOT_CONSTANT: # pragma: no cover
raise ValueError(
"data frame column names should be constant")
# cast to series type
def f(arr): # pragma: no cover
df_arr = sdc.hiframes.api.init_series(arr)
f_block = compile_to_numba_ir(
f, {'sdc': sdc}).blocks.popitem()[1]
replace_arg_nodes(f_block, [item[1]])
nodes += f_block.body[:-3] # remove none return
new_col_arr = nodes[-1].target
df_cols[col_name] = new_col_arr
return nodes, df_cols
    def _get_func_output_typ(self, col_var, func, wrapper_func, label):
        """Infer the output type of applying UDF 'func' to col_var.

        Builds a dummy IR from all blocks preceding the current one plus
        the wrapper function (with the UDF inlined at the 'map_func'
        sentinel), then runs Numba type inference on it.
        """
        # stich together all blocks before the current block for type inference
        # XXX: does control flow affect type inference in Numba?
        dummy_ir = self.state.func_ir.copy()
        dummy_ir.blocks[label].body.append(ir.Return(0, col_var.loc))
        topo_order = find_topo_order(dummy_ir.blocks)
        all_body = []
        for l in topo_order:
            if l == label:
                break
            all_body += dummy_ir.blocks[l].body
        # add nodes created for current block so far
        all_body += self._working_body
        dummy_ir.blocks = {0: ir.Block(col_var.scope, col_var.loc)}
        dummy_ir.blocks[0].body = all_body
        _globals = self.state.func_ir.func_id.func.__globals__
        _globals.update({'sdc': sdc, 'numba': numba, 'np': np})
        f_ir = compile_to_numba_ir(wrapper_func, {'sdc': sdc})
        # fix definitions to enable finding sentinel
        f_ir._definitions = build_definitions(f_ir.blocks)
        first_label = min(f_ir.blocks)
        # inline the UDF at the 'map_func' sentinel call site
        for i, stmt in enumerate(f_ir.blocks[first_label].body):
            if (isinstance(stmt, ir.Assign)
                    and isinstance(stmt.value, ir.Expr)
                    and stmt.value.op == 'call'):
                fdef = guard(get_definition, f_ir, stmt.value.func)
                if isinstance(fdef, ir.Global) and fdef.name == 'map_func':
                    update_globals(func, _globals)
                    inline_closure_call(f_ir, _globals, f_ir.blocks[first_label], i, func)
                    break
        f_ir.blocks = ir_utils.simplify_CFG(f_ir.blocks)
        f_topo_order = find_topo_order(f_ir.blocks)
        assert isinstance(f_ir.blocks[f_topo_order[-1]].body[-1], ir.Return)
        output_var = f_ir.blocks[f_topo_order[-1]].body[-1].value
        first_label = f_topo_order[0]
        replace_arg_nodes(f_ir.blocks[first_label], [col_var])
        assert first_label != topo_order[0]  # TODO: check for 0 and handle
        # append the wrapper blocks after the stitched prefix block
        dummy_ir.blocks.update(f_ir.blocks)
        dummy_ir.blocks[0].body.append(ir.Jump(first_label, col_var.loc))
        # dead df code can cause type inference issues
        # TODO: remove this
        hiframes.api.enable_hiframes_remove_dead = False
        while remove_dead(dummy_ir.blocks, dummy_ir.arg_names, dummy_ir):
            pass
        hiframes.api.enable_hiframes_remove_dead = True
        # run type inference on the dummy IR
        # NOTE(review): this local 'warnings' shadows the module-level
        # warnings import for the rest of this function
        warnings = numba.errors.WarningsFixer(numba.errors.NumbaWarning)
        infer = numba.typeinfer.TypeInferer(self.state.typingctx, dummy_ir, warnings)
        for index, (name, ty) in enumerate(zip(dummy_ir.arg_names, self.state.args)):
            infer.seed_argument(name, index, ty)
        infer.build_constraint()
        infer.propagate()
        out_tp = infer.typevars[output_var.name].getone()
        # typemap, restype, calltypes = numba.typed_passes.type_inference_stage(self.state.typingctx, dummy_ir, self.state.args, None)
        return out_tp
def _is_df_obj_call(self, call_var, obj_name):
"""determines whether variable is coming from groupby() or groupby()[],
rolling(), rolling()[]
"""
var_def = guard(get_definition, self.state.func_ir, call_var)
# groupby()['B'] case
if (isinstance(var_def, ir.Expr)
and var_def.op in ['getitem', 'static_getitem']):
return self._is_df_obj_call(var_def.value, obj_name)
# groupby() called on column or df
call_def = guard(find_callname, self.state.func_ir, var_def)
if (call_def is not None and call_def[0] == obj_name
and isinstance(call_def[1], ir.Var)
and self._is_df_var(call_def[1])):
return True
return False
    def _handle_df_pivot_table(self, lhs, rhs, df_var, label):
        """Transform df.pivot_table(values, index, columns, aggfunc)
        into an Aggregate node whose output columns are the
        user-annotated pivot values.
        """
        # TODO: multiple keys (index columns)
        kws = dict(rhs.kws)
        values_arg = self._get_str_arg('pivot_table', rhs.args, kws, 0, 'values')
        index_arg = self._get_str_arg('pivot_table', rhs.args, kws, 1, 'index')
        columns_arg = self._get_str_arg('pivot_table', rhs.args, kws, 2, 'columns')
        agg_func_arg = self._get_str_arg('pivot_table', rhs.args, kws, 3, 'aggfunc', 'mean')
        agg_func = get_agg_func(self.state.func_ir, agg_func_arg, rhs)
        in_vars = {values_arg: self.df_vars[df_var.name][values_arg]}
        # get output type
        agg_func_dis = numba.njit(agg_func)
        agg_gb_var = ir.Var(lhs.scope, mk_unique_var("agg_gb"), lhs.loc)
        nodes = [ir.Assign(ir.Global("agg_gb", agg_func_dis, lhs.loc), agg_gb_var, lhs.loc)]
        # infer the output series type by applying agg_typer to the values
        # column; only the generated nodes (not the return) are kept
        def to_arr(a, _agg_f):
            b = sdc.hiframes.api.to_arr_from_series(a)
            res = sdc.hiframes.api.init_series(sdc.hiframes.api.agg_typer(b, _agg_f))
        f_block = compile_to_numba_ir(to_arr, {'sdc': sdc, 'np': np}).blocks.popitem()[1]
        replace_arg_nodes(f_block, [in_vars[values_arg], agg_gb_var])
        nodes += f_block.body[:-3]  # remove none return
        out_types = {values_arg: nodes[-1].target}
        pivot_values = self._get_pivot_values(lhs.name)
        # one fresh output column variable per pivot value
        df_col_map = ({col: ir.Var(lhs.scope, mk_unique_var(col), lhs.loc)
                       for col in pivot_values})
        # df_col_map = ({col: ir.Var(lhs.scope, mk_unique_var(col), lhs.loc)
        #                for col in [values_arg]})
        out_df = df_col_map.copy()
        self._create_df(lhs.name, out_df, label)
        pivot_arr = self.df_vars[df_var.name][columns_arg]
        agg_node = aggregate.Aggregate(
            lhs.name, df_var.name, [index_arg], None, df_col_map,
            in_vars, [self.df_vars[df_var.name][index_arg]],
            agg_func, out_types, lhs.loc, pivot_arr, pivot_values)
        nodes.append(agg_node)
        return nodes
def _get_pivot_values(self, varname):
if varname not in self.reverse_copies or (self.reverse_copies[varname] + ':pivot') not in self.state.locals:
raise ValueError("pivot_table() requires annotation of pivot values")
new_name = self.reverse_copies[varname]
values = self.state.locals.pop(new_name + ":pivot")
return values
def _get_str_arg(self, f_name, args, kws, arg_no, arg_name, default=None,
err_msg=None):
arg = None
if len(args) > arg_no:
arg = guard(find_const, self.state.func_ir, args[arg_no])
elif arg_name in kws:
arg = guard(find_const, self.state.func_ir, kws[arg_name])
if arg is None:
if default is not None:
return default
if err_msg is None:
err_msg = ("{} requires '{}' argument as a "
"constant string").format(f_name, arg_name)
raise ValueError(err_msg)
return arg
def _get_arg(self, f_name, args, kws, arg_no, arg_name, default=None,
err_msg=None):
arg = None
if len(args) > arg_no:
arg = args[arg_no]
elif arg_name in kws:
arg = kws[arg_name]
if arg is None:
if default is not None:
return default
if err_msg is None:
err_msg = "{} requires '{}' argument".format(f_name, arg_name)
raise ValueError(err_msg)
return arg
def _handle_crosstab(self, lhs, rhs, label):
    """Lower ``pd.crosstab(index, columns)`` into an Aggregate IR node.

    Output columns (the distinct values of *columns*) must be annotated
    by the user (see _get_pivot_values). Counting is implemented by a
    length aggregation; output dtype is int64.
    """
    kws = dict(rhs.kws)
    # TODO: handle multiple keys (index args)
    index_arg = self._get_arg('crosstab', rhs.args, kws, 0, 'index')
    columns_arg = self._get_arg('crosstab', rhs.args, kws, 1, 'columns')
    # TODO: handle values and aggfunc options
    in_vars = {}
    # output of crosstab is array[int64]

    def to_arr():
        res = sdc.hiframes.api.init_series(np.empty(1, np.int64))
    f_block = compile_to_numba_ir(to_arr, {'sdc': sdc, 'np': np}).blocks.popitem()[1]
    nodes = f_block.body[:-3]  # remove none return
    out_tp_var = nodes[-1].target
    out_types = {'__dummy__': out_tp_var}
    pivot_values = self._get_pivot_values(lhs.name)
    # one output column per annotated crosstab value
    df_col_map = ({col: ir.Var(lhs.scope, mk_unique_var(col), lhs.loc)
                   for col in pivot_values})
    out_df = df_col_map.copy()
    self._create_df(lhs.name, out_df, label)
    pivot_arr = columns_arg

    def _agg_len_impl(in_arr):  # pragma: no cover
        numba.parfor.init_prange()
        count = 0
        for i in numba.parfor.internal_prange(len(in_arr)):
            count += 1
        return count
    # TODO: make out_key_var an index column
    agg_node = aggregate.Aggregate(
        lhs.name, 'crosstab', [index_arg.name], None, df_col_map,
        in_vars, [index_arg],
        _agg_len_impl, out_types, lhs.loc, pivot_arr, pivot_values, True)
    nodes.append(agg_node)
    return nodes
def _handle_aggregate(self, lhs, rhs, obj_var, func_name, label):
    """Lower a groupby aggregation call into an Aggregate IR node.

    Handles the chain df.groupby(keys)[cols].func(...). *obj_var* is the
    variable holding the (possibly column-selected) groupby object and
    *func_name* the aggregation function name. Returns the IR node list.
    """
    # format df.groupby('A')['B'].agg(lambda x: x.max()-x.min())
    # TODO: support aggregation functions sum, count, etc.
    if func_name not in supported_agg_funcs:
        raise ValueError("only {} supported in groupby".format(
            ", ".join(supported_agg_funcs)))
    # find selected output columns
    df_var, out_colnames, explicit_select, obj_var = self._get_df_obj_select(obj_var, 'groupby')
    key_colnames, as_index = self._get_agg_obj_args(obj_var)
    if out_colnames is None:
        # no explicit selection: aggregate all non-key columns
        out_colnames = list(self.df_vars[df_var.name].keys())
        # key arr is not output by default
        # as_index should be handled separately since it just returns keys
        for k in key_colnames:
            out_colnames.remove(k)
    # find input vars
    in_vars = {out_cname: self.df_vars[df_var.name][out_cname]
               for out_cname in out_colnames}
    nodes, agg_func, out_tp_vars = self._handle_agg_func(
        in_vars, out_colnames, func_name, lhs, rhs)
    # output column map, create dataframe if multiple outputs
    out_key_vars = None
    # XXX output becomes series if single output and explicitly selected
    if len(out_colnames) == 1 and explicit_select and as_index:
        df_col_map = {out_colnames[0]: lhs}
    else:
        out_df = {}
        # keys come first in column list
        if as_index is False:
            out_key_vars = []
            for k in key_colnames:
                out_key_var = ir.Var(lhs.scope, mk_unique_var(k), lhs.loc)
                out_df[k] = out_key_var
                out_key_vars.append(out_key_var)
        df_col_map = ({col: ir.Var(lhs.scope, mk_unique_var(col), lhs.loc)
                       for col in out_colnames})
        out_df.update(df_col_map)
        self._create_df(lhs.name, out_df, label)
    in_key_vars = [self.df_vars[df_var.name][k] for k in key_colnames]
    agg_node = aggregate.Aggregate(
        lhs.name, df_var.name, key_colnames, out_key_vars, df_col_map,
        in_vars, in_key_vars,
        agg_func, out_tp_vars, lhs.loc)
    nodes.append(agg_node)
    return nodes
def _handle_agg_func(self, in_vars, out_colnames, func_name, lhs, rhs):
    """Resolve the aggregation function and infer per-column output types.

    For every output column, compiles a typing stub that applies the
    aggregation to the input series; the stub's result variable encodes
    the output type. Returns (ir nodes, agg function, {col: type var}).
    """
    agg_func = get_agg_func(self.state.func_ir, func_name, rhs)
    out_tp_vars = {}
    # sdc.jit() instead of numba.njit() to handle str arrs etc
    agg_func_dis = sdc.jit(agg_func)
    #agg_func_dis = numba.njit(agg_func)
    agg_gb_var = ir.Var(lhs.scope, mk_unique_var("agg_gb"), lhs.loc)
    nodes = [ir.Assign(ir.Global("agg_gb", agg_func_dis, lhs.loc), agg_gb_var, lhs.loc)]
    for out_cname in out_colnames:
        in_var = in_vars[out_cname]

        def to_arr(a, _agg_f):
            b = sdc.hiframes.api.to_arr_from_series(a)
            res = sdc.hiframes.api.init_series(sdc.hiframes.api.agg_typer(b, _agg_f))
        f_block = compile_to_numba_ir(to_arr, {'sdc': sdc, 'np': np}).blocks.popitem()[1]
        replace_arg_nodes(f_block, [in_var, agg_gb_var])
        nodes += f_block.body[:-3]  # remove none return
        out_tp_vars[out_cname] = nodes[-1].target
    return nodes, agg_func, out_tp_vars
def _get_agg_obj_args(self, agg_var):
    """Extract (key column names, as_index flag) from the groupby() call
    that defined *agg_var*.

    as_index must be a compile-time constant; the 'by' columns may be a
    single column name or a list of names.
    """
    groupby_call = guard(get_definition, self.state.func_ir, agg_var)
    assert isinstance(groupby_call, ir.Expr) and groupby_call.op == 'call'
    kws = dict(groupby_call.kws)
    as_index = True
    if 'as_index' in kws:
        as_index = guard(find_const, self.state.func_ir, kws['as_index'])
        if as_index is None:
            raise ValueError(
                "groupby as_index argument should be constant")
    if len(groupby_call.args) == 1:
        by_arg = groupby_call.args[0]
    elif 'by' in kws:
        by_arg = kws['by']
    else:  # pragma: no cover
        raise ValueError("by argument for groupby() required")
    err_msg = ("groupby() by argument should be "
               "list of column names or a column name")
    key_colnames = self._get_str_or_list(by_arg, True, err_msg=err_msg)
    return key_colnames, as_index
def _get_str_or_list(self, by_arg, list_only=False, default=None, err_msg=None, typ=None):
    """Resolve *by_arg* to a list of constant values (column names).

    Tries, in order: a build_list/build_tuple expression, an
    add_consts_to_type() wrapper, a dict.keys() call on a build_map, and
    finally a single constant. Returns *default* (if given) or raises
    ValueError(*err_msg*) when resolution fails or an element is not of
    type *typ* (str by default). With list_only=True only build_list
    sequences are accepted.
    """
    typ = str if typ is None else typ
    by_arg_def = guard(find_build_sequence, self.state.func_ir, by_arg)

    if by_arg_def is None:
        # try add_consts_to_type
        by_arg_call = guard(get_definition, self.state.func_ir, by_arg)
        if guard(find_callname, self.state.func_ir, by_arg_call) == ('add_consts_to_type', 'sdc.hiframes.api'):
            by_arg_def = guard(find_build_sequence, self.state.func_ir, by_arg_call.args[0])

    if by_arg_def is None:
        # try dict.keys()
        by_arg_call = guard(get_definition, self.state.func_ir, by_arg)
        call_name = guard(find_callname, self.state.func_ir, by_arg_call)
        if (call_name is not None and len(call_name) == 2
                and call_name[0] == 'keys'
                and isinstance(call_name[1], ir.Var)):
            var_def = guard(get_definition, self.state.func_ir, call_name[1])
            if isinstance(var_def, ir.Expr) and var_def.op == 'build_map':
                # keys of the build_map are the constant values we want
                by_arg_def = [v[0] for v in var_def.items], 'build_map'
                # HACK replace dict.keys getattr to avoid typing errors
                keys_getattr = guard(
                    get_definition, self.state.func_ir, by_arg_call.func)
                assert isinstance(
                    keys_getattr, ir.Expr) and keys_getattr.attr == 'keys'
                keys_getattr.attr = 'copy'

    if by_arg_def is None:
        # try single key column
        by_arg_def = guard(find_const, self.state.func_ir, by_arg)
        if by_arg_def is None:
            if default is not None:
                return default
            raise ValueError(err_msg)
        key_colnames = [by_arg_def]
    else:
        if list_only and by_arg_def[1] != 'build_list':
            if default is not None:
                return default
            raise ValueError(err_msg)
        key_colnames = [guard(find_const, self.state.func_ir, v) for v in by_arg_def[0]]
        if any(not isinstance(v, typ) for v in key_colnames):
            if default is not None:
                return default
            raise ValueError(err_msg)
    return key_colnames
def _get_df_obj_select(self, obj_var, obj_name):
    """analyze selection of columns in after groupby() or rolling()
    e.g. groupby('A')['B'], groupby('A')['B', 'C'], groupby('A')

    Returns (df variable, selected column names or None, whether columns
    were explicitly selected, variable of the groupby/rolling call).
    """
    select_def = guard(get_definition, self.state.func_ir, obj_var)
    out_colnames = None
    explicit_select = False
    if isinstance(select_def, ir.Expr) and select_def.op in ('getitem', 'static_getitem'):
        # column selection: peel the getitem and record the names
        obj_var = select_def.value
        out_colnames = (select_def.index
                        if select_def.op == 'static_getitem'
                        else guard(find_const, self.state.func_ir, select_def.index))
        if not isinstance(out_colnames, (str, tuple)):
            raise ValueError("{} output column names should be constant".format(obj_name))
        if isinstance(out_colnames, str):
            out_colnames = [out_colnames]
        explicit_select = True

    obj_call = guard(get_definition, self.state.func_ir, obj_var)
    # find dataframe
    call_def = guard(find_callname, self.state.func_ir, obj_call)
    assert (call_def is not None and call_def[0] == obj_name
            and isinstance(call_def[1], ir.Var)
            and self._is_df_var(call_def[1]))
    df_var = call_def[1]
    return df_var, out_colnames, explicit_select, obj_var
def _handle_rolling(self, lhs, rhs, obj_var, func_name, label):
    """Lower a df.rolling(...) aggregation call into rolling IR calls.

    Handles fixed and variable (on=...) windows, plus the two-input
    cov/corr case including pandas' NaN columns for non-common columns.
    Returns the list of generated IR nodes.
    """
    # format df.rolling(w)['B'].sum()
    # TODO: support aggregation functions sum, count, etc.
    if func_name not in supported_rolling_funcs:
        raise ValueError("only ({}) supported in rolling".format(
            ", ".join(supported_rolling_funcs)))
    nodes = []
    # find selected output columns
    df_var, out_colnames, explicit_select, obj_var = self._get_df_obj_select(obj_var, 'rolling')
    rolling_call = guard(get_definition, self.state.func_ir, obj_var)
    window, center, on = get_rolling_setup_args(self.state.func_ir, rolling_call, False)
    on_arr = self.df_vars[df_var.name][on] if on is not None else None
    # materialize constant center/window values as IR variables
    if not isinstance(center, ir.Var):
        center_var = ir.Var(lhs.scope, mk_unique_var("center"), lhs.loc)
        nodes.append(ir.Assign(ir.Const(center, lhs.loc), center_var, lhs.loc))
        center = center_var
    if not isinstance(window, ir.Var):
        window_var = ir.Var(lhs.scope, mk_unique_var("window"), lhs.loc)
        nodes.append(ir.Assign(ir.Const(window, lhs.loc), window_var, lhs.loc))
        window = window_var
    # TODO: get 'on' arg for offset case
    if out_colnames is None:
        out_colnames = list(self.df_vars[df_var.name].keys())
        # TODO: remove index col for offset case
    nan_cols = []
    if func_name in ('cov', 'corr'):
        if len(rhs.args) != 1:
            raise ValueError("rolling {} requires one argument (other)".format(func_name))
        # XXX pandas only accepts variable window cov/corr
        # when both inputs have time index
        if on_arr is not None:
            raise ValueError("variable window rolling {} not supported yet.".format(func_name))
        # TODO: support variable window rolling cov/corr which is only
        # possible in pandas with time index
        other = rhs.args[0]
        if self._is_df_var(other):
            # df on df cov/corr returns common columns only (without
            # pairwise flag)
            # TODO: support pairwise arg
            col_set1 = set(out_colnames)
            col_set2 = set(self._get_df_col_names(other))
            out_colnames = list(col_set1 & col_set2)
            # Pandas makes non-common columns NaNs
            nan_cols = list(col_set1 ^ col_set2)
    # output column map, create dataframe if multiple outputs
    out_df = None
    if len(out_colnames) == 1 and explicit_select:
        # single explicitly selected column: output is a series (lhs)
        df_col_map = {out_colnames[0]: lhs}
    else:
        df_col_map = ({col: ir.Var(lhs.scope, mk_unique_var(col), lhs.loc)
                       for col in out_colnames})
        if on is not None:
            df_col_map[on] = on_arr
        out_df = df_col_map.copy()
        # TODO: add datetime index for offset case
    args = rhs.args
    for cname, out_col_var in df_col_map.items():
        if cname == on:
            continue
        in_col_var = self.df_vars[df_var.name][cname]
        if func_name in ('cov', 'corr'):
            # pair up the matching column of the other dataframe
            args[0] = self.df_vars[other.name][cname]
        nodes += self._gen_rolling_call(in_col_var, out_col_var, window, center, args, func_name, on_arr)
    # create NaN columns for cov/corr case
    len_arr = self.df_vars[df_var.name][out_colnames[0]]
    for cname in nan_cols:
        def f(arr):
            nan_arr = np.full(len(arr), np.nan)
        f_block = compile_to_numba_ir(f, {'np': np}).blocks.popitem()[1]
        replace_arg_nodes(f_block, [len_arr])
        nodes += f_block.body[:-3]  # remove none return
        out_df[cname] = nodes[-1].target
    if out_df is not None:
        # Pandas sorts the output column names _flex_binary_moment
        # line: res_columns = arg1.columns.union(arg2.columns)
        self._create_df(lhs.name, dict(sorted(out_df.items())), label)
    return nodes
def _gen_rolling_call(self, in_col_var, out_col_var, window, center, args, func_name, on_arr):
    """Generate the IR nodes for a single rolling call on one column.

    Selects the proper runtime entry point (rolling_cov/rolling_corr,
    rolling_variable for on= windows, rolling_fixed otherwise), compiles
    a stub calling it, and retargets the result to *out_col_var*.
    """
    nodes = []
    if func_name in ('cov', 'corr'):
        other = args[0]
        if on_arr is not None:
            if func_name == 'cov':
                def f(arr, other, on_arr, w, center):  # pragma: no cover
                    df_arr = sdc.hiframes.api.init_series(
                        sdc.hiframes.rolling.rolling_cov(
                            arr, other, on_arr, w, center))
            if func_name == 'corr':
                def f(arr, other, on_arr, w, center):  # pragma: no cover
                    df_arr = sdc.hiframes.api.init_series(
                        sdc.hiframes.rolling.rolling_corr(
                            arr, other, on_arr, w, center))
            args = [in_col_var, other, on_arr, window, center]
        else:
            if func_name == 'cov':
                def f(arr, other, w, center):  # pragma: no cover
                    df_arr = sdc.hiframes.api.init_series(
                        sdc.hiframes.rolling.rolling_cov(
                            arr, other, w, center))
            if func_name == 'corr':
                def f(arr, other, w, center):  # pragma: no cover
                    df_arr = sdc.hiframes.api.init_series(
                        sdc.hiframes.rolling.rolling_corr(
                            arr, other, w, center))
            args = [in_col_var, other, window, center]
    # variable window case
    elif on_arr is not None:
        if func_name == 'apply':
            def f(arr, on_arr, w, center, func):  # pragma: no cover
                df_arr = sdc.hiframes.api.init_series(
                    sdc.hiframes.rolling.rolling_variable(
                        arr, on_arr, w, center, False, func))
            args = [in_col_var, on_arr, window, center, args[0]]
        else:
            def f(arr, on_arr, w, center):  # pragma: no cover
                df_arr = sdc.hiframes.api.init_series(
                    sdc.hiframes.rolling.rolling_variable(
                        arr, on_arr, w, center, False, _func_name))
            args = [in_col_var, on_arr, window, center]
    else:  # fixed window
        # apply case takes the passed function instead of just name
        if func_name == 'apply':
            def f(arr, w, center, func):  # pragma: no cover
                df_arr = sdc.hiframes.api.init_series(
                    sdc.hiframes.rolling.rolling_fixed(
                        arr, w, center, False, func))
            args = [in_col_var, window, center, args[0]]
        else:
            def f(arr, w, center):  # pragma: no cover
                df_arr = sdc.hiframes.api.init_series(
                    sdc.hiframes.rolling.rolling_fixed(
                        arr, w, center, False, _func_name))
            args = [in_col_var, window, center]
    # _func_name is resolved as a global inside the compiled stub
    f_block = compile_to_numba_ir(f, {'sdc': sdc, '_func_name': func_name}).blocks.popitem()[1]
    replace_arg_nodes(f_block, args)
    nodes += f_block.body[:-3]  # remove none return
    nodes[-1].target = out_col_var
    return nodes
def _fix_rolling_array(self, col_var, func):
    """
    for integers and bools, the output should be converted to float64

    Returns (new column variable, IR nodes performing the conversion).
    NOTE(review): *func* is unused here; presumably intended for
    per-function conversion rules (see TODO) -- confirm before removing.
    """
    # TODO: check all possible funcs
    def f(arr):  # pragma: no cover
        df_arr = sdc.hiframes.api.fix_rolling_array(arr)
    f_block = compile_to_numba_ir(f, {'sdc': sdc}).blocks.popitem()[1]
    replace_arg_nodes(f_block, [col_var])
    nodes = f_block.body[:-3]  # remove none return
    new_col_var = nodes[-1].target
    return new_col_var, nodes
def _handle_metadata(self):
"""remove distributed input annotation from locals and add to metadata
"""
if 'distributed' not in self.state.metadata:
# TODO: keep updated in variable renaming?
self.state.metadata['distributed'] = self.state.locals.pop(
'##distributed', set())
if 'threaded' not in self.state.metadata:
self.state.metadata['threaded'] = self.state.locals.pop('##threaded', set())
# handle old input flags
# e.g. {"A:input": "distributed"} -> "A"
dist_inputs = {var_name.split(":")[0]
for (var_name, flag) in self.state.locals.items()
if var_name.endswith(":input") and flag == 'distributed'}
thread_inputs = {var_name.split(":")[0]
for (var_name, flag) in self.state.locals.items()
if var_name.endswith(":input") and flag == 'threaded'}
# check inputs to be in actuall args
for arg_name in dist_inputs | thread_inputs:
if arg_name not in self.state.func_ir.arg_names:
raise ValueError(
"distributed input {} not found in arguments".format(
arg_name))
self.state.locals.pop(arg_name + ":input")
self.state.metadata['distributed'] |= dist_inputs
self.state.metadata['threaded'] |= thread_inputs
# handle old return flags
# e.g. {"A:return":"distributed"} -> "A"
flagged_returns = {var_name.split(":")[0]: flag
for (var_name, flag) in self.state.locals.items()
if var_name.endswith(":return")}
for v, flag in flagged_returns.items():
if flag == 'distributed':
self.state.metadata['distributed'].add(v)
elif flag == 'threaded':
self.state.metadata['threaded'].add(v)
self.state.locals.pop(v + ":return")
return
def _run_return(self, ret_node):
    """Rewrite a return statement for distributed/threaded flagged values.

    Wraps flagged return values in dist_return()/threaded_return() calls,
    handling both a single flagged variable and a build_tuple of values.
    Returns the replacement IR node list (ending with the return).
    """
    # TODO: handle distributed analysis, requires handling variable name
    # change in simplify() and replace_var_names()
    flagged_vars = self.state.metadata['distributed'] | self.state.metadata['threaded']
    nodes = [ret_node]
    cast = guard(get_definition, self.state.func_ir, ret_node.value)
    assert cast is not None, "return cast not found"
    assert isinstance(cast, ir.Expr) and cast.op == 'cast'
    scope = cast.value.scope
    loc = cast.loc
    # XXX: using split('.') since the variable might be renamed (e.g. A.2)
    ret_name = cast.value.name.split('.')[0]
    if ret_name in flagged_vars:
        # whole return value is flagged: wrap it and re-cast
        flag = ('distributed' if ret_name in self.state.metadata['distributed']
                else 'threaded')
        nodes = self._gen_replace_dist_return(cast.value, flag)
        new_arr = nodes[-1].target
        new_cast = ir.Expr.cast(new_arr, loc)
        new_out = ir.Var(scope, mk_unique_var(flag + "_return"), loc)
        nodes.append(ir.Assign(new_cast, new_out, loc))
        ret_node.value = new_out
        nodes.append(ret_node)
        return nodes

    # shortcut if no dist return
    if len(flagged_vars) == 0:
        return nodes

    cast_def = guard(get_definition, self.state.func_ir, cast.value)
    if (cast_def is not None and isinstance(cast_def, ir.Expr)
            and cast_def.op == 'build_tuple'):
        # tuple return: wrap each flagged element, rebuild the tuple
        nodes = []
        new_var_list = []
        for v in cast_def.items:
            vname = v.name.split('.')[0]
            if vname in flagged_vars:
                flag = ('distributed' if vname in self.state.metadata['distributed']
                        else 'threaded')
                nodes += self._gen_replace_dist_return(v, flag)
                new_var_list.append(nodes[-1].target)
            else:
                new_var_list.append(v)
        new_tuple_node = ir.Expr.build_tuple(new_var_list, loc)
        new_tuple_var = ir.Var(scope, mk_unique_var("dist_return_tp"), loc)
        nodes.append(ir.Assign(new_tuple_node, new_tuple_var, loc))
        new_cast = ir.Expr.cast(new_tuple_var, loc)
        new_out = ir.Var(scope, mk_unique_var("dist_return"), loc)
        nodes.append(ir.Assign(new_cast, new_out, loc))
        ret_node.value = new_out
        nodes.append(ret_node)
    return nodes
def _gen_replace_dist_return(self, var, flag):
    """Generate IR that wraps *var* in dist_return()/threaded_return().

    *flag* must be 'distributed' or 'threaded'; anything else raises
    ValueError. Returns the stub's IR nodes (last target is the result).
    """
    if flag == 'distributed':
        def f(_dist_arr):  # pragma: no cover
            _d_arr = sdc.distributed_api.dist_return(_dist_arr)
    elif flag == 'threaded':
        def f(_threaded_arr):  # pragma: no cover
            _th_arr = sdc.distributed_api.threaded_return(_threaded_arr)
    else:
        raise ValueError("Invalid return flag {}".format(flag))
    f_block = compile_to_numba_ir(
        f, {'sdc': sdc}).blocks.popitem()[1]
    replace_arg_nodes(f_block, [var])
    return f_block.body[:-3]  # remove none return
def _run_df_set_column(self, inst, label, cfg):
    """replace setitem of string index with a call to handle possible
    dataframe case where schema is changed:
    df['new_col'] = arr -> df2 = set_df_col(df, 'new_col', arr)
    dataframe_pass will replace set_df_col() with regular setitem if target
    is not dataframe

    Returns the replacement IR nodes; also records the dataframe rename
    in self.replace_var_dict so later uses see the new schema.
    """
    # setting column possible only when it dominates the df creation to
    # keep schema consistent
    # invalid case:
    # df = pd.DataFrame({'A': A})
    # if cond:
    #     df['B'] = B
    # return df
    # TODO: add this check back in
    # if label not in cfg.backbone() and label not in cfg.post_dominators()[df_label]:
    #     raise ValueError("setting dataframe columns inside conditionals and"
    #                      " loops not supported yet")
    # TODO: generalize to more cases
    # for example:
    # df = pd.DataFrame({'A': A})
    # if cond:
    #     df['B'] = B
    # else:
    #     df['B'] = C
    # return df
    # TODO: check for references to df
    # for example:
    # df = pd.DataFrame({'A': A})
    # df2 = df
    # df['B'] = C
    # return df2
    df_var = inst.target
    # create var for string index
    cname_var = ir.Var(inst.value.scope, mk_unique_var("$cname_const"), inst.loc)
    nodes = [ir.Assign(ir.Const(inst.index, inst.loc), cname_var, inst.loc)]

    def func(df, cname, arr): return sdc.hiframes.api.set_df_col(df, cname, arr)
    f_block = compile_to_numba_ir(func, {'sdc': sdc}).blocks.popitem()[1]
    replace_arg_nodes(f_block, [df_var, cname_var, inst.value])
    nodes += f_block.body[:-2]
    # rename the dataframe variable to keep schema static
    new_df_var = ir.Var(df_var.scope, mk_unique_var(df_var.name), df_var.loc)
    nodes[-1].target = new_df_var
    self.replace_var_dict[df_var.name] = new_df_var
    return nodes
def _replace_func(self, func, args, const=False, array_typ_convert=True,
                  pre_nodes=None, extra_globals=None):
    """Package *func* into a ReplaceFunc request.

    The standard globals (numba/np/sdc) are always provided; callers can
    supply more via *extra_globals*. *const* and *array_typ_convert* are
    accepted for interface compatibility.
    """
    glbls = dict(numba=numba, np=np, sdc=sdc)
    if extra_globals is not None:
        glbls.update(extra_globals)
    return ReplaceFunc(func, None, args, glbls, pre_nodes)
def _create_df(self, df_varname, df_col_map, label):
    """Register dataframe *df_varname* with its column map and the block
    label where it was created.
    """
    # order is important for proper handling of itertuples, apply, etc.
    # starting pandas 0.23 and Python 3.6, regular dict order is OK
    # for <0.23 ordered_df_map = OrderedDict(sorted(df_col_map.items()))
    self.df_vars[df_varname] = df_col_map
    self.df_labels[df_varname] = label
def _is_df_colname(self, df_var, cname):
""" is cname a column name in df_var
"""
df_var_renamed = self._get_renamed_df(df_var)
return cname in self.df_vars[df_var_renamed.name]
def _is_df_var(self, var):
    # True when *var* is a registered dataframe variable
    assert isinstance(var, ir.Var)
    return (var.name in self.df_vars)
def _get_df_cols(self, df_var):
    """Return the column-name -> IR variable map of dataframe *df_var*."""
    assert isinstance(df_var, ir.Var)
    renamed = self._get_renamed_df(df_var)
    return self.df_vars[renamed.name]
def _get_df_col_names(self, df_var):
    """Return the list of column names of dataframe *df_var*."""
    assert isinstance(df_var, ir.Var)
    renamed = self._get_renamed_df(df_var)
    return list(self.df_vars[renamed.name].keys())
def _get_df_col_vars(self, df_var):
    """Return the list of column IR variables of dataframe *df_var*."""
    assert isinstance(df_var, ir.Var)
    renamed = self._get_renamed_df(df_var)
    return list(self.df_vars[renamed.name].values())
def _get_df_colvar(self, df_var, cname):
    """Return the IR variable holding column *cname* of *df_var*."""
    assert isinstance(df_var, ir.Var)
    renamed = self._get_renamed_df(df_var)
    return self.df_vars[renamed.name][cname]
def _get_renamed_df(self, df_var):
    # XXX placeholder for df variable renaming: currently the identity
    # function, kept so callers already go through one rename hook
    assert isinstance(df_var, ir.Var)
    return df_var
def _update_definitions(self, node_list):
    """Register the definitions of *node_list* into the function IR.

    Wraps the nodes in a throwaway block so build_definitions() can
    process them like a normal CFG block.
    """
    loc = ir.Loc("", 0)
    dummy_block = ir.Block(ir.Scope(None, loc), loc)
    dummy_block.body = node_list
    build_definitions({0: dummy_block}, self.state.func_ir._definitions)
def _gen_arr_copy(in_arr, nodes):
    """Append IR that copies array *in_arr*; return the copy's variable.

    The generated nodes are appended to *nodes* in place.
    """
    copy_block = compile_to_numba_ir(
        lambda A: A.copy(), {}).blocks.popitem()[1]
    replace_arg_nodes(copy_block, [in_arr])
    nodes += copy_block.body[:-2]
    return nodes[-1].target
def simple_block_copy_propagate(block):
    """simple copy propagate for a single block before typing, without Parfor

    Mutates *block* in place, replacing uses of copied variables with
    their latest source, and kills copies when either side is redefined.
    """
    var_dict = {}
    # assignments as dict to replace with latest value
    for stmt in block.body:
        # only rhs of assignments should be replaced
        # e.g. if x=y is available, x in x=z shouldn't be replaced
        if isinstance(stmt, ir.Assign):
            stmt.value = replace_vars_inner(stmt.value, var_dict)
        else:
            replace_vars_stmt(stmt, var_dict)
        if isinstance(stmt, ir.Assign) and isinstance(stmt.value, ir.Var):
            lhs = stmt.target.name
            rhs = stmt.value.name
            # rhs could be replaced with lhs from previous copies
            if lhs != rhs:
                var_dict[lhs] = stmt.value
                # a=b kills previous t=a
                lhs_kill = []
                for k, v in var_dict.items():
                    if v.name == lhs:
                        lhs_kill.append(k)
                for k in lhs_kill:
                    var_dict.pop(k, None)
        if (isinstance(stmt, ir.Assign)
                and not isinstance(stmt.value, ir.Var)):
            # non-copy assignment: lhs no longer holds a copy
            lhs = stmt.target.name
            var_dict.pop(lhs, None)
            # previous t=a is killed if a is killed
            lhs_kill = []
            for k, v in var_dict.items():
                if v.name == lhs:
                    lhs_kill.append(k)
            for k in lhs_kill:
                var_dict.pop(k, None)
    return
|
# Copyright 2016 VMware, Inc.
#
# All Rights Reserved
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import xml.etree.ElementTree as et
from networking_sfc.extensions import flowclassifier
from networking_sfc.services.flowclassifier.common import exceptions as exc
from networking_sfc.services.flowclassifier.drivers import base as fc_driver
from neutron_lib.callbacks import events
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import context as n_context
from neutron_lib.plugins import directory
from oslo_config import cfg
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from vmware_nsx._i18n import _
from vmware_nsx.common import config # noqa
from vmware_nsx.common import exceptions as nsx_exc
from vmware_nsx.common import locking
from vmware_nsx.common import nsxv_constants
from vmware_nsx.plugins.nsx_v.vshield import vcns as nsxv_api
from vmware_nsx.plugins.nsx_v.vshield import vcns_driver
from vmware_nsx.services.flowclassifier.nsx_v import utils as fc_utils
LOG = logging.getLogger(__name__)
REDIRECT_FW_SECTION_NAME = 'OS Flow Classifier Rules'
class NsxvFlowClassifierDriver(fc_driver.FlowClassifierDriverBase):
"""FlowClassifier Driver For NSX-V."""
_redirect_section_id = None
def initialize(self):
    """Set up the driver: NSX-V client, SI profile and security group."""
    self._nsxv = vcns_driver.VcnsDriver(None)
    self.init_profile_id()
    self.init_security_group()
    self.init_security_group_in_profile()
    # register an event to the end of the init to handle the first upgrade
    if self._is_new_security_group:
        registry.subscribe(self.init_complete,
                           resources.PROCESS,
                           events.BEFORE_SPAWN)
def init_profile_id(self):
    """Init the service insertion profile ID

    Initialize the profile id that should be assigned to the redirect
    rules from the nsx configuration and verify that it exists on backend.

    Raises cfg.RequiredOptError when the option is unset and
    nsx_exc.NsxPluginException when the moref is not found on the backend.
    """
    if not cfg.CONF.nsxv.service_insertion_profile_id:
        raise cfg.RequiredOptError("service_insertion_profile_id",
                                   group=cfg.OptGroup('nsxv'))
    self._profile_id = cfg.CONF.nsxv.service_insertion_profile_id
    # Verify that this moref exists
    if not self._nsxv.vcns.validate_inventory(self._profile_id):
        error = (_("Configured service profile ID: %s not found") %
                 self._profile_id)
        raise nsx_exc.NsxPluginException(err_msg=error)
def init_security_group(self):
    """Init the service insertion security group

    Look for the service insertion security group in the backend.
    If it was not found - create it
    This security group will contain all the VMs vnics that should
    be inspected by the redirect rules
    """
    sg_name = fc_utils.SERVICE_INSERTION_SG_NAME
    existing_id = self._nsxv.vcns.get_security_group_id(sg_name)
    self._is_new_security_group = False
    if existing_id:
        self._security_group_id = existing_id
        return
    # not found on the backend - create it now
    description = ("OpenStack Service Insertion Security Group, "
                   "managed by Neutron nsx-v plugin.")
    sg = {"securitygroup": {"name": sg_name,
                            "description": description}}
    _headers, new_id = self._nsxv.vcns.create_security_group(sg)
    self._is_new_security_group = True
    self._security_group_id = new_id
def init_security_group_in_profile(self):
    """Attach the security group to the service profile

    No-op when the group is already bound to the profile.
    """
    data = self._nsxv.vcns.get_service_insertion_profile(self._profile_id)
    if data and len(data) > 1:
        # data[1] is the profile XML body
        profile = et.fromstring(data[1])
        profile_binding = profile.find('serviceProfileBinding')
        # NOTE(review): assumes serviceProfileBinding/securityGroups always
        # exist in the profile XML -- a missing element would raise
        # AttributeError here; confirm against the backend schema.
        sec_groups = profile_binding.find('securityGroups')
        for sec in sec_groups.iter('string'):
            if sec.text == self._security_group_id:
                # Already there
                return
        # add the security group to the binding
        et.SubElement(sec_groups, 'string').text = self._security_group_id
        self._nsxv.vcns.update_service_insertion_profile_binding(
            self._profile_id,
            et.tostring(profile_binding, encoding="us-ascii"))
def init_complete(self, resource, event, trigger, **kwargs):
    """Callback run at process spawn to finish first-time initialization.

    Registered in initialize() only when a new security group was just
    created, so existing VMs and (optionally) an any->any classifier
    entry are added exactly once.
    """
    if self._is_new_security_group:
        # add existing VMs to the new security group
        # This code must run after init is done
        core_plugin = directory.get_plugin()
        core_plugin.add_vms_to_service_insertion(
            self._security_group_id)
        # Add the first flow classifier entry
        if cfg.CONF.nsxv.service_insertion_redirect_all:
            self.add_any_any_redirect_rule()
def add_any_any_redirect_rule(self):
    """Add an any->any flow classifier entry

    Add 1 flow classifier entry that will redirect all the traffic to the
    security partner
    The user will be able to delete/change it later
    """
    context = n_context.get_admin_context()
    fc_plugin = directory.get_plugin(flowclassifier.FLOW_CLASSIFIER_EXT)
    # first check that there is no other flow classifier entry defined:
    fcs = fc_plugin.get_flow_classifiers(context)
    if len(fcs) > 0:
        return
    # Create any->any rule: all match fields are left unset (None)
    fc = {'name': 'redirect_all',
          'description': 'Redirect all traffic',
          'tenant_id': nsxv_constants.INTERNAL_TENANT_ID,
          'l7_parameters': {},
          'ethertype': 'IPv4',
          'protocol': None,
          'source_port_range_min': None,
          'source_port_range_max': None,
          'destination_port_range_min': None,
          'destination_port_range_max': None,
          'source_ip_prefix': None,
          'destination_ip_prefix': None,
          'logical_source_port': None,
          'logical_destination_port': None
          }
    fc_plugin.create_flow_classifier(context, {'flow_classifier': fc})
def get_redirect_fw_section_id(self):
    """Return the redirect FW section id, creating the section on first use.

    The id is cached on the instance after the first lookup.
    """
    if self._redirect_section_id:
        return self._redirect_section_id
    # try to find it
    section_id = self._nsxv.vcns.get_section_id(REDIRECT_FW_SECTION_NAME)
    if not section_id:
        # create it for the first time
        section = et.Element('section')
        section.attrib['name'] = REDIRECT_FW_SECTION_NAME
        self._nsxv.vcns.create_redirect_section(et.tostring(section))
        section_id = self._nsxv.vcns.get_section_id(REDIRECT_FW_SECTION_NAME)
    self._redirect_section_id = section_id
    return self._redirect_section_id
def get_redirect_fw_section_uri(self):
    """Return the backend URI of the redirect firewall section."""
    section_id = self.get_redirect_fw_section_id()
    return '%s/%s/%s' % (nsxv_api.FIREWALL_PREFIX,
                         nsxv_api.FIREWALL_REDIRECT_SEC_TYPE,
                         section_id)
def get_redirect_fw_section_from_backend(self):
    """Fetch and parse the redirect FW section XML from the backend.

    Returns the parsed Element, or None when the backend response has
    no body.
    """
    section_uri = self.get_redirect_fw_section_uri()
    response = self._nsxv.vcns.get_section(section_uri)
    if response and len(response) > 1:
        return et.fromstring(response[1])
def update_redirect_section_in_backed(self, section):
    """Write the (modified) redirect FW *section* element back to the
    backend. (Name keeps the historical 'backed' spelling; renaming
    would break callers.)
    """
    section_uri = self.get_redirect_fw_section_uri()
    self._nsxv.vcns.update_section(
        section_uri,
        et.tostring(section, encoding="us-ascii"),
        None)
def _rule_ip_type(self, flow_classifier):
if flow_classifier.get('ethertype') == 'IPv6':
return 'Ipv6Address'
return 'Ipv4Address'
def _rule_ports(self, type, flow_classifier):
min_port = flow_classifier.get(type + '_port_range_min')
max_port = flow_classifier.get(type + '_port_range_max')
return self._ports_list(min_port, max_port)
def _ports_list(self, min_port, max_port):
"""Return a string representing the port/range"""
if min_port == max_port:
return str(min_port)
return "%s-%s" % (min_port, max_port)
def _rule_name(self, flow_classifier):
# The name of the rule will include the name & id of the classifier
# so we can later find it in order to update/delete it.
# Both the flow classifier DB & the backend has max name length of 255
# so we may have to trim the name a bit
return (flow_classifier.get('name')[:200] + '-' +
flow_classifier.get('id'))
def _is_the_same_rule(self, rule, flow_classifier_id):
return rule.find('name').text.endswith(flow_classifier_id)
def init_redirect_fw_rule(self, redirect_rule, flow_classifier):
    """Populate *redirect_rule* (an XML element) from *flow_classifier*.

    Builds name/action/direction/siProfile/packetType children, then
    optional sources, destinations, services (ports + protocol) and a
    notes element carrying the description.
    """
    et.SubElement(redirect_rule, 'name').text = self._rule_name(
        flow_classifier)
    et.SubElement(redirect_rule, 'action').text = 'redirect'
    et.SubElement(redirect_rule, 'direction').text = 'inout'
    si_profile = et.SubElement(redirect_rule, 'siProfile')
    et.SubElement(si_profile, 'objectId').text = self._profile_id
    et.SubElement(redirect_rule, 'packetType').text = flow_classifier.get(
        'ethertype').lower()
    # init the source & destination
    if flow_classifier.get('source_ip_prefix'):
        sources = et.SubElement(redirect_rule, 'sources')
        sources.attrib['excluded'] = 'false'
        source = et.SubElement(sources, 'source')
        et.SubElement(source, 'type').text = self._rule_ip_type(
            flow_classifier)
        et.SubElement(source, 'value').text = flow_classifier.get(
            'source_ip_prefix')
    if flow_classifier.get('destination_ip_prefix'):
        destinations = et.SubElement(redirect_rule, 'destinations')
        destinations.attrib['excluded'] = 'false'
        destination = et.SubElement(destinations, 'destination')
        et.SubElement(destination, 'type').text = self._rule_ip_type(
            flow_classifier)
        et.SubElement(destination, 'value').text = flow_classifier.get(
            'destination_ip_prefix')
    # init the service
    if (flow_classifier.get('destination_port_range_min') or
            flow_classifier.get('source_port_range_min')):
        services = et.SubElement(redirect_rule, 'services')
        service = et.SubElement(services, 'service')
        et.SubElement(service, 'isValid').text = 'true'
        if flow_classifier.get('source_port_range_min'):
            source_port = et.SubElement(service, 'sourcePort')
            source_port.text = self._rule_ports('source',
                                                flow_classifier)
        if flow_classifier.get('destination_port_range_min'):
            dest_port = et.SubElement(service, 'destinationPort')
            dest_port.text = self._rule_ports('destination',
                                              flow_classifier)
        # NOTE(review): assumes 'protocol' is set whenever a port range
        # is given -- a None protocol would raise AttributeError here.
        prot = et.SubElement(service, 'protocolName')
        prot.text = flow_classifier.get('protocol').upper()
    # Add the classifier description
    if flow_classifier.get('description'):
        notes = et.SubElement(redirect_rule, 'notes')
        notes.text = flow_classifier.get('description')
def _loc_fw_section(self):
return locking.LockManager.get_lock('redirect-fw-section')
@log_helpers.log_method_call
def create_flow_classifier(self, context):
"""Create a redirect rule at the backend
"""
flow_classifier = context.current
with self._loc_fw_section():
section = self.get_redirect_fw_section_from_backend()
new_rule = et.SubElement(section, 'rule')
self.init_redirect_fw_rule(new_rule, flow_classifier)
self.update_redirect_section_in_backed(section)
@log_helpers.log_method_call
def update_flow_classifier(self, context):
"""Update the backend redirect rule
"""
flow_classifier = context.current
with self._loc_fw_section():
section = self.get_redirect_fw_section_from_backend()
redirect_rule = None
for rule in section.iter('rule'):
if self._is_the_same_rule(rule, flow_classifier['id']):
redirect_rule = rule
break
if redirect_rule is None:
msg = _("Failed to find redirect rule %s "
"on backed") % flow_classifier['id']
raise exc.FlowClassifierException(message=msg)
else:
# The flowclassifier plugin currently supports updating only
# name or description
name = redirect_rule.find('name')
name.text = self._rule_name(flow_classifier)
notes = redirect_rule.find('notes')
notes.text = flow_classifier.get('description') or ''
self.update_redirect_section_in_backed(section)
@log_helpers.log_method_call
def delete_flow_classifier(self, context):
"""Delete the backend redirect rule
"""
flow_classifier_id = context.current['id']
with self._loc_fw_section():
section = self.get_redirect_fw_section_from_backend()
redirect_rule = None
for rule in section.iter('rule'):
if self._is_the_same_rule(rule, flow_classifier_id):
redirect_rule = rule
section.remove(redirect_rule)
break
if redirect_rule is None:
LOG.error("Failed to delete redirect rule %s: "
"Could not find rule on backed",
flow_classifier_id)
# should not fail the deletion
else:
self.update_redirect_section_in_backed(section)
@log_helpers.log_method_call
def create_flow_classifier_precommit(self, context):
"""Validate the flow classifier data before committing the transaction
The NSX-v redirect rules does not support:
- logical ports
- l7 parameters
- source ports range / destination port range with more than 15 ports
"""
flow_classifier = context.current
# Logical source port
logical_source_port = flow_classifier['logical_source_port']
if logical_source_port is not None:
msg = _('The NSXv driver does not support setting '
'logical source port in FlowClassifier')
raise exc.FlowClassifierBadRequest(message=msg)
# Logical destination port
logical_destination_port = flow_classifier['logical_destination_port']
if logical_destination_port is not None:
msg = _('The NSXv driver does not support setting '
'logical destination port in FlowClassifier')
raise exc.FlowClassifierBadRequest(message=msg)
# L7 parameters
l7_params = flow_classifier['l7_parameters']
if l7_params is not None and len(l7_params.keys()) > 0:
msg = _('The NSXv driver does not support setting '
'L7 parameters in FlowClassifier')
raise exc.FlowClassifierBadRequest(message=msg)
|
import data_mod.utils.jsonutils as sut
import unittest
class TestJsonUtils(unittest.TestCase):
    """Tests for jsonutils.flatten_json_string.

    Each test case dict holds a JSON ``input`` string, the expected
    flattened result as a Python-literal string in ``output``, and a
    ``comment`` describing what the case covers.
    """

    test_case_01 = {
        "input": """{
            "Root": {
                "Level_1": {
                    "Level_2": {
                        "Level_3a": "+00:00",
                        "Level_3b": "20000000001"
                    }
                }
            }
        }""",
        "output": """{"Root.Level_1.Level_2.Level_3a" : "+00:00",
        "Root.Level_1.Level_2.Level_3b" : "20000000001"}""",
        "comment": "A typical JSON string without array presence"
    }
    test_case_02 = {
        "input": """{
            "Root": {
                "ArrayOfObjects": [
                    {
                        "Object": {
                            "Field_1": "+00:00",
                            "Field_2": "20000000001"
                        }
                    },
                    {
                        "Object": {
                            "Property_A": 1,
                            "Property_B": 2,
                            "Property_C": "abcd"
                        }
                    }
                ]
            }
        }""",
        "output": """{
        "Root.ArrayOfObjects[0].Object.Field_1": "+00:00",
        "Root.ArrayOfObjects[0].Object.Field_2": "20000000001",
        "Root.ArrayOfObjects[1].Object.Property_A": 1,
        "Root.ArrayOfObjects[1].Object.Property_B": 2,
        "Root.ArrayOfObjects[1].Object.Property_C": "abcd"
        }""",
        "comment": "A JSON which has (non-nested) array of objects"
    }
    test_case_03 = {
        "input": """{
            "Root": {
                "Level_1": {
                    "Level_2": {
                        "Level_3": "+00:00",
                        "Level_3_array": ["aaa", "bbb"],
                        "Level_3_number": 1.5
                    }
                }
            }
        }""",
        "output": """{
        "Root.Level_1.Level_2.Level_3": "+00:00",
        "Root.Level_1.Level_2.Level_3_array": ['aaa', 'bbb'],
        "Root.Level_1.Level_2.Level_3_number": 1.5
        }""",
        "comment": "JSON has array of primitive items"
    }
    test_case_04 = {
        "input": """{
            "Root": {
                "Level_1": {
                    "Level_2": {
                        "Level_3": "+00:00",
                        "Level_3_array": [
                            {
                                "Level_4_str": "adfga",
                                "Level_4_number": 1.3
                            },
                            {
                                "Level_4_str": "ADFGA",
                                "Level_4_number": 1.3237456
                            }
                        ],
                        "Level_3_number": 1.5
                    }
                }
            }
        }""",
        "output": """{
        "Root.Level_1.Level_2.Level_3": "+00:00",
        "Root.Level_1.Level_2.Level_3_array[0].Level_4_str": "adfga",
        "Root.Level_1.Level_2.Level_3_array[0].Level_4_number": 1.3,
        "Root.Level_1.Level_2.Level_3_array[1].Level_4_str": "ADFGA",
        "Root.Level_1.Level_2.Level_3_array[1].Level_4_number": 1.3237456,
        "Root.Level_1.Level_2.Level_3_number": 1.5
        }""",
        "comment": "JSON has array sits inside a deeper level, and data inside the array contains float"
    }
    test_case_05 = {
        "input": """{
            "Root": {
                "Level_1": {
                    "Level_2": {
                        "Level_2": "+00:00",
                        "Level_2_array": [
                            {
                                "Level_3_str": ["AAA", "BBB", "CCC"],
                                "Level_3_number": 1.3
                            },
                            {
                                "Level_3_str": "ADFGA",
                                "Level_3_number": [2, 4, 5, 7]
                            }
                        ],
                        "Level_2_number": 1.5
                    }
                }
            }
        }""",
        "output": """{
        "Root.Level_1.Level_2.Level_2": "+00:00",
        "Root.Level_1.Level_2.Level_2_array[0].Level_3_str": ['AAA', 'BBB', 'CCC'],
        "Root.Level_1.Level_2.Level_2_array[0].Level_3_number": 1.3,
        "Root.Level_1.Level_2.Level_2_array[1].Level_3_str": "ADFGA",
        "Root.Level_1.Level_2.Level_2_array[1].Level_3_number": [2, 4, 5, 7],
        "Root.Level_1.Level_2.Level_2_number": 1.5
        }""",
        "comment": "JSON has nested arrays"
    }
    test_case_06 = {
        "input": """{
            "Root": {
                "Level_1": {
                    "Level_2": {
                        "Level_3": "+00:00",
                        "Level_3_array": [[1,2,3], [5,7,9]],
                        "Level_3_number": 1.5
                    }
                }
            }
        }""",
        "output": """{
        "Root.Level_1.Level_2.Level_3": "+00:00",
        "Root.Level_1.Level_2.Level_3_array[0]": [1, 2, 3],
        "Root.Level_1.Level_2.Level_3_array[1]": [5, 7, 9],
        "Root.Level_1.Level_2.Level_3_number": 1.5
        }""",
        "comment": "JSON has nested arrays of primitives"
    }
    test_case_07 = {
        "input": '{"Root": 1}',
        "output": """{
        "Root": 1
        }""",
        "comment": "Simple JSON that has only one level of dictionary"
    }
    test_case_08 = {
        "input": '[{"Leaf1": 1}]',
        "output": """{
        "": [{'Leaf1': 1}]
        }
        """,
        "comment": "A JSON which starts with array instead of dictionary."
    }

    def test_flatten_tree(self):
        """Run every stored test case through flatten_json_string."""
        for tcase in [TestJsonUtils.test_case_01,
                      TestJsonUtils.test_case_02,
                      TestJsonUtils.test_case_03,
                      TestJsonUtils.test_case_04,
                      TestJsonUtils.test_case_05,
                      TestJsonUtils.test_case_06,
                      TestJsonUtils.test_case_07,
                      TestJsonUtils.test_case_08]:
            self.excute_test_case(tcase)

    def excute_test_case(self, testcase):
        """Assert that flattening ``input`` yields the ``output`` dict.

        NOTE: the (misspelled) method name is kept so any external
        callers/subclasses are unaffected.
        """
        import ast  # local import; only needed to parse expected outputs
        actual = sut.flatten_json_string(testcase['input'])
        # literal_eval instead of eval: the expected outputs are plain
        # Python literals, so there is no reason to allow arbitrary code
        # execution while parsing them.
        expected = ast.literal_eval(testcase['output'])
        self.assertDictEqual(actual, expected)
if __name__ == '__main__':
    # Allow running this test module directly as a script.
    unittest.main()
<gh_stars>1-10
# -*- coding: utf-8 -*-
import numpy
import sympy.mpmath as sm
sm.mp = 1024
def polynomial(x, list_a):
    """Evaluate sum_k a_k * x**k for coefficients in ascending order."""
    total = 0
    for k, a in enumerate(list_a):
        total += a * x**k
    return total
def d_polynomial(x, list_a):
    """First derivative of `polynomial` at x: sum_{k>=1} k*a_k*x**(k-1)."""
    total = 0
    for k, a in enumerate(list_a):
        if k >= 1:
            total += a * k * x**(k - 1)
    return total
def dd_polynomial(x, list_a):
    """Second derivative of `polynomial` at x: sum_{k>=2} k(k-1)*a_k*x**(k-2)."""
    total = 0
    for k, a in enumerate(list_a):
        if k >= 2:
            total += a * k * (k - 1) * x**(k - 2)
    return total
def search_extreme_points(list_a):
    """Find roots of p'(x) - cos(x) on [0, pi].

    These are the stationary points of the error p(x) - sin(x).  The
    interval is scanned on a 100-point grid; every sub-interval with a
    sign change (or a zero endpoint) is refined with Newton's method.
    """
    def f(x):
        return d_polynomial(x, list_a) - sm.cos(x)

    def df(x):
        return dd_polynomial(x, list_a) + sm.sin(x)

    grid = numpy.linspace(0.0, sm.pi, 100)
    roots = []
    for left, right in zip(grid, grid[1:]):
        if f(left) * f(right) <= 0.0:
            roots.append(sm.findroot(f, left, df=df, tol=1.0e-20))
    return roots
###########################
# Remez algorithm --step2--
###########################
def update_polynomial_coefficients(list_x):
    """Solve the Remez linear system for the reference points *list_x*.

    Builds the system  p(x_j) + e_j * d = sin(x_j)  where p has
    coefficients a_0..a_n and d is the equioscillating error, then
    solves it with an LU decomposition.

    NOTE(review): for j == 0 the error column entry is 0 rather than
    (-1)**0, i.e. the first reference point carries no error term --
    presumably because x_0 = 0 where sin is exact; confirm this is the
    intended constraint.

    :returns: (coefficients a_0..a_n, error magnitude d)
    """
    matrix_A = sm.matrix(
        [[x**k for k in range(len(list_x)-1)] + [ 0 if j==0 else (-1)**j ] \
            for j,x in enumerate(list_x)])
    vector_b = sm.matrix([sm.sin(x) for x in list_x])
    u = sm.lu_solve(matrix_A, vector_b)
    # a[0],...,a[n], d
    return u[:-1], u[u.rows-1]
###########################
# Remez algorithm --step3--
###########################
def update_maximum_error_points(list_a):
    """Return the next set of Remez reference points for coefficients *list_a*.

    The returned list is the fixed point x = 0 followed by the n+1
    extrema of the error found by `search_extreme_points`, giving n+2
    reference points for the next step-2 solve.

    :raises Exception: when the number of extrema found is not n+1.
        NOTE(review): the message reports n+2 (the total reference-point
        count including x = 0) while the check counts only the extrema
        (n+1) -- confirm the message matches the intent.
    """
    n = len(list_a)-1
    extreme_points = search_extreme_points(list_a)
    if len(extreme_points) == n+1:
        return [sm.mpf(0.0)] + extreme_points
    else:
        raise Exception('[ERROR]number of extreme point ' + \
            str(n+2) + '->' + str(len(extreme_points)))
###########################
# Remez algorithm --step4--
###########################
def check_convergence(
        list_a,
        list_x):
    """Return True when the signed errors at the reference points
    (skipping the fixed point at index 0) have variance below 1e-32,
    i.e. the error magnitudes have (nearly) equioscillated.
    """
    def ef(x):
        return polynomial(x, list_a) - sm.sin(x)

    signed_errors = []
    for k, x in enumerate(list_x):
        if k >= 1:
            signed_errors.append(ef(x) * (-1)**k)
    return numpy.var(signed_errors) < 1.0e-32
##########################
# Remez algorithm --main--
##########################
def remez():
    """Run the Remez exchange algorithm for sin(x) on [0, pi].

    :returns: (coefficients, equioscillation error d, reference points,
        iteration count) once step 4 reports convergence.
    :raises Exception: if 999 iterations pass without convergence.
    """
    # Remez algorithm --step3-- (initial reference points from a seed
    # coefficient guess)
    list_a = [0.0, 0.988454351767074, 0.0470171509264136, -0.230234777715065, 0.0366430029450163]
    list_x = update_maximum_error_points(list_a)
    for count in range(1, 1000):
        # Remez algorithm --step2--
        list_a, d = update_polynomial_coefficients(list_x)
        # Remez algorithm --step3--
        list_x = update_maximum_error_points(list_a)
        # Remez algorithm --step4--
        if check_convergence(list_a, list_x):
            return list_a, d, list_x, count
    else:
        # for/else: reached only when the loop runs to completion without
        # returning, i.e. no convergence within the iteration budget.
        raise Exception('[ERROR]Remez algorithm failed')
if __name__ == '__main__':
    # Stage 1: best-approximation polynomial for sin(x) via Remez.
    print('Remez algorithm calculating...', end='')
    list_a, d, list_x, count = remez()
    print(' OK')
    for k,a in enumerate(list_a):
        print('a[' + str(k) + ']=', sm.nstr(a, 17))
    print('d=', sm.nstr(d, 17))
    # -2*s0*s1*s3*x**3 + s0*s2*x + s1**2*s3*x**4 + x**2*(s0**2*s3 - s1*s2)
    # Stage 2: match coefficients a[1]..a[4] against the expanded factored
    # form shown in the comment above -- four equations in s0..s3, each of
    # which must be zero at the solution.
    def f(s0,s1,s2,s3):
        return s0*s2 - list_a[1], \
               s0**2*s3 - s1*s2 - list_a[2], \
               -2*s0*s1*s3 - list_a[3], \
               s1**2*s3 - list_a[4]
    print()
    print('Newton method calculating...', end='')
    # Starting guess for Newton's method.  (Variable name 'initilal' is a
    # typo for 'initial'; kept as-is since it is purely local.)
    initilal = (1.2732395447351627, 0.40528473456935109, 0.77633023248007499, 0.22308510060189463);
    list_s = sm.findroot(
        f,
        initilal,
        method='newton',
        maxsteps=10000,
        tol=1.0e-25)
    print(' OK')
    for k,s in enumerate(list_s):
        print('s[' + str(k) + ']=', sm.nstr(s, 17))
# Reference output from a previous run of this script:
#
#Remez algorithm calculating... OK
#a[0]= 0.0
#a[1]= 0.9897151132173856
#a[2]= 0.044771099390202579
#a[3]= -0.22906038058222875
#a[4]= 0.036456091836172492
#d= -0.00073239476651250248
#
#Newton method calculating... OK
#s[0]= 1.2728577660723033
#s[1]= 0.40516321064662885
#s[2]= 0.7775535802962265
#s[3]= 0.22208033386249199
#
|
<filename>rank_correlation_comparison.py
import matplotlib.pyplot as plt
import numpy as np
from scipy import stats
import pickle
from zero_cost_estimators import zero_cost_estimator
# specify some setup hyperparameters for comparison
sum_window_E = 1  # window size E for the TSE-E estimator
dataset_list = ['cifar10-valid','cifar100', 'ImageNet16-120']
dataset = 'cifar10-valid'  # the dataset actually evaluated in this run
search_space = 'nas201'
max_epochs = 200  # number of training epochs recorded per architecture
n_arch = 6704  # initial guess; overwritten below from the pickle contents
arch_dataset = f'./{dataset}_n{n_arch}_arch_info'
# define all estimators to be compared
# Each entry: estimator name, optional sum-window E, which stored metric it
# consumes, and plotting color/line-style (plus estimator-specific extras).
method_list = [
    {'name': 'TSE-EMA', 'E': None, 'metric': 'train_loss', 'color': 'red', 'style': '-'},
    {'name': 'TSE', 'E': None, 'metric': 'train_loss', 'color': 'orange', 'style': '-'},
    {'name': 'TSE-E', 'E': sum_window_E, 'metric': 'train_loss', 'color': 'blue', 'style': '-'},
    {'name': 'VAccES', 'E': None, 'metric': 'val_acc', 'color': 'green', 'style': '-'},
    {'name': 'LcSVR', 'E': None, 'metric': ['val_acc', 'HP', 'AP'], 'color': 'cyan',
     'style': '-', 'ntrain': 200, 'interval': 25},
    {'name': 'SoVL', 'E': None, 'metric': 'val_loss', 'color': 'deeppink', 'style': '-'},
    {'name': 'SynFlow', 'label': 'SynFlow', 'E': None, 'metric': 'arch', 'color': 'lime', 'style': '-',
     'batch_size': 64},
    {'name': 'SNIP', 'E': None, 'metric': 'arch', 'color': 'blueviolet', 'style': '-', 'batch_size': 64},
    {'name': 'JacCov', 'E': None, 'metric': 'arch', 'color': 'brown', 'style': '-', 'batch_size': 64},
    # {'name': 'TestL', 'label': 'TestL(T=200)', 'E': None, 'metric': 'test_loss', 'color': 'black', 'style': '-'},
]
zero_cost_estimator_list = ['SynFlow', 'JacCov', 'SNIP']
# load prestored arch data
# NOTE(review): pickle.load executes arbitrary code from the file; this is
# only safe because the pickle is produced locally by this project.
with open(arch_dataset, 'rb') as outfile:
    res = pickle.load(outfile)
n_arch = len(res['test_acc'])
print(f'total_n_arch ={n_arch}')
# compute rank correlation for each estimator method
# dic_for_plot maps method name -> [x-range, rank-correlation series, style]
dic_for_plot = {}
for method in method_list:
    # Ground truth: best test accuracy ever reached by each architecture.
    test_acc_all_arch = [np.max(test_acc) for test_acc in res['test_acc']]
    window_size = method['E']
    method_name = method['name']
    metric_name = method['metric']
    style = method['style']
    indices = range(n_arch)  # NOTE(review): unused below -- confirm it can go
    print(f'compute rank correlation for {method_name}')
    test_acc_all_arch_array = np.vstack(test_acc_all_arch)
    if 'So' in method_name or 'TSE' in method_name:
        # Sum-of-metric estimators (TSE variants, SoVL): build a per-epoch
        # score series for every architecture.
        metric_all_arch = res[metric_name]
        sum_metric_all_arch = []
        for i in range(n_arch):
            metric_one_arch = metric_all_arch[i]
            if window_size is not None:
                # TSE-E: sliding-window sum of the last E epochs.
                so_metric = [np.sum(metric_one_arch[se - window_size:se]) for se in range(window_size, max_epochs)]
            else:
                if 'EMA' in method_name:
                    # TSE-EMA: exponential moving average of the metric.
                    # NOTE(review): with mu = 0.9 the NEW sample is weighted
                    # by mu and the running average by (1 - mu), the reverse
                    # of the usual EMA convention -- confirm this matches
                    # the TSE-EMA definition being reproduced.
                    so_metric = []
                    mu = 0.9
                    for se in range(max_epochs):
                        if se <= 0:
                            ema = metric_one_arch[se]
                        else:
                            ema = ema * (1 - mu) + mu * metric_one_arch[se]
                        so_metric.append(ema)
                else:
                    # Plain TSE / SoVL: cumulative sum up to each epoch.
                    so_metric = [np.sum(metric_one_arch[:se]) for se in range(max_epochs)]
            sum_metric_all_arch.append(so_metric)
        metric_all_arch_array = np.vstack(sum_metric_all_arch)
    elif 'LcSVR' in method_name:
        # Learning-curve SVR extrapolator: trained separately below.
        from svr_estimator import SVR_Estimator
        n_train = method['ntrain']
        svr_interval = method['interval']
        metric_all_arch_list = [res[metric] for metric in metric_name]
        svr_regressor = SVR_Estimator(metric_all_arch_list, test_acc_all_arch,
                                      all_curve=True, n_train=n_train)
    elif method_name in zero_cost_estimator_list:
        # Zero-cost proxies: score each architecture once (no training).
        batch_size = method['batch_size']
        metric_all_arch = res[metric_name]
        estimator = zero_cost_estimator(method_name=method_name, search_space=search_space,
                                        dataset=dataset, batch_size=batch_size)
        score_all_arch = []
        for i in range(n_arch):
            metric_one_arch = metric_all_arch[i]
            score_one_arch = estimator.predict(metric_one_arch)
            score_all_arch.append(score_one_arch)
            print(f'arch={i}: score={score_one_arch}')
        metric_all_arch_array = np.vstack(score_all_arch)
    else:
        # Direct metrics (e.g. VAccES): use the stored per-epoch values.
        metric_all_arch = res[metric_name]
        metric_all_arch_array = np.vstack(metric_all_arch)
    # save method scores
    if 'LcSVR' not in method_name:
        method['score_all_arch'] = metric_all_arch_array
    # compute rank correlation
    if 'LcSVR' in method_name:
        # Re-fit hyperparameters and extrapolate every `svr_interval` epochs.
        rank_correlation_metric = []
        score_all_epochs = []
        epoch_list = range(svr_interval, max_epochs + 1, svr_interval)
        for epoch in epoch_list:
            best_hyper, time_taken = svr_regressor.learn_hyper(epoch)
            rank_coeff = svr_regressor.extrapolate()
            rank_correlation_metric.append(rank_coeff)
            score_all_epochs.append(svr_regressor.y_pred)
        method['score_all_arch'] = np.hstack(score_all_epochs)
    elif method_name in zero_cost_estimator_list or method_name == 'TestL':
        # One score per architecture -> one correlation, repeated so it
        # plots as a flat line across epochs.
        rank_correlation_metric = []
        if 'loss' in metric_name:
            metric_all_arch_array = - metric_all_arch_array
        rank_coeff, _ = stats.spearmanr(test_acc_all_arch_array, metric_all_arch_array)
        rank_correlation_metric = [rank_coeff]*max_epochs
    else:
        # Per-epoch score series -> Spearman correlation at every epoch.
        # Losses are negated so higher score always means better.
        rank_correlation_metric = []
        for j in range(metric_all_arch_array.shape[1]):
            if 'loss' in metric_name:
                metric_estimator = - metric_all_arch_array[:, j]
            else:
                metric_estimator = metric_all_arch_array[:, j]
            rank_coeff, _ = stats.spearmanr(test_acc_all_arch_array, metric_estimator)
            rank_correlation_metric.append(rank_coeff)
    # save rank correlation performance over epochs for plotting
    if window_size is not None:
        dic_for_plot[method_name] = [range(window_size, int(window_size + len(rank_correlation_metric))), rank_correlation_metric, style]
    elif 'LcSVR' in method_name:
        dic_for_plot[method_name] = [epoch_list, rank_correlation_metric, style]
    else:
        dic_for_plot[method_name] = [range(1, len(rank_correlation_metric)), rank_correlation_metric[1:], style]
# plot the rank correlation performance of all the estimators
figure, axes = plt.subplots(1, 1, figsize=(3, 3))
fs = 11  # base font size for labels/legend
for method in method_list:
    method_name = method['name']
    color = method['color']
    style = method['style']
    content = dic_for_plot[method_name]
    x_range, rank_corr, fmt = content
    axes.plot(x_range, rank_corr, color=color, ls=style, label=method_name)
axes.legend(prop={'size': fs-1}, loc="lower right").set_zorder(12)
axes.set_title(f'{dataset}')
axes.set_xscale('log')
# Log-spaced ticks from epoch 1 to max_epochs/2, relabelled as a fraction
# of the full training budget T_end (= max_epochs).
axes.set_xticks(np.logspace(0.0, np.log(int(max_epochs/2)) / np.log(10), 4, base=10, endpoint=True))
axes.set_xticklabels([f'{v/(int(max_epochs/2)*2):.2f}' for v in
                      np.logspace(0.0, np.log(int(max_epochs/2)) / np.log(10), 4, base=10, endpoint=True)])
if dataset in ['cifar10-valid', 'ImageNet16-120', 'cifar100']:
    axes.set_xlim([int(max_epochs/2) * 0.04, int(max_epochs/2)])
    axes.set_ylim([0.6, 1.0])
axes.set_xlabel('Fraction of $T_{end}$', fontsize=fs)
axes.set_ylabel('Rank Correlation', fontsize=fs)
fig_name = f'./rank_corr_comparison_on_{search_space}{dataset}_for{n_arch}archs.pdf'
plt.savefig(fig_name, bbox_inches='tight')
<reponame>jasonfan1997/threeML
from dataclasses import dataclass, field
from enum import Enum, Flag
from typing import Any, Dict, List, Optional
import numpy as np
import matplotlib.pyplot as plt
from omegaconf import II, MISSING, SI, OmegaConf
from .plotting_structure import CornerStyle, MPLCmap
class Sampler(Enum):
    """Bayesian sampler backends selectable in the configuration."""
    emcee = "emcee"
    multinest = "multinest"
    zeus = "zeus"
    dynesty_nested = "dynesty_nested"
    dynesty_dynamic = "dynesty_dynamic"
    ultranest = "ultranest"
    autoemcee = "autoemcee"
_sampler_default = {'emcee': {'n_burnin': 1}}
class Optimizer(Enum):
    """Minimizer backends selectable for maximum-likelihood fits."""
    minuit = "minuit"
    scipy = "scipy"
    ROOT = "ROOT"
@dataclass
class BayesianDefault:
    """Default configuration for Bayesian analyses.

    Holds the default sampler choice plus one keyword-argument dict per
    supported sampler backend; each dict is forwarded to that sampler's
    setup call.
    """
    # Sampler used when none is requested explicitly.
    default_sampler: Sampler = Sampler.emcee
    # Keyword arguments for the emcee ensemble sampler.
    emcee_setup: Optional[Dict[str, Any]] = field(
        default_factory=lambda: {'n_burnin': None,
                                 'n_iterations': 500,
                                 "n_walkers": 50,
                                 "seed": 5123})
    # Keyword arguments for MultiNest.
    multinest_setup: Optional[Dict[str, Any]] = field(
        default_factory=lambda: {'n_live_points': 400,
                                 'chain_name': "chains/fit-",
                                 "resume": False,
                                 "importance_nested_sampling": False,
                                 "auto_clean": False,
                                 })
    # Keyword arguments for UltraNest.
    ultranest_setup: Optional[Dict[str, Any]] = field(
        default_factory=lambda: { "min_num_live_points":400,
                                  "dlogz":0.5,
                                  "dKL": 0.5,
                                  "frac_remain": 0.01,
                                  "Lepsilon": 0.001,
                                  "min_ess": 400,
                                  "update_interval_volume_fraction":0.8,
                                  "cluster_num_live_points":40,
                                  "use_mlfriends": True,
                                  "resume": 'overwrite' }
    )
    # Keyword arguments for the zeus slice sampler.
    zeus_setup: Optional[Dict[str, Any]] = field(
        default_factory=lambda: {'n_burnin': None,
                                 'n_iterations': 500,
                                 "n_walkers": 50,
                                 "seed": 5123})
    # Keyword arguments for dynesty's static nested sampler.
    dynesty_nested_setup: Optional[Dict[str, Any]] = field(
        default_factory=lambda: { "n_live_points": 400,
                                  "maxiter": None,
                                  "maxcall": None,
                                  "dlogz": None,
                                  "logl_max": np.inf,
                                  "n_effective": None,
                                  "add_live": True,
                                  "print_func": None,
                                  "save_bounds":True,
                                  "bound":"multi",
                                  "wrapped_params": None,
                                  "sample": "auto",
                                  "periodic": None,
                                  "reflective": None,
                                  "update_interval": None,
                                  "first_update": None,
                                  "npdim": None,
                                  "rstate": None,
                                  "use_pool": None,
                                  "live_points": None,
                                  "logl_args": None,
                                  "logl_kwargs": None,
                                  "ptform_args": None,
                                  "ptform_kwargs": None,
                                  "gradient": None,
                                  "grad_args": None,
                                  "grad_kwargs": None,
                                  "compute_jac": False,
                                  "enlarge": None,
                                  "bootstrap": 0,
                                  "vol_dec": 0.5,
                                  "vol_check": 2.0,
                                  "walks": 25,
                                  "facc": 0.5,
                                  "slices": 5,
                                  "fmove": 0.9,
                                  "max_move": 100,
                                  "update_func": None,
                                  })
    # Keyword arguments for dynesty's dynamic nested sampler.
    # NOTE(review): field name "dynmaic" is a typo for "dynamic", but it is
    # part of the public config schema, so it must not be renamed here.
    dynesty_dynmaic_setup: Optional[Dict[str, Any]] = field(
        default_factory=lambda: {
            "nlive_init": 500,
            "maxiter_init": None,
            "maxcall_init": None,
            "dlogz_init": 0.01,
            "logl_max_init": np.inf,
            "n_effective_init": np.inf,
            "nlive_batch": 500,
            "wt_function": None,
            "wt_kwargs": None,
            "maxiter_batch": None,
            "maxcall_batch": None,
            "maxiter": None,
            "maxcall": None,
            "maxbatch": None,
            "n_effective": np.inf,
            "stop_function": None,
            "stop_kwargs": None,
            "use_stop": True,
            "save_bounds": True,
            "print_func": None,
            "live_points": None,
            "bound":"multi",
            "wrapped_params": None,
            "sample":"auto",
            "periodic": None,
            "reflective": None,
            "update_interval": None,
            "first_update": None,
            "npdim": None,
            "rstate": None,
            "use_pool": None,
            "logl_args": None,
            "logl_kwargs": None,
            "ptform_args": None,
            "ptform_kwargs": None,
            "gradient": None,
            "grad_args": None,
            "grad_kwargs": None,
            "compute_jac": False,
            "enlarge": None,
            "bootstrap": 0,
            "vol_dec": 0.5,
            "vol_check": 2.0,
            "walks": 25,
            "facc": 0.5,
            "slices": 5,
            "fmove": 0.9,
            "max_move": 100,
            "update_func": None,
        })
    # Styling used when producing corner plots of the posterior.
    corner_style: CornerStyle =CornerStyle()
@dataclass
class MLEDefault:
    """Default configuration for maximum-likelihood fitting and its plots."""
    # Minimizer backend plus optional algorithm name and callback hook.
    default_minimizer: Optimizer = Optimizer.minuit
    default_minimizer_algorithm: Optional[str] = None
    default_minimizer_callback: Optional[str] = None
    # Colormap / colors for the three contour confidence levels.
    contour_cmap: MPLCmap = MPLCmap.Pastel1
    contour_background: str = 'white'
    contour_level_1: str = '#ffa372'
    contour_level_2: str = '#ed6663'
    contour_level_3: str = '#0f4c81'
    # Line color and level colors for 1-D profile plots.
    profile_color: str = 'k'
    profile_level_1: str = '#ffa372'
    profile_level_2: str = '#ed6663'
    profile_level_3: str = '#0f4c81'
|
<reponame>ETCCooperative/brownie
#!/usr/bin/python3
import hashlib
import itertools
import json
import re
import tempfile
from pathlib import Path
from typing import Dict, List, Optional, Set, Tuple
from urllib.parse import urlparse
from ethpm._utils.ipfs import generate_file_hash
from ethpm.backends.ipfs import InfuraIPFSBackend
from brownie import network
from brownie._config import _get_data_folder
from brownie.convert import to_address
from brownie.exceptions import InvalidManifest
from brownie.network.web3 import web3
from brownie.typing import AccountsType, TransactionReceiptType
from brownie.utils import color
from . import compiler
# ERC1319 registry URI of the form
# [erc1319://]<registry-address>[:port]/<package-name>@<version>
# Capture groups: (address, package name, version).
URI_REGEX = r"""^(?:erc1319://|)([^/:\s]*)(?::[0-9]+|)/([a-z][a-z0-9_-]{0,255})@([^\s:/'";]*)$"""
# Minimal ABI fragment for an ERC1319 package registry: only the `release`
# function needed to publish a (packageName, version, manifestURI) triple.
REGISTRY_ABI = [
    {
        "constant": False,
        "inputs": [
            {"name": "packageName", "type": "string"},
            {"name": "version", "type": "string"},
            {"name": "manifestURI", "type": "string"},
        ],
        "name": "release",
        "outputs": [{"name": "releaseId", "type": "bytes32"}],
        "payable": False,
        "stateMutability": "nonpayable",
        "type": "function",
    }
]
def get_manifest(uri: str) -> Dict:
    """
    Fetches an ethPM manifest and processes it for use with Brownie.
    A local copy is also stored if the given URI follows the ERC1319 spec.
    Args:
        uri: URI location of the manifest. Can be IPFS or ERC1319.
    Returns:
        The processed manifest dict (see `process_manifest`).
    """
    # uri can be a registry uri or a direct link to ipfs
    if not isinstance(uri, str):
        raise TypeError("EthPM manifest uri must be given as a string")
    match = re.match(URI_REGEX, uri)
    if match is None:
        # if a direct link to IPFS was used, we don't save the manifest locally
        manifest = json.loads(_get_uri_contents(uri))
        path = None
    else:
        address, package_name, version = match.groups()
        path = _get_data_folder().joinpath(f"ethpm/{address}/{package_name}/{version}.json")
        try:
            # Serve the locally cached processed manifest when present.
            with path.open("r") as fp:
                return json.load(fp)
        except (FileNotFoundError, json.decoder.JSONDecodeError):
            # Missing or corrupt cache entry: fall through and re-fetch.
            pass
        # TODO chain != 1
        web3._mainnet.pm.set_registry(address)
        package = web3._mainnet.pm.get_package(package_name, version)
        manifest = package.manifest
        uri = package.uri
    manifest = process_manifest(manifest, uri)
    if path:
        manifest["meta_brownie"]["registry_address"] = address
        # save a local copy before returning
        # Create any missing parent folders (walk from outermost inward).
        for subfolder in list(path.parents)[2::-1]:
            subfolder.mkdir(exist_ok=True)
        with path.open("w") as fp:
            json.dump(manifest, fp)
    return manifest
def process_manifest(manifest: Dict, uri: Optional[str] = None) -> Dict:
    """
    Processes a manifest for use with Brownie.
    Args:
        manifest: ethPM manifest
        uri: IPFS uri of the package
    Returns:
        The manifest, mutated in place, with sources resolved, contract
        types expanded via compilation, dependencies merged, deployments
        normalized, and a `meta_brownie` section added.
    Raises:
        InvalidManifest: for a non-v2 manifest or a namespace collision
        between package dependencies.
    """
    if manifest["manifest_version"] != "2":
        raise InvalidManifest(
            f"Brownie only supports v2 ethPM manifests, this "
            f"manifest is v{manifest['manifest_version']}"
        )
    for key in ("contract_types", "deployments", "sources"):
        manifest.setdefault(key, {})
    # resolve sources
    for key in list(manifest["sources"]):
        content = manifest["sources"].pop(key)
        if _is_uri(content):
            # Source entry is a URI (e.g. ipfs://...) -- fetch the content.
            content = _get_uri_contents(content)
        # ensure all absolute imports begin with contracts/
        content = re.sub(
            r"""(import((\s*{[^};]*}\s*from)|)\s*)("|')(contracts/||/)(?=[^./])""",
            lambda k: f"{k.group(1)}{k.group(4)}contracts/",
            content,
        )
        # Re-key the source under a normalized "contracts/..." path.
        path = Path("/").joinpath(key.lstrip("./")).resolve()
        path_str = path.as_posix()[len(path.anchor) :]
        manifest["sources"][f"contracts/{path_str}"] = content
    # set contract_name in contract_types
    contract_types = manifest["contract_types"]
    for key, value in contract_types.items():
        if "contract_name" not in value:
            value["contract_name"] = key
    # resolve package dependencies
    for dependency_uri in manifest.pop("build_dependencies", {}).values():
        dep_manifest = get_manifest(dependency_uri)
        for key in ("sources", "contract_types"):
            # Identical entries may overlap; differing ones are a collision.
            for k in [i for i in manifest[key] if i in dep_manifest[key]]:
                if manifest[key][k] != dep_manifest[key][k]:
                    raise InvalidManifest("Namespace collision between package dependencies")
            manifest[key].update(dep_manifest[key])
    # compile sources to expand contract_types
    if manifest["sources"]:
        version = compiler.find_best_solc_version(manifest["sources"], install_needed=True)
        build_json = compiler.compile_and_format(manifest["sources"], version)
        for key, build in build_json.items():
            manifest["contract_types"].setdefault(key, {"contract_name": key})
            manifest["contract_types"][key].update(
                {
                    "abi": build["abi"],
                    "source_path": build["sourcePath"],
                    "all_source_paths": build["allSourcePaths"],
                    "compiler": build["compiler"],
                }
            )
    # delete contract_types with no source or ABI, we can't do much with them
    manifest["contract_types"] = dict(
        (k, v) for k, v in manifest["contract_types"].items() if "abi" in v
    )
    # resolve or delete deployments
    for chain_uri in list(manifest["deployments"]):
        deployments = manifest["deployments"][chain_uri]
        for name in list(deployments):
            # Normalize the address and strip any path prefix from the
            # contract-type alias ("pkg:Name" -> "Name").
            deployments[name]["address"] = to_address(deployments[name]["address"])
            alias = deployments[name]["contract_type"]
            alias = alias[alias.rfind(":") + 1 :]
            deployments[name]["contract_type"] = alias
            if alias not in manifest["contract_types"]:
                del deployments[name]
        if not deployments:
            del manifest["deployments"][chain_uri]
    manifest["meta_brownie"] = {"manifest_uri": uri, "registry_address": None}
    return manifest
def _is_uri(uri: str) -> bool:
try:
result = urlparse(uri)
return all([result.scheme, result.netloc])
except ValueError:
return False
def _get_uri_contents(uri: str) -> str:
    """Return the contents of an IPFS URI as text, caching the raw bytes
    locally (keyed by the URI's netloc) so each URI is fetched only once.
    """
    cache_path = _get_data_folder().joinpath(f"ipfs_cache/{urlparse(uri).netloc}.ipfs")
    cache_path.parent.mkdir(exist_ok=True)
    if cache_path.exists():
        # Cache hit: serve the previously stored copy.
        with cache_path.open() as fp:
            return fp.read()
    # Cache miss: fetch via Infura, store the raw bytes, return as text.
    data = InfuraIPFSBackend().fetch_uri_contents(uri)
    with cache_path.open("wb") as fp:
        fp.write(data)
    return data.decode("utf-8")
def get_deployment_addresses(
    manifest: Dict, contract_name: str, genesis_hash: Optional[str] = None
) -> List:
    """
    Parses a manifest and returns a list of deployment addresses for the given
    contract and chain.
    Args:
        manifest: ethPM manifest
        contract_name: Name of the contract
        genesis_hash: Genesis block hash for the chain to return deployments
            on. If None, the currently active chain will be used.
    """
    if genesis_hash is None:
        genesis_hash = web3.genesis_hash
    if "meta_brownie" not in manifest:
        # Manifest has not been processed yet -- normalize it first.
        manifest = process_manifest(manifest)
    chain_uri = f"blockchain://{genesis_hash}"
    # Locate the first deployments entry for this chain.
    matching_key = None
    for uri_key in manifest["deployments"]:
        if uri_key.startswith(chain_uri):
            matching_key = uri_key
            break
    if matching_key is None:
        return []
    # Collect the addresses whose contract type resolves to contract_name.
    addresses = []
    for deployment in manifest["deployments"][matching_key].values():
        alias = deployment["contract_type"]
        if manifest["contract_types"][alias]["contract_name"] == contract_name:
            addresses.append(deployment["address"])
    return addresses
def get_installed_packages(project_path: Path) -> Tuple[List, List]:
    """
    Returns a list of a installed ethPM packages within a project, and a list
    of packages that are installed and one or more files are modified or deleted.
    Args:
        project_path: Path to the root folder of the project
    Returns:
        (project name, version) of installed packages
        (project name, version) of installed-but-modified packages
    """
    packages_json = _load_packages_json(project_path)
    # determine if packages are installed, modified, or deleted
    # Start from "every package installed and deleted"; evidence from each
    # source file then moves packages between the three sets.
    installed: Set = set(packages_json["packages"])
    modified: Set = set()
    deleted: Set = set(packages_json["packages"])
    for source_path in list(packages_json["sources"]):
        package_list = packages_json["sources"][source_path]["packages"]
        # source does not exist, package has been modified
        if not project_path.joinpath(source_path).exists():
            installed.difference_update(package_list)
            modified.update(package_list)
            continue
        # source exists, package has NOT been deleted
        deleted.difference_update(package_list)
        # Compare the file's md5 against the recorded hash to detect edits.
        with project_path.joinpath(source_path).open("rb") as fp:
            source = fp.read()
        if hashlib.md5(source).hexdigest() != packages_json["sources"][source_path]["md5"]:
            # package has been modified
            modified.update(package_list)
    # deleted packages have not been modified, modified packages have not been deleted
    modified.difference_update(deleted)
    installed.difference_update(modified)
    # properly remove deleted packages
    for package_name in deleted:
        remove_package(project_path, package_name, True)
    return (
        [(i, packages_json["packages"][i]["version"]) for i in sorted(installed)],
        [(i, packages_json["packages"][i]["version"]) for i in sorted(modified)],
    )
def install_package(project_path: Path, uri: str, replace_existing: bool = False) -> str:
    """
    Installs an ethPM package within the project.
    Args:
        project_path: Path to the root folder of the project
        uri: manifest URI, can be erc1319 or ipfs
        replace_existing: if True, existing files will be overwritten when
                          installing the package
    Returns: Name of the package
    """
    manifest = get_manifest(uri)
    package_name = manifest["package_name"]
    # Drop any previous install of this package (and its orphaned files)
    # before writing the new version.
    remove_package(project_path, package_name, True)
    packages_json = _load_packages_json(project_path)
    # First pass: validate every target file before touching the project,
    # so a conflict cannot leave a half-written install behind.
    for path, source in manifest["sources"].items():
        source_path = project_path.joinpath(path)
        if not replace_existing and source_path.exists():
            with source_path.open() as fp:
                if fp.read() != source:
                    raise FileExistsError(
                        f"Cannot overwrite existing file with different content: '{source_path}'"
                    )
    # Second pass: write the sources and record their md5 + ownership.
    for path, source in manifest["sources"].items():
        # Create any missing parent folders, outermost first.
        for folder in list(Path(path).parents)[::-1]:
            project_path.joinpath(folder).mkdir(exist_ok=True)
        with project_path.joinpath(path).open("w") as fp:
            fp.write(source)
        # Re-read as bytes so the stored md5 matches the on-disk encoding.
        with project_path.joinpath(path).open("rb") as fp:
            source_bytes = fp.read()
        packages_json["sources"].setdefault(path, {"packages": []})
        packages_json["sources"][path]["md5"] = hashlib.md5(source_bytes).hexdigest()
        packages_json["sources"][path]["packages"].append(package_name)
    packages_json["packages"][package_name] = {
        "manifest_uri": manifest["meta_brownie"]["manifest_uri"],
        "registry_address": manifest["meta_brownie"]["registry_address"],
        "version": manifest["version"],
    }
    with project_path.joinpath("build/packages.json").open("w") as fp:
        json.dump(packages_json, fp, indent=2, sort_keys=True)
    return manifest["package_name"]
def remove_package(project_path: Path, package_name: str, delete_files: bool) -> bool:
    """
    Removes an ethPM package from a project.
    Args:
        project_path: Path to the root folder of the project
        package_name: name of the package
        delete_files: if True, source files related to the package are deleted.
                      files that are still required by other installed packages
                      will not be deleted.
    Returns: boolean indicating if package was installed.
    """
    packages_json = _load_packages_json(project_path)
    if package_name not in packages_json["packages"]:
        return False
    del packages_json["packages"][package_name]
    # Detach the package from every source file it owned.
    for source_path in [
        k for k, v in packages_json["sources"].items() if package_name in v["packages"]
    ]:
        packages_json["sources"][source_path]["packages"].remove(package_name)
        if delete_files and not packages_json["sources"][source_path]["packages"]:
            # if source file is not associated with any other projects, delete it
            del packages_json["sources"][source_path]
            if project_path.joinpath(source_path).exists():
                project_path.joinpath(source_path).unlink()
            # remove empty folders
            # Walk up the parents (excluding the top two levels) and prune
            # any directory left empty by the deletion.
            for path in list(Path(source_path).parents)[:-2]:
                parent_path = project_path.joinpath(path)
                if parent_path.exists() and not list(parent_path.glob("*")):
                    parent_path.rmdir()
    # Persist the updated bookkeeping file.
    with project_path.joinpath("build/packages.json").open("w") as fp:
        json.dump(packages_json, fp, indent=2, sort_keys=True)
    return True
def create_manifest(
    project_path: Path, package_config: Dict, pin_assets: bool = False, silent: bool = True
) -> Tuple[Dict, str]:
    """
    Creates a manifest from a project, and optionally pins it to IPFS.

    Arguments:
        project_path: Path to the root folder of the project
        package_config: Configuration settings for the manifest
        pin_assets: if True, all source files and the manifest will
                    be uploaded onto IPFS via Infura.
        silent: if False, progress messages are printed while pinning

    Returns: generated manifest, ipfs uri of manifest
        (the uri is None when pin_assets is False)

    Raises:
        InvalidManifest: if installed dependencies have been locally modified
    """
    # drop empty config values and validate the package name up front
    package_config = _remove_empty_fields(package_config)
    _verify_package_name(package_config["package_name"])
    if pin_assets:
        ipfs_backend = InfuraIPFSBackend()
    manifest = {
        "manifest_version": "2",
        "package_name": package_config["package_name"],
        "version": package_config["version"],
        "sources": {},
        "contract_types": {},
    }
    if "meta" in package_config:
        manifest["meta"] = package_config["meta"]
    # load packages.json and add build_dependencies
    # when include_dependencies is False, installed packages are referenced
    # via build_dependencies (manifest URIs) instead of inlining their sources
    packages_json: Dict = {"sources": {}, "packages": {}}
    if not package_config["settings"]["include_dependencies"]:
        installed, modified = get_installed_packages(project_path)
        if modified:
            raise InvalidManifest(
                f"Dependencies have been modified locally: {', '.join([i[0] for i in modified])}"
            )
        if installed:
            packages_json = _load_packages_json(project_path)
            manifest["build_dependencies"] = dict(
                (k, v["manifest_uri"]) for k, v in packages_json["packages"].items()
            )
    # add sources
    contract_path = project_path.joinpath("contracts")
    for path in contract_path.glob("**/*.sol"):
        # skip files that belong to an installed dependency
        if path.relative_to(project_path).as_posix() in packages_json["sources"]:
            continue
        if pin_assets:
            if not silent:
                print(f'Pinning "{color("bright magenta")}{path.name}{color}"...')
            uri = ipfs_backend.pin_assets(path)[0]["Hash"]
        else:
            # without pinning, compute the hash locally so the URI is still valid
            with path.open("rb") as fp:
                uri = generate_file_hash(fp.read())
        manifest["sources"][f"./{path.relative_to(contract_path).as_posix()}"] = f"ipfs://{uri}"
    # add contract_types
    for path in project_path.glob("build/contracts/*.json"):
        with path.open() as fp:
            build_json = json.load(fp)
        if not build_json["bytecode"]:
            # skip contracts that cannot deploy
            continue
        if build_json["sourcePath"] in packages_json["sources"]:
            # skip dependencies
            continue
        manifest["contract_types"][build_json["contractName"]] = _get_contract_type(build_json)
    # add deployments
    deployment_networks = package_config["settings"]["deployment_networks"]
    if deployment_networks:
        # remember the currently active network so it can be restored afterwards
        active_network = network.show_active()
        if active_network:
            network.disconnect()
        manifest["deployments"] = {}
        if isinstance(deployment_networks, str):
            deployment_networks = [deployment_networks]
        if deployment_networks == ["*"]:
            # "*" means every network with recorded deployments
            deployment_networks = [i.stem for i in project_path.glob("build/deployments/*")]
        for network_name in deployment_networks:
            instances = list(project_path.glob(f"build/deployments/{network_name}/*.json"))
            if not instances:
                continue
            # newest deployments first so name collisions favour recent ones
            instances.sort(key=lambda k: k.stat().st_mtime, reverse=True)
            # connect to obtain the chain URI used as the deployments key
            network.connect(network_name)
            manifest["deployments"][web3.chain_uri] = {}
            for path in instances:
                with path.open() as fp:
                    build_json = json.load(fp)
                alias = build_json["contractName"]
                source_path = build_json["sourcePath"]
                if source_path in packages_json["sources"]:
                    # dependency contract - namespace the alias with the package name
                    alias = f"{packages_json['sources'][source_path]['packages'][0]}:{alias}"
                if alias in manifest["contract_types"]:
                    # skip deployment if bytecode does not match that of contract_type
                    bytecode = manifest["contract_types"][alias]["deployment_bytecode"]["bytecode"]
                    if f"0x{build_json['bytecode']}" != bytecode:
                        continue
                else:
                    # add contract_type for dependency
                    manifest["contract_types"][alias] = _get_contract_type(build_json)
                # find a unique key by suffixing -1, -2, ... on collision
                key = build_json["contractName"]
                for i in itertools.count(1):
                    if key not in manifest["deployments"][web3.chain_uri]:
                        break
                    key = f"{build_json['contractName']}-{i}"
                manifest["deployments"][web3.chain_uri][key] = {
                    "address": path.stem,
                    "contract_type": alias,
                }
            network.disconnect()
        if active_network:
            # restore the network that was active before manifest creation
            network.connect(active_network)
        if not manifest["deployments"]:
            del manifest["deployments"]
    uri = None
    if pin_assets:
        if not silent:
            print("Pinning manifest...")
        # write tightly-packed, sorted JSON - the format verify_manifest expects
        temp_path = Path(tempfile.gettempdir()).joinpath("manifest.json")
        with temp_path.open("w") as fp:
            json.dump(manifest, fp, sort_keys=True, separators=(",", ":"))
        uri = ipfs_backend.pin_assets(temp_path)[0]["Hash"]
    return manifest, uri
def verify_manifest(package_name: str, version: str, uri: str) -> None:
    """
    Verifies the validity of a package at a given IPFS URI.

    Arguments:
        package_name: Package name
        version: Package version
        uri: IPFS uri

    Returns None if the package is valid, raises InvalidManifest if not.
    """
    _verify_package_name(package_name)
    raw = InfuraIPFSBackend().fetch_uri_contents(uri).decode("utf-8")
    try:
        manifest = json.loads(raw)
    except Exception:
        raise InvalidManifest("URI did not return valid JSON encoded data")
    # the ethPM spec requires the manifest to round-trip to the exact
    # tightly-packed, key-sorted serialization
    if json.dumps(manifest, sort_keys=True, separators=(",", ":")) != raw:
        raise InvalidManifest("JSON data is not tightly packed with sorted keys")
    expected_fields = {
        "manifest_version": "2",
        "package_name": package_name,
        "version": version,
    }
    for field, expected in expected_fields.items():
        if manifest.get(field, None) != expected:
            raise InvalidManifest(f"Missing or invalid field: {field}")
    try:
        process_manifest(manifest)
    except Exception as e:
        raise InvalidManifest(f"Cannot process manifest - {str(e)}")
def release_package(
    registry_address: str, account: AccountsType, package_name: str, version: str, uri: str
) -> TransactionReceiptType:
    """
    Creates a new release of a package at an ERC1319 registry.

    Arguments:
        registry_address: Address of the registry
        account: Account object used to broadcast the transaction to the registry
        package_name: Name of the package
        version: Package version
        uri: IPFS uri of the package

    Returns the receipt of the release transaction.
    """
    erc1319 = network.contract.Contract(
        "ERC1319Registry", registry_address, REGISTRY_ABI, owner=account
    )
    # validate the manifest before broadcasting the release transaction
    verify_manifest(package_name, version, uri)
    return erc1319.release(package_name, version, uri)
def _get_contract_type(build_json: Dict) -> Dict:
    """Assemble an ethPM ``contract_type`` entry from a brownie build artifact.

    Arguments:
        build_json: parsed contents of a ``build/contracts/*.json`` file

    Returns the contract_type dict in ethPM manifest v2 layout.
    """
    compiler_info = build_json["compiler"]
    is_solidity = build_json["language"] == "Solidity"
    settings: Dict = {"evmVersion": compiler_info["evm_version"]}
    if is_solidity:
        # optimizer settings are only recorded for solc builds
        settings["optimizer"] = {
            "enabled": compiler_info["optimize"],
            "runs": compiler_info["runs"],
        }
    return {
        "contract_name": build_json["contractName"],
        "source_path": f"./{Path(build_json['sourcePath']).relative_to('contracts')}",
        "deployment_bytecode": {"bytecode": f"0x{build_json['bytecode']}"},
        "runtime_bytecode": {"bytecode": f"0x{build_json['deployedBytecode']}"},
        "abi": build_json["abi"],
        "compiler": {
            "name": "solc" if is_solidity else "vyper",
            "version": compiler_info["version"],
            "settings": settings,
        },
    }
def _load_packages_json(project_path: Path) -> Dict:
    """Load ``build/packages.json`` for a project.

    A missing or unparsable file is treated as "no packages installed"
    and yields the empty layout instead of raising.
    """
    packages_path = project_path.joinpath("build/packages.json")
    try:
        with packages_path.open() as fp:
            return json.load(fp)
    except (FileNotFoundError, json.decoder.JSONDecodeError):
        return {"sources": {}, "packages": {}}
def _remove_empty_fields(initial: Dict) -> Dict:
    """Return a copy of ``initial`` with empty values pruned.

    Dicts are cleaned recursively, ``None`` entries are stripped out of
    lists, and any value that ends up as ``None``, ``{}``, ``[]`` or ``""``
    is dropped entirely.
    """
    cleaned: Dict = {}
    for field, raw in initial.items():
        if isinstance(raw, dict):
            raw = _remove_empty_fields(raw)
        elif isinstance(raw, list):
            raw = [item for item in raw if item is not None]
        if raw not in (None, {}, [], ""):
            cleaned[field] = raw
    return cleaned
def _verify_package_name(package_name: str) -> None:
    """Raise ``ValueError`` unless ``package_name`` is a valid ethPM name.

    Valid names start with a lowercase letter and contain at most 256
    characters drawn from lowercase letters, digits, ``_`` and ``-``.
    """
    pattern = r"^[a-z][a-z0-9_-]{0,255}$"
    if not re.fullmatch(pattern, package_name):
        raise ValueError(f"Invalid package name '{package_name}'")
|
# -*- coding: utf-8 -*-
# Copyright 2013 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from netaddr import IPAddress
from netaddr import IPNetwork
from nailgun.db.sqlalchemy.models import Cluster
from nailgun.db.sqlalchemy.models import NetworkGroup
from nailgun.db.sqlalchemy.models import NeutronConfig
from nailgun.db.sqlalchemy.models import NovaNetworkConfig
from nailgun.test.base import BaseIntegrationTest
class TestNetworkChecking(BaseIntegrationTest):
    """Shared helpers for network-verification integration tests."""

    def find_net_by_name(self, name):
        """Return the network dict named *name* from self.nets, or None."""
        for network in self.nets['networks']:
            if network['name'] == name:
                return network

    def check_result_format(self, task, cluster_id):
        """Verify that a task result is a list of dicts whose 'ids' and
        'errors' entries reference known network/config fields."""
        if not task.get('result'):
            return
        result = task['result']
        self.assertIsInstance(result, list)
        cluster_db = self.db.query(Cluster).get(cluster_id)
        allowed_fields = \
            NetworkGroup.__mapper__.columns.keys() + ["ip_ranges"]
        if cluster_db.net_provider == 'neutron':
            allowed_fields += NeutronConfig.__mapper__.columns.keys()
        else:
            allowed_fields += NovaNetworkConfig.__mapper__.columns.keys()
        for entry in result:
            if 'ids' in entry:
                self.assertIsInstance(entry['ids'], list)
            if 'errors' in entry:
                self.assertIsInstance(entry['errors'], list)
                for field in entry['errors']:
                    self.assertIn(field, allowed_fields)

    def _expect_task(self, resp, task_name, status, cluster_id=None):
        """Common assertions for an async task response; returns the task."""
        self.assertEqual(resp.status_code, 202)
        task = resp.json_body
        self.assertEqual(task['status'], status)
        self.assertEqual(task['progress'], 100)
        self.assertEqual(task['name'], task_name)
        if cluster_id is not None:
            self.check_result_format(task, cluster_id)
        return task

    def set_cluster_changes_w_error(self, cluster_id):
        """PUT cluster changes expecting the deploy task to fail."""
        resp = self.env.cluster_changes_put(cluster_id,
                                            expect_errors=True)
        return self._expect_task(resp, 'deploy', 'error', cluster_id)

    def update_nova_networks_w_error(self, cluster_id, nets):
        """PUT nova networks expecting the check_networks task to fail."""
        resp = self.env.nova_networks_put(cluster_id, nets,
                                          expect_errors=True)
        return self._expect_task(resp, 'check_networks', 'error', cluster_id)

    def update_nova_networks_success(self, cluster_id, nets):
        """PUT nova networks expecting the check_networks task to succeed."""
        resp = self.env.nova_networks_put(cluster_id, nets)
        return self._expect_task(resp, 'check_networks', 'ready')

    def update_neutron_networks_w_error(self, cluster_id, nets):
        """PUT neutron networks expecting the check_networks task to fail."""
        resp = self.env.neutron_networks_put(cluster_id, nets,
                                             expect_errors=True)
        return self._expect_task(resp, 'check_networks', 'error', cluster_id)

    def update_neutron_networks_success(self, cluster_id, nets):
        """PUT neutron networks expecting the check_networks task to succeed."""
        resp = self.env.neutron_networks_put(cluster_id, nets)
        return self._expect_task(resp, 'check_networks', 'ready')
class TestNovaHandlers(TestNetworkChecking):
    """Network-verification tests for a nova-network cluster."""

    def setUp(self):
        # one pending node with two NICs; cluster defaults to nova-network
        super(TestNovaHandlers, self).setUp()
        meta = self.env.default_metadata()
        self.env.set_interfaces_in_meta(meta, [
            {"name": "eth0", "mac": "00:00:00:00:00:66"},
            {"name": "eth1", "mac": "00:00:00:00:00:77"}])
        self.env.create(
            cluster_kwargs={},
            nodes_kwargs=[
                {"api": True,
                 "meta": meta,
                 "pending_addition": True},
            ]
        )
        self.cluster = self.env.clusters[0]
        resp = self.env.nova_networks_get(self.cluster.id)
        self.nets = resp.json_body
    def test_network_checking(self):
        """The default configuration passes and all groups are persisted."""
        self.update_nova_networks_success(self.cluster.id, self.nets)
        ngs_created = self.db.query(NetworkGroup).filter(
            NetworkGroup.name.in_([n['name'] for n in self.nets['networks']])
        ).all()
        self.assertEqual(len(ngs_created), len(self.nets['networks']))
    def test_network_checking_fails_if_admin_intersection(self):
        """Fixed CIDR overlapping the admin (PXE) network is rejected."""
        admin_ng = self.env.network_manager.get_admin_network_group()
        self.nets['networking_parameters']["fixed_networks_cidr"] = \
            admin_ng.cidr
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertIn(
            "Address space intersection between networks:\n",
            task['message'])
        self.assertIn("admin (PXE)", task['message'])
        self.assertIn("fixed", task['message'])
    def test_network_checking_fails_if_admin_intersection_ip_range(self):
        """A floating range inside the admin CIDR is rejected."""
        admin_ng = self.env.network_manager.get_admin_network_group()
        cidr = IPNetwork(admin_ng.cidr)
        # pick a range fully contained in the admin network
        flt_r0 = str(IPAddress(cidr.first + 2))
        flt_r1 = str(IPAddress(cidr.last))
        self.nets['networking_parameters']['floating_ranges'] = \
            [[flt_r0, flt_r1]]
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            "Address space intersection between floating range '{0}-{1}' and "
            "'admin (PXE)' network.".format(flt_r0, flt_r1),
            task['message'])
    def test_network_checking_fails_if_networks_cidr_intersection(self):
        """Two networks sharing the same CIDR are rejected."""
        self.find_net_by_name('management')["cidr"] = \
            self.find_net_by_name('storage')["cidr"]
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertIn(
            "Address space intersection between networks:\n",
            task['message'])
        self.assertIn("management", task['message'])
        self.assertIn("storage", task['message'])
    def test_network_checking_fails_if_untagged_intersection(self):
        """Two untagged networks on one physical interface fail deploy."""
        self.find_net_by_name('management')["vlan_start"] = None
        self.env.nova_networks_put(self.cluster.id, self.nets)
        task = self.set_cluster_changes_w_error(self.cluster.id)
        self.assertIn(
            'Some untagged networks are assigned to the same physical '
            'interface. You should assign them to different physical '
            'interfaces. Affected:\n',
            task['message'])
        self.assertIn('"management"', task['message'])
        self.assertIn(' networks at node "Untitled', task['message'])
    def test_network_checking_fails_if_networks_cidr_range_intersection(self):
        """A public IP range overlapping the management CIDR is rejected."""
        self.find_net_by_name('public')["ip_ranges"] = \
            [['172.16.58.3', '192.168.127.12']]
        self.find_net_by_name('public')["gateway"] = '172.16.31.10'
        self.find_net_by_name('public')["cidr"] = '172.16.31.10/24'
        self.find_net_by_name('management')["cidr"] = '172.16.31.10/25'
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertIn(
            "Address space intersection between networks:\n",
            task['message'])
        self.assertIn("public", task['message'])
        self.assertIn("management", task['message'])
    def test_network_checking_no_public_floating_ranges_intersection(self):
        """Non-overlapping public and floating ranges pass verification."""
        self.find_net_by_name('public')["ip_ranges"] = \
            [['172.16.58.3', '172.16.17.32'],
             ['172.16.17.32', '192.168.3.11']]
        self.nets['networking_parameters']["floating_ranges"] = \
            [['172.16.58.3', '192.168.127.12'],
             ['192.168.3.11', '192.168.127.12']]
        self.find_net_by_name('public')["gateway"] = '172.16.31.10'
        self.find_net_by_name('public')["cidr"] = '172.16.31.10/24'
        self.update_nova_networks_success(self.cluster.id, self.nets)
    def test_network_checking_fails_if_public_ranges_intersection(self):
        """Overlapping public IP ranges are rejected."""
        self.find_net_by_name('public')["ip_ranges"] = \
            [['172.16.58.3', '172.16.17.32'],
             ['172.16.58.3', '192.168.127.12']]
        self.find_net_by_name('public')["gateway"] = '172.16.31.10'
        self.find_net_by_name('public')["cidr"] = '172.16.31.10/24'
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Address space intersection between ranges of public network."
        )
    def test_network_checking_fails_if_public_gateway_not_in_cidr(self):
        """A public gateway outside the ranges' CIDR is rejected."""
        self.find_net_by_name('public')["ip_ranges"] = \
            [['172.16.58.3', '172.16.17.32'],
             ['172.16.17.32', '192.168.3.11']]
        self.find_net_by_name('public')["gateway"] = '192.168.127.12'
        self.find_net_by_name('public')["cidr"] = '192.168.3.11/24'
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Public gateway and public ranges are not in one CIDR."
        )
    def test_network_checking_fails_if_public_gateway_range_intersection(self):
        """A gateway address that falls inside a public range is rejected."""
        self.find_net_by_name('public')["ip_ranges"] = \
            [['172.16.58.3', '172.16.17.32'],
             ['172.16.17.32', '192.168.3.11']]
        self.find_net_by_name('public')["gateway"] = '172.16.17.32'
        self.find_net_by_name('public')["cidr"] = '172.16.31.10/24'
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Address intersection between public gateway and IP range of "
            "public network."
        )
        # same failure with a single-address range
        self.find_net_by_name('public')["ip_ranges"] = \
            [['172.16.58.3', '172.16.58.3']]
        self.find_net_by_name('public')["gateway"] = '192.168.3.11'
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Address intersection between public gateway and IP range of "
            "public network."
        )
    def test_network_checking_fails_if_floating_ranges_intersection(self):
        """Overlapping floating ranges are rejected."""
        self.nets['networking_parameters']["floating_ranges"] = \
            [['192.168.127.12', '192.168.127.12'],
             ['172.16.17.32', '192.168.127.12']]
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Address space intersection between ranges of floating network."
        )
    def test_network_checking_fails_if_vlan_ids_intersection(self):
        """Two networks using the same VLAN ID are rejected."""
        self.find_net_by_name('public')["vlan_start"] = 111
        self.find_net_by_name('management')["vlan_start"] = 111
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertIn(
            " networks use the same VLAN ID(s). "
            "You should assign different VLAN IDs to every network.",
            task['message'])
        self.assertIn("management", task['message'])
        self.assertIn("public", task['message'])
    def test_network_checking_fails_if_vlan_id_in_fixed_vlan_range(self):
        """A VLAN ID inside the fixed networks' VLAN range is rejected."""
        self.nets['networking_parameters']['net_manager'] = 'VLANManager'
        # fixed VLANs span 1100..1119, so 1111 collides
        self.find_net_by_name('public')["vlan_start"] = 1111
        self.nets['networking_parameters']['fixed_networks_vlan_start'] = \
            1100
        self.nets['networking_parameters']['fixed_networks_amount'] = 20
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertIn(
            " networks use the same VLAN ID(s). "
            "You should assign different VLAN IDs to every network.",
            task['message'])
        self.assertIn("fixed", task['message'])
        self.assertIn("public", task['message'])
    def test_network_checking_fails_if_vlan_id_not_in_allowed_range(self):
        """A VLAN ID above the allowed maximum is rejected."""
        self.find_net_by_name('public')["vlan_start"] = 5555
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "VLAN ID(s) is out of range for public network."
        )
    def test_network_size_not_fit_cidr_in_flatdhcp(self):
        """FlatDHCP ignores fixed_network_size, so an oversize value passes."""
        self.nets['networking_parameters']['net_manager'] = 'FlatDHCPManager'
        self.nets['networking_parameters']['fixed_networks_cidr'] = \
            "10.10.0.0/28"
        self.nets['networking_parameters']['fixed_networks_amount'] = 1
        self.nets['networking_parameters']['fixed_network_size'] = \
            "256"
        task = self.update_nova_networks_success(self.cluster.id, self.nets)
        self.assertEqual(task['status'], 'ready')
    def test_network_size_and_amount_not_fit_cidr(self):
        """VlanManager rejects amount*size exceeding the fixed CIDR."""
        self.nets['networking_parameters']['net_manager'] = 'VlanManager'
        self.nets['networking_parameters']['fixed_networks_cidr'] = \
            "10.10.0.0/24"
        # 8 networks of size 32 fit in a /24 (256 addresses)
        self.nets['networking_parameters']['fixed_networks_amount'] = 8
        self.nets['networking_parameters']['fixed_network_size'] = \
            "32"
        self.update_nova_networks_success(self.cluster.id, self.nets)
        # 32 networks of size 32 do not fit
        self.nets['networking_parameters']['fixed_networks_amount'] = 32
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Number of fixed networks (32) doesn't fit into "
            "fixed CIDR (10.10.0.0/24) and size of one fixed network (32)."
        )
    def test_network_fit_abc_classes_exclude_loopback(self):
        """Loopback and non-A/B/C address spaces are rejected."""
        self.find_net_by_name('management')['cidr'] = '127.19.216.0/24'
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "management network address space is inside loopback range "
            "(127.0.0.0/8). It must have no intersection with "
            "loopback range."
        )
        self.find_net_by_name('management')['cidr'] = '172.16.17.32/24'
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "management network address space does not belong to "
            "A, B, C network classes. It must belong to either "
            "A, B or C network class."
        )
    def test_network_gw_and_ranges_intersect_w_subnet_or_broadcast(self):
        """Gateway/ranges touching subnet or broadcast addresses fail."""
        # gateway equal to the subnet address
        self.find_net_by_name('public')['gateway'] = '172.16.0.0'
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "public network gateway address is equal to either subnet address "
            "or broadcast address of the network."
        )
        # gateway equal to the broadcast address
        self.find_net_by_name('public')['gateway'] = '172.16.0.255'
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "public network gateway address is equal to either subnet address "
            "or broadcast address of the network."
        )
        # range starting at the subnet address
        self.find_net_by_name('public')['gateway'] = '172.16.0.125'
        self.find_net_by_name('public')['ip_ranges'] = [['172.16.0.0',
                                                         '172.16.0.122']]
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "public network IP range [172.16.0.0-172.16.0.122] intersect "
            "with either subnet address or broadcast address of the network."
        )
        # range containing the broadcast address
        self.find_net_by_name('public')['ip_ranges'] = [['172.16.0.255',
                                                         '172.16.0.255']]
        task = self.update_nova_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "public network IP range [172.16.0.255-172.16.0.255] intersect "
            "with either subnet address or broadcast address of the network."
        )
        # a range strictly inside the subnet passes
        self.find_net_by_name('public')['ip_ranges'] = [['172.16.0.2',
                                                         '172.16.0.122']]
        self.update_nova_networks_success(self.cluster.id, self.nets)
class TestNeutronHandlersGre(TestNetworkChecking):
    """Network-verification tests for a Neutron cluster with GRE segmentation."""

    def setUp(self):
        # one pending node with two NICs on a neutron/gre cluster
        super(TestNeutronHandlersGre, self).setUp()
        meta = self.env.default_metadata()
        self.env.set_interfaces_in_meta(meta, [
            {"name": "eth0", "mac": "00:00:00:00:00:66"},
            {"name": "eth1", "mac": "00:00:00:00:00:77"}])
        self.env.create(
            cluster_kwargs={
                'net_provider': 'neutron',
                'net_segment_type': 'gre'
            },
            nodes_kwargs=[
                {'api': True,
                 'pending_addition': True,
                 'meta': meta}
            ]
        )
        self.cluster = self.env.clusters[0]
        resp = self.env.neutron_networks_get(self.cluster.id)
        self.nets = resp.json_body
    def test_network_checking(self):
        """The default configuration passes and all groups are persisted."""
        self.update_neutron_networks_success(self.cluster.id, self.nets)
        ngs_created = self.db.query(NetworkGroup).filter(
            NetworkGroup.name.in_([n['name'] for n in self.nets['networks']])
        ).all()
        self.assertEqual(len(ngs_created), len(self.nets['networks']))
    # TODO(adanin) Provide a positive test that it's allowed to move any
    # network to the Admin interface.
    def test_network_checking_fails_if_admin_intersection(self):
        """A network CIDR overlapping the admin (PXE) network is rejected."""
        admin_ng = self.env.network_manager.get_admin_network_group()
        self.find_net_by_name('storage')["cidr"] = admin_ng.cidr
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertIn(
            "Address space intersection between networks:\n",
            task['message'])
        self.assertIn("admin (PXE)", task['message'])
        self.assertIn("storage", task['message'])
    def test_network_checking_fails_if_untagged_intersection(self):
        """Multiple untagged networks on one interface fail deploy."""
        for n in self.nets['networks']:
            n['vlan_start'] = None
        self.update_neutron_networks_success(self.cluster.id, self.nets)
        task = self.set_cluster_changes_w_error(self.cluster.id)
        self.assertIn(
            "Some untagged networks are "
            "assigned to the same physical interface. "
            "You should assign them to "
            "different physical interfaces. Affected:\n",
            task['message']
        )
        self.assertIn("admin (PXE)", task['message'])
        self.assertIn("storage", task['message'])
        self.assertIn("management", task['message'])
    def test_network_checking_fails_if_public_gateway_not_in_cidr(self):
        """A public gateway outside the public CIDR is rejected."""
        self.find_net_by_name('public')['cidr'] = '172.16.10.0/24'
        self.find_net_by_name('public')['gateway'] = '172.16.10.1'
        self.nets['networking_parameters']['floating_ranges'] = \
            [['172.16.10.130', '172.16.10.254']]
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Public gateway and public ranges are not in one CIDR."
        )
    def test_network_checking_fails_if_public_gateway_range_intersection(self):
        """A gateway address inside a public IP range is rejected."""
        self.find_net_by_name('public')["ip_ranges"] = \
            [['172.16.0.5', '172.16.0.43'],
             ['172.16.0.59', '172.16.0.90']]
        self.find_net_by_name('public')["gateway"] = '172.16.0.77'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Address intersection between public gateway and IP range of "
            "public network."
        )
        # same failure with a single range containing the gateway
        self.find_net_by_name('public')["ip_ranges"] = \
            [['172.16.0.5', '172.16.0.99']]
        self.find_net_by_name('public')["gateway"] = '172.16.0.55'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Address intersection between public gateway and IP range of "
            "public network."
        )
    def test_network_checking_fails_if_public_float_range_not_in_cidr(self):
        """A floating range outside the public CIDR is rejected."""
        self.find_net_by_name('public')['cidr'] = '172.16.10.0/24'
        self.find_net_by_name('public')['gateway'] = '172.16.10.1'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Floating address range 172.16.0.130:172.16.0.254 is not in "
            "public address space 172.16.10.0/24."
        )
    def test_network_checking_fails_if_network_ranges_intersect(self):
        """Two networks sharing the same CIDR are rejected."""
        self.find_net_by_name('management')['cidr'] = \
            self.find_net_by_name('storage')['cidr']
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertIn(
            "Address space intersection between networks:\n",
            task['message'])
        self.assertIn("management", task['message'])
        self.assertIn("storage", task['message'])
    def test_network_checking_fails_if_public_gw_ranges_intersect(self):
        """A gateway inside the default public range is rejected."""
        self.find_net_by_name('public')['gateway'] = '172.16.0.11'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Address intersection between public gateway "
            "and IP range of public network."
        )
    def test_network_checking_fails_if_public_ranges_intersect(self):
        """Overlapping public IP ranges are rejected."""
        self.find_net_by_name('public')['ip_ranges'] = \
            [['172.16.0.2', '172.16.0.77'],
             ['172.16.0.55', '172.16.0.121']]
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Address space intersection between ranges "
            "of public network."
        )
    def test_network_checking_fails_if_public_float_ranges_intersect(self):
        """A public range overlapping the floating range is rejected."""
        self.find_net_by_name('public')['ip_ranges'] = \
            [['172.16.0.2', '172.16.0.33'],
             ['172.16.0.55', '172.16.0.222']]
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Address space intersection between ranges "
            "of public and external network."
        )
    def test_network_checking_public_network_cidr_became_smaller(self):
        """Shrinking the public CIDR to a /25 is accepted and persisted."""
        self.find_net_by_name('public')['cidr'] = '172.16.0.0/25'
        self.find_net_by_name('public')['gateway'] = '172.16.0.1'
        self.find_net_by_name('public')['ip_ranges'] = [['172.16.0.2',
                                                         '172.16.0.77']]
        self.nets['networking_parameters']['floating_ranges'] = \
            [['172.16.0.99', '172.16.0.111']]
        self.update_neutron_networks_success(self.cluster.id, self.nets)
        # re-read the config and confirm the new CIDR was stored
        resp = self.env.neutron_networks_get(self.cluster.id)
        self.nets = resp.json_body
        self.assertEqual(self.find_net_by_name('public')['cidr'],
                         '172.16.0.0/25')
    def test_network_checking_fails_on_network_vlan_match(self):
        """Two networks using the same VLAN tag are rejected."""
        self.find_net_by_name('management')['vlan_start'] = '111'
        self.find_net_by_name('storage')['vlan_start'] = '111'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertIn(
            " networks use the same VLAN tags. "
            "You should assign different VLAN tag "
            "to every network.",
            task['message'])
        self.assertIn("management", task['message'])
        self.assertIn("storage", task['message'])
    def test_network_checking_fails_if_internal_gateway_not_in_cidr(self):
        """An internal gateway outside the internal CIDR is rejected."""
        self.nets['networking_parameters']['internal_gateway'] = '172.16.10.1'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Internal gateway 172.16.10.1 is not in "
            "internal address space 192.168.111.0/24."
        )
    def test_network_checking_fails_if_internal_w_floating_intersection(self):
        """An internal CIDR overlapping the floating range is rejected."""
        self.nets['networking_parameters']['internal_cidr'] = '172.16.0.128/26'
        self.nets['networking_parameters']['internal_gateway'] = '172.16.0.129'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Intersection between internal CIDR and floating range."
        )
    def test_network_fit_abc_classes_exclude_loopback(self):
        """Loopback and non-A/B/C address spaces are rejected."""
        self.find_net_by_name('management')['cidr'] = '127.19.216.0/24'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "management network address space is inside loopback range "
            "(127.0.0.0/8). It must have no intersection with "
            "loopback range."
        )
        self.find_net_by_name('management')['cidr'] = '172.16.17.32/24'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "management network address space does not belong to "
            "A, B, C network classes. It must belong to either "
            "A, B or C network class."
        )
    def test_network_gw_and_ranges_intersect_w_subnet_or_broadcast(self):
        """Gateways/ranges touching subnet or broadcast addresses fail."""
        # gateway equal to the subnet address
        self.find_net_by_name('public')['gateway'] = '172.16.0.0'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "public network gateway address is equal to either subnet address "
            "or broadcast address of the network."
        )
        # gateway equal to the broadcast address
        self.find_net_by_name('public')['gateway'] = '172.16.0.255'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "public network gateway address is equal to either subnet address "
            "or broadcast address of the network."
        )
        # public range starting at the subnet address
        self.find_net_by_name('public')['gateway'] = '172.16.0.125'
        self.find_net_by_name('public')['ip_ranges'] = [['172.16.0.0',
                                                         '172.16.0.122']]
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "public network IP range [172.16.0.0-172.16.0.122] intersect "
            "with either subnet address or broadcast address of the network."
        )
        # public range containing the broadcast address
        self.find_net_by_name('public')['ip_ranges'] = [['172.16.0.255',
                                                         '172.16.0.255']]
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "public network IP range [172.16.0.255-172.16.0.255] intersect "
            "with either subnet address or broadcast address of the network."
        )
        # floating range starting at the subnet address
        self.find_net_by_name('public')['ip_ranges'] = [['172.16.0.55',
                                                         '172.16.0.99']]
        self.nets['networking_parameters']['floating_ranges'] = \
            [['172.16.0.0', '172.16.0.33']]
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Neutron L3 external floating range [172.16.0.0-172.16.0.33] "
            "intersect with either subnet address or broadcast address "
            "of public network."
        )
        # floating range containing the broadcast address
        self.nets['networking_parameters']['floating_ranges'] = \
            [['172.16.0.155', '172.16.0.255']]
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Neutron L3 external floating range [172.16.0.155-172.16.0.255] "
            "intersect with either subnet address or broadcast address "
            "of public network."
        )
        # internal gateway equal to the internal subnet address
        self.nets['networking_parameters']['floating_ranges'] = \
            [['172.16.0.155', '172.16.0.199']]
        self.nets['networking_parameters']['internal_cidr'] = \
            '192.168.111.0/24'
        self.nets['networking_parameters']['internal_gateway'] = \
            '192.168.111.0'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Neutron L3 internal network gateway address is equal to "
            "either subnet address or broadcast address of the network."
        )
        # internal gateway equal to the internal broadcast address
        self.nets['networking_parameters']['internal_gateway'] = \
            '192.168.111.255'
        task = self.update_neutron_networks_w_error(self.cluster.id, self.nets)
        self.assertEqual(
            task['message'],
            "Neutron L3 internal network gateway address is equal to "
            "either subnet address or broadcast address of the network."
        )
class TestNeutronHandlersVlan(TestNetworkChecking):
    """Network-verification tests for a Neutron cluster with VLAN segmentation."""

    def setUp(self):
        super(TestNeutronHandlersVlan, self).setUp()
        node_meta = self.env.default_metadata()
        self.env.set_interfaces_in_meta(node_meta, [
            {"name": "eth0", "mac": "00:00:00:00:00:66"},
            {"name": "eth1", "mac": "00:00:00:00:00:77"}])
        self.env.create(
            cluster_kwargs={
                'net_provider': 'neutron',
                'net_segment_type': 'vlan'
            },
            nodes_kwargs=[
                {'api': True,
                 'pending_addition': True,
                 'meta': node_meta}
            ]
        )
        self.cluster = self.env.clusters[0]
        response = self.env.neutron_networks_get(self.cluster.id)
        self.nets = response.json_body

    def test_network_checking(self):
        """The default VLAN configuration passes and groups are persisted."""
        self.update_neutron_networks_success(self.cluster.id, self.nets)
        expected_names = [n['name'] for n in self.nets['networks']]
        stored_groups = self.db.query(NetworkGroup).filter(
            NetworkGroup.name.in_(expected_names)
        ).all()
        self.assertEqual(len(stored_groups), len(expected_names))

    def test_network_checking_failed_if_networks_tags_in_neutron_range(self):
        """A network VLAN tag inside the Neutron L2 range is rejected."""
        self.find_net_by_name('storage')['vlan_start'] = 1000
        task = self.update_neutron_networks_w_error(self.cluster.id,
                                                    self.nets)
        self.assertEqual(
            task['message'],
            "VLAN tags of storage network(s) intersect with "
            "VLAN ID range defined for Neutron L2. "
            "Networks VLAN tags must not intersect "
            "with Neutron L2 VLAN ID range.")
|
import re
import pandas as pd
import sys
from google.cloud import bigquery, storage
class BqPivot():
    """
    Class to generate a SQL query which creates pivoted tables in BigQuery.

    Example
    -------
    The following example uses the kaggle's titanic data. It can be found here -
    `https://www.kaggle.com/c/titanic/data`
    This data is only 60 KB and it has been used for a demonstration purpose.
    This module comes particularly handy with huge datasets for which we would need
    BigQuery(https://en.wikipedia.org/wiki/BigQuery).

    >>> from bq_pivot import BqPivot
    >>> import pandas as pd
    >>> data = pd.read_csv("titanic.csv").head()
    >>> gen = BqPivot(data=data, index_col=["Pclass", "Survived", "PassengenId"],
                      pivot_col="Name", values_col="Age",
                      add_col_nm_suffix=False)
    >>> print(gen.generate_query())

    NOTE: `generate_query` emits a standard-SQL query built from two CTEs
    (`wide_ranked` and `long_array_aggregated`) joined in a final SELECT;
    the pivoted column names are derived as in the original case-when form.
    """

    def __init__(self, index_col, pivot_col, values_col, table_name=None, data=None,
                 agg_fun="sum", not_eq_default="0", add_col_nm_suffix=True, custom_agg_fun=None,
                 prefix=None, suffix=None):
        """
        Parameters
        ----------
        index_col: list
            The names of the index columns in the query (the columns on which the group by needs to be performed)
        pivot_col: string
            The name of the column on which the pivot needs to be done.
        values_col: string or list of strings
            The name or names of the columns on which aggregation needs to be performed.
        agg_fun: string
            The name of the sql aggregation function.
        data: pandas.core.frame.DataFrame or string
            The input data can either be a pandas dataframe or a string path to the pandas
            data frame. The only requirement of this data is that it must have the column
            on which the pivot it to be done. If data is not provided the __init__ call will
            automatically query the table_name provided to get distinct pivot column values.
            **Must provide one of data or table_name**
        table_name: string
            The name of the table in the query.
            **Must provide one of data or table_name**
        not_eq_default: numeric, optional
            The value to take when the case when statement is not satisfied.
        add_col_nm_suffix: boolean, optional
            If True, then the original values column name will be added as suffix in the new
            pivoted columns.
        custom_agg_fun: string, optional
            Customized aggregation function with the values col name replaced by {}.
            If provided this overrides the agg_fun argument.
        prefix: string, optional
            A fixed string to add as a prefix in the pivoted column names.
        suffix: string, optional
            A fixed string to add as a suffix in the pivoted column names.

        Raises
        ------
        ValueError
            If neither data nor table_name is provided.
        """
        # `data == None` would trigger pandas' element-wise comparison (and an
        # ambiguous-truth-value error) for a DataFrame; identity checks are
        # required here.
        if data is None and table_name is None:
            raise ValueError("At least one of data or table_name must be provided.")

        self.query = ""
        self.index_col = index_col
        self.values_col = values_col
        self.pivot_col = pivot_col
        self.not_eq_default = not_eq_default
        # Always resolve the table name (placeholder when not provided) so
        # every code path can interpolate it safely.
        self.table_name = self._get_table_name(table_name)

        # Normalized internal views: index and values columns are handled as
        # lists so single-column (str) and multi-column (list) inputs share
        # one code path, and index columns are pre-joined for SQL (an
        # f-string over a raw list would render "['a', 'b']").
        self._index_cols = [index_col] if isinstance(index_col, str) else list(index_col)
        self._index_sql = ", ".join(self._index_cols)
        self._values_cols = [values_col] if isinstance(values_col, str) else list(values_col)

        if data is None:
            # No local data: look up the distinct pivot values in BigQuery.
            self.piv_col_vals = self._query_piv_col_vals()
        else:
            self.piv_col_vals = self._get_piv_col_vals(data)

        # One list of pivoted column names per values column.
        self._piv_col_name_sets = [
            self._create_piv_col_names(add_col_nm_suffix, prefix, suffix, value_col)
            for value_col in self._values_cols
        ]
        # Preserve the historical public shape of `piv_col_names`: a flat
        # list for a single values column, a list of lists otherwise.
        if isinstance(self.values_col, str):
            self.piv_col_names = self._piv_col_name_sets[0]
        else:
            self.piv_col_names = self._piv_col_name_sets

        self.ord_col_names = self._create_ord_col_names()
        self.function = custom_agg_fun if custom_agg_fun else agg_fun + "({})"

    def _get_table_name(self, table_name):
        """
        Returns the table name or a placeholder if the table name is not provided.
        """
        return table_name if table_name else "<--insert-table-name-here-->"

    def _query_piv_col_vals(self):
        '''
        Queries the distinct values in the pivot col directly from the table_name provided.
        '''
        return pd.read_gbq(f'SELECT DISTINCT({self.pivot_col}) FROM {self.table_name}')[self.pivot_col].astype(
            str).to_list()

    def _get_piv_col_vals(self, data):
        """
        Gets all the unique values of the pivot column from a local dataset.

        `data` may be a pandas DataFrame or a path to a csv file.
        """
        if isinstance(data, pd.DataFrame):
            self.data = data
        elif isinstance(data, str):
            self.data = pd.read_csv(data)
        else:
            raise ValueError("Provided data must be a pandas dataframe or a csv file path.")
        if self.pivot_col not in self.data.columns:
            raise ValueError("The provided data must have the column on which pivot is to be done. " \
                             "Also make sure that the column name in the data is same as the name " \
                             "provided to the pivot_col parameter.")
        return self.data[self.pivot_col].astype(str).unique().tolist()

    def _clean_col_name(self, col_name):
        """
        The pivot column values can have arbitrary strings but in order to
        convert them to column names some cleaning is required. This method
        takes a string as input and returns a clean column name.
        """
        # replace spaces with underscores
        # remove non alpha numeric characters other than underscores
        # replace multiple consecutive underscores with one underscore
        # make all characters lower case
        # remove trailing underscores
        return re.sub("_+", "_", re.sub('[^0-9a-zA-Z_]+', '', re.sub(" ", "_", col_name))).lower().rstrip("_")

    def _create_piv_col_names(self, add_col_nm_suffix, prefix, suffix, value_col=None):
        """
        Creates the list of pivoted column names of the new pivoted table
        for one values column.
        """
        prefix = prefix + "_" if prefix else ""
        suffix = "_" + suffix if suffix else ""
        if value_col is None:
            value_col = self.values_col
        if add_col_nm_suffix:
            piv_col_names = [
                "{0}{1}_{2}{3}".format(prefix, self._clean_col_name(piv_col_val), value_col.lower(), suffix)
                for piv_col_val in self.piv_col_vals]
        else:
            piv_col_names = ["{0}{1}{2}".format(prefix, self._clean_col_name(piv_col_val), suffix)
                             for piv_col_val in self.piv_col_vals]
        return piv_col_names

    def _create_ord_col_names(self):
        '''
        Create sanitized base ordinal names (one per pivot value) used as
        the rank-holding columns of the wide_ranked CTE.
        '''
        ord_col_names = ["{}_".format(self._clean_col_name(piv_col_val))
                         for piv_col_val in self.piv_col_vals]
        return ord_col_names

    def _write_wide_ranked(self):
        '''
        Writes the 'wide_ranked' CTE: one row holding, per pivot value, the
        rank of that value in the sorted distinct pivot values.
        '''
        query = 'WITH wide_ranked AS ( \nSELECT '
        query = query + "".join(["ANY_VALUE(IF({} = '{}', rank, null)) as {},\n".format(self.pivot_col,
                                                                                        pivot_col_val,
                                                                                        ord_col_name)
                                 for pivot_col_val, ord_col_name in zip(self.piv_col_vals, self.ord_col_names)])
        query = query[:-2] + '\nFROM (\nSELECT "1" AS groupby_only_col,\n'
        query = query + f"{self.pivot_col},\nRANK() over (ORDER BY {self.pivot_col}) AS rank\nFROM (\nSELECT DISTINCT {self.pivot_col}\n"
        query = query + f"FROM `{self.table_name}`\n)\n)\nGROUP BY groupby_only_col\n),\n"
        return query

    def _write_long_array_aggregated(self):
        """
        Writes the 'long_array_aggregated' CTE: for each index value, collect
        every values column into an array ordered by the pivot value's rank.
        """
        index_sql = self._index_sql
        query = f"long_array_aggregated AS (\n SELECT {index_sql},\n "
        query = query + "".join(
            [f"ARRAY_AGG({values_col} ORDER BY rank) AS {values_col},\n" for values_col in self._values_cols])[:-2]
        # Each index column must be individually qualified with the subquery
        # alias; interpolating the joined list would produce invalid SQL for
        # multi-column indexes.
        prefixed_index = ",\n".join(
            f"ranked_classes_by_id.{col} AS {col}" for col in self._index_cols)
        query = query + f"\nFROM (\nSELECT {prefixed_index},\n"
        query = query + "ranked_classes_by_id.rank as rank,\n"
        query = query + "".join([f"source.{values_col} as {values_col},\n" for values_col in self._values_cols])[:-2]
        query = query + f"\n FROM `{self.table_name}` as source\n"
        query = query + f"RIGHT JOIN (\nSELECT {index_sql},\n {self.pivot_col},\n"
        query = query + f"rank() over (PARTITION BY {index_sql} ORDER BY {self.pivot_col}) as rank\n"
        query = query + f"FROM (\n SELECT DISTINCT {self.pivot_col}\n"
        query = query + f"FROM `{self.table_name}`)\n CROSS JOIN (\n SELECT DISTINCT {index_sql}\n"
        query = query + f"FROM `{self.table_name}`)\n"
        query = query + f") as ranked_classes_by_id\n USING({index_sql}, {self.pivot_col})\n)\nGROUP BY {index_sql}\n)\n"
        return query

    def _write_table_join(self):
        """
        Writes the final SELECT that joins the two CTEs and reads each array
        element by the ordinal stored in wide_ranked.
        """
        select_index = ", \n".join(f"long_array_aggregated.{col}" for col in self._index_cols)
        query = f"SELECT {select_index}, \n"
        for i, values_col in enumerate(self._values_cols):
            query = query + "".join(
                ["long_array_aggregated.{}[ordinal({})] as {},\n".format(values_col, ord_col_name, pivot_col_name)
                 for ord_col_name, pivot_col_name in zip(self.ord_col_names, self._piv_col_name_sets[i])])
        query = query[:-2] + "\nfrom long_array_aggregated, wide_ranked"
        return query

    def generate_query(self):
        """
        Returns (and caches on self.query) the full pivot query: the two CTEs
        followed by the joining SELECT.
        """
        self.query = self._write_wide_ranked() + \
                     self._write_long_array_aggregated() + \
                     self._write_table_join()
        return self.query

    def write_query(self, output_file=None, verbose=False):
        """
        Writes the query to a text file if output_file is passed, and/or
        prints the query to the console when verbose is True.
        """
        self.generate_query()
        if verbose:
            print(self.query)
        if output_file is not None:
            # Reuse the query generated above; `with` guarantees the handle
            # is closed even on write errors.
            with open(output_file, "w") as text_file:
                text_file.write(self.query)

    def submit_pandas_query(self, **kwargs):
        '''
        Submits the query via pandas and returns the results.
        Extra keyword arguments are forwarded to pd.read_gbq.
        '''
        if self.query == "":
            self.generate_query()
        return pd.read_gbq(self.query, **kwargs)

    def write_permanent_table(self, destination_table):
        """
        Runs the query and materializes the result into `destination_table`
        (a fully-qualified BigQuery table id).
        """
        job_config = bigquery.QueryJobConfig(
            allow_large_results=True,
            destination=destination_table,
            # The generated query is standard SQL (backticked table names,
            # ANY_VALUE, ARRAY_AGG), so legacy SQL must be disabled.
            use_legacy_sql=False,
        )
        if self.query == "":
            self.generate_query()
        client = bigquery.Client()
        # Start the query, passing in the extra configuration.
        query_job = client.query(self.query, job_config=job_config)  # Make an API request.
        query_job.result()  # Wait for the job to complete.
        print("Query results loaded to the table {}".format(destination_table))

    # TO DO
    #def write_temporary_table(self):

    def query_control(self, destination_table=None, local_file=None, temp_table=False):
        """
        Dispatches the generated query: optionally to a local csv, then to a
        permanent BigQuery table, otherwise prompts interactively.
        """
        if local_file is not None:
            self.submit_pandas_query().to_csv(local_file)
        if destination_table is not None:
            self.write_permanent_table(destination_table)
        elif temp_table:
            # Temporary-table output has not been implemented yet (see the
            # write_temporary_table TODO above).
            raise NotImplementedError("Writing to a temporary BigQuery table is not supported yet.")
        else:
            print('Final query not submitted to BigQuery. Would you like to do so now? Y/n')
            answer = input()
            if answer == 'Y' or answer == 'y':
                print('Options: \nLocal file: l\nPermanent BigQuery table: b\nTemperary BigQuery table: t')
                answer = input()
                if answer == 'l':
                    print('Local file write path: ')
                    local_file = input()
                    self.submit_pandas_query().to_csv(local_file)
                elif answer == 'b':
                    print('BigQuery table destination: ')
                    destination_table = input()
                    self.write_permanent_table(destination_table)
if __name__ == "__main__":
    # Command-line defaults; values are overwritten below from sys.argv.
    arguments = {'--output_file': None,
                 '--table_name': None,
                 '--index_col': None,
                 '--pivot_col': None,
                 '--values_col': None,
                 '--data': None,
                 '--agg_fun': "sum",
                 '--not_eq_default': "0",
                 '--add_col_nm_suffix': True,
                 '--custom_agg_fun': None,
                 '--prefix': None,
                 '--suffix': None,
                 '--verbose': False,
                 '--destination_table': None,
                 '--local_file': None,
                 '--temp_table': False}
    if '--help' in sys.argv:
        print('(Unofficial) Google BigQuery Python Pivot Script: BigPivot\n')
        print('Commands available: \n')
        print("".join([arg + ' \n' for arg in arguments]))
        print('Ex. python bq_pivot.py --index_col id --pivot_col class --values_col values --table_name my-project-id:my-dataset:my-table')
        print('\nIf you would like to run your query pass `--destination_table my-project-id:my-dataset:my-table` or `--temp_table True`')
        print('\nAlternatively, to run the query and download the result to a csv using pd.read_gbq, pass `--local_file path/to/file`')
        print('\n\nAdditionally, please make sure you set your GOOGLE_APPLICATION_CREDENTIALS using')
        print('export GOOGLE_APPLICATION_CREDENTIALS=\'path/to/creds.json\'')
        sys.exit(0)
    if len(sys.argv) < 9:
        raise ValueError("The following arguments are required when entering from the Command Line: \nindex_col\npivot_col\nvalues_col\ntable_name\n\
        If you only intend to construct the query locally, pass None for table_name and the path of a data file to read.")
    # Each recognized flag takes the token that follows it on the command line.
    for arg_name in arguments:
        if arg_name in sys.argv:
            arguments[arg_name] = sys.argv[sys.argv.index(arg_name) + 1]

    def _parse_bool(value):
        # CLI values arrive as strings, so e.g. "--temp_table False" must not
        # be treated as truthy.
        if isinstance(value, bool):
            return value
        return str(value).strip().lower() in ('1', 'true', 'yes', 'y')

    # NOTE: no BigQuery/Storage clients are created here; write_permanent_table
    # constructs its own client only when a query is actually submitted.
    gbqPivot = BqPivot(index_col=[arguments['--index_col']],
                       pivot_col=arguments['--pivot_col'],
                       # Wrapped in a list: the query builders iterate over
                       # values columns, and a bare string would be iterated
                       # character by character.
                       values_col=[arguments['--values_col']],
                       table_name=arguments['--table_name'],
                       data=arguments['--data'],
                       agg_fun=arguments['--agg_fun'],
                       not_eq_default=arguments['--not_eq_default'],
                       add_col_nm_suffix=_parse_bool(arguments['--add_col_nm_suffix']),
                       custom_agg_fun=arguments['--custom_agg_fun'],
                       prefix=arguments['--prefix'],
                       suffix=arguments['--suffix'])
    gbqPivot.write_query(output_file=arguments['--output_file'],
                         verbose=_parse_bool(arguments['--verbose']))
    gbqPivot.query_control(destination_table=arguments['--destination_table'],
                           local_file=arguments['--local_file'],
                           temp_table=_parse_bool(arguments['--temp_table']))
|
from dataclasses import dataclass
from typing import Collection, Dict, List, Optional, Set
from zerver.lib.mention import MentionData
from zerver.models import NotificationTriggers
@dataclass
class UserMessageNotificationsData:
    """Per-user bundle of the notification switches relevant to one message."""

    user_id: int
    online_push_enabled: bool
    pm_email_notify: bool
    pm_push_notify: bool
    mention_email_notify: bool
    mention_push_notify: bool
    wildcard_mention_email_notify: bool
    wildcard_mention_push_notify: bool
    stream_push_notify: bool
    stream_email_notify: bool
    sender_is_muted: bool

    def __post_init__(self) -> None:
        # A message is either a private message or a stream message, so the
        # PM and stream notify flags must never be set at the same time.
        pm_flagged = self.pm_email_notify or self.pm_push_notify
        stream_flagged = self.stream_email_notify or self.stream_push_notify
        assert not (pm_flagged and stream_flagged)

    @classmethod
    def from_user_id_sets(
        cls,
        *,
        user_id: int,
        flags: Collection[str],
        private_message: bool,
        online_push_user_ids: Set[int],
        pm_mention_push_disabled_user_ids: Set[int],
        pm_mention_email_disabled_user_ids: Set[int],
        stream_push_user_ids: Set[int],
        stream_email_user_ids: Set[int],
        wildcard_mention_user_ids: Set[int],
        muted_sender_user_ids: Set[int],
    ) -> "UserMessageNotificationsData":
        """
        Build the flag bundle for `user_id` from precomputed user-id sets.

        `wildcard_mention_user_ids` holds the users whose wildcard mentions
        should follow their personal-mention notification settings; it is a
        wrapper around those settings, not an independent one.
        """
        email_allowed = user_id not in pm_mention_email_disabled_user_ids
        push_allowed = user_id not in pm_mention_push_disabled_user_ids
        personally_mentioned = "mentioned" in flags
        wildcard_mentioned = (
            user_id in wildcard_mention_user_ids and "wildcard_mentioned" in flags
        )
        return cls(
            user_id=user_id,
            pm_email_notify=email_allowed and private_message,
            mention_email_notify=email_allowed and personally_mentioned,
            wildcard_mention_email_notify=email_allowed and wildcard_mentioned,
            pm_push_notify=push_allowed and private_message,
            mention_push_notify=push_allowed and personally_mentioned,
            wildcard_mention_push_notify=push_allowed and wildcard_mentioned,
            online_push_enabled=(user_id in online_push_user_ids),
            stream_push_notify=(user_id in stream_push_user_ids),
            stream_email_notify=(user_id in stream_email_user_ids),
            sender_is_muted=(user_id in muted_sender_user_ids),
        )

    # For the methods below, `acting_user_id` is the user who sent (or
    # edited) the message that triggered the event whose notifiability we
    # need to determine.
    def is_notifiable(self, acting_user_id: int, idle: bool) -> bool:
        """True when either an email or a push notification should go out."""
        return self.is_email_notifiable(acting_user_id, idle) or self.is_push_notifiable(
            acting_user_id, idle
        )

    def is_push_notifiable(self, acting_user_id: int, idle: bool) -> bool:
        return self.get_push_notification_trigger(acting_user_id, idle) is not None

    def get_push_notification_trigger(self, acting_user_id: int, idle: bool) -> Optional[str]:
        """Return the push-notification trigger for this message, if any."""
        if not idle and not self.online_push_enabled:
            return None
        if self.user_id == acting_user_id:
            return None
        if self.sender_is_muted:
            return None

        # The checks run from most to least salient: a mention should be
        # classified as a mention even when a stream-level flag is also set.
        if self.pm_push_notify:
            return NotificationTriggers.PRIVATE_MESSAGE
        if self.mention_push_notify:
            return NotificationTriggers.MENTION
        if self.wildcard_mention_push_notify:
            return NotificationTriggers.WILDCARD_MENTION
        if self.stream_push_notify:
            return NotificationTriggers.STREAM_PUSH
        return None

    def is_email_notifiable(self, acting_user_id: int, idle: bool) -> bool:
        return self.get_email_notification_trigger(acting_user_id, idle) is not None

    def get_email_notification_trigger(self, acting_user_id: int, idle: bool) -> Optional[str]:
        """Return the email-notification trigger for this message, if any."""
        if not idle:
            return None
        if self.user_id == acting_user_id:
            return None
        if self.sender_is_muted:
            return None

        # The checks run from most to least salient: a mention should be
        # classified as a mention even when a stream-level flag is also set.
        if self.pm_email_notify:
            return NotificationTriggers.PRIVATE_MESSAGE
        if self.mention_email_notify:
            return NotificationTriggers.MENTION
        if self.wildcard_mention_email_notify:
            return NotificationTriggers.WILDCARD_MENTION
        if self.stream_email_notify:
            return NotificationTriggers.STREAM_EMAIL
        return None
def get_user_group_mentions_data(
    mentioned_user_ids: Set[int], mentioned_user_group_ids: List[int], mention_data: MentionData
) -> Dict[int, int]:
    """
    Map each member of the mentioned user groups to the group id that should
    trigger their notification.

    A user who is also mentioned personally is skipped (the personal mention
    is the trigger); a user in several mentioned groups is attributed to the
    group with the fewest members.
    """
    group_for_member: Dict[int, int] = {}
    for group_id in mentioned_user_group_ids:
        members = mention_data.get_group_members(group_id)
        for member_id in members:
            # Personal mentions take precedence over group mentions.
            if member_id in mentioned_user_ids:
                continue
            if member_id not in group_for_member:
                group_for_member[member_id] = group_id
                continue
            # Prefer the smallest mentioned group for email/mobile
            # notifications.
            current_members = mention_data.get_group_members(group_for_member[member_id])
            if len(current_members) > len(members):
                group_for_member[member_id] = group_id
    return group_for_member
|
# Copyright 2016-2021 The <NAME> at the California Institute of
# Technology (Caltech), with support from the Paul Allen Family Foundation,
# Google, & National Institutes of Health (NIH) under Grant U24CA224309-01.
# All rights reserved.
#
# Licensed under a modified Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.github.com/vanvalenlab/deepcell-tracking/LICENSE
#
# The Work provided may be used for non-commercial academic purposes only.
# For any other use of the Work, including commercial use, please contact:
# <EMAIL>
#
# Neither the name of Caltech nor the names of its contributors may be used
# to endorse or promote products derived from this software without specific
# prior written permission.
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for utility functions for the ISBI data format."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import networkx as nx
import numpy as np
from deepcell_tracking import isbi_utils
class TestIsbiUtils(object):
    """Tests for the ISBI-format helpers in deepcell_tracking.isbi_utils."""

    def test_trk_to_isbi(self, tmpdir):
        """Write a dummy lineage as an ISBI file and validate its lines."""
        # start with dummy lineage
        # convert to ISBI file
        # read file and validate
        track = {}
        # first cell, skips frame 3 but divides in frame 4
        track[1] = {
            'frames': [0, 1, 2, 4],  # skipped a frame
            'daughters': [2, 3],
            'frame_div': 4,
            'parent': None,
            'label': 1,
        }
        track[2] = {
            'frames': [5],
            'daughters': [],
            'frame_div': None,
            'parent': 1,
            'label': 2,
        }
        track[3] = {
            'frames': [5],
            'daughters': [4],  # parent not in previous frame
            'frame_div': 5,
            'parent': 1,
            'label': 3,
        }
        track[4] = {
            'frames': [7],
            'daughters': [],
            'frame_div': None,
            'parent': 3,
            'label': 4,
        }
        isbifile = os.path.join(str(tmpdir), 'test_trk_to_isbi')
        isbi_utils.trk_to_isbi(track, isbifile)
        # each line is "label start_frame end_frame parent"
        with open(isbifile, 'rb') as f:
            data = set(l.decode() for l in f.readlines())
        expected = {
            '1 0 4 0{}'.format(os.linesep),
            '2 5 5 1{}'.format(os.linesep),
            '3 5 5 1{}'.format(os.linesep),
            '4 7 7 0{}'.format(os.linesep),  # no parent; not consecutive frame
        }
        assert data == expected

    def test_txt_to_graph(self, tmpdir):
        """Round-trip an ISBI text file into a lineage graph and verify it."""
        # cell_id, start, end, parent_id
        rows = [
            (1, 0, 3, 0),  # cell 1 is in all 3 frames
            (2, 0, 2, 0),  # cell 2 is not in the last frame
            (3, 3, 3, 2),  # cell 3 is a daughter of 2
            (4, 3, 3, 2),  # cell 4 is a daughter of 2
            (5, 3, 3, 4),  # cell 5 is a daughter of 4, ignored bad frame value
        ]
        text_file = os.path.join(str(tmpdir), 'test_txt_to_graph.txt')
        with open(text_file, 'wb') as f:
            # write the file
            for row in rows:
                line = '{} {} {} {}{}'.format(
                    row[0], row[1], row[2], row[3], os.linesep)
                f.write(line.encode())
            f.flush()  # save the file
        # read the file
        G = isbi_utils.txt_to_graph(text_file)
        for row in rows:
            # one "{label}_{frame}" node per frame the cell appears in
            node_ids = ['{}_{}'.format(row[0], t)
                        for t in range(row[1], row[2] + 1)]
            for node_id in node_ids:
                assert node_id in G
            if row[3]:  # should have a division
                daughter_id = '{}_{}'.format(row[0], row[1])
                parent_id = '{}_{}'.format(row[3], row[1] - 1)
                if G.has_node(parent_id):
                    assert G.nodes[parent_id]['division'] is True
                    assert G.has_edge(parent_id, daughter_id)
                else:
                    # parent absent from the previous frame: no incoming edge
                    assert not G.in_degree(daughter_id)

    def test_classify_divisions(self):
        """Compare two lineage graphs and count the division categories."""
        # ground-truth graph G: two independent tracks
        G = nx.DiGraph()
        G.add_edge('1_0', '1_1')
        G.add_edge('1_1', '1_2')
        G.add_edge('1_2', '1_3')
        G.add_edge('2_0', '2_1')
        G.add_edge('2_1', '2_2')
        # node 2 divides into 3 and 4 in frame 3
        G.add_edge('2_2', '3_3')
        G.add_edge('2_2', '4_3')
        G.nodes['2_2']['division'] = True
        G.add_edge('4_3', '4_4')  # another division in frame 4
        G.nodes['4_3']['division'] = True
        # predicted graph H: perturb G's division annotations
        H = G.copy()
        H.nodes['1_3']['division'] = True  # False Positive
        H.nodes['4_3']['division'] = False  # False Negative
        # force an incorrect division
        G.add_edge('3_3', '5_4')  # another division in frame 4
        G.nodes['3_3']['division'] = True
        H.nodes['3_3']['division'] = True
        stats = isbi_utils.classify_divisions(G, H)
        assert stats['Correct division'] == 1  # the only correct one
        assert stats['False positive division'] == 1  # node 1_3
        assert stats['False negative division'] == 1  # node 4_3
        assert stats['Incorrect division'] == 1  # node 3_3

    def test_contig_tracks(self):
        """contig_tracks leaves contiguous tracks alone and splits gaps."""
        # test already contiguous
        frames = 5
        track = {
            1: {
                'label': 1,
                'frames': [0, 1, 2],
                'daughters': [2, 3],
                'parent': None,
            },
            2: {
                'label': 2,
                'frames': [3, 4],
                'daughters': [],
                'parent': 1
            },
            3: {
                'label': 3,
                'frames': [3, 4],
                'daughters': [],
                'parent': 1
            }
        }
        original_track = copy.copy(track)
        original_daughters = original_track[1]['daughters']
        y = np.random.randint(0, 4, size=(frames, 40, 40, 1))
        new_track, _ = isbi_utils.contig_tracks(1, track, y)
        assert original_track == new_track
        # test non-contiguous: frame 4 is separated from [0, 1, 2] by a gap,
        # so track 1 should be split and a new track appended
        track = copy.copy(original_track)
        track[1]['frames'].append(4)
        new_track, _ = isbi_utils.contig_tracks(1, track, y)
        assert len(new_track) == len(original_track) + 1
        assert new_track[1]['frames'] == original_track[1]['frames']
        # the daughters move to the newly created track segment
        daughters = new_track[max(new_track)]['daughters']
        assert daughters == original_daughters
        for d in daughters:
            assert new_track[d]['parent'] == max(new_track)
|
import logging
import uuid
from datetime import timedelta
from behave import *
from django.db.models import Sum
from api.tests.factories import (
UserFactory, InstanceFactory, IdentityFactory, InstanceStatusFactory,
ProviderFactory, ProviderMachineFactory, InstanceHistoryFactory)
from core.models import *
from core.models.allocation_source import total_usage
from jetstream.exceptions import TASPluginException
from jetstream.models import *
logger = logging.getLogger(__name__)
@given('a test Allocation Source')
def step_impl(context):
    """Create the AllocationSource that every later step works against."""
    context.current_time = timezone.now()
    name = "testSource"
    compute_allowed = 1000
    context.allocation_source = AllocationSource.objects.create(
        name=name, compute_allowed=compute_allowed)
    assert len(AllocationSource.objects.filter(name=name)) > 0
@when('Allocation Source is assigned to Users')
def step_impl(context):
    """Create the configured number of users and attach them to the source."""
    context.users = []
    for row in context.table:
        context.number_of_users = int(
            row['number of users assigned to allocation source'])
        for _ in range(context.number_of_users):
            new_user = UserFactory.create(date_joined=context.current_time)
            context.users.append(new_user)
            UserAllocationSource.objects.create(
                allocation_source=context.allocation_source, user=new_user)
            assert len(UserAllocationSource.objects.filter(
                user=new_user,
                allocation_source=context.allocation_source)) > 0
@when('All Users run an instance on Allocation Source for indefinite duration')
def step_impl(context):
    """Launch one instance per user and fire the allocation-change event."""
    for row in context.table:
        context.cpu_size = int(row['cpu size of instance'])
        for user in context.users:
            alias = launch_instance(user, context.current_time, context.cpu_size)
            event_payload = {
                "instance_id": str(alias),
                "allocation_source_name": context.allocation_source.name,
            }
            EventTable.objects.create(name="instance_allocation_source_changed",
                                      payload=event_payload,
                                      entity_id=user.username,
                                      timestamp=context.current_time)
            assert len(InstanceStatusHistory.objects.filter(
                instance__created_by=user)) == 1
@when('create_reports task is run for the first time')
def step_impl(context):
    """Run create_reports once and sanity-check the produced reports."""
    for row in context.table:
        context.interval_time = int(row['task runs every x minutes'])
    report_end_date = context.current_time + timedelta(minutes=context.interval_time)
    create_reports(end_date=report_end_date)
    assert len(TASAllocationReport.objects.all()) > 0
    assert TASAllocationReport.objects.last().end_date == report_end_date
    assert TASAllocationReport.objects.last().start_date == context.current_time
    # compute_used is stored in hours; x60 converts the aggregate to minutes.
    expected_initial_usage = (
        context.cpu_size * context.interval_time * context.number_of_users)
    calculated_initial_usage = float(
        TASAllocationReport.objects.filter(
            project_name=context.allocation_source.name
        ).aggregate(Sum('compute_used'))['compute_used__sum']) * 60
    assert round(calculated_initial_usage, 2) == expected_initial_usage
    context.current_time = context.current_time + timedelta(
        minutes=context.interval_time)
@when('Users are deleted from Allocation Source after first create_reports run')
def step_impl(context):
    """Fire removal events for the first N users and verify they detach."""
    for row in context.table:
        users_deleted = int(
            row['number of users deleted from allocation source'])
        users_deleted_after_time = int(
            row['users deleted x minutes after the first create_reports run'])
        for i in range(users_deleted):
            removed_user = context.users[i]
            event_payload = {
                "allocation_source_name": context.allocation_source.name,
            }
            EventTable.objects.create(
                payload=event_payload,
                name="user_allocation_source_deleted",
                entity_id=removed_user.username,
                timestamp=context.current_time + timedelta(
                    minutes=users_deleted_after_time))
            assert len(UserAllocationSource.objects.filter(
                user=removed_user,
                allocation_source=context.allocation_source)) == 0
@then(
    'Total expected allocation usage for allocation source matches calculated allocation usage from reports after next create_reports run')
def step_impl(context):
    """Re-run create_reports and compare reported usage with the expectation."""
    for row in context.table:
        total_expected_usage = int(
            row['total expected allocation usage in minutes'])
    report_end_date = context.current_time + timedelta(minutes=context.interval_time)
    create_reports(end_date=report_end_date)
    assert len(TASAllocationReport.objects.all()) == 2 * context.number_of_users
    assert TASAllocationReport.objects.last().start_date == context.current_time
    minutes_used = float(
        TASAllocationReport.objects.filter(
            project_name=context.allocation_source.name
        ).aggregate(Sum('compute_used'))['compute_used__sum']) * 60
    logging.info("\n\n expected:%s actual:%s \n\n" % (
        total_expected_usage, int(minutes_used)))
    # just for the purpose of these test cases, we require time in minutes
    # conversion from microseconds to hours and then hours to minutes with
    # rounding results in loss of time, so instead of comparing exact values
    # we check that the difference is not more than a minute (or two)
    assert abs(total_expected_usage - int(minutes_used)) < 2
#### Helpers ####
def launch_instance(user, time_created, cpu):
    """
    Launch a factory-built 'active' instance for `user` with a 30-minute
    status-history entry and return its provider alias.

    :param user: the AtmosphereUser the instance belongs to
    :param time_created: timezone-aware launch timestamp
    :param cpu: number of CPUs for the (newly created) instance Size
    :return: the new instance's provider_alias (a UUID)
    """
    provider = ProviderFactory.create()
    from core.models import IdentityMembership, Identity
    # Reuse an existing identity when the user already has a membership;
    # otherwise create one for this provider.
    membership = IdentityMembership.objects.filter(member__name=user.username)
    if membership.exists():
        user_identity = Identity.objects.all().last()
    else:
        user_identity = IdentityFactory.create_identity(
            created_by=user,
            provider=provider)
    # Reuse an existing provider machine if one is present (single query,
    # instead of evaluating the whole queryset and then fetching again).
    machine = ProviderMachine.objects.all().last()
    if machine is None:
        machine = ProviderMachineFactory.create_provider_machine(user, user_identity)
    status = InstanceStatusFactory.create(name='active')
    instance = InstanceFactory.create(
        provider_alias=uuid.uuid4(),
        source=machine.instance_source,
        created_by=user,
        created_by_identity=user_identity,
        start_date=time_created)
    size = Size(alias=uuid.uuid4(), name='small', provider=provider,
                cpu=cpu, disk=1, root=1, mem=1)
    size.save()
    # Give the instance a 30-minute 'active' history window starting at launch.
    InstanceHistoryFactory.create(
        status=status,
        activity="",
        instance=instance,
        start_date=time_created,
        end_date=time_created + timedelta(minutes=30),
        size=size
    )
    return instance.provider_alias
def create_reports(end_date=False):
    """
    Build TAS allocation reports for every current user/allocation pairing,
    then for users removed from an allocation since the last stored report.

    Returns the list of created TASAllocationReport objects.
    """
    reports = []
    if not end_date:
        end_date = timezone.now()
    # Window start for the "deleted user" pass: the end of the newest stored
    # report, or the current end_date when no report exists yet.
    previous_reports = TASAllocationReport.objects.order_by('end_date')
    last_report_date = previous_reports.last().end_date if previous_reports else end_date
    # Active user <-> allocation pairings.
    for assignment in UserAllocationSource.objects.all():
        report = _create_reports_for(
            assignment.user, assignment.allocation_source.name, end_date)
        if report:
            reports.append(report)
    # Users removed from an allocation after the last report: report their
    # usage up to the moment of removal (the event timestamp).
    removal_events = EventTable.objects.filter(
        name="user_allocation_source_deleted",
        timestamp__gte=last_report_date).order_by('timestamp')
    for event in removal_events:
        removed_user = AtmosphereUser.objects.get(username=event.entity_id)
        report = _create_reports_for(
            removed_user,
            event.payload['allocation_source_name'],
            event.timestamp)
        if report:
            reports.append(report)
    return reports
def _create_reports_for(user, allocation_name, end_date):
    """
    Build a single TAS report for one user/allocation pair.

    Returns the new TASAllocationReport, or None when the TACC username is
    missing or report creation raised a TASPluginException.
    """
    driver = TASAPIDriver()  # kept for parity with the API-backed lookups below
    tacc_username = user.username  # driver.get_tacc_username(user)
    if not tacc_username:
        logger.error(
            "No TACC username for user: '{}' which came from allocation id: {}".format(
                user, allocation_name))
        return None
    project_name = allocation_name  # driver.get_allocation_project_name(allocation_name)
    try:
        return _create_tas_report_for(user, tacc_username, project_name, end_date)
    except TASPluginException:
        logger.exception(
            "Could not create the report because of the error below"
        )
        return None
def _create_tas_report_for(user, tacc_username, tacc_project_name, end_date):
    """
    Create and persist a new TASAllocationReport for one user/project pair.

    :param user: AtmosphereUser the report is about
    :param tacc_username: TACC username recorded on the report
    :param tacc_project_name: OpenStack/TACC project (allocation) name
    :param end_date: explicit end of the reporting window (required)
    :raises TASPluginException: on any missing argument or negative usage
    :returns: the created TASAllocationReport
    """
    if not end_date:
        raise TASPluginException("Explicit end date required")
    if not user:
        raise TASPluginException("User missing")
    if not tacc_username:
        raise TASPluginException("TACC Username missing")
    if not tacc_project_name:
        raise TASPluginException("OpenStack/TACC Project missing")
    # Each report starts where the previous one for this user/project ended;
    # the very first report starts at the user's join date.
    last_report = TASAllocationReport.objects.filter(
        project_name=tacc_project_name,
        user=user
    ).order_by('end_date').last()
    if not last_report:
        start_date = user.date_joined
    else:
        start_date = last_report.end_date
    compute_used = total_usage(
        user.username, start_date,
        allocation_source_name=tacc_project_name,
        end_date=end_date)
    if compute_used < 0:
        raise TASPluginException(
            "Compute usage was not accurately calculated for user:%s for start_date:%s and end_date:%s"
            % (user, start_date, end_date))
    new_report = TASAllocationReport.objects.create(
        user=user,
        username=tacc_username,
        project_name=tacc_project_name,
        compute_used=compute_used,
        start_date=start_date,
        end_date=end_date,
        tacc_api=settings.TACC_API_URL)
    logger.info("Created New Report:%s" % new_report)
    return new_report
|
<filename>src/generator.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri May 26 16:40:21 2017
@author: lenkakt
"""
import os, sys, getopt
import gravi
import numpy as np
# Usage/help strings printed for -h and on argument errors.
# NOTE(review): the text refers to "get_data.py" while this file is
# generator.py — presumably a stale script name; confirm before changing
# the user-facing literal.
strMainHelp = 'Usage: \n ' + \
'get_data.py -o <OutputFolder> -s <DataSize> -x <XLength> -y <YLength> -a <XStep> -b <YStep> -n <AddNoise> -l <NoiseLevel> -d <DataSeparator> -c <PrintComment> -X <XLimit>'+ \
'\n Example: get_data.py --DataSize=1000'
strShortHelp = 'get_data.py -o <OutputFolder> -s <DataSize> -x <XLength> -y <YLength> -a <XStep> -b <YStep> -n <AddNoise> -l <NoiseLevel> -d <DataSeparator> -c <PrintComment> -X <XLimit>'
def main(argv):
#---------------------------------------------------
# Configuration / parameters to be set
#
#
OutputFolder = './'
DataSize = 1
XLength = 100 #X lenght of the area in meters
YLength = 100 #Y length of the area in meters
XStep = 1 #The size of sampling step in meters
YStep = 1 #The size of sampling step in meters
AddNoise = False
NoiseLevel = 0.1 #The Noise level in percents
AnomalyTypes = [0,1,2] #The index of an anomaly type defined in Anomalies. Should be moved to parameters from command line
Densities = [1e6,1e4,1e4] #The list of densities for each of anomaly types
MaxDepth = 50
MaxRadius = 20
DataSeparator = ','
PrintComment = False
XLimit = 0
#----------------------------------------------------------------------------
#
# List of defined anomalies:
#
Anomalies = ["Sphere - density","Horizontal cylinder","Vertical cylinder","Random"]
#AnomaliesFcn = ["sphere_density","cylinder_horizontal","cylinder_vertical","random_data"]
try:
opts, args = getopt.getopt(argv,"ho:s:x:y:a:b:nl:d:cX:",["OutputFolder=","DataSize=","XLength=","YLength=","XStep=","YStep=","AddNoise","NoiseLevel=","DataSeparator=","PrintComment","XLimit="])
except getopt.GetoptError:
print strShortHelp
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print strMainHelp
sys.exit()
elif opt in ("-o", "--OutputFolder"):
OutputFolder = arg
if not os.path.exists(OutputFolder):
print "Creating the output directory " + OutputFolder
os.makedirs(OutputFolder)
elif opt in ("-s", "--DataSize"):
DataSize = int(arg)
elif opt in ("-x","--XLength"):
XLength = int(arg)
elif opt in ("-y","--YLength"):
YLength = int(arg)
elif opt in ("-a","--XStep"):
XStep = int(arg)
elif opt in ("-b","--YStep"):
YStep = int(arg)
elif opt in ("-n","--AddNoise"):
AddNoise = True
elif opt in ("-l","--NoiseLevel"):
NoiseLevel = float(arg)
elif opt in ("-d","--DataSeparator"):
DataSeparator = str(arg)
elif opt in ("-c","--PrintComment"):
PrintComment = True
elif opt in ("-X","--XLimit"):
XLimit = int(arg)
#------------------------------------------------------------------------
#
#Create a grid
#
#
XPoints = np.arange(0,XLength,XStep)
YPoints = np.arange(0,YLength,YStep)
XScale, YScale = np.meshgrid(XPoints,YPoints)
#--------------------------------------------------------------------------
# Set random parameters
#
Depths = np.random.randint(1,MaxDepth,(DataSize,1))
Radiuses = np.random.randint(1,MaxRadius,(DataSize,1))
#-----------------------------------------------------------------------
# Set print options
#
np.set_printoptions(threshold=np.inf, linewidth=np.inf)
# Main part:
# Points: always have [X1,Y1] or [X1,Y1,X2,Y2]
# Depths always depth from 1 - 20
# Radius always Radius from 1 - 20
for Anomaly in AnomalyTypes:
if Anomaly==0: #Sphere
Points = np.random.randint(1+XLimit,XLength-XLimit,(DataSize,2))
for i in range(0,DataSize):
YPos = Points[i,0]
XPos = Points[i,1]
ZPos = Depths[i,0]
Radius = Radiuses[i,0]
if (Radius > ZPos):
store = Radius
Radius = ZPos
ZPos = store
Density = Densities[Anomaly]
DescriptionString = Anomalies[Anomaly] + " XPos: "+str(XPos)+" YPos: "+str(YPos)+ " ZPos: "+str(ZPos)+" Density: "+str(Density)+" Radius: "+str(Radius)+" XLength: "+str(XLength)+" XStep: "+str(XStep)+" YLength: "+str(YLength)+" YStep: "+str(YStep)
FileName = Anomalies[Anomaly]+"_"+str(XPos)+"_"+str(YPos)+"_"+str(ZPos)+"_"+str(Density)+"_"+str(Radius)+"_"+str(XLength)+"_"+str(XStep)+"_"+str(YLength)+"_"+str(YStep)+".txt"
AnomalyMatrix = gravi.sphere_mass(XPos,YPos,ZPos,Density,Radius,XScale,YScale)
if AddNoise:
DataScale = NoiseLevel*np.amax(AnomalyMatrix)
NoiseMatrix = gravi.random_data(DataScale,XLength,YLength)
AnomalyMatrix = AnomalyMatrix + NoiseMatrix
FilePath = OutputFolder + "/" + FileName
if os.path.exists(FilePath):
try:
os.remove(FilePath)
except OSError, e:
print ("Error: %s - %s." % (e.filename,e.strerror))
with open(FilePath, 'w') as f:
if PrintComment:
f.writelines('#'+DescriptionString+"\n")
f.writelines(np.array2string(AnomalyMatrix,separator=DataSeparator))
f.close()
if (i % 100 == 0):
print Anomalies[Anomaly]
print i
elif Anomaly == 1: #HCylinder
Points = np.random.randint(1,XLength,(DataSize,4))
for i in range(0,DataSize):
YPos = Points[i,0]
XPos = Points[i,1]
YPos2 = Points[i,2]
XPos2 = Points[i,3]
ZPos = Depths[i,0]
Radius = Radiuses[i,0]
if (Radius > ZPos):
store = Radius
Radius = ZPos
ZPos = store
Density = Densities[Anomaly]
DescriptionString = Anomalies[Anomaly] + " XPos: "+str(XPos)+" YPos: "+str(YPos)+ " XPos2: "+str(XPos2)+" YPos2: "+str(YPos2)+" ZPos: "+str(ZPos)+" Density: "+str(Density)+" Radius: "+str(Radius)+" XLength: "+str(XLength)+" XStep: "+str(XStep)+" YLength: "+str(YLength)+" YStep: "+str(YStep)
FileName = Anomalies[Anomaly]+"_"+str(XPos)+"_"+str(YPos)+"_"+str(XPos2)+"_"+str(YPos2)+"_"+str(ZPos)+"_"+str(Density)+"_"+str(Radius)+"_"+str(XLength)+"_"+str(XStep)+"_"+str(YLength)+"_"+str(YStep)+".txt"
AnomalyMatrix = gravi.cylinder_horizontal(XPos,YPos,XPos2,YPos2,ZPos,Density,Radius,XScale,YScale)
if AddNoise:
DataScale = NoiseLevel*np.amax(AnomalyMatrix)
NoiseMatrix = gravi.random_data(DataScale,XLength,YLength)
AnomalyMatrix = AnomalyMatrix + NoiseMatrix
FilePath = OutputFolder + "/" + FileName
if os.path.exists(FilePath):
try:
os.remove(FilePath)
except OSError, e:
print ("Error: %s - %s." % (e.filename,e.strerror))
with open(FilePath, 'w') as f:
if PrintComment:
f.writelines('#'+DescriptionString+"\n")
f.writelines(np.array2string(AnomalyMatrix,separator=DataSeparator))
f.close()
if (i % 100 == 0):
print Anomalies[Anomaly]
print i
elif Anomaly == 2: #VCylinder
Points = np.random.randint(1,XLength,(DataSize,2))
for i in range(0,DataSize):
YPos = Points[i,0]
XPos = Points[i,1]
ZPos = Depths[i,0]
Radius = Radiuses[i,0]
if (Radius > ZPos):
store = Radius
Radius = ZPos
ZPos = store
Density = Densities[Anomaly]
DescriptionString = Anomalies[Anomaly] + " XPos: "+str(XPos)+" YPos: "+str(YPos)+ " ZPos: "+str(ZPos)+" Density: "+str(Density)+" Radius: "+str(Radius)+" XLength: "+str(XLength)+" XStep: "+str(XStep)+" YLength: "+str(YLength)+" YStep: "+str(YStep)
FileName = Anomalies[Anomaly]+"_"+str(XPos)+"_"+str(YPos)+"_"+str(XPos2)+"_"+str(YPos2)+"_"+str(ZPos)+"_"+str(Density)+"_"+str(Radius)+"_"+str(XLength)+"_"+str(XStep)+"_"+str(YLength)+"_"+str(YStep)+".txt"
AnomalyMatrix = gravi.cylinder_vertical(XPos,YPos,ZPos,Density,Radius,XScale,YScale)
if AddNoise:
DataScale = NoiseLevel*np.amax(AnomalyMatrix)
NoiseMatrix = gravi.random_data(DataScale,XLength,YLength)
AnomalyMatrix = AnomalyMatrix + NoiseMatrix
FilePath = OutputFolder + "/" + FileName
if os.path.exists(FilePath):
try:
os.remove(FilePath)
except OSError, e:
print ("Error: %s - %s." % (e.filename,e.strerror))
with open(FilePath, 'w') as f:
if PrintComment:
f.writelines('#'+DescriptionString+"\n")
f.writelines(np.array2string(AnomalyMatrix,separator=DataSeparator))
f.close()
if (i % 100 == 0):
print Anomalies[Anomaly]
print i
else:
print 'Unknown anomaly type index'
if __name__ == "__main__":
main(sys.argv[1:])
|
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 16 17:46:57 2017
@author: kcarnold
"""
from megacomplete import data
import numpy as np
import scipy.sparse
#%%
# Load Yelp reviews as tokenized sentences, grouped per document.
sents = data.yelp_sents()
#%%
# Keep only sentences whose length falls inside the interquartile range.
sent_lens = np.array([len(sent) for doc in sents for sent in doc])
min_sent_len, max_sent_len = np.percentile(sent_lens, [25, 75])
#%%
# One randomly chosen reasonable-length sentence per document
# (seeded RandomState so the per-document choice is reproducible).
rs = np.random.RandomState(0)
reasonable_length_sents = [[sent for sent in doc if min_sent_len <= len(sent) <= max_sent_len] for doc in sents]
orig_sents_flat = [rs.choice(doc_sents) for doc_sents in reasonable_length_sents if doc_sents]
print('\n'.join(np.random.choice(orig_sents_flat, 10, replace=False)))
#%%
from sklearn.feature_extraction.text import TfidfVectorizer
vectorizer = TfidfVectorizer(min_df=5, max_df=.5, stop_words='english')
orig_vecs = vectorizer.fit_transform(orig_sents_flat)
#%%
# Drop sentences whose tf-idf vector is all zeros (nothing left after
# stop-word / min_df filtering).
vec_norms = scipy.sparse.linalg.norm(orig_vecs, axis=1)
indices_to_keep = np.flatnonzero(vec_norms)
vecs = orig_vecs[indices_to_keep]
sents_flat = [orig_sents_flat[i] for i in indices_to_keep]
#%%
print('\n'.join(np.random.choice(sents_flat, 10, replace=False)))
#%%
# Similarity
#import numpy as np
#
#sims = vecs * vectorizer.transform(['the service was great']).T
#sims_A = sims.A.ravel().copy()
#sims_A[sims_A > .999] = 0
#sims_argsort = np.argsort(sims_A)
#[sents_flat[i] for i in sims_argsort[-50:]]
#%%
# Cluster the tf-idf vectors.
from sklearn.cluster import MiniBatchKMeans
mbk = MiniBatchKMeans(init='k-means++', n_clusters=10, n_init=10)
clusters = mbk.fit_predict(vecs)
#%%
import numpy as np
np.bincount(clusters)
#%%
# Show 10 random sentences from each cluster.
for c in range(np.max(clusters)+1):
    ss = np.flatnonzero(clusters == c)
    np.random.shuffle(ss)
    for i in ss[:10]:
        print(sents_flat[i])
    print()
#%%
# Show the 10 sentences closest to each cluster centroid.
cluster_dists = mbk.transform(vecs)
for c in range(cluster_dists.shape[1]):
    print(c)
    for i in np.argsort(cluster_dists[:,c])[:10]:
        print(i, sents_flat[i].replace('\n', ' '))
    print()
#%%
import subprocess
def dump_kenlm(model_name, tokenized_sentences):
    """Write the lowercased token strings, one per line, to
    models/<model_name>.txt (no special markers), then run the KenLM build
    script on that corpus."""
    corpus_path = 'models/{}.format'.replace('.format', '.txt').format(model_name)
    corpus_path = 'models/{}.txt'.format(model_name)
    with open(corpus_path, 'w') as corpus_file:
        corpus_file.writelines(line.lower() + '\n' for line in tokenized_sentences)
    subprocess.run(['./scripts/make_model.sh', model_name])
#%%
# We used a subsample of sentences for making the clustering. Train the LMs on the full set, though.
# or not.
# Group the sampled sentences by their assigned cluster.
# NOTE(review): `clusters` was computed on the filtered `vecs`, but this
# indexes `orig_sents_flat` (unfiltered) — positions shift whenever
# indices_to_keep dropped zero-norm rows, pairing clusters with the wrong
# sentences. Confirm intent.
sentences_in_cluster = [[] for i in range(mbk.n_clusters)]
for i, c in enumerate(clusters):
    sentences_in_cluster[c].append(orig_sents_flat[i])
#%%
[len(c) for c in sentences_in_cluster]
#%%
# Build one KenLM language model per cluster.
for cluster_idx, cluster in enumerate(sentences_in_cluster):
    print(cluster_idx)
    dump_kenlm('cluster_{}'.format(cluster_idx), [s.lower() for s in cluster])
#%%
# Load the per-cluster models back for the suggestion generator.
from suggestion import suggestion_generator, paths
models = [suggestion_generator.Model.from_basename(paths.paths.model_basename('cluster_{}'.format(cluster_idx))) for cluster_idx in range(mbk.n_clusters)]
#%%
|
"""
==========================================
From raw data to dSPM on SPM Faces dataset
==========================================
Runs a full pipeline using MNE-Python:
- artifact removal
- averaging Epochs
- forward model computation
- source reconstruction using dSPM on the contrast : "faces - scrambled"
"""
print(__doc__)
# Authors: <NAME> <<EMAIL>>
#          <NAME> <<EMAIL>>
#
# License: BSD (3-clause)
import matplotlib.pyplot as plt
import mne
from mne.datasets import spm_face
from mne.preprocessing import ICA, create_eog_epochs
from mne import io
from mne.minimum_norm import make_inverse_operator, apply_inverse
# Paths into the SPM faces dataset (downloaded on first use).
data_path = spm_face.data_path()
subjects_dir = data_path + '/subjects'
###############################################################################
# Load and filter data, set up epochs
raw_fname = data_path + '/MEG/spm/SPM_CTF_MEG_example_faces%d_3D_raw.fif'
raw = io.Raw(raw_fname % 1, preload=True)  # Take first run
picks = mne.pick_types(raw.info, meg=True, exclude='bads')
# Band-pass 1-30 Hz; the 1 Hz high-pass removes slow drifts, which is why
# no baseline correction is applied below.
raw.filter(1, 30, method='iir')
events = mne.find_events(raw, stim_channel='UPPT001')
# plot the events to get an idea of the paradigm
mne.viz.plot_events(events, raw.info['sfreq'])
event_ids = {"faces": 1, "scrambled": 2}
tmin, tmax = -0.2, 0.6
baseline = None  # no baseline as high-pass is applied
reject = dict(mag=5e-12)
epochs = mne.Epochs(raw, events, event_ids, tmin, tmax, picks=picks,
                    baseline=baseline, preload=True, reject=reject)
# Fit ICA, find and remove major artifacts
ica = ICA(n_components=0.95).fit(raw, decim=6, reject=reject)
# compute correlation scores, get bad indices sorted by score
eog_epochs = create_eog_epochs(raw, ch_name='MRT31-2908', reject=reject)
eog_inds, eog_scores = ica.find_bads_eog(eog_epochs, ch_name='MRT31-2908')
ica.plot_scores(eog_scores, eog_inds)  # see scores the selection is based on
ica.plot_components(eog_inds)  # view topographic sensitivity of components
ica.exclude += eog_inds[:1]  # we saw the 2nd ECG component looked too dipolar
ica.plot_overlay(eog_epochs.average())  # inspect artifact removal
epochs_cln = ica.apply(epochs, copy=True)  # clean data, default in place
evoked = [epochs_cln[k].average() for k in event_ids]
# NOTE(review): with event_ids = {"faces": 1, "scrambled": 2} this computes
# scrambled - faces, while the module docstring says "faces - scrambled" —
# confirm the intended sign (also depends on dict order on older Pythons).
contrast = evoked[1] - evoked[0]
evoked.append(contrast)
for e in evoked:
    e.plot(ylim=dict(mag=[-400, 400]))
plt.show()
# estimate noise covariance
noise_cov = mne.compute_covariance(epochs_cln, tmax=0)
###############################################################################
# Visualize fields on MEG helmet
trans_fname = data_path + ('/MEG/spm/SPM_CTF_MEG_example_faces1_3D_'
                           'raw-trans.fif')
maps = mne.make_field_map(evoked[0], trans_fname=trans_fname,
                          subject='spm', subjects_dir=subjects_dir,
                          n_jobs=1)
evoked[0].plot_field(maps, time=0.170)
###############################################################################
# Compute forward model
# Make source space
src = mne.setup_source_space('spm', spacing='oct6', subjects_dir=subjects_dir,
                             overwrite=True)
mri = trans_fname
bem = data_path + '/subjects/spm/bem/spm-5120-5120-5120-bem-sol.fif'
forward = mne.make_forward_solution(contrast.info, mri=mri, src=src, bem=bem)
forward = mne.convert_forward_solution(forward, surf_ori=True)
###############################################################################
# Compute inverse solution
snr = 3.0
lambda2 = 1.0 / snr ** 2  # regularization weight
method = 'dSPM'
inverse_operator = make_inverse_operator(contrast.info, forward, noise_cov,
                                         loose=0.2, depth=0.8)
# Compute inverse solution on contrast
stc = apply_inverse(contrast, inverse_operator, lambda2, method,
                    pick_normal=False)
# stc.save('spm_%s_dSPM_inverse' % constrast.comment)
# plot contrast
# Plot brain in 3D with PySurfer if available. Note that the subject name
# is already known by the SourceEstimate stc object.
brain = stc.plot(surface='inflated', hemi='both', subjects_dir=subjects_dir)
brain.set_time(170.0)  # milliseconds
brain.scale_data_colormap(fmin=4, fmid=6, fmax=8, transparent=True)
brain.show_view('ventral')
# brain.save_image('dSPM_map.png')
|
import json
import uuid
import requests
from invoke.watchers import Responder
from invoke import task
class DeploymentError(Exception):
    """Raised when an Azure resource-creation or deployment step fails."""

    def __init__(self, message):
        # Bug fix: the format string had no "{}" placeholder, so the
        # supplied message was silently dropped from the exception text.
        super(DeploymentError, self).__init__("Deployment error: {}".format(message))
def _create_resources(c, project_name, verbose=False):
    """Create the Azure resource group, storage account, Redis cache and
    Function App for ``project_name``; raise DeploymentError on failure."""
    # location westus is used because requests comes from Menlo Park, California, US
    steps = (
        ("Resource Group",
         "az group create --name {}-gr --location westus"),
        ("Storage",
         "az storage account create --name {0}st --location westus --resource-group {0}-gr --sku Standard_LRS"),
        ("Redis",
         "az redis create --resource-group {0}-gr --location westus --name {0}-rd --sku Basic --vm-size C0"),
        ("Function App",
         "az functionapp create --resource-group {0}-gr --name {0}-fn --consumption-plan-location westus "
         "--storage-account {0}st"),
    )
    for label, template in steps:
        result = c.run(template.format(project_name),
                       hide=None if verbose else "out", echo=verbose)
        if not result.ok:
            raise DeploymentError("Can't create resources")
        print("Successfully created {}".format(label))
@task
def delete_resources(c, project_name, verbose=False):
    """Delete the project's Azure resource group (and everything in it),
    auto-answering the CLI confirmation prompt with "y"."""
    # NOTE(review): Responder.pattern is interpreted as a regex, so '?', '('
    # and ')' here are metacharacters rather than literals; the pattern still
    # happens to match the prompt, but consider re.escape()-ing it.
    responder = Responder(
        pattern=r"Are you sure you want to perform this operation? (y/n):",
        response="y\n",
    )
    c.run("az group delete --name {}-gr".format(project_name),
          watchers=[responder],
          pty=True,  # az prompts only on a TTY
          hide=None if verbose else "out",
          echo=verbose)
@task
def set_os_env(c, project_name, fb_page_access_token, fb_verify_token, db_url, db_password, verbose=False):
    """
    Push the bot's runtime configuration (Facebook tokens, Redis host/key and
    database settings) into the Function App's application settings.

    :param fb_verify_token: webhook verify token; a random UUID string is
        generated when none is supplied.
    """
    if not fb_verify_token:
        # Bug fix: this line was corrupted ("str(<KEY>())"); restore the
        # random-token generation using the uuid module imported at file top.
        fb_verify_token = str(uuid.uuid4())
    # `az redis list-keys` prints JSON; we need the primary access key.
    r = c.run("az redis list-keys --resource-group {0}-gr --name {0}-rd".format(project_name),
              hide=None if verbose else "out", echo=verbose)
    res_dict = json.loads(r.stdout)
    c.run('az functionapp config appsettings set --name {0}-fn --resource-group {0}-gr --settings '
          'FB_VERIFY_TOKEN="{1}" FB_PAGE_ACCESS_TOKEN="{2}" REDIS_HOST="{3}" REDIS_PASSWD="{4}" '
          'DB_URL="{5}" DB_PASSWORD="{6}"'.format(
              project_name,
              fb_verify_token,
              fb_page_access_token,
              "{}-rd.redis.cache.windows.net".format(project_name),
              res_dict["primaryKey"],
              db_url,
              db_password),
          hide=None if verbose else "out", echo=verbose)
@task
def set_device_access_conf(c, project_name, device_host, shared_access_key_name, shared_access_key, auth_function_key,
                           verbose=False):
    """Store the IoT device connection settings on the Function App."""
    settings_cmd = (
        'az functionapp config appsettings set --name {0}-fn --resource-group {0}-gr --settings '
        'DEVICE_HOST="{1}" DEVICE_ACCESS_KEY_NAME="{2}" DEVICE_ACCESS_KEY="{3}" AUTH_FUNCTION_KEY="{4}"'
    ).format(project_name, device_host, shared_access_key_name, shared_access_key, auth_function_key)
    c.run(settings_cmd, hide=None if verbose else "out", echo=verbose)
@task
def set_admin_fb_sender_ids(c, project_name, ids, verbose=False):
    """Record the Facebook sender ids that get admin privileges in the bot."""
    settings_cmd = (
        'az functionapp config appsettings set --name {0}-fn --resource-group {0}-gr --settings '
        'ADMIN_FB_SENDER_IDS="{1}"'
    ).format(project_name, ids)
    c.run(settings_cmd, hide=None if verbose else "out", echo=verbose)
@task
def deploy(c, project_name, fb_page_access_token=None, fb_verify_token=None, skip_resources_creation=False,
           db_url=None, db_password=None, verbose=False):
    """
    End-to-end deployment: create the Azure resources (unless skipped), push
    application settings, then zip and upload the function app code.
    """
    if not skip_resources_creation:
        # These arguments are only mandatory when resources are being created.
        if not (fb_page_access_token and db_url and db_password):
            print("'deploy' did not receive some of required arguments: "
                  "--fb-page-access-token, --db-url, --db-password\n"
                  "You can skip those arguments only if use --skip-resources-creation")
            return 1
        try:
            _create_resources(c, project_name, verbose)
        except DeploymentError as e:
            # Roll back partially-created resources before surfacing the error.
            delete_resources(c, project_name)
            raise e
        # NOTE(review): original indentation was lost; set_os_env is assumed
        # to run only on the resource-creation path, since its token/db
        # arguments may be None when creation is skipped — confirm.
        set_os_env(c, project_name, fb_page_access_token, fb_verify_token, db_url, db_password, verbose)
    c.run("zip -FSr greenBotMessenger.zip .", hide=None if verbose else "out", echo=verbose)
    c.run("az functionapp deployment source config-zip --src greenBotMessenger.zip "
          "--name {0}-fn --resource-group {0}-gr --debug".format(project_name),
          hide=None if verbose else "out", echo=verbose)
@task
def show_config(c, project_name, verbose=False):
    """Print the bot-relevant Function App settings plus the webhook URL."""
    interesting = {"REDIS_HOST", "REDIS_PASSWD", "FB_VERIFY_TOKEN", "FB_PAGE_ACCESS_TOKEN",
                   "DB_URL", "DB_PASSWORD", "DEVICE_HOST", "DEVICE_ACCESS_KEY_NAME", "DEVICE_ACCESS_KEY",
                   "ADMIN_FB_SENDER_IDS"}
    result = c.run("az functionapp config appsettings list"
                   " --name {0}-fn --resource-group {0}-gr".format(project_name),
                   hide=None if verbose else "out", echo=verbose)
    settings = {entry["name"]: entry["value"]
                for entry in json.loads(result.stdout)
                if entry["name"] in interesting}
    settings["WEBHOOK_URL"] = "https://{}-fn.azurewebsites.net/api/webhook".format(project_name)
    print("\n\n".join("{} = {}".format(name, value) for name, value in settings.items()))
@task
def setup_fb_greeting(c, fb_page_access_token, verbose=False):
    """
    Configure the Messenger profile: the greeting text and the Get Started
    button payload.

    Fix: the two debug prints used Python 2 statement syntax
    ("print res.content"), a SyntaxError on Python 3; they are now print()
    calls, consistent with the rest of this file (and still valid on py2).
    """
    request_body_greeting = {
        "greeting": [
            {
                "locale": "default",
                "text": "Welcome, {{user_full_name}}! Let's grow something!"
            },
        ]
    }
    request_body_get_started = {
        "get_started": {"payload": "get_started"}
    }
    res = requests.post(url="https://graph.facebook.com/v2.6/me/messenger_profile",
                        params={"access_token": fb_page_access_token},
                        headers={'content-type': 'application/json'},
                        data=json.dumps(request_body_greeting))
    if verbose:
        print(res.content)
    res = requests.post(url="https://graph.facebook.com/v2.6/me/messenger_profile",
                        params={"access_token": fb_page_access_token},
                        headers={'content-type': 'application/json'},
                        data=json.dumps(request_body_get_started))
    if verbose:
        print(res.content)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
"""
import os
import sys
sys.path.insert(1, '/home/labs/ahissarlab/arivkind/imagewalker')
sys.path.insert(1, '/home/labs/ahissarlab/orra/imagewalker')
sys.path.insert(1, '/home/orram/Documents/GitHub/imagewalker')
import random
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.datasets import cifar10
import matplotlib.pyplot as plt
import scipy.stats as stats
import pandas as pd
import time
import pickle
import argparse
from feature_learning_utils import student3, write_to_file, full_learning_dataset_update, net_weights_reinitializer
from keras_utils import create_cifar_dataset, split_dataset_xy
# Log the working directory, later used as the base path for outputs.
print(os.getcwd() + '/')
#%%
# load dataset
(trainX, trainY), (testX, testY) = cifar10.load_data()
images, labels = trainX, trainY  # raw (unnormalized) copies for the syclop dataset builder
# if len(sys.argv) == 1:
# parameters = {
# 'layer_name' : 'max_pool2',#layers_names[int(sys.argv[1])],
# 'num_feature' : 64,#int(sys.argv[2]),
# 'trajectory_index' : 40,#int(sys.argv[3]),
# 'sample' : 5,
# 'res' : 8,
# 'run_index' : np.random.randint(10,100),
# 'dropout' : 0.2,
# 'rnn_dropout' : 0,
# 'run_name_prefix': 'noname'
# }
# else:
# parameters = {
# 'layer_name' : layers_names[int(sys.argv[1])],
# 'num_feature' : int(sys.argv[2]),
# 'trajectory_index' : int(sys.argv[3]),
# 'sample' : int(sys.argv[4]),
# 'res' : int(sys.argv[5]),
# 'run_index' : np.random.randint(10,100),
# 'dropout' : 0.2,
# 'rnn_dropout' : 0,
# 'run_name_prefix': 'noname'
# }
# Command-line configuration; parser.set_defaults below flips several of the
# boolean flags on by default.
parser = argparse.ArgumentParser()
#general parameters
parser.add_argument('--run_name_prefix', default='noname', type=str, help='path to pretrained teacher net')
parser.add_argument('--run_index', default=10, type=int, help='run_index')
parser.add_argument('--testmode', dest='testmode', action='store_true')
parser.add_argument('--no-testmode', dest='testmode', action='store_false')
### student parameters
parser.add_argument('--epochs', default=1, type=int, help='num training epochs')
parser.add_argument('--num_feature', default=64, type=int, help='legacy to be discarded')
# NOTE(review): no type= here, so a command-line value stays a string while
# the default is the int 0 — downstream comparisons should handle both.
parser.add_argument('--time_pool', default=0, help='time dimention pooling to use - max_pool, average_pool, 0')
parser.add_argument('--student_block_size', default=1, type=int, help='number of repetition of each convlstm block')
parser.add_argument('--conv_rnn_type', default='lstm', type=str, help='conv_rnn_type')
parser.add_argument('--student_nl', default='relu', type=str, help='non linearity')
parser.add_argument('--dropout', default=0.2, type=float, help='dropout1')
parser.add_argument('--rnn_dropout', default=0.0, type=float, help='dropout1')
# NOTE(review): stray module-level constant shadowed by the --conv_rnn_type
# option above; appears unused — confirm and remove.
conv_rnn_type='lstm'
parser.add_argument('--layer_norm_student', dest='layer_norm_student', action='store_true')
parser.add_argument('--no-layer_norm_student', dest='layer_norm_student', action='store_false')
### syclop parameters
parser.add_argument('--trajectory_index', default=40, type=int, help='trajectory index')
parser.add_argument('--sample', default=5, type=int, help='sample')
parser.add_argument('--res', default=8, type=int, help='resolution')
parser.add_argument('--broadcast', default=0, type=int, help='integrate the coordinates by broadcasting them as extra dimentions')
# NOTE(review): default 'brownain' looks like a typo for 'brownian'; it is a
# runtime default consumed elsewhere, so it is left unchanged here — confirm.
parser.add_argument('--style', default='brownain', type=str, help='choose syclops style of motion')
parser.add_argument('--max_length', default=5, type=int, help='choose syclops max trajectory length')
### teacher network parameters
parser.add_argument('--teacher_net', default=None, type=str, help='path to pretrained teacher net')
parser.add_argument('--resblocks', default=3, type=int, help='resblocks')
parser.add_argument('--last_layer_size', default=128, type=int, help='last_layer_size')
parser.add_argument('--dropout1', default=0.2, type=float, help='dropout1')
parser.add_argument('--dropout2', default=0.0, type=float, help='dropout2')
parser.add_argument('--dataset_norm', default=128.0, type=float, help='dropout2')
parser.add_argument('--dataset_center', dest='dataset_center', action='store_true')
parser.add_argument('--no-dataset_center', dest='dataset_center', action='store_false')
parser.add_argument('--layer_norm_res', dest='layer_norm_res', action='store_true')
parser.add_argument('--no-layer_norm_res', dest='layer_norm_res', action='store_false')
parser.add_argument('--layer_norm_2', dest='layer_norm_2', action='store_true')
parser.add_argument('--no-layer_norm_2', dest='layer_norm_2', action='store_false')
parser.add_argument('--skip_conn', dest='skip_conn', action='store_true')
parser.add_argument('--no-skip_conn', dest='skip_conn', action='store_false')
parser.add_argument('--last_maxpool_en', dest='last_maxpool_en', action='store_true')
parser.add_argument('--no-last_maxpool_en', dest='last_maxpool_en', action='store_false')
parser.add_argument('--nl', default='relu', type=str, help='non linearity')
parser.add_argument('--stopping_patience', default=10, type=int, help='stopping patience')
parser.add_argument('--learning_patience', default=5, type=int, help='stopping patience')
parser.add_argument('--manual_suffix', default='', type=str, help='manual suffix')
parser.add_argument('--data_augmentation', dest='data_augmentation', action='store_true')
parser.add_argument('--no-data_augmentation', dest='data_augmentation', action='store_false')
parser.add_argument('--rotation_range', default=0.0, type=float, help='dropout1')
parser.add_argument('--width_shift_range', default=0.1, type=float, help='dropout2')
parser.add_argument('--height_shift_range', default=0.1, type=float, help='dropout2')
parser.set_defaults(data_augmentation=True,layer_norm_res=True,layer_norm_student=True,layer_norm_2=True,skip_conn=True,last_maxpool_en=True, testmode=False,dataset_center=True)
# Parse and expose the configuration as a plain dict.
config = parser.parse_args()
config = vars(config)
print('config ',config)
parameters = config
TESTMODE = parameters['testmode']
# LSF job id (if any) goes into the run name for traceability on the cluster.
lsbjob = os.getenv('LSB_JOBID')
lsbjob = '' if lsbjob is None else lsbjob
# layer_name = parameters['layer_name']
num_feature = parameters['num_feature']
trajectory_index = parameters['trajectory_index']
sample = parameters['sample']
res = parameters['res']
run_index = parameters['run_index']
dropout = parameters['dropout']
rnn_dropout = parameters['rnn_dropout']
# Unique run name: prefix + job id + launch timestamp.
this_run_name = parameters['run_name_prefix'] + '_j' + lsbjob + '_t' + str(int(time.time()))
print(parameters)
# scale pixels
def prep_pixels(train, test):
    """Convert both image arrays to float32, optionally subtract the
    training-set mean image (parameters['dataset_center']), and scale by
    parameters['dataset_norm']. Returns (train, test) normalized copies."""
    train_f = train.astype('float32')
    test_f = test.astype('float32')
    if parameters['dataset_center']:
        # Center both splits with the *training* mean only.
        mean_image = np.mean(train_f, axis=0)
        train_f = train_f - mean_image
        test_f = test_f - mean_image
    scale = parameters['dataset_norm']
    return train_f / scale, test_f / scale
# prepare pixel data (float32, optional centering, scale by dataset_norm)
trainX, testX = prep_pixels(trainX, testX)
#%%
############################### Get Trained Teacher ##########################3
# if len(sys.argv) == 1:
# path = '/home/orram/Documents/GitHub/imagewalker/teacher_student/'
# else:
# path = '/home/labs/ahissarlab/orra/imagewalker/teacher_student/'
path = os.getcwd() + '/'
# teacher = tf.keras.models.Sequential()
# teacher.add(fe_model)
# teacher.add(be_model)
#
# opt=tf.keras.optimizers.Adam(lr=1e-3)
# teacher.compile(
# optimizer=opt,
# loss="sparse_categorical_crossentropy",
# metrics=["sparse_categorical_accuracy"],
# )
#
# history = teacher.fit(trainX[:45000],
# trainY[:45000],
# epochs=15 if not TESTMODE else 1,
# batch_size=64,
# validation_data=(trainX[45000:], trainY[45000:]),
# verbose=0)
#
# #Save Network
# teacher.save(path +'cifar_trained_model')
#
# #plot_results
# plt.figure()
# plt.plot(history.history['sparse_categorical_accuracy'], label = 'train')
# plt.plot(history.history['val_sparse_categorical_accuracy'], label = 'test')
# plt.legend()
# plt.grid()
# plt.title('Cifar10 - train/test accuracies')
# plt.savefig('Saved Networks accur plot')
#
#
# if os.path.exists(path + 'cifar_trained_model'+this_run_name):
# teacher = keras.models.load_model(path + 'cifar_trained_model'+this_run_name)
# else:
# teacher = train_model(path, trainX, trainY)
# Load the pretrained teacher and sanity-check it on the held-out 5k images.
teacher = keras.models.load_model(parameters['teacher_net'])
teacher.evaluate(trainX[45000:], trainY[45000:], verbose=2)
fe_model = teacher.layers[0]  # front-end feature extractor
be_model = teacher.layers[1]  # back-end classifier head
#%%
#################### Get Layer features as a dataset ##########################
print('making feature data')
intermediate_layer_model = fe_model
decoder = be_model
batch_size = 64
start = 0
end = batch_size
train_data = []
validation_data = []
# assumes the teacher feature map's spatial size equals `res` — TODO confirm
train_data = np.zeros([50000,res,res,num_feature])
count = 0
#Drow N random features from the batch and sort them in order
# if layer_name == 'max_pool2':
#     feature_space = 64
# elif layer_name == 'max_pool3':
#     feature_space = 128
feature_space = 64
feature_list = np.random.choice(np.arange(feature_space),num_feature, replace = False)
feature_list = np.sort(feature_list)
# Run the teacher front-end over the training set in batches, keeping only
# the sampled feature channels.
for batch in range(len(trainX)//batch_size + 1):
    count+=1
    intermediate_output = intermediate_layer_model(trainX[start:end]).numpy()
    train_data[start:end,:,:] = intermediate_output[:,:,:,feature_list]
    start += batch_size
    end += batch_size
print('loaded feature data from teacher')
#%%
feature_test_data = train_data[45000:]
feature_train_data = train_data[:45000][:,:,:]
#%%
############################## load syclop data #################################
print('loading Syclop Data')
# Build the trajectory-sampled (syclop) view of CIFAR used as student input.
train_dataset, test_dataset = create_cifar_dataset(images, labels, res = res,
                                                   sample = sample, return_datasets=True,
                                                   mixed_state = False, add_seed = 0,
                                                   trajectory_list = trajectory_index,
                                                   style = parameters['style'],
                                                   broadcast = parameters['broadcast'],
                                                   max_length = parameters['max_length'])
train_dataset_x, train_dataset_y = split_dataset_xy(train_dataset, sample = sample)
test_dataset_x, test_dataset_y = split_dataset_xy(test_dataset, sample = sample)
#%%
##################### Define Student #########################################
epochs = parameters['epochs']
verbose = 2
evaluate_prediction_size = 150
# NOTE(review): this directory is never created here; assumes it exists — confirm
prediction_data_path = path + 'predictions/'
shape = feature_test_data.shape
# per-channel statistics of the teacher activations over the held-out split,
# recorded into `parameters` for later analysis
teacher_mean = np.mean(feature_test_data.reshape(shape[0]*shape[1]*shape[2], shape[3]), axis = 0)
teacher_var = np.var(feature_test_data.reshape(shape[0]*shape[1]*shape[2], shape[3]), axis = 0)
#print('teacher mean = ', teacher_mean, 'var =', teacher_var)
parameters['teacher_mean'] = teacher_mean
parameters['teacher_var'] = teacher_var
if num_feature == 64 or num_feature == 128:
    feature_list = 'all'
parameters['feature_list'] = feature_list
save_model_path = path + 'saved_models/{}_feature/'.format(this_run_name)
checkpoint_filepath = save_model_path + '/{}_feature_net_ckpt'.format(this_run_name)
# keep only the weights with the best validation MSE
model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
    filepath=checkpoint_filepath,
    save_weights_only=True,
    monitor='val_mean_squared_error',
    mode='min',
    save_best_only=True)
lr_reducer = keras.callbacks.ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=5, min_lr=0.5e-6)
early_stopper = keras.callbacks.EarlyStopping(
    monitor='val_mean_squared_error', min_delta=5e-5, patience=3, verbose=0,
    mode='auto', baseline=None, restore_best_weights=True
)
def save_model(net, path, parameters, checkpoint = True):
    """Persist a network's weights under ``path``, twice.

    Writes the weights both as a pickled list of numpy arrays (reload with
    ``pickle.load`` and ``model.get_layer(...).set_weights(...)``) and in the
    Keras checkpoint format (reload with ``net.load_weights(...)``).

    :param net: the Keras model whose weights are saved
    :param path: base directory for this run's saved models
    :param parameters: run-parameter dict (kept for interface compatibility)
    :param checkpoint: True -> save under 'checkpoint/', else 'end_of_run_model/'
    """
    home_folder = path + '{}_saved_models/'.format(this_run_name)
    if checkpoint:
        child_folder = home_folder + 'checkpoint/'
    else:
        child_folder = home_folder + 'end_of_run_model/'
    numpy_weights_path = child_folder + '{}_numpy_weights/'.format(this_run_name)
    keras_weights_path = child_folder + '{}_keras_weights/'.format(this_run_name)
    # makedirs(exist_ok=True) creates any missing parents and is safe to re-run;
    # the original exists()/mkdir() pairs crashed when `path` itself was missing.
    for folder in (home_folder, child_folder, numpy_weights_path, keras_weights_path):
        os.makedirs(folder, exist_ok=True)
    # save weights as a pickled list of numpy arrays
    all_weights = net.get_weights()
    with open(numpy_weights_path + 'numpy_weights_{}'.format(this_run_name), 'wb') as file_pi:
        pickle.dump(all_weights, file_pi)
    # save weights in Keras checkpoint format
    net.save_weights(keras_weights_path + 'keras_weights_{}'.format(this_run_name))
# Build the student network that learns to reproduce the teacher's feature
# maps from the syclop trajectory input.
student = student3(sample = sample,
                   res = res,
                   activation = parameters['student_nl'],
                   dropout = dropout,
                   rnn_dropout = rnn_dropout,
                   num_feature = num_feature,
                   layer_norm = parameters['layer_norm_student'],
                   conv_rnn_type = parameters['conv_rnn_type'],
                   time_pool = parameters['time_pool'],
                   add_coordinates = parameters['broadcast'],
                   block_size = parameters['student_block_size']
                   )
# baseline evaluation of the untrained student
student.evaluate(test_dataset_x[0],
                 feature_test_data, verbose = 2)
# regress the student's output onto the teacher's feature maps
student_history = student.fit(train_dataset_x[0],
                              feature_train_data,
                              batch_size = 32,
                              epochs = epochs if not TESTMODE else 1,
                              validation_data=(test_dataset_x[0], feature_test_data),
                              verbose = verbose,
                              callbacks=[model_checkpoint_callback, lr_reducer, early_stopper])
print('{} train:'.format(student.name), student_history.history['mean_squared_error'])
print('{} test:'.format(student.name), student_history.history['val_mean_squared_error'])
save_model(student, save_model_path, parameters, checkpoint = False)
#student.load_weights(checkpoint_filepath) # todo!
# NOTE(review): both saves currently store the same (end-of-run) weights
# because the checkpoint reload above is commented out.
save_model(student, save_model_path, parameters, checkpoint = True)
student.evaluate(test_dataset_x[0],
                 feature_test_data, verbose = 2)
# Collect the trained student's feature predictions batch by batch.
student_test_data = np.zeros([5000, res, res, num_feature])
student_train_data = np.zeros([45000, res, res, num_feature])
start = 0
end = batch_size
count = 0
for batch in range(len(train_dataset_x[0])//batch_size + 1):
    count += 1
    train_temp = student(train_dataset_x[0][start:end]).numpy()
    student_train_data[start:end, :, :, :] = train_temp[:, :, :, :]
    start += batch_size
    end += batch_size
start = 0
end = batch_size
count = 0
for batch in range(len(test_dataset_x[0])//batch_size + 1):
    count += 1
    test_temp = student(test_dataset_x[0][start:end]).numpy()
    student_test_data[start:end, :, :, :] = test_temp[:, :, :, :]
    start += batch_size
    end += batch_size
# Evaluate per feature: variance of the student-teacher residual per channel
var_list = []
for feature_indx in range(num_feature):
    var = np.var(student_test_data[:, :, :, feature_indx] - feature_test_data[:, :, :, feature_indx])
    var_list.append(var)
parameters['student_var'] = var_list
with open(prediction_data_path + 'predictions_traject_{}'.format(this_run_name,), 'wb') as file_pi:
    pickle.dump((feature_test_data, student_test_data, feature_train_data, student_train_data ), file_pi)
############################# The Student learnt the Features!! #################################################
####################### Now Let's see how good it is in classification ##########################################
# Reuse the teacher's classification head ("decoder") on top of the student's
# predicted features.
opt = tf.keras.optimizers.Adam(lr=1e-3)
decoder.compile(
    optimizer=opt,
    loss="sparse_categorical_crossentropy",
    metrics=["sparse_categorical_accuracy"],
)
################################## Sanity Check with Teachers Features ###########################################
decoder.evaluate(feature_test_data, trainY[45000:], verbose=2)
############################################## Evaluate with Student Features ###################################
print('Evaluating students features witout more training')
# fresh callbacks for the classification phase (note: these monitor accuracy)
lr_reducer = keras.callbacks.ReduceLROnPlateau(factor=np.sqrt(0.1), cooldown=0, patience=1, min_lr=0.5e-6)
early_stopper = keras.callbacks.EarlyStopping(
    monitor='val_sparse_categorical_accuracy', min_delta=1e-4, patience=5, verbose=0,
    mode='auto', baseline=None, restore_best_weights=True
)
# zero-shot accuracy of the teacher's head on student features
pre_training_accur = decoder.evaluate(student_test_data, trainY[45000:], verbose=2)
parameters['pre_training_decoder_accur'] = pre_training_accur[1]
############################ Re-train the half_net with the student training features ###########################
print('Training the base newtwork with the student features')
decoder_history = decoder.fit(student_train_data,
                              trainY[:45000],
                              epochs = 10 if not TESTMODE else 1,
                              batch_size = 64,
                              validation_data = (student_test_data, trainY[45000:]),
                              verbose = 2,
                              callbacks=[lr_reducer, early_stopper],)
home_folder = save_model_path + '{}_saved_models/'.format(this_run_name)
decoder.save(home_folder + 'decoder_trained_model')
############################## Now Let's Try and Trian the student features #####################################
########################### Combining the student and the decoder and training ##################################
print('Training the student and decoder together - reinitiating the decoder before learning')
# re-randomize decoder weights so end-to-end training starts from scratch
net_weights_reinitializer(decoder)
def full_student(student, decoder):
    """Stack the student feature net and the decoder head into one end-to-end model.

    :param student: trained feature-prediction network (syclop input -> features)
    :param decoder: classification head applied to the student's features
    :return: a compiled Keras Model mapping (sample, res, res, 3) inputs to
             class predictions, trained with sparse categorical cross-entropy
    """
    # BUGFIX: the original line ended with a stray '\' continuation, which
    # joined it with the next statement and made the file a SyntaxError.
    inputs = keras.layers.Input(shape=(sample, res, res, 3))
    student_features = student(inputs)
    decoder_prediction = decoder(student_features)
    model = keras.models.Model(inputs=inputs, outputs=decoder_prediction)
    opt = tf.keras.optimizers.Adam(lr=1e-3)
    model.compile(
        optimizer=opt,
        loss="sparse_categorical_crossentropy",
        metrics=["sparse_categorical_accuracy"],
    )
    return model
# Train the combined student+decoder model end-to-end on the class labels.
full_student_net = full_student(student, decoder)
full_history = full_student_net.fit(train_dataset_x[0],
                                    trainY[:45000],
                                    epochs = 10 if not TESTMODE else 1,
                                    batch_size = 64,
                                    validation_data = (test_dataset_x[0], trainY[45000:]),
                                    verbose = 2,
                                    callbacks=[lr_reducer, early_stopper],)
#full_student_net.save(home_folder +'full_trained_model')
# full_learning_dataset_update(student_history, decoder_history, full_history, student, parameters, name = 'full_train_102_{}'.format(this_run_name))
<reponame>lfoppiano/grobid-superconductors-tools
from grobid_superconductors.commons.grobid_evaluation_analysis import append_tokens_before, append_tokens_after, \
extract_error_cases
def test_append_tokens_before():
    """Tokens preceding the error are collected; matched labels get '<=>' suffixed."""
    rows = [
        ['a', '<other>', '<other>'],
        ['b', '<other>', '<other>'],
        ['c', '<other>', '<other>'],
        ['d', 'I-<l1>', 'I-<l1>'],
        ['e', '<l1>', '<l1>'],
        ['f', '<l1>', '<other>'],
        ['g', '<other>', '<other>']
    ]
    nb_token_before = 5
    output = append_tokens_before([], {'data': rows}, 5, nb_token_before)
    print(output)
    assert len(output) == nb_token_before
    for idx, token in enumerate(["a", "b", "c", "d<=>", "e<=>"]):
        assert output[idx][0] == token
def test_append_toekns_before_2():
    """Same as above, but with an unrelated annotation at the sequence start."""
    rows = [
        ['a', 'I-<ba>', 'I-<ba>'],
        ['b', '<ba>', '<ba>'],
        ['c', '<other>', '<other>'],
        ['d', 'I-<l1>', 'I-<l1>'],
        ['e', '<l1>', '<l1>'],
        ['f', '<l1>', '<other>'],
        ['g', '<other>', '<other>']
    ]
    nb_token_before = 5
    output = append_tokens_before([], {'data': rows}, 5, nb_token_before)
    print(output)
    assert len(output) == nb_token_before
    for idx, token in enumerate(["a", "b", "c", "d<=>", "e<=>"]):
        assert output[idx][0] == token
def test_append_tokens_after():
    """Collect up to nb_token_after tokens following the error position."""
    rows = [
        ['a', '<other>', '<other>'],
        ['b', 'I-<l3>', 'I-<l3>'],
        ['c', '<l3>', 'I-<l2>'],
        ['d', '<other>', '<l2>'],
        ['e', '<other>', '<other>'],
        ['f', '<other>', '<other>'],
        ['g', '<other>', '<other>']
    ]
    nb_token_after = 5
    output = append_tokens_after([], {'data': rows}, 2, nb_token_after)
    print(output)
    assert len(output) == nb_token_after - 1
    for idx, token in enumerate(["d", "e", "f", "g"]):
        assert output[idx][0] == token
def test_append_toekns_after_2():
    """Fewer than nb_token_after tokens remain: only the available tail is returned."""
    rows = [
        ['a', '<other>', '<other>'],
        ['b', 'I-<l3>', 'I-<l3>'],
        ['c', '<l3>', 'I-<l2>'],
        ['d', '<other>', '<l2>'],
        ['e', '<other>', '<other>'],
        ['f', '<other>', '<other>'],
        ['g', '<other>', '<other>']
    ]
    nb_token_after = 5
    output = append_tokens_after([], {'data': rows}, 3, nb_token_after)
    print(output)
    assert len(output) == 3
    for idx, token in enumerate(["e", "f", "g"]):
        assert output[idx][0] == token
def test_extract_error_cases_1():
    """A fold with one mislabeled token yields one error case with context window 3/3."""
    fold = {
        'name': 'fold 0',
        'data': [
            ['Hall', '<other>', '<other>'],
            ['coefficient', '<other>', '<other>'],
            ['and', '<other>', '<other>'],
            ['specific', 'I-<me_method>', 'I-<me_method>'],
            ['heat', '<me_method>', '<me_method>'],
            ['measurements', '<me_method>', '<other>'],
            [',', '<other>', '<other>'],
            ['and', '<other>', '<other>'],
            ['the', '<other>', '<other>'],
            ['remaining', '<other>', '<other>'],
            ['part', '<other>', '<other>'],
            ['was', '<other>', '<other>']
        ],
        'results': []
    }
    output = extract_error_cases([fold], 3, 3)
    print(output)
    assert len(output) == 1
    # Each error case is composed by a list of two elements, the representative
    # label and the sequence
    assert len(output[0]) == 2
    sequence = output[0][1]
    assert len(sequence) == 7  # sequence
    expected = ["and", "specific<=>", "heat<=>", "measurements<-r>", ",", "and", "the"]
    for idx, token in enumerate(expected):
        assert sequence[idx][0] == token
# def test_count_discrepancies_near_annotations():
# cases = [
# [
# '<l3>',
# [
# ['a', '<other>', '<other>'],
# ['b<=>', 'I-<l3>', 'I-<l3>'],
# ['c<+>', '<l3>', 'I-<l2>'],
# ['d<+>', '<other>', '<l2>'],
# ['e', '<other>', '<other>'],
# ['f', '<other>', '<other>'],
# ['g', '<other>', '<other>']
# ]
# ]
# ]
#
# discrepancies = count_discrepancies(cases)
#
# print(discrepancies)
# assert len(discrepancies.keys()) == 1
# label_discrepancy = discrepancies['<l3>']
# assert len(label_discrepancy['<+>']) == 1
# assert label_discrepancy['<+>']['d'] == 1
# def test_count_discrepancies_near_annotations_real_case():
# cases = [
# [
# '<valueAtomic>',
# [
# ['on', '<other>', '<other>'],
# ['October<=>', 'I-<valueAtomic>', 'I-<valueAtomic>'],
# ['19<=>', '<valueAtomic>', '<valueAtomic>'],
# [',<=>', '<valueAtomic>', '<valueAtomic>'],
# ['2014<=>', '<valueAtomic>', '<valueAtomic>'],
# ['at<-r>', '<valueAtomic>', '<other>'],
# ['approximately<-r>', '<valueAtomic>', '<other>'],
# ['18<-p>', '<valueAtomic>', 'I-<valueAtomic>'],
# [':<=>', '<valueAtomic>', '<valueAtomic>'],
# ['29<=>', '<valueAtomic>', '<valueAtomic>'],
# ['UT<=>', '<valueAtomic>', '<valueAtomic>'],
# [',', '<other>', '<other>'],
# ['reaching', '<other>', '<other>']
# ]
# ]
# ]
#
# discrepancies = count_discrepancies(cases)
#
# assert len(discrepancies.keys()) == 1 # labels
# label_discrepancy = discrepancies['<valueAtomic>']
# assert len(label_discrepancy['<-r>']) == 2
# assert len(label_discrepancy['<-p>']) == 1
# assert label_discrepancy['<-r>']['at'] == 1
# assert label_discrepancy['<-r>']['approximately'] == 1
# assert label_discrepancy['<-p>']['18'] == 1
|
<reponame>MatthieuDartiailh/vispy
# -*- coding: utf-8 -*-
# -----------------------------------------------------------------------------
# Copyright (c) 2014, Vispy Development Team.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
# -----------------------------------------------------------------------------
"""
Marker shader definitions. You need to combine marker_frag with one of the
available marker function (marker_disc, marker_diamond, ...)
"""
vert = """
#version 120
// Uniforms
// ------------------------------------
uniform mat4 u_model;
uniform mat4 u_view;
uniform mat4 u_projection;
uniform float u_antialias;
uniform float u_size;
// Attributes
// ------------------------------------
attribute vec3 a_position;
attribute vec4 a_fg_color;
attribute vec4 a_bg_color;
attribute float a_linewidth;
attribute float a_size;
// Varyings
// ------------------------------------
varying vec4 v_fg_color;
varying vec4 v_bg_color;
varying float v_size;
varying float v_linewidth;
varying float v_antialias;
void main (void) {
v_size = a_size * u_size;
v_linewidth = a_linewidth;
v_antialias = u_antialias;
v_fg_color = a_fg_color;
v_bg_color = a_bg_color;
gl_Position = u_projection * u_view * u_model *
vec4(a_position*u_size,1.0);
gl_PointSize = v_size + 2*(v_linewidth + 1.5*v_antialias);
}
"""
frag = """
#version 120
// Constants
// ------------------------------------
// Varyings
// ------------------------------------
varying vec4 v_fg_color;
varying vec4 v_bg_color;
varying float v_size;
varying float v_linewidth;
varying float v_antialias;
// Functions
// ------------------------------------
float marker(vec2 P, float size);
// Main
// ------------------------------------
void main()
{
float size = v_size +2*(v_linewidth + 1.5*v_antialias);
float t = v_linewidth/2.0-v_antialias;
// The marker function needs to be linked with this shader
float r = marker(gl_PointCoord, size);
float d = abs(r) - t;
if( r > (v_linewidth/2.0+v_antialias))
{
discard;
}
else if( d < 0.0 )
{
gl_FragColor = v_fg_color;
}
else
{
float alpha = d/v_antialias;
alpha = exp(-alpha*alpha);
if (r > 0)
gl_FragColor = vec4(v_fg_color.rgb, alpha*v_fg_color.a);
else
gl_FragColor = mix(v_bg_color, v_fg_color, alpha);
}
}
"""
disc = """
float marker(vec2 P, float size)
{
float r = length((P.xy - vec2(0.5,0.5))*size);
r -= v_size/2;
return r;
}
"""
arrow = """
float marker(vec2 P, float size)
{
float r1 = abs(P.x -.50)*size + abs(P.y -.5)*size - v_size/2;
float r2 = abs(P.x -.25)*size + abs(P.y -.5)*size - v_size/2;
float r = max(r1,-r2);
return r;
}
"""
ring = """
float marker(vec2 P, float size)
{
float r1 = length((P.xy - vec2(0.5,0.5))*size) - v_size/2;
float r2 = length((P.xy - vec2(0.5,0.5))*size) - v_size/4;
float r = max(r1,-r2);
return r;
}
"""
clobber = """
float marker(vec2 P, float size)
{
const float PI = 3.14159265358979323846264;
const float t1 = -PI/2;
const vec2 c1 = 0.2*vec2(cos(t1),sin(t1));
const float t2 = t1+2*PI/3;
const vec2 c2 = 0.2*vec2(cos(t2),sin(t2));
const float t3 = t2+2*PI/3;
const vec2 c3 = 0.2*vec2(cos(t3),sin(t3));
float r1 = length((P.xy- vec2(0.5,0.5) - c1)*size);
r1 -= v_size/3;
float r2 = length((P.xy- vec2(0.5,0.5) - c2)*size);
r2 -= v_size/3;
float r3 = length((P.xy- vec2(0.5,0.5) - c3)*size);
r3 -= v_size/3;
float r = min(min(r1,r2),r3);
return r;
}
"""
square = """
float marker(vec2 P, float size)
{
float r = max(abs(P.x -.5)*size, abs(P.y -.5)*size);
r -= v_size/2;
return r;
}
"""
diamond = """
float marker(vec2 P, float size)
{
float r = abs(P.x -.5)*size + abs(P.y -.5)*size;
r -= v_size/2;
return r;
}
"""
vbar = """
float marker(vec2 P, float size)
{
float r1 = max(abs(P.x - 0.75)*size, abs(P.x - 0.25)*size);
float r3 = max(abs(P.x - 0.50)*size, abs(P.y - 0.50)*size);
float r = max(r1,r3);
r -= v_size/2;
return r;
}
"""
hbar = """
float marker(vec2 P, float size)
{
float r2 = max(abs(P.y - 0.75)*size, abs(P.y - 0.25)*size);
float r3 = max(abs(P.x - 0.50)*size, abs(P.y - 0.50)*size);
float r = max(r2,r3);
r -= v_size/2;
return r;
}
"""
cross = """
float marker(vec2 P, float size)
{
float r1 = max(abs(P.x - 0.75)*size, abs(P.x - 0.25)*size);
float r2 = max(abs(P.y - 0.75)*size, abs(P.y - 0.25)*size);
float r3 = max(abs(P.x - 0.50)*size, abs(P.y - 0.50)*size);
float r = max(min(r1,r2),r3);
r -= v_size/2;
return r;
}
"""
tailed_arrow = """
float marker(vec2 P, float size)
{
//arrow_right
float r1 = abs(P.x -.50)*size + abs(P.y -.5)*size - v_size/2;
float r2 = abs(P.x -.25)*size + abs(P.y -.5)*size - v_size/2;
float arrow = max(r1,-r2);
//hbar
float r3 = (abs(P.y-.5)*2+.3)*v_size-v_size/2;
float r4 = (P.x -.775)*size;
float r6 = abs(P.x -.5)*size-v_size/2;
float limit = (P.x -.5)*size + abs(P.y -.5)*size - v_size/2;
float hbar = max(limit,max(max(r3,r4),r6));
return min(arrow,hbar);
}
"""
|
<reponame>achoi007/CloudComputing
import unittest
from collections import defaultdict
from itertools import combinations, product, ifilter
class SerialEquivalence:
    '''
    2 txns are serially equivalent iff all pairs of conflicting ops (pair
    containing 1 op from each txn) are executed in same order (txn order) for
    all objects they both access.

    Conflicting ops are:
     * read(x) and write(x)
     * write(x) and read(x)
     * write(x) and write(x)
     * NOT read(x) and read(x)
     * NOT read/write(x) and read/write(y)

    * Take all pairs of conflict ops - 1 from T1 and 1 from T2
    * If T1 op was reflected first on server, mark pair as (T1, T2) else (T2, T1)
    * All pairs should either be (T1, T2) or (T2, T1)

    Each op is a tuple ``(op_type, obj_id, timestamp)``.
    '''

    WRITE = 1
    READ = 2

    @staticmethod
    def check(txns):
        """Return True iff every pair of transactions is serially equivalent."""
        return all(map(SerialEquivalence.checkPairs, combinations(txns, 2)))

    @staticmethod
    def checkPairs(txnPair):
        """Check one pair of transactions: all conflicting op pairs must agree
        on a single transaction order."""
        currOrder = None
        for (t1, t2) in SerialEquivalence.iterateConflicts(txnPair):
            # If op in T1 is before op in T2, generate (T1, T2) else generate
            # (T2, T1); equal timestamps count as T1-first.
            t1time = t1[2]
            t2time = t2[2]
            if t1time <= t2time:
                order = (1, 2)
            else:
                order = (2, 1)
            # If there is no current order, make this current order.
            if currOrder is None:  # BUGFIX: identity check, not '== None'
                currOrder = order
            # Otherwise, if order is different from previous order, the pair
            # is not serially equivalent.
            elif currOrder != order:
                return False
        return True

    @staticmethod
    def iterateConflicts(txnPair):
        """Yield every conflicting (op-from-T1, op-from-T2) pair."""
        # BUGFIX: itertools.ifilter is Python-2-only; the builtin filter is
        # equivalent here and works on both Python 2 and 3.
        return filter(SerialEquivalence.inConflict, product(*txnPair))

    @staticmethod
    def inConflict(ops):
        '''
        Conflicting ops are:
         * read(x) and write(x)
         * write(x) and read(x)
         * write(x) and write(x)
         * NOT read(x) and read(x)
         * NOT read/write(x) and read/write(y)
        '''
        op1, op2 = ops
        # same object and not both reads
        return op1[1] == op2[1] and \
            not (op1[0] == SerialEquivalence.READ and op2[0] == SerialEquivalence.READ)
class SerialEquivalenceTest(unittest.TestCase):
    """Unit tests for SerialEquivalence.

    Helpers record ops as ``(type, obj_id, timestamp)`` tuples into per-txn
    lists; the timestamp is a monotonically increasing counter, so call order
    in a test is the server-reflected order.
    """

    def readTxn(self, txnId, objId=None):
        # append a READ op to transaction txnId
        self.addTxn(txnId, SerialEquivalence.READ, objId)

    def writeTxn(self, txnId, objId=None):
        # append a WRITE op to transaction txnId
        self.addTxn(txnId, SerialEquivalence.WRITE, objId)

    def addTxn(self, txnId, txnType, objId):
        if objId == None:
            objId = self.defaultObjId
        self.txns[txnId].append((txnType, objId, self.timestamp))
        self.timestamp += 1

    def getTxns(self):
        # all recorded transactions as a list of op-lists
        return self.txns.values()

    def setUp(self):
        self.timestamp = 0
        self.txns = defaultdict(list)
        self.defaultObjId = "abc"

    def testAllRead(self):
        # reads never conflict -> equivalent
        self.readTxn(0)
        self.readTxn(1)
        self.readTxn(0)
        self.readTxn(1)
        txns = self.getTxns()
        self.assertTrue(SerialEquivalence.check(txns))

    def testLostUpdate(self):
        # interleaved read/read/write/write -> not equivalent
        self.readTxn(0)
        self.readTxn(1)
        self.writeTxn(0)
        self.writeTxn(1)
        txns = self.getTxns()
        self.assertFalse(SerialEquivalence.check(txns))

    def testInconsistentRetrieval(self):
        self.readTxn(0, "123")
        self.readTxn(0, "789")
        self.writeTxn(0, "123")
        self.readTxn(1, "123")
        self.readTxn(1, "789")
        self.writeTxn(0, "789")
        txns = self.getTxns()
        self.assertFalse(SerialEquivalence.check(txns))

    def testReadWriteQ11(self):
        self.readTxn(0, "x")
        self.writeTxn(1, "x")
        self.writeTxn(1, "y")
        self.readTxn(0, "x")
        self.writeTxn(0, "y")
        txns = self.getTxns()
        self.assertFalse(SerialEquivalence.check(txns))

    def testReadWriteQ12(self):
        self.writeTxn(1, "y")
        self.writeTxn(1, "x")
        self.writeTxn(1, "y")
        self.readTxn(0, "x")
        self.writeTxn(1, "y")
        self.writeTxn(0, "y")
        txns = self.getTxns()
        self.assertTrue(SerialEquivalence.check(txns))

    def testReadWriteQ13(self):
        self.writeTxn(1, "a")
        self.readTxn(2, "b")
        self.writeTxn(2, "b")
        self.readTxn(1, "b")
        self.writeTxn(2, "a")
        self.readTxn(1, "a")
        txns = self.getTxns()
        self.assertFalse(SerialEquivalence.check(txns))

    def testThreeTxns(self):
        # three transactions: check() examines every pair
        self.readTxn("v", "x")
        self.readTxn("v", "y")
        self.writeTxn("u", "x")
        self.readTxn("u", "y")
        self.readTxn("v", "x")
        self.readTxn("u", "y")
        self.readTxn("v", "y")
        self.readTxn("v", "x")
        self.readTxn("v", "y")
        self.readTxn("t", "x")
        self.writeTxn("t", "y")
        txns = self.getTxns()
        self.assertFalse(SerialEquivalence.check(txns))

    def testReadWriteQ15(self):
        self.readTxn(2, "b")
        self.writeTxn(1, "a")
        self.readTxn(1, "b")
        self.readTxn(1, "a")
        self.writeTxn(2, "b")
        self.writeTxn(2, "a")
        txns = self.getTxns()
        self.assertTrue(SerialEquivalence.check(txns))

    def testExam1(self):
        self.readTxn(98, "x")
        self.writeTxn(99, "x")
        self.writeTxn(99, "y")
        self.readTxn(99, "y")
        self.writeTxn(98, "x")
        self.writeTxn(99, "y")
        self.writeTxn(98, "y")
        txns = self.getTxns()
        self.assertFalse(SerialEquivalence.check(txns))

    def testExam2(self):
        self.readTxn(1, "x")
        self.readTxn(3, "x")
        self.readTxn(3, "y")
        self.readTxn(2, "x")
        self.readTxn(2, "y")
        self.writeTxn(3, "x")
        self.readTxn(2, "x")
        self.readTxn(3, "y")
        self.readTxn(3, "x")
        self.readTxn(3, "y")
        self.writeTxn(1, "y")
        txns = self.getTxns()
        self.assertFalse(SerialEquivalence.check(txns))
# run the test suite when this module is executed directly
if __name__ == '__main__':
    unittest.main()
from argparse import ArgumentParser
import torch
import torch.nn as nn
import pytorch_lightning as pl
from pytorch_lightning.metrics import Accuracy
from torch.nn import functional as F
from torch.utils.data import DataLoader, random_split
from torchvision.datasets.mnist import MNIST
from torchvision import transforms
class LitClassifier(pl.LightningModule):
    """Three-layer MLP classifier for flattened 28x28 (MNIST-style) inputs.

    Logs step-wise training accuracy and epoch-level train/val/test accuracy
    via torchmetrics-style ``Accuracy`` objects.
    """

    def __init__(self, hidden_dim=128, learning_rate=1e-3):
        super().__init__()
        # makes hidden_dim / learning_rate available as self.hparams.*
        self.save_hyperparameters()
        # train accuracy is logged per step; val/test accumulate over the epoch
        self.train_acc = Accuracy()
        self.val_acc = Accuracy(compute_on_step=False)
        self.test_acc = Accuracy(compute_on_step=False)
        # lets Lightning trace/log the model graph with a dummy batch of 10
        self.example_input_array = torch.rand(10, 28 * 28)
        self.dims = (1, 28, 28)
        channels, width, height = self.dims
        self.model = nn.Sequential(
            nn.Flatten(),
            nn.Linear(channels * width * height, self.hparams.hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(self.hparams.hidden_dim, self.hparams.hidden_dim),
            nn.ReLU(),
            nn.Dropout(0.1),
            nn.Linear(self.hparams.hidden_dim, 10)
        )

    def forward(self, x):
        """Return raw class logits for a batch of flattened images."""
        x = self.model(x)
        return x

    def training_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        acc = self.train_acc(y_hat, y)
        self.log("train_acc_step", acc)
        return {"loss": loss}

    def training_epoch_end(self, outputs):
        # epoch-aggregated training accuracy
        self.log("epoch_acc", self.train_acc.compute(), prog_bar=True)

    def validation_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        loss = F.cross_entropy(y_hat, y)
        # accumulates only; computed once per epoch in validation_epoch_end
        self.val_acc(y_hat, y)
        self.log('valid_loss', loss)

    def validation_epoch_end(self, outputs):
        self.log("epoch_val_acc", self.val_acc.compute(), prog_bar=True)

    def test_step(self, batch, batch_idx):
        x, y = batch
        y_hat = self(x)
        self.test_acc(y_hat, y)
        loss = F.cross_entropy(y_hat, y)
        self.log('test_loss', loss)

    def test_epoch_end(self, outputs):
        self.log("test_acc", self.test_acc.compute(), prog_bar=True)

    def configure_optimizers(self):
        """Plain Adam on all parameters with the configured learning rate."""
        return torch.optim.Adam(self.parameters(), lr=self.hparams.learning_rate)

    @staticmethod
    def add_model_specific_args(parent_parser):
        """Attach this model's CLI flags to an existing ArgumentParser."""
        parser = ArgumentParser(parents=[parent_parser], add_help=False)
        parser.add_argument('--hidden_dim', type=int, default=128)
        parser.add_argument('--learning_rate', type=float, default=0.001)
        return parser
class LitMNISTDataModule(pl.LightningDataModule):
    """MNIST datamodule: 55k train / 5k val split plus the standard test set."""

    def __init__(self, data_dir, batch_size):
        super().__init__()
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.transform = transforms.Compose([transforms.ToTensor()])

    def prepare_data(self):
        # download once (runs on a single process)
        MNIST(self.data_dir, train=False, download=True)
        MNIST(self.data_dir, train=True, download=True)

    def setup(self, stage=None):
        self.mnist_test = MNIST(self.data_dir, train=False, transform=self.transform)
        mnist_full = MNIST(self.data_dir, train=True, transform=self.transform)
        # NOTE(review): split is not seeded here; relies on the caller's
        # pl.seed_everything for reproducibility
        self.mnist_train, self.mnist_val = random_split(mnist_full, [55000, 5000])

    def train_dataloader(self):
        return DataLoader(self.mnist_train, batch_size=self.batch_size)

    def val_dataloader(self):
        return DataLoader(self.mnist_val, batch_size=self.batch_size)

    def test_dataloader(self):
        return DataLoader(self.mnist_test, batch_size=self.batch_size)
def cli_main():
    """CLI entry point: parse args, build data and model, fit, then test."""
    pl.seed_everything(1234)

    # argument parsing: trainer flags + model-specific flags
    parser = ArgumentParser()
    parser.add_argument('--batch_size', default=32, type=int)
    parser.add_argument('--data_dir', default='', type=str)
    parser = pl.Trainer.add_argparse_args(parser)
    parser = LitClassifier.add_model_specific_args(parser)
    args = parser.parse_args()

    # data
    datamodule = LitMNISTDataModule(args.data_dir, args.batch_size)

    # model
    classifier = LitClassifier(args.hidden_dim, args.learning_rate)

    # training
    trainer = pl.Trainer.from_argparse_args(args)
    trainer.fit(classifier, datamodule)

    # testing (uses the model from the fit run)
    trainer.test(datamodule=datamodule)
# run the Lightning CLI when executed directly
if __name__ == '__main__':
    cli_main()
|
class Realtime():
    def __init__(self, device):
        # JSON-RPC transport object used for every request
        self.device = device
        # interface prefix prepended to each RPC method name
        self.interface_name = "com.attocube.ids.realtime"
def AafIsEnabled(self):
"""
Checks if the anti-aliasing filter is enabled.
Parameters
----------
Returns
-------
enabled: enabled false: Anti-Aliasing Filter is disabled
true: Anti-Aliasing Filter is enabled
"""
response = self.device.request(self.interface_name + "." + "AafIsEnabled")
self.device.handleError(response)
return response['result'][1]
def apply(self):
"""
Applies new real time settings. Necessary after JSON realtime set commands.
Parameters
----------
Returns
-------
"""
response = self.device.request(self.interface_name + "." + "apply")
self.device.handleError(response)
return
def disableTestChannel(self):
"""
Disables the test channel.
Parameters
----------
Returns
-------
"""
response = self.device.request(self.interface_name + "." + "disableTestChannel")
self.device.handleError(response)
return
def discard(self):
"""
Discards new real time settings. Necessary after JSON set commands in case of failure.
Parameters
----------
Returns
-------
"""
response = self.device.request(self.interface_name + "." + "discard")
self.device.handleError(response)
return
def enableTestChannel(self, axis):
"""
Enables the Test Channel, which can be used for estimating the maximum signal range.
Parameters
----------
axis: Test Channel Master Axis
Returns
-------
"""
response = self.device.request(self.interface_name + "." + "enableTestChannel", [axis])
self.device.handleError(response)
return
def getAafAttenuation(self):
"""
Returns the current attenuation at f_nyquist of the anti-aliasing filter.
Parameters
----------
Returns
-------
attenuation: attenuation [3-30] dB m f_nyquist
"""
response = self.device.request(self.interface_name + "." + "getAafAttenuation")
self.device.handleError(response)
return response['result'][1]
def getAafEnabled(self):
"""
Checks if the anti-aliasing filter is enabled.
Parameters
----------
Returns
-------
enabled: enabled false - the Anti-Aliasing Filter is disabled
true - the Anti-Aliasing Filter is enabled
"""
response = self.device.request(self.interface_name + "." + "getAafEnabled")
self.device.handleError(response)
return response['result'][1]
def getAafWindow(self):
"""
Returns the current filter window of the anti-aliasing filter.
Parameters
----------
Returns
-------
window: window 0 = Rectangular,
1 = Cosine,
2 = Cosine^2,
3 = Hamming,
4 = Raised Cosine,
5 = Automatic
"""
response = self.device.request(self.interface_name + "." + "getAafWindow")
self.device.handleError(response)
return response['result'][1]
def getHighPassCutOffFreq(self):
"""
Reads out the high pass filter number of Linear/Analog output mode.
Parameters
----------
Returns
-------
value: value N, Linear Analog High Pass Cut-Off freqency is 1600/2^N kHz, with N \\in [1,24]
"""
response = self.device.request(self.interface_name + "." + "getHighPassCutOffFreq")
self.device.handleError(response)
return response['result'][1]
def getLinearRange(self):
"""
Reads out the range number of Linear/Analog output mode.
Parameters
----------
Returns
-------
rangenumber: rangenumber N, Linear Analog Range is +-2^(N+11) pm, with N \\in [0, 34]
"""
response = self.device.request(self.interface_name + "." + "getLinearRange")
self.device.handleError(response)
return response['result'][1]
def getPeriodHsslClk(self):
"""
Reads out the HSSL period clock.
Parameters
----------
Returns
-------
period: period Period in the Range of [40ns..10200ns]
"""
response = self.device.request(self.interface_name + "." + "getPeriodHsslClk")
self.device.handleError(response)
return response['result'][1]
def getPeriodHsslGap(self):
"""
Reads out the HSSL period gap.
Parameters
----------
Returns
-------
gap: gap Number of clocks
"""
response = self.device.request(self.interface_name + "." + "getPeriodHsslGap")
self.device.handleError(response)
return response['result'][1]
def getPeriodSinCosClk(self):
"""
Reads out the Sine-Cosine and AquadB period clock.
Parameters
----------
Returns
-------
period: period 40ns to 10200ns
"""
response = self.device.request(self.interface_name + "." + "getPeriodSinCosClk")
self.device.handleError(response)
return response['result'][1]
def getResolutionBissC(self):
"""
Reads out the BissC resolution.
Parameters
----------
Returns
-------
resolution: resolution 1pm to 65535pm
"""
response = self.device.request(self.interface_name + "." + "getResolutionBissC")
self.device.handleError(response)
return response['result'][1]
def getResolutionHsslHigh(self):
"""
Reads out the HSSL resolution high bit.
Parameters
----------
Returns
-------
resolution: resolution Resolution in the Range of [0..46]
"""
response = self.device.request(self.interface_name + "." + "getResolutionHsslHigh")
self.device.handleError(response)
return response['result'][1]
def getResolutionHsslLow(self):
    """
    Query the HSSL resolution low bit.

    Returns
    -------
    resolution: resolution in the range [0..46]
    """
    rpc = "%s.%s" % (self.interface_name, "getResolutionHsslLow")
    response = self.device.request(rpc)
    self.device.handleError(response)
    return response['result'][1]
def getResolutionSinCos(self):
    """
    Query the Sine-Cosine and AquadB resolution.

    Returns
    -------
    resolution: 1pm to 65535pm
    """
    rpc = "%s.%s" % (self.interface_name, "getResolutionSinCos")
    response = self.device.request(rpc)
    self.device.handleError(response)
    return response['result'][1]
def getRtDistanceMode(self):
    """
    Query the distance mode. Depending on the realtime output mode, the mode can
    be Displacement, Absolute Distance or Vibrometry.

    Returns
    -------
    linearmode: 1 = Displacement (available in HSSL mode and Linear mode)
                2 = Absolute Distance (available in HSSL mode only)
                3 = Vibrometry (available in Linear mode)
    """
    rpc = "%s.%s" % (self.interface_name, "getRtDistanceMode")
    response = self.device.request(rpc)
    self.device.handleError(response)
    return response['result'][1]
def getRtOutMode(self):
    """
    Query the current realtime output mode.

    Returns
    -------
    rtOutMode: 0 = HSSL (TTL), 1 = HSSL (LVDS), 2 = AquadB (TTL),
               3 = AquadB (LVDS), 4 = SinCos (TTL Error Signal),
               5 = SinCos (LVDS Error Signal), 6 = Linear (TTL),
               7 = Linear (LVDS), 8 = BiSS-C, 9 = Deactivated
    """
    rpc = "%s.%s" % (self.interface_name, "getRtOutMode")
    response = self.device.request(rpc)
    self.device.handleError(response)
    return response['result'][1]
def getTestChannelEnabled(self):
    """
    Check whether the test channel is enabled.

    Returns
    -------
    enabled: true = enabled, false = disabled
    """
    rpc = "%s.%s" % (self.interface_name, "getTestChannelEnabled")
    response = self.device.request(rpc)
    self.device.handleError(response)
    return response['result'][1]
def setAaf(self, enabled, attenuation, window):
    """
    Configure the anti-aliasing filter and its filter window.

    Parameters
    ----------
    enabled: 0 - disables the Anti-Aliasing Filter,
             1 - enables the Anti-Aliasing Filter
    attenuation: [3-30] dB m f_nyquist
    window: 0 = Rectangular, 1 = Cosine, 2 = Cosine^2,
            3 = Hamming, 4 = Raised Cosine, 5 = Automatic
    """
    rpc = "%s.%s" % (self.interface_name, "setAaf")
    response = self.device.request(rpc, [enabled, attenuation, window])
    self.device.handleError(response)
    return
def setHighPassCutOffFreq(self, value):
    """
    Configure the high pass filter number of the Linear/Analog output mode.

    Parameters
    ----------
    value: N, where the Linear Analog High Pass cut-off frequency is 1600/2^N kHz, with N \\in [1,24]
    """
    rpc = "%s.%s" % (self.interface_name, "setHighPassCutOffFreq")
    response = self.device.request(rpc, [value])
    self.device.handleError(response)
    return
def setLinearRange(self, rangenumber):
    """
    Configure the range number of the Linear/Analog output mode.

    Parameters
    ----------
    rangenumber: N, where the Linear Analog range is +-2^(N+11) pm, with N \\in [0, 34]
    """
    rpc = "%s.%s" % (self.interface_name, "setLinearRange")
    response = self.device.request(rpc, [rangenumber])
    self.device.handleError(response)
    return
def setPeriodHsslClk(self, period):
    """
    Set the HSSL period clock. The value has to be a multiple of 40ns;
    otherwise it is rounded automatically by the device.

    Parameters
    ----------
    period: period in the range [40ns..10200ns]
    """
    rpc = "%s.%s" % (self.interface_name, "setPeriodHsslClk")
    response = self.device.request(rpc, [period])
    # Status 0 means the device accepted the value: commit the pending change.
    # Any other status rolls it back before the error is reported.
    if response['result'][0] == 0:
        self.apply()
    else:
        self.discard()
    self.device.handleError(response)
    return
def setPeriodHsslGap(self, value):
    """
    Set the HSSL gap.

    Parameters
    ----------
    value: number of clocks
    """
    rpc = "%s.%s" % (self.interface_name, "setPeriodHsslGap")
    response = self.device.request(rpc, [value])
    # Status 0 commits the pending change; anything else rolls it back.
    if response['result'][0] == 0:
        self.apply()
    else:
        self.discard()
    self.device.handleError(response)
    return
def setPeriodSinCosClk(self, value):
    """
    Set the Sine-Cosine and AquadB period clock. The value has to be a multiple
    of 40ns; otherwise it is rounded automatically by the device.

    Parameters
    ----------
    value: period, 40ns to 10200ns
    """
    rpc = "%s.%s" % (self.interface_name, "setPeriodSinCosClk")
    response = self.device.request(rpc, [value])
    # Status 0 commits the pending change; anything else rolls it back.
    if response['result'][0] == 0:
        self.apply()
    else:
        self.discard()
    self.device.handleError(response)
    return
def setResolutionBissC(self, value):
    """
    Set the BissC resolution.

    Parameters
    ----------
    value: resolution, 1pm to 65535pm
    """
    rpc = "%s.%s" % (self.interface_name, "setResolutionBissC")
    response = self.device.request(rpc, [value])
    # Status 0 commits the pending change; anything else rolls it back.
    if response['result'][0] == 0:
        self.apply()
    else:
        self.discard()
    self.device.handleError(response)
    return
def setResolutionHsslHigh(self, value):
    """
    Set the HSSL resolution high bit.

    Parameters
    ----------
    value: resolution in the range [0..46]
    """
    rpc = "%s.%s" % (self.interface_name, "setResolutionHsslHigh")
    response = self.device.request(rpc, [value])
    # Status 0 commits the pending change; anything else rolls it back.
    if response['result'][0] == 0:
        self.apply()
    else:
        self.discard()
    self.device.handleError(response)
    return
def setResolutionHsslLow(self, value):
    """
    Set the HSSL resolution low bit.

    Parameters
    ----------
    value: resolution in the range [0..46]
    """
    rpc = "%s.%s" % (self.interface_name, "setResolutionHsslLow")
    response = self.device.request(rpc, [value])
    # Status 0 commits the pending change; anything else rolls it back.
    if response['result'][0] == 0:
        self.apply()
    else:
        self.discard()
    self.device.handleError(response)
    return
def setResolutionSinCos(self, value):
    """
    Set the Sine-Cosine and AquadB resolution.

    Parameters
    ----------
    value: resolution, 1pm to 65535pm
    """
    rpc = "%s.%s" % (self.interface_name, "setResolutionSinCos")
    response = self.device.request(rpc, [value])
    # Status 0 commits the pending change; anything else rolls it back.
    if response['result'][0] == 0:
        self.apply()
    else:
        self.discard()
    self.device.handleError(response)
    return
def setRtDistanceMode(self, value):
    """
    Set the distance mode. Depending on the configuration of the IDS the mode
    can be Displacement, Absolute Distance or Vibrometry.

    Parameters
    ----------
    value: 1 = Displacement (HSSL mode and Linear mode)
           2 = Absolute Distance (HSSL mode only)
           3 = Vibrometry (Linear mode)
    """
    rpc = "%s.%s" % (self.interface_name, "setRtDistanceMode")
    response = self.device.request(rpc, [value])
    # Status 0 commits the pending change; anything else rolls it back.
    if response['result'][0] == 0:
        self.apply()
    else:
        self.discard()
    self.device.handleError(response)
    return
def setRtOutMode(self, value):
    """
    Set the realtime output mode.

    Parameters
    ----------
    value: 0 = HSSL (TTL), 1 = HSSL (LVDS), 2 = AquadB (TTL),
           3 = AquadB (LVDS), 4 = SinCos (TTL Error Signal),
           5 = SinCos (LVDS Error Signal), 6 = Linear (TTL),
           7 = Linear (LVDS), 8 = BiSS-C, 9 = Deactivated
    """
    rpc = "%s.%s" % (self.interface_name, "setRtOutMode")
    response = self.device.request(rpc, [value])
    # Status 0 commits the pending change; anything else rolls it back.
    if response['result'][0] == 0:
        self.apply()
    else:
        self.discard()
    self.device.handleError(response)
    return
|
<reponame>kassemal/diffTesting<gh_stars>0
"""
Methods that read 'Internet Usage' dataset and the related generalization trees.
"""
#!/usr/bin/env python
# coding=utf-8
import utils.utility as ul
import pickle
from pulp import *
#Some remarkable attributes numbers:
#2 Age (index = 1): Not-Say, 41, 28, 25, 17, 55, 53, 32, 65, 49, 27, 33, 44, 63, 18, 30, 34, 39, 60, 26, 35, 50, 40, 29, 23, 19, 45, 31, 59, 37, 54, 24, 36, 20,
#48, 69, 42, 21, 47, 43, 38, 22, 5, 57, 68, 52, 15, 51, 16, 56, 46, 64, 73, 62, 80, 58, 61, 7, 14, 67, 70, 72, 76, 71, 66, 78, 13, 79, 77, 9,
#6, 75, 74, 8, 12, 11, 10 (77 ->[5,80])
#12 Country: Texas, Florida, Illinois, Ontario, Washington, Oklahoma, California, Oregon, Alberta, Kentucky,
#North-Carolina, Georgia, Pennsylvania, Indiana, Virginia, Australia, Michigan, Ohio, Connecticut, Rhode-Island,
#New-York, United-Kingdom, Massachusetts, Saskatchewan, Idaho, Wisconsin, New-Jersey, Italy, South-Dakota, Louisiana,
#Vermont, Missouri, Mississippi, Netherlands, Kansas, Alaska, Minnesota, Colorado, Maryland, Utah, Nevada, Washington-DC,
#Wyoming, Arizona, New-Hampshire, South-Carolina, Delaware, Tennessee, Sweden, Afghanistan, Iowa, British-Columbia, Arkansas,
#Montana, France, Alabama, Kuwait, Finland, Switzerland, New-Zealand, Belgium, China, Spain, Manitoba, Maine, Hong-Kong, Nebraska,
#Germany, West-Virginia, Brazil, New-Brunswick, Quebec, Other, Colombia, Hawaii, Japan, South-Africa, Portugal, New Mexico, Austria, India,
#Namibia, Argentina, Israel, Ireland, Nova-Scotia, Thailand, Singapore, Taiwan, North-Dakota, Philippines, Turkey, Venezuela, Denmark, Malaysis,
#Greece, Norway, South-Korea, Oman, Bhutan, Iceland, Czech, Prince Edward Island, Chile, Panama, Newfoundland, Hungary, Egypt, Russia, Ecuador,
#Croatia, Poland, Morocco, Puerto-Rico, Costa-Rica, Dominican-Republic, Jamaica, Yukon, Northwest Territories, Netherlands Antilles, Kenya,
#Sri-Lanka, Indonesia, Romania, Armenia, Algeria, Tunisia, Nicaragua, Burundi (129)
#19 Education Attainment: Masters, Some-College, College, High-School, Professional, Grammar, Special, Doctoral, Other (9)
#21 Gender : Male, Female (2)
#22 Household Income: Over-$100, under-$10, $50-74, $75-99, Not-Say, $30-39, $20-29, $10-19, $40-49 (9)
#32 Major Geographical Location
#33 Major Occupation: Professional, Education, Computer, Other, Management (5)
#34 Marital Status: Married, Single, Other, Divorced, Separated, Widowed, Not-Say (7)
#36 Opinions on Censorship
#37 Primary Computing Platform
#38 Primary Language: English, Spanish, Italian, Dutch, american, Swedish, Russian, French, Chinese, Finnish, Englishmalay, German, URDU, Portuguese,
#Slovenian, Bengali, Kannada, EnglishAmerican-Sign-Language, Hebrew, afrikaans, EnglishCajun, GermanSwiss-German, Afrikaans,
#Korean, Englishpig-latin, EnglishFILIPINO, Turkish, Japanese, Not-Say, EnglishAustralian, Hindi, Danish, english, Greek, Englishand-also-Spanish,
#SOUTHERN, Norwegian, swedish, danish, bilingual-in-Spanish-and-english, Arabic, Englishestonian, Japanesenot-really, EnglishPolish,
#Englishtagalog, Icelandic, Tamil, Englishczech, American-Sign-Language, icelandic, Tagalog, EnglishTagalog, Englishsouthern-english,
#scottish-gaelic, bosnian, swahili, Serbian, Filipino, both-english+italian, Polish, Hungarian, EnglishMandarin-Chinese,
#norwegian, Bulgarian, EnglishTamil, croatian, turkish, korean, SpanishNorwegian, ENGLISH, EnglishSinhalese, Englishfrench, EnglishAmerican,
#EnglishNew-Zealand, EnglishAmerican-Southern, EnglishSPANISH, American, Croatian, finnish, Macedonian, Englishaustralian, Englishhawaiian,
#EnglishFilipino, Urdu, Hindienglish, Bahasa-Malaysia, Malay, Telugu, Indonesian, Englishitalian-french-german, cherakee, Not-Saymardesouquot,
#Danisk, swedishn, Norvegian, Romanian, Swiss-German, EnglishHungarian, EnglishAfrikaans, EnglishGreek, canadian-english, EnglishAustralian-English,
#Frenchfrench, spanish, australian, SpanishENGLISH, Swiss-German, maltese, Lithaunian, united-states-of-america-english-with-southern-accent, Germanaustrianic,
#Bengalidanish, EnglishTurkish, Thai, EnglishEbonics, hebrew (116)
#39 Primary Place of WWW Access
#40 Race: White, Hispanic, Indigenous, Not-Say, Other, Latino, Black, Asian (8)
#60 Registered to Vote
#61 Sexual Preference
#62 Web Ordering
#63 Web Page Creation
#70 Willingness to Pay Fees
#71 Years on Internet: 1-3-yr, Under-6-mo, 4-6-yr, 6-12-mo, Over-7-yr (5)
#Attribute names as ordered in 'data/internet.data' file
# Attribute names as ordered in 'data/internet.data' file.
# NOTE(review): 'not_purchasing_judge_quality ' and 'not_purchasing_never_tried '
# contain a trailing space inside the string — kept as-is, since downstream
# pickle file names and keys may depend on the exact text; confirm before fixing.
ATT_NAMES = ['actual_time', 'age', 'community_building', 'community_membership_family',
    'community_membership_hobbies', 'community_membership_none', 'community_membership_other',
    'community_membership_political', 'community_membership_professional', 'community_membership_religious',
    'community_membership_support', 'country', 'disability_cognitive', 'disability_hearing', 'disability_motor',
    'disability_not_impaired', 'disability_not_say', 'disability_vision', 'education_attainment', 'falsification_of_information',
    'gender', 'household_income', 'how_you_heard_about_survey_banner', 'how_you_heard_about_survey_friend',
    'how_you_heard_about_survey_mailing_list', 'how_you_heard_about_survey_others', 'how_you_heard_about_printed_media',
    'how_you_heard_about_survey_remebered', 'how_you_heard_about_survey_search_engine', 'how_you_heard_about_usenet_news',
    'how_you_heard_about_www_page', 'major_geographical_location', 'major_occupation', 'marital_status',
    'most_import_issue_facing_the_internet', 'opinions_on_censorship', 'primary_computing_platform', 'primary_language',
    'primary_place_of_www_access', 'race', 'not_purchasing_bad_experience', 'not_purchasing_bad_press', 'not_purchasing_cant_find',
    'not_purchasing_company_policy', 'not_purchasing_easier_locally', 'not_purchasing_enough_info', 'not_purchasing_judge_quality ',
    'not_purchasing_never_tried ', 'not_purchasing_no_credit', 'not_purchasing_not_applicable', 'not_purchasing_not_option',
    'not_purchasing_other', 'not_purchasing_prefer_people', 'not_purchasing_privacy', 'not_purchasing_receipt', 'not_purchasing_security',
    'not_purchasing_too_complicated', 'not_purchasing_uncomfortable', 'not_purchasing_unfamiliar_vendor', 'registered_to_vote',
    'sexual_preference', 'web_ordering', 'web_page_creation', 'who_pays_for_access_dont_know', 'who_pays_for_access_other',
    'who_pays_for_access_parents', 'who_pays_for_access_school', 'who_pays_for_access_self',
    'who_pays_for_access_work', 'willingness_to_pay_fees', 'years_on_internet', 'pseudonym']
# Per-attribute kind flags, aligned with ATT_NAMES by index:
# 'False' means that the attribute values are continuous or ordinal
# 'True' means that the attribute is categorical
CATEGORY = [True, False, True, False, False, False, False, False, False, False, False, True,
    False, False, False, False, False, False, True, True, True, True, False, False, False, False, False, False,
    False, False, False, True, True, True, True, False, True, True, True, True, False, False, False, False,
    False, False, False, False, False, False, False, False, False, False, False, False, False, False, False,
    True, True, True, True, False, False, False, False, False, False, True, True, True]
def read():
    """
    Read the Internet Usage data from 'data/internet.data'.

    Records whose 'age' field (index 1) is 'Not-Say', and records that do not
    have exactly 72 tab-separated fields, are dropped; empty entries are
    replaced by '?'.  For every continuous attribute (CATEGORY[j] is False) a
    (value -> count) dictionary and the sorted value list are pickled to
    'data/internet_<attribute>_static.pickle'.

    Returns
    -------
    list of kept records, each a list of 72 attribute values (strings).
    """
    from functools import cmp_to_key  # local import: only needed for the sort below

    # initialize
    nb_attributes = len(ATT_NAMES)
    data, numeric_dict = [], []
    for j in range(nb_attributes):
        if CATEGORY[j] is False:
            numeric_dict.append(dict())  # one value-count dictionary per continuous attribute
    # read data; 'with' guarantees the handle is closed (the original leaked it,
    # and its 'rU' mode was removed in Python 3.11)
    with open('data/internet.data', 'r') as data_file:
        for line in data_file:
            temp = line.strip().split('\t')
            # remove all the records where 'age' takes the value 'Not-Say'
            # Only 9799 records will remain
            if temp[1] == 'Not-Say':
                continue
            # replace missing entries by '?'
            for j in range(len(temp)):
                if temp[j] == '':
                    temp[j] = '?'
            # verify that the number of attributes in each record is 72
            if len(temp) == 72:
                data.append(temp)
            else:
                continue
            # keep a dictionary of continuous attributes (kept records only)
            index = 0
            for j in range(nb_attributes):
                if CATEGORY[j] is False:
                    try:
                        numeric_dict[index][temp[j]] += 1
                    except KeyError:  # narrow: only a missing key is expected here
                        numeric_dict[index][temp[j]] = 1
                    index += 1
    # pickle numeric attributes and get NumRange
    index = 0
    for j in range(nb_attributes):
        if CATEGORY[j] is False:
            with open('data/internet_' + ATT_NAMES[j] + '_static.pickle', 'wb') as static_file:
                sort_value = list(numeric_dict[index].keys())
                # Python 3: list.sort() lost the 'cmp' argument — wrap the
                # project comparator with functools.cmp_to_key instead.
                sort_value.sort(key=cmp_to_key(ul.cmp_str))
                pickle.dump((numeric_dict[index], sort_value), static_file)
            index += 1
    return data
|
<gh_stars>1-10
#!/usr/bin/env python
import os
import unittest
import numpy as np
from slowgrad.tensor import Tensor
from slowgrad.utils import fetch
import slowgrad.optim as optim
from tqdm import trange
from models import *
# mnist loader
def fetch_mnist():
    """Download the four MNIST archives and return (X_train, Y_train, X_test, Y_test).

    Image arrays are uint8 with shape (n, 28, 28); label arrays are 1-D uint8.
    NOTE(review): yann.lecun.com has become an unreliable mirror — confirm the
    URLs still resolve before relying on this in CI.
    """
    import gzip

    def parse(dat):
        # Each archive is gzip-compressed; the payload is a flat byte buffer.
        # 0x10 / 8 bytes of IDX header are stripped by the callers below.
        return np.frombuffer(gzip.decompress(dat), dtype=np.uint8).copy()

    X_train = parse(
        fetch("http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz")
    )[0x10:].reshape((-1, 28, 28))
    Y_train = parse(
        fetch("http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz"))[8:]
    X_test = parse(
        fetch("http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz")
    )[0x10:].reshape((-1, 28, 28))
    Y_test = parse(
        fetch("http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz"))[8:]
    return X_train, Y_train, X_test, Y_test
# load the mnist dataset
# NOTE(review): this downloads MNIST at import time (module-level side effect);
# importing this file without network access will fail — confirm this is intended.
X_train, Y_train, X_test, Y_test = fetch_mnist()
def train(model, optim, steps, BS=128):
    """Train `model` for `steps` iterations on random MNIST minibatches.

    Parameters
    ----------
    model: network exposing forward(x) -> Tensor
    optim: optimizer exposing zero_grad() and step()
           (NOTE: this parameter shadows the module-level `optim` import inside
           this function — kept for interface compatibility)
    steps: number of minibatch updates to run
    BS: minibatch size

    Returns
    -------
    (losses, accuracies): per-step training loss values and minibatch accuracies.
    """
    losses, accuracies = [], []
    t = trange(steps)
    for i in t:
        optim.zero_grad()
        samp = np.random.randint(0, X_train.shape[0], size=(BS))
        x = Tensor(X_train[samp].reshape((-1, 28 * 28)).astype(np.float32))
        Y = Y_train[samp]
        y = np.zeros((len(samp), 10), np.float32)
        # correct loss for NLL, torch NLL loss returns one per row
        y[range(y.shape[0]), Y] = -10.0
        y = Tensor(y)
        # network
        out = model.forward(x)
        # NLL loss function
        loss = out.mul(y).mean()
        loss.backward()
        optim.step()
        cat = np.argmax(out.data, axis=1)
        accuracy = (cat == Y).mean()
        # printing
        loss = loss.data
        losses.append(loss)
        accuracies.append(accuracy)
        t.set_description("loss %.2f accuracy %.2f" % (loss, accuracy))
    # Fix: these histories used to be collected and then discarded (the function
    # returned None).  Returning them lets callers inspect training curves;
    # existing callers that ignore the return value are unaffected.
    return losses, accuracies
def evaluate(model):
    """Compute accuracy on the module-level MNIST test set, print it, and
    assert that it exceeds 0.95."""
    inputs = Tensor(X_test.reshape((-1, 28 * 28)).astype(np.float32))
    predictions = np.argmax(model.forward(inputs).data, axis=1)
    accuracy = (Y_test == predictions).mean()
    print("test set accuracy is %f" % accuracy)
    assert accuracy > 0.95
class TestMNIST(unittest.TestCase):
    """End-to-end tests: train a small network on MNIST with SGD and require
    >95% test-set accuracy (the threshold is asserted inside evaluate())."""

    def _train_and_check(self, model_cls):
        # Seed before constructing the model so the randomly initialized
        # weights are reproducible across runs.
        np.random.seed(1337)
        model = model_cls()
        optimizer = optim.SGD(model.parameters(), lr=0.001)
        train(model, optimizer, steps=1000)
        evaluate(model)

    def test_sgd(self):
        self._train_and_check(TinyBobNet)

    def test_sgd_layer(self):
        self._train_and_check(TinyBobNetLayer)

    # The convolutional variants are kept disabled, as in the original file:
    # def test_convnet(self):
    #     self._train_and_check(TinyConvNet)
    # def test_convnet_layer(self):
    #     self._train_and_check(TinyConvNetLayer)
if __name__ == "__main__":
    # Manual entry point: trains the convolutional model with RMSProp.
    # The unittest cases above only exercise the fully-connected models with SGD.
    np.random.seed(1337)
    model = TinyConvNetLayer()
    optimizer = optim.RMSProp(model.parameters(), lr=0.001)
    train(model, optimizer, steps=1000)
    evaluate(model)
|
<reponame>zhenwendai/MXFusion
import warnings
import numpy as np
import mxnet as mx
from mxnet import initializer
from mxnet import ndarray
from mxnet.gluon import ParameterDict
from ..components.variables import VariableType, Variable
from ..components import ModelComponent
from ..util.inference import realize_shape
from ..common.config import get_default_device
from ..components.functions.gluon_func_eval import GluonFunctionEvaluation
class InferenceParameters(object):
    """
    The parameters and outcomes of an inference method.

    InferenceParameters is a pool of memory that contains a mapping from uuid to two types of memories (MXNet ParameterDict and Constants).

    :param constants: Specify a list of model variables as constants
    :type constants: {ModelComponent.uuid : mxnet.ndarray}
    :param dtype: data type for internal numerical representation
    :type dtype: {numpy.float64, numpy.float32, 'float64', 'float32'}
    :param context: The MXNet context
    :type context: {mxnet.cpu or mxnet.gpu}
    """
    def __init__(self, constants=None, dtype=None, context=None):
        self.dtype = dtype if dtype is not None else np.float32
        self.mxnet_context = context if context is not None else get_default_device()
        self._constants = {}
        self._var_ties = {}
        if constants is not None:
            # Keys may be ModelComponent objects or raw uuid strings; normalize to uuids.
            constant_uuids = {
                (k.uuid if isinstance(k, ModelComponent) else k): v
                for k, v in constants.items()}
            self._constants.update(constant_uuids)
        self._params = ParameterDict()
    def update_constants(self, constants):
        # Merge additional constants into the pool, normalizing keys to uuids.
        self.constants.update({
            (k.uuid if isinstance(k, ModelComponent) else k): v
            for k, v in constants.items()})
    def initialize_params(self, graphs, observed_uuid):
        """
        :param graphs: a list of graphs in which the parameters will be optimized.
        :type graphs: a list of FactorGraph
        :param observed_uuid: Parameter Variables that are passed in directly as data, not to be inferred.
        :type observed_uuid: list, set
        """
        # NOTE(review): __init__ always assigns a ParameterDict, so this check is
        # always true and the warning fires on every call — confirm intent.
        if self._params is not None:
            warnings.warn("InferenceParameters has already been initialized. The existing one will be overwritten.")
        self._params = ParameterDict()
        for g in graphs:
            # load in parameterdict from external gluon blocks.
            for f in g.functions.values():
                if isinstance(f, GluonFunctionEvaluation):
                    self._params.update(
                        f.function_wrapper.collect_internal_parameters())
            for var in g.get_constants():
                self._constants[var.uuid] = var.constant
            # Variables that are constants or observed data are not optimized.
            excluded = set(self._constants.keys()).union(observed_uuid)
            for var in g.get_parameters(excluded=excluded,
                                        include_inherited=False):
                var_shape = realize_shape(var.shape, self._constants)
                init = initializer.Constant(var.initial_value) if var.initial_value is not None else None
                self._params.get(name=var.uuid, shape=var_shape,
                                 dtype=self.dtype,
                                 allow_deferred_init=True, init=init)
        self._params.initialize(ctx=self.mxnet_context)
    def initialize_with_carryover_params(self, graphs, observed_uuid, var_ties,
                                         carryover_params):
        """
        :param graphs: a list of graphs in which the parameters will be optimized.
        :type graphs: a list of FactorGraph
        :param observed_uuid: Parameter Variables that are passed in directly as data, not to be inferred.
        :type observed_uuid: {UUID : mx.ndarray}
        :param var_ties: A dictionary of variable maps that are tied together and use the MXNet Parameter of the dict value's uuid.
        :type var_ties: { UUID to tie from : UUID to tie to }
        :param carryover_params: list of InferenceParameters containing the outcomes of previous inference algorithms.
        :type carryover_params: [InferenceParameters]
        """
        # TODO: var_ties is discarded at the moment.
        var_uuid = set()
        for g in graphs:
            var_uuid = var_uuid.union(set(g.variables.keys()))
        # Collect parameters from earlier inference runs that overlap these graphs.
        carryover_pairs = {}
        for carryover in carryover_params:
            for uuid, v in carryover.param_dict.items():
                if uuid in var_uuid:
                    if uuid in carryover_pairs:
                        warnings.warn('The variable with UUID '+uuid+' exists in multiple carryover parameter sets.')
                    carryover_pairs[uuid] = v
        # self._var_ties = var_ties.copy()
        # for g in graphs:
        #     # TODO: check the behavior of var_ties in graph
        #     self._var_ties.update(g.var_ties)
        # for v_uuid in self.constants:
        #     if v_uuid in self._var_ties:
        #         del self._var_ties[v_uuid]
        # Carried-over variables count as observed so they are not re-initialized.
        observed_uuid = set(observed_uuid).union(carryover_pairs.keys())
        self.initialize_params(graphs, observed_uuid)
        # carryover_pairs = {
        #     to_var_uuid: carryover.param_dict[to_var_uuid]
        #     for from_var_uuid, to_var_uuid in self._var_ties.items()
        #     for carryover in carryover_params
        #     if to_var_uuid in carryover.param_dict}
        self._params.update(carryover_pairs)
    @property
    def param_dict(self):
        # MXNet ParameterDict holding the optimizable parameters.
        return self._params
    @property
    def constants(self):
        # Mapping uuid -> constant value (primitive or mx.nd.array).
        return self._constants
    @property
    def var_ties(self):
        return self._var_ties
    def __getitem__(self, key, ctx=None):
        if not isinstance(key, Variable):
            raise KeyError("The access key of inference parameter needs to be Variable, but got "+str(type(key))+".")
        pkey = key.inherited_name if key.isInherited else key.uuid
        val = self._params.get(pkey).data(ctx)
        if key.transformation is not None:
            # Values are stored unconstrained; return them in the transformed space.
            val = key.transformation.transform(val)
        return val
    def __setitem__(self, key, item):
        if not isinstance(key, Variable):
            raise KeyError("The access key of inference parameter needs to be Variable, but get "+str(type(key))+".")
        if key.type == VariableType.PARAMETER:
            if key.transformation is not None:
                # Store in the unconstrained space (inverse of __getitem__).
                item = key.transformation.inverseTransform(item)
            self._params.get(key.uuid).set_data(item)
        elif key.type == VariableType.CONSTANT:
            self._params.get(key.uuid)._value = item
    # Override contains so that it doesn't use the __getitem__ method.
    def __contains__(self, k):
        # NOTE(review): this tests attribute names of the object (self.__dict__),
        # not parameter/constant uuids — confirm that is the intended semantics.
        return k in self.__dict__
    @staticmethod
    def load_parameters(uuid_map=None,
                        parameters_file=None,
                        variable_constants_file=None,
                        mxnet_constants_file=None,
                        context=None, dtype=None,
                        current_params=None):
        """
        Loads back a set of InferenceParameters from files.

        :param parameters_file: These are the parameters of the previous inference algorithm. These are in a {uuid: mx.nd.array} mapping.
        :type parameters_file: file saved down with mx.nd.save(), so a {uuid: mx.nd.array} mapping saved in a binary format.
        :param mxnet_constants_file: These are the constants in mxnet format from the previous inference algorithm. These are in a {uuid: mx.nd.array} mapping.
        :type mxnet_constants_file: file saved down with mx.nd.save(), so a {uuid: mx.nd.array} mapping saved in a binary format.
        :param variable_constants_file: These are the constants in primitive format from the previous inference algorithm.
        :type variable_constants_file: json dict of {uuid: constant_primitive}
        """
        def with_uuid_map(item, uuid_map):
            # Translate a saved uuid to the current model's uuid when a map is given.
            if uuid_map is not None:
                return uuid_map[item]
            else:
                return item
        ip = InferenceParameters(context=context, dtype=dtype)
        if parameters_file is not None:
            old_params = ndarray.load(parameters_file)
            mapped_params = {with_uuid_map(k, uuid_map): v
                             for k, v in old_params.items()}
            new_paramdict = ParameterDict()
            if current_params is not None:
                new_paramdict.update(current_params)
            # Do this because we need to map the uuids to the new Model
            # before loading them into the ParamDict
            for name, mapped_param in mapped_params.items():
                new_paramdict[name]._load_init(mapped_param, ip.mxnet_context)
            ip._params = new_paramdict
        new_mxnet_constants = {}
        new_variable_constants = {}
        if variable_constants_file is not None:
            import json
            with open(variable_constants_file) as f:
                old_constants = json.load(f)
            new_variable_constants = {with_uuid_map(k, uuid_map): v for k, v in old_constants.items()}
        if mxnet_constants_file is not None:
            new_mxnet_constants = {with_uuid_map(k, uuid_map): v for k, v in ndarray.load(mxnet_constants_file).items()}
        ip._constants = {}
        ip._constants.update(new_variable_constants)
        ip._constants.update(new_mxnet_constants)
        return ip
    def save(self, prefix):
        """
        Saves the parameters and constants down to json files as maps from {uuid : value}, where value is an mx.ndarray for parameters and either primitive number types or mx.ndarray for constants. Saves up to 3 files: prefix+["_params.json", "_variable_constants.json", "_mxnet_constants.json"]

        :param prefix: The directory and any appending tag for the files to save this Inference as.
        :type prefix: str , ex. "../saved_inferences/experiment_1"
        """
        param_file = prefix + "_params.json"
        variable_constants_file = prefix + "_variable_constants.json"
        mxnet_constants_file = prefix + "_mxnet_constants.json"
        # NOTE(review): parameters and mxnet constants are written with
        # mx.nd.save (a binary format) despite the ".json" suffix — confirm.
        to_save = {key: value._reduce() for key, value in self._params.items()}
        ndarray.save(param_file, to_save)
        mxnet_constants = {uuid: value
                           for uuid, value in self._constants.items()
                           if isinstance(value, mx.ndarray.ndarray.NDArray)}
        ndarray.save(mxnet_constants_file, mxnet_constants)
        variable_constants = {uuid: value
                              for uuid, value in self._constants.items()
                              if uuid not in mxnet_constants}
        import json
        with open(variable_constants_file, 'w') as f:
            json.dump(variable_constants, f, ensure_ascii=False)
|
import json
from django.conf import settings
from django.core import validators
from django.db import models
from django.utils.translation import ugettext_lazy as _
from rest_framework.renderers import JSONRenderer
from taggit.managers import TaggableManager
from taggit.models import CommonGenericTaggedItemBase, TaggedItemBase
from .constants import units
from categories.models import Category
from common.models import Region
from containers.models import Container
from core.db.base import BaseAbstractModel
from core.storage.utils import public_image_upload_to
from delivery_options.models import DeliveryOption
# Choices for Product.base_unit: how the per-item quantity is measured.
BASE_UNITS = (
    ("weight", _("Weight (g) per item")),
    ("volume", _("Volume (l) per item")),
)
class BigAutoFieldTaggedItem(CommonGenericTaggedItemBase, TaggedItemBase):
    """taggit through model whose generic foreign key uses a BigIntegerField
    object id, so models with 64-bit primary keys can be tagged."""
    object_id = models.BigIntegerField(verbose_name=_("Object id"), db_index=True)
class Product(BaseAbstractModel):
    """A product offered by a seller: identity, dietary flags, pricing/VAT,
    packaging, delivery configuration and regional availability."""
    class Meta:
        verbose_name = _("Product")
        verbose_name_plural = _("Products")
    # --- identity and presentation ---
    name = models.CharField(max_length=255, verbose_name=_("Name"))
    description = models.TextField(verbose_name=_("Description"))
    # NOTE(review): both an external image_url and an uploaded image exist;
    # image_url looks like a legacy field — confirm which one takes precedence.
    image_url = models.CharField(
        max_length=255, null=True, blank=True, verbose_name=_("Image url")
    )
    image = models.ImageField(
        upload_to=public_image_upload_to, null=True, blank=True, verbose_name=_("Image")
    )
    # --- origin and availability ---
    region = models.ForeignKey(
        Region,
        on_delete=models.PROTECT,
        related_name="products",
        blank=False,
        help_text=_("Region of origin"),
        verbose_name=_("Region of origin"),
    )
    regions = models.ManyToManyField(
        Region,
        help_text=_("The associated regions the product should be available in"),
        verbose_name=_("Available in regions"),
    )
    category = models.ForeignKey(
        Category, on_delete=models.PROTECT, verbose_name=_("Category")
    )
    seller = models.ForeignKey(
        settings.AUTH_USER_MODEL, on_delete=models.CASCADE, verbose_name=_("Seller")
    )
    # --- dietary / production flags ---
    is_organic = models.BooleanField(default=False, verbose_name=_("Is organic"))
    is_vegan = models.BooleanField(default=False, verbose_name=_("Is vegan"))
    is_gluten_free = models.BooleanField(
        default=False, verbose_name=_("Is gluten free")
    )
    is_grazing_animal = models.BooleanField(
        default=False, verbose_name=_("Is grazing animal")
    )
    is_gmo_free = models.BooleanField(default=False, verbose_name=_("Is GMO free"))
    # --- lot size, pricing and tax ---
    amount = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        verbose_name=_("Amount in a lot"),
        validators=[
            validators.MinValueValidator(0, message=_("Amount should not be negative"))
        ],
    )
    unit = models.CharField(
        max_length=10, choices=units, null=True, blank=True, verbose_name=_("Unit")
    )
    price = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        verbose_name=_("Price"),
        validators=[
            validators.MinValueValidator(0, message=_("Price should not be negative"))
        ],
    )
    # VAT is a percentage in [0, 100].
    vat = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        verbose_name=_("VAT rate"),
        validators=[
            validators.MaxValueValidator(100, _("VAT should not be more than 100%")),
            validators.MinValueValidator(0, message=_("VAT should not be negative")),
        ],
    )
    # --- packaging ---
    container_type = models.ForeignKey(
        Container, on_delete=models.PROTECT, verbose_name=_("Container type")
    )
    container_description = models.TextField(
        null=True, blank=True, verbose_name=_("Container description")
    )
    # --- delivery ---
    delivery_charge = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        default=0,
        validators=[
            validators.MinValueValidator(
                0, message=_("Delivery charge should not be negative")
            )
        ],
        verbose_name=_("Delivery charge"),
    )
    delivery_options = models.ManyToManyField(
        DeliveryOption, verbose_name=_("Delivery options")
    )
    third_party_delivery = models.BooleanField(
        default=False, verbose_name=_("Third party delivery possible")
    )
    delivery_requirements = models.CharField(
        max_length=255, null=True, blank=True, verbose_name=_("Delivery requirements")
    )
    # --- classification and external identifiers ---
    tags = TaggableManager(
        through=BigAutoFieldTaggedItem, blank=True, verbose_name=_("Tags")
    )
    ean8 = models.CharField(max_length=255, null=True, blank=True)
    ean13 = models.CharField(max_length=255, null=True, blank=True)
    sellers_product_identifier = models.CharField(
        max_length=255,
        null=True,
        blank=True,
        verbose_name=_("Sellers product identifier"),
    )
    base_unit = models.CharField(
        max_length=16, choices=BASE_UNITS, blank=True, verbose_name=_("Base unit")
    )
    item_quantity = models.DecimalField(
        max_digits=10,
        decimal_places=2,
        null=True,
        blank=True,
        validators=[
            validators.MinValueValidator(
                0, message=_("Items quantity should not be negative")
            )
        ],
        verbose_name=_("Item quantity"),
    )
    def create_snapshot(self):
        # Serialize the product through the DRF snapshot serializer and return
        # the result as a plain dict (render to JSON bytes, then parse back).
        from products.serializers import ProductSnapshotSerializer
        return json.loads(JSONRenderer().render(ProductSnapshotSerializer(self).data))
    def first_available_delivery_option(self):
        # If the region is served by a central logistics company, any configured
        # option qualifies; otherwise central logistics must be excluded.
        # NOTE(review): region.settings.first() returns None when the region has
        # no settings row, which would raise AttributeError here — confirm.
        if self.region.settings.first().central_logistics_company:
            return self.delivery_options.first()
        return self.delivery_options.exclude(
            id=DeliveryOption.CENTRAL_LOGISTICS
        ).first()
    def __str__(self):
        return self.name
|
<gh_stars>0
import numbers
import os
from unittest.mock import MagicMock
import numpy as np
import pytest
import torch
from pytest import approx, raises
from sklearn.metrics import confusion_matrix, f1_score, precision_score, recall_score
import ignite.distributed as idist
from ignite.engine import Engine, Events, State
from ignite.metrics import ConfusionMatrix, Precision, Recall
from ignite.metrics.metric import BatchFiltered, BatchWise, EpochWise, Metric, reinit__is_reduced, sync_all_reduce
class DummyMetric1(Metric):
    # Minimal Metric subclass: update() only asserts that the output it
    # receives equals the `true_output` captured at construction time.
    def __init__(self, true_output, output_transform=lambda x: x):
        super(DummyMetric1, self).__init__(output_transform=output_transform)
        self.true_output = true_output
    def reset(self):
        pass
    def compute(self):
        pass
    def update(self, output):
        assert output == self.true_output
@pytest.mark.distributed
@pytest.mark.skipif("WORLD_SIZE" not in os.environ, reason="Skip if WORLD_SIZE not in env vars")
@pytest.mark.skipif(torch.cuda.is_available(), reason="Skip if GPU")
def test_metric_warning(distributed_context_single_node_gloo):
    """In a distributed context, instantiating a Metric subclass without
    distributed support should emit a RuntimeWarning."""
    y = torch.tensor([1.0])
    with pytest.warns(RuntimeWarning, match=r"DummyMetric1 class does not support distributed setting"):
        DummyMetric1((y, y))
def test_no_transform():
    """With the identity output_transform, update() sees engine.state.output as-is."""
    y_pred = torch.Tensor([[2.0], [-2.0]])
    y = torch.zeros(2)
    metric = DummyMetric1(true_output=(y_pred, y))
    state = State(output=(y_pred, y))
    engine = MagicMock(state=state)
    metric.iteration_completed(engine)
def test_transform():
    """A custom output_transform can unpack arbitrary engine output structures."""
    y_pred = torch.Tensor([[2.0], [-2.0]])
    y = torch.zeros(2)
    def transform(output):
        pred_dict, target_dict = output
        return pred_dict["y"], target_dict["y"]
    metric = DummyMetric1(true_output=(y_pred, y), output_transform=transform)
    state = State(output=({"y": y_pred}, {"y": y}))
    engine = MagicMock(state=state)
    metric.iteration_completed(engine)
def test_output_as_mapping_wrong_keys():
    """A mapping output lacking the required ('y_pred', 'y') keys is rejected."""
    metric = DummyMetric1(true_output=(0, 1))
    state = State(output=({"y1": 0, "y2": 1}))
    engine = MagicMock(state=state)
    with pytest.raises(
        ValueError, match=r"When transformed engine's output is a mapping, " r"it should contain \('y_pred', 'y'\) keys"
    ):
        metric.iteration_completed(engine)
def test_output_as_mapping_keys_is_none():
    """With required_output_keys=None a mapping output is not unpacked at all."""
    class DummyMetric(Metric):
        required_output_keys = None
        def reset(self):
            pass
        def compute(self):
            pass
        def update(self, output):
            pass
    metric = DummyMetric()
    assert metric.required_output_keys is None
    state = State(output=({"y1": 0, "y2": 1}))
    engine = MagicMock(state=state)
    with pytest.raises(TypeError, match=r"Transformed engine output for DummyMetric metric should be a tuple/list"):
        metric.iteration_completed(engine)
def test_output_as_mapping():
    """A mapping with ('y_pred', 'y') keys is unpacked into a (y_pred, y) tuple."""
    y_pred = torch.Tensor([[2.0], [-2.0]])
    y = torch.zeros(2)
    metric = DummyMetric1(true_output=(y_pred, y))
    state = State(output=({"y_pred": y_pred, "y": y}))
    engine = MagicMock(state=state)
    metric.iteration_completed(engine)
def test_no_grad():
    """update() must run under no_grad: tensors derived inside it don't track grads."""
    y_pred = torch.zeros(4, requires_grad=True)
    y = torch.zeros(4, requires_grad=False)
    class DummyMetric(Metric):
        def reset(self):
            pass
        def compute(self):
            pass
        def update(self, output):
            y_pred, y = output
            mse = torch.pow(y_pred - y.view_as(y_pred), 2)
            # Input still carries requires_grad, but the derived tensor must
            # not — that proves iteration_completed wrapped us in no_grad().
            assert y_pred.requires_grad
            assert not mse.requires_grad
    metric = DummyMetric()
    state = State(output=(y_pred, y))
    engine = MagicMock(state=state)
    metric.iteration_completed(engine)
def test_arithmetics():
    """Metrics composed with Python operators recompute lazily from their parents.

    Each `assert ... .compute()` re-evaluates the composed expression from
    the parents' latest update() state, so updating the parents changes the
    composed result without re-building the expression.
    """
    class ListGatherMetric(Metric):
        # compute() returns element `index` of the most recently update()'d list.
        def __init__(self, index):
            self.index = index
            super(ListGatherMetric, self).__init__()
        def reset(self):
            self.list_ = []
        def update(self, output):
            self.list_ = output
        def compute(self):
            return self.list_[self.index]
    m0 = ListGatherMetric(0)
    m1 = ListGatherMetric(1)
    m2 = ListGatherMetric(2)
    # __add__
    m0_plus_m1 = m0 + m1
    m0.update([1, 10, 100])
    m1.update([1, 10, 100])
    assert m0_plus_m1.compute() == 11
    m0.update([2, 20, 200])
    m1.update([2, 20, 200])
    assert m0_plus_m1.compute() == 22
    m2_plus_2 = m2 + 2
    m2.update([1, 10, 100])
    assert m2_plus_2.compute() == 102
    m2_plus_2 = 2 + m2
    m2.update([1, 10, 100])
    assert m2_plus_2.compute() == 102
    # __sub__
    m0_minus_m1 = m0 - m1
    m0.update([1, 10, 100])
    m1.update([1, 10, 100])
    assert m0_minus_m1.compute() == -9
    m0.update([2, 20, 200])
    m1.update([2, 20, 200])
    assert m0_minus_m1.compute() == -18
    m2_minus_2 = m2 - 2
    m2.update([1, 10, 100])
    assert m2_minus_2.compute() == 98
    m2_minus_2 = 2 - m2
    m2.update([1, 10, 100])
    assert m2_minus_2.compute() == -98
    # __mul__
    m0_times_m1 = m0 * m1
    m0.update([1, 10, 100])
    m1.update([1, 10, 100])
    assert m0_times_m1.compute() == 10
    m0.update([2, 20, 200])
    m1.update([2, 20, 200])
    assert m0_times_m1.compute() == 40
    m2_times_2 = m2 * 2
    m2.update([1, 10, 100])
    assert m2_times_2.compute() == 200
    m2_times_2 = 2 * m2
    m2.update([1, 10, 100])
    assert m2_times_2.compute() == 200
    # __pow__
    m0_pow_m1 = m0 ** m1
    m0.update([1, 10, 100])
    m1.update([1, 10, 100])
    assert m0_pow_m1.compute() == 1
    m0.update([2, 20, 200])
    m1.update([2, 20, 200])
    assert m0_pow_m1.compute() == 2 ** 20
    m2_pow_2 = m2 ** 2
    m2.update([1, 10, 100])
    assert m2_pow_2.compute() == 10000
    m2_pow_2 = 0.99 ** m2
    m2.update([1, 10, 100])
    assert m2_pow_2.compute() == 0.3660323412732292
    # __mod__
    m0_mod_m1 = m0 % m1
    m0.update([1, 10, 100])
    m1.update([1, 10, 100])
    assert m0_mod_m1.compute() == 1
    m0.update([2, 20, 200])
    m1.update([2, 20, 200])
    assert m0_mod_m1.compute() == 2
    m2_mod_2 = m2 % 2
    m2.update([1, 10, 100])
    assert m2_mod_2.compute() == 0
    # __truediv__
    m0_truediv_m1 = m0 / m1
    m0.update([1, 10, 100])
    m1.update([1, 10, 100])
    assert m0_truediv_m1.compute() == approx(0.1)
    m0.update([2, 20, 200])
    m1.update([2, 20, 200])
    assert m0_truediv_m1.compute() == approx(0.1)
    m2_truediv_2 = m2 / 2
    m2.update([1, 10, 100])
    assert m2_truediv_2.compute() == approx(50.0)
    m2_truediv_2 = 200 / m2
    m2.update([1, 10, 100])
    assert m2_truediv_2.compute() == approx(2.0)
    m0_truediv_m1 = m0.__truediv__(m1)
    m0.update([1, 10, 100])
    m1.update([1, 10, 100])
    assert m0_truediv_m1.compute() == approx(0.1)
    m0.update([2, 20, 200])
    m1.update([2, 20, 200])
    assert m0_truediv_m1.compute() == approx(0.1)
    m2_truediv_2 = m2.__truediv__(2)
    m2.update([1, 10, 100])
    assert m2_truediv_2.compute() == approx(50.0)
    m2_truediv_2 = m2.__rtruediv__(200)
    m2.update([1, 10, 100])
    assert m2_truediv_2.compute() == approx(2.0)
    # __floordiv__
    m0_floordiv_m1 = m0 // m1
    m0.update([1, 10, 100])
    m1.update([1, 10, 100])
    assert m0_floordiv_m1.compute() == 0
    m0.update([2, 20, 200])
    m1.update([2, 20, 200])
    assert m0_floordiv_m1.compute() == 0
    m2_floordiv_2 = m2 // 2
    m2.update([1, 10, 100])
    assert m2_floordiv_2.compute() == 50
def test_attach():
    """attach() wires reset/update/completed handlers; counts verify the wiring.

    5 epochs x 10 iterations: reset once per epoch, update once per
    iteration, compute once per completed() call (m2 is attached under two
    names, so it computes twice per epoch but resets/updates only once).
    """
    class CountMetric(Metric):
        def __init__(self, value):
            # Set before super().__init__() in case the base constructor
            # triggers reset(); re-zeroed below so counts start clean.
            # NOTE(review): the duplicated assignment is deliberate ordering
            # — confirm against the base class constructor behavior.
            self.reset_count = 0
            super(CountMetric, self).__init__()
            self.reset_count = 0
            self.compute_count = 0
            self.update_count = 0
            self.value = value
        def reset(self):
            self.reset_count += 1
        def compute(self):
            self.compute_count += 1
            return self.value
        def update(self, output):
            self.update_count += 1
    def process_function(*args, **kwargs):
        return 1
    engine = Engine(process_function)
    m1 = CountMetric(123)
    m2 = CountMetric(456)
    m1.attach(engine, "m1")
    m2.attach(engine, "m2_1")
    m2.attach(engine, "m2_2")
    engine.run(range(10), 5)
    assert engine.state.metrics["m1"] == 123
    assert engine.state.metrics["m2_1"] == 456
    assert engine.state.metrics["m2_2"] == 456
    assert m1.reset_count == 5
    assert m1.compute_count == 5
    assert m1.update_count == 50
    assert m2.reset_count == 5
    assert m2.compute_count == 10
    assert m2.update_count == 50
    assert m1.is_attached(engine)
    assert m2.is_attached(engine)
def test_detach():
    """detach() removes all handlers: no metrics published, is_attached False."""
    class DummyMetric(Metric):
        required_output_keys = None
        def reset(self):
            pass
        def compute(self):
            pass
        def update(self, output):
            pass
    def process_function(*args, **kwargs):
        return 1
    engine = Engine(process_function)
    m1 = DummyMetric()
    m2 = DummyMetric()
    m1.attach(engine, "m1")
    m2.attach(engine, "m2_1")
    m2.attach(engine, "m2_2")
    m1.detach(engine)
    m2.detach(engine)
    engine.run(range(10), 5)
    assert "m1" not in engine.state.metrics
    assert "m2_1" not in engine.state.metrics
    assert "m2_2" not in engine.state.metrics
    assert not m1.is_attached(engine)
    assert not m2.is_attached(engine)
def test_integration():
    """End-to-end: composed F1 (from Precision/Recall) matches sklearn on random data."""
    np.random.seed(1)
    n_iters = 10
    batch_size = 10
    n_classes = 10
    y_true = np.arange(0, n_iters * batch_size, dtype="int64") % n_classes
    # Mostly-correct predictions with ~40% noise so all classes appear.
    y_pred = 0.2 * np.random.rand(n_iters * batch_size, n_classes)
    for i in range(n_iters * batch_size):
        if np.random.rand() > 0.4:
            y_pred[i, y_true[i]] = 1.0
        else:
            j = np.random.randint(0, n_classes)
            y_pred[i, j] = 0.7
    y_true_batch_values = iter(y_true.reshape(n_iters, batch_size))
    y_pred_batch_values = iter(y_pred.reshape(n_iters, batch_size, n_classes))
    def update_fn(engine, batch):
        y_true_batch = next(y_true_batch_values)
        y_pred_batch = next(y_pred_batch_values)
        return torch.from_numpy(y_pred_batch), torch.from_numpy(y_true_batch)
    evaluator = Engine(update_fn)
    precision = Precision(average=False)
    recall = Recall(average=False)
    # Per-class F1 assembled with metric arithmetic.
    F1 = precision * recall * 2 / (precision + recall)
    precision.attach(evaluator, "precision")
    recall.attach(evaluator, "recall")
    F1.attach(evaluator, "f1")
    data = list(range(n_iters))
    state = evaluator.run(data, max_epochs=1)
    precision_true = precision_score(y_true, np.argmax(y_pred, axis=-1), average=None)
    recall_true = recall_score(y_true, np.argmax(y_pred, axis=-1), average=None)
    f1_true = f1_score(y_true, np.argmax(y_pred, axis=-1), average=None)
    precision = state.metrics["precision"].numpy()
    recall = state.metrics["recall"].numpy()
    f1 = state.metrics["f1"].numpy()
    assert precision_true == approx(precision), f"{precision_true} vs {precision}"
    assert recall_true == approx(recall), f"{recall_true} vs {recall}"
    assert f1_true == approx(f1), f"{f1_true} vs {f1}"
def test_abstract_class():
    """Metric is abstract: instantiating it directly must raise TypeError."""
    with raises(TypeError):
        Metric()
def test_pytorch_operators():
    """Tensor-style methods (.norm, .sum, .mean) on composed metrics match sklearn."""
    def _test(composed_metric, metric_name, compute_true_value_fn):
        metrics = {
            metric_name: composed_metric,
        }
        y_pred = torch.rand(15, 10, 5).float()
        y = torch.randint(0, 5, size=(15, 10)).long()
        def update_fn(engine, batch):
            y_pred, y = batch
            return y_pred, y
        validator = Engine(update_fn)
        for name, metric in metrics.items():
            metric.attach(validator, name)
        def data(y_pred, y):
            for i in range(y_pred.shape[0]):
                yield (y_pred[i], y[i])
        d = data(y_pred, y)
        state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
        assert set(state.metrics.keys()) == set([metric_name,])
        np_y_pred = np.argmax(y_pred.numpy(), axis=-1).ravel()
        np_y = y.numpy().ravel()
        assert state.metrics[metric_name] == approx(compute_true_value_fn(np_y_pred, np_y))
    precision_1 = Precision(average=False)
    precision_2 = Precision(average=False)
    norm_summed_precision = (precision_1 + precision_2).norm(p=10)
    def compute_true_norm_summed_precision(y_pred, y):
        p1 = precision_score(y, y_pred, average=None)
        p2 = precision_score(y, y_pred, average=None)
        return np.linalg.norm(p1 + p2, ord=10)
    _test(norm_summed_precision, "mean summed precision", compute_true_value_fn=compute_true_norm_summed_precision)
    precision = Precision(average=False)
    recall = Recall(average=False)
    sum_precision_recall = (precision + recall).sum()
    def compute_sum_precision_recall(y_pred, y):
        p = precision_score(y, y_pred, average=None)
        r = recall_score(y, y_pred, average=None)
        return np.sum(p + r)
    _test(sum_precision_recall, "sum precision recall", compute_true_value_fn=compute_sum_precision_recall)
    precision = Precision(average=False)
    recall = Recall(average=False)
    # Epsilon guards against division by zero for classes never predicted.
    f1 = (precision * recall * 2 / (precision + recall + 1e-20)).mean()
    def compute_f1(y_pred, y):
        f1 = f1_score(y, y_pred, average="macro")
        return f1
    _test(f1, "f1", compute_true_value_fn=compute_f1)
def test_indexing_metric():
    """Indexing a metric (metric[idx]) selects the corresponding result entries."""
    def _test(ignite_metric, sklearn_metic, sklearn_args, index, num_classes=5):
        y_pred = torch.rand(15, 10, num_classes).float()
        y = torch.randint(0, num_classes, size=(15, 10)).long()
        def update_fn(engine, batch):
            y_pred, y = batch
            return y_pred, y
        metrics = {"metric": ignite_metric[index], "metric_wo_index": ignite_metric}
        validator = Engine(update_fn)
        for name, metric in metrics.items():
            metric.attach(validator, name)
        def data(y_pred, y):
            for i in range(y_pred.shape[0]):
                yield (y_pred[i], y[i])
        d = data(y_pred, y)
        state = validator.run(d, max_epochs=1, epoch_length=y_pred.shape[0])
        sklearn_output = sklearn_metic(
            y.view(-1).numpy(), y_pred.view(-1, num_classes).argmax(dim=1).numpy(), **sklearn_args
        )
        # Indexed metric must equal slicing the unindexed result, and both
        # must agree with sklearn restricted to the same labels.
        assert (state.metrics["metric_wo_index"][index] == state.metrics["metric"]).all()
        assert np.allclose(state.metrics["metric"].numpy(), sklearn_output)
    num_classes = 5
    labels = list(range(0, num_classes, 2))
    _test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels)
    labels = list(range(num_classes - 1, 0, -2))
    _test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels)
    labels = [1]
    _test(Precision(), precision_score, {"labels": labels, "average": None}, index=labels)
    labels = list(range(0, num_classes, 2))
    _test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels)
    labels = list(range(num_classes - 1, 0, -2))
    _test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels)
    labels = [1]
    _test(Recall(), recall_score, {"labels": labels, "average": None}, index=labels)
    # np.ix_ is used to allow for a 2D slice of a matrix. This is required to get accurate result from
    # ConfusionMatrix. ConfusionMatrix must be sliced the same row-wise and column-wise.
    labels = list(range(0, num_classes, 2))
    _test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels))
    labels = list(range(num_classes - 1, 0, -2))
    _test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels))
    labels = [1]
    _test(ConfusionMatrix(num_classes), confusion_matrix, {"labels": labels}, index=np.ix_(labels, labels))
class DummyMetric2(Metric):
    # No-op metric whose reset/update carry the reinit__is_reduced marker;
    # used by the device/usage tests below.
    @reinit__is_reduced
    def reset(self):
        pass
    def compute(self):
        pass
    @reinit__is_reduced
    def update(self, output):
        pass
def _test_invalid_sync_all_reduce(device):
    """Unknown reduction-op names in sync_all_reduce specs must raise ValueError.

    Only checked when world_size > 1, since the decorator short-circuits in
    non-distributed runs.
    """
    class InvalidMetric(Metric):
        @reinit__is_reduced
        def reset(self):
            self.a = torch.tensor([0.0, 1.0, 2.0, 3.0], requires_grad=False)
            self.c = 0.0
            self.n = 0
            self.m = -1
        def compute(self):
            pass
        def update(self):
            pass
        @sync_all_reduce("a:sum")
        def invalid_reduction_op_1(self):
            pass
        @sync_all_reduce("c:MaX")
        def invalid_reduction_op_2(self):
            pass
        @sync_all_reduce("n:MINN")
        def invalid_reduction_op_3(self):
            pass
        @sync_all_reduce("m:PROduCT")
        def invalid_reduction_op_4(self):
            pass
    # XLA devices cannot host this metric; fall back to CPU there.
    metric_device = device if torch.device(device).type != "xla" else "cpu"
    m = InvalidMetric(device=metric_device)
    m.reset()
    if idist.get_world_size() > 1:
        with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
            m.invalid_reduction_op_1()
        with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
            m.invalid_reduction_op_2()
        with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
            m.invalid_reduction_op_3()
        with pytest.raises(ValueError, match=r"Reduction operation is not valid"):
            m.invalid_reduction_op_4()
def _test_distrib_sync_all_reduce_decorator(device):
    """Exercise every reduction op of sync_all_reduce (SUM default, MAX, MIN, PRODUCT).

    compute() asserts each attribute equals the value expected after one
    update() on every rank is all-reduced across the world.
    """
    class DummyMetric(Metric):
        @reinit__is_reduced
        def reset(self):
            # SUM op
            self.a = torch.tensor([0.0, 1.0, 2.0, 3.0], device=self._device, requires_grad=False)
            self.a_nocomp = self.a.clone().to("cpu")
            self.b = torch.tensor(1.0, dtype=torch.float64, device=self._device, requires_grad=False)
            self.b_nocomp = self.b.clone().to("cpu")
            self.c = 0.0
            self.c_nocomp = self.c
            self.n = 0
            self.n_nocomp = self.n
            # MAX op
            self.m = -1
            # MIN op
            self.k = 10000
            # initialize number of updates to test (MAX, MIN) ops
            self.num_updates = 0
            # PRODUCT op
            self.prod = torch.tensor([2.0, 3.0], device=self._device, requires_grad=False)
            self.prod_nocomp = self.prod.clone().to("cpu")
        @sync_all_reduce("a", "b", "c", "n:SUM", "m:MAX", "k:MIN", "prod:PRODUCT")
        def compute(self):
            assert (self.a.cpu() == (self.a_nocomp + 10) * idist.get_world_size()).all()
            assert (self.b.cpu() == (self.b_nocomp - 5) * idist.get_world_size()).all()
            assert self.c == pytest.approx((self.c_nocomp + 1.23456) * idist.get_world_size())
            assert self.n == (self.n_nocomp + 1) * idist.get_world_size()
            assert self.m == self.num_updates * (idist.get_world_size() - 1) - 1
            assert self.k == 10000 - self.num_updates * (idist.get_world_size() - 1)
            temp_prod_nocomp = 5 * self.prod_nocomp  # new variable for the recomputing
            temp_prod_nocomp = temp_prod_nocomp.pow(idist.get_world_size())
            assert (self.prod.cpu() == temp_prod_nocomp).all()
        @reinit__is_reduced
        def update(self, output):
            # SUM op
            self.n += 1
            self.c += 1.23456
            self.a += 10.0
            self.b -= 5.0
            # MAX op
            self.m += idist.get_rank()
            # MIN op
            self.k -= idist.get_rank()
            # numper of updates for (MAX, MIN) ops
            self.num_updates += 1
            # PRODUCT op
            self.prod *= 5
    metric_device = device if torch.device(device).type != "xla" else "cpu"
    m = DummyMetric(device=metric_device)
    m.update(None)
    m.compute()
    # check if can call compute multiple times without all reduce invocation
    m.compute()
def _test_creating_on_xla_fails(device):
    """Metrics must refuse to be constructed directly on an XLA device."""
    with pytest.raises(ValueError, match=r"Cannot create metric on an XLA device. Use device='cpu' instead."):
        DummyMetric2(device=device)
# Backend-specific wrappers: each runs the shared _test_* helpers above under
# a particular distributed configuration supplied by a conftest fixture.
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif(torch.cuda.device_count() < 1, reason="Skip if no GPU")
def test_distrib_nccl_gpu(distributed_context_single_node_nccl):
    """Single-node NCCL (GPU) backend."""
    device = idist.device()
    _test_distrib_sync_all_reduce_decorator(device)
    _test_invalid_sync_all_reduce(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
def test_distrib_gloo_cpu_or_gpu(distributed_context_single_node_gloo):
    """Single-node gloo backend (CPU or GPU)."""
    device = idist.device()
    _test_distrib_sync_all_reduce_decorator(device)
    _test_invalid_sync_all_reduce(device)
@pytest.mark.distributed
@pytest.mark.skipif(not idist.has_hvd_support, reason="Skip if no Horovod dist support")
@pytest.mark.skipif("WORLD_SIZE" in os.environ, reason="Skip if launched as multiproc")
def test_distrib_hvd(gloo_hvd_executor):
    """Horovod backend, spawned via the gloo executor."""
    device = "cpu" if not torch.cuda.is_available() else "cuda"
    nproc = 4 if not torch.cuda.is_available() else torch.cuda.device_count()
    gloo_hvd_executor(_test_distrib_sync_all_reduce_decorator, (device,), np=nproc, do_init=True)
    gloo_hvd_executor(_test_invalid_sync_all_reduce, (device,), np=nproc, do_init=True)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_gloo_cpu_or_gpu(distributed_context_multi_node_gloo):
    """Multi-node gloo backend."""
    device = idist.device()
    _test_distrib_sync_all_reduce_decorator(device)
    _test_invalid_sync_all_reduce(device)
@pytest.mark.multinode_distributed
@pytest.mark.skipif(not idist.has_native_dist_support, reason="Skip if no native dist support")
@pytest.mark.skipif("GPU_MULTINODE_DISTRIB" not in os.environ, reason="Skip if not multi-node distributed")
def test_multinode_distrib_nccl_gpu(distributed_context_multi_node_nccl):
    """Multi-node NCCL backend."""
    device = idist.device()
    _test_distrib_sync_all_reduce_decorator(device)
    _test_invalid_sync_all_reduce(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" in os.environ, reason="Skip if NUM_TPU_WORKERS is in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_single_device_xla():
    """Single XLA device."""
    device = idist.device()
    _test_distrib_sync_all_reduce_decorator(device)
    _test_creating_on_xla_fails(device)
    _test_invalid_sync_all_reduce(device)
def _test_distrib_xla_nprocs(index):
    """Per-process body for the multi-worker XLA run below."""
    device = idist.device()
    _test_distrib_sync_all_reduce_decorator(device)
    _test_creating_on_xla_fails(device)
    _test_invalid_sync_all_reduce(device)
@pytest.mark.tpu
@pytest.mark.skipif("NUM_TPU_WORKERS" not in os.environ, reason="Skip if no NUM_TPU_WORKERS in env vars")
@pytest.mark.skipif(not idist.has_xla_support, reason="Skip if no PyTorch XLA package")
def test_distrib_xla_nprocs(xmp_executor):
    """Multi-worker XLA run via xmp executor."""
    n = int(os.environ["NUM_TPU_WORKERS"])
    xmp_executor(_test_distrib_xla_nprocs, args=(), nprocs=n)
def test_completed():
    """completed() publishes compute()'s result into engine.state.metrics.

    Tensors are unwrapped to Python numbers; mapping results spread their
    keys AND are stored whole under the attach name; other values are
    stored as-is.
    """
    class DummyMetric(Metric):
        def reset(self):
            pass
        def compute(self):
            pass
        def update(self, output):
            pass
    m = DummyMetric()
    # tensor
    engine = MagicMock(state=State(metrics={}))
    m.compute = MagicMock(return_value=torch.tensor(1.0))
    m.completed(engine, "metric")
    assert engine.state.metrics == {"metric": 1.0}
    assert isinstance(engine.state.metrics["metric"], numbers.Number)
    # mapping
    engine = MagicMock(state=State(metrics={}))
    metrics = {"foo": 1, "bar": torch.tensor(2.0), "baz": {"qux": "quux"}}
    m.compute = MagicMock(return_value=metrics)
    # A name colliding with one of the mapping's own keys is an error.
    with pytest.raises(ValueError, match=r"Argument name 'foo' is conflicting with mapping keys"):
        m.completed(engine, "foo")
    m.completed(engine, "metric")
    # The whole mapping is also stored under the attach name (self-reference).
    metrics["metric"] = metrics
    assert engine.state.metrics == metrics
    # other
    engine = MagicMock(state=State(metrics={}))
    m.compute = MagicMock(return_value="foo")
    m.completed(engine, "metric")
    assert engine.state.metrics == {"metric": "foo"}
def test_usage_exception():
    """attach() rejects usage arguments that are neither known names nor MetricUsage."""
    engine = Engine(lambda e, b: b)
    m = DummyMetric2()
    with pytest.raises(TypeError, match=r"Unhandled usage type"):
        m.attach(engine, "dummy", usage=1)
    with pytest.raises(ValueError, match=r"usage should be 'EpochWise.usage_name' or 'BatchWise.usage_name'"):
        m.attach(engine, "dummy", usage="fake")
def test_epochwise_usage():
    """EpochWise usage: metric accumulates over a whole epoch, reset each epoch."""
    class MyMetric(Metric):
        def __init__(self):
            super(MyMetric, self).__init__()
            self.value = []
        def reset(self):
            self.value = []
        def compute(self):
            return self.value
        def update(self, output):
            self.value.append(output)
    def test(usage):
        engine = Engine(lambda e, b: b)
        m = MyMetric()
        m.attach(engine, "ewm", usage=usage)
        @engine.on(Events.EPOCH_COMPLETED)
        def _():
            ewm = engine.state.metrics["ewm"]
            # All three batches of the epoch are present.
            assert len(ewm) == 3
            assert ewm == [0, 1, 2]
        engine.run([0, 1, 2], max_epochs=10)
        m.detach(engine, usage=usage)
    # Accepted spellings: string name, class attribute, instance.
    test("epoch_wise")
    test(EpochWise.usage_name)
    test(EpochWise())
def test_batchwise_usage():
    """BatchWise usage: metric is reset and recomputed every iteration."""
    class MyMetric(Metric):
        def __init__(self):
            super(MyMetric, self).__init__()
            self.value = []
        def reset(self):
            self.value = []
        def compute(self):
            return self.value
        def update(self, output):
            self.value.append(output)
    def test(usage):
        engine = Engine(lambda e, b: b)
        m = MyMetric()
        m.attach(engine, "bwm", usage=usage)
        @engine.on(Events.ITERATION_COMPLETED)
        def _():
            bwm = engine.state.metrics["bwm"]
            # Exactly the current batch is present.
            assert len(bwm) == 1
            assert bwm[0] == (engine.state.iteration - 1) % 3
        engine.run([0, 1, 2], max_epochs=10)
        m.detach(engine, usage=usage)
    test("batch_wise")
    test(BatchWise.usage_name)
    test(BatchWise())
def test_batchfiltered_usage():
    """BatchFiltered(every=2): only every second batch reaches update()."""
    class MyMetric(Metric):
        def __init__(self):
            super(MyMetric, self).__init__()
            self.value = []
        def reset(self):
            self.value = []
        def compute(self):
            return self.value
        def update(self, output):
            self.value.append(output)
    engine = Engine(lambda e, b: b)
    m = MyMetric()
    usage = BatchFiltered(every=2)
    m.attach(engine, "bfm", usage=usage)
    @engine.on(Events.EPOCH_COMPLETED)
    def _():
        bfm = engine.state.metrics["bfm"]
        # Of batches [0, 1, 2, 3], only iterations 2 and 4 (values 1 and 3) update.
        assert len(bfm) == 2
        assert bfm[0] == 1
    engine.run([0, 1, 2, 3], max_epochs=10)
def test_override_required_output_keys():
    """Overriding required_output_keys lets a metric receive extra keys (here 'x')."""
    # https://discuss.pytorch.org/t/how-access-inputs-in-custom-ignite-metric/91221/5
    import torch.nn as nn
    from ignite.engine import create_supervised_evaluator
    counter = [0]
    class CustomMetric(Metric):
        required_output_keys = ("y_pred", "y", "x")
        def __init__(self, *args, **kwargs):
            super().__init__(*args, **kwargs)
        def update(self, output):
            y_pred, y, x = output
            assert y_pred.shape == (4, 3)
            assert y.shape == (4,)
            assert x.shape == (4, 10)
            # The extra 'x' key carries the raw input batch through unchanged.
            assert x.equal(data[counter[0]][0])
            assert y.equal(data[counter[0]][1])
            counter[0] += 1
        def reset(self):
            pass
        def compute(self):
            pass
    model = nn.Linear(10, 3)
    # Precision keeps the default ('y_pred', 'y') keys; both must coexist.
    metrics = {"Precision": Precision(), "CustomMetric": CustomMetric()}
    evaluator = create_supervised_evaluator(
        model, metrics=metrics, output_transform=lambda x, y, y_pred: {"x": x, "y": y, "y_pred": y_pred}
    )
    data = [
        (torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
        (torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
        (torch.rand(4, 10), torch.randint(0, 3, size=(4,))),
    ]
    evaluator.run(data)
|
import sys
import os
import struct
import io
def requires(moduleName):
    """Mark a function as requiring an optional module dependency.

    The decorated function runs only when *moduleName* has already been
    imported somewhere in the process (i.e. it is present in
    ``sys.modules``); otherwise an ImportError is raised at call time.

    :param moduleName: name of the required module, as it appears in
        ``sys.modules``.
    :raises ImportError: (from the wrapper) when the module is unavailable.
    """
    import functools  # local import keeps this module's top-level deps unchanged
    def decorate_function(fn):
        # functools.wraps preserves fn's __name__/__doc__ so introspection
        # and the error message below stay meaningful after decoration.
        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            if moduleName in sys.modules:
                return fn(*args, **kwargs)
            raise ImportError(f'{fn.__name__}: This action requires the {moduleName} module.')
        return wrapper
    return decorate_function
class IOExtensionMixin():
    """Mixin of binary-stream helpers for length-prefixed strings and numbers.

    Intended to be combined with an ``io`` stream class (see BytesIO/FileIO
    below); it relies only on the host stream's read/write/seek methods.
    All multi-byte integers and floats are big-endian.
    """
    def is_eof(self):
        """Returns whether the stream is at EOF; the position is left unchanged."""
        s = self.read(1)
        if (s != b''):
            # Undo the probe read so the caller's position is unaffected.
            self.seek(-1, os.SEEK_CUR)
        return s == b''
    def read_string(self):
        """Reads an ASCII string stored with a one-byte length prefix (max 255)."""
        length = int.from_bytes(self.read(1), byteorder='big')
        stringBytes = self.read(length)
        return stringBytes.decode('ascii')
    def write_string(self, s):
        """Writes an ASCII string with a one-byte length prefix (max 255).

        Raises Exception if the string is longer than 255 characters, and
        UnicodeEncodeError (before anything is written) for non-ASCII input.
        """
        length = len(s)
        if length > 255:
            raise Exception(f'String exceeds maximum length for packing: {length}')
        stringBytes = s.encode('ascii')
        self.write(bytes([length]))
        self.write(stringBytes)
    def read_big_string(self):
        """Reads a UTF-8 string stored with a signed four-byte byte-count prefix."""
        length = int.from_bytes(self.read(4), byteorder='big', signed=True)
        stringBytes = self.read(length)
        return stringBytes.decode('utf-8')
    def write_big_string(self, s):
        """Writes a UTF-8 string with a signed four-byte length prefix.

        BUGFIX: the prefix is the encoded *byte* count.  The previous code
        wrote ``len(s)`` (the character count), which broke the
        read_big_string round-trip for any string containing multi-byte
        UTF-8 characters.
        """
        stringBytes = s.encode('utf-8')
        self.write(len(stringBytes).to_bytes(4, 'big', signed=True))
        self.write(stringBytes)
    def read_int(self, byteorder='big', signed=True):
        """Reads a four-byte integer."""
        return int.from_bytes(self.read(4), byteorder=byteorder, signed=signed)
    def write_int(self, n, byteorder='big', signed=True):
        """Writes a four-byte integer."""
        intBytes = n.to_bytes(4, byteorder, signed=signed)
        self.write(intBytes)
    def read_single(self):
        """Reads a big-endian IEEE-754 single-precision float."""
        singleBytes = self.read(4)
        return struct.unpack('>f', singleBytes)[0]
    def write_single(self, s):
        """Writes a big-endian IEEE-754 single-precision float."""
        singleBytes = struct.pack('>f', s)
        self.write(singleBytes)
    def read_7bit_encoded_int(self):
        """Reads a 7-bit-encoded number.
        7-bit-encoded numbers are encoded with the following algorithm:
        - If the number fits in 7 bits (< 128), write this byte and stop
        - Otherwise, write the least significant 7 bits of the number,
          and set the most significant bit of the byte to 1, then shift
          the number to remove those bits and repeat.
        The advantage of this encoding is it supports numbers of any size.
        """
        result = 0
        index = -1
        while True:
            index += 1
            byte_value = ord(self.read(1))
            result |= (byte_value & 0x7f) << (7 * index)
            if byte_value & 0x80 == 0:
                break
        return result
    def write_7bit_encoded_int(self, n):
        """Writes a 7-bit-encoded number.
        See read_7bit_encoded_int for more information on what a 7-bit-encoded numbers.
        """
        # NOTE(review): negative inputs are written as their absolute value —
        # the sign is not encoded and cannot be recovered on read; confirm
        # callers never pass negatives.
        value = abs(n)
        while value >= 0x80:
            self.write(bytes([(value | 0x80) & 0xFF]))
            value >>= 7
        self.write(bytes([value & 0xFF]))
    def read_string_7b(self):
        """Reads a string prefixed with length as 7-bit-encoded data.
        See read_7bit_encoded_int for more information on what a 7-bit-encoded numbers.
        """
        length = self.read_7bit_encoded_int()
        stringBytes = self.read(length)
        return stringBytes.decode('ascii')
    def write_string_7b(self, s):
        """Writes a string prefixed with length as 7-bit-encoded data.
        See read_7bit_encoded_int for more information on what a 7-bit-encoded numbers.
        """
        length = len(s)
        stringBytes = s.encode('ascii')
        self.write_7bit_encoded_int(length)
        self.write(stringBytes)
# Deliberately shadows io.BytesIO within this module: same in-memory stream,
# plus the IOExtensionMixin helpers.
class BytesIO(io.BytesIO, IOExtensionMixin):
    """An enhanced version of BytesIO that includes additional functions."""
    pass
# File-backed counterpart of the enhanced BytesIO above.
class FileIO(io.FileIO, IOExtensionMixin):
    """An enhanced version of FileIO that includes additional functions."""
    pass
<reponame>hassanakbar4/ietfdb
# Copyright The IETF Trust 2016-2019, All Rights Reserved
import sys
import time
from textwrap import dedent
import debug # pyflakes:ignore
from django.conf import settings
from django.core.management.base import BaseCommand
from django.core.exceptions import MultipleObjectsReturned
# Mailman lives outside the normal Python path; extend it before importing.
sys.path.append(settings.MAILMAN_LIB_DIR)
# Mailman is an optional runtime dependency: remember whether it imported so
# import_mailman_listinfo() can skip gracefully on hosts without it.
have_mailman = False
try:
    from Mailman import Utils
    from Mailman import MailList
    from Mailman import MemberAdaptor
    have_mailman = True
except ImportError:
    pass
from ietf.mailinglists.models import List, Subscribed
from ietf.utils.log import log
from ietf.utils.text import decode
# Module-level timestamp used by log_time() to report elapsed time per step.
mark = time.time()
def import_mailman_listinfo(verbosity=0):
    """Synchronize Django List/Subscribed rows with the local Mailman state.

    For every Mailman list: upsert the List row (description, advertised
    flag) and, for advertised lists, reconcile the subscriber set — removing
    addresses no longer subscribed and adding new enabled subscribers.
    verbosity: >1 enables timing logs, >2 enables per-item stdout notes.
    """
    def note(msg):
        # stdout chatter, only at verbosity > 2.
        if verbosity > 2:
            sys.stdout.write(msg)
            sys.stdout.write('\n')
    def log_time(msg):
        # Log msg plus seconds elapsed since the previous log_time() call.
        global mark
        if verbosity > 1:
            t = time.time()
            log(msg+' (%.1fs)'% (t-mark))
            mark = t
    if not have_mailman:
        note("Could not import mailman modules -- skipping import of mailman list info")
        return
    log("Starting import of list info from Mailman")
    names = list(Utils.list_names())
    names.sort()
    log_time("Fetched list of mailman list names")
    addr_max_length = Subscribed._meta.get_field('email').max_length
    # One upfront query: list name -> set of currently known subscriber emails.
    subscribed = { l.name: set(l.subscribed_set.values_list('email', flat=True)) for l in List.objects.all().prefetch_related('subscribed_set') }
    log_time("Computed dictionary of list members")
    for name in names:
        mlist = MailList.MailList(name, lock=False)
        note("List: %s" % mlist.internal_name())
        log_time("Fetched Mailman list object for %s" % name)
        lists = List.objects.filter(name=mlist.real_name)
        # Collapse accidental duplicate rows down to one before updating.
        if lists.count() > 1:
            # Arbitrary choice; we'll update the remaining item next
            for item in lists[1:]:
                item.delete()
        mmlist, created = List.objects.get_or_create(name=mlist.real_name)
        dirty = False
        desc = decode(mlist.description)[:256]
        if mmlist.description != desc:
            mmlist.description = desc
            dirty = True
        if mmlist.advertised != mlist.advertised:
            mmlist.advertised = mlist.advertised
            dirty = True
        if dirty:
            mmlist.save()
            log_time(" Updated database List object for %s" % name)
        # The following calls return lowercased addresses
        if mlist.advertised:
            members = mlist.getRegularMemberKeys() + mlist.getDigestMemberKeys()
            log_time(" Fetched list of list members")
            # Only subscribers whose delivery is enabled count as members.
            members = set([ m for m in members if mlist.getDeliveryStatus(m) == MemberAdaptor.ENABLED ])
            log_time(" Filtered list of list members")
            if not mlist.real_name in subscribed:
                log("Note: didn't find '%s' in the dictionary of subscriptions" % mlist.real_name)
                continue
            known = subscribed[mlist.real_name]
            log_time(" Fetched known list members from database")
            # Set difference drives the reconciliation in both directions.
            to_remove = known - members
            to_add = members - known
            for addr in to_remove:
                note(" Removing subscription: %s" % (addr))
                old = Subscribed.objects.get(email=addr)
                log_time(" Fetched subscribed object")
                old.lists.remove(mmlist)
                log_time(" Removed %s from %s" % (mmlist, old))
                # Garbage-collect addresses with no remaining subscriptions.
                if old.lists.count() == 0:
                    note(" Removing address with no subscriptions: %s" % (addr))
                    old.delete()
                    log_time(" Removed %s" % old)
            log_time(" Removed addresses no longer subscribed")
            if to_remove:
                log(" Removed %s addresses from %s" % (len(to_remove), name))
            for addr in to_add:
                # Skip addresses that would overflow the email column.
                if len(addr) > addr_max_length:
                    sys.stderr.write(" ** Email address subscribed to '%s' too long for table: <%s>\n" % (name, addr))
                    continue
                note(" Adding subscription: %s" % (addr))
                try:
                    new, created = Subscribed.objects.get_or_create(email=addr)
                except MultipleObjectsReturned as e:
                    sys.stderr.write(" ** Error handling %s in %s: %s\n" % (addr, name, e))
                    continue
                new.lists.add(mmlist)
            log_time(" Added new addresses")
            if to_add:
                log(" Added %s addresses to %s" % (len(to_add), name))
    log("Completed import of list info from Mailman")
class Command(BaseCommand):
    """
    Import list information from Mailman.
    Import announced list names, descriptions, and subscribers, by calling the
    appropriate Mailman functions and adding entries to the database.
    Run this from cron regularly, with sufficient permissions to access the
    mailman database files.
    """
    # The class docstring above doubles as the command's --help text, so it
    # must stay user-facing (do not turn it into developer notes).
    help = dedent(__doc__).strip()
    #option_list = BaseCommand.option_list + ( )
    def handle(self, *filenames, **options):
        """
        * Import announced lists, with appropriate meta-information.
        * For each list, import the members.
        """
        # NOTE(review): assumes Django always supplies 'verbosity' in
        # options (its default is 1) — int(None) would raise otherwise.
        verbosity = int(options.get('verbosity'))
        import_mailman_listinfo(verbosity)
|
<filename>DailyCodingProblem/112_Twitter_Find_Lowest_Common_Ancestor_of_Two_Nodes_In_A_Tree.py
"""
This problem was asked by Twitter.
Given a binary tree, find the lowest common ancestor (LCA) of two given nodes in the tree.
Assume that each node in the tree also has a pointer to its parent.
According to the definition of LCA on Wikipedia(https://en.wikipedia.org/wiki/Lowest_common_ancestor):
“The lowest common ancestor is defined between two nodes v and w as the lowest node in T that
has both v and w as descendants (where we allow a node to be a descendant of itself).”
"""
class Node:
    """Binary-search-tree node that also keeps a pointer to its parent."""

    def __init__(self, data, left=None, right=None, parent=None):
        self.data = data
        self.left = left
        self.right = right
        self.parent = parent

    def add_node(self, node):
        """Attach *node* as a child (right if greater, left otherwise) and set its parent."""
        if node.data > self.data:
            self.right = node
        else:
            self.left = node
        node.parent = self

    def __repr__(self):
        return f"{self.data}"
def find_lowest_common_ancestor(node_a, node_b):
    """Return the data of the lowest common ancestor of two tree nodes.

    Follows each node's ``parent`` pointer to build the root-to-node path
    for both nodes, then returns the deepest value shared by both paths
    (a node counts as its own ancestor).

    :param node_a: first node; must expose ``data`` and ``parent``
    :param node_b: second node
    :return: the ``data`` of the LCA
    """

    def path_from_root(node):
        # Iterative walk up to the root. The original recursive helper used a
        # mutable default argument (``path=[]``) — a classic Python pitfall —
        # and built O(n^2) intermediate lists via repeated concatenation.
        path = []
        while node is not None:
            path.append(node.data)
            node = node.parent
        path.reverse()
        return path

    path_a = path_from_root(node_a)
    path_b = path_from_root(node_b)
    # List membership (not a set) keeps unhashable data working; paths are
    # only as long as the tree height, so this stays cheap.
    common = [data for data in path_a if data in path_b]
    return common[-1]  # deepest shared entry is the LCA
# simpler method:
# observation the LCA is always between the two values
def find_lowest_common_ancestor_redux(root, node_a, node_b):
    """BST shortcut: the LCA is the first node whose value lies between the two targets.

    Descend from *root*: go right while both values are larger, left while
    both are smaller; the first node that splits them is the LCA.
    """
    current = root
    while True:
        if node_a.data > current.data < node_b.data:
            current = current.right
        elif node_a.data < current.data > node_b.data:
            current = current.left
        else:
            return current.data
# LCA can also be thought of as finding the merge point of two linked-lists(since parent is provided)
# idea from solution to my HackerRank "Find Merge Points of Two Lists"
def find_lowest_common_ancestor_redux_redux(node_a, node_b):
    """Two-pointer LCA via parent links (the 'merge point of two lists' trick).

    Each walker climbs toward the root; on reaching it, it restarts from the
    *other* node. Both walkers traverse the same total distance, so they
    meet exactly at the lowest common ancestor.
    """
    walker_a, walker_b = node_a, node_b
    while walker_a != walker_b:
        walker_a = node_b if walker_a.parent is None else walker_a.parent
        walker_b = node_a if walker_b.parent is None else walker_b.parent
    return walker_b.data
if __name__ == '__main__':
    # Tree under test:
    #         5
    #        / \
    #       4   9
    #      /   / \
    #     3   6   13
    #          \  / \
    #           8 10 14
    #          /
    #         7
    n5, n4, n3, n9, n6, n8, n7, n13, n10, n14 = (
        Node(v) for v in (5, 4, 3, 9, 6, 8, 7, 13, 10, 14))
    edges = ((n5, n4), (n4, n3), (n5, n9), (n9, n6), (n6, n8),
             (n8, n7), (n9, n13), (n13, n10), (n13, n14))
    for parent, child in edges:
        parent.add_node(child)
    # Expected LCA for each query pair: 5, 5, 4, 6
    queries = ((n4, n9), (n5, n4), (n3, n4), (n7, n6))
    for x, y in queries:
        print(find_lowest_common_ancestor(x, y))
    print("\n\n")
    for x, y in queries:
        print(find_lowest_common_ancestor_redux(n5, x, y))
    print("\n\n")
    for x, y in queries:
        print(find_lowest_common_ancestor_redux_redux(x, y))
# -*- coding: utf-8 -*-
# Copyright 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Exposes functionality needed for parsing SMILES strings.
"""
import enum
import logging
import networkx as nx
from .smiles_helper import (add_explicit_hydrogens, remove_explicit_hydrogens,
parse_atom, fill_valence, mark_aromatic_edges,
mark_aromatic_atoms)
LOGGER = logging.getLogger(__name__)
@enum.unique
class TokenType(enum.Enum):
    """The kinds of lexical tokens found in a SMILES string."""
    ATOM = enum.auto()          # 1 -- bracketed or organic-subset atom
    BOND_TYPE = enum.auto()     # 2 -- one of - = # $ : .
    BRANCH_START = enum.auto()  # 3 -- '('
    BRANCH_END = enum.auto()    # 4 -- ')'
    RING_NUM = enum.auto()      # 5 -- ring-bond closure digit(s)
    EZSTEREO = enum.auto()      # 6 -- '/' or '\'
def _tokenize(smiles):
    """
    Iterates over a SMILES string, yielding tokens.

    Parameters
    ----------
    smiles : iterable
        The SMILES string to iterate over

    Yields
    ------
    tuple(TokenType, str)
        A tuple describing the type of token and the associated data
    """
    organic_subset = 'B C N O P S F Cl Br I * b c n o s p'.split()
    smiles = iter(smiles)
    token = ''
    peek = None
    while True:
        # `peek` holds a single look-ahead character left over from the
        # two-letter element check below; it takes priority over the stream.
        char = peek if peek else next(smiles, '')
        peek = None
        if not char:
            break
        if char == '[':
            # Bracket atom: consume everything up to and including ']'.
            token = char
            for char in smiles:
                token += char
                if char == ']':
                    break
            yield TokenType.ATOM, token
        elif char in organic_subset:
            # Might be a two-letter element (Cl, Br): look one char ahead.
            peek = next(smiles, '')
            if char + peek in organic_subset:
                yield TokenType.ATOM, char + peek
                peek = None
            else:
                # Not a two-letter element; keep `peek` for the next round.
                yield TokenType.ATOM, char
        elif char in '-=#$:.':
            yield TokenType.BOND_TYPE, char
        elif char == '(':
            yield TokenType.BRANCH_START, '('
        elif char == ')':
            yield TokenType.BRANCH_END, ')'
        elif char == '%':
            # Two-digit ring-bond number.
            # If smiles is too short this will raise a ValueError, which is
            # (slightly) prettier than a StopIteration.
            yield TokenType.RING_NUM, int(next(smiles, '') + next(smiles, ''))
        elif char in '/\\':
            yield TokenType.EZSTEREO, char
        elif char.isdigit():
            yield TokenType.RING_NUM, int(char)
def read_smiles(smiles, explicit_hydrogen=False, zero_order_bonds=True,
                reinterpret_aromatic=True):
    """
    Parses a SMILES string.

    Parameters
    ----------
    smiles : iterable
        The SMILES string to parse. Should conform to the OpenSMILES
        specification.
    explicit_hydrogen : bool
        Whether hydrogens should be explicit nodes in the output graph, or be
        implicit in 'hcount' attributes.
    zero_order_bonds : bool
        Whether disconnected fragments ('.' bonds) should still be joined by
        edges with an 'order' of 0.
    reinterpret_aromatic : bool
        Whether aromaticity should be determined from the created molecule,
        instead of taken from the SMILES string.

    Returns
    -------
    nx.Graph
        A graph describing a molecule. Nodes will have an 'element', 'aromatic'
        and a 'charge', and if `explicit_hydrogen` is False a 'hcount'.
        Depending on the input, they will also have 'isotope' and 'class'
        information.
        Edges will have an 'order'.
    """
    bond_to_order = {'-': 1, '=': 2, '#': 3, '$': 4, ':': 1.5, '.': 0}
    mol = nx.Graph()
    anchor = None      # index of the atom new bonds attach to
    idx = 0            # index the *next* atom will receive
    default_bond = 1
    next_bond = None   # explicit bond order pending for the next edge
    branches = []      # stack of anchors for '(' ... ')' nesting
    ring_nums = {}     # open ring-bond markers -> (atom index, bond order)
    for tokentype, token in _tokenize(smiles):
        if tokentype == TokenType.ATOM:
            mol.add_node(idx, **parse_atom(token))
            if anchor is not None:
                if next_bond is None:
                    next_bond = default_bond
                if next_bond or zero_order_bonds:
                    mol.add_edge(anchor, idx, order=next_bond)
                next_bond = None
            anchor = idx
            idx += 1
        elif tokentype == TokenType.BRANCH_START:
            branches.append(anchor)
        elif tokentype == TokenType.BRANCH_END:
            anchor = branches.pop()
        elif tokentype == TokenType.BOND_TYPE:
            if next_bond is not None:
                raise ValueError('Previous bond (order {}) not used. '
                                 'Overwritten by "{}"'.format(next_bond, token))
            next_bond = bond_to_order[token]
        elif tokentype == TokenType.RING_NUM:
            if token in ring_nums:
                # Second occurrence of the marker: close the ring bond.
                jdx, order = ring_nums[token]
                if next_bond is None and order is None:
                    next_bond = default_bond
                elif order is None:  # Note that the check is needed,
                    next_bond = next_bond  # But this could be pass.
                elif next_bond is None:
                    next_bond = order
                elif next_bond != order:  # Both are not None
                    raise ValueError('Conflicting bond orders for ring '
                                     'between indices {}'.format(token))
                # idx is the index of the *next* atom we're adding. So: -1.
                if mol.has_edge(idx-1, jdx):
                    raise ValueError('Edge specified by marker {} already '
                                     'exists'.format(token))
                if idx-1 == jdx:
                    raise ValueError('Marker {} specifies a bond between an '
                                     'atom and itself'.format(token))
                if next_bond or zero_order_bonds:
                    mol.add_edge(idx - 1, jdx, order=next_bond)
                next_bond = None
                del ring_nums[token]
            else:
                # First occurrence: remember where the ring bond starts.
                if idx == 0:
                    raise ValueError("Can't have a marker ({}) before an atom"
                                     "".format(token))
                # idx is the index of the *next* atom we're adding. So: -1.
                ring_nums[token] = (idx - 1, next_bond)
                next_bond = None
        elif tokentype == TokenType.EZSTEREO:
            LOGGER.warning('E/Z stereochemical information, which is specified by "%s", will be discarded', token)
    if ring_nums:
        raise KeyError('Unmatched ring indices {}'.format(list(ring_nums.keys())))
    # Time to deal with aromaticity. This is a mess, because it's not super
    # clear what aromaticity information has been provided, and what should be
    # inferred. In addition, to what extend do we want to provide a "sane"
    # molecule, even if this overrides what the SMILES string specifies?
    cycles = nx.cycle_basis(mol)
    ring_idxs = set()
    for cycle in cycles:
        ring_idxs.update(cycle)
    non_ring_idxs = set(mol.nodes) - ring_idxs
    for n_idx in non_ring_idxs:
        if mol.nodes[n_idx].get('aromatic', False):
            raise ValueError("You specified an aromatic atom outside of a"
                             " ring. This is impossible")
    mark_aromatic_edges(mol)
    fill_valence(mol)
    if reinterpret_aromatic:
        mark_aromatic_atoms(mol)
        mark_aromatic_edges(mol)
        # Demote aromatic (1.5) bond orders to single where either endpoint
        # turned out non-aromatic after re-interpretation.
        for idx, jdx in mol.edges:
            if ((not mol.nodes[idx].get('aromatic', False) or
                 not mol.nodes[jdx].get('aromatic', False))
                    and mol.edges[idx, jdx].get('order', 1) == 1.5):
                mol.edges[idx, jdx]['order'] = 1
    if explicit_hydrogen:
        add_explicit_hydrogens(mol)
    else:
        remove_explicit_hydrogens(mol)
    return mol
|
<reponame>tacaswell/pyFAI<filename>pyFAI/benchmark/__init__.py
#!/usr/bin/env python
# coding: utf-8
#
# Copyright (C) 2016-2018 European Synchrotron Radiation Facility, Grenoble, France
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
"Benchmark for Azimuthal integration of PyFAI"
__author__ = "<NAME>"
__date__ = "13/01/2022"
__license__ = "MIT"
__copyright__ = "2012-2017 European Synchrotron Radiation Facility, Grenoble, France"
from collections import OrderedDict
import json
import sys
import time
import timeit
import os
import platform
import subprocess
import fabio
import os.path as op
from math import ceil
# To use use the locally build version of PyFAI, use ../bootstrap.py
from .. import load
from ..azimuthalIntegrator import AzimuthalIntegrator
from ..method_registry import IntegrationMethod, Method
from ..utils import mathutil
from ..test.utilstest import UtilsTest
from ..opencl import pyopencl, ocl
# Use the interactive matplotlib helpers when the GUI stack is available;
# otherwise fall back to no-op stubs so the benchmark can run headless.
try:
    from ..gui.matplotlib import pyplot, pylab
    from ..gui.utils import update_fig as _update_fig

    def update_fig(*args, **kwargs):
        # Give the GUI event loop a moment to process before redrawing.
        pyplot.pause(0.1)
        _update_fig(*args, **kwargs)

except ImportError:
    pylab = None  # checked elsewhere to detect the headless case

    def update_fig(*args, **kwargs):
        pass
# Geometry (PONI) files of the datasets to benchmark.
ds_list = ["Pilatus1M.poni",
           "Pilatus2M.poni",
           "Eiger4M.poni",
           "Pilatus6M.poni",
           "Eiger9M.poni",
           "Mar3450.poni",
           "Fairchild.poni"]
# Image file associated with each geometry file.
datasets = {"Fairchild.poni": "Fairchild.edf",
            "halfccd.poni": "halfccd.edf",
            "Frelon2k.poni": "Frelon2k.edf",
            "Pilatus6M.poni": "Pilatus6M.cbf",
            "Pilatus1M.poni": "Pilatus1M.edf",
            "Mar3450.poni": "LaB6_260210.mar3450",
            "Pilatus2M.poni":"Pilatus2M.cbf",
            "Eiger4M.poni":"Eiger4M.edf",
            "Eiger9M.poni":"Eiger9M.h5"
            }
# Download (if needed) and resolve the local path of every PONI file.
PONIS = { i: UtilsTest.getimage(i) for i in ds_list}
# Handle to the Bench instance: allows debugging from outside if needed
bench = None
class BenchTest(object):
    """Generic class for benchmarking with `timeit.Timer`"""

    def setup(self):
        """Set the benchmark up.

        The method takes no arguments: everything must be set beforehand,
        from the constructor for example.
        """
        pass

    def stmt(self):
        """Statement to be timed.

        The method takes no arguments: everything must be set beforehand,
        from the constructor, or loaded by `setup` into a class attribute.
        """
        pass

    def setup_and_stmt(self):
        """Execute the setup then the statement."""
        self.setup()
        return self.stmt()

    def clean(self):
        """Clean up stored data"""
        pass

    def get_device(self):
        """Return the OpenCL device used by the integrator, if any.

        :return: the pyopencl device of the integrator's OpenCL engine, or
            None when no azimuthal integrator / OpenCL engine is available.
        """
        # `hasattr` replaces the original, slower `"x" in dir(...)` scans.
        # `Method` is already imported at module level, so the redundant
        # nested relative import was dropped.
        if hasattr(self, "ai") and hasattr(self.ai, "engines"):
            for method in self.ai.engines:
                if isinstance(method, Method) and method.impl == "opencl":
                    return self.ai.engines[method].engine.ctx.devices[0]
            # No Method-keyed OpenCL engine found: try the legacy string key.
            if "ocl_csr_integr" in self.ai.engines:
                return self.ai.engines["ocl_csr_integr"].engine.ctx.devices[0]
        return None
class BenchTest1D(BenchTest):
    """Test 1d integration"""

    def __init__(self, poni, file_name, unit, method, function=None,
                 error_model=None):
        """Remember every parameter needed to run a 1D integration benchmark."""
        BenchTest.__init__(self)
        self.poni = poni
        self.file_name = file_name
        self.unit = unit
        self.method = method
        self.error_model = error_model
        self.function_name = function or "integrate1d"
        # Filled in later (by setup() / the caller):
        self.compute_engine = None
        self.function = None

    def setup(self):
        """Load the image and the geometry, then resolve the integration function."""
        self.data = fabio.open(self.file_name).data
        self.N = min(self.data.shape)
        self.ai = AzimuthalIntegrator.sload(self.poni)
        self.function = self.ai.__getattribute__(self.function_name)

    def stmt(self):
        """Perform one 1D integration."""
        return self.function(self.data, self.N, safe=False,
                             unit=self.unit, method=self.method,
                             error_model=self.error_model)

    def clean(self):
        """Drop the (large) integrator and image references."""
        self.ai = None
        self.data = None
class BenchTest2D(BenchTest):
    """Test 2d integration"""

    def __init__(self, poni, file_name, unit, method, output_size):
        """Remember every parameter needed to run a 2D integration benchmark."""
        BenchTest.__init__(self)
        self.poni = poni
        self.file_name = file_name
        self.unit = unit
        self.method = method
        self.output_size = output_size

    def setup(self):
        """Load the geometry and the image to integrate."""
        self.ai = AzimuthalIntegrator.sload(self.poni)
        self.data = fabio.open(self.file_name).data
        self.N = self.output_size

    def stmt(self):
        """Perform one 2D integration."""
        npt_rad, npt_azim = self.output_size
        return self.ai.integrate2d(self.data, npt_rad, npt_azim,
                                   unit=self.unit, method=self.method)

    def clean(self):
        """Drop the (large) integrator and image references."""
        self.ai = None
        self.data = None
class BenchTestGpu(BenchTest):
    """Test XRPD in OpenCL"""

    def __init__(self, azimuthal_params, file_name, devicetype, useFp64, platformid, deviceid):
        """Remember the geometry file, image and OpenCL device selection."""
        BenchTest.__init__(self)
        self.azimuthal_params = azimuthal_params
        self.file_name = file_name
        self.devicetype = devicetype
        self.useFp64 = useFp64
        self.platformid = platformid
        self.deviceid = deviceid

    def setup(self):
        """Load everything and run one integration to prime the OpenCL engine."""
        self.ai = load(self.azimuthal_params)
        self.data = fabio.open(self.file_name).data
        self.N = min(self.data.shape)
        self.ai.xrpd_OpenCL(self.data, self.N,
                            devicetype=self.devicetype,
                            useFp64=self.useFp64,
                            platformid=self.platformid,
                            deviceid=self.deviceid)

    def stmt(self):
        """Re-run the already-initialised OpenCL integration."""
        return self.ai.xrpd_OpenCL(self.data, self.N, safe=False)

    def clean(self):
        """Drop the (large) integrator and image references."""
        self.ai = None
        self.data = None
class Bench(object):
    """Driver that runs, times, plots and saves azimuthal-integration benchmarks."""
    # ANSI escape codes for coloured terminal output.
    HEADER = '\033[95m'
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    # Human-readable labels keyed by (split, algo, impl) method triplets.
    LABELS = {("bbox", "histogram", "cython"): "CPU_serial",
              ("bbox", "lut", "cython"): "CPU_LUT_OpenMP",
              ("bbox", "lut", "opencl"): "LUT",
              ("bbox", "csr", "cython"): "CPU_CSR_OpenMP",
              ("bbox", "csr", "opencl"): "CSR",
              }

    def __init__(self, nbr=10, repeat=1, memprofile=False, unit="2th_deg", max_size=None):
        """
        :param nbr: target duration/loop count handed to timeit
        :param repeat: number of timing repetitions; the best one is kept
        :param memprofile: when True, record a memory-occupancy curve
        :param unit: pyFAI radial unit for the integrations
        :param max_size: skip images larger than this many megapixels
        """
        self.reference_1d = {}  # cache of reference results per dataset (for R-factor checks)
        self.LIMIT = 8          # max acceptable Rwp before a result is considered bad
        self.repeat = repeat
        self.nbr = nbr
        self.results = OrderedDict()
        self.meth = []
        self._cpu = None
        self.fig = None
        self.ax = None
        self.starttime = time.perf_counter()
        self.plot = None
        self.plot_x = []
        self.plot_y = []
        self.do_memprofile = memprofile
        self.fig_mp = None
        self.ax_mp = None
        self.plot_mp = None
        self.memory_profile = ([], [])
        self.unit = unit
        self.out_2d = (500, 360)
        # sys.maxunicode is only used here as a "very large number" sentinel.
        self.max_size = max_size or sys.maxunicode

    def get_cpu(self):
        """Return a cleaned-up description of the host CPU (cached after first call)."""
        if self._cpu is None:
            if os.name == "nt":
                self._cpu = platform.processor()
            elif os.path.exists("/proc/cpuinfo"):
                cpuinfo = [i.split(": ", 1)[1] for i in open("/proc/cpuinfo") if i.startswith("model name")]
                if not cpuinfo:
                    cpuinfo = [i.split(": ", 1)[1] for i in open("/proc/cpuinfo") if i.startswith("cpu")]
                self._cpu = cpuinfo[0].strip()
            elif os.path.exists("/usr/sbin/sysctl"):
                proc = subprocess.Popen(["sysctl", "-n", "machdep.cpu.brand_string"], stdout=subprocess.PIPE)
                proc.wait()
                self._cpu = proc.stdout.read().strip().decode("ASCII")
            # Collapse repeated whitespace until the string stops changing.
            old = self._cpu
            self._cpu = old.replace(" ", " ")
            while old != self._cpu:
                old = self._cpu
                self._cpu = old.replace(" ", " ")
        return self._cpu

    def get_gpu(self, devicetype="gpu", useFp64=False, platformid=None, deviceid=None):
        """Return the name of the selected OpenCL device, or "NoGPU"."""
        if ocl is None:
            return "NoGPU"
        try:
            ctx = ocl.create_context(devicetype, useFp64, platformid, deviceid)
        except Exception:
            return "NoGPU"
        else:
            return ctx.devices[0].name

    def get_mem(self):
        """
        Returns the occupied memory for memory-leak hunting in MByte
        """
        # NOTE(review): if /proc/<pid>/status exists but contains no VmRSS
        # line, `mem` would be unbound here -- confirm that cannot happen.
        pid = os.getpid()
        if os.path.exists("/proc/%i/status" % pid):
            for l in open("/proc/%i/status" % pid):
                if l.startswith("VmRSS"):
                    mem = int(l.split(":", 1)[1].split()[0]) / 1024.
        else:
            mem = 0
        return mem

    def print_init(self, t):
        """Print the initialisation time (seconds in, milliseconds out)."""
        print(" * Initialization time: %.1f ms" % (1000.0 * t))
        self.update_mp()

    def print_init2(self, tinit, trep, loops):
        """Print init/repetition timings and the chosen number of timing loops."""
        print(" * Initialization time: %.1f ms, Repetition time: %.1f ms, executing %i loops" %
              (1000.0 * tinit, 1000.0 * trep, loops))
        self.update_mp()

    def print_exec(self, t):
        """Print one execution time (seconds in, milliseconds out)."""
        print(" * Execution time rep : %.1f ms" % (1000.0 * t))
        self.update_mp()

    def print_sep(self):
        """Print a separator line."""
        print("*" * 80)
        self.update_mp()

    def get_ref(self, param):
        """Return (and cache) the reference 1D result for dataset *param*.

        Computed once with the serial cython histogram implementation and
        used as ground truth for R-factor checks.
        """
        if param not in self.reference_1d:
            file_name = UtilsTest.getimage(datasets[param])
            poni = PONIS[param]
            bench_test = BenchTest1D(poni, file_name, self.unit, ("bbox", "histogram", "cython"), function="integrate1d_ng")
            bench_test.setup()
            res = bench_test.stmt()
            bench_test.compute_engine = res.compute_engine
            self.reference_1d[param] = res
            bench_test.clean()
        return self.reference_1d[param]

    def bench_1d(self, method="splitBBox", check=False, opencl=None, function="integrate1d"):
        """
        Benchmark a 1D integration method over every dataset in `ds_list`.

        :param method: method to be benchmarked
        :param check: check results vs ref if method is LUT based
        :param opencl: dict containing platformid, deviceid and devicetype
        :param function: name of the AzimuthalIntegrator function to time
        """
        method = IntegrationMethod.select_one_available(method, dim=1, default=None, degradable=True)
        self.update_mp()
        if opencl:
            if (ocl is None):
                print("No pyopencl")
                return
            if (opencl.get("platformid") is None) or (opencl.get("deviceid") is None):
                platdev = ocl.select_device(opencl.get("devicetype"))
                if not platdev:
                    print("No such OpenCL device: skipping benchmark")
                    return
                platformid, deviceid = opencl["platformid"], opencl["deviceid"] = platdev
            else:
                platformid, deviceid = opencl["platformid"], opencl["deviceid"]
            devicetype = opencl["devicetype"] = ocl.platforms[platformid].devices[deviceid].type
            platform = str(ocl.platforms[platformid]).split()[0]
            if devicetype == "CPU":
                # Keep only the tail of the (long) CPU name for the label.
                cpu_name = (str(ocl.platforms[platformid].devices[deviceid]).split("@")[0]).split()
                device = ""
                while cpu_name and len(device) < 5:
                    device = cpu_name.pop() + "" + device
            else:
                device = ' '.join(str(ocl.platforms[platformid].devices[deviceid]).split())
            print("Working on device: %s platform: %s device: %s" % (devicetype, platform, device))
            label = ("%s %s %s %s %s" % (function, devicetype, self.LABELS[method.method[1:4]], platform, device)).replace(" ", "_")
            # Re-select the method pinned to the chosen OpenCL target.
            method = IntegrationMethod.select_method(dim=1, split=method.split_lower,
                                                     algo=method.algo_lower, impl=method.impl_lower,
                                                     target=(opencl["platformid"], opencl["deviceid"]))[0]
            print(f"function: {function} \t method: {method}")
            memory_error = (pyopencl.MemoryError, MemoryError, pyopencl.RuntimeError, RuntimeError)
        else:
            print("Working on processor: %s" % self.get_cpu())
            label = function + " " + self.LABELS[method.method[1:4]]
            memory_error = (MemoryError, RuntimeError)
        results = OrderedDict()
        first = True
        for param in ds_list:
            self.update_mp()
            file_name = UtilsTest.getimage(datasets[param])
            poni = PONIS[param]
            bench_test = BenchTest1D(poni, file_name, self.unit, method, function=function)
            bench_test.setup()
            size = bench_test.data.size / 1.0e6
            if size > self.max_size:
                continue
            print("1D integration of %s %.1f Mpixel -> %i bins" % (op.basename(file_name), size, bench_test.N))
            try:
                t0 = time.perf_counter()
                res = bench_test.stmt()
                t1 = time.perf_counter()
                # Second run (result unused) measures the warm repetition time.
                res2 = bench_test.stmt()
                t2 = time.perf_counter()
                # Choose the loop count so one timing run lasts ~self.nbr seconds.
                loops = int(ceil(self.nbr / (t2 - t1)))
                self.print_init2(t1 - t0, t2 - t1, loops)
            except memory_error as error:
                print("MemoryError: %s" % error)
                break
            if first:
                actual_device = bench_test.get_device()
                if actual_device:
                    print("Actual device used: %s" % actual_device)
            self.update_mp()
            if method.algo_lower in ("lut", "csr"):
                # Report the size of the sparse matrix actually built.
                key = Method(1, bench_test.method.split_lower, method.algo_lower, "cython", None)
                if key and key in bench_test.ai.engines:
                    engine = bench_test.ai.engines.get(key)
                    if engine:
                        integrator = engine.engine
                        if method.algo_lower == "lut":
                            print("lut: shape= %s \t nbytes %.3f MB " % (integrator.lut.shape, integrator.lut_nbytes / 2 ** 20))
                        else:
                            print("csr: size= %s \t nbytes %.3f MB " % (integrator.data.size, integrator.lut_nbytes / 2 ** 20))
            bench_test.clean()
            self.update_mp()
            try:
                # setup_and_stmt is passed as timeit's `setup`, so each
                # repetition starts from a freshly primed integrator.
                t = timeit.Timer(bench_test.stmt, bench_test.setup_and_stmt)
                tmin = min([i / loops for i in t.repeat(repeat=self.repeat, number=loops)])
            except memory_error as error:
                print(error)
                break
            self.update_mp()
            self.print_exec(tmin)
            tmin *= 1000.0
            if check:
                ref = self.get_ref(param)
                R = mathutil.rwp(res, ref)
                print("%sResults are bad with R=%.3f%s" % (self.WARNING, R, self.ENDC) if R > self.LIMIT else"%sResults are good with R=%.3f%s" % (self.OKGREEN, R, self.ENDC))
                self.update_mp()
                if R < self.LIMIT:
                    results[size] = tmin
                    self.update_mp()
                    if first:
                        if opencl:
                            self.new_curve(results, label, style="--", marker="s" if "legacy" in function else "o")
                        else:
                            self.new_curve(results, label, style="-", marker="s" if "legacy" in function else "o")
                        first = False
                    else:
                        self.new_point(size, tmin)
            else:
                results[size] = tmin
                if first:
                    self.new_curve(results, label, marker="s" if "legacy" in function else "o")
                    first = False
                else:
                    self.new_point(size, tmin)
        self.print_sep()
        self.meth.append(label)
        self.results[label] = results
        self.update_mp()

    def bench_2d(self, method="splitBBox", check=False, opencl=None):
        """Benchmark a 2D integration method (given as a string) over every dataset."""
        self.update_mp()
        if opencl:
            if (ocl is None):
                print("No pyopencl")
                return
            if (opencl.get("platformid") is None) or (opencl.get("deviceid") is None):
                platdev = ocl.select_device(opencl.get("devicetype"))
                if not platdev:
                    print("No such OpenCL device: skipping benchmark")
                    return
                platformid, deviceid = opencl["platformid"], opencl["deviceid"] = platdev
            # NOTE(review): unlike bench_1d there is no `else` branch above, so
            # `platformid`/`deviceid` look unbound when the caller provides
            # both ids -- confirm whether that path is ever exercised.
            devicetype = opencl["devicetype"] = ocl.platforms[platformid].devices[deviceid].type
            platform = str(ocl.platforms[platformid]).split()[0]
            if devicetype == "CPU":
                device = (str(ocl.platforms[platformid].devices[deviceid]).split("@")[0]).split()[-1]
            else:
                device = ' '.join(str(ocl.platforms[platformid].devices[deviceid]).split())
            print("Working on device: %s platform: %s device: %s" % (devicetype, platform, device))
            method += "_%i,%i" % (opencl["platformid"], opencl["deviceid"])
            # NOTE(review): `method` is a plain string here, so `method[1:4]`
            # is a 3-character slice, not the (split, algo, impl) tuple that
            # keys LABELS -- verify this lookup against current usage.
            label = ("2D %s %s %s %s" % (devicetype, self.LABELS[method[1:4]], platform, device)).replace(" ", "_")
            memory_error = (pyopencl.MemoryError, MemoryError, pyopencl.RuntimeError, RuntimeError)
        else:
            print("Working on processor: %s" % self.get_cpu())
            label = "2D_" + self.LABELS[method[1:4]]
            memory_error = (MemoryError, RuntimeError)
        results = OrderedDict()
        first = True
        for param in ds_list:
            self.update_mp()
            file_name = UtilsTest.getimage(datasets[param])
            poni = PONIS[param]
            bench_test = BenchTest2D(poni, file_name, self.unit, method, self.out_2d)
            bench_test.setup()
            size = bench_test.data.size / 1.0e6
            print("2D integration of %s %.1f Mpixel -> %s bins" % (op.basename(file_name), size, bench_test.N))
            try:
                t0 = time.perf_counter()
                _res = bench_test.stmt()
                self.print_init(time.perf_counter() - t0)
            except memory_error as error:
                print(error)
                break
            self.update_mp()
            if check:
                # Report the size of the sparse matrix via the legacy engine keys.
                module = sys.modules.get(AzimuthalIntegrator.__module__)
                if module:
                    if "lut" in method:
                        key = module.EXT_LUT_ENGINE
                    elif "csr" in method:
                        key = module.EXT_CSR_ENGINE
                    else:
                        key = None
                if key and module:
                    try:
                        integrator = bench_test.ai.engines.get(key).engine
                    except MemoryError as error:
                        print(error)
                    else:
                        if "lut" in method:
                            print("lut: shape= %s \t nbytes %.3f MB " % (integrator.lut.shape, integrator.lut_nbytes / 2 ** 20))
                        else:
                            print("csr: size= %s \t nbytes %.3f MB " % (integrator.data.size, integrator.lut_nbytes / 2 ** 20))
            bench_test.ai.reset()
            bench_test.clean()
            try:
                t = timeit.Timer(bench_test.stmt, bench_test.setup_and_stmt)
                tmin = min([i / self.nbr for i in t.repeat(repeat=self.repeat, number=self.nbr)])
            except memory_error as error:
                print(error)
                break
            self.update_mp()
            del t
            self.update_mp()
            self.print_exec(tmin)
            tmin *= 1000.0
            results[size] = tmin
            if first:
                self.new_curve(results, label, marker="o")
                first = False
            else:
                self.new_point(size, tmin)
            self.update_mp()
        self.print_sep()
        self.meth.append(label)
        self.results[label] = results
        self.update_mp()

    def bench_gpu1d(self, devicetype="gpu", useFp64=True, platformid=None, deviceid=None):
        """Benchmark the legacy forward xrpd_OpenCL implementation on one device."""
        self.update_mp()
        print("Working on %s, in " % devicetype + ("64 bits mode" if useFp64 else"32 bits mode") + "(%s.%s)" % (platformid, deviceid))
        if ocl is None or not ocl.select_device(devicetype):
            print("No pyopencl or no such device: skipping benchmark")
            return
        results = OrderedDict()
        label = "Forward_OpenCL_%s_%s_bits" % (devicetype, ("64" if useFp64 else"32"))
        first = True
        for param in ds_list:
            self.update_mp()
            file_name = UtilsTest.getimage(datasets[param])
            ai = load(param)
            data = fabio.open(file_name).data
            size = data.size
            N = min(data.shape)
            print("1D integration of %s %.1f Mpixel -> %i bins (%s)" % (op.basename(file_name), size / 1e6, N, ("64 bits mode" if useFp64 else"32 bits mode")))
            try:
                t0 = time.perf_counter()
                res = ai.xrpd_OpenCL(data, N, devicetype=devicetype, useFp64=useFp64, platformid=platformid, deviceid=deviceid)
                t1 = time.perf_counter()
            except Exception as error:
                print("Failed to find an OpenCL GPU (useFp64:%s) %s" % (useFp64, error))
                continue
            self.print_init(t1 - t0)
            self.update_mp()
            # Check against the plain CPU implementation.
            ref = ai.xrpd(data, N)
            R = mathutil.rwp(res, ref)
            print("%sResults are bad with R=%.3f%s" % (self.WARNING, R, self.ENDC) if R > self.LIMIT else"%sResults are good with R=%.3f%s" % (self.OKGREEN, R, self.ENDC))
            test = BenchTestGpu(param, file_name, devicetype, useFp64, platformid, deviceid)
            t = timeit.Timer(test.stmt, test.setup)
            tmin = min([i / self.nbr for i in t.repeat(repeat=self.repeat, number=self.nbr)])
            del t
            self.update_mp()
            self.print_exec(tmin)
            print("")
            if R < self.LIMIT:
                size /= 1e6
                tmin *= 1000.0
                results[size] = tmin
                if first:
                    self.new_curve(results, label, marker="o")
                    first = False
                else:
                    self.new_point(size, tmin)
                self.update_mp()
        self.print_sep()
        self.meth.append(label)
        self.results[label] = results
        self.update_mp()

    def save(self, filename=None):
        """Dump the timing results as JSON (and the figure as SVG if present)."""
        if filename is None:
            filename = f"benchmark{time.strftime('%Y%m%d-%H%M%S')}.json"
        self.update_mp()
        json.dump(self.results, open(filename, "w"), indent=4)
        if self.fig is not None:
            # filename ends in ".json": strip "json", keep the dot, append "svg".
            self.fig.savefig(filename[:-4] + "svg")

    def print_res(self):
        """Print a size x method summary table of execution times (ms)."""
        self.update_mp()
        print("Summary: execution time in milliseconds")
        print("Size/Meth\t" + "\t".join(self.meth))
        for i in self.size:
            print("%7.2f\t\t" % i + "\t\t".join("%.2f" % (self.results[j].get(i, 0)) for j in self.meth))

    def init_curve(self):
        """Create the frames-per-second vs image-size figure (when a display exists)."""
        self.update_mp()
        if self.fig:
            print("Already initialized")
            return
        # NOTE(review): operator precedence makes this
        # `(pylab and platform-check) or DISPLAY-check` -- confirm intended.
        if pylab and (sys.platform in ["win32", "darwin"]) or ("DISPLAY" in os.environ):
            self.fig, self.ax = pyplot.subplots()
            self.fig.show()
            self.ax.set_autoscale_on(False)
            self.ax.set_xlabel("Image size in mega-pixels")
            self.ax.set_ylabel("Frame per second (log scale)")
            try:
                # Matplotlib >= 3.3 spelling, falling back to the old kwarg.
                self.ax.set_yscale("log", base=2)
            except Exception:
                self.ax.set_yscale("log", basey=2)
            t = [0.5, 1, 2, 5, 10, 20, 50, 100, 200, 500, 1000]
            self.ax.set_yticks([float(i) for i in t])
            self.ax.set_yticklabels([str(i)for i in t])
            self.ax.set_xlim(0.5, 17)
            self.ax.set_ylim(0.5, 1500)
            self.ax.set_title(self.get_cpu() + " / " + self.get_gpu())
            update_fig(self.fig)

    def new_curve(self, results, label, style="-", marker="x"):
        """
        Create a new curve within the current graph

        :param results: dict with execution time in function of size
        :param label: string with the title of the curve
        :param style: the style of the line: "-" for plain line, "--" for dashed
        :param marker: matplotlib marker character for the data points
        """
        self.update_mp()
        if not self.fig:
            return
        self.plot_x = list(results.keys())
        self.plot_x.sort()
        # Plot frames-per-second (times are stored in milliseconds).
        self.plot_y = [1000.0 / results[i] for i in self.plot_x]
        self.plot = self.ax.plot(self.plot_x, self.plot_y, marker + style, label=label)[0]
        self.ax.legend()
        update_fig(self.fig)

    def new_point(self, size, exec_time):
        """
        Add new point to current curve

        :param size: of the system
        :param exec_time: execution time in ms
        """
        self.update_mp()
        if not self.plot:
            return
        self.plot_x.append(size)
        self.plot_y.append(1000.0 / exec_time)
        self.plot.set_data(self.plot_x, self.plot_y)
        update_fig(self.fig)

    def display_all(self):
        """Re-plot every recorded curve and save the figure as PNG."""
        if not self.fig:
            return
        for k in self.meth:
            self.new_curve(self.results[k], k)
        self.ax.legend()
        self.fig.savefig("benchmark.png")
        self.fig.show()
        # plt.ion()

    def update_mp(self):
        """
        Update memory profile curve
        """
        if not self.do_memprofile:
            return
        self.memory_profile[0].append(time.perf_counter() - self.starttime)
        self.memory_profile[1].append(self.get_mem())
        if pylab:
            if self.fig_mp is None:
                # Lazily create the memory-profile figure on first sample.
                self.fig_mp, self.ax_mp = pyplot.subplots()
                self.ax_mp.set_autoscale_on(False)
                self.ax_mp.set_xlabel("Run time (s)")
                self.ax_mp.set_xlim(0, 100)
                self.ax_mp.set_ylim(0, 2 ** 10)
                self.ax_mp.set_ylabel("Memory occupancy (MB)")
                self.ax_mp.set_title("Memory leak hunter")
                self.plot_mp = self.ax_mp.plot(*self.memory_profile)[0]
                self.fig_mp.show()
            else:
                self.plot_mp.set_data(*self.memory_profile)
                # Grow the axes when the data outruns the current limits.
                tmax = self.memory_profile[0][-1]
                mmax = max(self.memory_profile[1])
                if tmax > self.ax_mp.get_xlim()[-1]:
                    self.ax_mp.set_xlim(0, tmax)
                if mmax > self.ax_mp.get_ylim()[-1]:
                    self.ax_mp.set_ylim(0, mmax)
            if self.fig_mp.canvas:
                update_fig(self.fig_mp)

    def get_size(self):
        """Return the longest list of image sizes recorded by any method, sorted."""
        if len(self.meth) == 0:
            return []
        size = list(self.results[self.meth[0]].keys())
        for i in self.meth[1:]:
            s = list(self.results[i].keys())
            if len(s) > len(size):
                size = s
        size.sort()
        return size

    size = property(get_size)
def run_benchmark(number=10, repeat=1, memprof=False, max_size=1000,
                  do_1d=True, do_2d=False, devices="all"):
    """Run the integrated benchmark using the most common algorithms (method parameter)

    :param number: Measure timing over number of executions or average over this time
    :param repeat: number of measurements, takes the best of them
    :param memprof: set to True to enable memory profiling to hunt memory leaks
    :param max_size: maximum image size in megapixel, set it to 2 to speed-up the tests.
    :param do_1d: perform benchmarking using integrate1d
    :param do_2d: perform benchmarking using integrate2d
    :param devices: "all", "cpu", "gpu" or "acc" or a list of devices [(proc_id, dev_id)]
    """
    # Publish the instance through the module-level handle declared near the
    # top of the file ("allows debugging from outside"); the original
    # assigned a shadowing local, so the handle was never updated.
    global bench
    print("Averaging over %i repetitions (best of %s)." % (number, repeat))
    bench = Bench(number, repeat, memprof, max_size=max_size)
    bench.init_curve()
    ocl_devices = []
    if ocl:
        if devices and isinstance(devices, (tuple, list)) and len(devices[0]) == 2:
            # Explicit list of (platform_id, device_id) pairs.
            ocl_devices = devices
        else:
            ocl_devices = []
            for i in ocl.platforms:
                if devices == "all":
                    ocl_devices += [(i.id, j.id) for j in i.devices]
                else:
                    if "cpu" in devices:
                        ocl_devices += [(i.id, j.id) for j in i.devices if j.type == "CPU"]
                    if "gpu" in devices:
                        ocl_devices += [(i.id, j.id) for j in i.devices if j.type == "GPU"]
                    if "acc" in devices:
                        ocl_devices += [(i.id, j.id) for j in i.devices if j.type == "ACC"]
        print("Devices:", ocl_devices)
    if do_1d:
        bench.bench_1d("splitBBox", True, function="integrate1d_legacy")
        bench.bench_1d("splitBBox", True, function="integrate1d_ng")
        # bench.bench_1d("lut", True)
        bench.bench_1d("csr", True, function="integrate1d_legacy")
        bench.bench_1d("csr", True, function="integrate1d_ng")
        for device in ocl_devices:
            print("Working on device: " + str(device))
            # bench.bench_1d("lut_ocl", True, {"platformid": device[0], "deviceid": device[1]})
            bench.bench_1d("csr_ocl", True, {"platformid": device[0], "deviceid": device[1]}, function="integrate1d_legacy")
            bench.bench_1d("csr_ocl", True, {"platformid": device[0], "deviceid": device[1]}, function="integrate1d_ng")
    if do_2d:
        bench.bench_2d("splitBBox")
        bench.bench_2d("lut", True)
        for device in ocl_devices:
            # bench.bench_1d("lut_ocl", True, {"platformid": device[0], "deviceid": device[1]})
            # NOTE(review): this calls bench_1d inside the do_2d branch --
            # looks like it should be bench_2d; confirm before changing.
            bench.bench_1d("csr_ocl", True, {"platformid": device[0], "deviceid": device[1]})
    bench.save()
    bench.print_res()
    bench.update_mp()
    return bench.results


run = run_benchmark
|
<filename>gwas/src/spark.py
import sklearn as sk
from sklearn import decomposition
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import pandas as pd
import gzip as gz
import scipy
from scipy import stats
import math
import random
from scipy.optimize import minimize
from scipy.special import factorial
import scipy.stats as ss
import scipy.optimize as so
import csv
from itertools import groupby, cycle
from operator import itemgetter
class AncestryPCA:
    """PCA-based ancestry projection against a reference VCF/panel.

    Fits sklearn PCA on the reference-panel genotype matrix so that query
    samples can later be projected into the same component space.
    """

    def __init__(self, referenceVCF, referencePanel, VCF):
        """Record input file paths and the per-superpopulation color map."""
        self.referenceVCF = referenceVCF
        self.referencePanel = referencePanel
        self.VCF = VCF
        # fixed plotting colors per super-population label
        self.Colors = {'SAS': "green", 'EAS': "blue", 'AMR': "orange", 'AFR': "purple", 'EUR': "red", 'AJ': "pink", 'DOMI': "yellow", 'CASE': "grey"}

    def LoadPanel(self):
        """Build self.Pop: super-population code -> array of sample IDs."""
        panel = pd.read_csv(self.referencePanel, delimiter="\t")
        self.Pop = {}
        for pop in panel["super_pop"].unique():
            self.Pop[pop] = panel[panel["super_pop"] == pop]["sample"].values

    def LoadVCF(self, vcf_file):
        """Parse a gzipped VCF into a (samples x variants) genotype DataFrame.

        Sex chromosomes are skipped; genotypes are collapsed to allele
        counts via convertGT.
        """
        handle = gz.open(vcf_file, 'rt')
        per_sample = {}
        for line in handle:
            if line.startswith("##"):
                continue  # meta-information lines
            elif line.startswith("#"):
                # column header: sample IDs start at column 10
                indvs = line.strip().split("\t")[9:]
                for indv in indvs:
                    per_sample[indv] = []
            else:
                fields = line.strip().split("\t")
                if fields[0] in ["Y", "chrY", "chrX", "X"]:
                    continue  # skip sex chromosomes
                for indv, gt in zip(indvs, (self.convertGT(g) for g in fields[9:])):
                    per_sample[indv].append(gt)
        # transpose so rows are individuals and columns are variants
        return pd.DataFrame(data=per_sample).transpose()

    def convertGT(self, GT):
        """Collapse a VCF genotype field to an allele count (0/1/2); -9 if missing."""
        call = GT.split(":")[0]
        if "." in call:
            return -9
        for sep in ("/", "|"):
            if sep in call:
                return sum(int(allele) for allele in call.split(sep))
        return -9

    def pca(self):
        """Fit the PCA model on the reference-panel genotype matrix."""
        self.LoadPanel()
        reference = self.LoadVCF(self.referenceVCF)
        n_indv, n_snps = reference.shape
        print("Reference Panel has {} individuals {} SNPs".format(n_indv, n_snps))
        self.model = decomposition.PCA()
        self.model.fit(reference)
def simGT(p, N):
    """Number of successes in N independent Bernoulli(p) trials.

    Draws one uniform(0, 1) variate per trial with the `random` module.
    """
    return sum(1 for _ in range(N) if random.uniform(0, 1) <= p)
def onerun(NumofTotalHap, NumofRareHap, NumofChildsDict):
    """One permutation of expected rare-allele transmissions to children.

    Shuffles `NumofRareHap` carrier haplotypes among `NumofTotalHap` total,
    assigns consecutive quads (father x2, mother x2) to families, and sums
    simulated transmissions per parental carrier configuration.

    :param NumofTotalHap: total number of haplotypes in the pool.
    :param NumofRareHap: number of rare (carrier) haplotypes among them.
    :param NumofChildsDict: mapping {children per family: number of families}.
    :return: simulated total transmission count (int).
    """
    dat = np.concatenate((np.zeros(NumofTotalHap - NumofRareHap), np.ones(NumofRareHap)), axis=0)
    np.random.shuffle(dat)
    NumofChilds = sorted(NumofChildsDict.items(), key=lambda x: x[0])
    exp = 0
    start = 0
    for child_count, family_count in NumofChilds:
        for j in range(start, start + family_count * 4, 4):
            f1, f2, m1, m2 = dat[j: j + 4]
            if f1 + f2 == 1 and m1 + m2 == 1:
                exp += simGT(0.25, child_count)  # both parents het: 1/4 per child
            elif (f1 + f2 == 1 and m1 + m2 == 2) or (f1 + f2 == 2 and m1 + m2 == 1):
                exp += simGT(0.5, child_count)
            elif f1 + f2 == 2 and m1 + m2 == 2:
                exp += simGT(1, child_count)
        # Advance past every haplotype consumed by this family-size group.
        # The original `start = j` landed on the last quad's start, so the
        # next group re-read 4 haplotypes (and `j` was unbound when a group
        # had zero families).
        start += family_count * 4
    return exp
def onerun2(NumofTotalHap, NumofRareHap, NumofChildsDict, frac=0.1):
    """Variant of onerun kept for interface compatibility.

    :param frac: currently unused; retained so existing callers keep working.
    Other parameters and the return value are as in onerun.
    """
    dat = np.concatenate((np.zeros(NumofTotalHap - NumofRareHap), np.ones(NumofRareHap)), axis=0)
    np.random.shuffle(dat)
    NumofChilds = sorted(NumofChildsDict.items(), key=lambda x: x[0])
    exp = 0
    start = 0
    for child_count, family_count in NumofChilds:
        for j in range(start, start + family_count * 4, 4):
            f1, f2, m1, m2 = dat[j: j + 4]
            if f1 + f2 == 1 and m1 + m2 == 1:
                exp += simGT(0.25, child_count)  # both parents het: 1/4 per child
            elif (f1 + f2 == 1 and m1 + m2 == 2) or (f1 + f2 == 2 and m1 + m2 == 1):
                exp += simGT(0.5, child_count)
            elif f1 + f2 == 2 and m1 + m2 == 2:
                exp += simGT(1, child_count)
        # Same off-by-one fix as in onerun: skip the full quad span of this
        # group instead of stopping on the last quad's start index.
        start += family_count * 4
    return exp
def Permutation(NumofTotalHap, NumofRareHap, NumofChildsDict, Nperm=20000):
    """Run `Nperm` permutations of onerun and return the draws as an ndarray."""
    draws = [onerun(NumofTotalHap, NumofRareHap, NumofChildsDict)
             for _ in range(Nperm)]
    return np.array(draws)
def PlotNFit(dat, af, mu = None, fit=False):
    """Plot the empirical pmf of permutation counts against a Poisson pmf.

    :param dat: iterable of integer counts (e.g. output of Permutation).
    :param af: allele frequency, used only in the plot title.
    :param mu: ignored on entry — overwritten below; kept for interface
        compatibility.
    :param fit: if True, estimate lambda via FitPoisson and return it;
        otherwise use the sample mean and return None.
    """
    bins=np.arange(min(dat), max(dat)+1)
    counts, bins = np.histogram(dat,bins=bins,density=1)
    count = dict(zip(bins,counts))
    x = np.arange(min(dat), max(dat))
    y = np.array([count[i] for i in x])
    # NOTE(review): both a blue format string ('bo') and color="red" are
    # given here and in vlines below (colors='b' plus color="red");
    # presumably the explicit color kwarg wins — verify against the
    # matplotlib version in use.
    plt.plot(x, y, 'bo', ms=8, label='permute pmf', color="red")
    plt.vlines(x, 0, y, colors='b', lw=5, alpha=0.5, color="red")
    if fit:
        # NOTE(review): FitPoisson is not defined in this module; it must be
        # provided elsewhere at runtime, otherwise fit=True raises NameError.
        mu = max(0, FitPoisson(dat))
    else:
        mu = np.mean(dat)
    # x-range covering essentially all Poisson(mu) mass
    x = np.arange(max(0, stats.poisson.ppf(0.0, mu)), max(1, stats.poisson.ppf(0.9999, mu)))
    plt.plot(x, stats.poisson.pmf(x, mu), 'bo', ms=8, label='poisson pmf')
    plt.vlines(x, 0, stats.poisson.pmf(x, mu), colors='b', lw=5, alpha=0.5)
    mean, var = np.mean(dat), np.var(dat)
    plt.title("AF={}, mean:{}, var:{}, lambda={}".format(af, round(mean,3), round(var,3), round(float(mu),3)))
    plt.legend(loc='best', frameon=False)
    plt.show()
    if fit:
        return mu
    else:
        return None
def NegBinom_Likelihood(P, x, neg=1):
    """Negative-binomial log-likelihood of data `x`.

    :param P: parameter vector [n, p, loc]; n and loc are rounded to
        integers (by definition n must be an integer).
    :param x: array-like of observed counts.
    :param neg: sign multiplier; pass -1 to get the negative log-likelihood
        for minimizers such as scipy.optimize.fmin.
    :return: neg * sum(log pmf).
    """
    n = np.round(P[0])  # by definition, it should be an integer
    p = P[1]
    loc = np.round(P[2])
    # logpmf is numerically stabler than log(pmf): for extreme parameters
    # pmf underflows to 0 and log() returned -inf with a warning.
    return neg * ss.nbinom.logpmf(x, n, p, loc).sum()
def FitNegBinom(dat):
    """Maximum-likelihood negative-binomial fit of `dat`.

    Runs Nelder-Mead from a grid of starting values for n and keeps the
    parameters with the lowest objective value.

    :return: (round(n), p) of the best fit.
    """
    fits = []
    for n_start in range(20, 160):  # in fact (80, 120) should probably be enough
        out = so.fmin(NegBinom_Likelihood, [n_start, 0.5, 0],
                      args=(dat, -1), full_output=True, disp=False)
        # out[0] is xopt, out[1] is the objective value at the optimum
        fits.append((out[1], out[0]))
    best_params = sorted(fits, key=lambda fit: fit[0])[0][1]
    return np.round(best_params[0]), best_params[1]
def plotscatter(E, O, title, xlim=None, ylim=None):
    """Scatter plot of observed vs expected counts with a y=x guide line.

    Also writes the figure to ../Slides/<title-without-spaces>.png.

    :param E: expected values (x axis).
    :param O: observed values (y axis).
    :param title: plot title; also determines the output file name.
    :param xlim: x-axis upper bound; defaults to max(E).
    :param ylim: y-axis upper bound; defaults to max(O).
    """
    plt.figure(dpi=120)
    plt.scatter(E, O, s=5)
    xlim = max(E) if xlim is None else xlim
    ylim = max(O) if ylim is None else ylim
    _max = max([xlim, ylim])
    plt.text(xlim*0.8, ylim*0.8, "Ratio:{0:.2f}".format(sum(O)/sum(E)))
    plt.plot((0, _max), (0, _max), color="black")
    plt.xlabel("Expected")
    plt.ylabel("Observed")
    plt.title(title)
    plt.xlim((0, xlim))
    plt.ylim((0, ylim))
    # Save BEFORE show(): on interactive backends the figure is destroyed
    # when the show() window closes, so saving afterwards wrote a blank
    # image. NOTE: output path ../Slides/ is hard-coded.
    plt.savefig("../Slides/{}.png".format("".join(title.split())), dpi=200, format="png")
    plt.show()
    plt.clf()
def QQplot(pvalues, title="QQ plot"):
    """Quantile-quantile plot of p-values (legacy; see QQplot2 below).

    Sorts p-values descending (mutates the input list), converts them to
    -log10 capped at 10, pads to a fixed length and plots observed vs
    expected quantiles.
    """
    pvalues.sort(reverse=True)
    Qvalues = []
    for x in pvalues:
        try:
            Qvalues.append(min(10, -math.log(x,10)))
        except:
            # math.log raises for p <= 0; such values are printed and skipped
            print(x)
    top = int(Qvalues[-1]) + 1
    NumTest = len(Qvalues)
    # NOTE(review): 19000 is a hard-coded padding size — presumably the
    # expected genome-wide number of tests; verify. No padding is added
    # when NumTest > 19000.
    Qvalues = [0] * (19000-NumTest) + Qvalues
    Qexp = []
    # NOTE(review): iterates over the PADDED length but divides by the
    # unpadded NumTest, so expected quantiles exceed 1 for padded entries
    # (negative after -log10). QQplot2 computes this without padding.
    for i in range(len(Qvalues)):
        Qexp.append(float(i+1)/NumTest)
    Qexp.sort(reverse=True)
    Qexp = [-1*math.log(x,10) for x in Qexp]
    plt.subplot()
    plt.scatter(Qexp, Qvalues, alpha=0.5)
    plt.plot([0, top], [0, top], ls="-")
    plt.title(title)
    plt.xlabel('Exp Q')
    plt.ylabel('Obs Q')
    plt.show()
def QQplot2(pvalues, title="QQ plot", threshold=5e-8):
    """Quantile-quantile plot of p-values with a significance line.

    Sorts `pvalues` descending in place, plots observed -log10(p) (capped
    at 10) against the expected uniform quantiles, and draws a horizontal
    line at -log10(threshold) unless threshold is False.
    """
    pvalues.sort(reverse=True)
    obs_q = []
    for p in pvalues:
        try:
            obs_q.append(min(10, -math.log(p, 10)))
        except:
            # math.log fails for p <= 0; report the offending value and skip it
            print(p)
    ceiling = int(obs_q[-1]) + 1
    n_tests = len(obs_q)
    exp_q = [float(rank + 1) / n_tests for rank in range(n_tests)]
    exp_q.sort(reverse=True)
    exp_q = [-math.log(q, 10) for q in exp_q]
    plt.figure(figsize=(4, 4), dpi=100)
    plt.scatter(exp_q, obs_q, alpha=0.5, s=2)
    plt.plot([0, ceiling], [0, ceiling], ls="--", color='black')
    if threshold != False:
        plt.axhline(y=-math.log(threshold, 10), linestyle="--", color="black")
    plt.title(title)
    plt.xlabel('Exp Q')
    plt.ylabel('Obs Q')
    plt.show()
def volcano(pvalues, effects, title="volcano plot"):
    """Volcano plot: log2 effect size (x) vs -log10 p-value (y)."""
    neg_log_p = [-math.log(p, 10) for p in pvalues]
    log2_effect = [math.log(e, 2) for e in effects]
    plt.scatter(log2_effect, neg_log_p)
    plt.title(title)
    plt.show()
def GetPvalues(exp, obs):
    """Per-site observed/expected ratios and Poisson enrichment p-values.

    :param exp: expected counts per site.
    :param obs: observed counts per site (same length as exp).
    :return: (ratios, pvalues); ratios contain the string 'nan' where the
        expected count is 0 (sentinel preserved for existing callers).
    """
    ratio, pvalue = [], []
    for e, o in zip(exp, obs):
        r = o/e if e != 0 else 'nan'
        # P(X >= o) under Poisson(e). The original passed the whole obs/exp
        # sequences here, appending a full array per site instead of the
        # scalar for this site.
        p = stats.poisson.sf(o - 1, e)
        ratio.append(r)
        pvalue.append(p)
    return ratio, pvalue
def _gen_data(df):
"""
iterate over the files and yield chr, start, pvalue
"""
for row in df.iterrows():
#print(row)
#print(row[1])
yield row[1]["CHR"], row[1]["BP"], row[1]["P"]
#yield row[1]["CHR"], row[1]["POS"], row[1]["P-value"]
#yield toks[columns[0]], int(toks[columns[1]]), float(toks[columns[2]])
def cmp(a, b):
    """Three-way comparison: 1 if a > b, -1 if a < b, 0 if equal.

    Returns None implicitly when none of the comparisons hold (e.g. NaN),
    matching the original fall-through behavior.
    """
    if a > b:
        return 1
    if a < b:
        return -1
    if a == b:
        return 0
def chr_loc_cmp(alocs, blocs):
    """Order loci by chromosome first, then by position (uses module cmp)."""
    by_chromosome = cmp(alocs[0], blocs[0])
    return by_chromosome if by_chromosome else cmp(alocs[1], blocs[1])
def cmp_to_key(mycmp):
    """Convert a cmp= function into a key= function.

    Delegates to functools.cmp_to_key instead of re-implementing the
    wrapper class by hand (the original K class duplicated the stdlib,
    C-accelerated implementation).
    """
    from functools import cmp_to_key as _std_cmp_to_key
    return _std_cmp_to_key(mycmp)
def manhattan(df, image_path="./manhattan.png", no_log=False, colors="rgbk", title="manhattan", lines=False, ymax=10):
    """Manhattan plot of GWAS results.

    :param df: DataFrame with CHR, BP and P columns (consumed via _gen_data).
    :param image_path: currently unused — the savefig call below is
        commented out; the plot is only shown.
    :param no_log: if True, plot P as-is instead of -log10(P).
    :param colors: characters cycled as per-chromosome colors.
    :param title: plot title (None to omit).
    :param lines: draw vertical lines instead of scatter points.
    :param ymax: y-axis cap (None to autoscale).
    """
    xs = []
    ys = []
    cs = []
    # rebinds the `colors` string parameter to an endless color iterator
    colors = cycle(colors)
    xs_by_chr = {}
    last_x = 0
    #data = sorted(_gen_data(df), cmp=chr_loc_cmp)
    data = sorted(_gen_data(df), key=cmp_to_key(chr_loc_cmp))
    for seqid, rlist in groupby(data, key=itemgetter(0)):
        #color = colors.next()
        color = next(colors)
        rlist = list(rlist)
        # offset positions so chromosomes lie end-to-end on the x axis
        region_xs = [last_x + r[1] for r in rlist]
        xs.extend(region_xs)
        ys.extend([r[2] for r in rlist])
        cs.extend([color] * len(rlist))
        # chromosome tick label is placed at the midpoint of its region
        xs_by_chr[seqid] = (region_xs[0] + region_xs[-1]) / 2
        # keep track so that chrs don't overlap.
        last_x = xs[-1]
    #xs_by_chr = [(k, xs_by_chr[k]) for k in sorted(xs_by_chr.keys(), cmp=chr_cmp)]
    xs_by_chr = [(k, xs_by_chr[k]) for k in sorted(xs_by_chr.keys(), key=cmp_to_key(cmp))]
    xs = np.array(xs)
    ys = np.array(ys) if no_log else -np.log10(ys)
    plt.close()
    f = plt.figure(figsize=(10,4), dpi=120)
    ax = f.add_axes((0.1, 0.09, 0.88, 0.85))
    if title is not None:
        plt.title(title)
    ax.set_ylabel('-log10(p-value)')
    if lines:
        ax.vlines(xs, 0, ys, colors=cs, alpha=0.5)
    else:
        ax.scatter(xs, ys, s=2, c=cs, alpha=0.8, edgecolors='none')
    # plot 0.05 line after multiple testing.
    #ax.axhline(y=-np.log10(0.05 / len(data)), color='0.5', linewidth=2)
    # genome-wide significance threshold line at p = 5e-8
    ax.axhline(y=-np.log10(5e-8), color='0.5', linewidth=2)
    plt.axis('tight')
    plt.xlim(0, xs[-1])
    plt.ylim(ymin=0)
    if ymax is not None: plt.ylim(ymax=ymax)
    plt.xticks([c[1] for c in xs_by_chr], [c[0] for c in xs_by_chr], rotation=0, size=8.5)
    #print >>sys.stderr, "saving to: %s" % image_path
    #plt.savefig(image_path)
    plt.show()
def get_filehandles(args):
    """Lazily open each path in `args`; the literal "-" maps to stdin.

    :param args: iterable of file paths / "-".
    :return: generator of open file objects (callers are responsible for
        closing the handles they consume).
    """
    # local import: this module never imports sys at the top level, so the
    # original raised NameError as soon as "-" was requested.
    import sys
    return (open(a) if a != "-" else sys.stdin for a in args)
def processPRS_PTDT(S):
    """Polygenic transmission disequilibrium test (pTDT) at PRS threshold S.

    Converts the whitespace-delimited plink .profile for run `S` into a TSV,
    then measures how proband and unaffected-sibling PRS deviate from the
    mid-parent PRS (Weiner et al. style pTDT).

    NOTE(review): all input paths are hard-coded absolute paths on the
    author's machine.

    :param S: label of the plink scoring run (interpolated into file names).
    :return: ((proband mean deviation, SEM, p), (sibling mean deviation, SEM, p))
    """
    inp = open("/Users/jiayao/Work/spark/dat/30K/GWAS/spark_prs/plink.{}.profile".format(S), 'rt')
    out = csv.writer(open("/Users/jiayao/Work/spark/dat/30K/GWAS/spark_prs/plink.{}.profile.tsv".format(S), 'wt'), delimiter="\t")
    # re-write the space-aligned plink output as tab-separated
    for l in inp:
        out.writerow(l.split())
    PRS = pd.read_csv("/Users/jiayao/Work/spark/dat/30K/GWAS/spark_prs/plink.{}.profile.tsv".format(S), delimiter="\t")
    ID2Score = dict(zip(PRS["IID"].values, PRS["SCORE"].values))
    Case = PRS[PRS["PHENO"]==2]["SCORE"].values
    NonCase = PRS[PRS["PHENO"]==1]["SCORE"].values
    #plt.hist(Case, color="red", alpha=0.5, bins=50, normed=1)
    #plt.hist(NonCase, color="blue", alpha=0.5, bins=50, normed=1)
    #plt.show()
    mean_case = np.mean(Case)
    mean_control = np.mean(NonCase)
    #print(mean_case, mean_control)
    FamDat = pd.read_csv("/Users/jiayao/Work/spark/dat/30K/GWAS/spark_prs/GenoHQ.fam.tsv", delimiter="\t", header=None)
    FamDat.columns = ["FamID", "SampleID", "FatherID", "MotherID", "Gender", "Pheno"]
    # --- probands (Pheno == 2) with both parents scored ---
    MidPrs = []
    ProbPrs = []
    for row in FamDat.iterrows():
        row = row[1]
        try:
            # parent IDs are strings; '0' means missing parent
            if (row["Pheno"] == 2) and (row["FatherID"]!='0') and (row["MotherID"]!='0'):
                prob_prs = ID2Score[row["SampleID"]]
                FaPrs = ID2Score[row["FatherID"]]
                MoPrs = ID2Score[row["MotherID"]]
                mid_prs = (FaPrs+MoPrs)/2
                MidPrs.append(mid_prs)
                ProbPrs.append(prob_prs)
        except:
            # NOTE(review): broad except silently skips families whose
            # members are missing from ID2Score — consider catching KeyError
            # explicitly.
            continue
    DEVs = []
    SD_PRS_MP = np.std(MidPrs)  # pTDT deviations are standardized by the mid-parent SD
    for prob_prs, mid_prs in zip(ProbPrs, MidPrs):
        pTDT_dev = (prob_prs - mid_prs)/SD_PRS_MP
        DEVs.append(pTDT_dev)
    prob_dev = np.mean(DEVs)
    prob_std = np.std(DEVs) / math.sqrt(len(DEVs))  # standard error of the mean
    t, prob_p = stats.ttest_1samp(DEVs, 0)
    # --- unaffected siblings (Pheno == 1) with both parents scored ---
    MidPrs = []
    SibPrs = []
    for row in FamDat.iterrows():
        row = row[1]
        try:
            if (row["Pheno"] == 1) and (row["FatherID"]!='0') and (row["MotherID"]!='0'):
                sib_prs = ID2Score[row["SampleID"]]
                FaPrs = ID2Score[row["FatherID"]]
                MoPrs = ID2Score[row["MotherID"]]
                mid_prs = (FaPrs+MoPrs)/2
                MidPrs.append(mid_prs)
                SibPrs.append(sib_prs)
        except:
            continue
        # NOTE(review): leftover debug output — prints the last processed
        # row on every loop iteration that is not skipped.
        print(row)
    SD_PRS_MP = np.std(MidPrs)
    #print(SD_PRS_MP)
    DEVs = []
    for sib_prs, mid_prs in zip(SibPrs, MidPrs):
        pTDT_dev = (sib_prs - mid_prs)/SD_PRS_MP
        DEVs.append(pTDT_dev)
    #plt.hist(DEVs, bins=100)
    #plt.show()
    sib_dev = np.mean(DEVs)
    sib_std = np.std(DEVs) / math.sqrt(len(DEVs))
    t, sib_p = stats.ttest_1samp(DEVs, 0)
    return (prob_dev, prob_std, prob_p), (sib_dev, sib_std, sib_p)
def processPRS_PTDT_stratified(S, group1, group2):
    """pTDT comparing two sample strata at PRS threshold S.

    Like processPRS_PTDT, but deviations are computed separately for
    samples in `group1` and `group2` and the two deviation distributions
    are compared with a two-sample t-test.

    NOTE(review): input paths are hard-coded; also assumes the TSV produced
    by a prior processPRS_PTDT run already exists.

    :param S: label of the plink scoring run.
    :param group1: container of SampleIDs in stratum 1.
    :param group2: container of SampleIDs in stratum 2.
    :return: ((g1 mean dev, SEM, p1), (g2 mean dev, SEM, p2), between-group p)
    """
    PRS = pd.read_csv("/Users/jiayao/Work/spark/dat/30K/GWAS/spark_prs/plink.{}.profile.tsv".format(S), delimiter="\t")
    ID2Score = dict(zip(PRS["IID"].values, PRS["SCORE"].values))
    FamDat = pd.read_csv("/Users/jiayao/Work/spark/dat/30K/GWAS/spark_prs/GenoHQ.fam.tsv", delimiter="\t", header=None)
    FamDat.columns = ["FamID", "SampleID", "FatherID", "MotherID", "Gender", "Pheno"]
    MidPrs1 = []
    ProbPrs1 = []
    MidPrs2 = []
    ProbPrs2 = []
    for row in FamDat.iterrows():
        row = row[1]
        try:
            prob_prs = ID2Score[row["SampleID"]]
            FaPrs = ID2Score[row["FatherID"]]
            MoPrs = ID2Score[row["MotherID"]]
            mid_prs = (FaPrs+MoPrs)/2
            if row["SampleID"] in group1:
                MidPrs1.append(mid_prs)
                ProbPrs1.append(prob_prs)
            elif row["SampleID"] in group2:
                MidPrs2.append(mid_prs)
                ProbPrs2.append(prob_prs)
        except:
            # NOTE(review): broad except silently drops families with any
            # member missing from ID2Score (KeyError expected here).
            continue
    g1_devs = []
    SD_PRS_MP1 = np.std(MidPrs1)  # standardize by this stratum's mid-parent SD
    for prob_prs, mid_prs in zip(ProbPrs1, MidPrs1):
        pTDT_dev = (prob_prs - mid_prs)/SD_PRS_MP1
        g1_devs.append(pTDT_dev)
    g1_mean_dev = np.mean(g1_devs)
    g1_std = np.std(g1_devs) / math.sqrt(len(g1_devs))  # SEM
    t1, p1 = stats.ttest_1samp(g1_devs, 0)
    g2_devs = []
    SD_PRS_MP2 = np.std(MidPrs2)
    for prob_prs, mid_prs in zip(ProbPrs2, MidPrs2):
        pTDT_dev = (prob_prs - mid_prs)/SD_PRS_MP2
        g2_devs.append(pTDT_dev)
    g2_mean_dev = np.mean(g2_devs)
    g2_std = np.std(g2_devs) / math.sqrt(len(g2_devs))
    t2, p2 = stats.ttest_1samp(g2_devs, 0)
    # between-strata comparison of the deviation distributions
    t, p = stats.ttest_ind(g1_devs, g2_devs)
    return (g1_mean_dev, g1_std, p1), (g2_mean_dev, g2_std, p2), p
|
<gh_stars>10-100
# PyMoBu - Python enhancement for Autodesk's MotionBuilder
# Copyright (C) 2010 <NAME>
# <EMAIL>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
Module for more general functions
'''
import re
from pyfbsdk import FBSystem #@UnresolvedImport
from pyfbsdk import FBProgress #@UnresolvedImport
# get the whole list of components
# NOTE(review): captured once at import time from the current FBSystem
# scene; presumably pyfbsdk returns a live property list that tracks later
# additions — verify against the MotionBuilder SDK docs.
kAllSceneComponents = FBSystem().Scene.Components
def deselect(pattern=None, **kwargs):
    '''
    Deselects objects that match the given parameters
    See ls function for available arguments
    '''
    # only currently-selected objects are candidates
    kwargs['selected'] = True
    patterns = pattern if hasattr(pattern, '__iter__') else [pattern]
    for item in patterns:
        for obj in ls(pattern=item, **kwargs):
            try:
                obj.component.Selected = False
            except:
                obj.Selected = False
def select(pattern=None, add=False, toggle=False, **kwargs):
    '''
    Selects objects that match the given parameters
    @param add: add the matched objects to the selection
    @param toggle: toggles the selection of the matched objects
    See ls function for additional arguments
    '''
    if not hasattr(pattern, "__iter__"):
        pattern = [pattern]
    # caller-supplied 'selected' would conflict with the logic below
    kwargs.pop('selected', None)
    # plain select replaces the current selection
    if not add and not toggle:
        deselect(pattern=None, **kwargs)
    if toggle:
        def selectFunc(x):
            try:
                x.component.Selected = not x.component.Selected
            except:
                x.Selected = not x.Selected
    else:
        def selectFunc(x):
            try:
                x.component.Selected = True
            except:
                x.Selected = True
    for item in pattern:
        matched = ls(pattern=item, **kwargs)
        # Explicit loop instead of map(): map() is lazy on Python 3, so the
        # original never actually applied selectFunc there, and even on
        # Python 2 it built a throwaway list purely for side effects.
        for obj in matched:
            selectFunc(obj)
def delete(pattern=None, **kwargs):
    '''
    Deletes objects that match the given parameters
    See ls function for additional arguments
    '''
    patterns = pattern if hasattr(pattern, "__iter__") else [pattern]
    for item in patterns:
        for obj in ls(pattern=item, **kwargs):
            try:
                obj.component.FBDelete()
            except:
                obj.FBDelete()
def ls(pattern=None, _type=None, selected=None, visible=None, includeNamespace=True):
    '''
    Similar to Maya's ls command - returns list of objects that match the given parameters
    @param pattern: name of an object with optional wild cards '*'
    @param _type: object to compare if the component is of that type (either string or python class/type)
    @param selected: True/False if the object is selected or not. Default is either
    @param visible: True/False if the object is visible. Default is either
    @param includeNamespace: does the search use the complete name (with namespace) Default True
    @return: list of matching scene components, converted to PyMoBu wrappers when possible
    '''
    # set up the name testing based on the pattern
    if pattern:
        # create a name return function
        if includeNamespace:
            getName = lambda x: getattr(x, 'LongName', x.Name)
        else:
            getName = lambda x: x.Name
        # if there is a wild card in the pattern
        if '*' in pattern:
            # translate the shell-style '*' into a regex wildcard
            pattern = pattern.replace('*', '.*')
            # name testing function
            passesNameTest = lambda x: re.match(pattern, getName(x))
        else:
            passesNameTest = lambda x: pattern == getName(x)
    else:
        passesNameTest = lambda x: True
    # for getting selection test
    if selected is not None:
        passesSelectionTest = lambda x: x.Selected == selected
    else:
        passesSelectionTest = lambda x: True
    # for getting visibility test
    if visible is not None:
        passesVisibilityTest = lambda x: visible == bool(getattr(x, 'Visibility', False))
    else:
        passesVisibilityTest = lambda x: True
    # for testing the type of component
    if _type:
        # if they gave a string, evaluate it
        # NOTE(security): eval of a caller-supplied string — only pass
        # trusted type names here.
        # NOTE(review): basestring exists only on Python 2; this module
        # presumably targets MotionBuilder's Python 2 interpreter.
        if isinstance(_type, basestring):
            try:
                _type = eval(_type)
            except NameError:
                raise NameError("Can not find object type '%s' in current namespace" % _type)
        passesTypeTest = lambda x: isinstance(x, _type)
    # no type was given so its True by default
    else:
        passesTypeTest = lambda x: True
    matchList = []
    # kAllSceneComponents is the module-level snapshot of the scene
    for cmpnt in kAllSceneComponents:
        # if we did not pass the selection test, continue on
        if not passesSelectionTest(cmpnt):
            continue
        # check if the object is visible
        if not passesVisibilityTest(cmpnt):
            continue
        # do the same for matching type
        if not passesTypeTest(cmpnt):
            continue
        if passesNameTest(cmpnt):
            # try converting it to a pymobu object
            try:
                pmbCmpnt = cmpnt.ConvertToPyMoBu()
            except:
                pmbCmpnt = cmpnt
            matchList.append(pmbCmpnt)
    return matchList
def progressBarIterator(func, items):
    '''Apply `func` to each item while showing an FBProgress bar.

    @param func: callable applied to every element of items
    @param items: sized iterable of work items
    @return: list of func(item) results in input order
    '''
    progressBar = FBProgress()
    progressBar.Caption = str(func.__name__)
    ret = []
    num = len(items)
    try:
        for i, item in enumerate(items):
            progressBar.Text = str(item)
            # Percent expects 0-100; the original int(i/num) truncated to 0
            # on every iteration, so the bar never moved (the "can't figure
            # out why it doesn't work" noted by the author).
            progressBar.Percent = int(100.0 * i / num)
            ret.append(func(item))
    finally:
        # always tear the bar down, even if func raises
        progressBar.FBDelete()
    return ret
################################
# set up decorators #
################################
def decorated(origFunc, newFunc, decoration=None):
    """
    Copies the original function's name/docs/signature to the new function, so that the docstrings
    contain relevant information again.
    Most importantly, it adds the original function signature to the docstring of the decorating function,
    as well as a comment that the function was decorated. Supports nested decorations.
    """
    if not hasattr(origFunc, '_decorated'):
        # a func that has yet to be treated - add the original argspec to the docstring
        import inspect
        try:
            # modern API: inspect.getargspec/formatargspec were removed in
            # Python 3.11/3.13 respectively, which made the original crash
            argspec = str(inspect.signature(origFunc))
        except AttributeError:
            # very old interpreters without inspect.signature
            argspec = inspect.formatargspec(*inspect.getargspec(origFunc))
        newFunc.__doc__ = "Original Arguments: %s\n\n%s" % (
            argspec,
            inspect.getdoc(origFunc) or "")
    else:
        newFunc.__doc__ = origFunc.__doc__ or ""
    newFunc.__doc__ += "\n(Decorated by %s)" % (decoration or "%s.%s" % (newFunc.__module__, newFunc.__name__))
    newFunc.__name__ = origFunc.__name__
    newFunc.__module__ = origFunc.__module__
    newFunc.__dict__ = origFunc.__dict__  # share attributes
    newFunc._decorated = True  # stamp the function as decorated
def decorator(func):
    """
    Decorator for decorators. Calls the 'decorated' function above for the decorated function, to preserve docstrings.
    """
    def decoratorFunc(origFunc, *x):
        # NOTE(review): when called with more than one positional argument,
        # the second one (x[0]) is treated as the function being decorated,
        # while ALL positionals are still forwarded to func — presumably to
        # support parameterized/bound decorators; verify against call sites.
        args = (origFunc,) + x
        if x:
            origFunc = x[0]
        newFunc = func(*args)
        # propagate name/doc/signature info from the decorated function
        decorated(origFunc, newFunc, "%s.%s" % (func.__module__, func.__name__))
        return newFunc
    # make the wrapper itself carry the original decorator's metadata
    decorated(func,decoratorFunc, "%s.%s" % (__name__, "decorator"))
    return decoratorFunc
<reponame>RamParameswaran/openfisca-djangoapi
import numpy as np
import pandas as pd
import plotly.graph_objects as go
from plotly.io import to_html
import networkx as nx
import plotly.express as px
from variables.models import Variable
from django.db.models import Count
# Shared plot palette: keeps every chart in this module visually consistent.
colorScheme = {
    "background_color": 'rgba(0, 0, 0, 0)',  # fully transparent background
    "trace1_color": "#913535",
    "trace2_color": "#283148",
    "text_color": "#283148",
    "highlight_color": "#8ea6b4",
}
def varIDBarChart(name='alias'):
    """Stacked bar chart of parent/child counts per 'PDRS' variable.

    :param name: 'id' to label bars by variable name, 'alias' (default) to
        label them by the metadata alias.
    :return: plotly HTML div string.
    :raises ValueError: for any other `name` value (the original fell
        through and crashed later with an opaque NameError on display_name).
    """
    var_names = []
    var_alias = []
    parents_number = []
    children_number = []
    offspring_number = []  # collected but currently unused below
    for entry in Variable.objects.filter(name__icontains='PDRS'):
        var_names.append(entry.name)
        var_alias.append(entry.metadata['alias'])
        parents_number.append(entry.parents.count())
        children_number.append(entry.children.count())
        offspring_number.append(len(entry.metadata['input_offspring']))
    if (name == 'id'):
        display_name = var_names
        display_height = len(var_names) * 30  # 30px of height per bar
    elif (name == 'alias'):
        display_name = var_alias
        display_height = len(var_alias) * 30
    else:
        raise ValueError("name must be 'id' or 'alias', got {!r}".format(name))
    trace1 = go.Bar(x=display_name,
                    y=children_number,
                    # orientation='h',
                    name="children",
                    text=children_number,
                    textposition="auto",
                    marker=dict(
                        color=colorScheme['trace1_color']
                    ),
                    # TODO: onHover: display var_id
                    )
    trace2 = go.Bar(x=display_name,
                    y=parents_number,
                    text=parents_number,
                    textposition="inside",
                    # orientation='h',
                    name="parents",
                    marker=dict(
                        color=colorScheme['trace2_color']),
                    )
    data = [trace1, trace2]
    layout = go.Layout(
        barmode='stack',
        width=1500,
        height=display_height,
        yaxis=dict(
            showticklabels=False,
            dtick=1,
            tickangle=0,
            tickfont=dict(family='serif',
                          size=12,
                          color=colorScheme['text_color'],
                          ),
        ),
        xaxis=dict(
            mirror=True,  # TODO: try with bigger top margin
            showticklabels=True,
            dtick=1,
            tick0=0,
            categoryorder='total descending',
        ),
        paper_bgcolor=colorScheme['background_color'],
        plot_bgcolor=colorScheme['background_color'],
    )
    fig = go.Figure(data=data, layout=layout)
    plot_div = fig.to_html(full_html=False)
    return plot_div
def variable_directory():
    """
    this plots the directory map of all variables in the code base

    Builds a scheme/method/file treemap (plotly) from each Variable's
    `directory` path and returns the figure as an HTML div string.
    """
    # TODO: put it into the right data frame
    var_id = []
    var_alias = []
    scheme_name = []
    method_name = []
    file_name = []
    # file_var_count = []
    for entry in Variable.objects.all():
        var_id.append(entry.name)
        var_alias.append(entry.metadata['alias'])
        directory_list = entry.directory.split("/")
        # NOTE(review): scheme_name/file_name are appended only when the
        # conditions below hold, while var_id/var_alias grow for every
        # entry — if any variable lives outside 'variables/...' or in a
        # non-.py file, the lists diverge in length and the DataFrame
        # constructor below raises. Presumably all entries match this
        # layout; verify.
        if (directory_list[0] == 'variables'):
            scheme_name.append(directory_list[1])
        if (directory_list[-1].endswith('.py')):
            file_name.append(directory_list[-1])
        if (len(directory_list) == 4):
            # variables/<scheme>/<method>/<file>.py layout
            method_name.append(directory_list[2])
        else:
            # otherwise fall back to the file stem as the method name
            method_name.append(directory_list[-1].split(".py")[0])
    df_var = pd.DataFrame(data={
        'var_id': var_id,
        'alias': var_alias,
        'scheme': scheme_name,
        'method': method_name,
        'file': file_name,
    })
    df_var.reset_index()
    # file_counts = df_var['file'].value_counts()
    # df1 = df_var.groupby(by='method').agg('count')
    fig = px.treemap(
        df_var, path=['scheme', 'method', 'file'],
        color='scheme',
        color_discrete_map={
            '(?)': colorScheme['highlight_color'],
            'General_Appliances': colorScheme['trace2_color'], 'Home_Energy_Efficiency_Retrofits (HEER)': colorScheme['trace2_color'],
            'High_Efficiency_Appliances_Business (HEAB)': colorScheme['trace2_color'],
            'Other_ESS_methods': colorScheme['trace1_color'], 'Removal_of_Old_Appliances (RoOA)': colorScheme['trace2_color']},
        title="Overview of Openfisca_nsw_safeguard Code Base",
        height=700, width=1500)
    fig.update_layout(uniformtext=dict(minsize=14, mode='hide'))
    plot_div = fig.to_html(full_html=False)
    return plot_div
|
from flask import Flask, jsonify, request
from .history import history as hist
from .api_funcs import *
from .comments import *
from uszipcode import SearchEngine
def create_app():
    """Flask application factory for the earthquake API.

    Registers routes that query the USGS/EMSC tables via the DB helpers
    imported from sibling modules (connect, query_one, prep_response, ...).

    NOTE(security): several routes below build SQL with f-strings. Numeric
    inputs are cast/validated and `source` is whitelisted before
    interpolation, but the /comments POST route interpolates raw user text —
    switch these to parameterized queries (cursor placeholders).
    """
    app = Flask(__name__)
    source_message = 'Please select either USGS or EMSC as source'
    @app.route('/')
    def home():
        # liveness check
        return jsonify({'status_code': 200,
                        'message': 'success, the flask API is running'})
    @app.route('/lastQuake/<source>/')
    @app.route('/lastQuake/<source>/<float:mag>/')
    @app.route('/lastQuake/<source>/<int:mag>/')
    def lastQuake(source, mag=5.5):
        """Most recent quake with Magnitude >= mag from the given source."""
        CONN = connect()
        # sanitize inputs
        if mag < 0 or mag > 11:
            return jsonify({'status_code': 400, 'message':
                            'please enter a magnitude between 0 and 11'})
        # check to make sure that source is valid
        if source.upper() not in ['USGS', 'EMSC']:
            return jsonify({'status_code': 400,
                            'message': source_message})
        curs = CONN.cursor()
        # NOTE(security): f-string SQL — safe only because source is
        # whitelisted and mag is range-checked above; prefer placeholders.
        response = curs.execute(f'''
            SELECT * FROM {source}
            WHERE Magnitude >= {mag}
            ORDER BY Time Desc
            limit 1;
            ''')
        quake = curs.fetchone()
        curs.close()
        CONN.commit()
        CONN.close()
        num_quakes = 1 if quake is not None else 0
        response = prep_response(quake, source) if quake is not None \
            else f'No quakes of magnitude {mag} or higher in {source.upper()}'
        return jsonify({'status_code': 200, 'message': response, 'num_quakes': num_quakes})
    @app.route('/last/<source>/<time>/<float:mag>/')
    @app.route('/last/<source>/<time>/<int:mag>/')
    @app.route('/last/<source>/<time>/')
    def getTime(time, source, mag=5.5):
        '''This route pulls the last quakes from USGS over the specified time
        frame that are at or above the specified magnitude.
        Source is 'USGS' or 'EMSC'
        Mag is a float with default 5.5
        Options for time are 'HOUR', 'DAY', 'WEEK', or 'MONTH' '''
        # sanitize inputs
        if mag < 0 or mag > 11:
            return jsonify({'status_code': 400, 'message':
                            'please enter a magnitude between 0 and 11'})
        # verify that time is a valid input
        if time.upper() not in ['HOUR', 'DAY', 'WEEK', 'MONTH']:
            return jsonify({'status_code': 400,
                            'message': '''please choose from "hour", "day",
                            "week", or "month"'''})
        # check that source is a valid input
        if source.upper() not in ['USGS', 'EMSC']:
            return jsonify({'status_code': 400,
                            'message': source_message})
        # actual querying is delegated to the helper
        message = get_last_quakes(get_now(), source, time, mag)
        num_quakes = len(message)
        message = message if len(message) != 0 else \
            f'no quakes above {mag} in ' + \
            f'{source.upper()} in the last {time.lower()}'
        return jsonify({'status_code': 200,
                        'message': message, 'num_quakes': num_quakes})
    @app.route('/test')
    def testRoute():
        # NOTE(review): debug endpoint with a hard-coded timestamp;
        # presumably left over from development — consider removing.
        response = query_one('SELECT * FROM USGS where time=1582252014390')
        return jsonify(response)
    @app.route('/history/<source>/<lat>,<lon>,<dist>')
    def history(source, lat, lon, dist):
        '''Start at coordinates (lat, lon) find the diagonal coordinates
        with distance (dist) and find earthquakes within that square range'''
        # check that source is a valid input
        if source.upper() not in ['USGS', 'EMSC']:
            return jsonify({'status_code': 400,
                            'message': source_message})
        CONN = connect()
        # Covert lat, lon and dist inputs to floats
        lat = float(lat)
        lon = float(lon)
        dist = float(dist)
        # Get corners from lat and lon
        coordinates = hist(lat, lon, dist)
        lonA = coordinates['lonA']
        latA = coordinates['latA']
        lonB = coordinates['lonB']
        latB = coordinates['latB']
        # clamp to the poles when the box crosses them
        if latA < lat:
            latA = 90.0
        if latB > lat:
            latB = -90.0
        longitude_check = f'(Longitude BETWEEN {lonA} AND {lonB})'
        # wrap the box across the antimeridian when it overflows +/-180
        if lonA < -180:
            lonA = lonA + 360
            longitude_check = f'(Longitude > {lonA} AND Longitude < {lonB})'
        if lonB > 180:
            lonB = lonB - 360
            longitude_check = f'(Longitude > {lonA} AND Longitude < {lonB})'
        # Query to get earthquakes within lat lon range
        # NOTE(security): values are floats/whitelisted here, but prefer
        # parameterized queries.
        history_query = f'''
        SELECT * FROM {source}
        WHERE (Latitude BETWEEN {latB} AND {latA})
        AND {longitude_check};
        '''
        curs = CONN.cursor()
        curs.execute(history_query)
        history = curs.fetchall()
        CONN.close()
        num_quakes = len(history)
        quakes = []
        for quake in history:
            quakes.append(prep_response(quake, source))
        return jsonify({'status_code': 200,
                        'message': quakes,
                        'num_quakes': num_quakes,
                        'boundingA': [latA, lonA],
                        'boundingB': [latB, lonB]})
    @app.route('/zip/<zip>')
    @app.route('/zip/<zip>/<dist>')
    def zip_last(zip, dist=20):
        '''returns the last quake within the given distance of the zip from USGS'''
        search = SearchEngine(simple_zipcode=True)
        loc = search.by_zipcode(zip)
        CONN = connect()
        # Covert lat, lon and dist inputs to floats
        lat = loc.to_dict()['lat']
        lon = loc.to_dict()['lng']
        dist = float(dist)
        # Get corners from lat and lon
        coordinates = hist(lat, lon, dist)
        lonA = coordinates['lonA']
        latA = coordinates['latA']
        lonB = coordinates['lonB']
        latB = coordinates['latB']
        # same pole clamping / antimeridian wrapping as the history route
        if latA < lat:
            latA = 90.0
        if latB > lat:
            latB = -90.0
        longitude_check = f'(Longitude BETWEEN {lonA} AND {lonB})'
        if lonA < -180:
            lonA = lonA + 360
            longitude_check = f'(Longitude > {lonA} AND Longitude < {lonB})'
        if lonB > 180:
            lonB = lonB - 360
            longitude_check = f'(Longitude > {lonA} AND Longitude < {lonB})'
        # Query to get earthquakes within lat lon range
        history_query = f'''
        SELECT * FROM USGS
        WHERE (Latitude BETWEEN {latB} AND {latA})
        AND {longitude_check}
        ORDER BY time desc
        LIMIT 1;
        '''
        curs = CONN.cursor()
        curs.execute(history_query)
        history = curs.fetchall()
        CONN.close()
        num_quakes = len(history)
        quakes = []
        for quake in history:
            quakes.append(prep_response(quake, 'USGS'))
        return jsonify({'status_code': 200,
                        'message': quakes})
    @app.route('/comments/<source>/<quake>', methods=['GET', 'POST', 'DELETE'])
    def comments(source, quake):
        """Read or add per-quake comments (DELETE is routed but unhandled)."""
        CONN = connect()
        curs = CONN.cursor()
        if request.method == 'GET':
            # NOTE(security): source/quake come straight from the URL and
            # are NOT whitelisted here — interpolating them is injectable;
            # use placeholders.
            query = f'''SELECT name, comment
                        FROM comments
                        WHERE source='{source.upper()}' and
                        QuakeID={quake};'''
            curs.execute(query)
            comments = curs.fetchall()
            message = [prep_comments(comment) for comment in comments]
            curs.close()
            CONN.commit()
            CONN.close()
            return jsonify({'status_code': 200,
                            'message': message,
                            'num_comments': len(comments)})
        if request.method == 'POST':
            name = request.form.get('display_name')
            comment = request.form.get('comment')
            # NOTE(security): raw user-supplied text interpolated into SQL —
            # classic injection; must be parameterized.
            insertion = f"INSERT INTO comments (comment, name, QuakeID, source) values ('{comment}', '{name}', {quake}, '{source}');"
            curs.execute(insertion)
            curs.close()
            CONN.commit()
            CONN.close()
            return jsonify({'comment': comment, 'name': name, 'quake': quake, 'source': source})
    return app
|
<reponame>kclemens/epoet<gh_stars>0
import math
import logging
import random
import json
import gzip
class Box(object):
    """Axis-aligned lat/lon bounding box; defaults cover the whole globe."""

    def __init__(self, min_lon=-180.0, min_lat=-90.0, max_lon=180.0, max_lat=90.0):
        self.max_lon = max_lon
        self.max_lat = max_lat
        self.min_lon = min_lon
        self.min_lat = min_lat

    def __repr__(self):
        return 'Box {},{} {},{}'.format(self.min_lat, self.min_lon, self.max_lat, self.max_lon)

    def to_static_view(self):
        """Google static-maps URL outlining this box as a filled path."""
        corners = (
            self.min_lat, self.min_lon,
            self.max_lat, self.min_lon,
            self.max_lat, self.max_lon,
            self.min_lat, self.max_lon,
        )
        return 'http://maps.google.com/maps/api/staticmap?size=500x300&sensor=false&path=color:0x00000000|weight:5|fillcolor:0xFFFF0033|{},{}|{},{}|{},{}|{},{}'.format(*corners)

    def width_and_height_in_m(self):
        """Approximate (width, height) in meters, spherical-earth constants."""
        lon_span = self.max_lon - self.min_lon
        lat_span = self.max_lat - self.min_lat
        return lon_span * (40075000 / 360.0), lat_span * (20005000 / 180.0)

    def sub_box_index(self, lat, lon, box_count):
        """Index of the sub-box containing (lat, lon) in a square subdivision.

        `box_count` must be a perfect square; (lat, lon) must lie inside
        this box (half-open on the max edges).
        """
        per_side = math.sqrt(box_count)
        assert per_side == math.floor(per_side)
        assert self.min_lat <= lat < self.max_lat
        assert self.min_lon <= lon < self.max_lon
        row = math.floor((per_side * (lat - self.min_lat)) / (self.max_lat - self.min_lat))
        col = math.floor((per_side * (lon - self.min_lon)) / (self.max_lon - self.min_lon))
        return int(per_side * row + col)

    def sub_box(self, box_index, box_count):
        """The Box at `box_index` of a box_count-way square subdivision."""
        per_side = math.sqrt(box_count)
        assert per_side == math.floor(per_side)
        row = math.floor(box_index / per_side)
        col = box_index - row * per_side
        assert row < per_side and col < per_side
        lat_step = (self.max_lat - self.min_lat) / per_side
        lon_step = (self.max_lon - self.min_lon) / per_side
        return Box(
            self.min_lon + lon_step * col,
            self.min_lat + lat_step * row,
            self.min_lon + lon_step * (col + 1),
            self.min_lat + lat_step * (row + 1),
        )

    def centroid(self):
        """(lat, lon) of the box center."""
        return (
            self.min_lat + (self.max_lat - self.min_lat) / 2.0,
            self.min_lon + (self.max_lon - self.min_lon) / 2.0,
        )
class BoxIndex(object):
    """Map (lat, lon) coordinates to word sequences ("box names") and back.

    Every line of the poem pattern supplies a quad-length list of term options;
    each chosen term narrows the current box to one cell of a q x q sub-grid.
    """

    def __init__(self, options, iterations=2, outer_box=None):
        """
        :param options: iterable of per-line term option lists
        :param iterations: how many times the full line pattern is applied
        :param outer_box: Box covering the indexable area (whole globe when None)
        """
        def reduce_to_quad_length(collection):
            # Keep only the largest perfect-square prefix so each term list
            # maps exactly onto a q x q grid of sub-boxes.
            collection = list(collection)
            max_count = int(math.pow(math.floor(math.sqrt(len(collection))), 2))
            return collection[:max_count]

        # BUGFIX: materialize a list. On Python 3, map() returns a one-shot
        # iterator, but line_term_options is indexed and re-iterated below.
        self.line_term_options = [reduce_to_quad_length(o) for o in options]
        self.iterations = iterations
        # avoid a shared mutable default argument; build a fresh global box
        self.outer_box = outer_box if outer_box is not None else Box()
        outer_width, outer_height = self.outer_box.width_and_height_in_m()
        # inner box count:
        #   product of possible options for every term on a line
        #   to the power of lines (iterations) specified
        #   square root to get the number of boxes on a line
        # (plain loop replaces the Python-2-only builtin `reduce`)
        inner_box_count = 1
        for terms in self.line_term_options:
            inner_box_count *= len(terms)
        inner_box_count = math.pow(inner_box_count, iterations)
        inner_box_count = math.sqrt(inner_box_count)
        inner_width, inner_height = outer_width / inner_box_count, outer_height / inner_box_count
        self.accuracy = math.sqrt(inner_width * inner_width + inner_height * inner_height) / 2
        logging.info('configured a box index for %s', self.outer_box)
        logging.info('with %.2fm * %.2fm max tile size (%.2fm accuracy)', inner_width, inner_height, self.accuracy)

    def to_box_name(self, lat, lon):
        """Encode a coordinate as a list of terms, one per refinement step."""
        box = self.outer_box
        name = list()
        logging.debug('computing box name for (%.2f, %.2f) and %s', lat, lon, box)
        for _ in range(self.iterations):  # range(): xrange does not exist on Python 3
            for terms in self.line_term_options:
                count = len(terms)
                index = box.sub_box_index(lat, lon, count)
                box = box.sub_box(index, count)
                term = terms[index]
                name.append(term)
                logging.debug('sub box index %d of %d boxes with name %s and %s', index, count, terms[index], box)
        return name

    def from_box_name(self, words):
        """Decode a (possibly partial) list of terms back into the matching Box."""
        box = self.outer_box
        logging.debug('computing lat lon from %s and %s', words, box)
        for i, token in enumerate(words):
            line_index = i % len(self.line_term_options)
            terms = self.line_term_options[line_index]
            index = terms.index(token)
            count = len(terms)
            box = box.sub_box(index, count)
            logging.debug('for "%s" following to sub box %s with index %d of %d', token, box, index, count)
        return box

    def to_file(self, file_name='box_index.json.gz'):
        """Persist the index configuration as gzipped JSON.

        File-format note: min_x/max_x hold latitudes, min_y/max_y longitudes.
        """
        obj = {'box': {'min_x': self.outer_box.min_lat,
                       'min_y': self.outer_box.min_lon,
                       'max_x': self.outer_box.max_lat,
                       'max_y': self.outer_box.max_lon},
               'iterations': self.iterations,
               'options': self.line_term_options}
        # BUGFIX: encode explicitly and close the handle. json.dump writes str,
        # which a binary gzip stream rejects on Python 3, and the old code
        # leaked the file object.
        with gzip.open(file_name, 'wb') as f:
            f.write(json.dumps(obj).encode('utf-8'))

    @classmethod
    def from_file(cls, file_name='box_index.json.gz'):
        """Load an index previously written by :meth:`to_file`."""
        with gzip.open(file_name, 'rb') as f:
            data = json.loads(f.read().decode('utf-8'))
        box = data['box']
        # BUGFIX: min_x/max_x are latitudes but Box() takes (min_lon, min_lat,
        # max_lon, max_lat); the old positional call swapped the two axes.
        return cls(data['options'], data['iterations'],
                   Box(min_lon=box['min_y'], min_lat=box['min_x'],
                       max_lon=box['max_y'], max_lat=box['max_x']))
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)7s - %(message)s')
    # from random_poem import GeoPoet
    # BoxIndex(GeoPoet().generate_line_options()).to_file()
    # BoxIndex(GeoPoet(line_pattern=['^-^', '-^-'], rhyme='EY:fricative:AH:nasal').generate_line_options(), 2).to_file()
    # BoxIndex(GeoPoet(line_pattern=['^-', '-^-', '-^--'], rhyme='AA:liquid:AH:affricate:IY').generate_line_options(), 2).to_file()
    latlons = [(52.5292, 13.3882), (52.4957, 13.3634), (52.5129, 13.3201)]
    indices = [BoxIndex.from_file('rhyme_box_index.json.gz'), BoxIndex.from_file('syllables_box_index.json.gz')]
    for i, index in enumerate(indices, 1):
        # parenthesized single-argument prints run on both Python 2 and 3
        # (the old bare `print` statements are syntax errors on Python 3)
        print('using box index {}'.format(i))
        for lat, lon in latlons:
            print('encoding {},{}'.format(lat, lon))
            tokens = index.to_box_name(lat, lon)
            # BUGFIX: use a dedicated name for the prefix length instead of
            # shadowing `i`, which still names the box-index number above
            for prefix_len in range(len(tokens), 0, -1):
                r_lat, r_lon = index.from_box_name(tokens[:prefix_len]).centroid()
                print('"{}" resolves back to {},{}'.format(' '.join(tokens[:prefix_len]), r_lat, r_lon))
|
<gh_stars>0
"""This module contains the general information for SysdebugDiagnosticLog ManagedObject."""
from ...ucsmo import ManagedObject
from ...ucscoremeta import MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta
class SysdebugDiagnosticLogConsts:
    """Allowed string values for SysdebugDiagnosticLog properties (generated code).

    OPER_STATE_* are the valid values of ``oper_state``; SWITCH_ID_* the valid
    values of the naming property ``switch_id``.
    """
    OPER_STATE_ALLOCATED = "allocated"
    OPER_STATE_CREATED = "created"
    OPER_STATE_UNKNOWN = "unknown"
    SWITCH_ID_A = "A"
    SWITCH_ID_B = "B"
    SWITCH_ID_NONE = "NONE"
class SysdebugDiagnosticLog(ManagedObject):
    """This is SysdebugDiagnosticLog class.

    Auto-generated UCS SDK managed object describing one diagnostic log file.
    The metadata below drives XML (de)serialization and validation in the SDK
    runtime and should not be edited by hand.
    """

    consts = SysdebugDiagnosticLogConsts()
    # properties that appear in the relative name "diag-log-[name]-[switch_id]"
    naming_props = set([u'name', u'switchId'])

    # MoMeta(class name, XML tag, rn format, first supported version, category,
    #        dirty mask, field names, access roles, allowed parents, children, verbs)
    mo_meta = MoMeta("SysdebugDiagnosticLog", "sysdebugDiagnosticLog", "diag-log-[name]-[switch_id]", VersionMeta.Version321d, "InputOutput", 0xff, [], ["admin", "operations"], [u'computeBlade', u'computeRackUnit', u'computeServerUnit', u'sysdebugDiagnosticLogRepository'], [], ["Get", "Set"])

    # Per-property metadata: python name, XML name, type, min version, access,
    # dirty-mask bit, min/max length, value regex, allowed values, ranges.
    prop_meta = {
        "checksum": MoPropertyMeta("checksum", "checksum", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version321d, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "descr": MoPropertyMeta("descr", "descr", "string", VersionMeta.Version321d, MoPropertyMeta.READ_WRITE, 0x4, None, None, r"""[ !#$%&\(\)\*\+,\-\./:;\?@\[\]_\{\|\}~a-zA-Z0-9]{0,256}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "ip": MoPropertyMeta("ip", "ip", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, 0, 256, r"""((([0-9]){1,3}\.){3}[0-9]{1,3})""", [], []),
        "name": MoPropertyMeta("name", "name", "string", VersionMeta.Version321d, MoPropertyMeta.NAMING, 0x10, 1, 128, None, [], []),
        "oper_state": MoPropertyMeta("oper_state", "operState", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, None, ["allocated", "created", "unknown"], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, 0x20, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "size": MoPropertyMeta("size", "size", "uint", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, None, [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version321d, MoPropertyMeta.READ_WRITE, 0x40, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "switch_id": MoPropertyMeta("switch_id", "switchId", "string", VersionMeta.Version321d, MoPropertyMeta.NAMING, 0x80, None, None, None, ["A", "B", "NONE"], []),
        "tftp_uri": MoPropertyMeta("tftp_uri", "tftpUri", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
        "ts": MoPropertyMeta("ts", "ts", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, None, None, r"""([0-9]){4}-([0-9]){2}-([0-9]){2}T([0-9]){2}:([0-9]){2}:([0-9]){2}((\.([0-9]){3})){0,1}""", [], []),
        "uri": MoPropertyMeta("uri", "uri", "string", VersionMeta.Version321d, MoPropertyMeta.READ_ONLY, None, 0, 510, None, [], []),
    }

    # XML attribute name -> python attribute name
    prop_map = {
        "checksum": "checksum",
        "childAction": "child_action",
        "descr": "descr",
        "dn": "dn",
        "ip": "ip",
        "name": "name",
        "operState": "oper_state",
        "rn": "rn",
        "sacl": "sacl",
        "size": "size",
        "status": "status",
        "switchId": "switch_id",
        "tftpUri": "tftp_uri",
        "ts": "ts",
        "uri": "uri",
    }

    def __init__(self, parent_mo_or_dn, name, switch_id, **kwargs):
        """Create the object under *parent_mo_or_dn* with naming properties
        *name* and *switch_id*; remaining writable properties may be supplied
        as keyword arguments handled by ManagedObject."""
        self._dirty_mask = 0
        self.name = name
        self.switch_id = switch_id
        # non-naming properties default to None until read from / written to UCS
        self.checksum = None
        self.child_action = None
        self.descr = None
        self.ip = None
        self.oper_state = None
        self.sacl = None
        self.size = None
        self.status = None
        self.tftp_uri = None
        self.ts = None
        self.uri = None
        ManagedObject.__init__(self, "SysdebugDiagnosticLog", parent_mo_or_dn, **kwargs)
|
'''
Created on 2016/1/8
:author: hubo
'''
from vlcp.config.config import Configurable, config
from vlcp.event.connection import Client
from vlcp.protocol.redis import Redis, RedisConnectionStateEvent, RedisSubscribeMessageEvent,\
RedisReplyException
from contextlib import contextmanager
def _str(b, encoding = 'ascii'):
if isinstance(b, str):
return b
elif isinstance(b, bytes):
return b.decode(encoding)
else:
return str(b)
def _conn(func):
def f(self, container, *args, **kwargs):
for m in self._get_default_connection(container):
yield m
for m in func(self, container, *args, **kwargs):
yield m
return f
class RedisConnectionDown(IOError):
    """Raised when a command is attempted while the Redis connection is down."""
    pass
class RedisConnectionRestarted(RedisConnectionDown):
    """Raised when the connection restarted while locked inside a with scope."""
    pass
@config('redisclient')
class RedisClientBase(Configurable):
    '''
    Connect to Redis server
    '''
    _default_url = 'tcp://localhost/'
    _default_timeout = 10
    _default_db = 0
    def __init__(self, conn = None, parent = None, protocol = None):
        '''
        :param conn: existing connection used as the default connection

        :param parent: owning RedisClient when this instance wraps a pooled connection

        :param protocol: pre-created protocol instance; taken from *parent* when omitted
        '''
        Configurable.__init__(self)
        self._defaultconn = conn
        self._parent = parent
        # None = unlocked; -1 = lock onto the next connmark used; >= 0 = locked connmark
        self._lockconnmark = None
        if protocol:
            self._protocol = protocol
        else:
            if parent:
                self._protocol = parent._protocol
            else:
                self._protocol = Redis()
    def _get_connection(self, container, connection):
        # Wait up to self.timeout for the connection to come up
        if not connection.connected:
            for m in container.waitWithTimeout(self.timeout, self._protocol.statematcher(connection, RedisConnectionStateEvent.CONNECTION_UP, False)):
                yield m
            if container.timeout:
                raise RedisConnectionDown('Disconnected from redis server')
    def _get_default_connection(self, container):
        if not self._defaultconn:
            raise RedisConnectionDown('Not connected to redis server')
        if self._lockconnmark is not None:
            if self._lockconnmark >= 0:
                # Locked to a specific connmark: fail fast instead of waiting if
                # the connection restarted inside the locked scope
                if not self._defaultconn.connected or self._defaultconn.connmark != self._lockconnmark:
                    raise RedisConnectionRestarted('Disconnected from redis server; reconnected is not allowed in with scope')
                else:
                    return
        for m in self._get_connection(container, self._defaultconn):
            yield m
        if self._lockconnmark is not None and self._lockconnmark < 0:
            # First command after locking: pin the current connmark
            self._lockconnmark = self._defaultconn.connmark
    def _shutdown_conn(self, container, connection):
        # Politely QUIT when connected; fall back to a hard shutdown on errors/timeouts
        if connection:
            if connection.connected:
                # Send quit
                try:
                    for m in self._protocol.send_command(connection, container, 'QUIT'):
                        yield m
                    for m in container.waitWithTimeout(1, self._protocol.statematcher(connection)):
                        yield m
                except Exception:
                    for m in connection.shutdown():
                        yield m
                else:
                    if container.timeout:
                        for m in connection.shutdown(True):
                            yield m
                    else:
                        for m in connection.shutdown():
                            yield m
    def shutdown(self, container):
        '''
        Shutdown all connections to Redis server
        '''
        c = self._defaultconn
        self._defaultconn = None
        for m in self._shutdown_conn(container, c):
            yield m
    def release(self, container):
        '''
        Release the connection, leave it to be reused later.
        '''
        if not self._parent:
            for m in self.shutdown(container):
                yield m
        else:
            for m in self._parent._release_conn(container, self._defaultconn):
                yield m
    @contextmanager
    def context(self, container, release = True, lockconn = True):
        '''
        Use with statement to manage the connection

        :params release: if True(default), release the connection when leaving with scope

        :params lockconn: if True(default), do not allow reconnect during with scope;
                          execute commands on a disconnected connection raises Exceptions.
        '''
        # BUGFIX: initialize `locked` unconditionally. It was previously assigned
        # only when this context acquired the lock, so the finally block raised
        # UnboundLocalError whenever lockconn was False or a lock was already held.
        locked = False
        try:
            if lockconn:
                if self._lockconnmark is None:
                    # Lock next connmark
                    self._lockconnmark = -1
                    locked = True
            yield self
        finally:
            # BUGFIX: only the context that actually acquired the lock clears it;
            # the old unconditional reset also released a lock held by an outer
            # context.
            if locked:
                self._lockconnmark = None
            if release:
                container.subroutine(self.release(container), False)
    @_conn
    def execute_command(self, container, *args):
        '''
        execute command on current connection
        '''
        for m in self._protocol.execute_command(self._defaultconn, container, *args):
            yield m
    @_conn
    def batch_execute(self, container, *cmds):
        '''
        execute a batch of commands on current connection in pipeline mode
        '''
        for m in self._protocol.batch_execute(self._defaultconn, container, *cmds):
            yield m
    def register_script(self, container, script):
        '''
        register a script to this connection.

        :returns: registered script. This is a tuple (sha1, script). Pass the tuple to
                  eval_registered, ensure_registerd as registerd_script parameter.
        '''
        # Scripts shorter than an EVALSHA round trip are cheaper to send inline
        if len(script) < 43:
            container.retvalue = (None, script)
        else:
            for m in self.execute_command(container, 'SCRIPT', 'LOAD', script):
                yield m
            container.retvalue = (container.retvalue, script)
    def eval_registered(self, container, registerd_script, *args):
        '''
        eval a registered script. If the script is not cached on the server, it is automatically cached.
        '''
        if registerd_script[0]:
            try:
                for m in self.execute_command(container, 'EVALSHA', registerd_script[0], *args):
                    yield m
            except RedisReplyException as exc:
                if exc.subtype == 'NOSCRIPT':
                    # Server lost the cached script (e.g. SCRIPT FLUSH): send the source
                    for m in self.execute_command(container, 'EVAL', registerd_script[1], *args):
                        yield m
                else:
                    raise
        else:
            for m in self.execute_command(container, 'EVAL', registerd_script[1], *args):
                yield m
    def ensure_registerd(self, container, *scripts):
        '''
        Ensure that these scripts are cached on the server. Important when using scripts with batch_execute.

        :param container: routine container.

        :param *scripts: registered script tuples, return value of register_script
        '''
        loading = dict((s[0], s[1]) for s in scripts if s[0])
        if loading:
            keys = list(loading.keys())
            for m in self.execute_command(container, 'SCRIPT', 'EXISTS', *keys):
                yield m
            r = container.retvalue
            cmds = [('SCRIPT', 'LOAD', s) for s in (loading[keys[i]] for i in range(0, len(keys)) if not r[i])]
            if cmds:
                # BUGFIX: unpack the command list; batch_execute takes *cmds, so
                # passing the list itself pipelined a single malformed command
                for m in self.batch_execute(container, *cmds):
                    yield m
class RedisClient(RedisClientBase):
    def __init__(self, url = None, db = None, protocol = None):
        '''
        Redis client to communicate with Redis server. Several connections are created for different functions.

        :param url: connectiom url, e.g. 'tcp://localhost/'.
                    If not specified, redisclient.url in configuration is used

        :param db: default database. If not specified, redisclient.db in configuration is used,
                   which defaults to 0.

        :param protocol: use a pre-created protocol instance instead of creating a new instance
        '''
        RedisClientBase.__init__(self, protocol=protocol)
        if url:
            self.url = url
        if db is not None:
            self.db = db
        self._subscribeconn = None       # dedicated connection for (P)SUBSCRIBE traffic
        self._subscribecounter = {}      # channel -> number of active subscribes
        self._psubscribecounter = {}     # glob pattern -> number of active psubscribes
        self._connpool = []              # released exclusive connections, ready for reuse
        self._shutdown = False
    def _create_client(self, container):
        # Create and start a new client connection (TLS parameters are optional config)
        if self._shutdown:
            raise IOError('RedisClient already shutdown')
        conn = Client(self.url, self._protocol, container.scheduler,
                      getattr(self, 'key', None),
                      getattr(self, 'certificate', None),
                      getattr(self, 'ca_certs', None))
        conn.start()
        return conn
    def _get_default_connection(self, container):
        if not self._defaultconn:
            # Lazily create the default connection and select the configured db
            self._defaultconn = self._create_client(container)
            for m in RedisClientBase._get_default_connection(self, container):
                yield m
            for m in self._protocol.send_command(self._defaultconn, container, 'SELECT', str(self.db)):
                yield m
        else:
            for m in RedisClientBase._get_default_connection(self, container):
                yield m
    def _get_subscribe_connection(self, container):
        if not self._subscribeconn:
            self._subscribeconn = self._create_client(container)
        for m in RedisClientBase._get_connection(self, container, self._subscribeconn):
            yield m
    def get_connection(self, container):
        '''
        Get an exclusive connection, useful for blocked commands and transactions.

        You must call release or shutdown (not recommanded) to return the connection after use.

        :param container: routine container

        :returns: RedisClientBase object, with some commands same as RedisClient like execute_command,
                  batch_execute, register_script etc.
        '''
        if self._connpool:
            conn = self._connpool.pop()
            container.retvalue = RedisClientBase(conn, self)
        else:
            conn = self._create_client(container)
            for m in RedisClientBase._get_connection(self, container, conn):
                yield m
            for m in self._protocol.send_command(conn, container, 'SELECT', str(self.db)):
                yield m
            container.retvalue = RedisClientBase(conn, self)
    def _release_conn(self, container, connection):
        # Return an exclusive connection to the pool, or close it when unusable
        if connection:
            if self._shutdown or not connection.connected:
                for m in self._shutdown_conn(container, connection):
                    yield m
            else:
                # NOTE(review): redis_select is presumably maintained by the
                # protocol on SELECT — verify; re-select default db before pooling
                if connection.redis_select != str(self.db):
                    for m in self._protocol.send_command(connection, container, 'SELECT', str(self.db)):
                        yield m
                self._connpool.append(connection)
    def execute_command(self, container, *args):
        '''
        Execute command on Redis server:

        - For (P)SUBSCRIBE/(P)UNSUBSCRIBE, the command is sent to the subscribe connection.
          It is recommended to use (p)subscribe/(p)unsubscribe method instead of directly call the command

        - For BLPOP, BRPOP, BRPOPLPUSH, the command is sent to a separated connection. The connection is
          recycled after command returns.

        - For other commands, the command is sent to the default connection.
        '''
        if args:
            cmd = _str(args[0]).upper()
            if cmd in ('SUBSCRIBE', 'UNSUBSCRIBE', 'PSUBSCRIBE', 'PUNSUBSCRIBE'):
                for m in self._get_subscribe_connection(container):
                    yield m
                for m in self._protocol.execute_command(self._subscribeconn, container, *args):
                    yield m
                return
            elif cmd in ('BLPOP', 'BRPOP', 'BRPOPLPUSH'):
                # Blocking commands use an exclusive pooled connection
                for m in self.get_connection(container):
                    yield m
                c = container.retvalue
                with c.context(container):
                    for m in c.execute_command(container, *args):
                        yield m
                    r = container.retvalue
                # BUGFIX: restore the command result after leaving the context;
                # the context exit schedules a release subroutine that may
                # overwrite container.retvalue
                container.retvalue = r
                return
        for m in RedisClientBase.execute_command(self, container, *args):
            yield m
    def subscribe(self, container, *keys):
        '''
        Subscribe to specified channels

        :param container: routine container

        :param *keys: subscribed channels

        :returns: list of event matchers for the specified channels
        '''
        for m in self._get_subscribe_connection(container):
            yield m
        realkeys = []
        for k in keys:
            count = self._subscribecounter.get(k, 0)
            if count == 0:
                realkeys.append(k)
            self._subscribecounter[k] = count + 1
        if realkeys:
            for m in self._protocol.execute_command(self._subscribeconn, container, 'SUBSCRIBE', *realkeys):
                yield m
        container.retvalue = [self._protocol.subscribematcher(self._subscribeconn, k, None, RedisSubscribeMessageEvent.MESSAGE) for k in keys]
    def unsubscribe(self, container, *keys):
        '''
        Unsubscribe specified channels. Every subscribed key should be unsubscribed exactly once, even if duplicated subscribed.

        :param container: routine container

        :param *keys: subscribed channels
        '''
        for m in self._get_subscribe_connection(container):
            yield m
        realkeys = []
        for k in keys:
            count = self._subscribecounter.get(k, 0)
            if count <= 1:
                realkeys.append(k)
                try:
                    del self._subscribecounter[k]
                except KeyError:
                    pass
            else:
                self._subscribecounter[k] = count - 1
        if realkeys:
            for m in self._protocol.execute_command(self._subscribeconn, container, 'UNSUBSCRIBE', *realkeys):
                yield m
        container.retvalue = None
    def psubscribe(self, container, *keys):
        '''
        Subscribe to specified globs

        :param container: routine container

        :param *keys: subscribed globs

        :returns: list of event matchers for the specified globs
        '''
        for m in self._get_subscribe_connection(container):
            yield m
        realkeys = []
        for k in keys:
            count = self._psubscribecounter.get(k, 0)
            if count == 0:
                realkeys.append(k)
            self._psubscribecounter[k] = count + 1
        # BUGFIX: skip the command when every pattern was already subscribed
        # (mirrors subscribe(); an argument-less PSUBSCRIBE is a protocol error)
        if realkeys:
            for m in self._protocol.execute_command(self._subscribeconn, container, 'PSUBSCRIBE', *realkeys):
                yield m
        container.retvalue = [self._protocol.subscribematcher(self._subscribeconn, k, None, RedisSubscribeMessageEvent.PMESSAGE) for k in keys]
    def punsubscribe(self, container, *keys):
        '''
        Unsubscribe specified globs. Every subscribed glob should be unsubscribed exactly once, even if duplicated subscribed.

        :param container: routine container

        :param *keys: subscribed globs
        '''
        for m in self._get_subscribe_connection(container):
            yield m
        realkeys = []
        for k in keys:
            # BUGFIX: pattern subscriptions are tracked in _psubscribecounter
            # (the counter psubscribe increments), not the channel counter
            count = self._psubscribecounter.get(k, 0)
            if count <= 1:
                realkeys.append(k)
                try:
                    del self._psubscribecounter[k]
                except KeyError:
                    pass
            else:
                self._psubscribecounter[k] = count - 1
        # BUGFIX: only send patterns whose last reference was dropped, and skip
        # the command entirely when there are none (mirrors unsubscribe())
        if realkeys:
            for m in self._protocol.execute_command(self._subscribeconn, container, 'PUNSUBSCRIBE', *realkeys):
                yield m
        container.retvalue = None
    def shutdown(self, container):
        '''
        Shutdown all connections. Exclusive connections created by get_connection will shutdown after release()
        '''
        p = self._connpool
        self._connpool = []
        self._shutdown = True
        if self._defaultconn:
            p.append(self._defaultconn)
            self._defaultconn = None
        if self._subscribeconn:
            p.append(self._subscribeconn)
            self._subscribeconn = None
        for m in container.executeAll([self._shutdown_conn(container, o)
                                       for o in p]):
            yield m
    class _RedisConnection(object):
        # Adapter exposing a Connection-like interface so a RedisClient can be
        # registered in module.connections and shut down on module unloading
        def __init__(self, client, container):
            self._client = client
            self._container = container
        def start(self, asyncstart = False):
            pass
        def shutdown(self):
            if self._client:
                try:
                    for m in self._client.shutdown(self._container):
                        yield m
                finally:
                    self._client = None
                    self._container = None
    def make_connobj(self, container):
        '''
        Return an object to be used like a connection. Put the connection-like object in module.connections
        to make RedisClient shutdown on module unloading.
        '''
        return self._RedisConnection(self, container)
    def subscribe_state_matcher(self, container, connected = True):
        '''
        Return a matcher to match the subscribe connection status.

        :param container: a routine container. NOTICE: this method is not a routine.

        :param connected: if True, the matcher matches connection up. If False, the matcher matches
                          connection down.

        :returns: an event matcher.
        '''
        if not self._subscribeconn:
            self._subscribeconn = self._create_client(container)
        return RedisConnectionStateEvent.createMatcher(
            RedisConnectionStateEvent.CONNECTION_UP if connected else RedisConnectionStateEvent.CONNECTION_DOWN,
            self._subscribeconn
        )
|
<filename>tensorf/network.py<gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import time
################## sh function ##################
# Hard-coded coefficients of the real spherical-harmonic basis functions,
# degrees 0 through 4, used by eval_sh / eval_sh_bases below.
C0 = 0.28209479177387814   # degree 0 (constant band)
C1 = 0.4886025119029199    # degree 1
C2 = [  # degree 2, bands m = -2..2
    1.0925484305920792,
    -1.0925484305920792,
    0.31539156525252005,
    -1.0925484305920792,
    0.5462742152960396
]
C3 = [  # degree 3, bands m = -3..3
    -0.5900435899266435,
    2.890611442640554,
    -0.4570457994644658,
    0.3731763325901154,
    -0.4570457994644658,
    1.445305721320277,
    -0.5900435899266435
]
C4 = [  # degree 4, bands m = -4..4
    2.5033429417967046,
    -1.7701307697799304,
    0.9461746957575601,
    -0.6690465435572892,
    0.10578554691520431,
    -0.6690465435572892,
    0.47308734787878004,
    -1.7701307697799304,
    0.6258357354491761,
]
def eval_sh(deg, sh, dirs):
    """
    Evaluate spherical harmonics at unit directions
    using hardcoded SH polynomials.
    Works with torch/np/jnp.
    ... Can be 0 or more batch dimensions.
    :param deg: int SH max degree. Currently, 0-4 supported
    :param sh: torch.Tensor SH coeffs (..., C, (max degree + 1) ** 2)
    :param dirs: torch.Tensor unit directions (..., 3)
    :return: (..., C)
    """
    assert deg <= 4 and deg >= 0
    assert (deg + 1) ** 2 == sh.shape[-1]
    C = sh.shape[-2]  # channel count (not used below; broadcasting handles shapes)
    # degree 0: constant band
    result = C0 * sh[..., 0]
    if deg > 0:
        # keep a trailing singleton dim so x/y/z broadcast against sh[..., i]
        x, y, z = dirs[..., 0:1], dirs[..., 1:2], dirs[..., 2:3]
        result = (result -
                  C1 * y * sh[..., 1] +
                  C1 * z * sh[..., 2] -
                  C1 * x * sh[..., 3])
        # each higher degree adds its band polynomials; branches are cumulative,
        # so the deg > 1 block reuses x/y/z defined above
        if deg > 1:
            xx, yy, zz = x * x, y * y, z * z
            xy, yz, xz = x * y, y * z, x * z
            result = (result +
                      C2[0] * xy * sh[..., 4] +
                      C2[1] * yz * sh[..., 5] +
                      C2[2] * (2.0 * zz - xx - yy) * sh[..., 6] +
                      C2[3] * xz * sh[..., 7] +
                      C2[4] * (xx - yy) * sh[..., 8])

            if deg > 2:
                result = (result +
                          C3[0] * y * (3 * xx - yy) * sh[..., 9] +
                          C3[1] * xy * z * sh[..., 10] +
                          C3[2] * y * (4 * zz - xx - yy)* sh[..., 11] +
                          C3[3] * z * (2 * zz - 3 * xx - 3 * yy) * sh[..., 12] +
                          C3[4] * x * (4 * zz - xx - yy) * sh[..., 13] +
                          C3[5] * z * (xx - yy) * sh[..., 14] +
                          C3[6] * x * (xx - 3 * yy) * sh[..., 15])

                if deg > 3:
                    result = (result + C4[0] * xy * (xx - yy) * sh[..., 16] +
                              C4[1] * yz * (3 * xx - yy) * sh[..., 17] +
                              C4[2] * xy * (7 * zz - 1) * sh[..., 18] +
                              C4[3] * yz * (7 * zz - 3) * sh[..., 19] +
                              C4[4] * (zz * (35 * zz - 30) + 3) * sh[..., 20] +
                              C4[5] * xz * (7 * zz - 3) * sh[..., 21] +
                              C4[6] * (xx - yy) * (7 * zz - 1) * sh[..., 22] +
                              C4[7] * xz * (xx - 3 * yy) * sh[..., 23] +
                              C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy)) * sh[..., 24])
    return result
def eval_sh_bases(deg, dirs):
    """
    Evaluate spherical harmonics bases at unit directions,
    without taking linear combination.
    At each point, the final result may the be
    obtained through simple multiplication.
    :param deg: int SH max degree. Currently, 0-4 supported
    :param dirs: torch.Tensor (..., 3) unit directions
    :return: torch.Tensor (..., (deg+1) ** 2)
    """
    assert 0 <= deg <= 4
    basis = torch.empty((*dirs.shape[:-1], (deg + 1) ** 2), dtype=dirs.dtype, device=dirs.device)
    # degree 0: constant band
    basis[..., 0] = C0
    if deg > 0:
        x, y, z = dirs.unbind(-1)
        # degree 1
        basis[..., 1] = -C1 * y
        basis[..., 2] = C1 * z
        basis[..., 3] = -C1 * x
        if deg > 1:
            xx, yy, zz = x * x, y * y, z * z
            xy, yz, xz = x * y, y * z, x * z
            # degree 2
            basis[..., 4] = C2[0] * xy
            basis[..., 5] = C2[1] * yz
            basis[..., 6] = C2[2] * (2.0 * zz - xx - yy)
            basis[..., 7] = C2[3] * xz
            basis[..., 8] = C2[4] * (xx - yy)
            if deg > 2:
                # degree 3
                basis[..., 9] = C3[0] * y * (3 * xx - yy)
                basis[..., 10] = C3[1] * xy * z
                basis[..., 11] = C3[2] * y * (4 * zz - xx - yy)
                basis[..., 12] = C3[3] * z * (2 * zz - 3 * xx - 3 * yy)
                basis[..., 13] = C3[4] * x * (4 * zz - xx - yy)
                basis[..., 14] = C3[5] * z * (xx - yy)
                basis[..., 15] = C3[6] * x * (xx - 3 * yy)
                if deg > 3:
                    # degree 4
                    basis[..., 16] = C4[0] * xy * (xx - yy)
                    basis[..., 17] = C4[1] * yz * (3 * xx - yy)
                    basis[..., 18] = C4[2] * xy * (7 * zz - 1)
                    basis[..., 19] = C4[3] * yz * (7 * zz - 3)
                    basis[..., 20] = C4[4] * (zz * (35 * zz - 30) + 3)
                    basis[..., 21] = C4[5] * xz * (7 * zz - 3)
                    basis[..., 22] = C4[6] * (xx - yy) * (7 * zz - 1)
                    basis[..., 23] = C4[7] * xz * (xx - 3 * yy)
                    basis[..., 24] = C4[8] * (xx * (xx - 3 * yy) - yy * (3 * xx - yy))
    return basis
def positional_encoding(positions, freqs):
    """NeRF-style sinusoidal encoding: (..., D) -> (..., 2 * D * freqs)."""
    octaves = (2 ** torch.arange(freqs).float()).to(positions.device)  # (F,)
    scaled = positions[..., None] * octaves  # (..., D, F)
    scaled = scaled.reshape(positions.shape[:-1] + (freqs * positions.shape[-1],))  # (..., D*F)
    return torch.cat([torch.sin(scaled), torch.cos(scaled)], dim=-1)
def raw2alpha(sigma, dist):
    """Turn density * step length into per-sample alpha, compositing weights
    and the transmittance left after the final sample."""
    # sigma, dist [N_rays, N_samples]
    alpha = 1. - torch.exp(-sigma * dist)
    leading_ones = torch.ones(alpha.shape[0], 1).to(alpha.device)
    # 1e-10 keeps cumprod away from exact zeros
    transmittance = torch.cumprod(torch.cat([leading_ones, 1. - alpha + 1e-10], -1), -1)
    weights = alpha * transmittance[:, :-1]  # [N_rays, N_samples]
    return alpha, weights, transmittance[:, -1:]
def SHRender(xyz_sampled, viewdirs, features):
    """Decode per-point SH coefficients into view-dependent RGB (degree-2 basis)."""
    basis = eval_sh_bases(2, viewdirs)[:, None]
    coeffs = features.view(-1, 3, basis.shape[-1])
    return torch.relu(torch.sum(basis * coeffs, dim=-1) + 0.5)
def RGBRender(xyz_sampled, viewdirs, features):
    """Identity shading: the appearance features already are RGB values."""
    return features
class AlphaGridMask(torch.nn.Module):
    """Occupancy grid sampled along rays so empty space can be skipped."""

    def __init__(self, device, aabb, alpha_volume):
        super(AlphaGridMask, self).__init__()
        self.device = device
        self.aabb = aabb.to(self.device)
        self.aabbSize = self.aabb[1] - self.aabb[0]
        # scale that maps an aabb offset to the [-1, 1] range expected by grid_sample
        self.invgridSize = 1.0 / self.aabbSize * 2
        # store as (1, 1, D, H, W) so it can be fed straight into grid_sample
        self.alpha_volume = alpha_volume.view(1, 1, *alpha_volume.shape[-3:])
        # grid_sample indexes in xyz order, i.e. the volume shape reversed
        self.gridSize = torch.LongTensor(list(alpha_volume.shape[-3:])[::-1]).to(self.device)

    def sample_alpha(self, xyz_sampled):
        """Trilinearly sample the alpha volume at world-space points (N, 3)."""
        grid_coords = self.normalize_coord(xyz_sampled)
        return F.grid_sample(self.alpha_volume, grid_coords.view(1, -1, 1, 1, 3),
                             align_corners=True).view(-1)

    def normalize_coord(self, xyz_sampled):
        """Map world coordinates inside the aabb into the [-1, 1] cube."""
        return (xyz_sampled - self.aabb[0]) * self.invgridSize - 1
class MLPRender_Fea(torch.nn.Module):
    """MLP shading head with positional encodings on features and view directions."""

    def __init__(self, inChanel, viewpe=6, feape=6, featureC=128):
        super(MLPRender_Fea, self).__init__()
        # raw viewdir (3) + raw features (inChanel) + their positional encodings
        self.in_mlpC = 2 * viewpe * 3 + 2 * feape * inChanel + 3 + inChanel
        self.viewpe = viewpe
        self.feape = feape
        self.mlp = torch.nn.Sequential(
            torch.nn.Linear(self.in_mlpC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, 3),
        )
        # start the output layer with zero bias
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        pieces = [features, viewdirs]
        if self.feape > 0:
            pieces.append(positional_encoding(features, self.feape))
        if self.viewpe > 0:
            pieces.append(positional_encoding(viewdirs, self.viewpe))
        return torch.sigmoid(self.mlp(torch.cat(pieces, dim=-1)))
class MLPRender_PE(torch.nn.Module):
    """MLP shading head with positional encodings on sample position and view direction.

    Input layout per sample: features (inChanel) | raw viewdir (3) | raw position (3)
    | PE(position) (2*pospe*3, when pospe > 0) | PE(viewdir) (2*viewpe*3, when viewpe > 0).
    """

    def __init__(self, inChanel, viewpe=6, pospe=6, featureC=128):
        super(MLPRender_PE, self).__init__()
        # (3 + 2*viewpe*3) raw + encoded view direction,
        # (3 + 2*pospe*3) raw + encoded position, plus the appearance features
        self.in_mlpC = (3+2*viewpe*3) + (3+2*pospe*3) + inChanel
        self.viewpe = viewpe
        self.pospe = pospe
        layer1 = torch.nn.Linear(self.in_mlpC, featureC)
        layer2 = torch.nn.Linear(featureC, featureC)
        layer3 = torch.nn.Linear(featureC, 3)
        self.mlp = torch.nn.Sequential(layer1, torch.nn.ReLU(inplace=True), layer2, torch.nn.ReLU(inplace=True), layer3)
        # start the output layer with zero bias
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        # BUGFIX: in_mlpC budgets 3 inputs for the raw sample position, but the
        # original forward never concatenated `pts`, so the first Linear layer
        # failed with a shape mismatch on every call. Feed the raw position too.
        indata = [features, viewdirs, pts]
        if self.pospe > 0:
            indata += [positional_encoding(pts, self.pospe)]
        if self.viewpe > 0:
            indata += [positional_encoding(viewdirs, self.viewpe)]
        mlp_in = torch.cat(indata, dim=-1)
        rgb = self.mlp(mlp_in)
        rgb = torch.sigmoid(rgb)
        return rgb
class MLPRender(torch.nn.Module):
    """MLP shading head with a positional encoding on the view direction only."""

    def __init__(self, inChanel, viewpe=6, featureC=128):
        super(MLPRender, self).__init__()
        # raw viewdir (3) + its encoding + appearance features
        self.in_mlpC = (3 + 2 * viewpe * 3) + inChanel
        self.viewpe = viewpe
        self.mlp = torch.nn.Sequential(
            torch.nn.Linear(self.in_mlpC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, featureC),
            torch.nn.ReLU(inplace=True),
            torch.nn.Linear(featureC, 3),
        )
        # start the output layer with zero bias
        torch.nn.init.constant_(self.mlp[-1].bias, 0)

    def forward(self, pts, viewdirs, features):
        pieces = [features, viewdirs]
        if self.viewpe > 0:
            pieces.append(positional_encoding(viewdirs, self.viewpe))
        return torch.sigmoid(self.mlp(torch.cat(pieces, dim=-1)))
class TensorBase(torch.nn.Module):
    def __init__(self, aabb, gridSize, device, density_n_comp = 8, appearance_n_comp = 24, app_dim = 27,
                    shadingMode = 'MLP_PE', alphaMask = None, near_far=[2.0,6.0],
                    density_shift = -10, alphaMask_thres=0.08, distance_scale=25, rayMarch_weight_thres=0.0001,
                    pos_pe = 6, view_pe = 6, fea_pe = 6, featureC=128, step_ratio=2.0,
                    fea2denseAct = 'softplus'):
        """Base class for tensor-factorized radiance fields.

        :param aabb: (2, 3) tensor with the scene bounding box [min; max]
        :param gridSize: per-axis voxel resolution of the feature grids
        :param device: torch device the grids and render module live on
        :param density_n_comp / appearance_n_comp: component counts of the factorization
        :param app_dim: appearance feature dimension fed to the shading module
        :param shadingMode: one of 'MLP_PE', 'MLP_Fea', 'MLP', 'SH', 'RGB'
        :param near_far: ray near/far planes (NOTE(review): mutable default;
            never mutated here, but verify subclasses before relying on that)
        """
        super(TensorBase, self).__init__()
        self.density_n_comp = density_n_comp
        self.app_n_comp = appearance_n_comp
        self.app_dim = app_dim
        self.aabb = aabb
        self.alphaMask = alphaMask
        self.device=device
        # density post-processing parameters used by the feature-to-density activation
        self.density_shift = density_shift
        self.alphaMask_thres = alphaMask_thres
        self.distance_scale = distance_scale
        self.rayMarch_weight_thres = rayMarch_weight_thres
        self.fea2denseAct = fea2denseAct
        self.near_far = near_far
        self.step_ratio = step_ratio
        # derive step size / sample counts from the grid resolution
        self.update_stepSize(gridSize)
        # plane/line decomposition layout: matMode lists the plane axes,
        # vecMode the complementary line axis for each factor
        self.matMode = [[0,1], [0,2], [1,2]]
        self.vecMode = [2, 1, 0]
        self.comp_w = [1,1,1]
        self.init_svd_volume(gridSize[0], device)
        self.shadingMode, self.pos_pe, self.view_pe, self.fea_pe, self.featureC = shadingMode, pos_pe, view_pe, fea_pe, featureC
        self.init_render_func(shadingMode, pos_pe, view_pe, fea_pe, featureC, device)
def init_render_func(self, shadingMode, pos_pe, view_pe, fea_pe, featureC, device):
if shadingMode == 'MLP_PE':
self.renderModule = MLPRender_PE(self.app_dim, view_pe, pos_pe, featureC).to(device)
elif shadingMode == 'MLP_Fea':
self.renderModule = MLPRender_Fea(self.app_dim, view_pe, fea_pe, featureC).to(device)
elif shadingMode == 'MLP':
self.renderModule = MLPRender(self.app_dim, view_pe, featureC).to(device)
elif shadingMode == 'SH':
self.renderModule = SHRender
elif shadingMode == 'RGB':
assert self.app_dim == 3
self.renderModule = RGBRender
else:
print("Unrecognized shading module")
exit()
print("pos_pe", pos_pe, "view_pe", view_pe, "fea_pe", fea_pe)
print(self.renderModule)
    def update_stepSize(self, gridSize):
        """Recompute aabb-derived quantities and the ray-marching step for *gridSize*.

        Sets aabbSize/invaabbSize, gridSize (LongTensor), per-voxel units,
        stepSize (mean voxel size * step_ratio) and nSamples (samples needed
        to cross the aabb diagonal at that step size).
        """
        print("aabb", self.aabb.view(-1))
        print("grid size", gridSize)
        self.aabbSize = self.aabb[1] - self.aabb[0]
        # scale mapping an aabb offset into the [-1, 1] normalized cube
        self.invaabbSize = 2.0/self.aabbSize
        self.gridSize= torch.LongTensor(gridSize).to(self.device)
        # world-space size of one voxel along each axis
        self.units=self.aabbSize / (self.gridSize-1)
        self.stepSize=torch.mean(self.units)*self.step_ratio
        self.aabbDiag = torch.sqrt(torch.sum(torch.square(self.aabbSize)))
        # enough samples to traverse the full diagonal, plus one
        self.nSamples=int((self.aabbDiag / self.stepSize).item()) + 1
        print("sampling step size: ", self.stepSize)
        print("sampling number: ", self.nSamples)
    def init_svd_volume(self, res, device):
        """Hook: allocate the factorized feature volumes; implemented by subclasses."""
        pass
    def compute_features(self, xyz_sampled):
        """Hook: evaluate the factorized features at sample points; implemented by subclasses."""
        pass
    def compute_densityfeature(self, xyz_sampled):
        """Hook: evaluate the density feature at sample points; implemented by subclasses."""
        pass
    def compute_appfeature(self, xyz_sampled):
        """Hook: evaluate the appearance feature at sample points; implemented by subclasses."""
        pass
def normalize_coord(self, xyz_sampled):
return (xyz_sampled-self.aabb[0]) * self.invaabbSize - 1
    def get_optparam_groups(self, lr_init_spatial = 0.02, lr_init_network = 0.001):
        """Hook: return optimizer parameter groups (spatial grids vs. network); implemented by subclasses."""
        pass
def get_kwargs(self):
    """Collect the constructor keyword arguments needed to rebuild this
    model from a checkpoint (see get_state_dict / load)."""
    return dict(
        aabb=self.aabb,
        gridSize=self.gridSize.tolist(),
        density_n_comp=self.density_n_comp,
        appearance_n_comp=self.app_n_comp,
        app_dim=self.app_dim,
        density_shift=self.density_shift,
        alphaMask_thres=self.alphaMask_thres,
        distance_scale=self.distance_scale,
        rayMarch_weight_thres=self.rayMarch_weight_thres,
        fea2denseAct=self.fea2denseAct,
        near_far=self.near_far,
        step_ratio=self.step_ratio,
        shadingMode=self.shadingMode,
        pos_pe=self.pos_pe,
        view_pe=self.view_pe,
        fea_pe=self.fea_pe,
        featureC=self.featureC,
    )
def get_state_dict(self):
    """Build a checkpoint dict: constructor kwargs plus module weights.

    If an alpha mask exists, its boolean volume is bit-packed with
    np.packbits to keep the checkpoint small; shape and aabb are stored
    alongside so `load` can reconstruct it.
    """
    kwargs = self.get_kwargs()
    ckpt = {'kwargs': kwargs, 'state_dict': self.state_dict()}
    if self.alphaMask is not None:
        alpha_volume = self.alphaMask.alpha_volume.bool().cpu().numpy()
        ckpt.update({'alphaMask.shape':alpha_volume.shape})
        # 8 voxels per byte; unpacked again in load() using alphaMask.shape.
        ckpt.update({'alphaMask.mask':np.packbits(alpha_volume.reshape(-1))})
        ckpt.update({'alphaMask.aabb': self.alphaMask.aabb.cpu()})
    return ckpt
def load(self, ckpt):
    """Restore model weights (and the bit-packed alpha mask, if present)
    from a checkpoint produced by get_state_dict."""
    if 'alphaMask.aabb' in ckpt.keys():
        # unpackbits may yield trailing pad bits; truncate to the true volume size.
        length = np.prod(ckpt['alphaMask.shape'])
        alpha_volume = torch.from_numpy(np.unpackbits(ckpt['alphaMask.mask'])[:length].reshape(ckpt['alphaMask.shape']))
        self.alphaMask = AlphaGridMask(self.device, ckpt['alphaMask.aabb'].to(self.device), alpha_volume.float().to(self.device))
    self.load_state_dict(ckpt['state_dict'])
def sample_ray_ndc(self, rays_o, rays_d, is_train=True, N_samples=-1):
    """Sample points uniformly in depth along NDC-space rays.

    Returns (sample points, their depths, in-bbox mask). During training the
    uniform depths are jittered by up to one bin for stochastic sampling.
    """
    n_pts = N_samples if N_samples > 0 else self.nSamples
    near, far = self.near_far
    depths = torch.linspace(near, far, n_pts).unsqueeze(0).to(rays_o)
    if is_train:
        depths = depths + torch.rand_like(depths).to(rays_o) * ((far - near) / n_pts)
    pts = rays_o[..., None, :] + rays_d[..., None, :] * depths[..., None]
    outside = ((self.aabb[0] > pts) | (pts > self.aabb[1])).any(dim=-1)
    return pts, depths, ~outside
def sample_ray(self, rays_o, rays_d, is_train=True, N_samples=-1):
    """Sample points along world-space rays, starting where each ray enters
    the aabb and stepping by self.stepSize.

    Returns (sample points, their ray parameters t, in-bbox mask).
    """
    N_samples = N_samples if N_samples>0 else self.nSamples
    stepsize = self.stepSize
    near, far = self.near_far
    # Avoid division by zero for axis-parallel rays.
    vec = torch.where(rays_d==0, torch.full_like(rays_d, 1e-6), rays_d)
    rate_a = (self.aabb[1] - rays_o) / vec
    rate_b = (self.aabb[0] - rays_o) / vec
    # Slab test: t at which the ray first enters the box, clamped to [near, far].
    t_min = torch.minimum(rate_a, rate_b).amax(-1).clamp(min=near, max=far)
    rng = torch.arange(N_samples)[None].float()
    if is_train:
        # Per-ray jitter: one random offset shared by all samples on a ray.
        rng = rng.repeat(rays_d.shape[-2],1)
        rng += torch.rand_like(rng[:,[0]])
    step = stepsize * rng.to(rays_o.device)
    interpx = (t_min[...,None] + step)
    rays_pts = rays_o[...,None,:] + rays_d[...,None,:] * interpx[...,None]
    mask_outbbox = ((self.aabb[0]>rays_pts) | (rays_pts>self.aabb[1])).any(dim=-1)
    return rays_pts, interpx, ~mask_outbbox
def shrink(self, new_aabb, voxel_size):
    """Crop the decomposition tensors to a tighter aabb; subclass hook."""
    pass
@torch.no_grad()
def updateAlphaMask(self, gridSize=(200,200,200)):
    """Bake a binary alpha occupancy grid from the current density field and
    return a tightened aabb covering the occupied voxels.

    :param gridSize: resolution of the occupancy grid to evaluate
    :return: new aabb (2, 3) spanning the occupied region
    """
    total_voxels = gridSize[0] * gridSize[1] * gridSize[2]
    # Normalized [0,1] lattice over the grid, then lerped into the aabb.
    samples = torch.stack(torch.meshgrid(
        torch.linspace(0, 1, gridSize[0]),
        torch.linspace(0, 1, gridSize[1]),
        torch.linspace(0, 1, gridSize[2]),
    ), -1).to(self.device)
    dense_xyz = self.aabb[0] * (1-samples) + self.aabb[1] * samples
    # Put the z axis first so the loop below evaluates one z-slice at a time.
    dense_xyz = dense_xyz.transpose(0,2).contiguous()
    alpha = torch.zeros_like(dense_xyz[...,0])
    for i in range(gridSize[2]):
        alpha[i] = self.compute_alpha(dense_xyz[i].view(-1,3), self.distance_scale*self.aabbDiag).view((gridSize[1], gridSize[0]))
    alpha = alpha.clamp(0,1)[None,None]
    ks = 3
    # Dilate occupancy by one voxel so thin surfaces are not masked out.
    alpha = F.max_pool3d(alpha, kernel_size=ks, padding=ks // 2, stride=1).view(gridSize[::-1])
    alpha[alpha>=self.alphaMask_thres] = 1
    alpha[alpha<self.alphaMask_thres] = 0
    self.alphaMask = AlphaGridMask(self.device, self.aabb, alpha)
    # Tight bounding box around all occupied sample positions.
    valid_xyz = dense_xyz[alpha>0.5]
    xyz_min = valid_xyz.amin(0)
    xyz_max = valid_xyz.amax(0)
    new_aabb = torch.stack((xyz_min, xyz_max))
    total = torch.sum(alpha)
    print(f"bbox: {xyz_min, xyz_max} alpha rest %%%f"%(total/total_voxels*100))
    return new_aabb
@torch.no_grad()
def filtering_rays(self, all_rays, all_rgbs, N_samples=256, chunk=10240*5, bbox_only=False):
    """Discard training rays that never touch the scene.

    :param all_rays: rays as (..., 6) tensors of origin + direction
    :param all_rgbs: per-ray target colors, same leading shape as all_rays
    :param N_samples: samples per ray when testing against the alpha mask
    :param chunk: rays processed per batch to bound peak memory
    :param bbox_only: if True, keep rays that merely intersect the aabb;
        otherwise require a sample with positive alpha-mask value
    :return: (filtered rays, filtered rgbs)
    """
    print('========> filtering rays ...')
    tt = time.time()
    N = torch.tensor(all_rays.shape[:-1]).prod()
    mask_filtered = []
    idx_chunks = torch.split(torch.arange(N), chunk)
    for idx_chunk in idx_chunks:
        rays_chunk = all_rays[idx_chunk].to(self.device)
        rays_o, rays_d = rays_chunk[..., :3], rays_chunk[..., 3:6]
        if bbox_only:
            # Ray/aabb slab test: the ray hits the box iff exit t > entry t.
            vec = torch.where(rays_d == 0, torch.full_like(rays_d, 1e-6), rays_d)
            rate_a = (self.aabb[1] - rays_o) / vec
            rate_b = (self.aabb[0] - rays_o) / vec
            t_min = torch.minimum(rate_a, rate_b).amax(-1)#.clamp(min=near, max=far)
            t_max = torch.maximum(rate_a, rate_b).amin(-1)#.clamp(min=near, max=far)
            mask_inbbox = t_max > t_min
        else:
            # Keep the ray if any sampled point lands in an occupied voxel.
            xyz_sampled, _,_ = self.sample_ray(rays_o, rays_d, N_samples=N_samples, is_train=False)
            mask_inbbox= (self.alphaMask.sample_alpha(xyz_sampled).view(xyz_sampled.shape[:-1]) > 0).any(-1)
        mask_filtered.append(mask_inbbox.cpu())
    mask_filtered = torch.cat(mask_filtered).view(all_rgbs.shape[:-1])
    print(f'Ray filtering done! takes {time.time()-tt} s. ray mask ratio: {torch.sum(mask_filtered) / N}')
    return all_rays[mask_filtered], all_rgbs[mask_filtered]
def feature2density(self, density_features):
    """Map raw density features to non-negative volume density.

    The activation is chosen by ``self.fea2denseAct``: 'softplus' applies a
    shifted softplus (``self.density_shift`` biases the input so an
    uninitialized zero feature maps to a small density), while 'relu'
    simply clamps negatives to zero.

    :raises ValueError: if ``self.fea2denseAct`` names no known activation.
    """
    if self.fea2denseAct == "softplus":
        return F.softplus(density_features + self.density_shift)
    elif self.fea2denseAct == "relu":
        return F.relu(density_features)
    # Previously an unrecognized activation fell through and returned None,
    # producing a confusing TypeError far downstream; fail fast instead.
    raise ValueError(f"Unknown fea2denseAct: {self.fea2denseAct!r}")
def compute_alpha(self, xyz_locs, length=1):
    """Evaluate opacity alpha = 1 - exp(-sigma * length) at world positions.

    Positions the alpha mask marks as empty skip the (expensive) density
    evaluation and keep sigma = 0.
    """
    if self.alphaMask is not None:
        alphas = self.alphaMask.sample_alpha(xyz_locs)
        alpha_mask = alphas > 0
    else:
        # No mask yet: evaluate density everywhere.
        alpha_mask = torch.ones_like(xyz_locs[:,0], dtype=bool)
    sigma = torch.zeros(xyz_locs.shape[:-1], device=xyz_locs.device)
    if alpha_mask.any():
        xyz_sampled = self.normalize_coord(xyz_locs[alpha_mask])
        sigma_feature = self.compute_densityfeature(xyz_sampled)
        validsigma = self.feature2density(sigma_feature)
        sigma[alpha_mask] = validsigma
    alpha = 1 - torch.exp(-sigma*length).view(xyz_locs.shape[:-1])
    return alpha
def forward(self, rays_chunk, white_bg=True, is_train=False, ndc_ray=False, N_samples=-1):
    """Volume-render a chunk of rays.

    :param rays_chunk: (N, >=7) tensor; [:, :3] origins, [:, 3:6] directions,
        [..., -1] is used as a background depth term for the depth map
    :param white_bg: composite leftover transmittance onto white
    :param is_train: enables jittered sampling and random white-bg dropout
    :param ndc_ray: sample in NDC space instead of world space
    :param N_samples: samples per ray (-1 -> self.nSamples)
    :return: (rgb_map, depth_map)
    """
    # sample points
    viewdirs = rays_chunk[:, 3:6]
    if ndc_ray:
        xyz_sampled, z_vals, ray_valid = self.sample_ray_ndc(rays_chunk[:, :3], viewdirs, is_train=is_train,N_samples=N_samples)
        dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
        # In NDC the direction is not unit length: rescale dists and normalize.
        rays_norm = torch.norm(viewdirs, dim=-1, keepdim=True)
        dists = dists * rays_norm
        viewdirs = viewdirs / rays_norm
    else:
        xyz_sampled, z_vals, ray_valid = self.sample_ray(rays_chunk[:, :3], viewdirs, is_train=is_train,N_samples=N_samples)
        dists = torch.cat((z_vals[:, 1:] - z_vals[:, :-1], torch.zeros_like(z_vals[:, :1])), dim=-1)
    viewdirs = viewdirs.view(-1, 1, 3).expand(xyz_sampled.shape)
    if self.alphaMask is not None:
        # Drop samples that the baked alpha mask marks as empty space.
        alphas = self.alphaMask.sample_alpha(xyz_sampled[ray_valid])
        alpha_mask = alphas > 0
        ray_invalid = ~ray_valid
        ray_invalid[ray_valid] |= (~alpha_mask)
        ray_valid = ~ray_invalid
    sigma = torch.zeros(xyz_sampled.shape[:-1], device=xyz_sampled.device)
    rgb = torch.zeros((*xyz_sampled.shape[:2], 3), device=xyz_sampled.device)
    if ray_valid.any():
        xyz_sampled = self.normalize_coord(xyz_sampled)
        sigma_feature = self.compute_densityfeature(xyz_sampled[ray_valid])
        validsigma = self.feature2density(sigma_feature)
        sigma[ray_valid] = validsigma
    alpha, weight, bg_weight = raw2alpha(sigma, dists * self.distance_scale)
    # Only shade samples whose compositing weight is non-negligible.
    app_mask = weight > self.rayMarch_weight_thres
    if app_mask.any():
        app_features = self.compute_appfeature(xyz_sampled[app_mask])
        valid_rgbs = self.renderModule(xyz_sampled[app_mask], viewdirs[app_mask], app_features)
        rgb[app_mask] = valid_rgbs
    acc_map = torch.sum(weight, -1)
    rgb_map = torch.sum(weight[..., None] * rgb, -2)
    # During training the white background is applied stochastically (50%).
    if white_bg or (is_train and torch.rand((1,))<0.5):
        rgb_map = rgb_map + (1. - acc_map[..., None])
    rgb_map = rgb_map.clamp(0,1)
    with torch.no_grad():
        depth_map = torch.sum(weight * z_vals, -1)
        depth_map = depth_map + (1. - acc_map) * rays_chunk[..., -1]
    return rgb_map, depth_map # rgb, sigma, alpha, weight, bg_weight
class TensorVM(TensorBase):
    """TensoRF with a shared vector-matrix (VM) decomposition.

    A single plane/line coefficient pair per axis stores both appearance
    (first ``app_n_comp`` channels) and density (last ``density_n_comp``
    channels); the compute_* methods slice the channel dimension accordingly.
    """

    def __init__(self, aabb, gridSize, device, **kargs):
        super(TensorVM, self).__init__(aabb, gridSize, device, **kargs)

    def init_svd_volume(self, res, device):
        """Allocate shared plane/line coefficients and the appearance mixing layer."""
        self.plane_coef = torch.nn.Parameter(
            0.1 * torch.randn((3, self.app_n_comp + self.density_n_comp, res, res), device=device))
        self.line_coef = torch.nn.Parameter(
            0.1 * torch.randn((3, self.app_n_comp + self.density_n_comp, res, 1), device=device))
        self.basis_mat = torch.nn.Linear(self.app_n_comp * 3, self.app_dim, bias=False, device=device)

    def get_optparam_groups(self, lr_init_spatialxyz = 0.02, lr_init_network = 0.001):
        """Optimizer groups: spatial coefficients at one LR, networks at another."""
        grad_vars = [{'params': self.line_coef, 'lr': lr_init_spatialxyz}, {'params': self.plane_coef, 'lr': lr_init_spatialxyz},
                     {'params': self.basis_mat.parameters(), 'lr':lr_init_network}]
        if isinstance(self.renderModule, torch.nn.Module):
            grad_vars += [{'params':self.renderModule.parameters(), 'lr':lr_init_network}]
        return grad_vars

    def compute_features(self, xyz_sampled):
        """Return (sigma_feature, app_features) at normalized sample points."""
        # BUG FIX: reshape the sampling grids to (3, N, 1, 2) exactly as the
        # sibling compute_densityfeature/compute_appfeature methods do;
        # without the .view() F.grid_sample receives a 3-D grid and raises.
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        # Density: last density_n_comp channels, rank-1 plane*line products summed.
        plane_feats = F.grid_sample(self.plane_coef[:, -self.density_n_comp:], coordinate_plane, align_corners=True).view(
            -1, *xyz_sampled.shape[:1])
        line_feats = F.grid_sample(self.line_coef[:, -self.density_n_comp:], coordinate_line, align_corners=True).view(
            -1, *xyz_sampled.shape[:1])
        sigma_feature = torch.sum(plane_feats * line_feats, dim=0)
        # Appearance: first app_n_comp channels, mixed by the linear basis.
        plane_feats = F.grid_sample(self.plane_coef[:, :self.app_n_comp], coordinate_plane, align_corners=True).view(3 * self.app_n_comp, -1)
        line_feats = F.grid_sample(self.line_coef[:, :self.app_n_comp], coordinate_line, align_corners=True).view(3 * self.app_n_comp, -1)
        app_features = self.basis_mat((plane_feats * line_feats).T)
        return sigma_feature, app_features

    def compute_densityfeature(self, xyz_sampled):
        """Sum of per-axis plane*line products over the density channels."""
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        plane_feats = F.grid_sample(self.plane_coef[:, -self.density_n_comp:], coordinate_plane, align_corners=True).view(
            -1, *xyz_sampled.shape[:1])
        line_feats = F.grid_sample(self.line_coef[:, -self.density_n_comp:], coordinate_line, align_corners=True).view(
            -1, *xyz_sampled.shape[:1])
        sigma_feature = torch.sum(plane_feats * line_feats, dim=0)
        return sigma_feature

    def compute_appfeature(self, xyz_sampled):
        """Appearance features: plane*line products mixed by the linear basis."""
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        plane_feats = F.grid_sample(self.plane_coef[:, :self.app_n_comp], coordinate_plane, align_corners=True).view(3 * self.app_n_comp, -1)
        line_feats = F.grid_sample(self.line_coef[:, :self.app_n_comp], coordinate_line, align_corners=True).view(3 * self.app_n_comp, -1)
        app_features = self.basis_mat((plane_feats * line_feats).T)
        return app_features

    def vectorDiffs(self, vector_comps):
        """Mean absolute off-diagonal Gram entry of each line factor —
        an orthogonality regularizer over components."""
        total = 0
        for idx in range(len(vector_comps)):
            n_comp, n_size = vector_comps[idx].shape[:-1]
            dotp = torch.matmul(vector_comps[idx].view(n_comp,n_size), vector_comps[idx].view(n_comp,n_size).transpose(-1,-2))
            # Stride trick that selects every off-diagonal entry of an n x n matrix.
            non_diagonal = dotp.view(-1)[1:].view(n_comp-1, n_comp+1)[...,:-1]
            total = total + torch.mean(torch.abs(non_diagonal))
        return total

    def vector_comp_diffs(self):
        """Orthogonality penalty over both density and appearance line factors."""
        return self.vectorDiffs(self.line_coef[:,-self.density_n_comp:]) + self.vectorDiffs(self.line_coef[:,:self.app_n_comp])

    @torch.no_grad()
    def up_sampling_VM(self, plane_coef, line_coef, res_target):
        """Bilinearly resample per-axis plane/line factors to a new resolution."""
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            mat_id_0, mat_id_1 = self.matMode[i]
            plane_coef[i] = torch.nn.Parameter(
                F.interpolate(plane_coef[i].data, size=(res_target[mat_id_1], res_target[mat_id_0]), mode='bilinear',
                              align_corners=True))
            line_coef[i] = torch.nn.Parameter(
                F.interpolate(line_coef[i].data, size=(res_target[vec_id], 1), mode='bilinear', align_corners=True))
        return plane_coef, line_coef

    @torch.no_grad()
    def upsample_volume_grid(self, res_target):
        # Shared tensors are resampled directly; assumes a cubic grid since a
        # single scale factor derived from axis 0 is applied to all axes.
        scale = res_target[0]/self.line_coef.shape[2] #assuming xyz have the same scale
        plane_coef = F.interpolate(self.plane_coef.detach().data, scale_factor=scale, mode='bilinear',align_corners=True)
        line_coef = F.interpolate(self.line_coef.detach().data, size=(res_target[0],1), mode='bilinear',align_corners=True)
        self.plane_coef, self.line_coef = torch.nn.Parameter(plane_coef), torch.nn.Parameter(line_coef)
        # BUG FIX: the base class defines update_stepSize(); the previous call
        # to the nonexistent compute_stepSize() raised AttributeError here.
        self.update_stepSize(res_target)
        print(f'upsamping to {res_target}')
class TensorVMSplit(TensorBase):
    """TensoRF VM decomposition with separate plane/line factors for density
    and appearance (one factor pair per axis, plus a linear mixing layer)."""

    def __init__(self, aabb, gridSize, device, **kargs):
        super(TensorVMSplit, self).__init__(aabb, gridSize, device, **kargs)

    def init_svd_volume(self, res, device):
        """Allocate separate density/appearance factors and the appearance basis."""
        self.density_plane, self.density_line = self.init_one_svd(self.density_n_comp, self.gridSize, 0.1, device)
        self.app_plane, self.app_line = self.init_one_svd(self.app_n_comp, self.gridSize, 0.1, device)
        self.basis_mat = torch.nn.Linear(sum(self.app_n_comp), self.app_dim, bias=False).to(device)

    def init_one_svd(self, n_component, gridSize, scale, device):
        """Create one (plane, line) ParameterList pair, one entry per axis."""
        plane_coef, line_coef = [], []
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            mat_id_0, mat_id_1 = self.matMode[i]
            plane_coef.append(torch.nn.Parameter(
                scale * torch.randn((1, n_component[i], gridSize[mat_id_1], gridSize[mat_id_0]))))  #
            line_coef.append(
                torch.nn.Parameter(scale * torch.randn((1, n_component[i], gridSize[vec_id], 1))))
        return torch.nn.ParameterList(plane_coef).to(device), torch.nn.ParameterList(line_coef).to(device)

    def get_optparam_groups(self, lr_init_spatialxyz = 0.02, lr_init_network = 0.001):
        """Optimizer groups: spatial coefficients at one LR, networks at another."""
        grad_vars = [{'params': self.density_line, 'lr': lr_init_spatialxyz}, {'params': self.density_plane, 'lr': lr_init_spatialxyz},
                     {'params': self.app_line, 'lr': lr_init_spatialxyz}, {'params': self.app_plane, 'lr': lr_init_spatialxyz},
                     {'params': self.basis_mat.parameters(), 'lr':lr_init_network}]
        if isinstance(self.renderModule, torch.nn.Module):
            grad_vars += [{'params':self.renderModule.parameters(), 'lr':lr_init_network}]
        return grad_vars

    def vectorDiffs(self, vector_comps):
        """Mean absolute off-diagonal Gram entry of each line factor
        (orthogonality regularizer over components)."""
        total = 0
        for idx in range(len(vector_comps)):
            n_comp, n_size = vector_comps[idx].shape[1:-1]
            dotp = torch.matmul(vector_comps[idx].view(n_comp,n_size), vector_comps[idx].view(n_comp,n_size).transpose(-1,-2))
            # Stride trick selecting every off-diagonal entry of an n x n matrix.
            non_diagonal = dotp.view(-1)[1:].view(n_comp-1, n_comp+1)[...,:-1]
            total = total + torch.mean(torch.abs(non_diagonal))
        return total

    def vector_comp_diffs(self):
        """Orthogonality penalty over density and appearance line factors."""
        return self.vectorDiffs(self.density_line) + self.vectorDiffs(self.app_line)

    def density_L1(self):
        """L1 sparsity regularizer over the density factors."""
        total = 0
        for idx in range(len(self.density_plane)):
            total = total + torch.mean(torch.abs(self.density_plane[idx])) + torch.mean(torch.abs(self.density_line[idx]))# + torch.mean(torch.abs(self.app_plane[idx])) + torch.mean(torch.abs(self.density_plane[idx]))
        return total

    def TV_loss_density(self, reg):
        """Total-variation smoothness loss on the density factors."""
        total = 0
        for idx in range(len(self.density_plane)):
            total = total + reg(self.density_plane[idx]) * 1e-2 + reg(self.density_line[idx]) * 1e-3
        return total

    def TV_loss_app(self, reg):
        """Total-variation smoothness loss on the appearance factors."""
        total = 0
        for idx in range(len(self.app_plane)):
            total = total + reg(self.app_plane[idx]) * 1e-2 + reg(self.app_line[idx]) * 1e-3
        return total

    def compute_densityfeature(self, xyz_sampled):
        """Sum over axes of plane*line factor products at the sample points."""
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        sigma_feature = torch.zeros((xyz_sampled.shape[0],), device=xyz_sampled.device)
        for idx_plane in range(len(self.density_plane)):
            plane_coef_point = F.grid_sample(self.density_plane[idx_plane], coordinate_plane[[idx_plane]],
                                             align_corners=True).view(-1, *xyz_sampled.shape[:1])
            line_coef_point = F.grid_sample(self.density_line[idx_plane], coordinate_line[[idx_plane]],
                                            align_corners=True).view(-1, *xyz_sampled.shape[:1])
            sigma_feature = sigma_feature + torch.sum(plane_coef_point * line_coef_point, dim=0)
        return sigma_feature

    def compute_appfeature(self, xyz_sampled):
        """Concatenate per-axis plane*line products and mix through basis_mat."""
        # plane + line basis
        coordinate_plane = torch.stack((xyz_sampled[..., self.matMode[0]], xyz_sampled[..., self.matMode[1]], xyz_sampled[..., self.matMode[2]])).detach().view(3, -1, 1, 2)
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        plane_coef_point,line_coef_point = [],[]
        for idx_plane in range(len(self.app_plane)):
            plane_coef_point.append(F.grid_sample(self.app_plane[idx_plane], coordinate_plane[[idx_plane]],
                                                  align_corners=True).view(-1, *xyz_sampled.shape[:1]))
            line_coef_point.append(F.grid_sample(self.app_line[idx_plane], coordinate_line[[idx_plane]],
                                                 align_corners=True).view(-1, *xyz_sampled.shape[:1]))
        plane_coef_point, line_coef_point = torch.cat(plane_coef_point), torch.cat(line_coef_point)
        return self.basis_mat((plane_coef_point * line_coef_point).T)

    @torch.no_grad()
    def up_sampling_VM(self, plane_coef, line_coef, res_target):
        """Bilinearly resample per-axis plane/line factors to a new resolution."""
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            mat_id_0, mat_id_1 = self.matMode[i]
            plane_coef[i] = torch.nn.Parameter(
                F.interpolate(plane_coef[i].data, size=(res_target[mat_id_1], res_target[mat_id_0]), mode='bilinear',
                              align_corners=True))
            line_coef[i] = torch.nn.Parameter(
                F.interpolate(line_coef[i].data, size=(res_target[vec_id], 1), mode='bilinear', align_corners=True))
        return plane_coef, line_coef

    @torch.no_grad()
    def upsample_volume_grid(self, res_target):
        """Resample all factors to res_target and refresh step-size bookkeeping."""
        self.app_plane, self.app_line = self.up_sampling_VM(self.app_plane, self.app_line, res_target)
        self.density_plane, self.density_line = self.up_sampling_VM(self.density_plane, self.density_line, res_target)
        self.update_stepSize(res_target)
        print(f'upsamping to {res_target}')

    @torch.no_grad()
    def shrink(self, new_aabb):
        """Crop all factor tensors to the voxel span of new_aabb and shrink the aabb."""
        print("====> shrinking ...")
        xyz_min, xyz_max = new_aabb
        # Convert world-space bounds into (fractional) voxel indices.
        t_l, b_r = (xyz_min - self.aabb[0]) / self.units, (xyz_max - self.aabb[0]) / self.units
        # print(new_aabb, self.aabb)
        # print(t_l, b_r,self.alphaMask.alpha_volume.shape)
        # NOTE(review): the inner torch.round(t_l) is already rounded, so the
        # outer round is redundant — kept as-is to preserve behavior.
        t_l, b_r = torch.round(torch.round(t_l)).long(), torch.round(b_r).long() + 1
        b_r = torch.stack([b_r, self.gridSize]).amin(0)
        for i in range(len(self.vecMode)):
            mode0 = self.vecMode[i]
            self.density_line[i] = torch.nn.Parameter(
                self.density_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
            self.app_line[i] = torch.nn.Parameter(
                self.app_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
            mode0, mode1 = self.matMode[i]
            self.density_plane[i] = torch.nn.Parameter(
                self.density_plane[i].data[...,t_l[mode1]:b_r[mode1],t_l[mode0]:b_r[mode0]]
            )
            self.app_plane[i] = torch.nn.Parameter(
                self.app_plane[i].data[...,t_l[mode1]:b_r[mode1],t_l[mode0]:b_r[mode0]]
            )
        if not torch.all(self.alphaMask.gridSize == self.gridSize):
            # Snap the requested aabb onto the voxel lattice actually kept.
            t_l_r, b_r_r = t_l / (self.gridSize-1), (b_r-1) / (self.gridSize-1)
            correct_aabb = torch.zeros_like(new_aabb)
            correct_aabb[0] = (1-t_l_r)*self.aabb[0] + t_l_r*self.aabb[1]
            correct_aabb[1] = (1-b_r_r)*self.aabb[0] + b_r_r*self.aabb[1]
            print("aabb", new_aabb, "\ncorrect aabb", correct_aabb)
            new_aabb = correct_aabb
        newSize = b_r - t_l
        self.aabb = new_aabb
        self.update_stepSize((newSize[0], newSize[1], newSize[2]))
class TensorCP(TensorBase):
    """TensoRF with a CP (CANDECOMP/PARAFAC) decomposition: rank-1 products
    of three per-axis line factors, no plane factors."""

    def __init__(self, aabb, gridSize, device, **kargs):
        super(TensorCP, self).__init__(aabb, gridSize, device, **kargs)

    def init_svd_volume(self, res, device):
        """Allocate per-axis line factors and the appearance mixing layer."""
        self.density_line = self.init_one_svd(self.density_n_comp, self.gridSize, 0.2, device)
        self.app_line = self.init_one_svd(self.app_n_comp, self.gridSize, 0.2, device)
        self.basis_mat = torch.nn.Linear(self.app_n_comp[0], self.app_dim, bias=False).to(device)

    def init_one_svd(self, n_component, gridSize, scale, device):
        """Create one line-factor ParameterList, one entry per axis."""
        line_coef = []
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            line_coef.append(
                torch.nn.Parameter(scale * torch.randn((1, n_component[i], gridSize[vec_id], 1))))
        return torch.nn.ParameterList(line_coef).to(device)

    def get_optparam_groups(self, lr_init_spatialxyz = 0.02, lr_init_network = 0.001):
        """Optimizer groups: line factors at one LR, networks at another."""
        grad_vars = [{'params': self.density_line, 'lr': lr_init_spatialxyz},
                     {'params': self.app_line, 'lr': lr_init_spatialxyz},
                     {'params': self.basis_mat.parameters(), 'lr':lr_init_network}]
        if isinstance(self.renderModule, torch.nn.Module):
            grad_vars += [{'params':self.renderModule.parameters(), 'lr':lr_init_network}]
        return grad_vars

    def compute_densityfeature(self, xyz_sampled):
        """Sum over components of the product of the three axis line factors."""
        coordinate_line = torch.stack((xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        line_coef_point = F.grid_sample(self.density_line[0], coordinate_line[[0]],
                                        align_corners=True).view(-1, *xyz_sampled.shape[:1])
        line_coef_point = line_coef_point * F.grid_sample(self.density_line[1], coordinate_line[[1]],
                                                          align_corners=True).view(-1, *xyz_sampled.shape[:1])
        line_coef_point = line_coef_point * F.grid_sample(self.density_line[2], coordinate_line[[2]],
                                                          align_corners=True).view(-1, *xyz_sampled.shape[:1])
        sigma_feature = torch.sum(line_coef_point, dim=0)
        return sigma_feature

    def compute_appfeature(self, xyz_sampled):
        """Product of the three axis line factors, mixed through basis_mat."""
        coordinate_line = torch.stack(
            (xyz_sampled[..., self.vecMode[0]], xyz_sampled[..., self.vecMode[1]], xyz_sampled[..., self.vecMode[2]]))
        coordinate_line = torch.stack((torch.zeros_like(coordinate_line), coordinate_line), dim=-1).detach().view(3, -1, 1, 2)
        line_coef_point = F.grid_sample(self.app_line[0], coordinate_line[[0]],
                                        align_corners=True).view(-1, *xyz_sampled.shape[:1])
        line_coef_point = line_coef_point * F.grid_sample(self.app_line[1], coordinate_line[[1]],
                                                          align_corners=True).view(-1, *xyz_sampled.shape[:1])
        line_coef_point = line_coef_point * F.grid_sample(self.app_line[2], coordinate_line[[2]],
                                                          align_corners=True).view(-1, *xyz_sampled.shape[:1])
        return self.basis_mat(line_coef_point.T)

    @torch.no_grad()
    def up_sampling_Vector(self, density_line_coef, app_line_coef, res_target):
        """Bilinearly resample both sets of line factors to a new resolution."""
        for i in range(len(self.vecMode)):
            vec_id = self.vecMode[i]
            density_line_coef[i] = torch.nn.Parameter(
                F.interpolate(density_line_coef[i].data, size=(res_target[vec_id], 1), mode='bilinear', align_corners=True))
            app_line_coef[i] = torch.nn.Parameter(
                F.interpolate(app_line_coef[i].data, size=(res_target[vec_id], 1), mode='bilinear', align_corners=True))
        return density_line_coef, app_line_coef

    @torch.no_grad()
    def upsample_volume_grid(self, res_target):
        """Resample factors to res_target and refresh step-size bookkeeping."""
        self.density_line, self.app_line = self.up_sampling_Vector(self.density_line, self.app_line, res_target)
        self.update_stepSize(res_target)
        print(f'upsamping to {res_target}')

    @torch.no_grad()
    def shrink(self, new_aabb):
        """Crop line factors to the voxel span of new_aabb and shrink the aabb."""
        print("====> shrinking ...")
        xyz_min, xyz_max = new_aabb
        # Convert world-space bounds into (fractional) voxel indices.
        t_l, b_r = (xyz_min - self.aabb[0]) / self.units, (xyz_max - self.aabb[0]) / self.units
        t_l, b_r = torch.round(torch.round(t_l)).long(), torch.round(b_r).long() + 1
        b_r = torch.stack([b_r, self.gridSize]).amin(0)
        for i in range(len(self.vecMode)):
            mode0 = self.vecMode[i]
            self.density_line[i] = torch.nn.Parameter(
                self.density_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
            self.app_line[i] = torch.nn.Parameter(
                self.app_line[i].data[...,t_l[mode0]:b_r[mode0],:]
            )
        if not torch.all(self.alphaMask.gridSize == self.gridSize):
            # Snap the requested aabb onto the voxel lattice actually kept.
            t_l_r, b_r_r = t_l / (self.gridSize-1), (b_r-1) / (self.gridSize-1)
            correct_aabb = torch.zeros_like(new_aabb)
            correct_aabb[0] = (1-t_l_r)*self.aabb[0] + t_l_r*self.aabb[1]
            correct_aabb[1] = (1-b_r_r)*self.aabb[0] + b_r_r*self.aabb[1]
            print("aabb", new_aabb, "\ncorrect aabb", correct_aabb)
            new_aabb = correct_aabb
        newSize = b_r - t_l
        self.aabb = new_aabb
        self.update_stepSize((newSize[0], newSize[1], newSize[2]))

    def density_L1(self):
        """L1 sparsity regularizer over the density line factors."""
        total = 0
        for idx in range(len(self.density_line)):
            total = total + torch.mean(torch.abs(self.density_line[idx]))
        return total

    def TV_loss_density(self, reg):
        """Total-variation smoothness loss on the density line factors."""
        total = 0
        for idx in range(len(self.density_line)):
            total = total + reg(self.density_line[idx]) * 1e-3
        return total

    def TV_loss_app(self, reg):
        """Total-variation smoothness loss on the appearance line factors."""
        total = 0
        for idx in range(len(self.app_line)):
            total = total + reg(self.app_line[idx]) * 1e-3
        return total
#!/usr/bin/python3
from pyvips import Image, Introspect, GValue, Error, \
ffi, values_for_enum, vips_lib, gobject_lib, \
type_map, type_name, type_from_name, nickname_find
# This file generates the phpdoc comments for the magic methods and properties.
# It's in Python, since we use the whole of FFI, not just the
# small bit exposed by php-vips-ext.
# Regenerate docs with something like:
#
# cd src
# python ../examples/generate_phpdoc.py
# this needs pyvips
#
# pip install --user pyvips
# map a Python gtype to PHP argument type names
# (used for the @method parameter lists in the generated phpdoc)
gtype_to_php_arg = {
    GValue.gbool_type: 'bool',
    GValue.gint_type: 'integer',
    GValue.gdouble_type: 'float',
    GValue.gstr_type: 'string',
    GValue.refstr_type: 'string',
    GValue.genum_type: 'string',
    GValue.gflags_type: 'integer',
    GValue.gobject_type: 'string',
    GValue.image_type: 'Image',
    GValue.array_int_type: 'integer[]|integer',
    GValue.array_double_type: 'float[]|float',
    GValue.array_image_type: 'Image[]|Image',
    GValue.blob_type: 'string'
}
# php result type names are different, annoyingly, and very restricted
# (phpdoc return types cannot use the richer union syntax, so arrays collapse
# to plain 'array')
gtype_to_php_result = {
    GValue.gbool_type: 'bool',
    GValue.gint_type: 'integer',
    GValue.gdouble_type: 'float',
    GValue.gstr_type: 'string',
    GValue.refstr_type: 'string',
    GValue.genum_type: 'string',
    GValue.gflags_type: 'integer',
    GValue.gobject_type: 'string',
    GValue.image_type: 'Image',
    GValue.array_int_type: 'array',
    GValue.array_double_type: 'array',
    GValue.array_image_type: 'array',
    GValue.blob_type: 'string'
}
# values for VipsArgumentFlags (bit flags, tested with & below)
_REQUIRED = 1
_INPUT = 16
_OUTPUT = 32
_DEPRECATED = 64
_MODIFY = 128
# for VipsOperationFlags
_OPERATION_DEPRECATED = 8
# some names we might generate are reserved PHP names ... just append a "1".
reserved_php_names = {
    'DEFAULT': 'DEFAULT1',
    'XOR': 'XOR1',
    'AND': 'AND1',
    'OR': 'OR1'
}
def gtype_to_php(gtype, result=False):
    """Map a gtype to PHP type name we use to represent it.

    Exact gtype matches win; otherwise the fundamental type is tried.
    ``result=True`` selects the (more restricted) phpdoc return-type table.
    """
    lookup = gtype_to_php_result if result else gtype_to_php_arg
    if gtype in lookup:
        return lookup[gtype]
    fundamental = gobject_lib.g_type_fundamental(gtype)
    if fundamental in lookup:
        return lookup[fundamental]
    return '<unknown type>'
def remove_prefix(enum_str):
    """Strip a leading 'Vips' from a type/enum name, if present."""
    prefix = 'Vips'
    return enum_str[len(prefix):] if enum_str.startswith(prefix) else enum_str
def generate_operation(operation_name):
    """Return the phpdoc '@method' comment line(s) for one libvips operation.

    Uses pyvips introspection to discover required inputs/outputs and maps
    their gtypes to PHP type names; multi-output operations are documented
    as returning an array with a per-key breakdown.
    """
    intro = Introspect.get(operation_name)
    result = ' * @method '
    # Operations with no member image are documented as static methods.
    if intro.member_x is None:
        result += 'static '
    if len(intro.required_output) == 0:
        result += 'void '
    elif len(intro.required_output) == 1:
        arg = intro.required_output[0]
        details = intro.details[arg]
        result += '{0} '.format(gtype_to_php(details['type'], True))
    else:
        # we generate a Returns: block for this case, see below
        result += 'array '
    result += '{0}('.format(operation_name)
    for name in intro.method_args:
        details = intro.details[name]
        result += '{0} ${1}, '.format(gtype_to_php(details['type']), name)
    # Every generated method accepts a trailing options array.
    result += 'array $options = []) '
    description = intro.description
    result += description[0].upper() + description[1:] + '.\n'
    # find any Enums we've referenced and output @see lines for them
    for name in intro.required_output + intro.method_args:
        details = intro.details[name]
        fundamental = gobject_lib.g_type_fundamental(details['type'])
        if fundamental != GValue.genum_type:
            continue
        result += ' * @see {0} for possible values for ${1}\n'.format(remove_prefix(type_name(details['type'])), name)
    if len(intro.required_output) > 1:
        # Document the shape of the associative return array.
        result += ' * Return array with: [\n'
        for name in intro.required_output:
            details = intro.details[name]
            result += ' *     \'{0}\' => @type {1} {2}\n'.format(name, gtype_to_php(details['type']),
                                                                 details['blurb'][0].upper() + details['blurb'][1:])
        result += ' * ];\n'
    result += ' * @throws Exception\n'
    return result
preamble = """<?php
/**
* This file was generated automatically. Do not edit!
*
* PHP version 7
*
* LICENSE:
*
* Copyright (c) 2016 <NAME>
*
* Permission is hereby granted, free of charge, to any person obtaining
* a copy of this software and associated documentation files (the
* "Software"), to deal in the Software without restriction, including
* without limitation the rights to use, copy, modify, merge, publish,
* distribute, sublicense, and/or sell copies of the Software, and to
* permit persons to whom the Software is furnished to do so, subject to
* the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
* LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
* OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
* WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*
* @category Images
* @package Jcupitt\\Vips
* @author <NAME> <<EMAIL>>
* @copyright 2016 <NAME>
* @license https://opensource.org/licenses/MIT MIT
* @link https://github.com/jcupitt/php-vips
*/
"""
class_header = """ * @category Images
* @package Jcupitt\\Vips
* @author <NAME> <<EMAIL>>
* @copyright 2016 <NAME>
* @license https://opensource.org/licenses/MIT MIT
* @link https://github.com/jcupitt/php-vips
"""
def generate_auto_doc(filename):
    """Write the ImageAutodoc.php stub: @method lines for every non-deprecated
    libvips operation plus @property lines for every image field.

    :param filename: path of the PHP file to (over)write
    """
    all_nicknames = []

    # type_map callback: collect the nickname of every concrete operation
    # subtype, recursing into subclasses.
    def add_nickname(gtype, a, b):
        nickname = nickname_find(gtype)
        try:
            # can fail for abstract types
            intro = Introspect.get(nickname)
            # we are only interested in non-deprecated operations
            if (intro.flags & _OPERATION_DEPRECATED) == 0:
                all_nicknames.append(nickname)
        except Error:
            pass
        type_map(gtype, add_nickname)
        return ffi.NULL

    type_map(type_from_name('VipsOperation'), add_nickname)
    # add 'missing' synonyms by hand
    all_nicknames.append('crop')
    # make list unique and sort
    all_nicknames = list(set(all_nicknames))
    all_nicknames.sort()
    # these have hand-written methods, don't autodoc them
    no_generate = [
        'bandjoin',
        'bandrank',
        'ifthenelse',
        'add',
        'subtract',
        'multiply',
        'divide',
        'remainder'
    ]
    all_nicknames = [x for x in all_nicknames if x not in no_generate]
    print('Generating {0} ...'.format(filename))
    with open(filename, 'w') as f:
        f.write(preamble)
        f.write('\n')
        f.write('namespace Jcupitt\\Vips;\n')
        f.write('\n')
        f.write('/**\n')
        f.write(' * Autodocs for the Image class.\n')
        f.write(class_header)
        f.write(' *\n')
        for nickname in all_nicknames:
            f.write(generate_operation(nickname))
            f.write(' *\n')
        # all magic properties
        # Probe field names/types from a throwaway in-memory image.
        tmp_file = Image.new_temp_file('%s.v')
        all_properties = tmp_file.get_fields()
        for name in all_properties:
            php_name = name.replace('-', '_')
            gtype = tmp_file.get_typeof(name)
            fundamental = gobject_lib.g_type_fundamental(gtype)
            f.write(' * @property {0} ${1} {2}\n'.format(gtype_to_php(gtype), php_name, tmp_file.get_blurb(name)))
            if fundamental == GValue.genum_type:
                f.write(' * @see {0} for possible values\n'.format(remove_prefix(type_name(gtype))))
        f.write(' */\n')
        f.write('abstract class ImageAutodoc\n')
        f.write('{\n')
        f.write('}\n')
def generate_enums():
    """Write one <EnumName>.php per GEnum registered with libvips, each an
    abstract class exposing the enum values as string constants."""
    # otherwise we're missing some enums
    vips_lib.vips_token_get_type()
    vips_lib.vips_saveable_get_type()
    vips_lib.vips_image_type_get_type()

    all_enums = []

    def add_enum(gtype, a, b):
        nickname = type_name(gtype)
        all_enums.append(nickname)
        # recurse into subtypes
        type_map(gtype, add_enum)
        return ffi.NULL

    type_map(type_from_name('GEnum'), add_enum)

    for name in all_enums:
        gtype = type_from_name(name)
        php_name = remove_prefix(name)

        print('Generating {0}.php ...'.format(php_name))
        with open('{0}.php'.format(php_name), 'w') as f:
            f.write(preamble)
            f.write('\n')
            f.write('namespace Jcupitt\\Vips;\n')
            f.write('\n')
            f.write('/**\n')
            f.write(' * The {0} enum.\n'.format(php_name))
            f.write(class_header)
            f.write(' */\n')
            f.write('abstract class {0}\n'.format(php_name))
            f.write('{\n')
            for value in values_for_enum(gtype):
                # NOTE: rebinds php_name inside the loop; safe only because
                # the class name was already written above
                php_name = value.replace('-', '_').upper()
                if php_name in reserved_php_names:
                    php_name = reserved_php_names[php_name]
                f.write(' const {0} = \'{1}\';\n'.format(php_name, value))
            f.write('}\n')
# Script entry point: regenerate ImageAutodoc.php plus one PHP file per enum
# in the current working directory.
generate_auto_doc('ImageAutodoc.php')
generate_enums()
|
# -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community
Edition) available.
Copyright (C) 2017-2021 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
specific language governing permissions and limitations under the License.
"""
import datetime
from django.test import TestCase
from mock.mock import MagicMock
from gcloud.taskflow3.models import TaskFlowInstance
from gcloud.tests.mock import mock
from gcloud.tests.mock_settings import TASKFLOW_STATISTICS_FILTER, PIPELINE_INSTANCE_FILTER, PROJECT_FILTER
from gcloud.utils.dates import format_datetime
# Pagination fixture: 15 matching task flows, requesting page 1 of size 10.
TEST_TOTAL = 15
TEST_PAGE = 1
TEST_LIMIT = 10
TEST_PROJ_ID = 12345
TEST_CREATE_TIME = datetime.datetime.now()
TEST_TASK_INSTANCE_ID_LIST = [i for i in range(1, TEST_TOTAL + 1)]

# Queryset stand-in passed into group_by_instance_node() as `taskflow`.
TEST_TASKFLOW = MagicMock()
TEST_TASKFLOW.count = MagicMock(return_value=TEST_TOTAL)
TEST_TASKFLOW.values_list = MagicMock(return_value=TEST_TASK_INSTANCE_ID_LIST)

# Canned statistics rows returned by the mocked statistics queryset
# (first page only: TEST_LIMIT rows).
TEST_TASKFLOW_STATISTICS_DATA = [
    {
        "instance_id": i,
        "task_instance_id": i,
        "project_id": TEST_PROJ_ID,
        "category": "test_category",
        "create_time": TEST_CREATE_TIME,
        "creator": "test_creator",
        "elapsed_time": "elapsed_time",
        "atom_total": i,
        "subprocess_total": i,
        "gateways_total": i,
        "create_method": "test_create_method",
    }
    for i in range(1, TEST_LIMIT + 1)
]
TEST_TASKFLOW_STATISTICS = MagicMock(return_value=TEST_TASKFLOW_STATISTICS_DATA)

# (id, name) pairs as the mocked values_list() calls return them.
TEST_INSTANCE_NAMEDATA = [(i, "test_instance") for i in range(1, TEST_LIMIT + 1)]
TEST_PROJECT_NAMEDATA = [(TEST_PROJ_ID, "test_proj")]

# Expected result rows: the statistics rows joined with instance and project
# names, with create_time rendered through format_datetime().
TEST_GROUPS = [
    {
        "instance_id": i,
        "instance_name": dict(TEST_INSTANCE_NAMEDATA)[i],
        "project_id": TEST_PROJ_ID,
        "project_name": dict(TEST_PROJECT_NAMEDATA)[TEST_PROJ_ID],
        "category": "test_category",
        "create_time": format_datetime(TEST_CREATE_TIME),
        "creator": "test_creator",
        "elapsed_time": "elapsed_time",
        "atom_total": i,
        "subprocess_total": i,
        "gateways_total": i,
        "create_method": "test_create_method",
    }
    for i in range(1, TEST_LIMIT + 1)
]
class MockTaskflowStatistics(MagicMock):
    # Stand-in for the statistics queryset: .values() ignores its arguments
    # and returns the canned statistics rows.
    def values(self, *args, **kwargs):
        return TEST_TASKFLOW_STATISTICS_DATA
class MockInstanceDict(MagicMock):
    # Stand-in for the pipeline-instance queryset: .values_list() ignores its
    # arguments and returns canned (id, name) pairs.
    def values_list(self, *args, **kwargs):
        return TEST_INSTANCE_NAMEDATA
class MockProjectDict(MagicMock):
    # Stand-in for the project queryset: .values_list() ignores its arguments
    # and returns the single canned (project_id, name) pair.
    def values_list(self, *args, **kwargs):
        return TEST_PROJECT_NAMEDATA
class TestGroupByInstanceNode(TestCase):
    # Verifies group_by_instance_node(): it must page the matching taskflow
    # ids, join statistics rows with instance/project names and return the
    # (total, groups) pair.
    def test_group_by_instance_node(self):
        with mock.patch(TASKFLOW_STATISTICS_FILTER, MockTaskflowStatistics()) as mock_statistics_filter:
            with mock.patch(PIPELINE_INSTANCE_FILTER, MockInstanceDict()) as mock_instance_dict:
                with mock.patch(PROJECT_FILTER, MockProjectDict()) as mock_project_dict:
                    total, groups = TaskFlowInstance.objects.group_by_instance_node(
                        taskflow=TEST_TASKFLOW, filters=None, page=TEST_PAGE, limit=TEST_LIMIT
                    )
                    # statistics are fetched for every matching taskflow id ...
                    mock_statistics_filter.assert_called_once_with(task_instance_id__in=TEST_TASK_INSTANCE_ID_LIST)
                    # ... but names only for the first page of ids
                    mock_instance_dict.assert_called_once_with(id__in=TEST_TASK_INSTANCE_ID_LIST[0:TEST_LIMIT])
                    mock_project_dict.assert_called_once_with(id__in=[TEST_PROJ_ID for i in range(TEST_LIMIT)])
                    self.assertEqual(total, TEST_TOTAL)
                    self.assertEqual(groups, TEST_GROUPS)
|
# to_srt.py
import argparse
import codecs
import math
import os
import re
# Extensions accepted by main(); membership is tested against the last four
# characters of the file name (fn[-4:].lower()), which is why "dfxp" appears
# without its leading dot — ".dfxp" is five characters long.
SUPPORTED_EXTENSIONS = [".xml", ".vtt", "dfxp"]
def leading_zeros(value, digits=2):
    """Zero-pad *value* on the left, keeping only its last *digits* characters.

    Note this truncates values longer than *digits* (e.g. 123 -> "23"), which
    callers rely on for the millisecond field.
    """
    return ("000000" + str(value))[-digits:]
def convert_time(raw_time, extension):
    """Render a raw tick-count string as a subtitle timestamp.

    Produces "h:m:s.ms" for 'ass' output and "h:m:s,ms" otherwise.  The input
    appears to be in 1e-7-second ticks: seven digits are dropped to obtain
    whole seconds (TODO confirm against the feeding XML).
    """
    sep = '.' if extension == 'ass' else ','
    if int(raw_time) == 0:
        return "0:0:0{}0".format(sep)
    ms = '000'
    if len(raw_time) > 4:
        # Accept only 2 digits after coma for seconds
        ms = leading_zeros(int(raw_time[:-5]) % 1000, 2)
    total_seconds = int(raw_time[:-7]) if len(raw_time) > 7 else 0
    return "{}:{}:{}{}{}".format(
        leading_zeros(total_seconds // 3600),
        leading_zeros((total_seconds // 60) % 60),
        leading_zeros(total_seconds % 60),
        sep,
        ms,
    )
def xml_id_display_align_before(text):
    """
    displayAlign="before" means the current sub will be displayed on top,
    not at the bottom.  Return the xml:id of the <region> declared that way,
    so callers can emit an {\\an8} position tag; empty string if none.
    """
    region_re = re.compile(u'<region.*tts:displayAlign=\"before\".*xml:id=\"(.*)\"/>')
    match = region_re.search(text)
    return match.group(1) if match else u""
def xml_get_cursive_style_ids(text):
    """Collect the xml:ids of every italic <style> declared in <styling>.

    Returns [] when the document has no <styling> section.
    """
    styling = re.search("<styling>(.*)</styling>", text, flags=re.DOTALL)
    if not styling:
        return []
    italic_re = re.compile(
        '<style.* tts:fontStyle="italic".* xml:id=\"([a-zA-Z0-9_.]+)\"')
    ids = []
    for line in styling.group().split("\n"):
        match = italic_re.search(line)
        if match:
            ids.append(match.group(1))
    return ids
def xml_cleanup_spans_start(span_id_re, cursive_ids, text, extension):
    """Replace each opening <span style="..."> tag with an italic-open marker.

    Spans whose style id is in *cursive_ids* become "{\\i1}" (ass) or "<i>"
    (srt); all other spans are dropped.  Returns the rewritten text plus the
    list of markers used, in order, so the matching close tags can be handled
    by xml_cleanup_spans_end().
    """
    italic_open = u"{\\i1}" if extension == 'ass' else u"<i>"
    markers = []
    for full_tag, style_id in re.findall(span_id_re, text):
        marker = italic_open if style_id in cursive_ids else u""
        markers.append(marker)
        # replace only the first occurrence of this exact tag
        text = marker.join(text.split(full_tag, 1))
    return text, markers
def xml_cleanup_spans_end(span_end_re, text, has_cursive, extension):
    """Replace closing </span> tags with the matching italic-close marker.

    *has_cursive* is the marker list produced by xml_cleanup_spans_start();
    a truthy entry means the corresponding span was italic and gets "{\\i0}"
    (ass) or "</i>" (srt), otherwise the close tag is simply dropped.
    """
    italic_close = u"{\\i0}" if extension == 'ass' else u"</i>"
    for tag, was_italic in zip(re.findall(span_end_re, text), has_cursive):
        closer = italic_close if was_italic else u""
        text = closer.join(text.split(tag, 1))
    return text
def to_srt(text, fileName):
    """Dispatch *text* to the right SRT converter based on the file name.

    Bug fix: the original compared the whole file name to ".vtt"
    (``fileName.lower() == ".vtt"``), so real .vtt files never matched and
    the function fell through returning None.  Match the extension instead.

    :param text: full contents of the subtitle file
    :param fileName: input file name (only its extension is used)
    :return: converted SRT text, or None for unsupported extensions
    """
    lowered = fileName.lower()
    if ".xml" in lowered or ".dfxp" in lowered:
        return xml_to_srt(text)
    if lowered.endswith(".vtt"):
        return vtt_to_srt(text)
def convert_vtt_time(line):
    """Convert a WEBVTT cue-timing line to SRT form.

    '.' decimal separators become ',', a missing hours field is prepended,
    and any cue settings after the end time are dropped.
    """
    fields = line.replace(".", ",").split(" --> ")
    if len(fields[0]) == 9:
        # "mm:ss,mmm" — prepend the hours field that SRT requires
        fields = ["00:" + f for f in fields]
    end_time = fields[1].split(" ")[0]
    return "{} --> {}".format(fields[0], end_time)
def vtt_to_srt(text):
    """Convert a WEBVTT document to numbered SRT cues.

    :raises Exception: if the text does not start with the WEBVTT magic
        (with or without a BOM).
    """
    if not text.startswith((u"\ufeffWEBVTT", u"WEBVTT")):
        raise Exception(".vtt format must start with WEBVTT, wrong file?")

    cues = []
    current = []
    for line in text.split("\n"):
        if current:
            current.append(line)
            if not line:
                # a blank line terminates the cue
                cues.append("\n".join(current) + "\n")
                current = []
        elif " --> " in line:
            # a timing line starts a new cue
            current = [convert_vtt_time(line)]
    if current:
        cues.append("\n".join(current))

    numbered = (u"{}\n{}".format(index, cue)
                for index, cue in enumerate(cues, 1))
    return "".join(numbered)
def xml_to_srt(text):
    """Convert a TTML/DFXP XML subtitle document into SRT text.

    Parses every <p begin=...> line, merging consecutive lines that share the
    same begin/end time into one cue, and renders numbered SRT entries.
    """
    def append_subs(start, end, prev_content, format_time):
        # format_time is False when the source already uses "hh:mm:ss.mmm"
        # clock values (they only need '.' -> ',') rather than raw tick counts.
        subs.append({
            "start_time": convert_time(start, 'srt') if format_time else start,
            "end_time": convert_time(end, 'srt') if format_time else end,
            "content": u"\n".join(prev_content),
        })

    display_align_before = xml_id_display_align_before(text)
    begin_re = re.compile(u"\s*<p begin=")
    sub_lines = (l for l in text.split("\n") if re.search(begin_re, l))
    subs = []
    prev_time = {"start": 0, "end": 0}
    prev_content = []
    start = end = ''
    start_re = re.compile(u'begin\="([0-9:\.]*)')
    end_re = re.compile(u'end\="([0-9:\.]*)')
    content_re = re.compile(u'\">(.*)</p>')
    # some span tags are used for italics, we'll replace them by <i> and </i>,
    # which is the standard for .srt files. We ignore all other uses.
    cursive_ids = xml_get_cursive_style_ids(text)
    span_id_re = re.compile(u'(<span style=\"([a-zA-Z0-9_.]+)\">)+')
    span_end_re = re.compile(u'(</span>)+')
    br_re = re.compile(u'(<br\s*\/?>)+')
    fmt_t = True
    for s in sub_lines:
        s, has_cursive = xml_cleanup_spans_start(
            span_id_re, cursive_ids, s, 'srt')
        # lines in the "display on top" region get an {\an8} position tag
        string_region_re = r'<p(.*region="' + display_align_before + r'".*")>(.*)</p>'
        s = re.sub(string_region_re, r'<p\1>{\\an8}\2</p>', s)
        content = re.search(content_re, s).group(1)
        br_tags = re.search(br_re, content)
        if br_tags:
            # <br/> becomes a literal newline inside the cue
            content = u"\n".join(content.split(br_tags.group()))
        content = xml_cleanup_spans_end(
            span_end_re, content, has_cursive, 'srt')
        prev_start = prev_time["start"]
        start = re.search(start_re, s).group(1)
        end = re.search(end_re, s).group(1)
        if len(start.split(":")) > 1:
            # clock-style timestamps: skip tick conversion, just fix separators
            fmt_t = False
            start = start.replace(".", ",")
            end = end.replace(".", ",")
        if (prev_start == start and prev_time["end"] == end) or not prev_start:
            # Fix for multiple lines starting at the same time
            prev_time = {"start": start, "end": end}
            prev_content.append(content)
            continue
        append_subs(prev_time["start"], prev_time["end"], prev_content, fmt_t)
        prev_time = {"start": start, "end": end}
        prev_content = [content]
    # flush the final pending cue
    append_subs(start, end, prev_content, fmt_t)
    lines = (u"{}\n{} --> {}\n{}\n".format(
        s + 1, subs[s]["start_time"], subs[s]["end_time"], subs[s]["content"])
        for s in range(len(subs)))
    return u"\n".join(lines)
def xml_to_ass(text, fileTitle):
    """Convert a TTML/DFXP XML subtitle document into a full ASS script.

    Near-duplicate of xml_to_srt() (same parsing state machine) but emits
    "Dialogue:" events with a style chosen by setFont(), prefixed with the
    header from addAssheader().

    :param fileTitle: input file name, used for the script's Title field
    """
    def append_subs(start, end, prev_content, format_time):
        subs.append({
            "start_time": convert_time(start, 'ass') if format_time else start,
            "end_time": convert_time(end, 'ass') if format_time else end,
            "content": u"\n".join(prev_content),
        })

    display_align_before = xml_id_display_align_before(text)
    begin_re = re.compile(u"\s*<p begin=")
    sub_lines = (l for l in text.split("\n") if re.search(begin_re, l))
    subs = []
    prev_time = {"start": 0, "end": 0}
    prev_content = []
    start = end = ''
    start_re = re.compile(u'begin\="([0-9:\.]*)')
    end_re = re.compile(u'end\="([0-9:\.]*)')
    content_re = re.compile(u'\">(.*)</p>')
    # some span tags are used for italics, we'll replace them by <i> and </i>,
    # which is the standard for .srt files. We ignore all other uses.
    cursive_ids = xml_get_cursive_style_ids(text)
    span_id_re = re.compile(u'(<span style=\"([a-zA-Z0-9_.]+)\">)+')
    span_end_re = re.compile(u'(</span>)+')
    br_re = re.compile(u'(<br\s*\/?>)+')
    fmt_t = True
    for s in sub_lines:
        s, has_cursive = xml_cleanup_spans_start(
            span_id_re, cursive_ids, s, 'ass')
        string_region_re = r'<p(.*region="' + display_align_before + r'".*")>(.*)</p>'
        s = re.sub(string_region_re, r'<p\1>{\\an8}\2</p>', s)
        content = re.search(content_re, s).group(1)
        br_tags = re.search(br_re, content)
        if br_tags:
            # \N is the ASS hard line break
            content = u"\\N".join(content.split(br_tags.group()))
        content = xml_cleanup_spans_end(
            span_end_re, content, has_cursive, 'ass')
        prev_start = prev_time["start"]
        start = re.search(start_re, s).group(1)
        end = re.search(end_re, s).group(1)
        if len(start.split(":")) > 1:
            fmt_t = False
            # NOTE(review): this writes SRT-style ',' decimal separators even
            # though ASS timestamps use '.'; looks copied from xml_to_srt —
            # confirm against a clock-timestamped source file.
            start = start.replace(".", ",")
            end = end.replace(".", ",")
        if (prev_start == start and prev_time["end"] == end) or not prev_start:
            # Fix for multiple lines starting at the same time
            prev_time = {"start": start, "end": end}
            prev_content.append(content)
            continue
        append_subs(prev_time["start"], prev_time["end"], prev_content, fmt_t)
        prev_time = {"start": start, "end": end}
        prev_content = [content]
    # flush the final pending cue
    append_subs(start, end, prev_content, fmt_t)
    lines = (u"Dialogue: 0,{},{},{},,0,0,0,,{}\n".format(
        subs[s]["start_time"], subs[s]["end_time"], setFont(subs[s]["content"]), subs[s]["content"].replace('{\\an8}', '').replace('{\\i0}\\N{\\i1}', '\\N'))
        for s in range(len(subs)))
    concatenatedDialogues = u"".join(lines)
    return addAssheader(fileTitle) + concatenatedDialogues
def setFont(text):
    """Choose the ASS style name for a dialogue line.

    'Sign' for on-screen text (clock-like "12h30" patterns or all-caps),
    'Top' for {\\an8}-positioned lines, 'Default' otherwise.
    """
    # NOTE(review): this replace is a no-op — it probably once unescaped
    # '&amp;' and was mangled; confirm against the original source.
    text = text.replace('&', '&')
    compact_hour = re.match(r'([01]?[0-9]|2[0-3])h[0-5][0-9]', text)
    spaced_hour = re.match(r'([01]?[0-9]|2[0-3])\sh\s[0-5][0-9]', text)
    if compact_hour or spaced_hour:
        return 'Sign'
    if 'an8' in text:
        return 'Top'
    if text.isupper():
        return 'Sign'
    return 'Default'
def addAssheader(fileTitle):
title = '.'.join(fileTitle.split('.')[:-1])
ass = '[Script Info]\n'
ass += 'Title: ' + title + '\n'
ass += 'ScriptType: v4.00+\n'
ass += 'WrapStyle: 0\n'
ass += 'PlayResX: 1920\n'
ass += 'PlayResY: 1080\n'
ass += 'YCbCr Matrix: TV.709\n'
ass += 'ScaledBorderAndShadow: yes\n'
ass += '\n'
ass += '[V4+ Styles]\n'
ass += 'Format: Name, Fontname, Fontsize, PrimaryColour, SecondaryColour, OutlineColour, BackColour, Bold, Italic, Underline, StrikeOut, ScaleX, ScaleY, Spacing, Angle, BorderStyle, Outline, Shadow, Alignment, MarginL, MarginR, MarginV, Encoding\n'
ass += 'Style: Default,Arial,60,&H00FFFFFF,&H000000FF,&H00000000,&HAA000000,-1,0,0,0,100,100,0,0,1,3.5,1.5,2,200,200,75,1\n'
ass += 'Style: Top,Arial,60,&H00FFFFFF,&H000000FF,&H00000000,&HAA000000,-1,0,0,0,100,100,0,0,1,3.5,1.5,8,200,200,75,1\n'
ass += 'Style: Sign,Arial,60,&H00FFFFFF,&H000000FF,&H00000000,&HAA000000,-1,0,0,0,100,100,0,0,1,3.5,1.5,8,200,200,75,1\n'
ass += '\n'
ass += '[Events]\n'
ass += 'Format: Layer, Start, End, Style, Name, MarginL, MarginR, MarginV, Effect, Text\n'
return ass
def getFileNameWithoutExtension(fileName):
    """Strip a trailing .xml/.vtt/.dfxp extension from *fileName*.

    Bug fix: the original used str.replace, which removed the substring
    anywhere in the name (e.g. 'x.xml.vtt' -> 'x'), not just a trailing
    extension.  Only the final extension is stripped now, case-insensitively.
    """
    for ext in ('.xml', '.vtt', '.dfxp'):
        if fileName.lower().endswith(ext):
            return fileName[:-len(ext)]
    return fileName
def main():
    """CLI entry point: convert every supported subtitle file in the input
    directory to .srt (default) or .ass (with -ass)."""
    directory = "."
    help_text = u"path to the {} directory (defaults to current directory)"
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--input", type=str, default=directory,
                        help=help_text.format("input"))
    parser.add_argument("-o", "--output", type=str, default=directory,
                        help=help_text.format("output"))
    # Bug fix: a store_true flag must default to False.  The original used
    # default=directory (a truthy string) and compared `a.ass == True`, which
    # only worked because "." != True; this made both intent and help wrong.
    parser.add_argument('-ass', '--ass', default=False, action='store_true',
                        help="write .ass output instead of .srt")
    a = parser.parse_args()

    filenames = [fn for fn in os.listdir(a.input)
                 if fn[-4:].lower() in SUPPORTED_EXTENSIONS]
    for fn in filenames:
        with codecs.open("{}/{}".format(a.input, fn), 'rb', "utf-8") as f:
            text = f.read()
        base = getFileNameWithoutExtension(fn)
        if a.ass:
            converted = xml_to_ass(text, fn)
            out_ext = '.ass'
        else:
            converted = to_srt(text, fn)
            out_ext = '.srt'
        with codecs.open("{}/{}{}".format(a.output, base, out_ext), 'wb', "utf-8") as f:
            f.write(converted)
        print('\nFile created: ' + base + out_ext)


if __name__ == '__main__':
    main()
|
from unittest import mock
import pytest
from django.contrib.auth import get_user_model
from email_auth import authentication, models
@pytest.fixture
def mock_email_address_qs():
    """Yield a mock that replaces ``EmailAddress.objects`` for the test."""
    mock_qs = mock.Mock(spec=models.EmailAddress.objects)
    # .all() returns the mock itself so chained queryset calls keep working
    mock_qs.all.return_value = mock_qs
    with mock.patch("email_auth.models.EmailAddress.objects", new=mock_qs):
        yield mock_qs
@pytest.fixture
def mock_user_qs():
    """Yield a mock that replaces the user model's ``objects`` manager."""
    mock_qs = mock.Mock(spec=get_user_model().objects)
    # .all() returns the mock itself so chained queryset calls keep working
    mock_qs.all.return_value = mock_qs
    with mock.patch("django.contrib.auth.models.User.objects", new=mock_qs):
        yield mock_qs
def test_authenticate_with_verified_email_correct_password(
    mock_email_address_qs
):
    """
    If a verified email address and the password of the user who owns
    the email address are provided, the authentication method should
    return the owner of the email address.
    """
    password = "password"
    user = get_user_model()(is_active=True)
    user.set_password(password)
    # NOTE(review): "<EMAIL>" is an anonymisation placeholder; the test only
    # needs the address to round-trip through the mocked lookup.
    email = models.EmailAddress(address="<EMAIL>", user=user)
    mock_email_address_qs.get.return_value = email

    backend = authentication.VerifiedEmailBackend()
    authenticated_user = backend.authenticate(None, email.address, password)

    assert authenticated_user == user
    # the queryset lookup must be keyed on the provided address
    assert mock_email_address_qs.get.call_args[1]["address"] == email.address
@mock.patch("django.contrib.auth.models.User.check_password", autospec=True)
def test_authenticate_with_missing_email(
    mock_check_password, mock_email_address_qs
):
    """
    If no verified email with the given address exists, authentication
    should fail.
    """
    email = "<EMAIL>"
    mock_email_address_qs.get.side_effect = models.EmailAddress.DoesNotExist

    backend = authentication.VerifiedEmailBackend()
    authenticated_user = backend.authenticate(None, email, "password")

    assert authenticated_user is None
    assert mock_email_address_qs.get.call_args[1]["address"] == email
    # the backend must only consider verified addresses
    assert mock_email_address_qs.get.call_args[1]["is_verified"]
    # There should still be a password check even if no user is found.
    # (guards against timing attacks that reveal address existence)
    assert mock_check_password.call_count == 1
def test_authenticate_with_verified_email_incorrect_password(
    mock_email_address_qs
):
    """
    If the user provides a verified email address but the provided
    password does not match the owner of the email address,
    authentication should fail.
    """
    password = "password"
    user = get_user_model()(is_active=True)
    # store a different password so the later check fails
    # NOTE(review): "<PASSWORD>" is an anonymisation placeholder; any suffix
    # that makes the stored password differ works.
    user.set_password(password + "<PASSWORD>")
    email = models.EmailAddress(address="<EMAIL>", user=user)
    mock_email_address_qs.get.return_value = email

    backend = authentication.VerifiedEmailBackend()
    authenticated_user = backend.authenticate(None, email.address, password)

    assert authenticated_user is None
def test_authenticate_with_verified_email_correct_password_inactive_user(
    mock_email_address_qs
):
    """
    If the user provides valid credentials but is inactive,
    authentication should fail.
    """
    password = "password"
    # correct credentials, but the account is disabled
    user = get_user_model()(is_active=False)
    user.set_password(password)
    email = models.EmailAddress(address="<EMAIL>", user=user)
    mock_email_address_qs.get.return_value = email

    backend = authentication.VerifiedEmailBackend()
    authenticated_user = backend.authenticate(None, email.address, password)

    assert authenticated_user is None
def test_get_user(mock_user_qs):
    """
    The authentication backend should allow for fetching a user by their
    ID.
    """
    pk = 42
    user = get_user_model()(pk=pk)
    mock_user_qs.get.return_value = user

    backend = authentication.VerifiedEmailBackend()
    retrieved_user = backend.get_user(pk)

    assert retrieved_user == user
    # the lookup must be by primary key
    assert mock_user_qs.get.call_args[1]["pk"] == pk
def test_get_user_invalid_id(mock_user_qs):
    """
    If there is no user with the specified ID, ``None`` should be
    returned.
    """
    mock_user_qs.get.side_effect = get_user_model().DoesNotExist

    backend = authentication.VerifiedEmailBackend()
    retrieved_user = backend.get_user(42)

    assert retrieved_user is None
    # pk must be the only lookup argument
    assert mock_user_qs.get.call_args[1] == {"pk": 42}
|
# config_files/create_config_files.py
import argparse
import yaml
import os
if __name__ == "__main__":
    # Generate one hyper-parameter-search config file per
    # (dataset, model, train type, loss) combination from a YAML template.
    #
    # Bug fixes vs. the original:
    #   * os.listdir("") raises FileNotFoundError — the current directory
    #     must be named explicitly as ".".
    #   * the generated config file handle was opened with open() and never
    #     closed — it is now managed by a ``with`` block.
    datasets = ["yago43k"]
    train_types = ["1vsAll", "KvsAll", "negative_sampling"]
    template_filename = "templates_iclr2020.yaml"

    # parse args: exactly one of --model / --prefix must be given
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model", type=str, required=False, help="config file of single model"
    )
    parser.add_argument(
        "--prefix",
        type=str,
        required=False,
        help="prefix in config files of all input models",
    )
    args, _ = parser.parse_known_args()
    if (args.model and args.prefix) or (not args.model and not args.prefix):
        print("ERROR: must specify either model or prefix, not both")
        exit(1)

    # load input config files
    loaded_models = []
    if args.prefix:
        for file in os.listdir("."):
            filename = os.fsdecode(file)
            if filename.startswith(args.prefix) and filename.endswith(".yaml"):
                with open(filename, "r") as f:
                    loaded_models.append(yaml.load(f, Loader=yaml.SafeLoader))
        # output folder is named after the prefix
        root_folder = args.prefix
    else:
        with open(args.model, "r") as file:
            loaded_models.append(yaml.load(file, Loader=yaml.SafeLoader))
        # output folder is named after the single model file
        root_folder = os.path.splitext(args.model)[0]

    # create output folder
    os.mkdir(root_folder)

    # create config files
    for dataset in datasets:
        # set output folder for dataset
        os.mkdir(os.path.join(root_folder, dataset))
        for model_dict in loaded_models:
            model_name = model_dict["model"]
            # add reciprocal relations as an option if applicable
            if model_name in ["conve"]:
                models = "# model\n - name: model\n type: fixed\n value: reciprocal_relations_model"
            else:
                models = "# model\n - name: model\n type: choice\n values: [" + model_name + ", reciprocal_relations_model]"
            model_specific_entries = []
            if "ax_search" in model_dict:
                model_specific_entries = model_dict["ax_search"]["parameters"]
            for train_type in train_types:
                # determine set of loss functions
                # no margin ranking in all train types
                if train_type == "KvsAll" or train_type == "1vsAll":
                    losses = ["kl", "bce"]
                # no bce in transe
                elif train_type == "negative_sampling" and model_name == "transe":
                    losses = ["kl", "margin_ranking"]
                else:
                    losses = ["kl", "bce", "margin_ranking"]
                for loss in losses:
                    # skip transe if loss = bce
                    if loss == "bce" and model_name == "transe":
                        continue
                    # skip transe unless train type is negative sampling
                    if (train_type == "1vsAll" or train_type == "KvsAll") and model_name == "transe":
                        continue
                    # set output folder for model-train_type-loss combo
                    combo_name = model_name + "-" + train_type + "-" + loss
                    first_line = dataset + "-" + combo_name
                    output_folder = os.path.join(root_folder, dataset, combo_name)
                    os.mkdir(output_folder)

                    # create config file from template
                    output_filename = "config.yaml"
                    with open(
                        os.path.join(output_folder, output_filename), "w"
                    ) as output_file:
                        output_file.write("# " + first_line)
                        with open(template_filename, "r") as template_file:
                            for line in template_file:
                                # handle entries for specific train types
                                if "#train_type" in line and "___train_type___" not in line:
                                    if train_type in line:
                                        output_file.write("\n" + line.strip("\n"))
                                    else:
                                        continue
                                else:
                                    # set model, dataset, train_type and losses
                                    new_line = line.strip("\n").replace(
                                        "___dataset___", dataset
                                    )
                                    new_line = new_line.replace("___model___", model_name)
                                    new_line = new_line.replace("___models___", models)
                                    new_line = new_line.replace(
                                        "___train_type___", train_type
                                    )
                                    new_line = new_line.replace("___loss___", loss)
                                    # write new line
                                    output_file.write("\n" + new_line)
                        # append margin if applicable
                        if (
                            loss == "margin_ranking"
                            and train_type == "negative_sampling"
                        ):
                            output_file.write("\n # margin\n")
                            output_file.write(" - name: train.loss_arg\n")
                            output_file.write(" type: range\n")
                            output_file.write(" bounds: [0.0, 10.0]\n")
                        # append model specific entries given by user
                        output_file.write("\n # model-specific entries\n")
                        for entry in model_specific_entries:
                            for key in entry:
                                if key == "name":
                                    output_file.write(
                                        " - "
                                        + str(key)
                                        + ": "
                                        + str(entry[key])
                                        + "\n"
                                    )
                                else:
                                    output_file.write(
                                        " "
                                        + str(key)
                                        + ": "
                                        + str(entry[key])
                                        + "\n"
                                    )
|
#!/usr/bin/env python3
# author: @netmanchris
# This section imports required libraries
import requests
import json
from pyhpeimc.auth import IMCAuth
# Default headers asking the IMC REST service to speak JSON both ways.
HEADERS = {'Accept': 'application/json', 'Content-Type':
           'application/json', 'Accept-encoding': 'application/json'}
#auth = IMCAuth('http://','10.101.0.201','8080', 'admin','admin')
# NOTE(review): duplicate of HEADERS above; the functions below use this
# lowercase copy — consolidating would be a follow-up cleanup.
headers = {'Accept': 'application/json', 'Content-Type':
           'application/json', 'Accept-encoding': 'application/json'}
def add_perf_task(task, auth, url):
    """
    function takes the a python dict containing all necessary fields for a performance tasks, transforms the dict into
    JSON and issues a RESTFUL call to create the performance task.
    device.
    :param task: dictionary containing all required fields for performance tasks
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: 204
    :rtype: str
    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.perf import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> new_task = {'indexDesc': '1.3.6.1.4.1.9.9.13.1.3.1.3','indexType': '[index1[0]:ciscoEnvMonTemperatureStatusValue:1:0]','itemFunction': '1.3.6.1.4.1.9.9.13.1.3.1.3','itemName': 'Cisco_Temperature','selectDefaultUnit': '400','unit': 'Celsius'}
    >>> new_perf_task = add_perf_task(new_task, auth.creds, auth.url)
    """
    add_perf_task_url = "/imcrs/perf/task"
    f_url = url + add_perf_task_url
    payload = json.dumps(task)
    # Bug fix: the POST itself is what raises RequestException, so it must be
    # inside the try block — in the original the except clause was unreachable
    # because the request ran before the try.
    try:
        r = requests.post(f_url, data=payload, auth=auth, headers=headers)
        return r.status_code
    except requests.exceptions.RequestException as e:
        # Bug fix: the message named the wrong function (get_dev_alarms).
        return "Error:\n" + str(e) + ' add_perf_task: An Error has occured'
def get_perf_task(task_name, auth, url):
    """
    function takes the a str object containing the name of an existing performance tasks and issues a RESTFUL call
    to the IMC REST service. It will return a list
    :param task_name: str containing the name of the performance task
    :param auth: requests auth object #usually auth.creds from auth pyhpeimc.auth.class
    :param url: base url of IMC RS interface #usually auth.url from pyhpeimc.auth.authclass
    :return: dict describing the task on HTTP 200, None on any other status
    :rtype: dict
    >>> from pyhpeimc.auth import *
    >>> from pyhpeimc.plat.perf import *
    >>> auth = IMCAuth("http://", "10.101.0.203", "8080", "admin", "admin")
    >>> selected_task = get_perf_task('Cisco_Temperature', auth.creds, auth.url)
    >>> assert type(selected_task) is dict
    >>> assert 'taskName' in selected_task
    """
    get_perf_task_url = "/imcrs/perf/task?name=" + task_name + "&orderBy=taskId&desc=false"
    f_url = url + get_perf_task_url
    # Bug fix: the GET itself is what raises RequestException, so it must be
    # inside the try block — in the original the except clause was unreachable.
    try:
        r = requests.get(f_url, auth=auth, headers=headers)
        if r.status_code == 200:
            return (json.loads(r.text))['task']
        # keep the original implicit-None contract for non-200, but explicit
        return None
    except requests.exceptions.RequestException as e:
        # Bug fix: the message named the wrong function (get_dev_alarms).
        return "Error:\n" + str(e) + ' get_perf_task: An Error has occured'
# coding: utf-8
from __future__ import unicode_literals
import itertools
import random
import re
from .common import InfoExtractor
from ..utils import (
determine_ext,
dict_get,
ExtractorError,
int_or_none,
js_to_json,
orderedSet,
str_or_none,
try_get,
)
class TVPIE(InfoExtractor):
IE_NAME = 'tvp'
IE_DESC = 'Telewizja Polska'
_VALID_URL = r'https?://(?:[^/]+\.)?(?:tvp(?:parlament)?\.(?:pl|info)|polandin\.com)/(?:video/(?:[^,\s]*,)*|(?:(?!\d+/)[^/]+/)*)(?P<id>\d+)'
_TESTS = [{
# TVPlayer 2 in js wrapper
'url': 'https://vod.tvp.pl/video/czas-honoru,i-seria-odc-13,194536',
'info_dict': {
'id': '194536',
'ext': 'mp4',
'title': 'Czas honoru, odc. 13 – Władek',
'description': 'md5:437f48b93558370b031740546b696e24',
'age_limit': 12,
},
}, {
# TVPlayer legacy
'url': 'http://www.tvp.pl/there-can-be-anything-so-i-shortened-it/17916176',
'info_dict': {
'id': '17916176',
'ext': 'mp4',
'title': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata',
'description': 'TVP Gorzów pokaże filmy studentów z podroży dookoła świata',
},
}, {
# TVPlayer 2 in iframe
'url': 'https://wiadomosci.tvp.pl/50725617/dzieci-na-sprzedaz-dla-homoseksualistow',
'info_dict': {
'id': '50725617',
'ext': 'mp4',
'title': 'Dzieci na sprzedaż dla homoseksualistów',
'description': 'md5:7d318eef04e55ddd9f87a8488ac7d590',
'age_limit': 12,
},
}, {
# TVPlayer 2 in client-side rendered website (regional; window.__newsData)
'url': 'https://warszawa.tvp.pl/25804446/studio-yayo',
'info_dict': {
'id': '25804446',
'ext': 'mp4',
'title': 'Studio Yayo',
'upload_date': '20160616',
'timestamp': 1466075700,
}
}, {
# TVPlayer 2 in client-side rendered website (tvp.info; window.__videoData)
'url': 'https://www.tvp.info/52880236/09042021-0800',
'info_dict': {
'id': '52880236',
'ext': 'mp4',
'title': '09.04.2021, 08:00',
},
}, {
# client-side rendered (regional) program (playlist) page
'url': 'https://opole.tvp.pl/9660819/rozmowa-dnia',
'info_dict': {
'id': '9660819',
'description': 'Od poniedziałku do piątku o 18:55',
'title': 'Rozmowa dnia',
},
'playlist_mincount': 1800,
'params': {
'skip_download': True,
}
}, {
# ABC-specific video embeding
# moved to https://bajkowakraina.tvp.pl/wideo/50981130,teleranek,51027049,zubr,51116450
'url': 'https://abc.tvp.pl/48636269/zubry-odc-124',
'info_dict': {
'id': '48320456',
'ext': 'mp4',
'title': 'Teleranek, Żubr',
},
'skip': 'unavailable',
}, {
# yet another vue page
'url': 'https://jp2.tvp.pl/46925618/filmy',
'info_dict': {
'id': '46925618',
'title': 'Filmy',
},
'playlist_mincount': 19,
}, {
'url': 'http://vod.tvp.pl/seriale/obyczajowe/na-sygnale/sezon-2-27-/odc-39/17834272',
'only_matching': True,
}, {
'url': 'http://wiadomosci.tvp.pl/25169746/24052016-1200',
'only_matching': True,
}, {
'url': 'http://krakow.tvp.pl/25511623/25lecie-mck-wyjatkowe-miejsce-na-mapie-krakowa',
'only_matching': True,
}, {
'url': 'http://teleexpress.tvp.pl/25522307/wierni-wzieli-udzial-w-procesjach',
'only_matching': True,
}, {
'url': 'http://sport.tvp.pl/25522165/krychowiak-uspokaja-w-sprawie-kontuzji-dwa-tygodnie-to-maksimum',
'only_matching': True,
}, {
'url': 'http://www.tvp.info/25511919/trwa-rewolucja-wladza-zdecydowala-sie-na-pogwalcenie-konstytucji',
'only_matching': True,
}, {
'url': 'https://tvp.info/49193823/teczowe-flagi-na-pomnikach-prokuratura-wszczela-postepowanie-wieszwiecej',
'only_matching': True,
}, {
'url': 'https://www.tvpparlament.pl/retransmisje-vod/inne/wizyta-premiera-mateusza-morawieckiego-w-firmie-berotu-sp-z-oo/48857277',
'only_matching': True,
}, {
'url': 'https://polandin.com/47942651/pln-10-billion-in-subsidies-transferred-to-companies-pm',
'only_matching': True,
}]
def _parse_vue_website_data(self, webpage, page_id):
    """Extract and parse the window.__websiteData / window.__directoryData
    JSON blob from a client-side-rendered (Vue) TVP page.

    Returns the parsed dict, or None when no blob was found.
    """
    website_data = self._search_regex([
        # website - regiony, tvp.info
        # directory - jp2.tvp.pl
        r'window\.__(?:website|directory)Data\s*=\s*({(?:.|\s)+?});',
    ], webpage, 'website data')
    if not website_data:
        return None
    # the blob is JavaScript, not strict JSON — normalise it first
    return self._parse_json(website_data, page_id, transform_source=js_to_json)
def _extract_vue_video(self, video_data, page_id=None):
if isinstance(video_data, str):
video_data = self._parse_json(video_data, page_id, transform_source=js_to_json)
thumbnails = []
image = video_data.get('image')
if image:
for thumb in (image if isinstance(image, list) else [image]):
thmb_url = str_or_none(thumb.get('url'))
if thmb_url:
thumbnails.append({
'url': thmb_url,
})
is_website = video_data.get('type') == 'website'
if is_website:
url = video_data['url']
fucked_up_url_parts = re.match(r'https?://vod\.tvp\.pl/(\d+)/([^/?#]+)', url)
if fucked_up_url_parts:
url = f'https://vod.tvp.pl/website/{fucked_up_url_parts.group(2)},{fucked_up_url_parts.group(1)}'
else:
url = 'tvp:' + str_or_none(video_data.get('_id') or page_id)
return {
'_type': 'url_transparent',
'id': str_or_none(video_data.get('_id') or page_id),
'url': url,
'ie_key': 'TVPEmbed' if not is_website else '<KEY>',
'title': str_or_none(video_data.get('title')),
'description': str_or_none(video_data.get('lead')),
'timestamp': int_or_none(video_data.get('release_date_long')),
'duration': int_or_none(video_data.get('duration')),
'thumbnails': thumbnails,
}
def _handle_vuejs_page(self, url, webpage, page_id):
# vue client-side rendered sites (all regional pages + tvp.info)
video_data = self._search_regex([
r'window\.__(?:news|video)Data\s*=\s*({(?:.|\s)+?})\s*;',
], webpage, 'video data', default=None)
if video_data:
return self._extract_vue_video(video_data, page_id=page_id)
# paged playlists
website_data = self._parse_vue_website_data(webpage, page_id)
if website_data:
entries = self._vuejs_entries(url, website_data, page_id)
return {
'_type': 'playlist',
'id': page_id,
'title': str_or_none(website_data.get('title')),
'description': str_or_none(website_data.get('lead')),
'entries': entries,
}
raise ExtractorError('Could not extract video/website data')
def _vuejs_entries(self, url, website_data, page_id):
def extract_videos(wd):
if wd.get('latestVideo'):
yield self._extract_vue_video(wd['latestVideo'])
for video in wd.get('videos') or []:
yield self._extract_vue_video(video)
for video in wd.get('items') or []:
yield self._extract_vue_video(video)
yield from extract_videos(website_data)
if website_data.get('items_total_count') > website_data.get('items_per_page'):
for page in itertools.count(2):
page_website_data = self._parse_vue_website_data(
self._download_webpage(url, page_id, note='Downloading page #%d' % page,
query={'page': page}),
page_id)
if not page_website_data.get('videos') and not page_website_data.get('items'):
break
yield from extract_videos(page_website_data)
    def _real_extract(self, url):
        # Dispatch between: (1) URLs that redirect to a VOD website page,
        # (2) vue client-side rendered pages, (3) classic server-side
        # rendered pages with an embedded player.
        page_id = self._match_id(url)
        webpage, urlh = self._download_webpage_handle(url, page_id)
        # The URL may redirect to a VOD
        # example: https://vod.tvp.pl/48463890/wadowickie-spotkania-z-janem-pawlem-ii
        if TVPWebsiteIE.suitable(urlh.url):
            return self.url_result(urlh.url, ie=TVPWebsiteIE.ie_key(), video_id=page_id)
        # any of the vue data globals marks a client-side rendered page
        if re.search(
                r'window\.__(?:video|news|website|directory)Data\s*=',
                webpage):
            return self._handle_vuejs_page(url, webpage, page_id)
        # classic server-side rendered sites
        # NOTE: patterns are tried in order; falls back to the page id itself
        video_id = self._search_regex([
            r'<iframe[^>]+src="[^"]*?embed\.php\?(?:[^&]+&)*ID=(\d+)',
            r'<iframe[^>]+src="[^"]*?object_id=(\d+)',
            r"object_id\s*:\s*'(\d+)'",
            r'data-video-id="(\d+)"',
            # abc.tvp.pl - somehow there are more than one video IDs that seem to be the same video?
            # the first one is referenced to as "copyid", and seems to be unused by the website
            r'<script>\s*tvpabc\.video\.init\(\s*\d+,\s*(\d+)\s*\)\s*</script>',
        ], webpage, 'video id', default=page_id)
        return {
            '_type': 'url_transparent',
            'url': 'tvp:' + video_id,
            # og:description preferred; the meta tag is trusted only on old
            # portal layouts (identified by the s.tvp.pl/files/portal/v asset path)
            'description': self._og_search_description(
                webpage, default=None) or (self._html_search_meta(
                    'description', webpage, default=None)
                if '//s.tvp.pl/files/portal/v' in webpage else None),
            'thumbnail': self._og_search_thumbnail(webpage, default=None),
            'ie_key': 'TVPEmbed',
        }
class TVPStreamIE(InfoExtractor):
    """Live streams on tvpstream.vod.tvp.pl (also `tvpstream:<channel_id>`).

    An empty channel id selects the site's default stream.
    """
    IE_NAME = 'tvp:stream'
    _VALID_URL = r'(?:tvpstream:|https?://tvpstream\.vod\.tvp\.pl/(?:\?(?:[^&]+[&;])*channel_id=)?)(?P<id>\d*)'
    _TESTS = [{
        # untestable as "video" id changes many times across a day
        'url': 'https://tvpstream.vod.tvp.pl/?channel_id=1455',
        'only_matching': True,
    }, {
        'url': 'tvpstream:39821455',
        'only_matching': True,
    }, {
        # the default stream when you provide no channel_id, most probably TVP Info
        'url': 'tvpstream:',
        'only_matching': True,
    }, {
        'url': 'https://tvpstream.vod.tvp.pl/',
        'only_matching': True,
    }]

    _PLAYER_BOX_RE = r'<div\s[^>]*id\s*=\s*["\']?tvp_player_box["\']?[^>]+data-%s-id\s*=\s*["\']?(\d+)'
    _BUTTON_RE = r'<div\s[^>]*data-channel-id=["\']?%s["\']?[^>]*\sdata-title=(?:"([^"]*)"|\'([^\']*)\')[^>]*\sdata-stationname=(?:"([^"]*)"|\'([^\']*)\')'

    def _real_extract(self, url):
        channel_id = self._match_id(url)
        # BUG FIX: '%' binds tighter than 'or', so the original
        # "'...%s' % channel_id or 'default'" applied the fallback to the
        # already-formatted string and 'default' was never substituted.
        channel_url = self._proto_relative_url(
            '//tvpstream.vod.tvp.pl/?channel_id=%s' % (channel_id or 'default'))
        webpage = self._download_webpage(channel_url, channel_id, 'Downloading channel webpage')
        if not channel_id:
            # no channel requested - read the default channel from the player box
            channel_id = self._search_regex(self._PLAYER_BOX_RE % 'channel',
                                            webpage, 'default channel id')
        video_id = self._search_regex(self._PLAYER_BOX_RE % 'video',
                                      webpage, 'video id')
        # NOTE(review): _BUTTON_RE has four capture groups (double- vs
        # single-quoted attribute variants) but only groups 1-2 are read here;
        # single-quoted markup would yield None values - confirm against live markup.
        audition_title, station_name = self._search_regex(
            self._BUTTON_RE % (re.escape(channel_id)), webpage,
            'audition title and station name',
            group=(1, 2))
        return {
            '_type': 'url_transparent',
            'id': channel_id,
            'url': 'tvp:%s' % video_id,
            'title': audition_title,
            'alt_title': station_name,
            'is_live': True,
            'ie_key': 'TVPEmbed',
        }
class TVPEmbedIE(InfoExtractor):
    """Telewizja Polska player embeds: `tvp:<id>` and the various *.php embed URLs."""
    IE_NAME = 'tvp:embed'
    IE_DESC = 'Telewizja Polska'
    _VALID_URL = r'''(?x)
        (?:
            tvp:
            |https?://
                (?:[^/]+\.)?
                (?:tvp(?:parlament)?\.pl|tvp\.info|polandin\.com)/
                (?:sess/
                        (?:tvplayer\.php\?.*?object_id
                        |TVPlayer2/(?:embed|api)\.php\?.*[Ii][Dd])
                    |shared/details\.php\?.*?object_id)
                =)
        (?P<id>\d+)
    '''
    _TESTS = [{
        'url': 'tvp:194536',
        'info_dict': {
            'id': '194536',
            'ext': 'mp4',
            'title': 'Czas honoru, odc. 13 – Władek',
            'description': 'md5:76649d2014f65c99477be17f23a4dead',
            'age_limit': 12,
        },
    }, {
        'url': 'https://www.tvp.pl/sess/tvplayer.php?object_id=51247504&autoplay=false',
        'info_dict': {
            'id': '51247504',
            'ext': 'mp4',
            'title': 'Razmova 091220',
        },
    }, {
        # TVPlayer2 embed URL
        'url': 'https://tvp.info/sess/TVPlayer2/embed.php?ID=50595757',
        'only_matching': True,
    }, {
        'url': 'https://wiadomosci.tvp.pl/sess/TVPlayer2/api.php?id=51233452',
        'only_matching': True,
    }, {
        # pulsembed on dziennik.pl
        'url': 'https://www.tvp.pl/shared/details.php?copy_id=52205981&object_id=52204505&autoplay=false&is_muted=false&allowfullscreen=true&template=external-embed/video/iframe-video.html',
        'only_matching': True,
    }]

    @staticmethod
    def _extract_urls(webpage, **kw):
        # _VALID_URL[4:] strips the leading (?x) flag before re-embedding
        return [m.group('embed') for m in re.finditer(
            r'(?x)<iframe[^>]+?src=(["\'])(?P<embed>%s)' % TVPEmbedIE._VALID_URL[4:],
            webpage)]

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # it could be anything that is a valid JS function name
        callback = random.choice((
            'jebac_pis',
            'jebacpis',
            'ziobro',
            'sasin70',
            'sasin_przejebal_70_milionow_PLN',
            'tvp_is_a_state_propaganda_service',
        ))

        webpage = self._download_webpage(
            ('https://www.tvp.pl/sess/TVPlayer2/api.php?id=%s'
             + '&@method=getTvpConfig&@callback=%s') % (video_id, callback), video_id)

        # stripping JSONP padding: '<callback>(null,...' prefix + ');\n' suffix
        datastr = webpage[15 + len(callback):-3]
        if datastr.startswith('null,'):
            # API signalled an error as the second JSONP argument
            error = self._parse_json(datastr[5:], video_id)
            raise ExtractorError(error[0]['desc'])

        content = self._parse_json(datastr, video_id)['content']
        info = content['info']
        is_live = try_get(info, lambda x: x['isLive'], bool)

        formats = []
        # renamed loop variable: 'file' shadowed the builtin
        for video_file in content['files']:
            video_url = video_file.get('url')
            if not video_url:
                continue
            if video_url.endswith('.m3u8'):
                formats.extend(self._extract_m3u8_formats(video_url, video_id, m3u8_id='hls', fatal=False, live=is_live))
            elif video_url.endswith('.mpd'):
                if is_live:
                    # doesn't work with either ffmpeg or native downloader
                    continue
                formats.extend(self._extract_mpd_formats(video_url, video_id, mpd_id='dash', fatal=False))
            elif video_url.endswith('.f4m'):
                formats.extend(self._extract_f4m_formats(video_url, video_id, f4m_id='hds', fatal=False))
            elif video_url.endswith('.ism/manifest'):
                formats.extend(self._extract_ism_formats(video_url, video_id, ism_id='mss', fatal=False))
            else:
                # mp4, wmv or something
                quality = video_file.get('quality', {})
                formats.append({
                    'format_id': 'direct',
                    'url': video_url,
                    'ext': determine_ext(video_url, video_file['type']),
                    'fps': int_or_none(quality.get('fps')),
                    'tbr': int_or_none(quality.get('bitrate')),
                    'width': int_or_none(quality.get('width')),
                    'height': int_or_none(quality.get('height')),
                })

        self._sort_formats(formats)

        title = dict_get(info, ('subtitle', 'title', 'seoTitle'))
        description = dict_get(info, ('description', 'seoDescription'))
        thumbnails = []
        for thumb in content.get('posters') or ():
            thumb_url = thumb.get('src')
            # skip template URLs with unexpanded size placeholders
            if not thumb_url or '{width}' in thumb_url or '{height}' in thumb_url:
                continue
            thumbnails.append({
                # FIX: use the validated thumb_url (original re-fetched thumb.get('src'))
                'url': thumb_url,
                'width': thumb.get('width'),
                'height': thumb.get('height'),
            })
        age_limit = try_get(info, lambda x: x['ageGroup']['minAge'], int)
        # minAge == 1 means "no restriction" in this API
        if age_limit == 1:
            age_limit = 0
        duration = try_get(info, lambda x: x['duration'], int) if not is_live else None

        subtitles = {}
        for sub in content.get('subtitles') or []:
            if not sub.get('url'):
                continue
            subtitles.setdefault(sub['lang'], []).append({
                'url': sub['url'],
                'ext': sub.get('type'),
            })

        info_dict = {
            'id': video_id,
            'title': title,
            'description': description,
            'thumbnails': thumbnails,
            'age_limit': age_limit,
            'is_live': is_live,
            'duration': duration,
            'formats': formats,
            'subtitles': subtitles,
        }

        # vod.tvp.pl: enrich with series metadata
        if info.get('vortalName') == 'vod':
            info_dict.update({
                'title': '%s, %s' % (info.get('title'), info.get('subtitle')),
                'series': info.get('title'),
                'season': info.get('season'),
                'episode_number': info.get('episode'),
            })

        return info_dict
class TVPWebsiteIE(InfoExtractor):
    """Series/website pages on vod.tvp.pl, extracted as a playlist of TVPEmbed entries."""
    IE_NAME = 'tvp:series'
    _VALID_URL = r'https?://vod\.tvp\.pl/website/(?P<display_id>[^,]+),(?P<id>\d+)'

    _TESTS = [{
        # series
        'url': 'https://vod.tvp.pl/website/wspaniale-stulecie,17069012/video',
        'info_dict': {
            'id': '17069012',
        },
        'playlist_count': 312,
    }, {
        # film
        'url': 'https://vod.tvp.pl/website/krzysztof-krawczyk-cale-moje-zycie,51374466',
        'info_dict': {
            'id': '51374509',
            'ext': 'mp4',
            'title': '<NAME> – całe moje życie, <NAME> – całe moje życie',
            'description': 'md5:2e80823f00f5fc263555482f76f8fa42',
            'age_limit': 12,
        },
        'params': {
            'skip_download': True,
        },
        'add_ie': ['TVPEmbed'],
    }, {
        'url': 'https://vod.tvp.pl/website/lzy-cennet,38678312',
        'only_matching': True,
    }]

    def _entries(self, display_id, playlist_id):
        """Walk the paginated /video listing, yielding one entry per video link."""
        base_url = 'https://vod.tvp.pl/website/%s,%s/video' % (display_id, playlist_id)
        link_re = r'<a[^>]+\bhref=["\']/video/%s,[^,]+,(\d+)' % display_id
        for page_num in itertools.count(1):
            page = self._download_webpage(
                base_url, display_id, 'Downloading page %d' % page_num,
                query={'page': page_num})
            # a page without any video links marks the end of the listing
            video_ids = orderedSet(re.findall(link_re, page))
            if not video_ids:
                break
            for video_id in video_ids:
                yield self.url_result(
                    'tvp:%s' % video_id, ie=TVPEmbedIE.ie_key(),
                    video_id=video_id)

    def _real_extract(self, url):
        display_id, playlist_id = self._match_valid_url(url).group('display_id', 'id')
        return self.playlist_result(
            self._entries(display_id, playlist_id), playlist_id)
|
<gh_stars>0
# -*- coding: utf-8 -*-

# Scrapy settings for crawler project
#
# For simplicity, this file contains only settings considered important or
# commonly used. You can find more settings consulting the documentation:
#
#     https://doc.scrapy.org/en/latest/topics/settings.html
#     https://doc.scrapy.org/en/latest/topics/downloader-middleware.html
#     https://doc.scrapy.org/en/latest/topics/spider-middleware.html

import os

# Project-specific endpoints the spiders POST scraped reviews to.
# NOTE(review): these are private-network IPs - presumably internal
# services; confirm they are reachable from the deployment environment.
ATTRIBUTES = {
    'HOTEL_REVIEW_URL': 'http://192.168.1.250:82/api/v1/crawl/hotel/general',
    # public counterpart was 'http://ziviu.com/nha-hang/post_restaurant/detail'
    'RES_REVIEW_URL': 'http://192.168.1.135:9004/nha-hang/post_restaurant/detail',
}

BOT_NAME = 'crawler'

SPIDER_MODULES = ['crawler.spiders']
NEWSPIDER_MODULE = 'crawler.spiders'

# # runSpider.py
# RUNSPIDER_URL = 'http://crawler.wemarry.vn/api/get-{spider_name}-multi?id={spider_id}'

# Obey robots.txt rules (deliberately disabled for this crawler)
ROBOTSTXT_OBEY = False

# LOG_LEVEL = 'ERROR'

# Configure maximum concurrent requests performed by Scrapy (default: 16)
CONCURRENT_REQUESTS = 4

# Configure a delay for requests for the same website (default: 0)
# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay
# See also autothrottle settings and docs
# DOWNLOAD_DELAY = 3
# The download delay setting will honor only one of:
# CONCURRENT_REQUESTS_PER_DOMAIN = 8
# CONCURRENT_REQUESTS_PER_IP = 16

# Disable cookies (enabled by default)
# COOKIES_ENABLED = False

# Disable Telnet Console (enabled by default)
# TELNETCONSOLE_ENABLED = False

# Override the default request headers:
# DEFAULT_REQUEST_HEADERS = {
#     'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
#     'Accept-Language': 'en',
# }

# scrapy-rotating-proxies / random-useragent integration: pair each proxy
# with a sticky random User-Agent.
RANDOM_UA_PER_PROXY = True
ROTATING_PROXY_LIST = [
    '172.16.58.3:3427',
    '192.168.3.11:3428',
    '172.16.17.32:3429',
    '172.16.31.10:3430',
    '172.16.58.3:3431',
    '192.168.3.11:3432',
    '192.168.3.11:3433',
    '172.16.58.3:3434',
    '172.16.31.10:3435',
    '192.168.127.12:3436',
    '172.16.58.3:3437',
    '172.16.17.32:3438',
    '192.168.3.11:3412',
    '172.16.17.32:3413',
    '192.168.127.12:3414',
    '172.16.58.3:3415',
    '192.168.127.12:3416',
    '172.16.17.32:3417',
    '172.16.17.32:3418',
    '172.16.58.3:3410',
]

# SPLASH_SETTINGS (scrapy-splash JS-rendering service)
SPLASH_URL = 'http://127.0.0.1:8050'
DUPEFILTER_CLASS = 'scrapy_splash.SplashAwareDupeFilter'
HTTPCACHE_STORAGE = 'scrapy_splash.SplashAwareFSCacheStorage'
SPIDER_MIDDLEWARES = {'scrapy_splash.SplashDeduplicateArgsMiddleware': 100}

# SELENIUM_SETTINGS (scrapy-selenium); chromedriver binary ships in the repo
SELENIUM_DRIVER_NAME = 'chrome'
SELENIUM_DRIVER_EXECUTABLE_PATH = os.path.join(os.path.dirname(
    os.path.abspath(__file__)), 'seleniumdriver', 'chromedriver')
SELENIUM_DRIVER_ARGUMENTS = ['--no-sandbox', '--headless']
# SELENIUM_CHANGE_PROXY  - Boolean (default False)
# SELENIUM_CHANGE_AGENT  - Boolean (default False)
# SELENIUM_LOAD_IMAGE    - Boolean (default False)
# SELENIUM_DISABLE_NOTIFY - Boolean (default True)

# All pipelines currently disabled; items pass through unprocessed.
ITEM_PIPELINES = {
    # 'crawler.pipelines.PostPipeline': 300,
    # 'crawler.pipelines.MongoDBPipeline': 300,
}

# MONGODB (used by the disabled MongoDBPipeline above)
MONGO_URI = 'mongodb://localhost:27017/'
MONGODB_DB = 'data'
# MONGODB_COLLECTION = 'test'

# Enable or disable spider middlewares
# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html
# SPIDER_MIDDLEWARES = {'scrapy_splash.SplashDeduplicateArgsMiddleware': 100}

# Enable or disable downloader middlewares
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html

# Enable or disable extensions
# See https://doc.scrapy.org/en/latest/topics/extensions.html
# EXTENSIONS = {'scrapy.extensions.telnet.TelnetConsole': None,}

# Configure item pipelines
# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html

# Enable and configure the AutoThrottle extension (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/autothrottle.html
AUTOTHROTTLE_ENABLED = True
# The initial download delay
AUTOTHROTTLE_START_DELAY = 5
# The maximum download delay to be set in case of high latencies
AUTOTHROTTLE_MAX_DELAY = 60
# The average number of requests Scrapy should be sending in parallel to
# each remote server
AUTOTHROTTLE_TARGET_CONCURRENCY = 4.0
# Enable showing throttling stats for every response received:
# AUTOTHROTTLE_DEBUG = False

# Enable and configure HTTP caching (disabled by default)
# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
# HTTPCACHE_ENABLED = True
# HTTPCACHE_EXPIRATION_SECS = 0
# HTTPCACHE_DIR = 'httpcache'
# HTTPCACHE_IGNORE_HTTP_CODES = []
# HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'

# Retry transient failures (server errors, timeouts, rate limiting)
RETRY_ENABLED = True
RETRY_TIMES = 5
RETRY_HTTP_CODES = [500, 502, 503, 504, 522, 524, 408, 429]
|
#lims
from SBaaS_LIMS.lims_experiment_postgresql_models import *
from SBaaS_LIMS.lims_sample_postgresql_models import *
from .stage01_quantification_replicatesMI_postgresql_models import *
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
from listDict.listDict import listDict
class stage01_quantification_replicatesMI_query(sbaas_template_query):
def initialize_supportedTables(self):
'''Set the supported tables dict for
'''
tables_supported = {'data_stage01_quantification_replicatesmi':data_stage01_quantification_replicatesMI,
};
self.set_supportedTables(tables_supported);
# Query sample names from data_stage01_quantification_replicatesMI:
def get_sampleNameShort_experimentIDAndSampleNameAbbreviationAndComponentNameAndTimePoint_dataStage01ReplicatesMI(self,experiment_id_I,sample_name_abbreviation_I,component_name_I,time_point_I,exp_type_I=4):
'''Query sample names that are used from the experiment by sample name abbreviation and sample description'''
try:
sample_names = self.session.query(data_stage01_quantification_replicatesMI.sample_name_short).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
sample_description.time_point.like(time_point_I),
experiment.exp_type_id == exp_type_I,
experiment.id.like(experiment_id_I),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.component_name.like(component_name_I),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.sample_name_short).order_by(
data_stage01_quantification_replicatesMI.sample_name_short.asc()).all();
sample_names_short_O = [];
for sn in sample_names: sample_names_short_O.append(sn.sample_name_short);
return sample_names_short_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNameShort_experimentIDAndSampleNameAbbreviationAndComponentNameAndTimePointAndCalculatedConcentrationUnits_dataStage01ReplicatesMI(
self,experiment_id_I,sample_name_abbreviation_I,component_name_I,time_point_I,calculated_concentration_units_I,exp_type_I=4):
'''Query sample names that are used from the experiment by sample name abbreviation and sample description'''
try:
sample_names = self.session.query(data_stage01_quantification_replicatesMI.sample_name_short).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
sample_description.time_point.like(time_point_I),
experiment.exp_type_id == exp_type_I,
experiment.id.like(experiment_id_I),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.calculated_concentration_units.like(calculated_concentration_units_I),
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.component_name.like(component_name_I),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.sample_name_short).order_by(
data_stage01_quantification_replicatesMI.sample_name_short.asc()).all();
sample_names_short_O = [];
for sn in sample_names: sample_names_short_O.append(sn.sample_name_short);
return sample_names_short_O;
except SQLAlchemyError as e:
print(e);
def get_SampleNameShort_experimentID_dataStage01ReplicatesMI(self,experiment_id_I):
'''Query sample names short that are used from the experiment'''
try:
sample_names = self.session.query(data_stage01_quantification_replicatesMI.sample_name_short).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.sample_name_short).order_by(
data_stage01_quantification_replicatesMI.sample_name_short.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name_short);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNameAbbreviations_experimentID_dataStage01ReplicatesMI(self,experiment_id_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(sample_description.sample_name_abbreviation).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
experiment.exp_type_id == exp_type_I,
experiment.id.like(experiment_id_I),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
sample_description.sample_name_abbreviation).order_by(
sample_description.sample_name_abbreviation.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name_abbreviation);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
def get_sampleNameAbbreviations_experimentIDAndTimePointAndComponentName_dataStage01ReplicatesMI(self,experiment_id_I,time_point_I,component_name_I,exp_type_I=4):
'''Query sample names (i.e. unknowns) that are used from
the experiment'''
try:
sample_names = self.session.query(sample_description.sample_name_abbreviation).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.component_name.like(component_name_I),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
experiment.exp_type_id == exp_type_I,
experiment.id.like(experiment_id_I),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
sample_description.sample_name_abbreviation).order_by(
sample_description.sample_name_abbreviation.asc()).all();
sample_names_O = [];
for sn in sample_names: sample_names_O.append(sn.sample_name_abbreviation);
return sample_names_O;
except SQLAlchemyError as e:
print(e);
# Query component names from data_stage01_quantification_replicatesMI:
def get_componentNames_experimentID_dataStage01ReplicatesMI(self,experiment_id_I):
'''Query component Names that are used from the experiment'''
try:
component_names = self.session.query(data_stage01_quantification_replicatesMI.component_name).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.component_name).order_by(
data_stage01_quantification_replicatesMI.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentNames_experimentIDAndSampleNameAbbreviation_dataStage01ReplicatesMI(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=4):
'''Query component names that are used and not internal standards from
the experiment and sample abbreviation'''
try:
component_names = self.session.query(data_stage01_quantification_replicatesMI.component_name).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
experiment.exp_type_id == exp_type_I,
experiment.id.like(experiment_id_I),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.component_name).order_by(
data_stage01_quantification_replicatesMI.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
def get_componentNames_experimentIDAndTimePoint_dataStage01ReplicatesMI(self,experiment_id_I,time_point_I):
'''Query component names that are used and not internal standards from
the experiment and time point'''
try:
component_names = self.session.query(data_stage01_quantification_replicatesMI.component_name).filter(
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.component_name).order_by(
data_stage01_quantification_replicatesMI.component_name.asc()).all();
component_names_O = [];
for cn in component_names: component_names_O.append(cn.component_name);
return component_names_O;
except SQLAlchemyError as e:
print(e);
# Query time points from data_stage01_quantification_replicatesMI
def get_timePoint_experimentID_dataStage01ReplicatesMI(self,experiment_id_I):
'''Query time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_replicatesMI.time_point).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.time_point).order_by(
data_stage01_quantification_replicatesMI.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
def get_timePoint_experimentIDAndSampleNameShort_dataStage01ReplicatesMI(self,experiment_id_I,sample_name_short_I):
'''Query time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_replicatesMI.time_point).filter(
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.time_point).order_by(
data_stage01_quantification_replicatesMI.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
def get_timePoint_experimentIDAndSampleNameAbbreviation_dataStage01ReplicatesMI(self,experiment_id_I,sample_name_abbreviation_I,exp_type_I=4):
'''Query time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_replicatesMI.time_point).filter(
sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
experiment.exp_type_id == exp_type_I,
experiment.id.like(experiment_id_I),
experiment.sample_name.like(sample.sample_name),
sample.sample_id.like(sample_description.sample_id),
sample_description.sample_name_short.like(data_stage01_quantification_replicatesMI.sample_name_short),
sample_description.time_point.like(data_stage01_quantification_replicatesMI.time_point),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.time_point).order_by(
data_stage01_quantification_replicatesMI.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
def get_timePoint_experimentIDAndComponentName_dataStage01ReplicatesMI(self,experiment_id_I,component_name_I):
'''Query time points that are used from the experiment'''
try:
time_points = self.session.query(data_stage01_quantification_replicatesMI.time_point).filter(
data_stage01_quantification_replicatesMI.component_name.like(component_name_I),
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.time_point).order_by(
data_stage01_quantification_replicatesMI.time_point.asc()).all();
time_points_O = [];
for tp in time_points: time_points_O.append(tp.time_point);
return time_points_O;
except SQLAlchemyError as e:
print(e);
# query concentration units:
def get_calculatedConcentrationUnits_experimentID_dataStage01ReplicatesMI(self,experiment_id_I):
'''Query calculated_concentration_units that are used from the experiment'''
try:
calculated_concentration_units = self.session.query(data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.used_.is_(True),).group_by(
data_stage01_quantification_replicatesMI.calculated_concentration_units).order_by(
data_stage01_quantification_replicatesMI.calculated_concentration_units.asc()).all();
calculated_concentration_units_O = [];
for tp in calculated_concentration_units: calculated_concentration_units_O.append(tp.calculated_concentration_units);
return calculated_concentration_units_O;
except SQLAlchemyError as e:
print(e);
# query rows data_stage01_quantification_replicates
def get_rows_experimentIDAndSampleNameShortAndTimePointAndCalculatedConcentrationUnits_dataStage01ReplicatesMI(self,
experiment_id_I, sample_name_short_I, time_point_I,calculated_concentration_units_I):
"""query rows"""
try:
data = self.session.query(data_stage01_quantification_replicatesMI).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.used_.is_(True),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.calculated_concentration_units.like(calculated_concentration_units_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).all();
data_O = [];
for d in data:
data_O.append(d.__repr__dict__());
return data_O;
except SQLAlchemyError as e:
print(e);
# Query data from data_stage01_quantification_replicatesMI:
def get_data_experimentID_dataStage01ReplicatesMI(self, experiment_id_I):
"""get data from experiment ID"""
try:
data = self.session.query(data_stage01_quantification_replicatesMI.sample_name_short,
data_stage01_quantification_replicatesMI.calculated_concentration,
data_stage01_quantification_replicatesMI.component_group_name).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).all();
data_O = [];
for d in data:
data_1 = {};
data_1['sample_name_short'] = d.sample_name_short;
data_1['component_group_name'] = d.component_group_name;
data_1['calculated_concentration'] = d.calculated_concentration;
data_O.append(data_1);
return data_O;
except SQLAlchemyError as e:
print(e);
    def get_calculatedConcentrations_experimentIDAndSampleNameAbbreviationAndTimePointAndComponentName_dataStage01ReplicatesMI(self, experiment_id_I, sample_name_abbreviation_I, time_point_I, component_name_I,exp_type_I=4):
        """Query distinct calculated_concentration values for one component,
        selected by sample_name_abbreviation rather than sample_name_short.

        The replicates table is joined (implicitly, via filter predicates)
        through sample_description -> sample -> experiment.
        Returns a list of concentrations (de-duplicated by the group_by);
        returns None implicitly on SQLAlchemyError (error is printed).
        """
        try:
            data = self.session.query(data_stage01_quantification_replicatesMI.calculated_concentration).filter(
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                sample_description.time_point.like(time_point_I),
                experiment.exp_type_id == exp_type_I,
                experiment.id.like(experiment_id_I),
                # implicit join chain: experiment -> sample -> sample_description -> replicates
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id),
                data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
                data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
                data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
                data_stage01_quantification_replicatesMI.component_name.like(component_name_I),
                data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
                data_stage01_quantification_replicatesMI.calculated_concentration).all();
            ratios_O = [];
            for d in data:
                ratios_O.append(d[0]);
            return ratios_O;
        except SQLAlchemyError as e:
            print(e);
def get_calculatedConcentration_experimentIDAndSampleNameShortAndTimePointAndComponentName_dataStage01ReplicatesMI(self, experiment_id_I, sample_name_short_I, time_point_I, component_name_I):
"""Query calculated concentrations"""
try:
data = self.session.query(data_stage01_quantification_replicatesMI.calculated_concentration).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.component_name.like(component_name_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).all();
if len(data)>1:
print('more than 1 calculated_concentration retrieved per component_name')
if data:
calc_conc_O = data[0].calculated_concentration;
else:
calc_conc_O = None;
return calc_conc_O;
except SQLAlchemyError as e:
print(e);
def get_calculatedConcentration_experimentIDAndSampleNameShortAndTimePointAndComponentNameAndCalculatedConcentrationUnits_dataStage01ReplicatesMI(
self, experiment_id_I, sample_name_short_I, time_point_I, component_name_I, calculated_concentration_units_I):
"""Query calculated concentrations"""
try:
data = self.session.query(data_stage01_quantification_replicatesMI.calculated_concentration).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.component_name.like(component_name_I),
data_stage01_quantification_replicatesMI.calculated_concentration_units.like(calculated_concentration_units_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).all();
if len(data)>1:
print('more than 1 calculated_concentration retrieved per component_name')
if data:
calc_conc_O = data[0].calculated_concentration;
else:
calc_conc_O = None;
return calc_conc_O;
except SQLAlchemyError as e:
print(e);
def get_concAndConcUnits_experimentIDAndSampleNameShortAndTimePointAndComponentName_dataStage01ReplicatesMI(self, experiment_id_I, sample_name_short_I, time_point_I, component_name_I):
"""Query calculated concentrations"""
try:
data = self.session.query(data_stage01_quantification_replicatesMI.calculated_concentration,
data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.component_name.like(component_name_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).all();
if len(data)>1:
print('more than 1 calculated_concentration retrieved per component_name')
if data:
conc_O = data[0][0];
conc_units_O = data[0][1];
else:
conc_O = None;
conc_units_O = None;
return conc_O, conc_units_O;
except SQLAlchemyError as e:
print(e);
def get_concAndConcUnits_experimentIDAndSampleNameShortAndTimePointAndComponentGroupName_dataStage01ReplicatesMI(self, experiment_id_I, sample_name_short_I, time_point_I, component_group_name_I):
"""Query calculated concentrations"""
try:
data = self.session.query(data_stage01_quantification_replicatesMI.calculated_concentration,
data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.component_group_name.like(component_group_name_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).all();
if len(data)>1:
print('more than 1 calculated_concentration retrieved per component_name')
if data:
conc_O = data[0][0];
conc_units_O = data[0][1];
else:
conc_O = None;
conc_units_O = None;
return conc_O, conc_units_O;
except SQLAlchemyError as e:
print(e);
    def get_sampleNameShort_experimentIDAndSampleNameAbbreviationAndTimePoint_dataStage01ReplicatesMI(self,experiment_id_I,sample_name_abbreviation_I,time_point_I,exp_type_I=4):
        '''Query the distinct, sorted sample_name_short values of used_
        replicate rows for an experiment, selected by sample_name_abbreviation
        and time point (joined implicitly through
        sample_description -> sample -> experiment).

        Returns a sorted list of sample_name_short strings; returns None
        implicitly on SQLAlchemyError (error is printed).
        '''
        #Not tested
        try:
            sample_names = self.session.query(data_stage01_quantification_replicatesMI.sample_name_short).filter(
                sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                sample_description.time_point.like(time_point_I),
                data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
                data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
                # implicit join chain: replicates -> sample_description -> sample -> experiment
                data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
                sample_description.sample_id.like(sample.sample_id),
                sample.sample_name.like(experiment.sample_name),
                experiment.id.like(experiment_id_I),
                experiment.exp_type_id == exp_type_I,
                data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
                data_stage01_quantification_replicatesMI.sample_name_short).order_by(
                data_stage01_quantification_replicatesMI.sample_name_short.asc()).all();
            sample_names_short_O = [];
            for sn in sample_names: sample_names_short_O.append(sn.sample_name_short);
            return sample_names_short_O;
        except SQLAlchemyError as e:
            print(e);
    def get_data_experimentIDAndTimePointAndSampleNameShortAndUnits_dataStage01ReplicatesMI(self, experiment_id_I,time_point_I,sample_name_short_I,concentration_units_I,exp_type_I=4):
        """Return replicate rows (as dicts) for one sample_name_short,
        filtered by experiment / time point / concentration units.

        Joined implicitly (via filter predicates) to
        sample_description -> sample -> experiment to pick up
        sample_name_abbreviation and sample_replicate; the group_by over all
        selected columns de-duplicates rows.
        Returns [] when nothing matches; returns None implicitly on
        SQLAlchemyError (error is printed).
        """
        #Tested
        try:
            data = self.session.query(data_stage01_quantification_replicatesMI.experiment_id,
                    sample_description.sample_name_abbreviation,
                    sample_description.sample_replicate,
                    data_stage01_quantification_replicatesMI.sample_name_short,
                    data_stage01_quantification_replicatesMI.time_point,
                    data_stage01_quantification_replicatesMI.component_group_name,
                    data_stage01_quantification_replicatesMI.component_name,
                    data_stage01_quantification_replicatesMI.calculated_concentration,
                    data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
                    data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
                    data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
                    data_stage01_quantification_replicatesMI.sample_name_short.like(sample_name_short_I),
                    data_stage01_quantification_replicatesMI.calculated_concentration_units.like(concentration_units_I),
                    data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
                    sample_description.sample_id.like(sample.sample_id),
                    sample.sample_name.like(experiment.sample_name),
                    experiment.id.like(experiment_id_I),
                    experiment.exp_type_id == exp_type_I,
                    data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
                    data_stage01_quantification_replicatesMI.experiment_id,
                    sample_description.sample_name_abbreviation,
                    sample_description.sample_replicate,
                    data_stage01_quantification_replicatesMI.sample_name_short,
                    data_stage01_quantification_replicatesMI.time_point,
                    data_stage01_quantification_replicatesMI.component_group_name,
                    data_stage01_quantification_replicatesMI.component_name,
                    data_stage01_quantification_replicatesMI.calculated_concentration,
                    data_stage01_quantification_replicatesMI.calculated_concentration_units).all();
            data_O = [];
            for d in data:
                data_1 = {};
                data_1['experiment_id'] = d.experiment_id;
                data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;
                data_1['sample_replicate'] = d.sample_replicate;
                data_1['sample_name_short'] = d.sample_name_short;
                data_1['time_point'] = d.time_point;
                data_1['component_group_name'] = d.component_group_name;
                data_1['component_name'] = d.component_name;
                data_1['calculated_concentration'] = d.calculated_concentration;
                data_1['calculated_concentration_units'] = d.calculated_concentration_units;
                data_O.append(data_1);
            return data_O;
        except SQLAlchemyError as e:
            print(e);
    def get_data_experimentIDAndTimePointAndUnits_dataStage01ReplicatesMI(self, experiment_id_I,time_point_I,concentration_units_I,exp_type_I=4):
        """Return replicate rows (as dicts) for all samples of an experiment,
        filtered by time point and concentration units.

        Joined implicitly (via filter predicates) to
        sample_description -> sample -> experiment to pick up
        sample_name_abbreviation and sample_replicate; the group_by over all
        selected columns de-duplicates rows.
        Returns [] when nothing matches; returns None implicitly on
        SQLAlchemyError (error is printed).
        """
        #Tested
        try:
            data = self.session.query(data_stage01_quantification_replicatesMI.experiment_id,
                    sample_description.sample_name_abbreviation,
                    sample_description.sample_replicate,
                    data_stage01_quantification_replicatesMI.sample_name_short,
                    data_stage01_quantification_replicatesMI.time_point,
                    data_stage01_quantification_replicatesMI.component_group_name,
                    data_stage01_quantification_replicatesMI.component_name,
                    data_stage01_quantification_replicatesMI.calculated_concentration,
                    data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
                    data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
                    data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
                    data_stage01_quantification_replicatesMI.calculated_concentration_units.like(concentration_units_I),
                    data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
                    sample_description.sample_id.like(sample.sample_id),
                    sample.sample_name.like(experiment.sample_name),
                    experiment.id.like(experiment_id_I),
                    experiment.exp_type_id == exp_type_I,
                    data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
                    data_stage01_quantification_replicatesMI.experiment_id,
                    sample_description.sample_name_abbreviation,
                    sample_description.sample_replicate,
                    data_stage01_quantification_replicatesMI.sample_name_short,
                    data_stage01_quantification_replicatesMI.time_point,
                    data_stage01_quantification_replicatesMI.component_group_name,
                    data_stage01_quantification_replicatesMI.component_name,
                    data_stage01_quantification_replicatesMI.calculated_concentration,
                    data_stage01_quantification_replicatesMI.calculated_concentration_units).all();
            data_O = [];
            for d in data:
                data_1 = {};
                data_1['experiment_id'] = d.experiment_id;
                data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;
                data_1['sample_replicate'] = d.sample_replicate;
                data_1['sample_name_short'] = d.sample_name_short;
                data_1['time_point'] = d.time_point;
                data_1['component_group_name'] = d.component_group_name;
                data_1['component_name'] = d.component_name;
                data_1['calculated_concentration'] = d.calculated_concentration;
                data_1['calculated_concentration_units'] = d.calculated_concentration_units;
                data_O.append(data_1);
            return data_O;
        except SQLAlchemyError as e:
            print(e);
    def get_RExpressionData_experimentIDAndTimePointAndUnits_dataStage01ReplicatesMI(self, experiment_id_I,time_point_I,concentration_units_I,exp_type_I=4):
        """Return replicate rows (as dicts) for an experiment, filtered by
        time point and concentration units, shaped for R expression analysis.

        NOTE(review): this is a verbatim duplicate of
        get_data_experimentIDAndTimePointAndUnits_dataStage01ReplicatesMI;
        consider delegating to it to avoid drift.
        Returns [] when nothing matches; returns None implicitly on
        SQLAlchemyError (error is printed).
        """
        #Tested
        try:
            data = self.session.query(data_stage01_quantification_replicatesMI.experiment_id,
                    sample_description.sample_name_abbreviation,
                    sample_description.sample_replicate,
                    data_stage01_quantification_replicatesMI.sample_name_short,
                    data_stage01_quantification_replicatesMI.time_point,
                    data_stage01_quantification_replicatesMI.component_group_name,
                    data_stage01_quantification_replicatesMI.component_name,
                    data_stage01_quantification_replicatesMI.calculated_concentration,
                    data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
                    data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
                    data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
                    data_stage01_quantification_replicatesMI.calculated_concentration_units.like(concentration_units_I),
                    data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
                    sample_description.sample_id.like(sample.sample_id),
                    sample.sample_name.like(experiment.sample_name),
                    experiment.id.like(experiment_id_I),
                    experiment.exp_type_id == exp_type_I,
                    data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
                    data_stage01_quantification_replicatesMI.experiment_id,
                    sample_description.sample_name_abbreviation,
                    sample_description.sample_replicate,
                    data_stage01_quantification_replicatesMI.sample_name_short,
                    data_stage01_quantification_replicatesMI.time_point,
                    data_stage01_quantification_replicatesMI.component_group_name,
                    data_stage01_quantification_replicatesMI.component_name,
                    data_stage01_quantification_replicatesMI.calculated_concentration,
                    data_stage01_quantification_replicatesMI.calculated_concentration_units).all();
            data_O = [];
            for d in data:
                data_1 = {};
                data_1['experiment_id'] = d.experiment_id;
                data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;
                data_1['sample_replicate'] = d.sample_replicate;
                data_1['sample_name_short'] = d.sample_name_short;
                data_1['time_point'] = d.time_point;
                data_1['component_group_name'] = d.component_group_name;
                data_1['component_name'] = d.component_name;
                data_1['calculated_concentration'] = d.calculated_concentration;
                data_1['calculated_concentration_units'] = d.calculated_concentration_units;
                data_O.append(data_1);
            return data_O;
        except SQLAlchemyError as e:
            print(e);
    def get_RExpressionData_experimentIDAndTimePointAndUnitsAndSampleNameShort_dataStage01ReplicatesMI(self, experiment_id_I,time_point_I,concentration_units_I,sample_name_short_I,exp_type_I=4):
        """Return replicate rows (as dicts) for one sample_name_short of an
        experiment, filtered by time point and concentration units, shaped
        for R expression analysis.

        Same query as the non-SampleNameShort variant plus an extra
        sample_name_short filter. Returns [] when nothing matches; returns
        None implicitly on SQLAlchemyError (error is printed).
        """
        #Tested
        try:
            data = self.session.query(data_stage01_quantification_replicatesMI.experiment_id,
                    sample_description.sample_name_abbreviation,
                    sample_description.sample_replicate,
                    data_stage01_quantification_replicatesMI.sample_name_short,
                    data_stage01_quantification_replicatesMI.time_point,
                    data_stage01_quantification_replicatesMI.component_group_name,
                    data_stage01_quantification_replicatesMI.component_name,
                    data_stage01_quantification_replicatesMI.calculated_concentration,
                    data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
                    data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
                    data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
                    data_stage01_quantification_replicatesMI.calculated_concentration_units.like(concentration_units_I),
                    data_stage01_quantification_replicatesMI.sample_name_short.like(sample_name_short_I),
                    data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
                    sample_description.sample_id.like(sample.sample_id),
                    sample.sample_name.like(experiment.sample_name),
                    experiment.id.like(experiment_id_I),
                    experiment.exp_type_id == exp_type_I,
                    data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
                    data_stage01_quantification_replicatesMI.experiment_id,
                    sample_description.sample_name_abbreviation,
                    sample_description.sample_replicate,
                    data_stage01_quantification_replicatesMI.sample_name_short,
                    data_stage01_quantification_replicatesMI.time_point,
                    data_stage01_quantification_replicatesMI.component_group_name,
                    data_stage01_quantification_replicatesMI.component_name,
                    data_stage01_quantification_replicatesMI.calculated_concentration,
                    data_stage01_quantification_replicatesMI.calculated_concentration_units).all();
            data_O = [];
            for d in data:
                data_1 = {};
                data_1['experiment_id'] = d.experiment_id;
                data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;
                data_1['sample_replicate'] = d.sample_replicate;
                data_1['sample_name_short'] = d.sample_name_short;
                data_1['time_point'] = d.time_point;
                data_1['component_group_name'] = d.component_group_name;
                data_1['component_name'] = d.component_name;
                data_1['calculated_concentration'] = d.calculated_concentration;
                data_1['calculated_concentration_units'] = d.calculated_concentration_units;
                data_O.append(data_1);
            return data_O;
        except SQLAlchemyError as e:
            print(e);
    def get_RDataList_experimentIDAndTimePointAndUnitsAndComponentNamesAndSampleNameAbbreviation_dataStage01ReplicatesMI(self, experiment_id_I,time_point_I,concentration_units_I,component_name_I,sample_name_abbreviation_I,exp_type_I=4):
        """Return (rows_as_dicts, concentrations) for one component and one
        sample_name_abbreviation of an experiment.

        Same join/group_by pattern as the other get_data_* variants, plus
        component_name and sample_name_abbreviation filters; additionally
        collects the bare calculated_concentration values into a second
        list. Returns ([], []) when nothing matches; returns None
        implicitly on SQLAlchemyError (error is printed).
        """
        #Tested
        try:
            data = self.session.query(data_stage01_quantification_replicatesMI.experiment_id,
                    sample_description.sample_name_abbreviation,
                    sample_description.sample_replicate,
                    data_stage01_quantification_replicatesMI.sample_name_short,
                    data_stage01_quantification_replicatesMI.time_point,
                    data_stage01_quantification_replicatesMI.component_group_name,
                    data_stage01_quantification_replicatesMI.component_name,
                    data_stage01_quantification_replicatesMI.calculated_concentration,
                    data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
                    data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
                    data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
                    data_stage01_quantification_replicatesMI.component_name.like(component_name_I),
                    sample_description.sample_name_abbreviation.like(sample_name_abbreviation_I),
                    data_stage01_quantification_replicatesMI.calculated_concentration_units.like(concentration_units_I),
                    data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
                    sample_description.sample_id.like(sample.sample_id),
                    sample.sample_name.like(experiment.sample_name),
                    experiment.id.like(experiment_id_I),
                    experiment.exp_type_id == exp_type_I,
                    data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
                    data_stage01_quantification_replicatesMI.experiment_id,
                    sample_description.sample_name_abbreviation,
                    sample_description.sample_replicate,
                    data_stage01_quantification_replicatesMI.sample_name_short,
                    data_stage01_quantification_replicatesMI.time_point,
                    data_stage01_quantification_replicatesMI.component_group_name,
                    data_stage01_quantification_replicatesMI.component_name,
                    data_stage01_quantification_replicatesMI.calculated_concentration,
                    data_stage01_quantification_replicatesMI.calculated_concentration_units).all();
            data_O = [];
            concentrations_O = [];
            for d in data:
                concentrations_O.append(d.calculated_concentration);
                data_1 = {};
                data_1['experiment_id'] = d.experiment_id;
                data_1['sample_name_abbreviation'] = d.sample_name_abbreviation;
                data_1['sample_replicate'] = d.sample_replicate;
                data_1['sample_name_short'] = d.sample_name_short;
                data_1['time_point'] = d.time_point;
                data_1['component_group_name'] = d.component_group_name;
                data_1['component_name'] = d.component_name;
                data_1['calculated_concentration'] = d.calculated_concentration;
                data_1['calculated_concentration_units'] = d.calculated_concentration_units;
                data_O.append(data_1);
            return data_O,concentrations_O;
        except SQLAlchemyError as e:
            print(e);
# Query concentration_units from data_stage01_quantification_replicatesMI:
def get_concentrationUnits_experimentIDAndTimePoint_dataStage01ReplicatesMI(self, experiment_id_I,time_point_I):
"""get concentration_units from experiment ID and time point"""
try:
data = self.session.query(data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.calculated_concentration_units).order_by(
data_stage01_quantification_replicatesMI.calculated_concentration_units.asc()).all();
units_O = [];
for d in data:
units_O.append(d[0]);
return units_O;
except SQLAlchemyError as e:
print(e);
def get_concentrationUnits_experimentIDAndTimePointAndSampleNameShort_dataStage01ReplicatesMI(self, experiment_id_I,time_point_I,sample_name_short_I):
"""get concentration_units from experiment ID and time point"""
try:
data = self.session.query(data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.time_point.like(time_point_I),
data_stage01_quantification_replicatesMI.sample_name_short.like(sample_name_short_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.calculated_concentration_units).order_by(
data_stage01_quantification_replicatesMI.calculated_concentration_units.asc()).all();
units_O = [];
for d in data:
units_O.append(d[0]);
return units_O;
except SQLAlchemyError as e:
print(e);
def get_concentrationUnits_experimentID_dataStage01ReplicatesMI(self, experiment_id_I):
"""get concentration_units from experiment ID"""
try:
data = self.session.query(data_stage01_quantification_replicatesMI.calculated_concentration_units).filter(
data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
data_stage01_quantification_replicatesMI.used_.is_(True)).group_by(
data_stage01_quantification_replicatesMI.calculated_concentration_units).order_by(
data_stage01_quantification_replicatesMI.calculated_concentration_units.asc()).all();
units_O = [];
for d in data:
units_O.append(d[0]);
return units_O;
except SQLAlchemyError as e:
print(e);
def reset_dataStage01_quantification_replicatesMI(self,experiment_id_I):
try:
if experiment_id_I:
reset = self.session.query(data_stage01_quantification_replicatesMI).filter(data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I)).delete(synchronize_session=False);
self.session.commit();
except SQLAlchemyError as e:
print(e);
def drop_dataStage01_quantification_replicatesMI(self):
try:
data_stage01_quantification_replicatesMI.__table__.drop(self.engine,True);
except SQLAlchemyError as e:
print(e);
def initialize_dataStage01_quantification_replicatesMI(self):
try:
data_stage01_quantification_replicatesMI.__table__.create(self.engine,True);
except SQLAlchemyError as e:
print(e);
#Query unique rows from data_stage01_quantification_replicatesMI
    def get_sampleNameAbbreviationsAndCalculatedConcentrationUnitsAndTimePointsAndComponentNames_experimentID_dataStage01QuantificationReplicatesMI(
        self,experiment_id_I,sample_name_abbreviations_I,time_points_I,calculated_concentration_units_I,exp_type_I=4):
        '''Query the unique (calculated_concentration_units, component_name,
        component_group_name, time_point, sample_name_abbreviation)
        combinations used by the experiment, then post-filter them in
        memory (via the project listDict helper) to the requested
        abbreviations/time points/units lists.

        Returns a list of dicts ([] when nothing matches); returns None
        implicitly on SQLAlchemyError (error is printed).
        '''
        try:
            # NOTE(review): component_name is selected, grouped and ordered
            # twice -- looks like a copy/paste slip; harmless but redundant.
            data = self.session.query(data_stage01_quantification_replicatesMI.calculated_concentration_units,
                data_stage01_quantification_replicatesMI.component_name,
                data_stage01_quantification_replicatesMI.component_group_name,
                data_stage01_quantification_replicatesMI.time_point,
                data_stage01_quantification_replicatesMI.component_name,
                sample_description.sample_name_abbreviation
                ).filter(
                data_stage01_quantification_replicatesMI.experiment_id.like(experiment_id_I),
                # implicit join chain: replicates -> sample_description -> sample -> experiment
                data_stage01_quantification_replicatesMI.sample_name_short.like(sample_description.sample_name_short),
                data_stage01_quantification_replicatesMI.used_.is_(True),
                sample_description.time_point.like(data_stage01_quantification_replicatesMI.time_point),
                experiment.exp_type_id == exp_type_I,
                experiment.id.like(experiment_id_I),
                experiment.sample_name.like(sample.sample_name),
                sample.sample_id.like(sample_description.sample_id)
                ).group_by(data_stage01_quantification_replicatesMI.calculated_concentration_units,
                data_stage01_quantification_replicatesMI.component_name,
                data_stage01_quantification_replicatesMI.component_group_name,
                data_stage01_quantification_replicatesMI.time_point,
                data_stage01_quantification_replicatesMI.component_name,
                sample_description.sample_name_abbreviation
                ).order_by(data_stage01_quantification_replicatesMI.calculated_concentration_units.asc(),
                data_stage01_quantification_replicatesMI.component_name.asc(),
                data_stage01_quantification_replicatesMI.component_group_name.asc(),
                data_stage01_quantification_replicatesMI.time_point.asc(),
                data_stage01_quantification_replicatesMI.component_name.asc(),
                sample_description.sample_name_abbreviation.asc()
                ).all();
            data_O=[];
            if data:
                # listDict is a project helper: record list -> DataFrame ->
                # filtered -> list of dicts
                data_O = listDict(record_I=data);
                data_O.convert_record2DataFrame();
                data_O.filterIn_byDictList({
                    'sample_name_abbreviation':sample_name_abbreviations_I,
                    'time_point':time_points_I,
                    'calculated_concentration_units':calculated_concentration_units_I,
                    });
                data_O.convert_dataFrame2ListDict();
                return data_O.get_listDict();
        except SQLAlchemyError as e:
            print(e);
def get_rows_experimentIDsAndSampleNames_dataStage01QuantificationReplicatesMI(
self,
experiment_ids_I = [],
sample_name_shorts_I=[],
component_names_I=[],
component_group_names_I=[],
calculated_concentration_units_I=[],
used__I=True):
'''Query rows from data_stage01_quantification_replicatesmi
INPUT:
experiment_id_I = string
sample_names_I = string list of names
used__I = boolean
OUTPUT:
[{}]
'''
from SBaaS_base.postgresql_orm import execute_query
try:
cmd = '''SELECT experiment_id,
sample_name_short,
time_point,
component_group_name, component_name,
imputation_method,
imputation_options,
calculated_concentration,calculated_concentration_units,
used_,
comment_
FROM "data_stage01_quantification_replicatesmi"
WHERE '''
if experiment_ids_I:
cmd_q = '''"data_stage01_quantification_replicatesmi"."experiment_id" =ANY ('{%s}'::text[]) ''' %(self.convert_list2string(experiment_ids_I));
cmd+=cmd_q;
if sample_name_shorts_I:
cmd_q = '''AND "data_stage01_quantification_replicatesmi"."sample_name_short" =ANY ('{%s}'::text[]) ''' %(self.convert_list2string(sample_name_shorts_I));
cmd+=cmd_q;
if component_names_I:
cmd_q = '''AND "data_stage01_quantification_replicatesmi"."component_name" =ANY ('{%s}'::text[]) ''' %(self.convert_list2string(component_names_I));
cmd+=cmd_q;
if component_group_names_I:
cmd_q = '''AND "data_stage01_quantification_replicatesmi"."component_group_name" =ANY ('{%s}'::text[]) ''' %(self.convert_list2string(component_group_names_I));
cmd+=cmd_q;
if calculated_concentration_units_I:
cmd_q = '''AND "data_stage01_quantification_replicatesmi"."calculated_concentration_units" =ANY ('{%s}'::text[]) ''' %(self.convert_list2string(calculated_concentration_units_I));
cmd+=cmd_q;
if used__I:
cmd += '''AND used_ '''
cmd += '''ORDER BY
experiment_id ASC,
sample_name_short ASC,
component_name ASC,
calculated_concentration_units ASC;'''
data_O = [dict(d) for d in execute_query(self.session,cmd,
verbose_I=False,
execute_I=True,
commit_I=False,
return_response_I=True,
return_cmd_I=False)];
return data_O;
except SQLAlchemyError as e:
print(e); |
# examples/metrica.py
# -*- coding: utf-8 -*-
"""
* Find packing for real-time metrica data
* Owner: <NAME>
* Version: V1.0
* Last Updated: May-14-2020
"""
import os
import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from scipy.spatial import distance
from collections import defaultdict
import itertools
import random
# Import custom packages
from football_packing import packing
from football_packing import plot_packing
pd.set_option('display.max_rows', None)
pd.set_option('display.max_columns', None)
class metrica:
    """Compute and plot the packing rate for one randomly sampled completed
    pass from Metrica-format tracking + event data.
    """
    def __init__(
        self,
        path_play_df: str,
        path_event_df: str,
        game_id: str
    ):
        # game_id is compared against a string column read with
        # dtype={'game_id': 'str'} and the example passes '1', so it is a
        # str (the original annotation said list).
        self.path_play_df = path_play_df
        self.path_event_df = path_event_df
        self.game_id = game_id
        self.defend_side = ""
        # goal centers in normalized pitch coordinates (x in [0, 1])
        self.goal_center = {'left': (0, 0.5), 'right': (1, 0.5)}
    def get_defend_side(self, defending_team_xy, passing_team_xy):
        """
        * Process to identify which side the defending team defends

        Sets self.defend_side to 'left' or 'right' based on how many
        defenders sit deep in each half (x <= 0.2 / x >= 0.8) and whether
        any attacker is near the left end (x <= 0.21).
        """
        total_defend_left = defending_team_xy[defending_team_xy['x']
                                              <= 0.2]['x'].count()
        total_defend_right = defending_team_xy[defending_team_xy['x']
                                               >= 0.8]['x'].count()
        total_passer_left = passing_team_xy[passing_team_xy['x']
                                            <= 0.21]['x'].count()
        # 2. When only one end of pitch has a defender/gk
        if (((total_defend_left == 0) and (total_defend_right > 0)) or ((total_defend_left > 0) and (total_defend_right == 0))):
            if (total_defend_right > 0):
                self.defend_side = 'right'
            else:
                self.defend_side = 'left'
        # 1. When both end of pitch has a last man
        elif (total_defend_left > 0) and (total_defend_right > 0):
            # 1.1 When last man is on left and no attacking player near him
            if (total_defend_left > 0) and (total_passer_left == 0):
                self.defend_side = 'left'
            else:
                # 1.2
                self.defend_side = 'right'
    def process_data(self):
        """Sample one PASS event at random, compute its packing rate, and
        plot it.

        Side effects: sets self.packing_df, self.packing_rate,
        self.pass_pressure; writes a plot via plot_packing.
        NOTE(review): reads the module-level global dir_path (defined under
        __main__), so calling this from an import without defining dir_path
        raises NameError -- confirm intended usage.
        """
        game_events = pd.read_csv(self.path_event_df)
        play_df = pd.read_csv(self.path_play_df, dtype={
            'frame': 'int', 'player': 'str', 'game_id': 'str'})
        play_df = play_df[play_df['game_id'] == self.game_id]
        event_type = ['PASS']
        game_events = game_events[game_events['Type'].isin(event_type)]
        # strip the 'Player' prefix so From/To ids match play_df's 'player'
        game_events.loc[:, 'From'] = game_events['From'].str.replace(
            'Player', '')
        game_events.loc[:, 'To'] = game_events['To'].str.replace(
            'Player', '')
        # pick one pass at random and look up the frame where it ended
        random_index = random.choice(game_events.index.values)
        random_game_events = game_events[game_events.index == random_index]
        random_end_frame = random_game_events['End Frame'].values[0]
        random_sender = random_game_events['From'].values[0]
        random_receiver = random_game_events['To'].values[0]
        random_passing_team = random_game_events['Team'].values[0]
        random_play_end_df = play_df[play_df['frame']
                                     == random_end_frame].reset_index(drop=True)
        sender_xy = random_play_end_df[random_play_end_df['player'] == random_sender][[
            'x', 'y']].values[0]
        receiver_xy = random_play_end_df[random_play_end_df['player'] == random_receiver][[
            'x', 'y']].values[0]
        # split the frame into passing-team (minus sender/receiver) and
        # defending-team coordinates, depending on which team passed
        if random_passing_team == 'Away':
            passing_team_xy = random_play_end_df[(random_play_end_df['team'] == 'away') &
                                                 (random_play_end_df['player']
                                                  != random_sender)
                                                 & (random_play_end_df['player'] != random_receiver)][[
                                                     'x', 'y', 'player']].dropna()
            defending_team_xy = random_play_end_df[random_play_end_df['team'] == 'home'][[
                'x', 'y', 'player']].dropna().set_index('player', drop=False)
        else:
            passing_team_xy = random_play_end_df[(random_play_end_df['team'] == 'home') &
                                                 (random_play_end_df['player']
                                                  != random_sender)
                                                 & (random_play_end_df['player'] != random_receiver)][[
                                                     'x', 'y', 'player']].dropna()
            defending_team_xy = random_play_end_df[random_play_end_df['team'] == 'away'][[
                'x', 'y', 'player']].dropna().set_index('player', drop=False)
        defending_team_xy = defending_team_xy.rename(
            columns={'player': 'player_id'})
        self.get_defend_side(defending_team_xy, passing_team_xy)
        pack = packing(sender_xy, receiver_xy,
                       defending_team_xy, col_label_x='x', col_label_y='y', defend_side=self.defend_side)
        self.packing_df, self.packing_rate, self.pass_pressure = pack.get_packing()
        plot = plot_packing(passer_team_df=passing_team_xy, packing_df=self.packing_df,
                            col_label_x='x', col_label_y='y',
                            packing_rate=self.packing_rate, pass_pressure=self.pass_pressure,
                            sender_xy=sender_xy, receiver_xy=receiver_xy,
                            x_range=[0, 1], y_range=[1, 0], path_to_save=dir_path+'/',
                            pass_frame=random_end_frame, file_name='metrica',
                            bcg_img='/images/pitch/pitch.jpg')
        plot.plot()
if __name__ == '__main__':
    # dir_path is read as a global by metrica.process_data (plot output dir)
    dir_path = os.path.dirname(os.path.realpath(__file__))
    """
    Path to the game level data -
    Use this Tidy data - https://drive.google.com/drive/folders/1BGLHbe7DB_NGZxitjJAQxu2-N-B4Zk3s
    Credit - <NAME>
    """
    # usage: metrica.py <path_to_play_csv> <path_to_event_csv>
    path_game_df = sys.argv[1]
    # Path to the event level data
    path_events_df = sys.argv[2]
    game_id = '1'
    metric = metrica(path_game_df, path_events_df, game_id)
    metric.process_data()
#!/usr/bin/env python
"""
Copyright (c) 2020 <NAME>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
import logging
import os
import cocotb_test.simulator
import cocotb
from cocotb.triggers import Timer
from cocotb.regression import TestFactory
from cocotbext.i2c import I2cMaster, I2cMemory
class TB:
    """Testbench harness: a DEBUG-level logger plus the I2C master and
    I2C memory models wired to the two DUT bus ports."""

    def __init__(self, dut):
        self.dut = dut

        # Dedicated testbench logger, verbose so bus traffic is visible.
        log = logging.getLogger("cocotb.tb")
        log.setLevel(logging.DEBUG)
        self.log = log

        # Bus 1 is driven by the master model at 400 kHz.
        self.i2c_master = I2cMaster(
            sda=dut.sda_1_o,
            sda_o=dut.sda_1_i,
            scl=dut.scl_1_o,
            scl_o=dut.scl_1_i,
            speed=400e3,
        )
        # Bus 2 hosts a 256-byte memory model responding at address 0x50.
        self.i2c_memory = I2cMemory(
            sda=dut.sda_2_o,
            sda_o=dut.sda_2_i,
            scl=dut.scl_2_o,
            scl_o=dut.scl_2_i,
            addr=0x50,
            size=256,
        )
async def run_test(dut, payload_lengths=None, payload_data=None):
    """Round-trip check: write four bytes to the I2C memory at offset 0,
    read them back through the DUT, and compare."""
    harness = TB(dut)

    # Allow the bus to settle before driving it.
    await Timer(100, 'us')

    payload = b'\xaa\xbb\xcc\xdd'

    # Write: one address byte (0x00) followed by the payload.
    await harness.i2c_master.write(0x50, b'\x00' + payload)
    await harness.i2c_master.send_stop()

    await Timer(100, 'us')

    # Rewind the memory pointer to 0x00, then read the payload back.
    await harness.i2c_master.write(0x50, b'\x00')
    readback = await harness.i2c_master.read(0x50, 4)
    await harness.i2c_master.send_stop()

    harness.log.info("Read data: %s", readback)

    assert payload == readback
# Register run_test as a cocotb test only when this module is imported by a
# running simulator (SIM_NAME is empty otherwise).
if cocotb.SIM_NAME:
    factory = TestFactory(run_test)
    factory.generate_tests()

# cocotb-test: pytest-side driver configuration below.
tests_dir = os.path.dirname(__file__)
def test_i2c(request):
    """Pytest entry point: compile the Verilog top-level and run this
    module's cocotb tests inside the simulator."""
    dut = "test_i2c"
    toplevel = dut
    # The cocotb test module is this very file (name without extension).
    module = os.path.splitext(os.path.basename(__file__))[0]

    verilog_sources = [
        os.path.join(tests_dir, f"{dut}.v"),
    ]

    parameters = {}
    extra_env = {f'PARAM_{k}': str(v) for k, v in parameters.items()}

    # Give each parametrized test its own build dir; strip the []
    # pytest puts in parametrized node names.
    safe_name = request.node.name.replace('[', '-').replace(']', '')
    sim_build = os.path.join(tests_dir, "sim_build", safe_name)

    cocotb_test.simulator.run(
        python_search=[tests_dir],
        verilog_sources=verilog_sources,
        toplevel=toplevel,
        module=module,
        parameters=parameters,
        sim_build=sim_build,
        extra_env=extra_env,
    )
|
from hazelcast.exception import HazelcastSerializationError
from hazelcast.util import enum
from hazelcast import six
# Numeric identifiers for portable field types, mirroring the Java client's
# FieldType values.  Note: `enum` here is hazelcast.util's helper (imported
# above), not the stdlib enum module.
FieldType = enum(
    PORTABLE=0,
    BYTE=1,
    BOOLEAN=2,
    CHAR=3,
    SHORT=4,
    INT=5,
    LONG=6,
    FLOAT=7,
    DOUBLE=8,
    UTF=9,
    PORTABLE_ARRAY=10,
    BYTE_ARRAY=11,
    BOOLEAN_ARRAY=12,
    CHAR_ARRAY=13,
    SHORT_ARRAY=14,
    INT_ARRAY=15,
    LONG_ARRAY=16,
    FLOAT_ARRAY=17,
    DOUBLE_ARRAY=18,
    UTF_ARRAY=19
)
class FieldDefinition(object):
    """Metadata for one field of a portable class: its position, name,
    type code, class version and (for portable fields) factory/class ids."""

    def __init__(self, index, field_name, field_type, version, factory_id=0, class_id=0):
        self.index = index
        self.field_name = field_name
        self.field_type = field_type
        self.version = version
        self.factory_id = factory_id
        self.class_id = class_id

    def _identity(self):
        # All attributes as a tuple; the basis for value equality.
        return (self.index, self.field_name, self.field_type,
                self.version, self.factory_id, self.class_id)

    def __eq__(self, other):
        if not isinstance(other, self.__class__):
            return False
        return self._identity() == other._identity()

    def __repr__(self):
        return "FieldDefinition[ ix:{}, name:{}, type:{}, version:{}, fid:{}, cid:{}]".format(
            self.index,
            self.field_name,
            self.field_type,
            self.version,
            self.factory_id,
            self.class_id)
class ClassDefinition(object):
    """Schema of a portable class: factory id, class id, version, and the
    definitions of its fields keyed by field name."""

    def __init__(self, factory_id, class_id, version):
        self.factory_id = factory_id
        self.class_id = class_id
        self.version = version
        self.field_defs = {}  # string:FieldDefinition

    def add_field_def(self, field_def):
        # Insert or replace the definition under its field name.
        self.field_defs[field_def.field_name] = field_def

    def get_field(self, field_name_or_index):
        """Look up a field by name or positional index.

        Raises IndexError for an index that is out of range or has no
        matching field; returns None for an unknown name.
        """
        if isinstance(field_name_or_index, int):
            index = field_name_or_index
            count = self.get_field_count()
            if 0 <= index < count:
                # Linear scan: field_defs is keyed by name, not by index.
                for field in six.itervalues(self.field_defs):
                    if field.index == index:
                        return field
            raise IndexError("Index is out of bound. Index: {} and size: {}".format(index, count))
        else:
            return self.field_defs.get(field_name_or_index, None)

    def has_field(self, field_name):
        """Return True when a field with this name is defined."""
        return field_name in self.field_defs

    def get_field_names(self):
        """Return the defined field names as a list."""
        return list(self.field_defs.keys())

    def get_field_type(self, field_name):
        """Return the FieldType code of the named field; raise ValueError
        when the field does not exist."""
        fd = self.get_field(field_name)
        if fd:
            return fd.field_type
        raise ValueError("Unknown field: {}".format(field_name))

    def get_field_class_id(self, field_name):
        """Return the class id of the named field; raise ValueError when the
        field does not exist."""
        fd = self.get_field(field_name)
        if fd:
            return fd.class_id
        raise ValueError("Unknown field: {}".format(field_name))

    def get_field_count(self):
        return len(self.field_defs)

    def set_version_if_not_set(self, version):
        # A negative version means "unset"; adopt the supplied one.
        if self.version < 0:
            self.version = version

    def __eq__(self, other):
        return isinstance(other, self.__class__) and (self.factory_id, self.class_id, self.version, self.field_defs) == \
            (other.factory_id, other.class_id, other.version, other.field_defs)

    def __ne__(self, other):
        return not self.__eq__(other)

    def __repr__(self):
        return "fid:{}, cid:{}, v:{}, fields:{}".format(self.factory_id, self.class_id, self.version, self.field_defs)

    def __hash__(self):
        # NOTE(review): identity-based hash while __eq__ compares by value,
        # so two equal definitions may hash differently.  Confirm intent
        # before relying on ClassDefinition as a dict key or set member.
        return id(self)//16
class ClassDefinitionBuilder(object):
    """Fluent builder for ClassDefinition objects.

    Fields receive consecutive indexes in the order they are added.  Once
    build() has been called the builder is sealed and any further addition
    raises HazelcastSerializationError.
    """

    def __init__(self, factory_id, class_id, version=0):
        self.factory_id = factory_id
        self.class_id = class_id
        self.version = version
        self._index = 0
        self._done = False
        self._field_defs = []

    @staticmethod
    def _validate_portable(class_def):
        # A portable (or portable-array) field must reference a real class id.
        if class_def.class_id is None or class_def.class_id == 0:
            raise ValueError("Portable class id cannot be zero!")

    def _add_simple(self, field_name, field_type):
        # Scalar and array fields inherit the builder's own version.
        self._add_field_by_type(field_name, field_type, self.version)
        return self

    def add_portable_field(self, field_name, class_def):
        self._validate_portable(class_def)
        self._add_field_by_type(field_name, FieldType.PORTABLE, class_def.version,
                                class_def.factory_id, class_def.class_id)
        return self

    def add_byte_field(self, field_name):
        return self._add_simple(field_name, FieldType.BYTE)

    def add_boolean_field(self, field_name):
        return self._add_simple(field_name, FieldType.BOOLEAN)

    def add_char_field(self, field_name):
        return self._add_simple(field_name, FieldType.CHAR)

    def add_short_field(self, field_name):
        return self._add_simple(field_name, FieldType.SHORT)

    def add_int_field(self, field_name):
        return self._add_simple(field_name, FieldType.INT)

    def add_long_field(self, field_name):
        return self._add_simple(field_name, FieldType.LONG)

    def add_float_field(self, field_name):
        return self._add_simple(field_name, FieldType.FLOAT)

    def add_double_field(self, field_name):
        return self._add_simple(field_name, FieldType.DOUBLE)

    def add_utf_field(self, field_name):
        return self._add_simple(field_name, FieldType.UTF)

    def add_portable_array_field(self, field_name, class_def):
        self._validate_portable(class_def)
        self._add_field_by_type(field_name, FieldType.PORTABLE_ARRAY, class_def.version,
                                class_def.factory_id, class_def.class_id)
        return self

    def add_byte_array_field(self, field_name):
        return self._add_simple(field_name, FieldType.BYTE_ARRAY)

    def add_boolean_array_field(self, field_name):
        return self._add_simple(field_name, FieldType.BOOLEAN_ARRAY)

    def add_char_array_field(self, field_name):
        return self._add_simple(field_name, FieldType.CHAR_ARRAY)

    def add_short_array_field(self, field_name):
        return self._add_simple(field_name, FieldType.SHORT_ARRAY)

    def add_int_array_field(self, field_name):
        return self._add_simple(field_name, FieldType.INT_ARRAY)

    def add_long_array_field(self, field_name):
        return self._add_simple(field_name, FieldType.LONG_ARRAY)

    def add_float_array_field(self, field_name):
        return self._add_simple(field_name, FieldType.FLOAT_ARRAY)

    def add_double_array_field(self, field_name):
        return self._add_simple(field_name, FieldType.DOUBLE_ARRAY)

    def add_utf_array_field(self, field_name):
        return self._add_simple(field_name, FieldType.UTF_ARRAY)

    def add_field_def(self, field_def):
        """Append an externally built FieldDefinition; its index must match
        the builder's running counter."""
        self._check()
        if self._index != field_def.index:
            raise ValueError("Invalid field index")
        self._index += 1
        self._field_defs.append(field_def)
        return self

    def build(self):
        """Seal the builder and materialize the ClassDefinition."""
        self._done = True
        class_def = ClassDefinition(self.factory_id, self.class_id, self.version)
        for fd in self._field_defs:
            class_def.add_field_def(fd)
        return class_def

    def _add_field_by_type(self, field_name, field_type, version, factory_id=0, class_id=0):
        self._check()
        self._field_defs.append(
            FieldDefinition(self._index, field_name, field_type, version, factory_id, class_id))
        self._index += 1

    def _check(self):
        # Reject additions after build() has sealed the definition.
        if self._done:
            raise HazelcastSerializationError("ClassDefinition is already built for {}".format(self.class_id))
|
<gh_stars>1-10
from bs4 import BeautifulSoup  # bs4 is used for the crawling / HTML parsing
from urllib.request import urlopen

# today_menu_list: today's menus only -> [0][meal-slot][dish] nested lists.
today_menu_list = [[[], [], [], [], [], [], []]]
all_menu_list = []
menuTime = []
# Weekday initials (Mon..Sun) as they appear on the cafeteria page.
day_list = ['월','화','수','목','금','토','일']
tday = ""
# Build a 3-D list: first index = weekday, second index = meal slot,
# third index = the individual dishes of that slot.
for i in range(7):
    line = []
    all_menu_list.append(line)
    for j in range(7):
        line = []
        all_menu_list[i].append(line)
def find_td(selected_menu, result_list, real_index):
    """Extract dish names from menu <td> cells into result_list.

    Args:
        selected_menu: iterable of bs4 elements (a ResultSet from select());
            each element's text holds one meal slot's dishes separated by
            newlines.
        result_list: 3-D nested list; dishes are appended to
            result_list[cell_position][real_index].
        real_index: second-level index (meal slot / weekday column) to fill.

    Fix: the original compared strings with ``is``.  That only "worked" by
    accident of CPython's interning of single latin-1 characters, and the
    non-latin-1 '❝' check could never match, so notice lines starting with
    '❝' were wrongly kept.  Value comparison (==) is used instead.
    """
    index = 0
    for menu_sub_list in selected_menu:
        # A cell's text contains every dish separated by '\n'.
        oneday_menuplan = menu_sub_list.get_text().split('\n')
        for j in range(len(oneday_menuplan)):
            entry = oneday_menuplan[j]
            # Price/serving-time/notice lines mark the end of the dish list.
            # NOTE: assumes entries are non-empty before indexing [0], as the
            # original did — confirm against the live page markup.
            if (entry == '\r' or
                    entry[0] == '*' or
                    entry[0] == '[' or
                    entry[0] == '(' or
                    entry[0] == '❝'):
                break
            elif entry[0] == '<':
                # Skip headers like <즉석조리기기> but keep the dishes after them.
                continue
            else:
                if '국밥' in entry:
                    # Gukbap lines carry calorie info; keep only the name (4 chars).
                    result_list[index][real_index].append(entry[:4].strip())
                else:
                    result_list[index][real_index].append(entry.strip())
        index += 1
# Fetch the cafeteria page and parse it.
url = "https://www.uicoop.ac.kr/main.php?mkey=2&w=2&l=1"
html = urlopen(url)
soup = BeautifulSoup(html, "html.parser")  # bs4 instance used to extract the wanted tags
index = 0
index1 = 0
# Walk the rows of the table with id 'menuBox'.
for tr_menu in soup.find(id='menuBox').find_all('tr'):
    not_today_menu = tr_menu.select('.din_list')
    today_menu = tr_menu.select('.din_lists')
    menutime_tr = tr_menu.find(class_ = 'corn_nm')
    day = tr_menu.find(class_ = 'din_mns')
    if day:
        # First character is the weekday initial; chars 3..8 hold MM/DD.
        tday = day.get_text().strip()[:1]
        short_date = int(day.get_text().strip()[3:8].replace('/',''))
    if menutime_tr:
        menuTime.append(menutime_tr.get_text())
    if not_today_menu:  # only when the selector matched something
        find_td(not_today_menu, all_menu_list, index)
        index += 1
    if today_menu:
        find_td(today_menu, today_menu_list, index1)
        index1 += 1
# Splice today's column back into the weekly list at today's weekday position.
for i in range(len(day_list)):
    if tday == day_list[i]:
        all_menu_list.insert(i,today_menu_list[0])
        today_index = i
        break
|
from typing import List
from itertools import combinations
from collections import defaultdict
from clustering import Clustering
class CustomMeasure(object):
    """Pairwise-agreement measure over a collection of clusterings.

    For each pair of clusterings, the measure counts how many item pairs
    both clusterings assign to the same cluster, normalizes by each
    clustering's own number of agreed pairs, and averages the resulting
    scores.  Pair-agreement lookups are memoized.
    """

    def __init__(self, clusterings: List[Clustering]):
        self.nclusterings = len(clusterings)
        self.clusterings = clusterings
        # Normalizers: each clustering's own count of same-cluster item pairs.
        self.clustering_agreed = [
            clustering.get_num_agreed_pairs() for clustering in clusterings
        ]
        # Memo cache; unseen keys read back as None ("not computed yet").
        self.pair_assignments = defaultdict(lambda: None)

    def add_clustering(self, clustering: Clustering):
        """Register one more clustering and record its normalizer."""
        self.nclusterings += 1
        self.clusterings.append(clustering)
        self.clustering_agreed.append(clustering.get_num_agreed_pairs())

    def get_clustering(self, ind):
        """Return the clustering stored at index `ind`."""
        return self.clusterings[ind]

    def get_or_calc_pair_assignments(self, pair, clustering_indices):
        """Return (computing and caching on first use) whether every
        clustering in `clustering_indices` agrees on `pair`."""
        key = self.build_key(pair, clustering_indices)
        is_agreed = self.get_pair_assignments(key)
        if is_agreed is None:
            is_agreed = self.calc_pair_assignments(pair, clustering_indices)
            self.put_pair_assignments(key, is_agreed)
        return is_agreed

    def build_key(self, pair, clustering_indices):
        """Build an order-insensitive cache key from an item pair and a
        tuple of clustering indices."""
        key1 = str(sorted(pair))
        key2 = str(sorted(clustering_indices))
        key = f"{key1}_{key2}"
        return key

    def get_pair_assignments(self, key):
        # defaultdict returns (and stores) None for unseen keys.
        return self.pair_assignments[key]

    def put_pair_assignments(self, key, is_agreed):
        self.pair_assignments[key] = is_agreed

    def calc_pair_assignments(self, pair, clustering_indices):
        """True iff all selected clusterings put both items of `pair` in the
        same cluster."""
        agreed = []
        for clustering_ind in clustering_indices:
            clustering = self.clusterings[clustering_ind]
            agreed.append(
                clustering.get_assignment(pair[0]) == clustering.get_assignment(pair[1])
            )
        is_agreed = all(agreed)
        return is_agreed

    def get_num_agreed_pairs(self, indices, clustering_indices, only_last=False):
        """Count item pairs from `indices` on which the clusterings agree.

        With only_last=True only pairs involving the last index are counted
        (incremental path: earlier pairs were scored in a previous call).
        """
        num_agreed = 0
        if only_last:
            # Pair the final index with every earlier one.
            ind_combinations = zip(
                indices[:-1], indices[-1:] * (len(indices) - 1)
            )
        else:
            ind_combinations = combinations(indices, 2)
        for pair in ind_combinations:
            is_agreed = self.get_or_calc_pair_assignments(pair, clustering_indices)
            if is_agreed:
                num_agreed += 1
        return num_agreed

    def get_combination(self):
        """All 2-element combinations of clustering indices."""
        clustering_indices = range(self.nclusterings)
        # group_size = len(clustering_indices)
        group_size = 2
        clustering_combinations = combinations(clustering_indices, group_size)
        return clustering_combinations

    def get_measure(self, indices, clustering_combinations=None, agreed_dict=None):
        """Compute the measure for the items in `indices`.

        `agreed_dict` carries per-combination agreed counts from a previous
        call so that only pairs involving the newly added index are
        re-evaluated; the updated counts are returned alongside the measure.

        Fix: `agreed_dict` previously used a mutable default argument
        (``{}``); a None sentinel is used now so the dict is created per
        call (behavior-compatible — the argument is only read).
        """
        if clustering_combinations is None:
            clustering_combinations = self.get_combination()
        if agreed_dict is None:
            agreed_dict = {}
        new_agreed_dict = {}
        measures = []
        for tuple_indices in clustering_combinations:
            # Incremental path when this combination was scored before.
            only_last = tuple_indices in agreed_dict
            num_subset_agreed = self.get_num_agreed_pairs(
                indices, tuple_indices, only_last
            )
            if only_last:
                num_subset_agreed += agreed_dict[tuple_indices]
            new_agreed_dict[tuple_indices] = num_subset_agreed
            scores = [
                num_subset_agreed / self.clustering_agreed[ind]
                for ind in tuple_indices
            ]
            measures.append(sum(scores) / len(scores))
        measure = sum(measures) / len(measures)
        return measure, new_agreed_dict

    def __call__(self, *args, **kwargs):
        return self.get_measure(*args, **kwargs)
|
<gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# #
# Copyright 2016-2017 VMware Inc.
# This file is part of ETSI OSM
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact: <EMAIL>
# #
import logging
import os
import subprocess
import yaml
from lxml import etree as ET
# file paths: template, optional CD-ROM item snippet, and the YAML config
# tables used to resolve OS types and disk controllers.
MODULE_DIR = os.path.dirname(__file__)
OVF_TEMPLATE_PATH = os.path.join(MODULE_DIR,
                                 "ovf_template/template.xml")
IDE_CDROM_XML_PATH = os.path.join(MODULE_DIR,
                                  "ovf_template/ide_cdrom.xml")
OS_INFO_FILE_PATH = os.path.join(MODULE_DIR,
                                 "config/os_type.yaml")
DISK_CONTROLLER_INFO_FILE_PATH = os.path.join(MODULE_DIR,
                                              "config/disk_controller.yaml")
# Set logger: a module-local file handler under logs/ next to this module.
LOG_FILE = os.path.join(MODULE_DIR, "logs/ovf_converter.log")
os.makedirs(os.path.dirname(LOG_FILE), exist_ok=True)
logger = logging.getLogger(__name__)
hdlr = logging.FileHandler(LOG_FILE)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
hdlr.setFormatter(formatter)
logger.addHandler(hdlr)
logger.setLevel(10)  # numeric 10 == logging.DEBUG
__version__ = "1.2"
__description__ = "OVF Hardware Version 14 compatible"
def get_version(*args, **kwargs):
    """Return this application's version string ("<version> - <description>")."""
    return "{} - {}".format(__version__, __description__)
# converter class
class OVFConverter(object):
    """ Class to convert input image into OVF format """

    def __init__(self, source_img_path, output_location=None, output_ovf_name=None,
                 memory=None, cpu=None, disk=None, os_type=None,
                 disk_controller=None, cdrom=None, hwversion=14):
        """
        Constructor to initialize object of class OVFConverter
        Args:
            source_img_path - absolute path to source image which will get convert into ovf
            output_location - location where created OVF will be kept. This location
                              should have write access. If not given file will get
                              created at source location (optional)
            output_ovf_name - name of output ovf.If not given source image name will
                              be used (optional)
            memory - required memory for VM in MB (optional)
            cpu - required number of virtual cpus for VM (optional)
            disk - required size of disk for VM in GB (optional)
            os_type- required operating system type as specified in user document
                     (default os type other 32 bit) (optional)
            disk_controller - required disk controller type
                              (default controller SCSI with lsilogicsas)
                              (SATA, IDE, Paravirtual, Buslogic, Lsilogic, Lsilogicsas) (optional)
            hwversion - VMware ESXi hardware family version (optional)
        Returns:
            Nothing.
        """
        self.logger = logger
        self.ovf_template_path = OVF_TEMPLATE_PATH
        self.source_img_path = source_img_path
        # Split "name.ext" so the extension (without the dot) becomes the
        # assumed source format; refined later by qemu-img info.
        self.source_img_filename, file_extension = os.path.splitext(os.path.basename(self.source_img_path))
        self.source_img_location = os.path.dirname(self.source_img_path)
        self.source_format = file_extension[1:]
        self.output_format = "ovf"
        self.output_ovf_name = output_ovf_name.split('.')[0] if output_ovf_name else self.source_img_filename
        # NOTE(review): the docstring says "created at source location" when
        # output_location is omitted, but the code defaults to the current
        # working directory "." — confirm intended behavior.
        self.output_location = output_location if output_location else "."
        self.output_ovf_name_ext = self.output_ovf_name + "." + self.output_format
        self.output_path = os.path.join(self.output_location, self.output_ovf_name_ext)
        self.output_diskimage_format = "vmdk"
        self.output_diskimage_name = self.source_img_filename + "." + self.output_diskimage_format
        self.output_diskimage_path = os.path.join(self.output_location, self.output_diskimage_name)
        self.logger.info("Input parameters to Converter: \n ovf_template_path = {}, \n source_img_path = {}, \n"
                         "source_img_location ={} , \n source_format = {}, \n source_img_filename = {}".format(
                             self.ovf_template_path,
                             self.source_img_path, self.source_img_location,
                             self.source_format, self.source_img_filename))
        self.logger.info("Output parameters to Converter: \n output_format = {}, \n output_ovf_name = {}, \n"
                         "output_location ={} , \n output_path = {}, \n output_diskimage_name = {} , \n"
                         " output_diskimage_path = {} ".format(self.output_format, self.output_ovf_name,
                                                               self.output_location, self.output_path,
                                                               self.output_diskimage_name, self.output_diskimage_path))
        # Placeholder capacities; refreshed from qemu-img info later.
        self.disk_capacity = 1
        self.disk_populated_size = 0
        self.vm_name = self.output_ovf_name
        # Stored as strings because they are written into XML text nodes.
        self.memory = str(memory) if memory is not None else None
        self.cpu = str(cpu) if cpu is not None else None
        self.os_type = str(os_type).strip() if os_type else None
        self.cdrom = cdrom
        self.hwversion = hwversion
        if self.os_type:
            self.osID, self.osType = self.__get_osType()
            if self.osID is None or self.osType is None:
                error_msg = "ERROR: Invalid input can not find OS type {} ".format(self.os_type)
                self.__raise_exception(error_msg)
        self.disk_controller = str(disk_controller).strip() if disk_controller else None
        if self.disk_controller:
            self.disk_controller_info = self.__get_diskcontroller()
            if not self.disk_controller_info:
                error_msg = "ERROR: Invalid input can not find Disk Controller {} ".format(self.disk_controller)
                self.__raise_exception(error_msg)
        if disk is not None:
            # convert disk size from GB to bytes
            self.disk_size = int(disk) * 1024 * 1024 * 1024
        else:
            self.disk_size = None
        self.logger.info("Other input parameters to Converter: \n vm_name = {}, \n memory = {}, \n"
                         "disk_size ={} \n os type = {} \n disk controller = {}".format(
                             self.vm_name, self.memory, self.disk_size, self.os_type, self.disk_controller))
        # check access for read input location and write output location return none if no access
        if not os.access(self.source_img_path, os.F_OK):
            error_msg = "ERROR: Source image file {} not present".format(self.source_img_path)
            self.__raise_exception(error_msg, exception_type="IO")
        elif not os.access(self.source_img_path, os.R_OK):
            error_msg = "ERROR: Cannot read source image file {}".format(self.source_img_path)
            self.__raise_exception(error_msg, exception_type="IO")
        if not os.access(self.output_location, os.W_OK):
            error_msg = "ERROR: No write access to location {} to write output OVF ".format(self.output_location)
            self.__raise_exception(error_msg, exception_type="IO")

    def __get_image_info(self):
        """
        Private method to get information about the source image via
        `qemu-img info` (virtual size, populated size, file format).
        Args : None
        Return : True on success else False
        """
        try:
            print("Getting source image information")
            command = "qemu-img info \t " + self.source_img_path
            output, error, returncode = self.__execute_command(command)
            if error or returncode:
                self.logger.error("ERROR: Error occurred while getting information about source image : {} \n "
                                  "return code : {} ".format(error, returncode))
                return False
            elif output:
                self.logger.info("Get Image Info Output : {} \n ".format(output))
                split_output = output.decode().split("\n")
                for line in split_output:
                    line = line.strip()
                    if "virtual size" in line:
                        # Two layouts: "N GiB (M bytes)" -> take the exact byte
                        # count; otherwise parse the "NG"/"NM" suffixed size.
                        virtual_size_info = line.split(":")[1].split()
                        if len(virtual_size_info) == 3 and virtual_size_info[2].strip(")") == "bytes":
                            self.disk_capacity = int(virtual_size_info[1].strip("("))
                        else:
                            self.disk_capacity = self.__convert_size(virtual_size_info[0])
                    elif "disk size" in line:
                        size = line.split(":")[1].split()[0]
                        self.disk_populated_size = self.__convert_size(size)
                    elif "file format" in line:
                        self.source_format = line.split(":")[1]
                self.logger.info("Updated source image virtual disk capacity : {} ,"
                                 "Updated source image populated size: {}".format(self.disk_capacity,
                                                                                  self.disk_populated_size))
                return True
            # NOTE(review): falls through returning None (falsy) when the
            # command produced neither output nor error — confirm intended.
        except Exception as exp:
            error_msg = "ERROR: Error occurred while getting information about source image : {}".format(exp)
            self.logger.error(error_msg)
            print(error_msg)
            return False

    def __convert_image(self):
        """
        Private method to convert source disk image into .vmdk disk image.
        Args : None
        Return : True on success else False
        """
        print("Converting source disk image to .vmdk ")
        command = "qemu-img convert -p -f " + self.source_format + " -O " + self.output_diskimage_format + \
                  " -o subformat=streamOptimized " + self.source_img_path + " " + self.output_diskimage_path
        _, error, returncode = self.__execute_command(command, show_output=True)
        if error or returncode:
            # NOTE(review): .format() binds only to the second string literal,
            # so the "{}" in the first line is never substituted and only
            # `error` fills the remaining placeholder — message defect.
            error_msg = "ERROR: Error occurred while converting source disk image into vmdk: {}\n" + \
                        "return code : {} ".format(error, returncode)
            self.logger.error(error_msg)
            print(error_msg)
            return False
        else:
            if os.path.isfile(self.output_diskimage_path):
                self.logger.info("Successfully converted source image {} into {} \n "
                                 "return code : {} ".format(self.source_img_path,
                                                            self.output_diskimage_path,
                                                            returncode))
                result = self.__make_image_bootable()
                if result:
                    self.logger.info("Made {} bootable".format(self.output_diskimage_path))
                    print("Output VMDK is at: {}".format(self.output_diskimage_path))
                    return True
                else:
                    self.logger.error("Cannot make {} bootable".format(self.output_diskimage_path))
                    print("ERROR: Fail to convert source image into .vmdk")
                    return False
            else:
                self.logger.error("Converted vmdk disk file {} is not present \n ".format(
                    self.output_diskimage_path))
                print("Fail to convert source image into .vmdk")
                return False

    def __make_image_bootable(self):
        """
        Private method to make source disk image bootable by patching one
        byte at offset 0x4 of the VMDK via dd.
        Args : None
        Return : True on success else False
        """
        # NOTE(review): Python expands '\x03' before the shell sees it, so
        # printf receives the raw ETX byte rather than the escape sequence —
        # presumably equivalent for dd's input, but confirm.
        command = "printf '\x03' | dd conv=notrunc of=" + self.output_diskimage_path + "\t bs=1 seek=$((0x4))"
        output, error, returncode = self.__execute_command(command)
        # NOTE(review): `error and returncode` here vs `error or returncode`
        # elsewhere — dd writes its status to stderr, so requiring both is
        # likely deliberate, but the inconsistency is worth confirming.
        if error and returncode:
            error_msg = "ERROR:Error occurred while making source disk image bootable : {} \n "\
                        "return code : {} ".format(error, returncode)
            self.logger.error(error_msg)
            print(error_msg)
            return False
        else:
            self.logger.info("Make Image Bootable Output : {} ".format(output))
            return True

    def __edit_ovf_template(self):
        """
        Private method to create new OVF file by editing OVF template
        Args : None
        Return : output OVF path on success else False
        """
        try:
            print("Creating OVF")
            # Read OVF template file
            OVF_tree = ET.parse(self.ovf_template_path)
            root = OVF_tree.getroot()
            # Collect namespaces (drop the default/None-prefixed entry).
            nsmap = {k: v for k, v in root.nsmap.items() if k}
            nsmap["xmlns"] = "http://schemas.dmtf.org/ovf/envelope/1"
            # Edit OVF template
            references = root.find('xmlns:References', nsmap)
            if references is not None:
                file_tag = references.find('xmlns:File', nsmap)
                if file_tag is not None:
                    file_tag.attrib['{' + nsmap['ovf'] + '}href'] = self.output_diskimage_name
            disksection = root.find('xmlns:DiskSection', nsmap)
            if disksection is not None:
                diak_tag = disksection.find('xmlns:Disk', nsmap)
                if diak_tag is not None:
                    # Requested disk size wins when larger than the image's
                    # own virtual capacity.
                    if self.disk_size and self.disk_size > self.disk_capacity:
                        self.disk_capacity = self.disk_size
                    diak_tag.attrib['{' + nsmap['ovf'] + '}capacity'] = str(self.disk_capacity)
                    diak_tag.attrib['{' + nsmap['ovf'] + '}populatedSize'] = str(self.disk_populated_size)
            virtuasystem = root.find('xmlns:VirtualSystem', nsmap)
            if virtuasystem is not None:
                name_tag = virtuasystem.find('xmlns:Name', nsmap)
                if name_tag is not None:
                    name_tag.text = self.vm_name
                if self.os_type is not None:
                    operatingSystemSection = virtuasystem.find('xmlns:OperatingSystemSection', nsmap)
                    if self.osID and self.osType:
                        operatingSystemSection.attrib['{' + nsmap['ovf'] + '}id'] = self.osID
                        os_discription_tag = operatingSystemSection.find('xmlns:Description', nsmap)
                        os_discription_tag.text = self.osType
                virtualHardwareSection = virtuasystem.find('xmlns:VirtualHardwareSection', nsmap)
                system = virtualHardwareSection.find('xmlns:System', nsmap)
                virtualSystemIdentifier = system.find('vssd:VirtualSystemIdentifier', nsmap)
                if virtualSystemIdentifier is not None:
                    virtualSystemIdentifier.text = self.vm_name
                VirtualSystemType = system.find('vssd:VirtualSystemType', nsmap)
                if VirtualSystemType is not None:
                    VirtualSystemType.text = "vmx-{}".format(self.hwversion)
                if self.memory is not None or self.cpu is not None or self.disk_controller is not None:
                    # Patch the matching hardware Items in place.
                    for item in virtualHardwareSection.iterfind('xmlns:Item', nsmap):
                        description = item.find("rasd:Description", nsmap)
                        if self.cpu is not None:
                            if description is not None and description.text == "Number of Virtual CPUs":
                                cpu_item = item.find("rasd:VirtualQuantity", nsmap)
                                name_item = item.find("rasd:ElementName", nsmap)
                                if cpu_item is not None:
                                    cpu_item.text = self.cpu
                                    name_item.text = self.cpu + " virtual CPU(s)"
                        if self.memory is not None:
                            if description is not None and description.text == "Memory Size":
                                mem_item = item.find("rasd:VirtualQuantity", nsmap)
                                name_item = item.find("rasd:ElementName", nsmap)
                                if mem_item is not None:
                                    mem_item.text = self.memory
                                    name_item.text = self.memory + " MB of memory"
                        if self.disk_controller is not None:
                            if description is not None and description.text == "SCSI Controller":
                                if self.disk_controller_info is not None:
                                    name_item = item.find("rasd:ElementName", nsmap)
                                    name_item.text = str(self.disk_controller_info["controllerName"]) + "0"
                                    resource_type = item.find("rasd:ResourceType", nsmap)
                                    resource_type.text = self.disk_controller_info["resourceType"]
                                    description.text = self.disk_controller_info["controllerName"]
                                    resource_subtype = item.find("rasd:ResourceSubType", nsmap)
                                    if self.disk_controller_info["controllerName"] == "IDE Controller":
                                        # Remove resource subtype item
                                        resource_subtype.getparent().remove(resource_subtype)
                                    if "resourceSubType" in self.disk_controller_info:
                                        resource_subtype.text = self.disk_controller_info["resourceSubType"]
                if self.cdrom:
                    # Append the IDE CD-ROM Items after the last hardware Item.
                    last_item = list(virtualHardwareSection.iterfind('xmlns:Item', nsmap))[-1]
                    ide_cdrom_items_etree = ET.parse(IDE_CDROM_XML_PATH)
                    ide_cdrom_items = list(ide_cdrom_items_etree.iterfind('Item'))
                    for item in ide_cdrom_items:
                        last_item.addnext(item)
            # Save output OVF
            OVF_tree.write(self.output_path, xml_declaration=True, encoding='utf-8',
                           method="xml")
            if os.path.isfile(self.output_path):
                logger.info("Successfully written output OVF at {}".format(self.output_path))
                print("Output OVF is at: {}".format(self.output_path))
                return self.output_path
            else:
                error_msg = "ERROR: Error occurred while creating OVF file"
                print(error_msg)
                return False
        except Exception as exp:
            error_msg = "ERROR: Error occurred while editing OVF template : {}".format(exp)
            self.logger.error(error_msg)
            print(error_msg)
            return False

    def __convert_size(self, size):
        """
        Private method to convert disk size from GB,MB to bytes.
        Args :
            size : disk size with prefix 'G' for GB and 'M' for MB
        Return : disk size in bytes (0 when the suffix is unrecognized),
                 or False on error
        """
        byte_size = 0
        try:
            if not size:
                self.logger.error("No size {} to convert in bytes".format(size))
            else:
                size = str(size)
                # Last character is the unit suffix; the rest is the value.
                disk_size = float(size[:-1])
                input_type = size[-1].strip()
                self.logger.info("Disk size : {} , size type : {} ".format(disk_size, input_type))
                if input_type == "G":
                    byte_size = disk_size * 1024 * 1024 * 1024
                elif input_type == "M":
                    byte_size = disk_size * 1024 * 1024
                self.logger.info("Disk size in bytes: {} ".format(byte_size))
            return int(byte_size)
        except Exception as exp:
            error_msg = "ERROR:Error occurred while converting disk size in bytes : {}".format(exp)
            self.logger.error(error_msg)
            print(error_msg)
            return False

    def __get_osType(self):
        """
        Private method to get OS ID and Type by matching self.os_type
        (case-insensitively) against the os_type.yaml table.
        Args :
            None
        Return :
            osID : OS ID
            osType: OS Type
        """
        osID = None
        osType = None
        os_info = self.__read_yaml_file(OS_INFO_FILE_PATH)
        try:
            if self.os_type and os_info:
                for os_id, os_type in os_info.items():
                    if self.os_type.lower() == os_type.lower():
                        osID = os_id
                        osType = os_type
                        break
        except Exception as exp:
            error_msg = "ERROR:Error occurred while getting OS details : {}".format(exp)
            self.logger.error(error_msg)
            print(error_msg)
        return osID, osType

    def __get_diskcontroller(self):
        """
        Private method to get details of Disk Controller
        Args :
            None
        Return :
            disk_controller : dict with details of Disk Controller
        """
        disk_controller = {}
        scsi_subtype = None
        # A SCSI subtype given directly implies the SCSI controller family.
        if self.disk_controller.lower() in ["paravirtual", "lsilogic", "buslogic", "lsilogicsas"]:
            scsi_subtype = self.disk_controller
            self.disk_controller = "SCSI"
        disk_controller_info = self.__read_yaml_file(DISK_CONTROLLER_INFO_FILE_PATH)
        try:
            if self.disk_controller and disk_controller_info:
                for key, value in disk_controller_info.items():
                    if self.disk_controller.lower() in key.lower():
                        disk_controller['controllerName'] = key
                        disk_controller['resourceType'] = str(value["ResourceType"])
                        resourceSubTypes = value["ResourceSubTypes"] if "ResourceSubTypes" in value else None
                        if key == "SATA Controller":
                            disk_controller["resourceSubType"] = resourceSubTypes[0]
                        elif key == "SCSI Controller":
                            if scsi_subtype:
                                # "paravirtual" maps to VMware's VirtualSCSI.
                                if scsi_subtype.lower() == "paravirtual":
                                    scsi_subtype = "VirtualSCSI"
                                for subtype in resourceSubTypes:
                                    if scsi_subtype.lower() == subtype.lower():
                                        disk_controller["resourceSubType"] = subtype
                                        break
                                else:
                                    # for/else: no subtype matched.
                                    error_msg = "ERROR: Invalid inputs can not "\
                                                "find SCSI subtype {}".format(scsi_subtype)
                                    self.__raise_exception(error_msg)
        except KeyError as exp:
            error_msg = "ERROR:Error occurred while getting Disk Controller details : {}".format(exp)
            self.logger.error(error_msg)
            print(error_msg)
        return disk_controller

    def __read_yaml_file(self, file_path):
        """
        Private method to read a YAML config file.
        Args :
            file_path : path of the YAML file to load
        Return :
            Dict of yaml data
        """
        with open(file_path) as data_file:
            data = yaml.load(data_file, Loader=yaml.SafeLoader)
        return data

    def __raise_exception(self, error_msg, exception_type="Generic"):
        """
        Private method to log an error message and raise it as an exception.
        Args :
            error_msg : message for the raised exception
            exception_type : "Generic" or "IO" (both currently raise the
                same plain Exception)
        Return :
            None
        """
        if error_msg:
            self.logger.debug(error_msg)
            print(error_msg)
            if exception_type == "Generic":
                raise Exception(error_msg)
            elif exception_type == "IO":
                # NOTE(review): identical to the Generic branch — presumably
                # meant to raise an IO-specific exception; confirm.
                raise Exception(error_msg)

    def __execute_command(self, command, show_output=False):
        """
        Private method to execute command
        Args :
            command : command to execute
            show_output : echo the command's stdout to the console as it runs
        Return :
            stdout : output of command
            stderr: error occurred while executing command if any
            returncode : return code of command execution
        """
        try:
            self.logger.info("Execute command: {} ".format(command))
            proc = subprocess.Popen(command, stdout=subprocess.PIPE, stdin=subprocess.PIPE,
                                    stderr=subprocess.PIPE, shell=True, bufsize=1)
            stdout = b''
            stderr = b''
            # Read stdout one byte at a time so progress (e.g. qemu-img -p)
            # can be echoed live; drain the rest once the process exits.
            while True:
                output = proc.stdout.read(1)
                stdout += output
                if show_output:
                    print(output.decode(), end='')
                returncode = proc.poll()
                if returncode is not None:
                    for output in proc.stdout.readlines():
                        stdout += output
                        if show_output:
                            print(output.decode(), end='')
                    break
            for output in proc.stderr.readlines():
                stderr += output
        except Exception as exp:
            # NOTE(review): if Popen itself raises, stdout/stderr/returncode
            # are unbound and the return below raises UnboundLocalError.
            self.logger.error("Error {} occurred while executing command {} ".format(exp, command))
        return stdout, stderr, returncode

    def create_ovf(self):
        """
        Method to convert source image into OVF
        Args : None
        Return : output path on success else False
        """
        # check output format
        if self.source_format == self.output_format:
            # NOTE(review): the "{}" in this log line has no .format() arg
            # and is emitted literally.
            self.logger.info("Source format is OVF. No need to convert: {} ")
            return self.source_img_path
        # Get source img properties
        img_info = self.__get_image_info()
        if img_info:
            # Create vmdk disk image
            disk_img = self.__convert_image()
            if disk_img:
                # Edit OVF tempalte
                ovf_path = self.__edit_ovf_template()
                return ovf_path
        else:
            self.logger.error("Error in getting image information cannot convert image")
            raise Exception("Error in getting image information cannot convert image")
        return False
|
'''Extracting Text from PDF'''
import enum
from pathlib import Path
import fitz
from graeScript import outfile_path
class FileMode(enum.Enum):
    """
    PDF output text modes that work with sending to file.
    From Fitz/pymupdf:
    https://pymupdf.readthedocs.io/en/latest/page.html?highlight=get_text#Page.get_text
    """
    # BUGFIX: the original subclassed the `enum` *module* itself
    # (`class FileMode(enum):`), which raises TypeError at class-creation
    # time; Enum is the intended base class.
    text = 0
    html = 1
    json = 2
    rawjson = 3
    xhtml = 4
    xml = 5
class NonFileMode(enum.Enum):
    """
    PDF output text modes that do not work with sending to file.
    From Fitz/pymupdf:
    https://pymupdf.readthedocs.io/en/latest/page.html?highlight=get_text#Page.get_text
    """
    # BUGFIX: the original subclassed the `enum` module, which raises
    # TypeError at class-creation time; Enum is the intended base class.
    blocks = 0
    dict = 1
    rawdict = 2
    words = 3
class PdfContent:
    """
    Reads a PDF with PyMuPDF (fitz) and exposes its text/contents.

    The document is opened briefly in __init__ to capture cheap metadata
    (page count, table of contents), then reopened on demand by
    `to_file` / `get`.
    """
    # Modes accepted by Page.get_text whose output can be written to a file.
    file_modes = ('text', 'html', 'json', 'rawjson', 'xhtml', 'xml')
    # Modes that return Python structures rather than file-writable text.
    non_file_modes = ('blocks', 'dict', 'rawdict', 'words')
    def __init__(self, file: str | Path) -> None:
        """Open *file* once to cache page count, TOC and metadata."""
        self.file = file
        doc = fitz.open(file)
        self.page_num = doc.page_count
        self.toc = doc.get_toc()
        self.metadata = doc.metadata
        doc.close()
    def to_file(self, outfile: str | Path, *, mode: str | FileMode = 'text',
                pgstart: int = 1, pgstop: int | None = None, pgstep: int = 1):
        """
        Send PDF text/contents to output file in the format of `mode`.
        Args:
            outfile (str | Path): The output file path.
            mode (str | FileMode, optional): The mode of the PDF output.
                Defaults to 'text'. See Fitz docs: https://bit.ly/3FiUbHf
            pgstart (int, optional): The page at which to start. Defaults to 1.
            pgstop (int | None, optional): The page at which to stop. Defaults
                to None.
            pgstep (int, optional): The page steps. Defaults to 1 which
                outputs every page from pgstart to pgstop.
        Raises:
            SystemExit: If the mode is not a valid file mode.
        """
        # Accept FileMode members as well as plain strings.
        if isinstance(mode, (FileMode)):
            mode = mode.name
        if mode not in self.file_modes:
            print(f'Mode must be one of {", ".join(self.file_modes)}.')
            raise SystemExit
        doc = fitz.open(self.file)
        if not pgstop:
            pgstop = doc.page_count
        # NOTE(review): fitz page numbers are 0-based, so the default
        # pgstart=1 appears to skip the first page -- confirm intent.
        with open(outfile_path() / outfile, 'wb') as f:
            for page in doc.pages(pgstart, pgstop, pgstep):
                text = page.get_text(mode).encode('utf8')
                f.write(text)
                # Form-feed (0x0C) separator between pages.
                f.write(bytes((12,)))
        doc.close()
    def get(self, *, mode: str | FileMode | NonFileMode = 'text',
            pgstart: int = 1, pgstop: int | None = None, pgstep: int = 1):
        """
        Return PDF text/contents in the format of `mode`.
        Args:
            mode (str | FileMode | NonFileMode, optional): The mode of the PDF
                output. Defaults to 'text'. See Fitz docs:
                https://bit.ly/3FiUbHf
            pgstart (int, optional): The page at which to start. Defaults to 1.
            pgstop (int | None, optional): The page at which to stop. Defaults
                to None.
            pgstep (int, optional): The page steps. Defaults to 1 which
                outputs every page from pgstart to pgstop.
        Raises:
            SystemExit: If mode is not a FileMode or NonFileMode
        """
        # Accept enum members as well as plain strings.
        if isinstance(mode, (FileMode, NonFileMode)):
            mode = mode.name
        if mode not in self.file_modes and mode not in self.non_file_modes:
            print(f'Mode must be one of {", ".join(self.file_modes)},'
                  f' {", ".join(self.non_file_modes)}.')
            raise SystemExit
        doc = fitz.open(self.file)
        if not pgstop:
            pgstop = doc.page_count
        # One entry per page, in iteration order.
        text = []
        for page in doc.pages(pgstart, pgstop, pgstep):
            text.append(page.get_text(mode))
        doc.close()
        return text
# Module is import-only for now; no CLI entry point is defined.
if __name__ == '__main__':
    pass
|
<filename>src/ppo/agent.py
import random
import logging
import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import load_model
from typing import Tuple, List, Any
from src.base import Agent
from src.ppo.memory import Memory
_logger = logging.getLogger(__name__)
# NOTE(review): this class shadows the imported `src.base.Agent` base class
# with the same name -- consider renaming to PPOAgent for clarity.
class Agent(Agent):
    """Interacts with and learns from the environment."""
    def __init__(self, state_size: int, action_size: int, seed: int = 1993,
                 memory_size: int = int(1e5), nb_hidden: tuple = (64, 64),
                 gamma: float = 0.99, lam: float = 0.97,
                 target_kl: float = 0.01,
                 policy_lr: float = 3e-4, value_lr: float = 1e-3,
                 train_policy_iters: int = 80, train_value_iters: int = 80,
                 clip_ratio: float = 0.2,
                 epsilon_enabled: bool = True, epsilon_start: float = 1.0,
                 epsilon_end: float = 0.01, epsilon_decay: float = 0.995,
                 **kwargs):
        """
        Initialize a Proximal Policy Optimization (PPO) agent.
        Parameters:
        - state_size: dimension of each state.
        - action_size: dimension of each action.
        - seed: random seed.
        - memory_size: size of the replay memory.
        - nb_hidden: number of hidden layers in the network.
        - gamma: discount factor. (Always between 0 and 1.).
        - lam: lambda for GAE-Lambda.
        - target_kl: KL divergence between target and current policy. This will get used for early stopping in learn function. (Usually small, 0.01 or 0.05.)
        - policy_lr: learning rate for the policy optimizer.
        - value_function_lr: learning rate for the value function optimizer.
        - train_policy_iters: Maximum number of gradient descent steps to take on policy loss per epoch. (Early stopping may cause optimizer to take fewer than this.)
        - train_value_iters: Number of gradient descent steps to take on value function per epoch.
        - clip_ratio: clipping ratio for the policy objective.
        - epsilon_enabled: if True, use epsilon-greedy action selection.
        - epsilon_start: initial value for the epsilon parameter.
        - epsilon_end: final value for the epsilon parameter.
        - epsilon_decay: decay rate for the epsilon parameter.
        """
        self.state_size = state_size
        self.action_size = action_size
        # random.seed returns None; kept only to seed the global RNG.
        self.seed = random.seed(seed)
        self.memory_size = memory_size
        self.gamma = gamma
        self.lam = lam
        self.target_kl = target_kl
        self.policy_lr = policy_lr
        self.value_lr = value_lr
        self.train_policy_iters = train_policy_iters
        self.train_value_iters = train_value_iters
        self.clip_ratio = clip_ratio
        self.epsilon_enabled = epsilon_enabled
        self.epsilon_start = epsilon_start
        self.epsilon_end = epsilon_end
        self.epsilon_decay = epsilon_decay
        # Initialize epsilon
        if self.epsilon_enabled:
            self.epsilon = self.epsilon_start
        else:
            # epsilon == 0.0 disables random exploration entirely.
            self.epsilon = 0.0
        self.memory = Memory(self.state_size, self.action_size,
                             self.memory_size, self.gamma, self.lam)
        # Initialize the actor and the critic as keras models.
        # Both heads are built from the same symbolic input tensor but are
        # separate MLPs (no shared hidden layers).
        state_input = keras.Input(shape=(self.state_size,), dtype=tf.float32)
        logits = self.build_mlp(state_input, list(nb_hidden) +
                                [self.action_size], tf.tanh, None)
        self.actor = keras.Model(inputs=state_input, outputs=logits)
        self.value = tf.squeeze(
            self.build_mlp(state_input, list(nb_hidden) + [1], tf.tanh, None), axis=1
        )
        self.critic = keras.Model(inputs=state_input, outputs=self.value)
        # Initialize the policy and the value function optimizers
        self.policy_opt = Adam(learning_rate=self.policy_lr)
        self.value_opt = Adam(learning_rate=self.value_lr)
        _logger.info("PPO Agent initialized.")
    def logs(self):
        """
        Logs the agent's performance.
        You can replace this function with your own implementation.
        Returns:
        - A string with the log message.
        """
        return f"Epsilon: {self.epsilon:.2f}"
    def act(self, state: np.ndarray):
        """
        Returns a random action.
        Args:
        - state: current state.
        Returns
        - action: action to take.
        """
        _logger.debug(f"Acting on state {state}")
        # Preprocessing state
        state = self.prep_state(state)
        # Sample action from actor
        logits = self.actor(state)
        action = tf.squeeze(tf.random.categorical(logits, 1), axis=1)
        # Get the value and log-probability of the action; stashed on the
        # instance so the following step() call can store them in memory.
        self.temp_value = self.critic(state)
        self.temp_logprob = self.compute_logprobabilities(logits, action)
        # Epsilon-greedy action selection
        if self.epsilon_enabled and random.random() < self.epsilon:
            _logger.debug("Epsilon-greedy action selection.")
            action = random.randrange(self.action_size)
        else:
            action = action[0].numpy()
        # Epsilon decays once per act() call, not per episode.
        if self.epsilon_enabled:
            self.decay_eps()
        _logger.debug(f"Returning action: {action}")
        return action
    def step(self, state: np.ndarray, action: int, reward: int, done: bool, **kwargs):
        """
        Performs a step from the environment.
        In the Random Agent, this function does nothing.
        Args:
        - state: current state.
        - action: action taken.
        - reward: reward received.
        - done: if the episode is over.
        """
        _logger.debug({"message": "Registering step", "state": state,
                       "action": action, "reward": reward, "done": done})
        # Preprocessing state
        state = self.prep_state(state)
        # Convert action to tensor
        action = tf.convert_to_tensor([action], dtype=tf.int64)
        # Store the transition together with the value/logprob cached by act().
        self.memory.add(state, action, reward,
                        self.temp_value, self.temp_logprob)
        self.temp_value = None
        self.temp_logprob = None
        if done:
            # NOTE(review): this branch only runs when done is True, so
            # `last_value` is always 0; the bootstrap `self.critic(state)`
            # arm is dead code here -- confirm truncation handling.
            last_value = 0 if done else self.critic(state)
            self.memory.finish_trajectory(last_value)
            experiences = self.memory.get()
            self.learn(experiences)
        _logger.debug("Finished step.")
    def learn(self, experiences: Tuple[Any]):
        """
        Update value parameters using given batch of experience tuples.
        Args:
        - experiences: tuple of (states, actions, advantages, returns, logprobs) tuples.
        """
        _logger.debug("Starting learning.")
        states, actions, advantages, returns, logprobs = experiences
        # Update the policy and implement early stopping using KL divergence
        for _ in range(self.train_policy_iters):
            kl = self.train_policy(states, actions, logprobs, advantages)
            if kl > 1.5 * self.target_kl:
                break  # Early Stopping
        # Update the value function
        for _ in range(self.train_value_iters):
            self.train_value_function(states, returns)
        _logger.debug("Finished learning.")
    def decay_eps(self):
        """
        Decay epsilon-greedy used for action selection.
        """
        # Multiplicative decay, floored at epsilon_end.
        self.epsilon = max(self.epsilon_end, self.epsilon_decay * self.epsilon)
    def save_model(self, path: str = "/PPO"):
        """
        Save the model to the given path.
        Args:
        - path: path to save the model
        """
        _logger.debug(f"Saving model to path: {path}")
        # remove / at the end of the path
        if path[-1] == "/":
            path = path[:-1]
        actor_path = f"{path}/actor"
        critic_path = f"{path}/critic"
        _logger.debug(f"Saving actor to path: {actor_path}")
        self.actor.save(actor_path)
        _logger.debug(f"Saving critic to path: {critic_path}")
        self.critic.save(critic_path)
    def load_model(self, path: str = "/PPO"):
        """
        Load the model from the given path.
        Args:
        - path: path to load the model
        """
        _logger.debug(f"Loading model from path: {path}")
        # remove / at the end of the path
        if path[-1] == "/":
            path = path[:-1]
        actor_path = f"{path}/actor"
        critic_path = f"{path}/critic"
        _logger.debug(f"Loading actor from path: {actor_path}")
        self.actor = load_model(actor_path)
        _logger.debug(f"Loading critic from path: {critic_path}")
        self.critic = load_model(critic_path)
    def build_mlp(self, x: tf.Tensor, nb_hidden: List[int],
                  activation: callable = tf.tanh, output_activation: callable = None):
        """
        Creates a multi-layer perceptron policy.
        Args:
        - x: input tensor for the network
        - nb_hidden: list with the number of units in each hidden layer
        - activation: activation function
        - output_activation: output activation function
        Returns:
        - output tensor
        """
        _logger.debug({"message": "Starting policy network.", "x": x,
                       "nb_hidden": nb_hidden, "activation": activation,
                       "output_activation": output_activation})
        # The last entry of nb_hidden is the output layer size.
        for size in nb_hidden[:-1]:
            x = layers.Dense(units=size, activation=activation)(x)
        return layers.Dense(units=nb_hidden[-1], activation=output_activation)(x)
    def compute_logprobabilities(self, logits: tf.Tensor, actions: tf.Tensor):
        """
        Compute the log-probabilities of taking actions a by using the logits (i.e. the output of the actor).
        Args:
        - logits: the output of the actor
        - actions: the actions to take
        Returns:
        - log-probabilities of the actions
        """
        _logger.debug({"message": "Starting log-probabilities computation.",
                       "logits": logits, "actions": actions})
        logprobs_all = tf.nn.log_softmax(logits)
        # Select each row's log-probability for its taken action via one-hot mask.
        logprob = tf.reduce_sum(
            tf.one_hot(actions, self.action_size) * logprobs_all, axis=1
        )
        _logger.debug({"message": "Finished log-probabilities computation.",
                       "logprob": logprob})
        return logprob
    def train_policy(self, states: np.ndarray, actions: np.ndarray,
                     logprobabilities: np.ndarray, advantages: np.ndarray):
        """
        Train the policy by maxizing the PPO-Clip objective.
        Args:
        - states: batch of states.
        - actions: batch of actions.
        - logprobabilities: batch of log-probabilities.
        - advantages: batch of advantages.
        """
        _logger.debug({"message": "Starting policy training.",
                       "states": states, "actions": actions,
                       "logprobabilities": logprobabilities,
                       "advantages": advantages})
        with tf.GradientTape() as tape:  # Record operations for automatic differentiation.
            # Importance ratio pi_new(a|s) / pi_old(a|s), computed in log space.
            ratio = tf.exp(
                self.compute_logprobabilities(self.actor(states), actions)
                - logprobabilities
            )
            # Clipped surrogate bound for each sample (PPO-Clip objective).
            min_advantage = tf.where(
                advantages > 0,
                (1 + self.clip_ratio) * advantages,
                (1 - self.clip_ratio) * advantages,
            )
            policy_loss = -tf.reduce_mean(
                tf.minimum(ratio * advantages, min_advantage)
            )
        policy_grads = tape.gradient(
            policy_loss, self.actor.trainable_variables)
        self.policy_opt.apply_gradients(
            zip(policy_grads, self.actor.trainable_variables))
        # Approximate KL(old || new) used for early stopping in learn().
        kl = tf.reduce_mean(
            logprobabilities
            - self.compute_logprobabilities(self.actor(states), actions)
        )
        # NOTE(review): reduce_sum of a scalar is a no-op; kept as-is.
        kl = tf.reduce_sum(kl)
        _logger.debug({"message": "Finished policy training.",
                       "policy_loss": policy_loss, "kl": kl})
        return kl
    def train_value_function(self, states, returns):
        """
        Train the value function by regression on mean-squared error.
        Args:
        - states: batch of states.
        - returns: batch of returns.
        """
        _logger.debug({"message": "Starting value function training.",
                       "states": states, "returns": returns})
        # Record operations for automatic differentiation.
        with tf.GradientTape() as tape:
            value_loss = tf.reduce_mean((returns - self.critic(states)) ** 2)
        value_grads = tape.gradient(
            value_loss, self.critic.trainable_variables)
        self.value_opt.apply_gradients(
            zip(value_grads, self.critic.trainable_variables))
        _logger.debug({"message": "Finished value function training."})
|
<filename>wagtail_review/models.py<gh_stars>0
from django.conf import settings
from django.db import models
from django.db.models import Case, Q, Value, When
from django.db.models.constraints import CheckConstraint, UniqueConstraint
from django.template.loader import render_to_string
from django.urls import reverse
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.module_loading import import_string
from django.utils.translation import ugettext_lazy as _
try:
from wagtail.admin.mail import send_mail # Wagtail >= 2.7
except ImportError:
from wagtail.admin.utils import send_mail # Wagtail < 2.7
from wagtail.core.models import UserPagePermissionsProxy
from .token import Token
def get_review_url_impl(token):
    """Build the default absolute review URL for the given token."""
    encoded_token = token.encode()
    relative_url = reverse('wagtail_review:review', args=[encoded_token])
    return settings.BASE_URL + relative_url
def get_review_url(token):
    """
    Return the review URL for *token*, honouring the
    WAGTAILREVIEW_REVIEW_URL_BUILDER setting when it is defined.
    """
    builder_path = getattr(settings, 'WAGTAILREVIEW_REVIEW_URL_BUILDER', None)
    if builder_path is None:
        builder = get_review_url_impl
    else:
        builder = import_string(builder_path)
    return builder(token)
class ExternalReviewer(models.Model):
    """
    An external user without an account who may still need to view draft
    revisions of pages and comment on them.
    """
    email = models.EmailField()

    def get_reviewer(self):
        """Return the Reviewer record for this external user, creating it on demand."""
        reviewer, _created = Reviewer.objects.get_or_create(external=self)
        return reviewer
class Share(models.Model):
    """
    Grants access to draft revisions of a page to an external user.
    """
    external_user = models.ForeignKey(ExternalReviewer, on_delete=models.CASCADE, related_name='shares')
    page = models.ForeignKey('wagtailcore.Page', on_delete=models.CASCADE, related_name='wagtailreview_shares')
    shared_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='+')
    shared_at = models.DateTimeField(auto_now_add=True)
    can_comment = models.BooleanField(default=False)
    # Access-tracking timestamps; both null until the share is first used.
    first_accessed_at = models.DateTimeField(null=True)
    last_accessed_at = models.DateTimeField(null=True)
    # When set, the share stops granting access after this time
    # (enforced in ReviewerPagePermissions.can_view).
    expires_at = models.DateTimeField(null=True)
    def send_share_email(self):
        """
        Emails the user with the review link
        """
        email_address = self.external_user.email
        # The token grants access to the page's latest revision at send time.
        review_token = Token(self.external_user.get_reviewer(), self.page.get_latest_revision())
        email_body = render_to_string('wagtail_review/email/share.txt', {
            'page': self.page,
            'review_url': get_review_url(review_token),
        })
        send_mail("A page has been shared with you", email_body, [email_address])
    def log_access(self):
        """
        Updates the *_accessed_at fields
        """
        self.last_accessed_at = timezone.now()
        if self.first_accessed_at is None:
            self.first_accessed_at = self.last_accessed_at
        self.save(update_fields=['first_accessed_at', 'last_accessed_at'])
    class Meta:
        unique_together = [
            # At most one share per (external user, page) pair.
            ('external_user', 'page'),
        ]
class Reviewer(models.Model):
    """
    This model represents a union of the AUTH_USER_MODEL and ExternalReviewer models.
    It's intended as a place to reference in ForeignKeys in places where either an internal or external
    user could be specified.
    """
    # Exactly one of these two is non-null (enforced by the CheckConstraint below).
    internal = models.ForeignKey(settings.AUTH_USER_MODEL, null=True, on_delete=models.CASCADE, related_name='+')
    external = models.ForeignKey(ExternalReviewer, null=True, on_delete=models.CASCADE, related_name='+')
    def get_name(self):
        """Return a display name: full name (or email) for internal users, email for external."""
        if self.internal:
            return self.internal.get_full_name() or self.internal.email
        else:
            return self.external.email
    def get_email(self):
        """Return the email address of the underlying internal or external user."""
        if self.internal:
            return self.internal.email
        else:
            return self.external.email
    def page_perms(self, page_id):
        """Return the permission checker for this reviewer against *page_id*."""
        return ReviewerPagePermissions(self, page_id)
    class Meta:
        constraints = [
            # Either internal or external must be set and not both
            CheckConstraint(
                check=Q(internal__isnull=False, external__isnull=True) | Q(internal__isnull=True, external__isnull=False),
                name='either_internal_or_external'
            ),
            # Internal must be unique if it is not null
            UniqueConstraint(fields=['internal'], condition=Q(internal__isnull=False), name='unique_internal'),
            # External must be unique if it is not null
            UniqueConstraint(fields=['external'], condition=Q(external__isnull=False), name='unique_external'),
        ]
class ReviewerPagePermissions:
    """Permission checks for a single reviewer against a single page."""

    def __init__(self, reviewer, page_id):
        self.reviewer = reviewer
        self.page_id = page_id

    @cached_property
    def share(self):
        """The Share granting this external reviewer access to the page, if any."""
        if not self.reviewer.external_id:
            return None
        return Share.objects.filter(
            external_user_id=self.reviewer.external_id, page_id=self.page_id
        ).first()

    def can_view(self):
        """
        Returns True if the reviewer can view the page
        """
        if not self.reviewer.external_id:
            # Internal users: viewing is governed elsewhere, always allowed here.
            return True
        if self.share is None:
            # The page was never shared with this external reviewer.
            return False
        expires_at = self.share.expires_at
        if expires_at is not None and expires_at < timezone.now():
            # The share has expired.
            return False
        return True

    def can_comment(self):
        """
        Returns True if the reviewer can comment on the page
        """
        if not self.can_view():
            return False
        if self.reviewer.external_id and not self.share.can_comment:
            # External reviewer may view but was not granted commenting.
            return False
        return True
class Comment(models.Model):
    """
    An annotation left by a reviewer on a specific page revision, anchored
    to a text range via XPath + offsets.
    """
    page_revision = models.ForeignKey('wagtailcore.PageRevision', on_delete=models.CASCADE, related_name='wagtailreview_comments')
    reviewer = models.ForeignKey(Reviewer, on_delete=models.CASCADE, related_name='comments')
    # The quoted (highlighted) source text the comment refers to.
    quote = models.TextField()
    text = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
    resolved_at = models.DateTimeField(null=True)
    # Anchor of the highlighted range within the rendered content.
    content_path = models.TextField()
    start_xpath = models.TextField()
    start_offset = models.IntegerField()
    end_xpath = models.TextField()
    end_offset = models.IntegerField()
    def get_frontend_url(self, reviewer):
        """Return the review URL for *reviewer*, deep-linked to this comment."""
        review_token = Token(reviewer, self.page_revision_id)
        return get_review_url(review_token) + "?comment=" + str(self.id)
class CommentReply(models.Model):
    """A reviewer's reply in the thread beneath a Comment."""
    comment = models.ForeignKey(Comment, on_delete=models.CASCADE, related_name='replies')
    reviewer = models.ForeignKey(Reviewer, on_delete=models.CASCADE, related_name='comment_replies')
    text = models.TextField()
    created_at = models.DateTimeField(auto_now_add=True)
    updated_at = models.DateTimeField(auto_now=True)
class ReviewRequestQuerySet(models.QuerySet):
    """Filtering helpers for ReviewRequest."""
    def has_approved_response(self):
        """Requests with at least one 'approved' response."""
        return self.filter(responses__in=ReviewResponse.objects.approved())
    def has_no_approved_response(self):
        """Requests with no 'approved' response yet."""
        return self.exclude(responses__in=ReviewResponse.objects.approved())
    def open(self):
        """Requests still awaiting review."""
        return self.filter(is_closed=False)
    def closed(self):
        """Requests that have been closed."""
        return self.filter(is_closed=True)
class ReviewRequest(models.Model):
    """
    A request for one or more reviewers to review a specific page revision.
    """
    page_revision = models.ForeignKey('wagtailcore.PageRevision', on_delete=models.CASCADE, related_name='wagtailreview_reviewrequests')
    submitted_by = models.ForeignKey(settings.AUTH_USER_MODEL, on_delete=models.CASCADE, related_name='+')
    submitted_at = models.DateTimeField(auto_now_add=True)
    assignees = models.ManyToManyField(Reviewer)
    is_closed = models.BooleanField(default=False)
    objects = ReviewRequestQuerySet.as_manager()
    def get_review_url(self, reviewer):
        """Return the tokenized review URL for *reviewer* for this request."""
        review_token = Token(reviewer, self.page_revision_id, self)
        return get_review_url(review_token)
    def send_request_emails(self):
        """Email every assignee a personalised review link."""
        # send request emails to all reviewers except the reviewer record for the user submitting the request
        # NOTE(review): despite the comment above, no assignee is actually
        # excluded here -- confirm whether the submitter should be skipped.
        for reviewer in self.assignees.all():
            email = reviewer.get_email()
            context = {
                'email': email,
                'user': reviewer.internal,
                'reviewer': reviewer,
                'review_request': self,
                'page': self.page_revision.as_page_object(),
                'submitter': self.submitted_by,
                'review_url': self.get_review_url(reviewer),
            }
            email_subject = render_to_string('wagtail_review/email/request_review_subject.txt', context).strip()
            email_content = render_to_string('wagtail_review/email/request_review.txt', context).strip()
            send_mail(email_subject, email_content, [email])
    @classmethod
    def get_pages_with_reviews_for_user(cls, user):
        """
        Return a queryset of pages which have reviews, for which the user has edit permission
        """
        user_perms = UserPagePermissionsProxy(user)
        reviewed_pages = (
            cls.objects
            .order_by('-submitted_at')
            .values_list('page_revision__page_id', 'submitted_at')
        )
        # Annotate datetime when a review was last created for this page.
        # Because reviewed_pages is ordered newest-first and Case picks the
        # first matching When, each page resolves to its latest submitted_at.
        last_review_requested_at = Case(
            *[
                When(pk=pk, then=Value(submitted_at))
                for pk, submitted_at in reviewed_pages
            ],
            output_field=models.DateTimeField(),
        )
        return (
            user_perms.editable_pages()
            .filter(pk__in=(page[0] for page in reviewed_pages))
            .annotate(last_review_requested_at=last_review_requested_at)
            .order_by('-last_review_requested_at')
        )
    def get_assignees_without_response(self):
        """Return assignees who have not yet submitted a ReviewResponse."""
        return self.assignees.exclude(
            id__in=ReviewResponse.objects.filter(request=self).values_list('submitted_by_id', flat=True)
        )
class ReviewResponseQuerySet(models.QuerySet):
    """Filtering helpers for ReviewResponse."""
    def approved(self):
        """Responses whose status is 'approved'."""
        return self.filter(status=ReviewResponse.STATUS_APPROVED)
    def needs_changes(self):
        """Responses whose status is 'needs-changes'."""
        return self.filter(status=ReviewResponse.STATUS_NEEDS_CHANGES)
class ReviewResponse(models.Model):
    """
    A single reviewer's verdict (approve / needs changes) on a ReviewRequest.
    """
    STATUS_APPROVED = 'approved'
    STATUS_NEEDS_CHANGES = 'needs-changes'
    STATUS_CHOICES = [
        (STATUS_APPROVED, _("approved")),
        (STATUS_NEEDS_CHANGES, _("needs changes")),
    ]
    request = models.ForeignKey(ReviewRequest, on_delete=models.CASCADE, related_name='responses')
    submitted_by = models.ForeignKey(Reviewer, on_delete=models.CASCADE, related_name='+')
    submitted_at = models.DateTimeField(auto_now_add=True)
    status = models.CharField(max_length=255, choices=STATUS_CHOICES)
    # Optional free-text feedback accompanying the verdict.
    comment = models.TextField(blank=True)
    objects = ReviewResponseQuerySet.as_manager()
|
<filename>bot_ava.py
from selenium import webdriver
from time import sleep
from icalendar import Calendar, Event, vDatetime
from datetime import datetime
from pytz import UTC
import os
import time
import secret
import platform
import json
DATA_PATH = r'C:\Users\lacft\Documents\moddle_calendar_bot\data'
data = {}
class Bot:
    """Downloads the Moodle calendar export (.ics) via Selenium and parses it."""
    def __init__(self, username, password):
        self.username = username
        self.password = password
    def getFile(self):
        """Fetch a fresh .ics export, reusing the cached file when recent enough."""
        file_in_path = self.getFileInPath()
        # BUGFIX: getFileInPath() can return None (empty dir or multiple files
        # cleaned up); the original `== False` test missed that and crashed in
        # calculateDeltaTime(None). Any falsy result now triggers a download.
        if not file_in_path:
            self.downloadFile()
            file_name = self.renameFile()
            self.readIcsFile('{}.ics'.format(file_name))
        elif self.calculateDeltaTime(file_in_path):
            # Cached file is recent enough to reuse.
            self.readIcsFile(file_in_path)
        else:
            # Cached file is stale: delete it and start over.
            self.deleteFile(file_in_path)
            self.getFile()
    def downloadFile(self):
        """Log into the Moodle site with Selenium and export the calendar .ics."""
        try:
            options = webdriver.ChromeOptions()
            preferences = {'download.default_directory': '{}'.format(DATA_PATH), 'safebrowsing.enable': 'false'}
            options.add_experimental_option("prefs", preferences)
            driver = webdriver.Chrome(chrome_options=options)
            driver.get('https://virtual.ifro.edu.br/jiparana/calendar/export.php')
            sleep(2)
            driver.find_element_by_xpath('//*[@id="username"]').send_keys(self.username)
            driver.find_element_by_xpath('//*[@id="password"]').send_keys(self.password)
            sleep(2)
            driver.find_element_by_xpath('//*[@id="loginbtn"]').click()
            sleep(2)
            # Export all events for the recent/upcoming period.
            driver.find_element_by_xpath('//*[@id="id_events_exportevents_all"]').click()
            driver.find_element_by_xpath('//*[@id="id_period_timeperiod_recentupcoming"]').click()
            sleep(2)
            driver.find_element_by_xpath('//*[@id="id_export"]').click()
            # Give the browser time to finish the download before quitting.
            sleep(5)
            driver.quit()
            print('download file sucessful')
        except:
            print('download file error')
    def getFileInPath(self):
        """Return the single cached file name, or False/None when none is usable."""
        path = DATA_PATH
        for p, _, files in os.walk(os.path.abspath(path)):
            if (len(files) == 0):
                return False
            else:
                for file in files:
                    print(os.path.join(file))
                    if(len(files) == 1):
                        return os.path.join(file)
                    else:
                        # More than one file: clear them out; caller treats the
                        # implicit None return as "no usable cache".
                        self.deleteFile(file)
    data['calendar'] = []
    def readIcsFile(self, file_name):
        """Parse the named .ics file and print its VEVENT start dates."""
        # try:
        file = open('data/{}'.format(file_name), 'rb')
        calendar = Calendar.from_ical(file.read())
        print('file {} opened'.format(file_name))
        for component in calendar.walk():
            if component.name == "VEVENT":
                print(component['dtstart'])
                # data['calendar'].append({
                #     'name': '{}'.format(component.get('summary')),
                #     'description': '{}'.format(component.get('description')),
                #     'dateStart': '{}'.format(vDatetime.from_ical({}.format(component.get('dtstart')))),
                #     'dateEnd': '{}'.format(vDatetime.from_ical({}.format(component.get('dtend')))),
                #     'categorie': '{}'.format(component.get('categories'))
                # })
        # self.writeJsonData()
        # print(component.get('description'))
        file.close()
        print('file {} closed'.format(file_name))
        # except:
        #     print('read file error')
    def writeJsonData(self):
        """Dump the accumulated calendar data to data.json."""
        with open('data.json', 'w') as jsonFile:
            json.dump(data, jsonFile)
    def buildFileName(self):
        """Return a creation timestamp used to name the downloaded file.

        NOTE(review): this stats DATA_PATH (the directory), not the downloaded
        file itself -- confirm that is intended.
        """
        # BUGFIX: platform.system is a function; the original compared the
        # function object itself to 'Windows', which was always False, so the
        # Windows branch was never taken.
        if platform.system() == 'Windows':
            return os.path.getctime(DATA_PATH)
        else:
            stat = os.stat(DATA_PATH)
            try:
                return stat.st_birthtime
            except AttributeError:
                # Linux has no birth time; fall back to last modification time.
                return stat.st_mtime
    def renameFile(self):
        """Rename the exported icalexport.ics to '<timestamp>.ics' and return the name."""
        try:
            file_name = self.buildFileName()
            os.rename(r'{}\icalexport.ics'.format(DATA_PATH), r'{}\{}.ics'.format(DATA_PATH, file_name))
            print('rename file sucessful')
            return file_name
        except:
            print('rename file error')
    def deleteFile(self, file_name):
        """Delete the named file from DATA_PATH."""
        try:
            os.remove(r'{}\{}'.format(DATA_PATH, file_name))
            print('delete file sucessful')
        except:
            print('delete file error')
    def calculateDeltaTime(self, name):
        """Return True when the cached file (named by its timestamp) is fresh enough."""
        print(name)
        # The file name is '<creation timestamp>.ics'; strip the extension.
        name = name.split('.ics')
        print(name[0])
        date_file = datetime.fromtimestamp(float(name[0]))
        now = datetime.now()
        delta = now - date_file
        print(delta)
        print(delta.total_seconds())
        # Fresh for ~5.5 hours (20000 seconds).
        if(delta.total_seconds() < 20000):
            return True
        else:
            return False
# Run the bot only when executed as a script, not when imported as a module.
if __name__ == '__main__':
    bot = Bot(secret.USERNAME, secret.PASSWORD)
    bot.getFile()
|
<gh_stars>1-10
# ==============================================================================
# ARSC (A Relatively Simple Computer) License
# ==============================================================================
#
# ARSC is distributed under the following BSD-style license:
#
# Copyright (c) 2016-2017 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice, this
# list of conditions and the following disclaimer in the documentation and/or other
# materials provided with the distribution.
#
# 3. The name of the author may not be used to endorse or promote products derived from
# this product without specific prior written permission from the author.
#
# 4. Products derived from this product may not be called "ARSC" nor may "ARSC" appear
# in their names without specific prior written permission from the author.
#
# THIS PRODUCT IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,
# BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS PRODUCT, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
#
# PART OF THE ARSC ASSEMBLER
#
from binascii import hexlify
# The base class for code generators
# The base class for code generators
class BaseGenerator:
    def on_instruction(self, opcode, indirect_or_iodev_bit, index, address, stmt_str = None):
        """Emit one assembled instruction word; stmt_str is the source text, if any."""
        raise NotImplementedError
    def on_bss_directive(self, bss_stmt):
        """Emit storage for an uninitialised-block (BSS) directive."""
        raise NotImplementedError
    def on_bsc_directive(self, bsc_stmt):
        """Emit storage for a block-of-constants (BSC) directive."""
        raise NotImplementedError
    def get_generated_code(self):
        """Return the accumulated output in this generator's native format."""
        raise NotImplementedError
    def on_finished(self):
        """Hook called after assembly completes; optional for subclasses."""
        pass
# The pretty code generator that generates the human-readable, annotated code
# The pretty code generator that generates the human-readable, annotated code
class PrettyGenerator(BaseGenerator):
    """Emits one annotated line per memory word: '0xADDR:<tab>WORD[<tab>// note]'."""

    def __init__(self):
        self.gen_code = ''
        self.curr_addr = 0

    def get_generated_code(self):
        """Return the accumulated annotated listing."""
        return self.gen_code

    def on_instruction(self, opcode, indirect_or_iodev_bit, index, address, stmt_str = None):
        """Append one 16-bit instruction word (5+1+2+8 bit fields) with its source text."""
        word = f'{opcode:05b}{indirect_or_iodev_bit:01b}{index:02b}{address:08b}'
        line = f'0x{self.curr_addr:04x}:\t{word}'
        if stmt_str is not None:
            line += '\t// ' + stmt_str.lstrip()
        self.gen_code += line + '\n'
        self.curr_addr += 1

    def on_bss_directive(self, bss_stmt):
        """Append AllocSize zero words, each annotated with the variable symbol."""
        for offset in range(bss_stmt.AllocSize):
            annotation = f'{bss_stmt.VariableSymbol} + {offset} (BSS)'
            self.gen_code += f'0x{self.curr_addr:04x}:\t' + '0'.zfill(16) + f'\t// {annotation}\n'
            self.curr_addr += 1

    def on_bsc_directive(self, bsc_stmt):
        """Append one annotated word per constant, using 16-bit two's complement for negatives."""
        for offset, constant in enumerate(bsc_stmt.Constants):
            if constant >= 0:
                word = f'{constant:016b}'
            else:
                # Two's-complement encoding for negative constants.
                word = bin(constant & 0b1111111111111111)[2:]
            annotation = f'{bsc_stmt.ConstantSymbol} + {offset} (BSC -> {constant})'
            self.gen_code += f'0x{self.curr_addr:04x}:\t{word}\t// {annotation}\n'
            self.curr_addr += 1
# Binary generator that generates the executable (binary) ARSC code. 'get_hex_string'
# method may be used to obtain the hexadecimal ASCII representation of the binary
# data, where each byte is represented by two HEX digits. This HEX string can be
# stored in the .hex memory initialization file
# Binary generator that generates the executable (binary) ARSC code. 'get_hex_string'
# method may be used to obtain the hexadecimal ASCII representation of the binary
# data, where each byte is represented by two HEX digits. This HEX string can be
# stored in the .hex memory initialization file
class BinaryGenerator(BaseGenerator):
    """Accumulates assembled words as little-endian byte pairs in a bytearray."""

    def __init__(self):
        self.bin_data = bytearray()

    def get_generated_code(self):
        """Return the raw binary image as a bytearray."""
        return self.bin_data

    def on_instruction(self, opcode, indirect_or_iodev_bit, index, address, stmt_str = None):
        """Append one instruction word: low byte is the address, high byte packs opcode/flags."""
        low_byte = address
        high_byte = (opcode << 3) | (indirect_or_iodev_bit << 2) | index
        self.bin_data += bytes((low_byte, high_byte))

    def on_bss_directive(self, bss_stmt):
        """Append AllocSize zero words (two zero bytes each)."""
        self.bin_data += b'\x00\x00' * bss_stmt.AllocSize

    def on_bsc_directive(self, bsc_stmt):
        """Append each constant as a little-endian 16-bit word."""
        for constant in bsc_stmt.Constants:
            self.bin_data += bytes((constant & 0xFF, (constant >> 8) & 0xFF))
# MIF generator produces an ASCII memory intialization string that can be used to
# create a .mif memory initialization file supported by most FPGA synthesis tools
# for CAM, RAM and ROM memory initialization
# MIF generator produces an ASCII memory intialization string that can be used to
# create a .mif memory initialization file supported by most FPGA synthesis tools
# for CAM, RAM and ROM memory initialization
class MifGenerator(BinaryGenerator):
    """Renders the binary image as an Altera/Intel .mif memory initialization file."""

    def __init__(self):
        self.mif_data = ''
        BinaryGenerator.__init__(self)

    def on_finished(self):
        """Format the accumulated binary data into the .mif text once assembly is done."""
        parts = [
            'WIDTH=16;\n',
            'DEPTH=%d;\n\n' % (len(self.bin_data) / 2),
            'ADDRESS_RADIX=HEX;\nDATA_RADIX=HEX;\n\n',
            'CONTENT BEGIN\n',
        ]
        for address, byte_offset in enumerate(range(0, len(self.bin_data), 2)):
            # Words are stored little-endian; print high byte then low byte.
            high = format(self.bin_data[byte_offset + 1], '02x')
            low = format(self.bin_data[byte_offset], '02x')
            parts.append('\t%s\t:\t%s%s;\n' % (format(address, '02x'), high, low))
        parts.append('END;\n')
        self.mif_data = ''.join(parts)

    def get_generated_code(self):
        """Return the .mif file contents built by on_finished()."""
        return self.mif_data
# Hexadecimal generator that produce the HEX string representation of the binary
# data (used to create a .hex memory initialization file)
class HexGenerator(BinaryGenerator):
    """Renders the binary image as its hexadecimal ASCII representation
    (two hex digits per byte, for a .hex memory initialization file)."""

    def __init__(self):
        self.hex_data = ''
        # BUG FIX: previously called BaseGenerator.__init__, skipping
        # BinaryGenerator's initialization and leaving self.bin_data
        # undefined; mirror MifGenerator and initialize the direct base.
        BinaryGenerator.__init__(self)

    def on_finished(self):
        # NOTE(review): hexlify returns bytes on Python 3 — callers of
        # get_generated_code receive that unchanged; confirm expected type.
        self.hex_data = hexlify(self.bin_data)

    def get_generated_code(self):
        return self.hex_data
<filename>api/views.py<gh_stars>0
import re
from rest_framework.permissions import IsAuthenticated
from rest_framework.views import APIView
from api.serializers import (
UserSerializer,
UserInCourseSerializer,
LoginSerializer,
CourseSerializer,
RegistrationInCourseReadSerializer,
RegistrationInCourseWriteSerializer,
ActivitySerializer,
SubmissionReadSerializer,
SubmissionGradeSerializer
)
from django.contrib.auth.models import User
from rest_framework.response import Response
from rest_framework import serializers, status
from django.contrib.auth import authenticate
from rest_framework.authtoken.models import Token
from django.contrib.auth.hashers import make_password
from api.models import Activity, Course, Submission
from django.shortcuts import get_object_or_404
from rest_framework.authentication import TokenAuthentication
from api.permissions import IsInstructor, ReadOnlyCourses, IsFacilitator, IsStudent
class AccViews(APIView):
    """User registration endpoint."""

    def post(self, request):
        """Create a new user; 409 if the username is already taken."""
        serializer = UserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # BUG FIX: hash the plaintext password before persisting (restores the
        # make_password call that was replaced by a '<PASSWORD>' placeholder;
        # make_password is already imported at module level).
        serializer.validated_data['password'] = make_password(serializer.validated_data['password'])
        try:
            # NOTE(review): get-then-create is racy under concurrent requests;
            # a unique constraint on username is the real guard.
            User.objects.get(username=serializer.validated_data['username'])
            return Response({'msg': 'user already exists!'}, status=status.HTTP_409_CONFLICT)
        except User.DoesNotExist:
            new_user = User.objects.create(**serializer.validated_data)
            return Response(UserSerializer(new_user).data, status=status.HTTP_201_CREATED)
class LoginView(APIView):
    """Token-based login endpoint."""

    def post(self, request):
        """Authenticate credentials and return (or create) the user's token."""
        serializer = LoginSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        credentials = serializer.validated_data
        user = authenticate(request=request, **credentials)
        if not user:
            return Response({'msg': 'invalid user'}, status=status.HTTP_401_UNAUTHORIZED)
        token, _ = Token.objects.get_or_create(user=user)
        return Response({'token': str(token)}, status=status.HTTP_200_OK)
class CourseView(APIView):
    """CRUD for courses plus student enrollment (PUT replaces the roster)."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsInstructor | ReadOnlyCourses]

    def post(self, request):
        """Create a course (idempotent via get_or_create)."""
        serializer = CourseSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        course = Course.objects.get_or_create(**serializer.validated_data)[0]
        return Response(data=CourseSerializer(course).data, status=status.HTTP_201_CREATED)

    def put(self, request, course_id):
        """Replace the set of students enrolled in the course."""
        try:
            course = Course.objects.get(id=course_id)
        except Course.DoesNotExist:
            return Response(data={"errors": "invalid course_id"}, status=status.HTTP_404_NOT_FOUND)
        serializer = RegistrationInCourseWriteSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        user_ids = serializer.validated_data['user_ids']
        try:
            users = [User.objects.get(id=user_id) for user_id in user_ids]
        except User.DoesNotExist:
            # BUG FIX: was a bare `except:`, which also swallowed programming
            # errors and KeyboardInterrupt; only a missing user should 404.
            return Response(data={'errors': 'invalid user_id list'}, status=status.HTTP_404_NOT_FOUND)
        unique_users = list(set(users))
        if any(user.is_staff or user.is_superuser for user in unique_users):
            return Response({'errors': 'Only students can be enrolled in the course.'}, status=status.HTTP_400_BAD_REQUEST)
        course.users.set(unique_users)
        updated_course = RegistrationInCourseReadSerializer(instance=course)
        return Response(data=updated_course.data, status=status.HTTP_200_OK)

    def get(self, request, course_id=None):
        """Return one course by id, or all courses when no id is given."""
        if course_id:
            try:
                course = Course.objects.get(id=course_id)
            except Course.DoesNotExist:
                return Response(data={'errors': 'invalid course_id'}, status=status.HTTP_404_NOT_FOUND)
            return Response(data=RegistrationInCourseReadSerializer(instance=course).data,
                            status=status.HTTP_200_OK)
        courses = Course.objects.all()
        return Response(data=RegistrationInCourseReadSerializer(instance=courses, many=True).data,
                        status=status.HTTP_200_OK)

    def delete(self, request, course_id):
        """Delete a course; 404 if it does not exist."""
        course_to_delete = get_object_or_404(Course, id=course_id)
        course_to_delete.delete()
        return Response(data='', status=status.HTTP_204_NO_CONTENT)
class ActivityView(APIView):
    """Create-or-update (keyed by title) and list activities."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [IsInstructor | IsFacilitator]

    def post(self, request):
        """Create an activity, or update points when the title already exists."""
        serializer = ActivitySerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        data = serializer.validated_data
        try:
            activity = Activity.objects.get(title=data['title'])
            activity.points = data['points']
            activity.save()
        except Activity.DoesNotExist:
            activity = Activity.objects.create(**data)
        return Response(data=ActivitySerializer(instance=activity).data,
                        status=status.HTTP_201_CREATED)

    def get(self, request):
        """List every activity."""
        queryset = Activity.objects.all()
        return Response(data=ActivitySerializer(instance=queryset, many=True).data,
                        status=status.HTTP_200_OK)
class SubmissionView(APIView):
    """Create, grade and list submissions."""
    authentication_classes = [TokenAuthentication]
    permission_classes = [(IsInstructor | IsFacilitator | IsStudent) & IsAuthenticated]

    def post(self, request, activity_id):
        """Submit to an activity on behalf of the authenticated user."""
        activity = get_object_or_404(Activity, id=activity_id)
        serializer = SubmissionReadSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        created = Submission.objects.create(user=request.user, activity=activity,
                                            **serializer.validated_data)
        return Response(data=SubmissionReadSerializer(instance=created).data,
                        status=status.HTTP_201_CREATED)

    def put(self, request, submission_id):
        """Set the grade on an existing submission."""
        submission = get_object_or_404(Submission, id=submission_id)
        serializer = SubmissionGradeSerializer(data=request.data)
        serializer.is_valid(raise_exception=True)
        submission.grade = serializer.validated_data['grade']
        submission.save()
        return Response(data=SubmissionGradeSerializer(instance=submission).data,
                        status=status.HTTP_200_OK)

    def get(self, request):
        """Staff/superusers see every submission; students only their own."""
        if request.user.is_staff or request.user.is_superuser:
            queryset = Submission.objects.all()
        else:
            queryset = Submission.objects.filter(user_id=request.user.id)
        return Response(data=SubmissionReadSerializer(instance=queryset, many=True).data,
                        status=status.HTTP_200_OK)
|
from os.path import join
import numpy as np
import pickle
from grammar.grammar import Grammar
from components.dataset import Example
from grammar.python3.python3_transition_system import *
from datasets.utils import build_dataset_vocab
import sys
sys.path.append('.')
# from grammar.hypothesis import Hypothesis, ApplyRuleAction
# from components.action_info import get_action_infos
# from components.vocab import VocabEntry, Vocab
def elim_extraline(string: str) -> str:
    """Remove a blank line separating two lines of equal indentation.

    Walks the lines of *string*; whenever a non-blank line has the same
    leading-space count as the previous non-blank line and is directly
    preceded by an empty line, that empty line is dropped.

    Assumes *string* contains at least one line (indexing lines[0]).
    """
    lines = string.splitlines(True)
    prev_spaces = len(lines[0]) - len(lines[0].lstrip(' '))
    spaces = 0
    index = 1
    for line in lines[1:]:
        # BUG FIX: was `line is not '\n'` — identity comparison against a
        # string literal, which only worked via CPython interning and raises
        # a SyntaxWarning on Python 3.8+; use equality.
        if line != '\n':
            spaces = len(line) - len(line.lstrip(' '))
            prev_empty = lines[index - 1] == '\n'
            if spaces == prev_spaces and prev_empty:
                lines.pop(index - 1)
                index = index - 1
            prev_spaces = spaces
        index = index + 1
    result = "".join(lines)
    return result
def load_dataset(split, transition_system):
    """Load one hearthstone split ('train'/'dev'/'test') as a list of Examples.

    Reads the parallel card-description (.in) / target-code (.out) files,
    parses each target program into an AST via *transition_system*, and
    sanity-checks that the AST and its action tree round-trip.
    """
    prefix = 'card2code/third_party/hearthstone/'
    src_file = join(prefix, "{}_hs.in".format(split))
    tgt_file = join(prefix, "{}_hs.out".format(split))
    examples = []
    for idx, (src_line, tgt_line) in enumerate(zip(open(src_file, encoding="utf-8"), open(tgt_file, encoding="utf-8"))):
        # print(idx)
        src_line = src_line.rstrip()
        tgt_line = tgt_line.rstrip()
        # '§' is the dataset's single-line placeholder for newlines.
        tgt_line = tgt_line.replace("§", "\n")
        src_toks = src_line.split()
        tgt_toks = tgt_line.split()
        tgt_ast = transition_system.surface_code_to_ast(tgt_line)
        # sanity check: the AST should regenerate the target surface code
        # (up to blank-line normalization applied below)
        reconstructed_tgt = transition_system.ast_to_surface_code(tgt_ast)
        tgt_line = elim_extraline(tgt_line)
        reconstructed_tgt = reconstructed_tgt.replace("\n\n", "\n", 1)
        # reconstructed_tgt = reconstructed_tgt.replace("'True'", "True")
        # reconstructed_tgt = reconstructed_tgt.replace("'False'", "False")
        # print(tgt_line, reconstructed_tgt)
        # assert tgt_line.strip() == reconstructed_tgt.strip()
        tgt_action_tree = transition_system.get_action_tree(tgt_ast)
        # sanity check: replaying the action tree must rebuild an equal AST
        ast_from_action = transition_system.build_ast_from_actions(
            tgt_action_tree)
        assert transition_system.compare_ast(ast_from_action, tgt_ast)
        tgt_from_hyp = transition_system.ast_to_surface_code(ast_from_action)
        tgt_from_hyp = tgt_from_hyp.replace("\n\n", "\n", 1)
        tgt_from_hyp = elim_extraline(tgt_from_hyp)
        # print(tgt_line)
        # print(tgt_from_hyp)
        # assert tgt_from_hyp.strip() == tgt_line.strip()
        # sanity check
        # tgt_action_infos = get_action_infos(src_toks, tgt_actions)
        example = Example(idx=idx,
                          src_toks=src_toks,
                          tgt_actions=tgt_action_tree,
                          tgt_toks=tgt_toks,
                          tgt_ast=tgt_ast,
                          meta=None)
        examples.append(example)
    return examples
def make_dataset():
    """Build the train/dev/test Example pickles and the vocabulary for torchASN.

    Loads the Python 3.7.12 ASDL grammar, converts every hearthstone split via
    load_dataset, and pickles the results under torchASN/data/hearthstone/.
    """
    grammar = Grammar.from_text(
        open('torchASN/data/hearthstone/python_3_7_12_asdl.txt').read())
    transition_system = Python3TransitionSystem(grammar)
    train_set = load_dataset("train", transition_system)
    dev_set = load_dataset("dev", transition_system)
    test_set = load_dataset("test", transition_system)
    # get vocab from actions (source tokens below a count of 2 are cut off)
    vocab = build_dataset_vocab(train_set, transition_system, src_cutoff=2)
    # cache decision using vocab can be done in train
    pickle.dump(train_set, open('torchASN/data/hearthstone/train.bin', 'wb'))
    pickle.dump(dev_set, open('torchASN/data/hearthstone/dev.bin', 'wb'))
    pickle.dump(test_set, open('torchASN/data/hearthstone/test.bin', 'wb'))
    pickle.dump(vocab, open('torchASN/data/hearthstone/vocab.bin', 'wb'))


if __name__ == "__main__":
    make_dataset()
|
<filename>integration/test/short_region/plot_margin_sweep.py
#!/usr/bin/env python3
#
# Copyright (c) 2015 - 2021, Intel Corporation
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the
# distribution.
#
# * Neither the name of Intel Corporation nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
import pandas
import numpy as np
import glob
from collections import defaultdict
import sys
import math
import geopmpy.io
# TODO: copied from plotter.py
# maybe move these functions into that file
import subprocess
import os
# Probe whether the default matplotlib backend is importable in a clean
# subprocess; if not (e.g. no Tkinter / no display), warn and fall back to
# the non-interactive Agg backend before pyplot is imported below.
try:
    with open(os.devnull, 'w') as FNULL:
        subprocess.check_call("python3 -c 'import matplotlib.pyplot'", stdout=FNULL, stderr=FNULL, shell=True)
except subprocess.CalledProcessError:
    sys.stderr.write('Warning: Unable to use default matplotlib backend ({}). For interactive plotting,'
                     ' please install Tkinter support in the OS. '
                     'For more information see: https://matplotlib.org/faq/usage_faq.html#what-is-a-backend\n'
                     'Trying Agg...\n\n'
                     .format(os.getenv('MPLBACKEND', 'TkAgg')))
    import matplotlib
    matplotlib.use('Agg')
class ShortRegionPerfMarginAnalysis:
    """Analysis and plotting for EE-agent perf-margin sweeps over short regions.

    Produces heatmaps of the agent's selected frequency, energy savings, and
    runtime degradation as functions of region big-o and the PERF_MARGIN
    policy value, comparing "learning" runs (min..max frequency range)
    against a fixed-max-frequency baseline.
    """

    @staticmethod
    def profile_to_big_o(df):
        '''
        Used to extract big-o from the profile name and rename profile to big-o only
        '''
        def extract_big_o(profile_str):
            # assumes profile names look like ..._<bigo>_x_y — TODO confirm
            return float(profile_str.split('_')[-3])
        df['Profile'] = df['Profile'].apply(extract_big_o)
        return df

    @staticmethod
    def extract_bigo_from_region(df):
        '''
        Assumes the bigo is on the end of the region name. Adds a new column
        for the bigo and renames region to the base region.
        TODO: some interference from auto regions (OMPT)
        '''
        def extract_big_o(region_str):
            # OMPT auto regions and unsuffixed names carry no big-o.
            if '_' not in region_str or 'OMPT' in region_str:
                return float('nan')
            return float(region_str.split('_')[-1])

        def extract_region(region_str):
            if '_' not in region_str or 'OMPT' in region_str:
                return region_str
            return '_'.join(region_str.split('_')[:-1])

        bigos = df['region'].apply(extract_big_o)
        bigos = bigos.rename('bigo')
        df['region'] = df['region'].apply(extract_region)
        df = pandas.concat([df, bigos], axis=1)
        return df

    def __init__(self, outdir='.'):
        # Frequency sweep bounds in Hz; hard-coded for now.
        self._min_freq = 1000000000  # TODO: use geopmread
        self._max_freq = 2100000000  # TODO: use geopmread
        self._step_freq = 100000000
        # TODO: max is baseline for comparison
        self._outdir = outdir

    # TODO: select by multiple column values at once
    def get_baseline_data(self, df):
        """Rows where the policy pins min and max frequency at the maximum."""
        baseline_df = df.loc[(df['FREQ_MIN'] == self._max_freq) & (df['FREQ_MAX'] == self._max_freq)]
        return baseline_df

    def get_learning_data(self, df):
        """Rows where the policy spans the full [min, max] frequency range."""
        learning_df = df.loc[(df['FREQ_MIN'] == self._min_freq) & (df['FREQ_MAX'] == self._max_freq)]
        return learning_df

    # TODO: static methods might be nice as helpers in io.py
    @staticmethod
    def get_column_values(df, col_name):
        """Sorted unique non-NaN values of a column."""
        rv = df[col_name].dropna().unique()
        return sorted(rv)

    @staticmethod
    def filter_by_column_value(df, col_name, col_value):
        """Rows where col_name equals col_value."""
        df = df.loc[df[col_name] == col_value]
        return df

    def plot_selected_frequencies(self, df, title_label):
        '''
        Plot with region runtime and perf margin vs selected frequency
        TODO: move to plotter?
        Input should be filtered to the agent+policy of interest (learning runs)
        and region of interest
        '''
        fdf = self.get_learning_data(df)
        big_os = self.get_column_values(fdf, 'bigo')
        perf_margins = self.get_column_values(fdf, 'PERF_MARGIN')
        selected = []
        selected_error = []
        # Build a (big-o x perf-margin) grid of mean/stdev selected frequency.
        for bigo in big_os:
            tmp = []
            etmp = []
            for pp in perf_margins:
                rpdf = self.filter_by_column_value(fdf, 'bigo', bigo)
                rpdf = self.filter_by_column_value(rpdf, 'PERF_MARGIN', pp)
                freq = rpdf['requested-online-frequency'].mean()
                freq = freq / 1e9
                error = rpdf['requested-online-frequency'].std()
                error = error / 1e9
                tmp.append(freq)
                etmp.append(error)
            selected.append(tmp)
            selected_error.append(etmp)
        ## TODO: need to handle NAN somehow
        # NAN means EE agent failed to learn.
        # clean up long floats
        perf_margins = ['{:.3f}'.format(yy) for yy in perf_margins]
        # update xtick labels to be scientific notation
        big_os = ['$2^{{{}}}$'.format(int(math.log2(xx*10000))) for xx in big_os]
        x_label = 'big-o (100us = $10^{{-4}}$s)'
        cmap, norm, freqs, z_thresh = self.frequency_colormap()
        self.plot_heatmap(data=selected, cmap=cmap, norm=norm, zbar_range=freqs, z_thresh=z_thresh,
                          zbar_label='selected frequency (GHz)',
                          x_range=big_os, x_label=x_label,
                          y_range=perf_margins, y_label='perf margin (max % degradation)',
                          title='EE Agent Selected Frequency' + title_label,
                          outdir=self._outdir)
        cmap, norm, freqs, z_thresh = self.frequency_error_colormap()
        self.plot_heatmap(data=selected_error, cmap=cmap, norm=norm, zbar_range=freqs, z_thresh=z_thresh,
                          zbar_label='stdev selected frequency (GHz)',
                          x_range=big_os, x_label=x_label,
                          y_range=perf_margins, y_label='perf margin (max % degradation)',
                          title='EE Agent Std Dev Selected Frequency' + title_label,
                          outdir=self._outdir)

    def frequency_colormap(self):
        """Colormap/norm spanning the swept frequency range in GHz."""
        freqs = np.arange(self._min_freq, self._max_freq + self._step_freq, self._step_freq)
        freqs = [xx / 1e9 for xx in freqs]
        # below this GHz value the cell text is drawn in white for contrast
        z_thresh = 1.7
        cmap = cm.get_cmap('magma', len(freqs))
        norm = colors.BoundaryNorm(freqs, cmap.N)
        return cmap, norm, freqs, z_thresh

    def frequency_error_colormap(self):
        """Colormap/norm for the stdev-of-selected-frequency heatmap (GHz)."""
        freqs = np.linspace(0.0, self._step_freq * 5, 10)
        freqs = [xx / 1e9 for xx in freqs]
        z_thresh = 0.5
        cmap = cm.get_cmap('magma', len(freqs))
        norm = colors.BoundaryNorm(freqs, cmap.N)
        return cmap, norm, freqs, z_thresh

    @staticmethod
    def comparison_3d(base_df, comp_df, x_dim, y_dim, z_dim, baseline_y=False):
        """Percent difference of z_dim means (base vs comp) over an x/y grid.

        Returns (xvals, yvals, data) where data[x][y] is the percent change of
        comp relative to base at that grid cell.
        """
        xvals = ShortRegionPerfMarginAnalysis.get_column_values(comp_df, x_dim)
        yvals = ShortRegionPerfMarginAnalysis.get_column_values(comp_df, y_dim)
        data = []
        for xx in xvals:
            tmp = []
            for yy in yvals:
                base = ShortRegionPerfMarginAnalysis.filter_by_column_value(base_df, x_dim, xx)
                comp = ShortRegionPerfMarginAnalysis.filter_by_column_value(comp_df, x_dim, xx)
                # TODO: is baseline also swept over both dims?
                if baseline_y:
                    base = ShortRegionPerfMarginAnalysis.filter_by_column_value(base, y_dim, yy)
                comp = ShortRegionPerfMarginAnalysis.filter_by_column_value(comp, y_dim, yy)
                # TODO: have a separate heatmap with error
                rs = (base[z_dim].mean() - comp[z_dim].mean()) / base[z_dim].mean() * 100
                tmp.append(rs.mean())
            data.append(tmp)
        return xvals, yvals, data

    def plot_energy_savings(self, base_df, comp_df, title_label):
        """Heatmap of package-energy savings vs the sticker-frequency baseline."""
        big_os, perf_margins, savings = self.comparison_3d(base_df, comp_df, 'bigo', 'PERF_MARGIN', 'package-energy (joules)')
        # clean up long floats
        perf_margins = ['{:.3f}'.format(yy) for yy in perf_margins]
        # update xtick labels to be scientific notation
        big_os = ['$2^{{{}}}$'.format(int(math.log2(xx*10000))) for xx in big_os]
        x_label = 'big-o (100us = $10^{{-4}}$s)'
        cmap, norm, percents, z_thresh = self.energy_savings_colormap()
        self.plot_heatmap(data=savings, cmap=cmap, norm=norm, zbar_range=percents, z_thresh=z_thresh,
                          zbar_label='% energy savings vs. sticker',
                          x_range=big_os, x_label=x_label,
                          y_range=perf_margins, y_label='perf margin (max % degradation)',
                          title='Energy Savings' + title_label, outdir=self._outdir)

    def energy_savings_colormap(self):
        """Colormap/norm for 0-50% energy savings."""
        percents = np.linspace(0.0, 50.0, 20)
        z_thresh = 0.0
        cmap = cm.get_cmap('autumn')
        norm = colors.Normalize(percents.min(), percents.max())
        return cmap, norm, percents, z_thresh

    def plot_performance_degradation(self, base_df, comp_df, title_label):
        """Heatmap of runtime degradation vs the sticker-frequency baseline."""
        big_os, perf_margins, savings = self.comparison_3d(base_df, comp_df, 'bigo', 'PERF_MARGIN', 'runtime (sec)')
        # clean up long floats
        perf_margins = ['{:.3f}'.format(yy) for yy in perf_margins]
        # update xtick labels to be scientific notation
        big_os = ['$2^{{{}}}$'.format(int(math.log2(xx*10000))) for xx in big_os]
        x_label = 'big-o (100us = $10^{{-4}}$s)'
        cmap, norm, percents, z_thresh = self.runtime_degradation_colormap()
        self.plot_heatmap(data=savings, cmap=cmap, norm=norm, zbar_range=percents, z_thresh=z_thresh,
                          zbar_label='% runtime degradation vs. sticker',
                          x_range=big_os, x_label=x_label,
                          y_range=perf_margins, y_label='perf margin (max % degradation)',
                          title='Runtime Degradation' + title_label, outdir=self._outdir)

    def runtime_degradation_colormap(self):
        ''' Pick range of values, colormap, text color threshold that look good for data'''
        percents = np.linspace(-20.0, 20.0, 20)
        z_thresh = -14.0
        cmap = cm.get_cmap('coolwarm')
        norm = colors.Normalize(percents.min(), percents.max())
        return cmap, norm, percents, z_thresh

    @staticmethod
    def plot_heatmap(data, cmap, norm, zbar_range, zbar_label, z_thresh,
                     x_range, x_label, y_range, y_label, title, outdir):
        """Render one annotated heatmap and save it as <title>.png in outdir."""
        # need to transpose the data for imshow
        data = np.array(data)
        data = data.T
        f, ax = plt.subplots()
        im = ax.imshow(data, interpolation='none', cmap=cmap, norm=norm)
        cbar = ax.figure.colorbar(im, ax=ax, ticks=zbar_range)
        cbar.ax.set_ylabel(zbar_label, rotation=-90, va='bottom')
        ax.set_xlabel(x_label)
        # NOTE(review): labels are set before ticks here (and for y below);
        # matplotlib convention is set_xticks then set_xticklabels — confirm
        # the rendered tick labels line up as intended.
        ax.set_xticklabels(x_range)
        ax.set_xticks(np.arange(len(x_range)))
        ax.set_ylabel(y_label)
        ax.set_yticklabels(y_range)
        ax.set_yticks(np.arange(len(y_range)))
        # display text value over color
        for x in range(len(x_range)):
            for y in range(len(y_range)):
                # note reversal of x and y for data access
                color = 'black'
                if data[y][x] < z_thresh:
                    color = 'white'
                ax.text(x, y, '{:.2f}'.format(data[y][x]), ha='center', va='center',
                        color=color, size=8)
        ######################
        # fix for top and bottom being cut off
        b, t = ax.get_ylim()
        b += 0.5
        t -= 0.5
        ax.set_ylim(b, t)
        ################
        plt.title(title)
        f.tight_layout()
        filename = '{}.png'.format(title.replace(' ', '_').replace('\n', '_'))
        plt.savefig(os.path.join(outdir, filename))
        plt.close()
if __name__ == '__main__':
    # get reports location from command line for now
    if len(sys.argv) < 2:
        sys.stderr.write('Pass path to reports.\n')
        sys.exit(1)
    outdir = sys.argv[1]
    # Load every *.report file under the given directory into one DataFrame.
    collection = geopmpy.io.RawReportCollection('*.report', dir_name=outdir)
    df = collection.get_df()
    # rename region and extract big-o
    df = ShortRegionPerfMarginAnalysis.extract_bigo_from_region(df)
    # TODO: this requires exact match currently but could take a pattern or substring
    region_name = 'timed_scaling'
    df = ShortRegionPerfMarginAnalysis.filter_by_column_value(df, 'region', region_name)
    main = ShortRegionPerfMarginAnalysis(outdir=outdir)
    region_label = ':\n{} region'.format(region_name)
    # Selected-frequency heatmaps use only the learning runs internally.
    main.plot_selected_frequencies(df, region_label)
    base_df = main.get_baseline_data(df)
    comp_df = main.get_learning_data(df)
    main.plot_energy_savings(base_df, comp_df, region_label)
    main.plot_performance_degradation(base_df, comp_df, region_label)
|
# -*- coding: utf-8 -*-
"""
Reference implementation of RiWalk.
Author: <NAME>
For more details, refer to the paper:
RiWalk: Fast Structural Node Embedding via Role Identification
ICDM, 2019
"""
import argparse
import json
import time
import RiWalkGraph
from gensim.models import Word2Vec
from gensim.models.keyedvectors import Word2VecKeyedVectors
import networkx as nx
import os
import glob
import logging
import sys
def debug(type_, value, tb):
    """Exception hook that drops into pdb post-mortem when run interactively.

    Falls back to the default excepthook inside an interactive interpreter
    (sys.ps1 present) or when stderr is not attached to a terminal.
    """
    non_interactive = hasattr(sys, 'ps1') or not sys.stderr.isatty()
    if non_interactive:
        sys.__excepthook__(type_, value, tb)
        return
    import traceback
    import pdb
    traceback.print_exception(type_, value, tb)
    print(u"\n")
    pdb.pm()
def parse_args():
    """
    Parses the RiWalk arguments.
    """
    parser = argparse.ArgumentParser(description="Run RiWalk")
    # Declarative option table: (flag names, add_argument keyword arguments).
    option_specs = [
        (('--input',), dict(nargs='?', default='graphs/karate.edgelist',
                            help='Input graph path')),
        (('--output',), dict(nargs='?', default='embs/karate.emb',
                             help='Embeddings path')),
        (('--dimensions',), dict(type=int, default=128,
                                 help='Number of dimensions. Default is 128.')),
        (('--walk-length',), dict(type=int, default=10,
                                  help='Length of walk per source. Default is 10.')),
        (('--num-walks',), dict(type=int, default=80,
                                help='Number of walks per source. Default is 80.')),
        (('--window-size',), dict(type=int, default=10,
                                  help='Context size for optimization. Default is 10.')),
        (('--until-k',), dict(type=int, default=4,
                              help='Neighborhood size k. Default is 4.')),
        (('--iter',), dict(default=5, type=int,
                           help='Number of epochs in SGD. Default is 5.')),
        (('--workers',), dict(type=int, default=4,
                              help='Number of parallel workers. Default is 4.')),
        (('--flag',), dict(nargs='?', default='sp',
                           help='Flag indicating using RiWalk-SP(sp) or RiWalk-WL(wl). Default is sp.')),
        (('--without-discount',), dict(action='store_true', default=False,
                                       help='Flag indicating not using discount.')),
        (('--debug',), dict(dest="debug", action='store_true', default=False,
                            help="drop a debugger if an exception is raised.")),
        (('-l', '--log'), dict(dest="log", default="DEBUG",
                               help="Log verbosity level. Default is DEBUG.")),
    ]
    for names, options in option_specs:
        parser.add_argument(*names, **options)
    return parser.parse_args()
class Sentences(object):
    """
    a wrapper of random walk files to feed to word2vec
    """

    def __init__(self, file_names):
        # Paths of the walk files (one whitespace-separated walk per line).
        self.file_names = file_names

    def __iter__(self):
        # Open every file and interleave their lines round-robin, yielding
        # each line as a token list, until all files are exhausted.
        fs = []
        for file_name in self.file_names:
            fs.append(open(file_name))
        while True:
            # flag stays 0 only when every file returned EOF this round.
            flag = 0
            for i, f in enumerate(fs):
                line = f.readline()
                if line != '':
                    flag = 1
                    yield line.split()
            if not flag:
                # All files exhausted: best-effort close, then stop iteration.
                try:
                    for f in fs:
                        f.close()
                except:
                    pass
                return
class RiWalk:
    """Pipeline driver: read graph, generate role-identified random walks
    (via RiWalkGraph), learn word2vec embeddings, and map them back to the
    original node ids."""

    def __init__(self, args):
        self.args = args
        # Remove walk files left over from any previous run.
        os.system('rm -rf walks/__random_walks_*.txt')

    def learn_embeddings(self):
        """
        learn embeddings from random walks.
        hs: 0:negative sampling 1:hierarchica softmax
        sg: 0:CBOW 1:skip-gram
        """
        dim = self.args.dimensions
        window_size = self.args.window_size
        workers = self.args.workers
        iter_num = self.args.iter
        logging.debug('begin learning embeddings')
        learning_begin_time = time.time()
        walk_files = glob.glob('walks/__random_walks_*.txt')
        sentences = Sentences(walk_files)
        # NOTE(review): `size=`/`iter=` are the pre-4.0 gensim Word2Vec
        # keyword names (renamed vector_size/epochs in 4.0) — confirm the
        # pinned gensim version.
        model = Word2Vec(sentences, size=dim, window=window_size, min_count=0, sg=1, hs=0, workers=workers, iter=iter_num)
        learning_end_time = time.time()
        logging.debug('done learning embeddings')
        logging.debug('learning_time: {}'.format(learning_end_time - learning_begin_time))
        print('learning_time', learning_end_time - learning_begin_time, flush=True)
        return model.wv

    def read_graph(self):
        """Read the edge list as an undirected graph with unit edge weights."""
        logging.debug('begin reading graph')
        read_begin_time = time.time()
        input_file_name = self.args.input
        nx_g = nx.read_edgelist(input_file_name, nodetype=int, create_using=nx.DiGraph())
        for edge in nx_g.edges():
            nx_g[edge[0]][edge[1]]['weight'] = 1
        nx_g = nx_g.to_undirected()
        logging.debug('done reading graph')
        read_end_time = time.time()
        logging.debug('read time: {}'.format(read_end_time - read_begin_time))
        return nx_g

    def preprocess_graph(self, nx_g):
        """
        1. relabel nodes with 0,1,2,3,...,N.
        2. convert graph to adjacency representation as a list of lists.
        """
        logging.debug('begin preprocessing graph')
        preprocess_begin_time = time.time()
        mapping = {_: i for i, _ in enumerate(nx_g.nodes())}
        nx_g = nx.relabel_nodes(nx_g, mapping)
        nx_g = [list(nx_g.neighbors(_)) for _ in range(len(nx_g))]
        logging.info('#nodes: {}'.format(len(nx_g)))
        logging.info('#edges: {}'.format(sum([len(_) for _ in nx_g]) // 2))
        logging.debug('done preprocessing')
        logging.debug('preprocess time: {}'.format(time.time() - preprocess_begin_time))
        return nx_g, mapping

    def learn(self, nx_g, mapping):
        """Generate walks, learn embeddings, and re-key vectors by the
        original node ids using *mapping* (original -> relabeled)."""
        g = RiWalkGraph.RiGraph(nx_g, self.args)
        walk_time, bfs_time, ri_time, walks_writing_time = g.process_random_walks()
        # Reported per-worker (wall time divided by worker count).
        print('walk_time', walk_time / self.args.workers, flush=True)
        print('bfs_time', bfs_time / self.args.workers, flush=True)
        print('ri_time', ri_time / self.args.workers, flush=True)
        print('walks_writing_time', walks_writing_time / self.args.workers, flush=True)
        wv = self.learn_embeddings()
        original_wv = Word2VecKeyedVectors(self.args.dimensions)
        original_nodes = list(mapping.keys())
        original_vecs = [wv.word_vec(str(mapping[node])) for node in original_nodes]
        original_wv.add(entities=list(map(str, original_nodes)), weights=original_vecs)
        return original_wv

    def riwalk(self):
        """Run the full pipeline.

        Returns (keyed vectors, elapsed seconds excluding the graph read).
        """
        nx_g = self.read_graph()
        read_end_time = time.time()
        nx_g, mapping = self.preprocess_graph(nx_g)
        wv = self.learn(nx_g, mapping)
        return wv, time.time() - read_end_time
def main():
    """Parse arguments, configure logging, run RiWalk, save embeddings and timing."""
    args = parse_args()
    numeric_level = getattr(logging, args.log.upper(), None)
    LOG_FORMAT = "%(asctime)s - %(levelname)s - %(message)s"
    DATE_FORMAT = "%m/%d/%Y %H:%M:%S %p"
    # Start each run with a fresh log file.
    os.system('rm -f RiWalk.log')
    logging.basicConfig(filename='RiWalk.log', level=numeric_level, format=LOG_FORMAT, datefmt=DATE_FORMAT)
    logging.info(str(vars(args)))
    if args.debug:
        # Route uncaught exceptions through the pdb-aware hook.
        sys.excepthook = debug
    wv, total_time = RiWalk(args).riwalk()
    write_begin_time = time.time()
    wv.save_word2vec_format(fname=args.output, binary=False)
    logging.debug('embedding_writing_time: {}'.format(time.time() - write_begin_time))
    # Record the pipeline time (sans read/write) next to the embeddings.
    json.dump({'time': total_time}, open(args.output.replace('.emb', '_time.json'), 'w'))


if __name__ == '__main__':
    main()
|
<reponame>861934367/cgat
##########################################################################
#
# MRC FGU Computational Genomics Group
#
# $Id$
#
# Copyright (C) 2009 <NAME>
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
##########################################################################
'''
Masker.py - Wrapper for sequence masking tools
==============================================
:Author: <NAME>
:Release: $Id$
:Date: |today|
:Tags: Python
Code
----
'''
import os
import subprocess
import tempfile
import string
import re
import random
from CGAT import Experiment as E
from CGAT import Genomics as Genomics
from CGAT import FastaIterator as FastaIterator
import cStringIO as StringIO
# Class for calling masking programs.
class Masker:
"""a masker preserves gaps, but it does not preserve
whitespace characters."""
mExecutable = None
mOptions = ""
mHasPeptideMasking = False
mHasNucleicAcidMasking = False
# set to true if masker outputs softmasked sequence
soft_mask = False
def __init__(self):
pass
def getAlphabet(self, sequence):
"""get sequence type (aa,na,codons)."""
s1 = re.sub("[acgtxn\-.]", "", sequence.lower())
s2 = re.sub("[-.]", "", sequence.lower())
if float(len(s1)) < (len(s2) * 0.1):
alphabet = "na"
if len(sequence) % 3 == 0:
alphabet = "codons"
else:
alphabet = "aa"
return alphabet
def __call__(self, sequence):
"""mask a sequence."""
sequence = re.sub("\s", "", sequence)
a = self.getAlphabet(sequence)
seq = list(sequence)
if len(seq) < 5:
# do not mask empty/short sequences
pass
elif a == "aa" and self.mHasPeptideMasking:
c = 0
m = self.maskSequence(sequence)
if self.soft_mask:
m = re.sub("[a-z]", "x", m)
for p, m in zip(sequence, m):
if m in "Xx":
if p.isupper():
seq[c] = "X"
else:
seq[c] = "x"
c += 1
elif a == "codons" and self.mHasPeptideMasking:
peptide_sequence = Genomics.TranslateDNA2Protein(sequence)
masked_sequence = self.maskSequence(peptide_sequence)
if self.soft_mask:
masked_sequence = re.sub("[a-z]", "x", masked_sequence)
c = 0
for p, m in zip(peptide_sequence, masked_sequence):
if m in "Xx":
if p.isupper():
seq[c:c + 3] = ["N"] * 3
else:
seq[c:c + 3] = ["n"] * 3
c += 3
elif a in ("na", "codons") and self.mHasNucleicAcidMasking:
masked_sequence = self.maskSequence(sequence)
if self.soft_mask:
masked_sequence = re.sub("[a-z]", "N", masked_sequence)
return masked_sequence
else:
raise ValueError(
"masking of sequence type %s not implemented." % a)
return "".join(seq)
def maskSequence(self, peptide_sequence):
"""mask peptide sequence
"""
Masker.__init__(self)
outfile, filename_peptide = tempfile.mkstemp()
os.write(outfile, ">test\n%s\n" % (peptide_sequence))
os.close(outfile)
infile = filename_peptide
statement = self.mCommand % locals()
E.debug("statement: %s" % statement)
s = subprocess.Popen(statement,
shell=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
close_fds=True)
(out, err) = s.communicate()
if s.returncode != 0:
raise RuntimeError(
"Error in running %s \n%s\nTemporary directory" %
(statement, err))
os.remove(filename_peptide)
masked_sequence = re.sub(
"\s", "", string.join(out.split("\n")[1:], ""))
return masked_sequence
    def maskSequences(self, sequences):
        '''mask a collection of sequences.

        Writes all sequences to a single temporary FASTA file (records
        keyed by list position), runs one external command over it and
        returns the masked sequences in the original order.

        :raises RuntimeError: if the external command exits non-zero.
        '''
        outfile, infile = tempfile.mkstemp()
        for x, s in enumerate(sequences):
            os.write(outfile, ">%i\n%s\n" % (x, s))
        os.close(outfile)
        # ``infile`` is interpolated into self.mCommand via locals()
        statement = self.mCommand % locals()
        E.debug("statement: %s" % statement)
        s = subprocess.Popen(statement,
                             shell=True,
                             stdout=subprocess.PIPE,
                             stderr=subprocess.PIPE,
                             close_fds=True)
        (out, err) = s.communicate()
        if s.returncode != 0:
            raise RuntimeError(
                "Error in running %s \n%s\nTemporary directory" %
                (statement, err))
        # parse the tool's FASTA output back into plain sequence strings
        # (Python 2 StringIO)
        result = [
            x.sequence for x in FastaIterator.iterate(StringIO.StringIO(out))]
        os.remove(infile)
        return result
class MaskerBias (Masker):
    """mask compositionally biased regions in peptide sequences
    using the external ``biasdb.pl`` script."""
    # command template; %(infile)s is substituted by Masker.maskSequence(s)
    mCommand = "biasdb.pl %(infile)s"
    mHasPeptideMasking = True
class MaskerSeg (Masker):
    """mask low-complexity regions in peptide sequences using
    NCBI ``segmasker``."""
    # mCommand = "seg %(infile)s 12 2.2 2.5 -x"
    mCommand = ("segmasker -in %(infile)s -window 12 -locut 2.2 "
                "-hicut 2.5 -outfmt fasta")
    mHasPeptideMasking = True
    # segmasker reports masked residues as lower case (soft masking)
    soft_mask = True
class MaskerDustMasker(Masker):
    '''use dustmasker. masked chars are returned as
    lower case characters.'''
    # command template; %(infile)s is substituted by Masker.maskSequence(s)
    mCommand = "dustmasker -outfmt fasta -in %(infile)s"
    mHasNucleicAcidMasking = True
class MaskerRandom (Masker):
    """randomly mask a proportion of positions in a sequence
    in multiple alignment.

    Masked positions are replaced with 'x'. For codon alphabets whole
    codons are masked.
    """

    def __init__(self, proportion=10, *args, **kwargs):
        """
        :param proportion: percentage (0-100) of maskable positions to mask.
        """
        Masker.__init__(self, *args, **kwargs)
        self.mProportion = proportion / 100.0

    def __call__(self, sequence):
        """mask a sequence and return the masked copy."""
        sequence = re.sub("\s", "", sequence)
        a = self.getAlphabet(sequence)
        # codon sequences are masked in frame-sized (3) steps
        if a == "codons":
            frame = 3
        else:
            frame = 1
        # candidate positions: frame starts that are not alignment gaps
        # NOTE(review): for codons only the first base is gap-checked.
        positions = [
            x for x in range(0, len(sequence), frame) if sequence[x] != "-"]
        to_mask = random.sample(
            positions, int(len(positions) * self.mProportion))
        s = list(sequence)
        for x in to_mask:
            for y in range(x, x + frame):
                # bugfix: the original read ``s[x] == "x"`` -- a no-op
                # comparison on the wrong index; assign to each position.
                s[y] = "x"
        # debug ``print`` statements from the original were removed
        return "".join(s)
if __name__ == "__main__":
    # quick manual smoke test of the maskers
    random_masker = MaskerRandom()
    print(random_masker("AAA AAA AAA AAA --- AAA AAA AAA AAA"))
    dust_masker = MaskerDustMasker()
    print(dust_masker.maskSequences(("AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA",
                                     "GGGGGGGGGG", )))
def maskSequences(sequences, masker=None):
    '''return a list of masked sequence.

    *masker* can be one of
    dust/dustmasker * run dustmasker on sequences
    softmask * use softmask to hardmask sequences

    With ``masker=None`` the sequences are simply upper-cased. Any
    soft-masked (lower case) characters are hard-masked to 'N' before
    returning. Raises :class:`ValueError` for an unknown masker.
    '''
    if masker == "softmask":
        # the genome sequence is repeat soft-masked
        masked_seq = sequences
    elif masker in ("dust", "dustmasker"):
        # run dust on upper-cased input
        masked_seq = MaskerDustMasker().maskSequences(
            [x.upper() for x in sequences])
    elif masker is None:
        masked_seq = [x.upper() for x in sequences]
    else:
        raise ValueError("unknown masker %s" % masker)
    # hard mask softmasked characters
    return [re.sub("[a-z]", "N", x) for x in masked_seq]
|
from rest_framework import serializers
from django.contrib.auth import get_user_model, authenticate # authenticate function which comes with Django and it's a Django helper command for working with the Django authentication system. So you simply pass in the username and password and you can authenticate a request
from django.utils.translation import ugettext_lazy as _
class UserSerializer(serializers.ModelSerializer):
    """Serializer for the users object.

    Converts the custom user model to/from JSON for the user API,
    exposing only ``email``, ``password`` and ``name``.
    """

    class Meta:
        model = get_user_model()
        fields = ('email', 'password', 'name')
        # bugfix: restore the corrupted '<PASSWORD>' placeholder.
        # Password must never be read back through the API and must be at
        # least 5 characters long.
        extra_kwargs = {'password': {'write_only': True, 'min_length': 5}}

    def create(self, validated_data):
        """Create a new user with encrypted password and return it"""
        # Use the manager's create_user() (not create()) so the password
        # is stored hashed rather than as clear text.
        return get_user_model().objects.create_user(**validated_data)

    def update(self, instance, validated_data):
        """Update a user, setting the password correctly and return it"""
        # Pop the password so the default update() does not write the raw
        # value; set_password() hashes it.
        password = validated_data.pop('password', None)
        user = super().update(instance, validated_data)
        if password:
            user.set_password(password)
            user.save()
        return user
# Token endpoint.
# This is going to be an endpoint that you can make a HTTP POST request and you can generate a temporary auth token that you can then use to authenticate future requests with the API.
# With our API we're going to be using token authentication.
# So the way that you log in is you use this API to generate a token and then you provide that token as the authentication header for future requests which you want to authenticate.
# The benefit of this is you don't need to send the user's username and password with every single request that you make.
# You just need to send it once to create the token and then you can use that token for future requests and if you ever want to revoke the token you can do that in the database.
class AuthTokenSerializer(serializers.Serializer):
    """Serializer for the user authentication object.

    Variant of DRF's token serializer that authenticates with an email
    address instead of a username.
    """

    email = serializers.CharField()
    password = serializers.CharField(
        style={'input_type': 'password'},
        # passwords may contain significant whitespace; DRF would
        # otherwise strip it
        trim_whitespace=False
    )

    def validate(self, attrs):
        """Validate and authenticate the user"""
        # ``attrs`` carries every declared serializer field
        user = authenticate(
            request=self.context.get('request'),
            username=attrs.get('email'),
            password=attrs.get('password')
        )
        if user is None:
            msg = _('Unable to authenticate with provided credentials')
            # DRF converts this into an HTTP 400 response
            raise serializers.ValidationError(msg, code='authorization')
        attrs['user'] = user
        return attrs
|
# GENERATED BY KOMAND SDK - DO NOT EDIT
import komand
import json
class Component:
    # Human-readable description of this Komand action.
    DESCRIPTION = "Get alerts by domain"


class Input:
    # Key of the action's single input parameter.
    DOMAIN = "domain"


class Output:
    # Key of the action's output field.
    RESULTS = "results"
class GetAlertForDomainInput(komand.Input):
    """Input schema for the 'Get alerts by domain' action.

    Requires a single string property ``domain``. The JSON text below is
    emitted by the Komand SDK generator and must not be hand-edited.
    """
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "domain": {
      "type": "string",
      "title": "Domain",
      "description": "Domain to get",
      "order": 1
    }
  },
  "required": [
    "domain"
  ]
}
""")

    def __init__(self):
        # NOTE(review): ``super(self.__class__, ...)`` breaks under further
        # subclassing; kept as-is because this file is generated.
        super(self.__class__, self).__init__(self.schema)
class GetAlertForDomainOutput(komand.Output):
    """Output schema for the 'Get alerts by domain' action.

    Validates that the action emits ``results``: an array of alert
    objects whose shape is given by the embedded ``alert`` definition.
    The JSON text below is emitted by the Komand SDK generator and must
    not be hand-edited.
    """
    schema = json.loads("""
   {
  "type": "object",
  "title": "Variables",
  "properties": {
    "results": {
      "type": "array",
      "title": "Alerts",
      "description": "All alerts that match the given domain",
      "items": {
        "$ref": "#/definitions/alert"
      },
      "order": 1
    }
  },
  "required": [
    "results"
  ],
  "definitions": {
    "alert": {
      "type": "object",
      "title": "alert",
      "properties": {
        "Actor": {
          "type": "string",
          "title": "Actor",
          "order": 8
        },
        "AlertId": {
          "type": "string",
          "title": "Alert ID",
          "order": 5
        },
        "AlertPart": {
          "type": "integer",
          "title": "AlertPart",
          "order": 39
        },
        "AlertTime": {
          "type": "string",
          "title": "Alert Time",
          "order": 31
        },
        "AlertTitle": {
          "type": "string",
          "title": "Alert Title",
          "order": 30
        },
        "BuiltInMachineTags": {
          "type": "string",
          "title": "BuiltInMachineTags",
          "order": 15
        },
        "Category": {
          "type": "string",
          "title": "Category",
          "order": 1
        },
        "CommandLine": {
          "type": "string",
          "title": "CommandLine",
          "order": 11
        },
        "ComputerDnsName": {
          "type": "string",
          "title": "Computer DNS Name",
          "order": 43
        },
        "CreatorIocName": {
          "type": "string",
          "title": "Creator IoC Name",
          "order": 27
        },
        "CreatorIocValue": {
          "type": "string",
          "title": "CreatorIocValue",
          "order": 22
        },
        "Description": {
          "type": "string",
          "title": "Description",
          "order": 16
        },
        "DeviceID": {
          "type": "string",
          "title": "Device ID",
          "order": 32
        },
        "ExternalId": {
          "type": "string",
          "title": "External ID",
          "order": 46
        },
        "FileHash": {
          "type": "string",
          "title": "FileHash",
          "order": 40
        },
        "FileName": {
          "type": "string",
          "title": "FileName",
          "order": 44
        },
        "FilePath": {
          "type": "string",
          "title": "FilePath",
          "order": 41
        },
        "FullId": {
          "type": "string",
          "title": "Full ID",
          "order": 12
        },
        "InternalIPv4List": {
          "type": "string",
          "title": "Internal IP v4 List",
          "order": 25
        },
        "InternalIPv6List": {
          "type": "string",
          "title": "Internal IP v6 List",
          "order": 34
        },
        "IoaDefinitionId": {
          "type": "string",
          "title": "IoaDefinitionId",
          "order": 3
        },
        "IocName": {
          "type": "string",
          "title": "IoC Name",
          "order": 42
        },
        "IocUniqueId": {
          "type": "string",
          "title": "IoC Unique ID",
          "order": 18
        },
        "IocValue": {
          "type": "string",
          "title": "IoC Value",
          "order": 36
        },
        "IpAddress": {
          "type": "string",
          "title": "IP Address",
          "order": 14
        },
        "LastProcessedTimeUtc": {
          "type": "string",
          "title": "Last Processed Time UTC",
          "order": 7
        },
        "LinkToWDATP": {
          "type": "string",
          "title": "Link to Windows Defender ATP",
          "order": 19
        },
        "LogOnUsers": {
          "type": "string",
          "title": "LogOnUsers",
          "order": 38
        },
        "MachineDomain": {
          "type": "string",
          "title": "Machine Domain",
          "order": 29
        },
        "MachineGroup": {
          "type": "string",
          "title": "MachineGroup",
          "order": 21
        },
        "MachineName": {
          "type": "string",
          "title": "MachineName",
          "order": 45
        },
        "Md5": {
          "type": "string",
          "title": "MD5",
          "order": 24
        },
        "RemediationAction": {
          "type": "string",
          "title": "RemediationAction",
          "order": 33
        },
        "RemediationIsSuccess": {
          "type": "string",
          "title": "RemediationIsSuccess",
          "order": 9
        },
        "Severity": {
          "type": "string",
          "title": "Severity",
          "order": 4
        },
        "Sha1": {
          "type": "string",
          "title": "SHA-1",
          "order": 26
        },
        "Sha256": {
          "type": "string",
          "title": "SHA-256",
          "order": 13
        },
        "Source": {
          "type": "string",
          "title": "Source",
          "order": 10
        },
        "ThreatCategory": {
          "type": "string",
          "title": "ThreatCategory",
          "order": 20
        },
        "ThreatFamily": {
          "type": "string",
          "title": "ThreatFamily",
          "order": 17
        },
        "ThreatName": {
          "type": "string",
          "title": "Threat Name",
          "order": 35
        },
        "Url": {
          "type": "string",
          "title": "URL",
          "order": 28
        },
        "UserDefinedMachineTags": {
          "type": "string",
          "title": "UserDefinedMachineTags",
          "order": 23
        },
        "UserDomain": {
          "type": "string",
          "title": "UserDomain",
          "order": 6
        },
        "UserName": {
          "type": "string",
          "title": "UserName",
          "order": 2
        },
        "WasExecutingWhileDetected": {
          "type": "string",
          "title": "WasExecutingWhileDetected",
          "order": 37
        }
      }
    }
  }
}
""")

    def __init__(self):
        # NOTE(review): ``super(self.__class__, ...)`` breaks under further
        # subclassing; kept as-is because this file is generated.
        super(self.__class__, self).__init__(self.schema)
|
import sys
import numpy as np
from PIL import Image
import torchvision
from torch.utils.data.dataset import Subset
from sklearn.metrics.pairwise import cosine_similarity, euclidean_distances
import torch
import torch.nn.functional as F
import random
import os
import json
from numpy.testing import assert_array_almost_equal
def fix_seed(seed=888):
    """Seed numpy, Python ``random`` and all torch RNGs for reproducibility.

    Also forces deterministic cuDNN kernels (may be slower).

    :param seed: integer seed applied to every RNG.
    """
    np.random.seed(seed)
    random.seed(seed)
    torch.manual_seed(seed)
    # no-op on CPU-only machines, seeds every GPU otherwise
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
    # bugfix: the original seeded numpy a second time here; once suffices.
def get_cifar100(root, cfg_trainer, train=True,
                 transform_train=None, transform_val=None,
                 download=True, noise_file = '', teacher_idx=None, seed=888):
    """Build CIFAR-100 train/val (or test) datasets with synthetic label noise.

    :param root: dataset root directory passed to torchvision.
    :param cfg_trainer: dict-like config; uses 'asym' (noise type) and,
        inside the dataset classes, 'percent' (noise ratio).
    :param train: if True return a noisy train split plus clean val split;
        otherwise return ([], test set).
    :param noise_file: unused here -- NOTE(review): confirm whether callers
        rely on it before removing.
    :param teacher_idx: optional indices used to truncate the train set.
    :param seed: RNG seed for the split and the noise injection.
    :return: (train_dataset, val_dataset) -- val_dataset is None when empty.
    """
    base_dataset = torchvision.datasets.CIFAR100(root, train=train, download=download)
    print (seed)
    if train:
        fix_seed(seed)
        # stratified 90/10 split on the clean labels
        train_idxs, val_idxs = train_val_split(base_dataset.targets, seed)
        train_dataset = CIFAR100_train(root, cfg_trainer, train_idxs, train=True, transform=transform_train)
        val_dataset = CIFAR100_val(root, cfg_trainer, val_idxs, train=train, transform=transform_val)
        # inject label noise; noise injection also snapshots the clean
        # ground-truth labels inside the train dataset
        if cfg_trainer['asym']:
            train_dataset.asymmetric_noise()
            if len(val_dataset) > 0:
                val_dataset.asymmetric_noise()
        else:
            train_dataset.symmetric_noise()
            if len(val_dataset) > 0:
                val_dataset.symmetric_noise()
        if teacher_idx is not None:
            # keep only the samples selected by the teacher
            print(len(teacher_idx))
            train_dataset.truncate(teacher_idx)
        print(f"Train: {len(train_dataset)} Val: {len(val_dataset)}") # Train: 45000 Val: 5000
    else:
        fix_seed(seed)
        train_dataset = []
        # indexs=None + train=False -> full test split
        val_dataset = CIFAR100_val(root, cfg_trainer, None, train=train, transform=transform_val)
        print(f"Test: {len(val_dataset)}")
    if len(val_dataset) == 0:
        return train_dataset, None
    else:
        return train_dataset, val_dataset
    # return train_dataset, val_dataset
def train_val_split(base_dataset, seed=888):
    """Stratified 90/10 split of sample indices by class label.

    :param base_dataset: sequence of integer class labels (e.g.
        ``CIFAR100.targets``). Despite the historical name (and the
        original's wrong ``CIFAR10`` annotation) this is a label list,
        not a dataset object.
    :param seed: RNG seed used for shuffling.
    :return: (train_idxs, val_idxs) -- shuffled lists of indices with
        ~90% of each class in train and the remainder in val.
    """
    fix_seed(seed)
    num_classes = 100
    labels = np.array(base_dataset)
    # number of training samples kept per class (assumes balanced classes)
    train_n = int(len(labels) * 0.9 / num_classes)
    train_idxs = []
    val_idxs = []
    for cls in range(num_classes):
        idxs = np.where(labels == cls)[0]
        np.random.shuffle(idxs)
        train_idxs.extend(idxs[:train_n])
        val_idxs.extend(idxs[train_n:])
    np.random.shuffle(train_idxs)
    np.random.shuffle(val_idxs)
    return train_idxs, val_idxs
class CIFAR100_train(torchvision.datasets.CIFAR100):
    """CIFAR-100 training subset with injectable synthetic label noise.

    ``train_labels`` holds the (possibly corrupted) labels used for
    training; ``train_labels_gt`` holds the clean labels and is created
    only when one of the noise-injection methods runs.
    """

    def __init__(self, root, cfg_trainer, indexs, train=True,
                 transform=None, target_transform=None,
                 download=False, seed=888):
        """
        :param cfg_trainer: dict-like config; uses 'percent' (noise ratio).
        :param indexs: indices selecting this subset of the CIFAR data.
        :param seed: RNG seed for reproducible noise injection.
        """
        super(CIFAR100_train, self).__init__(root, train=train,
                                             transform=transform, target_transform=target_transform,
                                             download=download)
        fix_seed(seed)
        self.num_classes = 100
        self.cfg_trainer = cfg_trainer
        # subset of images/labels selected by ``indexs``
        self.train_data = self.data[indexs]
        self.train_labels = np.array(self.targets)[indexs]
        self.indexs = indexs
        # per-sample class-transition buffer -- not used in this file;
        # presumably filled by the trainer (TODO confirm)
        self.prediction = np.zeros((len(self.train_data), self.num_classes, self.num_classes), dtype=np.float32)
        # indices whose labels were corrupted by symmetric_noise()
        self.noise_indx = []
        self.seed = seed
        #self.all_refs_encoded = torch.zeros(self.num_classes,self.num_ref,1024, dtype=np.float32)
        self.count = 0

    def symmetric_noise(self):
        """Replace labels of a 'percent' fraction of samples with random classes."""
        # snapshot the clean labels before corrupting them
        self.train_labels_gt = self.train_labels.copy()
        fix_seed(self.seed)
        indices = np.random.permutation(len(self.train_data))
        for i, idx in enumerate(indices):
            if i < self.cfg_trainer['percent'] * len(self.train_data):
                self.noise_indx.append(idx)
                # note: the random label may coincide with the true label
                self.train_labels[idx] = np.random.randint(self.num_classes, dtype=np.int32)

    def multiclass_noisify(self, y, P, random_state=0):
        """ Flip classes according to transition probability matrix T.
        It expects a number between 0 and the number of classes - 1.
        """
        fix_seed(self.seed)
        assert P.shape[0] == P.shape[1]
        assert np.max(y) < P.shape[0]
        # row stochastic matrix
        assert_array_almost_equal(P.sum(axis=1), np.ones(P.shape[1]))
        assert (P >= 0.0).all()
        m = y.shape[0]
        new_y = y.copy()
        flipper = np.random.RandomState(random_state)
        for idx in np.arange(m):
            i = y[idx]
            # draw a vector with only an 1
            flipped = flipper.multinomial(1, P[i, :], 1)[0]
            new_y[idx] = np.where(flipped == 1)[0]
        return new_y

    # def build_for_cifar100(self, size, noise):
    #     """ random flip between two random classes.
    #     """
    #     assert(noise >= 0.) and (noise <= 1.)

    #     P = np.eye(size)
    #     cls1, cls2 = np.random.choice(range(size), size=2, replace=False)
    #     P[cls1, cls2] = noise
    #     P[cls2, cls1] = noise
    #     P[cls1, cls1] = 1.0 - noise
    #     P[cls2, cls2] = 1.0 - noise

    #     assert_array_almost_equal(P.sum(axis=1), 1, 1)
    #     return P

    def build_for_cifar100(self, size, noise):
        """ The noise matrix flips to the "next" class with probability 'noise'.
        """
        assert(noise >= 0.) and (noise <= 1.)

        P = (1. - noise) * np.eye(size)
        for i in np.arange(size - 1):
            P[i, i + 1] = noise

        # adjust last row
        P[size - 1, 0] = noise

        assert_array_almost_equal(P.sum(axis=1), 1, 1)
        return P

    def asymmetric_noise(self, asym=False, random_shuffle=False):
        """Inject class-dependent noise inside CIFAR-100 superclass blocks.

        Builds a block-diagonal transition matrix: within each of the 20
        blocks of 5 consecutive labels, a label flips to the next one with
        probability 'percent'. NOTE(review): assumes labels i..i+4 share a
        superclass -- confirm the label ordering. ``asym`` and
        ``random_shuffle`` are accepted but unused.
        """
        # snapshot the clean labels before corrupting them
        self.train_labels_gt = self.train_labels.copy()
        P = np.eye(self.num_classes)
        n = self.cfg_trainer['percent']
        nb_superclasses = 20
        nb_subclasses = 5
        fix_seed(self.seed)
        if n > 0.0:
            for i in np.arange(nb_superclasses):
                init, end = i * nb_subclasses, (i+1) * nb_subclasses
                P[init:end, init:end] = self.build_for_cifar100(nb_subclasses, n)

            y_train_noisy = self.multiclass_noisify(self.train_labels, P=P,
                                                    random_state=0)
            actual_noise = (y_train_noisy != self.train_labels).mean()
            assert actual_noise > 0.0
            self.train_labels = y_train_noisy

    def truncate(self, teacher_idx):
        """Keep only the samples selected by ``teacher_idx``.

        Must be called after noise injection (uses ``train_labels_gt``).
        """
        self.train_data = self.train_data[teacher_idx]
        self.train_labels = self.train_labels[teacher_idx]
        self.train_labels_gt = self.train_labels_gt[teacher_idx]

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target, index, target_gt) where target is the
            (possibly noisy) class index and target_gt the clean one.
            Requires symmetric_noise()/asymmetric_noise() to have run.
        """
        img, target, target_gt = self.train_data[index], self.train_labels[index], self.train_labels_gt[index]

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target, index, target_gt

    def __len__(self):
        """Number of samples in this (possibly truncated) subset."""
        return len(self.train_data)
class CIFAR100_val(torchvision.datasets.CIFAR100):
    """CIFAR-100 validation/test subset, optionally with label noise.

    With ``train=True`` it wraps the validation indices of the training
    split; with ``train=False`` it wraps the full test split. Clean
    labels are always kept in ``train_labels_gt``.
    """

    def __init__(self, root, cfg_trainer, indexs, train=True,
                 transform=None, target_transform=None,
                 download=False):
        """
        :param cfg_trainer: dict-like config; uses 'percent' (noise ratio).
        :param indexs: validation indices (ignored when train=False).
        """
        super(CIFAR100_val, self).__init__(root, train=train,
                                           transform=transform, target_transform=target_transform,
                                           download=download)

        # self.train_data = self.data[indexs]
        # self.train_labels = np.array(self.targets)[indexs]
        self.num_classes = 100
        self.cfg_trainer = cfg_trainer
        if train:
            self.train_data = self.data[indexs]
            self.train_labels = np.array(self.targets)[indexs]
        else:
            self.train_data = self.data
            self.train_labels = np.array(self.targets)
        # clean labels snapshotted up-front (unlike CIFAR100_train, which
        # snapshots them inside the noise methods)
        self.train_labels_gt = self.train_labels.copy()

    def symmetric_noise(self):
        """Replace labels of a 'percent' fraction of samples with random classes.

        NOTE(review): unlike CIFAR100_train.symmetric_noise this does not
        reseed the RNG -- confirm this is intended.
        """
        indices = np.random.permutation(len(self.train_data))
        for i, idx in enumerate(indices):
            if i < self.cfg_trainer['percent'] * len(self.train_data):
                self.train_labels[idx] = np.random.randint(self.num_classes, dtype=np.int32)

    def multiclass_noisify(self, y, P, random_state=0):
        """ Flip classes according to transition probability matrix T.
        It expects a number between 0 and the number of classes - 1.
        """
        # debug prints left in by the original author
        print (P.shape[0] == P.shape[1])
        print (max(y) < P.shape[0])
        assert P.shape[0] == P.shape[1]
        assert np.max(y) < P.shape[0]
        # row stochastic matrix
        assert_array_almost_equal(P.sum(axis=1), np.ones(P.shape[1]))
        assert (P >= 0.0).all()
        m = y.shape[0]
        new_y = y.copy()
        flipper = np.random.RandomState(random_state)
        for idx in np.arange(m):
            i = y[idx]
            # draw a vector with only an 1
            flipped = flipper.multinomial(1, P[i, :], 1)[0]
            new_y[idx] = np.where(flipped == 1)[0]
        return new_y

    def build_for_cifar100(self, size, noise):
        """ random flip between two random classes.

        NOTE(review): differs from CIFAR100_train.build_for_cifar100,
        which flips to the "next" class -- confirm this asymmetry between
        train and val noise models is intended.
        """
        assert(noise >= 0.) and (noise <= 1.)

        P = np.eye(size)
        cls1, cls2 = np.random.choice(range(size), size=2, replace=False)
        P[cls1, cls2] = noise
        P[cls2, cls1] = noise
        P[cls1, cls1] = 1.0 - noise
        P[cls2, cls2] = 1.0 - noise

        assert_array_almost_equal(P.sum(axis=1), 1, 1)
        return P

    def asymmetric_noise(self, asym=False, random_shuffle=False):
        """Inject class-dependent noise inside CIFAR-100 superclass blocks.

        ``asym`` and ``random_shuffle`` are accepted but unused.
        """
        P = np.eye(self.num_classes)
        n = self.cfg_trainer['percent']
        nb_superclasses = 20
        nb_subclasses = 5

        if n > 0.0:
            for i in np.arange(nb_superclasses):
                init, end = i * nb_subclasses, (i+1) * nb_subclasses
                P[init:end, init:end] = self.build_for_cifar100(nb_subclasses, n)

            y_train_noisy = self.multiclass_noisify(self.train_labels, P=P,
                                                    random_state=0)
            actual_noise = (y_train_noisy != self.train_labels).mean()
            assert actual_noise > 0.0
            self.train_labels = y_train_noisy

    def __len__(self):
        """Number of samples in this subset."""
        return len(self.train_data)

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target, index, target_gt) where target is the
            (possibly noisy) class index and target_gt the clean one.
        """
        img, target, target_gt = self.train_data[index], self.train_labels[index], self.train_labels_gt[index]

        # doing this so that it is consistent with all other datasets
        # to return a PIL Image
        img = Image.fromarray(img)

        if self.transform is not None:
            img = self.transform(img)

        if self.target_transform is not None:
            target = self.target_transform(target)

        return img, target, index, target_gt
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.