text
stringlengths
0
1.25M
meta
stringlengths
47
1.89k
#!/usr/bin/env python
"""Prepare utterance-level ASR decoding inputs.

Packs the per-meeting wav files into a single zip archive, writes the
``.scp`` list files that point at it, and emits a ``decode.sh`` script
that runs the ASR decoding tool followed by WER scoring.
"""
import sys
import glob
import os
import zipfile
import argparse

import tqdm
import soundfile as sf
import numpy as np


def save_single_channel_wav(source_path, tgt_path):
    """Copy every wav under ``source_path`` to ``tgt_path``, keeping channel 0 only.

    NOTE(review): assumes multi-channel input — ``s[:, 0]`` raises on a
    mono file, which soundfile reads back as a 1-D array. Confirm inputs.
    """
    for item in glob.glob(os.path.join(source_path, '*.wav')):
        samples, rate = sf.read(item)
        fname = os.path.basename(item)
        sf.write(os.path.join(tgt_path, fname), samples[:, 0], rate)


def main(args):
    """Build the zip archive, scp lists and ``decode.sh`` under ``decode_path``."""
    tool_path = os.path.normpath(args.tool_path)
    decode_path = os.path.normpath(args.decode_path)
    input_path = os.path.normpath(args.input_path)

    # Create some directories.
    zip_dir = os.path.join(decode_path, 'zip')
    os.makedirs(zip_dir, exist_ok=True)
    decoding_cmd = os.path.join(decode_path, 'decoding_cmd')
    os.makedirs(decoding_cmd, exist_ok=True)
    decoding_result = os.path.join(decode_path, 'decoding_result')
    os.makedirs(decoding_result, exist_ok=True)

    meeting = glob.glob(os.path.join(input_path, 'overlap*'))
    transcription_file = os.path.join(input_path, 'utterance_transcription.txt')

    # Pack all per-meeting wavs into one archive; each entry is prefixed
    # with its meeting name so file names stay unique inside the zip.
    # Bug fix: use a context manager so the archive is actually finalized
    # (the original never called zipf.close()); also dropped a dead
    # all-meetings glob that was immediately overwritten in the loop.
    zip_file = os.path.join(zip_dir, 'utterances.zip')
    with zipfile.ZipFile(zip_file, 'w') as zipf:
        for meet in tqdm.tqdm(meeting):
            meet_name = os.path.basename(meet)
            for item in glob.glob(os.path.join(meet, '*.wav')):
                fname = meet_name + '_' + os.path.basename(item)
                zipf.write(item, arcname=fname)

    zip_list = os.path.join(decoding_cmd, 'zip_list.scp')
    with open(zip_list, 'w') as f:
        f.write(zip_file + '\n')

    meeting_list = os.path.join(decoding_cmd, 'meeting_list.scp')
    meeting = [os.path.basename(x) for x in meeting]
    with open(meeting_list, 'w') as f:
        for item in meeting:
            f.write(item + '\n')

    # Then make the decoding command.
    os.makedirs(os.path.join('..', 'exp'), exist_ok=True)
    with open(os.path.join(decoding_cmd, 'decode.sh'), 'w') as f:
        cmd = 'sh {} {} {} {}'.format(
            os.path.join(tool_path, 'run_asr_utterance.sh'),
            zip_list,
            decoding_result,
            transcription_file,
        )
        f.write(cmd + '\n')
        # Decoding may run as root inside a container; hand ownership back.
        cmd = 'chown -R {}:{} {}'.format(os.getuid(), os.getgid(),
                                         decoding_result)
        f.write(cmd + '\n')
        # Then do the WER eval as well.
        cmd = ('python {} --meeting_list {} --decode_path {} '
               '--experiment_setup {} --development_session {} '
               '--result_path {}').format(
            os.path.normpath(os.path.join(tool_path, '../python/get_wer.py')),
            meeting_list,
            os.path.join(decoding_result, 'utterances', 'LM_fglarge'),
            args.experiment_setup,
            args.development_session,
            decoding_result)
        f.write(cmd + '\n')


def make_argparse():
    """Build the command-line parser for this script."""
    parser = argparse.ArgumentParser(description='Generate ASR input files')
    parser.add_argument('--input_path', metavar='<path>', required=True,
                        help='Directory where input audio files are retrieved.')
    parser.add_argument('--decode_path', metavar='<path>', required=True,
                        help='Directory in which decoding is to be performed')
    parser.add_argument('--tool_path', metavar='<path>', required=True)
    parser.add_argument('--experiment_setup', default='raw', type=str,
                        required=False)
    parser.add_argument('--development_session', default='session0', type=str,
                        required=False)
    return parser


if __name__ == '__main__':
    parser = make_argparse()
    args = parser.parse_args()
    main(args)
{"hexsha": "16d1566b22b0bf17a7b424a4fe99cdf9169f39d4", "size": 3269, "ext": "py", "lang": "Python", "max_stars_repo_path": "asr/python/gen_asrinput_raw_utterance.py", "max_stars_repo_name": "chenzhuo1011/libri_css", "max_stars_repo_head_hexsha": "9e3b7b0c9bffd8ef6da19f7056f3a2f2c2484ffa", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 80, "max_stars_repo_stars_event_min_datetime": "2020-05-08T00:23:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T10:54:53.000Z", "max_issues_repo_path": "asr/python/gen_asrinput_raw_utterance.py", "max_issues_repo_name": "chenzhuo1011/libri_css", "max_issues_repo_head_hexsha": "9e3b7b0c9bffd8ef6da19f7056f3a2f2c2484ffa", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-05-16T06:47:55.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-18T22:10:36.000Z", "max_forks_repo_path": "asr/python/gen_asrinput_raw_utterance.py", "max_forks_repo_name": "chenzhuo1011/libri_css", "max_forks_repo_head_hexsha": "9e3b7b0c9bffd8ef6da19f7056f3a2f2c2484ffa", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-05-29T12:10:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-01-18T18:04:27.000Z", "avg_line_length": 32.69, "max_line_length": 194, "alphanum_fraction": 0.6901193025, "include": true, "reason": "import numpy", "num_tokens": 816}
/**
 * Swaggy Jenkins
 * Jenkins API clients generated from Swagger / Open API specification
 *
 * OpenAPI spec version: 1.1.1
 * Contact: blah@cliffano.com
 *
 * NOTE: This class is auto generated by OpenAPI-Generator 3.2.1-SNAPSHOT.
 * https://openapi-generator.tech
 * Do not edit the class manually.
 */

#include "HudsonMasterComputermonitorData.h"

#include <string>
#include <sstream>

#include <boost/property_tree/ptree.hpp>
#include <boost/property_tree/json_parser.hpp>

using boost::property_tree::ptree;
using boost::property_tree::read_json;
using boost::property_tree::write_json;

namespace org {
namespace openapitools {
namespace server {
namespace model {

// Default-construct with empty string members; the shared_ptr monitor
// members are left default-initialized (null).
HudsonMasterComputermonitorData::HudsonMasterComputermonitorData()
{
    m_Hudson_node_monitors_ArchitectureMonitor = "";
    m__class = "";
}

HudsonMasterComputermonitorData::~HudsonMasterComputermonitorData()
{
}

// Serialize only the scalar (string) members to a single-line JSON string.
std::string HudsonMasterComputermonitorData::toJsonString()
{
    std::stringstream ss;
    ptree pt;
    pt.put("Hudson_node_monitors_ArchitectureMonitor",
           m_Hudson_node_monitors_ArchitectureMonitor);
    pt.put("_class", m__class);
    write_json(ss, pt, false);
    return ss.str();
}

// Populate the scalar members from a JSON string; missing keys fall
// back to the empty string.
void HudsonMasterComputermonitorData::fromJsonString(std::string const& jsonString)
{
    std::stringstream ss(jsonString);
    ptree pt;
    read_json(ss, pt);
    m_Hudson_node_monitors_ArchitectureMonitor =
        pt.get("Hudson_node_monitors_ArchitectureMonitor", "");
    m__class = pt.get("_class", "");
}

std::shared_ptr<SwapSpaceMonitorMemoryUsage2>
HudsonMasterComputermonitorData::getHudsonNodeMonitorsSwapSpaceMonitor() const
{
    return m_Hudson_node_monitors_SwapSpaceMonitor;
}

void HudsonMasterComputermonitorData::setHudsonNodeMonitorsSwapSpaceMonitor(
    std::shared_ptr<SwapSpaceMonitorMemoryUsage2> value)
{
    m_Hudson_node_monitors_SwapSpaceMonitor = value;
}

std::shared_ptr<DiskSpaceMonitorDescriptorDiskSpace>
HudsonMasterComputermonitorData::getHudsonNodeMonitorsTemporarySpaceMonitor() const
{
    return m_Hudson_node_monitors_TemporarySpaceMonitor;
}

void HudsonMasterComputermonitorData::setHudsonNodeMonitorsTemporarySpaceMonitor(
    std::shared_ptr<DiskSpaceMonitorDescriptorDiskSpace> value)
{
    m_Hudson_node_monitors_TemporarySpaceMonitor = value;
}

std::shared_ptr<DiskSpaceMonitorDescriptorDiskSpace>
HudsonMasterComputermonitorData::getHudsonNodeMonitorsDiskSpaceMonitor() const
{
    return m_Hudson_node_monitors_DiskSpaceMonitor;
}

void HudsonMasterComputermonitorData::setHudsonNodeMonitorsDiskSpaceMonitor(
    std::shared_ptr<DiskSpaceMonitorDescriptorDiskSpace> value)
{
    m_Hudson_node_monitors_DiskSpaceMonitor = value;
}

std::string
HudsonMasterComputermonitorData::getHudsonNodeMonitorsArchitectureMonitor() const
{
    return m_Hudson_node_monitors_ArchitectureMonitor;
}

void HudsonMasterComputermonitorData::setHudsonNodeMonitorsArchitectureMonitor(
    std::string value)
{
    m_Hudson_node_monitors_ArchitectureMonitor = value;
}

std::shared_ptr<ResponseTimeMonitorData>
HudsonMasterComputermonitorData::getHudsonNodeMonitorsResponseTimeMonitor() const
{
    return m_Hudson_node_monitors_ResponseTimeMonitor;
}

void HudsonMasterComputermonitorData::setHudsonNodeMonitorsResponseTimeMonitor(
    std::shared_ptr<ResponseTimeMonitorData> value)
{
    m_Hudson_node_monitors_ResponseTimeMonitor = value;
}

std::shared_ptr<ClockDifference>
HudsonMasterComputermonitorData::getHudsonNodeMonitorsClockMonitor() const
{
    return m_Hudson_node_monitors_ClockMonitor;
}

void HudsonMasterComputermonitorData::setHudsonNodeMonitorsClockMonitor(
    std::shared_ptr<ClockDifference> value)
{
    m_Hudson_node_monitors_ClockMonitor = value;
}

std::string HudsonMasterComputermonitorData::getClass() const
{
    return m__class;
}

void HudsonMasterComputermonitorData::setClass(std::string value)
{
    m__class = value;
}

}
}
}
}
{"hexsha": "0dee1581b9bb327fc06eb115363ee54fdfa0a83c", "size": 3803, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "clients/cpp-restbed-server/generated/model/HudsonMasterComputermonitorData.cpp", "max_stars_repo_name": "PankTrue/swaggy-jenkins", "max_stars_repo_head_hexsha": "aca35a7cca6e1fcc08bd399e05148942ac2f514b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 23.0, "max_stars_repo_stars_event_min_datetime": "2017-08-01T12:25:26.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-25T03:44:11.000Z", "max_issues_repo_path": "clients/cpp-restbed-server/generated/model/HudsonMasterComputermonitorData.cpp", "max_issues_repo_name": "PankTrue/swaggy-jenkins", "max_issues_repo_head_hexsha": "aca35a7cca6e1fcc08bd399e05148942ac2f514b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 35.0, "max_issues_repo_issues_event_min_datetime": "2017-06-14T03:28:15.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-14T10:25:54.000Z", "max_forks_repo_path": "clients/cpp-restbed-server/generated/model/HudsonMasterComputermonitorData.cpp", "max_forks_repo_name": "PankTrue/swaggy-jenkins", "max_forks_repo_head_hexsha": "aca35a7cca6e1fcc08bd399e05148942ac2f514b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 11.0, "max_forks_repo_forks_event_min_datetime": "2017-08-31T19:00:20.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-19T12:04:12.000Z", "avg_line_length": 30.918699187, "max_line_length": 140, "alphanum_fraction": 0.8267157507, "num_tokens": 909}
c     Read the QC key file and return its flag / timescale fields.
c     AJ_Kettle, 17Nov2020
      SUBROUTINE get_qckey(s_directory_qckey,s_filename_qckey,
     +                     l_qckey,
     +                     s_qckey_sourceflag,s_qckey_c3sflag,
     +                     s_qckey_timescale)

      IMPLICIT NONE

c************************************************************************
c     Arguments
      CHARACTER(LEN=300) :: s_directory_qckey
      CHARACTER(LEN=300) :: s_filename_qckey
      INTEGER            :: l_qckey
      CHARACTER(LEN=*)   :: s_qckey_sourceflag(100)
      CHARACTER(LEN=*)   :: s_qckey_c3sflag(100)
      CHARACTER(LEN=*)   :: s_qckey_timescale(100)
c*****
c     Local variables
      INTEGER            :: i,j,k,ii,jj,kk
      CHARACTER(LEN=300) :: s_command
      INTEGER            :: io
      CHARACTER(LEN=300) :: s_pathandname
      CHARACTER(LEN=300) :: s_linget
      CHARACTER(LEN=300) :: s_linsto(100)
      INTEGER            :: i_st,i_en
c************************************************************************
c     Read every line of the key file into s_linsto, noting the indices
c     just inside the 's-' (start) and 'e-' (end) marker rows.
c     NOTE(review): i_st/i_en stay unset if the markers are absent.
      s_pathandname=TRIM(s_directory_qckey)//TRIM(s_filename_qckey)

      OPEN(UNIT=2,FILE=TRIM(s_pathandname),
     +     FORM='formatted',STATUS='OLD',ACTION='READ')
      ii=0
      DO
         READ(2,1000,IOSTAT=io) s_linget
 1000    FORMAT(a300)
         IF (io .GT. 0) THEN
            print*, 'Check input. Something went wrong'
            GOTO 100
         ELSE IF (io .LT. 0) THEN
            print*, 'end of file reached'
            GOTO 100
         ELSE
            ii=ii+1
            s_linsto(ii)=s_linget
            IF (s_linget(1:2).EQ.'s-') THEN
               i_st=ii+1
            ENDIF
            IF (s_linget(1:2).EQ.'e-') THEN
               i_en=ii-1
            ENDIF
         ENDIF
      ENDDO
 100  CONTINUE
      CLOSE(UNIT=2)
c*****
c     Extract the fixed-column fields from the stored lines.
      ii=0
      DO i=i_st,i_en
         ii=ii+1
         s_linget=s_linsto(i)
         s_qckey_sourceflag(ii)=s_linget(1:2)
         s_qckey_c3sflag(ii)   =s_linget(6:7)
         s_qckey_timescale(ii) =s_linget(11:23)
      ENDDO
      l_qckey=ii
c*****
      RETURN
      END
{"hexsha": "2bc8b9b1fa6c49984e1d0b6c915c92d0662324f1", "size": 2538, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "r202106_gsom_to_cdm/Step1_cdmmake/Subroutine/get_qckey.f", "max_stars_repo_name": "glamod/glamod-nuim", "max_stars_repo_head_hexsha": "eed6f9d7d71b0c456ef39fdea6b58677e13ab50c", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "r202106_gsom_to_cdm/Step1_cdmmake/Subroutine/get_qckey.f", "max_issues_repo_name": "glamod/glamod-nuim", "max_issues_repo_head_hexsha": "eed6f9d7d71b0c456ef39fdea6b58677e13ab50c", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 23, "max_issues_repo_issues_event_min_datetime": "2022-01-28T13:57:39.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-28T09:34:41.000Z", "max_forks_repo_path": "r202106_gsom_to_cdm/Step1_cdmmake/Subroutine/get_qckey.f", "max_forks_repo_name": "glamod/glamod-nuim", "max_forks_repo_head_hexsha": "eed6f9d7d71b0c456ef39fdea6b58677e13ab50c", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2019-01-24T12:06:06.000Z", "max_forks_repo_forks_event_max_datetime": "2019-01-24T12:06:06.000Z", "avg_line_length": 23.9433962264, "max_line_length": 73, "alphanum_fraction": 0.5524034673, "num_tokens": 836}
#!/bin/env python
"Simulate seeing based on a model."

import sys
from argparse import ArgumentParser
import configparser
from collections import namedtuple
import csv

import numpy as np
import pandas as pd
import astropy.time

# constants

__author__ = "Eric H. Neilsen, Jr."
__maintainer__ = "Eric H. Neilsen, Jr."
__email__ = "neilsen@fnal.gov"

year_length_days = 365.24217

# exception classes

# interface functions


def seeing(start_mjd, end_mjd, freq,
           outer_scale,
           mean_log_r0, seasonal_amplitude, seasonal_phase,
           nightly_coeff, nightly_innovation,
           sample_coeff, sample_innovation,
           init_nightly_offset=0.0,
           init_sample_offset=0.0,
           start_elapsed_seconds=0,
           nightly_offsets=None,
           random_seed=None):
    """A generator to generate seeing values.

    Args:
        start_mjd: the MJD of the first generated seeing value
        end_mjd: the MJD of the last generated seeing value
        freq: seconds between generated seeing values
        outer_scale: the von Karman outer scale, in meters
        mean_log_r0: the global mean log10(r0)
        seasonal_amplitude: amplitude of seasonal variation in log10(r0)
        seasonal_phase: phase of seasonal variation in log10(r0)
            (peak r0 in days after November 17)
        nightly_coeff: AR1 model coefficient for nightly variation
        nightly_innovation: amplitude of nightly model variation in log10(r0)
        sample_coeff: AR1 model coefficient for sample variation
        sample_innovation: amplitude of sample model variation in log10(r0)
        init_nightly_offset: starting value for the nightly AR1 series
        init_sample_offset: starting value for the within-night AR1 series
        start_elapsed_seconds: elapsed_seconds value of the first sample
        nightly_offsets: optional pre-computed iterable of nightly offsets
            (overrides the internally generated AR1 series)
        random_seed: optional seed for np.random

    Returns:
        a generator function that generates seeing values at samples

    r0 is measured in meters everywhere.

    Each returned value is a SeeingSample namedtuple (mjd,
    elapsed_seconds, r0, seeing, kol_seeing, dimm_time); dimm_time is the
    literal string 'artificial' for generated samples.

    Example:
       >>> seeing_generator = seeing(61100.0, 61101.0, 300,
       ...                           20,
       ...                           -0.9424, 0.058, 296.5, 0.3, 0.09, 0.7, 0.053,
       ...                           random_seed=6563)
       ...
       >>> for s in list(seeing_generator)[:5]:
       ...     print(s.mjd, s.elapsed_seconds)  # doctest: +SKIP
    """
    if random_seed is not None:
        np.random.seed(random_seed)

    if nightly_offsets is None:
        nightly_offsets = ar1(nightly_coeff, nightly_innovation,
                              init_nightly_offset)

    sample_offsets = ar1(sample_coeff, sample_innovation, init_sample_offset)

    def seasonal_offset(mjd):
        return year_cos(mjd, seasonal_phase, seasonal_amplitude)

    mjd = start_mjd
    elapsed_seconds = start_elapsed_seconds

    # iterate over nights
    for nightly_offset in nightly_offsets:
        if mjd > end_mjd:
            break

        night_log_r0 = (mean_log_r0
                        + seasonal_offset(mjd + 0.5)
                        + nightly_offset)
        night_mjd = calc_night_mjd(mjd)

        # iterate over samples within the night
        for sample_offset in sample_offsets:
            if mjd > end_mjd:
                break
            if calc_night_mjd(mjd) > night_mjd:
                # crossed into the next night; draw a new nightly offset
                break

            log_r0 = night_log_r0 + sample_offset
            r0 = np.power(10, log_r0)
            seeing = vk_seeing(r0, outer_scale)
            # Kolmogorov FWHM in arcsec at 500 nm (Tokovinin 2002, eqn 5)
            kol_seeing = 60*60*np.degrees(0.98*5e-7/r0)
            yield SeeingSample(mjd, elapsed_seconds, r0, seeing,
                               round(kol_seeing, 2), 'artificial')

            elapsed_seconds += freq
            dt = elapsed_seconds - start_elapsed_seconds
            mjd = start_mjd + dt/(24.0*60.0*60.0)


def sim_seeing(fp=sys.stdout, first=False, **kwargs):
    """Generate artificial seeing and write it to a file.

    Args:
        fp: the file pointer to the file to write
        first: when True, write a header row before the first record

    The remaining arguments are as in simsee.seeing
    """
    # Bug fix: write to the fp argument instead of a hard-coded sys.stdout
    # (behavior is unchanged for callers that pass sys.stdout).
    writer = csv.writer(fp, delimiter="\t")
    for seeing_record in seeing(**kwargs):
        if first:
            writer.writerow(seeing_record._fields)
            first = False
        writer.writerow(seeing_record)


def interpolate_seeing(dimm, fp=sys.stdout, **kwargs):
    """Interpolate gaps in seeing data.

    Args:
        dimm: a pandas.DataFrame with the dimm data
        fp: the file pointer to the output data set

    Keyword args consumed here: start_mjd, end_mjd (decimal MJD),
    years_offset (difference between recorded years and reported).
    The remaining arguments are the same as in sim_seeing.
    """
    if 'random_seed' in kwargs:
        random_seed = kwargs['random_seed']
        if random_seed is not None:
            np.random.seed(random_seed)

    start_mjd = kwargs['start_mjd']
    end_mjd = kwargs['end_mjd']
    years_offset = kwargs['years_offset']
    mjd_offset = int(round(year_length_days*years_offset))
    mean_log_r0 = kwargs['mean_log_r0']
    seasonal_amplitude = kwargs['seasonal_amplitude']
    seasonal_phase = kwargs['seasonal_phase']
    nightly_coeff = kwargs['nightly_coeff']
    nightly_innovation = kwargs['nightly_innovation']
    freq = kwargs['freq']
    freq_days = freq/(24.0*60*60)

    # Many but not all of the keyword arguments are propagated
    # directly into sim_seeing
    sim_seeing_kwargs = {k: kwargs[k] for k in
                         ['freq', 'outer_scale', 'mean_log_r0',
                          'seasonal_amplitude', 'seasonal_phase',
                          'nightly_coeff', 'nightly_innovation',
                          'sample_coeff', 'sample_innovation']}

    # Get the mean seeing in each night in the requested range.
    # Do this before we filter on time to include measurements
    # at edge nights that are not within the strict limits,
    # if the limits are part way into their nights.
    nightly_dimm = interpolate_night_seeing(dimm,
                                            calc_night_mjd(start_mjd),
                                            calc_night_mjd(end_mjd) + 1,
                                            years_offset,
                                            mean_log_r0,
                                            seasonal_amplitude,
                                            seasonal_phase,
                                            nightly_coeff,
                                            nightly_innovation)

    # actually filter to get measurements in the requested time range
    dimm_in_time = dimm.query('{0} < mjd < {1}'.format(
        start_mjd-mjd_offset, end_mjd-mjd_offset))
    dimm_in_time = dimm_in_time.copy()
    dimm_in_time['elapsed_seconds'] = np.round(
        (dimm_in_time.mjd+mjd_offset-start_mjd)*24*60*60).astype(int)

    def seasonal_offset(mjd):
        return year_cos(mjd, seasonal_phase, seasonal_amplitude)

    prev_mjd = start_mjd
    # Bug fix: write to fp rather than a hard-coded sys.stdout.
    writer = csv.writer(fp, delimiter="\t")
    writer.writerow(SeeingSample._fields)

    for dimm_time, dimm_row in dimm_in_time.iterrows():
        next_mjd = dimm_row.mjd + mjd_offset
        if next_mjd > prev_mjd + freq_days:
            # There is a gap longer than the sampling period; fill it
            # with artificially generated samples.
            try:
                sim_start_mjd = prev_mjd + freq_days
                start_elapsed_seconds = int(prev_elapsed_seconds + freq)
            except UnboundLocalError:
                # This is the first point
                sim_start_mjd = start_mjd
                start_elapsed_seconds = 0

            sim_start_night = calc_night_mjd(sim_start_mjd)
            sim_end_mjd = min((end_mjd, dimm_row.mjd + mjd_offset - freq_days))
            sim_end_night = calc_night_mjd(sim_end_mjd)

            try:
                init_sample_offset = (prev_log_r0
                                      - nightly_dimm[sim_start_night])
            except UnboundLocalError:
                # This is the first point
                init_sample_offset = 0

            nightly_offsets = [n - seasonal_offset(start_mjd+0.5) - mean_log_r0
                               for n in
                               nightly_dimm.loc[sim_start_night:sim_end_night]]

            sim_seeing(fp,
                       start_mjd=sim_start_mjd,
                       end_mjd=sim_end_mjd,
                       init_sample_offset=init_sample_offset,
                       start_elapsed_seconds=start_elapsed_seconds,
                       nightly_offsets=nightly_offsets,
                       **sim_seeing_kwargs)

        sample_seeing = SeeingSample(next_mjd,
                                     int(dimm_row.elapsed_seconds),
                                     dimm_row.r0,
                                     dimm_row.vk_seeing,
                                     dimm_row.seeing,
                                     dimm_time.isoformat())
        prev_log_r0 = dimm_row.log_r0
        prev_mjd = next_mjd
        prev_elapsed_seconds = dimm_row.elapsed_seconds
        if next_mjd > end_mjd:
            break
        writer.writerow(sample_seeing)


def main():
    """Parse command line arguments and generate a text file."""
    parser = ArgumentParser(
        description="Generate a simulated seeing data set"
                    " for survey strategy simulation.")
    parser.add_argument("config_fname", type=str,
                        help="file with configuration parameters")
    args = parser.parse_args()
    config_fname = args.config_fname

    config = parse_simsee_config(config_fname)
    output_fp = sys.stdout
    if 'dimm_fname' in config:
        dimm = load_dimm(config['dimm_fname'],
                         outer_scale=config['outer_scale'])
        interpolate_seeing(dimm, output_fp, **config)
    else:
        sim_seeing(output_fp, True, **config)
    output_fp.close()
    return 0

# classes


SeeingSample = namedtuple(
    'SeeingSample',
    ['mjd', 'elapsed_seconds', 'r0', 'seeing', 'kol_seeing', 'dimm_time'])

# internal functions & classes


def ar1(coeff, innovation, initial_value=0.0):
    """Generate the next value in an AR1 time series.

    See _Time Series Analysis_ by Cryer and Chan (2010), p. 66

    Args:
        coeff: the regression coefficient (phi in Cryer and Chan)
        innovation: the innovation standard deviation (e in Cryer and Chan)
        initial_value: the initial value in the time series

    Returns:
        the next value in the time series

    Example:
       >>> import random
       >>> from itertools import islice
       >>> np.random.seed(6563)
       >>>
       >>> # Use notation from p. 66 of Cryer and Chan p. 66
       >>> phi, sigma = 0.7, 12.0
       >>>
       >>> gen = ar1(phi, sigma, 100.0)
       >>> tuple(round(y, 3) for y in islice(gen, 8))
       (41.074, 51.996, 38.643, 31.865, 7.083, 8.412, 19.426, 5.219)
       >>>
       >>> # Check that the variance of artificially generated
       >>> # data is close to theoretical expectations
       >>> big_sample = tuple(islice(gen, 100000))
       >>> np.var(big_sample) # doctest: +ELLIPSIS
       283.9848...
       >>>
       >>> # Equation 4.3.3 from Cryer and Chan
       >>> (sigma**2)/(1-phi**2) # doctest: +ELLIPSIS
       282.3529...
    """
    value = initial_value
    while True:
        value = coeff*value + np.random.normal(0.0, innovation)
        yield value


def vk_seeing(r0, outer_scale=20.0, wavelength=5.0e-7):
    """Calculate the seeing using a von Karman model.

    See Tokovinin 2002PASP..114.1156T

    Args:
        r0: the Fried parameter, in meters
        outer_scale: the von Karman outer scale, in meters
        wavelength: the wavelength of light, in meters

    Returns:
        The PSF FWHM, in arcseconds

    >>> vk_seeing(0.12, 20.0) # doctest: +ELLIPSIS
    0.677...
    >>> vk_seeing(0.10, 20.0) # doctest: +ELLIPSIS
    0.826...
    >>> vk_seeing(0.12, 30.0) # doctest: +ELLIPSIS
    0.701...
    """
    # Calculate the DIMM estimate of the seeing using the Kolmogorov model,
    # using eqn 5 from Tokovinin 2002PASP..114.1156T eqn 5
    kol_seeing = 0.98*wavelength/r0

    # Calculate the correction factor required to convert the Kolmogorov model
    # seeing to the von Karman model seeing,
    # using eqn 19 of Tokovinin 2002PASP..114.1156T
    vk_correction2 = 1.0 - 2.183*np.power(r0/outer_scale, 0.356)

    # Apply the correction factor
    seeing_rad = kol_seeing * np.sqrt(vk_correction2)

    # Convert to arcseconds
    seeing = np.degrees(seeing_rad)*(60.0*60.0)
    return seeing


def calc_night_mjd(mjd, obs_lon=-70.8062):
    """Calculate the integer MJD designating a night at Cerro Pachon.

    Args:
        mjd: the floating point modified Julian date
        obs_lon: the observatory longitude (degrees East of lon=0)

    Return:
        an integer MJD that designates a night

    >>> calc_night_mjd(61123.1)
    61122
    >>> calc_night_mjd(61123.5)
    61123
    >>> calc_night_mjd(61123.8)
    61123
    """
    # Longitude of Cerro Pachon is -70.8062 degrees
    # The 0.5 shifts the rollover to noon from midnight,
    # the obs_lon/360 shifts it from noon at Greenwich to
    # (mean solar) noon wherever you want.
    ctio_night_shift = -0.5 - obs_lon/360.0
    mjd = np.floor(mjd + ctio_night_shift).astype(int)
    return mjd


def interpolate_night_seeing(dimm, start_mjd, end_mjd, years_offset,
                             mean_log_r0,
                             seasonal_amplitude, seasonal_phase,
                             nightly_coeff, nightly_innovation,
                             random_seed=None):
    """Get nightly seeing means, interpolating when necessary.

    Args:
        dimm: a pandas.DataFrame of seeing samples
        start_mjd: the first (integer night) mjd in the sequence
        end_mjd: the last (integer night) mjd in the sequence
        years_offset: difference between recorded years and reported
        mean_log_r0: the global mean log10(r0)
        seasonal_amplitude: amplitude of seasonal variation in log10(r0)
        seasonal_phase: phase of seasonal variation in log10(r0)
            (peak r0 in days after November 17)
        nightly_coeff: AR1 model coefficient for nightly variation
        nightly_innovation: amplitude of nightly model variation in log10(r0)
        random_seed: optional seed for np.random

    Returns:
        a pandas.Series with log10(r0) values for every night from
        start_mjd to end_mjd
    """
    if random_seed is not None:
        np.random.seed(random_seed)

    mjd_offset = int(round(year_length_days*years_offset))

    def seasonal_offset(mjd):
        return year_cos(mjd, seasonal_phase, seasonal_amplitude)

    dimm_nights = dimm.groupby('night_mjd').agg({'log_r0': 'mean'})

    mjds = []
    log_r0s = []
    nightly_offset = 0
    for mjd in range(start_mjd, end_mjd+1):
        # Bug fix: compute the seasonal baseline unconditionally. In the
        # original it was assigned inside the try block, so a KeyError on
        # the very first night raised UnboundLocalError in the handler.
        season_log_r0 = mean_log_r0 + seasonal_offset(mjd + 0.5)
        try:
            log_r0 = dimm_nights.loc[mjd-mjd_offset, 'log_r0']
            nightly_offset = log_r0 - season_log_r0
        except KeyError:
            # No data for this night: advance the AR1 model instead.
            nightly_offset = nightly_coeff * nightly_offset \
                + np.random.normal(0.0, nightly_innovation)
            log_r0 = season_log_r0 + nightly_offset

        mjds.append(mjd)
        log_r0s.append(log_r0)

    dimm_interp_nights = pd.Series(log_r0s, index=mjds)
    return dimm_interp_nights


def load_dimm(fname, obs_lon=-70.8062, outer_scale=20):
    """Load DIMM data from an HDF5 file and add derived columns.

    Args:
        fname: the name of the file from which to load DIMM data
        obs_lon: the observatory longitude, in degrees east
            (NOTE(review): currently unused; calc_night_mjd falls back to
            its own default — confirm intent)
        outer_scale: the von Karman outer scale, in meters

    Return:
        a pandas.DataFrame with the data, including r0, log_r0,
        vk_seeing, mjd, and night_mjd derived columns
    """
    dimm = pd.read_hdf(fname)
    # Discard clearly unphysical measurements.
    dimm = dimm.query('0.05 < seeing < 10.0').copy()
    # Invert the Kolmogorov FWHM (arcsec) to get r0 in meters at 500 nm.
    dimm['r0'] = 0.98*5e-7/np.radians(dimm.seeing/(60*60))
    dimm['log_r0'] = np.log10(dimm.r0)
    dimm['vk_seeing'] = vk_seeing(dimm.r0, outer_scale)
    dimm['mjd'] = dimm.index.to_julian_date()-2400000.5
    dimm['night_mjd'] = calc_night_mjd(dimm.mjd)
    return dimm


def year_cos(mjd, seasonal_phase, seasonal_amplitude):
    """Calculate the seasonal offset assuming a cos with a period of 1 year.

    Args:
        mjd: the MJD
        seasonal_phase: the phase (in days past November 17)
        seasonal_amplitude: the amplitude in log10(r0), r0 in meters

    Return:
        seasonal offset in log10(r0), r0 in meters

    Why Nov 17? The epoch for MJD is 1858-11-17

    >>> # MJD 60700 is 2025-01-25
    >>> year_cos(60700, 24.7, 0.1) # doctest: +ELLIPSIS
    0.0999991...
    >>> year_cos(60701, 24.7, 0.1) # doctest: +ELLIPSIS
    0.0999770...
    >>> year_cos(60699, 24.7, 0.1) # doctest: +ELLIPSIS
    0.0999915...
    >>> year_cos(60699+365.242/2, 24.7, 0.1) # doctest: +ELLIPSIS
    -0.0999915...
    """
    mjd_jan_1_2000 = 51544
    angle = (mjd - mjd_jan_1_2000 - seasonal_phase)*2*np.pi/year_length_days
    return seasonal_amplitude * np.cos(angle)


def parse_simsee_config(config_fname):
    """Parse the simsee configuration file.

    Args:
        config_fname: path of the INI-style configuration file

    Return:
        a dict of configuration values; dimm_fname/years_offset are
        present only when the [dimm] section is configured
    """
    config = configparser.ConfigParser()
    config.read(config_fname)
    config_dict = {
        'start_mjd': astropy.time.Time(config['simulation']['start_date']).mjd,
        'end_mjd': astropy.time.Time(config['simulation']['end_date']).mjd,
        'freq': config.getint('simulation', 'freq'),
        'random_seed': config.getint('simulation', 'random_seed'),
        'outer_scale': config.getfloat('optics', 'outer_scale'),
        'mean_log_r0': config.getfloat('seasonal', 'mean'),
        'seasonal_amplitude': config.getfloat('seasonal', 'c'),
        'seasonal_phase': config.getfloat('seasonal', 'd'),
        'nightly_coeff': config.getfloat('nightly', 'coeff'),
        'nightly_innovation': config.getfloat('nightly', 'innovation'),
        'sample_coeff': config.getfloat('sample', 'coeff'),
        'sample_innovation': config.getfloat('sample', 'innovation')}

    # Bug fix: catch only the "dimm section not configured" errors instead
    # of a bare except that hid typos and unrelated failures.
    try:
        dimm_fname = config.get('dimm', 'fname')
        years_offset = config.getint('dimm', 'years_offset')
        config_dict['dimm_fname'] = dimm_fname
        config_dict['years_offset'] = years_offset
    except (configparser.NoSectionError, configparser.NoOptionError):
        pass

    return config_dict


if __name__ == '__main__':
    status = main()
    sys.exit(status)
{"hexsha": "cda36d8ad25e2bd2df7ba87bd5e27e0be186671a", "size": 19676, "ext": "py", "lang": "Python", "max_stars_repo_path": "code/simsee/python/simsee.py", "max_stars_repo_name": "galdering/ObsStrat", "max_stars_repo_head_hexsha": "1032e5c66ee9d7e6b8d8ddde443670489d32e7fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 6, "max_stars_repo_stars_event_min_datetime": "2018-07-16T21:36:27.000Z", "max_stars_repo_stars_event_max_datetime": "2021-11-23T02:47:29.000Z", "max_issues_repo_path": "code/simsee/python/simsee.py", "max_issues_repo_name": "galdering/ObsStrat", "max_issues_repo_head_hexsha": "1032e5c66ee9d7e6b8d8ddde443670489d32e7fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 11, "max_issues_repo_issues_event_min_datetime": "2018-07-23T13:12:29.000Z", "max_issues_repo_issues_event_max_datetime": "2018-10-19T19:50:02.000Z", "max_forks_repo_path": "code/simsee/python/simsee.py", "max_forks_repo_name": "galdering/ObsStrat", "max_forks_repo_head_hexsha": "1032e5c66ee9d7e6b8d8ddde443670489d32e7fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 5, "max_forks_repo_forks_event_min_datetime": "2018-10-19T19:41:17.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-30T00:31:20.000Z", "avg_line_length": 34.6408450704, "max_line_length": 92, "alphanum_fraction": 0.6161313275, "include": true, "reason": "import numpy,import astropy", "num_tokens": 5328}
from typing import Callable, Tuple, Union

import numpy as np

from emukit.core.acquisition import Acquisition
from emukit.core.interfaces import IModel, IPriorHyperparameters


class IntegratedHyperParameterAcquisition(Acquisition):
    """
    This acquisition class provides functionality for integrating any acquisition function over model hyper-parameters
    """

    def __init__(
        self,
        model: Union[IModel, IPriorHyperparameters],
        acquisition_generator: Callable,
        n_samples: int = 10,
        n_burnin: int = 100,
        subsample_interval: int = 10,
        step_size: float = 1e-1,
        leapfrog_steps: int = 20,
    ):
        """
        :param model: An emukit model that implements IPriorHyperparameters
        :param acquisition_generator: Function that returns acquisition object when given the model as the only argument
        :param n_samples: Number of hyper-parameter samples
        :param n_burnin: Number of initial samples not used.
        :param subsample_interval: Interval of subsampling from HMC samples.
        :param step_size: Size of the gradient steps in the HMC sampler.
        :param leapfrog_steps: Number of gradient steps before each Metropolis Hasting step.
        """
        self.model = model
        self.acquisition_generator = acquisition_generator
        self.n_samples = n_samples
        self.n_burnin = n_burnin
        self.subsample_interval = subsample_interval
        self.step_size = step_size
        self.leapfrog_steps = leapfrog_steps
        # Draw the initial hyper-parameter samples so the acquisition is
        # usable immediately after construction.
        self.update_parameters()

        # Probe a throwaway acquisition once to learn whether gradients are
        # supported; the generator is assumed cheap to call.
        acquisition = self.acquisition_generator(model)
        self._has_gradients = acquisition.has_gradients

    def evaluate(self, x: np.ndarray) -> np.ndarray:
        """
        Evaluate acquisition by integrating over the hyper-parameters of the model

        :param x: locations where the evaluation is done.
        :return: Array with integrated acquisition value at all input locations
        """
        # Normalise a single point to shape (1, n_dims) for consistency with
        # evaluate_with_gradients, which already performs this normalisation.
        if x.ndim == 1:
            x = x[None, :]
        acquisition_value = 0
        for sample in self.samples:
            # Fixing the hyper-parameters mutates the shared model in place;
            # each per-sample acquisition is built on the updated model.
            self.model.fix_model_hyperparameters(sample)
            acquisition = self.acquisition_generator(self.model)
            acquisition_value += acquisition.evaluate(x)

        # Monte Carlo average over the hyper-parameter samples.
        return acquisition_value / self.n_samples

    def evaluate_with_gradients(self, x: np.ndarray) -> Tuple:
        """
        Computes the acquisition value and its derivative integrating over the hyper-parameters of the model

        :param x: locations where the evaluation with gradients is done.
        :return: tuple containing the integrated acquisition value at the points x and its gradient.
        """
        if x.ndim == 1:
            x = x[None, :]
        acquisition_value = 0
        d_acquisition_dx = 0
        for sample in self.samples:
            self.model.fix_model_hyperparameters(sample)
            acquisition = self.acquisition_generator(self.model)
            improvement_sample, d_improvement_dx_sample = acquisition.evaluate_with_gradients(x)
            acquisition_value += improvement_sample
            d_acquisition_dx += d_improvement_dx_sample

        # Average both the value and the gradient over the samples.
        return acquisition_value / self.n_samples, d_acquisition_dx / self.n_samples

    def update_parameters(self):
        """Re-draw the hyper-parameter samples from the model's HMC sampler."""
        self.samples = self.model.generate_hyperparameters_samples(
            self.n_samples, self.n_burnin, self.subsample_interval, self.step_size, self.leapfrog_steps
        )

    @property
    def has_gradients(self) -> bool:
        """Returns that this acquisition has gradients"""
        return self._has_gradients

    def update_batches(self, x_batch, lipschitz_constant, f_min):
        # NOTE(review): the acquisition created here is discarded after the
        # call; this only has a lasting effect if the generator returns a
        # shared/stateful object — confirm against the generator used.
        acquisition = self.acquisition_generator(self.model)
        acquisition.update_batches(x_batch, lipschitz_constant, f_min)
{"hexsha": "04c89e1baad8b4ce42679f4b5708bfd2e3d51fb6", "size": 3777, "ext": "py", "lang": "Python", "max_stars_repo_path": "emukit/core/acquisition/integrated_acquisition.py", "max_stars_repo_name": "EmuKit/Emukit", "max_stars_repo_head_hexsha": "2df951e42c82400192220eb18af428f3eb764f6c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 272, "max_stars_repo_stars_event_min_datetime": "2018-09-18T11:56:37.000Z", "max_stars_repo_stars_event_max_datetime": "2020-10-10T22:21:25.000Z", "max_issues_repo_path": "emukit/core/acquisition/integrated_acquisition.py", "max_issues_repo_name": "EmuKit/Emukit", "max_issues_repo_head_hexsha": "2df951e42c82400192220eb18af428f3eb764f6c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 278, "max_issues_repo_issues_event_min_datetime": "2018-09-19T15:38:50.000Z", "max_issues_repo_issues_event_max_datetime": "2020-10-14T13:45:24.000Z", "max_forks_repo_path": "emukit/core/acquisition/integrated_acquisition.py", "max_forks_repo_name": "EmuKit/Emukit", "max_forks_repo_head_hexsha": "2df951e42c82400192220eb18af428f3eb764f6c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 88, "max_forks_repo_forks_event_min_datetime": "2018-09-18T11:56:48.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-23T13:33:55.000Z", "avg_line_length": 39.34375, "max_line_length": 120, "alphanum_fraction": 0.6920836643, "include": true, "reason": "import numpy", "num_tokens": 765}
# -*- coding: utf-8 -*-
"""
Landscape Evaporative Response Index

This script will download all or update existing LERI files used in the app.
It uses the same FTP server as EDDI.

Production notes:
  - LERI does not usually cover the full grid and only dates back to 2000,
    so maybe there would be space to experiment with a different resolution?
  - Also, LERI is not available for the same time periods as EDDI, SPI, and
    SPEI. The monthly values are available for 1, 3, 7 and 12 month-windows.
  - The 1- and 3-month files come out every month, the 7-month files only
    once per year (January), and the 12-month files twice per year
    (September and December). Not sure why this is, and it would throw the
    time-period selection system off. Perhaps we start with just the 1- and
    3-month LERIs then brainstorm how to fit the others in.
  - Also, these are netcdf files, so the process will be a blend of
    Get_EDDI.py and Get_WWDT.py.
  - I am sharing the temp folder with EDDI, so don't run the two at the
    same time (Get_LERI and Get_EDDI).

Created on Mon Mar 18 09:47:33 2019

@author: User
"""
import datetime as dt
import ftplib
from glob import glob
from netCDF4 import Dataset
import numpy as np
import os
from osgeo import gdal
import pandas as pd
import sys
from tqdm import tqdm
import xarray as xr

# Machine-specific working directories and data roots.
if sys.platform == 'win32':
    sys.path.insert(0, 'C:/Users/User/github/Ubuntu-Practice-Machine')
    os.chdir('C:/Users/User/github/Ubuntu-Practice-Machine')
    data_path = 'f:/'
elif 'travis' in os.getcwd():
    os.chdir('/home/travis/github/Ubuntu-Practice-Machine')
    data_path = ''
else:
    sys.path.insert(0, '/root/Sync/Ubuntu-Practice-Machine')
    os.chdir('/root/Sync/Ubuntu-Practice-Machine')
    data_path = '/root/Sync'

# Imported after the sys.path manipulation above so the project module
# resolves on every machine.
from functions import toNetCDF, toNetCDFAlbers, toNetCDFPercentile, isInt

# These make output logs too noisy to see what happened
gdal.PushErrorHandler('CPLQuietErrorHandler')
os.environ['GDAL_PAM_ENABLED'] = 'NO'

# There are often missing epsg codes in the gcs.csv file, but proj4 works
proj = ('+proj=aea +lat_1=20 +lat_2=60 +lat_0=40 +lon_0=-96 +x_0=0 +y_0=0 ' +
        '+ellps=GRS80 +datum=NAD83 +units=m no_defs')

# Get resolution from file call; default to 0.25 degrees when the argument
# is missing or not a number. (Narrowed from a bare ``except:`` so that
# unrelated errors are no longer silently swallowed.)
try:
    res = float(sys.argv[1])
except (IndexError, ValueError):
    res = 0.25

# In[] Data Source and target directories
ftp_path = 'ftp://ftp.cdc.noaa.gov/Projects/LERI/CONUS_archive/data/'
temp_folder = os.path.join(data_path, 'data/droughtindices/netcdfs/leri')
pc_folder = os.path.join(data_path, 'data/droughtindices/netcdfs/percentiles')
if not os.path.exists(temp_folder):
    os.makedirs(temp_folder)
if not os.path.exists(pc_folder):
    os.makedirs(pc_folder)

# In[] Index Options
indices = ['leri1', 'leri3']

# In[] Define scraping routine
def getLERI(scale, date, temp_folder):
    '''
    Download one monthly LERI netcdf from the currently-set FTP directory
    and return the local path it was written to.

    The date in the file name always uses the first day of the month.

    Note: uses the module-level ``ftp`` connection, whose working directory
    must already point at the correct year.
    '''
    year = date.year
    month = date.month
    file_name = 'LERI_{:02d}mn_{}{:02d}01.nc'.format(scale, year, month)
    local_file = os.path.join(temp_folder, 'leri.nc')
    with open(local_file, 'wb') as dst:
        ftp.retrbinary('RETR %s' % file_name, dst.write)

    return local_file

# In[] Today's date, month, and year
todays_date = dt.datetime.today()
today = np.datetime64(todays_date)
print("##")
print("#####")
print("############")
print("#######################")
print("#######################################")
print("####################################################")
print("\nRunning Get_LERI.py using a " + str(res) + " degree resolution:\n")
print(str(today) + '\n')

# In[] Get time series of currently available values
# Connect to FTP
ftp = ftplib.FTP('ftp.cdc.noaa.gov', 'anonymous', 'anonymous@cdc.noaa.gov')
for index in indices:
    ftp.cwd('/Projects/LERI/CONUS_archive/data/')
    print('\n' + index)
    original_path = os.path.join(data_path, "data/droughtindices/netcdfs/",
                                 index + ".nc")
    percentile_path = os.path.join(data_path,
                                   "data/droughtindices/netcdfs/percentiles",
                                   index + '.nc')
    albers_path = os.path.join(data_path,
                               "data/droughtindices/netcdfs/albers",
                               index + '.nc')
    # e.g. 'leri1' -> 1, 'leri3' -> 3
    scale = index[-2:]
    scale = int("".join([s for s in scale if isInt(s)]))

    # Delete existing contents of temporary folder
    temps = glob(os.path.join(temp_folder, "*"))
    for t in temps:
        os.remove(t)

    ####### If we are only missing some dates #################################
    if os.path.exists(original_path):
        with xr.open_dataset(original_path) as data:
            dates = pd.DatetimeIndex(data.time.data)
            data.close()

        # Extract dates
        d1 = dates[0]
        d2 = dates[-1]

        # Get a list of the dates already in the netcdf file
        existing_dates = pd.date_range(d1, d2, freq="M")

        # Get all of the last day of month files for the index
        ftp_years = ftp.nlst()
        ftp_years = [f for f in ftp_years if isInt(f)]

        # First Date
        ftp.cwd(os.path.join('/Projects/LERI/CONUS_archive/data/',
                             ftp_years[0]))
        ftp_files = ftp.nlst()
        ftp_files = [f for f in ftp_files if
                     f[-16:-12] == "{:02d}mn".format(scale)]
        ftp_first = ftp_files[0]
        first_date = pd.to_datetime(ftp_first[-11:-3], format='%Y%m%d')

        # Last Date
        ftp.cwd(os.path.join('/Projects/LERI/CONUS_archive/data/',
                             ftp_years[-1]))
        ftp_files = ftp.nlst()
        ftp_files = [f for f in ftp_files if
                     f[-16:-12] == "{:02d}mn".format(scale)]
        ftp_last = ftp_files[-1]
        last_date = pd.to_datetime(ftp_last[-11:-3], format='%Y%m%d')

        # All dates available
        available_dates = pd.date_range(first_date, last_date, freq='M')

        # Get needed dates
        needed_dates = [a for a in available_dates if a not in existing_dates]

        # Download missing files
        if len(needed_dates) > 0:
            print_statement = '{} missing file(s) since {}...\n'
            print(print_statement.format(len(needed_dates), needed_dates[0]))

            for date in tqdm(needed_dates, position=0):
                ftp.cwd(os.path.join('/Projects/LERI/CONUS_archive/data/',
                                     str(date.year)))
                # This returns the filename of the downloaded asc file
                in_path = getLERI(scale, date, temp_folder)

                # Save each to a geotiff to use the netcdf builders
                file_name = ('leri_' + str(date.year) +
                             '{:02d}'.format(date.month) + '.tif')
                out_path = os.path.join(temp_folder, file_name)
                tif_path = out_path

                # Resample each, working from disk
                ds = gdal.Warp(out_path, in_path, dstSRS='EPSG:4326',
                               xRes=res, yRes=res,
                               outputBounds=[-130, 20, -55, 50])
                del ds

                # Reproject the output from above
                in_path = out_path
                out_path = os.path.join(temp_folder, 'proj_' + file_name)
                tif_path_proj = out_path
                ds = gdal.Warp(out_path, in_path, dstSRS=proj)
                del ds

                # Open old data sets
                old = Dataset(original_path, 'r+')
                old_proj = Dataset(albers_path, 'r+')
                times = old.variables['time']
                times_proj = old_proj.variables['time']
                values = old.variables['value']
                values_proj = old_proj.variables['value']
                n = times.shape[0]

                # Convert new date to days since 1900-01-01 (mid-month stamp)
                date = dt.datetime(date.year, date.month, day=15)
                days = date - dt.datetime(1900, 1, 1)
                days = np.float64(days.days)

                # Convert new data to array
                base_data = gdal.Open(tif_path)
                base_data_proj = gdal.Open(tif_path_proj)
                array = base_data.ReadAsArray()
                array_proj = base_data_proj.ReadAsArray()
                del base_data
                del base_data_proj

                # Write changes to file and close
                times[n] = days
                times_proj[n] = days
                values[n] = array
                values_proj[n] = array_proj
                old.close()
                old_proj.close()

            # Now recreate the entire percentile data set
            print('Reranking percentiles...')
            pc_path = os.path.join(pc_folder, index + '.nc')
            os.remove(pc_path)
            toNetCDFPercentile(original_path, pc_path)

    ############## If we need to start over ###################################
    else:
        print(original_path + " not detected, building new dataset...\n")

        # Get all available years
        ftp_years = ftp.nlst()
        ftp_years = [f for f in ftp_years if isInt(f)]

        # Find the most recently available month
        max_year = max(ftp_years)
        ftp.cwd(os.path.join('/Projects/LERI/CONUS_archive/data/', max_year))
        files = ftp.nlst()
        files = [f for f in files if int(f[5:7]) == scale]
        months = [int(f[-7:-5]) for f in files]
        max_year = int(max_year)
        max_month = max(months)

        # available dates
        date1 = dt.datetime(int(min(ftp_years)), 1, 1)
        date2 = dt.datetime(max_year, max_month, 1)
        available_dates = pd.date_range(date1, date2, freq='M')

        # Loop through these, download and transform data
        for date in tqdm(available_dates, position=0):
            ftp.cwd(os.path.join('/Projects/LERI/CONUS_archive/data/',
                                 str(date.year)))
            in_path = getLERI(scale, date, temp_folder)

            # The are rather large files, this could take a while
            out_file = ('temp_' + str(date.year) +
                        '{:02d}'.format(date.month) + '.tif')
            out_path = os.path.join(temp_folder, out_file)
            tif_path = out_path

            # Resample each, working from disk
            ds = gdal.Warp(out_path, in_path, dstSRS='EPSG:4326',
                           xRes=res, yRes=res,
                           outputBounds=[-130, 20, -55, 50])
            del ds
            os.remove(in_path)

            # Resample
            in_path = tif_path
            out_file = ('proj_temp_' + str(date.year) +
                        '{:02d}'.format(date.month) + '.tif')
            out_path = os.path.join(temp_folder, out_file)
            ds = gdal.Warp(out_path, in_path, dstSRS=proj)
            del ds

        # Merge individual tif files into a single netcdf file
        tfiles = glob(os.path.join(temp_folder, 'temp_*'))
        tfiles_proj = glob(os.path.join(temp_folder, 'proj_*'))
        ncdir = os.path.join(data_path, "data/droughtindices/netcdfs/",
                             index + ".nc")
        ncdir_proj = os.path.join(data_path,
                                  "data/droughtindices/netcdfs/albers",
                                  index + ".nc")

        # Finally save to file
        toNetCDF(tfiles=tfiles, ncfiles=None, savepath=ncdir, index=index,
                 year1=1980, month1=1, year2=todays_date.year,
                 month2=todays_date.month, proj=4326, percentiles=False,
                 wmode='w')

        # Save another projected version
        toNetCDFAlbers(tfiles=tfiles_proj, ncfiles=None, savepath=ncdir_proj,
                       index=index, year1=1980, month1=1,
                       year2=todays_date.year, month2=todays_date.month,
                       proj=proj, percentiles=False, wmode='w')

        # Now lets get the percentile values
        pc_path = os.path.join(data_path, "data/droughtindices/netcdfs/" +
                               "percentiles", index + ".nc")
        toNetCDFPercentile(ncdir, pc_path)

# Close connection with FTP server
ftp.quit()

print("Update Complete.")
print("####################################################")
print("#######################################")
print("#######################")
print("############")
print("#####")
print("##")
{"hexsha": "f84e53117827bf9bf15afbb0e10111f35a5a8a68", "size": 12572, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/Get_LERI.py", "max_stars_repo_name": "WilliamsTravis/Drought-Index-Portal", "max_stars_repo_head_hexsha": "af1c2ce53ec2c04eceba0758f73135c155082dd6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2019-03-02T19:44:39.000Z", "max_stars_repo_stars_event_max_datetime": "2019-03-02T19:44:39.000Z", "max_issues_repo_path": "scripts/Get_LERI.py", "max_issues_repo_name": "WilliamsTravis/Ubuntu-Practice-Machine", "max_issues_repo_head_hexsha": "f68b96ba12d7ac421d76feeefddd5ec7d7cd72bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "scripts/Get_LERI.py", "max_forks_repo_name": "WilliamsTravis/Ubuntu-Practice-Machine", "max_forks_repo_head_hexsha": "f68b96ba12d7ac421d76feeefddd5ec7d7cd72bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-03-27T09:00:08.000Z", "max_forks_repo_forks_event_max_datetime": "2020-03-27T09:00:08.000Z", "avg_line_length": 39.0434782609, "max_line_length": 79, "alphanum_fraction": 0.560292714, "include": true, "reason": "import numpy", "num_tokens": 2997}
[STATEMENT] lemma None_in_map_option_set: "None \<in> map_option_set f x \<longleftrightarrow> None \<in> Set.bind (set_option x) f \<or> x = None" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (None \<in> map_option_set f x) = (None \<in> set_option x \<bind> f \<or> x = None) [PROOF STEP] by(cases x) simp_all
{"llama_tokens": 131, "file": "Probabilistic_While_While_SPMF", "length": 1}
const seaborn_rocket_gradient = [RGBA(0.01060815, 0.01808215, 0.10018654), RGBA(0.01428972, 0.02048237, 0.10374486), RGBA(0.01831941, 0.0229766, 0.10738511), RGBA(0.02275049, 0.02554464, 0.11108639), RGBA(0.02759119, 0.02818316, 0.11483751), RGBA(0.03285175, 0.03088792, 0.11863035), RGBA(0.03853466, 0.03365771, 0.12245873), RGBA(0.04447016, 0.03648425, 0.12631831), RGBA(0.05032105, 0.03936808, 0.13020508), RGBA(0.05611171, 0.04224835, 0.13411624), RGBA(0.0618531, 0.04504866, 0.13804929), RGBA(0.06755457, 0.04778179, 0.14200206), RGBA(0.0732236, 0.05045047, 0.14597263), RGBA(0.0788708, 0.05305461, 0.14995981), RGBA(0.08450105, 0.05559631, 0.15396203), RGBA(0.09011319, 0.05808059, 0.15797687), RGBA(0.09572396, 0.06050127, 0.16200507), RGBA(0.10132312, 0.06286782, 0.16604287), RGBA(0.10692823, 0.06517224, 0.17009175), RGBA(0.1125315, 0.06742194, 0.17414848), RGBA(0.11813947, 0.06961499, 0.17821272), RGBA(0.12375803, 0.07174938, 0.18228425), RGBA(0.12938228, 0.07383015, 0.18636053), RGBA(0.13501631, 0.07585609, 0.19044109), RGBA(0.14066867, 0.0778224, 0.19452676), RGBA(0.14633406, 0.07973393, 0.1986151), RGBA(0.15201338, 0.08159108, 0.20270523), RGBA(0.15770877, 0.08339312, 0.20679668), RGBA(0.16342174, 0.0851396, 0.21088893), RGBA(0.16915387, 0.08682996, 0.21498104), RGBA(0.17489524, 0.08848235, 0.2190294), RGBA(0.18065495, 0.09009031, 0.22303512), RGBA(0.18643324, 0.09165431, 0.22699705), RGBA(0.19223028, 0.09317479, 0.23091409), RGBA(0.19804623, 0.09465217, 0.23478512), RGBA(0.20388117, 0.09608689, 0.23860907), RGBA(0.20973515, 0.09747934, 0.24238489), RGBA(0.21560818, 0.09882993, 0.24611154), RGBA(0.22150014, 0.10013944, 0.2497868), RGBA(0.22741085, 0.10140876, 0.25340813), RGBA(0.23334047, 0.10263737, 0.25697736), RGBA(0.23928891, 0.10382562, 0.2604936), RGBA(0.24525608, 0.10497384, 0.26395596), RGBA(0.25124182, 0.10608236, 0.26736359), RGBA(0.25724602, 0.10715148, 0.27071569), RGBA(0.26326851, 0.1081815, 0.27401148), RGBA(0.26930915, 0.1091727, 0.2772502), 
RGBA(0.27536766, 0.11012568, 0.28043021), RGBA(0.28144375, 0.11104133, 0.2835489), RGBA(0.2875374, 0.11191896, 0.28660853), RGBA(0.29364846, 0.11275876, 0.2896085), RGBA(0.29977678, 0.11356089, 0.29254823), RGBA(0.30592213, 0.11432553, 0.29542718), RGBA(0.31208435, 0.11505284, 0.29824485), RGBA(0.31826327, 0.1157429, 0.30100076), RGBA(0.32445869, 0.11639585, 0.30369448), RGBA(0.33067031, 0.11701189, 0.30632563), RGBA(0.33689808, 0.11759095, 0.3088938), RGBA(0.34314168, 0.11813362, 0.31139721), RGBA(0.34940101, 0.11863987, 0.3138355), RGBA(0.355676, 0.11910909, 0.31620996), RGBA(0.36196644, 0.1195413, 0.31852037), RGBA(0.36827206, 0.11993653, 0.32076656), RGBA(0.37459292, 0.12029443, 0.32294825), RGBA(0.38092887, 0.12061482, 0.32506528), RGBA(0.38727975, 0.12089756, 0.3271175), RGBA(0.39364518, 0.12114272, 0.32910494), RGBA(0.40002537, 0.12134964, 0.33102734), RGBA(0.40642019, 0.12151801, 0.33288464), RGBA(0.41282936, 0.12164769, 0.33467689), RGBA(0.41925278, 0.12173833, 0.33640407), RGBA(0.42569057, 0.12178916, 0.33806605), RGBA(0.43214263, 0.12179973, 0.33966284), RGBA(0.43860848, 0.12177004, 0.34119475), RGBA(0.44508855, 0.12169883, 0.34266151), RGBA(0.45158266, 0.12158557, 0.34406324), RGBA(0.45809049, 0.12142996, 0.34540024), RGBA(0.46461238, 0.12123063, 0.34667231), RGBA(0.47114798, 0.12098721, 0.34787978), RGBA(0.47769736, 0.12069864, 0.34902273), RGBA(0.48426077, 0.12036349, 0.35010104), RGBA(0.49083761, 0.11998161, 0.35111537), RGBA(0.49742847, 0.11955087, 0.35206533), RGBA(0.50403286, 0.11907081, 0.35295152), RGBA(0.51065109, 0.11853959, 0.35377385), RGBA(0.51728314, 0.1179558, 0.35453252), RGBA(0.52392883, 0.11731817, 0.35522789), RGBA(0.53058853, 0.11662445, 0.35585982), RGBA(0.53726173, 0.11587369, 0.35642903), RGBA(0.54394898, 0.11506307, 0.35693521), RGBA(0.5506426, 0.11420757, 0.35737863), RGBA(0.55734473, 0.11330456, 0.35775059), RGBA(0.56405586, 0.11235265, 0.35804813), RGBA(0.57077365, 0.11135597, 0.35827146), RGBA(0.5774991, 0.11031233, 
0.35841679), RGBA(0.58422945, 0.10922707, 0.35848469), RGBA(0.59096382, 0.10810205, 0.35847347), RGBA(0.59770215, 0.10693774, 0.35838029), RGBA(0.60444226, 0.10573912, 0.35820487), RGBA(0.61118304, 0.10450943, 0.35794557), RGBA(0.61792306, 0.10325288, 0.35760108), RGBA(0.62466162, 0.10197244, 0.35716891), RGBA(0.63139686, 0.10067417, 0.35664819), RGBA(0.63812122, 0.09938212, 0.35603757), RGBA(0.64483795, 0.0980891, 0.35533555), RGBA(0.65154562, 0.09680192, 0.35454107), RGBA(0.65824241, 0.09552918, 0.3536529), RGBA(0.66492652, 0.09428017, 0.3526697), RGBA(0.67159578, 0.09306598, 0.35159077), RGBA(0.67824099, 0.09192342, 0.3504148), RGBA(0.684863, 0.09085633, 0.34914061), RGBA(0.69146268, 0.0898675, 0.34776864), RGBA(0.69803757, 0.08897226, 0.3462986), RGBA(0.70457834, 0.0882129, 0.34473046), RGBA(0.71108138, 0.08761223, 0.3430635), RGBA(0.7175507, 0.08716212, 0.34129974), RGBA(0.72398193, 0.08688725, 0.33943958), RGBA(0.73035829, 0.0868623, 0.33748452), RGBA(0.73669146, 0.08704683, 0.33543669), RGBA(0.74297501, 0.08747196, 0.33329799), RGBA(0.74919318, 0.08820542, 0.33107204), RGBA(0.75535825, 0.08919792, 0.32876184), RGBA(0.76145589, 0.09050716, 0.32637117), RGBA(0.76748424, 0.09213602, 0.32390525), RGBA(0.77344838, 0.09405684, 0.32136808), RGBA(0.77932641, 0.09634794, 0.31876642), RGBA(0.78513609, 0.09892473, 0.31610488), RGBA(0.79085854, 0.10184672, 0.313391), RGBA(0.7965014, 0.10506637, 0.31063031), RGBA(0.80205987, 0.10858333, 0.30783), RGBA(0.80752799, 0.11239964, 0.30499738), RGBA(0.81291606, 0.11645784, 0.30213802), RGBA(0.81820481, 0.12080606, 0.29926105), RGBA(0.82341472, 0.12535343, 0.2963705), RGBA(0.82852822, 0.13014118, 0.29347474), RGBA(0.83355779, 0.13511035, 0.29057852), RGBA(0.83850183, 0.14025098, 0.2876878), RGBA(0.84335441, 0.14556683, 0.28480819), RGBA(0.84813096, 0.15099892, 0.281943), RGBA(0.85281737, 0.15657772, 0.27909826), RGBA(0.85742602, 0.1622583, 0.27627462), RGBA(0.86196552, 0.16801239, 0.27346473), RGBA(0.86641628, 0.17387796, 
0.27070818), RGBA(0.87079129, 0.17982114, 0.26797378), RGBA(0.87507281, 0.18587368, 0.26529697), RGBA(0.87925878, 0.19203259, 0.26268136), RGBA(0.8833417, 0.19830556, 0.26014181), RGBA(0.88731387, 0.20469941, 0.25769539), RGBA(0.89116859, 0.21121788, 0.2553592), RGBA(0.89490337, 0.21785614, 0.25314362), RGBA(0.8985026, 0.22463251, 0.25108745), RGBA(0.90197527, 0.23152063, 0.24918223), RGBA(0.90530097, 0.23854541, 0.24748098), RGBA(0.90848638, 0.24568473, 0.24598324), RGBA(0.911533, 0.25292623, 0.24470258), RGBA(0.9144225, 0.26028902, 0.24369359), RGBA(0.91717106, 0.26773821, 0.24294137), RGBA(0.91978131, 0.27526191, 0.24245973), RGBA(0.92223947, 0.28287251, 0.24229568), RGBA(0.92456587, 0.29053388, 0.24242622), RGBA(0.92676657, 0.29823282, 0.24285536), RGBA(0.92882964, 0.30598085, 0.24362274), RGBA(0.93078135, 0.31373977, 0.24468803), RGBA(0.93262051, 0.3215093, 0.24606461), RGBA(0.93435067, 0.32928362, 0.24775328), RGBA(0.93599076, 0.33703942, 0.24972157), RGBA(0.93752831, 0.34479177, 0.25199928), RGBA(0.93899289, 0.35250734, 0.25452808), RGBA(0.94036561, 0.36020899, 0.25734661), RGBA(0.94167588, 0.36786594, 0.2603949), RGBA(0.94291042, 0.37549479, 0.26369821), RGBA(0.94408513, 0.3830811, 0.26722004), RGBA(0.94520419, 0.39062329, 0.27094924), RGBA(0.94625977, 0.39813168, 0.27489742), RGBA(0.94727016, 0.4055909, 0.27902322), RGBA(0.94823505, 0.41300424, 0.28332283), RGBA(0.94914549, 0.42038251, 0.28780969), RGBA(0.95001704, 0.42771398, 0.29244728), RGBA(0.95085121, 0.43500005, 0.29722817), RGBA(0.95165009, 0.44224144, 0.30214494), RGBA(0.9524044, 0.44944853, 0.3072105), RGBA(0.95312556, 0.45661389, 0.31239776), RGBA(0.95381595, 0.46373781, 0.31769923), RGBA(0.95447591, 0.47082238, 0.32310953), RGBA(0.95510255, 0.47787236, 0.32862553), RGBA(0.95569679, 0.48489115, 0.33421404), RGBA(0.95626788, 0.49187351, 0.33985601), RGBA(0.95681685, 0.49882008, 0.34555431), RGBA(0.9573439, 0.50573243, 0.35130912), RGBA(0.95784842, 0.51261283, 0.35711942), RGBA(0.95833051, 
0.51946267, 0.36298589), RGBA(0.95879054, 0.52628305, 0.36890904), RGBA(0.95922872, 0.53307513, 0.3748895), RGBA(0.95964538, 0.53983991, 0.38092784), RGBA(0.96004345, 0.54657593, 0.3870292), RGBA(0.96042097, 0.55328624, 0.39319057), RGBA(0.96077819, 0.55997184, 0.39941173), RGBA(0.9611152, 0.5666337, 0.40569343), RGBA(0.96143273, 0.57327231, 0.41203603), RGBA(0.96173392, 0.57988594, 0.41844491), RGBA(0.96201757, 0.58647675, 0.42491751), RGBA(0.96228344, 0.59304598, 0.43145271), RGBA(0.96253168, 0.5995944, 0.43805131), RGBA(0.96276513, 0.60612062, 0.44471698), RGBA(0.96298491, 0.6126247, 0.45145074), RGBA(0.96318967, 0.61910879, 0.45824902), RGBA(0.96337949, 0.6255736, 0.46511271), RGBA(0.96355923, 0.63201624, 0.47204746), RGBA(0.96372785, 0.63843852, 0.47905028), RGBA(0.96388426, 0.64484214, 0.4861196), RGBA(0.96403203, 0.65122535, 0.4932578), RGBA(0.96417332, 0.65758729, 0.50046894), RGBA(0.9643063, 0.66393045, 0.5077467), RGBA(0.96443322, 0.67025402, 0.51509334), RGBA(0.96455845, 0.67655564, 0.52251447), RGBA(0.96467922, 0.68283846, 0.53000231), RGBA(0.96479861, 0.68910113, 0.53756026), RGBA(0.96492035, 0.69534192, 0.5451917), RGBA(0.96504223, 0.7015636, 0.5528892), RGBA(0.96516917, 0.70776351, 0.5606593), RGBA(0.96530224, 0.71394212, 0.56849894), RGBA(0.96544032, 0.72010124, 0.57640375), RGBA(0.96559206, 0.72623592, 0.58438387), RGBA(0.96575293, 0.73235058, 0.59242739), RGBA(0.96592829, 0.73844258, 0.60053991), RGBA(0.96612013, 0.74451182, 0.60871954), RGBA(0.96632832, 0.75055966, 0.61696136), RGBA(0.96656022, 0.75658231, 0.62527295), RGBA(0.96681185, 0.76258381, 0.63364277), RGBA(0.96709183, 0.76855969, 0.64207921), RGBA(0.96739773, 0.77451297, 0.65057302), RGBA(0.96773482, 0.78044149, 0.65912731), RGBA(0.96810471, 0.78634563, 0.66773889), RGBA(0.96850919, 0.79222565, 0.6764046), RGBA(0.96893132, 0.79809112, 0.68512266), RGBA(0.96935926, 0.80395415, 0.69383201), RGBA(0.9698028, 0.80981139, 0.70252255), RGBA(0.97025511, 0.81566605, 0.71120296), RGBA(0.97071849, 
0.82151775, 0.71987163), RGBA(0.97120159, 0.82736371, 0.72851999), RGBA(0.97169389, 0.83320847, 0.73716071), RGBA(0.97220061, 0.83905052, 0.74578903), RGBA(0.97272597, 0.84488881, 0.75440141), RGBA(0.97327085, 0.85072354, 0.76299805), RGBA(0.97383206, 0.85655639, 0.77158353), RGBA(0.97441222, 0.86238689, 0.78015619), RGBA(0.97501782, 0.86821321, 0.78871034), RGBA(0.97564391, 0.87403763, 0.79725261), RGBA(0.97628674, 0.87986189, 0.8057883), RGBA(0.97696114, 0.88568129, 0.81430324), RGBA(0.97765722, 0.89149971, 0.82280948), RGBA(0.97837585, 0.89731727, 0.83130786), RGBA(0.97912374, 0.90313207, 0.83979337), RGBA(0.979891, 0.90894778, 0.84827858), RGBA(0.98067764, 0.91476465, 0.85676611), RGBA(0.98137749, 0.92061729, 0.86536915)] const seaborn_icefire_gradient = [RGBA(0.73936227, 0.90443867, 0.85757238), RGBA(0.72888063, 0.89639109, 0.85488394), RGBA(0.71834255, 0.88842162, 0.8521605), RGBA(0.70773866, 0.88052939, 0.849422), RGBA(0.69706215, 0.87271313, 0.84668315), RGBA(0.68629021, 0.86497329, 0.84398721), RGBA(0.67543654, 0.85730617, 0.84130969), RGBA(0.66448539, 0.84971123, 0.83868005), RGBA(0.65342679, 0.84218728, 0.83611512), RGBA(0.64231804, 0.83471867, 0.83358584), RGBA(0.63117745, 0.827294, 0.83113431), RGBA(0.62000484, 0.81991069, 0.82876741), RGBA(0.60879435, 0.81256797, 0.82648905), RGBA(0.59754118, 0.80526458, 0.82430414), RGBA(0.58624247, 0.79799884, 0.82221573), RGBA(0.57489525, 0.7907688, 0.82022901), RGBA(0.56349779, 0.78357215, 0.81834861), RGBA(0.55204294, 0.77640827, 0.81657563), RGBA(0.54052516, 0.76927562, 0.81491462), RGBA(0.52894085, 0.76217215, 0.81336913), RGBA(0.51728854, 0.75509528, 0.81194156), RGBA(0.50555676, 0.74804469, 0.81063503), RGBA(0.49373871, 0.7410187, 0.80945242), RGBA(0.48183174, 0.73401449, 0.80839675), RGBA(0.46982587, 0.72703075, 0.80747097), RGBA(0.45770893, 0.72006648, 0.80667756), RGBA(0.44547249, 0.71311941, 0.80601991), RGBA(0.43318643, 0.70617126, 0.80549278), RGBA(0.42110294, 0.69916972, 0.80506683), RGBA(0.40925101, 
0.69211059, 0.80473246), RGBA(0.3976693, 0.68498786, 0.80448272), RGBA(0.38632002, 0.67781125, 0.80431024), RGBA(0.37523981, 0.67057537, 0.80420832), RGBA(0.36442578, 0.66328229, 0.80417474), RGBA(0.35385939, 0.65593699, 0.80420591), RGBA(0.34358916, 0.64853177, 0.8043), RGBA(0.33355526, 0.64107876, 0.80445484), RGBA(0.32383062, 0.63356578, 0.80467091), RGBA(0.31434372, 0.62600624, 0.8049475), RGBA(0.30516161, 0.618389, 0.80528692), RGBA(0.29623491, 0.61072284, 0.80569021), RGBA(0.28759072, 0.60300319, 0.80616055), RGBA(0.27923924, 0.59522877, 0.80669803), RGBA(0.27114651, 0.5874047, 0.80730545), RGBA(0.26337153, 0.57952055, 0.80799113), RGBA(0.25588696, 0.57157984, 0.80875922), RGBA(0.248686, 0.56358255, 0.80961366), RGBA(0.24180668, 0.55552289, 0.81055123), RGBA(0.23526251, 0.54739477, 0.8115939), RGBA(0.22921445, 0.53918506, 0.81267292), RGBA(0.22397687, 0.53086094, 0.8137141), RGBA(0.21977058, 0.52241482, 0.81457651), RGBA(0.21658989, 0.51384321, 0.81528511), RGBA(0.21452772, 0.50514155, 0.81577278), RGBA(0.21372783, 0.49630865, 0.81589566), RGBA(0.21409503, 0.48734861, 0.81566163), RGBA(0.2157176, 0.47827123, 0.81487615), RGBA(0.21842857, 0.46909168, 0.81351614), RGBA(0.22211705, 0.45983212, 0.81146983), RGBA(0.22665681, 0.45052233, 0.80860217), RGBA(0.23176013, 0.44119137, 0.80494325), RGBA(0.23727775, 0.43187704, 0.80038017), RGBA(0.24298285, 0.42261123, 0.79493267), RGBA(0.24865068, 0.41341842, 0.78869164), RGBA(0.25423116, 0.40433127, 0.78155831), RGBA(0.25950239, 0.39535521, 0.77376848), RGBA(0.2644736, 0.38651212, 0.76524809), RGBA(0.26901584, 0.37779582, 0.75621942), RGBA(0.27318141, 0.36922056, 0.746605), RGBA(0.27690355, 0.3607736, 0.73659374), RGBA(0.28023585, 0.35244234, 0.72622103), RGBA(0.28306009, 0.34438449, 0.71500731), RGBA(0.28535896, 0.33660243, 0.70303975), RGBA(0.28708711, 0.32912157, 0.69034504), RGBA(0.28816354, 0.32200604, 0.67684067), RGBA(0.28862749, 0.31519824, 0.66278813), RGBA(0.28847904, 0.30869064, 0.6482815), RGBA(0.28770912, 
0.30250126, 0.63331265), RGBA(0.28640325, 0.29655509, 0.61811374), RGBA(0.28458943, 0.29082155, 0.60280913), RGBA(0.28233561, 0.28527482, 0.58742866), RGBA(0.27967038, 0.2798938, 0.57204225), RGBA(0.27665361, 0.27465357, 0.55667809), RGBA(0.27332564, 0.2695165, 0.54145387), RGBA(0.26973851, 0.26447054, 0.52634916), RGBA(0.2659204, 0.25949691, 0.511417), RGBA(0.26190145, 0.25458123, 0.49668768), RGBA(0.2577151, 0.24971691, 0.48214874), RGBA(0.25337618, 0.24490494, 0.46778758), RGBA(0.24890842, 0.24013332, 0.45363816), RGBA(0.24433654, 0.23539226, 0.4397245), RGBA(0.23967922, 0.23067729, 0.4260591), RGBA(0.23495608, 0.22598894, 0.41262952), RGBA(0.23018113, 0.22132414, 0.39945577), RGBA(0.22534609, 0.21670847, 0.38645794), RGBA(0.22048761, 0.21211723, 0.37372555), RGBA(0.2156198, 0.20755389, 0.36125301), RGBA(0.21074637, 0.20302717, 0.34903192), RGBA(0.20586893, 0.19855368, 0.33701661), RGBA(0.20101757, 0.19411573, 0.32529173), RGBA(0.19619947, 0.18972425, 0.31383846), RGBA(0.19140726, 0.18540157, 0.30260777), RGBA(0.1866769, 0.1811332, 0.29166583), RGBA(0.18201285, 0.17694992, 0.28088776), RGBA(0.17745228, 0.17282141, 0.27044211), RGBA(0.17300684, 0.16876921, 0.26024893), RGBA(0.16868273, 0.16479861, 0.25034479), RGBA(0.16448691, 0.16091728, 0.24075373), RGBA(0.16043195, 0.15714351, 0.23141745), RGBA(0.15652427, 0.15348248, 0.22238175), RGBA(0.15277065, 0.14994111, 0.21368395), RGBA(0.14918274, 0.14653431, 0.20529486), RGBA(0.14577095, 0.14327403, 0.19720829), RGBA(0.14254381, 0.14016944, 0.18944326), RGBA(0.13951035, 0.13723063, 0.18201072), RGBA(0.13667798, 0.13446606, 0.17493774), RGBA(0.13405762, 0.13188822, 0.16820842), RGBA(0.13165767, 0.12950667, 0.16183275), RGBA(0.12948748, 0.12733187, 0.15580631), RGBA(0.12755435, 0.1253723, 0.15014098), RGBA(0.12586516, 0.12363617, 0.1448459), RGBA(0.12442647, 0.12213143, 0.13992571), RGBA(0.12324241, 0.12086419, 0.13539995), RGBA(0.12232067, 0.11984278, 0.13124644), RGBA(0.12166209, 0.11907077, 0.12749671), 
RGBA(0.12126982, 0.11855309, 0.12415079), RGBA(0.12114244, 0.11829179, 0.1212385), RGBA(0.12127766, 0.11828837, 0.11878534), RGBA(0.12284806, 0.1179729, 0.11772022), RGBA(0.12619498, 0.11721796, 0.11770203), RGBA(0.129968, 0.11663788, 0.11792377), RGBA(0.13410011, 0.11625146, 0.11839138), RGBA(0.13855459, 0.11606618, 0.11910584), RGBA(0.14333775, 0.11607038, 0.1200606), RGBA(0.148417, 0.11626929, 0.12125453), RGBA(0.15377389, 0.11666192, 0.12268364), RGBA(0.15941427, 0.11723486, 0.12433911), RGBA(0.16533376, 0.11797856, 0.12621303), RGBA(0.17152547, 0.11888403, 0.12829735), RGBA(0.17797765, 0.11994436, 0.13058435), RGBA(0.18468769, 0.12114722, 0.13306426), RGBA(0.19165663, 0.12247737, 0.13572616), RGBA(0.19884415, 0.12394381, 0.1385669), RGBA(0.20627181, 0.12551883, 0.14157124), RGBA(0.21394877, 0.12718055, 0.14472604), RGBA(0.22184572, 0.12893119, 0.14802579), RGBA(0.22994394, 0.13076731, 0.15146314), RGBA(0.23823937, 0.13267611, 0.15502793), RGBA(0.24676041, 0.13462172, 0.15870321), RGBA(0.25546457, 0.13661751, 0.16248722), RGBA(0.26433628, 0.13865956, 0.16637301), RGBA(0.27341345, 0.14070412, 0.17034221), RGBA(0.28264773, 0.14277192, 0.1743957), RGBA(0.29202272, 0.14486161, 0.17852793), RGBA(0.30159648, 0.14691224, 0.1827169), RGBA(0.31129002, 0.14897583, 0.18695213), RGBA(0.32111555, 0.15103351, 0.19119629), RGBA(0.33107961, 0.1530674, 0.19543758), RGBA(0.34119892, 0.15504762, 0.1996803), RGBA(0.35142388, 0.15701131, 0.20389086), RGBA(0.36178937, 0.1589124, 0.20807639), RGBA(0.37229381, 0.16073993, 0.21223189), RGBA(0.38288348, 0.16254006, 0.2163249), RGBA(0.39359592, 0.16426336, 0.22036577), RGBA(0.40444332, 0.16588767, 0.22434027), RGBA(0.41537995, 0.16745325, 0.2282297), RGBA(0.42640867, 0.16894939, 0.23202755), RGBA(0.43754706, 0.17034847, 0.23572899), RGBA(0.44878564, 0.1716535, 0.23932344), RGBA(0.4601126, 0.17287365, 0.24278607), RGBA(0.47151732, 0.17401641, 0.24610337), RGBA(0.48300689, 0.17506676, 0.2492737), RGBA(0.49458302, 0.17601892, 0.25227688), 
RGBA(0.50623876, 0.17687777, 0.255096), RGBA(0.5179623, 0.17765528, 0.2577162), RGBA(0.52975234, 0.17835232, 0.2601134), RGBA(0.54159776, 0.17898292, 0.26226847), RGBA(0.55348804, 0.17956232, 0.26416003), RGBA(0.56541729, 0.18010175, 0.26575971), RGBA(0.57736669, 0.180631, 0.26704888), RGBA(0.58932081, 0.18117827, 0.26800409), RGBA(0.60127582, 0.18175888, 0.26858488), RGBA(0.61319563, 0.1824336, 0.2687872), RGBA(0.62506376, 0.18324015, 0.26858301), RGBA(0.63681202, 0.18430173, 0.26795276), RGBA(0.64842603, 0.18565472, 0.26689463), RGBA(0.65988195, 0.18734638, 0.26543435), RGBA(0.67111966, 0.18948885, 0.26357955), RGBA(0.68209194, 0.19216636, 0.26137175), RGBA(0.69281185, 0.19535326, 0.25887063), RGBA(0.70335022, 0.19891271, 0.25617971), RGBA(0.71375229, 0.20276438, 0.25331365), RGBA(0.72401436, 0.20691287, 0.25027366), RGBA(0.73407638, 0.21145051, 0.24710661), RGBA(0.74396983, 0.21631913, 0.24380715), RGBA(0.75361506, 0.22163653, 0.24043996), RGBA(0.7630579, 0.22731637, 0.23700095), RGBA(0.77222228, 0.23346231, 0.23356628), RGBA(0.78115441, 0.23998404, 0.23013825), RGBA(0.78979746, 0.24694858, 0.22678822), RGBA(0.79819286, 0.25427223, 0.22352658), RGBA(0.80630444, 0.26198807, 0.22040877), RGBA(0.81417437, 0.27001406, 0.21744645), RGBA(0.82177364, 0.27837336, 0.21468316), RGBA(0.82915955, 0.28696963, 0.21210766), RGBA(0.83628628, 0.2958499, 0.20977813), RGBA(0.84322168, 0.30491136, 0.20766435), RGBA(0.84995458, 0.31415945, 0.2057863), RGBA(0.85648867, 0.32358058, 0.20415327), RGBA(0.86286243, 0.33312058, 0.20274969), RGBA(0.86908321, 0.34276705, 0.20157271), RGBA(0.87512876, 0.3525416, 0.20064949), RGBA(0.88100349, 0.36243385, 0.19999078), RGBA(0.8866469, 0.37249496, 0.1997976), RGBA(0.89203964, 0.38273475, 0.20013431), RGBA(0.89713496, 0.39318156, 0.20121514), RGBA(0.90195099, 0.40380687, 0.20301555), RGBA(0.90648379, 0.41460191, 0.20558847), RGBA(0.9106967, 0.42557857, 0.20918529), RGBA(0.91463791, 0.43668557, 0.21367954), RGBA(0.91830723, 0.44790913, 0.21916352), 
RGBA(0.92171507, 0.45922856, 0.22568002), RGBA(0.92491786, 0.4705936, 0.23308207), RGBA(0.92790792, 0.48200153, 0.24145932), RGBA(0.93073701, 0.49341219, 0.25065486), RGBA(0.93343918, 0.5048017, 0.26056148), RGBA(0.93602064, 0.51616486, 0.27118485), RGBA(0.93850535, 0.52748892, 0.28242464), RGBA(0.94092933, 0.53875462, 0.29416042), RGBA(0.94330011, 0.5499628, 0.30634189), RGBA(0.94563159, 0.56110987, 0.31891624), RGBA(0.94792955, 0.57219822, 0.33184256), RGBA(0.95020929, 0.5832232, 0.34508419), RGBA(0.95247324, 0.59419035, 0.35859866), RGBA(0.95471709, 0.60510869, 0.37236035), RGBA(0.95698411, 0.61595766, 0.38629631), RGBA(0.95923863, 0.62676473, 0.40043317), RGBA(0.9615041, 0.6375203, 0.41474106), RGBA(0.96371553, 0.64826619, 0.42928335), RGBA(0.96591497, 0.65899621, 0.44380444), RGBA(0.96809871, 0.66971662, 0.45830232), RGBA(0.9702495, 0.6804394, 0.47280492), RGBA(0.9723881, 0.69115622, 0.48729272), RGBA(0.97450723, 0.70187358, 0.50178034), RGBA(0.9766108, 0.712592, 0.51626837), RGBA(0.97871716, 0.72330511, 0.53074053), RGBA(0.98082222, 0.73401769, 0.54520694), RGBA(0.9829001, 0.74474445, 0.5597019), RGBA(0.98497466, 0.75547635, 0.57420239), RGBA(0.98705581, 0.76621129, 0.58870185), RGBA(0.98913325, 0.77695637, 0.60321626), RGBA(0.99119918, 0.78771716, 0.61775821), RGBA(0.9932672, 0.79848979, 0.63231691), RGBA(0.99535958, 0.80926704, 0.64687278), RGBA(0.99740544, 0.82008078, 0.66150571), RGBA(0.9992197, 0.83100723, 0.6764127)]
{"hexsha": "585724a198e952e9116529fc3a25611211fad598", "size": 38485, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/seaborn_color_gradients.jl", "max_stars_repo_name": "isentropic/PlotThemes.jl", "max_stars_repo_head_hexsha": "2d8afccc844584a64d9e14bd02939c3da64c3871", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/seaborn_color_gradients.jl", "max_issues_repo_name": "isentropic/PlotThemes.jl", "max_issues_repo_head_hexsha": "2d8afccc844584a64d9e14bd02939c3da64c3871", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/seaborn_color_gradients.jl", "max_forks_repo_name": "isentropic/PlotThemes.jl", "max_forks_repo_head_hexsha": "2d8afccc844584a64d9e14bd02939c3da64c3871", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 74.7281553398, "max_line_length": 75, "alphanum_fraction": 0.4094062622, "num_tokens": 11771}
# Copyright (C) 2020 coneypo # SPDX-License-Identifier: MIT # Author: coneypo # Blog: http://www.cnblogs.com/AdaminXie # GitHub: https://github.com/coneypo/Dlib_face_recognition_from_camera # Mail: coneypo@foxmail.com # 利用 OT 对于单张人脸追踪, 实时人脸识别 (Real-time face detection and recognition via Object-tracking for single face) import dlib import numpy as np import cv2 import os import pandas as pd import time # Dlib 正向人脸检测器 (Use frontal face detector of Dlib) detector = dlib.get_frontal_face_detector() # Dlib 人脸 landmark 特征点检测器 (Get face landmarks) predictor = dlib.shape_predictor('data/data_dlib/shape_predictor_68_face_landmarks.dat') # Dlib Resnet 人脸识别模型,提取 128D 的特征矢量 (Use Dlib resnet50 model to get 128D face descriptor) face_reco_model = dlib.face_recognition_model_v1("data/data_dlib/dlib_face_recognition_resnet_model_v1.dat") class Face_Recognizer: def __init__(self): self.font = cv2.FONT_ITALIC # 统计 FPS (For FPS) self.frame_time = 0 self.frame_start_time = 0 self.fps = 0 # 统计帧数 (cnt for frame) self.frame_cnt = 0 # 用来存储所有录入人脸特征的数组 (Save the features of faces in the database) self.features_known_list = [] # 用来存储录入人脸名字 (Save the name of faces in the database) self.name_known_list = [] # 用来存储上一帧和当前帧 ROI 的质心坐标 (List to save centroid positions of ROI in frame N-1 and N) self.last_frame_centroid_list = [] self.current_frame_centroid_list = [] # 用来存储当前帧检测出目标的名字 (List to save names of objects in current frame) self.current_frame_name_list = [] # 上一帧和当前帧中人脸数的计数器 (cnt for faces in frame N-1 and N) self.last_frame_faces_cnt = 0 self.current_frame_face_cnt = 0 # 用来存放进行识别时候对比的欧氏距离 (Save the e-distance for faceX when recognizing) self.current_frame_face_X_e_distance_list = [] # 存储当前摄像头中捕获到的所有人脸的坐标名字 (Save the positions and names of current faces captured) self.current_frame_face_position_list = [] # 存储当前摄像头中捕获到的人脸特征 (Save the features of people in current frame) self.current_frame_face_feature_list = [] # 控制再识别的后续帧数 (Reclassify after 'reclassify_interval' frames) # 如果识别出 "unknown" 
的脸, 将在 reclassify_interval_cnt 计数到 reclassify_interval 后, 对于人脸进行重新识别 self.reclassify_interval_cnt = 0 self.reclassify_interval = 10 # 从 "features_all.csv" 读取录入人脸特征 (Get known faces from "features_all.csv") def get_face_database(self): if os.path.exists("data/features_all.csv"): path_features_known_csv = "data/features_all.csv" csv_rd = pd.read_csv(path_features_known_csv, header=None) for i in range(csv_rd.shape[0]): features_someone_arr = [] for j in range(0, 128): if csv_rd.iloc[i][j] == '': features_someone_arr.append('0') else: features_someone_arr.append(csv_rd.iloc[i][j]) self.features_known_list.append(features_someone_arr) self.name_known_list.append("Person_" + str(i + 1)) print("Faces in Database:", len(self.features_known_list)) return 1 else: print('##### Warning #####', '\n') print("'features_all.csv' not found!") print( "Please run 'get_faces_from_camera.py' and 'features_extraction_to_csv.py' before 'face_reco_from_camera.py'", '\n') print('##### End Warning #####') return 0 # 计算两个128D向量间的欧式距离 (Compute the e-distance between two 128D features) # 更新 FPS (Update FPS of Video stream def update_fps(self): now = time.time() self.frame_time = now - self.frame_start_time self.fps = 1.0 / self.frame_time self.frame_start_time = now # 计算两个128D向量间的欧式距离 (Compute the e-distance between two 128D features) @staticmethod def return_euclidean_distance(feature_1, feature_2): feature_1 = np.array(feature_1) feature_2 = np.array(feature_2) dist = np.sqrt(np.sum(np.square(feature_1 - feature_2))) return dist # 生成的 cv2 window 上面添加说明文字 (putText on cv2 window) def draw_note(self, img_rd): # 添加说明 (Add some statements cv2.putText(img_rd, "Face Recognizer with OT (one person)", (20, 40), self.font, 1, (255, 255, 255), 1, cv2.LINE_AA) cv2.putText(img_rd, "FPS: " + str(self.fps.__round__(2)), (20, 100), self.font, 0.8, (0, 255, 0), 1, cv2.LINE_AA) cv2.putText(img_rd, "Q: Quit", (20, 450), self.font, 0.8, (255, 255, 255), 1, cv2.LINE_AA) # 处理获取的视频流,进行人脸识别 (Face detection and 
recognition wit OT from input video stream) def process(self, stream): # 1. 读取存放所有人脸特征的 csv (Get faces known from "features.all.csv") if self.get_face_database(): while stream.isOpened(): self.frame_cnt += 1 print(">>> Frame " + str(self.frame_cnt) + " starts") flag, img_rd = stream.read() kk = cv2.waitKey(1) # 2. 检测人脸 (Detect faces for frame X) faces = detector(img_rd, 0) # 3. 更新帧中的人脸数 (Update cnt for faces in frames) self.last_frame_faces_cnt = self.current_frame_face_cnt self.current_frame_face_cnt = len(faces) # 4.1 当前帧和上一帧相比没有发生人脸数变化 (If cnt not changes, 1->1 or 0->0) if self.current_frame_face_cnt == self.last_frame_faces_cnt: print(" >>> scene 1: 当前帧和上一帧相比没有发生人脸数变化 (No face cnt changes in this frame!!!") if "unknown" in self.current_frame_name_list: print(" >>> 有未知人脸, 开始进行 reclassify_interval_cnt 计数") self.reclassify_interval_cnt += 1 # 4.1.1 当前帧一张人脸 / One face in this frame if self.current_frame_face_cnt ==1: if self.reclassify_interval_cnt==self.reclassify_interval: print(" >>> scene 1.1 需要对于当前帧重新进行人脸识别 (Re-classification for current frame)") self.reclassify_interval_cnt=0 self.current_frame_face_feature_list = [] self.current_frame_face_X_e_distance_list = [] self.current_frame_name_list = [] for i in range(len(faces)): shape = predictor(img_rd, faces[i]) self.current_frame_face_feature_list.append( face_reco_model.compute_face_descriptor(img_rd, shape)) # a. 遍历捕获到的图像中所有的人脸 (Traversal all the faces in the database) for k in range(len(faces)): self.current_frame_name_list.append("unknown") # b. 每个捕获人脸的名字坐标 (Positions of faces captured) self.current_frame_face_position_list.append(tuple( [faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)])) # c. 
对于某张人脸,遍历所有存储的人脸特征 (For every faces detected, compare the faces in the database) for i in range(len(self.features_known_list)): # 如果 person_X 数据不为空 (If the data of person_X is not empty) if str(self.features_known_list[i][0]) != '0.0': print(" >>> with person", str(i + 1), "the e distance: ", end='') e_distance_tmp = self.return_euclidean_distance( self.current_frame_face_feature_list[k], self.features_known_list[i]) print(e_distance_tmp) self.current_frame_face_X_e_distance_list.append(e_distance_tmp) else: # 空数据 person_X (For empty data) self.current_frame_face_X_e_distance_list.append(999999999) print(" >>> current_frame_face_X_e_distance_list:", self.current_frame_face_X_e_distance_list) # d. 寻找出最小的欧式距离匹配 (Find the one with minimum e distance) similar_person_num = self.current_frame_face_X_e_distance_list.index( min(self.current_frame_face_X_e_distance_list)) if min(self.current_frame_face_X_e_distance_list) < 0.4: self.current_frame_name_list[k] = self.name_known_list[similar_person_num] print(" >>> recognition result for face " + str(k + 1) + ": " + self.name_known_list[similar_person_num]) else: print( " >>> recognition result for face " + str(k + 1) + ": " + "unknown") else: print(" >>> scene 1.2 不需要对于当前帧重新进行人脸识别 (No re-classification for current frame)") # 获取特征框坐标 (Get ROI positions) for k, d in enumerate(faces): # 计算矩形框大小 (Compute the size of rectangle box) height = (d.bottom() - d.top()) width = (d.right() - d.left()) hh = int(height / 2) ww = int(width / 2) cv2.rectangle(img_rd, tuple([d.left() - ww, d.top() - hh]), tuple([d.right() + ww, d.bottom() + hh]), (255, 255, 255), 2) self.current_frame_face_position_list[k] = tuple( [faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)]) print(" >>> self.current_frame_name_list[k]: ", self.current_frame_name_list[k]) print(" >>> self.current_frame_face_position_list[k]: ", self.current_frame_face_position_list[k]) # 写名字 (Write names under ROI img_rd = cv2.putText(img_rd, 
self.current_frame_name_list[k], self.current_frame_face_position_list[k], self.font, 0.8, (0, 255, 255), 1, cv2.LINE_AA) # 4.2 当前帧和上一帧相比发生人脸数变化 (If face cnt changes, 1->0 or 0->1) else: print(" >>> scene 2: 当前帧和上一帧相比人脸数发生变化 (Faces cnt changes in this frame)") self.current_frame_face_position_list = [] self.current_frame_face_X_e_distance_list = [] self.current_frame_face_feature_list = [] # 4.2.1 人脸数从 0->1 / Face cnt 0->1 if (self.current_frame_face_cnt == 1): print(" >>> scene 2.1 出现人脸,进行人脸识别 (Get person in this frame and do face recognition)") self.current_frame_name_list = [] for i in range(len(faces)): shape = predictor(img_rd, faces[i]) self.current_frame_face_feature_list.append( face_reco_model.compute_face_descriptor(img_rd, shape)) # a. 遍历捕获到的图像中所有的人脸 (Traversal all the faces in the database) for k in range(len(faces)): self.current_frame_name_list.append("unknown") # b. 每个捕获人脸的名字坐标 (Positions of faces captured) self.current_frame_face_position_list.append(tuple( [faces[k].left(), int(faces[k].bottom() + (faces[k].bottom() - faces[k].top()) / 4)])) # c. 对于某张人脸,遍历所有存储的人脸特征 (For every face detected, compare with faces in the database) for i in range(len(self.features_known_list)): # 如果 person_X 数据不为空 (If data of person_X is not empty) if str(self.features_known_list[i][0]) != '0.0': print(" >>> with person", str(i + 1), "the e distance: ", end='') e_distance_tmp = self.return_euclidean_distance( self.current_frame_face_feature_list[k], self.features_known_list[i]) print(e_distance_tmp) self.current_frame_face_X_e_distance_list.append(e_distance_tmp) else: # 空数据 person_X (Empty data for person_X) self.current_frame_face_X_e_distance_list.append(999999999) # d. 
寻找出最小的欧式距离匹配 (Find the one with minimum e distance) similar_person_num = self.current_frame_face_X_e_distance_list.index(min(self.current_frame_face_X_e_distance_list)) if min(self.current_frame_face_X_e_distance_list) < 0.4: self.current_frame_name_list[k] = self.name_known_list[similar_person_num] print(" >>> recognition result for face " + str(k + 1) + ": " + self.name_known_list[similar_person_num]) else: print(" >>> recognition result for face " + str(k + 1) + ": " + "unknown") if "unknown" in self.current_frame_name_list: self.reclassify_interval_cnt+=1 # 4.2.1 人脸数从 1->0 / Face cnt 1->0 elif self.current_frame_face_cnt == 0: print(" >>> scene 2.2 人脸消失, 当前帧中没有人脸 (No face in this frame!!!)") self.reclassify_interval_cnt=0 self.current_frame_name_list = [] self.current_frame_face_feature_list = [] # 5. 生成的窗口添加说明文字 (Add note on cv2 window) self.draw_note(img_rd) if kk == ord('q'): break self.update_fps() cv2.namedWindow("camera", 1) cv2.imshow("camera", img_rd) print(">>> Frame ends\n\n") def run(self): cap = cv2.VideoCapture(0) self.process(cap) cap.release() cv2.destroyAllWindows() def main(): Face_Recognizer_con = Face_Recognizer() Face_Recognizer_con.run() if __name__ == '__main__': main()
{"hexsha": "4a6aaa311646e367c84f5277d668a9920615d084", "size": 15677, "ext": "py", "lang": "Python", "max_stars_repo_path": "face_reco_from_camera_ot_single_person.py", "max_stars_repo_name": "python-faker/Dlib_face_recognition_from_camera", "max_stars_repo_head_hexsha": "ee3a26b3c6669c4f4f08a7cc52af54fe4449aa75", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "face_reco_from_camera_ot_single_person.py", "max_issues_repo_name": "python-faker/Dlib_face_recognition_from_camera", "max_issues_repo_head_hexsha": "ee3a26b3c6669c4f4f08a7cc52af54fe4449aa75", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "face_reco_from_camera_ot_single_person.py", "max_forks_repo_name": "python-faker/Dlib_face_recognition_from_camera", "max_forks_repo_head_hexsha": "ee3a26b3c6669c4f4f08a7cc52af54fe4449aa75", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 50.8993506494, "max_line_length": 144, "alphanum_fraction": 0.5150220068, "include": true, "reason": "import numpy", "num_tokens": 3716}
import pprint import sys import time import socket import pickle import cv2 import numpy import zmq def send_array(socket, A, f_num, flags=0, copy=True, track=False): """send a numpy array with metadata""" md = dict( dtype='uint8', shape=A.shape, frame_num=f_num, ) socket.send_json(md, flags | zmq.SNDMORE) print("sending from collector 1") return socket.send(A, flags, copy=copy, track=track) def result_collector(): PUSH_PORT = int(sys.argv[1])+2000 print("I am collector #%s" % (PUSH_PORT)) receiverSwitcher = True context = zmq.Context() results_receiver1 = context.socket(zmq.PULL) results_receiver1.bind("tcp://127.0.0.1:%s" % (int(sys.argv[1]))) results_sender1 = context.socket(zmq.PUSH) results_sender2 = context.socket(zmq.PUSH) results_sender1.connect("tcp://127.0.0.1:%s" % PUSH_PORT) results_sender2.connect("tcp://127.0.0.1:%s" % PUSH_PORT) while True: recv_msg = pickle.loads(results_receiver1.recv()) image = recv_msg['image'] f_num = recv_msg['frame_num'] #image, f_num = recv_array(results_receiver1) #print(f_num, image) if(receiverSwitcher): send_array(results_sender1, image, f_num) print("sender1\n") receiverSwitcher = False else: send_array(results_sender2, image, f_num) print("sender2\n") receiverSwitcher = True result_collector()
{"hexsha": "04ba2c7e5187c4ee61818973fac69fe9b0ffdf72", "size": 1500, "ext": "py", "lang": "Python", "max_stars_repo_path": "collector1.py", "max_stars_repo_name": "naderabdalghani/distributed-systems-video-analyzer", "max_stars_repo_head_hexsha": "df47a655d6d8ca4a9686fb227dba04a8acfc1625", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "collector1.py", "max_issues_repo_name": "naderabdalghani/distributed-systems-video-analyzer", "max_issues_repo_head_hexsha": "df47a655d6d8ca4a9686fb227dba04a8acfc1625", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "collector1.py", "max_forks_repo_name": "naderabdalghani/distributed-systems-video-analyzer", "max_forks_repo_head_hexsha": "df47a655d6d8ca4a9686fb227dba04a8acfc1625", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.8620689655, "max_line_length": 69, "alphanum_fraction": 0.6353333333, "include": true, "reason": "import numpy", "num_tokens": 390}
#!/usr/bin/env python import numpy as np import h5py from util import * # Change dir to caffe root or prototxt database paths won't work wrong import os print os.getcwd() os.chdir('..') print os.getcwd() # The caffe module needs to be on the Python path; # we'll add it here explicitly. import sys sys.path.insert(0, './caffe/python/') sys.path.insert(0, './lib/') sys.path.insert(0, './tools/') import caffe print os.getcwd() # data_path = './data/genome/1600-400-20' # Load classes # classes = ['__background__'] # with open(os.path.join(data_path, 'objects_vocab.txt')) as f: # for object in f.readlines(): # classes.append(object.split(',')[0].lower().strip()) # # # Load attributes # attributes = ['__no_attribute__'] # with open(os.path.join(data_path, 'attributes_vocab.txt')) as f: # for att in f.readlines(): # attributes.append(att.split(',')[0].lower().strip()) # Check object extraction from fast_rcnn.config import cfg, cfg_from_file from fast_rcnn.test import im_detect,_get_blobs GPU_ID = 0 # if we have multiple GPUs, pick one caffe.set_device(GPU_ID) caffe.set_mode_gpu() # net = None cfg_from_file('experiments/cfgs/faster_rcnn_end2end_resnet.yml') weights = 'data/faster_rcnn_models/resnet101_faster_rcnn_final_iter_320000.caffemodel' prototxt = 'models/vg/ResNet-101/faster_rcnn_end2end_final/test.prototxt' net = caffe.Net(prototxt, caffe.TEST, weights=weights) ######################### # DON'T FORGET CHANGE PATH!!! gt_file_path = '/home/lab/dQ_IC/0_dataset/Flickr30k/gt.token' img_path = '/home/lab/dQ_IC/0_dataset/Flickr30k/flickr30k_images' feat_file_path = '/home/lab/dQ_IC/0_dataset/Flickr30k/bt.h5' VFEAT_DIM = 2048 SFEAT_DIM = 5 n_box = 36 print "Begin extract image features..." 
img_names = get_img_names_list(gt_file_path, img_path) img_names = img_names[0:30] # Warmup on a dummy image im = 128 * np.ones((500, 500, 3), dtype=np.uint8) for i in xrange(2): _, _, _, _ = im_detect(net, im) sfeat_all = np.zeros((len(img_names), n_box, SFEAT_DIM)) vfeat_all = np.zeros((len(img_names), n_box, VFEAT_DIM)) # extract image features one by one for idx, im_name in enumerate(img_names): # print idx, im_name vfeat, sfeat = extract_fea(net, im_name) # print im_name sfeat_all[idx] = sfeat vfeat_all[idx] = vfeat if idx % 10 == 0: print '{:d}/{:d}'.format(idx, len(img_names)) feat_file = h5py.File(feat_file_path, 'w') feat_file['vfeat'] = vfeat_all feat_file['sfeat'] = sfeat_all feat_file.close()
{"hexsha": "e74b1c7e797c0bb51173e7aee1d079f6fe9fd60d", "size": 2505, "ext": "py", "lang": "Python", "max_stars_repo_path": "tools/extract_feature_whole.py", "max_stars_repo_name": "daqingliu/bottom-up-attention", "max_stars_repo_head_hexsha": "d5f6d57fd015447364e225f3f1a30ac51e4d0a55", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 5, "max_stars_repo_stars_event_min_datetime": "2020-08-13T11:45:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-16T14:50:48.000Z", "max_issues_repo_path": "tools/extract_feature_whole.py", "max_issues_repo_name": "daqingliu/bottom-up-attention", "max_issues_repo_head_hexsha": "d5f6d57fd015447364e225f3f1a30ac51e4d0a55", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tools/extract_feature_whole.py", "max_forks_repo_name": "daqingliu/bottom-up-attention", "max_forks_repo_head_hexsha": "d5f6d57fd015447364e225f3f1a30ac51e4d0a55", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-08-13T11:51:36.000Z", "max_forks_repo_forks_event_max_datetime": "2020-08-13T11:51:36.000Z", "avg_line_length": 28.1460674157, "max_line_length": 86, "alphanum_fraction": 0.7089820359, "include": true, "reason": "import numpy", "num_tokens": 731}
double precision md,mu,ms,mc,mb,mt common/qmass/md,mu,ms,mc,mb,mt
{"hexsha": "c4646fdf51c61d22896e79e5b2168332ec011809", "size": 78, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "MCFM-JHUGen/src/Inc/qmass.f", "max_stars_repo_name": "tmartini/JHUGen", "max_stars_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2015-06-08T13:09:28.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-04T19:59:36.000Z", "max_issues_repo_path": "MCFM-JHUGen/src/Inc/qmass.f", "max_issues_repo_name": "tmartini/JHUGen", "max_issues_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 64, "max_issues_repo_issues_event_min_datetime": "2015-06-24T15:08:17.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T04:59:32.000Z", "max_forks_repo_path": "MCFM-JHUGen/src/Inc/qmass.f", "max_forks_repo_name": "tmartini/JHUGen", "max_forks_repo_head_hexsha": "80da31668d7b7eb5b02bb4cac435562c45075d24", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 19, "max_forks_repo_forks_event_min_datetime": "2015-05-04T22:15:41.000Z", "max_forks_repo_forks_event_max_datetime": "2021-07-06T10:04:40.000Z", "avg_line_length": 26.0, "max_line_length": 40, "alphanum_fraction": 0.641025641, "num_tokens": 31}
[STATEMENT] lemma f_join_all_conv: "(xs \<Join>\<^sub>f I = xs) = ({..<length xs} \<subseteq> I)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (xs \<Join>\<^sub> I = xs) = ({..<length xs} \<subseteq> I) [PROOF STEP] apply (case_tac "length xs = 0", simp add: f_join_Nil) [PROOF STATE] proof (prove) goal (1 subgoal): 1. length xs \<noteq> 0 \<Longrightarrow> (xs \<Join>\<^sub> I = xs) = ({..<length xs} \<subseteq> I) [PROOF STEP] apply (rule iffI) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>length xs \<noteq> 0; xs \<Join>\<^sub> I = xs\<rbrakk> \<Longrightarrow> {..<length xs} \<subseteq> I 2. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs [PROOF STEP] apply (rule subsetI, rename_tac t) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>t. \<lbrakk>length xs \<noteq> 0; xs \<Join>\<^sub> I = xs; t \<in> {..<length xs}\<rbrakk> \<Longrightarrow> t \<in> I 2. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs [PROOF STEP] apply (clarsimp simp: list_eq_iff[of _ xs] f_join_length) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>t. \<lbrakk>xs \<noteq> []; t < length xs; card (I \<down>< length xs) = length xs; \<forall>i<length xs. xs \<Join>\<^sub> I ! i = xs ! i\<rbrakk> \<Longrightarrow> t \<in> I 2. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs [PROOF STEP] apply (rule ccontr) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>t. \<lbrakk>xs \<noteq> []; t < length xs; card (I \<down>< length xs) = length xs; \<forall>i<length xs. xs \<Join>\<^sub> I ! i = xs ! i; t \<notin> I\<rbrakk> \<Longrightarrow> False 2. 
\<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs [PROOF STEP] apply (subgoal_tac "I \<down>< length xs \<subset> {..<length xs}") [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<And>t. \<lbrakk>xs \<noteq> []; t < length xs; card (I \<down>< length xs) = length xs; \<forall>i<length xs. xs \<Join>\<^sub> I ! i = xs ! i; t \<notin> I; I \<down>< length xs \<subset> {..<length xs}\<rbrakk> \<Longrightarrow> False 2. \<And>t. \<lbrakk>xs \<noteq> []; t < length xs; card (I \<down>< length xs) = length xs; \<forall>i<length xs. xs \<Join>\<^sub> I ! i = xs ! i; t \<notin> I\<rbrakk> \<Longrightarrow> I \<down>< length xs \<subset> {..<length xs} 3. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs [PROOF STEP] prefer 2 [PROOF STATE] proof (prove) goal (3 subgoals): 1. \<And>t. \<lbrakk>xs \<noteq> []; t < length xs; card (I \<down>< length xs) = length xs; \<forall>i<length xs. xs \<Join>\<^sub> I ! i = xs ! i; t \<notin> I\<rbrakk> \<Longrightarrow> I \<down>< length xs \<subset> {..<length xs} 2. \<And>t. \<lbrakk>xs \<noteq> []; t < length xs; card (I \<down>< length xs) = length xs; \<forall>i<length xs. xs \<Join>\<^sub> I ! i = xs ! i; t \<notin> I; I \<down>< length xs \<subset> {..<length xs}\<rbrakk> \<Longrightarrow> False 3. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs [PROOF STEP] apply blast [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>t. \<lbrakk>xs \<noteq> []; t < length xs; card (I \<down>< length xs) = length xs; \<forall>i<length xs. xs \<Join>\<^sub> I ! i = xs ! i; t \<notin> I; I \<down>< length xs \<subset> {..<length xs}\<rbrakk> \<Longrightarrow> False 2. 
\<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs [PROOF STEP] apply (drule psubset_card_mono[OF finite_lessThan]) [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>t. \<lbrakk>xs \<noteq> []; t < length xs; card (I \<down>< length xs) = length xs; \<forall>i<length xs. xs \<Join>\<^sub> I ! i = xs ! i; t \<notin> I; card (I \<down>< length xs) < card {..<length xs}\<rbrakk> \<Longrightarrow> False 2. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs [PROOF STEP] apply simp [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs [PROOF STEP] apply (subgoal_tac "length (xs \<Join>\<^sub>f I) = length xs") [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I; length (xs \<Join>\<^sub> I) = length xs\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs 2. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I\<rbrakk> \<Longrightarrow> length (xs \<Join>\<^sub> I) = length xs [PROOF STEP] prefer 2 [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I\<rbrakk> \<Longrightarrow> length (xs \<Join>\<^sub> I) = length xs 2. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I; length (xs \<Join>\<^sub> I) = length xs\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs [PROOF STEP] apply (simp add: f_join_length cut_less_Int_conv Int_absorb1) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>length xs \<noteq> 0; {..<length xs} \<subseteq> I; length (xs \<Join>\<^sub> I) = length xs\<rbrakk> \<Longrightarrow> xs \<Join>\<^sub> I = xs [PROOF STEP] apply (clarsimp simp: list_eq_iff[of _ xs] f_join_nth) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>i. 
\<lbrakk>xs \<noteq> []; {..<length xs} \<subseteq> I; length (xs \<Join>\<^sub> I) = length xs; i < length xs\<rbrakk> \<Longrightarrow> xs ! (I \<rightarrow> i) = xs ! i [PROOF STEP] apply (rule arg_cong[where f="(!) xs"]) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>i. \<lbrakk>xs \<noteq> []; {..<length xs} \<subseteq> I; length (xs \<Join>\<^sub> I) = length xs; i < length xs\<rbrakk> \<Longrightarrow> I \<rightarrow> i = i [PROOF STEP] apply (subgoal_tac "I \<down>< length xs = {..<length xs}") [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>i. \<lbrakk>xs \<noteq> []; {..<length xs} \<subseteq> I; length (xs \<Join>\<^sub> I) = length xs; i < length xs; I \<down>< length xs = {..<length xs}\<rbrakk> \<Longrightarrow> I \<rightarrow> i = i 2. \<And>i. \<lbrakk>xs \<noteq> []; {..<length xs} \<subseteq> I; length (xs \<Join>\<^sub> I) = length xs; i < length xs\<rbrakk> \<Longrightarrow> I \<down>< length xs = {..<length xs} [PROOF STEP] prefer 2 [PROOF STATE] proof (prove) goal (2 subgoals): 1. \<And>i. \<lbrakk>xs \<noteq> []; {..<length xs} \<subseteq> I; length (xs \<Join>\<^sub> I) = length xs; i < length xs\<rbrakk> \<Longrightarrow> I \<down>< length xs = {..<length xs} 2. \<And>i. \<lbrakk>xs \<noteq> []; {..<length xs} \<subseteq> I; length (xs \<Join>\<^sub> I) = length xs; i < length xs; I \<down>< length xs = {..<length xs}\<rbrakk> \<Longrightarrow> I \<rightarrow> i = i [PROOF STEP] apply fastforce [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>i. \<lbrakk>xs \<noteq> []; {..<length xs} \<subseteq> I; length (xs \<Join>\<^sub> I) = length xs; i < length xs; I \<down>< length xs = {..<length xs}\<rbrakk> \<Longrightarrow> I \<rightarrow> i = i [PROOF STEP] apply (subst inext_nth_cut_less_eq[where t="length xs", symmetric], simp) [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<And>i. 
\<lbrakk>xs \<noteq> []; {..<length xs} \<subseteq> I; length (xs \<Join>\<^sub> I) = length xs; i < length xs; I \<down>< length xs = {..<length xs}\<rbrakk> \<Longrightarrow> I \<down>< length xs \<rightarrow> i = i [PROOF STEP] apply (simp add: inext_nth_lessThan) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
{"llama_tokens": 3052, "file": "AutoFocus-Stream_IL_AF_Stream", "length": 21}
MODULE global_variables

! Aggregates (via USE) the solver modules of this electronic-structure
! code and declares the top-level run-control switches that the driver
! routines share.  This module holds no executable code of its own.
!
! NOTE(review): the commented-out USE groups (esm_*, localpot2_*, ...)
! look like optional features disabled in this build -- they are kept
! verbatim; confirm before deleting.

use bb_module
use atom_module
use rgrid_sol_module
use rgrid_module
use ggrid_module
use bz_module
use fd_module
use strfac_module
use pseudopot_module
use ps_local_module
use ps_pcc_module
use ps_initrho_module
use ps_nloc2_init_module
use ps_nloc2_module
use bc_module
use electron_module
use density_module
use parallel_module
use wf_module
use localpot_module
use nonlocal_module
use eion_module
use gram_schmidt_module
use gram_schmidt_t_module
use hartree_variables
use hartree_module
use xc_hybrid_module
use xc_module
use scalapack_module
use subspace_diag_module
use subspace_diag_la_module
use subspace_diag_sl_module
use subspace_mate_sl_module
use subspace_solv_sl_module
use subspace_rotv_sl_module
use kinetic_variables
use kinetic_module
use hamiltonian_module
use cgpc_module
use cg_module
use total_energy_module
use mixing_module
use esp_gather_module
use watch_module
use io_module
use array_bound_module
use atomopt_module
! use esm_rgrid_module
! use esm_rshell_module
! use esm_cylindrical_test
! use ps_local_rs_module
! use esm_genpot_module
! use esm_kinetic_module
use rgrid_mol_module
use ps_local_mol_module, only: init_ps_local_mol,construct_ps_local_mol
use ps_pcc_mol_module
use ps_initrho_mol_module
use ps_nloc2_mol_module
use bc_mol_module
use kinetic_mol_module
use ps_gth_module
use ps_nloc_mr_module
! use rsdft_bcast_module
! use localpot2_variables
! use localpot2_ion_module
! use localpot2_density_module
! use localpot2_vh_module
! use localpot2_xc_module
! use localpot2_module
! use localpot2_te_module
! use localpot2_Smatrix_module
use ps_nloc3_module
use test_hpsi2_module
use test_force_module
use info_module
use init_occ_electron_module
use esp_calc_module
use symmetry_module
use force_module
use sweep_module
use scf_module

implicit none

! Run-mode switches; their meaning is defined by the calling driver.
integer :: iswitch_scf,iswitch_opt,iswitch_band
integer :: iswitch_latopt
integer :: iswitch_test,iswitch_tddft,iswitch_dos
real(8) :: etime_limit   ! elapsed-time limit (units defined by caller)
logical :: disp_switch   ! enables diagnostic/progress output

END MODULE global_variables
{"hexsha": "52e053b136e0739f7a55505a030c85819742ba52", "size": 2188, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/global_variables.f90", "max_stars_repo_name": "j-iwata/RSDFT_DEVELOP", "max_stars_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-12-02T05:03:05.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-02T05:03:05.000Z", "max_issues_repo_path": "src/global_variables.f90", "max_issues_repo_name": "j-iwata/RSDFT_DEVELOP", "max_issues_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/global_variables.f90", "max_forks_repo_name": "j-iwata/RSDFT_DEVELOP", "max_forks_repo_head_hexsha": "14e79a4d78a19e5e5c6fd7b3d2f2f0986f2ff6df", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-01-22T02:44:58.000Z", "max_forks_repo_forks_event_max_datetime": "2020-01-22T02:44:58.000Z", "avg_line_length": 20.4485981308, "max_line_length": 73, "alphanum_fraction": 0.8121572212, "num_tokens": 652}
#!/usr/bin/env python3
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import argparse
import time
import numpy as np
import os
import errno
import sys

from object_detector import ObjectDetector as TFObjectDetector
from object_detector_lite import ObjectDetector as LiteObjectDetector

import cv2

description_text = """\
Use this script to visualize network output on each frame of a video.

Once you've trained a network, you may want to intuitively understand its
performance on different videos, especially looking at frame to frame
performance in a single video. This script enables that visualization for both
TensorFlow and TFLite model formats.

Additionally, this script lets you save a video with each frame annotated with
output from the network, as well as save individual annotated frames if
desired.
"""

epilog_text = """\
example:
    ./camera_cv.py --movie [movie.mp4] --path_to_model [model.pb]
"""

parser = argparse.ArgumentParser(
        description=description_text,
        epilog=epilog_text,
        formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument("--movie", type=str, default="",
                    help="Movie file to run prediction on")
parser.add_argument("--write_images", default=False, action="store_true",
                    help="Whether to write each frame as a separate image")
parser.add_argument("--write_movie", default=False, action="store_true",
                    help="Whether to write an annotated movie")
parser.add_argument("--tflite", default=False, action="store_true",
                    help="Whether model is tflite")
parser.add_argument("--path_to_model", type=str,
                    default="output_inference_graph/frozen_inference_graph.pb",
                    help="Directory containing frozen checkpoint file or .tflite model")
parser.add_argument("--path_to_labels", type=str,
                    default="train_data/label.pbtxt",
                    help="Text proto (TF) or text (tflite) file containing label map")
parser.add_argument("--num_classes", type=int, default=2,
                    help="Number of classes")
parser.add_argument("--threshold", type=float, default=0.6,
                    help="Threshold for displaying detections")
parser.add_argument("--box_priors", type=str, default="box_priors.txt",
                    help="Path to box_priors.txt file containing priors (only required for TFLite)")
args = parser.parse_args()

# Bug fix: the original used `args.movie is not ""` -- identity comparison
# against a literal, which is fragile and a SyntaxWarning on Python 3.8+.
# Use equality instead.
if args.movie != "" and not os.path.exists(args.movie):
    print("Movie file %s missing" % args.movie)
    sys.exit(1)

# Read from the given movie file, or fall back to the default camera and
# pick a placeholder name for any derived output paths.
if args.movie != "":
    cam = cv2.VideoCapture(args.movie)
else:
    cam = cv2.VideoCapture(0)
    args.movie = "movie.mkv"

width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))
height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))

# Select the detector backend matching the model format.
if args.tflite:
    objdet = LiteObjectDetector(args.path_to_model, args.path_to_labels,
                                args.box_priors)
else:
    objdet = TFObjectDetector(args.path_to_model, args.path_to_labels,
                              args.num_classes)

movie_name = os.path.splitext(os.path.basename(args.movie))[0]

if args.write_movie:
    out_path = os.path.join(os.path.dirname(args.movie), movie_name + "_boxes")
    movie_path = "%s.mkv" % out_path
    print("Writing movie to", movie_path)
    writer = cv2.VideoWriter(
            movie_path,
            cv2.VideoWriter_fourcc(*"MJPG"),
            int(cam.get(cv2.CAP_PROP_FPS)),
            (width, height)
    )

    # Quit if there was a problem
    if not writer.isOpened():
        print("Unable to open video!")
        sys.exit()

if args.write_images:
    movie_dir = os.path.dirname(args.movie)
    images_dir = os.path.join(movie_dir, "%s_images" % movie_name)
    print("Writing images to %s" % images_dir)
    try:
        os.makedirs(images_dir)
    except OSError as e:
        if e.errno == errno.EEXIST:
            print("Directory exists already, continuing!")
        else:
            raise

counter = 0
ret, frame = cam.read()
while ret:  # idiom fix: was `while ret == True:`
    img = frame.copy()  # Aliased, but lets us turn off transformations as necessary.
    # cv2 delivers BGR; the detector expects RGB.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    h, w, _ = img.shape
    expand = np.expand_dims(img, axis=0)
    result = objdet.detect(expand)

    # Collect pixel-space boxes above the score threshold.
    boxes = []
    for i in range(result['num_detections']):
        if result['detection_scores'][i] > args.threshold:
            class_ = result['detection_classes'][i]
            box = result['detection_boxes'][i]
            score = result['detection_scores'][i]
            y1, x1 = int(box[0] * h), int(box[1] * w)
            y2, x2 = int(box[2] * h), int(box[3] * w)
            if args.tflite:
                # TFLite emits coordinates in (x, y) order; swap to match.
                x1, y1, x2, y2 = y1, x1, y2, x2
            boxes.append((class_, score, x1, y1, x2, y2))

    # Draw the annotations.
    for box in boxes:
        class_, score, x1, y1, x2, y2 = box
        w1 = x2-x1
        h1 = y2-y1
        cv2.rectangle(img, (x1, y1), (x2, y2), (255,0,0), 2)
        cv2.putText(img, "%s: %5.2f" % (class_-1, score), (x1, y1),
                    cv2.FONT_HERSHEY_SIMPLEX, 1, (255,0,0), 2)

    # Convert back for display/writing (RGB<->BGR is its own inverse, so
    # COLOR_BGR2RGB performs the reverse swap here).
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    cv2.imshow('image', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

    if args.write_movie:
        writer.write(img)

    if args.write_images:
        print("[%d] Writing original to %s" % (counter, images_dir))
        cv2.imwrite(os.path.join(images_dir, "orig_%05d.png" % counter), frame)
        print("[%d] Writing boxes to %s" % (counter, images_dir))
        cv2.imwrite(os.path.join(images_dir, "box_%05d.png" % counter), img)

    counter += 1
    ret, frame = cam.read()

# Release resources explicitly (previously only the writer was released).
cam.release()
cv2.destroyAllWindows()
if args.write_movie:
    writer.release()
{"hexsha": "5013f875a152f3948dac6716d5d339b4d0e67c27", "size": 6053, "ext": "py", "lang": "Python", "max_stars_repo_path": "training/camera_cv.py", "max_stars_repo_name": "isabella232/ftc-object-detection", "max_stars_repo_head_hexsha": "e8923e208c449dd457b60d84fc21eb715364f0de", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 37, "max_stars_repo_stars_event_min_datetime": "2018-12-22T23:30:44.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-18T21:37:01.000Z", "max_issues_repo_path": "training/camera_cv.py", "max_issues_repo_name": "lizlooney/ftc-object-detection", "max_issues_repo_head_hexsha": "e8923e208c449dd457b60d84fc21eb715364f0de", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2019-02-21T04:32:09.000Z", "max_issues_repo_issues_event_max_datetime": "2021-02-11T22:23:39.000Z", "max_forks_repo_path": "training/camera_cv.py", "max_forks_repo_name": "isabella232/ftc-object-detection", "max_forks_repo_head_hexsha": "e8923e208c449dd457b60d84fc21eb715364f0de", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 32, "max_forks_repo_forks_event_min_datetime": "2018-11-06T18:16:30.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-22T00:10:56.000Z", "avg_line_length": 36.6848484848, "max_line_length": 110, "alphanum_fraction": 0.6680984636, "include": true, "reason": "import numpy", "num_tokens": 1511}
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import glob
import cv2
import pickle


def camera_calibration(nx, ny, path, show=True):
    """Calibrate a camera from chessboard calibration images.

    Args:
        nx: number of inner chessboard corners per row.
        ny: number of inner chessboard corners per column.
        path: glob pattern matching the calibration images.
        show: if True, display each image with the detected corners drawn.

    Returns:
        dict with keys 'mtx', 'dist', 'rvecs', 'tvecs' as returned by
        cv2.calibrateCamera.

    Raises:
        ValueError: if the glob pattern matches no images (previously this
            surfaced as a confusing NameError on img_size).
    """
    images = glob.glob(path)
    if not images:
        raise ValueError("No calibration images found for pattern %r" % path)

    objpoints = []  # 3D points in real world space
    imgpoints = []  # 2D points in the image plane

    # One shared (nx*ny, 3) grid of object points on the z=0 plane.
    objp = np.zeros((ny * nx, 3), np.float32)
    objp[:, :2] = np.mgrid[0:nx, 0:ny].T.reshape(-1, 2)  # x, y coordinates

    for fname in images:
        img = mpimg.imread(fname)
        img_size = (img.shape[1], img.shape[0])
        # NOTE(review): mpimg.imread returns RGB, so COLOR_RGB2GRAY would be
        # the strictly correct flag; BGR2GRAY merely swaps channel weights
        # and corner detection still works, so behavior is preserved.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
        if ret:
            imgpoints.append(corners)
            objpoints.append(objp)
            # draw and display the corners
            img = cv2.drawChessboardCorners(img, (nx, ny), corners, ret)
            if show:
                # Bug fix: the original called plt.figure(figsize=(10, 10))
                # and then plt.figure() again, so imshow landed on a second,
                # default-sized figure and the figsize was ignored (leaving
                # an empty window behind).  Create exactly one figure.
                plt.figure(figsize=(10, 10))
                plt.imshow(img)

    ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(
        objpoints, imgpoints, img_size, None, None)

    calib_dict = {}
    calib_dict['mtx'] = mtx
    calib_dict['dist'] = dist
    calib_dict['rvecs'] = rvecs
    calib_dict['tvecs'] = tvecs
    return calib_dict


def undistort(img, mtx, dist):
    """
    Computes the ideal point coordinates from the observed point coordinates.
    """
    undist = cv2.undistort(img, mtx, dist, None, mtx)
    return undist


if __name__ == '__main__':
    calib_dict = camera_calibration(nx=9, ny=6, path="camera_cal/calibration*.jpg")
    with open('calibrate_camera.p', 'wb') as f:
        pickle.dump(calib_dict, f)
{"hexsha": "5b1a29b521aae11917a8b8d161678c6b21768c2d", "size": 1658, "ext": "py", "lang": "Python", "max_stars_repo_path": "distortion_correction.py", "max_stars_repo_name": "MenglingHettinger/CarND-Advanced-Lane-Lines", "max_stars_repo_head_hexsha": "049509f2f3fd160d3db5df8caef4985932e8bc5b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "distortion_correction.py", "max_issues_repo_name": "MenglingHettinger/CarND-Advanced-Lane-Lines", "max_issues_repo_head_hexsha": "049509f2f3fd160d3db5df8caef4985932e8bc5b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "distortion_correction.py", "max_forks_repo_name": "MenglingHettinger/CarND-Advanced-Lane-Lines", "max_forks_repo_head_hexsha": "049509f2f3fd160d3db5df8caef4985932e8bc5b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.8846153846, "max_line_length": 96, "alphanum_fraction": 0.6224366707, "include": true, "reason": "import numpy", "num_tokens": 461}
# -*- coding: utf-8 -*-

# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2018.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

# pylint: disable=unused-import

"""Test Qiskit's QuantumCircuit class."""

import os
import tempfile
import unittest

import numpy

import qiskit.extensions.simulator
from qiskit import BasicAer
from qiskit import QuantumRegister, ClassicalRegister, QuantumCircuit
from qiskit import execute
from qiskit import QiskitError
from qiskit.quantum_info import state_fidelity
from qiskit.test import QiskitTestCase
from qiskit.extensions.unitary import UnitaryGate


class TestCircuitQiskitCode(QiskitTestCase):
    """QuantumCircuit Qiskit tests."""

    def test_circuit_qiskit_code(self):
        """Test circuit qiskit_code() method.
        """
        # Scenario 1: a circuit exercising one instance of each supported
        # gate family (u1/u2/u3, s/sdg, cx, h/x/y/z, conditionals via c_if,
        # barriers, measurements) must round-trip to the expected Python.
        qr1 = QuantumRegister(1, 'qr1')
        qr2 = QuantumRegister(2, 'qr2')
        cr = ClassicalRegister(3, 'cr')
        qc = QuantumCircuit(qr1, qr2, cr)
        qc.u1(0.3, qr1[0])
        qc.u2(0.2, 0.1, qr2[0])
        qc.u3(0.3, 0.2, 0.1, qr2[1])
        qc.s(qr2[1])
        qc.sdg(qr2[1])
        qc.cx(qr1[0], qr2[1])
        qc.barrier(qr2)
        qc.cx(qr2[1], qr1[0])
        qc.h(qr2[1])
        qc.x(qr2[1]).c_if(cr, 0)
        qc.y(qr1[0]).c_if(cr, 1)
        qc.z(qr1[0]).c_if(cr, 2)
        qc.barrier(qr1, qr2)
        qc.measure(qr1[0], cr[0])
        qc.measure(qr2[0], cr[1])
        qc.measure(qr2[1], cr[2])
        # NOTE: parameters are emitted at full precision (0.300000000000000)
        # and the generated circuit is always named gcn_qc.
        expected_python = """qr1 = QuantumRegister(1, 'qr1')\n""" + \
                          """qr2 = QuantumRegister(2, 'qr2')\n""" + \
                          """cr = ClassicalRegister(3, 'cr')\n""" + \
                          """gcn_qc = QuantumCircuit(qr1, qr2, cr)\n""" + \
                          """gcn_qc.u1(0.300000000000000, qr1[0])\n""" + \
                          """gcn_qc.u2(0.200000000000000, 0.100000000000000, qr2[0])\n""" + \
                          """gcn_qc.u3(0.300000000000000, 0.200000000000000,""" + \
                          """ 0.100000000000000, qr2[1])\n""" + \
                          """gcn_qc.s(qr2[1])\n""" + \
                          """gcn_qc.sdg(qr2[1])\n""" + \
                          """gcn_qc.cx(qr1[0], qr2[1])\n""" + \
                          """gcn_qc.barrier(qr2[0], qr2[1])\n""" + \
                          """gcn_qc.cx(qr2[1], qr1[0])\n""" + \
                          """gcn_qc.h(qr2[1])\n""" + \
                          """gcn_qc.x(qr2[1]).c_if(cr, 0)\n""" + \
                          """gcn_qc.y(qr1[0]).c_if(cr, 1)\n""" + \
                          """gcn_qc.z(qr1[0]).c_if(cr, 2)\n""" + \
                          """gcn_qc.barrier(qr1[0], qr2[0], qr2[1])\n""" + \
                          """gcn_qc.measure(qr1[0], cr[0])\n""" + \
                          """gcn_qc.measure(qr2[0], cr[1])\n""" + \
                          """gcn_qc.measure(qr2[1], cr[2])\n"""
        self.assertEqual(qc.qiskit_code(), expected_python)

        # Scenario 2: a whole-register measure must be expanded to one
        # measure call per qubit/bit pair.
        qr1 = QuantumRegister(3, 'qr1')
        cr = ClassicalRegister(3, 'cr')
        qc = QuantumCircuit(qr1, cr)
        qc.measure(qr1, cr)
        expected_python = """qr1 = QuantumRegister(3, 'qr1')\n""" + \
                          """cr = ClassicalRegister(3, 'cr')\n""" + \
                          """gcn_qc = QuantumCircuit(qr1, cr)\n""" + \
                          """gcn_qc.measure(qr1[0], cr[0])\n""" + \
                          """gcn_qc.measure(qr1[1], cr[1])\n""" + \
                          """gcn_qc.measure(qr1[2], cr[2])\n"""
        self.assertEqual(qc.qiskit_code(), expected_python)

        # Scenario 3: a single-qubit UnitaryGate must be emitted as an
        # np.array literal (complex repr) plus an append call.
        qr = QuantumRegister(3, 'qr')
        cr = ClassicalRegister(3, 'cr')
        qc = QuantumCircuit(qr, cr)
        matrix = numpy.array([[1, 0],
                              [0, 1]])
        qc.append(UnitaryGate(matrix), [qr[0]])
        expected_python = """qr = QuantumRegister(3, 'qr')\n""" + \
                          """cr = ClassicalRegister(3, 'cr')\n""" + \
                          """gcn_qc = QuantumCircuit(qr, cr)\n""" + \
                          """matrix = np.array([[1.+0.j, 0.+0.j],\n""" + \
                          """                  [0.+0.j, 1.+0.j]])\n""" + \
                          """gcn_qc.append(UnitaryGate(matrix), [qr[0]])\n"""
        self.assertEqual(qc.qiskit_code(), expected_python)

        # Scenario 4: a two-qubit unitary (sigma_x (x) sigma_y) with a label;
        # the label must survive into the generated UnitaryGate call.
        qr = QuantumRegister(3, 'qr')
        cr = ClassicalRegister(3, 'cr')
        qc = QuantumCircuit(qr, cr)
        sigmax = numpy.array([[0, 1],
                              [1, 0]])
        sigmay = numpy.array([[0, -1j],
                              [1j, 0]])
        matrix = numpy.kron(sigmax, sigmay)
        uni2q = UnitaryGate(matrix, label='test')
        qc.append(uni2q, [qr[0], qr[1]])
        expected_python = """qr = QuantumRegister(3, 'qr')\n""" + \
                          """cr = ClassicalRegister(3, 'cr')\n""" + \
                          """gcn_qc = QuantumCircuit(qr, cr)\n""" + \
                          """matrix = np.array([[0.+0.j, 0.-0.j, 0.+0.j, 0.-1.j],\n""" + \
                          """                  [0.+0.j, 0.+0.j, 0.+1.j, 0.+0.j],\n""" + \
                          """                  [0.+0.j, 0.-1.j, 0.+0.j, 0.-0.j],\n""" + \
                          """                  [0.+1.j, 0.+0.j, 0.+0.j, 0.+0.j]])\n""" + \
                          """gcn_qc.append(UnitaryGate(matrix, label='test'), [qr[0], qr[1]])\n"""
        self.assertEqual(qc.qiskit_code(), expected_python)

        # Scenario 5: register names that are not valid identifiers ('qr%')
        # or that collide with the reserved circuit name ('gcn_qc') must be
        # sanitized with a 'grn_' prefix in the generated code.
        qr = QuantumRegister(3, 'qr%')
        cr = ClassicalRegister(3, 'gcn_qc')
        qc = QuantumCircuit(qr, cr)
        qc.h(qr[1])
        expected_python = """grn_qr = QuantumRegister(3, 'grn_qr')\n""" + \
                          """grn_gcn_qc = ClassicalRegister(3, 'grn_gcn_qc')\n""" + \
                          """gcn_qc = QuantumCircuit(grn_qr, grn_gcn_qc)\n""" + \
                          """gcn_qc.h(grn_qr[1])\n"""
        self.assertEqual(qc.qiskit_code(), expected_python)
{"hexsha": "6bd62aab580f7cc2d88bc34280a231cd19466a58", "size": 6146, "ext": "py", "lang": "Python", "max_stars_repo_path": "test/python/circuit/test_qiskit_code.py", "max_stars_repo_name": "rfclambert/qiskit-terra", "max_stars_repo_head_hexsha": "9f236c9744fd95c8ff40d874197e683db024d993", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/python/circuit/test_qiskit_code.py", "max_issues_repo_name": "rfclambert/qiskit-terra", "max_issues_repo_head_hexsha": "9f236c9744fd95c8ff40d874197e683db024d993", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/python/circuit/test_qiskit_code.py", "max_forks_repo_name": "rfclambert/qiskit-terra", "max_forks_repo_head_hexsha": "9f236c9744fd95c8ff40d874197e683db024d993", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 46.9160305344, "max_line_length": 98, "alphanum_fraction": 0.479498861, "include": true, "reason": "import numpy", "num_tokens": 1930}
import tensorflow as tf
import tensorflow_probability as tfp
import numpy as np

tfd = tfp.distributions
tfb = tfp.bijectors


class DiomedesModel(tf.Module):
    """Linear mixed-effects model with driver and cy random effects.

    The trainable pieces are the fixed intercept (alpha) and the two
    random-effect scales (sigmas); the random effects themselves are
    latent variables sampled by MCMC in fitDiomedesModel.
    """

    def __init__(self):
        # Set up fixed effects and other parameters.
        # These are free parameters to be optimized in E-steps
        self._intercept = tf.Variable(
            0., name="intercept")        # alpha in eq
        # TransformedVariable with an Exp bijector keeps the scale positive
        # while optimizing an unconstrained underlying variable.
        self._stddev_drivers = tfp.util.TransformedVariable(
            1., bijector=tfb.Exp(), name="stddev_drivers")  # sigma in eq
        self._stddev_cys = tfp.util.TransformedVariable(
            1., bijector=tfb.Exp(), name="stddev_cys")  # sigma in eq

    def __call__(self, features):
        """Build the joint distribution for the given feature table.

        `features` must contain integer index arrays 'dd' (driver ids) and
        'cyd' (cy ids); the number of distinct values determines the size
        of each random-effect vector.
        """
        num_drivers = len(np.unique(features['dd']))
        num_cy = len(np.unique(features['cyd']))
        model = tfd.JointDistributionSequential([
            # Set up random effects.
            tfd.MultivariateNormalDiag(
                loc=tf.zeros(num_drivers),
                scale_identity_multiplier=self._stddev_drivers),
            tfd.MultivariateNormalDiag(
                loc=tf.zeros(num_cy),
                scale_identity_multiplier=self._stddev_cys),
            # This is the likelihood for the observed.
            # NOTE: JointDistributionSequential passes the preceding
            # components in reverse order, hence (effect_cys, effect_drivers).
            lambda effect_cys, effect_drivers: tfd.Independent(
                tfd.Normal(
                    loc=(tf.gather(effect_drivers, features["dd"], axis=-1) +
                         tf.gather(effect_cys, features["cyd"], axis=-1) +
                         self._intercept),
                    scale=1.),
                reinterpreted_batch_ndims=1)
        ])
        # To enable tracking of the trainable variables via the created
        # distribution, we attach a reference to `self`. Since all TFP
        # objects sub-class `tf.Module`, this means that the following is
        # possible:
        #     LinearMixedEffectModel()(features_train).trainable_variables
        #     ==> tuple of all tf.Variables created by LinearMixedEffectModel.
        model._to_track = self
        model.num_drivers = num_drivers
        model.num_cy = num_cy
        return model


def fitDiomedesModel(model, targets, num_warmup_iters=1000, num_iters=5000):
    """Fit the model by Monte-Carlo EM.

    E-step: HMC samples of the random effects given the current parameters.
    M-step: one Adam gradient step on the fixed parameters given the samples.

    Returns (effect_driver_samples, effect_cy_samples, current_state,
    loss_history).
    """
    # Set up E-step (MCMC).
    # Note: `hmc` is captured by closure and is defined further below,
    # before the first call -- do not reorder.
    @tf.function(autograph=False, experimental_compile=True)
    def one_e_step(current_state, kernel_results):
        next_state, next_kernel_results = hmc.one_step(
            current_state=current_state,
            previous_kernel_results=kernel_results)
        return next_state, next_kernel_results

    # Set up M-step (gradient descent).
    @tf.function(autograph=False, experimental_compile=True)
    def one_m_step(current_state):
        with tf.GradientTape() as tape:
            loss = -target_log_prob_fn(*current_state)
        grads = tape.gradient(loss, trainable_variables)
        optimizer.apply_gradients(zip(grads, trainable_variables))
        return loss

    num_accepted = 0
    effect_driver_samples = np.zeros([num_iters, model.num_drivers])
    effect_cy_samples = np.zeros([num_iters, model.num_cy])
    loss_history = np.zeros([num_iters])

    optimizer = tf.optimizers.Adam(learning_rate=.01)
    # Condition the joint on the observed targets (last component).
    target_log_prob_fn = lambda *x: model.log_prob(x + (targets,))
    trainable_variables = model.trainable_variables
    # Initial latent state: a prior sample, dropping the observation slot.
    current_state = model.sample()[:-1]

    hmc = tfp.mcmc.HamiltonianMonteCarlo(
        target_log_prob_fn=target_log_prob_fn,
        step_size=0.015,
        num_leapfrog_steps=3)
    kernel_results = hmc.bootstrap_results(current_state)

    # Run warm-up stage.
    for t in range(num_warmup_iters):
        current_state, kernel_results = one_e_step(current_state, kernel_results)
        num_accepted += kernel_results.is_accepted.numpy()
        if t % 500 == 0 or t == num_warmup_iters - 1:
            print("Warm-Up Iteration: {:>3} Acceptance Rate: {:.3f}".format(
                t, num_accepted / (t + 1)))

    num_accepted = 0  # reset acceptance rate counter

    # Run training.
    for t in range(num_iters):
        # run 5 MCMC iterations before every joint EM update
        for _ in range(5):
            current_state, kernel_results = one_e_step(current_state, kernel_results)
        loss = one_m_step(current_state)
        effect_driver_samples[t, :] = current_state[0].numpy()
        effect_cy_samples[t, :] = current_state[1].numpy()
        num_accepted += kernel_results.is_accepted.numpy()
        loss_history[t] = loss.numpy()
        if t % 500 == 0 or t == num_iters - 1:
            print("Iteration: {:>4} Acceptance Rate: {:.3f} Loss: {:.3f}".format(
                t, num_accepted / (t + 1), loss_history[t]))

    return effect_driver_samples, effect_cy_samples, current_state, loss_history
{"hexsha": "c0612ac605a85c34af55bf3174c585ef073c04d2", "size": 4529, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/model.py", "max_stars_repo_name": "aleccrowell/diomedes", "max_stars_repo_head_hexsha": "69c4dcddd62a1c846337ced59c2b16a81bc6991f", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/model.py", "max_issues_repo_name": "aleccrowell/diomedes", "max_issues_repo_head_hexsha": "69c4dcddd62a1c846337ced59c2b16a81bc6991f", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 6, "max_issues_repo_issues_event_min_datetime": "2020-01-18T19:42:07.000Z", "max_issues_repo_issues_event_max_datetime": "2020-01-18T19:56:56.000Z", "max_forks_repo_path": "src/model.py", "max_forks_repo_name": "aleccrowell/diomedes", "max_forks_repo_head_hexsha": "69c4dcddd62a1c846337ced59c2b16a81bc6991f", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 43.9708737864, "max_line_length": 123, "alphanum_fraction": 0.6699050563, "include": true, "reason": "import numpy", "num_tokens": 1106}
[STATEMENT] lemma bounded_inner_imp_bdd_above: assumes "bounded s" shows "bdd_above ((\<lambda>x. x \<bullet> a) ` s)" [PROOF STATE] proof (prove) goal (1 subgoal): 1. bdd_above ((\<lambda>x. x \<bullet> a) ` s) [PROOF STEP] by (simp add: assms bounded_imp_bdd_above bounded_linear_image bounded_linear_inner_left)
{"llama_tokens": 129, "file": null, "length": 1}
import torch
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from itertools import combinations

from curve import HilbertCurve, ZCurve
from utils import *
from environment import CurveEnvironment
from agent import Agent

CUDA = torch.cuda.is_available()
DIM = 2
ORDER = 3
DATA_SIZE = 15
MAX_STEP = 200
GAMMA = 0.99  # time discount rate
LEARNING_RATE = 1e-3  # learning rate
USE_RNN = True


class PGDriver:
    """Drives policy-gradient training of an Agent on a CurveEnvironment."""

    def __init__(self, dimension, order, data_size, learning_rate, use_rnn=False):
        self.use_rnn = use_rnn
        self.env = CurveEnvironment(order=order, dim=dimension, data_size=data_size, life=20)
        # num_states = 2^(dim*order) grid cells times 3 feature channels;
        # num_actions = one action per grid cell.
        self.agent = Agent(
            num_states=2 ** (dimension * order) * 3,
            num_actions=2 ** (dimension * order),
            network_type='policy_gradient',
            learning_rate=learning_rate,
            use_rnn=use_rnn
        )

    def convert_state(self, inp_state):
        # Shape the observation for the network: (1, seq_len, dim+1) for the
        # RNN variant, otherwise a flat (1, -1) vector.
        if self.use_rnn:
            torch_state = torch.tensor(inp_state, dtype=torch.float32).view(1, -1, self.env.dim + 1)
        else:
            torch_state = torch.tensor(inp_state, dtype=torch.float32).view(1, -1)
        return torch_state

    def run(self, max_episode=5000, max_step=1000, span=10):
        """
        :param max_episode: maximum number of episodes to run
        :param max_step: maximum number of steps allowed within one episode
        :param span: window size for the running averages of reward/cost ...
        :return:
        """
        cost_list = np.zeros(span)    # average cost achieved per episode (ring buffer)
        reward_list = np.zeros(span)  # average reward achieved per episode (ring buffer)

        for ep in range(1, max_episode + 1):  # repeat for up to max_episode episodes
            obs = self.env.reset()
            state = self.convert_state(obs)
            mean_cost = 0
            mean_reward = 0
            ep_history = []
            for step in range(1, max_step + 1):
                action, log_prob, entropy = self.agent.get_action(state)
                next_obs, reward, done, infos = self.env.step(action)
                ep_history.append([log_prob, reward, entropy])
                state = self.convert_state(next_obs)
                if done:
                    # Policy-gradient update happens once per episode, on
                    # the full trajectory history.
                    self.agent.update(ep_history)
                    break
                # extra bookkeeping: incremental running means over the
                # episode's steps (skipped on the terminal step above)
                mean_cost = mean_cost + 1 / step * (infos['cost'] - mean_cost)
                mean_reward = mean_reward + 1 / step * (reward - mean_reward)
            cost_list[ep % len(cost_list)] = mean_cost
            reward_list[ep % len(reward_list)] = mean_reward
            if ep % span == 0:
                print(f'episode {ep} is over.')
                print('Average of the cost in the {} episodes : {:.3f} And Reward : {:.3f}'
                      .format(span, np.mean(cost_list), np.mean(reward_list)))


'''
Function that measures metrics for the reward, based on the given state and
the activated data.
'''

if __name__ == '__main__':
    np.random.seed(210)
    driver = PGDriver(dimension=DIM, order=ORDER, data_size=DATA_SIZE, learning_rate=LEARNING_RATE, use_rnn=USE_RNN)
    driver.run(max_episode=5000, max_step=1000)
{"hexsha": "45b7c09a7c77ef8339cc9d167d7549344173965b", "size": 3071, "ext": "py", "lang": "Python", "max_stars_repo_path": "main/driver_policy_gradient.py", "max_stars_repo_name": "zzong2006/space-filling-curve-with-RF-learning", "max_stars_repo_head_hexsha": "30823745dae91240c0977185fb1831c9b4771a40", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-15T23:27:58.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-15T23:27:58.000Z", "max_issues_repo_path": "main/driver_policy_gradient.py", "max_issues_repo_name": "zzong2006/space-filling-curve-with-RF-learning", "max_issues_repo_head_hexsha": "30823745dae91240c0977185fb1831c9b4771a40", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "main/driver_policy_gradient.py", "max_forks_repo_name": "zzong2006/space-filling-curve-with-RF-learning", "max_forks_repo_head_hexsha": "30823745dae91240c0977185fb1831c9b4771a40", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.9895833333, "max_line_length": 116, "alphanum_fraction": 0.603386519, "include": true, "reason": "import numpy", "num_tokens": 829}
% Convert an edge list to an adjacency list.
%
% INPUTS: edge list, mx3, m - number of edges
% OUTPUTS: adjacency list
%
% Note: Information about edge weights (if any) is lost.
% NOTE(review): node labels are assumed to be positive integers 1..n;
% a source label larger than numel(nodes) would silently grow the cell
% array -- confirm this invariant holds for all callers.
% GB: last updated, September 25, 2012

function adjL = edgeL2adjL(el)

% Distinct node labels appearing as either endpoint of any edge.
nodes = unique([el(:,1)' el(:,2)']);
% One (initially empty) neighbor list per node.
adjL=cell(numel(nodes),1);

% Append each edge's target to its source node's neighbor list.
for e=1:size(el,1); adjL{el(e,1)}=[adjL{el(e,1)},el(e,2)]; end
{"author": "aeolianine", "repo": "octave-networks-toolbox", "sha": "e70f79eb62a54ef96934d900830f9177caf732c9", "save_path": "github-repos/MATLAB/aeolianine-octave-networks-toolbox", "path": "github-repos/MATLAB/aeolianine-octave-networks-toolbox/octave-networks-toolbox-e70f79eb62a54ef96934d900830f9177caf732c9/edgeL2adjL.m"}
#include <boost/align/aligned_alloc.hpp>

// Minimal Boost.Align demonstration: request a 100-byte buffer whose
// address is 16-byte aligned, then release it with the matching
// deallocation function.  aligned_alloc returns a null pointer on
// failure, so the free is guarded.
int main()
{
    void* buffer = boost::alignment::aligned_alloc(16, 100);

    if (buffer != nullptr) {
        boost::alignment::aligned_free(buffer);
    }

    return 0;
}
{"hexsha": "c01eb23030b40462b7b01135c44f33afad9e149a", "size": 172, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "Boost/Algorithms/align_alloc.cpp", "max_stars_repo_name": "zzragida/CppExamples", "max_stars_repo_head_hexsha": "d627b097efc04209aa4012f7b7f9d82858da3f2d", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Boost/Algorithms/align_alloc.cpp", "max_issues_repo_name": "zzragida/CppExamples", "max_issues_repo_head_hexsha": "d627b097efc04209aa4012f7b7f9d82858da3f2d", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Boost/Algorithms/align_alloc.cpp", "max_forks_repo_name": "zzragida/CppExamples", "max_forks_repo_head_hexsha": "d627b097efc04209aa4012f7b7f9d82858da3f2d", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.3333333333, "max_line_length": 52, "alphanum_fraction": 0.6627906977, "num_tokens": 52}
# GPU-backed quantum register support: transfer helpers, measurement
# primitives implemented as CUDA kernels, and batched tensor products.
import CuArrays: cu
import YaoArrayRegister: _measure, measure, measure!, measure_collapseto!, measure_remove!
import YaoBase: batch_normalize!
import Yao: expect
export cpu, cu, GPUReg

# Move a register's state matrix to the GPU / back to host memory.
cu(reg::ArrayReg{B}) where B = ArrayReg{B}(CuArray(reg.state))
cpu(reg::ArrayReg{B}) where B = ArrayReg{B}(collect(reg.state))
# Alias for any ArrayReg whose storage lives on the GPU.
const GPUReg{B, T, MT} = ArrayReg{B, T, MT} where MT<:GPUArray

# Column-wise 2-norm normalization; only the Euclidean norm is supported.
function batch_normalize!(s::CuSubArr, p::Real=2)
    p!=2 && throw(ArgumentError("p must be 2!"))
    s./=norm2(s, dims=1)
    s
end

# Map a linear index l of a strict lower-triangular matrix to its (i, j)
# pair (1-based, i > j).
@inline function tri2ij(l::Int)
    i = ceil(Int, sqrt(2*l+0.25)-0.5)
    j = l-i*(i-1)÷2
    i+1,j
end

############### MEASURE ##################
# Single-batch case: sample on the CPU from the probability vector.
# NOTE: `rng::AbstractRNG` in call position is a type-assert on `rng`,
# not a keyword — the asserted value is what gets passed through.
measure(rng::AbstractRNG, ::ComputationalBasis, reg::GPUReg{1}, ::AllLocs; nshots::Int=1) = _measure(rng::AbstractRNG, reg |> probs |> Vector, nshots)

# TODO: optimize the batch dimension using parallel sampling
# Batched case: reduce |amplitude|^2 over the remaining dimension on the
# GPU, then sample per batch column on the CPU.
function measure(rng::AbstractRNG, ::ComputationalBasis, reg::GPUReg{B}, ::AllLocs; nshots::Int=1) where B
    regm = reg |> rank3
    pl = dropdims(mapreduce(abs2, +, regm, dims=2), dims=2)
    _measure(rng::AbstractRNG, pl |> Matrix, nshots)
end

# Measure the active qubits and remove them from the register; the new
# (smaller) state is built by a one-thread-per-element CUDA kernel.
function measure_remove!(rng::AbstractRNG, ::ComputationalBasis, reg::GPUReg{B}, ::AllLocs) where B
    regm = reg |> rank3
    nregm = similar(regm, 1<<nremain(reg), B)
    pl = dropdims(mapreduce(abs2, +, regm, dims=2), dims=2)
    # Sampling itself happens on the host, one draw per batch column.
    pl_cpu = pl |> Matrix
    res_cpu = map(ib->_measure(rng::AbstractRNG, view(pl_cpu, :, ib), 1)[], 1:B)
    res = CuArray(res_cpu)
    @inline function kernel(nregm, regm, res, pl)
        # Global linear thread index; guarded against overshoot below.
        state = (blockIdx().x-1) * blockDim().x + threadIdx().x
        if state <= length(nregm)
            i,j = GPUArrays.gpu_ind2sub(nregm, state)
            r = Int(res[j])+1
            # Copy the measured slice, renormalized by sqrt of its probability.
            @inbounds nregm[i,j] = regm[r,i,j]/CUDAnative.sqrt(pl[r, j])
        end
        return
    end
    X, Y = cudiv(length(nregm))
    @cuda threads=X blocks=Y kernel(nregm, regm, res, pl)
    reg.state = reshape(nregm,1,:)
    res
end

# In-place measurement: collapse each batch column onto its sampled
# outcome (zero all other components, renormalize the survivor).
function measure!(rng::AbstractRNG, ::ComputationalBasis, reg::GPUReg{B, T}, ::AllLocs) where {B, T}
    regm = reg |> rank3
    pl = dropdims(mapreduce(abs2, +, regm, dims=2), dims=2)
    pl_cpu = pl |> Matrix
    res_cpu = map(ib->_measure(rng::AbstractRNG, view(pl_cpu, :, ib), 1)[], 1:B)
    res = CuArray(res_cpu)
    @inline function kernel(regm, res, pl)
        state = (blockIdx().x-1) * blockDim().x + threadIdx().x
        if state <= length(regm)
            k,i,j = GPUArrays.gpu_ind2sub(regm, state)
            @inbounds rind = Int(res[j]) + 1
            @inbounds regm[k,i,j] = k==rind ? regm[k,i,j]/CUDAnative.sqrt(pl[k, j]) : T(0)
        end
        return
    end
    X, Y = cudiv(length(regm))
    @cuda threads=X blocks=Y kernel(regm, res, pl)
    res
end

# Measure, then force the measured qubits to the computational-basis
# state `config` (default |0>), renormalizing with the sampled outcome's
# probability.
function measure_collapseto!(rng::AbstractRNG, ::ComputationalBasis, reg::GPUReg{B, T}, ::AllLocs; config=0) where {B, T}
    regm = reg |> rank3
    pl = dropdims(mapreduce(abs2, +, regm, dims=2), dims=2)
    pl_cpu = pl |> Matrix
    res_cpu = map(ib->_measure(rng::AbstractRNG, view(pl_cpu, :, ib), 1)[], 1:B)
    res = CuArray(res_cpu)
    @inline function kernel(regm, res, pl, val)
        state = (blockIdx().x-1) * blockDim().x + threadIdx().x
        if state <= length(regm)
            k,i,j = GPUArrays.gpu_ind2sub(regm, state)
            @inbounds rind = Int(res[j]) + 1
            @inbounds k==val+1 && (regm[k,i,j] = regm[rind,i,j]/CUDAnative.sqrt(pl[rind, j]))
            # NOTE(review): sync only orders threads within a block; the
            # write above and the zeroing below touch the same array, so
            # cross-block ordering is presumably guaranteed by the index
            # layout — confirm before restructuring.
            CuArrays.sync_threads()
            @inbounds k!=val+1 && (regm[k,i,j] = 0)
        end
        return
    end
    X, Y = cudiv(length(regm))
    @cuda threads=X blocks=Y kernel(regm, res, pl, config)
    res
end

import YaoArrayRegister: insert_qubits!, join

# Batched Kronecker product: res[:, :, b] = A[:, :, b] ⊗ B[:, :, b],
# computed with one GPU thread per output element.
function batched_kron(A::Union{CuArray{T1, 3}, Adjoint{<:Any, <:CuArray{T1, 3}}}, B::Union{CuArray{T2, 3}, Adjoint{<:Any, <:CuArray{T2, 3}}}) where {T1 ,T2}
    res = CuArrays.zeros(promote_type(T1,T2), size(A,1)*size(B, 1), size(A,2)*size(B,2), size(A, 3))
    @inline function kernel(res, A, B)
        state = (blockIdx().x-1) * blockDim().x + threadIdx().x
        i,j,b = GPUArrays.gpu_ind2sub(res, state)
        # Decompose each output coordinate into its (A, B) factor indices.
        i_A = (i-1) ÷ size(B,1) + 1
        j_A = (j-1) ÷ size(B,2) + 1
        i_B = (i-1) % size(B,1) + 1
        j_B = (j-1) % size(B,2) + 1
        state <= length(res) && (@inbounds res[state] = A[i_A, j_A, b]*B[i_B, j_B, b])
        return
    end
    X, Y = cudiv(length(res))
    @cuda threads=X blocks=Y kernel(res, A, B)
    res
end

# Join two GPU registers with matching batch size into one larger register.
function join(reg1::GPUReg{B}, reg2::GPUReg{B}) where {B}
    s1 = reg1 |> rank3
    s2 = reg2 |> rank3
    state = batched_kron(s1, s2)
    ArrayReg{B}(reshape(state, size(state, 1), :))
end

export insert_qubits!
# Insert `nqubits` fresh |0> qubits at position `loc` of the active qubits.
function insert_qubits!(reg::GPUReg{B}, loc::Int; nqubits::Int=1) where B
    na = nactive(reg)
    focus!(reg, 1:loc-1)
    reg2 = join(zero_state(nqubits; nbatch=B) |> cu, reg) |> relax! |> focus!((1:na+nqubits)...)
    reg.state = reg2.state
    reg
end

# Generate operator-valued overloads: diagonalize the operator on the
# CPU, move the eigen-decomposition to the GPU, and dispatch to the
# basis-measurement method above.
# NOTE(review): the `where B` on the generated methods binds nothing —
# presumably leftover; harmless but confirm before cleanup.
for FUNC in [:measure!, :measure_collapseto!, :measure_remove!]
    @eval function $FUNC(rng::AbstractRNG, op::AbstractBlock, reg::GPUReg, al::AllLocs; kwargs...) where B
        E, V = eigen!(mat(op) |> Matrix)
        ei = Eigen(E|>cu, V|>cu)
        $FUNC(rng::AbstractRNG, ei, reg, al; kwargs...)
    end
end
{"hexsha": "01732ad387ca33e1b961cfb674eb3f736c498cc4", "size": 5242, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/GPUReg.jl", "max_stars_repo_name": "UnofficialJuliaMirrorSnapshots/CuYao.jl-b48ca7a8-dd42-11e8-2b8e-1b7706800275", "max_stars_repo_head_hexsha": "edf0be4bb914ac8c022dc5f95ced9db949ea19c5", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/GPUReg.jl", "max_issues_repo_name": "UnofficialJuliaMirrorSnapshots/CuYao.jl-b48ca7a8-dd42-11e8-2b8e-1b7706800275", "max_issues_repo_head_hexsha": "edf0be4bb914ac8c022dc5f95ced9db949ea19c5", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/GPUReg.jl", "max_forks_repo_name": "UnofficialJuliaMirrorSnapshots/CuYao.jl-b48ca7a8-dd42-11e8-2b8e-1b7706800275", "max_forks_repo_head_hexsha": "edf0be4bb914ac8c022dc5f95ced9db949ea19c5", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.6573426573, "max_line_length": 156, "alphanum_fraction": 0.6014879817, "num_tokens": 1795}
#!/usr/bin/env python
"""
Run the BEAST pipeline on PHAT-like data, split into subgrids.

Assumes a ``datamodel.py`` file exists in the same directory as this
script (it must be called ``datamodel.py`` — the name is also used by
``make_model.py`` in physicsmodel; more recoding is needed to remove
this dependency).

Each pipeline stage is selected by a command-line flag and operates on
per-subgrid files so stages can be distributed across processes.
"""

# system imports
from __future__ import (absolute_import, division, print_function)
import argparse
import time
import numpy as np
from multiprocessing import Pool

# BEAST imports
from beast.physicsmodel.create_project_dir import create_project_dir
from beast.physicsmodel.model_grid import (make_iso_table,
                                           make_spectral_grid,
                                           add_stellar_priors,
                                           make_extinguished_sed_grid)
import beast.observationmodel.noisemodel.generic_noisemodel as noisemodel
from beast.observationmodel.ast import (make_ast_input_list, make_ast_xy_list)
from beast.fitting import fit
from beast.fitting import trim_grid
from beast.physicsmodel.grid import FileSEDGrid
from beast.tools import verify_params
from beast.tools import subgridding_tools

import pickle

# import datamodel
# print("Import statement gets datamodel from {}.".format(datamodel.__file__))
# print("Distances are {}".format(datamodel.distances))

# import runpy, os
# print("runpy.run_path runs code from {}.".format(datamodelfile))
# datamodel_globals = runpy.run_path(datamodelfile)
# print("Distances are {}".format(datamodel_globals['distances']))

# Load datamodel.py from the current working directory as a module, so
# the script picks up the per-project configuration it was launched next to.
# https://stackoverflow.com/questions/67631/how-to-import-a-module-given-the-full-path
import importlib.util
import os
datamodelfile = os.path.join(os.getcwd(), 'datamodel.py')
spec = importlib.util.spec_from_file_location('datamodel', datamodelfile)
datamodel = importlib.util.module_from_spec(spec)
spec.loader.exec_module(datamodel)
print("Project name: {}".format(datamodel.project))

# All per-project outputs live under ./<project>/.
outdir = os.path.join('.', datamodel.project)
# File listing the expected SED subgrid filenames (written by the
# physicsmodel stage, read by every later stage).
subgrid_names_file = os.path.join(outdir, 'subgrid_fnames.txt')

if __name__ == '__main__':
    # commandline parser
    parser = argparse.ArgumentParser()
    parser.add_argument("-p", "--physicsmodel",
                        help="Generate the physics model grid",
                        action="store_true")
    parser.add_argument("-a", "--ast",
                        help="Generate an input AST file",
                        action="store_true")
    parser.add_argument("-o", "--observationmodel",
                        help="Calculate the observation model (bias and noise)",
                        action="store_true")
    parser.add_argument("-t", "--trim",
                        help="Trim the physics and observation model grids",
                        action="store_true")
    parser.add_argument("-f", "--fit",
                        help="Fit the observed data",
                        action="store_true")
    parser.add_argument("-r", "--resume",
                        help="Resume a fitting run",
                        action="store_true")
    parser.add_argument("--nprocs", type=int, default=1,
                        help='''Number of processes to use to process the subgrids''')
    parser.add_argument("--nsubs", type=int, default=1,
                        help='''Number of subgrids to split the physics model into''')
    parser.add_argument("--subset", type=int, nargs=2, default=[None, None],
                        help='''Only process subgrids in the range [start, stop[. Should work for the o, t and f steps''')
    parser.add_argument("-m", "--merge", action="store_true",
                        help="Merge the subgrid results")
    parser.add_argument("--ignore-missing-subresults", action="store_true",
                        help='''In some cases, it might not be possible to perform the fit step for some subgrids, for example because the trimmed grids are empty or only have zero/negative weight points. In that case, this option can be used to perform the merge anyway. Just make sure that any missing grids are missing for the right reasons.''')
    parser.add_argument("--dens_bin", type=int, default=None,
                        help='''Run for a certain source/background density bin. Use the split_catalog_using_map ` tool to generate the right files for this option.''')
    args = parser.parse_args()

    # When working per density bin, all intermediate results go into a
    # bin-specific subfolder and the split catalogs are used instead.
    if args.dens_bin is not None:
        bin_subfolder = 'bin{}'.format(args.dens_bin)
        pdir = create_project_dir(bin_subfolder)

    def subcatalog_fname(full_cat_fname, dens_bin):
        # Map a full catalog filename onto its per-bin split counterpart.
        return full_cat_fname.replace('.fits', '_bin{}.fits'.format(dens_bin))

    # check input parameters, print what is the problem, stop run_beast
    verify_params.verify_input_format(datamodel)

    def parallel_wrapper(function, argument):
        # Apply `function` over `argument`, in a process pool when
        # --nprocs > 1, serially otherwise; print each result either way.
        parallel = args.nprocs > 1
        if (parallel):
            p = Pool(args.nprocs)
            for r in p.imap_unordered(function, argument, chunksize=1):
                print(r)
        else:
            for a in argument:
                r = function(a)
                print(r)

    def get_modelsubgridfiles():
        # Read back the subgrid filename list written by the -p stage.
        with open(subgrid_names_file, 'r') as f:
            modelsedgridfiles = f.read().split('\n')[:-1]
        return modelsedgridfiles

    # Slice applied to the subgrid list for the o, t and f stages.
    subset_slice = slice(*args.subset)

    if args.physicsmodel:
        # make sure the project directory exists
        pdir = create_project_dir(datamodel.project)

        # download and load the isochrones
        (iso_fname, oiso) = make_iso_table(datamodel.project,
                                           oiso=datamodel.oiso,
                                           logtmin=datamodel.logt[0],
                                           logtmax=datamodel.logt[1],
                                           dlogt=datamodel.logt[2],
                                           z=datamodel.z)

        if hasattr(datamodel, 'add_spectral_properties_kwargs'):
            extra_kwargs = datamodel.add_spectral_properties_kwargs
        else:
            extra_kwargs = None

        # generate the spectral library (no dust extinction)
        (spec_fname, g_spec) = make_spectral_grid(
            datamodel.project,
            oiso,
            osl=datamodel.osl,
            distance=datamodel.distances,
            distance_unit=datamodel.distance_unit,
            add_spectral_properties_kwargs=extra_kwargs)

        # Work with the whole grid up to here (otherwise, priors need a
        # rework (they don't like having only a subset of the parameter
        # space, especially when there's only one age for example)
        (pspec_fname, g_pspec) = add_stellar_priors(datamodel.project,
                                                    g_spec)

        # Make subgrids, by splitting the spectral grid into equal sized pieces
        custom_sub_pspec = subgridding_tools.split_grid(
            pspec_fname, args.nsubs)

        file_prefix = '{0}/{0}_'.format(datamodel.project)

        # process the subgrids individually
        def gen_subgrid(i, sub_name):
            # Apply dust extinction to one spectral subgrid, producing
            # the corresponding SED subgrid file.
            sub_g_pspec = FileSEDGrid(sub_name)
            sub_seds_fname = '{}seds.gridsub{}.hd5'.format(file_prefix, i)

            (sub_seds_fname, sub_g_seds) = make_extinguished_sed_grid(
                datamodel.project,
                sub_g_pspec,
                datamodel.filters,
                extLaw=datamodel.extLaw,
                av=datamodel.avs,
                rv=datamodel.rvs,
                fA=datamodel.fAs,
                rv_prior_model=datamodel.rv_prior_model,
                av_prior_model=datamodel.av_prior_model,
                fA_prior_model=datamodel.fA_prior_model,
                add_spectral_properties_kwargs=extra_kwargs,
                seds_fname=sub_seds_fname)

            return sub_seds_fname

        par_tuples = [(i, sub_name) for i, sub_name
                      in enumerate(custom_sub_pspec)][subset_slice]

        parallel = args.nprocs > 1
        if (parallel):
            p = Pool(args.nprocs)
            p.starmap(gen_subgrid, par_tuples)
        else:
            for pt in par_tuples:
                gen_subgrid(*pt)

        # Save a list of subgrid names that we expect to see
        required_names = ['{}seds.gridsub{}.hd5'.format(file_prefix, i)
                          for i in range(args.nsubs)]

        with open(subgrid_names_file, 'w') as fname_file:
            for fname in required_names:
                fname_file.write(fname + '\n')

        # seds_fname = '{}seds.grid.hd5'.format(file_prefix)
        # subgridding_tools.merge_grids(seds_fname, final_sub_names)

    if args.ast:
        # Determine magnitude range for ASTs
        mag_cuts = datamodel.ast_maglimit
        bright_cuts = None
        if len(mag_cuts) == 1:
            # A single number means "this many magnitudes fainter than
            # the observed 90th percentile" per filter.
            tmp_cuts = mag_cuts
            obsdata = datamodel.get_obscat(datamodel.obsfile,
                                           datamodel.filters)

            faintest_mags = np.zeros(len(datamodel.filters))
            brightest_mags = np.zeros(len(datamodel.filters))
            for k, filtername in enumerate(obsdata.filters):
                sfiltername = obsdata.data.resolve_alias(filtername)
                sfiltername = sfiltername.replace('rate', 'vega')
                sfiltername = sfiltername.replace('RATE', 'VEGA')
                # 99 is treated as a sentinel for "no measurement".
                keep, = np.where(obsdata[sfiltername] < 99.)
                faintest_mags[k] = np.percentile(obsdata[keep][sfiltername], 90.)
                brightest_mags[k] = np.amin(obsdata[keep][sfiltername])

            # max. mags from the gst observation cat.
            mag_cuts = faintest_mags + tmp_cuts  # this many mags fainter than the 90th percentile
            bright_cuts = brightest_mags - tmp_cuts  # this many mags brighter than the brightest source

        # Choose seds for ASTs
        modelsedgridfile = os.path.join(
            outdir, datamodel.project + '_seds.grid.hd5')
        outfile_chosenSEDs = os.path.join(
            outdir, datamodel.project + '_chosenSEDs.txt')
        N_per_age = datamodel.ast_models_selected_per_age
        Nfilters = datamodel.ast_bands_above_maglimit
        Nrealize = datamodel.ast_realization_per_model

        # Hard-wired selection strategy; the `else` branch is kept for
        # reference but currently unreachable.
        toothpick_style = True
        if toothpick_style:
            N_fluxes = 25
            min_N_per_flux = 50
            bins_outfile = os.path.join(
                outdir, datamodel.project + '_toothpick_style_bins.txt')
            chosen_seds = \
                make_ast_input_list.pick_models_toothpick_style(modelsedgridfile,
                                                                datamodel.filters,
                                                                mag_cuts,
                                                                Nfilters,
                                                                N_fluxes,
                                                                min_N_per_flux,
                                                                outfile_chosenSEDs,
                                                                bins_outfile,
                                                                bright_cut=bright_cuts)
        else:
            chosen_seds = make_ast_input_list.pick_models(modelsedgridfile,
                                                          datamodel.filters,
                                                          mag_cuts,
                                                          Nfilters,
                                                          N_per_age,
                                                          Nrealize,
                                                          outfile_chosenSEDs)

        # Assign positions for ASTs
        outfile_inputAST = os.path.join(
            outdir, datamodel.project + '_inputAST.txt')
        make_ast_xy_list.pick_positions_from_map(chosen_seds,
                                                 input_map=datamodel.ast_bg_map_file,
                                                 N_bins=datamodel.ast_bg_nbins,
                                                 Npermodel=10,
                                                 outfile=outfile_inputAST,
                                                 refimage=datamodel.ast_reference_image,
                                                 Nrealize=1)

    if args.observationmodel:
        print('Generating noise model from ASTs and absflux A matrix')

        modelsedgridfiles = get_modelsubgridfiles()[subset_slice]

        # Process the subgrids individually
        def gen_subobsmodel(modelsedgridfile):
            # Build the toothpick noise model for one SED subgrid.
            modelsedgrid = FileSEDGrid(modelsedgridfile)

            # generate the AST noise model
            noisefile = modelsedgridfile.replace('seds', 'noisemodel')
            astfile = datamodel.astfile

            # If we are treating regions with different
            # backgrounds/source densities separately, pick one of the
            # split ast files, and put the results in a subfolder.
            if args.dens_bin is not None:
                noisefile = os.path.join(bin_subfolder, noisefile)
                create_project_dir(os.path.dirname(noisefile))
                astfile = subcatalog_fname(astfile, args.dens_bin)

            outname = noisemodel.make_toothpick_noise_model(
                noisefile,
                astfile,
                modelsedgrid,
                absflux_a_matrix=datamodel.absflux_a_matrix)

            return outname

        parallel_wrapper(gen_subobsmodel, modelsedgridfiles)

    if args.trim:
        print('Trimming the model and noise grids')

        # read in the observed data
        obsfile = datamodel.obsfile
        if args.dens_bin is not None:
            obsfile = subcatalog_fname(obsfile, args.dens_bin)
        obsdata = datamodel.get_obscat(obsfile, datamodel.filters)

        modelsedgridfiles = get_modelsubgridfiles()[subset_slice]

        # trim the models individually
        def trim_submodel(modelsedgridfile):
            # Cut one subgrid (and its noise model) down to the models
            # compatible with the observations.
            modelsedgrid = FileSEDGrid(modelsedgridfile)

            noisefile = modelsedgridfile.replace('seds', 'noisemodel')
            sed_trimname = modelsedgridfile.replace('seds', 'seds_trim')
            noisemodel_trimname = sed_trimname.replace('seds', 'noisemodel')
            # When working with density bins, we need to work in a subfolder
            if args.dens_bin is not None:
                noisefile = os.path.join(bin_subfolder, noisefile)
                sed_trimname = os.path.join(bin_subfolder, sed_trimname)
                noisemodel_trimname = os.path.join(bin_subfolder,
                                                   noisemodel_trimname)

            # read in the noise model just created
            noisemodel_vals = noisemodel.get_noisemodelcat(noisefile)

            # trim the model sedgrid
            trim_grid.trim_models(modelsedgrid, noisemodel_vals, obsdata,
                                  sed_trimname, noisemodel_trimname,
                                  sigma_fac=3.)

        parallel_wrapper(trim_submodel, modelsedgridfiles)

    if args.fit:
        # NOTE(review): time.clock() was removed in Python 3.8; this
        # script presumably targets older Pythons — confirm and switch
        # to time.perf_counter() when upgrading.
        start_time = time.clock()

        # read in the observed data
        obsfile = datamodel.obsfile
        if args.dens_bin is not None:
            obsfile = subcatalog_fname(obsfile, args.dens_bin)
        obsdata = datamodel.get_obscat(obsfile, datamodel.filters)

        modelsedgridfiles = get_modelsubgridfiles()
        trimmed_modelsedgridfiles = [
            s.replace('seds', 'seds_trim') for s in modelsedgridfiles]
        trimmed_noisemodelfiles = [
            s.replace('seds', 'noisemodel') for s in trimmed_modelsedgridfiles]

        # File where the ranges and number of unique values for the grid
        # will be stored (this can take a while to calculate)
        grid_info_pkl = 'grid_info_dict.pkl'

        if args.dens_bin is not None:
            # Use the right subfolder
            trimmed_modelsedgridfiles, trimmed_noisemodelfiles = [
                [os.path.join(bin_subfolder, f) for f in l]
                for l in [trimmed_modelsedgridfiles, trimmed_noisemodelfiles]
            ]
            grid_info_pkl = os.path.join(bin_subfolder, grid_info_pkl)

        if not os.path.isfile(grid_info_pkl):
            grid_info_dict = subgridding_tools.reduce_grid_info(
                trimmed_modelsedgridfiles,
                trimmed_noisemodelfiles,
                nprocs=4)

            with open(grid_info_pkl, 'wb') as p:
                pickle.dump(grid_info_dict, p)
            print('wrote grid_info_dict to ' + grid_info_pkl)
        else:
            print('loading grid_info_dict from ' + grid_info_pkl)
            with open(grid_info_pkl, 'rb') as p:
                grid_info_dict = pickle.loads(p.read())

        # perform fits for the subgrids individually
        def fit_submodel(modelsedgridfile):
            # Fit the observations against one trimmed subgrid, writing
            # stats, 1D PDFs and log-likelihoods for later merging.
            # input files
            trimmed_modelsedgridfile = modelsedgridfile.replace(
                'seds', 'seds_trim')
            trimmed_noisemodelfile = trimmed_modelsedgridfile.replace(
                'seds', 'noisemodel')

            # output files
            lnpfile = modelsedgridfile.replace('seds', 'lnp')
            statsfile = modelsedgridfile.replace('seds', 'stats')
            statsfile = statsfile.replace('.hd5', '.fits')
            pdf1dfile = statsfile.replace('stats', 'pdf1d')

            if args.dens_bin is not None:
                # Put everything in the right subfolder
                trimmed_modelsedgridfile, trimmed_noisemodelfile, lnpfile, statsfile, pdf1dfile = [
                    os.path.join(bin_subfolder, f) for f in
                    [trimmed_modelsedgridfile, trimmed_noisemodelfile,
                     lnpfile, statsfile, pdf1dfile]
                ]

            # load the subgrid seds and subgrid noisemodel
            modelsedgrid = FileSEDGrid(trimmed_modelsedgridfile)
            noisemodel_vals = noisemodel.get_noisemodelcat(
                trimmed_noisemodelfile)

            try:
                fit.summary_table_memory(obsdata, noisemodel_vals,
                                         modelsedgrid, resume=args.resume,
                                         threshold=-10., save_every_npts=100,
                                         lnp_npts=60,
                                         stats_outname=statsfile,
                                         pdf1d_outname=pdf1dfile,
                                         grid_info_dict=grid_info_dict,
                                         lnp_outname=lnpfile,
                                         do_not_normalize=True)
                print("Done fitting on grid " + trimmed_modelsedgridfile)
            except Exception as e:
                # Optionally tolerate subgrids that cannot be fit (e.g.
                # empty trimmed grids); otherwise fail loudly.
                if not args.ignore_missing_subresults:
                    raise e

        parallel_wrapper(fit_submodel, modelsedgridfiles[subset_slice])

        new_time = time.clock()
        print('time to fit: ', (new_time - start_time) / 60., ' min')

    if args.merge:
        modelsedgridfiles = get_modelsubgridfiles()
        with_fits = [s.replace('.hd5', '.fits') for s in modelsedgridfiles]
        pdf1dfiles = [s.replace('seds', 'pdf1d') for s in with_fits]
        statsfiles = [s.replace('seds', 'stats') for s in with_fits]

        output_fname_base = os.path.join(datamodel.project, 'combined')

        if args.dens_bin is not None:
            pdf1dfiles, statsfiles = [[os.path.join(bin_subfolder, f)
                                       for f in l]
                                      for l in [pdf1dfiles, statsfiles]]
            output_fname_base = os.path.join(bin_subfolder, output_fname_base)

        if args.ignore_missing_subresults:
            # remove any missing filenames from the lists, and hope for the best
            def only_existing_files(file_list):
                return [f for f in file_list if os.path.isfile(f)]
            pdf1dfiles = only_existing_files(pdf1dfiles)
            statsfiles = only_existing_files(statsfiles)

        print("Merging")
        print(list(zip(pdf1dfiles, statsfiles)))
        subgridding_tools.merge_pdf1d_stats(pdf1dfiles, statsfiles,
                                            output_fname_base=output_fname_base)

    # print help if no arguments
    if not any(vars(args).values()):
        parser.print_help()
{"hexsha": "5d82352ef320398602614bdf022996ffa26c64a3", "size": 20415, "ext": "py", "lang": "Python", "max_stars_repo_path": "beast/examples/subgridding/run_beast_subgrids.py", "max_stars_repo_name": "marthaboyer/beast", "max_stars_repo_head_hexsha": "1ca71fb64ab60827e4e4e1937b64f319a98166c3", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "beast/examples/subgridding/run_beast_subgrids.py", "max_issues_repo_name": "marthaboyer/beast", "max_issues_repo_head_hexsha": "1ca71fb64ab60827e4e4e1937b64f319a98166c3", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "beast/examples/subgridding/run_beast_subgrids.py", "max_forks_repo_name": "marthaboyer/beast", "max_forks_repo_head_hexsha": "1ca71fb64ab60827e4e4e1937b64f319a98166c3", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.1883116883, "max_line_length": 103, "alphanum_fraction": 0.5760960078, "include": true, "reason": "import numpy", "num_tokens": 4251}
# Compute the real n-th root of a non-negative number r by Newton's
# method, iterating until the step size stops shrinking.
#
# Arguments:
#   n — root degree (negative n yields the reciprocal root; 0 is invalid)
#   r — radicand (must be non-negative)
# Throws DomainError for r < 0 or n == 0.
function nthroot(n::Integer, r::Real)
    # BUGFIX: the guard was `r < 0 || n == 0 && throw(...)`, which parses
    # as `r < 0 || (n == 0 && throw(...))` because && binds tighter than
    # ||, so a negative radicand was silently accepted. Parenthesize the
    # disjunction. Also give DomainError its required value argument.
    (r < 0 || n == 0) && throw(DomainError(r, "need r ≥ 0 and n ≠ 0"))
    n < 0 && return 1 / nthroot(-n, r)
    r > 0 || return zero(r)
    x = r / n          # initial guess
    prevdx = r
    while true
        y = x ^ (n - 1)
        dx = (r - y * x) / (n * y)   # Newton step for f(x) = x^n - r
        # Stop once the step no longer shrinks (convergence reached).
        abs(dx) ≥ abs(prevdx) && return x
        x += dx
        prevdx = dx
    end
end

@show nthroot.(-5:2:5, 5.0)
@show nthroot.(-5:2:5, 5.0) - 5.0 .^ (1 ./ (-5:2:5))
{"hexsha": "a5acccf10f0bef1177ba7addc93ea331b5f38d91", "size": 418, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "lang/Julia/nth-root.jl", "max_stars_repo_name": "ethansaxenian/RosettaDecode", "max_stars_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lang/Julia/nth-root.jl", "max_issues_repo_name": "ethansaxenian/RosettaDecode", "max_issues_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lang/Julia/nth-root.jl", "max_forks_repo_name": "ethansaxenian/RosettaDecode", "max_forks_repo_head_hexsha": "8ea1a42a5f792280b50193ad47545d14ee371fb7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 23.2222222222, "max_line_length": 52, "alphanum_fraction": 0.4330143541, "num_tokens": 176}
using ArgMacros

# Example CLI built with ArgMacros: two optional integer options (with
# defaults), a boolean flag, and one optional positional argument.
# @beginarguments parses ARGS and binds each declared name locally.
function main()
    @beginarguments begin
        @argumentdefault Int 1 opt1 "-o" "--opt1"    # -o/--opt1, defaults to 1
        @argumentdefault Int 2 opt2 "--opt2"         # --opt2, defaults to 2
        @argumentflag flag "--flag"                  # true iff --flag given
        @positionaloptional String arg "arg"         # optional positional; nothing if absent
    end

    # Echo every parsed value for demonstration.
    println(" arg=>", arg)
    println(" opt1=>", opt1)
    println(" opt2=>", opt2)
    println(" flag=>", flag)
    return
end

main()
{"hexsha": "29dadf3402e023253d4d3afc7a90b1d37f47c584", "size": 381, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "example/argmacros.jl", "max_stars_repo_name": "Eggiverse/Comonicon.jl", "max_stars_repo_head_hexsha": "aed96bc93cbf8427a7a2acd5a2297ce367c110b8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 70, "max_stars_repo_stars_event_min_datetime": "2020-07-20T15:13:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-15T14:38:34.000Z", "max_issues_repo_path": "example/argmacros.jl", "max_issues_repo_name": "Leticia-maria/Comonicon.jl", "max_issues_repo_head_hexsha": "a7c38f9378f32a70396c5aaa5607b46391c921a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 55, "max_issues_repo_issues_event_min_datetime": "2020-07-21T00:07:37.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-15T00:50:19.000Z", "max_forks_repo_path": "example/argmacros.jl", "max_forks_repo_name": "Leticia-maria/Comonicon.jl", "max_forks_repo_head_hexsha": "a7c38f9378f32a70396c5aaa5607b46391c921a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 7, "max_forks_repo_forks_event_min_datetime": "2021-06-04T21:28:27.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-21T03:26:01.000Z", "avg_line_length": 20.0526315789, "max_line_length": 49, "alphanum_fraction": 0.5879265092, "num_tokens": 107}
# -*- coding:utf-8 -*-

# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.

"""Datasets for image classification."""
import os
import numpy as np
import torch
from torchvision import transforms, datasets
from modnas.registry.dataset import register


def get_metadata(dataset):
    """Return dataset metadata (per-channel mean and stddev for normalization)."""
    if dataset == 'cifar10':
        mean = [0.49139968, 0.48215827, 0.44653124]
        stddev = [0.24703233, 0.24348505, 0.26158768]
    elif dataset == 'cifar100':
        mean = [0.5070751592371323, 0.48654887331495095, 0.4409178433670343]
        stddev = [0.2673342858792401, 0.2564384629170883, 0.27615047132568404]
    elif dataset == 'mnist':
        mean = [0.13066051707548254]
        stddev = [0.30810780244715075]
    elif dataset == 'fashionmnist':
        mean = [0.28604063146254594]
        stddev = [0.35302426207299326]
    elif dataset == 'imagenet':
        mean = [0.485, 0.456, 0.406]
        stddev = [0.229, 0.224, 0.225]
    else:
        # NOTE(review): stddev of all zeros would make Normalize divide
        # by zero if this fallback is ever used with to_tensor=True —
        # presumably callers of unknown datasets pass explicit stats;
        # confirm.
        mean = [0.5, 0.5, 0.5]
        stddev = [0, 0, 0]
    return {
        'mean': mean,
        'stddev': stddev,
    }


# Per-dataset augmentation pipelines used for training. Each value is a
# zero-arg (or keyword-only) callable returning a list of transforms;
# 'imagenet'/'image' accept a resize_scale keyword via transform_args.
_train_transforms = {
    'cifar10': lambda: [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()],
    'cifar100': lambda: [transforms.RandomCrop(32, padding=4), transforms.RandomHorizontalFlip()],
    'mnist': lambda: [transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=0.1)],
    'fashionmnist': lambda: [transforms.RandomAffine(degrees=15, translate=(0.1, 0.1), scale=(0.9, 1.1), shear=0.1),
                             transforms.RandomVerticalFlip()],
    'imagenet': lambda resize_scale=0.08: [
        transforms.RandomResizedCrop(224, scale=(resize_scale, 1.0)),
        transforms.RandomHorizontalFlip(),
    ],
    'image': lambda resize_scale=0.08: [
        transforms.RandomResizedCrop(224, scale=(resize_scale, 1.0)),
        transforms.RandomHorizontalFlip(),
    ],
}

# Deterministic evaluation-time pipelines; datasets without an entry get
# no extra transforms.
_valid_transforms = {
    'imagenet': lambda: [
        transforms.Resize(256),
        transforms.CenterCrop(224),
    ],
    'image': lambda: [
        transforms.Resize(256),
        transforms.CenterCrop(224),
    ],
}


class Cutout(object):
    """Apply Cutout on dataset.

    Zeroes a random square patch of side `length` in a (C, H, W) tensor.
    """

    def __init__(self, length):
        # Side length (pixels) of the square region to mask out.
        self.length = length

    def __call__(self, img):
        """Return image with Cutout applied."""
        h, w = img.size(1), img.size(2)
        mask = np.ones((h, w), np.float32)
        # Pick a random center; the patch is clipped at the image border.
        y = np.random.randint(h)
        x = np.random.randint(w)
        y1 = np.clip(y - self.length // 2, 0, h)
        y2 = np.clip(y + self.length // 2, 0, h)
        x1 = np.clip(x - self.length // 2, 0, w)
        x2 = np.clip(x + self.length // 2, 0, w)
        mask[y1:y2, x1:x2] = 0.
        mask = torch.from_numpy(mask)
        mask = mask.expand_as(img)
        # In-place multiply: the input tensor itself is modified.
        img *= mask
        return img


@register
def ImageClsData(dataset, root, valid=False, mean=None, stddev=None, cutout=0,
                 jitter=False, transform_args=None, to_tensor=True):
    """Return dataset for image classification.

    Args:
        dataset: dataset name (case-insensitive); one of cifar10, cifar100,
            mnist, fashionmnist, imagenet, image.
        root: data directory (created if missing; downloads where supported).
        valid: build the evaluation pipeline instead of the training one.
        mean/stddev: normalization stats; default to get_metadata(dataset).
        cutout: if > 0, append Cutout with this patch size.
        jitter: True/'strong' or 'normal' to append ColorJitter.
        transform_args: kwargs forwarded to the dataset's transform factory.
            NOTE(review): only the 'imagenet'/'image' factories accept
            kwargs — passing args for other datasets would raise; confirm.
        to_tensor: append ToTensor + Normalize.
    """
    dataset = dataset.lower()
    meta = get_metadata(dataset)
    mean = meta['mean'] if mean is None else mean
    stddev = meta['stddev'] if stddev is None else stddev
    os.makedirs(root, exist_ok=True)
    if dataset == 'cifar10':
        dset = datasets.CIFAR10
    elif dataset == 'cifar100':
        dset = datasets.CIFAR100
    elif dataset == 'mnist':
        dset = datasets.MNIST
    elif dataset == 'fashionmnist':
        dset = datasets.FashionMNIST
    elif dataset == 'imagenet':
        dset = datasets.ImageFolder
    elif dataset == 'image':
        dset = datasets.ImageFolder
    else:
        raise ValueError('unsupported dataset: {}'.format(dataset))
    # Base pipeline: train or valid transforms for this dataset (empty if none).
    transf_all = _valid_transforms if valid else _train_transforms
    transf = transf_all.get(dataset, lambda: [])(**(transform_args or {}))
    if jitter is True or jitter == 'strong':
        transf.append(transforms.ColorJitter(brightness=0.4, contrast=0.4, saturation=0.4, hue=0.1))
    elif jitter == 'normal':
        transf.append(transforms.ColorJitter(brightness=32. / 255., saturation=0.5))
    if to_tensor:
        transf.extend([transforms.ToTensor(), transforms.Normalize(mean, stddev)])
    if cutout > 0:
        # Cutout operates on tensors, so it must follow ToTensor.
        transf.append(Cutout(cutout))
    if dset == datasets.ImageFolder:
        # ImageFolder has no train/download arguments.
        data = dset(root, transform=transforms.Compose(transf))
    else:
        data = dset(root, train=(not valid), transform=transforms.Compose(transf), download=True)
    return data
{"hexsha": "13b66ec5a684e4b020f4413369eae206f8f36e84", "size": 5009, "ext": "py", "lang": "Python", "max_stars_repo_path": "vega/algorithms/nas/modnas/data_provider/dataset/torch/image_cls.py", "max_stars_repo_name": "shaido987/vega", "max_stars_repo_head_hexsha": "14d5d49fb8bdf96bd1f3fcfac201ce6b6712c3b6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-05-08T07:47:44.000Z", "max_stars_repo_stars_event_max_datetime": "2021-05-08T07:47:44.000Z", "max_issues_repo_path": "vega/algorithms/nas/modnas/data_provider/dataset/torch/image_cls.py", "max_issues_repo_name": "WholeG/vega", "max_issues_repo_head_hexsha": "d1ccf1c3ce68a118bdb6775594ceed0f895911e7", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "vega/algorithms/nas/modnas/data_provider/dataset/torch/image_cls.py", "max_forks_repo_name": "WholeG/vega", "max_forks_repo_head_hexsha": "d1ccf1c3ce68a118bdb6775594ceed0f895911e7", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.525974026, "max_line_length": 101, "alphanum_fraction": 0.6182870833, "include": true, "reason": "import numpy", "num_tokens": 1356}
This editor can edit this entry and tell us a bit about themselves by clicking the Edit icon. 20101005 08:19:43 nbsp Howdy and welcome to the wiki! My name's Evan, pleased to meet you! Thanks for adding Gan Haverim, it sounds like a neat preschool. I reformatted it a bit to match other entries to try to help you a bit. Once again, welcome to the wiki! Users/JabberWokky Evan JabberWokky Edwards 20101005 12:44:10 nbsp Hey, thanks. I was like, whoa, it just reformatted itself to conform. That's magic. But then I saw that you did it. :) Users/edubin
{"hexsha": "3ab1e7b91a7e489126c290eb11a64fe611122fbf", "size": 566, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/edubin.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/edubin.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/edubin.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 70.75, "max_line_length": 309, "alphanum_fraction": 0.7561837456, "num_tokens": 163}
import numpy as np
import pickle as pkl
import scipy.sparse as sp
# Fixed import: ``scipy.sparse.linalg.eigen.arpack`` was a private path that
# was removed in SciPy 1.8; ``eigsh`` lives at scipy.sparse.linalg.
from scipy.sparse.linalg import eigsh
import sys
import re
import datetime


def parse_index_file(filename):
    """Parse an index file (one integer per line) into a list of ints."""
    # Context manager added: the original iterated ``open(filename)`` directly
    # and never closed the handle.
    with open(filename) as f:
        return [int(line.strip()) for line in f]


def sample_mask(idx, l):
    """Return a length-``l`` boolean array that is True at positions ``idx``."""
    # ``dtype=bool`` replaces ``np.bool``, which was removed in NumPy 1.24.
    mask = np.zeros(l, dtype=bool)
    mask[idx] = True
    return mask


def load_data(dataset_str):
    """Load a Planetoid-format citation dataset from ./data.

    Args:
        dataset_str: dataset name, e.g. 'cora' or 'citeseer'.

    Returns:
        (adj, features, y_train, y_val, y_test, train_mask, val_mask,
        test_mask) where ``adj`` is the sparse adjacency matrix, ``features``
        a sparse LIL feature matrix, and the y_* / *_mask arrays are aligned
        with ``labels.shape[0]`` nodes.
    """
    # Imported lazily: only this function needs networkx, so the module stays
    # importable without it.
    import networkx as nx

    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
    objects = []
    for name in names:
        with open("data/ind.{}.{}".format(dataset_str, name), 'rb') as f:
            if sys.version_info > (3, 0):
                # Planetoid pickles were written by Python 2.
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, graph = tuple(objects)
    test_idx_reorder = parse_index_file(
        "data/ind.{}.test.index".format(dataset_str))
    test_idx_range = np.sort(test_idx_reorder)
    # Bug fix: print_log takes a single message argument; the original passed
    # six positional arguments, which raised a TypeError at runtime.
    print_log("{} {} {} {} {} {}".format(
        x.shape, y.shape, tx.shape, ty.shape, allx.shape, ally.shape))

    if dataset_str == 'citeseer':
        # Citeseer has isolated test nodes; pad their feature/label rows with
        # zeros so the test indices form one contiguous range.
        test_idx_range_full = range(
            min(test_idx_reorder), max(test_idx_reorder) + 1)
        tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
        tx_extended[test_idx_range - min(test_idx_range), :] = tx
        tx = tx_extended
        ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
        ty_extended[test_idx_range - min(test_idx_range), :] = ty
        ty = ty_extended

    features = sp.vstack((allx, tx)).tolil()
    # Restore the original node ordering of the test rows.
    features[test_idx_reorder, :] = features[test_idx_range, :]
    adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))

    labels = np.vstack((ally, ty))
    labels[test_idx_reorder, :] = labels[test_idx_range, :]

    idx_test = test_idx_range.tolist()
    idx_train = range(len(y))
    idx_val = range(len(y), len(y) + 500)  # conventional 500-node val split

    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])

    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]

    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask


def load_corpus(dataset_str):
    """Load a text-corpus dataset (Planetoid-style pickles plus a precomputed
    adjacency) from ./data.

    Returns:
        (adj, features, y_train, y_val, y_test, train_mask, val_mask,
        test_mask, train_size, test_size).
    """
    names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'adj']
    objects = []
    for name in names:
        with open("./data/ind.{}.{}".format(dataset_str, name), 'rb') as f:
            if sys.version_info > (3, 0):
                objects.append(pkl.load(f, encoding='latin1'))
            else:
                objects.append(pkl.load(f))
    x, y, tx, ty, allx, ally, adj = tuple(objects)

    features = sp.vstack((allx, tx)).tolil()
    labels = np.vstack((ally, ty))

    train_idx_orig = parse_index_file(
        "./data/{}.train.index".format(dataset_str))
    train_size = len(train_idx_orig)
    val_size = train_size - x.shape[0]
    test_size = tx.shape[0]

    idx_train = range(len(y))
    idx_val = range(len(y), len(y) + val_size)
    idx_test = range(allx.shape[0], allx.shape[0] + test_size)

    train_mask = sample_mask(idx_train, labels.shape[0])
    val_mask = sample_mask(idx_val, labels.shape[0])
    test_mask = sample_mask(idx_test, labels.shape[0])

    y_train = np.zeros(labels.shape)
    y_val = np.zeros(labels.shape)
    y_test = np.zeros(labels.shape)
    y_train[train_mask, :] = labels[train_mask, :]
    y_val[val_mask, :] = labels[val_mask, :]
    y_test[test_mask, :] = labels[test_mask, :]

    # Symmetrize: keep the larger of a_ij / a_ji in each position.
    adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)

    return adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask, train_size, test_size


def sparse_to_tuple(sparse_mx):
    """Convert a scipy sparse matrix (or a list of them) to a
    (coords, values, shape) tuple representation."""
    def to_tuple(mx):
        if not sp.isspmatrix_coo(mx):
            mx = mx.tocoo()
        coords = np.vstack((mx.row, mx.col)).transpose()
        values = mx.data
        shape = mx.shape
        return coords, values, shape

    if isinstance(sparse_mx, list):
        for i in range(len(sparse_mx)):
            sparse_mx[i] = to_tuple(sparse_mx[i])
    else:
        sparse_mx = to_tuple(sparse_mx)

    return sparse_mx


def preprocess_features(features):
    """Row-normalize a sparse feature matrix and return it as a dense array."""
    rowsum = np.array(features.sum(1))
    r_inv = np.power(rowsum, -1).flatten()
    r_inv[np.isinf(r_inv)] = 0.  # all-zero rows stay zero instead of inf
    r_mat_inv = sp.diags(r_inv)
    features = r_mat_inv.dot(features)
    return features.toarray()


def normalize_adj(adj):
    """Symmetrically normalize an adjacency matrix: D^-1/2 A D^-1/2."""
    adj = sp.coo_matrix(adj)
    rowsum = np.array(adj.sum(1))
    d_inv_sqrt = np.power(rowsum, -0.5).flatten()
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.  # isolated nodes contribute zero
    d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
    return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()


def preprocess_adj(adj):
    """Normalize ``adj + I`` (GCN-style) and return it as a dense array."""
    adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
    return adj_normalized.toarray()


def construct_feed_dict(features, support, labels, labels_mask, placeholders):
    """Build a TensorFlow feed dictionary for one training step.

    NOTE(review): ``features[1].shape`` implies ``features`` is the
    (coords, values, shape) tuple from ``sparse_to_tuple`` — confirm at the
    call site.
    """
    feed_dict = dict()
    feed_dict.update({placeholders['labels']: labels})
    feed_dict.update({placeholders['labels_mask']: labels_mask})
    feed_dict.update({placeholders['features']: features})
    feed_dict.update({placeholders['support'][i]: support[i]
                      for i in range(len(support))})
    feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
    return feed_dict


def chebyshev_polynomials(adj, k):
    """Compute Chebyshev polynomials of the scaled graph Laplacian up to
    order ``k``.

    Returns:
        list of k+1 dense arrays: T_0(L~) ... T_k(L~).
    """
    print_log("Calculating Chebyshev polynomials up to order {}...".format(k))

    adj_normalized = normalize_adj(adj)
    laplacian = sp.eye(adj.shape[0]) - adj_normalized
    largest_eigval, _ = eigsh(laplacian, 1, which='LM')
    # Rescale so the spectrum of the Laplacian lies in [-1, 1].
    scaled_laplacian = (
        2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])

    t_k = list()
    t_k.append(sp.eye(adj.shape[0]).toarray())
    t_k.append(scaled_laplacian.toarray())

    def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
        # T_n = 2 L~ T_{n-1} - T_{n-2}
        s_lap = sp.csr_matrix(scaled_lap, copy=True)
        return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two

    for i in range(2, k + 1):
        t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))

    return t_k


def loadWord2Vec(filename):
    """Load a whitespace-separated word-embedding text file.

    Returns:
        (vocab, embd, word_vector_map): word list, list of float vectors, and
        a word -> vector dict.
    """
    vocab = []
    embd = []
    word_vector_map = {}
    # Context manager added: the original used explicit open()/close().
    with open(filename, 'r') as f:
        for line in f:
            row = line.strip().split(' ')
            if len(row) > 2:
                vocab.append(row[0])
                vector = [float(v) for v in row[1:]]
                embd.append(vector)
                word_vector_map[row[0]] = vector
    print_log('Loaded Word Vectors!')
    return vocab, embd, word_vector_map


def clean_str(string):
    """Tokenization-oriented cleanup: strip odd characters, space out
    punctuation and contractions, collapse whitespace, lowercase."""
    string = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", string)
    string = re.sub(r"\'s", " \'s", string)
    string = re.sub(r"\'ve", " \'ve", string)
    string = re.sub(r"n\'t", " n\'t", string)
    string = re.sub(r"\'re", " \'re", string)
    string = re.sub(r"\'d", " \'d", string)
    string = re.sub(r"\'ll", " \'ll", string)
    string = re.sub(r",", " , ", string)
    string = re.sub(r"!", " ! ", string)
    # Replacement strings changed from " \( " to " ( " etc.: "\(" is an
    # invalid escape in a regular string literal and re-escapes to the same
    # literal character anyway, so the output is identical.
    string = re.sub(r"\(", " ( ", string)
    string = re.sub(r"\)", " ) ", string)
    string = re.sub(r"\?", " ? ", string)
    string = re.sub(r"\s{2,}", " ", string)
    return string.strip().lower()


def print_log(msg='', end='\n'):
    """Print ``msg`` prefixed with a [Y/M/D HH:MM:SS] timestamp.

    Multi-line strings get the timestamp on every line; ``end`` applies only
    to the final line.
    """
    now = datetime.datetime.now()
    # Same format as the original concatenation: unpadded date, padded time.
    t = '{}/{}/{} {:02d}:{:02d}:{:02d}'.format(
        now.year, now.month, now.day, now.hour, now.minute, now.second)
    if isinstance(msg, str):
        lines = msg.split('\n')
    else:
        lines = [msg]
    for i, line in enumerate(lines):
        # Index-based last-line test (the original compared values, which
        # misfired when a middle line equalled the last line).
        if i == len(lines) - 1:
            print('[' + t + '] ' + str(line), end=end)
        else:
            print('[' + t + '] ' + str(line))
{"hexsha": "5c8f334271453a97644e438211d294a0d2ba59e0", "size": 8197, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "Sherrylone/Ensemble-Neural-Network", "max_stars_repo_head_hexsha": "31a8ad63460208c82293126d9e3e5e61796dff96", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "utils.py", "max_issues_repo_name": "Sherrylone/Ensemble-Neural-Network", "max_issues_repo_head_hexsha": "31a8ad63460208c82293126d9e3e5e61796dff96", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "utils.py", "max_forks_repo_name": "Sherrylone/Ensemble-Neural-Network", "max_forks_repo_head_hexsha": "31a8ad63460208c82293126d9e3e5e61796dff96", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5792079208, "max_line_length": 105, "alphanum_fraction": 0.5979016713, "include": true, "reason": "import numpy,import scipy,from scipy,import networkx", "num_tokens": 2199}
__author__ = 'Asus'  # fixed: was ``___author__`` (three leading underscores)

from IClassifier import IClassifier
from Utils.utilities import load_stf
from glove import Glove
from scipy.spatial.distance import cosine
from scipy.spatial.distance import euclidean
import numpy as np


class GloveClassifier(IClassifier):
    """Answer fill-in-the-blank questions by scoring candidate words with
    GloVe vectors: cosine distance to the asked word, distance-decayed
    contributions from surrounding context words, and a centroid-based term.

    Attributes expected to be set by callers before use:
        GloveInstance: a trained Glove model exposing ``word_vectors`` (a
            row-per-word matrix) and ``dictionary`` (word -> row index).
        Centroids: dict mapping each word to its centroid vector.
    """

    def __init__(self):
        # Bug fix: the attribute was initialised as ``self.GloveInstace``
        # (typo) while every use site reads ``self.GloveInstance``.
        self.GloveInstance = None
        self.Centroids = None

    def answerQuestion(self, wordAskedFor, question, possibilities):
        """Pick the best answer for ``wordAskedFor`` in ``question``.

        Args:
            wordAskedFor (str): the word the question asks about; must exist
                in the GloVe dictionary and in ``Centroids``.
            question (str): full question sentence (space-separated).
            possibilities (list of str): candidate answers.

        Returns:
            tuple: (chosen candidate, diagnostic comment listing each
            candidate's score).  The candidate with the LOWEST score wins.
        """
        query_vec = self.GloveInstance.word_vectors[
            self.GloveInstance.dictionary[wordAskedFor]]
        query_centroid = self.Centroids[wordAskedFor]
        candidate_vecs = []
        candidate_centroids = []
        for p in possibilities:
            candidate_vecs.append(
                self.GloveInstance.word_vectors[self.GloveInstance.dictionary[p]])
            candidate_centroids.append(self.Centroids[p])

        # Tokenize once (hoisted: the original re-tokenized per candidate),
        # stripping trailing periods, and find the asked word's position.
        tokens = [tok.strip().strip('.') for tok in question.split(' ')]
        word_index = -1
        for j, token in enumerate(tokens):
            if token == wordAskedFor:
                word_index = j  # last occurrence, matching original behavior
        # NOTE(review): if the word is never found, word_index stays -1 and
        # the negative indexing below reads from the end of the sentence —
        # confirm this is intended.

        best_score = 1000  # sentinel; lower score is better
        best_index = -1
        comment = ''
        for i, cand_vec in enumerate(candidate_vecs):
            # Base score: distance to the asked word minus a centroid term.
            score = cosine(query_vec, cand_vec) \
                - np.power(cosine(cand_vec, candidate_centroids[i]), 0.09)
            # Add contributions from up to 9 context words on each side,
            # weighted by 1 / (distance from the asked word)^2.
            for j in range(9):
                try:
                    left = self.GloveInstance.word_vectors[
                        self.GloveInstance.dictionary[tokens[word_index - j - 1]]]
                    score += (1 / np.power(j + 1, 2.0)) * (
                        cosine(cand_vec, left)
                        - np.power(cosine(cand_vec, candidate_centroids[i]), 0.09))
                except (KeyError, IndexError):
                    # context word out of range or not in the vocabulary
                    pass
                try:
                    right = self.GloveInstance.word_vectors[
                        self.GloveInstance.dictionary[tokens[word_index + j + 1]]]
                    score += (1 / np.power(j + 1, 2.0)) * (
                        cosine(cand_vec, right)
                        - np.power(cosine(cand_vec, candidate_centroids[i]), 0.09))
                except (KeyError, IndexError):
                    pass
            comment += '\n\t\t\tsim(' + wordAskedFor + ',' + possibilities[i] + ')=' + str(score)
            if score < best_score:
                best_score = score
                best_index = i
        return (possibilities[best_index], comment)
{"hexsha": "b6d174ccf498dc93eceff7ddd2747056196d3ddc", "size": 1662, "ext": "py", "lang": "Python", "max_stars_repo_path": "SimilarityClassification/Classifiers/GloveCenteredESLExtendedClassifier.py", "max_stars_repo_name": "dudenzz/word_embedding", "max_stars_repo_head_hexsha": "7aa7c7619f8a56903384176f3d83e999153c55ce", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "SimilarityClassification/Classifiers/GloveCenteredESLExtendedClassifier.py", "max_issues_repo_name": "dudenzz/word_embedding", "max_issues_repo_head_hexsha": "7aa7c7619f8a56903384176f3d83e999153c55ce", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "SimilarityClassification/Classifiers/GloveCenteredESLExtendedClassifier.py", "max_forks_repo_name": "dudenzz/word_embedding", "max_forks_repo_head_hexsha": "7aa7c7619f8a56903384176f3d83e999153c55ce", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.24, "max_line_length": 89, "alphanum_fraction": 0.6835138387, "include": true, "reason": "import numpy,from scipy", "num_tokens": 509}
import torch
import numpy as np
import pandas as pd
from tqdm import tqdm
import os
import time
import pickle
import jieba
from collections import Counter
from gensim.models import KeyedVectors
from torch.utils.data import Dataset, DataLoader
from sklearn.model_selection import StratifiedShuffleSplit, train_test_split
from matplotlib import pyplot as plt


class WVEmbedding():
    """Build a fixed-size vocabulary from a CSV corpus (column 'review',
    tokenized with jieba) and an embedding matrix looked up from pretrained
    word2vec-format vectors.

    Index 0 is '<pad>', index 1 is '<unk>'; both get zero vectors.
    """

    def __init__(self, wv_path, data_path, vocab_size=29000, emb_path=None):
        # wv_path: word2vec-format vector file; data_path: CSV with a
        # 'review' column; emb_path: optional precomputed .npy embedding.
        self.wv_path = wv_path
        self.data_path = data_path
        self.vocab_size = vocab_size
        self.word_list = self.get_word_list()
        self.word_to_id, self.id_to_word = self.get_vocab()
        # load data from saved data, save lots of time
        if emb_path:
            self.embedding = np.load(emb_path)
        else:
            self.embedding = self.get_embedding()

    def get_embedding(self):
        """Look up a vector for every vocabulary word; rows for '<pad>',
        '<unk>' and out-of-vocabulary words stay zero."""
        self.wv = KeyedVectors.load_word2vec_format(self.wv_path)
        # get embedding dim
        embedding_dim = self.wv.vector_size
        emb = np.zeros((self.vocab_size, embedding_dim))
        # NOTE(review): ``KeyedVectors.vocab`` was removed in gensim 4
        # (replaced by ``key_to_index``) — this requires gensim < 4. Confirm
        # the pinned gensim version.
        wv_dict = self.wv.vocab.keys()
        num_found = 0
        for idx in tqdm(range(self.vocab_size)):
            word = self.id_to_word[idx]
            if word == '<pad>' or word == '<unk>':
                emb[idx] = np.zeros([embedding_dim])
            elif word in wv_dict:
                emb[idx] = self.wv.get_vector(word)
                num_found += 1
        print("{} of {} found, rate:{:.2f}".format(num_found, self.vocab_size, num_found/self.vocab_size))
        return emb

    # get all words from train data, dev data, test data
    def get_word_list(self):
        """Tokenize every 'review' row with jieba; returns a flat list of
        tokens (with duplicates, used for frequency counting)."""
        data = pd.read_csv(self.data_path, sep=',')
        word_list = []
        for i, line in enumerate(data['review'].values):
            word_list += jieba.lcut(line)
        return word_list

    def get_vocab(self):
        """Build word<->id maps for the ``vocab_size`` most frequent words,
        with '<pad>' and '<unk>' prepended at ids 0 and 1."""
        counts = Counter(self.word_list)
        vocab = sorted(counts, key=counts.get, reverse=True)
        # add <pad>
        vocab = ['<pad>', '<unk>'] + vocab
        print('total word size:{}'.format(len(vocab)))
        # trunk vocabulary
        if len(vocab) < self.vocab_size:
            raise Exception('Vocab less than requested!!!')
        else:
            vocab = vocab[:self.vocab_size]
        word_to_id = {word: i for i, word in enumerate(vocab)}
        id_to_word = {i: word for i, word in enumerate(vocab)}
        return word_to_id, id_to_word


class WaiMaiDataSet(Dataset):
    """PyTorch Dataset over a CSV with 'review'/'label' columns; each item is
    (token-id LongTensor padded to max_len, true length, label)."""

    def __init__(self, data_path, word_to_id, max_len=40, use_unk=False):
        self.datas, self.labels = self.load_data(data_path)
        self.max_len = max_len
        self.word_to_id = word_to_id
        self.pad_int = word_to_id['<pad>']
        self.use_unk = use_unk
        # internal data
        self.conversation_list, self.total_len = self.process_data(self.datas)

    def load_data(self, data_path):
        """Read the CSV and return (review texts, labels) as lists."""
        data = pd.read_csv(data_path)
        return data['review'].tolist(), data['label'].tolist()

    # turn sentence to id
    def sent_to_ids(self, text):
        """Tokenize ``text`` and map to ids, then truncate or pad to
        ``max_len``; returns (token_ids, unpadded length)."""
        tokens = jieba.lcut(text)
        # if use_unk is True, it will use <unk> vectors
        # else just remove this word
        if self.use_unk:
            token_ids = [self.word_to_id[x] if x in self.word_to_id else self.word_to_id['<unk>'] for x in tokens]
        else:
            token_ids = [self.word_to_id[x] for x in tokens if x in self.word_to_id]
        # Trunking or PADDING
        if len(token_ids) > self.max_len:
            token_ids = token_ids[: self.max_len]
            text_len = self.max_len
        else:
            text_len = len(token_ids)
            token_ids = token_ids + [self.pad_int] * (self.max_len - len(token_ids))
        return token_ids, text_len

    def process_data(self, data_list):
        """Vectorize every sentence; returns (list of id lists, list of
        unpadded lengths)."""
        conversation_list = []
        total_len = []
        for line in data_list:
            conversation, conver_len = self.sent_to_ids(line)
            conversation_list.append(conversation)
            total_len.append(conver_len)
        return conversation_list, total_len

    def __len__(self):
        return len(self.conversation_list)

    def __getitem__(self, idx):
        return torch.LongTensor(self.conversation_list[idx]),\
               self.total_len[idx], \
               self.labels[idx]


# turn sentence to vector represent,
# average all the word vector as the sentence vector
#
def to_avg_sv(path, save_path, wv_embedding):
    """Compute one averaged word-vector per 'review' row of the CSV at
    ``path`` and save the stacked result to ``save_path`` (.npy).

    NOTE(review): a sentence with no in-vocabulary words divides by n == 0
    — confirm the corpus guarantees at least one known token per row.
    """
    data = pd.read_csv(path)
    sv_list = []
    for line in data['review'].values:
        words = jieba.lcut(line)
        n = 0
        sentence_vector = 0
        for word in words:
            # not <unk>
            try:
                row_index = wv_embedding.word_to_id[word]
                sentence_vector += wv_embedding.embedding[row_index]
                n += 1
            except:
                pass
        # average
        sentence_vector /= n
        sv_list.append(sentence_vector)
    sv = np.array(sv_list)
    np.save(save_path, sv)


def to_concat_sv(path, save_path, wv_embedding, sen_len):
    """Concatenate the first ``sen_len`` word vectors of each review into one
    flat vector (zero-padded), assuming 300-dim embeddings; returns the list.

    NOTE(review): ``save_path`` is unused (the save lines are commented out)
    and the 300 dimension is hard-coded — confirm both are intentional.
    """
    data = pd.read_csv(path)
    sv_list = []
    for line in data['review'].values:
        words = jieba.lcut(line)
        n = 0
        sentence_vector = []
        for word in words:
            # not <unk>
            try:
                row_index = wv_embedding.word_to_id[word]
                sentence_vector += wv_embedding.embedding[row_index].tolist()
                n += 1
            except:
                pass
        # concat
        if n < sen_len:
            sentence_vector += [0.]*300*(sen_len - n)
        else:
            sentence_vector = sentence_vector[:300 * sen_len]
        sv_list.append(sentence_vector)
    return sv_list
    # sv = np.array(sv_list)
    # np.save(save_path, sv)


def plot_avg_len(path, wv_embedding):
    """Plot a histogram of the per-review count of in-vocabulary tokens."""
    data = pd.read_csv(path)
    len_list = []
    for line in data['review'].values:
        words = jieba.lcut(line)
        n = 0
        for word in words:
            # not <unk>
            try:
                row_index = wv_embedding.word_to_id[word]
                n += 1
            except:
                pass
        # average
        len_list.append(n)
    plt.hist(len_list, bins=100)
    plt.show()


# spilt single file to train val test file
def split_train_val_test(data_path):
    """Stratified 80/10/10 split of the CSV at ``data_path`` into
    weibo100k_{train,val,test}.csv.

    NOTE(review): the inner ``df.to_csv`` is commented out, so this function
    currently writes nothing — confirm before relying on the output files.
    """
    data = pd.read_csv(data_path, sep=',')
    X = data['review'].tolist()
    Y = data['label'].tolist()
    X_train, X_valtest, y_train, y_valtest = train_test_split(X, Y, test_size=0.2, stratify=Y)
    X_test, X_val, y_test, y_val = train_test_split(X_valtest, y_valtest, test_size=0.5, stratify=y_valtest)

    # NOTE: the line of code below raised an error when run
    # list.to_csv('e:/testcsv.csv',encoding='utf-8')
    thead = ['label', 'review']

    def list_to_csv(thead, c1, c2, path):
        # Stack labels and reviews as two columns of one DataFrame.
        data = np.vstack((c1, c2))
        data = np.transpose(data, (1,0))
        df = pd.DataFrame(columns=thead, data=data)
        # df.to_csv(path, index=False)

    list_to_csv(thead, y_train, X_train, 'weibo100k_train.csv')
    list_to_csv(thead, y_val, X_val, 'weibo100k_val.csv')
    list_to_csv(thead, y_test, X_test, 'weibo100k_test.csv')
    # return X_train,y_train, X_val, y_val, X_test, y_test


def get_data_set(root_path):
    """Load precomputed sentence vectors and labels for the train/val/test
    splits rooted at ``root_path`` (expects <root>_{mode}_sv.npy and
    <root>_{mode}.csv)."""
    def get_data(mode='train'):
        x_train_path = root_path+"_{}_sv.npy".format(mode)
        y_train_path = root_path+"_{}.csv".format(mode)
        x_train = np.load(x_train_path)
        data = pd.read_csv(y_train_path)
        y_train = data['label'].tolist()
        y_train = np.array(y_train)
        return x_train, y_train

    x_train, y_train = get_data('train')
    x_val, y_val = get_data('val')
    x_test, y_test = get_data('test')
    return x_train, y_train, x_val, y_val, x_test, y_test


if __name__ == "__main__":
    wv_path = "D:/datasets/NLP/embedding_cn/sgns.weibo.bigram-char.bz2"
    data_path = "D:/datasets/NLP/waimai10k.csv"
    wv_embedding = WVEmbedding(wv_path, data_path, 29000, emb_path='data/waimai10k/waimai10k_vocab29k_embedding.npy')
    # to_avg_sv("data/waimai10k/waimai10k_train.csv", "data/waimai10k/waimai10k_train_sv.npy", wv_embedding)
    # to_avg_sv("data/waimai10k/waimai10k_val.csv", "data/waimai10k/waimai10k_val_sv.npy", wv_embedding)
    # a = wv_embedding.embedding
    train_data = to_concat_sv("data/waimai10k/waimai10k_train.csv", "data/waimai10k/waimai10k_train_sv.npy", wv_embedding, 100)
    from sklearn.decomposition import PCA
    pca = PCA()
    pca.fit(train_data)
    print(pca.explained_variance_ratio_)
{"hexsha": "200301a2f0feb7c4f3156fc5bc8c34479bdf76a7", "size": 8844, "ext": "py", "lang": "Python", "max_stars_repo_path": "utils.py", "max_stars_repo_name": "kangyangWHU/ML_Project", "max_stars_repo_head_hexsha": "cc4c0406ff9cd1dd6e1fad827100d4ffc9b7f574", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 3, "max_stars_repo_stars_event_min_datetime": "2020-09-27T13:04:05.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-05T09:25:17.000Z", "max_issues_repo_path": "utils.py", "max_issues_repo_name": "kangyangWHU/ML_Project", "max_issues_repo_head_hexsha": "cc4c0406ff9cd1dd6e1fad827100d4ffc9b7f574", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-11-21T13:44:37.000Z", "max_issues_repo_issues_event_max_datetime": "2020-11-24T08:49:43.000Z", "max_forks_repo_path": "utils.py", "max_forks_repo_name": "kangyangWHU/ML_Project", "max_forks_repo_head_hexsha": "cc4c0406ff9cd1dd6e1fad827100d4ffc9b7f574", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-12-27T05:16:35.000Z", "max_forks_repo_forks_event_max_datetime": "2020-12-27T05:16:35.000Z", "avg_line_length": 31.0315789474, "max_line_length": 128, "alphanum_fraction": 0.5978064224, "include": true, "reason": "import numpy", "num_tokens": 2194}
""" Module dipole_pruning ================ This module supplies one class, DipolePruning, that is used to find the minimum number of elements required to resolve the dipole operator, to the requested tolerance. """ import os import numpy as np class DipolePruning(object): r"""This class takes the dipole operator, mu, which must be expressed in the eigenbasis of the system hamiltonian and uses Bessel's inequality to determine the smallest number of states needed to correctly resolve mu, to the given tolerance. It expects a file mu.npz in the folder 'file_path', with archive keys 'mu_GSM_to_SEM' and, optionally, 'mu_SEM_to_DEM'. Each key must return a 3d numpy array with indices [i,j,k], where i and j are indices of the eigenvalues of the system hamiltonian, and k is an index 0,1,2 cooresponding to cartesian coordinates x,y,z. """ def __init__(self,file_path): """Initialize object with Args: file_path (str): file path to folder containing mu.npz """ self.base_path = file_path self.load_mu() def load_mu(self): """Load the precalculated dipole overlaps. The dipole operator must be stored as a .npz file, and must contain a up to two arrays, each with three indices: (upper manifold eigenfunction, lower manifold eigenfunction, cartesian coordinate). Keys: 'GSM_to_SEM' connects the ground state and singly excited manifolds, 'SEM_to_DEM' connects the singly and doubly excited manifolds.""" file_name = os.path.join(self.base_path,'mu.npz') mu = np.load(file_name) mu_keys = mu.keys() self.mu = {mu_key:mu[mu_key] for mu_key in mu_keys} def calculate_boolean_mu(self,overlap_matrix,*,rel_tol=1E-3): """Uses Bessel's inequality to find the minimum number of dipole matrix elements needed to correctly resolve the dipole operator to the given tolerance. Args: overlap_matrix (np.ndarray) : 3d-array of dipole matrix elements [i,j,k] where i,j are eigenstates and k is a cartesian coordinate. rel_tol (float) : relative tolerance for resolving the dipole operator mu. 
""" dim0, dim1 = overlap_matrix.shape[:2] bool_mat = np.zeros((dim0,dim1),dtype=bool) # Inner product over cartesian coordinates: mu_ij dot mu_ij for each i,j pair # where mu_ij is a cartesian vector prob_matrix = np.sum(overlap_matrix**2,axis=(2)) # Sum over all lower manifold states probabilities = np.sum(prob_matrix,axis=1) # For each state n in the higher manifold for n in range(dim0): prob_tot = probabilities[n] # All lower states that connect to state n prob_list = prob_matrix[n,:] # Sort lower states by magnitude of mu_nj dot mu_nj prob_sort_ind = prob_list.argsort()[::-1] prob_sorted = prob_list[prob_sort_ind] prob = 0 # Bessel's inequality for j in range(prob_sorted.size): prob += prob_sorted[j] if np.abs((prob_tot - prob)/prob_tot) < rel_tol: # If the relative tolerance is attained, break out of loop break #Keep only the states needed to satisfy the specified rel_tol non_zero_ind = prob_sort_ind[:j+1] #Set the states needed as True in a boolean array bool_mat[n,non_zero_ind] = True return bool_mat def save_boolean_mu(self,*,rel_tol = 1E-3): """Create and save the boolean masks for the dipole matrices at the given tolerance. Files created by this function are mu_pruned.npz and mu_boolean.npz. Args: rel_tol (float) : relative tolerance for resolving the dipole operator mu. Default value of 0.001 has been found to work well with vibronic systems to give convergence of the Transient Absorption signal of better than 1%. 
""" file_name_pruned = os.path.join(self.base_path,'mu_pruned.npz') file_name_boolean = os.path.join(self.base_path,'mu_boolean.npz') mu_GSM_to_SEM_boolean = self.calculate_boolean_mu(self.mu['GSM_to_SEM'],rel_tol=rel_tol) mu_GSM_to_SEM_pruned = self.mu['GSM_to_SEM'] * mu_GSM_to_SEM_boolean[:,:,np.newaxis] mu_boolean_dict = {'GSM_to_SEM':mu_GSM_to_SEM_boolean} mu_pruned_dict = {'GSM_to_SEM':mu_GSM_to_SEM_pruned} if 'SEM_to_DEM' in self.mu.keys(): mu_SEM_to_DEM_boolean = self.calculate_boolean_mu(self.mu['SEM_to_DEM'],rel_tol=rel_tol) mu_SEM_to_DEM_pruned = self.mu['SEM_to_DEM'] * mu_SEM_to_DEM_boolean[:,:,np.newaxis] mu_boolean_dict['SEM_to_DEM'] = mu_SEM_to_DEM_boolean mu_pruned_dict['SEM_to_DEM'] = mu_SEM_to_DEM_pruned np.savez(file_name_pruned,**mu_pruned_dict) np.savez(file_name_boolean,**mu_boolean_dict)
{"hexsha": "b8ab7b5bcf3bf81004fddaec17add90e933b61e6", "size": 4969, "ext": "py", "lang": "Python", "max_stars_repo_path": "ultrafastultrafast/dipole_pruning.py", "max_stars_repo_name": "peterarose/UF2", "max_stars_repo_head_hexsha": "cfc2c6625467945e12ac08bd267f79b6741e567f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 8, "max_stars_repo_stars_event_min_datetime": "2020-08-18T12:19:34.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-26T17:33:47.000Z", "max_issues_repo_path": "ultrafastultrafast/dipole_pruning.py", "max_issues_repo_name": "peterarose/UF2", "max_issues_repo_head_hexsha": "cfc2c6625467945e12ac08bd267f79b6741e567f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2020-09-03T11:43:44.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-25T04:13:41.000Z", "max_forks_repo_path": "ultrafastultrafast/dipole_pruning.py", "max_forks_repo_name": "peterarose/UF2", "max_forks_repo_head_hexsha": "cfc2c6625467945e12ac08bd267f79b6741e567f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-08-18T12:19:39.000Z", "max_forks_repo_forks_event_max_datetime": "2020-09-14T00:58:58.000Z", "avg_line_length": 42.8362068966, "max_line_length": 100, "alphanum_fraction": 0.6693499698, "include": true, "reason": "import numpy", "num_tokens": 1219}
# Log-group tags used to label log messages by subsystem so output can be
# filtered per component.
LOG_GROUP_COST_FUNCTIONS = :CostFunctionsConstructor
# NOTE(review): misspelled duplicate of LOG_GROUP_OPTIMIZATION_CONTAINER
# below (same value); presumably kept for backward compatibility — confirm
# no caller references this name before removing it.
LOG_GROUP_OPTIMZATION_CONTAINER = :OptimizationContainer
LOG_GROUP_TYPE_REGISTRATIONS = :TypeRegistrations
LOG_GROUP_FEEDFORWARDS_CONSTRUCTION = :FeedforwardConstructor
LOG_GROUP_BRANCH_CONSTRUCTIONS = :BranchConstructor
# NOTE(review): "CONSTUCTORS" is missing an R; renaming would break existing
# callers, so the typo is kept.
LOG_GROUP_SERVICE_CONSTUCTORS = :ServicesConstructor
LOG_GROUP_MODELS_VALIDATION = :ModelValidation
LOG_GROUP_OPTIMIZATION_CONTAINER = :OptimizationContainer
LOG_GROUP_BUILD_INITIAL_CONDITIONS = :InitialConditionsBuild
LOG_GROUP_NETWORK_CONSTRUCTION = :NetworkConstructor
LOG_GROUP_MODEL_STORE = :ModelStore
LOG_GROUP_RESULTS = :Results
LOG_GROUP_SIMULATION_STORE = :SimulationStore
{"hexsha": "140951fb42490bb28549b4c1973ab9ad9a42f1d5", "size": 658, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/utils/logging.jl", "max_stars_repo_name": "NREL/PowerSimulations.jl", "max_stars_repo_head_hexsha": "17891d4a61d071ce98efd96706ec7586e5d833cc", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 51, "max_stars_repo_stars_event_min_datetime": "2018-11-30T12:50:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-03-26T21:22:23.000Z", "max_issues_repo_path": "src/utils/logging.jl", "max_issues_repo_name": "NREL/PowerSimulations.jl", "max_issues_repo_head_hexsha": "17891d4a61d071ce98efd96706ec7586e5d833cc", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 246, "max_issues_repo_issues_event_min_datetime": "2018-11-19T19:56:51.000Z", "max_issues_repo_issues_event_max_datetime": "2020-03-30T22:16:13.000Z", "max_forks_repo_path": "src/utils/logging.jl", "max_forks_repo_name": "NREL/PowerSimulations.jl", "max_forks_repo_head_hexsha": "17891d4a61d071ce98efd96706ec7586e5d833cc", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 23, "max_forks_repo_forks_event_min_datetime": "2018-12-11T08:40:20.000Z", "max_forks_repo_forks_event_max_datetime": "2020-02-21T15:53:32.000Z", "avg_line_length": 43.8666666667, "max_line_length": 61, "alphanum_fraction": 0.8996960486, "num_tokens": 141}
from typing import List, Any
from collections import defaultdict

import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor as T


def calculate_softmax_np(logits: np.ndarray, dim: int) -> np.ndarray:
    """
    Numpy-version softmax.

    Subtracts the per-slice maximum before exponentiating for numerical
    stability (standard log-sum-exp trick); this does not change the result.

    Args:
        logits (np.ndarray): Input logits.
        dim (int): Dimension along which softmax is calculated.

    Returns:
        np.ndarray: Probabilities summing to 1 along `dim`.
    """
    e_logits = np.exp(logits - np.max(logits, axis=dim, keepdims=True))
    return e_logits / np.sum(e_logits, axis=dim, keepdims=True)


def _as_probabilities_torch(
    logits: T,
    dim: int,
    normalized: bool,
    assert_normalized: bool,
) -> T:
    """
    Return `logits` as probabilities along `dim`.

    If `normalized` is False, a softmax is applied. Otherwise the input is
    returned unchanged, optionally after verifying it sums to 1 along `dim`.

    Raises:
        ValueError: If `assert_normalized` is True and the input does not
            sum to 1 along `dim`.
    """
    if not normalized:
        return F.softmax(logits, dim=dim)
    if assert_normalized:
        logits_s = logits.sum(dim=dim)
        one = torch.Tensor([1.0]).to(device=logits.device)
        if not torch.allclose(logits_s, one):
            raise ValueError(
                "The array has not been normalized (e.g., softmaxed)"
            )
    return logits


def _as_probabilities_np(
    logits: np.ndarray,
    dim: int,
    normalized: bool,
    assert_normalized: bool,
) -> np.ndarray:
    """Numpy counterpart of `_as_probabilities_torch`; same contract."""
    if not normalized:
        return calculate_softmax_np(logits, dim=dim)
    if assert_normalized:
        logits_s = logits.sum(axis=dim)
        if not np.allclose(logits_s, 1.0):
            raise ValueError(
                "The array has not been normalized (e.g., softmaxed)"
            )
    return logits


def calculate_entropy_torch(
    logits: T,
    dim: int,
    normalized: bool,
    assert_normalized: bool = False,
    eps: float = 1e-8,
) -> T:
    """
    Torch-version Shannon entropy calculation.

    Args:
        logits (torch.Tensor): Input logits, normalized or not normalized.
        dim (int): Dimension along which entropy is calculated.
        normalized (bool): Whether `logits` is normalized along `dim` axis.
            If not, a softmax layer will be applied to the input tensor.
        assert_normalized (bool): Whether to check if the array is normalized
            or not if `normalized` is True.
        eps (float): Small constant added before the log to avoid log(0).

    Returns:
        torch.Tensor: Entropy values with `dim` reduced.
    """
    probs = _as_probabilities_torch(logits, dim, normalized, assert_normalized)
    probs = probs + eps  # guard against log(0) on exact-zero probabilities
    entropy = -probs * torch.log(probs)
    return torch.sum(entropy, dim=dim)


def calculate_entropy_np(
    logits: np.ndarray,
    dim: int,
    normalized: bool,
    assert_normalized: bool = False,
    eps: float = 1e-8,
) -> np.ndarray:
    """
    Numpy-version Shannon entropy calculation.

    Args:
        logits (np.ndarray): Input logits, normalized or not normalized.
        dim (int): Dimension along which entropy is calculated.
        normalized (bool): Whether `logits` is normalized along `dim` axis.
            If not, a softmax layer will be applied to the input tensor.
        assert_normalized (bool): Whether to check if the array is normalized
            or not if `normalized` is True.
        eps (float): Small constant added before the log to avoid log(0).

    Returns:
        np.ndarray: Entropy values with `dim` reduced.
    """
    probs = _as_probabilities_np(logits, dim, normalized, assert_normalized)
    probs = probs + eps  # guard against log(0) on exact-zero probabilities
    entropy = -probs * np.log(probs)
    return np.sum(entropy, axis=dim)


def calculate_bvsb_torch(
    logits: T,
    dim: int,
    normalized: bool,
    assert_normalized: bool = False,
) -> T:
    """
    Calculate best-vs-second best values from a prediction map.

    The returned value is second_best / best, a confusion score in (0, 1]:
    values near 1 mean the top two classes are nearly tied.

    Args:
        logits (torch.Tensor): Input logits, normalized or not normalized.
        dim (int): Dimension along which bvsb value is calculated.
        normalized (bool): Whether `logits` is normalized along `dim` axis.
            If not, a softmax layer will be applied to the input tensor.
        assert_normalized (bool): Whether to check if the array is normalized
            or not if `normalized` is True.

    Raises:
        ValueError: If there is only a single class along `dim`.
    """
    if logits.shape[dim] == 1:
        raise ValueError(
            "Best-vs-second-best policy is not applicable for single-class "
            "probabilities."
        )
    probs = _as_probabilities_torch(logits, dim, normalized, assert_normalized)
    # topk with sorted=True guarantees index 0 is the best, index 1 second.
    top2 = torch.topk(probs, k=2, dim=dim, largest=True, sorted=True)[0]
    best, second = torch.split(top2, split_size_or_sections=1, dim=dim)
    return (second / best).squeeze(dim=dim)


def calculate_bvsb_np(
    logits: np.ndarray,
    dim: int,
    normalized: bool,
    assert_normalized: bool = False,
) -> np.ndarray:
    """
    Calculate best-vs-second best values from a prediction map.

    The returned value is second_best / best along `dim` (same contract as
    `calculate_bvsb_torch`).

    Args:
        logits (np.ndarray): Input logits.
        dim (int): Dimension along which bvsb value is calculated.
        normalized (bool): Whether `logits` is normalized along `dim` axis.
            If not, a softmax layer will be applied to the input tensor.
        assert_normalized (bool): Whether to check if the array is normalized
            or not if `normalized` is True.

    Raises:
        ValueError: If there is only a single class along `dim`.

    Note:
        Bug fixes versus the previous implementation:
        - the single-class check used ``shape[1]`` instead of ``shape[dim]``
          (inconsistent with the torch version);
        - ``np.argpartition(-logits, kth=2)`` only guarantees the element at
          position 2 is in sorted place — positions 0 and 1 are unordered,
          so best and second-best could be read swapped. ``kth=1`` puts the
          second-largest value exactly at index 1, with the largest at 0.
    """
    if logits.shape[dim] == 1:
        raise ValueError(
            "Best-vs-second-best policy is not applicable for single-class "
            "probabilities."
        )
    probs = _as_probabilities_np(logits, dim, normalized, assert_normalized)
    # kth=1 on the negated array: index 0 holds the largest probability,
    # index 1 the second-largest, along `dim`.
    top2 = -np.partition(-probs, kth=1, axis=dim)
    best = np.take(top2, indices=0, axis=dim)
    second = np.take(top2, indices=1, axis=dim)
    return second / best


def get_unique_indices(
    x: List[Any],
    device: str,
    buffer_size: int = None,
):
    """
    Get indices of unique values in a given list. If a value has a duplicate
    in the list, only its first occurrence is recorded. Elements in the list
    should be all hash-able.

    The resulting index list is cross-checked across distributed workers via
    `gather_and_compare` before being returned.
    """
    # Imported lazily so the pure-numeric helpers above remain usable
    # without pulling in the distributed utilities.
    from ..utils.dist_utils import gather_and_compare

    # Map each value to the indices where it occurs; dict insertion order is
    # preserved, so values() iterates in first-occurrence order.
    count = defaultdict(list)
    for i, obj in enumerate(x):
        count[obj].append(i)
    unique_indices = [indices[0] for indices in count.values()]

    gather_and_compare(unique_indices, device, buffer_size=buffer_size)
    return unique_indices
{"hexsha": "e0e91ee6606b808577a14a35e2178a2120c7ef0f", "size": 6166, "ext": "py", "lang": "Python", "max_stars_repo_path": "mmdet/active_learning/al/utils.py", "max_stars_repo_name": "the-linh-ai/mmdetection", "max_stars_repo_head_hexsha": "d7fa62fef08715810fed9afd057b2ec048a67313", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "mmdet/active_learning/al/utils.py", "max_issues_repo_name": "the-linh-ai/mmdetection", "max_issues_repo_head_hexsha": "d7fa62fef08715810fed9afd057b2ec048a67313", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mmdet/active_learning/al/utils.py", "max_forks_repo_name": "the-linh-ai/mmdetection", "max_forks_repo_head_hexsha": "d7fa62fef08715810fed9afd057b2ec048a67313", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.1414141414, "max_line_length": 77, "alphanum_fraction": 0.6302302952, "include": true, "reason": "import numpy", "num_tokens": 1490}
#!/usr/bin/env python
"""
Simple module to deal with unit conversion
"""
__all__ = ['Units']

import numpy as np


class Units(object):
    """Simple class to keep track of unit conversions.

    All conversion constants express one unit in terms of the cgs base
    (cm, g), so multiplying converts *to* cgs and dividing converts *from*
    cgs. Could be replaced by astropy...
    """
    # length (base: cm)
    cm = 1
    m = 1e2  # m to cm
    km = m * 1e3  # km to cm
    pc = 3.08568e18  # pc to cm
    kpc = pc * 1e3  # kpc to cm
    m2 = 1e4

    # mass (base: g)
    g = 1.0
    msun = 1.98892e33  # solar mass to g
    gev = 1.78266e-24  # gev to g

    # density
    msun_pc3 = msun * np.power(pc, -3)
    msun_kpc3 = msun * np.power(kpc, -3)
    msun2_pc5 = np.power(msun, 2) * np.power(pc, -5)
    msun2_kpc5 = np.power(msun, 2) * np.power(kpc, -5)
    gev2_cm5 = np.power(gev, 2)
    gev_cm3 = np.power(gev, 1)
    gev_cm2 = np.power(gev, 1)
    g_cm3 = 1.0
    cm3_s = 1.0

    # random
    hr = 3600.  # hours to seconds
    deg2 = np.power(np.pi / 180., 2)  # square degrees to steradians

    # This is to convert stuff to strings that astropy units understands
    map_to_astropy = {'gev2_cm5': 'GeV2 / cm5',
                      'gev_cm3': 'GeV / cm3',
                      'gev_cm2': 'GeV / cm2',
                      'g_cm3': 'g / cm3',
                      'cm3_s': 'cm3 / s'}

    # Inverse mapping: astropy-style strings back to attribute names.
    map_from_astropy = {}
    for k, v in map_to_astropy.items():
        map_from_astropy[v] = k

    @staticmethod
    def get_value(key):
        """ Get a conversion value based on a key

        This is here to make it easy to automate unit conversion.
        The key may be either an attribute name (e.g. 'kpc') or an
        astropy-style string (e.g. 'GeV / cm3').

        Parameters
        ----------
        key : str or None, a key corresponding to one of the class
            attributes defined above, or None

        Returns
        -------
        the conversion constant, or None if `key` is None

        Raises
        ------
        KeyError : if `key` matches neither an attribute nor an
            astropy-style string
        """
        if key is None:
            return None
        try:
            return getattr(Units, key)
        except AttributeError:
            # Not an attribute name: fall back to the astropy-style mapping.
            # Previously a bare `except:` swallowed every error here,
            # including KeyboardInterrupt; catch only the lookup failure.
            try:
                newkey = Units.map_from_astropy[key]
                return getattr(Units, newkey)
            except KeyError as exc:
                raise KeyError("Did not recognize units %s" % key) from exc

    @staticmethod
    def convert_to(value, key):
        """ Convert from cgs units to a different type of units

        Parameters
        ----------
        value : scalar or array-like, the input value(s)

        key : str or None, a key corresponding to one of the class
            attributes defined above; None means "no conversion"

        Returns
        -------
        the input values, converted to requested units
        """
        conv = Units.get_value(key)
        if conv is None:
            return value
        return value / conv

    @staticmethod
    def convert_from(value, key):
        """ Convert to cgs units from a different type of units

        Parameters
        ----------
        value : scalar or array-like, the input value(s)

        key : str or None, a key corresponding to one of the class
            attributes defined above; None means "no conversion"

        Returns
        -------
        the input values, converted to cgs units
        """
        conv = Units.get_value(key)
        if conv is None:
            return value
        return value * conv
{"hexsha": "d54fe28e4c130d5f479049feb82c6cedbabff935", "size": 2982, "ext": "py", "lang": "Python", "max_stars_repo_path": "dmsky/utils/units.py", "max_stars_repo_name": "kadrlica/dmsky", "max_stars_repo_head_hexsha": "a8f0c47c43164851b738b4a59a723addf89054bc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-19T05:19:31.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-19T05:19:31.000Z", "max_issues_repo_path": "dmsky/utils/units.py", "max_issues_repo_name": "kadrlica/dmsky", "max_issues_repo_head_hexsha": "a8f0c47c43164851b738b4a59a723addf89054bc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 10, "max_issues_repo_issues_event_min_datetime": "2017-10-24T23:32:40.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-16T23:49:59.000Z", "max_forks_repo_path": "dmsky/utils/units.py", "max_forks_repo_name": "kadrlica/dmsky", "max_forks_repo_head_hexsha": "a8f0c47c43164851b738b4a59a723addf89054bc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2017-05-18T19:01:11.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-08T18:37:27.000Z", "avg_line_length": 26.625, "max_line_length": 76, "alphanum_fraction": 0.5331991952, "include": true, "reason": "import numpy", "num_tokens": 809}
From VyZX Require Export GateDefinitions. From VyZX Require Export GateRules.
{"author": "inQWIRE", "repo": "VyZX", "sha": "86c6e2b2bdc380173a99c1a6fa777544bdd59cdb", "save_path": "github-repos/coq/inQWIRE-VyZX", "path": "github-repos/coq/inQWIRE-VyZX/VyZX-86c6e2b2bdc380173a99c1a6fa777544bdd59cdb/src/Gates/Gates.v"}
using Test

function check_readme(repository_root::AbstractString)
    # Load the README and the example config from the repository root.
    readme_text = read(joinpath(repository_root, "README.md"), String)
    example_text = read(joinpath(repository_root, "example.yml"), String)
    # The README must embed the example verbatim inside a fenced yaml block.
    fenced_example = "```yaml\n" * example_text * "```\n"
    @testset "Check README" begin
        @test occursin(fenced_example, readme_text)
    end
    return nothing
end
{"hexsha": "c70af4944911c34776439e6c08d516163431a464", "size": 466, "ext": "jl", "lang": "Julia", "max_stars_repo_path": ".ci/check_readme.jl", "max_stars_repo_name": "JuliaRegistries/compathelper-action", "max_stars_repo_head_hexsha": "3e6a675d956931a21542d8dd74d8e40374f3eda8", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": ".ci/check_readme.jl", "max_issues_repo_name": "JuliaRegistries/compathelper-action", "max_issues_repo_head_hexsha": "3e6a675d956931a21542d8dd74d8e40374f3eda8", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 2, "max_issues_repo_issues_event_min_datetime": "2021-08-24T07:34:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-09-01T14:05:01.000Z", "max_forks_repo_path": ".ci/check_readme.jl", "max_forks_repo_name": "JuliaRegistries/compathelper-action", "max_forks_repo_head_hexsha": "3e6a675d956931a21542d8dd74d8e40374f3eda8", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.4117647059, "max_line_length": 59, "alphanum_fraction": 0.678111588, "num_tokens": 107}
import numpy as np

def PolyDetrend(t, x, Order=1):
	'''
	Detrends a time series by subtracting a least-squares polynomial fit.

	Inputs
	======
	t : float
		Time array
	x : float
		Value array to be detrended; may contain NaN/inf, which are
		excluded from the fit but kept (as-is) in the output positions.
	Order : int
		Order of polynomial to use

	Returns
	=======
	y : float
		Detrended x (x minus the fitted polynomial evaluated at t)
	'''
	# Fit only on finite samples so NaN/inf gaps don't poison the fit.
	# (The previous version computed np.where(good)[0] into an unused
	# variable; the boolean mask indexes directly.)
	# NOTE(review): assumes t itself is finite wherever x is — confirm.
	good = np.isfinite(x)
	p = np.polyfit(t[good], x[good], Order)

	# Evaluate the fitted polynomial at every time, including the gaps,
	# then subtract it from the original series.
	pf = np.poly1d(p)
	xp = pf(t)
	return x - xp
{"hexsha": "4339597f567a44c3be38ddb275bc7c7bc3a2894e", "size": 595, "ext": "py", "lang": "Python", "max_stars_repo_path": "wavespec/Tools/PolyDetrend.py", "max_stars_repo_name": "mattkjames7/wavespec", "max_stars_repo_head_hexsha": "2928ac3f4fb921a21caba3663b0d7c22e43692fa", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-01-04T15:08:57.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-04T15:08:57.000Z", "max_issues_repo_path": "wavespec/Tools/PolyDetrend.py", "max_issues_repo_name": "mattkjames7/wavespec", "max_issues_repo_head_hexsha": "2928ac3f4fb921a21caba3663b0d7c22e43692fa", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "wavespec/Tools/PolyDetrend.py", "max_forks_repo_name": "mattkjames7/wavespec", "max_forks_repo_head_hexsha": "2928ac3f4fb921a21caba3663b0d7c22e43692fa", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 14.875, "max_line_length": 43, "alphanum_fraction": 0.6386554622, "include": true, "reason": "import numpy", "num_tokens": 193}
#!/usr/bin/env python3 import unittest, random, sys, copy, argparse, inspect, time from graderUtil import graded, CourseTestRunner, GradedTestCase import numpy # Import student submission import submission numpy.random.seed(0) ############################################# # HELPER FUNCTIONS FOR CREATING TEST INPUTS # ############################################# ######### # TESTS # ######### # By convention, test classes are used for each subquestion (1a, 1b, 2a, 2b, etc.) # Tests within a class are numbered, starting at 0: test_0(), test_1(), etc. # Tests must have a docstring. The docstring must be in the following format: # """test_name: test_description""" # The test name should be: subquestion-test_num-{basic/hidden} # # Access the solutions with the following method: # result = self.run_with_solution_if_possible(submission, lambda sub_or_sol: sub_or_sol.func(test_args)) # # In the supplied function, the first and only argument is either the submission # or the solution, depending on whether the autograder is run locally or with # the solutions present. 
class Test_1b(GradedTestCase):
    """Graded tests for subquestion 1b (policy-gradient helpers)."""

    def basic_test(self, submission_op, expected_op, func_being_tested):
        """
        Basic shape and data type test.

        Args:
            submission_op: student submission output of the method being tested
            expected_op: expected output
            func_being_tested: name of the function being tested

        Returns:
            None
        """
        self.assertIsInstance(
            submission_op, type(expected_op),
            msg="Expected output of {func_being_tested} function to be of type : "
                "{expected_op_type} but got {submission_op_type}"
                .format(func_being_tested=func_being_tested,
                        expected_op_type=type(expected_op),
                        submission_op_type=type(submission_op)))
        self.assertEqual(
            expected_op.shape, submission_op.shape,
            msg="Expected output shape of {func_being_tested} function to be : "
                "{expected_op_shape} but got {submission_op_shape}"
                .format(func_being_tested=func_being_tested,
                        expected_op_shape=expected_op.shape,
                        submission_op_shape=submission_op.shape))

    @graded(timeout=30)
    def test_00(self):
        """1b-0-basic: Evaluating softmax() output"""
        x = numpy.array([[0.40740357, 0.46248366],
                         [0.73424676, 0.52653602],
                         [0.21102139, 0.34013982],
                         [0.98344458, 0.73088265]])
        submission_op = submission.softmax(x)
        expected_op = numpy.array([[0.48623346, 0.51376654],
                                   [0.55174179, 0.44825821],
                                   [0.46776516, 0.53223484],
                                   [0.56280698, 0.43719302]])
        self.basic_test(submission_op=submission_op, expected_op=expected_op,
                        func_being_tested="softmax()")
        self.assertTrue(numpy.allclose(expected_op, submission_op),
                        msg="Expected output of softmax() function to be {} but got {}"
                            .format(expected_op, submission_op))

    @graded(timeout=30)
    def test_01i(self):
        """1b-1i-basic: Evaluating forward_pass() output datatype and shape"""
        S = numpy.array([[0.0, 1.0, 1.0, 0.0]])
        W = numpy.array([[0.71186985, 0.80105879],
                         [0.40029088, 0.16408907],
                         [0.24686674, 0.37594635],
                         [0.50711907, 0.47308381]])
        submission_op = submission.forward_pass(W, S)
        expected_op = numpy.array([[0.52675497, 0.47324503]])
        self.basic_test(submission_op=submission_op, expected_op=expected_op,
                        func_being_tested="forward_pass()")

    @graded(timeout=30, is_hidden=True, after_published=False)
    def test_01ii(self):
        """1b-1ii-hidden: Evaluating forward_pass() output"""
        S = numpy.array([[1.0, 0.0, 0.0, 1.0, 0.0]])
        W = numpy.array([[0.54642436, 0.8941891],
                         [0.6116459, 0.91185742],
                         [0.40735244, 0.16564978],
                         [0.614072, 0.10781311],
                         [0.19485619, 0.03428044]])
        submission_op = submission.forward_pass(W, S)
        expected_op = self.run_with_solution_if_possible(
            submission, lambda sub_or_sol: sub_or_sol.forward_pass(W, S))
        # Bug fix: the msg format string was missing its {} placeholder, so
        # the student's output was never shown on failure.
        self.assertTrue(numpy.allclose(expected_op, submission_op),
                        msg="Output of forward_pass() function does not match expected value\nYour output:\n{}"
                            .format(submission_op))

    @graded(timeout=30)
    def test_02i(self):
        """1b-2i-basic: Evaluating policy_gradient() output shape and datatype"""
        W = numpy.array(([[0.48435543, 0.87405626],
                          [0.9785218, 0.97894915],
                          [0.94119834, 0.01011787]]))
        S = numpy.array([[1.0, 0.0, 1.0],
                         [1.0, 0.0, 1.0],
                         [1.0, 1.0, 0.0]])
        A = numpy.array([1, 1, 0])
        G = numpy.array([0.64159167, 0.51844126, 0.8613691])
        learning_rate = 0.01
        submission_op = submission.policy_gradient(W, S, A, G, learning_rate)
        expected_op = numpy.array([[-0.00219649, 0.00219649],
                                   [0.00513646, -0.00513646],
                                   [-0.00733295, 0.00733295]])
        self.basic_test(submission_op=submission_op, expected_op=expected_op,
                        func_being_tested="policy_gradient()")

    @graded(timeout=30, is_hidden=True, after_published=False)
    def test_02ii(self):
        """1b-2ii-hidden: Evaluating policy_gradient() output"""
        W = numpy.array(([[0.86514535, 0.54550479],
                          [0.96762212, 0.46626473],
                          [0.02265237, 0.66559754]]))
        S = numpy.array([[0.0, 0.0, 1.0],
                         [1.0, 0.0, 0.0],
                         [0.0, 1.0, 0.0]])
        A = numpy.array([0, 1, 0])
        G = numpy.array([0.58153841, 0.21730104, 0.73679393])
        learning_rate = 0.01
        submission_op = submission.policy_gradient(W, S, A, G, learning_rate)
        expected_op = self.run_with_solution_if_possible(
            submission,
            lambda sub_or_sol: sub_or_sol.policy_gradient(W, S, A, G, learning_rate))
        # Bug fix: the msg format string was missing its {} placeholder.
        self.assertTrue(numpy.allclose(expected_op, submission_op),
                        msg="Output of policy_gradient() does not match expected value\nYour output:\n{}"
                            .format(submission_op))

    @graded(timeout=30)
    def test_03(self):
        """1b-3-basic: Evaluating init_policy_data() output"""
        num_state_params = 4
        num_actions = 2
        submission_op = submission.init_policy_data(num_state_params, num_actions)
        # expected_op in this case may not be exactly equal to submission_op.
        # We only evaluate for shape and the data type.
        # NOTE(review): W has num_state_params + 1 rows — presumably a bias
        # row; confirm against the assignment handout.
        expected_op = {
            'W': numpy.array([[1.76405235, 0.40015721],
                              [0.97873798, 2.2408932],
                              [1.86755799, -0.97727788],
                              [0.95008842, -0.15135721],
                              [-0.10321885, 0.4105985]]),
            'episode': [],
            'history': []}
        self.assertIn('W', submission_op)
        self.assertIn('episode', submission_op)
        self.assertIn('history', submission_op)
        self.basic_test(submission_op=submission_op['W'],
                        expected_op=expected_op['W'],
                        func_being_tested="init_policy_data()")

    @graded(timeout=30)
    def test_04(self):
        """1b-4-basic: Evaluating choose_action() output"""
        state = numpy.array([1., 1., 0., 0., 0.])
        policy_data = {'W': numpy.array([[0.06381596, -0.12089246, 0.02620287, -0.6438723],
                                         [-0.19561564, 0.84086584, 0.98102769, 0.03692181],
                                         [-1.33627622, 2.09333675, 0.77533065, -0.11594722],
                                         [0.7757198, -0.09771534, 0.25640622, 0.35121989],
                                         [-0.97432407, -1.49746049, 0.04341171, 0.52956114]]),
                       'episode': [],
                       'history': []}
        num_actions = 4
        submission_op = submission.choose_action(state, policy_data, num_actions)
        # The chosen action must be an integer index into the action space.
        self.assertTrue(numpy.issubdtype(type(submission_op), numpy.integer))
        self.assertIn(submission_op, set(numpy.arange(num_actions)))

    @graded(timeout=30)
    def test_05(self):
        """1b-5-basic: Evaluating record_transition()"""
        policy_data = {'W': numpy.array([[-0.91033536, -1.46960675, -0.78136995],
                                         [0.27055657, -1.17676548, -0.21280714],
                                         [-0.01503552, 0.27920773, 1.81210538],
                                         [-0.5006686, 0.46293075, -1.46280086]]),
                       'episode': [],
                       'history': []}
        prior_state = numpy.array([1., 0., 0., 1.])
        action = 2
        reward = 0.0
        posterior_state = numpy.array([1., 1., 0., 0.])
        policy_data_updated = copy.deepcopy(policy_data)
        submission.record_transition(policy_data_updated, prior_state, action,
                                     reward, posterior_state)
        # Exactly one transition must have been appended to the episode.
        self.assertTrue(len(policy_data_updated['episode']) - len(policy_data['episode']) == 1)
        # Note : You may not need all of the data provided to this
        # record_transition method (s, a, r, s')
        self.assertTrue(len(policy_data_updated['episode'][-1]) == 3)

    @graded(timeout=30)
    def test_06i(self):
        """1b-6i-basic: Evaluating accumulate_discounted_future_rewards() output datatype and shape"""
        R = numpy.array([0., 0., 0., -1., 0., 0., 0., 0., 0., -1.])
        gamma = 0.5
        submission_op = submission.accumulate_discounted_future_rewards(R, gamma)
        expected_op = numpy.array([-0.12695312, -0.25390625, -0.5078125, -1.015625,
                                   -0.03125, -0.0625, -0.125, -0.25, -0.5, -1.])
        self.basic_test(submission_op=submission_op, expected_op=expected_op,
                        func_being_tested='accumulate_discounted_future_rewards()')

    @graded(timeout=30, is_hidden=True, after_published=False)
    def test_06ii(self):
        """1b-6ii-hidden: Evaluating accumulate_discounted_future_rewards() output"""
        R = numpy.array([0., 0., -1., -1., 0., -1.])
        gamma = 0.3
        submission_op = submission.accumulate_discounted_future_rewards(R, gamma)
        expected_op = self.run_with_solution_if_possible(
            submission,
            lambda sub_or_sol: sub_or_sol.accumulate_discounted_future_rewards(R, gamma))
        # Bug fix: the failure message previously named policy_gradient()
        # even though this test exercises accumulate_discounted_future_rewards().
        self.assertTrue(numpy.allclose(expected_op, submission_op),
                        msg="Output of accumulate_discounted_future_rewards() function does not match expected value\nYour output:\n{}"
                            .format(submission_op))


def getTestCaseForTestID(test_id):
    """Map a test id like '1b-0-basic' to the matching TestCase instance.

    The id format is '<question>-<part>-<visibility>'; the class is looked up
    as Test_<question> and the method as test_<part>. Returns None when no
    matching class exists (the caller will then fail adding it to the suite).
    """
    question, part, _ = test_id.split('-')
    g = globals().copy()
    for name, obj in g.items():
        if inspect.isclass(obj) and name == ('Test_' + question):
            return obj('test_' + part)


if __name__ == '__main__':
    # Run a single test (by id) or the full suite discovered from this file.
    parser = argparse.ArgumentParser()
    parser.add_argument('test_case', nargs='?', default='all')
    test_id = parser.parse_args().test_case

    assignment = unittest.TestSuite()
    if test_id != 'all':
        assignment.addTest(getTestCaseForTestID(test_id))
    else:
        assignment.addTests(
            unittest.defaultTestLoader.discover('.', pattern='grader.py'))
    CourseTestRunner().run(assignment)
{"hexsha": "5f47843d830eeb0fed713c363881c25ad2d3df2d", "size": 11402, "ext": "py", "lang": "Python", "max_stars_repo_path": "XCS229ii-PS3-Sandbox/src/grader.py", "max_stars_repo_name": "bearbearyu1223/Stanford-XCS-229-II", "max_stars_repo_head_hexsha": "7e5743fb326352a168400bb96694c54ed476773f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-04-16T20:15:20.000Z", "max_stars_repo_stars_event_max_datetime": "2021-04-23T08:37:27.000Z", "max_issues_repo_path": "XCS229ii-PS3-Sandbox/src/grader.py", "max_issues_repo_name": "bearbearyu1223/Stanford-XCS-229-II", "max_issues_repo_head_hexsha": "7e5743fb326352a168400bb96694c54ed476773f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "XCS229ii-PS3-Sandbox/src/grader.py", "max_forks_repo_name": "bearbearyu1223/Stanford-XCS-229-II", "max_forks_repo_head_hexsha": "7e5743fb326352a168400bb96694c54ed476773f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 44.3657587549, "max_line_length": 138, "alphanum_fraction": 0.6038414313, "include": true, "reason": "import numpy", "num_tokens": 3036}
-- Domain of discourse
variable A : Type

-- Universal modus ponens: from ∀x (P x → Q x) and ∀x P x, conclude ∀x Q x.
lemma Example1 (P Q : A → Prop) : (∀ x, P x → Q x) → (∀ x, P x) → (∀ x, Q x) :=
begin
  intros hpq hp,
  intro a,
  apply hpq, -- lines below can be replaced by apply ((hpq a) (hp a))
  apply hp
end

-- The universal quantifier distributes over conjunction.
lemma Example2 (P Q : A → Prop) : (∀ x, P x ∧ Q x) ↔ (∀ x, P x) ∧ (∀ x, Q x) :=
begin
  apply iff.intro,
  -- [-->] forward direction
  intro hpq,
  apply and.intro,
  intro x,
  apply and.left (hpq x),
  intro x,
  apply and.right (hpq x),
  -- [<--] backward direction
  intro hpq,
  intro x,
  apply and.intro,
  apply ((and.left hpq) x),
  apply ((and.right hpq) x)
end

-- A witness of ∃x (P x ∧ Q x) is in particular a witness of ∃x P x.
lemma Example3 (P Q : A → Prop) : (∃ x, P x ∧ Q x) → (∃ x, P x) :=
begin
  intro hpq,
  apply (exists.elim hpq),
  intros a ha,
  apply exists.intro a,
  apply (and.left ha)
end
{"author": "ahevia", "repo": "CC3101_2021", "sha": "178392747409383f39e357b0dde919553d214aeb", "save_path": "github-repos/lean/ahevia-CC3101_2021", "path": "github-repos/lean/ahevia-CC3101_2021/CC3101_2021-178392747409383f39e357b0dde919553d214aeb/Clases/Clase5/demo_Clase_5_Logica_Predicados.lean"}
@testset "Strings" begin @testset "Strings: Is Palindrome" begin s = "AA" # A palindrome @test is_palindrome(s) == true s = "ABC" # Not a palindrome @test is_palindrome(s) == false s = "Statistics" # Not a palindrome @test is_palindrome(s) == false s = "Stats" # A palindrome @test is_palindrome(s) == true s = "Racecar" # A palindrome @test is_palindrome(s) == true x = "Hello" # Not a palindrome @test is_palindrome(s) == false end end
{"hexsha": "9d7b9bccca34bc1a1bd7436452fee9ec52d322da", "size": 512, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "test/strings.jl", "max_stars_repo_name": "arubhardwaj/Julia", "max_stars_repo_head_hexsha": "cc8e8e942072f7bfb5479b25482133fd2c7141a6", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test/strings.jl", "max_issues_repo_name": "arubhardwaj/Julia", "max_issues_repo_head_hexsha": "cc8e8e942072f7bfb5479b25482133fd2c7141a6", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test/strings.jl", "max_forks_repo_name": "arubhardwaj/Julia", "max_forks_repo_head_hexsha": "cc8e8e942072f7bfb5479b25482133fd2c7141a6", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 28.4444444444, "max_line_length": 42, "alphanum_fraction": 0.59375, "num_tokens": 158}
#!/usr/bin/env python3
"""Build a cost-function grid for the eikonal3d example, run the solver
binary, and report the maximum error against the exact solution."""

import numpy as np
import h5py as h5
import argparse
import os
import subprocess

parser = argparse.ArgumentParser()
parser.add_argument('--npts', type=int, default=101,
                    help='Number of gridpoints per dimension.')
parser.add_argument('--target', type=float, nargs=3, default=[0.0, 0.0, 0.0],
                    help='Coordinates of the target point.')
args = parser.parse_args()

filename = '../data/eikonal3d.h5'

# Start from a clean file so stale datasets never leak into this run.
if os.path.isfile(filename):
    os.remove(filename)

# Unit cost everywhere except the target cell, which the solver flags by -1.
data = np.ones((args.npts, args.npts, args.npts))
# Map target coordinates in [-1, 1] to grid indices in [0, npts - 1].
i = int(np.floor((args.npts - 1) * (1 + args.target[0]) / 2.0))
j = int(np.floor((args.npts - 1) * (1 + args.target[1]) / 2.0))
k = int(np.floor((args.npts - 1) * (1 + args.target[2]) / 2.0))
data[i, j, k] = -1

# Context managers guarantee the HDF5 handle is closed (and flushed) even on
# error — previously a raised exception would leak the open handle, and the
# solver binary reopens this same file.
with h5.File(filename, "w") as f:
    f.create_dataset('cost_function', shape=data.shape, data=data)

subprocess.run(["../../build/examples/eikonal3d"])

with h5.File(filename, "r") as f:
    data = f['value_function'][()]

# Exact solution for unit speed: Euclidean distance from the target point.
t = np.linspace(-1, 1, data.shape[0])
x, y, z = np.meshgrid(t, t, t, indexing='ij')
w = np.sqrt((x - args.target[0])**2
            + (y - args.target[1])**2
            + (z - args.target[2])**2)

print('Maximum error:', np.amax(np.abs(w - data)))
{"hexsha": "6776093698b7f2929c57cf3fe15e0bcd1e6cc9dc", "size": 1239, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/scripts/eikonal3d.py", "max_stars_repo_name": "mcpca/fsm", "max_stars_repo_head_hexsha": "df4081fa0e595284ddbb1f30f20c5fb2063aa41f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-06-18T14:07:29.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-14T11:35:29.000Z", "max_issues_repo_path": "examples/scripts/eikonal3d.py", "max_issues_repo_name": "mcpca/fsm", "max_issues_repo_head_hexsha": "df4081fa0e595284ddbb1f30f20c5fb2063aa41f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/scripts/eikonal3d.py", "max_forks_repo_name": "mcpca/fsm", "max_forks_repo_head_hexsha": "df4081fa0e595284ddbb1f30f20c5fb2063aa41f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-08-31T07:50:47.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-03T17:30:14.000Z", "avg_line_length": 26.3617021277, "max_line_length": 77, "alphanum_fraction": 0.6198547215, "include": true, "reason": "import numpy", "num_tokens": 381}
// Copyright (C) 2013,2014 Vicente J. Botet Escriba
//
// Distributed under the Boost Software License, Version 1.0. (See accompanying
// file LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt)
//
// 2013/11 Vicente J. Botet Escriba
//    first implementation of a simple user scheduler.
// 2013/11 Vicente J. Botet Escriba
//    rename loop_executor.

#ifndef BOOST_THREAD_EXECUTORS_LOOP_EXECUTOR_HPP
#define BOOST_THREAD_EXECUTORS_LOOP_EXECUTOR_HPP

#include <boost/thread/detail/config.hpp>
#if defined BOOST_THREAD_PROVIDES_FUTURE_CONTINUATION && defined BOOST_THREAD_PROVIDES_EXECUTORS && defined BOOST_THREAD_USES_MOVE

#include <boost/thread/detail/delete.hpp>
#include <boost/thread/detail/move.hpp>
#include <boost/thread/concurrent_queues/sync_queue.hpp>
#include <boost/thread/executors/work.hpp>

#include <boost/assert.hpp>

#include <boost/config/abi_prefix.hpp>

namespace boost
{
namespace executors
{
  // An executor whose tasks are run on the thread that calls loop() /
  // run_queued_closures() / try_executing_one(), rather than on an
  // internal pool.
  class loop_executor
  {
  public:
    /// type-erasure to store the works to do
    typedef executors::work work;
  private:
    /// the thread safe work queue
    concurrent::sync_queue<work > work_queue;

  public:
    /**
     * Effects: try to execute one task.
     * Returns: whether a task has been executed.
     * Throws: whatever the current task constructor throws or the task() throws.
     */
    bool try_executing_one()
    {
      return execute_one(/*wait:*/false);
    }

  private:
    /**
     * Effects: Execute one task.
     * Remark: If wait is true, waits until a task is available or the executor
     *         is closed. If wait is false, returns false immediately if no
     *         task is available.
     * Returns: whether a task has been executed (if wait is true, only returns false if closed).
     * Throws: whatever the current task constructor throws or the task() throws.
     */
    bool execute_one(bool wait)
    {
      work task;
      try
      {
        queue_op_status status = wait ?
          work_queue.wait_pull(task) : work_queue.try_pull(task);
        if (status == queue_op_status::success)
        {
          task();
          return true;
        }
        BOOST_ASSERT(!wait || status == queue_op_status::closed);
        return false;
      }
      catch (...)
      {
        // Any exception escaping a task (or the pull) is fatal, mirroring
        // the behavior of a thread whose function throws.
        std::terminate();
        //return false;
      }
    }

  public:
    /// loop_executor is not copyable.
    BOOST_THREAD_NO_COPYABLE(loop_executor)

    /**
     * \b Effects: creates a thread pool that runs closures using one of its closure-executing methods.
     *
     * \b Throws: Whatever exception is thrown while initializing the needed resources.
     */
    loop_executor()
    {
    }

    /**
     * \b Effects: Destroys the thread pool.
     *
     * \b Synchronization: The completion of all the closures happen before the completion of the \c loop_executor destructor.
     */
    ~loop_executor()
    {
      // signal to all the worker thread that there will be no more submissions.
      close();
    }

    /**
     * The main loop of the worker thread
     */
    void loop()
    {
      // First drain while accepting new submissions (blocking wait);
      // once closed, run whatever remains without blocking.
      while (execute_one(/*wait:*/true))
      {
      }
      BOOST_ASSERT(closed());
      while (try_executing_one())
      {
      }
    }

    /**
     * \b Effects: close the \c loop_executor for submissions.
     * The loop will work until there is no more closures to run.
     */
    void close()
    {
      work_queue.close();
    }

    /**
     * \b Returns: whether the pool is closed for submissions.
     */
    bool closed()
    {
      return work_queue.closed();
    }

    /**
     * \b Requires: \c Closure is a model of \c Callable(void()) and a model of \c CopyConstructible/MoveConstructible.
     *
     * \b Effects: The specified \c closure will be scheduled for execution at some point in the future.
     * If invoked closure throws an exception the \c loop_executor will call \c std::terminate, as is the case with threads.
     *
     * \b Synchronization: completion of \c closure on a particular thread happens before destruction of thread's thread local variables.
     *
     * \b Throws: \c sync_queue_is_closed if the thread pool is closed.
     * Whatever exception that can be throw while storing the closure.
     */
    void submit(BOOST_THREAD_RV_REF(work) closure)
    {
      work_queue.push(boost::move(closure));
    }
#if defined(BOOST_NO_CXX11_RVALUE_REFERENCES)
    template <typename Closure>
    void submit(Closure & closure)
    {
      submit(work(closure));
    }
#endif
    void submit(void (*closure)())
    {
      submit(work(closure));
    }

    template <typename Closure>
    void submit(BOOST_THREAD_FWD_REF(Closure) closure)
    {
      //work_queue.push(work(boost::forward<Closure>(closure)));
      work w((boost::forward<Closure>(closure)));
      submit(boost::move(w));
    }

    /**
     * \b Requires: This must be called from an scheduled task.
     *
     * \b Effects: reschedule functions until pred()
     */
    template <typename Pred>
    bool reschedule_until(Pred const& pred)
    {
      do {
        if ( ! try_executing_one())
        {
          return false;
        }
      } while (! pred());
      return true;
    }

    /**
     * run queued closures
     */
    void run_queued_closures()
    {
      // NOTE(review): underlying_queue() appears to hand over the queued
      // closures in bulk so they run without per-item locking — confirm
      // against sync_queue's documentation.
      sync_queue<work>::underlying_queue_type q = work_queue.underlying_queue();
      while (! q.empty())
      {
        work& task = q.front();
        task();
        q.pop_front();
      }
    }
  };
}
using executors::loop_executor;
}

#include <boost/config/abi_suffix.hpp>

#endif
#endif
{"hexsha": "ca2cdda6b2afb052da1e45a21d295004655b23f5", "size": 5789, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "deps/boost/include/boost/thread/executors/loop_executor.hpp", "max_stars_repo_name": "kindlychung/mediasoup-sfu-cpp", "max_stars_repo_head_hexsha": "f69d2f48f7edbf4f0c57244280a47bea985f39cf", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 995.0, "max_stars_repo_stars_event_min_datetime": "2018-06-22T10:39:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T01:22:14.000Z", "max_issues_repo_path": "deps/boost/include/boost/thread/executors/loop_executor.hpp", "max_issues_repo_name": "kindlychung/mediasoup-sfu-cpp", "max_issues_repo_head_hexsha": "f69d2f48f7edbf4f0c57244280a47bea985f39cf", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 32.0, "max_issues_repo_issues_event_min_datetime": "2018-06-23T14:19:37.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T10:20:37.000Z", "max_forks_repo_path": "deps/boost/include/boost/thread/executors/loop_executor.hpp", "max_forks_repo_name": "kindlychung/mediasoup-sfu-cpp", "max_forks_repo_head_hexsha": "f69d2f48f7edbf4f0c57244280a47bea985f39cf", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 172.0, "max_forks_repo_forks_event_min_datetime": "2018-06-22T11:12:00.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-29T07:44:33.000Z", "avg_line_length": 27.0514018692, "max_line_length": 138, "alphanum_fraction": 0.6080497495, "num_tokens": 1297}
# -*- coding: utf-8 -*- """ Iridas .cube LUT Format Input / Output Utilities ================================================ Defines *Iridas* *.cube* *LUT* Format related input / output utilities objects. - :func:`colour.io.read_LUT_IridasCube` - :func:`colour.io.write_LUT_IridasCube` References ---------- - :cite:`AdobeSystems2013b` : Adobe Systems. (2013). Cube LUT Specification. https://drive.google.com/open?id=143Eh08ZYncCAMwJ1q4gWxVOqR_OSWYvs """ from __future__ import division, unicode_literals import numpy as np from colour.constants import DEFAULT_INT_DTYPE from colour.io.luts import LUT1D, LUT3x1D, LUT3D, LUTSequence from colour.io.luts.common import path_to_title from colour.utilities import as_float_array, usage_warning __author__ = 'Colour Developers' __copyright__ = 'Copyright (C) 2013-2020 - Colour Developers' __license__ = 'New BSD License - https://opensource.org/licenses/BSD-3-Clause' __maintainer__ = 'Colour Developers' __email__ = 'colour-developers@colour-science.org' __status__ = 'Production' __all__ = ['read_LUT_IridasCube', 'write_LUT_IridasCube'] def read_LUT_IridasCube(path): """ Reads given *Iridas* *.cube* *LUT* file. Parameters ---------- path : unicode *LUT* path. Returns ------- LUT3x1D or LUT3d :class:`LUT3x1D` or :class:`LUT3D` class instance. References ---------- :cite:`AdobeSystems2013b` Examples -------- Reading a 3x1D *Iridas* *.cube* *LUT*: >>> import os >>> path = os.path.join( ... os.path.dirname(__file__), 'tests', 'resources', 'iridas_cube', ... 'ACES_Proxy_10_to_ACES.cube') >>> print(read_LUT_IridasCube(path)) LUT3x1D - ACES Proxy 10 to ACES ------------------------------- <BLANKLINE> Dimensions : 2 Domain : [[ 0. 0. 0.] [ 1. 1. 1.]] Size : (32, 3) Reading a 3D *Iridas* *.cube* *LUT*: >>> path = os.path.join( ... os.path.dirname(__file__), 'tests', 'resources', 'iridas_cube', ... 
'Colour_Correct.cube') >>> print(read_LUT_IridasCube(path)) LUT3D - Generated by Foundry::LUT --------------------------------- <BLANKLINE> Dimensions : 3 Domain : [[ 0. 0. 0.] [ 1. 1. 1.]] Size : (4, 4, 4, 3) Reading a 3D *Iridas* *.cube* *LUT* with comments: >>> path = os.path.join( ... os.path.dirname(__file__), 'tests', 'resources', 'iridas_cube', ... 'Demo.cube') >>> print(read_LUT_IridasCube(path)) LUT3x1D - Demo -------------- <BLANKLINE> Dimensions : 2 Domain : [[ 0. 0. 0.] [ 1. 2. 3.]] Size : (3, 3) Comment 01 : Comments can go anywhere """ title = path_to_title(path) domain_min, domain_max = np.array([0, 0, 0]), np.array([1, 1, 1]) dimensions = 3 size = 2 table = [] comments = [] with open(path) as cube_file: lines = cube_file.readlines() for line in lines: line = line.strip() if len(line) == 0: continue if line.startswith('#'): comments.append(line[1:].strip()) continue tokens = line.split() if tokens[0] == 'TITLE': title = ' '.join(tokens[1:])[1:-1] elif tokens[0] == 'DOMAIN_MIN': domain_min = as_float_array(tokens[1:]) elif tokens[0] == 'DOMAIN_MAX': domain_max = as_float_array(tokens[1:]) elif tokens[0] == 'LUT_1D_SIZE': dimensions = 2 size = DEFAULT_INT_DTYPE(tokens[1]) elif tokens[0] == 'LUT_3D_SIZE': dimensions = 3 size = DEFAULT_INT_DTYPE(tokens[1]) else: table.append(tokens) table = as_float_array(table) if dimensions == 2: return LUT3x1D( table, title, np.vstack([domain_min, domain_max]), comments=comments) elif dimensions == 3: # The lines of table data shall be in ascending index order, # with the first component index (Red) changing most rapidly, # and the last component index (Blue) changing least rapidly. table = table.reshape([size, size, size, 3], order='F') return LUT3D( table, title, np.vstack([domain_min, domain_max]), comments=comments) def write_LUT_IridasCube(LUT, path, decimals=7): """ Writes given *LUT* to given *Iridas* *.cube* *LUT* file. 
Parameters ---------- LUT : LUT3x1D or LUT3d or LUTSequence :class:`LUT3x1D`, :class:`LUT3D` or :class:`LUTSequence` class instance to write at given path. path : unicode *LUT* path. decimals : int, optional Formatting decimals. Returns ------- bool Definition success. Warnings -------- - If a :class:`LUTSequence` class instance is passed as ``LUT``, the first *LUT* in the *LUT* sequence will be used. References ---------- :cite:`AdobeSystems2013b` Examples -------- Writing a 3x1D *Iridas* *.cube* *LUT*: >>> from colour.algebra import spow >>> domain = np.array([[-0.1, -0.2, -0.4], [1.5, 3.0, 6.0]]) >>> LUT = LUT3x1D( ... spow(LUT3x1D.linear_table(16, domain), 1 / 2.2), ... 'My LUT', ... domain, ... comments=['A first comment.', 'A second comment.']) >>> write_LUT_IridasCube(LUT, 'My_LUT.cube') # doctest: +SKIP Writing a 3D *Iridas* *.cube* *LUT*: >>> domain = np.array([[-0.1, -0.2, -0.4], [1.5, 3.0, 6.0]]) >>> LUT = LUT3D( ... spow(LUT3D.linear_table(16, domain), 1 / 2.2), ... 'My LUT', ... np.array([[-0.1, -0.2, -0.4], [1.5, 3.0, 6.0]]), ... comments=['A first comment.', 'A second comment.']) >>> write_LUT_IridasCube(LUT, 'My_LUT.cube') # doctest: +SKIP """ if isinstance(LUT, LUTSequence): LUT = LUT[0] usage_warning('"LUT" is a "LUTSequence" instance was passed, ' 'using first sequence "LUT":\n' '{0}'.format(LUT)) assert not LUT.is_domain_explicit(), '"LUT" domain must be implicit!' if isinstance(LUT, LUT1D): LUT = LUT.as_LUT(LUT3x1D) assert (isinstance(LUT, LUT3x1D) or isinstance(LUT, LUT3D)), '"LUT" must be a 1D, 3x1D or 3D "LUT"!' is_3x1D = isinstance(LUT, LUT3x1D) size = LUT.size if is_3x1D: assert 2 <= size <= 65536, '"LUT" size must be in domain [2, 65536]!' else: assert 2 <= size <= 256, '"LUT" size must be in domain [2, 256]!' def _format_array(array): """ Formats given array as an *Iridas* *.cube* data row. 
""" return '{1:0.{0}f} {2:0.{0}f} {3:0.{0}f}'.format(decimals, *array) with open(path, 'w') as cube_file: cube_file.write('TITLE "{0}"\n'.format(LUT.name)) if LUT.comments: for comment in LUT.comments: cube_file.write('# {0}\n'.format(comment)) cube_file.write('{0} {1}\n'.format( 'LUT_1D_SIZE' if is_3x1D else 'LUT_3D_SIZE', LUT.table.shape[0])) default_domain = np.array([[0, 0, 0], [1, 1, 1]]) if not np.array_equal(LUT.domain, default_domain): cube_file.write('DOMAIN_MIN {0}\n'.format( _format_array(LUT.domain[0]))) cube_file.write('DOMAIN_MAX {0}\n'.format( _format_array(LUT.domain[1]))) if not is_3x1D: table = LUT.table.reshape([-1, 3], order='F') else: table = LUT.table for row in table: cube_file.write('{0}\n'.format(_format_array(row))) return True
{"hexsha": "133fa516185c1d9f6dd77a641bfc7ad9d011c7b5", "size": 7863, "ext": "py", "lang": "Python", "max_stars_repo_path": "colour/io/luts/iridas_cube.py", "max_stars_repo_name": "wenh06/colour", "max_stars_repo_head_hexsha": "445fdad2711ae39c95b4375166905568d24a95f4", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-09-09T01:53:40.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-09T01:53:40.000Z", "max_issues_repo_path": "colour/io/luts/iridas_cube.py", "max_issues_repo_name": "wenh06/colour", "max_issues_repo_head_hexsha": "445fdad2711ae39c95b4375166905568d24a95f4", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "colour/io/luts/iridas_cube.py", "max_forks_repo_name": "wenh06/colour", "max_forks_repo_head_hexsha": "445fdad2711ae39c95b4375166905568d24a95f4", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.897338403, "max_line_length": 79, "alphanum_fraction": 0.5449573954, "include": true, "reason": "import numpy", "num_tokens": 2374}
# **CS224W - Colab 4** In Colab 2 we constructed GNN models by using PyTorch Geometric's built in GCN layer, `GCNConv`. In Colab 3 we implemented the **GraphSAGE** ([Hamilton et al. (2017)](https://arxiv.org/abs/1706.02216)) layer. In this colab you'll use what you've learned and implement a more powerful layer: **GAT** ([Veličković et al. (2018)](https://arxiv.org/abs/1710.10903)). Then we will run our models on the CORA dataset, which is a standard citation network benchmark dataset. **Note**: Make sure to **sequentially run all the cells in each section** so that the intermediate variables / packages will carry over to the next cell Have fun and good luck on Colab 4 :) # Device We recommend using a GPU for this Colab. Please click `Runtime` and then `Change runtime type`. Then set the `hardware accelerator` to **GPU**. ## Installation ```python # Install torch geometric import os if 'IS_GRADESCOPE_ENV' not in os.environ: !pip install torch-scatter -f https://data.pyg.org/whl/torch-1.10.0+cu113.html !pip install torch-sparse -f https://data.pyg.org/whl/torch-1.10.0+cu113.html !pip install torch-geometric !pip install -q git+https://github.com/snap-stanford/deepsnap.git ``` Looking in links: https://data.pyg.org/whl/torch-1.10.0+cu113.html Requirement already satisfied: torch-scatter in /usr/local/lib/python3.7/dist-packages (2.0.9) Looking in links: https://data.pyg.org/whl/torch-1.10.0+cu113.html Requirement already satisfied: torch-sparse in /usr/local/lib/python3.7/dist-packages (0.6.12) Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from torch-sparse) (1.4.1) Requirement already satisfied: numpy>=1.13.3 in /usr/local/lib/python3.7/dist-packages (from scipy->torch-sparse) (1.19.5) Requirement already satisfied: torch-geometric in /usr/local/lib/python3.7/dist-packages (2.0.3) Requirement already satisfied: networkx in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (2.6.3) Requirement already satisfied: PyYAML in 
/usr/local/lib/python3.7/dist-packages (from torch-geometric) (3.13) Requirement already satisfied: rdflib in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (6.1.1) Requirement already satisfied: numpy in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (1.19.5) Requirement already satisfied: pandas in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (1.1.5) Requirement already satisfied: pyparsing in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (3.0.6) Requirement already satisfied: yacs in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (0.1.8) Requirement already satisfied: scipy in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (1.4.1) Requirement already satisfied: tqdm in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (4.62.3) Requirement already satisfied: jinja2 in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (2.11.3) Requirement already satisfied: scikit-learn in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (1.0.2) Requirement already satisfied: googledrivedownloader in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (0.4) Requirement already satisfied: requests in /usr/local/lib/python3.7/dist-packages (from torch-geometric) (2.23.0) Requirement already satisfied: MarkupSafe>=0.23 in /usr/local/lib/python3.7/dist-packages (from jinja2->torch-geometric) (2.0.1) Requirement already satisfied: pytz>=2017.2 in /usr/local/lib/python3.7/dist-packages (from pandas->torch-geometric) (2018.9) Requirement already satisfied: python-dateutil>=2.7.3 in /usr/local/lib/python3.7/dist-packages (from pandas->torch-geometric) (2.8.2) Requirement already satisfied: six>=1.5 in /usr/local/lib/python3.7/dist-packages (from python-dateutil>=2.7.3->pandas->torch-geometric) (1.15.0) Requirement already satisfied: importlib-metadata in /usr/local/lib/python3.7/dist-packages (from rdflib->torch-geometric) (4.10.0) Requirement already 
satisfied: setuptools in /usr/local/lib/python3.7/dist-packages (from rdflib->torch-geometric) (57.4.0) Requirement already satisfied: isodate in /usr/local/lib/python3.7/dist-packages (from rdflib->torch-geometric) (0.6.1) Requirement already satisfied: typing-extensions>=3.6.4 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->rdflib->torch-geometric) (3.10.0.2) Requirement already satisfied: zipp>=0.5 in /usr/local/lib/python3.7/dist-packages (from importlib-metadata->rdflib->torch-geometric) (3.7.0) Requirement already satisfied: certifi>=2017.4.17 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (2021.10.8) Requirement already satisfied: urllib3!=1.25.0,!=1.25.1,<1.26,>=1.21.1 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (1.24.3) Requirement already satisfied: chardet<4,>=3.0.2 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (3.0.4) Requirement already satisfied: idna<3,>=2.5 in /usr/local/lib/python3.7/dist-packages (from requests->torch-geometric) (2.10) Requirement already satisfied: threadpoolctl>=2.0.0 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->torch-geometric) (3.0.0) Requirement already satisfied: joblib>=0.11 in /usr/local/lib/python3.7/dist-packages (from scikit-learn->torch-geometric) (1.1.0) ```python import torch_geometric torch_geometric.__version__ ``` '2.0.3' # 1) GNN Layers ## Implementing Layer Modules In Colab 2, we implemented a GCN model for node and graph classification tasks. However, for that notebook we took advantage of PyG's built in GCN module. For Colabs 3 and 4, we provide a build upon a general Graph Neural Network Stack, into which we will be able to plugin our own module implementations: GraphSAGE and GAT. We will then use our layer implemenations to complete node classification on the CORA dataset, a standard citation network benchmark. 
In this dataset, nodes correspond to documents and edges correspond to undirected citations. Each node or document in the graph is assigned a class label and features based on the documents binarized bag-of-words representation. Specifically, the Cora graph has 2708 nodes, 5429 edges, 7 prediction classes, and 1433 features per node.

## GNN Stack Module

Below is the implementation of a general GNN stack, where we can plug in any GNN layer, such as **GraphSage**, **GAT**, etc.

This module is provided for you. Your implementations of the **GraphSage** and **GAT** layers will function as components in the GNNStack Module.

```python
import torch
import torch_scatter
import torch.nn as nn
import torch.nn.functional as F

import torch_geometric.nn as pyg_nn
import torch_geometric.utils as pyg_utils

from torch import Tensor
from typing import Union, Tuple, Optional
from torch_geometric.typing import (OptPairTensor, Adj, Size, NoneType,
                                    OptTensor)

from torch.nn import Parameter, Linear
from torch_sparse import SparseTensor, set_diag
from torch_geometric.nn.conv import MessagePassing
from torch_geometric.utils import remove_self_loops, add_self_loops, softmax


class GNNStack(torch.nn.Module):
    """A stack of GNN layers (GraphSage or GAT) followed by a post-message-
    passing MLP head. If ``emb`` is True, ``forward`` returns raw node
    embeddings instead of log-probabilities."""

    def __init__(
        self, input_dim, hidden_dim, output_dim, args, emb=False
    ):
        # Initialize the parent nn.Module.
        super(GNNStack, self).__init__()
        # Resolve which conv layer class to stack (GraphSage or GAT).
        conv_model = self.build_conv_model(args.model_type)
        # List of conv layers.
        self.convs = nn.ModuleList()
        # Append input layer: input_dim -> hidden_dim.
        self.convs.append(conv_model(input_dim, hidden_dim))
        # Check if the number of layers is at least 1.
        assert (args.num_layers >= 1), 'Number of layers is not >=1'
        # Remaining layers consume the concatenated multi-head output,
        # hence the input width args.heads * hidden_dim.
        # NOTE(review): for GraphSage this presumably requires
        # args.heads == 1 — confirm against the training config.
        for l in range(args.num_layers-1):
            self.convs.append(conv_model(args.heads * hidden_dim, hidden_dim))

        # Post-message-passing classification head.
        self.post_mp = nn.Sequential(
            nn.Linear(args.heads * hidden_dim, hidden_dim),
            nn.Dropout(args.dropout),
            nn.Linear(hidden_dim, output_dim)
        )

        # Dropout probability applied between conv layers.
        self.dropout = args.dropout
        # Store number of layers for use in forward().
        self.num_layers = args.num_layers

        # If True, forward() returns embeddings and skips the
        # log-softmax classification output.
        self.emb = emb

    def build_conv_model(self, model_type):
        """Return the conv layer class matching ``model_type``."""
        if model_type == 'GraphSage':
            return GraphSage
        elif model_type == 'GAT':
            # When applying GAT with num heads > 1, you need to modify the
            # input and output dimension of the conv layers (self.convs),
            # to ensure that the input dim of the next layer is num heads
            # multiplied by the output dim of the previous layer.
            # HINT: In case you want to play with multiheads, you need to change the for-loop that builds up self.convs to be
            # self.convs.append(conv_model(hidden_dim * num_heads, hidden_dim)),
            # and also the first nn.Linear(hidden_dim * num_heads, hidden_dim) in post-message-passing.
            return GAT

    def forward(self, data):
        # Extract node features, connectivity and batch vector from the
        # PyG data object.
        x, edge_index, batch = data.x, data.edge_index, data.batch

        # conv -> ReLU -> dropout, repeated for every layer.
        for i in range(self.num_layers):
            x = self.convs[i](x, edge_index)
            x = F.relu(x)
            x = F.dropout(x, p=self.dropout, training=self.training)

        x = self.post_mp(x)

        # Return raw embeddings when requested (e.g. for visualization).
        if self.emb == True:
            return x

        return F.log_softmax(x, dim=1)

    def loss(self, pred, label):
        # Negative log-likelihood over the log-softmax outputs.
        return F.nll_loss(pred, label)
```

## Creating Our Own Message Passing Layer

Now let's start implementing our own message passing layers! Working through this part will help us become acutely familiar with the behind the scenes work of implementing Pytorch Message Passing Layers, allowing us to build our own GNN models. To do so, we will work with and implement 3 critical functions needed to define a PyG Message Passing Layer: `forward`, `message`, and `aggregate`.

Before diving head first into the coding details, let us quickly review the key components of the message passing process. To do so, we will focus on a single round of message passing with respect to a single central node $x$.
Before message passing, $x$ is associated with a feature vector $x^{l-1}$, and the goal of message passing is to update this feature vector as $x^l$. To do so, we implement the following steps: 1) each neighboring node $v$ passes its current message $v^{l-1}$ across the edge $(x, v)$ - 2) for the node $x$, we aggregate all of the messages of the neighboring nodes (for example through a sum or mean) - and 3) we transform the aggregated information by for example applying linear and non-linear transformations. Altogether, the message passing process is applied such that every node $u$ in our graph updates its embedding by acting as the central node $x$ in steps 1-3 described above.

Now, extending this process to a single message passing layer: the job of a message passing layer is to update the current feature representation or embedding of each node in a graph by propagating and transforming information within the graph. Overall, the general paradigm of a message passing layer is: 1) pre-processing -> 2) **message passing** / propagation -> 3) post-processing.

The `forward` function that we will implement for our message passing layer captures this execution logic. Namely, the `forward` function handles the pre and post-processing of node features / embeddings, as well as initiates message passing by calling the `propagate` function.

The `propagate` function encapsulates the message passing process! It does so by calling three important functions: 1) `message`, 2) `aggregate`, and 3) `update`. Our implementation will vary slightly from this, as we will not explicitly implement `update`, but instead place the logic for updating node embeddings after message passing and within the `forward` function. To be more specific, after information is propagated (message passing), we can further transform the node embeddings output by `propagate`. Therefore, the output of `forward` is exactly the node embeddings after one GNN layer.
Lastly, before starting to implement our own layer, let us dig a bit deeper into each of the functions described above: 1. ``` def propagate(edge_index, x=(x_i, x_j), extra=(extra_i, extra_j), size=size): ``` Calling `propagate` initiates the message passing process. Looking at the function parameters, we highlight a couple of key parameters. - `edge_index` is passed to the forward function and captures the edge structure of the graph. - `x=(x_i, x_j)` represents the node features that will be used in message passing. In order to explain why we pass the tuple `(x_i, x_j)`, we first look at how our edges are represented. For every edge $(i, j) \in \mathcal{E}$, we can differentiate $i$ as the source or central node ($x_{central}$) and j as the neighboring node ($x_{neighbor}$). Taking the example of message passing above, for a central node $u$ we will aggregate and transform all of the messages associated with the nodes $v$ s.t. $(u, v) \in \mathcal{E}$ (i.e. $v \in \mathcal{N}_{u}$). Thus we see, the subscripts `_i` and `_j` allow us to specifcally differenciate features associated with central nodes (i.e. nodes recieving message information) and neighboring nodes (i.e. nodes passing messages). This is definitely a somewhat confusing concept; however, one key thing to remember / wrap your head around is that depending on the perspective, a node $x$ acts as a central node or a neighboring node. In fact, in undirected graphs we store both edge directions (i.e. $(i, j)$ and $(j, i)$). From the central node perspective, `x_i`, x is collecting neighboring information to update its embedding. From a neighboring node perspective, `x_j`, x is passing its message information along the edge connecting it to a different central node. - `extra=(extra_i, extra_j)` represents additional information that we can associate with each node beyond its current feature embedding. 
In fact, we can include as many additional parameters of the form `param=(param_i, param_j)` as we would like. Again, we highlight that indexing with `_i` and `_j` allows us to differentiate central and neighboring nodes. The output of the `propagate` function is a matrix of node embeddings after the message passing process and has shape $[N, d]$. 2. ``` def message(x_j, ...): ``` The `message` function is called by propagate and constructs the messages from neighboring nodes $j$ to central nodes $i$ for each edge $(i, j)$ in *edge_index*. This function can take any argument that was initially passed to `propagate`. Furthermore, we can again differentiate central nodes and neighboring nodes by appending `_i` or `_j` to the variable name, .e.g. `x_i` and `x_j`. Looking more specifically at the variables, we have: - `x_j` represents a matrix of feature embeddings for all neighboring nodes passing their messages along their respective edge (i.e. all nodes $j$ for edges $(i, j) \in \mathcal{E}$). Thus, its shape is $[|\mathcal{E}|, d]$! - In implementing GAT we will see how to access additional variables passed to propagate Critically, we see that the output of the `message` function is a matrix of neighboring node embeddings ready to be aggregated, having shape $[|\mathcal{E}|, d]$. 3. ``` def aggregate(self, inputs, index, dim_size = None): ``` Lastly, the `aggregate` function is used to aggregate the messages from neighboring nodes. Looking at the parameters we highlight: - `inputs` represents a matrix of the messages passed from neighboring nodes (i.e. the output of the `message` function). - `index` has the same shape as `inputs` and tells us the central node that corresponding to each of the rows / messages $j$ in the `inputs` matrix. Thus, `index` tells us which rows / messages to aggregate for each central node. The output of `aggregate` is of shape $[N, d]$. 
For additional resources refer to the PyG documentation for implementing custom message passing layers: https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html ## GAT Implementation Attention mechanisms have become the state-of-the-art in many sequence-based tasks such as machine translation and learning sentence representations. One of the major benefits of attention-based mechanisms is their ability to focus on the most relevant parts of the input to make decisions. In this problem, we will see how attention mechanisms can be used to perform node classification over graph-structured data through the usage of Graph Attention Networks (GATs) ([Veličković et al. (2018)](https://arxiv.org/abs/1710.10903)). The building block of the Graph Attention Network is the graph attention layer, which is a variant of the aggregation function. Let $N$ be the number of nodes and $F$ be the dimension of the feature vector for each node. The input to each graph attentional layer is a set of node features: $\mathbf{h} = \{\overrightarrow{h_1}, \overrightarrow{h_2}, \dots, \overrightarrow{h_N}$\}, $\overrightarrow{h_i} \in R^F$. The output of each graph attentional layer is a new set of node features, which may have a new dimension $F'$: $\mathbf{h'} = \{\overrightarrow{h_1'}, \overrightarrow{h_2'}, \dots, \overrightarrow{h_N'}\}$, with $\overrightarrow{h_i'} \in \mathbb{R}^{F'}$. We will now describe how this transformation is performed for each graph attention layer. First, a shared linear transformation parametrized by the weight matrix $\mathbf{W} \in \mathbb{R}^{F' \times F}$ is applied to every node. Next, we perform self-attention on the nodes. We use a shared attention function $a$: \begin{equation} a : \mathbb{R}^{F'} \times \mathbb{R}^{F'} \rightarrow \mathbb{R}. 
\end{equation} that computes the attention coefficients capturing the importance of node $j$'s features to node $i$: \begin{equation} e_{ij} = a(\mathbf{W_l}\overrightarrow{h_i}, \mathbf{W_r} \overrightarrow{h_j}) \end{equation} The most general formulation of self-attention allows every node to attend to all other nodes which drops all structural information. However, to utilize graph structure in the attention mechanisms, we use **masked attention**. In masked attention, we only compute attention coefficients $e_{ij}$ for nodes $j \in \mathcal{N}_i$ where $\mathcal{N}_i$ is some neighborhood of node $i$ in the graph. To easily compare coefficients across different nodes, we normalize the coefficients across $j$ using a softmax function: \begin{equation} \alpha_{ij} = \text{softmax}_j(e_{ij}) = \frac{\exp(e_{ij})}{\sum_{k \in \mathcal{N}_i} \exp(e_{ik})} \end{equation} For this problem, our attention mechanism $a$ will be a single-layer feedforward neural network parametrized by a weight vectors $\overrightarrow{a_l} \in \mathbb{R}^{F'}$ and $\overrightarrow{a_r} \in \mathbb{R}^{F'}$, followed by a LeakyReLU nonlinearity (with negative input slope 0.2). Let $\cdot^T$ represent transposition and $||$ represent concatenation. The coefficients computed by our attention mechanism may be expressed as: \begin{equation} \alpha_{ij} = \frac{\exp\Big(\text{LeakyReLU}\Big(\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i} + \overrightarrow{a_r}^T\mathbf{W_r}\overrightarrow{h_j}\Big)\Big)}{\sum_{k\in \mathcal{N}_i} \exp\Big(\text{LeakyReLU}\Big(\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i} + \overrightarrow{a_r}^T\mathbf{W_r}\overrightarrow{h_k}\Big)\Big)} \end{equation} For the following questions, we denote `alpha_l` = $\alpha_l = [...,\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i},...] \in \mathcal{R}^n$ and `alpha_r` = $\alpha_r = [..., \overrightarrow{a_r}^T \mathbf{W_r} \overrightarrow{h_j}, ...] \in \mathcal{R}^n$. 
At every layer of GAT, after the attention coefficients are computed for that layer, the aggregation function can be computed by a weighted sum of neighborhood messages, where weights are specified by $\alpha_{ij}$. Now, we use the normalized attention coefficients to compute a linear combination of the features corresponding to them. These aggregated features will serve as the final output features for every node. \begin{equation} h_i' = \sum_{j \in \mathcal{N}_i} \alpha_{ij} \mathbf{W_r} \overrightarrow{h_j}. \end{equation} At this point, we have covered a lot of information! Before reading further about multi-head attention, we encourage you to go again through the excersize of thinking about what components of the attention mechanism correspond with the different functions: 1) `forward`, 2) `message`, and 3 `aggregate`. - Hint 1: Our aggregation is very similar to that of GraphSage except now we are using sum aggregation - Hint 2: The terms we aggregate over again represent the individual message that each neighbor node j sends. Thus, we see that $\alpha_{ij}$ is part of the message each node sends and is thus computed during the message step. This makes sense since an attention weight is associated with each edge in the graph. - Hint 3: Look at the terms in the definition of $\alpha_{ij}$. What values do we want to pre-process and pass as parameters to the `propagate` function. The parameters of `message(..., x_j, alpha_j, alpha_i, ...)` should give a good hint. ### Multi-Head Attention To stabilize the learning process of self-attention, we use multi-head attention. To do this we use $K$ independent attention mechanisms, or ``heads'' compute output features as in the above equations. 
Then, we concatenate these output feature representations: \begin{equation} \overrightarrow{h_i}' = ||_{k=1}^K \Big(\sum_{j \in \mathcal{N}_i} \alpha_{ij}^{(k)} \mathbf{W_r}^{(k)} \overrightarrow{h_j}\Big) \end{equation} where $||$ is concentation, $\alpha_{ij}^{(k)}$ are the normalized attention coefficients computed by the $k$-th attention mechanism $(a^k)$, and $\mathbf{W}^{(k)}$ is the corresponding input linear transformation's weight matrix. Note that for this setting, $\mathbf{h'} \in \mathbb{R}^{KF'}$. ```python class GAT(MessagePassing): def __init__( self, in_channels, out_channels, heads = 2, negative_slope = 0.2, dropout = 0., **kwargs): super(GAT, self).__init__(node_dim=0, **kwargs) self.in_channels = in_channels self.out_channels = out_channels self.heads = heads self.negative_slope = negative_slope self.dropout = dropout self.lin_l = None self.lin_r = None self.att_l = None self.att_r = None ############################################################################ # TODO: Your code here! # Define the layers needed for the message functions below. # self.lin_l is the linear transformation that you apply to embeddings # BEFORE message passing. # # Pay attention to dimensions of the linear layers, since we're using # multi-head attention. # Our implementation is ~1 lines, but don't worry if you deviate from this. self.lin_l = Linear(in_channels, heads * out_channels) ############################################################################ self.lin_r = self.lin_l ############################################################################ # TODO: Your code here! # Define the attention parameters \overrightarrow{a_l/r}^T in the above intro. # You have to deal with multi-head scenarios. # Use nn.Parameter instead of nn.Linear # Our implementation is ~2 lines, but don't worry if you deviate from this. 
# The idea here is to initialize all the parameters involving the attention # mechanism, which is described by the vector of weight a of dimension # out_channels. # However, if we stack several heads together, we need such a vector # for each one of these heads # # Therefore, following the documentation: https://pytorch.org/docs/1.9.1/generated/torch.nn.parameter.Parameter.html self.att_l = Parameter(torch.zeros(heads, out_channels)) self.att_r = Parameter(torch.zeros(heads, out_channels)) # PS: I almost used self.attention_l; but in the reset_parameters below # the guy is using att_l ############################################################################ self.reset_parameters() def reset_parameters(self): nn.init.xavier_uniform_(self.lin_l.weight) nn.init.xavier_uniform_(self.lin_r.weight) nn.init.xavier_uniform_(self.att_l) nn.init.xavier_uniform_(self.att_r) def forward(self, x, edge_index, size=None): """ Implement message passing, as well as any post-processing (our update rule). Parameters ---------- self : GAT object GAT. x : tensor of shape (num_nodes, in_channels) Data associated to each node. edge_index : tensor of shape (2, num_edges) Describe graph structure via edge list. size : tuple with shape Needed if edge_index is a matrix representing connections as (num_nodes, num_connected_nodes). See: https://github.com/pyg-team/pytorch_geometric/blob/master/torch_geometric/nn/conv/message_passing.py Returns ------- out : tensor of shape (num_nodes, out_channels) Embedded data associated to each node. """ # Now what was called K is H, and F' is C... this guy is so confused H, C = self.heads, self.out_channels ############################################################################ # TODO: Your code here! # Implement message passing, as well as any pre- and post-processing (our update rule). # 1. First apply linear transformation to node embeddings, and split that # into multiple heads. 
We use the same representations for source and # target nodes, but apply different linear weights (W_l and W_r) # 2. Calculate alpha vectors for central nodes (alpha_l) and neighbor nodes (alpha_r). # 3. Call propagate function to conduct the message passing. # 3.1 Remember to pass alpha = (alpha_l, alpha_r) as a parameter. # 3.2 See there for more information: https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html # 4. Transform the output back to the shape of [N, H * C]. # Our implementation is ~5 lines, but don't worry if you deviate from this. # 1. First apply linear transformation to node embeddings, and split that # into multiple heads. We use the same representations for source and # target nodes, but apply different linear weights (W_l and W_r) # Apply linear transformations x_i = self.lin_l(x) # (num_nodes, heads * out_channels) x_j = self.lin_r(x) # (num_nodes, heads * out_channels) # Split into multiple heads # I assume that split here means that we want to have a tensor of the # form # (num_nodes, heads, out_channels) # Then, we can treat heads independently, as if we are doing things in # parallel. # # Fortunately, tensor also has a .reshape method x_i = x_i.reshape(-1, H, C) # (num_nodes, heads, out_channels) x_j = x_j.reshape(-1, H, C) # (num_nodes, heads, out_channels) # 2. Calculate alpha vectors for central nodes (alpha_l) and neighbor nodes (alpha_r). # The guy defined it as # `alpha_l` = $\alpha_l = [...,\overrightarrow{a_l}^T \mathbf{W_l} \overrightarrow{h_i},...] # We already have the W h part... now, we neet to apply the attention coeff alpha_l = self.att_l * x_i # (num_nodes, heads, out_channels) alpha_r = self.att_r * x_j # (num_nodes, heads, out_channels) # 3. Call propagate function to conduct the message passing. # 3.1 Remember to pass alpha = (alpha_l, alpha_r) as a parameter. 
# 3.2 See there for more information: https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html out = self.propagate(edge_index, x=(x_i, x_j), alpha=(alpha_l, alpha_r), size=size) # (num_nodes, heads, out_channels) # 4. Transform the output back to the shape of [N, H * C]. out = out.reshape(-1, H * C) # (num_edges, heads * out_channels) ############################################################################ return out def message(self, x_j, alpha_j, alpha_i, index, ptr, size_i): """ Messages from the neighbors. For GAT, the neighbors send their weighted raw data, where the weights are given by the attention mechanism. Parameters ---------- x_j : tensor of shape (num_edges, out_channels) Data of the target node of each edge. alpha_j : tensor of shape (num_edges, heads, out_channels) Attention parameters for the target nodes. alpha_i : tensor of shape (num_edges, heads, out_channels) Attention parameters for the central nodes. index : tensor of shape (num_edges) List o indexes of the (target) nodes receiving the message: index = edge_list[1], since we are using the flow="source-to-target". Returns ------- out : tensor of shape (num_edges, head, out_channels) Messages sent to the central node from the target node of each edge. PS: Note how different is this message function from the one of Colab 3. """ out = None ############################################################################ # TODO: Your code here! # Implement your message function. Putting the attention in message # instead of in update is a little tricky. # 1. Calculate the final attention weights using alpha_i and alpha_j, # and apply leaky Relu. # 2. Calculate softmax over the neighbor nodes for all the nodes. Use # torch_geometric.utils.softmax instead of the one in Pytorch. # 3. Apply dropout to attention weights (alpha). # 4. Multiply embeddings and attention weights. As a sanity check, the output # should be of shape [E, H, C]. # 5. 
ptr (LongTensor, optional): If given, computes the softmax based on # sorted inputs in CSR representation. You can simply pass it to softmax. # Our implementation is ~4-5 lines, but don't worry if you deviate from this. # 1. Calculate the final attention weights using alpha_i and alpha_j, # and apply leaky Relu. # # Documentation on Leak ReLU: https://pytorch.org/docs/1.9.1/generated/torch.nn.LeakyReLU.html e_vector = F.leaky_relu(alpha_j + alpha_i, negative_slope=self.negative_slope) # (num_edges, heads, out_channels) # Following the notation of the guy, e_vector has the e_ij values before # normalization # 2. Calculate softmax over the neighbor nodes for all the nodes. Use # torch_geometric.utils.softmax instead of the one in Pytorch. if ptr: alpha = F.softmax(e_vector, dim=ptr) # (num_edges, heads, out_channels) # ptr figure outs how to compute the neighborhoods and so on # see: https://github.com/pyg-team/pytorch_geometric/blob/50b7bfc4a59b5b6f7ec547ff862985f3b2e22798/torch_geometric/nn/conv/message_passing.py#L336 else: alpha = pyg_utils.softmax(e_vector, index=index) # (num_edges, heads, out_channels) # THIS FUNCTION IS ALREADY FIGURING OUT THE NEIGHBOR SETS FOR US TO COMPUTE # THE SOFTMAX FUNCTION, see: https://pytorch-geometric.readthedocs.io/en/1.3.2/_modules/torch_geometric/utils/softmax.html # Unsurprisingly, it uses the scatter function. # 3. Apply dropout to attention weights (alpha). alpha = F.dropout(alpha, p=self.dropout) # (num_edges, heads, out_channels) # 4. Multiply embeddings and attention weights. As a sanity check, the output # should be of shape [E, H, C]. out = alpha * x_j # (num_edges, heads, out_channels) # This is denoted as h' in the explanation above, but without the sum # yet (aggreation); this will be realized in the aggreagation function # So, here we have this alpha Wr h # 5. ptr (LongTensor, optional): If given, computes the softmax based on # sorted inputs in CSR representation. You can simply pass it to softmax. 
# # - OK, why he didnt say this before.. lets go back to the softmax # implementation ############################################################################ return out def aggregate(self, inputs, index, dim_size=None): """ Aggregates at the central node incoming messages from the neighbors. For GAT, the aggregation is a weighted sum over the data coming from the neighbors. The weights are defined by the attention mechanism. Parameters ---------- inputs : tensor of shape (E, heads, out_channels) Messages sent to the central node from the target node of each edge. index : tensor of shape (num_edges) List o indexes of the (target) nodes receiving the message: index = edge_list[1], since we are using the flow="source-to-target". dim_size : tuple with shape Tuple with shape of the aggregation. Returns ------- out : tensor of shape (num_edges, heads, out_channels) Aggregated messages at the central nodes. """ ############################################################################ # TODO: Your code here! # Implement your aggregate function here. # See here as how to use torch_scatter.scatter: https://pytorch-scatter.readthedocs.io/en/latest/_modules/torch_scatter/scatter.html # Pay attention to "reduce" parameter is different from that in GraphSage. # Our implementation is ~1 lines, but don't worry if you deviate from this. # Now, we just need to sum over the neighboorhood sets, see the h' equation # above!! out = torch_scatter.scatter(src=inputs, index=index, dim=self.node_dim, dim_size=dim_size, reduce="sum") # dim needs to match the node dimension!! Using the node_dim # property that the MensagePassing class has; similar to Colab 3 # see: https://github.com/pyg-team/pytorch_geometric/blob/50b7bfc4a59b5b6f7ec547ff862985f3b2e22798/torch_geometric/nn/conv/message_passing.py#L336 ############################################################################ return out ``` ## Building Optimizers This function has been implemented for you. 
**For grading purposes please use the default Adam optimizer**, but feel free to play with other types of optimizers on your own. ```python import torch.optim as optim def build_optimizer(args, params): weight_decay = args.weight_decay filter_fn = filter(lambda p : p.requires_grad, params) if args.opt == 'adam': optimizer = optim.Adam(filter_fn, lr=args.lr, weight_decay=weight_decay) elif args.opt == 'sgd': optimizer = optim.SGD(filter_fn, lr=args.lr, momentum=0.95, weight_decay=weight_decay) elif args.opt == 'rmsprop': optimizer = optim.RMSprop(filter_fn, lr=args.lr, weight_decay=weight_decay) elif args.opt == 'adagrad': optimizer = optim.Adagrad(filter_fn, lr=args.lr, weight_decay=weight_decay) if args.opt_scheduler == 'none': return None, optimizer elif args.opt_scheduler == 'step': scheduler = optim.lr_scheduler.StepLR(optimizer, step_size=args.opt_decay_step, gamma=args.opt_decay_rate) elif args.opt_scheduler == 'cos': scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.opt_restart) return scheduler, optimizer ``` ## Training and Testing Here we provide you with the functions to train and test. **Please do not modify this part for grading purposes.** ```python import time import networkx as nx import numpy as np import torch import torch.optim as optim from tqdm import trange import pandas as pd import copy from torch_geometric.datasets import TUDataset from torch_geometric.datasets import Planetoid from torch_geometric.data import DataLoader import torch_geometric.nn as pyg_nn import matplotlib.pyplot as plt def train(dataset, args): print("Node task. 
test set size:", np.sum(dataset[0]['test_mask'].numpy())) print() test_loader = loader = DataLoader(dataset, batch_size=args.batch_size, shuffle=False) # build model model = GNNStack(dataset.num_node_features, args.hidden_dim, dataset.num_classes, args) scheduler, opt = build_optimizer(args, model.parameters()) # train losses = [] test_accs = [] best_acc = 0 best_model = None for epoch in trange(args.epochs, desc="Training", unit="Epochs"): total_loss = 0 model.train() for batch in loader: opt.zero_grad() pred = model(batch) label = batch.y pred = pred[batch.train_mask] label = label[batch.train_mask] loss = model.loss(pred, label) loss.backward() opt.step() total_loss += loss.item() * batch.num_graphs total_loss /= len(loader.dataset) losses.append(total_loss) if epoch % 10 == 0: test_acc = test(test_loader, model) test_accs.append(test_acc) if test_acc > best_acc: best_acc = test_acc best_model = copy.deepcopy(model) else: test_accs.append(test_accs[-1]) return test_accs, losses, best_model, best_acc, test_loader def test(loader, test_model, is_validation=False, save_model_preds=False, model_type=None): test_model.eval() correct = 0 # Note that Cora is only one graph! 
for data in loader: with torch.no_grad(): # max(dim=1) returns values, indices tuple; only need indices pred = test_model(data).max(dim=1)[1] label = data.y mask = data.val_mask if is_validation else data.test_mask # node classification: only evaluate on nodes in test set pred = pred[mask] label = label[mask] if save_model_preds: print ("Saving Model Predictions for Model Type", model_type) data = {} data['pred'] = pred.view(-1).cpu().detach().numpy() data['label'] = label.view(-1).cpu().detach().numpy() df = pd.DataFrame(data=data) # Save locally as csv df.to_csv('CORA-Node-' + model_type + '.csv', sep=',', index=False) correct += pred.eq(label).sum().item() total = 0 for data in loader.dataset: total += torch.sum(data.val_mask if is_validation else data.test_mask).item() return correct / total class objectview(object): def __init__(self, d): self.__dict__ = d ``` ## Let's Start the Training! We will be working on the CORA dataset on node-level classification. This part is implemented for you. **For grading purposes, please do not modify the default parameters.** However, feel free to play with different configurations just for fun! **Submit your best accuracy and loss on Gradescope.** ```python if 'IS_GRADESCOPE_ENV' not in os.environ: for args in [{ 'model_type': 'GAT', 'dataset': 'cora', 'num_layers': 2, 'heads': 1, 'batch_size': 32, 'hidden_dim': 32, 'dropout': 0.5, 'epochs': 500, 'opt': 'adam', 'opt_scheduler': 'none', 'opt_restart': 0, 'weight_decay': 5e-3, 'lr': 0.01}, ]: args = objectview(args) for model in ['GAT']: args.model_type = model # Match the dimension. 
if model == 'GAT': args.heads = 2 else: args.heads = 1 if args.dataset == 'cora': dataset = Planetoid(root='/tmp/cora', name='Cora') else: raise NotImplementedError("Unknown dataset") test_accs, losses, best_model, best_acc, test_loader = train(dataset, args) print("Maximum test set accuracy: {0}".format(max(test_accs))) print("Minimum loss: {0}".format(min(losses))) # Run test for our best model to save the predictions! test(test_loader, best_model, is_validation=False, save_model_preds=True, model_type=model) print() plt.title(dataset.name) plt.plot(losses, label="training loss" + " - " + args.model_type) plt.plot(test_accs, label="test accuracy" + " - " + args.model_type) plt.legend() plt.show() ``` ## Question 1: What is the maximum accuracy obtained on test set for GAT? (10 points) Running the training cell above will also save your best GAT model predictions as *CORA-Node-GAT.csv*. When you sumbit your assignment, you will have to download this file and attatch it to your submission. As with the other colabs, please zip this file (DON'T CHANGE ITS NAME) and the .csv file that's generated!
{"hexsha": "5292b73310efa402795071f7b7e345bc4581c8e5", "size": 80448, "ext": "ipynb", "lang": "Jupyter Notebook", "max_stars_repo_path": "Colab 4/CS224W - Colab4_victor.ipynb", "max_stars_repo_name": "victorcroisfelt/aau-cs224w-ml-with-graphs", "max_stars_repo_head_hexsha": "adb38651be8da98cc574f127763c785ed16dfb5a", "max_stars_repo_licenses": ["Unlicense", "MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "Colab 4/CS224W - Colab4_victor.ipynb", "max_issues_repo_name": "victorcroisfelt/aau-cs224w-ml-with-graphs", "max_issues_repo_head_hexsha": "adb38651be8da98cc574f127763c785ed16dfb5a", "max_issues_repo_licenses": ["Unlicense", "MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Colab 4/CS224W - Colab4_victor.ipynb", "max_forks_repo_name": "victorcroisfelt/aau-cs224w-ml-with-graphs", "max_forks_repo_head_hexsha": "adb38651be8da98cc574f127763c785ed16dfb5a", "max_forks_repo_licenses": ["Unlicense", "MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 76.8366762178, "max_line_length": 22574, "alphanum_fraction": 0.6625894988, "converted": true, "num_tokens": 10208}
import mlflow
import numpy as np
import torch
import torchani

from ael import grad, loaders, models

np.random.seed(42)
torch.manual_seed(42)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Radial AEV coefficients
RcR = 5.2
EtaR = torch.tensor([16.0], device=device)
RsR = torch.tensor([0.9], device=device)

# Angular AEV coefficients
RcA = 3.5
Zeta = torch.tensor([32], device=device)
TsA = torch.tensor([0.19634954], device=device)  # Angular shift in GA
EtaA = torch.tensor([8.0], device=device)
RsA = torch.tensor([0.9], device=device)  # Radial shift in GA


def _setup(testdata, testdir):
    """Load the test systems and build a matching AEVComputer/model pair.

    Shared setup for the tests below (previously copy-pasted in both).
    Loads the PDB data with a 0.1 distance cutoff (distance 0.0 produces
    a segmentation fault, see MDAnalysis#2656), maps atomic numbers to
    species indices, and builds an AEVComputer and an AffinityModel in
    evaluation mode on ``device``.

    Returns
    -------
    tuple
        ``(data, AEVC, model)`` where ``data`` is the loaded
        :class:`loaders.PDBData`, ``AEVC`` the AEV computer and
        ``model`` the affinity model.
    """
    data = loaders.PDBData(testdata, 0.1, testdir)
    assert len(data) == 2

    # Transform atomic numbers to species indices
    amap = loaders.anummap(data.species)
    data.atomicnums_to_idxs(amap)
    n_species = len(amap)

    AEVC = torchani.AEVComputer(
        RcR, RcA, EtaR, RsR, EtaA, Zeta, RsA, TsA, n_species
    )

    # Radial functions: 1; angular functions: 1; number of species: 5
    # AEV: 1 * 5 + 1 * 5 * (5 + 1) // 2 = 5 (R) + 15 (A) = 20
    assert AEVC.aev_length == 20

    model = models.AffinityModel(n_species, AEVC.aev_length)

    # Move model and AEVComputer to device; model in evaluation mode
    model.to(device)
    AEVC.to(device)
    model.eval()

    return data, AEVC, model


def test_grad(testdata, testdir):
    """Gradient w.r.t. coordinates has the same shape as the coordinates."""
    with mlflow.start_run():
        data, AEVC, model = _setup(testdata, testdir)

        for i in range(len(data)):
            pdbid, _, (species, coordinates) = data[i]

            gradient = grad.gradient(species, coordinates, model, AEVC, device)

            assert gradient.shape == coordinates.shape


def test_atomic(testdata, testdir):
    """Per-atom contributions sum to the total model prediction."""
    with mlflow.start_run():
        data, AEVC, model = _setup(testdata, testdir)

        for pdbid, _, (species, coordinates) in data:
            atomic = grad.atomic(species, coordinates, model, AEVC, device)

            # Add fictitious batch dimension
            species = species.unsqueeze(0).to(device)
            coordinates = coordinates.unsqueeze(0).to(device)

            assert atomic.shape == species.shape

            aevs = AEVC.forward((species, coordinates)).aevs
            prediction = model(species, aevs)

            # Summing the atomic decomposition over atoms must recover
            # the full prediction.
            assert np.allclose(
                torch.sum(atomic, dim=1).cpu().detach().numpy(),
                prediction.cpu().detach().numpy(),
            )
{"hexsha": "900f85e8824af3ad2a5cba1010ed75f54ea9d162", "size": 3383, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_grad.py", "max_stars_repo_name": "hengwei-chan/aescore", "max_stars_repo_head_hexsha": "0eba684e757db2994fc6062736eef73ba8365a9b", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2021-03-04T12:51:49.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-25T13:48:23.000Z", "max_issues_repo_path": "tests/test_grad.py", "max_issues_repo_name": "hengwei-chan/aescore", "max_issues_repo_head_hexsha": "0eba684e757db2994fc6062736eef73ba8365a9b", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2020-11-19T16:19:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T14:40:43.000Z", "max_forks_repo_path": "tests/test_grad.py", "max_forks_repo_name": "hengwei-chan/aescore", "max_forks_repo_head_hexsha": "0eba684e757db2994fc6062736eef73ba8365a9b", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 3, "max_forks_repo_forks_event_min_datetime": "2021-04-13T08:14:26.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-17T22:21:53.000Z", "avg_line_length": 27.064, "max_line_length": 79, "alphanum_fraction": 0.6018326929, "include": true, "reason": "import numpy", "num_tokens": 920}
'''
This script defines the agent environment wrappers we use to train.
'''
import numpy as np

import gym_super_mario_bros
from gym_super_mario_bros.actions import RIGHT_ONLY, SIMPLE_MOVEMENT, COMPLEX_MOVEMENT
from nes_py.wrappers.joypad_space import JoypadSpace

from gym import spaces
from gym.spaces.box import Box
from gym import Wrapper

from collections import deque  # ordered collections

import torch
import torch.multiprocessing as mp

# hyper parameters
from src.params import *
from src.utils import preprocess
# from params import *
# from utils import preprocess


class Reward(Wrapper):
    '''
    Reward-shaping wrapper.

    Reference @ https://github.com/Kautenja/gym-super-mario-bros/blob/master/gym_super_mario_bros/smb_env.py
    '''

    def __init__(self, env):
        """
        Args:
            env -- the gym environment passed in
        """
        super(Reward, self).__init__(env)
        # Preprocessed frames are single-channel 84x84 images.
        self.observation_space = Box(low=0, high=255, shape=(1, 84, 84))
        self.curr_score = 0   # score at the previous step
        self.curr_time = 400  # initial time left
        self.curr_x = 40      # initial horizontal distance
        self.curr_stat = 0    # Mario's status (0 == small)

    def step(self, action):
        """Step the wrapped env and reshape the reward.

        The shaped reward combines forward progress (clipped to [0, 2]),
        the change in remaining time, and the change in score, plus a
        +/-50 bonus/penalty on episode end depending on whether the flag
        was reached. The total is scaled by 1/10.
        """
        state, reward, done, info = self.env.step(action)
        state = preprocess(state)

        # Distance reward: per-step forward progress, clipped to [0, 2].
        reward = min(max((info['x_pos'] - self.curr_x), 0), 2)
        self.curr_x = info['x_pos']

        # Time reward (non-positive: the clock only counts down).
        reward += (info['time'] - self.curr_time) * 0.1
        self.curr_time = info['time']

        # Score reward.
        reward += (info['score'] - self.curr_score) * 0.025
        self.curr_score = info['score']

        # Terminal bonus / penalty.
        if done:
            if info['flag_get']:
                reward += 50
            else:
                reward -= 50

        return state, reward / 10, done, info

    def reset(self):
        """Reset the env and the bookkeeping used for reward shaping."""
        state = self.env.reset()
        # BUG FIX: previously only curr_score was reset, so the first step
        # of a new episode computed distance/time deltas against stale
        # values left over from the previous episode.
        self.curr_score = 0
        self.curr_time = 400
        self.curr_x = 40
        self.curr_stat = 0
        return preprocess(state)


class SkipEnv(Wrapper):
    def __init__(self, env, skip=4):
        """
        Return only every 'skip'-th frame.
        Default skip frames: 4
        """
        super(SkipEnv, self).__init__(env)
        self.observation_space = Box(low=0, high=255, shape=(4, 84, 84))
        self.skip = skip
        self.skip_frame = deque(maxlen=skip)  # buffer of the last `skip` observations

    def step(self, action):
        """Repeat `action` over `skip` frames and stack the observations.

        Returns:
            states -- the last `skip` frames stacked along axis 1
            skip_reward -- reward accumulated over the skipped frames
            done, info -- taken from the last executed frame
        """
        skip_reward = 0.0
        done = None
        info = {}
        for _ in range(self.skip):
            state, reward, done, info = self.env.step(action)
            self.skip_frame.append(state)
            skip_reward += reward
            if done:
                break
        states = np.stack(self.skip_frame, axis=1)
        # BUG FIX: return the accumulated reward; the original returned
        # only the last frame's reward, silently dropping the rest.
        return states.astype(np.float32), skip_reward, done, info

    def reset(self):
        """
        Clear the past-frame buffer and seed it with copies of the first state.
        """
        self.skip_frame.clear()
        state = self.env.reset()
        for _ in range(self.skip):
            self.skip_frame.append(state)
        state = np.stack(self.skip_frame, axis=1)
        return state.astype(np.float32)


def gym_env(world, stage, version, actions):
    '''
    Define the Super Mario individual stage to use.
    @ https://github.com/Kautenja/gym-super-mario-bros

    Inputs:
        world: a number in {1,2,3,4,5,6,7,8} indicating the world
        stage: a number in {1,2,3,4} indicating the stage
        version: a number in {0,1,2,3} specifying the ROM
        actions: name of the static action set for the binary-to-discrete
                 action space wrapper

    Outputs:
        env: wrapped environment to use
        num_state: number of channels of the observation space
        num_action: number of discrete actions
    '''
    env = gym_super_mario_bros.make('SuperMarioBros-{}-{}-v{}'.format(world, stage, version))

    if actions == 'RIGHT_ONLY':
        act = RIGHT_ONLY
    elif actions == 'SIMPLE_MOVEMENT':
        act = SIMPLE_MOVEMENT
    elif actions == 'COMPLEX_MOVEMENT':
        act = COMPLEX_MOVEMENT

    env = JoypadSpace(env, act)
    env = Reward(env)
    env = SkipEnv(env)  # frame skipping, default = 4

    num_state = env.observation_space.shape[0]
    num_action = env.action_space.n
    return env, num_state, num_action


# # for debugging
# if __name__ == "__main__":
#     env,num_state,num_action = gym_env(world,stage,version,actions)
{"hexsha": "25c76c8c332b9b3f00255b46cb34950d5b13c8fd", "size": 4361, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/agent.py", "max_stars_repo_name": "yuanyyf/Super-Mario-Bro-version-A3C", "max_stars_repo_head_hexsha": "42660bc3f327a2d8e014634615666c5d68eaee90", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/agent.py", "max_issues_repo_name": "yuanyyf/Super-Mario-Bro-version-A3C", "max_issues_repo_head_hexsha": "42660bc3f327a2d8e014634615666c5d68eaee90", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/agent.py", "max_forks_repo_name": "yuanyyf/Super-Mario-Bro-version-A3C", "max_forks_repo_head_hexsha": "42660bc3f327a2d8e014634615666c5d68eaee90", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-01T01:44:01.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-01T01:44:01.000Z", "avg_line_length": 30.4965034965, "max_line_length": 98, "alphanum_fraction": 0.6214171062, "include": true, "reason": "import numpy", "num_tokens": 1090}
[STATEMENT] lemma less_eq_integer_code [code]: "0 \<le> (0::integer) \<longleftrightarrow> True" "0 \<le> Pos l \<longleftrightarrow> True" "0 \<le> Neg l \<longleftrightarrow> False" "Pos k \<le> 0 \<longleftrightarrow> False" "Pos k \<le> Pos l \<longleftrightarrow> k \<le> l" "Pos k \<le> Neg l \<longleftrightarrow> False" "Neg k \<le> 0 \<longleftrightarrow> True" "Neg k \<le> Pos l \<longleftrightarrow> True" "Neg k \<le> Neg l \<longleftrightarrow> l \<le> k" [PROOF STATE] proof (prove) goal (1 subgoal): 1. (((0 \<le> 0) = True &&& (0 \<le> Pos l) = True) &&& (0 \<le> Neg l) = False &&& (Pos k \<le> 0) = False) &&& ((Pos k \<le> Pos l) = (k \<le> l) &&& (Pos k \<le> Neg l) = False) &&& (Neg k \<le> 0) = True &&& (Neg k \<le> Pos l) = True &&& (Neg k \<le> Neg l) = (l \<le> k) [PROOF STEP] by simp_all
{"llama_tokens": 348, "file": null, "length": 1}
#include <iostream> #include <boost/asio.hpp> #include <boost/bind.hpp> #include <boost/shared_ptr.hpp> #include <boost/enable_shared_from_this.hpp> using namespace std; using boost::asio::ip::tcp; class Connection : public boost::enable_shared_from_this<Connection> { private: tcp::socket sock; enum { BUF_SIZE = 1410 }; char buf[BUF_SIZE]; string msg_; char *msg_buff = (char *)malloc(1400 * sizeof(char)); char *msg_receive=(char *)malloc(1400 * sizeof(char)); void handle_Read(const boost::system::error_code &error, std::size_t bytes_transferred); void handle_Write(const boost::system::error_code &error); string make_daytime_string(); public: Connection(boost::asio::io_context &service); tcp::socket &getSocket() { return sock; } void start(); }; typedef boost::shared_ptr<Connection> ConnectionPtr; class Server { private: tcp::acceptor acceptor; void start(); void handle_Accept(ConnectionPtr con, const boost::system::error_code &error); public: Server(boost::asio::io_context &service); }; Connection::Connection(boost::asio::io_context &service) : sock(service) { } void Connection::start() { memset(msg_buff, 0, BUF_SIZE); boost::asio::async_read(sock, boost::asio::buffer(msg_buff, BUF_SIZE), boost::asio::transfer_at_least(1), boost::bind(&Connection::handle_Read, // #1 shared_from_this(), boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred)); } void Connection::handle_Read(const boost::system::error_code &error, std::size_t bytes_transferred) { if (!error) { cout << "recv from: " << sock.remote_endpoint().address() << ":" << sock.remote_endpoint().port() << endl; cout << "接受到的数据:" << endl; memcpy(msg_receive,msg_buff,BUF_SIZE); //cout << strlen(msg_buff) <<endl; printf("%d %s\n", *(short *)msg_receive, msg_receive + 2); //打印发送的数据包 /*TODO 数据包验证函数*/ memset(msg_receive,0,BUF_SIZE); msg_ = make_daytime_string(); cout << "将要发送的数据" << endl; cout << msg_ << endl; sock.async_write_some(boost::asio::buffer(msg_), boost::bind( 
&Connection::handle_Write, // #2 shared_from_this(), boost::asio::placeholders::error)); } } void Connection::handle_Write(const boost::system::error_code &error) { if (!error) { memset(msg_buff, 0, BUF_SIZE); // 注意:重置buff sock.async_read_some(boost::asio::buffer(msg_buff, BUF_SIZE), boost::bind( &Connection::handle_Read, // #3 shared_from_this(), boost::asio::placeholders::error, boost::asio::placeholders::bytes_transferred)); } } string Connection::make_daytime_string() //生成字符串的日期信息 {   time_t now = time(0); return ctime(&now); } Server::Server(boost::asio::io_context &service) : acceptor(service, tcp::endpoint(tcp::v4(), 9972)) { start(); } void Server::start() { ConnectionPtr conn(new Connection(acceptor.get_io_context())); acceptor.async_accept(conn->getSocket(), boost::bind( &Server::handle_Accept, this, conn, boost::asio::placeholders::error)); } void Server::handle_Accept(ConnectionPtr con, const boost::system::error_code &error) { if (!error) { con->start(); start(); } }
{"hexsha": "4bfc7a691d6f39026044e0ae2a82e88c841c147b", "size": 3891, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "src/TCP/header/server.hpp", "max_stars_repo_name": "chenxull/UDP_Project", "max_stars_repo_head_hexsha": "2490ee11dea3f0a11ff509f59338e510db25d0d9", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/TCP/header/server.hpp", "max_issues_repo_name": "chenxull/UDP_Project", "max_issues_repo_head_hexsha": "2490ee11dea3f0a11ff509f59338e510db25d0d9", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/TCP/header/server.hpp", "max_forks_repo_name": "chenxull/UDP_Project", "max_forks_repo_head_hexsha": "2490ee11dea3f0a11ff509f59338e510db25d0d9", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 31.6341463415, "max_line_length": 176, "alphanum_fraction": 0.5605242868, "num_tokens": 857}
/* * Morph target animation file * * This file is part of the "SoftPixel Engine" (Copyright (c) 2008 by Lukas Hermanns) * See "SoftPixelEngine.hpp" for license information. */ #include "SceneGraph/Animation/spMorphTargetAnimation.hpp" #include <boost/foreach.hpp> namespace sp { namespace scene { MorphTargetAnimation::MorphTargetAnimation() : MeshAnimation (ANIMATION_MORPHTARGET ), MaxKeyframe_ (0 ), isCulling_ (false ) { } MorphTargetAnimation::~MorphTargetAnimation() { } void MorphTargetAnimation::addKeyframeSequence( video::MeshBuffer* Surface, u32 Index, const std::vector<SVertexKeyframe> &Keyframes) { Vertices_.push_back(SMorphTargetVertex(Surface, Index, Keyframes)); if (MaxKeyframe_ < Keyframes.size()) MaxKeyframe_ = Keyframes.size(); } void MorphTargetAnimation::removeKeyframeSequence(video::MeshBuffer* Surface, u32 Index) { MaxKeyframe_ = 0; for (std::list<SMorphTargetVertex>::iterator it = Vertices_.begin(); it != Vertices_.end();) { if (it->Surface == Surface && it->Index == Index) it = Vertices_.erase(it); else { if (MaxKeyframe_ < it->Keyframes.size()) MaxKeyframe_ = it->Keyframes.size(); ++it; } } } void MorphTargetAnimation::clearKeyframes() { Vertices_.clear(); MaxKeyframe_ = 0; } void MorphTargetAnimation::setupManualAnimation(SceneNode* Node) { isCulling_ = true; } void MorphTargetAnimation::updateAnimation(scene::SceneNode* Node) { /* Get valid mesh object */ if (!Node || Node->getType() != scene::NODE_MESH || !playing()) return; Mesh* Object = static_cast<Mesh*>(Node); /* Update playback process */ isCulling_ = checkFrustumCulling(Object); updatePlayback(getSpeed()); /* Update the vertex transformation if the object is inside a view frustum of any camera */ if (isCulling_) Object->updateVertexBuffer(); } u32 MorphTargetAnimation::getKeyframeCount() const { return MaxKeyframe_; } void MorphTargetAnimation::interpolate(u32 IndexFrom, u32 IndexTo, f32 Interpolation) { if (!isCulling_) return; /* Temporary interpolation vector */ dim::vector3df Vec; 
foreach (SMorphTargetVertex &Vert, Vertices_) { if (IndexFrom < Vert.Keyframes.size() && IndexTo < Vert.Keyframes.size()) { /* Get frame references */ SVertexKeyframe* From = &Vert.Keyframes[IndexFrom]; SVertexKeyframe* To = &Vert.Keyframes[IndexTo]; /* Update transformation for vertex coordinate */ math::lerp(Vec, From->Position, To->Position, Interpolation); Vert.Surface->setVertexCoord(Vert.Index, Vec); /* Update transformation for vertex normal */ math::lerp(Vec, From->Normal, To->Normal, Interpolation); Vert.Surface->setVertexNormal(Vert.Index, Vec); } } } void MorphTargetAnimation::copy(const Animation* Other) { //!TODO! } } // /namespace scene } // /namespace sp // ================================================================================
{"hexsha": "0695a8b6dda107d227135195d1387fc824bcf9fb", "size": 3255, "ext": "cpp", "lang": "C++", "max_stars_repo_path": "sources/SceneGraph/Animation/spMorphTargetAnimation.cpp", "max_stars_repo_name": "rontrek/softpixel", "max_stars_repo_head_hexsha": "73a13a67e044c93f5c3da9066eedbaf3805d6807", "max_stars_repo_licenses": ["Zlib"], "max_stars_count": 14.0, "max_stars_repo_stars_event_min_datetime": "2015-08-16T21:05:20.000Z", "max_stars_repo_stars_event_max_datetime": "2019-08-21T17:22:01.000Z", "max_issues_repo_path": "sources/SceneGraph/Animation/spMorphTargetAnimation.cpp", "max_issues_repo_name": "rontrek/softpixel", "max_issues_repo_head_hexsha": "73a13a67e044c93f5c3da9066eedbaf3805d6807", "max_issues_repo_licenses": ["Zlib"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "sources/SceneGraph/Animation/spMorphTargetAnimation.cpp", "max_forks_repo_name": "rontrek/softpixel", "max_forks_repo_head_hexsha": "73a13a67e044c93f5c3da9066eedbaf3805d6807", "max_forks_repo_licenses": ["Zlib"], "max_forks_count": 3.0, "max_forks_repo_forks_event_min_datetime": "2020-02-15T09:17:41.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-21T14:10:40.000Z", "avg_line_length": 25.4296875, "max_line_length": 96, "alphanum_fraction": 0.619969278, "num_tokens": 764}
# The simple struct Newton{T1, Tlin, Tage, TFact} <: QuasiNewton{T1} approx::T1 linsolve::Tlin reset_age::Tage factorizer::TFact end hasprecon(::Newton) = NoPrecon() # struct DefaultSequence end DefaultNewtonLinsolve(B::Number, g) = B\g function DefaultNewtonLinsolve(B, g) B\g end function DefaultNewtonLinsolve(d, B, g) d .= (B\g) end Newton(;approx=Direct(), linsolve=DefaultNewtonLinsolve, reset_age=nothing) = Newton(approx, linsolve, reset_age, nothing) summary(::Newton{<:Direct, <:typeof(DefaultNewtonLinsolve)}) = "Newton's method with default linsolve" summary(::Newton{<:Direct, Any}) = "Newton's method with user supplied linsolve"
{"hexsha": "b4fe3f25dc93f1089429850a291b60738bf026c5", "size": 659, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/quasinewton/approximations/newton.jl", "max_stars_repo_name": "mohamed82008/NLSolvers.jl", "max_stars_repo_head_hexsha": "1c0252243975bfd3e45f210caf60947b46785611", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 13, "max_stars_repo_stars_event_min_datetime": "2020-10-04T00:16:07.000Z", "max_stars_repo_stars_event_max_datetime": "2021-10-15T17:36:57.000Z", "max_issues_repo_path": "src/quasinewton/approximations/newton.jl", "max_issues_repo_name": "mohamed82008/NLSolvers.jl", "max_issues_repo_head_hexsha": "1c0252243975bfd3e45f210caf60947b46785611", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 14, "max_issues_repo_issues_event_min_datetime": "2020-10-17T21:02:42.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-14T00:35:26.000Z", "max_forks_repo_path": "src/quasinewton/approximations/newton.jl", "max_forks_repo_name": "mohamed82008/NLSolvers.jl", "max_forks_repo_head_hexsha": "1c0252243975bfd3e45f210caf60947b46785611", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 6, "max_forks_repo_forks_event_min_datetime": "2021-01-28T02:01:43.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-28T21:41:15.000Z", "avg_line_length": 34.6842105263, "max_line_length": 122, "alphanum_fraction": 0.7405159332, "num_tokens": 210}
#redirect Reserve Officers Training Corps
{"hexsha": "070b1915c8c7ce563607f6122baeeaa74ba34c2c", "size": 42, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "lab/davisWiki/ROTC.f", "max_stars_repo_name": "voflo/Search", "max_stars_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lab/davisWiki/ROTC.f", "max_issues_repo_name": "voflo/Search", "max_issues_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lab/davisWiki/ROTC.f", "max_forks_repo_name": "voflo/Search", "max_forks_repo_head_hexsha": "55088b2fe6a9d6c90590f090542e0c0e3c188c7d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 21.0, "max_line_length": 41, "alphanum_fraction": 0.8571428571, "num_tokens": 7}
function cnn_resnet_finetune(varargin) if ~isempty(gcp('nocreate')), delete(gcp) end addpath('resnet'); opts = cnn_setup_environment(); opts.train.gpus = [1] ; % opts.train.gpus = [ 1 : 3 ] opts.dataSet = 'Remat'; % opts.dataSet = 'hmdb51'; opts.aug = 'stretch'; opts.border = 0; opts.pad = 0; opts.train.memoryMapFile = fullfile(tempdir, 'ramdisk', ['matconvnet' num2str(feature('getpid')) '.bin']) ; addpath('network_surgery'); opts.dataDir = fullfile(opts.dataPath, opts.dataSet) ; opts.splitDir = [opts.dataSet '_splits']; opts.nSplit = 1 ; opts.dropOutRatio = 0.5 ; opts.train.cheapResize = 0 ; opts.inputdim = [ 224, 224, 3] ; opts.train.batchSize = 196 ; opts.train.numSubBatches = ceil(8 / max(numel(opts.train.gpus),1)); opts.train.epochFactor = 10 ; opts.train.augmentation = 'borders25'; opts.train.backpropDepth = cell(1, 2); opts.train.backpropDepth(:) = {'pool5'}; opts.train.learningRate = [1e-2*ones(1, 2) 1e-2*ones(1, 3) 1e-3*ones(1, 3) 1e-4*ones(1, 3)] ; if strcmp(opts.dataSet, 'hmdb51') opts.train.learningRate = [1e-2*ones(1, 2) 1e-2*ones(1, 1) 1e-3*ones(1, 1) 1e-4*ones(1, 1)] ; end model = ['img-res50-' opts.train.augmentation '-bs=' num2str(opts.train.batchSize) ... 
'-split' num2str(opts.nSplit) '-dr' num2str(opts.dropOutRatio)]; if strfind(model, 'vgg16'); baseModel = 'imagenet-vgg-verydeep-16.mat' ; opts.train.learningRate = [1e-3*ones(1, 3) 5e-4*ones(1, 5) 5e-5*ones(1,2) 5e-6*ones(1,2)] ; opts.train.backpropDepth = cell(1, 3); opts.train.backpropDepth(:) = {'layer37'}; opts.train.batchSize = 128 ; opts.train.numSubBatches = ceil(16 / max(numel(opts.train.gpus),1)); elseif strfind(model, 'vgg-m'); baseModel = 'imagenet-vgg-m-2048.mat' ; elseif strfind(model, 'res152'); baseModel = 'imagenet-resnet-152-dag.mat' ; elseif strfind(model, 'res101'); baseModel = 'imagenet-resnet-101-dag.mat' ; elseif strfind(model, 'res50'); baseModel = 'imagenet-resnet-50-dag.mat' ; opts.train.numSubBatches = ceil(32 / max(numel(opts.train.gpus),1)); else error('Unknown model %s', model) ; end opts.model = fullfile(opts.modelPath,baseModel) ; opts.expDir = fullfile(opts.dataDir, [opts.dataSet '-' model]) ; opts.imdbPath = fullfile(opts.dataDir, [opts.dataSet '_resnet_split' num2str(opts.nSplit) 'imdb.mat']); opts.train.plotDiagnostics = 0 ; opts.train.continue = 1 ; opts.train.prefetch = 1 ; opts.train.expDir = opts.expDir ; opts.train.numAugments = 1; opts.train.frameSample = 'random'; opts.train.nFramesPerVid = 1; opts.train.uniformAugments = false; [opts, varargin] = vl_argparse(opts, varargin) ; % ------------------------------------------------------------------------- % Database initialization % ------------------------------------------------------------------------- if exist(opts.imdbPath) imdb = load(opts.imdbPath) ; else imdb = cnn_Remat_resnet_setup_data('dataPath', opts.dataPath, 'flowDir',opts.flowDir, ... 'dataSet', opts.dataSet, 'nSplit', opts.nSplit) ; save(opts.imdbPath, '-struct', 'imdb', '-v6') ; end nClasses = length(imdb.classes.name); if ~exist(opts.model) fprintf('Downloading base model file: %s ...\n', baseModel); mkdir(fileparts(opts.model)) ; urlwrite(... ['http://www.vlfeat.org/matconvnet/models/' baseModel], ... 
opts.model) ; end net = load(opts.model); if isfield(net, 'net'), net=net.net;end if isstruct(net.layers) % replace 1000-way imagenet classifiers for p = 1 : numel(net.params) sz = size(net.params(p).value); if any(sz == 1000) sz(sz == 1000) = nClasses; fprintf('replace classifier layer of %s\n', net.params(p).name); if numel(sz) > 2 net.params(p).value = 0.01 * randn(sz, class(net.params(p).value)); else net.params(p).value = zeros(sz, class(net.params(p).value)); end end end net.meta.normalization.border = [256 256] - net.meta.normalization.imageSize(1:2); net = dagnn.DagNN.loadobj(net); if strfind(model, 'bnorm') net = insert_bnorm_layers(net) ; end else % net=vl_simplenn_tidy(net); if isfield(net, 'meta'), netNorm = net.meta.normalization; else netNorm = net.normalization; end if(netNorm.imageSize(3) == 3) && ~isempty(strfind(opts.model, 'imagenet')) netNorm.border = [240 240] - netNorm.imageSize(1:2); net = replace_last_layer(net, [1 2], [1 2], nClasses, opts.dropOutRatio); end if strfind(model, 'bnorm') net = insert_bnorm_layers(net) ; end net = dagnn.DagNN.fromSimpleNN(net) ; end net = dagnn.DagNN.setLrWd(net); net.renameVar(net.vars(1).name, 'input'); if ~isnan(opts.dropOutRatio) dr_layers = find(arrayfun(@(x) isa(x.block,'dagnn.DropOut'), net.layers)) ; if ~isempty(dr_layers) if opts.dropOutRatio > 0 for i=dr_layers, net.layers(i).block.rate = opts.dropOutRatio; end else net.removeLayer({net.layers(dr_layers).name}); end else if opts.dropOutRatio > 0 pool5_layer = find(arrayfun(@(x) isa(x.block,'dagnn.Pooling'), net.layers)) ; conv_layers = pool5_layer(end); for i=conv_layers block = dagnn.DropOut() ; block.rate = opts.dropOutRatio ; newName = ['drop_' net.layers(i).name]; net.addLayer(newName, ... block, ... net.layers(i).outputs, ... 
{newName}) ; for l = 1:numel(net.layers)-1 for f = net.layers(i).outputs sel = find(strcmp(f, net.layers(l).inputs )) ; if ~isempty(sel) [net.layers(l).inputs{sel}] = deal(newName) ; end end end end end end end net.layers(~cellfun('isempty', strfind({net.layers(:).name}, 'err'))) = [] ; opts.train.derOutputs = {} ; for l=numel(net.layers):-1:1 if isa(net.layers(l).block, 'dagnn.Loss') && isempty(strfind(net.layers(l).name, 'err')) opts.train.derOutputs = {opts.train.derOutputs{:}, net.layers(l).outputs{:}, 1} ; end if isa(net.layers(l).block, 'dagnn.SoftMax') net.removeLayer(net.layers(l).name) l = l - 1; end end if isempty(opts.train.derOutputs) net = dagnn.DagNN.insertLossLayers(net, 'numClasses', nClasses) ; fprintf('setting derivative for layer %s \n', net.layers(end).name); opts.train.derOutputs = {opts.train.derOutputs{:}, net.layers(end).outputs{:}, 1} ; end lossLayers = find(arrayfun(@(x) isa(x.block,'dagnn.Loss') && strcmp(x.block.loss,'softmaxlog'),net.layers)); net.addLayer('top1error', ... dagnn.Loss('loss', 'classerror'), ... net.layers(lossLayers(end)).inputs, ... 'top1error') ; net.addLayer('top5error', ... dagnn.Loss('loss', 'topkerror', 'opts', {'topK', 5}), ... net.layers(lossLayers(end)).inputs, ... 
'top5error') ; net.print() ; net.rebuild() ; net.meta.normalization.rgbVariance = []; net.meta.normalization.averageImage = mean(mean(net.meta.normalization.averageImage, 1), 2); opts.train.train = find(ismember(imdb.images.set, [1])) ; opts.train.train = repmat(opts.train.train,1,opts.train.epochFactor); % opts.train.valmode = '250samples'; opts.train.valmode = '30samples' opts.train.denseEval = 1; net.conserveMemory = 1 ; fn = getBatchFn(opts, net.meta); %fn = getBatchWrapper_ucf101_rgbflow(net.meta.normalization, opts.numFetchThreads, opts.train) ; [info] = cnn_resnet_train_dag(net, imdb, fn, opts.train) ; end % ------------------------------------------------------------------------- function fn = getBatchFn(opts, meta) % ------------------------------------------------------------------------- bopts.numThreads = opts.numFetchThreads ; bopts.pad = opts.pad ; bopts.border = opts.border ; bopts.transformation = opts.aug ; bopts.imageSize = meta.normalization.imageSize ; bopts.averageImage = meta.normalization.averageImage ; bopts.rgbVariance = meta.normalization.rgbVariance ; % bopts.transformation = meta.augmentation.transformation ; fn = @(x,y) getSimpleNNBatch(bopts,x,y) ; end % ------------------------------------------------------------------------- function [im,labels] = getSimpleNNBatch(opts, imdb, batch) % ------------------------------------------------------------------------- images = strcat([imdb.imageDir filesep], imdb.images.name(batch)) ; isVal = ~isempty(batch) && imdb.images.set(batch(1)) ~= 1 ; if ~isVal % training im = cnn_resnet_get_batch(images, opts, ... 'prefetch', nargout == 0) ; else % validation: disable data augmentation % opts.border=0; im = cnn_resnet_get_batch(images, opts, ... 'prefetch', nargout == 0, ... 'transformation', 'none') ; end if nargout > 0 labels = imdb.images.class(batch) ; end end
{"author": "zhangqianqianQQ", "repo": "MachineVisionAlgorithm", "sha": "683338f6c3b1aab9fa2b80026915fe936aebf0ee", "save_path": "github-repos/MATLAB/zhangqianqianQQ-MachineVisionAlgorithm", "path": "github-repos/MATLAB/zhangqianqianQQ-MachineVisionAlgorithm/MachineVisionAlgorithm-683338f6c3b1aab9fa2b80026915fe936aebf0ee/\u8bc6\u522b\u7b97\u6cd5/DAIN-master/cnn_resnet_finetune.m"}
\documentclass[a4paper,Times New Roman 11pt]{article} \usepackage{graphicx} \usepackage{pdfpages} \usepackage{booktabs} \usepackage{csquotes} \usepackage{hyperref} \usepackage{ragged2e} \usepackage{multirow} \usepackage[style=authoryear-ibid,backend=biber]{biblatex} \usepackage{fancyhdr} \pagestyle{fancy} \fancyhf{} \fancyfoot[LE,RO]{John O'Grady Diploma in Higher Computing, ITB, 2017} \fancyfoot[RE,LO]{\thepage} \renewcommand{\headrulewidth}{2pt} \renewcommand{\footrulewidth}{1pt} \addbibresource{mybib.bib} \newcommand\addrow[2]{#1 &#2\\ } \newcommand\addheading[2]{#1 &#2\\ \hline} \newcommand\tabularhead{\begin{tabular}{lp{9cm}} \hline } \newcommand\addmulrow[2]{ \begin{minipage}[t][][t]{3cm}#1\end{minipage}% &\begin{minipage}[t][][t]{9cm} \begin{enumerate} #2 \end{enumerate} \end{minipage}\\ } \newenvironment{usecase}{\tabularhead} {\hline\end{tabular}} \begin{document} \begin{titlepage} \newcommand{\HRule}{\rule{\linewidth}{0.5mm}} \title {Carers Scheduling Prototype Software Application} \author{John O'Grady} \date{April 2017} \maketitle \thispagestyle{empty} \begin{center} \HRule \\[0.4cm] \textsc{\large Submitted in part fulfilment \\for the Higher Diploma in Computing \\School of Informatics and Engineering, \\Institute of Technology Blanchardstown, \\Dublin, Ireland } \end{center} \HRule \\[2cm] %---------------------------------------------------------------------------------------- % AUTHOR SECTION %---------------------------------------------------------------------------------------- \begin{minipage}{0.4\textwidth} \begin{flushleft} \large \emph{Author:}\\ \textsc{John O'Grady} \end{flushleft} \end{minipage} ~ \begin{minipage}{0.4\textwidth} \begin{flushright} \large \emph{Supervisor:} \\ \textsc{Dr. 
Matt Smith} \end{flushright} \end{minipage}\\[2cm] \begin{center} \includegraphics{itb_logo.jpg} \end{center} \end{titlepage} \pagenumbering{arabic} %---------------------------------------------------------------------------------------- % Brief overview of project, aims, methodology and approach taken %---------------------------------------------------------------------------------------- \section*{Abstract} This project is concerned with requirements gathering, planning and delivery of a prototype rostering and scheduling software application for the Irish Wheelchair Association (IWA), a large non profit service provider which needs a new organisation wide software process to manage the planning of care appointments for people with disabilities. This project begins by setting the organisational context for the planned new software initiative. Next, it progresses to examine the body of current academic research in software delivery to provide a brief synthesis of best practice in the planning, implementation and deployment of modern software applications. Based on this research, this author selects the Systems Development Life Cycle (SDLC) methodology as an appropriate toolset to determine the requirements of IWA for a new rostering and scheduling solution which the organisation plans to deploy to its 1,500 Personal Assistants. Having gathered the requirements, the project progresses to define the organisational context for the software deployment. Using the SDLC methodology the author, who is responsible for leading the Information Technology function at IWA, works collaboratively with key project stakeholders across the organisation to define a broad functional requirement specification and produces various key design artefacts. As the development work on the project progresses, a suite of SDLC mandated project management documents are iteratively refined and finalised. 
A high level test plan is developed and implemented to ensure code quality and alignment with user expectations and requirements. Various rework is undertaken to address issues and shortcomings identified through the testing process. Finally, in conclusion, the author offers some insights from his experience of the software planning and development process together with consideration of some areas for further investigation and ongoing improvement of the prototype which are outside the scope of the initial project build. \pagebreak \tableofcontents \pagebreak------------------------------------------------ \begin{samepage} \justify \section{Introduction} This project examines the requirements for a prototype Rostering and Scheduling solution for which the ultimate client, the Irish Wheelchair Association (IWA) has a real world business requirement. It is envisaged that the finished software deliverable at the conclusion of this project will not be a fully enterprise ready software solution but rather the project deliverable will be employed as a prototype for user acceptance testing by representative end users. This software development process will be used to examine and further refine IWA's existing assumptions in relation to the imminent deployment of a critical real life deployment whose successful implementation is considered to be fundamental to the competitive position of the IWA. The scope of this project is to establish the detailed software functionality requirements for the new application and to utilise this information to deliver a prototype software solution for evaluation purposes which will cover the primary use cases and functional requirements identified. 
This will enable end users to test, assess and give feedback on the prototype and it is envisaged that this process will be constructive in mitigating the risk of any significant functionality components being omitted from the final production system or the usability of the ultimate solution not meeting user requirements in full. Following this iterative process, it is planned to document a more comprehensive set of final functional requirements for the production deliverable will fundamentally underpin a competitive tender exercise and vendor engagement through which IWA will select, configure and implement a live rostering and scheduling solution for its 1,500 Personal Assistants across Ireland. \pagebreak \section{Organisational Context} \subsection {History of Irish Wheelchair Association} The Irish Wheelchair Association (IWA) is a vibrant independent organisation which was founded in 1960 by a group of people with disabilities. IWA is governed by a Board of Directors elected from its 20,000 membership base. It is the largest provider of Assisted Living Services in the Ireland, employing over 2,600 employees and delivering over 2 million hours of service annually. It is substantially funded by the Irish Government, primarily through the Health Service Executive, from whom it receives funding as a Section 39 agency under the Health Act 2004, although it also receives funding from various other statutory and non statutory sources and raises funds directly from the general public. A challenge which is directly pertinent to the planned software project is that IWA's funding revenue streams are primarily remitted in respect of service level agreements for the delivery of front line services and it receives no direct funding for Information Technology or indeed other Shared Services activities. 
During the recent economic downturn and its impact on the public finances, IWA, like many non service providers, has had seen the government apply substantial cumulative funding cuts between 2008 and 2014. Against this backdrop, IWA has managed to maintain and in some cases expand its level of service activity through pursuing internal efficiencies and implementing a series of pay reductions which have been agreed with its workforce. However, maintaining service delivery in the face of significant funding reductions has progressively diminished the financial reserves that the Association holds. These funding reductions have also significantly impacted on the state of IWA's Information Technology infrastructure which is now in need of attention following a sustained pattern of historic under investment. The organisation's strategic plan for the years 2017 to 2010 now views Information Technology initiatives as a core enabler of driving overall efficiency and value for money in its model of service delivery, on the strict understanding that approval of all capital investment decisions must be clearly linked to a defined and fully costed business case which will deliver an identifiable and measurable return on investment in net financial terms to the organisation. IWA has been at the forefront of developing person centred service provision in Ireland, based on international best practice, and IWA is now somewhat unique among large charities in Ireland in that it remains wholly owned by, and accountable to its 20,000 members who are made up of people with disabilities, active volunteers and other IWA supporters. In all areas of its activities, IWA advocates for independence and quality of life for all people with disabilities in Ireland. 
In this regard, the stated mission of IWA has recently been updated in its new Strategic Plan for 2017-2020 as follows: \begin{displayquote} Irish Wheelchair Association has a vision of an Ireland where people with disabilities enjoy equal rights, choices and opportunities in how they live their lives, and where our country is a model worldwide for a truly inclusive society. \end{displayquote} In addition to Assisted Living Service, which is IWA's largest service employing 1,700 of IWA's 2,600 strong workforce, IWA provides a range of other services including a network of 57 community based Resource and Outreach Centres, respite services, driving tuition and a variety of member led youth and local branch projects. IWA Sport is a subsidiary division of IWA which is a recognised National Governing Body of Sport by the Irish Sports Council and IWA operates a network of volunteer led Sports clubs throughout Ireland which are an vibrant component of Ireland's Paralympic movement, particularly with regard to the development of young disabled athletes. IWA also has developed specialist teams in the specialist areas of Accessibility, Housing and Transport where it is widely acknowledged as an expert in providing expertise and advocacy in ensuring public and private developments are configured to meet the needs of people with disabilities. \subsection {Overview of Assisted Living Service} IWA's Assisted Living Service (ALS) provides significant individual supports to people with disabilities which are tailored to the needs and wishes of each person to enable them to live independently. IWA, with funding support from the Health Service Executive, provides a Personal Assistant (PA) to assist with tasks that the person with a disability might find difficult or impossible to do in their daily lives. 
Traditionally, people with disabilities have been treated under the medical model of care, with negative impacts for their independence and the degree of control they exercised over their own lives. As \parencite{brisenden} has outlined \begin{displayquote} The medical model of disability is one rooted in an undue emphasis on clinical diagnosis, the very nature of which is destined to lead to a partial and inhibiting view of the disabled individual. \end{displayquote} \newpage Originating in the international Independent Living movement, Assisted Living is an alternative, person centered philosophy which postulates that people with disabilities are best placed to make determinations on their own needs. In keeping with this, IWA's Assisted Living Service (ALS) provides support to individuals in their homes and communities facilitating community participation, access to education, employment and improved quality of life. The ALS model of service delivery comes in two main strands \begin{itemize} \item \textbf {Self-directed or leader-managed package. }In a self-directed or leader-managed package, the person with the disability acts as the leader or service manager for IWA. This involves recruiting their own personal assistants, organising their weekly rosters, returning their timesheets, arranging holiday cover, etc. The leader can consult the service coordinator when necessary. \item \textbf {Supported package.} In the supported package, the service coordinator takes responsibility for some or all of the management, delivery and operation of the service. \end{itemize} \subsection {Current Scheduling Process} IWA does not currently have a unified scheduling, rostering and time attendance system in place across all of its ALS locations. 
A custom static (non calendarised) solution for roster and timetable planning functionality has been developed on its Microsoft Dynamics CRM environment and is currently in use across all IWA offices and a calendar based extension of this is currently in a pilot phase in a small number of IWA locations. It is likely that IWA will migrate management of schedules and rosters to the new Rostering and Scheduling platform however it should be noted that IWA does envisage continuing to use Dynamics CRM for other a variety of other key service management functions such as on-boarding service users, employee recruitment, risk assessments, evaluations and logging contact and activity information with employees and service users. While Microsoft Dynamics CRM has provided some basic functionality in relation to static rosters to date, in the absence of a fully calendarised roster solution across all IWA offices, local teams in IWA have developed a variety of long-standing and ad-hoc local solutions to managing planning rosters, all of which IWA wishes to discontinue in favour of the proposed new solution. The current IWA process for capturing time and attendance information for payroll and billing purposes involves each PAs completing paper timesheets which are progressively signed off by the service user or a family member throughout the month at each service visit. At the end of the payroll month, the timesheets are delivered to the local coordinator at the local IWA office and are then data entered into the Focal-Point system. These timesheets are initially entered as ‘pending approval’ by an ALS Administrator within the system and routed to the ALS Coordinator for the service for approval. The coordinator reviews the timesheet against the expected visits and service budget and approves or amends the timesheet. Once the Focal-point process is complete, an export of the approved timesheet data is taken from Focal Point and used to feed the Mega pay payroll system. 
The Mega pay application in turn feeds the Access Accounts system for invoicing/billing purposes. In the context of implementing a new rostering and time attendance system, IWA wishes to cease using Focal-point for the capture of Time and Attendance information and approval of same in favour of the new rostering and scheduling system directly managing the capturing and approval of time and attendance data which can then be exported directly to Access Accounts for billing purposes and Megapay for payroll processing. \subsection {Overview of Organisational Impacts} %---------------------------------------------------------------------------------------- % Background to work of IWA and context in which app will be used % Reference and link to live tender and explanation of tender approach % Description of service and roles who will use the live applciation % Definition and scope of prototype application %---------------------------------------------------------------------------------------- The casual reader of the preceding section will quickly appreciate the inherent inefficiencies of the current paper centric process in an organisation of the size of IWA. Indeed, IWA employs a service management cohort of 26 Service Coordinators, 8 Service Support Officers and 20 Administrators across 15 ALS offices around Ireland and it has been estimated that across these employee groups, approximately 25\% of their working time is spent managing rostering and scheduling functions in relation to the ALS service to ensure all service visits are covered and a further 25\% of their time is currently consumed in manual data entry and approval tasks in relation to timesheet and payroll information. This inefficient process has a direct impact on IWA's cost base and also has an indirect opportunity cost impact by limiting the available time of Service Coordinators to spend on other essential functions such as planning, evaluation, training and supervision activities. 
In this context, it should be noted that each IWA Service Coordinator is responsible for managing a large number (between 50-100 per Coordinator) of service packages for individual service users and each also supervise a similar number of Personal Assistants for whom the Service Coordinator acts as line manager. This also has an impact on the ratio of required back office personnel to service delivery hours and due to the somewhat manual rostering and scheduling process currently being operated, IWA's service coordination and administration costs per service delivery hour are somewhat higher than some of its competitors, placing it at a competitive disadvantage, particularly in comparison to some of the private sector commercial operators who have recently entered the Irish home care market and who in many cases currently have more sophisticated technological solutions in place to handle this key internal process. \subsection {Procurement and Tendering Approach} IWA has recently launched the first phase of the competitive tender process for its new rostering and scheduling solution through the Office of Government Procurement's E-Tender's website which is available at \href{https://irl.eu-supply.com/app/rfq/publicpurchase_frameset.asp?PID=110399&B=ETENDERS_SIMPLE&PS=1&PP=ctm/Supplier/publictenders}{this link} The first stage of the procurement process requires prospective vendors to complete a short pre-qualification questionnaire which provides an opportunity for vendors to demonstrate the capabilities of their software platform against the high level requirements summarised in the preceding section, as well as providing an overview of their organisational capability and customer references where they have already deployed their solution in a similar usage context. 
Following assessment of the pre-qualification questionnaires, IWA will shortlist a small number of interested vendors for the second phase of the tender process which will require vendors to submit a more comprehensive response against a detailed Request for Tenders (RFT) document provided to the vendors by IWA. Once a successful platform/vendor meeting all of IWA's mandatory requirements as set out in the RFT document has been appointed through the procurement process, IWA will work with that vendor to configure and test the system to align the chosen platform which IWA's requirements. It is envisaged that IWA will initially implement the solution for a small group of 50-100 Personal Assistants who will take part in a pilot exercise to confirm, test and sign off on the chosen solution as fit for purpose prior to its rollout to the wider group of 1,500 Personal Assistants and 25 Service Coordinators. \newpage \begin{samepage} \section {Literature Review} \subsection{Software Development Paradigms} In this section, we complete a rapid tour through the body of academic literature to review different software development paradigms and select an appropriate methodology for the prototype project. We begin by looking at the Waterfall Approach to software development before examining some alternatives. Many computer scientists regard the Waterfall methodology as the classical approach to software development while the Agile methodology and related approaches have been gaining increasing traction in recent times and can be reasonably positioned as the more modernist or fashionable approach, especially for projects which are likely to need the capacity for rapid adaptation. \subsection {The Waterfall Methodology} In the 1960s and early 1970s, the largest customer worldwide for software development was the U.S. 
Government's Department of Defense which managed a sophisticated and critical portfolio of software systems and projects which was probably without rival in terms of its scale at the time. \parencite{royce} is often credited as being the first to use the Waterfall development as a term to describe a planned top down approach to software development which moves incrementally through a series of rigid steps which cannot be revisited once completed. However, others have disputed that Royce was the first to use the term and also suggested that Royce did not envisage a rigid approach to development , noting that Royce was in favour of an iterative approach which involved the capacity for the rework of outputs based on the experience of using outputted artefacts in subsequent development steps, though only within successive steps. Royce also felt that there was considerable risk in the fact that the Waterfall model only envisaged testing of the outputs of the project as a penultimate activity when the project was close to completion. Royce's model of Software Development suggests the following incremental steps in developing new software systems \begin{itemize} \item Systems requirements \item Software requirements \item Analysis \item Program design \item Coding \item Testing \item Operation \end{itemize} \newpage \subsection {Systems Development Life Cycle Methodology} More recent commentators have adapted the foregoing model to add some additional steps and the following framework is frequently used in establishing a structure for project teams engaged in software development, though there are various alternate models also in use which use similar overall concepts: \begin{itemize} \item \textbf{Initiation.} In this stage, the project's life begins when a project sponsor recognises the need or opportunity for a software project to take place. 
\item \textbf {System Concept Development.} At this juncture, the team defines the scope for the project and engages in risk analysis activities, defining boundaries for the project and undertaking feasibility studies. \item \textbf{Planning.} Here, the team develops a project management plan and sets about acquiring the resources needed for the project. \item \textbf{Requirements Analysis.} Now the project is underway, and the project team work to establish what the user requirements are, using this knowledge to define a detailed functional requirements document. \item \textbf{Design.} This stage sees the requirements transformed into a detailed Systems Design Document which begins to examine and plan the technical approach which will employed to deliver the required functionality. \item \textbf{Development.} This begins the actual technical delivery, where the team convert the design documents into realisable code and system resources including database creation, preparing test cases and beginning the installation, coding and assembly/ compilation of a live software deliverable \item \textbf{Integration and Test.} The focus in this stage is to ensure that the system under construction aligns with and meets user expectations in relation to the system's functional requirements. These assumptions can be tested through user acceptance testing and users undertaking quality assurance activities. It is best practice here that the test activities should be formally documented. \item \textbf{Implementation.} Now that we have a system in place, it needs to be deployed to a production environment and where necessary integrated with other relevant software and hardware resources. \item \textbf{Operation \& Maintenance.}. This stage defines a series of operational tasks and procedures to operate, maintain and fine-tune the information system in a production scenario. 
\item \textbf{Disposition.} This stage at the end of the project focuses on the end of system activities, with a particular emphasis on the data aspects of projects. \end{itemize} The Waterfall model gained wide acceptance as a framework for software development, though controversy raged continually about its effectiveness and appropriateness to large scale projects. In order to increase the speed of delivery and in response to increasing complexity and project risk, the Department of Defense looked to computer scientists in academia in order to assist it in developing more reliable and predictable software development methodologies. In an influential publication, \parencite{defensesscienceboard}, reporting to the U.S. Military as the Defense Science Board Task Force on Military Software, highlighted increasing risks associated with more traditional software development approaches including the Waterfall approach. This report noted that the cost of military software contracts had been steadily increasing, while the time to delivery was also becoming longer. It highlighted a variety of mitigation strategies which it recommended to the Department of Defense including standardising programming languages (using the DoD mandated Ada language) as well as a renewed focus on requirements gathering and an more iterative approach to managing software projects. A more recent study \parencite{peterson} found that the major issues with the Waterfall model arise in relation to the requirements gathering activities, and also present in relation to the verification of same. 
Based on this, it concluded that even though it was still commonly in use, the Waterfall model was fundamentally unsuitable for large scale or complex development projects \subsection {Spiral Model} Noting that the Waterfall approach had become the de facto standard for military (and general) software development, and echoing the shortcomings noted by the Defence Science Board report, \parencite{boehm} proposed an alternate framework which he termed the Spiral model. Because the Waterfall approach placed a strong emphasis on the finalisation and sign off of detailed specification criteria prior to moving on to the development and implement stages, Boehm noted that this highly structured approach frequently served to act as a barrier to early prototyping. Boehm contrasted this with the Evolutionary Development approach ( where a user might declare `I can’t tell you what I want, but I’ll know it when I see it.')', which had become popular as an antidote to the rigid strictures of the Waterfall approach. He also observed that using Evolutionary Development also presented some significant challenges in that it could frequently be difficult to distinguish this approach from the older and less structured `Code and Fix' or `Spaghetti code' implementations which have been repeatedly found to have performed very poorly as they scaled to larger implementations. Figure 1. below shows an overview of the Spiral model of development \begin{figure}[h!] \includegraphics[scale=0.9]{spiralmodel.png} \caption{The Spiral model of development} \label{fig:spiral_model} \end{figure} To address the shortcomings of both methodologies, Boehm proposed the Spiral model which he noted as evolving based on the insights he had gained from applying the Waterfall model in large government projects. 
It suggests a flexible approach based on the risk profile of each project which adopts suitable tools from one or more process based methodologies such as incremental development, the waterfall model, or evolutionary prototyping. A challenge with the Spiral model is that its core concepts have often been oversimplified leading to misconceptions and what Boehm terms `hazardous spiral look-alikes`. This term refers to implementations of the Spiral approach which appear to have the all the required core components but in fact diverge and inviolate the one or more of the key principles of the model. To counteract this approach, Boehm stresses that projects using the spiral model should always display six invariants (items which are always required in every project) and he usefully provides examples where each invariant could be subject to an incorrect interpretation which would invalidate its meaning. \begin{itemize} \item \textbf{Define artefacts concurrently.} This invariant notes that concurrency is a better approach here as defining project artefacts sequentially increases the risk that the project does not meet stakeholder expectations. \item \textbf{Perform four basic activities in every cycle.} At its core, the Spiral model emphasises four basic activities in each cycle of development as follows: \begin{itemize} \item 1. Determine the objectives \item 2. Identify and resolve risks \item 3. Development and test \item 4. Plan the next iteration \end{itemize} \item \textbf {Risk determines level of effort.} It is the responsibility of the project team to determine how much effort should be spent on each project area, based on the perceived level of risk for the area. This decision should always be made based on a strategy of reducing the overall level of risk to the project. 
\newpage \item \textbf {Risk determines degree of details} Based on a risk based approach, the team must make a determination as to how much detail needs to be gathered for each project artefact. For example, they should ensure they gather sufficient detail in relation to requirements areas where a detailed specification helps to reduce unpredictability and contribute to lowering the overall level of risk, for example in precisely defining the integration approach to be taken between hardware and software components. Conversely, project teams should feel entirely comfortable producing much less detail in relation to front requirements specifications in other areas which have a lower overall level of project risk, for example in relation to the design of the graphical user interface. \item \textbf {Use anchor point milestones.} In the original specification of the spiral model, it did not initially include any project milestone assessments but from practical experience- again with a risk focused approach- these were introduced to anchor project delivery to communicate progress updates to stakeholders. \item \textbf {Focus on the system and its life cycle} This invariant recommends that project managers and stakeholders take a long term view of the project life cycle and avoid an excessive focus in the initial stages of the project on the development of software code. \end{itemize} \subsection {Agile} The Agile manifesto emerged as a strong counterpoint to the various process driven approaches such as the Waterfall methodology and echoed many of the concerns and ideas of earlier iterative and incremental software development approaches. The manifesto was written by 17 experienced software developers who convened in 2001 and drawing on their own experience of efficient software development principles, sought to define the guiding principles for the Agile Alliance. 
Central to their thinking was the notion that while they valued the items on the right hand column of the table above, they placed an even higher value on the items in the left hand column. For example, while they did see a value in developing documentation, they did not take this to the level of generating copious manuals for form's sake which would gather dust, not be kept up to date, and ultimately deliver little in terms of value to end users. \begin{table}[] \centering \caption{The Agile Manifesto} \label{my-label} \begin{tabular}{|l|l|l|ll} \cline{1-3} Individuals and interactions & over & Processes and tools \\ \cline{1-3} Working software & over & Comprehensive documentation \\ \cline{1-3} Customer collaboration & over & Contract negotiation \\ \cline{1-3} Responding to change & over & Following a plan \\ \cline{1-3} \end{tabular} \end{table} The twelve principles of the Manifesto for Agile Software Development, which are reproduced verbatim below, have found wide acceptance among software developers, particularly in recent years as the time to market between releases in new areas of software development including mobile apps and online web applications needs to be much shorter than in traditional application development. 
The \textbf{Manifesto for Agile Software Development} is based on twelve principles: \begin{itemize} \item Customer satisfaction by early and continuous delivery of valuable software \item Welcome changing requirements, even in late development \item Working software is delivered frequently (weeks rather than months) \item Close, daily cooperation between business people and developers \item Projects are built around motivated individuals, who should be trusted \item Face-to-face conversation is the best form of communication (co-location) \item Working software is the principal measure of progress \item Sustainable development, able to maintain a constant pace \item Continuous attention to technical excellence and good design \item Simplicity—the art of maximizing the amount of work not done—is essential \item Best architectures, requirements, and designs emerge from self-organizing teams \item Regularly, the team reflects on how to become more effective, and adjusts accordingly \end{itemize} Where traditional software houses could plan for new versions of their software to be released perhaps on an annual basis, which allowed sufficient time for testing, feedback and rework of code from testing to eliminate bugs, modern application development needs to focus on regularly adding new functionality. The `bite-sized' approach of Agile is a good fit for these rapid development requirements, allowing software development houses to iteratively plan and develop and test new releases of working software while allowing for frequent application updates to be responsive to customer needs, competitor actions or market conditions, often completing a development cycle within a short period of a few weeks. 
These Agile principles have inspired many further iterative innovations in the field of software development giving rise to other approaches such as Extreme Programming, Lean Software Development (which has many parallels with Toyota's Lean Manufacturing System for vehicle manufacturer) and also resources and tools such as Kanban Boards, Scrum and DevOps tools. Some of the core project management methodologies such as PMBOK (The Project Management Book of Knowledge) and PMI (Project Management Institute) have also adapted to the emergence of Agile principles by defining specific Agile oriented project management methodologies. There are also mainstream training and certification options available for those who wish to pursue careers as Agile practitioners or Scrum masters. %---------------------------------------------------------------------------------------- % Overview of fields reviewed and sources consulted % Review of SDLC, Agile vs Waterfall % UML Methodology- critical evaluation of UML % User Testing Approaches % MVC and alternative frameworks- why I chose MVC % The Symfony Framework [and alternatives] %---------------------------------------- \end{samepage} \begin{samepage} \newpage \section {Initiation and Concept Development} \subsection {Scope of Final Rostering and Scheduling Solution} In order to address and remedy the deficiencies noted in the earlier Organisational context section in relation to the inefficient processes currently being used to manage the organisation's rostering and scheduling processes, the IWA's Senior Management Team have mandated the IWA ICT team to work with internal stakeholders to define functional requirements and implement a competitive tender process to select and deploy a new software solution to enable IWA to manage rostering and the capture of time and attendance information in a more efficient manner. 
The proposed solution will deliver the following functionality components to IWA: \begin{itemize} \item \textbf{A Rostering and Scheduling solution} for use by ALS Administrators, Coordinators and Support Officers. The proposed solution should also interface with the IWA Megapay payroll system. Finally, it should manage the customer billing process in relation to the care services provided to statutory and individual customers. \item \textbf {A Mobile Application} to be used by PAs employed by IWA which should be capable of running on the iOS, Android and Windows Phone and thereby be suitable to run on the personal mobile devices of employees to avoid IWA having to supply company owned and funded devices on the corporate account. \item \textbf {Attendance Verification Mechanism via Mobile App}. A key requirement for the mobile application is that it provides a reliable and independent real-time verification of an employee’s attendance at a service visit location, together with timestamped confirmation of the length of time that was spent at the location. This is required to satisfy IWA service level agreement obligations with its funders and to minimise the possibility of fraud. \pagebreak \item\textbf {Quotation/Proposals Generation.} The proposed enterprise solution should be capable of generating detailed and personalised Service proposals/ quotations where the coordinator can plan the service schedule for the service user, referencing each visit to the appropriate price card item to generate a completed quotation for the customer which shows the provisional service schedule and the projected weekly invoiced cost. \item \textbf {Employee, Service User and Customer Portals.} IWA would also like to implement Service user and Employee portals which includes an authentication layer so that a service user or their family member can view upcoming service visits. 
Similarly, employees can view their upcoming roster visits to various service users including information such contact information, tasks to be completed etc. and a Customer portal where a funder can view invoices, schedules for upcoming service and validate completed visits by viewing validation timestamps of attendance. \end{itemize} \subsection {Scope of Prototype Solution} As the prototype application is being fast tracked for user acceptance testing, this application has more limited scope and is primarily focused on the backend Rostering and Scheduling solution and in particularly on examining in detail the user experience and optimal process and validation checks required by Coordinators and Administrators in handling various common service delivery scenarios. The prototype will also attempt to model in a simplistic fashion the experience of portal layer users who will interact using various security limited roles such as employees, service users and customers although it will not attempt to fully implement the data privacy restrictions to be granted to each type of role- for example employees being unable to access the planned roster records for other employees to the fully robust extent that would be required in an enterprise level application. The aspects of creation of a mobile app, integration of the mobile app with the backend rostering and scheduling and the verification of attendance via the mobile app are all considered as out of scope for the prototype application. 
\end{samepage} \pagebreak \subsection {Feasibility Review} \textbf{Risk Register for Project and Mitigation Actions} \noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}} \textbf{Risk: It may be challenging to find a solution which meets all of IWA’s requirements.} \newline Mitigation: In this regard, it will be necessary to prioritise those essential aspects of the new system’s functionality over ‘nice to have’ aspects \noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}} \textbf{Risk: Required project resources are not available or their input delays key project phases} \newline Mitigation: Agreement of IWA management to release key resources when required and provide backfill for the other work normally assigned to those resources \noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}} \textbf{Risk: The project takes longer to deliver or software/customisation costs are higher than anticipated.} \newline Mitigation: Detailed project planning and agreement of deliverable, costs and timelines with vendor. Prioritisation of scope and deliverables to ensure key functionality is delivered on time. \noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}} \textbf{Risk: Solution does not fully meet IWA's requirements or becomes outdated as IWA's requirements change.} \newline Mitigation: Detailed agreement on deliverable/scope /cost for initial build and sign off of same with vendor. Comprehensive user acceptance testing and user sign off on agreed and tested functionality at key milestones. Design solution so that it is flexible to adapt to the key areas where IWA requirements may change e.g. payroll payment rate structure, billing rate structure, customer invoicing/reporting formats, additional functionality/ logic in the mobile app. 
\noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}} \textbf{Risk: Less than 100\% adoption by ALS teams, current manual processes persist at local level.} \newline Mitigation: Dedicated project manager for rollout phase working with local teams. Tight project planning and monitoring/direction from local ALS management teams to ensure project phases and adoption take place to plan. Structured training plan for staff using system and identification of staff who are struggling to adapt to the new process and provision of support by local ALS team leads to ensure they are brought up to speed. \noindent\makebox[\linewidth]{\rule{\textwidth}{0.4pt}} \textbf{Risk: The prototype has limited value as it has marked differences in functionality or user interface from available production platforms which might be considered by IWA} \newline Mitigation: Focus on confirmation of required functionality over specific user interface features. Research commercial offerings and build some aspects of their user interface into prototype so that users get a realistic sense of what the finished product might look like \newpage \section {Requirements Analysis} \subsection {Functional Requirements } In this functional requirements section, we set out in simple terms what the prototype application will do. \begin{itemize} \item \textbf {Managing Service Appointments.} For this application, the core objective of the system is to manage a series of appointments for care visits and to make sure that each one is covered, taking account of the relationships between service users and personal assistants. \item \textbf {Documenting service user, customer, employee and office location and contact information.} The application should record (and show in the index and show views for each entity) the address and contact information for each employee, service user or office. 
\item \textbf {Displaying service user roster as calendar plan.} The application should display the roster plan for each service user in a calendar plan (similar to an Outlook diary) format and allow the user to interact with and update each visit from that calendar view. \item \textbf {Assigning service user, employee and customers to office locations.} The allow a user to indicate which IWA office is responsible for managing the relationship with a given service user, employee or customer. \item \textbf {Geocoding addresses and Displaying Google Maps.} For each service user, employee and office. the system should integrate with Google Maps to obtain valid geocoordinates for the address and display this as a map on the view screen for each record. \item \textbf{Assigning a Personal Assistant to a Service User.} A Personal Assistant (employee) will frequently be assigned as the `preference' (or regular) resource to be sent to a given service user, based on a positive experience of working together. \item \textbf{Assigning a Personal Assistant as `Do Not Send' to a Service User.} Conversely, a negative experience from a previous visit may give rise to the need to designate that a given resource should not be sent to fulfil care visits for a particular service user. In this case, we would indicate this relationship by creating a `Do Not Send' association between the service user and the employee. \item \textbf{Employee Unavailable Times.} The system needs to document times when a Personal Assistant (employee) is not available due to other commitments e.g. 
second job, family, college attendance and not show this employee as available when finding an employee to fill an unfilled slot \newpage \item \textbf{Employee Absence.} The system needs to document times when a Personal Assistant (employee) is on leave and unavailable to work and not show this employee as available when finding an employee to fill an unfilled slot \item \textbf{Employee Working.} The system needs to document times when a Personal Assistant (employee) is assigned to a roster and therefore unavailable to work on other rosters. It should not show this employee as available when finding an employee to fill an unfilled \item \textbf{Roster required employees .} The system should store for each visit how many employees are required. \item \textbf{Roster status.} When the number of assigned employees is less than the required quotient, the system should show a warning to this effect. When enough employees are assigned, the message should be updated to reflect this. \item \textbf{Find an employee for an unassigned roster.} When the number of assigned employees is less than the required quotient, the system should allow a user to find a suitable employee to fulfil the visit, bearing in mind `do not send' relationships and filtering out employees who are working elsewhere, designated as unavailable at that time or recorded as being on an approved absence (holidays, sick leave etc.) at the time of the roster. slot \end{itemize} \subsection {Availability Requirements } The system should be available over a web browser once the user has internet access. It should not rely on a client install of software on the user's machine nor should it require the user to use a VPN connection. \subsection {Security Requirements } \begin{itemize} \item \textbf{Encryption} The system should use an encrypted (https, SSL) connection to the server to ensure privacy of the data in transmission. 
\item \textbf{Role Based Security} The system should tag each user to the IWA office that they are assigned to, and restrict that user to only be able to see service users, employees and customers associated with the office that the user is associated with (Note: out of scope for prototype deliverable) \item \textbf{Service User Portal User- Security} The system should allow users who are attached to the Service User security role to log in and to only be able to see rosters for that service user (Note: included in scope for prototype deliverable) \item \textbf{Employee Portal User- Security} The system should allow users who are attached to the Employee security role to log in and to only be able to see rosters to which that employee is assigned (Note: out of scope for prototype deliverable) \newpage \item \textbf{Customer Portal User- Security} The system should allow users who are attached to the Customer security role to log in and to only be able to see rosters for which that customer is designated as the customer of the Roster visit (Note: out of scope for prototype deliverable) \end{itemize} \subsection {Use case specification} The high-level use case specification aims to identify all the actors who will use a system and what actions they can take in that system. \subsection {Detailed Use Case Analysis} \begin{figure}[h!] \includegraphics[scale=0.65]{rosterusecase.jpg} \caption{Roster Use Case} \label{fig:roster use case} \end{figure} The following detailed use cases have been identified for the prototype application. As many of the use cases relate to standard CRUD (Create, Read, Update, Delete) functionality for each entity for which it is envisaged that the development approach will use the Symfony skeleton templating facility to initially scaffold the generic controller and related views and form type classes, a generic use case for each of these four standard functions has been created to avoid duplication and repetition. 
As noted, this generic use case- for example new() entity record- is therefore applicable to all of the individual entities noted, and any particular variations (for example passing parameters to identify a related entity context) are noted in the use case where appropriate \newpage \begin{usecase} \addheading{Title}{User login} \addheading{Description}{User supplies credentials to login} \addheading{Actor}{User} \addrow{Precondition}{User must not be logged in i.e. is currently authenticated as anonymous user} \addrow{Postcondition}{User is authenticated having supplied correct password and holds one or more security role} \addmulrow{Basic Flows}{\item User navigates to login area via base template button which is visible on all pages\ldots \item User enters username and password and clicks login\ldots \item Controller method checks username and password against user repository. If credentials are matched to the repository, logs user in and sets the user as authenticated, allocating any roles held by that user \item If credentials are not matched to repository then display login failed message and redisplay the login screen\ldots} \addmulrow{Alternate Exception Flows}{\item User who is not logged in attempts to access a secured resource and is automatically rerouted to the login screen\ldots} \end{usecase} \begin{usecase} \addheading{Title}{User registration} \addheading{Description}{User supplies a username and password and creates a new user account} \addheading{Actor}{User} \addrow{Precondition}{User must not be logged in i.e. is currently authenticated as anonymous user} \addrow{Postcondition}{User is authenticated having supplied correct password and holds one or more security role. 
User credentials are updated in the database for future visits} \addmulrow{Basic Flows}{\item User navigates to register area via base template button which is shown on all pages when the user is not currently logged in \ldots \item User enters username and password and clicks login\ldots \item Controller method checks username and password against user repository. If credentials supplied are valid, and do not duplicate an existing user account, the user is set to a logged in state and the database is updated \item If credentials are matched to an existing user record in the repository display the registration failed message and redisplay the registration form screen\ldots} \addmulrow{Alternate Exception Flows}{\item None\ldots} \end{usecase} \begin{usecase} \addheading{Title}{User accesses index view of entity records } \addheading{Description}{User can view index list of one of type Customer, Employee, Roster Assignment, Service User, Service User Assignment, Employee Absence Times, Employee Unavailability, Do Not Send, Office records. This is a reusable use case mapped to multiple Entities, all created using the Symfony templating approach} \addheading{Actor}{User} \addrow{Precondition}{User must be logged in and hold an appropriately authorised security role} \addrow{Postcondition}{N/A.} \addmulrow{Basic Flows}{\item User navigates to index view and can see all customer records\ldots \item User clicks on Edit link and can view a single page view screen of the selected record\ldots} \addmulrow{Alternate Exception Flows}{\item User is not logged in. On routing to area, system routes User to user login screen and denies permission to resource until they have successfully authenticated using an account holding the appropriate role\ldots \item User attempts to route to a record that doesn't exist and is shown error 404 not found\ldots \item Attempt to retrieve entity records failed at database level. 
Re-display previous screen and advise user that an error was encountered and suggest that they retry the effort\ldots \item User authenticates but does not hold an appropriate role to access the index view. Display 403 forbidden exception message\ldots} \end{usecase} \begin{usecase} \addheading{Title}{User views individual entity item} \addheading{Description}{User can select view an individual item of type Customer, Employee, Roster Assignment, Service User, Service User Assignment, Employee Absence Times, Employee Unavailability, Do Not Send, Office records} \addheading{Actor}{User} \addrow{Precondition}{User must be logged in and hold an appropriately authorised security role} \addrow{Postcondition}{N/A.} \addmulrow{Basic Flows }{\item User navigates to that record and can see all attributes of that entity record on a single page\ldots} \addmulrow{Alternate Exception Flows}{\item User is not logged in. On routing to area, system routes User to user login screen and denies permission to resource until they have successfully authenticated using an account holding the appropriate role\ldots \item User attempts to route to a record that doesn't exist and is shown error 404 not found\ldots \item Attempt to retrieve selected record failed at database level. Redisplay index and advise user that an error was encountered and suggest that they retry the effort\ldots \item User authenticates but does not hold an appropriate role to view the entity item. Display 403 forbidden exception message\ldots} \end{usecase} \begin{usecase} \addheading{Title}{User can create a new entity record } \addheading{Description}{User can create a new record of type Customer(s), Employee, Roster Assignment, Service User, Service User Assignment, Employee Absence Times, Employee Unavailability, Do Not Send, Office records. 
This is a reusable use case mapped to multiple Entities,all created using the Symfony templating approach} \addheading{Actor}{User} \addrow{Precondition}{User must be logged in and hold an appropriately authorised security role} \addrow{Postcondition}{The newly created record is now saved to the database} \addmulrow{Basic Flows}{\item User navigates to index view and clicks the new button or navigates from a related entity (see Alternate Flow below)\ldots \item User clicks on New link and can view a single page form screen with empty attribute controls for the selected record type\ldots} \addmulrow{Alternate Flows}{\item from service user record, user clicks on a button for new roster or assign employee or mark as do not send. Or from employee, user clicks on add unavailability time or employee absence period. User is routed to special version of the New() action for that entity which accepts either an employee or service user object as a parameter and method binds new entity record to have service user or employee context as an associated foreign key when saving newly created record)\ldots} \addmulrow{Alternate Exception Flows}{\item User is not logged in. On routing to area, system routes User to user login screen and denies permission to resource until they have successfully authenticated using an account holding the appropriate role\ldots \item User authenticates but does not hold an appropriate role to access the index view. Display 403 forbidden exception message\ldots \item Attempt to save new record failed at database level. 
Redisplay new record form with previously entered form data and advise user that an error was encountered and suggest that they retry the effort\ldots} \end{usecase} \begin{usecase} \addheading{Title}{User can edit an entity record } \addheading{Description}{User can retrieve and edit an existing record of type Customer(s), Employee, Roster Assignment, Service User, Service User Assignment, Employee Absence Times, Employee Unavailability, Do Not Send, Office records. This is a reusable use case mapped to multiple Entities,all created using the Symfony templating approach} \addheading{Actor}{User} \addrow{Precondition}{User must be logged in and hold an appropriately authorised security role} \addrow{Postcondition}{The updated record is now saved with changes made to that single entity record updated to the database} \addmulrow{Basic Flows}{\item User navigates to index view and clicks the edit button \ldots \item User is routed to a single page form screen with attribute controls for the selected record type prepopulated with the previously saved values retrieved from the database\ldots \item User makes changes to the field values and clicks update or save. The changes are validated and then saved to the database\ldots} \addmulrow{Alternate Flows}{\item The user cancels the attempt to edit the record and turns to the (non editable) form version of the entity\ldots} \addmulrow{Alternate Exception Flows}{\item User is not logged in. On routing to area, system routes User to user login screen and denies permission to resource until they have successfully authenticated using an account holding the appropriate role\ldots \item User authenticates but does not hold an appropriate role to access the index view. Display 403 forbidden exception message\ldots \item Attempt to save edited record failed at database level. 
Redisplay edit record form with previously entered form data and advise user that an error was encountered and suggest that they retry the effort\ldots} \end{usecase} \begin{usecase} \addheading{Title}{User can delete a new entity record } \addheading{Description}{User can delete a single entity record of type Customer(s), Employee, Roster Assignment, Service User, Service User Assignment, Employee Absence Times, Employee Unavailability, Do Not Send, Office records. This is a reusable use case mapped to multiple Entities,all created using the Symfony templating approach} \addheading{Actor}{User} \addrow{Precondition}{User must be logged in and hold an appropriately authorised security role} \addrow{Postcondition}{The deleted record is removed from the database.} \addmulrow{Basic Flows}{\item User navigates to index view and clicks the delete button \ldots \item the selected record is removed from the database\ldots} \addmulrow{Alternate Flows}{\item The user clicks the delete button when in the view/show mode and having previously selected an individual record to view/show\ldots} \addmulrow{Alternate Exception Flows}{\item User is not logged in. On routing to area, system routes User to user login screen and denies permission to resource until they have successfully authenticated using an account holding the appropriate role\ldots \item User authenticates but does not hold an appropriate role to access the index view. Display 403 forbidden exception message\ldots \item Attempt to delete record failed at database level. 
Redisplay current record and advise user that an error was encountered and suggest that they retry the effort\ldots} \end{usecase} \begin{usecase} \addheading{Title}{Check for a Do Not Send relationship between an employee and a service user} \addheading{Description}{When saving an assignment of an employee to a roster, check first that a do not send relationship has not already been defined for that employee.} \addheading{Actor}{User} \addrow{Precondition}{User must be logged in and hold an appropriately authorised security role. This method is called when assigning an employee to a roster record} \addrow{Postcondition}{The roster assignment record is updated with the relevant employee entity from the database.} \addmulrow{Basic Flows}{\item User assigns an employee to a record. \ldots \item If no do not send relationship exists between the service user and the employee, save the selected roster assignment record to the database\ldots} \addmulrow{Alternate Flows}{\item If a do not send relationship does exist between the service user and the employee, do not save the roster assignment and advise the user that the change has not been saved because of the do not send relationship\ldots} \addmulrow{Alternate Exception Flows}{\item User is not logged in. On routing to area, system routes User to user login screen and denies permission to resource until they have successfully authenticated using an account holding the appropriate role\ldots \item User authenticates but does not hold an appropriate role to access the index view. Display 403 forbidden exception message\ldots \item Attempt to save new record failed at database level. 
Redisplay current record and advise user that an error was encountered and suggest that they retry the effort\ldots} \end{usecase} \begin{usecase} \addheading{Title}{Display available employees for a roster} \addheading{Description}{When a roster does not have enough employees assigned, show available employees to the user and allow them to select one.} \addheading{Actor}{User} \addrow{Precondition}{User must be logged in and hold an appropriately authorised security role. The roster must not have sufficient employee resources attached} \addrow{Postcondition}{None.} \addmulrow{Basic Flows}{\item User clicks on the Find An Employee To Assign button on a roster record \ldots \newpage \item The controller method retrieves all employees and then filters out employee who have a do not send relationship to that service user\ldots \item From the residual array of employee objects, the controller method filters out employee who are not available at that time due to being assigned elsewhere\ldots \item From the residual array of employee objects, the controller method filters out employee who have recorded that they are unavailable at that time\ldots \item From the residual array of employee objects, the controller method filters out employee who have an absence recorded which overlaps with the roster time\ldots \item Finally, the residual array of available employee objects are displayed to the user and the user can select an individual employee record from the displayed list and click the assign button\ldots} \addmulrow{Alternate Flows}{\item There are no available employees and the user navigates away from the page\ldots} \addmulrow{Alternate Exception Flows}{\item User is not logged in. On routing to area, system routes User to user login screen and denies permission to resource until they have successfully authenticated using an account holding the appropriate role\ldots \item User authenticates but does not hold an appropriate role to access the view. 
Display 403 forbidden exception message\ldots \item Attempt to assign the selected employee to the roster record failed at database level. Redisplay current record and advise user that an error was encountered and suggest that they retry the effort\ldots} \end{usecase} \begin{usecase} \addheading{Title}{Get Mapping coordinates for address} \addheading{Description}{Use call to Google Maps API to retrieve geo coordinates for an address. Reusable factory method which accepts either an employee, service user or office object and geocodes the address for that object} \addheading{Actor}{User} \addrow{Precondition}{User must be logged in and hold an appropriately authorised security role. The object to be geocoded must have an address} \addrow{Postcondition}{The latitude and longitude of the object's address are saved to the database.} \addmulrow{Basic Flows}{\item User clicks on the Geocode button from the show screen of an employee, service user or office entity record\ldots \newpage \item The controller method sends an appropriate API call to the Google Maps location service which returns a valid set of the coordinates for the address\ldots} \addmulrow{Alternate Flows}{\item If Google Maps cannot locate the coordinates return and save an empty set of coordinate values as latitude and longitude\ldots} \addmulrow{Alternate Exception Flows}{\item User is not logged in. On routing to area, system routes User to user login screen and denies permission to resource until they have successfully authenticated using an account holding the appropriate role\ldots \item User authenticates but does not hold an appropriate role to access the geocoding view. Display 403 forbidden exception message\ldots \item Attempt to assign the newly obtained coordinates failed at database level. 
Redisplay current record and advise user that an error was encountered and suggest that they retry the effort\ldots} \end{usecase} \begin{usecase} \addheading{Title}{Assign user role(s) } \addheading{Description}{Assign role(s) to a user} \addheading{Actor}{Administrator} \addrow{Precondition}{User must be logged in and hold an appropriately authorised security role.} \addrow{Postcondition}{User role is updated against the appropriate user record.} \addmulrow{Basic Flows}{\item Administrator clicks on Assign User Role button from the Admin button shown only to Administrator role\ldots \newpage \item Administrator selects the required role and clicks update\ldots} \addmulrow{Alternate Exception Flows}{\item User is not logged in. On routing to area, system routes User to user login screen and denies permission to resource until they have successfully authenticated using an account holding the appropriate role\ldots \item User authenticates but does not hold an appropriate role to access the Administrator view. Display 403 forbidden exception message\ldots \item Attempt to assign the newly obtained coordinates failed at database level. Redisplay current record and advise user that an error was encountered and suggest that they retry the effort\ldots} \end{usecase} \begin{usecase} \addheading{Title}{Password reset} \addheading{Description}{Reset the password of a user} \addheading{Actor}{Administrator} \addrow{Precondition}{User must be logged in and hold an appropriately authorised security role.} \addrow{Postcondition}{New password value is updated against the appropriate user record.} \addmulrow{Basic Flows}{\item Administrator clicks on Reset Password button from the Admin button shown only to Administrator role\ldots \newpage \item Administrator selects the user whose password needs to be reset and then enters in a new password\ldots \item New password is saved to the database\ldots} \addmulrow{Alternate Exception Flows}{\item User is not logged in. 
On routing to area, system routes User to user login screen and denies permission to resource until they have successfully authenticated using an account holding the appropriate role\ldots \item User authenticates but does not hold an appropriate role to access the Administrator view. Display 403 forbidden exception message\ldots \item Attempt to save the new password failed at database level. Redisplay current record and advise user that an error was encountered and suggest that they retry the effort\ldots} \end{usecase} \end{samepage} \newpage \section{Design Specification} \subsection {System Design Document} \subsubsection{Hardware Architecture- Prototype Application} Considerations for the Prototype Application: It is envisaged that the prototype system will be hosted initially on the author's own personal web hosting for prototyping purposes and with a limited initial user base. This will incorporate a web front end, with a successful user login required to access most parts of the application. The system will utilise a MySQL back end database and the web application will interact with this database layer through Symfony/Doctrine repository classes (see below for more information). \subsubsection{Hardware Architecture- Production Application} Considerations for the Production Application: It is envisaged that the production system will incorporate a load balanced web application server with a backend database though a private cloud hosted system is considered to be ideal for the usage scenario. Further consideration of the hardware/ infrastructure environment will depend on the specific solution chosen as a result of the procurement process and is deferred here until this has been completed. 
\subsection {Software Development Document} \subsubsection{Design considerations} Given the requirement to rapidly complete and deliver a prototype application for user acceptance testing, the ability to deliver a basic working application in a short time frame becomes a paramount consideration. A second consideration is to ensure an appropriate separation of concerns between the front end application (the view that is presented to the user through the user interface), controller methods where the application business logic is applied (for example determining which employees are actually available to cover an unfilled roster) and lower level data level operations such as retrieving, adding or updating a record from/to the back end database. \subsubsection{Model View Controller} The \textbf{Model View Controller} approach as shown in Figure 3 below is a widely used application development approach which meets the above requirements and also helps to enforce a degree of rigour and structured design to the code base for the application. \begin{figure}[h!] \includegraphics{mvc.jpg} \caption{The Model View Controller} \label{fig:model view controller} \end{figure} In this approach there are three main components in the architectural pattern each of which serves a different purpose but works in conjunction with the other two components, while achieving the requirement for separation of concerns. \begin{itemize} \item \textbf{Model} represents an object with each entity's attendant data attributes \item \textbf{View} represents the presentation layer to the user of the data which is stored in the model \item \textbf{Controller} represents the connecting layer between the model and the view. It allows the data flow into the model object and keeps the presentation of the data in the view up to date when the actual data in the object changes. It can act on both the model and the view and keeps the view and the model separate from one another. 
\end{itemize} The discussion of choosing an appropriate project implementation methodology is continued in the Implementation plan later in this document. \newpage \section {Testing Specification} \subsection {Testing Approaches Used} A variety of different approaches were used in testing the application to ensure that all functionality was working as per the specification and also to confirm the usability of the application through end user acceptance testing by subject matter experts within the business. \subsubsection {Unit Testing} Firstly, a comprehensive series of automated tests were developed using PHPUnit, which is a third party automated testing tool which has been integrated into the Symfony framework. This allows for a test driven approach to development where the automated series of tests can be developed and run repetitively as changes to the application are made to test that all code modules continue to successfully pass the tests and have not been impacted by the most recent changes made. This is considerably more efficient and lower in risk than relying solely on human driven functional testing. Indeed, a cornerstone of Test Driven Development is to write the automated tests first, in the knowledge that these will initially fail to run successfully and then proceeds to write the required implementation code to pass the test cases \parencite{maxwilliams}. \parencite{janzen} note that the use of the term \textit{unit} is the smallest possible testable software component, outlining that there is debate about the actual definition of the term. Some experts argue that it should be an individual model entity while others prefer to focus on individual methods within a class as the unit. For the purposes of this project, the author has focused on grouping each unit test at an entity model test. 
Some other classes- for example the reference entity for employee absence reason and which was very basic in nature and whose structure continues to be closely based on the original boilerplate created by the Symfony templating system- have been excluded but otherwise detailed test classes in PHPUnit have been created for all of the core classes in the application. The following test classes were created \begin{itemize} \item OfficeTest (discussed in more detail below) \item CustomerTest \item DoNotSendTest \item RosterTest \item EmployeeTest \item ServiceUserTest \item ServiceUserAssignedEmployeeTest \end{itemize} The full code library for the tests is included in the Appendix One but it is useful to delve into one class to demonstrate the testing approach taken. This section discusses the test units implemented for the Office entity for which the seven individual test units have been created. Two are discussed in detail to outline the testing approach taken. \textbf{testShowSecondOfficeItemfromIndex}. this test tests that the View link to second item (id=2) in the index view is routes correctly. This link is publicly available therefore the client can be called without supplying a user context. \begin{verbatim} public function testShowSecondOfficeItemfromIndex() { $client = static::createClient(); $client->followRedirects(true); $crawler = $client->request('GET', '/office'); $link= $crawler->selectLink('View') ->eq(1) ->link(); $this->assertEquals( $link->getUri(),'http://localhost/office/2'); } \end{verbatim} \textbf{testEditFormSubmit}.This test uses the edit facility on the office entity. Because this link is not publicly available the client class here must be supplied with a valid user context otherwise the test will fail. The button crawlerNode below creates a new form object and because a web page can have more than one form, the particular one to be used is identified by the text value of its submit button text value, in this case the Edit link. 
Having called up the edit form, the test submits a form with valid data and then validates that following submission of the form that the context is back on the show page as per the redirect in the edit controller. \begin{verbatim} public function testEditFormSubmit() { // this link is only available to authenticated users with the ROLE_ADMIN role $client = static::createClient(array(), array( 'PHP_AUTH_USER' => 'testuser@gmail.com', 'PHP_AUTH_PW' => 'pa$$word', )); $client->followRedirects(true); $crawler = $client->request('GET','/office/1'); // follow the Edit link $crawler = $client->click($crawler->selectLink('Edit')->link()); $buttonCrawlerNode = $crawler->selectButton('Submit Changes'); //fill in new form values $form = $buttonCrawlerNode->form(array( 'appbundle_office[officeName]'=> 'Clane', 'appbundle_office[addressLine1]' => 'Ballinagappa Road', 'appbundle_office[addressLine2]' => 'Clane', 'appbundle_office[addressLine3]'=> 'Kildare', 'appbundle_office[eirCode]' => 'D05W987', 'appbundle_office[landlineTelephone]' => '04556555', 'appbundle_office[mobileTelephone]' => '12456', 'appbundle_office[isActive]' => true, 'appbundle_office[countyPostcode]' => 1001009)); // submit the form $client->submit($form); $client->followRedirects(true); // check that we're back on the show page $this->assertEquals( $client->getRequest()->getUri(),'http://localhost/office/1'); } } \end{verbatim} \subsubsection{Functional Testing report} A significant amount of functional testing was completed by the author throughout and at the conclusion of the build of the initial prototype application. It would be difficult to document the operation and outcome of every individual functional test completed in an application of this size, but a small representative sample is included here. \begin{usecase} \addheading{Title}{Create Service User with valid Data} \addheading{Description}{User can navigate to the index view area of Service Users and a single entity record of type Service User. 
On Clicking Add New Service User, the user should be presented with a blank form to complete the input of a new Service User Record. If the data is valid, once the user clicks submit, the page should route back to the show page for the newly created service user. If they used a valid address, the system should have retrieved coordinates from Google Maps, saved these as part of the create action and should now be displaying the address in the map pane} \addrow{Expected Outcome}{Service user is shown in the Index view with the required property values} \addrow{Actual Outcome}{As expected} \end{usecase} \begin{usecase} \addheading{Title}{Attempt to Create Service User with missing Data (tests x3)} \addheading{Description}{User can navigate to the index view area of Service Users and a single entity record of type Service User. On Clicking Add New Service User, the user should be presented with a blank form to complete the input of a new Service User Record.User submits form without completing mandatory fields- first name, last name and address line 1 .} \addrow{Expected Outcome}{If mandatory data fields are left blank , once the user clicks submit, the page should highlight these to the user giving them an opportunity to amend the form and resubmit.} \addrow{Actual Outcome}{As expected} \addrow{Notes}{Test repeated on other mandatory fields} \end{usecase} \begin{figure}[h!] \includegraphics{testwithemptyname.png} \caption{Test Create Service User with missing Data} \label{fig:Test Create Service User with missing Data} \end{figure} \begin{usecase} \addheading{Title}{Attempt to Create Service User with invalid date data} \addheading{Description}{User can navigate to the index view area of Service Users and a single entity record of type Service User. On Clicking Add New Service User, the user should be presented with a blank form to complete the input of a new Service User Record. 
User enters an invalid start date and end date} \addrow{Expected Outcome}{If mandatory data fields are left blank, once the user clicks submit, the page should highlight these to the user giving them an opportunity to amend the form and resubmit.} \addrow{Actual Outcome}{As expected} \addrow{Notes}{In this instance, the Jquery datepicker prevents an invalid date being possible} \end{usecase} \begin{usecase} \addheading{Title}{Add a Roster to the service user} \addheading{Description}{From the show page for a service user, the User can click the Add New Roster button. Then the user should be presented with a roster form to input a new Roster Record. User enters a valid start date, time and end date and end time for roster} \addrow{Expected Outcome}{Roster should be saved, user redirected to show service user page and able to see roster in the calendar view.} \addrow{Actual Outcome}{As expected} \end{usecase} \begin{usecase} \addheading{Title}{Add a Roster to the service user with empty date or time (tests x4)} \addheading{Description}{From the show page for a service user, the User can click the Add New Roster button. Then the user should be presented with a roster form to input a new Roster Record. User clicks create without selecting a valid start date, time and end date and end time for roster} \addrow{Expected Outcome}{The page should highlight the missing form data to the user and allow them to complete and resubmit the data} \addrow{Actual Outcome}{As expected} \addrow{Notes}{In this instance, the Jquery datepicker prevents an invalid date being possible but a blank date is possible and correctly handled by highlighting this to the user. Test was repeated successfully, sequentially with start time, end date and end time missing} \end{usecase} \begin{figure}[h!] 
\includegraphics{rosterwithemptydate.png} \caption{Test Create Roster with missing date } \label{fig:Test Create Roster with missing date } \end{figure} \begin{usecase} \addheading{Title}{Set the employee to being on leave and recheck availability} \addheading{Description}{Enter an employee absence and navigate to an unfilled roster during the period of the absence to verify that the absent employee is not shown as available} \addrow{Expected Outcome}{The find an employee to assign screen should exclude the absent employee from the list of available employees} \addrow{Actual Outcome}{As expected} \end{usecase} \begin{usecase} \addheading{Title}{Remove the absence for the employee and recheck availability} \addheading{Description}{Remove the absence and navigate to an unfilled roster during the period of the absence to verify that the previously absent employee is now shown as available} \addrow{Expected Outcome}{The find an employee to assign screen should now show the previously absent employee in the list of available employees} \addrow{Actual Outcome}{As expected} \end{usecase} \begin{usecase} \addheading{Title}{Create roster and leave as unassigned} \addheading{Description}{Create a new roster with a requirement for one employee resource and save} \addrow{Expected Outcome}{Roster should show a warning that not enough employees have been assigned} \addrow{Actual Outcome}{As expected} \end{usecase} \begin{usecase} \addheading{Title}{Assign roster to employee and recheck roster status} \addheading{Description}{Locate an unfilled roster and assign an available employee to the roster} \addrow{Expected Outcome}{Roster should now show a success message that the roster has sufficient employees, roster should no longer show the Find an Employee to Assign button} \addrow{Actual Outcome}{As expected} \end{usecase} \begin{usecase} \addheading{Title}{Mark Employee as not to be sent to a service user} \addheading{Description}{From the show page for a service user, the User can click the 
Mark as Do Not Send button. Then the user should be presented with a Do Not Send form to create a new association between a service user and an employee record. User clicks Mark as do not send.} \addrow{Expected Outcome}{ Association should be saved, user redirected to show service user page and able to see that the employee is now marked as do not send } \addrow{Actual Outcome}{As expected} \end{usecase} \begin{usecase} \addheading{Title}{Check Employee is not shown as available for a service user with whom they have a DO NOT SEND association} \addheading{Description}{From a roster which is not fully assigned for a service user, the User can click the FIND AN EMPLOYEE TO ASSIGN button. The user should be presented with a list of available employees excluding the Do Not Send employee} \addrow{Expected Outcome}{ The user should be presented with a list of available employees excluding the Do Not Send employee} \addrow{Actual Outcome}{As expected} \end{usecase} \begin{usecase} \addheading{Title}{Check Employee is restored as an available employee for a service user once their DO NOT SEND association is removed} \addheading{Description}{From the show page for a service user, the User can click the Remove button in the Do Not Send button beside the relevant employee. On navigating to an unassigned roster for that service user, the User can click the FIND AN EMPLOYEE TO ASSIGN button. 
The user should be presented with a list of available employees which now includes the employee who was previously missing while marked as Do Not Send } \addrow{Expected Outcome}{ The user should be presented with a list of all available employees including the employee who was missing in the previous step } \addrow{Actual Outcome}{As expected} \end{usecase} \begin{usecase} \addheading{Title}{Verify Google Maps geocoding} \addheading{Description}{Check coordinates against Google Maps and verify map} \addrow{Expected Outcome}{ On changing an address or creating a new record, the coordinates should be updated to reflect the correct location of the record which should be consistent with Google Maps} \addrow{Actual Outcome}{As expected} \addrow{Notes}{Some addresses in rural areas are not always recognised by Google Maps} \end{usecase} \begin{usecase} \addheading{Title}{Verify Google Maps distance calculation} \addheading{Description}{Check distance shown between available employees and a given service user against Google Maps and verify distance} \addrow{Expected Outcome}{ The distance shown for available employees should be consistent with that available in the directions area of Google Maps} \addrow{Actual Outcome}{As expected} \addrow{Notes}{Some addresses in rural areas are not always recognised by Google Maps} \end{usecase} \begin{usecase} \addheading{Title}{Create Service User with valid Data} \addheading{Description}{User can navigate to the index view area of Service Users and start to create a single entity record of type Service User. Clicking Add New Service User, the user should be presented with a blank form to complete the input of a new Service User Record. If the data is valid, once the user clicks submit, the page should route back to the show page for the newly created service user. 
If they used a valid address, the system should have retrieved coordinates from Google Maps, saved these as part of the create action and should now be displaying the address in the map pane} \addrow{Expected Outcome}{Service user is shown in the Index view with the required property values} \addrow{Actual Outcome}{As expected} \end{usecase} \subsubsection{User Acceptance Testing report} User acceptance testing is an integral and essential part of any software development project, but frequently this important area of the implementation of a new system does not get enough resources and attention, usually to the detriment of the final software deliverable. As \parencite{davis} notes: \begin{displayquote} Lack of user acceptance has long been an impediment to the success of new information systems. \end{displayquote} A number of recent research efforts note the ongoing pattern of failed software projects, with a number of contemporary analysts estimating the failure rate to be between 50\% \parencite{florentine} and 75\% \parencite{gartner}. It is therefore crucial to devote sufficient time and resources to this important activity and to have a robust and structured way to feed back the results of the testing into the overall development process so that all issues identified are resolved before the go-live. For this project three different internal IWA users completed detailed user acceptance testing, attempting to complete a wide range of functions in the application independently with minimal instruction. The three users (who are all IWA employees involved in the Rostering project in various ways) willingly participated and offered highly constructive and useful feedback to the project. 
\newline \textit{\textbf{Testing Team}} \begin{itemize} \item KL who works as an Assisted Living Coordinator \item MB who works as Payroll Manager \item MK who works as an ICT support analyst and web designer \end{itemize} \subsubsection{Overview} The three users' individual task data entry sheets are included in Appendix 3. In summary the users were required to complete a series of sequential tasks which involved both working with existing preset service users and employee records as well as creating new service user and employee records and working to create a roster and assign an employee record to it. The value of end user testing clearly became evident to the author as one reasonably serious bug was identified through the testing process having been completely overlooked in both the development stage's functional and also in the automated unit testing. This issue primarily arose from one of the end users traversing through the steps in a different sequence to the well travelled route the author had been using, revealing in the process a hitherto overlooked bug in the code which only came to light as a result of the detailed user acceptance testing. \subsubsection{Outcomes from User Acceptance Testing} \begin{usecase} \addheading{Title}{Bug: when editing a roster} \addheading{Description}{when editing a roster, the roster reverted from the previously associated service user and showed an empty service user drop down control.} \addrow{Resolution}{The RosterType form class had an incorrect code entry which worked fine for new entries but dropped the service user when editing. 
As a workaround, the code in this RosterType class was split into two FormType modules one which was called from the Create() context and one which was called from the Edit() context} \addrow{Status}{Resolved} \end{usecase} \newline \begin{usecase} \addheading{Title}{Issue: Confusion with Assigning Rosters} \addheading{Description}{Some users found it a little confusing navigating from the service user screen into a roster and then clicking Find An Employee to Assign and finally clicking Assign} \addrow{Resolution}{This approach was taken to avoid calling the Google Maps distance API in a scenario where the roster was already filled with sufficient assigned employees and there was no need to suggest additional employees} \addrow{Status}{Open/Under review} \end{usecase} \newline \begin{usecase} \addheading{Title}{Suggestion: Lock menu items at the top of the screen} \addheading{Description}{The static menu navigation items at the top of the screen did not stay at the top once you began to scroll down the page} \addrow{Resolution}{This was resolved using the navbar navbar-default navbar-fixed-top class in Bootstrap which pins a fixed navigation bar at the top of the screen} \addrow{Status}{Resolved} \end{usecase} \newline \begin{usecase} \addheading{Title}{Suggestion: Lock the Add new employee/service user button at the top of the index pages} \addheading{Description}{As soon as users scroll down the index pages for service users, employees and rosters, they had to scroll back up to the top to locate the Add New button} \addrow{Resolution}{This was resolved by creating a new .divide-nav class to include buttons and other navigation items which needed to be pinned at the top of the screen and also by adjusting the body css class so that it created padding at the top of the screen so that this second class was positioned below the navbar-fixed-top class} \addrow{Status}{Resolved} \end{usecase} \newline \subsubsection{Final comments} While each user made some minor 
suggestions in navigating through the different screens, in particular in relation to assigning an employee to a roster, they generally reported that the application was easy to use and straightforward to navigate around in. Some useful suggestions were made in relation to pinning the navigation menu at the top of the screen, pinning the add new item button at the top of the index views for rosters, service users and employees and these have already been implemented in an updated release of the application which will be retested by the users concerned. Crucially, a significant and hitherto unidentified bug was identified through the testing process, where on editing a roster, the association to the service user was removed and the user needed to re-select the appropriate service users in the user interface. The root cause of this bug has been identified and resolved. Finally, a common theme identified by all users was that the process to assign an employee to a roster took too many steps and needed to made more user friendly. This is now under review and options for improvement will be scoped further with the users concerned. \newpage \section {Implementation} \subsection {Implementation Plan} \subsubsection{Evaluation of Different Web Development Frameworks} The following different web development frameworks- each of which are suitable to produce dynamic, data driven web pages- were evaluated in the search to find a suitable platform on which to build the prototype application. All three platforms examined offer the ability for server side logic within an application. \begin{itemize} \item \textbf{ASP.NET MVC} is the Microsoft led development stack based on the .Net framework which is a widely used set of web development tools . It offers a strong integrated development environment (IDE) in Visual Studio which is freely available in the Community edition for non commercial use cases. 
It uses a combination of HTML, Razor, a proprietary Microsoft syntax used for developing views and C\# for controller and repository and other non view related classes. It supports an MVC led approach with scaffolding of views and controllers based on first creating a model with the relevant entity classes. It also integrates well with Azure web services, Microsoft's cloud stack where Visual Studio allows you to easily deploy web applications from a development environment. Asp.Net also includes the Entity Framework which essentially allows a code free, scaffolded approach to working with the database, driven entirely from the data model in your application. It also supports a test driven approach and integrates with a wide variety of third party components through a vibrant Nuget package marketplace. \item \textbf{Symfony} is a widely used web development framework developed by Fabien Potencier and distributed as open source tools which also supports the MVC approach to separation of concerns. It uses the very flexible Twig templating engine (which in the author's opinion is easier to use than Razor!) and allows users to adopt a flexible approach to selecting which Symfony components they choose to use in a given application. As Symfony has reached a wide degree of developer adoption, achieving 500 million downloads of the framework in 2016, it is widely supported with a significant choice of different add-on components (termed bundles in the Symfony domain) which are available to its developer community. Symfony uses PHP as its main code language and allows developers to easily scaffold views and controllers based on the creation of an entity model. It also supports a scaffolded approach to interacting with databases, similar in approach to Entity Framework through the use of command line tools which validate and update your database as required. 
Because Symfony uses the Doctrine ORM (Object Relational Mapping) which abstracts database layer operations from the rest of the application it supports MySQL and various other database platforms including SQLite, PostgreSQL and Microsoft SQL server. An interesting twist on the Symfony approach, which is essentially a full stack framework, is the Silex approach, which uses elements of the Symfony framework (and a related library called Pimple) without requiring the developer to fully utilise all of the Symfony components and instead beginning with a skeleton framework which is much smaller and lighter in size than the full Symfony framework. When using Silex, a developer still has the flexibility to add in other components from the larger Symfony stack as the requirements of their application grow. One aspect about Symfony to note here is that the author notes some commentary from the developer community online which would suggest that Symfony is an ideal approach for small to medium web applications but its highly component driven approach can result in some performance impacts when used in very large scale deployments. Symfony also supports a test driven approach, both through using the test classes built into the application framework but also through its tight integration with the PHPUnit testing framework. \item \textbf{Node.js} is a third very commonly used web development open source framework developed by Ryan Dahl which runs on a Javascript run time environment which allows for Javascript code to be executed server side. Again, Node.js also supports the MVC approach to separation of concerns. The innovations provided by Node.js allow Javascript, traditionally a client side scripting language, to be run server side and has become one of the core components driving the "Javascript everywhere'' development paradigm. 
Node.js supports a similar ORM driven approach to database operations resulting in a wide variety of relational and non relational databases being supported. Similar to ASP.Net and Symfony, Node.js has a number of different templating languages including Jade, blade, Mustache and Handlebars JS. It has also achieved wide acceptance among developers and has an extensive library available of reusable components, which serve to significantly reduce the requirement for developers to write their own code for commonly used operations. Node.js is perceived as an advanced level language with a significant learning curve to be overcome for developers migrating to the platform. An advantage of Node.js is that it does not create a lock on server resources and scales very well to large scale applications, working particularly well in scenarios which require real time processing. Corporate users of Node.js include LinkedIn, Microsoft, Netflix and Paypal. \end{itemize} \subsubsection{Choice of a Web Development Framework} Though it was very instructive to learn more about Node.js as part of the research for the project, the perceived difficulty level associated with this platform and the author's lack of previous exposure to it meant that it was not an ideal choice for a student project, particularly as there was a requirement for a rapid delivery of a prototype application for user testing. The author had some previous exposure to ASP.NET MVC and viewed this as viable choice for the application. However, using PHP/Symfony was a better fit given that the syllabus for the Higher Diploma in Computing that had been undertaken over the past 2 years had included significant exposure to PHP and the project/prototyping requirement afforded the author an opportunity to gain a familiarity with Symfony. 
It was also very helpful that the author's supervisor \parencite{smith} on this project had produced some excellent learning materials on the Symfony platform which greatly assisted in getting to grips with the new platform. A minor criticism of the Symfony platform is that while the Symfony documentation \parencite{weaver} is generally excellent and extremely comprehensive, it can be somewhat challenging for developers migrating to the platform for the first time to get over the initial learning curve and get up and running in developing their very first Symfony application. In fact, the first draft of the prototype application for this project was initially built on the Silex platform and as some of the requirements for implementation of a robust security model and the advantages of the scaffolding approach offered by Symfony came to light, it was determined to be useful to rebuild the entire prototype application on the full Symfony stack. It was most instructive to the author to note that having built the application on the Silex platform originally, the second build on the Symfony platform, which was completed from scratch without reuse of any of the Silex application, took approximately 30\% of the time originally taken to build the Silex application, due in no small part to the automated scaffolding approach which Symfony offers to automate the construction of CRUD controller functions as well as templated views. \subsubsection{Examples of some code efficiencies available in Symfony} The author was very impressed with the following Symfony capabilities which among others were noted to substantially save time in manual coding tasks: \textbf{Twig Templating }allows calling server side code in a format which is less verbose and easier to manage than embedding PHP code among the HTML code in your presentation view. A simple example is included here. 
Note also the concise way that the Annotation of related classes referred to below allows you to access attributes of a related class, in this case the mobile telephone property of the related employee class. \begin{verbatim} {% for assignedEmployee in assignedEmployees %} <td>{{ assignedEmployee.employeeId }}</td> <td>{{ assignedEmployee.employeeId.mobileTelephone }}</td> {% endfor %} \end{verbatim} The \textbf{Annotation capabilities} allow foreign keys to be defined in annotations to a given class or attribute which are automatically created in the underlying database, as follows: \begin{verbatim} /** * Roster * * @ORM\Table(name="roster") * @ORM\Entity(repositoryClass= "AppBundle\Repository\RosterRepository") */ class Roster { /** * @ORM\Column(type="integer") * @ORM\Id * @ORM\GeneratedValue(strategy="AUTO") */ private $id; /** * @ORM\ManyToOne(targetEntity= "AppBundle\Entity\ServiceUser") * @ORM\JoinColumn(name="serviceUserId", referencedColumnName="id") */ private $serviceUserId; \end{verbatim} The \textbf{Form Type/ Builder} classes allow an entity's data entry/update form to be automatically scaffolded based on the entities data attributes as defined in its model. 
For example rather than having to create an individual label and attribute for each property of the customer class, Symfony automatically scaffolds this using very simple syntax as follows: \begin{verbatim} public function buildForm(FormBuilderInterface $builder, array $options) { $builder->add('customerName')->add('addressLine1')-> add('addressLine2')->add('addressLine3')-> add('eirCode')->add('landlineTelephone')-> add('mobileTelephone')->add('isActive')-> add('mainContact')-> add('countyPostcode')->add('managingOffice'); } /** * {@inheritdoc} */ public function configureOptions(OptionsResolver $resolver) { $resolver->setDefaults(array( 'data_class' => 'AppBundle\Entity\Customer' )); } \end{verbatim} Then in the actual presentation view, the syntax required to build the form, in this case an Edit form for a customer record, is extremely concise and the scaffolding approach automatically updates the form as the model of the entity class changes. An example of calling the formbuilder from a view is shown in the Twig template as follows: \begin{verbatim} {{ form_start(edit_form) }} {{ form_widget(edit_form) }} <input type="submit" value="Edit"/> {{ form_end(edit_form) }} \end{verbatim} Where it's necessary to extend the logic of an individual entity's form type, Symfony provides an extensive library of FormTypes which can be used and overridden as required. For example, the EntityType class allows you to wire the contents of a dropdown control to the list of objects in another entity- or in this case, the ChoiceType allows you to define in your FormType class the contents of a drop down control together with both the values and labels for the dropdown control. An example of how you can extend the logic of the formtype is this instance where the form needed to include or not include a roster date field depending on whether the submitted form data included a value in the posted \_REQUEST attribute which represented the posted form data. 
In this scenario, the form builder needed to handle both a new record being created which routes to the generic form without a roster date prepopulated, and a new record being created from a calendar with the specific date in question already set by the user clicking +new in the on a specific date. This can be handled using logic in the formbuilder as follows: \begin{verbatim} public function buildForm(FormBuilderInterface $builder, array $options) { $timezone = new \DateTimeZone("Europe/Dublin"); if (isset ($_REQUEST['rosterDate'])) { $builder->add('serviceUserId', EntityType::class, array('class' => 'AppBundle:ServiceUser', 'data' => $options['serviceUser'])) ->add('rosterStartTime', DateTimeType::class, array('date_widget' => "single_text", 'time_widget' => "single_text", 'data' => new \DateTime($_REQUEST['rosterDate'], $timezone))) ->add('rosterEndTime', DateTimeType::class, array('date_widget' => "single_text", 'time_widget' => "single_text", 'data' => new \DateTime($_REQUEST['rosterDate'], $timezone))) ->add('numberResourcesNeeded', ChoiceType::class, array( 'choices' => array( '0' => '0', '1' => '1', '2' => '2', '3' => '3'), 'required' => true, 'placeholder' => 'Choose How Many Resources Are Needed', 'empty_data' => null )) ->add('customerId'); } else { $builder->add('serviceUserId', EntityType::class, array('class' => 'AppBundle:ServiceUser', 'data' => $options['serviceUser'])) ->add('rosterStartTime', DateTimeType::class, array('date_widget' => "single_text", 'time_widget' => "single_text", )) ->add('rosterEndTime', DateTimeType::class, array('date_widget' => "single_text", 'time_widget' => "single_text", )) ->add('numberResourcesNeeded', ChoiceType::class, array( 'choices' => array( '0' => '0', '1' => '1', '2' => '2', '3' => '3'), 'required' => true, 'placeholder' => 'Choose How Many Resources Are Needed', 'empty_data' => null )) ->add('customerId'); } } \end{verbatim} The \textbf{Dynamic Query} capabilities of Symfony via Doctrine ORM can obviate the 
need to create specific views at a database level of entity classes where a (presentation level) view needed to present de-normalised data to the user. As an example of this capability, where a \textbf{rosterassigned} entity exists which has a foreign key attribute which relates to a roster entity via a many to one entity relationship. This relationship allows for one or more employees to be have a roster assignment association to a single roster entity. Once this relationship exists, it's possible to dynamically query the entity to retrieve only assignments which have a relationship to a specific roster, as follows: \begin{verbatim}$assignedEmployees = $em-> getRepository('AppBundle:RosterAssignedEmployee') ->findByRosterId($roster->getId()); \end{verbatim} \ The \textbf{Query builder} feature offers a powerful and flexible range of default Doctrine ORM queries similar to the findByRosterId() one noted above. These cater for most scenarios but it is also possible to create your own queries which resemble an SQL type logic (there's also a Doctrine Query Language which resembles the T-SQL query approach to database queries even more closely, but the author did not find a need for this in this project) which allows you to create your own custom queries when the need arises. For example, the query shown below accepts a datetime object and a service user object as input parameters and checks to see if there are any rosters for the service user starting at any time in the 24 hour period of the datetime object provided. This query is used in a section of the Show view for a service user which displays a calendar control in a table control showing each as a line of table data. It then returns a true or false value to the calling method depending on whether or not a roster was found for that day. The logic of the custom getByDate() query is as follows: \begin{verbatim} // this doctrine filter is used by the for loop in the show // method of the service user controller. 
//for each checked date in the for loop it checks to see if //that service user has any rosters starting in that 24 hour period. public function getByDate(\DateTime $date, ServiceUser $serviceUser) { $from = new \DateTime($date->format("Y-m-d") . " 00:00:00"); $to = new \DateTime($date->format("Y-m-d") . " 23:59:59"); $qb = $this->createQueryBuilder("roster"); $qb->andWhere($qb->expr() ->between('roster.rosterStartTime', ':date_from', ':date_to')) ->andWhere("roster.serviceUserId= " . $serviceUser->getId()); $qb->setParameter('date_from', $from, \Doctrine\DBAL\Types\Type::DATETIME); $qb->setParameter('date_to', $to, \Doctrine\DBAL\Types\Type::DATETIME); $result = $qb->getQuery()->getResult(); return $result; } } \end{verbatim} \subsection { Entity Relationship Diagram} Based on the review of the use cases together with workshops with the subject matter experts provided within the Assisted Living Services team a map of the required classes for the application together with the properties and methods required for each class was created. This was then modelled into an initial Entity Relationship Diagram which is included on the next page. \includepdf[]{erd.pdf} \subsection { Code Organisation for Project} As the prototype application, though less full featured than the anticipated production version, utilised a reasonable number of entity (model) classes, views and controller methods, an organised an consistent approach to code organisation within the project was important. The following strategy was adopted. \begin {itemize} \item \textbf{Presentation views} were grouped, as is standard in under the Symfony project structure under the \textbackslash.app\textbackslash Resources\textbackslash views node of the project. Within this folder a subfolder for each entity was created with a standard naming convention used, based on the scaffolded names created by Symfony of index, show, edit and delete. 
\item \textbf{Custom Error template views} were grouped, as is standard in under the Symfony project structure under the \textbackslash.app\textbackslash Resources\textbackslash TwigBundle \textbackslash views node of the project. \item \textbf{Controllers classes} for each object were grouped in under the \textbackslash..src\textbackslash AppBundle\textbackslash Controller node of the project. Within this folder a separate controller file was created with a standard naming convention used e.g. EmployeeController, RosterController. Within each controller file, the method names mapped to the related views e.g. index(), new() etc. \item \textbf{Entity Models} classes for each object were grouped in under the \textbackslash ..src\textbackslash AppBundle\textbackslash Entity node of the project. Within this folder a separate controller file was created with a standard naming convention used e.g. Employee, Roster. As noted the Annotation facility within Symfony was used to implement database level requirements such as primary keys, foreign keys, foreign key constraints etc. \item \textbf{Form types classes} for each entity were grouped in under the \textbackslash ..src\textbackslash AppBundle\textbackslash Form node of the project. Within this folder a separate controller file was created with a standard naming convention used e.g. EmployeeType, RosterType. These were extended to implement linkages between different entity classes in the view- for example contents of drop down controls related to other entities. \item \textbf{Repository classes} for each entity were grouped in under the \textbackslash ..src\textbackslash AppBundle\textbackslash Repository node of the project. Within this folder a separate controller file was created with a standard naming convention used e.g. EmployeeRepository, RosterRepository. \item \textbf{Test classes} for each object were grouped in under the \textbackslash..src\textbackslash AppBundle\textbackslash Test node of the project. 
\item \textbf{Reusable or Factory Methods} which did not relate to a specific entity, for example the Google Maps integration classes were grouped in a specific folder for that type for example the Google Maps classes are under the \textbackslash..src\textbackslash AppBundle\textbackslash Mapping node of the project. \item \textbf{Third party components} were installed as default by Composer in the \textbackslash ..src\textbackslash Vendor node of the project. \end {itemize} \subsection {Detailed Implementation Plan} The following implementation plan was developed and progressively put into practice as work on the project progressed. See also the project diary in Appendix X which progressive build up and testing of the code base for the project. \begin {itemize} \item \textbf{Complete and confirm Entity Relational Design} including models and anticipated methods and views for each entity. \item \textbf{Complete and confirm Use cases } for different functionality of the application \item \textbf{Install Symfony Project} and supporting components e.g. Twig, Doctrine etc. \item \textbf{Create Entity model classes for main areas of application} e.g. Employee, Service User, Office, Roster, Customer etc. \item \textbf{Create supporting Entity model classes for other areas of application} e.g. ServiceUser Assignment, ServiceUserDoNotSend, EmployeeAbsence, RosterAssignment as well as lookup list classes e.g. AbsenceReason, County etc. 
\item \textbf{Annotate} data classes to create appropriate entity relationships and primary, foreign keys \item \textbf{Scaffold initial views and controllers} using Symfony/Doctrine commands to create initial default views and controllers with standard Create/Read/Update and Delete Classes \item \textbf{Test and Confirm resulting Database schema} and populate with sample data \item \textbf{Extend views and controllers }to show related entities where required for example Service User views to facilitate the user creating, viewing, editing and deleting related records e.g. Rosters, Assignments etc. and automatically associating these with the underlying service user from whose context the action was initiated. \item \textbf{Update Form Types} so that the forms correctly show related entities \item \textbf{Update Repository Classes} for custom queries as required \item \textbf{Google Maps integration} build and implement to controllers and views on various entities using this functionality. \item \textbf{Implement Find Available Employees} adapt subsidiary methods in the service user controller so that the system has the capacity to suggest available employees for a given roster slot, progressively completing validation on initial list of employees and filtering out do not sends, employees on absence, employees working elsewhere, employees who are not available at time of the roster. \item \textbf{Implement Security} Extend user class and create roles and annotate controller methods to restrict access where needed. \item \textbf{Calendar view of Rosters} Show monthly view of calendars using a third party framework such as Bootstrap Calendar or Full Calendar. \item \textbf{Calendar Add/Delete/Update }Update so that users can add/delete or update individual rosters when shown in the calendar view.
\item \textbf{First Pass testing} on above methods and logic \item \textbf{Automated Unit testing} on above methods and logic by creating unit test classes on the main code modules \item \textbf{Work on Application Visuals and aesthetics} to ensure a user friendly and consistent approach to styling throughout the site. \item \textbf{Bootstrap}. Investigate the capabilities of Bootstrap and their potential applicability to the site \item \textbf{Bootstrap}. Update the show view for the employee and service user to display a responsive multi column view which exposes a range of different entities related to service users and employees. \item \textbf{User Acceptance testing }. Prepare testing script sheets so that users can work through a series of administrative tasks which touch on both manipulating existing data and creating new records. \item \textbf{Address issues from User Acceptance testing }. Work through the results of the user acceptance testing and address any bugs, issues or user suggestions. \end {itemize} \subsection {Web Design Approach} \subsubsection {Step1 : Symfony templating} This project started by using the Symfony facility to generate boiler plate CRUD (Create/Read/Update/Delete) templates and associated controllers. This approach yields considerable time savings for developers, obviating the need for much repetitive code generation and it is clear that a command line bundle can complete this more accurately than a human developer. In fact, as noted earlier in the Design section, the author had the experience of initially creating a basic initial prototype version of the Scheduler application initially in Silex, the light-weight version using a bare-bones approach to the Symfony framework. In this iteration, the author initially created templates and controllers to reflect the requirements in the model for each entity.
Even when using the initially created template and controller for the first entity (service user) as a template to generate controllers and view templates for the other dozen or so entities, this was a very time consuming exercise, taking many days of development. At this point a decision was made to rebuild from scratch on the full Symfony framework to leverage the power of the Security model, skeleton templating system and various other features including the very flexible Dynamic Doctrine queries. The second iteration of the application was built using the Symfony skeleton bundle which allows for the use of a command line driven approach to generating boiler plate templates and controllers. This took considerably less time to accomplish, estimated by the author to have delivered time savings in the order of 70-80\% over the original and more manual Silex approach. This powerful approach uses the following command which generates the controller templates in a few seconds based on an already existing model class for the referenced entity is already in place. This command yields the following initial templated code which can be generated in a few seconds once the model is already in place. \begin{verbatim} $ php app/console generate:doctrine:crud --entity=AppBundle:Employee--format=annotation --with-write --no-interaction \end{verbatim} \textbf {\underline{Templated Controller methods}} When this routine completes the following five controller actions will be created \begin {itemize} \item \textbf{indexAction()} This action lists all records. It can be further extended to accept an incoming parameter, for example and when linked with an appropriate Doctrine query can quickly achieve filtering of the records that are returned. \item \textbf{showAction()}This action shows one given record identified by its primary key which is passed by reference to the underlying object. \item \textbf {newAction()} This action creates a new record. 
\item \textbf{editAction()} This action edits an existing record identified by its primary key which is passed by reference to the underlying object. \item \textbf{deleteAction()} This action allows the user to delete an existing record identified by its primary key which is passed by reference to the underlying object. This approach uses the FormBuilder class via calling a special createDeleteForm() method to build the Delete Form. \end {itemize} \textbf {\underline{View Templates}} The routine will also create the following template forms which are equivalent to the controller methods and are based on the standard templates forms in the Resources/skeleton/ directory. It is also useful to note that these skeleton templates can be customised to achieve templating of individual entities from your own modified skeleton template but this approach was not used for this project. \begin {itemize} \item \textbf{index} \item \textbf{show} \item \textbf{new} \item \textbf{edit} \end {itemize} Two points are worth noting here: Firstly, a sample of the new view is included below in order to demonstrate the power of the Symfony templating engine. \begin{verbatim} {% block body %} <h1>Absencereason creation</h1> {{ form_start(form) }} {{ form_widget(form) }} <input type="submit" value="Create"/> {{ form_end(form) }} <ul> <li> <a href="{{ path('absencereason_index') }}" >Back to the list</a> </li> </ul> {% endblock %} \end{verbatim} In a case where the entity in question had a large number of individual properties, the power of the above concise code becomes obvious where the \begin{verbatim}{{form_widget(form)}} \end{verbatim} command will automatically create form labels and input controls for each property, choosing form controls appropriate to the underlying datatype and automatically updating the template as the underlying model changes over time.
Secondly, it can be noted that there is one fewer template view than controller method because the deleteAction creates its form within the createDeleteForm method which means that a template view for the Delete Action is not needed. \subsubsection {Step2 : Base Template Inheritance} The work in Step 1 above yielded a set of basic templates and controllers which were powerful in terms of functionality but were unfortunately quite plain and uninteresting in their initial styling. This can be seen in the following screenshots of the initial list and show templates on the next page. \begin{figure}[h!] \includegraphics{absence_reasons_list.png} \caption{Absence Reasons List View} \label{fig:Absence Reasons List View} \end{figure} \begin{figure}[h!] \includegraphics{absence_reasons_show.png} \caption{Absence Reason Show View} \label{fig:Absence Reason Show View} \end{figure} In this second step of development, the author sought to leverage the power of the capacity within the Twig framework for template inheritance. Here, we can define a base template which contains all of the html code, navigation cues and references the style sheets (CSS) which are common to every page in the site. Once this is in place, it is easy to achieve a professional and consistent look and feel throughout the application which is vital to achieving user adoption. In a prototype or `proof of concept' application such as the one being developed in this project, these considerations are less critical than in a full production scenario nevertheless the author attempted to achieve a professional look, within the significant limitations of his visual design talents and styling knowledge, which would give the test users confidence in navigating through the application through the use of various colour coded cues. 
Even more importantly, it was vital to give the users a chance to contribute to the design and aesthetics of the site at this early stage so that these suggestions could be factored into the vendor discussions in building the final production version. A base template was developed with the following objectives \begin {itemize} \item \textbf{CSS classes}. Include the custom CSS classes which apply to site, these are used to style buttons and define the look and feel of tables, headings and text as well as the layout of navigation controls. \item \textbf{External Jquery and CSS libraries}. The application touched lightly upon the use of Jquery libraries for areas such as calendar controls when selecting dates and also leveraged the now ubiquitous Bootstrap framework in its layout design for more complex pages. The author had hoped to use an external Jquery library to show an interactive control to manage CRUD functions for viewing and updating of rosters but encountered some difficulties and reverted to a simpler dynamic HTML control to display calendars instead. \item \textbf{Navigation bar}. The application sought to use a series of icon driven navigation cues at the top of the screen which are present consistently as a user navigates through the site. \item \textbf{Navigation bar}. The application also sought to display the user's login information when a user had authenticated to the application together with a \textbf{logout} button or in the case where they were not yet logged in to display a \textbf{login} button which would link the user to the login authentication page. Each icon and button contained an embedded anchor tag to link the user to that section of the application. \item \textbf{Flash messages}. Finally, the application used some embedded and colour-coded flash messages which read from the global Session variable. 
In the case where a standard CRUD event completed an action successfully, a message was appended to the Notice array in the FlashBag() class which the session uses to store alerts. In the event that an error was encountered, for example a save event did not complete successfully, a message was appended to the Error array in the FlashBag() class. Then as the page was refreshed in the browser the base template template shows these to the user, colour-coded as red if the new message was an error or in green if the message was a notice. \end {itemize} A sample of the user interface that was achieved using this approach can be seen in the figure below \begin{figure}[h!] \includegraphics[scale=0.4]{samplewithbase.png} \caption{Sample page with underlying Base template} \label{fig:Sample page with underlying Base template} \end{figure} The approach for using an inherited base template was sufficient for most pages in the application. A reworked edit page, developed originally from the boilerplate code which was generated by the Symfony but now extended using the Base page is shown in screenshot in the figure below, this time for the customer entity. \begin{figure}[h!] \includegraphics[scale=0.4]{editpagewithbase.png} \caption{Edit page extending underlying Base template} \label{fig:Edit page extending underlying Base template} \end{figure} As the reader can determine from the above screenshot, the links have now been restyled using css stored in the button class which represents anchor links styled to look similar to buttons, closely modelled on the css used in the Bootstrap model but remaining as links and not buttons. A colour coded approach was used throughout the application as shown in the screenshot below. \begin{figure}[h!] \includegraphics[scale=0.52]{buttoncolours.png} \caption{Button colour scheme} \label{fig:Button colour scheme} \end{figure} \begin {itemize} \item \textbf{Blue buttons} were used to show additional detail on the selected records. 
\item \textbf{Green buttons} were used to indicate either a step to add a new record or the commit/save action following the editing of an existing record . \item \textbf{Grey buttons} were used to navigate backwards from the context of an individual record to a list which shows an index view of multiple records. \item \textbf{Amber buttons} were used to indicate a step to edit a record, implying changing the saved values. \item \textbf{Red buttons} were used to indicate the deletion of a record or the removal of an association between two records. \end {itemize} Where the entity in question contained address data, the underlying controller implemented the generic Mapping class which was created as part of the application to include the functionality to automatically geocode addresses using integration with the Google Maps API. The resulting coordinates were then stored individually as latitude and longitude and shown in the detail/show view of each entity which used an I-Frame using standard Google Maps code to display a map on a HTML page based on including two coordinates in the URL passed to the I-Frame. This approach to simply extend base pages and use the generic Symfony templates was deemed sufficient for most simple templates throughout the application. However, some more complex pages, such as the show view for the employee and service user entity needed to expose a number of a underlying entities which relate in a many to one relationship back to the parent entity. For example, the view action for an employee record needed to show a number of different panes of information to the user, which all needed to be accessible to user from the show page for that employee, as follows: \begin {itemize} \item \textbf{Employee details} displayed on the form and editable using the edit employee button which routes to an edit page. \item \textbf{Rosters for the employee}. This area was of central importance to viewing employee details. 
(A similar approach was taken to the Show page for a service user record.) In this case, as already noted above, it had been intended to use an available JQuery library such as Full Calendar to display a custom control which represented the dates in the roster calendar for an individual service user or employee in an Outlook or Google Calendar style planner view, with CRUD functionality available to the user in a drag and drop approach where a user could click on a time in the calendar and drag the control to the finish time of the appointment. However, some difficulties were encountered in this area, mainly related to the author's lack of experience with handling the JSON events that arise from such JQuery driven controls. As a compromise a simpler HTML driven calendar was implemented as an interim measure with the ability to display and edit (each item displayed acted as an anchor link to the underlying roster record) existing roster. Users also have the ability to click the add button on a given date. The approach that was taken was to include three separate calendar controls which automatically updated based on the date context when the page is rendered. The first one shows the current calendar month, then the next calendar month and finally the previous calendar month, all shown in a top down view, with the current calendar month. The user can edit roster items in the current and next month and can only view items in the previous month. \item \textbf{Association to other related records}. It made sense for the user to be able to add a related record associated with the employee, for example a planned temporary employee absence (period of leave) or a time in the week when the employee was not permanently noted as unavailable, for example due to family commitments, or a second job held. 
Similar requirements existed on the service user record where it was needed to be able to add an association to an employee as either permanently assigned to that service user, or alternatively to mark an employee as flagged not to be sent to that service user. \item \textbf{Google maps}. There was a requirement identified to display the Google maps representation of the address coordinates in a static map on the employee and service user view (Show) page. \end {itemize} These challenges called for a more structured approach to the user interface design than for most other pages in the site. \newline \begin{figure}[h!] \includegraphics[scale=0.60]{Bootstraptemplate.png} \caption{Bootstrap template approach} \label{fig:Bootstraptemplate} \end{figure} The final design involved extending the base template to include the top navigation ribbon and then to apportion the page to different sized panels left, centre and right. For this the Bootstrap CSS template, originally developed by the team at Twitter, but now open sourced and widely used across the modern web, was an ideal fit. This approach allows you to define the amount of screen real estate to be assigned to different controls on the page and the page is then automatically resized to allow for the different resolution of different devices, including allowing for a responsive rendering of the page on mobile devices. In practice, this may mean that a series of controls are rendered left to right on wide screen devices such as a laptop or large tablet with 1080 pixels available in the horizontal dimensions but will be rendered top to bottom on a narrower screen device such as a smartphone. The Bootstrap template uses a grid of 12 parts so a control mapped to the col-md-3 class would be allocated three twelfths (or one quarter) of the overall width of the screen.
The approach that was taken, bearing in mind the amounts of data to be displayed in each area was to allocate three screen units (out of 12) to the leftmost details pane, six units to the roster area control, and three units to the availability view for employees (which also includes the Google Maps control further down in the pane). A sample of the final Service User design using this methodology is shown on the next page. \center \includegraphics[scale=0.59,angle=270]{fullpagesu.png} \section {Evaluation} \subsection {User Feedback} \subsection {Further Enhancements} \section {Conclusions} \subsection {Review of material covered } \subsection {Further Development Opportunities} \subsection {Outstanding Issues/Continuous Improvement Plan} \section {Appendices} \subsection {Code Listing} \subsubsection {Database creation scripts} \subsubsection {Entity Classes} \subsubsection {Mapping Extension Classes} \subsubsection {Repository Classes} \subsubsection {Form Type} \subsubsection {Controller Classes} \subsubsection {Twig Templates} \subsubsection {List of Vendor tools used} \newpage\printbibliography[title={Bibliography}] \end{document}
{"hexsha": "2b4b7f2ebbba75998b1359b0a38ca6eefdf24671", "size": 136083, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "ProjectReport&Documentation/Scheduler Project Report.tex", "max_stars_repo_name": "johnjogrady/scheduler_symfony", "max_stars_repo_head_hexsha": "4ad07739fa72200c2b053ce110e93bb42e6008a4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ProjectReport&Documentation/Scheduler Project Report.tex", "max_issues_repo_name": "johnjogrady/scheduler_symfony", "max_issues_repo_head_hexsha": "4ad07739fa72200c2b053ce110e93bb42e6008a4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ProjectReport&Documentation/Scheduler Project Report.tex", "max_forks_repo_name": "johnjogrady/scheduler_symfony", "max_forks_repo_head_hexsha": "4ad07739fa72200c2b053ce110e93bb42e6008a4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 101.1769516729, "max_line_length": 1571, "alphanum_fraction": 0.7825959157, "num_tokens": 27430}
[STATEMENT] lemma (in weak_lower_semilattice) finite_inf_closed [simp]: "[| finite A; A \<subseteq> carrier L; A \<noteq> {} |] ==> \<Sqinter>A \<in> carrier L" [PROOF STATE] proof (prove) goal (1 subgoal): 1. \<lbrakk>finite A; A \<subseteq> carrier L; A \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>A \<in> carrier L [PROOF STEP] proof (induct set: finite) [PROOF STATE] proof (state) goal (2 subgoals): 1. \<lbrakk>{} \<subseteq> carrier L; {} \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>{} \<in> carrier L 2. \<And>x F. \<lbrakk>finite F; x \<notin> F; \<lbrakk>F \<subseteq> carrier L; F \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>F \<in> carrier L; insert x F \<subseteq> carrier L; insert x F \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>insert x F \<in> carrier L [PROOF STEP] case empty [PROOF STATE] proof (state) this: {} \<subseteq> carrier L {} \<noteq> {} goal (2 subgoals): 1. \<lbrakk>{} \<subseteq> carrier L; {} \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>{} \<in> carrier L 2. \<And>x F. \<lbrakk>finite F; x \<notin> F; \<lbrakk>F \<subseteq> carrier L; F \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>F \<in> carrier L; insert x F \<subseteq> carrier L; insert x F \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>insert x F \<in> carrier L [PROOF STEP] then [PROOF STATE] proof (chain) picking this: {} \<subseteq> carrier L {} \<noteq> {} [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: {} \<subseteq> carrier L {} \<noteq> {} goal (1 subgoal): 1. \<Sqinter>{} \<in> carrier L [PROOF STEP] by simp [PROOF STATE] proof (state) this: \<Sqinter>{} \<in> carrier L goal (1 subgoal): 1. \<And>x F. \<lbrakk>finite F; x \<notin> F; \<lbrakk>F \<subseteq> carrier L; F \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>F \<in> carrier L; insert x F \<subseteq> carrier L; insert x F \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>insert x F \<in> carrier L [PROOF STEP] next [PROOF STATE] proof (state) goal (1 subgoal): 1. \<And>x F. 
\<lbrakk>finite F; x \<notin> F; \<lbrakk>F \<subseteq> carrier L; F \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>F \<in> carrier L; insert x F \<subseteq> carrier L; insert x F \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>insert x F \<in> carrier L [PROOF STEP] case insert [PROOF STATE] proof (state) this: finite F_ x_ \<notin> F_ \<lbrakk>F_ \<subseteq> carrier L; F_ \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>F_ \<in> carrier L insert x_ F_ \<subseteq> carrier L insert x_ F_ \<noteq> {} goal (1 subgoal): 1. \<And>x F. \<lbrakk>finite F; x \<notin> F; \<lbrakk>F \<subseteq> carrier L; F \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>F \<in> carrier L; insert x F \<subseteq> carrier L; insert x F \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>insert x F \<in> carrier L [PROOF STEP] then [PROOF STATE] proof (chain) picking this: finite F_ x_ \<notin> F_ \<lbrakk>F_ \<subseteq> carrier L; F_ \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>F_ \<in> carrier L insert x_ F_ \<subseteq> carrier L insert x_ F_ \<noteq> {} [PROOF STEP] show ?case [PROOF STATE] proof (prove) using this: finite F_ x_ \<notin> F_ \<lbrakk>F_ \<subseteq> carrier L; F_ \<noteq> {}\<rbrakk> \<Longrightarrow> \<Sqinter>F_ \<in> carrier L insert x_ F_ \<subseteq> carrier L insert x_ F_ \<noteq> {} goal (1 subgoal): 1. \<Sqinter>insert x_ F_ \<in> carrier L [PROOF STEP] by (rule_tac finite_inf_insertI) (simp_all) [PROOF STATE] proof (state) this: \<Sqinter>insert x_ F_ \<in> carrier L goal: No subgoals! [PROOF STEP] qed
{"llama_tokens": 1453, "file": null, "length": 11}
#ifndef INCLUDE_OKVIS_KEYFRAME_FOR_LOOP_DETECTION_HPP_
#define INCLUDE_OKVIS_KEYFRAME_FOR_LOOP_DETECTION_HPP_

#include <vector>

#include <Eigen/Core>
#include <Eigen/StdVector>

#include <okvis/MultiFrame.hpp>
#include <okvis/assert_macros.hpp>
#include <okvis/class_macros.hpp>
#include <loop_closure/InverseTransformMultiplyJacobian.hpp>

namespace swift_vio {
/// Kind of a relative-pose constraint between two keyframes.
enum class PoseConstraintType {
  Odometry = 0,     ///< Constraint derived from sequential odometry.
  LoopClosure = 1,  ///< Constraint derived from a detected loop closure.
};

/// A relative-pose constraint between a neighbor keyframe and a reference
/// keyframe, in the form kept inside the loop-closure keyframe database.
class NeighborConstraintInDatabase {
 public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  NeighborConstraintInDatabase();
  NeighborConstraintInDatabase(uint64_t id, okvis::Time stamp,
                               const okvis::kinematics::Transformation& T_BnBr,
                               PoseConstraintType type);
  ~NeighborConstraintInDatabase();

  uint64_t id_;        ///< Id of the neighbor keyframe.
  okvis::Time stamp_;  ///< Timestamp of the neighbor keyframe.
  // Br is a body frame for reference, B body frame of this neighbor.
  okvis::kinematics::Transformation T_BBr_;
  PoseConstraintType type_;  ///< Odometry or loop-closure constraint.
  // Square-root info L' of the inverse of the covariance of the between-factor
  // unwhitened/raw error due to measurement noise: LL' = \Lambda = inv(cov).
  // It depends on the definitions of the between factor and of the errors of
  // the measurement, e.g., gtsam::BetweenFactor<Pose3>==log(T_z^{-1}T_x^{-1}T_y)
  // and error of T_z is defined by T_z = Pose3::Retraction(\hat{T}_z, \delta).
  Eigen::Matrix<double, 6, 6> squareRootInfo_;
};

/// A neighbor pose constraint as exchanged between the VIO frontend and the
/// loop-closure module, carrying the extra data needed to weight the
/// constraint (pose and covariances of the neighbor keyframe).
class NeighborConstraintMessage {
 public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  NeighborConstraintMessage();
  /**
   * @brief NeighborConstraintMessage
   * @param id id of this neighbor keyframe.
   * @param stamp timestamp of this neighbor keyframe.
   * @param T_BnBr Bn the body frame associated with this neighbor,
   *     Br the body frame associated with the reference frame of this neighbor.
   * @param T_WB pose of this neighbor.
   * @param type constraint type, odometry by default.
   */
  NeighborConstraintMessage(
      uint64_t id, okvis::Time stamp,
      const okvis::kinematics::Transformation& T_BnBr,
      const okvis::kinematics::Transformation& T_WB,
      PoseConstraintType type = PoseConstraintType::Odometry);
  ~NeighborConstraintMessage();

  /**
   * @deprecated
   * @brief compute the covariance of error in $T_BnBr$ given the covariance of
   * errors in $T_WBr$ and $T_WBn$, where $T_BnBr = T_WBn^{-1} T_WBr$.
   * The errors (perturbations) of $T_WBr$, $T_WBn$, and $T_BnBr$ are defined by
   * okvis::Transformation::oplus and ominus.
   * @param T_WBr pose of the reference keyframe.
   * @param cov_T_WBr covariance of the error in $T_WBr$.
   * @param[out] cov_T_BnBr covariance of the error in $T_BnBr$.
   */
  void computeRelativePoseCovariance(
      const okvis::kinematics::Transformation& T_WBr,
      const Eigen::Matrix<double, 6, 6>& cov_T_WBr,
      Eigen::Matrix<double, 6, 6>* cov_T_BnBr);

  NeighborConstraintInDatabase core_;  ///< The database form of this constraint.

  // Variables used for computing the weighting covariance for the constraint
  // in the case of an odometry pose constraint. In the case of a loop
  // constraint, the covariance is computed inside the PnP solver.
  okvis::kinematics::Transformation T_WB_;  ///< Pose of this neighbor keyframe.
  // cov of T_WB
  Eigen::Matrix<double, 6, 6> cov_T_WB_;
  // cov(T_WBr, T_WB)
  Eigen::Matrix<double, 6, 6> cov_T_WBr_T_WB_;
};

/**
 * @brief The KeyframeInDatabase class stores keyframe info in the loop-closure
 * keyframe database.
 */
class KeyframeInDatabase {
 public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  DELETE_COPY_CONSTRUCTORS(KeyframeInDatabase);

  KeyframeInDatabase();

  KeyframeInDatabase(size_t dbowId, uint64_t vioId, okvis::Time stamp,
                     const okvis::kinematics::Transformation& vio_T_WB,
                     const Eigen::Matrix<double, 6, 6>& cov_T_WB);

  /// Copy the core of each odometry constraint message into the internal
  /// constraint list. squareRootInfo_ of each entry is filled in later via
  /// setSquareRootInfo()/setSquareRootInfoFromCovariance().
  void setOdometryConstraints(
      const std::vector<std::shared_ptr<NeighborConstraintMessage>>&
          odometryConstraintList) {
    constraintList_.reserve(odometryConstraintList.size());
    for (auto constraint : odometryConstraintList) {
      std::shared_ptr<NeighborConstraintInDatabase> dbConstraint(
          new NeighborConstraintInDatabase(constraint->core_));
      // dbConstraint->squareRootInfo_ will be set later on.
      constraintList_.push_back(dbConstraint);
    }
  }

  /// For every keypoint, return the index of its landmark in the landmark
  /// list; keypoints without an associated landmark keep index 0.
  inline std::vector<size_t> convertToLandmarkIndices() const {
    std::vector<size_t> landmarkIdForKeypoints(keypointList_.size(), 0u);
    size_t lmId = 0u;
    for (auto index : keypointIndexForLandmarkList_) {
      landmarkIdForKeypoints[index] = lmId;
      ++lmId;
    }
    return landmarkIdForKeypoints;
  }

  const std::vector<std::shared_ptr<NeighborConstraintInDatabase>>&
      constraintList() const {
    return constraintList_;
  }

  void addLoopConstraint(
      std::shared_ptr<NeighborConstraintInDatabase>& loopConstraint) {
    loopConstraintList_.push_back(loopConstraint);
  }

  const cv::Mat frontendDescriptors() const {
    return frontendDescriptors_;
  }

  /// Descriptors restricted to the keypoints that have landmarks.
  const cv::Mat frontendDescriptorsWithLandmarks() const {
    return okvis::selectDescriptors(frontendDescriptors_,
                                    keypointIndexForLandmarkList_);
  }

  const std::vector<Eigen::Vector4d, Eigen::aligned_allocator<Eigen::Vector4d>>&
      landmarkPositionList() const {
    return landmarkPositionList_;
  }

  const std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>>&
      keypointList() const {
    return keypointList_;
  }

  /// Set the square-root information of the j-th odometry constraint.
  void setSquareRootInfo(size_t j,
                         const Eigen::Matrix<double, 6, 6>& squareRootInfo) {
    constraintList_.at(j)->squareRootInfo_ = squareRootInfo;
  }

  /// Set the square-root information of the j-th odometry constraint from the
  /// covariance of the raw (unwhitened) error.
  void setSquareRootInfoFromCovariance(size_t j,
                                       const Eigen::Matrix<double, 6, 6>&
                                           covRawError);

  void setFrontendDescriptors(cv::Mat frontendDescriptors) {
    frontendDescriptors_ = frontendDescriptors;
  }

  void setLandmarkPositionList(
      const std::vector<Eigen::Vector4d,
                        Eigen::aligned_allocator<Eigen::Vector4d>>&
          landmarkPositionList) {
    landmarkPositionList_ = landmarkPositionList;
  }

  /**
   * @brief setKeypointIndexForLandmarkList
   * @param kpIndexForLandmarks each entry is the index in the keypoint list of
   * the keypoint corresponding to every landmark in the landmark list.
   */
  void setKeypointIndexForLandmarkList(const std::vector<int>& kpIndexForLandmarks) {
    keypointIndexForLandmarkList_ = kpIndexForLandmarks;
  }

  void setKeypointList(
      const std::vector<Eigen::Vector3f,
                        Eigen::aligned_allocator<Eigen::Vector3f>>&
          keypointList) {
    keypointList_ = keypointList;
  }

 public:
  size_t dbowId_; ///< id used in DBoW vocabulary. Determined by the size of the KeyframeInDatabase list.
  uint64_t id_;   ///< frontend keyframe id.
  okvis::Time stamp_;  ///< Timestamp of this keyframe.

  const okvis::kinematics::Transformation vio_T_WB_; ///< original vio estimated T_WB.
  const Eigen::Matrix<double, 6, 6> cov_vio_T_WB_; ///< cov of $[\delta p, \delta \theta]$ provided by VIO.

 private:
  // If we do not construct the pose graph solver from scratch once in a
  // while as in VINS-Mono, then we do not need the constraint list.
  std::vector<std::shared_ptr<NeighborConstraintInDatabase>>
      constraintList_;  ///< odometry constraints.
  std::vector<std::shared_ptr<NeighborConstraintInDatabase>>
      loopConstraintList_;  ///< loop constraints.

  // The below variables are used to find correspondences between a loop frame
  // and a query frame and to estimate the relative pose.
  cv::Mat frontendDescriptors_; ///< descriptors for every keypoint from VIO frontend. #columns is the descriptor size, #rows is for landmarks.
  std::vector<Eigen::Vector4d, Eigen::aligned_allocator<Eigen::Vector4d>>
      landmarkPositionList_; ///< landmark positions expressed in the body frame of this keyframe passed in by a VIO estimator.
  std::vector<int> keypointIndexForLandmarkList_; ///< index in keypointList of keypoints associated with landmarks.
  std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f>>
      keypointList_; ///< locations and size of every keypoint in left camera.
};

/**
 * @brief The LoopQueryKeyframeMessage class
 * Only one frame out of nframe will be used for querying the keyframe database
 * and computing the loop constraint. As a result, from the NCameraSystem we
 * only need the camera intrinsic parameters, but not the extrinsic parameters.
 * We may reset the NCameraSystem for nframe_ when intrinsic parameters are
 * estimated online by the estimator. This should not disturb the frontend
 * feature matching which locks the estimator in matching features.
 */
class LoopQueryKeyframeMessage {
 public:
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  DELETE_COPY_CONSTRUCTORS(LoopQueryKeyframeMessage);

  LoopQueryKeyframeMessage();
  LoopQueryKeyframeMessage(uint64_t id, okvis::Time stamp,
                           const okvis::kinematics::Transformation& T_WB,
                           std::shared_ptr<const okvis::MultiFrame> multiframe);
  ~LoopQueryKeyframeMessage();

  /// Convert this query message into the database representation.
  /// NOTE(review): the name contains a typo ("Datebase") but is kept for API
  /// compatibility with existing callers.
  std::shared_ptr<KeyframeInDatabase> toKeyframeInDatebase(size_t dbowId) const {
    std::shared_ptr<KeyframeInDatabase> keyframeInDB(
        new KeyframeInDatabase(dbowId, id_, stamp_, T_WB_, cov_T_WB_));
    keyframeInDB->setOdometryConstraints(odometryConstraintList_);
    keyframeInDB->setLandmarkPositionList(landmarkPositionList_);
    keyframeInDB->setFrontendDescriptors(getFrontendDescriptors());
    keyframeInDB->setKeypointList(getFrontendKeypoints());
    keyframeInDB->setKeypointIndexForLandmarkList(keypointIndexForLandmarkList_);
    return keyframeInDB;
  }

  /// A covariance whose leading diagonal entry is effectively zero is treated
  /// as "not provided" (see setZeroCovariance()).
  bool hasValidCovariance() const {
    return cov_T_WB_(0, 0) > 1e-7;
  }

  const Eigen::Matrix<double, 6, 6>& getCovariance() const {
    return cov_T_WB_;
  }

  void setCovariance(const Eigen::Matrix<double, 6, 6>& cov_T_WB) {
    cov_T_WB_ = cov_T_WB;
  }

  void setZeroCovariance() {
    cov_T_WB_.setZero();
  }

  /**
   * @brief setNFrame copies the essential parts from the frontend NFrame to
   * avoid simultaneous read/write by the VIO estimator and the loop-closure
   * module.
   */
  void setNFrame(std::shared_ptr<const okvis::MultiFrame> multiframe) {
    // shallow copy camera geometry for each camera.
    std::shared_ptr<okvis::MultiFrame> nframe(new okvis::MultiFrame(
        multiframe->cameraSystem(), multiframe->timestamp(), multiframe->id()));
    // shallow copy one image.
    nframe->setImage(kQueryCameraIndex, multiframe->image(kQueryCameraIndex));
    nframe->setTimestamp(multiframe->timestamp());
    nframe->setTimestamp(kQueryCameraIndex,
                         multiframe->timestamp(kQueryCameraIndex));
    nframe->resetKeypoints(kQueryCameraIndex,
                           multiframe->getKeypoints(kQueryCameraIndex));
    cv::Mat descriptors;
    cv::Mat rawDescriptors = multiframe->getDescriptors(kQueryCameraIndex);
    // With motion blurred images, rawDescriptors may be empty.
    rawDescriptors.copyTo(descriptors);
    nframe->resetDescriptors(kQueryCameraIndex, descriptors);
    nframe_ = nframe;
  }

  std::shared_ptr<const okvis::MultiFrame> NFrame() const {
    return nframe_;
  }

  const cv::Mat queryImage() const {
    return nframe_->image(kQueryCameraIndex);
  }

  std::shared_ptr<const okvis::cameras::CameraBase> cameraGeometry() const {
    return nframe_->geometry(kQueryCameraIndex);
  }

  const std::vector<std::shared_ptr<NeighborConstraintMessage>>&
      odometryConstraintList() const {
    return odometryConstraintList_;
  }

  const std::vector<Eigen::Vector4d, Eigen::aligned_allocator<Eigen::Vector4d>>&
      landmarkPositionList() const {
    return landmarkPositionList_;
  }

  cv::Mat getFrontendDescriptors() const {
    // deep copy is unneeded because nframe's descriptors are newly allocated.
    return nframe_->getDescriptors(kQueryCameraIndex);
  }

  std::vector<okvis::KeypointReduced,
              Eigen::aligned_allocator<okvis::KeypointReduced>>
      getFrontendKeypoints() const {
    return nframe_->copyKeypoints(kQueryCameraIndex);
  }

  /// \brief Get all descriptors for a view in nframe.
  cv::Mat getDescriptors() const {
    return nframe_->getDescriptors(kQueryCameraIndex);
  }

  std::vector<std::shared_ptr<NeighborConstraintMessage>>&
      odometryConstraintListMutable() {
    return odometryConstraintList_;
  }

  const std::vector<int>& keypointIndexForLandmarkList() const {
    return keypointIndexForLandmarkList_;
  }

  std::vector<int>& keypointIndexForLandmarkListMutable() {
    return keypointIndexForLandmarkList_;
  }

  std::vector<Eigen::Vector4d, Eigen::aligned_allocator<Eigen::Vector4d>>&
      landmarkPositionListMutable() {
    return landmarkPositionList_;
  }

  void setLandmarkPositionList(
      const std::vector<Eigen::Vector4d,
                        Eigen::aligned_allocator<Eigen::Vector4d>>&
          landmarkPositionList) {
    landmarkPositionList_ = landmarkPositionList;
  }

  uint64_t id_;         ///< Frontend keyframe id.
  okvis::Time stamp_;   ///< Timestamp of this keyframe.
  okvis::kinematics::Transformation T_WB_;  ///< Pose of this keyframe.

  const static size_t kQueryCameraIndex = 0u;  ///< Camera used for queries.

 private:
  Eigen::Matrix<double, 6, 6> cov_T_WB_; ///< cov of $[\delta p, \delta \theta]$. An estimator
                                         ///< that does not provide covariance for poses should zero
                                         ///< cov_T_WB_.

  /// @warn Do not hold on to nframe_ which has many images.
  std::shared_ptr<const okvis::MultiFrame> nframe_; ///< nframe contains the list of keypoints for each subframe, and the camera system info.

  std::vector<std::shared_ptr<NeighborConstraintMessage>>
      odometryConstraintList_; ///< The most adjacent neighbor is at the front.

  std::vector<int> keypointIndexForLandmarkList_; ///< Index of the keypoints with landmark positions.
  std::vector<Eigen::Vector4d, Eigen::aligned_allocator<Eigen::Vector4d>>
      landmarkPositionList_; ///< landmark positions expressed in the body frame of this keyframe.
}; // LoopQueryKeyframeMessage

/// Result of a pose-graph optimization for one keyframe.
struct PgoResult {
  EIGEN_MAKE_ALIGNED_OPERATOR_NEW
  okvis::Time stamp_;                       ///< Keyframe timestamp.
  okvis::kinematics::Transformation T_WB_;  ///< Optimized pose.
};
}  // namespace swift_vio
#endif  // INCLUDE_OKVIS_KEYFRAME_FOR_LOOP_DETECTION_HPP_
{"hexsha": "73d801d4becbe47fd95780384198909e6a7e673f", "size": 13873, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "loop_closure/include/loop_closure/KeyframeForLoopDetection.hpp", "max_stars_repo_name": "wbl1997/okvis", "max_stars_repo_head_hexsha": "65e30d6ab25380d65c96c665485148e2ab55e93e", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2022-03-26T15:31:53.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-26T15:31:53.000Z", "max_issues_repo_path": "loop_closure/include/loop_closure/KeyframeForLoopDetection.hpp", "max_issues_repo_name": "wbl1997/okvis", "max_issues_repo_head_hexsha": "65e30d6ab25380d65c96c665485148e2ab55e93e", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "loop_closure/include/loop_closure/KeyframeForLoopDetection.hpp", "max_forks_repo_name": "wbl1997/okvis", "max_forks_repo_head_hexsha": "65e30d6ab25380d65c96c665485148e2ab55e93e", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": 2.0, "max_forks_repo_forks_event_min_datetime": "2021-08-01T16:49:17.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-14T09:00:03.000Z", "avg_line_length": 36.7984084881, "max_line_length": 143, "alphanum_fraction": 0.7270237151, "num_tokens": 3512}
"""Test the write database. """ import json import pytest import os import time import numpy as np from vectorai.models.deployed import ViText2Vec from vectorai.write import ViWriteClient from vectorai.errors import APIError, MissingFieldError, MissingFieldWarning, CollectionNameError from vectorai.client import ViClient from .utils import TempClientWithDocs @pytest.mark.use_client def test_multiprocess_insert(test_client, test_collection_name): NUM_OF_DOCUMENTS_INSERTED = 10 if test_collection_name in test_client.list_collections(): test_client.delete_collection(test_collection_name) time.sleep(10) documents = test_client.create_sample_documents(NUM_OF_DOCUMENTS_INSERTED) results = test_client.insert_documents(test_collection_name, documents, workers=5, overwrite=False) time.sleep(10) assert len(results['failed_document_ids']) == 0 assert test_collection_name in test_client.list_collections() assert test_client.collection_stats(test_collection_name)['number_of_documents'] == NUM_OF_DOCUMENTS_INSERTED test_client.delete_collection(test_collection_name) @pytest.mark.use_client def test_multiprocess_insert_with_error(test_client, test_collection_name): NUM_OF_DOCUMENTS_INSERTED = 100 if test_collection_name in test_client.list_collections(): test_client.delete_collection(test_collection_name) documents = test_client.create_sample_documents(NUM_OF_DOCUMENTS_INSERTED) documents.append({ '_id': '9993', 'color': np.nan }) # This should result in 1 failure results = test_client.insert_documents(test_collection_name, documents, workers=5, overwrite=False) time.sleep(10) assert len(results['failed_document_ids']) == 1 assert test_collection_name in test_client.list_collections() assert test_client.collection_stats(test_collection_name)['number_of_documents'] > 0 test_client.delete_collection(test_collection_name) @pytest.mark.use_client def test_multiprocess_insert_with_error_with_overwrite(test_client, test_collection_name): NUM_OF_DOCUMENTS_INSERTED = 100 if 
test_collection_name in test_client.list_collections(): test_client.delete_collection(test_collection_name) time.sleep(5) documents = test_client.create_sample_documents(NUM_OF_DOCUMENTS_INSERTED) documents.append({ '_id': '9993', 'color': np.nan }) # This should result in 1 failure results = test_client.insert_documents(test_collection_name, documents, workers=5, overwrite=True) time.sleep(10) assert len(results['failed_document_ids']) == 1 assert test_collection_name in test_client.list_collections() assert test_client.collection_stats(test_collection_name)['number_of_documents'] > 0 test_client.delete_collection(test_collection_name) @pytest.mark.use_client def test_multiprocess_with_overwrite(test_client, test_collection_name): if test_collection_name in test_client.list_collections(): test_client.delete_collection(test_collection_name) time.sleep(5) NUM_OF_DOCS = 10 docs = test_client.create_sample_documents(NUM_OF_DOCS) test_client.insert_documents(test_collection_name, docs[0:5], workers=1, overwrite=False) response = test_client.insert_documents(test_collection_name, docs[3:5], workers=1, overwrite=True) assert response['inserted_successfully'] == 2 @pytest.mark.use_client def test_multiprocess_with_overwrite_insert(test_client, test_collection_name): if test_collection_name in test_client.list_collections(): test_client.delete_collection(test_collection_name) time.sleep(5) NUM_OF_DOCS = 10 docs = test_client.create_sample_documents(NUM_OF_DOCS) test_client.insert_documents(test_collection_name, docs[0:5], workers=1, overwrite=False) response = test_client.insert_documents(test_collection_name, docs[3:5], workers=1, overwrite=True) assert response['inserted_successfully'] == 2 @pytest.mark.use_client def test_multiprocess_overwrite(test_client, test_collection_name): if test_collection_name in test_client.list_collections(): test_client.delete_collection(test_collection_name) time.sleep(5) NUM_OF_DOCS = 100 docs = 
test_client.create_sample_documents(NUM_OF_DOCS) test_client.insert_documents(test_collection_name, docs[0:5], workers=1, overwrite=False) # For document with id '3' TEST_ID = '3' id_document = test_client.id(collection_name=test_collection_name, document_id=TEST_ID) test_client.set_field('test.field', id_document, 'stranger') docs[3] = id_document print(docs[3]) docs[3].update({'_id': '3'}) response = test_client.insert_documents(test_collection_name, docs[3:5], workers=1, overwrite=True) id_document = test_client.id(collection_name=test_collection_name, document_id=TEST_ID) assert test_client.get_field('test.field', id_document) == 'stranger' time.sleep(5) test_client.delete_collection(test_collection_name) @pytest.mark.use_client def test_multiprocess_not_overwrite(test_client, test_collection_name): if test_collection_name in test_client.list_collections(): test_client.delete_collection(test_collection_name) time.sleep(5) NUM_OF_DOCS = 100 docs = test_client.create_sample_documents(NUM_OF_DOCS) test_client.insert_documents(test_collection_name, docs[0:5], workers=1, overwrite=False) # For document with id '3' TEST_ID = '3' id_document = test_client.id(collection_name=test_collection_name, document_id=TEST_ID) test_client.set_field('test.field', id_document, 'stranger') docs[3] = id_document docs[3].update({'_id': '3'}) response = test_client.insert_documents(test_collection_name, docs[3:5], workers=1, overwrite=False) id_document = test_client.id(collection_name=test_collection_name, document_id=TEST_ID) with pytest.raises(MissingFieldError): test_client.get_field('test.field', id_document) time.sleep(5) test_client.delete_collection(test_collection_name)
{"hexsha": "45aa721778cfda9edfd0b07d2e4b98d441119931", "size": 6103, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_write_multiprocessing.py", "max_stars_repo_name": "Tiamat-Tech/vectorai", "max_stars_repo_head_hexsha": "79e088a70ff79fc6bf18c6a6c0a4f367c1113648", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 255, "max_stars_repo_stars_event_min_datetime": "2020-09-30T12:32:20.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-19T16:12:35.000Z", "max_issues_repo_path": "tests/test_write_multiprocessing.py", "max_issues_repo_name": "Tiamat-Tech/vectorai", "max_issues_repo_head_hexsha": "79e088a70ff79fc6bf18c6a6c0a4f367c1113648", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 20, "max_issues_repo_issues_event_min_datetime": "2020-10-01T06:14:35.000Z", "max_issues_repo_issues_event_max_datetime": "2021-04-12T07:22:57.000Z", "max_forks_repo_path": "tests/test_write_multiprocessing.py", "max_forks_repo_name": "Tiamat-Tech/vectorai", "max_forks_repo_head_hexsha": "79e088a70ff79fc6bf18c6a6c0a4f367c1113648", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 33, "max_forks_repo_forks_event_min_datetime": "2020-10-01T20:52:39.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-18T07:17:25.000Z", "avg_line_length": 45.5447761194, "max_line_length": 113, "alphanum_fraction": 0.7778141897, "include": true, "reason": "import numpy", "num_tokens": 1328}
from __future__ import absolute_import, division, unicode_literals import pytest try: import plotly import plotly.graph_objs as go import plotly.io as pio pio.templates.default = None except: plotly = None plotly_available = pytest.mark.skipif(plotly is None, reason="requires plotly") import numpy as np from panel.models.plotly import PlotlyPlot from panel.pane import Pane, PaneBase, Plotly @plotly_available def test_get_plotly_pane_type_from_figure(): trace = go.Scatter(x=[0, 1], y=[2, 3]) fig = go.Figure([trace]) assert PaneBase.get_pane_type(fig) is Plotly @plotly_available def test_get_plotly_pane_type_from_traces(): trace = go.Scatter(x=[0, 1], y=[2, 3]) assert PaneBase.get_pane_type([trace]) is Plotly @plotly_available def test_get_plotly_pane_type_from_trace(): trace = go.Scatter(x=[0, 1], y=[2, 3]) assert PaneBase.get_pane_type(trace) is Plotly @plotly_available def test_plotly_pane_single_trace(document, comm): trace = go.Scatter(x=[0, 1], y=[2, 3], uid='Test') pane = Pane({'data': [trace], 'layout': {'width': 350}}) # Create pane model = pane.get_root(document, comm=comm) assert isinstance(model, PlotlyPlot) assert pane._models[model.ref['id']][0] is model assert len(model.data) == 1 assert model.data[0]['type'] == 'scatter' assert model.data[0]['x'] == [0, 1] assert model.data[0]['y'] == [2, 3] assert model.layout == {'width': 350} assert len(model.data_sources) == 1 assert model.data_sources[0].data == {} # Replace Pane.object new_trace = go.Bar(x=[2, 3], y=[4, 5]) pane.object = {'data': new_trace, 'layout': {'width': 350}} assert len(model.data) == 1 assert model.data[0]['type'] == 'bar' assert model.data[0]['x'] == [2, 3] assert model.data[0]['y'] == [4, 5] assert model.layout == {'width': 350} assert len(model.data_sources) == 1 assert model.data_sources[0].data == {} assert pane._models[model.ref['id']][0] is model # Cleanup pane._cleanup(model) assert pane._models == {} @plotly_available def test_plotly_pane_numpy_to_cds_traces(document, comm): 
trace = go.Scatter(x=np.array([1, 2]), y=np.array([2, 3])) pane = Pane({'data': [trace], 'layout': {'width': 350}}) # Create pane model = pane.get_root(document, comm=comm) assert isinstance(model, PlotlyPlot) assert len(model.data) == 1 assert model.data[0]['type'] == 'scatter' assert 'x' not in model.data[0] assert 'y' not in model.data[0] assert model.layout == {'width': 350} assert len(model.data_sources) == 1 cds = model.data_sources[0] assert np.array_equal(cds.data['x'][0], np.array([1, 2])) assert np.array_equal(cds.data['y'][0], np.array([2, 3])) # Replace Pane.object new_trace = [go.Scatter(x=np.array([5, 6]), y=np.array([6, 7])), go.Bar(x=np.array([2, 3]), y=np.array([4, 5]))] pane.object = {'data': new_trace, 'layout': {'width': 350}} assert len(model.data) == 2 assert model.data[0]['type'] == 'scatter' assert 'x' not in model.data[0] assert 'y' not in model.data[0] assert model.data[1]['type'] == 'bar' assert 'x' not in model.data[1] assert 'y' not in model.data[1] assert model.layout == {'width': 350} assert len(model.data_sources) == 2 cds = model.data_sources[0] assert np.array_equal(cds.data['x'][0], np.array([5, 6])) assert np.array_equal(cds.data['y'][0], np.array([6, 7])) cds2 = model.data_sources[1] assert np.array_equal(cds2.data['x'][0], np.array([2, 3])) assert np.array_equal(cds2.data['y'][0], np.array([4, 5])) # Cleanup pane._cleanup(model) assert pane._models == {}
{"hexsha": "07fae2c3261f43ea2834bb028a1ea97f9ebb3050", "size": 3744, "ext": "py", "lang": "Python", "max_stars_repo_path": "panel/tests/pane/test_plotly.py", "max_stars_repo_name": "rupakgoyal/panel-", "max_stars_repo_head_hexsha": "4e1e01e1766ebfc2fc1efb409734fd51efc60c01", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "panel/tests/pane/test_plotly.py", "max_issues_repo_name": "rupakgoyal/panel-", "max_issues_repo_head_hexsha": "4e1e01e1766ebfc2fc1efb409734fd51efc60c01", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "panel/tests/pane/test_plotly.py", "max_forks_repo_name": "rupakgoyal/panel-", "max_forks_repo_head_hexsha": "4e1e01e1766ebfc2fc1efb409734fd51efc60c01", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.8421052632, "max_line_length": 79, "alphanum_fraction": 0.6378205128, "include": true, "reason": "import numpy", "num_tokens": 1134}
"""Small tensor/array helpers shared across the gloro package."""
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import ops

import gloro


def add_extra_column(y):
    """Append a zero-filled column to `y`, dispatching on numpy vs. tensor input."""
    handler = add_extra_column_np if isinstance(y, np.ndarray) else add_extra_column_tf
    return handler(y)


def add_extra_column_tf(y):
    """Append a zero-filled column to a 2-D tensorflow tensor."""
    zero_col = tf.zeros((tf.shape(y)[0], 1))
    return tf.concat((y, zero_col), axis=1)


def add_extra_column_np(y):
    """Append a zero-filled column to a 2-D numpy array."""
    zero_col = np.zeros((y.shape[0], 1))
    return np.concatenate((y, zero_col), axis=1)


def set_value(x, value):
    """Assign `value` to variable `x`, cast to x's base dtype."""
    cast_value = np.asarray(value, dtype=x.dtype.base_dtype.name)
    with ops.init_scope():
        x.assign(cast_value)


def batch_set_value(tuples):
    """Assign each (variable, value) pair, all within one init scope."""
    with ops.init_scope():
        for var, val in tuples:
            var.assign(np.asarray(val, dtype=var.dtype.base_dtype.name))


def get_value(x):
    """Return the numpy value held by `x`."""
    return x.numpy()


def l2_normalize(x):
    """Scale `x` by its L2 norm, with an epsilon to avoid division by zero."""
    norm = tf.sqrt(tf.reduce_sum(x**2.))
    return x / (norm + gloro.constants.EPS)


def print_if_verbose(verbose):
    """Return a one-argument print function, or a no-op when not verbose."""
    return (lambda s: print(s)) if verbose else (lambda s: None)
{"hexsha": "b5b32daca1e296f85888a0b443566294392d9ac0", "size": 996, "ext": "py", "lang": "Python", "max_stars_repo_path": "gloro/utils.py", "max_stars_repo_name": "klasleino/gloro", "max_stars_repo_head_hexsha": "5ebfe0f3850bca20e4ee4414fa2ee8a4af303023", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 16, "max_stars_repo_stars_event_min_datetime": "2021-02-17T15:06:07.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-28T19:08:54.000Z", "max_issues_repo_path": "gloro/utils.py", "max_issues_repo_name": "klasleino/gloro", "max_issues_repo_head_hexsha": "5ebfe0f3850bca20e4ee4414fa2ee8a4af303023", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-11-30T15:49:31.000Z", "max_issues_repo_issues_event_max_datetime": "2021-12-06T20:28:49.000Z", "max_forks_repo_path": "gloro/utils.py", "max_forks_repo_name": "klasleino/gloro", "max_forks_repo_head_hexsha": "5ebfe0f3850bca20e4ee4414fa2ee8a4af303023", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-06-20T06:34:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-06-20T06:34:51.000Z", "avg_line_length": 20.3265306122, "max_line_length": 70, "alphanum_fraction": 0.6626506024, "include": true, "reason": "import numpy", "num_tokens": 246}
""" @file core.py @version 0.1.0 @author Lait-au-Cafe @date 10/05/2019 """ # -*- coding: utf-8 -*- import sys import os import typing import dataclasses import numpy as np import cv2 import OpenGL.GL as gl import glfw import glm import ctypes @dataclasses.dataclass class CameraProperty: transform_matrix: glm.mat4 clipping_distance: glm.vec2 field_of_view: float def clone(self): """ @fn clone() @brief Make a deep copy of an instance. """ return CameraProperty( transform_matrix = glm.mat4(self.transform_matrix), clipping_distance = glm.vec2(self.clipping_distance), field_of_view = self.field_of_view) @dataclasses.dataclass class LightSource: position: glm.vec4 ambient: glm.vec3 diffuse: glm.vec3 specular: glm.vec3 @dataclasses.dataclass class CursorStatus: button: typing.Dict[int, bool] position: glm.vec2 class Viewer: """ @class Viewer @brief A class which deals with the rendering of specified model. @detail This class renders the model specified in the argument of constructor as model_vertices and mode_uvmap. """ """ @var model_vertices @brief The list of model vertices ([X1 Y1 Z1 X2 Y2 Z2 ...]). """ model_vertices: typing.List[float] """ @var model_indices @brief The list of model vertices ([ID1 ID2 ID3 ...]). """ model_indices: typing.List[int] """ @var model_uvmap @brief The list of correspondence between model vertices and texture uv coordinate ([U1 V1 U2 V2 ...]). """ model_uvmap: typing.List[float] """ @var window @brief The window object of GLFW. """ window: glfw._GLFWwindow """ @var window_size @brief The size of the window (width, height). """ window_size: typing.Tuple[int, int] """ @var camera_property @brief The properties of the camera. """ camera_property: CameraProperty """ @var default_camera_property @brief The default properties of the camera. """ default_camera_property: CameraProperty """ @var vertex_buffer @brief The ID of vertex buffer. """ vertex_buffer: np.uint32 """ @var index_buffer @brief The ID of index buffer. 
""" index_buffer: np.uint32 """ @var uv_buffer @brief The ID of uv buffer. """ uv_buffer: np.uint32 """ @var va_object @brief The vertext array object of OpenGL which stores model_vertices and model_uvmap. """ va_object: np.uint32 """ @var texture @brief The ID of texture in OpenGL. """ texture: int """ @var shader_program @brief The ID of shader program in OpenGL. """ shader_program: int @staticmethod def on_error(code: int, message: str): """ @fn on_error() @brief Callback function invoked when glfw encounters errors. @param code Error code. @param message Error message. """ print(f"[GLFW Error] {message} ({code})") @staticmethod def load_shader(shader_id: int, filename: str) -> bool: """ @fn load_shader() @brief Load shader script from a file. @param shader_id The ID of the shader to which the texture should be bound. @param filename The filename of a shader file. @return Whether the loading succeeded. """ with open(filename) as shader_file: shader_code = shader_file.read() gl.glShaderSource(shader_id, [shader_code]) gl.glCompileShader(shader_id) result = gl.glGetShaderiv(shader_id, gl.GL_COMPILE_STATUS) if result != gl.GL_TRUE: print(f"[GLFW Error] {gl.glGetShaderInfoLog(shader_id)}") return False return True @staticmethod def help(): print() print(":======== Window Control ========:") print("Esc: Close the window. ") print("Left-Drag: Panning. ") print("Right-Drag: Rotate around the origin. ") print("Scroll: Zoom-in/out. ") print("W: Move forward. ") print("A: Move left. ") print("S: Move backward. ") print("D: Move right. ") print("Shift + W: Move up. ") print("Shift + S: Move down. ") print("Up: Face up. ") print("Down: Face down. ") print("Left: Turn left. ") print("Right: Turn right. ") print("Space: Reset the camera position. 
") print(":================================:") print() def update_model(self, model_vertices: typing.List[float], model_indices: typing.List[int], model_uvmap: typing.List[float]): """ @fn update_model() @brief Update the model data to be rendered. @param model_vertices The list of vertices. @param model_indices The list of indices. @param model_uvmap The list of uvs. """ # --- Vertex buffer --- c_vertex_buffer = (ctypes.c_float*len(model_vertices))(*model_vertices) c_vertex_buffer_size = ctypes.sizeof(ctypes.c_float) * len(model_vertices) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertex_buffer) if len(model_vertices) > len(self.model_vertices): # Reallocate memory gl.glBufferData(gl.GL_ARRAY_BUFFER, c_vertex_buffer, gl.GL_DYNAMIC_DRAW) c_vertex_buffer_size = ctypes.sizeof(ctypes.c_float) * len(model_vertices) size_allocated = gl.glGetBufferParameteriv(gl.GL_ARRAY_BUFFER, gl.GL_BUFFER_SIZE) if size_allocated != c_vertex_buffer_size: print("[GL Error] Failed to allocate memory for buffer. ") gl.glDeleteBuffers(1, self.vertex_buffer); sys.exit() else: # Update elements gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, c_vertex_buffer_size, c_vertex_buffer) self.model_vertices = model_vertices # --- Index buffer --- c_index_buffer = (ctypes.c_uint32*len(model_indices))(*model_indices) c_index_buffer_size = ctypes.sizeof(ctypes.c_uint32) * len(model_indices) gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.index_buffer) if len(model_indices) > len(self.model_indices): # Reallocate memory gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, c_index_buffer, gl.GL_DYNAMIC_DRAW) c_index_buffer_size = ctypes.sizeof(ctypes.c_uint32) * len(model_indices) size_allocated = \ gl.glGetBufferParameteriv(gl.GL_ELEMENT_ARRAY_BUFFER, gl.GL_BUFFER_SIZE) if size_allocated != c_index_buffer_size: print("[GL Error] Failed to allocate memory for buffer. 
") gl.glDeleteBuffers(1, self.index_buffer); sys.exit() else: # Update elements gl.glBufferSubData(gl.GL_ELEMENT_ARRAY_BUFFER, 0, c_index_buffer_size, c_index_buffer) self.model_indices = model_indices # --- UV buffer --- c_uv_buffer = (ctypes.c_float*len(model_uvmap))(*model_uvmap) c_uv_buffer_size = ctypes.sizeof(ctypes.c_float) * len(model_uvmap) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.uv_buffer) if len(model_uvmap) > len(self.model_uvmap): # Reallocate memory gl.glBufferData(gl.GL_ARRAY_BUFFER, c_uv_buffer, gl.GL_DYNAMIC_DRAW) c_uv_buffer_size = ctypes.sizeof(ctypes.c_float) * len(model_uvmap) size_allocated = gl.glGetBufferParameteriv(gl.GL_ARRAY_BUFFER, gl.GL_BUFFER_SIZE) if size_allocated != c_uv_buffer_size: print("[GL Error] Failed to allocate memory for buffer. ") gl.glDeleteBuffers(1, self.uv_buffer); sys.exit() else: # Update elements gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, c_uv_buffer_size, c_uv_buffer) self.model_uvmap = model_uvmap # unbind gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0) def update_camera_matrix(self): """ @fn update_camera_matrix() @brief Calculate MVP matrix and upload it to GPU. 
""" # Calculate the perspective matrix perspective_matrix = glm.perspectiveFovLH_NO( glm.radians(self.camera_property.field_of_view), self.window_size[0], self.window_size[1], self.camera_property.clipping_distance[0], self.camera_property.clipping_distance[1]) # Compose MVP matrix mv_matrix = glm.scale(self.camera_property.transform_matrix, glm.vec3(1.0, 1.0, -1.0)) # Upload to uniform variable in the shader gl.glUseProgram(self.shader_program) gl.glUniformMatrix4fv(gl.glGetUniformLocation(self.shader_program, "mv_matrix"), 1, gl.GL_FALSE, glm.value_ptr(mv_matrix)) gl.glUniformMatrix4fv(gl.glGetUniformLocation(self.shader_program, "p_matrix"), 1, gl.GL_FALSE, glm.value_ptr(perspective_matrix)) # Update light source light_source = LightSource( position = mv_matrix * glm.vec4(100.0, 100.0, 100.0, 1.0), ambient = glm.vec3(1.0, 1.0, 1.0), diffuse = glm.vec3(1.0, 1.0, 1.0), specular = glm.vec3(1.0, 1.0, 1.0)) gl.glUniform4fv(gl.glGetUniformLocation(self.shader_program, "light_source.position"), 1, glm.value_ptr(light_source.position)) gl.glUniform3fv(gl.glGetUniformLocation(self.shader_program, "light_source.ambient"), 1, glm.value_ptr(light_source.ambient)) gl.glUniform3fv(gl.glGetUniformLocation(self.shader_program, "light_source.diffuse"), 1, glm.value_ptr(light_source.diffuse)) gl.glUniform3fv(gl.glGetUniformLocation(self.shader_program, "light_source.specular"), 1, glm.value_ptr(light_source.specular)) def window_size_callback(self, window: glfw._GLFWwindow, new_width: int, new_height: int): """ @fn window_size_callback() @brief The callback function for glfw.set_window_size_callback(). @param window The ID of the window which is resized. @param new_width The updated width of the window. @param new_height The updated height of the window. """ # For the support of retina display, use the framebuffer size instead of the window size. 
self.window_size = glfw.get_framebuffer_size(self.window) self.update_camera_matrix() gl.glViewport(0, 0, new_width, new_height) def mouse_scroll_callback(self, window: glfw._GLFWwindow, x_offset: float, y_offset: float): """ @fn mouse_scroll_callback() @brief The callback function for glfw.set_sroll_callback(). @param window The ID of the window which is resized. @param x_offset The offset along x axis. @param y_offset The offset along y axis. """ if y_offset != 0.: if y_offset > 0.: tmat = glm.scale(glm.mat4(1.), glm.vec3(1.25)) else: tmat = glm.scale(glm.mat4(1.), glm.vec3(0.8)) self.camera_property.transform_matrix = self.camera_property.transform_matrix * tmat self.update_camera_matrix() def display_all_instance_variables(self): """ @fn display_all_instancevariables() @brief For developpers. List up all the instance variables. """ print(" ==== Instance Variables in Viewer ==== ") [print(f"{key}: {type(val)}") for key, val in vars(self).items()] print(" ====================================== ") def __init__(self, window_title: str, model_vertices: typing.List[float], model_indices: typing.List[int], model_uvmap: typing.List[float], texture_filename: str): """ @fn __init__() @brief Initialization of viewer. @param model_vertices The list of vertices in the model. @param model_uvmap the uvmapping which associate model_vertices with textures @param texture_filename The path to the texture file. @param window_title The title of the window. @note The format of model_vertices is [X1, Y1, Z1, X2, Y2, ...]. @note The format of model_uvmap is [U1, V1, U2, V2, ...]. """ print("Initializing Viewer...") # set callback function on error glfw.set_error_callback(Viewer.on_error) # Initialize if glfw.init() != gl.GL_TRUE: print("[GLFW Error] Failed to initialize GLFW. 
") sys.exit() #======================================== # Prepare Window #======================================== print("- Creating a window.") # Window hints glfw.window_hint(glfw.CONTEXT_VERSION_MAJOR, 3) glfw.window_hint(glfw.CONTEXT_VERSION_MINOR, 3) glfw.window_hint(glfw.OPENGL_FORWARD_COMPAT, True) # Create window self.window_size = (640, 480) self.window = glfw.create_window( self.window_size[0], # width self.window_size[1], # height window_title, # window title None, None) if self.window == None: print("[GLFW Error] Failed to Create a window. ") sys.exit() # Create OpenGL context glfw.make_context_current(self.window) # Set background color. gl.glClearColor(0.0, 1.0, 1.0, 1.0) # Set callback functions glfw.set_window_size_callback(self.window, self.window_size_callback) glfw.set_scroll_callback(self.window, self.mouse_scroll_callback) #======================================== # Prepare Buffers #======================================== print("- Preparing buffers.") # --- Vertex buffer --- # Generate & bind buffer self.model_vertices = model_vertices self.vertex_buffer = gl.glGenBuffers(1) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertex_buffer) # Allocate memory c_vertex_buffer = (ctypes.c_float*len(model_vertices))(*model_vertices) gl.glBufferData(gl.GL_ARRAY_BUFFER, c_vertex_buffer, gl.GL_DYNAMIC_DRAW) c_vertex_buffer_size = ctypes.sizeof(ctypes.c_float) * len(model_vertices) size_allocated = gl.glGetBufferParameteriv(gl.GL_ARRAY_BUFFER, gl.GL_BUFFER_SIZE) if size_allocated != c_vertex_buffer_size: print("[GL Error] Failed to allocate memory for buffer. 
") gl.glDeleteBuffers(1, self.vertex_buffer); sys.exit() # --- Index buffer --- # Generate & bind buffer self.model_indices = model_indices self.index_buffer = gl.glGenBuffers(1) gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.index_buffer) # Allocate memory c_index_buffer = (ctypes.c_uint32*len(model_indices))(*model_indices) gl.glBufferData(gl.GL_ELEMENT_ARRAY_BUFFER, c_index_buffer, gl.GL_DYNAMIC_DRAW) c_index_buffer_size = ctypes.sizeof(ctypes.c_uint32) * len(model_indices) size_allocated = gl.glGetBufferParameteriv(gl.GL_ELEMENT_ARRAY_BUFFER, gl.GL_BUFFER_SIZE) if size_allocated != c_index_buffer_size: print("[GL Error] Failed to allocate memory for buffer. ") gl.glDeleteBuffers(1, self.index_buffer); sys.exit() # --- UV buffer --- # Generate & bind buffer self.model_uvmap = model_uvmap self.uv_buffer = gl.glGenBuffers(1) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.uv_buffer) # Allocate memory c_uv_buffer = (ctypes.c_float*len(model_uvmap))(*model_uvmap) gl.glBufferData(gl.GL_ARRAY_BUFFER, c_uv_buffer, gl.GL_DYNAMIC_DRAW) c_uv_buffer_size = ctypes.sizeof(ctypes.c_float) * len(model_uvmap) size_allocated = gl.glGetBufferParameteriv(gl.GL_ARRAY_BUFFER, gl.GL_BUFFER_SIZE) if size_allocated != c_uv_buffer_size: print("[GL Error] Failed to allocate memory for buffer. 
") gl.glDeleteBuffers(1, self.uv_buffer); sys.exit() # --- Bind to vertex array object --- self.va_object = gl.glGenVertexArrays(1) gl.glBindVertexArray(self.va_object) gl.glEnableVertexAttribArray(0) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.vertex_buffer) gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, c_vertex_buffer_size, c_vertex_buffer) gl.glVertexAttribPointer(0, 3, gl.GL_FLOAT, gl.GL_FALSE, 0, None) gl.glEnableVertexAttribArray(1) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, self.uv_buffer) gl.glBufferSubData(gl.GL_ARRAY_BUFFER, 0, c_uv_buffer_size, c_uv_buffer) gl.glVertexAttribPointer(1, 2, gl.GL_FLOAT, gl.GL_FALSE, 0, None) gl.glBindVertexArray(0) gl.glBindBuffer(gl.GL_ARRAY_BUFFER, 0) #======================================== # Prepare Texture #======================================== print("- Preparing textures.") # Load image image = cv2.imread(texture_filename) if image is None: print(f"[CV Error] Cannot open image: {texture_filename}") sys.exit() image = cv2.flip(image, 0) # Create texture self.texture = gl.glGenTextures(1) gl.glBindTexture(gl.GL_TEXTURE_2D, self.texture) # Generate texture gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, 1) gl.glTexImage2D( gl.GL_TEXTURE_2D, # target texture 0, # Mipmap Level gl.GL_RGBA, # The number of color components in the texture image.shape[1], # the width of texture image.shape[0], # the height of texture 0, # border (this value must be 0) gl.GL_BGR, # the format of the pixel data gl.GL_UNSIGNED_BYTE, # the type of pixel data image) # a pointer to the image # Set parameters gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MIN_FILTER, gl.GL_LINEAR) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_MAG_FILTER, gl.GL_LINEAR) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_S, gl.GL_CLAMP_TO_BORDER) gl.glTexParameteri(gl.GL_TEXTURE_2D, gl.GL_TEXTURE_WRAP_T, gl.GL_CLAMP_TO_BORDER) # Unbind gl.glBindTexture(gl.GL_TEXTURE_2D, 0) #======================================== # Prepare Camera Parameters 
#======================================== print("- Setting camera parameters.") # Transform matrix trans = glm.vec3(0., 0., 50.) rot = glm.vec3(0., 0., 0.) transform_matrix = glm.mat4(1.) transform_matrix = glm.translate(transform_matrix, trans) transform_matrix = glm.rotate(transform_matrix, glm.radians(rot.x), glm.vec3(1., 0., 0.)) transform_matrix = glm.rotate(transform_matrix, glm.radians(rot.y), glm.vec3(0., 1., 0.)) transform_matrix = glm.rotate(transform_matrix, glm.radians(rot.z), glm.vec3(0., 0., 1.)) self.default_camera_property = CameraProperty( transform_matrix = transform_matrix, clipping_distance = glm.vec2(10., 1000.), field_of_view = 60.) self.camera_property = self.default_camera_property.clone() #======================================== # Prepare Shader Programs #======================================== print("- Preparing shaders.") is_loaded: bool = False vert_shader = gl.glCreateShader(gl.GL_VERTEX_SHADER) is_loaded = Viewer.load_shader(vert_shader, f"{os.path.dirname(__file__)}/glsl/vertex.glsl") if not is_loaded: sys.exit() geom_shader = gl.glCreateShader(gl.GL_GEOMETRY_SHADER) is_loaded = Viewer.load_shader(geom_shader, f"{os.path.dirname(__file__)}/glsl/geometry.glsl") if not is_loaded: sys.exit() frag_shader = gl.glCreateShader(gl.GL_FRAGMENT_SHADER) is_loaded = Viewer.load_shader(frag_shader, f"{os.path.dirname(__file__)}/glsl/fragment.glsl") if not is_loaded: sys.exit() # Create shader program self.shader_program = gl.glCreateProgram() # Bind shader objects gl.glAttachShader(self.shader_program, vert_shader) gl.glAttachShader(self.shader_program, geom_shader) gl.glAttachShader(self.shader_program, frag_shader) gl.glDeleteShader(vert_shader) gl.glDeleteShader(geom_shader) gl.glDeleteShader(frag_shader) # Link shader program gl.glLinkProgram(self.shader_program) result = gl.glGetProgramiv(self.shader_program, gl.GL_LINK_STATUS) if result != gl.GL_TRUE: print(f"[GLFW Error] {gl.glGetShaderInfoLog(shader_id)}") sys.exit() # Specify uniform 
variables gl.glUseProgram(self.shader_program) gl.glUniform1i(gl.glGetUniformLocation(self.shader_program, "sampler"), 0) #======================================== # Prepare Other Instance Variables #======================================== # Cursor status self.previous_cursor_status = CursorStatus( button = { glfw.MOUSE_BUTTON_LEFT: False, glfw.MOUSE_BUTTON_RIGHT: False}, position = glm.vec3(0.)) print("Initialization done. ") Viewer.help() def update(self) -> bool: """ @fn update() @brief Update the frame. @return Whether the main loop continues. """ #======================================== # Mouse and Keyboard response #======================================== # Exit if glfw.get_key(self.window, glfw.KEY_ESCAPE) == glfw.PRESS: return False # Camera motion (keyboard) if glfw.get_key(self.window, glfw.KEY_SPACE) == glfw.PRESS: self.camera_property = self.default_camera_property.clone() else: trans = glm.vec3(0.) rot = glm.vec3(0.) trans_delta = 0.05 rot_delta = 0.05 if glfw.get_key(self.window, glfw.KEY_W) == glfw.PRESS: if glfw.get_key(self.window, glfw.KEY_LEFT_SHIFT) != glfw.PRESS: trans.z += trans_delta else: trans.y += trans_delta if glfw.get_key(self.window, glfw.KEY_S) == glfw.PRESS: if glfw.get_key(self.window, glfw.KEY_LEFT_SHIFT) != glfw.PRESS: trans.z -= trans_delta else: trans.y -= trans_delta if glfw.get_key(self.window, glfw.KEY_D) == glfw.PRESS: trans.x += trans_delta if glfw.get_key(self.window, glfw.KEY_A) == glfw.PRESS: trans.x -= trans_delta if glfw.get_key(self.window, glfw.KEY_UP) == glfw.PRESS: rot.x += rot_delta if glfw.get_key(self.window, glfw.KEY_DOWN) == glfw.PRESS: rot.x -= rot_delta if glfw.get_key(self.window, glfw.KEY_LEFT) == glfw.PRESS: rot.y += rot_delta if glfw.get_key(self.window, glfw.KEY_RIGHT) == glfw.PRESS: rot.y -= rot_delta tmat = glm.mat4(1.) 
tmat = glm.translate(tmat, -trans) tmat = glm.rotate(tmat, glm.radians(rot.x), glm.vec3(1., 0., 0.)) tmat = glm.rotate(tmat, glm.radians(rot.y), glm.vec3(0., 1., 0.)) tmat = glm.rotate(tmat, glm.radians(rot.z), glm.vec3(0., 0., 1.)) self.camera_property.transform_matrix = tmat * self.camera_property.transform_matrix if glfw.get_key(self.window, glfw.KEY_R) == glfw.PRESS: tmat = glm.rotate(tmat, glm.radians(0.032), glm.vec3(0., 0., 1.)) self.camera_property.transform_matrix = self.camera_property.transform_matrix * tmat # Camera motion (mouse) current_cursor_status = CursorStatus( button = { glfw.MOUSE_BUTTON_LEFT: False, glfw.MOUSE_BUTTON_RIGHT: False}, position = None) # Acquire current cursor status if glfw.get_mouse_button(self.window, glfw.MOUSE_BUTTON_LEFT) == glfw.PRESS: current_cursor_status.button[glfw.MOUSE_BUTTON_LEFT] = True if glfw.get_mouse_button(self.window, glfw.MOUSE_BUTTON_RIGHT) == glfw.PRESS: current_cursor_status.button[glfw.MOUSE_BUTTON_RIGHT] = True current_cursor_status.position = glm.vec2(glfw.get_cursor_pos(self.window)) # Compare current status with previous one if current_cursor_status.button[glfw.MOUSE_BUTTON_LEFT]\ and self.previous_cursor_status.button[glfw.MOUSE_BUTTON_LEFT]\ and not current_cursor_status.button[glfw.MOUSE_BUTTON_RIGHT]\ and not self.previous_cursor_status.button[glfw.MOUSE_BUTTON_RIGHT]: displ = current_cursor_status.position - self.previous_cursor_status.position displ *= 0.01 # scaling displ = glm.vec3(displ.x, -displ.y, 0.) 
# 2D -> 3D tmat = glm.translate(glm.mat4(1.), displ) self.camera_property.transform_matrix = tmat * self.camera_property.transform_matrix elif current_cursor_status.button[glfw.MOUSE_BUTTON_RIGHT]\ and self.previous_cursor_status.button[glfw.MOUSE_BUTTON_RIGHT]\ and not current_cursor_status.button[glfw.MOUSE_BUTTON_LEFT]\ and not self.previous_cursor_status.button[glfw.MOUSE_BUTTON_LEFT]: displ = current_cursor_status.position - self.previous_cursor_status.position displ *= -0.1 # scaling displ = glm.vec3(displ.y, displ.x, 0.) # 2D -> 3D if glm.length(displ) != 0: tmat = glm.rotate(glm.mat4(1.), glm.radians(glm.length(displ)), glm.normalize(displ)) self.camera_property.transform_matrix = self.camera_property.transform_matrix * tmat self.previous_cursor_status = current_cursor_status #======================================== # Update the camera matrix #======================================== self.update_camera_matrix() #======================================== # Draw new buffer #======================================== # Initialize gl.glClear(gl.GL_COLOR_BUFFER_BIT | gl.GL_DEPTH_BUFFER_BIT) #gl.glPolygonMode(gl.GL_FRONT_AND_BACK, gl.GL_LINE); # Bind program gl.glUseProgram(self.shader_program) gl.glEnable(gl.GL_DEPTH_TEST) gl.glDepthFunc(gl.GL_LESS) gl.glDepthRange(-1.0, 1.0) # Draw model if len(self.model_vertices) > 0: # Bind buffer gl.glBindVertexArray(self.va_object) gl.glBindBuffer(gl.GL_ELEMENT_ARRAY_BUFFER, self.index_buffer) # Bind buffer gl.glActiveTexture(gl.GL_TEXTURE0) gl.glBindTexture(gl.GL_TEXTURE_2D, self.texture) # Draw if len(self.model_indices) // 3 >= 1: gl.glDrawElements(gl.GL_TRIANGLES, len(self.model_indices), gl.GL_UNSIGNED_INT, None) elif len(self.model_vertices) // 3 >= 3 and len(self.model_uvmap) // 2 >= 3: gl.glDrawArrays(gl.GL_TRIANGLES, 0, len(self.model_vertices) // 3) else: # NOT IMPLEMENTED: Need to bypass geometry shader. 
#gl.glDrawArrays(gl.GL_POINTS, 0, len(self.model_vertices) // 3) pass # Unbind gl.glBindVertexArray(0) gl.glBindTexture(gl.GL_TEXTURE_2D, 0) # Update glfw.swap_buffers(self.window) glfw.poll_events() return glfw.window_should_close(self.window) != gl.GL_TRUE
{"hexsha": "059c558f243ea82e3e67519a00401ba246ac22b6", "size": 28387, "ext": "py", "lang": "Python", "max_stars_repo_path": "py3dviewer/core.py", "max_stars_repo_name": "Lait-au-Cafe/py3dviewer", "max_stars_repo_head_hexsha": "29b4e09371f1308e37579be9a3e29a3ad3bcd39f", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "py3dviewer/core.py", "max_issues_repo_name": "Lait-au-Cafe/py3dviewer", "max_issues_repo_head_hexsha": "29b4e09371f1308e37579be9a3e29a3ad3bcd39f", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2021-10-12T22:57:10.000Z", "max_issues_repo_issues_event_max_datetime": "2021-10-12T22:57:10.000Z", "max_forks_repo_path": "py3dviewer/core.py", "max_forks_repo_name": "Lait-au-Cafe/py3dviewer", "max_forks_repo_head_hexsha": "29b4e09371f1308e37579be9a3e29a3ad3bcd39f", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 39.3717059639, "max_line_length": 116, "alphanum_fraction": 0.598865678, "include": true, "reason": "import numpy", "num_tokens": 6338}
import pytest
from unittest.mock import Mock

import numpy as np
import matplotlib.pyplot as plt
from PIL import Image

from figpptx.slide_editor import SlideEditor, SlideTransformer

# Transformer geometry shared by every parametrized case.
_LEFT, _TOP = 10, 20
_SIZE = (30, 40)

# (input coordinates, expected transformed coordinates) pairs; the last case
# checks that ndarray inputs round-trip as ndarrays.
_CASES = [
    ((1, 2), (11, 58)),
    ([3, 5], [13, 55]),
    ([1, 2, 3, 5], [11, 58, 13, 55]),
    (np.array([[1, 2], [3, 5]]), np.array([[11, 58], [13, 55]])),
]


@pytest.mark.parametrize("given, wanted", _CASES)
def test_transform(given, wanted):
    """SlideEditor.transform maps coordinates and preserves the container type."""
    transformer = SlideTransformer(left=_LEFT, top=_TOP, size=_SIZE)
    editor = SlideEditor(Mock(), transformer)
    result = editor.transform(given)
    assert np.allclose(np.array(result), np.array(wanted))
    assert type(result) is type(wanted)


if __name__ == "__main__":
    pytest.main([__file__, "--capture=no"])
{"hexsha": "ba4a51a90b4d7c4c3407e00b8f5a24705da9d4f6", "size": 853, "ext": "py", "lang": "Python", "max_stars_repo_path": "tests/test_slide_editor.py", "max_stars_repo_name": "Sillte/figpptx", "max_stars_repo_head_hexsha": "bf5539b09eeef4e6a17bb4483f62f29d286138b2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "tests/test_slide_editor.py", "max_issues_repo_name": "Sillte/figpptx", "max_issues_repo_head_hexsha": "bf5539b09eeef4e6a17bb4483f62f29d286138b2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "tests/test_slide_editor.py", "max_forks_repo_name": "Sillte/figpptx", "max_forks_repo_head_hexsha": "bf5539b09eeef4e6a17bb4483f62f29d286138b2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.0882352941, "max_line_length": 69, "alphanum_fraction": 0.6236811254, "include": true, "reason": "import numpy", "num_tokens": 242}
"""Minimal ndhist smoke test: build a constant-bin-width axis, then a 1-D histogram on it."""
import numpy as np
import ndhist

# Four edges -> three bins of width 1.
edges = np.array([0., 1., 2., 3.])
axis = ndhist.constant_bin_width_axis(edges)
print(axis.nbins)

# A one-dimensional histogram over that single axis.
hist = ndhist.ndhist((axis,))
print(hist.nbins)
{"hexsha": "813c1013df11317d8a23970bb8501d0e1acfa28b", "size": 148, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/python/axis_test.py", "max_stars_repo_name": "martwo/ndhist", "max_stars_repo_head_hexsha": "193cef3585b5d0277f0721bb9c3a1e78cc67cf1f", "max_stars_repo_licenses": ["BSD-2-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "examples/python/axis_test.py", "max_issues_repo_name": "martwo/ndhist", "max_issues_repo_head_hexsha": "193cef3585b5d0277f0721bb9c3a1e78cc67cf1f", "max_issues_repo_licenses": ["BSD-2-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "examples/python/axis_test.py", "max_forks_repo_name": "martwo/ndhist", "max_forks_repo_head_hexsha": "193cef3585b5d0277f0721bb9c3a1e78cc67cf1f", "max_forks_repo_licenses": ["BSD-2-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 18.5, "max_line_length": 59, "alphanum_fraction": 0.7027027027, "include": true, "reason": "import numpy", "num_tokens": 48}
[STATEMENT] lemma success_newI [success_intros]: "success (new n x) h" [PROOF STATE] proof (prove) goal (1 subgoal): 1. success (new n x) h [PROOF STEP] by (auto intro: success_intros simp add: new_def)
{"llama_tokens": 83, "file": null, "length": 1}
"""Noise analysis of a peak-wavelength time series.

Reads the third column of ``peak_wls.txt`` (one reading every 10 s), plots the
raw series, removes a linear drift fitted to the start of the series, and
annotates the residual noise with 1/2/3-sigma bands.
"""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize as opt
import scipy.signal as sig

# matplotlib.use('Agg')  # For remote use

font = {'size': 14}
matplotlib.rc('font', **font)

root = '/home/george/Desktop/lith_bulk_002/'

# Column 2 = peak wavelength (nm); keep samples 30..89 of the run.
pk_wl = np.genfromtxt(root+'peak_wls.txt', usecols=(2), unpack=True, skip_header=1)
pk_wl = pk_wl[30:90]
ind = np.arange(0, len(pk_wl), 1)
t = np.arange(0, len(pk_wl), 1)*10/60  # sample index -> minutes (10 s/sample)

# --- Raw data ---
plt.plot(t, pk_wl, 'C0.', markersize=2)
plt.grid(True)
plt.xlabel('Time (minutes)')
plt.ylabel('Peak wavelength (nm)')
# plt.ylim([652.3, 653.8])
# plt.savefig('fig_raw_data.png')
plt.show()

# Fit a linear drift ("offset") to the leading samples.
# NOTE(review): after the [30:90] slice pk_wl holds 60 samples, so [:100] takes
# the whole series — presumably a leftover from a longer run; confirm intent.
i_offset = ind[:100]
pk_wl_offset = pk_wl[:100]
offset = np.polyfit(i_offset, pk_wl_offset, 1)

std = np.std(pk_wl)
mean = np.mean(pk_wl)

# --- Data with fitted drift and sigma bands (before correction) ---
# Raw strings (rf'...') keep '\sigma' as literal TeX instead of an invalid
# Python escape sequence.
plt.plot(t, pk_wl, 'C0.', markersize=4)
plt.plot(t, np.polyval(offset, ind))
plt.grid(True)
plt.xlabel('Time (minutes)')
plt.ylabel('Peak wavelength (nm)')
plt.axhline(mean+std/2, ls='--', color='black', label=rf'$\sigma$: {std:1.4f}')
plt.axhline(mean-std/2, ls='--', color='black')
plt.axhline(mean+2*std/2, ls='--', color='red', label=rf'2$\sigma$: {2*std:1.4f}')
plt.axhline(mean-2*std/2, ls='--', color='red')
plt.axhline(mean+3*std/2, ls='--', color='green', label=rf'3$\sigma$: {3*std:1.4f}')
plt.axhline(mean-3*std/2, ls='--', color='green')
plt.legend()
plt.tight_layout()
plt.savefig(root+'fig_noise_poly_offset.png')
plt.show()

# --- Drift-corrected residuals with sigma bands ---
pk_wl = pk_wl-np.polyval(offset, ind)
std = np.std(pk_wl)

plt.plot(t, pk_wl, 'C0.', markersize=4)
plt.grid(True)
plt.xlabel('Time (minutes)')
plt.ylabel('Peak wavelength shift (nm)')
plt.axhline(+std/2, ls='--', color='black', label=rf'$\sigma$: {std: 1.4f}')
plt.axhline(-std/2, ls='--', color='black')
plt.axhline(+2*std/2, ls='--', color='red', label=rf'2$\sigma$: {2*std: 1.4f}')
plt.axhline(-2*std/2, ls='--', color='red')
plt.axhline(+3*std/2, ls='--', color='green', label=rf'3$\sigma$: {3*std: 1.4f}')
plt.axhline(-3*std/2, ls='--', color='green')
plt.legend()
plt.tight_layout()
plt.savefig(root+'fig_noise_poly_corrected.png')
plt.show()
{"hexsha": "0c9493488e88a2f1a821257fdbf0b3f8364a37db", "size": 2120, "ext": "py", "lang": "Python", "max_stars_repo_path": "noise_analysis.py", "max_stars_repo_name": "g-duff/processing_absorption_spectra", "max_stars_repo_head_hexsha": "813eede469ae4bce5ad9af9e9135aa2eb49e5ab5", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-17T11:08:29.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-17T11:08:29.000Z", "max_issues_repo_path": "noise_analysis.py", "max_issues_repo_name": "g-duff/processing_absorption_spectra", "max_issues_repo_head_hexsha": "813eede469ae4bce5ad9af9e9135aa2eb49e5ab5", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 4, "max_issues_repo_issues_event_min_datetime": "2019-12-11T12:12:30.000Z", "max_issues_repo_issues_event_max_datetime": "2020-02-25T00:14:44.000Z", "max_forks_repo_path": "noise_analysis.py", "max_forks_repo_name": "g-duff/processing_absorption_spectra", "max_forks_repo_head_hexsha": "813eede469ae4bce5ad9af9e9135aa2eb49e5ab5", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.8947368421, "max_line_length": 78, "alphanum_fraction": 0.6443396226, "include": true, "reason": "import numpy,import scipy", "num_tokens": 729}
# -*- coding: utf-8 -*-
'''
GUIDemo.py
PyQt5 and matplotlib based
See my GitHub uwallgren, ujgwallgren@gmail.com
'''
import sys, os
import numpy as np
import pickle

from PyQt5 import QtCore, QtGui, Qt
from PyQt5.QtGui import QIcon
from PyQt5.QtWidgets import QApplication, QWidget, QAction, QVBoxLayout, QHBoxLayout, QGridLayout, QMainWindow, QPushButton, QHeaderView, QLabel, QSizePolicy

import matplotlib
import matplotlib.pylab as plt
plt.style.use('ggplot')
from matplotlib.figure import Figure
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar


class MyToolbar(NavigationToolbar):
    """Standard matplotlib Qt toolbar extended with a few custom actions
    that forward to the parent widget's action0..action3 slots."""

    def __init__(self, canvas, parent):
        self.parent = parent
        NavigationToolbar.__init__(self, canvas, parent)
        # Checkable action: its checked state is passed through to action0.
        act = QAction(QIcon("icons/cow.jpg"), 'Toggle Cow', self)
        act.setCheckable(True)
        act.triggered.connect(self.icon0)
        self.addAction(act)
        self.addAction(QIcon(''), 'OFF', self.icon1)
        self.addAction(QIcon(''), '')
        self.addAction(QIcon("icons/sync.jpg"), 'Sync all', self.icon1)
        self.addAction(QIcon("{}"), '---')
        self.addAction(QIcon("icons/red_cross.jpg"), 'Remove', self.icon2)
        self.addAction(QIcon("icons/downarrowgreen.jpg"), 'Add', self.icon3)

    def icon0(self, toggle=False):
        """Forward the checkable action's state to the parent."""
        if toggle:
            print('toggle on')
        else:
            print('toggle off')
        self.parent.action0(toggle)

    def icon1(self, event=None):
        self.parent.action1('OFF')

    def icon2(self):
        self.parent.action2()

    def icon3(self):
        self.parent.action3()


class MPLDemo(QWidget):
    """Demo window: a matplotlib canvas with buttons, a status label, a custom
    toolbar, and mouse/keyboard interaction (F1-F3 + left-click drops points)."""

    def __init__(self, id=[], handle=[]):
        QWidget.__init__(self)
        self.id = id
        self.handle = handle
        self.cnds = []
        self.days = 120
        self.mx = 0; self.my = 0    # Mouse moved position
        self.key = ''               # Key pressed
        self.point1 = self.point2 = self.point3 = ()
        self.setWindowTitle('MPLDemo')
        self.setGeometry(200, 400, 1450, 600)  # (left, top, width, height)
        self.fig = Figure()
        self.ax = self.fig.add_subplot(111)
        self.canvas = FigureCanvas(self.fig)
        #self.ax=self.canvas.figure.add_subplot(111)
        # Keep a reference to the cursor; without it the crosshair is garbage-collected.
        self.cursor = matplotlib.widgets.Cursor(self.ax, linewidth=1, color='g')
        self.fig.subplots_adjust(top=1.0, bottom=0.07, left=0.03, right=1.0, hspace=0.32, wspace=0.2)
        exp = QSizePolicy.Expanding
        self.canvas.setSizePolicy(exp, exp)

        # Left-hand column of push buttons.
        leftLayout = QVBoxLayout()
        leftLayout2 = QVBoxLayout()
        butLabels = ['Clear', 'BUT1', 'BUT2', 'BUT3']
        butActions = [self.action0, self.action1, self.action2, self.action3]
        self.buts = []
        for i in range(len(butLabels)):
            but = QPushButton(butLabels[i])
            but.clicked.connect(butActions[i])
            but.setStyleSheet('background-color: lightgrey')
            but.setToolTip(str(i))
            leftLayout.addWidget(but)
            self.buts.append(but)

        # Right-hand side: status label above the canvas and toolbar.
        gLayout = QGridLayout()
        rLayout = QVBoxLayout()
        self.label1 = QPushButton()
        exp = QSizePolicy.Expanding
        self.label1.setSizePolicy(exp, exp)
        self.label1.setFixedHeight(30)
        #self.label1.setStyleSheet(" font-size:20px; background-color: rgb(255, 0, 255); border:4px solid rgb(0, 255, 0); font-size:30px;")
        self.label1.setStyleSheet(" font-size:20px; background-color: rgb(255, 255, 0);")
        rLayout.addWidget(self.label1, alignment=QtCore.Qt.AlignCenter)
        rLayout.addWidget(self.canvas)
        toolbar = MyToolbar(self.canvas, self)
        rLayout.addWidget(toolbar)
        gLayout.addLayout(leftLayout, 0, 0, 2, 1)
        gLayout.addLayout(leftLayout2, 0, 1, 2, 1)
        gLayout.addLayout(rLayout, 0, 2, 5, 20)
        self.setLayout(gLayout)

        # Mouse events
        self.canvas.mpl_connect('button_press_event', self.clicked)
        self.canvas.mpl_connect('motion_notify_event', self.moved)
        self.canvas.mpl_connect('draw_event', self.zoomed)
        self.canvas.mpl_connect('key_press_event', self.on_key)
        # Must set FocusPolicy and Focus for key_press
        self.canvas.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.canvas.setFocus()

    def run(self):
        """Draw the initial plot and show the window."""
        self.points = []
        self.plotplot(self.id)
        self.show()

    def clicked(self, event):
        """Left-click stores the click position in point1/2/3 depending on the
        last pressed F-key, then draws the point."""
        # Rescaling data does not work good with zoom funtion
        if event == []:
            return
        # BUGFIX: underMouse is a method — the bare attribute was always truthy,
        # so the guard never actually guarded.
        if event.canvas.underMouse():
            if event.button == 1:
                if self.key == 'f1':
                    self.point1 = (event.xdata, event.ydata)
                if self.key == 'f2':
                    self.point2 = (event.xdata, event.ydata)
                if self.key == 'f3':
                    self.point3 = (event.xdata, event.ydata)
                self.drawPoint()

    def drawPoint(self):
        """Plot point1 as a red marker (if it has been set)."""
        if self.point1 == ():
            return
        self.points.append(self.ax.plot(self.point1[0], self.point1[1], 'or'))
        self.moved(False)

    def moved(self, event, N=0):
        """Mouse-move handler; also callable with event=False to refresh the
        status label only."""
        if event is False:
            str1 = '{} {} {} '.format(' '*50, self.key, ' '*50)
            self.label1.setText(str1)
            self.canvas.draw()
            return
        try:
            # BUGFIX: call underMouse() (see clicked()).
            if event.canvas.underMouse():
                self.mx = event.xdata
                # Interpret the x coordinate as a day offset from today.
                self.mx = np.datetime64('now', 'D') + int(self.mx)
                self.my = event.ydata
                str1 = '{} {} {} - y={:5.2f} {}'.format(' '*50, self.key, self.mx, self.my, ' '*50)
                self.label1.setText(str1)
                self.canvas.draw()
        except (TypeError, AttributeError):
            # xdata/ydata are None while the cursor is outside the axes.
            pass

    def zoomed(self, event):
        """Relabel the x axis as dates (today + tick offset) after zoom/redraw."""
        #print('zoomed', event)
        axf = self.ax.get_xticks()  # get_major_formatter()
        today = np.datetime64('now', 'D')
        lab = [today + int(x) for x in axf]
        lab = [np.datetime_as_string(x) for x in lab]
        self.ax.set_xticklabels(lab)

    def on_key(self, event):
        """Remember the last pressed key (used by clicked()) and refresh the label."""
        #print('you pressed', event.key, event.xdata, event.ydata)
        self.key = event.key
        self.moved(False)

    def labelClicked(self, event):
        print('labelClicked')

    def colorButs(self, nr):
        """Highlight button nr in green and reset buttons 1-3 to grey."""
        for i in [1, 2, 3]:
            but = self.buts[i]
            but.setStyleSheet('background-color: lightgrey')
        self.buts[nr].setStyleSheet('background-color: lightgreen')

    def action0(self, event=[]):
        """Clear: remove every red marker added by drawPoint and reset state."""
        #if self.points==[]: return
        for line in self.ax.lines.copy():
            color = line.get_color()
            if color == 'r':
                line.remove()
        self.key = ''
        self.point1 = ()
        self.moved(False)

    def action1(self, event=[]):
        print(event)
        self.colorButs(1)

    def action2(self, event=[]):
        self.colorButs(2)

    def action3(self, event=[]):
        self.colorButs(3)

    def plotplot(self, id):
        """Draw the demo sine curve on a fresh axes."""
        x = np.arange(0, 25, 0.1)
        y = np.sin(x)
        #x=[np.datetime64(z,'D') for z in x]
        #self.x=[z.astype('int64') for z in x]
        #self.y=stock.last[-self.days:]
        self.ax.cla()
        self.lines = self.ax.plot(x, y, 'b', linewidth=0.5)
        self.moved(False)
        self.canvas.draw()


if __name__ == "__main__":
    app = QApplication(sys.argv)
    win = MPLDemo('DEMO')
    win.run()
    sys.exit(app.exec_())
"""Embed MNIST digits in 2-D with t-SNE and save example/scatter plots.

Downloads ``mnist_784`` from OpenML (cached by scikit-learn), saves one raw
digit image, then fits a 2-component t-SNE on the first 3000 samples and
writes a class-colored scatter plot. Outputs go to ``../image/``.
"""
import os

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import sklearn.datasets as datasets
import sklearn.manifold as manifold

# Ensure the output directory exists. makedirs(..., exist_ok=True) is
# idempotent and race-free, unlike the exists()-then-mkdir pattern.
os.makedirs("../image", exist_ok=True)

if __name__ == "__main__":
    # Data loading from sklearn.datasets (fetched from OpenML, cached locally).
    data = datasets.fetch_openml(
        'mnist_784',
        version = 1,
        return_X_y = True
    )
    pixel_values, targets = data
    targets = targets.astype(int)

    # Save an example image of the dataset (28x28 grayscale).
    single_image = pixel_values.iloc[0].values.reshape(28, 28)
    plt.imsave("../image/single_image.png", single_image, cmap="gray")

    # Create the t-SNE transformation of the data. Only the first 3000
    # samples are embedded to keep the run time reasonable.
    tsne = manifold.TSNE(n_components=2, random_state=42)
    transformed_data = tsne.fit_transform(pixel_values.iloc[:3000])

    # Stack the two embedded components with the corresponding target so the
    # plot can be colored per class.
    tsne_df = pd.DataFrame(
        np.column_stack((transformed_data, targets[:3000])),
        columns = ["x", "y", "targets"]
    )
    tsne_df.loc[:, "targets"] = tsne_df.targets.astype(int)

    # Visualization of the images of the dataset as points in a two
    # dimensional space.
    grid = sns.FacetGrid(tsne_df, hue = "targets", height = 10)
    grid.map(plt.scatter, "x", "y").add_legend().savefig("../image/output.png")
{"hexsha": "7688456ca2e7d1220d09992aed7d24d4969b9fe2", "size": 1449, "ext": "py", "lang": "Python", "max_stars_repo_path": "src/main.py", "max_stars_repo_name": "PeppeSaccardi/mnist-visualization-via-tsne", "max_stars_repo_head_hexsha": "0e30a697da4a54e7f88c14a8c2787c198ff2811d", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/main.py", "max_issues_repo_name": "PeppeSaccardi/mnist-visualization-via-tsne", "max_issues_repo_head_hexsha": "0e30a697da4a54e7f88c14a8c2787c198ff2811d", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/main.py", "max_forks_repo_name": "PeppeSaccardi/mnist-visualization-via-tsne", "max_forks_repo_head_hexsha": "0e30a697da4a54e7f88c14a8c2787c198ff2811d", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-09-11T03:00:59.000Z", "max_forks_repo_forks_event_max_datetime": "2021-09-11T03:00:59.000Z", "avg_line_length": 32.9318181818, "max_line_length": 79, "alphanum_fraction": 0.6777087647, "include": true, "reason": "import numpy", "num_tokens": 355}
(* autogenerated from github.com/mit-pdos/gokv/dmvcc/txncoordinator *)
From Perennial.goose_lang Require Import prelude.
From Goose Require github_com.mit_pdos.gokv.dmvcc.index.

Section code.
Context `{ext_ty: ext_types}.
Local Coercion Var' s: expr := Var s.

(* 0_server.go *)

(* Transaction-coordinator server state: a single clerk for the index service. *)
Definition Server := struct.decl [
  "indexCk" :: ptrT
].

(* Two-phase commit over the index:
   first try to acquire every tuple named in "writes" for transaction "tid"
   (stopping at the first failure, recorded in "err"); if all acquires
   succeed, install the writes and release, returning #true, otherwise
   return #false. *)
Definition Server__TryCommit: val :=
  rec: "Server__TryCommit" "s" "tid" "writes" :=
    let: "err" := ref_to uint64T #0 in
    (* Acquire phase: skip remaining keys once "err" is nonzero. *)
    MapIter "writes" (λ: "key" <>,
      (if: (![uint64T] "err" = #0)
       then "err" <-[uint64T] index.Clerk__AcquireTuple (struct.loadF Server "indexCk" "s") "key" "tid"
       else #()));;
    (if: ![uint64T] "err" ≠ #0
     then #false
     else
       (* Commit phase: write the values and release the tuples. *)
       index.Clerk__UpdateAndRelease (struct.loadF Server "indexCk" "s") "tid" "writes";;
       #true).

(* Construct a Server connected to the index at "indexHost". *)
Definition MakeServer: val :=
  rec: "MakeServer" "indexHost" :=
    struct.new Server [
      "indexCk" ::= index.MakeClerk "indexHost"
    ].

(* clerk.go *)

(* Client-side handle; here it simply wraps a pointer to the server. *)
Definition Clerk := struct.decl [
  "s" :: ptrT
].

(* Forward TryCommit to the wrapped server. *)
Definition Clerk__TryCommit: val :=
  rec: "Clerk__TryCommit" "ck" "tid" "writes" :=
    Server__TryCommit (struct.loadF Clerk "s" "ck") "tid" "writes".

Definition MakeClerk: val :=
  rec: "MakeClerk" "host" :=
    struct.new Clerk [
      "s" ::= "host"
    ].

End code.
{"author": "mit-pdos", "repo": "perennial", "sha": "76dafee3cd47e1c5e5a6d5436f87738a06f13ee0", "save_path": "github-repos/coq/mit-pdos-perennial", "path": "github-repos/coq/mit-pdos-perennial/perennial-76dafee3cd47e1c5e5a6d5436f87738a06f13ee0/external/Goose/github_com/mit_pdos/gokv/dmvcc/txncoordinator.v"}
import json

import pandas as pd
import numpy as np
from scipy import signal
from scipy.interpolate import interp1d
from astropy.timeseries import LombScargle
import plotly.graph_objects as go
from plotly.utils import PlotlyJSONEncoder

from datasets.constants import domain_types
from datasets.processing_methods.method_base import AnalysisMethodBase


class FrequencyDomainAnalysis(AnalysisMethodBase):
    """HRV frequency-domain analysis of an inter-beat-interval (IBI) series.

    Estimates the power spectral density of the IBI series (Welch FFT or
    Lomb-Scargle), integrates it over the standard ULF/VLF/LF/HF bands and
    returns band powers/peaks as a table plus a Plotly periodogram.
    """

    # Spectral-estimation method identifiers (values stored in configuration).
    FOURIER = 1
    AUTOREGRESSIVE = 2
    LOMB_SCARGLE = 3

    @classmethod
    def name(cls):
        """Display name of this analysis method."""
        return 'HRV Frequency Domain'

    @classmethod
    def domain(cls):
        """Analysis domain tag used to group methods in the UI."""
        return domain_types.FREQUENCY

    @classmethod
    def options(cls):
        """Describe the user-configurable options and their defaults.

        Band limits are upper edges in Hz; defaults follow the standard HRV
        band definitions (ULF<=0.003, VLF<=0.04, LF<=0.15, HF<=0.4).
        """
        return {
            'method': {
                'title': 'Method',
                'type': 'select',
                'items': [
                    {'title': 'Fourier', 'value': cls.FOURIER},
                    #{'title': 'Auto-Regressive', 'value': cls.AUTOREGRESSIVE},
                    {'title': 'Lomb-Scargle', 'value': cls.LOMB_SCARGLE},
                ],
                'default': cls.FOURIER,
            },
            'fft_interpolation': {
                'title': 'FFT interpolation frequency',
                'type': 'number',
                'default': 4.0
            },
            'use_ulf': {
                'title': 'Use ULF Band',
                'type': 'boolean',
                'default': False
            },
            'lomb_smoothing': {
                'title': 'Lomb-Scargle Smoothing',
                'type': 'number',
                'unit': 'Hz',
                'default': 0.02
            },
            'ulf': {
                'title': 'Ultra Low Frequency',
                'type': 'number',
                'unit': 'Hz',
                'limits': [0, 0.5],
                'default': 0.003
            },
            'vlf': {
                'title': 'Very Low Frequency',
                'type': 'number',
                'unit': 'Hz',
                'limits': [0, 0.5],
                'default': 0.04
            },
            'lf': {
                'title': 'Low Frequency',
                'type': 'number',
                'unit': 'Hz',
                'limits': [0, 0.5],
                'default': 0.15
            },
            'hf': {
                'title': 'High Frequency',
                'type': 'number',
                'unit': 'Hz',
                'limits': [0, 0.5],
                'default': 0.4
            },
        }

    def lombscargle(self):
        """Estimate the PSD with the Lomb-Scargle periodogram.

        Uses the cumulative IBI times as (unevenly spaced) sample times, so
        no resampling of the series is required. Returns (freq, psd) with
        freq spanning [0, 0.5] Hz.
        """
        freq = np.linspace(0, 0.5, 2**10)
        t = np.cumsum(self.ibi_series.values)
        psd = LombScargle(t, self.ibi_series.values, normalization='psd').power(freq)
        psd[0] = np.mean(psd[1:])  # fix nan at zero frequency
        # Optional moving-average smoothing, window width given in Hz.
        smoothing = self.configuration['lomb_smoothing']
        if smoothing > 0:
            window = np.max([1, int(round(smoothing / (freq[1] - freq[0])))])
            psd = pd.Series(psd).rolling(window, min_periods=1).mean().values
        return freq, psd

    def fourier(self):
        """Estimate the PSD with Welch's method on a resampled series.

        The unevenly spaced IBI series is cubic-interpolated onto a uniform
        grid at `fft_interpolation` Hz before calling scipy.signal.welch.
        Returns (freq, psd).
        """
        interp_freq = self.configuration['fft_interpolation']
        x = np.cumsum(self.ibi_series.values)
        f_interpol = interp1d(x, self.ibi_series.values, 'cubic')
        # IBI values are in ms, hence the 1000/interp_freq step.
        t_interpol = np.arange(x[0], x[-1], 1000/interp_freq)
        nn_interpol = f_interpol(t_interpol)
        nperseg = int(round(300 * interp_freq))  # 4 Hz signal => each segment is 300 seconds
        nfft = np.max([nperseg, 2**10])
        freq, psd = signal.welch(
            nn_interpol, interp_freq, nfft=nfft, nperseg=nperseg,
            window='hamming', scaling='density', detrend='linear'
        )
        return freq, psd

    def process(self):
        """Run the analysis on the selected samples.

        Returns a dict with a 'table' of band features and a 'plot' holding a
        Plotly periodogram serialized to JSON-compatible structures.
        """
        # Get selected samples from signal.
        data = pd.DataFrame()
        for each in self.analysis_samples:
            data = pd.concat([data, self.signal.samples_dataframe(each.start, each.end)])

        # Make ibi_series available for instance methods (lombscargle/fourier).
        self.ibi_series = data[data.columns[0]]

        # Get configuration parameters.
        use_ulf = self.configuration['use_ulf']
        if not use_ulf or self.ibi_series.sum() < 300000:
            # Do not use ULF band on sample shorter than 5 minutes.
            ulf_limit = 0
        else:
            ulf_limit = self.configuration['ulf']
        vlf_limit = self.configuration['vlf']
        lf_limit = self.configuration['lf']
        hf_limit = self.configuration['hf']
        method = self.configuration['method']

        # Equality (not identity) comparison: the configured value may be a
        # deserialized int that is not the same object as the class constant.
        if method == self.LOMB_SCARGLE:
            freq, psd = self.lombscargle()
        else:
            freq, psd = self.fourier()

        # Boolean masks selecting each band of the spectrum.
        abs_index = freq <= hf_limit
        ulf_index = freq <= ulf_limit
        vlf_index = (freq >= ulf_limit) & (freq <= vlf_limit)
        lf_index = (freq >= vlf_limit) & (freq <= lf_limit)
        hf_index = (freq >= lf_limit) & (freq <= hf_limit)

        # Get power for each band by integrating over spectral density.
        abs_power = np.trapz(psd[abs_index], freq[abs_index])
        ulf = np.trapz(psd[ulf_index], freq[ulf_index])
        vlf = np.trapz(psd[vlf_index], freq[vlf_index])
        lf = np.trapz(psd[lf_index], freq[lf_index])
        hf = np.trapz(psd[hf_index], freq[hf_index])

        # Normalized power for LF and HF band.
        lf_nu = lf / (abs_power - vlf - ulf) * 100
        hf_nu = hf / (abs_power - vlf - ulf) * 100

        # Relative power of each band.
        ulf_perc = (ulf / abs_power) * 100
        vlf_perc = (vlf / abs_power) * 100
        lf_perc = (lf / abs_power) * 100
        hf_perc = (hf / abs_power) * 100

        # Frequency with highest power in each band.
        vlf_peak = freq[vlf_index][np.argmax(psd[vlf_index])]
        lf_peak = freq[lf_index][np.argmax(psd[lf_index])]
        hf_peak = freq[hf_index][np.argmax(psd[hf_index])]

        features = {
            'VLF peak': [vlf_peak, 'Hz'],
            'VLF power': [vlf, 'ms²'],
            'VLF power log': [np.log(vlf), ''],
            'VLF power rel.': [vlf_perc, '%'],
            'LF peak': [lf_peak, 'Hz'],
            'LF power': [lf, 'ms²'],
            'LF power log': [np.log(lf), ''],
            'LF power rel.': [lf_perc, '%'],
            'LF power norm.': [lf_nu, 'nu'],
            'HF peak': [hf_peak, 'Hz'],
            'HF power': [hf, 'ms²'],
            'HF power log': [np.log(hf), ''],
            'HF power rel.': [hf_perc, '%'],
            'HF power norm.': [hf_nu, 'nu'],
            'LF/HF': [lf/hf, ''],
        }

        # Add ULF parameters, if band available.
        if use_ulf and np.sum(ulf_index) > 0:
            # Index into the band's own frequencies, consistent with the
            # vlf/lf/hf peak computations above.
            ulf_peak = freq[ulf_index][np.argmax(psd[ulf_index])]
            features = {
                'ULF peak': [ulf_peak, 'Hz'],
                'ULF power': [ulf, 'ms²'],
                'ULF power log': [np.log(ulf), ''],
                'ULF power rel.': [ulf_perc, '%'],
                **features
            }

        # Convert dictionary to dataframe.
        table_data = pd.DataFrame({
            'Variable': list(features.keys()),
            'Value': np.round([v[0] for v in features.values()], 3),
            'Unit': [v[1] for v in features.values()]
        })
        # Assignment instead of inplace-on-a-column-slice, which pandas does
        # not guarantee to write back to the frame.
        table_data['Value'] = table_data['Value'].replace(np.inf, 'inf')
        table_data['Value'] = table_data['Value'].replace(-np.inf, '-inf')

        # Create plot for power density distribution.
        # Normalize spectral density (ms²/Hz -> s²/Hz) for the plot.
        psd_n = psd / 1000**2  #np.sum(psd)
        psd_plot = go.Figure(
            layout={
                'xaxis_range': [0, 0.5],
                'yaxis_range': [0, 0.1],
                'xaxis_title_text': 'Frequency (Hz)',
                'yaxis_title_text': 'Power (s²/Hz)',
                'title_text': 'Periodogram',
                'legend_orientation': 'h',
            }
        )
        if use_ulf and np.sum(ulf_index) > 0:
            psd_plot.add_trace(go.Scatter(
                x=freq[ulf_index],
                y=psd_n[ulf_index],
                fill='tozeroy',
                fillcolor='purple',
                line_color='black',
                line_width=0.5,
                mode='lines',
                name='ULF',
            ))
        psd_plot.add_trace(go.Scatter(
            x=freq[vlf_index],
            y=psd_n[vlf_index],
            fill='tozeroy',
            fillcolor='grey',
            line_color='black',
            line_width=0.5,
            mode='lines',
            name='VLF',
        ))
        psd_plot.add_trace(go.Scatter(
            x=freq[lf_index],
            y=psd_n[lf_index],
            fill='tozeroy',
            fillcolor='coral',
            line_color='black',
            line_width=0.5,
            mode='lines',
            name='LF',
        ))
        psd_plot.add_trace(go.Scatter(
            x=freq[hf_index],
            y=psd_n[hf_index],
            fill='tozeroy',
            fillcolor='lightgreen',
            line_color='black',
            line_width=0.5,
            mode='lines',
            name='HF',
        ))
        psd_plot.add_trace(go.Scatter(
            x=freq[freq >= hf_limit],
            y=psd_n[freq >= hf_limit],
            fill='tozeroy',
            fillcolor='white',
            line_color='black',
            line_width=0.5,
            mode='lines',
            showlegend=False,
            name='Above HF'
        ))

        # Return results in JSON compliant format.
        return {
            'table': {
                'columns': ['Variable', 'Unit', 'Value'],
                'data': table_data.to_dict('records')
            },
            'plot': json.loads(json.dumps(psd_plot, cls=PlotlyJSONEncoder))
        }
{"hexsha": "5e179b624e45480a7930ce80c0d2110188404530", "size": 9622, "ext": "py", "lang": "Python", "max_stars_repo_path": "backend/datasets/processing_methods/frequency_domain.py", "max_stars_repo_name": "hpi-dhc/alps", "max_stars_repo_head_hexsha": "6f6ab641f5486fc9de6ab42d74fbb0b90b702cdf", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 12, "max_stars_repo_stars_event_min_datetime": "2020-08-12T09:30:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T11:38:48.000Z", "max_issues_repo_path": "backend/datasets/processing_methods/frequency_domain.py", "max_issues_repo_name": "hpi-dhc/alps", "max_issues_repo_head_hexsha": "6f6ab641f5486fc9de6ab42d74fbb0b90b702cdf", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 3, "max_issues_repo_issues_event_min_datetime": "2020-08-12T09:04:16.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-28T01:42:44.000Z", "max_forks_repo_path": "backend/datasets/processing_methods/frequency_domain.py", "max_forks_repo_name": "hpi-dhc/alps", "max_forks_repo_head_hexsha": "6f6ab641f5486fc9de6ab42d74fbb0b90b702cdf", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2021-01-12T05:56:03.000Z", "max_forks_repo_forks_event_max_datetime": "2021-01-12T05:56:03.000Z", "avg_line_length": 32.8395904437, "max_line_length": 92, "alphanum_fraction": 0.4898150073, "include": true, "reason": "import numpy,from scipy,from astropy", "num_tokens": 2434}
      program testhbhstokesgreenid
ccccccccccccccccccccccccccccccccccccccccccccccccccccccccc
c
c     This program tests the Green's identity for the
c     Helmholtz Stokes single and double layer kernels
c
c     It builds a random smooth closed curve, forms single- and
c     double-layer densities from the field of a Helmholtz stokeslet
c     placed at an exterior point, evaluates the layer potentials at an
c     interior point by smooth quadrature, and compares the result
c     against the stokeslet field directly (ratios printed should be 1).
c
      implicit real *8 (a-h,o-z)
      parameter (nmax = 100000)
      integer adjs(2,nmax)
      real *8 hs(nmax),xyin(10),xyout(10)
c
      real *8, allocatable :: chunks(:,:,:),ders(:,:,:),
     1     ders2(:,:,:),dsdt(:,:),rnorms(:,:,:),
     2     whts(:,:),taus(:,:,:)
      complex *16, allocatable :: smu(:,:,:), dmu(:,:,:)
      complex *16, allocatable :: smutau(:,:), dmutau(:,:)
      complex *16, allocatable :: smunu(:,:), dmunu(:,:)
      complex *16 :: veltemp(2), stresstemp(2,2), muexact(2)
      complex *16 :: veldertemp(2,2)
      complex *16 :: velsum(2), velexact(2), velsums(2), velsumd(2)
      complex *16 :: mu(2), nu(2)
      complex *16 :: zk
      real *8 pars(100), src(2), targ(2)
c
      external fcurve,fcurve2, fcurve3
c
      character trans
      trans = 'T'
      call prini(6,13)
      done=1
      pi=4*atan(done)
      ima=(0,1)
c
c
c     define points in exterior and interior of R.
c
      xout=-6.1d0
      yout=-7.2d0
      xin = 0.1d0
      yin = -0.21d0
c
      xyout(1)=xout
      xyout(2)=yout
      call prin2('xyout=*',xyout,2)
      xyin(1) = xin
      xyin(2) = yin
c
c     exact solution is a helmholtz stokes velocity
c     field induced by the vector density muexact
c
      zk = (1.0d0,0.0d0)
      muexact(1) = -0.5d0+hkrand(0)
      muexact(2) = -0.5d0+hkrand(0)
c
c     define then chunk up domain
c
      eps10=1.0d-12
      nover1=6
      k=16
c
      maxchunks=nmax
      allocate(chunks(2,k,maxchunks))
      allocate(ders(2,k,maxchunks))
      allocate(ders2(2,k,maxchunks))
c
c     random perturbation coefficients for the boundary curve fcurve3
c
      ifclosed=1
      chsmall=1000
      ta=0
      tb=2*pi
      t = 0.3d0
      pert = 0.2
      do i=1,100
         pars(i) = pert*hkrand(0)
      enddo
c
      call chunkfunc(eps10,ifclosed,chsmall,ta,tb,fcurve3,
     1     pars,nover1,k,nch,chunks,adjs,ders,ders2,hs)

      hsmax = 0
      do i=1,nch
         if(hsmax.le.hs(i)) hsmax=hs(i)
         do j=1,k
c
c            write(72,*) chunks(1,j,i), chunks(2,j,i)
         enddo
      enddo

      call prin2('hsmax=*',hsmax,1)

      nt = k*nch
c
c     set up densities for Green's identity
c
      allocate(dsdt(k,maxchunks))
      allocate(smu(2,k,maxchunks),dmu(2,k,maxchunks))
      allocate(smutau(k,maxchunks),dmutau(k,maxchunks))
      allocate(smunu(k,maxchunks),dmunu(k,maxchunks))
      allocate(rnorms(2,k,maxchunks))
      allocate(taus(2,k,maxchunks))

c
c     at every boundary node: unit normal/tangent, arclength density,
c     and the single/double layer densities (normal stress and velocity
c     of the exterior stokeslet, respectively)
c
      do i=1,nch
         do j=1,k
            rnorm = sqrt(ders(1,j,i)**2 + ders(2,j,i)**2)
            rnorms(1,j,i) = ders(2,j,i)/rnorm
            rnorms(2,j,i) = -ders(1,j,i)/rnorm
            taus(1,j,i) = ders(1,j,i)/rnorm
            taus(2,j,i) = ders(2,j,i)/rnorm
c
            dsdt(j,i) = rnorm*hs(i)
c
            ifstress=1
            call zhelmstokeslet(zk,xyout,chunks(1,j,i),muexact,
     1           veltemp,ifstress,stresstemp)

c     density for single layer (normal component of stress)

            smu(1,j,i) = stresstemp(1,1)*rnorms(1,j,i)
     1           + stresstemp(1,2)*rnorms(2,j,i)
            smu(2,j,i) = stresstemp(2,1)*rnorms(1,j,i)
     1           + stresstemp(2,2)*rnorms(2,j,i)

            smutau(j,i) = smu(1,j,i)*taus(1,j,i)
     1           +smu(2,j,i)*taus(2,j,i)
            smunu(j,i) = smu(1,j,i)*rnorms(1,j,i)
     1           +smu(2,j,i)*rnorms(2,j,i)

c     density for double layer (velocity)

            dmu(1,j,i) = veltemp(1)
            dmu(2,j,i) = veltemp(2)

            dmutau(j,i) = dmu(1,j,i)*taus(1,j,i)
     1           +dmu(2,j,i)*taus(2,j,i)
            dmunu(j,i) = dmu(1,j,i)*rnorms(1,j,i)
     1           +dmu(2,j,i)*rnorms(2,j,i)

         enddo
      enddo

c
c     Compute velocity from Green's I.D.
c
      allocate(whts(k,nch))
      call chunkwhts(k,nch,chunks,ders,hs,whts)

      velsum(1) = 0.0d0
      velsum(2) = 0.0d0
      velsums(1) = 0.0d0
      velsums(2) = 0.0d0
      velsumd(1) = 0.0d0
      velsumd(2) = 0.0d0

      dmaxwht = 0.0d0
      sumwht = 0.0d0

c
c     smooth quadrature: sum kernel values times quadrature weights
c     over all boundary nodes (target xyin is far from the boundary)
c
      do i = 1,nch
         do j = 1,k
            ifder = 0
            ifstress=0
            src(1) = chunks(1,j,i)
            src(2) = chunks(2,j,i)
            mu(1) = smu(1,j,i)
            mu(2) = smu(2,j,i)
            call zhelmstokeslet(zk,src,xyin,mu,
     1           veltemp,ifstress,stresstemp)
            velsums(1) = velsums(1)+veltemp(1)*whts(j,i)
            velsums(2) = velsums(2)+veltemp(2)*whts(j,i)
            mu(1) = dmu(1,j,i)
            mu(2) = dmu(2,j,i)
            nu(1) = rnorms(1,j,i)
            nu(2) = rnorms(2,j,i)
            trans='T'
            call zhelmstresslet(zk,src,xyin,mu,
     1           nu,veltemp,trans)
            velsumd(1) = velsumd(1)+veltemp(1)*whts(j,i)
            velsumd(2) = velsumd(2)+veltemp(2)*whts(j,i)
         enddo
      enddo

      velsum(1) = -(velsums(1) + velsumd(1))
      velsum(2) = -(velsums(2) + velsumd(2))

      call zhelmstokeslet(zk,xyout,xyin,muexact,
     1     velexact,ifstress,stresstemp)

c     ratios below should be (1.0,0.0) if the identity holds
c      write(*,*) velexact(1), velexact(2)
c      write(*,*) velsum(1), velsum(2)
      write(*,*) velexact(1)/velsum(1), velexact(2)/velsum(2)
c      write(*,*) velsums(1), velsums(2)
c      write(*,*) velsumd(1), velsumd(2)

      stop
      end
c
c
c
c
c
c
      subroutine fcurve3(t,pars,x,y,dxdt,dydt,dxdt2,dydt2)
c
c     parameterization of a randomly perturbed circle of base radius 5:
c     r(t) = 5 + sum_{i=1..6} pars(i)*sin(i*t); returns position and
c     first/second derivatives with respect to t
c
      implicit real *8 (a-h,o-z)
      integer n
      real *8 pars(100), t, pert, x0,y0, r0,r,rp,rpp
c
      x0=0.0d0
      y0=0.0d0

      n = 6
      r0 = 5
c
      r = r0
      rp = 0
      rpp = 0
      do i=1,n
         r = r + pars(i)*dsin(t*i)
         rp = rp + pars(i)*dcos(t*i)*i
         rpp = rpp - pars(i)*dsin(t*i)*i*i
      enddo

      x=x0+r*cos(t)
      y=y0+r*sin(t)

c      call prin2('t=*',t,1)

      dxdt=-r*sin(t) + rp*cos(t)
      dydt=r*cos(t) + rp*sin(t)
      dxdt2= -r*cos(t) + rpp*cos(t) - 2*rp*sin(t)
      dydt2= -r*sin(t) + rpp*sin(t) + 2*rp*cos(t)
c
      return
      end
c
c
{"hexsha": "3516ae40be28ef6436aad8054e3aed8c8fc73aee", "size": 6246, "ext": "f", "lang": "FORTRAN", "max_stars_repo_path": "ye-olde-fortran-test/testhbhstokesgreenid.f", "max_stars_repo_name": "askhamwhat/biharm-evals", "max_stars_repo_head_hexsha": "d836302f544670b3d899bd91ea4cb49e9afb6a75", "max_stars_repo_licenses": ["BSD-3-Clause"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "ye-olde-fortran-test/testhbhstokesgreenid.f", "max_issues_repo_name": "askhamwhat/biharm-evals", "max_issues_repo_head_hexsha": "d836302f544670b3d899bd91ea4cb49e9afb6a75", "max_issues_repo_licenses": ["BSD-3-Clause"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "ye-olde-fortran-test/testhbhstokesgreenid.f", "max_forks_repo_name": "askhamwhat/biharm-evals", "max_forks_repo_head_hexsha": "d836302f544670b3d899bd91ea4cb49e9afb6a75", "max_forks_repo_licenses": ["BSD-3-Clause"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 24.5905511811, "max_line_length": 67, "alphanum_fraction": 0.4967979507, "num_tokens": 2372}
# -*- coding: utf-8 -*- """ Defines methods for configuration. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function __all__ = ["INPUT_TRAIN", "INPUT_VAL", "INPUT_TEST", "load_config", "configure_logger", "configure_input", "configure_estimator_params", "configure_seeds"] import json import logging import logging.handlers import numpy as np import os import sys from nnetmaker.batch import * from nnetmaker.util import * # Loader types dictionary. _LOADERS = {"independent": IndependentBatchLoader, "continuous_sequence": ContinuousSequenceBatchLoader, "discrete_sequence": DiscreteSequenceBatchLoader} # Input modes. INPUT_TRAIN = "train" INPUT_VAL = "val" INPUT_TEST = "test" def _read_json_file(filename): """Reads a json object from the specified file.""" # Strip comments while keeping line numbers. s = "" with open(filename, "r") as f_in: for line in f_in: comment_pos = line.find("//") s += line[:comment_pos] + "\n" return json.loads(s) def load_config(filename): """Reads and validates the configuration file.""" filename = os.path.realpath(filename) sys.path.append(os.path.dirname(filename)) config_obj = _read_json_file(filename) config_val = ArgumentsValidator(config_obj, "Configuration file") with config_val: rand_seed = config_val.get("rand_seed", [ATYPE_NONE, ATYPE_INT], True) epsilon = config_val.get("epsilon", ATYPE_FLOAT, True) model_type = config_val.get("model_type", ATYPE_STRING, True) model_args = config_val.get("model_args", [ATYPE_STRING, ATYPE_DICT], True) if not isinstance(model_args, dict): model_args = _read_json_file(model_args) input_args = config_val.get("input_args", [ATYPE_STRING, ATYPE_DICT], True) if not isinstance(input_args, dict): input_args = _read_json_file(input_args) init_args = config_val.get("init_args", [ATYPE_STRING, ATYPE_DICT], True) if not isinstance(init_args, dict): init_args = _read_json_file(init_args) train_args = config_val.get("training_args", [ATYPE_STRING, 
ATYPE_DICT], True) if not isinstance(train_args, dict): train_args = _read_json_file(train_args) logger_args = config_val.get("logger_args", [ATYPE_STRING, ATYPE_DICT], False, default={}) if not isinstance(logger_args, dict): logger_args = _read_json_file(logger_args) return (rand_seed, epsilon, model_type, model_args, input_args, init_args, train_args, logger_args) def configure_logger(logger_args): """Configures the global Python logger object.""" logger_val = ArgumentsValidator(logger_args, "Logger arguments") with logger_val: show_debug_logs = logger_val.get("show_debug_logs", ATYPE_BOOL, False, default=False) show_date = logger_val.get("show_date", ATYPE_BOOL, False, default=True) syslog_path = logger_val.get("syslog_path", [ATYPE_NONE, ATYPE_STRING], False) logger = logging.getLogger() if show_debug_logs: logger.setLevel(logging.DEBUG) else: logger.setLevel(logging.INFO) if syslog_path is not None: handler = logging.handlers.SysLogHandler(address=syslog_path) else: handler = logging.StreamHandler(stream=sys.stdout) if show_date: date_str = "%Y-%m-%d %H:%M:%S" else: date_str = "%H:%M:%S" formatter = logging.Formatter("%(asctime)s [%(levelname)s] %(message)s", date_str) handler.setFormatter(formatter) logger.addHandler(handler) def configure_input(model_input_names, model_output_names, input_args, input_mode): """Validates the input arguments and constructs the specified loader to return.""" input_val = ArgumentsValidator(input_args, "Input arguments") with input_val: train_loader_type = input_val.get("train_loader_type", ATYPE_STRING, True) train_loader_args = input_val.get("train_loader_args", [ATYPE_STRING, ATYPE_DICT], True) val_loader_type = input_val.get("val_loader_type", ATYPE_STRING, True) val_loader_args = input_val.get("val_loader_args", [ATYPE_STRING, ATYPE_DICT], True) test_loader_type = input_val.get("test_loader_type", ATYPE_STRING, True) test_loader_args = input_val.get("test_loader_args", [ATYPE_STRING, ATYPE_DICT], True) if input_mode == 
INPUT_TRAIN: if not isinstance(train_loader_args, dict): train_loader_args = _read_json_file(train_loader_args) loader_type = train_loader_type loader_args = train_loader_args elif input_mode == INPUT_VAL: if not isinstance(val_loader_args, dict): val_loader_args = _read_json_file(val_loader_args) loader_type = val_loader_type loader_args = val_loader_args elif input_mode == INPUT_TEST: if not isinstance(test_loader_args, dict): test_loader_args = _read_json_file(test_loader_args) loader_type = test_loader_type loader_args = test_loader_args try: return _LOADERS[loader_type](model_input_names, model_output_names, loader_args) except KeyError: raise ValueError("Unrecognized loader type: %s" % loader_type) def configure_seeds(num_seeds, rand_seed): """Initializes seeds for random number generators from the specified random seed.""" base_rng = np.random.RandomState(rand_seed) seeds = [base_rng.randint(12345, 2**32) for _ in range(num_seeds)] return seeds def configure_estimator_params(init_args, train_args): """Validates the initialization and training arguments and constructs a `params` dictionary for creating a TensorFlow Estimator object.""" params = {} init_val = ArgumentsValidator(init_args, "Initialization arguments") with init_val: params["rm_dir_on_init"] = init_val.get("rm_dir", ATYPE_BOOL, True) params["use_ortho_weights"] = init_val.get("use_ortho_weights", ATYPE_BOOL, True) params["max_lsuv_iters"] = init_val.get("max_lsuv_iters", [ATYPE_NONE, ATYPE_INT], True) params["lsuv_tolerance"] = init_val.get("lsuv_tolerance", ATYPE_FLOAT, True) params["init_alpha"] = init_val.get("init_alpha", ATYPE_FLOAT, True) train_val = ArgumentsValidator(train_args, "Training arguments") with train_val: params["save_time"] = train_val.get("save_time", ATYPE_FLOAT, True) params["val_throttle_time"] = train_val.get("val_throttle_time", ATYPE_FLOAT, True) params["learning_rate"] = train_val.get("learning_rate", ATYPE_FLOAT, True) params["sgd_momentum"] = 
train_val.get("sgd_momentum", [ATYPE_NONE, ATYPE_FLOAT], True) params["sgd_use_nesterov"] = train_val.get("sgd_use_nesterov", ATYPE_BOOL, True) params["use_rmsprop"] = train_val.get("use_rmsprop", ATYPE_BOOL, True) params["rmsprop_decay"] = train_val.get("rmsprop_decay", ATYPE_FLOAT, True) params["rmsprop_momentum"] = train_val.get("rmsprop_momentum", ATYPE_FLOAT, True) params["rmsprop_epsilon"] = train_val.get("rmsprop_epsilon", ATYPE_FLOAT, True) params["reg_weight_decay"] = train_val.get("reg_weight_decay", [ATYPE_NONE, ATYPE_FLOAT], True) params["cost_type"] = train_val.get("cost_type", ATYPE_STRING, True).lower() params["max_grad_norm"] = train_val.get("max_grad_norm", [ATYPE_NONE, ATYPE_FLOAT], True) params["parallel_grad_gate"] = train_val.get("parallel_grad_gate", ATYPE_BOOL, True) return params
{"hexsha": "38213a340e53938cd01ed4423cbfbd6ca7d46763", "size": 7316, "ext": "py", "lang": "Python", "max_stars_repo_path": "nnetmaker/config.py", "max_stars_repo_name": "0xsx/nnmaker", "max_stars_repo_head_hexsha": "d7d78e99dc3f738c733895fd937a0bdd092457fe", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "nnetmaker/config.py", "max_issues_repo_name": "0xsx/nnmaker", "max_issues_repo_head_hexsha": "d7d78e99dc3f738c733895fd937a0bdd092457fe", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2018-12-11T00:48:49.000Z", "max_issues_repo_issues_event_max_datetime": "2018-12-21T01:05:23.000Z", "max_forks_repo_path": "nnetmaker/config.py", "max_forks_repo_name": "0xsx/nnmaker", "max_forks_repo_head_hexsha": "d7d78e99dc3f738c733895fd937a0bdd092457fe", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.6108786611, "max_line_length": 99, "alphanum_fraction": 0.734691088, "include": true, "reason": "import numpy", "num_tokens": 1767}
# Project 3: Competition and Collaboration
# Udacity Deep Reinforcement Learning nanodegree
#
# Deep Neural Network architectures for function approximators
# of MADDPG agent and critic functions.
#
# Architectures based on the DDPG paper, modified to fit MADDPG
# (the critic observes the joint state/action of all agents):
#   DDPG:   Lillicrap et al., "Continuous control with deep reinforcement
#           learning", arXiv:1509.02971 (2015).
#   MADDPG: Lowe et al., "Multi-agent actor-critic for mixed
#           cooperative-competitive environments", arXiv:1706.02275 (2017).
#
# Félix Ramón López Martínez, January 2022
# (modified version of Alexis Cook's ddpg-pendulum example code)

import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F


def hidden_init(layer):
    """Return the uniform init bounds (-1/sqrt(fan), 1/sqrt(fan)) for `layer`.

    Used to initialize hidden-layer weights acc. to the DDPG paper.
    NOTE(review): `layer.weight.data.size()[0]` is `out_features` for an
    nn.Linear (weight shape is (out, in)); the DDPG paper prescribes fan-in,
    which would be `size()[1]`. Kept as-is to preserve the original
    initialization -- confirm intent before changing.
    """
    fan_in = layer.weight.data.size()[0]
    lim = 1. / np.sqrt(fan_in)
    return (-lim, lim)


class Actor(nn.Module):
    """Actor (Policy) model mapping a state to a continuous action in [-1, 1].

    Architecture: 3-layer feedforward network with leaky-ReLU hidden
    activations and a tanh output.
    """

    def __init__(self, state_size, action_size, seed, fc1_units=128, fc2_units=128):
        """Initialize parameters and define model elements.

        Params
        ======
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Manual seed for repetitiveness
            fc1_units (int): Number of nodes in first hidden layer
            fc2_units (int): Number of nodes in second hidden layer
        """
        super(Actor, self).__init__()
        self.seed = torch.manual_seed(seed)

        # Linear feedforward layers.
        self.fc1 = nn.Linear(state_size, fc1_units)
        self.fc2 = nn.Linear(fc1_units, fc2_units)
        self.fc3 = nn.Linear(fc2_units, action_size)

        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weights: hidden layers uniform in +-1/sqrt(fan) (acc. to
        DDPG paper), output layer uniform in +-3e-3."""
        self.fc1.weight.data.uniform_(*hidden_init(self.fc1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state):
        """Network forward pass: map a batch of states to actions in [-1, 1]."""
        x = F.leaky_relu(self.fc1(state))
        x = F.leaky_relu(self.fc2(x))
        # torch.tanh replaces the deprecated F.tanh.
        action = torch.tanh(self.fc3(x))
        return action


class Critic(nn.Module):
    """Critic (Value) model mapping joint (states, actions) to a scalar value.

    MADDPG-style centralized critic: the first layer consumes the
    concatenated states of all agents; the joint actions are injected at
    the second layer (acc. to DDPG paper).
    """

    def __init__(self, num_agents, state_size, action_size, seed, fcs1_units=128, fc2_units=128):
        """Initialize parameters and build model.

        Params
        ======
            num_agents (int): Number of agents (joint inputs are num_agents wide)
            state_size (int): Dimension of each state
            action_size (int): Dimension of each action
            seed (int): Random seed
            fcs1_units (int): Number of nodes in the first hidden layer
            fc2_units (int): Number of nodes in the second hidden layer
        """
        super(Critic, self).__init__()
        self.seed = torch.manual_seed(seed)

        # Linear feedforward layers; actions enter at the second layer.
        self.fcs1 = nn.Linear(state_size * num_agents, fcs1_units)
        self.fc2 = nn.Linear(fcs1_units + action_size * num_agents, fc2_units)
        self.fc3 = nn.Linear(fc2_units, 1)

        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weights: hidden layers uniform in +-1/sqrt(fan), output
        layer uniform in +-3e-3 (acc. to DDPG paper)."""
        self.fcs1.weight.data.uniform_(*hidden_init(self.fcs1))
        self.fc2.weight.data.uniform_(*hidden_init(self.fc2))
        self.fc3.weight.data.uniform_(-3e-3, 3e-3)

    def forward(self, state, action):
        """Network forward pass: map joint (state, action) pairs to values.

        `state` has shape (batch, state_size * num_agents) and `action`
        has shape (batch, action_size * num_agents).
        """
        xs = F.leaky_relu(self.fcs1(state))
        # Concatenate the joint action after the first hidden layer.
        x = torch.cat((xs, action.float()), dim=1)
        x = F.leaky_relu(self.fc2(x))
        value = self.fc3(x)
        return value
{"hexsha": "aae8ce216d28b2f69f2205b2a95061834586f64c", "size": 5347, "ext": "py", "lang": "Python", "max_stars_repo_path": "P3-Competition_and_Collaboration/actor_critic_nets.py", "max_stars_repo_name": "felixrlopezm/Udacity_Deep_Reinforcement_Learning", "max_stars_repo_head_hexsha": "78a08da0b40371cc1a2c8ecc06cf845b44cb0d7c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "P3-Competition_and_Collaboration/actor_critic_nets.py", "max_issues_repo_name": "felixrlopezm/Udacity_Deep_Reinforcement_Learning", "max_issues_repo_head_hexsha": "78a08da0b40371cc1a2c8ecc06cf845b44cb0d7c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "P3-Competition_and_Collaboration/actor_critic_nets.py", "max_forks_repo_name": "felixrlopezm/Udacity_Deep_Reinforcement_Learning", "max_forks_repo_head_hexsha": "78a08da0b40371cc1a2c8ecc06cf845b44cb0d7c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.3916083916, "max_line_length": 119, "alphanum_fraction": 0.6371797269, "include": true, "reason": "import numpy", "num_tokens": 1343}
#include "teca_tc_classify.h"

#include "teca_table.h"
#include "teca_array_collection.h"
#include "teca_variant_array.h"
#include "teca_metadata.h"
#include "teca_distance_function.h"
#include "teca_saffir_simpson.h"
#include "teca_geometry.h"
#include "teca_geography.h"

#include <iostream>
#include <string>
#include <sstream>

#if defined(TECA_HAS_BOOST)
#include <boost/program_options.hpp>
#endif

#if defined(TECA_HAS_UDUNITS)
#include "teca_calcalcs.h"
#endif

#if defined(TECA_HAS_MPI)
#include <mpi.h>
#endif

using std::cerr;
using std::endl;

// --------------------------------------------------------------------------
// Construct with the default input column names and pre-load the standard
// cyclone basin geometry so region classification works out of the box.
teca_tc_classify::teca_tc_classify() :
    track_id_column("track_id"), time_column("time"),
    x_coordinate_column("lon"), y_coordinate_column("lat"),
    surface_wind_column("surface_wind"),
    sea_level_pressure_column("sea_level_pressure")
{
    this->set_number_of_input_connections(1);
    this->set_number_of_output_ports(1);

    // default regions: the standard cyclone basins
    teca_geography::get_cyclone_basins(this->region_sizes,
        this->region_starts, this->region_x_coordinates,
        this->region_y_coordinates, this->region_ids,
        this->region_names, this->region_long_names);
}

// --------------------------------------------------------------------------
teca_tc_classify::~teca_tc_classify()
{}

#if defined(TECA_HAS_BOOST)
// --------------------------------------------------------------------------
// Declare the command line options for this algorithm's properties.
void teca_tc_classify::get_properties_description(
    const std::string &prefix, options_description &global_opts)
{
    options_description opts("Options for "
        + (prefix.empty()?"teca_tc_classify":prefix));

    opts.add_options()
        TECA_POPTS_GET(std::string, prefix, track_id_column,
            "name of the column containing track ids")
        TECA_POPTS_GET(std::string, prefix, time_column,
            "name of the column containing time stamps")
        TECA_POPTS_GET(std::string, prefix, surface_wind_column,
            "name of column containing wind speeds")
        // fix: this property existed but was not exposed on the command line
        TECA_POPTS_GET(std::string, prefix, sea_level_pressure_column,
            "name of column containing sea level pressure")
        TECA_POPTS_GET(std::string, prefix, x_coordinate_column,
            "name of the column containing x coordinates")
        TECA_POPTS_GET(std::string, prefix, y_coordinate_column,
            "name of the column containing y coordinates")
        TECA_POPTS_MULTI_GET(std::vector<unsigned long>, prefix, region_sizes,
            "the number of points in each region")
        TECA_POPTS_MULTI_GET(std::vector<double>, prefix, region_x_coordinates,
            "list of x coordinates describing the regions")
        TECA_POPTS_MULTI_GET(std::vector<double>, prefix, region_y_coordinates,
            "list of y coordinates describing the regions")
        TECA_POPTS_MULTI_GET(std::vector<int>, prefix, region_ids,
            "list of numeric ids identifying each region. "
            " if not provided sequential ids are generated")
        TECA_POPTS_MULTI_GET(std::vector<std::string>, prefix, region_names,
            "list of names identifying each region. "
            "if not provided names are generated from ids")
        TECA_POPTS_MULTI_GET(std::vector<std::string>, prefix, region_long_names,
            "list of long/readable names identifying "
            "each region. if not provided names are generated from ids")
        ;

    this->teca_algorithm::get_properties_description(prefix, opts);

    global_opts.add(opts);
}

// --------------------------------------------------------------------------
// Apply command line options to this algorithm's properties.
void teca_tc_classify::set_properties(
    const std::string &prefix, variables_map &opts)
{
    this->teca_algorithm::set_properties(prefix, opts);

    TECA_POPTS_SET(opts, std::string, prefix, track_id_column)
    TECA_POPTS_SET(opts, std::string, prefix, time_column)
    TECA_POPTS_SET(opts, std::string, prefix, surface_wind_column)
    // fix: keep in sync with get_properties_description
    TECA_POPTS_SET(opts, std::string, prefix, sea_level_pressure_column)
    TECA_POPTS_SET(opts, std::string, prefix, x_coordinate_column)
    TECA_POPTS_SET(opts, std::string, prefix, y_coordinate_column)
    TECA_POPTS_SET(opts, std::vector<unsigned long>, prefix, region_sizes)
    TECA_POPTS_SET(opts, std::vector<double>, prefix, region_x_coordinates)
    TECA_POPTS_SET(opts, std::vector<double>, prefix, region_y_coordinates)
    TECA_POPTS_SET(opts, std::vector<int>, prefix, region_ids)
    TECA_POPTS_SET(opts, std::vector<std::string>, prefix, region_names)
    TECA_POPTS_SET(opts, std::vector<std::string>, prefix, region_long_names)
}
#endif
-------------------------------------------------------------------------- const_p_teca_dataset teca_tc_classify::execute( unsigned int port, const std::vector<const_p_teca_dataset> &input_data, const teca_metadata &request) { #ifdef TECA_DEBUG cerr << teca_parallel_id() << "teca_tc_classify::execute" << endl; #endif (void)port; (void)request; // get the input table const_p_teca_table in_table = std::dynamic_pointer_cast<const teca_table>(input_data[0]); // only rank 0 is required to have data int rank = 0; #if defined(TECA_HAS_MPI) int init = 0; MPI_Initialized(&init); if (init) MPI_Comm_rank(this->get_communicator(), &rank); #endif if (!in_table) { if (rank == 0) { TECA_FATAL_ERROR("Input is empty or not a table") } return nullptr; } // get calendar and unit system std::string calendar; if ((in_table->get_calendar(calendar)) && calendar.empty()) { TECA_FATAL_ERROR("Calendar is missing") return nullptr; } std::string time_units; if ((in_table->get_time_units(time_units)) && time_units.empty()) { TECA_FATAL_ERROR("time units are missing") return nullptr; } if (time_units.find("days since") == std::string::npos) { TECA_FATAL_ERROR("Conversion for \"" << time_units << "\" not implemented") return nullptr; } // get the track ids const_p_teca_int_array track_ids = std::dynamic_pointer_cast<const teca_int_array>( in_table->get_column(this->track_id_column)); if (!track_ids) { TECA_FATAL_ERROR("column \"" << this->track_id_column << "\" is not in the table") return nullptr; } // the spatial coorinates const_p_teca_variant_array x = in_table->get_column(this->x_coordinate_column); if (!x) { TECA_FATAL_ERROR("column \"" << this->x_coordinate_column << "\" is not in the table") return nullptr; } const_p_teca_variant_array y = in_table->get_column(this->y_coordinate_column); if (!y) { TECA_FATAL_ERROR("column \"" << this->y_coordinate_column << "\" is not in the table") return nullptr; } // time axis const_p_teca_variant_array time = in_table->get_column(this->time_column); 
if (!time) { TECA_FATAL_ERROR("column \"" << this->time_column << "\" is not in the table") return nullptr; } // get the surface wind speeds const_p_teca_variant_array surface_wind = in_table->get_column(this->surface_wind_column); if (!surface_wind) { TECA_FATAL_ERROR("column \"" << this->surface_wind_column << "\" is not in the table") return nullptr; } // get the surface wind speeds const_p_teca_variant_array sea_level_pressure = in_table->get_column(this->sea_level_pressure_column); if (!sea_level_pressure) { TECA_FATAL_ERROR("column \"" << this->sea_level_pressure_column << "\" is not in the table") return nullptr; } // scan the track ids and build the random access // data structure std::vector<unsigned long> track_starts(1, 0); size_t n_rows = track_ids->size(); const int *pids = track_ids->get(); for (size_t i = 1; i < n_rows; ++i) if (pids[i] != pids[i-1]) track_starts.push_back(i); track_starts.push_back(n_rows); size_t n_tracks = track_starts.size() - 1; // record track id p_teca_long_array out_ids = teca_long_array::New(n_tracks); long *pout_ids = out_ids->get(); for (size_t i =0; i < n_tracks; ++i) pout_ids[i] = pids[track_starts[i]]; // record track start time p_teca_variant_array start_time = time->new_instance(n_tracks); TEMPLATE_DISPATCH(teca_variant_array_impl, start_time.get(), const NT *ptime = static_cast<const TT*>(time.get())->get(); NT *pstart_time = static_cast<TT*>(start_time.get())->get(); for (size_t i = 0; i < n_tracks; ++i) pstart_time[i] = ptime[track_starts[i]]; ) // record track start position p_teca_variant_array start_x = x->new_instance(n_tracks); p_teca_variant_array start_y = x->new_instance(n_tracks); TEMPLATE_DISPATCH_FP(teca_variant_array_impl, start_x.get(), const NT *px = static_cast<const TT*>(x.get())->get(); const NT *py = static_cast<const TT*>(y.get())->get(); NT *pstart_x = static_cast<TT*>(start_x.get())->get(); NT *pstart_y = static_cast<TT*>(start_y.get())->get(); for (size_t i = 0; i < n_tracks; ++i) { unsigned 
long q = track_starts[i]; pstart_x[i] = px[q]; pstart_y[i] = py[q]; } ) // compute the storm duration p_teca_variant_array duration = time->new_instance(n_tracks); TEMPLATE_DISPATCH(teca_variant_array_impl, duration.get(), const NT *ptime = static_cast<const TT*>(time.get())->get(); NT *pduration = static_cast<TT*>(duration.get())->get(); for (size_t i = 0; i < n_tracks; ++i) { unsigned long first = track_starts[i]; unsigned long last = track_starts[i+1] - 1; pduration[i] = ptime[last] - ptime[first]; } ) // compute the distance traveled p_teca_variant_array length = x->new_instance(n_tracks); TEMPLATE_DISPATCH_FP(teca_variant_array_impl, length.get(), const NT *px = static_cast<const TT*>(x.get())->get(); const NT *py = static_cast<const TT*>(y.get())->get(); NT *plength = static_cast<TT*>(length.get())->get(); for (size_t i = 0; i < n_tracks; ++i) { unsigned long track_start = track_starts[i]; unsigned long npts = track_starts[i+1] - track_start - 1; plength[i] = NT(); for (unsigned long j = 0; j < npts; ++j) { unsigned long p = track_start + j; unsigned long q = p + 1; plength[i] += teca_distance(px[p], py[p], px[q], py[q]); } } ) // rank the track on Saphir-Simpson scale // record the max wind speed, and position of it p_teca_int_array category = teca_int_array::New(n_tracks); int *pcategory = category->get(); p_teca_variant_array max_surface_wind = surface_wind->new_instance(n_tracks); p_teca_unsigned_long_array max_surface_wind_id = teca_unsigned_long_array::New(n_tracks); unsigned long *pmax_surface_wind_id = max_surface_wind_id->get(); TEMPLATE_DISPATCH_FP(teca_variant_array_impl, max_surface_wind.get(), const NT *psurface_wind = static_cast<const TT*>(surface_wind.get())->get(); NT *pmax_surface_wind = static_cast<TT*>(max_surface_wind.get())->get(); for (size_t i = 0; i < n_tracks; ++i) { unsigned long track_start = track_starts[i]; unsigned long npts = track_starts[i+1] - track_start; NT max_val = std::numeric_limits<NT>::lowest(); unsigned long max_id = 
0; for (size_t j = 0; j < npts; ++j) { unsigned long id = track_start + j; NT val = psurface_wind[id]; bool max_changed = val > max_val; max_val = max_changed ? val : max_val; max_id = max_changed ? id : max_id; } pcategory[i] = teca_saffir_simpson::classify_mps(max_val); pmax_surface_wind[i] = max_val; pmax_surface_wind_id[i] = max_id; } ) // location of the max surface wind p_teca_variant_array max_surface_wind_x = x->new_instance(n_tracks); p_teca_variant_array max_surface_wind_y = x->new_instance(n_tracks); TEMPLATE_DISPATCH_FP(teca_variant_array_impl, start_x.get(), const NT *px = static_cast<const TT*>(x.get())->get(); const NT *py = static_cast<const TT*>(y.get())->get(); NT *pmax_surface_wind_x = static_cast<TT*>(max_surface_wind_x.get())->get(); NT *pmax_surface_wind_y = static_cast<TT*>(max_surface_wind_y.get())->get(); for (size_t i = 0; i < n_tracks; ++i) { unsigned long q = pmax_surface_wind_id[i]; pmax_surface_wind_x[i] = px[q]; pmax_surface_wind_y[i] = py[q]; } ) // time of max surface wind p_teca_variant_array max_surface_wind_t = time->new_instance(n_tracks); TEMPLATE_DISPATCH(teca_variant_array_impl, max_surface_wind_t.get(), const NT *ptime = static_cast<const TT*>(time.get())->get(); NT *pmax_surface_wind_t = static_cast<TT*>(max_surface_wind_t.get())->get(); for (size_t i = 0; i < n_tracks; ++i) { unsigned long q = pmax_surface_wind_id[i]; pmax_surface_wind_t[i] = ptime[q]; } ) // record the min sea level pressure p_teca_variant_array min_sea_level_pressure = sea_level_pressure->new_instance(n_tracks); p_teca_unsigned_long_array min_sea_level_pressure_id = teca_unsigned_long_array::New(n_tracks); unsigned long *pmin_sea_level_pressure_id = min_sea_level_pressure_id->get(); TEMPLATE_DISPATCH_FP(teca_variant_array_impl, min_sea_level_pressure.get(), const NT *psea_level_pressure = static_cast<const TT*>(sea_level_pressure.get())->get(); NT *pmin_sea_level_pressure = static_cast<TT*>(min_sea_level_pressure.get())->get(); for (size_t i = 0; i < 
n_tracks; ++i) { unsigned long track_start = track_starts[i]; unsigned long npts = track_starts[i+1] - track_start; NT min_val = std::numeric_limits<NT>::max(); unsigned long min_id = 0; for (size_t j = 0; j < npts; ++j) { unsigned long q = track_start + j; NT val = psea_level_pressure[q]; bool min_changed = val < min_val; min_val = min_changed ? val : min_val; min_id = min_changed ? q : min_id; } pmin_sea_level_pressure[i] = min_val; pmin_sea_level_pressure_id[i] = min_id; } ) // location of the min sea level pressure p_teca_variant_array min_sea_level_pressure_x = x->new_instance(n_tracks); p_teca_variant_array min_sea_level_pressure_y = x->new_instance(n_tracks); TEMPLATE_DISPATCH_FP(teca_variant_array_impl, start_x.get(), const NT *px = static_cast<const TT*>(x.get())->get(); const NT *py = static_cast<const TT*>(y.get())->get(); NT *pmin_sea_level_pressure_x = static_cast<TT*>(min_sea_level_pressure_x.get())->get(); NT *pmin_sea_level_pressure_y = static_cast<TT*>(min_sea_level_pressure_y.get())->get(); for (size_t i = 0; i < n_tracks; ++i) { unsigned long q = pmin_sea_level_pressure_id[i]; pmin_sea_level_pressure_x[i] = px[q]; pmin_sea_level_pressure_y[i] = py[q]; } ) // time of min sea level pressure p_teca_variant_array min_sea_level_pressure_t = time->new_instance(n_tracks); TEMPLATE_DISPATCH(teca_variant_array_impl, min_sea_level_pressure_t.get(), const NT *ptime = static_cast<const TT*>(time.get())->get(); NT *pmin_sea_level_pressure_t = static_cast<TT*>(min_sea_level_pressure_t.get())->get(); for (size_t i = 0; i < n_tracks; ++i) { unsigned long q = pmin_sea_level_pressure_id[i]; pmin_sea_level_pressure_t[i] = ptime[q]; } ) // ACE (accumulated cyclonigc energy) // The ACE of a season is calculated by summing the squares of the // estimated maximum sustained velocity of every active tropical storm // (wind speed 35 knots (65 km/h) or higher), at six-hour intervals. 
Since // the calculation is sensitive to the starting point of the six-hour // intervals, the convention is to use 0000, 0600, 1200, and 1800 UTC. If // any storms of a season happen to cross years, the storm's ACE counts for // the previous year.[2] The numbers are usually divided by 10,000 to make // them more manageable. The unit of ACE is 10^4 kn^2, and for use as an // index the unit is assumed. Thus: // {\displaystyle {\text{ACE}}=10^{-4}\sum v_{\max }^{2}} // {\text{ACE}}=10^{{-4}}\sum v_{\max }^{2} where vmax is estimated // sustained wind speed in knots. p_teca_variant_array ACE = surface_wind->new_instance(n_tracks); NESTED_TEMPLATE_DISPATCH_FP(const teca_variant_array_impl, time.get(), _T, const NT_T *ptime = static_cast<TT_T*>(time.get())->get(); NESTED_TEMPLATE_DISPATCH_FP(teca_variant_array_impl, ACE.get(), _W, NT_W *pACE = static_cast<TT_W*>(ACE.get())->get(); const NT_W *psurface_wind = static_cast<const TT_W*>(surface_wind.get())->get(); for (size_t i = 0; i < n_tracks; ++i) { unsigned long track_start = track_starts[i]; unsigned long npts = track_starts[i+1] - track_start - 1; pACE[i] = NT_W(); // for now skip the first and last track point // could handle these as a special case if needed for (size_t j = 1; j < npts; ++j) { unsigned long id = track_start + j; NT_W dt = ptime[id+1] - ptime[id-1]; NT_W w = psurface_wind[id]; pACE[i] += w < teca_saffir_simpson::get_lower_bound_mps<NT_W>(0) ? 
NT_W() : w*w*dt; } // correct the units // wind speed conversion : 1 m/s = 1.943844 kn // time unit conversions: 24 hours per day, 6 hours per time unit, // and we sample time in days at t +/- 1/2 => dt*24/2/6 => dt*2 // by convention scale by 10^-4 pACE[i] *= NT_W(2.0)*NT_W(3.778529496)*NT_W(1.0e-4); } ) ) // PDI (power dissipation index) // PDI = \sum v_{max}^{3} \delta t // see: Environmental Factors Affecting Tropical Cyclone Power Dissipation // KERRY EMANUEL, 15 NOVEMBER 2007, JOURNAL OF CLIMATE p_teca_variant_array PDI = surface_wind->new_instance(n_tracks); NESTED_TEMPLATE_DISPATCH_FP(const teca_variant_array_impl, time.get(), _T, const NT_T *ptime = static_cast<TT_T*>(time.get())->get(); NESTED_TEMPLATE_DISPATCH_FP(teca_variant_array_impl, PDI.get(), _W, NT_W *pPDI = static_cast<TT_W*>(PDI.get())->get(); const NT_W *psurface_wind = static_cast<const TT_W*>(surface_wind.get())->get(); for (size_t i = 0; i < n_tracks; ++i) { unsigned long track_start = track_starts[i]; unsigned long npts = track_starts[i+1] - track_start - 1; pPDI[i] = NT_W(); // for now skip the first and last track point // could handle these as a special case if needed for (size_t j = 1; j < npts; ++j) { unsigned long id = track_start + j; NT_W dt = ptime[id+1] - ptime[id-1]; NT_W w = psurface_wind[id]; pPDI[i] += w < teca_saffir_simpson::get_lower_bound_mps<NT_W>(0) ? 
NT_W() : w*w*w*dt; } // correct the units // time unit conversions: 24*3600 seconds per day // and we sample time in days at t +/- 1 => dt*24*3600/2 pPDI[i] *= NT_W(43200); } ) ) // cyclogenisis, determine region of origin size_t n_regions = this->region_sizes.size(); std::vector<unsigned long> rstarts(this->region_starts); if (rstarts.empty()) { // generate starts rstarts.reserve(n_regions); rstarts.push_back(0); for (size_t i = 0; i < n_regions; ++i) rstarts.push_back(rstarts[i] + this->region_sizes[i]); } std::vector<int> rids(this->region_ids); if (rids.empty()) { // generate ids rids.reserve(n_regions); for (size_t i = 0; i < n_regions; ++i) rids.push_back(i); } std::vector<std::string> rnames(this->region_names); if (rnames.empty()) { // generate names std::ostringstream oss; rnames.reserve(n_regions); for (size_t i = 0; i < n_regions; ++i) { oss.str(""); oss << "r" << i; rnames.push_back(oss.str()); } } std::vector<std::string> rlnames(this->region_long_names); if (rnames.empty()) { // generate names std::ostringstream oss; rnames.reserve(n_regions); for (size_t i = 0; i < n_regions; ++i) { oss.str(""); oss << "region_" << i; rlnames.push_back(oss.str()); } } p_teca_int_array region_id = teca_int_array::New(n_tracks, -1); int *pregion_id = region_id->get(); p_teca_string_array region_name = teca_string_array::New(n_tracks); std::string *pregion_name = region_name->get(); p_teca_string_array region_long_name = teca_string_array::New(n_tracks); std::string *pregion_long_name = region_long_name->get(); TEMPLATE_DISPATCH_FP(const teca_variant_array_impl, x.get(), const NT *px = static_cast<const TT*>(x.get())->get(); const NT *py = static_cast<const TT*>(y.get())->get(); for (size_t i = 0; i < n_tracks; ++i) { unsigned long q = track_starts[i]; double ptx = px[q]; double pty = py[q]; for (size_t j = 0; j < n_regions; ++j) { double *polyx = this->region_x_coordinates.data() + rstarts[j]; double *polyy = this->region_y_coordinates.data() + rstarts[j]; if 
(teca_geometry::point_in_poly(ptx, pty, polyx, polyy, this->region_sizes[j])) { unsigned long rid = rids[j]; pregion_id[i] = rid; pregion_name[i] = rnames[rid]; pregion_long_name[i] = rlnames[rid]; // early termination precludes a storm from being counted in // multiple regions. if we want to allow overlapping regions // this would need to change. break; } } if (pregion_id[i] < 0) { TECA_WARNING("track " << i << " is not any of the regions!") } } ) // construct the output p_teca_table out_table = teca_table::New(); out_table->copy_metadata(in_table); out_table->append_column("track_id", out_ids); out_table->append_column("start_time", start_time); out_table->append_column("start_x", start_x); out_table->append_column("start_y", start_y); out_table->append_column("duration", duration); out_table->append_column("length", length); out_table->append_column("category", category); out_table->append_column("ACE", ACE); out_table->append_column("PDI", PDI); out_table->append_column("max_surface_wind", max_surface_wind); out_table->append_column("max_surface_wind_x", max_surface_wind_x); out_table->append_column("max_surface_wind_y", max_surface_wind_y); out_table->append_column("max_surface_wind_t", max_surface_wind_t); out_table->append_column("min_sea_level_pressure", min_sea_level_pressure); out_table->append_column("min_sea_level_pressure_x", min_sea_level_pressure_x); out_table->append_column("min_sea_level_pressure_y", min_sea_level_pressure_y); out_table->append_column("min_sea_level_pressure_t", min_sea_level_pressure_t); out_table->append_column("region_id", region_id); out_table->append_column("region_name", region_name); out_table->append_column("region_long_name", region_long_name); return out_table; }
{"hexsha": "9c82ab80f10a783ac4aa38c49603ff74a46c90ed", "size": 25019, "ext": "cxx", "lang": "C++", "max_stars_repo_path": "alg/teca_tc_classify.cxx", "max_stars_repo_name": "LBL-EESA/TECA", "max_stars_repo_head_hexsha": "63923b8a12914f3758dc9525239bc48cd8864b39", "max_stars_repo_licenses": ["BSD-3-Clause-LBNL"], "max_stars_count": 34.0, "max_stars_repo_stars_event_min_datetime": "2017-03-28T14:22:25.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-23T05:02:25.000Z", "max_issues_repo_path": "alg/teca_tc_classify.cxx", "max_issues_repo_name": "LBL-EESA/TECA", "max_issues_repo_head_hexsha": "63923b8a12914f3758dc9525239bc48cd8864b39", "max_issues_repo_licenses": ["BSD-3-Clause-LBNL"], "max_issues_count": 476.0, "max_issues_repo_issues_event_min_datetime": "2016-11-28T18:06:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-01-25T05:31:42.000Z", "max_forks_repo_path": "alg/teca_tc_classify.cxx", "max_forks_repo_name": "LBL-EESA/TECA", "max_forks_repo_head_hexsha": "63923b8a12914f3758dc9525239bc48cd8864b39", "max_forks_repo_licenses": ["BSD-3-Clause-LBNL"], "max_forks_count": 19.0, "max_forks_repo_forks_event_min_datetime": "2017-04-25T18:15:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-28T18:16:05.000Z", "avg_line_length": 35.387553041, "max_line_length": 96, "alphanum_fraction": 0.5985451057, "num_tokens": 6086}
// Copyright 2012 Cloudera Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "util/runtime-profile.h" #include "common/object-pool.h" #include "util/compress.h" #include "util/cpu-info.h" #include "util/debug-util.h" #include "util/thrift-util.h" #include "util/url-coding.h" #include "util/container-util.h" #include <iomanip> #include <iostream> #include <boost/thread/locks.hpp> #include <boost/thread/thread.hpp> #include <boost/foreach.hpp> using namespace boost; using namespace std; namespace impala { // Period to update rate counters and sampling counters in ms. DEFINE_int32(periodic_counter_update_period_ms, 500, "Period to update rate counters and" " sampling counters in ms"); // Thread counters name static const string THREAD_TOTAL_TIME = "TotalWallClockTime"; static const string THREAD_USER_TIME = "UserTime"; static const string THREAD_SYS_TIME = "SysTime"; static const string THREAD_VOLUNTARY_CONTEXT_SWITCHES = "VoluntaryContextSwitches"; static const string THREAD_INVOLUNTARY_CONTEXT_SWITCHES = "InvoluntaryContextSwitches"; // The root counter name for all top level counters. 
// Name of the implicit root in the child-counter hierarchy: counters added
// with no parent are registered under this empty-string key.
static const string ROOT_COUNTER = "";

// Process-wide state for the background thread that services rate,
// sampling, and bucketing counters.
RuntimeProfile::PeriodicCounterUpdateState RuntimeProfile::periodic_counter_update_state_;

// Construct a profile node. The "TotalTime" counter is backed by a member
// (counter_total_time_) rather than a pool allocation, so it always exists.
// own_pool_ starts false; the pool is only deleted when ownership is taken.
RuntimeProfile::RuntimeProfile(ObjectPool* pool, const string& name)
  : pool_(pool),
    own_pool_(false),
    name_(name),
    metadata_(-1),
    counter_total_time_(TCounterType::TIME_NS),
    local_time_percent_(0) {
  counter_map_["TotalTime"] = &counter_total_time_;
}

// Detach all counters from the periodic-update machinery before the
// counters (pool-owned) go away.
RuntimeProfile::~RuntimeProfile() {
  map<string, Counter*>::const_iterator iter;
  for (iter = counter_map_.begin(); iter != counter_map_.end(); ++iter) {
    StopRateCounterUpdates(iter->second);
    StopSamplingCounterUpdates(iter->second);
  }
  set<vector<Counter*>* >::const_iterator buckets_iter;
  for (buckets_iter = bucketing_counters_.begin();
       buckets_iter != bucketing_counters_.end(); ++buckets_iter) {
    // This is just a clean up. No need to perform conversion. Also, the underlying
    // counters might be gone already.
    StopBucketingCountersUpdates(*buckets_iter, false);
  }
  if (own_pool_) delete pool_;
}

// Rebuild a profile tree from its flattened thrift form. Returns NULL for an
// empty tree; otherwise delegates to the recursive overload starting at node 0.
RuntimeProfile* RuntimeProfile::CreateFromThrift(ObjectPool* pool,
    const TRuntimeProfileTree& profiles) {
  if (profiles.nodes.size() == 0) return NULL;
  int idx = 0;
  return RuntimeProfile::CreateFromThrift(pool, profiles.nodes, &idx);
}

// Recursive worker: consumes nodes[*idx] (pre-order layout), advancing *idx
// past this node and all of its children.
RuntimeProfile* RuntimeProfile::CreateFromThrift(ObjectPool* pool,
    const vector<TRuntimeProfileNode>& nodes, int* idx) {
  DCHECK_LT(*idx, nodes.size());
  const TRuntimeProfileNode& node = nodes[*idx];
  RuntimeProfile* profile = pool->Add(new RuntimeProfile(pool, node.name));
  profile->metadata_ = node.metadata;
  for (int i = 0; i < node.counters.size(); ++i) {
    const TCounter& counter = node.counters[i];
    profile->counter_map_[counter.name] =
      pool->Add(new Counter(counter.type, counter.value));
  }
  profile->child_counter_map_ = node.child_counters_map;
  profile->info_strings_ = node.info_strings;
  profile->info_strings_display_order_ = node.info_strings_display_order;
  ++*idx;
  for (int i = 0; i < node.num_children; ++i) {
    profile->AddChild(RuntimeProfile::CreateFromThrift(pool, nodes, idx));
  }
  return profile;
}

// Fold another profile into this one: counters with matching names are
// summed (DOUBLE_VALUE added as doubles, others via Update), child-counter
// sets are unioned, and children are merged recursively by name.
void RuntimeProfile::Merge(RuntimeProfile* other) {
  DCHECK(other != NULL);

  // Merge this level
  {
    CounterMap::iterator dst_iter;
    CounterMap::const_iterator src_iter;
    // NOTE(review): two counter_map_lock_ mutexes are taken in caller-supplied
    // order here (this then other) — assumes Merge is never called concurrently
    // in both directions on the same pair; confirm with callers.
    lock_guard<mutex> l(counter_map_lock_);
    lock_guard<mutex> m(other->counter_map_lock_);
    for (src_iter = other->counter_map_.begin();
         src_iter != other->counter_map_.end(); ++src_iter) {
      dst_iter = counter_map_.find(src_iter->first);
      if (dst_iter == counter_map_.end()) {
        counter_map_[src_iter->first] =
          pool_->Add(new Counter(src_iter->second->type(), src_iter->second->value()));
      } else {
        DCHECK(dst_iter->second->type() == src_iter->second->type());
        if (dst_iter->second->type() == TCounterType::DOUBLE_VALUE) {
          double new_val = dst_iter->second->double_value() +
              src_iter->second->double_value();
          dst_iter->second->Set(new_val);
        } else {
          dst_iter->second->Update(src_iter->second->value());
        }
      }
    }

    // union the child-counter hierarchies
    ChildCounterMap::const_iterator child_counter_src_itr;
    for (child_counter_src_itr = other->child_counter_map_.begin();
         child_counter_src_itr != other->child_counter_map_.end();
         ++child_counter_src_itr) {
      set<string>* child_counters = FindOrInsert(&child_counter_map_,
          child_counter_src_itr->first, set<string>());
      child_counters->insert(child_counter_src_itr->second.begin(),
          child_counter_src_itr->second.end());
    }
  }

  {
    lock_guard<mutex> l(children_lock_);
    lock_guard<mutex> m(other->children_lock_);
    // Recursively merge children with matching names
    for (int i = 0; i < other->children_.size(); ++i) {
      RuntimeProfile* other_child = other->children_[i].first;
      ChildMap::iterator j = child_map_.find(other_child->name_);
      RuntimeProfile* child = NULL;
      if (j != child_map_.end()) {
        child = j->second;
      } else {
        // no counterpart here: create an empty child with the same identity
        child = pool_->Add(new RuntimeProfile(pool_, other_child->name_));
        child->local_time_percent_ = other_child->local_time_percent_;
        child->metadata_ = other_child->metadata_;
        bool indent_other_child = other->children_[i].second;
        child_map_[child->name_] = child;
        children_.push_back(make_pair(child, indent_other_child));
      }
      child->Merge(other_child);
    }
  }
}

// Overwrite this profile's values from a thrift tree (Set semantics, unlike
// Merge's additive semantics). The whole tree must be consumed.
void RuntimeProfile::Update(const TRuntimeProfileTree& thrift_profile) {
  int idx = 0;
  Update(thrift_profile.nodes, &idx);
  DCHECK_EQ(idx, thrift_profile.nodes.size());
}

// Recursive worker for Update(): sets counter values in place, refreshes
// info strings, and creates any children not yet present.
void RuntimeProfile::Update(const vector<TRuntimeProfileNode>& nodes, int* idx) {
  DCHECK_LT(*idx, nodes.size());
  const TRuntimeProfileNode& node = nodes[*idx];
  {
    lock_guard<mutex> l(counter_map_lock_);
    // update this level
    map<string, Counter*>::iterator dst_iter;
    for (int i = 0; i < node.counters.size(); ++i) {
      const TCounter& tcounter = node.counters[i];
      CounterMap::iterator j = counter_map_.find(tcounter.name);
      if (j == counter_map_.end()) {
        counter_map_[tcounter.name] =
          pool_->Add(new Counter(tcounter.type, tcounter.value));
      } else {
        if (j->second->type() != tcounter.type) {
          // type conflicts are logged and skipped rather than clobbered
          LOG(ERROR) << "Cannot update counters with the same name ("
                     << j->first << ") but different types.";
        } else {
          j->second->Set(tcounter.value);
        }
      }
    }

    ChildCounterMap::const_iterator child_counter_src_itr;
    for (child_counter_src_itr = node.child_counters_map.begin();
         child_counter_src_itr != node.child_counters_map.end();
         ++child_counter_src_itr) {
      set<string>* child_counters = FindOrInsert(&child_counter_map_,
          child_counter_src_itr->first, set<string>());
      child_counters->insert(child_counter_src_itr->second.begin(),
          child_counter_src_itr->second.end());
    }
  }

  {
    lock_guard<mutex> l(info_strings_lock_);
    const InfoStrings& info_strings = node.info_strings;
    BOOST_FOREACH(const string& key, node.info_strings_display_order) {
      // Look for existing info strings and update in place. If there
      // are new strings, add them to the end of the display order.
      // TODO: Is nodes.info_strings always a superset of
      // info_strings_? If so, can just copy the display order.
      InfoStrings::const_iterator it = info_strings.find(key);
      DCHECK(it != info_strings.end());
      InfoStrings::iterator existing = info_strings_.find(key);
      if (existing == info_strings_.end()) {
        info_strings_.insert(make_pair(key, it->second));
        info_strings_display_order_.push_back(key);
      } else {
        info_strings_[key] = it->second;
      }
    }
  }

  ++*idx;
  {
    lock_guard<mutex> l(children_lock_);
    // update children with matching names; create new ones if they don't match
    for (int i = 0; i < node.num_children; ++i) {
      const TRuntimeProfileNode& tchild = nodes[*idx];
      ChildMap::iterator j = child_map_.find(tchild.name);
      RuntimeProfile* child = NULL;
      if (j != child_map_.end()) {
        child = j->second;
      } else {
        child = pool_->Add(new RuntimeProfile(pool_, tchild.name));
        child->metadata_ = tchild.metadata;
        child_map_[tchild.name] = child;
        children_.push_back(make_pair(child, tchild.indent));
      }
      child->Update(nodes, idx);
    }
  }
}

// Divide every counter in the tree by n (e.g. to average across instances).
// DOUBLE_VALUE counters divide as doubles; others divide the raw value.
void RuntimeProfile::Divide(int n) {
  DCHECK_GT(n, 0);
  map<string, Counter*>::iterator iter;
  {
    lock_guard<mutex> l(counter_map_lock_);
    for (iter = counter_map_.begin(); iter != counter_map_.end(); ++iter) {
      if (iter->second->type() == TCounterType::DOUBLE_VALUE) {
        iter->second->Set(iter->second->double_value() / n);
      } else {
        iter->second->value_ /= n;
      }
    }
  }
  {
    lock_guard<mutex> l(children_lock_);
    for (ChildMap::iterator i = child_map_.begin(); i != child_map_.end(); ++i) {
      i->second->Divide(n);
    }
  }
}

// Compute each node's local_time_percent_ relative to the root's total time.
void RuntimeProfile::ComputeTimeInProfile() {
  ComputeTimeInProfile(total_time_counter()->value());
}

// Worker: local time = this node's total time minus the sum of its children's,
// expressed as a percentage of the tree-wide total.
void RuntimeProfile::ComputeTimeInProfile(int64_t total) {
  if (total == 0) return;

  // Add all the total times in all the children
  int64_t total_child_time = 0;
  lock_guard<mutex> l(children_lock_);
  for (int i = 0; i < children_.size(); ++i) {
    total_child_time += children_[i].first->total_time_counter()->value();
  }
  int64_t local_time = total_time_counter()->value() - total_child_time;
  // Counters have some margin, set to 0 if it was negative.
  // NOTE(review): 0L is long, local_time is int64_t — on ILP32 targets these
  // differ and ::max may not resolve; max<int64_t>(0, local_time) would be
  // portable. Confirm supported platforms.
  local_time = ::max(0L, local_time);
  local_time_percent_ = static_cast<double>(local_time) / total;
  local_time_percent_ = ::min(1.0, local_time_percent_) * 100;

  // Recurse on children
  for (int i = 0; i < children_.size(); ++i) {
    children_[i].first->ComputeTimeInProfile(total);
  }
}

// Register a child profile. When loc is given, the child is inserted
// immediately after loc in display order; otherwise it is appended.
// It is a programming error (DCHECK) for loc not to be a current child.
void RuntimeProfile::AddChild(RuntimeProfile* child, bool indent, RuntimeProfile* loc) {
  DCHECK(child != NULL);
  lock_guard<mutex> l(children_lock_);
  child_map_[child->name_] = child;
  if (loc == NULL) {
    children_.push_back(make_pair(child, indent));
  } else {
    for (ChildVector::iterator it = children_.begin(); it != children_.end(); ++it) {
      if (it->first == loc) {
        // ++it: insert AFTER the loc entry
        children_.insert(++it, make_pair(child, indent));
        return;
      }
    }
    DCHECK(false) << "Invalid loc";
  }
}

// Copy the direct children into *children (previous contents discarded).
void RuntimeProfile::GetChildren(vector<RuntimeProfile*>* children) {
  children->clear();
  lock_guard<mutex> l(children_lock_);
  for (ChildMap::iterator i = child_map_.begin(); i != child_map_.end(); ++i) {
    children->push_back(i->second);
  }
}

// Append all descendants (pre-order) to *children. Note: does NOT clear
// the output vector first, unlike GetChildren.
void RuntimeProfile::GetAllChildren(vector<RuntimeProfile*>* children) {
  lock_guard<mutex> l(children_lock_);
  for (ChildMap::iterator i = child_map_.begin(); i != child_map_.end(); ++i) {
    children->push_back(i->second);
    i->second->GetAllChildren(children);
  }
}

// Add or overwrite a free-form key/value annotation; new keys are appended
// to the display order, existing keys keep their position.
void RuntimeProfile::AddInfoString(const string& key, const string& value) {
  lock_guard<mutex> l(info_strings_lock_);
  InfoStrings::iterator it = info_strings_.find(key);
  if (it == info_strings_.end()) {
    info_strings_.insert(make_pair(key, value));
    info_strings_display_order_.push_back(key);
  } else {
    it->second = value;
  }
}

// Look up an info string; returns NULL when absent. The returned pointer
// aliases map storage owned by this profile.
const string* RuntimeProfile::GetInfoString(const string& key) {
  lock_guard<mutex> l(info_strings_lock_);
  InfoStrings::const_iterator it = info_strings_.find(key);
  if (it == info_strings_.end()) return NULL;
  return &it->second;
}

// Get-or-create a counter under parent_counter_name (ROOT_COUNTER = top
// level). Returns the existing counter if the name is already registered.
RuntimeProfile::Counter* RuntimeProfile::AddCounter(
    const string& name, TCounterType::type type, const string& parent_counter_name) {
  lock_guard<mutex> l(counter_map_lock_);
  if (counter_map_.find(name) != counter_map_.end()) {
    // TODO: should we make sure that we don't return existing derived counters?
    return counter_map_[name];
  }
  DCHECK(parent_counter_name == ROOT_COUNTER ||
      counter_map_.find(parent_counter_name) != counter_map_.end());
  Counter* counter = pool_->Add(new Counter(type));
  counter_map_[name] = counter;
  set<string>* child_counters = FindOrInsert(&child_counter_map_,
      parent_counter_name, set<string>());
  child_counters->insert(name);
  return counter;
}

// Register a counter whose value is produced by counter_fn on demand.
// Unlike AddCounter, returns NULL if the name already exists.
RuntimeProfile::DerivedCounter* RuntimeProfile::AddDerivedCounter(
    const string& name, TCounterType::type type,
    const DerivedCounterFunction& counter_fn, const string& parent_counter_name) {
  lock_guard<mutex> l(counter_map_lock_);
  if (counter_map_.find(name) != counter_map_.end()) return NULL;
  DerivedCounter* counter = pool_->Add(new DerivedCounter(type, counter_fn));
  counter_map_[name] = counter;
  set<string>* child_counters = FindOrInsert(&child_counter_map_,
      parent_counter_name, set<string>());
  child_counters->insert(name);
  return counter;
}

// Create the standard per-thread counter group under the given name prefix;
// user/sys time are children of the wall-clock total.
RuntimeProfile::ThreadCounters* RuntimeProfile::AddThreadCounters(
    const string& prefix) {
  ThreadCounters* counter = pool_->Add(new ThreadCounters());
  counter->total_time_ = AddCounter(prefix + THREAD_TOTAL_TIME, TCounterType::TIME_NS);
  counter->user_time_ = AddCounter(prefix + THREAD_USER_TIME, TCounterType::TIME_NS,
      prefix + THREAD_TOTAL_TIME);
  counter->sys_time_ = AddCounter(prefix + THREAD_SYS_TIME, TCounterType::TIME_NS,
      prefix + THREAD_TOTAL_TIME);
  counter->voluntary_context_switches_ =
      AddCounter(prefix + THREAD_VOLUNTARY_CONTEXT_SWITCHES, TCounterType::UNIT);
  counter->involuntary_context_switches_ =
      AddCounter(prefix + THREAD_INVOLUNTARY_CONTEXT_SWITCHES, TCounterType::UNIT);
  return counter;
}

// Look up a counter by name in this node only; NULL when absent.
RuntimeProfile::Counter* RuntimeProfile::GetCounter(const string& name) {
  lock_guard<mutex> l(counter_map_lock_);
  if (counter_map_.find(name) != counter_map_.end()) {
    return counter_map_[name];
  }
  return NULL;
}

// Collect every counter with this name from this node and all descendants.
void RuntimeProfile::GetCounters(const string& name, vector<Counter*>* counters) {
  Counter* c = GetCounter(name);
  if (c != NULL) counters->push_back(c);
  lock_guard<mutex> l(children_lock_);
  for (int i = 0; i < children_.size(); ++i) {
    children_[i].first->GetCounters(name, counters);
  }
}

// Print the profile:
//  1. Profile Name
//  2. Info Strings
//  3. Counters
//  4. Children
// (definition continues beyond this chunk)
void RuntimeProfile::PrettyPrint(ostream* s, const string& prefix) const {
  ostream& stream = *s;

  // create copy of counter_map_ and child_counter_map_ so we don't need to hold lock
  // while we call value() on the counters (some of those might be DerivedCounters)
  CounterMap counter_map;
  ChildCounterMap child_counter_map;
  {
    lock_guard<mutex> l(counter_map_lock_);
    counter_map = counter_map_;
    child_counter_map = child_counter_map_;
  }
  map<string, Counter*>::const_iterator total_time = counter_map.find("TotalTime");
  DCHECK(total_time != counter_map.end());

  stream.flags(ios::fixed);
  stream << prefix << name_ << ":";
  if (total_time->second->value() != 0) {
    stream << "(Active: "
           << PrettyPrinter::Print(total_time->second->value(),
               total_time->second->type())
           << ", % non-child: "
           << setprecision(2) << local_time_percent_
           << "%)";
  }
  stream << endl;

  {
    lock_guard<mutex> l(info_strings_lock_);
    BOOST_FOREACH(const string& key, info_strings_display_order_) {
      stream << prefix << "  " << key << ": " << info_strings_.find(key)->second << endl;
    }
  }

  {
    // Print all the event timers as the following:
    // <EventKey> Timeline: 2s719ms
    //  - Event 1: 6.522us (6.522us)
    //  - Event 2: 2s288ms (2s288ms)
    //  - Event 3: 2s410ms (121.138ms)
    // The times in parentheses are the time elapsed since the last event.
lock_guard<mutex> l(event_sequences_lock_); BOOST_FOREACH( const EventSequenceMap::value_type& event_sequence, event_sequence_map_) { stream << prefix << " " << event_sequence.first << ": " << PrettyPrinter::Print( event_sequence.second->ElapsedTime(), TCounterType::TIME_NS) << endl; int64_t last = 0L; BOOST_FOREACH(const EventSequence::Event& event, event_sequence.second->events()) { stream << prefix << " - " << event.first << ": " << PrettyPrinter::Print( event.second, TCounterType::TIME_NS) << " (" << PrettyPrinter::Print( event.second - last, TCounterType::TIME_NS) << ")" << endl; last = event.second; } } } RuntimeProfile::PrintChildCounters( prefix, ROOT_COUNTER, counter_map, child_counter_map, s); // create copy of children_ so we don't need to hold lock while we call // PrettyPrint() on the children ChildVector children; { lock_guard<mutex> l(children_lock_); children = children_; } for (int i = 0; i < children.size(); ++i) { RuntimeProfile* profile = children[i].first; bool indent = children[i].second; profile->PrettyPrint(s, prefix + (indent ? " " : "")); } } string RuntimeProfile::SerializeToArchiveString() const { stringstream ss; SerializeToArchiveString(&ss); return ss.str(); } void RuntimeProfile::SerializeToArchiveString(stringstream* out) const { TRuntimeProfileTree thrift_object; const_cast<RuntimeProfile*>(this)->ToThrift(&thrift_object); ThriftSerializer serializer(true); vector<uint8_t> serialized_buffer; Status status = serializer.Serialize(&thrift_object, &serialized_buffer); if (!status.ok()) return; // Compress the serialized thrift string. This uses string keys and is very // easy to compress. 
GzipCompressor compressor(GzipCompressor::ZLIB); vector<uint8_t> compressed_buffer; compressed_buffer.resize(compressor.MaxCompressedLen(serialized_buffer.size())); int result_len = compressed_buffer.size(); compressor.Compress(serialized_buffer.size(), &serialized_buffer[0], &result_len, &compressed_buffer[0]); compressed_buffer.resize(result_len); Base64Encode(compressed_buffer, out); } void RuntimeProfile::ToThrift(TRuntimeProfileTree* tree) { tree->nodes.clear(); ToThrift(&tree->nodes); } void RuntimeProfile::ToThrift(vector<TRuntimeProfileNode>* nodes) { nodes->reserve(nodes->size() + children_.size()); int index = nodes->size(); nodes->push_back(TRuntimeProfileNode()); TRuntimeProfileNode& node = (*nodes)[index]; node.name = name_; node.num_children = children_.size(); node.metadata = metadata_; node.indent = true; CounterMap counter_map; { lock_guard<mutex> l(counter_map_lock_); counter_map = counter_map_; node.child_counters_map = child_counter_map_; } for (map<string, Counter*>::const_iterator iter = counter_map.begin(); iter != counter_map.end(); ++iter) { TCounter counter; counter.name = iter->first; counter.value = iter->second->value(); counter.type = iter->second->type(); node.counters.push_back(counter); } { lock_guard<mutex> l(info_strings_lock_); node.info_strings = info_strings_; node.info_strings_display_order = info_strings_display_order_; } ChildVector children; { lock_guard<mutex> l(children_lock_); children = children_; } for (int i = 0; i < children.size(); ++i) { int child_idx = nodes->size(); children[i].first->ToThrift(nodes); // fix up indentation flag (*nodes)[child_idx].indent = children[i].second; } } int64_t RuntimeProfile::UnitsPerSecond( const RuntimeProfile::Counter* total_counter, const RuntimeProfile::Counter* timer) { DCHECK(total_counter->type() == TCounterType::BYTES || total_counter->type() == TCounterType::UNIT); DCHECK(timer->type() == TCounterType::TIME_NS); if (timer->value() == 0) return 0; double secs = 
static_cast<double>(timer->value()) / 1000.0 / 1000.0 / 1000.0; return total_counter->value() / secs; } int64_t RuntimeProfile::CounterSum(const vector<Counter*>* counters) { int64_t value = 0; for (int i = 0; i < counters->size(); ++i) { value += (*counters)[i]->value(); } return value; } RuntimeProfile::Counter* RuntimeProfile::AddRateCounter( const string& name, Counter* src_counter) { TCounterType::type dst_type; switch (src_counter->type()) { case TCounterType::BYTES: dst_type = TCounterType::BYTES_PER_SECOND; break; case TCounterType::UNIT: dst_type = TCounterType::UNIT_PER_SECOND; break; default: DCHECK(false) << "Unsupported src counter type: " << src_counter->type(); return NULL; } Counter* dst_counter = AddCounter(name, dst_type); RegisterPeriodicCounter(src_counter, NULL, dst_counter, RATE_COUNTER); return dst_counter; } RuntimeProfile::Counter* RuntimeProfile::AddRateCounter( const string& name, SampleFn fn, TCounterType::type dst_type) { Counter* dst_counter = AddCounter(name, dst_type); RegisterPeriodicCounter(NULL, fn, dst_counter, RATE_COUNTER); return dst_counter; } RuntimeProfile::Counter* RuntimeProfile::AddSamplingCounter( const string& name, Counter* src_counter) { DCHECK(src_counter->type() == TCounterType::UNIT); Counter* dst_counter = AddCounter(name, TCounterType::DOUBLE_VALUE); RegisterPeriodicCounter(src_counter, NULL, dst_counter, SAMPLING_COUNTER); return dst_counter; } RuntimeProfile::Counter* RuntimeProfile::AddSamplingCounter( const string& name, SampleFn sample_fn) { Counter* dst_counter = AddCounter(name, TCounterType::DOUBLE_VALUE); RegisterPeriodicCounter(NULL, sample_fn, dst_counter, SAMPLING_COUNTER); return dst_counter; } void RuntimeProfile::AddBucketingCounters(const string& name, const string& parent_counter_name, Counter* src_counter, int num_buckets, vector<Counter*>* buckets) { { lock_guard<mutex> l(counter_map_lock_); bucketing_counters_.insert(buckets); } for (int i = 0; i < num_buckets; ++i) { stringstream 
counter_name; counter_name << name << "=" << i; buckets->push_back(AddCounter(counter_name.str(), TCounterType::DOUBLE_VALUE, parent_counter_name)); } lock_guard<mutex> l(periodic_counter_update_state_.lock); if (periodic_counter_update_state_.update_thread.get() == NULL) { periodic_counter_update_state_.update_thread.reset( new thread(&RuntimeProfile::PeriodicCounterUpdateLoop)); } BucketCountersInfo info; info.src_counter = src_counter; info.num_sampled = 0; periodic_counter_update_state_.bucketing_counters[buckets] = info; } RuntimeProfile::EventSequence* RuntimeProfile::AddEventSequence(const string& name) { lock_guard<mutex> l(event_sequences_lock_); EventSequenceMap::iterator timer_it = event_sequence_map_.find(name); if (timer_it != event_sequence_map_.end()) return timer_it->second; EventSequence* timer = pool_->Add(new EventSequence()); event_sequence_map_[name] = timer; return timer; } void RuntimeProfile::RegisterPeriodicCounter(Counter* src_counter, SampleFn sample_fn, Counter* dst_counter, PeriodicCounterType type) { DCHECK(src_counter == NULL || sample_fn == NULL); lock_guard<mutex> l(periodic_counter_update_state_.lock); if (periodic_counter_update_state_.update_thread.get() == NULL) { periodic_counter_update_state_.update_thread.reset( new thread(&RuntimeProfile::PeriodicCounterUpdateLoop)); } switch (type) { case RATE_COUNTER: { RateCounterInfo counter; counter.src_counter = src_counter; counter.sample_fn = sample_fn; counter.elapsed_ms = 0; periodic_counter_update_state_.rate_counters[dst_counter] = counter; break; } case SAMPLING_COUNTER: { SamplingCounterInfo counter; counter.src_counter = src_counter; counter.sample_fn = sample_fn; counter.num_sampled = 0; counter.total_sampled_value = 0; periodic_counter_update_state_.sampling_counters[dst_counter] = counter; break; } default: DCHECK(false) << "Unsupported PeriodicCounterType:" << type; } } void RuntimeProfile::StopRateCounterUpdates(Counter* rate_counter) { lock_guard<mutex> 
l(periodic_counter_update_state_.lock); periodic_counter_update_state_.rate_counters.erase(rate_counter); } void RuntimeProfile::StopSamplingCounterUpdates(Counter* sampling_counter) { lock_guard<mutex> l(periodic_counter_update_state_.lock); periodic_counter_update_state_.sampling_counters.erase(sampling_counter); } void RuntimeProfile::StopBucketingCountersUpdates(vector<Counter*>* buckets, bool convert) { int64_t num_sampled = 0; { lock_guard<mutex> l(periodic_counter_update_state_.lock); PeriodicCounterUpdateState::BucketCountersMap::const_iterator itr = periodic_counter_update_state_.bucketing_counters.find(buckets); if (itr != periodic_counter_update_state_.bucketing_counters.end()) { num_sampled = itr->second.num_sampled; periodic_counter_update_state_.bucketing_counters.erase(buckets); } } if (convert && num_sampled > 0) { BOOST_FOREACH(Counter* counter, *buckets) { double perc = 100 * counter->value() / (double)num_sampled; counter->Set(perc); } } } RuntimeProfile::PeriodicCounterUpdateState::PeriodicCounterUpdateState() : done_(false) { } RuntimeProfile::PeriodicCounterUpdateState::~PeriodicCounterUpdateState() { if (periodic_counter_update_state_.update_thread.get() != NULL) { { // Lock to ensure the update thread will see the update to done_ lock_guard<mutex> l(periodic_counter_update_state_.lock); done_ = true; } periodic_counter_update_state_.update_thread->join(); } } void RuntimeProfile::PeriodicCounterUpdateLoop() { while (!periodic_counter_update_state_.done_) { system_time before_time = get_system_time(); usleep(FLAGS_periodic_counter_update_period_ms * 1000); posix_time::time_duration elapsed = get_system_time() - before_time; int elapsed_ms = elapsed.total_milliseconds(); lock_guard<mutex> l(periodic_counter_update_state_.lock); for (PeriodicCounterUpdateState::RateCounterMap::iterator it = periodic_counter_update_state_.rate_counters.begin(); it != periodic_counter_update_state_.rate_counters.end(); ++it) { it->second.elapsed_ms += elapsed_ms; 
int64_t value; if (it->second.src_counter != NULL) { value = it->second.src_counter->value(); } else { DCHECK(it->second.sample_fn != NULL); value = it->second.sample_fn(); } int64_t rate = value * 1000 / (it->second.elapsed_ms); it->first->Set(rate); } for (PeriodicCounterUpdateState::SamplingCounterMap::iterator it = periodic_counter_update_state_.sampling_counters.begin(); it != periodic_counter_update_state_.sampling_counters.end(); ++it) { ++it->second.num_sampled; int64_t value; if (it->second.src_counter != NULL) { value = it->second.src_counter->value(); } else { DCHECK(it->second.sample_fn != NULL); value = it->second.sample_fn(); } it->second.total_sampled_value += value; double average = static_cast<double>(it->second.total_sampled_value) / it->second.num_sampled; it->first->Set(average); } for (PeriodicCounterUpdateState::BucketCountersMap::iterator it = periodic_counter_update_state_.bucketing_counters.begin(); it != periodic_counter_update_state_.bucketing_counters.end(); ++it) { int64_t val = it->second.src_counter->value(); if (val >= it->first->size()) val = it->first->size() - 1; it->first->at(val)->Update(1); ++it->second.num_sampled; } } } void RuntimeProfile::PrintChildCounters(const string& prefix, const string& counter_name, const CounterMap& counter_map, const ChildCounterMap& child_counter_map, ostream* s) { ostream& stream = *s; ChildCounterMap::const_iterator itr = child_counter_map.find(counter_name); if (itr != child_counter_map.end()) { const set<string>& child_counters = itr->second; BOOST_FOREACH(const string& child_counter, child_counters) { CounterMap::const_iterator iter = counter_map.find(child_counter); DCHECK(iter != counter_map.end()); stream << prefix << " - " << iter->first << ": " << PrettyPrinter::Print(iter->second->value(), iter->second->type()) << endl; RuntimeProfile::PrintChildCounters(prefix + " ", child_counter, counter_map, child_counter_map, s); } } } }
{"hexsha": "11e40b84d01a7ce4cb742885ec8d9b1cac161601", "size": 29203, "ext": "cc", "lang": "C++", "max_stars_repo_path": "be/src/util/runtime-profile.cc", "max_stars_repo_name": "wangxnhit/impala", "max_stars_repo_head_hexsha": "d7a37f00a515d6942ca28bd8cd84380bc8c93c5a", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1.0, "max_stars_repo_stars_event_min_datetime": "2016-06-08T06:22:28.000Z", "max_stars_repo_stars_event_max_datetime": "2016-06-08T06:22:28.000Z", "max_issues_repo_path": "be/src/util/runtime-profile.cc", "max_issues_repo_name": "boorad/impala", "max_issues_repo_head_hexsha": "108c5d8d39c45d49edfca98cd2d858352cd44d51", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "be/src/util/runtime-profile.cc", "max_forks_repo_name": "boorad/impala", "max_forks_repo_head_hexsha": "108c5d8d39c45d49edfca98cd2d858352cd44d51", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.7004889976, "max_line_length": 90, "alphanum_fraction": 0.6941410129, "num_tokens": 7098}
import os import sys import operator import math import pickle import numpy as np import pandas as pd ''' P(class|words) --> posterior P(words) --> evidence P(words|class) --> likelihood P(class) --> prior ''' class bayes_classifier(object): def __init__(self, labels, eps = 0.01, sigma = 0.00001): # require labels format: numpy array self.dimension = labels[len(labels)-1][0] self.labels = labels self.likelihood = dict.fromkeys(range(1,self.dimension+1)) for key in self.likelihood: self.likelihood[key] = dict() self.prior = dict.fromkeys(range(1,self.dimension+1), 0) for s in self.labels: self.prior[s[0]] += 1 self.evidence = dict() self.eps = eps self.sigma = sigma self.vocab = set() self.total_samples_count = len(labels) self.total_words_count = 0 ##self.count = dict.fromkeys(range(1,self.dimension+1), 0) def train(self, data): # require data format: numpy array for iterator, s in enumerate(data): current_label = self.labels[iterator][0] ##self.count[current_label] += len(sample) sample = np.fromstring(s[0], dtype = int, sep=',') #print('sample:', sample) self.total_words_count += len(sample) for word in sample: self.vocab.add(word) if word not in self.evidence.keys(): self.evidence[word] = 1 else: self.evidence[word] += 1 if word not in self.likelihood[current_label].keys(): self.likelihood[current_label][word] = 1 else: self.likelihood[current_label][word] += 1 # Normalize evidence, likelihood and prior for i in range(1,self.dimension+1): self.prior[i] /= self.total_samples_count for word in self.evidence.keys(): self.evidence[word] /= self.total_words_count for label_iterator in range(1,self.dimension+1): class_sum = sum(self.likelihood[label_iterator].values()) + len(self.likelihood[label_iterator]) * self.eps for key, dividend in self.likelihood[label_iterator].items(): self.likelihood[label_iterator][key] = (dividend + self.eps)/class_sum def test(self, data): pred = np.array([]) for iterator, s in enumerate(data): sample = np.fromstring(s[0], dtype = int, 
sep=',') evidence = 1 average_evidence = sum(self.evidence.values())/self.total_words_count for word in sample: if word not in self.evidence: evidence *= average_evidence else: evidence *= self.evidence[word] current_posterior = np.array([]) for current_class in range(1,self.dimension+1): likelihood = 1 average_likelihood = sum(self.likelihood[current_class].values())/len(self.likelihood[current_class]) for word in sample: if word not in self.likelihood[current_class]: likelihood *= average_likelihood else: likelihood *= self.likelihood[current_class][word] prior = self.prior[current_class] if likelihood==0 or evidence==0: current_posterior = np.append(current_posterior, 0) else: current_posterior = np.append(current_posterior, likelihood*prior/evidence) #print('max:', current_posterior) pred = np.append(pred, int(np.argmax(current_posterior)+1)) return pred
{"hexsha": "93e303d7554ef6ef3f11454ef3bdcdfaa624ac27", "size": 3115, "ext": "py", "lang": "Python", "max_stars_repo_path": "classifier/Bayes_Classifier.py", "max_stars_repo_name": "geneti/cse881-data-mining", "max_stars_repo_head_hexsha": "05ffc3c8a9c31338687c596629501f43348eaa39", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "classifier/Bayes_Classifier.py", "max_issues_repo_name": "geneti/cse881-data-mining", "max_issues_repo_head_hexsha": "05ffc3c8a9c31338687c596629501f43348eaa39", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "classifier/Bayes_Classifier.py", "max_forks_repo_name": "geneti/cse881-data-mining", "max_forks_repo_head_hexsha": "05ffc3c8a9c31338687c596629501f43348eaa39", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 32.4479166667, "max_line_length": 110, "alphanum_fraction": 0.6947030498, "include": true, "reason": "import numpy", "num_tokens": 818}
# Copyright 2021 AI Singapore # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Yolo model with model types: v3 and v3tiny """ import os import logging from typing import List, Dict, Any, Tuple import numpy as np from peekingduck.weights_utils import checker, downloader from .yolo_files.detector import Detector class YoloModel: """Yolo model with model types: v3 and v3tiny""" def __init__(self, config: Dict[str, Any]) -> None: super().__init__() self.logger = logging.getLogger(__name__) # check for yolo weights, if none then download into weights folder if not checker.has_weights(config['root'], config['weights_dir']): self.logger.info('---no yolo weights detected. 
proceeding to download...---') downloader.download_weights(config['root'], config['blob_file']) self.logger.info('---yolo weights download complete.---') # get classnames path to read all the classes classes_path = os.path.join(config['root'], config['classes']) self.class_names = [c.strip() for c in open(classes_path).readlines()] self.detect_ids = config['detect_ids'] self.detector = Detector(config) def predict(self, frame: np.array) -> Tuple[List[np.array], List[str], List[float]]: """predict the bbox from frame returns: object_bboxes(List[Numpy Array]): list of bboxes detected object_labels(List[str]): list of string labels of the object detected for the corresponding bbox object_scores(List(float)): list of confidence scores of the object detected for the corresponding bbox """ assert isinstance(frame, np.ndarray) # return bboxes, object_bboxes, object_labels, object_scores return self.detector.predict_object_bbox_from_image( self.class_names, frame, self.detect_ids ) def get_detect_ids(self) -> List[int]: """getter for selected ids for detection Returns: List[int]: list of selected detection ids """ return self.detect_ids
{"hexsha": "25a1cf38e32be7042bbcbafe6b1e3afdf8908c84", "size": 2695, "ext": "py", "lang": "Python", "max_stars_repo_path": "peekingduck/pipeline/nodes/model/yolov4/yolo_model.py", "max_stars_repo_name": "leeping-ng/PeekingDuck", "max_stars_repo_head_hexsha": "16784b4c35f30c463fcc0c7caccdda6141797a6b", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "peekingduck/pipeline/nodes/model/yolov4/yolo_model.py", "max_issues_repo_name": "leeping-ng/PeekingDuck", "max_issues_repo_head_hexsha": "16784b4c35f30c463fcc0c7caccdda6141797a6b", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "peekingduck/pipeline/nodes/model/yolov4/yolo_model.py", "max_forks_repo_name": "leeping-ng/PeekingDuck", "max_forks_repo_head_hexsha": "16784b4c35f30c463fcc0c7caccdda6141797a6b", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 36.4189189189, "max_line_length": 89, "alphanum_fraction": 0.6619666048, "include": true, "reason": "import numpy", "num_tokens": 562}
module NVML using ..APIUtils using ..CUDA using CEnum using Memoize using Libdl @memoize function libnvml() if Sys.iswindows() # the NVSMI dir isn't added to PATH by the installer nvsmi = joinpath(ENV["ProgramFiles"], "NVIDIA Corporation", "NVSMI") if isdir(nvsmi) joinpath(nvsmi, "nvml.dll") else # let's just hope for the best "nvml" end else "libnvidia-ml.so.1" end end @memoize has_nvml() = Libdl.dlopen(libnvml(); throw_error=false) !== nothing # core library include("libnvml_common.jl") include("error.jl") include("libnvml.jl") include("libnvml_deprecated.jl") # wrappers include("system.jl") include("device.jl") end
{"hexsha": "183b77ae183e4f82be1585107bb960641d1a654c", "size": 733, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "lib/nvml/NVML.jl", "max_stars_repo_name": "glwagner/CUDA.jl", "max_stars_repo_head_hexsha": "7788985167bda2951bdde370e636c1c238c29d70", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/nvml/NVML.jl", "max_issues_repo_name": "glwagner/CUDA.jl", "max_issues_repo_head_hexsha": "7788985167bda2951bdde370e636c1c238c29d70", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/nvml/NVML.jl", "max_forks_repo_name": "glwagner/CUDA.jl", "max_forks_repo_head_hexsha": "7788985167bda2951bdde370e636c1c238c29d70", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 17.4523809524, "max_line_length": 76, "alphanum_fraction": 0.6371077763, "num_tokens": 217}
# Author: Jin Xu # Data: 2019-12-24 # Function: Pre-Processing the dataset #-------------------------- import package --------------------------# import os import re import random import codecs import numpy as np import networkx as nx import scipy as sp #-------------------------- global variable --------------------------# DIR_PATH = "C:\\Users\\HP\\Desktop\\Exp_TotalEmbeddedCombinationGCN\\" #-------------------------- component function --------------------------# def polblogs(): """ Process the polblogs dataset""" # 1.1 Read the file original_file = DIR_PATH + "origin_data\\polblogs\\polblogs.gml" node_pattern = re.compile(r'id \d\d*') value_pattern = re.compile(r'value \d\d*') source_pattern = re.compile(r'source \d\d*') target_pattern = re.compile(r'target \d\d*') with codecs.open(original_file, "r",encoding='utf-8', errors='ignore') as fdata: comtent = fdata.read() node_strlist = re.findall(node_pattern, comtent) value_strlist = re.findall(value_pattern, comtent) source_strlist = re.findall(source_pattern, comtent) target_strlist = re.findall(target_pattern, comtent) # 1.2 Get the original list which start from '0' origin_node_list, origin_label_list, source_list, target_list = [], [], [], [] for item in node_strlist: origin_node_list.append(int(item.replace('id ',''))) min_node, node_num = min(origin_node_list), len(origin_node_list) for i in range(0, node_num): origin_node_list[i] = origin_node_list[i] - min_node for item in value_strlist: origin_label_list.append(int(item.replace('value ',''))) for item in source_strlist: source_list.append(int(item.replace('source ','')) - 1) for item in target_strlist: target_list.append(int(item.replace('target ','')) - 1) # 2.1 Get the node list OrphanedNode_count = 0 node_list, label_list = [], [] for i in range(0, node_num): node = origin_node_list[i] if node in source_list or node in target_list: node_list.append(node) label_list.append(origin_label_list[i]) else: OrphanedNode_count += 1 print(">> Origin node num: 
{} - Orphaned node num: {} = Now node num: {}."\ .format(node_num, OrphanedNode_count, len(node_list))) node_num = len(node_list) # 2.2 Get the dictionary ID2label = {} for i in range(0, node_num): ID2label[node_list[i]] = label_list[i] node_index = list(range(0, node_num)) random.shuffle(node_index) ID2index, index2ID = {}, {} for i in range(0, node_num): ID2index[node_list[i]] = node_index[i] index2ID[node_index[i]] = node_list[i] index2label = {} for i in range(0, node_num): index2label[i] = ID2label[ index2ID[i] ] # 2.3 Build the edge list edge_list = [] mutiedge_count = 0 for i in range(0, len(source_list)): temp1 = ( ID2index[ source_list[i] ], ID2index[ target_list[i] ] ) temp2 = ( ID2index[ target_list[i] ], ID2index[ source_list[i] ] ) if temp1 not in edge_list and temp2 not in edge_list: edge_list.append(temp1) else: mutiedge_count += 1 print(">> Origin edge num: {} - Muti edge num: {} = Now edge num: {}."\ .format(len(source_list), mutiedge_count, len(edge_list))) # 2.4 Build the Graph G = nx.DiGraph() G.add_edges_from(edge_list) G = G.to_undirected() print(">> Build the Graph with {} nodes and {} edges."\ .format(G.number_of_nodes(), G.number_of_edges())) # 3.0 Build the network file data_file = DIR_PATH + "origin_data\\polblogs\\polblogs_Data.csv" label_file = DIR_PATH + "origin_data\\polblogs\\polblogs_Label.csv" if os.path.exists(data_file) == True: os.remove(data_file) if os.path.exists(label_file) == True: os.remove(label_file) with open(data_file, "a") as f: f.write("0,1\n") for edge in edge_list: f.write("{},{}\n".format(edge[0], edge[1])) with open(label_file, "a") as f: f.write("0,1\n") for i in range(0, node_num): f.write("{},{}\n".format(i, index2label[i] + 1)) print(">> Build file finish!") return def football(): """ Process the football dataset""" # 1.1 Read the file original_file = DIR_PATH + "origin_data\\football\\football.gml" node_pattern = re.compile(r'id \d\d*') value_pattern = re.compile(r'value \d\d*') source_pattern = 
re.compile(r'source \d\d*') target_pattern = re.compile(r'target \d\d*') with codecs.open(original_file, "r",encoding='utf-8', errors='ignore') as fdata: comtent = fdata.read() node_strlist = re.findall(node_pattern, comtent) value_strlist = re.findall(value_pattern, comtent) source_strlist = re.findall(source_pattern, comtent) target_strlist = re.findall(target_pattern, comtent) # 1.2 Get the original list which start from '0' origin_node_list, origin_label_list, source_list, target_list = [], [], [], [] for item in node_strlist: origin_node_list.append(int(item.replace('id ',''))) min_node, node_num = min(origin_node_list), len(origin_node_list) for i in range(0, node_num): origin_node_list[i] = origin_node_list[i] - min_node for item in value_strlist: origin_label_list.append(int(item.replace('value ',''))) for item in source_strlist: source_list.append(int(item.replace('source ',''))) for item in target_strlist: target_list.append(int(item.replace('target ',''))) # 2.1 Get the node list OrphanedNode_count = 0 node_list, label_list = [], [] for i in range(0, node_num): node = origin_node_list[i] if node in source_list or node in target_list: node_list.append(node) label_list.append(origin_label_list[i]) else: OrphanedNode_count += 1 print(">> Origin node num: {} - Orphaned node num: {} = Now node num: {}."\ .format(node_num, OrphanedNode_count, len(node_list))) node_num = len(node_list) a = target_list print(a, len(a)) os.system('pause') # 2.2 Get the dictionary ID2label = {} for i in range(0, node_num): ID2label[node_list[i]] = label_list[i] node_index = list(range(0, node_num)) random.shuffle(node_index) ID2index, index2ID = {}, {} for i in range(0, node_num): ID2index[node_list[i]] = node_index[i] index2ID[node_index[i]] = node_list[i] index2label = {} for i in range(0, node_num): index2label[i] = ID2label[ index2ID[i] ] # 2.3 Build the edge list edge_list = [] mutiedge_count = 0 for i in range(0, len(source_list)): temp1 = ( ID2index[ source_list[i] ], 
ID2index[ target_list[i] ] ) temp2 = ( ID2index[ target_list[i] ], ID2index[ source_list[i] ] ) if temp1 not in edge_list and temp2 not in edge_list: edge_list.append(temp1) else: mutiedge_count += 1 print(">> Origin edge num: {} - Muti edge num: {} = Now edge num: {}."\ .format(len(source_list), mutiedge_count, len(edge_list))) # 2.4 Build the Graph G = nx.DiGraph() G.add_edges_from(edge_list) G = G.to_undirected() print(">> Build the Graph with {} nodes and {} edges."\ .format(G.number_of_nodes(), G.number_of_edges())) # 3.0 Build the network file data_file = DIR_PATH + "origin_data\\football\\football_Data.csv" label_file = DIR_PATH + "origin_data\\football\\football_Label.csv" if os.path.exists(data_file) == True: os.remove(data_file) if os.path.exists(label_file) == True: os.remove(label_file) with open(data_file, "a") as f: f.write("0,1\n") for edge in edge_list: f.write("{},{}\n".format(edge[0], edge[1])) with open(label_file, "a") as f: f.write("0,1\n") for i in range(0, node_num): f.write("{},{}\n".format(i, index2label[i] + 1)) print(">> Build file finish!") return def TerrorAttack(): """ Process the TerrorAttack dataset""" # 1.1 Read the node file and build node dictionary label2type = {"http://counterterror.mindswap.org/2005/terrorism.owl#Arson":0,\ "http://counterterror.mindswap.org/2005/terrorism.owl#Bombing":1,\ "http://counterterror.mindswap.org/2005/terrorism.owl#Kidnapping":2,\ "http://counterterror.mindswap.org/2005/terrorism.owl#NBCR_Attack":3,\ "http://counterterror.mindswap.org/2005/terrorism.owl#other_attack":4,\ "http://counterterror.mindswap.org/2005/terrorism.owl#Weapon_Attack":5} name2id, id2name, id2type, id2feature = {}, {}, {}, {} original_nodefile = DIR_PATH + "origin_data\\TerrorAttack\\terrorist_attack.nodes" nodeid = 0 with open(original_nodefile, 'r') as f: while True: temp = f.readline() if not temp: break node_info = temp.split() # build the dictionary name2id[node_info[0]] = nodeid id2name[nodeid] = node_info[0] id2type[nodeid] 
= label2type[node_info[-1]] id2feature[nodeid] = node_info[1:-1] nodeid += 1 # 1.2 Read the edge file and build the edge info list original_edgefile = DIR_PATH + "origin_data\\TerrorAttack\\terrorist_attack_loc.edges" source, target = [], [] with open(original_edgefile, 'r') as f: while True: temp = f.readline() if not temp: break edge_info = temp.split() source.append(name2id[edge_info[0]]) target.append(name2id[edge_info[1]]) original_edgefile = DIR_PATH + "origin_data\\TerrorAttack\\terrorist_attack_loc_org.edges" with open(original_edgefile, 'r') as f: while True: temp = f.readline() if not temp: break edge_info = temp.split() source.append(name2id[edge_info[0]]) target.append(name2id[edge_info[1]]) # 2.0 Build the node and edge list node_list = list(range(0, nodeid)) print(">> Build the node list with {} nodes.".format(nodeid)) edge_list = [] for i in range(0, len(source)): if (source[i], target[i]) not in edge_list and (target[i], source[i]) not in edge_list: edge_list.append((source[i], target[i])) print(">> Build the edge list with {} -> {} edges.".format(len(source), len(edge_list))) # 3.0 Build the network file data_file = DIR_PATH + "origin_data\\TerrorAttack\\TerrorAttack_Data.csv" label_file = DIR_PATH + "origin_data\\TerrorAttack\\TerrorAttack_Label.csv" feature_file= DIR_PATH + "origin_data\\TerrorAttack\\TerrorAttack_Feature.csv" if os.path.exists(data_file) == True: os.remove(data_file) if os.path.exists(label_file) == True: os.remove(label_file) if os.path.exists(feature_file) == True: os.remove(feature_file) with open(data_file, "a") as f: f.write("0,1\n") for edge in edge_list: f.write("{},{}\n".format(edge[0], edge[1])) with open(label_file, "a") as f: f.write("0,1\n") for i in range(0, nodeid): f.write("{},{}\n".format(i, id2type[i] + 1)) feature_num = len(id2feature[0]) with open(feature_file, "a") as f: temp = list(range(0, feature_num+1)) temp = [str(x) for x in temp] temp = ','.join(temp) f.write("{}\n".format(temp)) for i in range(0, 
nodeid): temp = id2feature[i] temp = [str(x) for x in temp] temp = str(i) + ',' + ','.join(temp) f.write("{}\n".format(temp)) print(">> Build file finish!") return def www(allow_noid_author = False, allow_nokeyword_paper = False): """ Process the WWW dataset""" # 0.0 Read the file original_paper_file = DIR_PATH + "origin_data\\WWW09-18\\WWW_papers_info_09_13.txt" original_author_file = DIR_PATH + "origin_data\\WWW09-18\\WWW_authors_info_09_13.txt" # paper pattern author_pattern = re.compile(r'\"authors\": \[[^\[]*\}\]') person_pattern = re.compile(r'\{[^\}]*\}') name_pattern = re.compile(r'\"name\": \"[^,\{\}]*\"') id_pattern = re.compile(r'\"id\": \"[^,\{\}]*\"') keyword_pattern = re.compile(r'\"keywords\": \[[^\]]*\]') word_pattern = re.compile(r'\"[^,]*\"') # author pattern tag_pattern = re.compile(r'\"tags\": \[[^\]]*\}\]') t_pattern = re.compile(r'\"t\": \"[^\"]*\"') # 1.0 Process the paper file keyword_name2index = {} keyword_index, author_index, paper_index, author_noid_counting, auther_all_counting = 0, 0, 0, 0, 0 author_name2id, author_name2index, author_id2name = {}, {}, {} paper2keyword, keyword2paper = {}, {} paper2auther_name, autherindex2paper = {}, {} word_total_list, author_idlist, author_namelist = [], [], [] with open(original_paper_file, 'r') as fdata: for eachline in fdata: # 1.1.1 Read the keyword keyword_strlist = re.findall(keyword_pattern, eachline) if allow_nokeyword_paper == False: if len(keyword_strlist) == 0: continue if len(keyword_strlist) == 0: keyword_strlist = ["\"keywords\": [\"None\"]"] temp = keyword_strlist[0] temp = temp.replace("\"keywords\": ", "") word_strlist = re.findall(word_pattern, temp) for i in range(0, len(word_strlist)): word_strlist[i] = word_strlist[i].replace("\"", "") # 1.1.2 Process the word for word in word_strlist: if word not in word_total_list: word_total_list.append(word) keyword_name2index[word] = keyword_index keyword_index += 1 if keyword_name2index[word] not in keyword2paper.keys(): 
keyword2paper[keyword_name2index[word]] = [paper_index] else: keyword2paper[keyword_name2index[word]].append(paper_index) # 1.2.1 Read the author author_strlist = re.findall(author_pattern, eachline) person_strlist = re.findall(person_pattern, author_strlist[0]) if allow_noid_author == False: flag_allauthorhaveid = True for eachperson in person_strlist: # get author id author_id = re.findall(id_pattern, eachperson) if len(author_id) == 0: flag_allauthorhaveid = False break if flag_allauthorhaveid == False: continue # 1.2.2 Process the author author_list = [] for eachperson in person_strlist: auther_all_counting += 1 # get author name author_name = re.findall(name_pattern, eachperson) if len(author_name) >= 2 or len(author_name) == 0: raise Exception("[ERROR] have over 2 author name or 0 author in 1 line.\n {}".format(eachline)) else: author_name = author_name[0] author_name = author_name.replace("\"name\": ", "") author_name = author_name.replace("\"", "") author_list.append(author_name) # get author id author_id = re.findall(id_pattern, eachperson) if len(author_id) == 1: author_id = author_id[0] author_id = author_id.replace("\"id\": ", "") author_id = author_id.replace("\"", "") elif len(author_id) == 0: author_id = "0" else: raise Exception("[ERROR] have over 2 author id in 1 line.\n {}".format(eachline)) # build the author dictionary if author_id not in author_idlist and author_id != "0": author_idlist.append(author_id) author_namelist.append(author_name) author_name2id[author_name] = author_id author_id2name[author_id] = author_name author_name2index[author_name] = author_index author_index += 1 if author_name not in author_namelist and author_id == "0": author_namelist.append(author_name) author_name2id[author_name] = author_id author_name2index[author_name] = author_index author_index += 1 author_noid_counting += 1 if author_name in author_name2index.keys(): temp = author_name2index[author_name] elif author_id in author_id2name.keys(): temp = 
author_name2index[author_id2name[author_id]] else: raise Exception("[ERROR] author_name or author_id can`t be found in dictionary.") if temp not in autherindex2paper.keys(): autherindex2paper[temp] = [paper_index] else: autherindex2paper[temp].append(paper_index) # 1.3 Build paper dictionary paper2keyword[paper_index] = word_strlist paper2auther_name[paper_index] = author_list paper_index += 1 # 1.4 Display print(">> Process paper finish![Opintion: allow_noid_author?-{}, allow_nokeyword_paper?-{}]".format(allow_noid_author, allow_nokeyword_paper)) print(" Paper number = {}, Keyword number = {}".format(paper_index, keyword_index)) print(" Author number = {}, where with {} no-ID author.".format(author_index, author_noid_counting)) print(" Total authors = {}, reply times = {}".format(auther_all_counting, auther_all_counting - author_index)) # 2.0 Process the author file tag_name2index = {} tag_index = 0 with open(original_author_file, 'r') as fdata: for eachline in fdata: idstr = re.findall(id_pattern, eachline) idstr = idstr[0].replace("\"id\": ", "") idno = idstr.replace("\"", "") # author id not in the paper file if idno not in author_id2name.keys(): continue tag_strlist = re.findall(tag_pattern, eachline) t_strlist = re.findall(t_pattern, tag_strlist[0]) for i in range(0, len(t_strlist)): t_strlist[i] = t_strlist[i].replace("\"t\": ", "") t_strlist[i] = t_strlist[i].replace("\"", "") for tag in t_strlist: if tag not in tag_name2index.keys(): tag_name2index[tag] = tag_index tag_index += 1 print(idno, t_strlist) return ################################################################################### if __name__ == "__main__": #polblogs() #football() #TerrorAttack() www()
{"hexsha": "ec69855738619690ec78b4391cbef2b4ad4cb5a4", "size": 19605, "ext": "py", "lang": "Python", "max_stars_repo_path": "Pre_ProcessDataset.py", "max_stars_repo_name": "KampfWut/MORE", "max_stars_repo_head_hexsha": "2b42764812becfa29e0f32f033427e76d5dfe9a0", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2021-09-30T01:18:23.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-08T06:04:43.000Z", "max_issues_repo_path": "Pre_ProcessDataset.py", "max_issues_repo_name": "KampfWut/MORE", "max_issues_repo_head_hexsha": "2b42764812becfa29e0f32f033427e76d5dfe9a0", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "Pre_ProcessDataset.py", "max_forks_repo_name": "KampfWut/MORE", "max_forks_repo_head_hexsha": "2b42764812becfa29e0f32f033427e76d5dfe9a0", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 40.5900621118, "max_line_length": 146, "alphanum_fraction": 0.5728640653, "include": true, "reason": "import numpy,import scipy,import networkx", "num_tokens": 4894}
include("src/SSPS.jl") using .SSPS julia_main()
{"hexsha": "361e794eed0315780e23f56344e5230198883101", "size": 51, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "SSPS/ssps_wrapper.jl", "max_stars_repo_name": "gitter-lab/ssps", "max_stars_repo_head_hexsha": "8557cb1961bcd951c5f78102070925e945567600", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2020-05-13T03:57:19.000Z", "max_stars_repo_stars_event_max_datetime": "2021-09-29T02:45:13.000Z", "max_issues_repo_path": "SSPS/ssps_wrapper.jl", "max_issues_repo_name": "gitter-lab/ssps", "max_issues_repo_head_hexsha": "8557cb1961bcd951c5f78102070925e945567600", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 18, "max_issues_repo_issues_event_min_datetime": "2020-04-29T14:33:48.000Z", "max_issues_repo_issues_event_max_datetime": "2021-03-06T23:12:00.000Z", "max_forks_repo_path": "SSPS/ssps_wrapper.jl", "max_forks_repo_name": "gitter-lab/ssps", "max_forks_repo_head_hexsha": "8557cb1961bcd951c5f78102070925e945567600", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 7.2857142857, "max_line_length": 22, "alphanum_fraction": 0.6862745098, "num_tokens": 18}
#!/usr/bin/env python # coding: utf-8 # # Residual Networks # # Welcome to the first assignment of this week! You'll be building a very deep convolutional network, using Residual Networks (ResNets). In theory, very deep networks can represent very complex functions; but in practice, they are hard to train. Residual Networks, introduced by [He et al.](https://arxiv.org/pdf/1512.03385.pdf), allow you to train much deeper networks than were previously feasible. # # **By the end of this assignment, you'll be able to:** # # - Implement the basic building blocks of ResNets in a deep neural network using Keras # - Put together these building blocks to implement and train a state-of-the-art neural network for image classification # - Implement a skip connection in your network # # For this assignment, you'll use Keras. # # Before jumping into the problem, run the cell below to load the required packages. # ## Table of Content # # - [1 - Packages](#1) # - [2 - The Problem of Very Deep Neural Networks](#2) # - [3 - Building a Residual Network](#3) # - [3.1 - The Identity Block](#3-1) # - [Exercise 1 - identity_block](#ex-1) # - [3.2 - The Convolutional Block](#3-2) # - [Exercise 2 - convolutional_block](#ex-2) # - [4 - Building Your First ResNet Model (50 layers)](#4) # - [Exercise 3 - ResNet50](#ex-3) # - [5 - Test on Your Own Image (Optional/Ungraded)](#5) # - [6 - Bibliography](#6) # <a name='1'></a> # ## 1 - Packages # In[1]: import tensorflow as tf import numpy as np import scipy.misc from tensorflow.keras.applications.resnet_v2 import ResNet50V2 from tensorflow.keras.preprocessing import image from tensorflow.keras.applications.resnet_v2 import preprocess_input, decode_predictions from tensorflow.keras import layers from tensorflow.keras.layers import Input, Add, Dense, Activation, ZeroPadding2D, BatchNormalization, Flatten, Conv2D, AveragePooling2D, MaxPooling2D, GlobalMaxPooling2D from tensorflow.keras.models import Model, load_model from resnets_utils import * from 
tensorflow.keras.initializers import random_uniform, glorot_uniform, constant, identity from tensorflow.python.framework.ops import EagerTensor from matplotlib.pyplot import imshow from test_utils import summary, comparator import public_tests get_ipython().run_line_magic('matplotlib', 'inline') # <a name='2'></a> # ## 2 - The Problem of Very Deep Neural Networks # # Last week, you built your first convolutional neural networks: first manually with numpy, then using Tensorflow and Keras. # # In recent years, neural networks have become much deeper, with state-of-the-art networks evolving from having just a few layers (e.g., AlexNet) to over a hundred layers. # # * The main benefit of a very deep network is that it can represent very complex functions. It can also learn features at many different levels of abstraction, from edges (at the shallower layers, closer to the input) to very complex features (at the deeper layers, closer to the output). # # * However, using a deeper network doesn't always help. A huge barrier to training them is vanishing gradients: very deep networks often have a gradient signal that goes to zero quickly, thus making gradient descent prohibitively slow. # # * More specifically, during gradient descent, as you backpropagate from the final layer back to the first layer, you are multiplying by the weight matrix on each step, and thus the gradient can decrease exponentially quickly to zero (or, in rare cases, grow exponentially quickly and "explode," from gaining very large values). 
# # * During training, you might therefore see the magnitude (or norm) of the gradient for the shallower layers decrease to zero very rapidly as training proceeds, as shown below: # <img src="images/vanishing_grad_kiank.png" style="width:450px;height:220px;"> # <caption><center> <u> <font color='purple'> <b>Figure 1</b> </u><font color='purple'> : <b>Vanishing gradient</b> <br> The speed of learning decreases very rapidly for the shallower layers as the network trains </center></caption> # # Not to worry! You are now going to solve this problem by building a Residual Network! # <a name='3'></a> # ## 3 - Building a Residual Network # # In ResNets, a "shortcut" or a "skip connection" allows the model to skip layers: # # <img src="images/skip_connection_kiank.png" style="width:650px;height:200px;"> # <caption><center> <u> <font color='purple'> <b>Figure 2</b> </u><font color='purple'> : A ResNet block showing a skip-connection <br> </center></caption> # # The image on the left shows the "main path" through the network. The image on the right adds a shortcut to the main path. By stacking these ResNet blocks on top of each other, you can form a very deep network. # # The lecture mentioned that having ResNet blocks with the shortcut also makes it very easy for one of the blocks to learn an identity function. This means that you can stack on additional ResNet blocks with little risk of harming training set performance. # # On that note, there is also some evidence that the ease of learning an identity function accounts for ResNets' remarkable performance even more than skip connections help with vanishing gradients. # # Two main types of blocks are used in a ResNet, depending mainly on whether the input/output dimensions are the same or different. You are going to implement both of them: the "identity block" and the "convolutional block." 
# <a name='3-1'></a> # ### 3.1 - The Identity Block # # The identity block is the standard block used in ResNets, and corresponds to the case where the input activation (say $a^{[l]}$) has the same dimension as the output activation (say $a^{[l+2]}$). To flesh out the different steps of what happens in a ResNet's identity block, here is an alternative diagram showing the individual steps: # # <img src="images/idblock2_kiank.png" style="width:650px;height:150px;"> # <caption><center> <u> <font color='purple'> <b>Figure 3</b> </u><font color='purple'> : <b>Identity block.</b> Skip connection "skips over" 2 layers. </center></caption> # # The upper path is the "shortcut path." The lower path is the "main path." In this diagram, notice the CONV2D and ReLU steps in each layer. To speed up training, a BatchNorm step has been added. Don't worry about this being complicated to implement--you'll see that BatchNorm is just one line of code in Keras! # # In this exercise, you'll actually implement a slightly more powerful version of this identity block, in which the skip connection "skips over" 3 hidden layers rather than 2 layers. It looks like this: # # <img src="images/idblock3_kiank.png" style="width:650px;height:150px;"> # <caption><center> <u> <font color='purple'> <b>Figure 4</b> </u><font color='purple'> : <b>Identity block.</b> Skip connection "skips over" 3 layers.</center></caption> # These are the individual steps: # # First component of main path: # - The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid". Use 0 as the seed for the random uniform initialization: `kernel_initializer = initializer(seed=0)`. # - The first BatchNorm is normalizing the 'channels' axis. # - Then apply the ReLU activation function. This has no hyperparameters. # # Second component of main path: # - The second CONV2D has $F_2$ filters of shape $(f,f)$ and a stride of (1,1). Its padding is "same". 
Use 0 as the seed for the random uniform initialization: `kernel_initializer = initializer(seed=0)`. # - The second BatchNorm is normalizing the 'channels' axis. # - Then apply the ReLU activation function. This has no hyperparameters. # # Third component of main path: # - The third CONV2D has $F_3$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid". Use 0 as the seed for the random uniform initialization: `kernel_initializer = initializer(seed=0)`. # - The third BatchNorm is normalizing the 'channels' axis. # - Note that there is **no** ReLU activation function in this component. # # Final step: # - The `X_shortcut` and the output from the 3rd layer `X` are added together. # - **Hint**: The syntax will look something like `Add()([var1,var2])` # - Then apply the ReLU activation function. This has no hyperparameters. # # <a name='ex-1'></a> # ### Exercise 1 - identity_block # # Implement the ResNet identity block. The first component of the main path has been implemented for you already! First, you should read these docs carefully to make sure you understand what's happening. Then, implement the rest. # - To implement the Conv2D step: [Conv2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D) # - To implement BatchNorm: [BatchNormalization](https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization) `BatchNormalization(axis = 3)(X, training = training)`. If training is set to False, its weights are not updated with the new examples. I.e when the model is used in prediction mode. # - For the activation, use: `Activation('relu')(X)` # - To add the value passed forward by the shortcut: [Add](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add) # # We have added the initializer argument to our functions. 
This parameter receives an initializer function like the ones included in the package [tensorflow.keras.initializers](https://www.tensorflow.org/api_docs/python/tf/keras/initializers) or any other custom initializer. By default it will be set to [random_uniform](https://www.tensorflow.org/api_docs/python/tf/keras/initializers/RandomUniform) # # Remember that these functions accept a `seed` argument that can be any value you want, but that in this notebook must set to 0 for **grading purposes**. # Here is where you're actually using the power of the Functional API to create a shortcut path: # In[2]: # UNQ_C1 # GRADED FUNCTION: identity_block def identity_block(X, f, filters, training=True, initializer=random_uniform): """ Implementation of the identity block as defined in Figure 4 Arguments: X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev) f -- integer, specifying the shape of the middle CONV's window for the main path filters -- python list of integers, defining the number of filters in the CONV layers of the main path training -- True: Behave in training mode False: Behave in inference mode initializer -- to set up the initial weights of a layer. Equals to random uniform initializer Returns: X -- output of the identity block, tensor of shape (m, n_H, n_W, n_C) """ # Retrieve Filters F1, F2, F3 = filters # Save the input value. You'll need this later to add back to the main path. 
X_shortcut = X # First component of main path X = Conv2D(filters = F1, kernel_size = 1, strides = (1,1), padding = 'valid', kernel_initializer = initializer(seed=0))(X) X = BatchNormalization(axis = 3)(X, training = training) # Default axis X = Activation('relu')(X) ### START CODE HERE ## Second component of main path (≈3 lines) X = Conv2D(filters = F2, kernel_size = (f, f), strides = (1,1), padding = 'same', kernel_initializer = initializer(seed=0))(X) X = BatchNormalization(axis = 3)(X, training = training) X = Activation('relu')(X) ## Third component of main path (≈2 lines) X = Conv2D(filters = F3, kernel_size = (1, 1), strides = (1,1), padding = 'valid', kernel_initializer = initializer(seed=0))(X) X = BatchNormalization(axis = 3)(X, training = training) ## Final step: Add shortcut value to main path, and pass it through a RELU activation (≈2 lines) X = Add()([X, X_shortcut]) X = Activation('relu')(X) ### END CODE HERE return X # In[3]: np.random.seed(1) X1 = np.ones((1, 4, 4, 3)) * -1 X2 = np.ones((1, 4, 4, 3)) * 1 X3 = np.ones((1, 4, 4, 3)) * 3 X = np.concatenate((X1, X2, X3), axis = 0).astype(np.float32) A3 = identity_block(X, f=2, filters=[4, 4, 3], initializer=lambda seed=0:constant(value=1), training=False) print('\033[1mWith training=False\033[0m\n') A3np = A3.numpy() print(np.around(A3.numpy()[:,(0,-1),:,:].mean(axis = 3), 5)) resume = A3np[:,(0,-1),:,:].mean(axis = 3) print(resume[1, 1, 0]) print('\n\033[1mWith training=True\033[0m\n') np.random.seed(1) A4 = identity_block(X, f=2, filters=[3, 3, 3], initializer=lambda seed=0:constant(value=1), training=True) print(np.around(A4.numpy()[:,(0,-1),:,:].mean(axis = 3), 5)) public_tests.identity_block_test(identity_block) # **Expected value** # # ``` # With training=False # # [[[ 0. 0. 0. 0. ] # [ 0. 0. 0. 0. 
]] # # [[192.71234 192.71234 192.71234 96.85617] # [ 96.85617 96.85617 96.85617 48.92808]] # # [[578.1371 578.1371 578.1371 290.5685 ] # [290.5685 290.5685 290.5685 146.78426]]] # 96.85617 # # With training=True # # [[[0. 0. 0. 0. ] # [0. 0. 0. 0. ]] # # [[0.40739 0.40739 0.40739 0.40739] # [0.40739 0.40739 0.40739 0.40739]] # # [[4.99991 4.99991 4.99991 3.25948] # [3.25948 3.25948 3.25948 2.40739]]] # ``` # <a name='3-2'></a> # ### 3.2 - The Convolutional Block # # The ResNet "convolutional block" is the second block type. You can use this type of block when the input and output dimensions don't match up. The difference with the identity block is that there is a CONV2D layer in the shortcut path: # # <img src="images/convblock_kiank.png" style="width:650px;height:150px;"> # <caption><center> <u> <font color='purple'> <b>Figure 4</b> </u><font color='purple'> : <b>Convolutional block</b> </center></caption> # # * The CONV2D layer in the shortcut path is used to resize the input $x$ to a different dimension, so that the dimensions match up in the final addition needed to add the shortcut value back to the main path. (This plays a similar role as the matrix $W_s$ discussed in lecture.) # * For example, to reduce the activation dimensions's height and width by a factor of 2, you can use a 1x1 convolution with a stride of 2. # * The CONV2D layer on the shortcut path does not use any non-linear activation function. Its main role is to just apply a (learned) linear function that reduces the dimension of the input, so that the dimensions match up for the later addition step. # * As for the previous exercise, the additional `initializer` argument is required for grading purposes, and it has been set by default to [glorot_uniform](https://www.tensorflow.org/api_docs/python/tf/keras/initializers/GlorotUniform) # # The details of the convolutional block are as follows. 
# # First component of main path: # - The first CONV2D has $F_1$ filters of shape (1,1) and a stride of (s,s). Its padding is "valid". Use 0 as the `glorot_uniform` seed `kernel_initializer = initializer(seed=0)`. # - The first BatchNorm is normalizing the 'channels' axis. # - Then apply the ReLU activation function. This has no hyperparameters. # # Second component of main path: # - The second CONV2D has $F_2$ filters of shape (f,f) and a stride of (1,1). Its padding is "same". Use 0 as the `glorot_uniform` seed `kernel_initializer = initializer(seed=0)`. # - The second BatchNorm is normalizing the 'channels' axis. # - Then apply the ReLU activation function. This has no hyperparameters. # # Third component of main path: # - The third CONV2D has $F_3$ filters of shape (1,1) and a stride of (1,1). Its padding is "valid". Use 0 as the `glorot_uniform` seed `kernel_initializer = initializer(seed=0)`. # - The third BatchNorm is normalizing the 'channels' axis. Note that there is no ReLU activation function in this component. # # Shortcut path: # - The CONV2D has $F_3$ filters of shape (1,1) and a stride of (s,s). Its padding is "valid". Use 0 as the `glorot_uniform` seed `kernel_initializer = initializer(seed=0)`. # - The BatchNorm is normalizing the 'channels' axis. # # Final step: # - The shortcut and the main path values are added together. # - Then apply the ReLU activation function. This has no hyperparameters. # # <a name='ex-2'></a> # ### Exercise 2 - convolutional_block # # Implement the convolutional block. The first component of the main path is already implemented; then it's your turn to implement the rest! As before, always use 0 as the seed for the random initialization, to ensure consistency with the grader. 
# - [Conv2D](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D)
# - [BatchNormalization](https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization) (axis: Integer, the axis that should be normalized (typically the features axis)) `BatchNormalization(axis = 3)(X, training = training)`. If training is set to False, its weights are not updated with the new examples. I.e when the model is used in prediction mode.
# - For the activation, use:  `Activation('relu')(X)`
# - [Add](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add)
#
# We have added the initializer argument to our functions. This parameter receives an initializer function like the ones included in the package [tensorflow.keras.initializers](https://www.tensorflow.org/api_docs/python/tf/keras/initializers) or any other custom initializer. By default it will be set to [random_uniform](https://www.tensorflow.org/api_docs/python/tf/keras/initializers/RandomUniform)
#
# Remember that these functions accept a `seed` argument that can be any value you want, but that in this notebook must set to 0 for **grading purposes**.

# In[4]:

# UNQ_C2
# GRADED FUNCTION: convolutional_block
def convolutional_block(X, f, filters, s = 2, training=True, initializer=glorot_uniform):
    """
    Implementation of the convolutional block as defined in Figure 4

    Arguments:
    X -- input tensor of shape (m, n_H_prev, n_W_prev, n_C_prev)
    f -- integer, specifying the shape of the middle CONV's window for the main path
    filters -- python list of integers, defining the number of filters in the CONV layers of the main path
    s -- Integer, specifying the stride to be used
    training -- True: Behave in training mode
                False: Behave in inference mode
    initializer -- to set up the initial weights of a layer. Equals to Glorot uniform initializer,
                   also called Xavier uniform initializer.

    Returns:
    X -- output of the convolutional block, tensor of shape (n_H, n_W, n_C)
    """

    # Retrieve Filters
    F1, F2, F3 = filters

    # Save the input value so it can be re-injected via the shortcut path.
    X_shortcut = X

    ##### MAIN PATH #####

    # First component of main path glorot_uniform(seed=0)
    # 1x1 convolution; the stride s here (and on the shortcut) is what downsamples the block.
    X = Conv2D(filters = F1, kernel_size = 1, strides = (s, s), padding='valid', kernel_initializer = initializer(seed=0))(X)
    X = BatchNormalization(axis = 3)(X, training=training)
    X = Activation('relu')(X)

    ### START CODE HERE

    ## Second component of main path (≈3 lines)
    # fxf convolution with 'same' padding keeps spatial dims unchanged.
    X = Conv2D(filters = F2, kernel_size = (f,f), strides = (1, 1), padding='same', kernel_initializer = initializer(seed=0))(X)
    X = BatchNormalization(axis = 3)(X, training=training)
    X = Activation('relu')(X)

    ## Third component of main path (≈2 lines)
    # No ReLU here: activation is applied only after the residual addition below.
    X = Conv2D(filters = F3, kernel_size = (1,1), strides = (1, 1), padding='valid', kernel_initializer = initializer(seed=0))(X)
    X = BatchNormalization(axis = 3)(X, training=training)

    ##### SHORTCUT PATH ##### (≈2 lines)
    # Projection shortcut: matches both channel count (F3) and spatial size (stride s).
    X_shortcut = Conv2D(filters = F3, kernel_size = (1,1), strides = (s, s), padding='valid', kernel_initializer = initializer(seed=0))(X_shortcut)
    X_shortcut = BatchNormalization(axis = 3)(X_shortcut, training=training)

    ### END CODE HERE

    # Final step: Add shortcut value to main path (Use this order [X, X_shortcut]), and pass it through a RELU activation
    X = Add()([X, X_shortcut])
    X = Activation('relu')(X)

    return X


# In[5]:

# Smoke test of the block against reference outputs (from the course-provided `outputs` module).
from outputs import convolutional_block_output1, convolutional_block_output2
np.random.seed(1)

#X = np.random.randn(3, 4, 4, 6).astype(np.float32)
X1 = np.ones((1, 4, 4, 3)) * -1
X2 = np.ones((1, 4, 4, 3)) * 1
X3 = np.ones((1, 4, 4, 3)) * 3

X = np.concatenate((X1, X2, X3), axis = 0).astype(np.float32)

A = convolutional_block(X, f = 2, filters = [2, 4, 6], training=False)

assert type(A) == EagerTensor, "Use only tensorflow and keras functions"
assert tuple(tf.shape(A).numpy()) == (3, 2, 2, 6), "Wrong shape."
assert np.allclose(A.numpy(), convolutional_block_output1), "Wrong values when training=False."
print(A[0])

# training=True exercises the batch-norm training path, which produces different values.
B = convolutional_block(X, f = 2, filters = [2, 4, 6], training=True)
assert np.allclose(B.numpy(), convolutional_block_output2), "Wrong values when training=True."

print('\033[92mAll tests passed!')

# **Expected value**
#
# ```
# tf.Tensor(
# [[[0.         0.66683817 0.         0.         0.88853896 0.5274254 ]
#   [0.         0.65053666 0.         0.         0.89592844 0.49965227]]
#
#  [[0.         0.6312079  0.         0.         0.8636247  0.47643146]
#   [0.         0.5688321  0.         0.         0.85534114 0.41709304]]], shape=(2, 2, 6), dtype=float32)
# ```

# <a name='4'></a>
# ## 4 - Building Your First ResNet Model (50 layers)
#
# You now have the necessary blocks to build a very deep ResNet. The following figure describes in detail the architecture of this neural network. "ID BLOCK" in the diagram stands for "Identity block," and "ID BLOCK x3" means you should stack 3 identity blocks together.
#
# <img src="images/resnet_kiank.png" style="width:850px;height:150px;">
# <caption><center> <u> <font color='purple'> <b>Figure 5</b> </u><font color='purple'>  : <b>ResNet-50 model</b> </center></caption>
#
# The details of this ResNet-50 model are:
# - Zero-padding pads the input with a pad of (3,3)
# - Stage 1:
#     - The 2D Convolution has 64 filters of shape (7,7) and uses a stride of (2,2).
#     - BatchNorm is applied to the 'channels' axis of the input.
#     - ReLU activation is applied.
#     - MaxPooling uses a (3,3) window and a (2,2) stride.
# - Stage 2:
#     - The convolutional block uses three sets of filters of size [64,64,256], "f" is 3, and "s" is 1.
#     - The 2 identity blocks use three sets of filters of size [64,64,256], and "f" is 3.
# - Stage 3:
#     - The convolutional block uses three sets of filters of size [128,128,512], "f" is 3 and "s" is 2.
#     - The 3 identity blocks use three sets of filters of size [128,128,512] and "f" is 3.
# - Stage 4:
#     - The convolutional block uses three sets of filters of size [256, 256, 1024], "f" is 3 and "s" is 2.
# - The 5 identity blocks use three sets of filters of size [256, 256, 1024] and "f" is 3.
# - Stage 5:
#     - The convolutional block uses three sets of filters of size [512, 512, 2048], "f" is 3 and "s" is 2.
#     - The 2 identity blocks use three sets of filters of size [512, 512, 2048] and "f" is 3.
# - The 2D Average Pooling uses a window of shape (2,2).
# - The 'flatten' layer doesn't have any hyperparameters.
# - The Fully Connected (Dense) layer reduces its input to the number of classes using a softmax activation.
#
#
# <a name='ex-3'></a>
# ### Exercise 3 - ResNet50
#
# Implement the ResNet with 50 layers described in the figure above. We have implemented Stages 1 and 2. Please implement the rest. (The syntax for implementing Stages 3-5 should be quite similar to that of Stage 2) Make sure you follow the naming convention in the text above.
#
# You'll need to use this function:
# - Average pooling [see reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/AveragePooling2D)
#
# Here are some other functions we used in the code below:
# - Conv2D: [See reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Conv2D)
# - BatchNorm: [See reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/BatchNormalization) (axis: Integer, the axis that should be normalized (typically the features axis))
# - Zero padding: [See reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/ZeroPadding2D)
# - Max pooling: [See reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/MaxPool2D)
# - Fully connected layer: [See reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Dense)
# - Addition: [See reference](https://www.tensorflow.org/api_docs/python/tf/keras/layers/Add)

# In[9]:

# UNQ_C3
# GRADED FUNCTION: ResNet50
def ResNet50(input_shape = (64, 64, 3), classes = 6):
    """
    Stage-wise implementation of the architecture of the popular ResNet50:
    CONV2D -> BATCHNORM -> RELU -> MAXPOOL -> CONVBLOCK -> IDBLOCK*2 -> CONVBLOCK -> IDBLOCK*3
    -> CONVBLOCK -> IDBLOCK*5 -> CONVBLOCK -> IDBLOCK*2 -> AVGPOOL -> FLATTEN -> DENSE

    Arguments:
    input_shape -- shape of the images of the dataset
    classes -- integer, number of classes

    Returns:
    model -- a Model() instance in Keras
    """

    # Define the input as a tensor with shape input_shape
    X_input = Input(input_shape)

    # Zero-Padding
    X = ZeroPadding2D((3, 3))(X_input)

    # Stage 1: 7x7 conv, stride 2, then 3x3 max-pool, stride 2.
    X = Conv2D(64, (7, 7), strides = (2, 2), kernel_initializer = glorot_uniform(seed=0))(X)
    X = BatchNormalization(axis = 3)(X)
    X = Activation('relu')(X)
    X = MaxPooling2D((3, 3), strides=(2, 2))(X)

    # Stage 2: s = 1 so the convolutional block does not downsample here.
    X = convolutional_block(X, f = 3, filters = [64, 64, 256], s = 1)
    X = identity_block(X, 3, [64, 64, 256])
    X = identity_block(X, 3, [64, 64, 256])

    ### START CODE HERE

    ## Stage 3 (≈4 lines): convolutional block (s = 2) + 3 identity blocks.
    X = convolutional_block(X, f = 3, filters = [128,128,512], s = 2)
    X = identity_block(X, 3, [128,128,512])
    X = identity_block(X, 3, [128,128,512])
    X = identity_block(X, 3, [128,128,512])

    ## Stage 4 (≈6 lines): convolutional block (s = 2) + 5 identity blocks.
    X = convolutional_block(X, f = 3, filters = [256,256,1024], s = 2)
    X = identity_block(X, 3, [256, 256, 1024])
    X = identity_block(X, 3, [256, 256, 1024])
    X = identity_block(X, 3, [256, 256, 1024])
    X = identity_block(X, 3, [256, 256, 1024])
    X = identity_block(X, 3, [256, 256, 1024])

    ## Stage 5 (≈3 lines): convolutional block (s = 2) + 2 identity blocks.
    X = convolutional_block(X, f = 3, filters = [512, 512, 2048], s = 2)
    X = identity_block(X, 3, [512, 512, 2048])
    X = identity_block(X, 3, [512, 512, 2048])

    ## AVGPOOL (≈1 line). Use "X = AveragePooling2D(...)(X)"
    # FIX: was "X = X = AveragePooling2D((2,2))(X)" — a duplicated assignment target (typo).
    X = AveragePooling2D((2, 2))(X)

    ### END CODE HERE

    # output layer
    X = Flatten()(X)
    X = Dense(classes, activation='softmax', kernel_initializer = glorot_uniform(seed=0))(X)

    # Create model
    model = Model(inputs = X_input, outputs = X)

    return model


# Run the following code to build the model's graph. If your implementation is incorrect, you'll know it by checking your accuracy when running `model.fit(...)` below.
# In[10]: model = ResNet50(input_shape = (64, 64, 3), classes = 6) print(model.summary()) # In[11]: from outputs import ResNet50_summary model = ResNet50(input_shape = (64, 64, 3), classes = 6) comparator(summary(model), ResNet50_summary) # As shown in the Keras Tutorial Notebook, prior to training a model, you need to configure the learning process by compiling the model. # In[12]: model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy']) # The model is now ready to be trained. The only thing you need now is a dataset! # Let's load your old friend, the SIGNS dataset. # # <img src="images/signs_data_kiank.png" style="width:450px;height:250px;"> # <caption><center> <u> <font color='purple'> <b>Figure 6</b> </u><font color='purple'> : <b>SIGNS dataset</b> </center></caption> # # In[13]: X_train_orig, Y_train_orig, X_test_orig, Y_test_orig, classes = load_dataset() # Normalize image vectors X_train = X_train_orig / 255. X_test = X_test_orig / 255. # Convert training and test labels to one hot matrices Y_train = convert_to_one_hot(Y_train_orig, 6).T Y_test = convert_to_one_hot(Y_test_orig, 6).T print ("number of training examples = " + str(X_train.shape[0])) print ("number of test examples = " + str(X_test.shape[0])) print ("X_train shape: " + str(X_train.shape)) print ("Y_train shape: " + str(Y_train.shape)) print ("X_test shape: " + str(X_test.shape)) print ("Y_test shape: " + str(Y_test.shape)) # Run the following cell to train your model on 10 epochs with a batch size of 32. On a GPU, it should take less than 2 minutes. 
# In[14]: model.fit(X_train, Y_train, epochs = 10, batch_size = 32) # **Expected Output**: # # ``` # Epoch 1/10 # 34/34 [==============================] - 1s 34ms/step - loss: 1.9241 - accuracy: 0.4620 # Epoch 2/10 # 34/34 [==============================] - 2s 57ms/step - loss: 0.6403 - accuracy: 0.7898 # Epoch 3/10 # 34/34 [==============================] - 1s 24ms/step - loss: 0.3744 - accuracy: 0.8731 # Epoch 4/10 # 34/34 [==============================] - 2s 44ms/step - loss: 0.2220 - accuracy: 0.9231 # Epoch 5/10 # 34/34 [==============================] - 2s 57ms/step - loss: 0.1333 - accuracy: 0.9583 # Epoch 6/10 # 34/34 [==============================] - 2s 52ms/step - loss: 0.2243 - accuracy: 0.9444 # Epoch 7/10 # 34/34 [==============================] - 2s 48ms/step - loss: 0.2913 - accuracy: 0.9102 # Epoch 8/10 # 34/34 [==============================] - 1s 30ms/step - loss: 0.2269 - accuracy: 0.9306 # Epoch 9/10 # 34/34 [==============================] - 2s 46ms/step - loss: 0.1113 - accuracy: 0.9630 # Epoch 10/10 # 34/34 [==============================] - 2s 57ms/step - loss: 0.0709 - accuracy: 0.9778 # ``` # # The exact values could not match, but don't worry about that. The important thing that you must see is that the loss value decreases, and the accuracy increases for the firsts 5 epochs. # Let's see how this model (trained on only two epochs) performs on the test set. # In[15]: preds = model.evaluate(X_test, Y_test) print ("Loss = " + str(preds[0])) print ("Test Accuracy = " + str(preds[1])) # **Expected Output**: # # <table> # <tr> # <td> # <b>Test Accuracy</b> # </td> # <td> # >0.80 # </td> # </tr> # # </table> # For the purposes of this assignment, you've been asked to train the model for ten epochs. You can see that it performs well. The online grader will only run your code for a small number of epochs as well. Please go ahead and submit your assignment. 
# After you have finished this official (graded) part of this assignment, you can also optionally train the ResNet for more iterations, if you want. It tends to get much better performance when trained for ~20 epochs, but this does take more than an hour when training on a CPU.
#
# Using a GPU, this ResNet50 model's weights were trained on the SIGNS dataset. You can load and run the trained model on the test set in the cells below. It may take ≈1min to load the model. Have fun!

# In[16]:

# Load weights pre-trained by the course staff; expects 'resnet50.h5' in the working directory.
pre_trained_model = tf.keras.models.load_model('resnet50.h5')

# In[17]:

preds = pre_trained_model.evaluate(X_test, Y_test)
print ("Loss = " + str(preds[0]))
print ("Test Accuracy = " + str(preds[1]))

# **Congratulations** on finishing this assignment! You've now implemented a state-of-the-art image classification system! Woo hoo!
#
# ResNet50 is a powerful model for image classification when it's trained for an adequate number of iterations. Hopefully, from this point, you can use what you've learned and apply it to your own classification problem to perform state-of-the-art accuracy.

# <font color = 'blue'>
#
# **What you should remember**:
#
# - Very deep "plain" networks don't work in practice because vanishing gradients make them hard to train.
# - Skip connections help address the Vanishing Gradient problem. They also make it easy for a ResNet block to learn an identity function.
# - There are two main types of blocks: The **identity block** and the **convolutional block**.
# - Very deep Residual Networks are built by stacking these blocks together.

# <a name='5'></a>
# ## 5 - Test on Your Own Image (Optional/Ungraded)

# If you wish, you can also take a picture of your own hand and see the output of the model. To do this:
#     1. Click on "File" in the upper bar of this notebook, then click "Open" to go on your Coursera Hub.
#     2. Add your image to this Jupyter Notebook's directory, in the "images" folder
#     3. Write your image's name in the following code
#     4. Run the code and check if the algorithm is right!

# In[ ]:

img_path = 'images/my_image.jpg'
# Resize to the model's expected 64x64 input and add a batch dimension.
img = image.load_img(img_path, target_size=(64, 64))
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = x/255.0
print('Input image shape:', x.shape)
imshow(img)
prediction = pre_trained_model.predict(x)
print("Class prediction vector [p(0), p(1), p(2), p(3), p(4), p(5)] = ", prediction)
print("Class:", np.argmax(prediction))

# Even though the model has high accuracy, it might be performing poorly on your own set of images. Notice that, the shape of the pictures, the lighting where the photos were taken, and all of the preprocessing steps can have an impact on the performance of the model. Considering everything you have learned in this specialization so far, what do you think might be the cause here?
#
# *Hint*: It might be related to some distributions. Can you come up with a potential solution ?

# You can also print a summary of your model by running the following code.

# In[ ]:

pre_trained_model.summary()

# <a name='6'></a>
# ## 6 - Bibliography
#
# This notebook presents the ResNet algorithm from He et al. (2015). The implementation here also took significant inspiration and follows the structure given in the GitHub repository of Francois Chollet:
#
# - Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun - [Deep Residual Learning for Image Recognition (2015)](https://arxiv.org/abs/1512.03385)
# - Francois Chollet's GitHub repository: https://github.com/fchollet/deep-learning-models/blob/master/resnet50.py
#

# In[ ]:
{"hexsha": "4c62bdd805802b6fc332936b33fe4d2b04208d62", "size": 34144, "ext": "py", "lang": "Python", "max_stars_repo_path": "4 - Convolutional Neural Networks/Residual_Networks.py", "max_stars_repo_name": "pouyalj/DeepLearningCoursera", "max_stars_repo_head_hexsha": "4c0d79a53bbdd24fbb77503fed35e73d24949be2", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2022-01-01T00:14:18.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-01T00:14:18.000Z", "max_issues_repo_path": "4 - Convolutional Neural Networks/Residual_Networks.py", "max_issues_repo_name": "pouyalj/DeepLearningCoursera", "max_issues_repo_head_hexsha": "4c0d79a53bbdd24fbb77503fed35e73d24949be2", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "4 - Convolutional Neural Networks/Residual_Networks.py", "max_forks_repo_name": "pouyalj/DeepLearningCoursera", "max_forks_repo_head_hexsha": "4c0d79a53bbdd24fbb77503fed35e73d24949be2", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 47.2254495159, "max_line_length": 401, "alphanum_fraction": 0.6871192596, "include": true, "reason": "import numpy,import scipy", "num_tokens": 9730}
!----------------------------------*-F90-*----------------------------------
!
! file   FortranCheck/f90sub/drel.f90
! author Kelly Thompson
! date   Tuesday, Jun 12, 2012, 16:03 pm
! brief  Test F90 main linking against C++ library and calling a C++ function.
! note   Copyright (c) 2016-2019 Triad National Security, LLC.
!        All rights reserved.
!---------------------------------------------------------------------------

! Ref: http://gcc.gnu.org/onlinedocs/gfortran/Interoperable-Subroutines-and-Functions.html
!      http://fortranwiki.org/fortran/show/Generating+C+Interfaces

! Exercises C-interoperability: calls three extern "C" functions from the ds++
! C++ library and counts failures in nf (0 on full success).
subroutine drelf90(nf) bind(c, name="drelf90")

  use iso_c_binding, only : c_size_t,C_NULL_CHAR,c_int
  implicit none

  ! nf -- running count of failed checks, returned to the C caller.
  integer(c_int), intent(out) :: nf

  ! Explicit interface for the C function declared in ds++/Release.hh.
  interface ec_release
     ! include "ds++//Release.hh"
     subroutine ec_release(release_string,maxlen) bind( C, name="ec_release" )
       use iso_c_binding, only: c_char,c_size_t
       implicit none
       character(kind=c_char,len=1), intent(out) :: release_string
       integer(c_size_t), intent(in), value :: maxlen
     end subroutine ec_release
  end interface ec_release

  ! Explicit interface for the C function declared in ds++/Endian.hh.
  interface dsxx_is_big_endian
     ! include "ds++/Endian.hh"
     function dsxx_is_big_endian() bind ( C, name = "dsxx_is_big_endian" )
       use iso_c_binding, only: c_int
       implicit none
       integer(c_int) :: dsxx_is_big_endian
     end function dsxx_is_big_endian
  end interface dsxx_is_big_endian

  ! Generic interface: the compiler picks the specific byte-swap routine
  ! (int, int64, or double) from the actual argument's type.
  interface dsxx_byte_swap
     ! include "ds++/Endian.hh"
     subroutine dsxx_byte_swap_int( data ) bind( C, name = "dsxx_byte_swap_int" )
       use iso_c_binding, only: c_int
       implicit none
       integer(c_int), intent(inout) :: data
     end subroutine dsxx_byte_swap_int
     subroutine dsxx_byte_swap_int64_t( data ) bind( C, name = "dsxx_byte_swap_int64_t" )
       use iso_c_binding, only: c_int64_t
       implicit none
       integer(c_int64_t), intent(inout) :: data
     end subroutine dsxx_byte_swap_int64_t
     subroutine dsxx_byte_swap_double( data ) bind( C, name = "dsxx_byte_swap_double" )
       use iso_c_binding, only: c_double
       implicit none
       real(c_double), intent(inout) :: data
     end subroutine dsxx_byte_swap_double
  end interface dsxx_byte_swap

  !----------------------------------------------------------------------
  ! Variable declarations
  integer, parameter :: maxlen = 80
  character(len=maxlen) :: release_string
  integer :: is_big_endian, idata
  real(8) :: ddata

  !----------------------------------------------------------------------
  ! Initialization
  nf = 0 ! init number of failures to zero
  ! C expects a NUL-terminated string, so pad with blanks and terminate.
  release_string = repeat(' ',maxlen)
  release_string(maxlen:maxlen) = C_NULL_CHAR

  !----------------------------------------------------------------------
  ! Retrieve the version string from ds++
  call ec_release( release_string, len(release_string,kind=c_size_t) )
  print '(a)', trim(release_string)

  if( release_string(1:6) .eq. "Draco-" )then
     print '(a)', "Test: passed"
     print '(a)', " Found 'Draco-' in release string."
  else
     print '(a)', "Test: failed"
     print '(a)', " Did not find 'Draco-' in the release string."
     nf = nf + 1
  endif

  !----------------------------------------------------------------------
  ! Check the ds++/Endian extern "C" functions...
  ! The C side returns 0 or 1; anything else indicates a broken binding.
  is_big_endian = dsxx_is_big_endian()
  if( is_big_endian.gt.1.or.is_big_endian.lt.0 )then
     print '(a)', "Test: failed"
     print '(a)', " dsxx_is_big_endian returned an unexpected value."
     nf = nf + 1
  endif

  ! note: integers must be signed in F90 (i.e.: we cannot use Z'DEADBEEF')
  idata = Z'00112233'
  call dsxx_byte_swap(idata)
  if( idata /= Z'33221100' )then
     print '(a)', "Test: failed"
     print '(a)', " dsxx_byte_swap(int) returned an unexpected value."
     nf = nf+1
  endif

  ddata=42.0
  ! Call swap 2x to get initial value
  call dsxx_byte_swap(ddata)
  call dsxx_byte_swap(ddata)
  if( ddata /= 42.0 )then
     print '(a)', "Test: failed"
     print '(a)', " dsxx_byte_swap(double) returned an unexpected value."
     nf = nf+1
  endif

  if(nf>0)then
     print '(a)', "Test: failed"
     print '(a)', " Endianess checks had some failures."
  else
     print '(a)', "Test: passed"
     print '(a)', " Endianess checks all pass."
  endif

  !----------------------------------------------------------------------
  ! Summary
  print '(a)', " "
  print '(a)', "*********************************************"
  if(nf>0)then
     print '(a)', "**** cppmain Test: FAILED."
  else
     print '(a)', "**** cppmain Test: PASSED."
  endif
  print '(a)', "*********************************************"

end subroutine drelf90

!---------------------------------------------------------------------------
! end of drel.f90
!---------------------------------------------------------------------------
{"hexsha": "0d81635e28a91a49573fcba573e450a49bd0ca2d", "size": 4871, "ext": "f90", "lang": "FORTRAN", "max_stars_repo_path": "src/FortranChecks/f90sub/drel.f90", "max_stars_repo_name": "rspavel/Draco", "max_stars_repo_head_hexsha": "b279b1afbfbb39f2d521579697172394c5efd81d", "max_stars_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/FortranChecks/f90sub/drel.f90", "max_issues_repo_name": "rspavel/Draco", "max_issues_repo_head_hexsha": "b279b1afbfbb39f2d521579697172394c5efd81d", "max_issues_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/FortranChecks/f90sub/drel.f90", "max_forks_repo_name": "rspavel/Draco", "max_forks_repo_head_hexsha": "b279b1afbfbb39f2d521579697172394c5efd81d", "max_forks_repo_licenses": ["BSD-3-Clause-Open-MPI"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 34.7928571429, "max_line_length": 90, "alphanum_fraction": 0.5596386779, "num_tokens": 1237}
from collections import defaultdict
import json

import numpy as np

from typing import Set, Iterable, Union, Dict, Any


class DataSet:
    """
    A DataSet represents a series of observations (input and output).

    DataSets are generally constructed via Java using the Apollo library.
    Implementations should implement the special __getitem__, __setitem_, and
    __len__ methods.
    """

    def limit(self, n: int) -> 'DataSet':
        # FIX: the annotation previously read 'Dataset' (wrong capitalization),
        # which would raise NameError if the hint were ever resolved.
        """
        Limits the items in the dataset to the first N

        :param n: the number of items to limit the dataset to
        :return: this dataset
        """
        raise NotImplementedError

    def observations(self) -> Set[str]:
        """
        :return: The set of observation names in this dataset.
        """
        raise NotImplementedError()

    def dimension(self, name: str) -> int:
        """
        Determines the dimension of the given observation.

        :param name: The name of observation
        :return: The dimension of the observation or 0 if not defined
        """
        raise NotImplementedError()

    def select(self, names: Union[Iterable[str], str]) -> Dict[str, Any]:
        # FIX: the return annotation previously read Set[Dict[str, Any]], but
        # implementations return a plain dict of name -> values.
        """
        Selects a subset of the observations in this DataSet as a dictionary of
        name and values.

        :param names: The name(s) of the observations to select
        :return: a dictionary of name and values.
        """
        raise NotImplementedError()

    def __getitem__(self, item):
        raise NotImplementedError()

    def __setitem__(self, key, value):
        raise NotImplementedError()

    def __len__(self):
        raise NotImplementedError()


class ApolloDataSet(DataSet):
    """
    Base class for Apollo DataSets created via Java
    """

    def __init__(self, data_file: str):
        super(ApolloDataSet, self).__init__()
        # name -> dimension, name -> list of parsed observations, item count.
        self.dimensions = None
        self.data = None
        self.size = 0
        self._parse_data(data_file)

    def _parse_data(self, data_file: str):
        """Populate self.dimensions, self.data, and self.size from data_file."""
        raise NotImplementedError()

    def limit(self, n: int) -> DataSet:
        """Truncate every observation list to its first n items. Returns self."""
        # NOTE: the original assigned `__doc__ = DataSet.__doc__` inside the
        # body, which was a no-op local assignment; real docstrings are used now.
        for name in list(self.data):
            self.data[name] = self.data[name][:n]
        self.size = min(self.size, n)
        return self

    def __getitem__(self, item):
        return self.data[item]

    def __setitem__(self, key, value):
        self.data[key] = value

    def __len__(self):
        return self.size

    def observations(self) -> Set[str]:
        """Return the set of observation names in this dataset."""
        # FIX: return a real set, as the annotation promises (was a KeysView).
        return set(self.data)

    def dimension(self, name: str) -> int:
        """Return the dimension of the named observation, or 0 if undefined."""
        return self.dimensions.get(name, 0)

    def select(self, names: Union[Iterable[str], str]) -> Dict[str, Any]:
        """Return {name: values} for the requested observation name(s)."""
        if isinstance(names, str):
            names = [names]
        return {n: self.data[n] for n in names}

    def _parse_observation(self, obs):
        """
        Convert one serialized observation into a Python value.

        Serialized types: "vs" = sequence of observations, "dm"/"sm" =
        dense/sparse matrix (flattened, with a 4-element shape whose last two
        entries are rows/cols), "v" = variable as "prefix=suffix" string.
        Returns None for unrecognized types (preserved behavior).
        """
        vtype = obs["@type"].lower()
        if vtype == "vs":
            return [self._parse_observation(so) for so in obs["seq"]]
        if vtype == "dm" or vtype == "sm":
            matrix = obs["matrix"]
            shape = obs["shape"]
            n = np.array(matrix)
            # A trailing dimension of 1 collapses to a vector.
            if shape[3] != 1:
                return n.reshape((shape[2], shape[3]))
            return n.reshape(shape[2])
        if vtype == "v":
            p = obs["p"]
            s = obs["s"]
            if p == "":
                return s
            if s == "":
                return p
            return p + "=" + s


class ApolloJsonDataSet(ApolloDataSet):
    """
    Apollo DataSet created in Java and serialized to a Json file
    """

    def __init__(self, data_file: str):
        super().__init__(data_file)

    def _parse_data(self, data_file: str):
        """Load the dataset from a JSON file with 'metadata' and 'data' keys."""
        with open(data_file) as fp:
            data_map = json.load(fp)

        self.dimensions = dict()
        for name, m in data_map["metadata"].items():
            self.dimensions[name] = m.get("dimension", 0)

        self.data = defaultdict(list)
        for datum in data_map["data"]:
            self.size += 1
            for name, obs in datum.items():
                self.data[name].append(self._parse_observation(obs))


class ApolloSQLDataSet(ApolloDataSet):
    """
    Apollo DataSet created in Java and persisted to disk (SQLite)
    """

    def __init__(self, data_file: str):
        super().__init__(data_file)

    def _parse_data(self, data_file: str):
        """Load the dataset from a SQLite db with 'metadata' and 'data' tables."""
        import sqlite3
        self.connection = sqlite3.connect(data_file)
        c = self.connection.cursor()
        c.execute("SELECT value FROM metadata WHERE name = '__size__'")
        self.size = int(c.fetchone()[0])
        self.dimensions = dict()
        c.execute("SELECT name,value FROM metadata WHERE name != '__size__'")
        for row in c:
            # Best-effort: metadata rows without a parseable dimension are skipped.
            try:
                m = json.loads(row[1])
                self.dimensions[row[0]] = int(m["dimension"])
            except Exception:
                pass
        c.close()
        self.data = defaultdict(list)
        for row in self.row_iterator("SELECT * FROM DATA"):
            datum = json.loads(row[0])
            for name, obs in datum.items():
                self.data[name].append(self._parse_observation(obs))

    def row_iterator(self, statement):
        """Yield rows for the statement, closing the cursor even on early exit."""
        c = self.connection.cursor()
        # FIX: close the cursor in a finally block so it is released even if
        # the consumer abandons the generator or an exception propagates.
        try:
            c.execute(statement)
            yield from c
        finally:
            c.close()
{"hexsha": "bb85ebaa5c568822b6c973f3939983a87106b7e0", "size": 5468, "ext": "py", "lang": "Python", "max_stars_repo_path": "python/apollo/data.py", "max_stars_repo_name": "gengoai/mono-repo", "max_stars_repo_head_hexsha": "50e95c16579aaa8a4ee0776582964868b5625415", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2021-03-15T12:12:38.000Z", "max_stars_repo_stars_event_max_datetime": "2021-03-15T12:12:38.000Z", "max_issues_repo_path": "python/apollo/data.py", "max_issues_repo_name": "gengoai/mono-repo", "max_issues_repo_head_hexsha": "50e95c16579aaa8a4ee0776582964868b5625415", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2021-03-31T17:54:38.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-10T02:40:07.000Z", "max_forks_repo_path": "python/apollo/data.py", "max_forks_repo_name": "gengoai/gengoai", "max_forks_repo_head_hexsha": "50e95c16579aaa8a4ee0776582964868b5625415", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 30.3777777778, "max_line_length": 113, "alphanum_fraction": 0.5769934162, "include": true, "reason": "import numpy", "num_tokens": 1217}
# check overall performance using randomly chosen points
#
# Benchmarks polylog() against reference values precomputed in Mathematica,
# timing all evaluations and plotting the error distribution and the regions
# of the (s, z) planes where each internal series was used.
using Polylogarithms
using DataFrames, CSV
using PyPlot
using PyCall
using Printf
using StatsBase
fs = (4,4)
fs2 = (5,4)
PyCall.PyDict(matplotlib["rcParams"])["legend.markerscale"] = 3.0 # https://matplotlib.org/3.1.0/api/legend_api.html
L = Symbol("Li_s(z)")

# input data from Mathematica and reparse into complex numbers
C = 2
filename = @sprintf("../data/polylog_test_data_rand_%d.csv", C)
data1 = CSV.read(filename; delim=",", type=String) # has trouble reading in numbers like "2." so read all into strings, and parse
data1[!,:s] = parse.(Complex{Float64}, data1[!,:s] )
data1[!,:z] = parse.(Complex{Float64}, data1[!,:z] )
data1[!, L] = parse.(Complex{Float64}, data1[!, L] )
m = size(data1,1)
Li = data1[!, L]   # reference values from Mathematica
s = data1[!,:s]
z = data1[!,:z]

# plot the locations of points
markerSize = 1
figure("z", figsize=fs)
title("distribution of z")
plot(real.(z), imag.(z), "."; markersize=markerSize)
ylabel("Im(z)")
xlabel("Re(z)")
axis("equal")
figure("s", figsize=fs)
title("distribution of s")
plot(real.(s), imag.(s), "."; markersize=markerSize)
ylabel("Im(s)")
xlabel("Re(s)")
axis("equal")

# Preallocate result arrays; S2 is currently unused.
S1 = zeros(Complex{Float64}, m)
S2 = zeros(Complex{Float64}, m)
n1 = zeros(Int64, m)
series1 = zeros(Int64, m)
error1 = zeros(Float64, m)
rel_error1 = zeros(Float64, m)
accuracy = 1.0e-12 # this is the default anyway, but can change it for testing

# polylog(1.5, 0.4)
# Warm-up call so the @timed loop below excludes compilation time.
polylog( complex(1.5), complex(0.4), Diagnostics() )
val, t, bytes, gctime, memallocs = @timed begin
    for i=1:m
        # print(".")
        # Diagnostics() makes polylog also return the iteration count and
        # which internal series was used.
        S1[i], n1[i], series1[i] = polylog(s[i], z[i], Diagnostics(); min_iterations=0, accuracy=accuracy )
        # S1[i] = result1[1]
        # n1[i] = result1[2]
        # series1[i] = result1[3]
    end
end
@printf(" finished %d calculations in time t=%.2f seconds, i.e., %.1f microsec each\n", m, t, (t/m)*1.0e6)
error1 = abs.( S1 - Li )
rel_error1 = error1 ./ abs.( Li )
println("   max abs. rel. error1 = $(maximum( abs.(rel_error1) ))")

# NOTE(review): the figure is named with a .csv data path — presumably a
# copy/paste slip; confirm the intended figure name.
fig = figure(@sprintf("../data/polylog_bench_rand_1.csv"), figsize=(6,4))
clf()
h = hist(log10.(rel_error1), collect(-16 : 0.5 : -8); align="mid" )
ylabel("number")
xlabel("log10(relative absolute error)")
plot( log10(maximum(rel_error1))*[1,1], [0,maximum(h[1])], "--")
savefig(@sprintf("plots/polylog_bench_rand_%d.pdf", C); bbox_inches="tight")

figure("series", figsize=fs2)
ms = maximum(series1)
x = hist( series1, collect(0.5:1:ms+0.5); align="mid" )
# Decode the series-code convention: codes 1-3 are plain series, 4-7 add a
# reciprocal transform, 11+ add one or more duplication steps.
for i=1:length(x[1])
    if i<=3
        @printf("   series %d used %d times\n", i, x[1][i])
    elseif i<8
        @printf("   reciprocal + series %d used %d times\n", i-3, x[1][i])
    elseif i< 20
        @printf("   duplication + series %d used %d times\n", i-10, x[1][i])
    else
        @printf("   duplication used more than once + series %d used %d times\n", i-10, x[1][i])
    end
end

# look at bad points
k = findall( rel_error1 .> 1.0e-12 )
count_bad = length(k)
bad_guys = DataFrame(s=s[k], z=z[k], Li=Li[k], err=rel_error1[k], series=series1[k], n_terms=n1[k])
figure("z")
plot(real.(z[k]), imag.(z[k]), "r."; markersize=markerSize+1)
figure("s")
plot(real.(s[k]), imag.(s[k]), "r."; markersize=markerSize+1)

# Bin mean relative error by Im(s) in half-unit bins.
s_min = floor( minimum(min.(real.(s), imag.(s)) ))
s_max = ceil( maximum(max.(real.(s), imag.(s)) ))
s_bins = collect( s_min : 0.5 : s_max )
m = length(s_bins)
s_b = zeros(m-1)
error_v_s = zeros(m-1)
for i=1:m-1
    s_b[i] = (s_bins[i] + s_bins[i+1])/2
    k_i = findall( s_bins[i] .<= imag.(s) .< s_bins[i+1] )
    error_v_s[i] = log10.( mean( rel_error1[k_i] ) )
end
figure("error vs s", figsize=fs2)
plot( s_b, error_v_s)
ylabel("number")
xlabel("series (values >4 are a sum of two components)")

# look at really bad points
k = findall( (rel_error1 .> 1.0e-8) .| isinf.(S1) )
count_really_bad = length(k)
really_bad_guys = DataFrame(s=s[k], z=z[k], Li=Li[k], err=rel_error1[k], series=series1[k], n_terms=n1[k])
figure("z")
plot(real.(z[k]), imag.(z[k]), "r."; markersize=markerSize+3)
savefig( @sprintf("plots/polylog_bench_rand_errors_z_%d.pdf", C); bbox_inches="tight")
figure("s")
plot(real.(s[k]), imag.(s[k]), "r."; markersize=markerSize+3)
savefig( @sprintf("plots/polylog_bench_rand_errors_s_%d.pdf", C); bbox_inches="tight")

# show which series is used where
figure("z2", figsize=fs)
for i=1:ms
    k = findall( series1 .== i )
    if length(k) > 0
        if i<10
            plot(real.(z[k]), imag.(z[k]), "."; markersize=markerSize, label=@sprintf("Series %d",i))
        elseif i<50
            n_series = (i - floor(i,digits=-1)) / 2 # not right when we get to 50
            plot(real.(z[k]), imag.(z[k]), "."; markersize=markerSize, label=@sprintf("Series 2 used %d times", n_series))
        elseif i>=50
            i -= 50
            n_series = 5
            n_series += (i - floor(i,digits=-1)) / 2 # not right when we get to 50
            plot(real.(z[k]), imag.(z[k]), "."; markersize=markerSize, label=@sprintf("Series 2 used %d times", n_series))
        end
    end
end
legend()
ylabel("Im(z)")
xlabel("Re(z)")
axis("equal")
savefig( @sprintf("plots/polylog_bench_rand_domains_%d.pdf", C); bbox_inches="tight")
{"hexsha": "becf5294ed17da14767a2ce2e520509fce67d47c", "size": 5156, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "extended_tests/polylog_bench_rand.jl", "max_stars_repo_name": "AshtonSBradley/Polylogarithms.jl", "max_stars_repo_head_hexsha": "f52e3075cd1d87b0d67d26c72cd1a79a365eddcd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 7, "max_stars_repo_stars_event_min_datetime": "2020-08-24T09:17:28.000Z", "max_stars_repo_stars_event_max_datetime": "2021-12-26T10:54:45.000Z", "max_issues_repo_path": "extended_tests/polylog_bench_rand.jl", "max_issues_repo_name": "AshtonSBradley/Polylogarithms.jl", "max_issues_repo_head_hexsha": "f52e3075cd1d87b0d67d26c72cd1a79a365eddcd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 5, "max_issues_repo_issues_event_min_datetime": "2020-11-18T00:06:36.000Z", "max_issues_repo_issues_event_max_datetime": "2021-07-18T10:35:43.000Z", "max_forks_repo_path": "extended_tests/polylog_bench_rand.jl", "max_forks_repo_name": "AshtonSBradley/Polylogarithms.jl", "max_forks_repo_head_hexsha": "f52e3075cd1d87b0d67d26c72cd1a79a365eddcd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 4, "max_forks_repo_forks_event_min_datetime": "2020-12-17T16:20:36.000Z", "max_forks_repo_forks_event_max_datetime": "2021-08-02T09:32:06.000Z", "avg_line_length": 31.8271604938, "max_line_length": 122, "alphanum_fraction": 0.626454616, "num_tokens": 1753}
import re
from functools import lru_cache
from typing import Union

import numpy as np
import pandas as pd
import sidekick as sk

from .types import Region

# Lazy import: the ".db" module is only loaded on first attribute access.
db = sk.import_later(".db:db", package=__package__)

ISO2 = re.compile(r"[A-Z]{2}")
ISO3 = re.compile(r"[A-Z]{3}")
MUNDI_CODE = re.compile(r"[A-Z]{2}-\w+(:\w+)*")
# Preference order used to break ties when several region types share a name.
TYPES_HIERARCHY = ["state", "city", "district", "region"]


def regions(country=None, **kwargs) -> pd.DataFrame:
    """
    Query the regions/sub-divisions database.

    Args:
        country:
            Optional country reference (name, code or Region). Mutually
            exclusive with an explicit ``country_code`` keyword argument.
        **kwargs:
            Column filters forwarded to ``db.query``.

    Raises:
        TypeError: if both ``country`` and ``country_code`` are given.
    """
    if country and "country_code" in kwargs:
        raise TypeError("cannot specify country and country_code")
    elif country:
        kwargs["country_code"] = country_code(country)
    return db.query(**kwargs)


def countries(**kwargs) -> pd.DataFrame:
    """
    Query the country database.

    Shortcut for ``regions(type="country", **kwargs)``.
    """
    return regions(type="country", **kwargs)


def region(*args, country=None, **kwargs) -> Region:
    """
    Query the regions/sub-divisions database and return a single Region.

    Accepts either one positional reference (a Region instance, which is
    returned as-is, or any code/name understood by :func:`code`) or keyword
    filters forwarded to ``db.get``.
    """
    if country:
        kwargs["country_code"] = country_code(country)
    if args:
        (ref,) = args
        if isinstance(ref, Region):
            return ref
        row = db.get(code(ref))
    else:
        row = db.get(**kwargs)
    return Region(row.name)


@lru_cache(1024)
def country_code(code: str) -> str:
    """
    Return the country code for the given country.

    Similar to the code() function, but only accept valid countries.
    Tries, in order: Region instance, ISO2 code, ISO3 (long) code, numeric
    code, and finally a country name lookup.

    Raises:
        ValueError: if a Region is given that is not a country.
        LookupError: if no country matches ``code``.
    """
    if isinstance(code, Region):
        if code.type != "country":
            raise ValueError(f"region is not a country: {code}")
        return code.id

    if ISO2.fullmatch(code.upper()):
        try:
            db.get(code)
            return code.upper()
        except LookupError:
            pass
    elif ISO3.fullmatch(code.upper()):
        try:
            res = db.get(long_code=code.upper(), type="country")
            return res.name
        except LookupError:
            pass

    if code.isdigit():
        res = db.get(numeric_code=code, type="country")
        return res.name
    elif "/" not in code:
        res = db.get(name=code, type="country")
        return res.name
    raise LookupError(code)


@lru_cache(32_000)
def code(code: Union[Region, str]) -> str:
    """
    Return the mundi code for the given region.

    Accepts a Region instance, a country reference, a full mundi code
    ("XX-..."), or a "country/subdivision" pair.
    """
    if isinstance(code, Region):
        return code.id
    try:
        return country_code(code)
    except LookupError:
        pass

    if MUNDI_CODE.fullmatch(code.upper()):
        try:
            res = db.get(code)
            return res.name
        except LookupError:
            # Direct lookup failed; fall back to resolving the two halves.
            pass
        country, _, division = code.partition("-")
    elif "/" in code:
        country, _, division = code.partition("/")
    else:
        raise LookupError(code)

    country = country_code(country)
    return _subdivision_code(country, division)


@lru_cache(32_000)
def _subdivision_code(country: str, subdivision: str) -> str:
    """
    Return the mundi code for the given subdivision of a country.

    Tries, in order: numeric code, short/long codes, and a name lookup;
    name collisions across region types are resolved by TYPES_HIERARCHY.

    Raises:
        LookupError: if the subdivision cannot be resolved.
    """
    if subdivision.isdigit():
        res = db.get(numeric_code=subdivision, country_code=country)
        return f"{country}-{res.name}"
    else:
        for lookup in ["short_code", "long_code"]:
            kwargs = {lookup: subdivision, "country_code": country}
            try:
                res = db.get(**kwargs)
                return f"{country}-{res.name}"
            except LookupError:
                pass
        values = db.query(
            country_code=country, name=subdivision, cols=("id", "type", "subtype")
        )
        if len(values) == 1:
            return values.index[0]
        elif len(values) > 1:
            pos = np.argsort([TYPES_HIERARCHY.index(x) for x in values["type"]])
            return values.index[pos[0]]
        else:
            # BUG FIX: the original raised LookupError(code), but inside this
            # function `code` is the module-level *function* (the name is only
            # shadowed inside code() itself), so the exception carried a
            # function object instead of the value that failed to resolve.
            raise LookupError(f"{country}-{subdivision}")
{"hexsha": "66c684e1c7404f541e95e868872315b151a83268", "size": 3808, "ext": "py", "lang": "Python", "max_stars_repo_path": "mundi/functions.py", "max_stars_repo_name": "pydemic/mundi", "max_stars_repo_head_hexsha": "7c88037bd5aaff8bbed4c63b52f05bebc989b0e4", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2, "max_stars_repo_stars_event_min_datetime": "2020-05-28T22:03:12.000Z", "max_stars_repo_stars_event_max_datetime": "2020-09-30T12:16:04.000Z", "max_issues_repo_path": "mundi/functions.py", "max_issues_repo_name": "pydemic/mundi", "max_issues_repo_head_hexsha": "7c88037bd5aaff8bbed4c63b52f05bebc989b0e4", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "mundi/functions.py", "max_forks_repo_name": "pydemic/mundi", "max_forks_repo_head_hexsha": "7c88037bd5aaff8bbed4c63b52f05bebc989b0e4", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3866666667, "max_line_length": 82, "alphanum_fraction": 0.5863970588, "include": true, "reason": "import numpy", "num_tokens": 885}
# NOTE(review): this file is Python 2 (print statements, old-style super()).
import numpy as np
import random
import copy
from collections import namedtuple

# Pairing of a move with the node it was assigned to by a strategy.
MoveReport = namedtuple('MoveReport', 'move node')


class Move(object):
    """A schedulable move with a cost model and replica dependencies."""

    def __init__(self, cost_function, depends_on):
        # cost_function: zero-argument callable returning the move's cost
        # depends_on: list of replicas this move needs exclusive access to
        self.cost_function = cost_function
        self.depends_on = depends_on

    def cost(self):
        """Return the (possibly stochastic) cost of performing this move."""
        return self.cost_function()


class FastCost(object):
    """Constant unit cost, used for cheap moves such as replica exchange."""

    def __call__(self):
        return 1


class ShootingCost(object):
    """Cost model for shooting moves; distribution parameters are unused stubs."""

    def __init__(self, mean, dev, skew):
        pass

    def __call__(self):
        # Placeholder: a fixed cost of 10 regardless of the parameters.
        return 10


class ShootingMove(Move):
    """Shooting move on a single replica (the first dependency)."""

    def __repr__(self):
        return "Shooting" + str(self.depends_on[0])


class ReplicaExchangeMove(Move):
    """Exchange move between two replicas; always has unit (fast) cost."""

    def __init__(self, repA, repB):
        super(ReplicaExchangeMove, self).__init__(FastCost(), [repA, repB])

    def __repr__(self):
        return "Repex" + str(self.depends_on)


class DefaultStrategy(object):
    """Greedy strategy: fill idle nodes with legal moves in queue order."""

    @staticmethod
    def available_nodes(status):
        # Nodes whose current status slot is None are idle.
        return [i for i in range(len(status)) if status[i] is None]

    def update(self, legal_moves, status, time):
        """Assign as many legal moves as there are idle nodes; returns MoveReports."""
        nodes = self.available_nodes(status)
        moves = [MoveReport(move=legal_moves[i], node=nodes[i])
                 for i in range(min(len(nodes), len(legal_moves)))]
        # print moves
        return moves


class Game(object):
    """Simulates scheduling a queue of moves across parallel nodes."""

    def __init__(self, moves, strategy, n_nodes):
        # moves: pool of candidate moves to sample the play queue from
        # strategy: object with .update(legal_moves, status, time)
        # history: per-node timeline; each entry is a Move or None (idle)
        self.moves = moves
        self.strategy = strategy
        self.n_nodes = n_nodes
        self.history = [list([]) for i in range(self.n_nodes)]

    def assign_move_to_node(self, move, node):
        """Occupy `node` with `move` for as many time slots as its cost."""
        cost = move.cost()
        self.history[node] += [move]*cost

    @staticmethod
    def ensemble_history(history, replica):
        # TODO: extract the per-ensemble view of the node history (stub).
        pass

    def verify(self):
        # check that history and serial history give the same behavior for
        # each ensemble
        pass

    @staticmethod
    def legal_moves(status, move_list):
        """Return moves from move_list whose replica dependencies are all free.

        A move is legal if none of its replicas are used by a currently
        running move or by an earlier move selected in this same pass.
        """
        active = sum([move.depends_on for move in status if move is not None],
                     [])
        legal_moves = []
        for move in move_list:
            if list(set(move.depends_on) & set(active)) == []:
                legal_moves.append(move)
                # Reserve these replicas so later moves in this pass can't
                # double-book them.
                active.extend(move.depends_on)
        return legal_moves

    def status(self, time):
        """Return, per node, the move running at `time` (None if idle/past end)."""
        status = [None] * self.n_nodes
        for node_i in range(self.n_nodes):
            try:
                status[node_i] = self.history[node_i][time]
            except IndexError:
                # This node's history is shorter than `time`: it is idle.
                pass
        return status

    @staticmethod
    def analysis(history):
        """Print wall time, CPU time, wasted cycles and efficiency of a run."""
        n_nodes = len(history)
        wall_time = max([len(node) for node in history])
        cpu_time = wall_time * n_nodes
        wasted_cycles = 0
        for node in history:
            wasted_cycles += len([act for act in node if act is None])
        print "Wall time:", wall_time
        print "CPU time:", cpu_time
        print "Wasted cycles:", wasted_cycles
        print "Efficiency:", float(cpu_time-wasted_cycles) / cpu_time

    def play(self, n_steps):
        """Run the scheduling simulation for a random queue of n_steps moves.

        Each tick: compute legal moves, let the strategy assign them to idle
        nodes, then pad still-idle nodes with a None (wasted) cycle.
        """
        move_queue = [random.choice(self.moves) for i in range(n_steps)]
        play_queue = copy.copy(move_queue)
        # TODO: run the correct version of the move queue
        t = 0
        # Loop until the queue is drained AND all in-flight moves finish.
        while play_queue != [] or self.status(t) != [None] * self.n_nodes:
            status = self.status(t)
            legal_moves = self.legal_moves(status, play_queue)
            moves = self.strategy.update(legal_moves, status, t)
            if not isinstance(moves, list):
                moves = [moves]
            for move in moves:
                self.assign_move_to_node(move.move, move.node)
                play_queue.remove(move.move)
            status = self.status(t)
            empty_nodes = [i for i in range(len(status)) if status[i] is None]
            for node_i in empty_nodes:
                # Record an idle (wasted) cycle for nodes left unassigned.
                self.history[node_i].append(None)
            # print t, self.status(t)
            t += 1
        pass
{"hexsha": "e30c53de9cccb9adc99a949c65ca2e73da4a8084", "size": 3931, "ext": "py", "lang": "Python", "max_stars_repo_path": "parallel_RETIS_game.py", "max_stars_repo_name": "dwhswenson/parallelRETISgame", "max_stars_repo_head_hexsha": "05ba9cd437dd3f7892e26b6226fdfe342c704f31", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "parallel_RETIS_game.py", "max_issues_repo_name": "dwhswenson/parallelRETISgame", "max_issues_repo_head_hexsha": "05ba9cd437dd3f7892e26b6226fdfe342c704f31", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "parallel_RETIS_game.py", "max_forks_repo_name": "dwhswenson/parallelRETISgame", "max_forks_repo_head_hexsha": "05ba9cd437dd3f7892e26b6226fdfe342c704f31", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 29.7803030303, "max_line_length": 78, "alphanum_fraction": 0.6003561435, "include": true, "reason": "import numpy", "num_tokens": 891}
from jax import random
import jax.numpy as np

from jaxsde import make_brownian_motion
from jaxsde import time_reflect_ito, time_reflect_stratonovich, \
    ito_to_stratonovich, stratonovich_to_ito

D = 10
rng = random.PRNGKey(0)
delta_t = 0.0001


def make_example_sde():
    """Build a small example SDE for the tests.

    Returns:
        (f, g, ts, y0, args) where f is the drift, g the diffusion,
        ts the [t0, t1] time interval, y0 the initial state (length D)
        and args the extra parameters.

    BUG FIX: the original returned (f, g, y0, ts, args) while every test
    below unpacks ``f, g, ts, y0, args`` -- silently swapping the state
    vector and the time interval.  The return order now matches all callers.
    """
    t0 = 0.1
    t1 = 2.2
    y0 = np.linspace(0.1, 0.9, D)
    args = np.linspace(0.1, 0.4, 4)

    def f(y, t, args):
        return -np.sqrt(t) - y + 0.1 - args[0] * np.mean((y + 0.2)**2) + args[1]

    def g(y, t, args):
        return args[2] * y**2 + np.sin(0.1 * y) + np.cos(t) + args[3]

    ts = np.array([t0, t1])
    return f, g, ts, y0, args


def test_double_reflect_stratonovich():
    # Check that reflecting twice gives the same answer.
    f, g, ts, y0, args = make_example_sde()
    b = make_brownian_motion(ts[0], np.zeros(y0.shape), ts[-1], rng)
    f2, g2, b2, t2 = time_reflect_stratonovich(*time_reflect_stratonovich(f, g, b, ts))

    t = 0.1
    assert(np.all(ts == t2))
    assert(np.all(f(y0, t, args) == f2(y0, t, args)))
    assert(np.all(g(y0, t, args) == g2(y0, t, args)))
    assert(np.all(b(t) == b2(t)))


def test_ito_to_strat_and_back():
    # Check that ito_to_stratonovich(stratonovich_to_ito) is the identity.
    f, g, ts, y0, args = make_example_sde()
    f2, g2 = ito_to_stratonovich(*stratonovich_to_ito(f, g))

    t = 0.1
    assert(np.all(f(y0, t, args) == f2(y0, t, args)))
    assert(np.all(g(y0, t, args) == g2(y0, t, args)))


def test_double_reflect_ito():
    # Check that reflecting twice gives the same answer.
    f, g, ts, y0, args = make_example_sde()
    b = make_brownian_motion(ts[0], np.zeros(y0.shape), ts[-1], rng)
    f2, g2, b2, t2 = time_reflect_ito(*time_reflect_ito(f, g, b, ts))

    t = 0.1
    assert(np.all(ts == t2))
    # allclose rather than exact equality: the Ito reflection adds a
    # drift-correction term computed in floating point.
    assert(np.allclose(f(y0, t, args), f2(y0, t, args)))
    assert(np.allclose(g(y0, t, args), g2(y0, t, args)))
    assert(np.all(b(t) == b2(t)))


def test_reflect_ito_two_ways():
    # Check that reflect_ito = strat_to_ito( reflect_strat ( ito_to_strat )))
    f, g, ts, y0, args = make_example_sde()
    b = make_brownian_motion(ts[0], np.zeros(y0.shape), ts[-1], rng)

    fr, gr, br, tr = time_reflect_ito(f, g, b, ts)

    fi, gi = ito_to_stratonovich(f, g)
    f2, g2, b3, t3 = time_reflect_stratonovich(fi, gi, b, ts)
    f3, g3 = stratonovich_to_ito(f2, g2)

    t = 0.1
    assert(np.all(tr == t3))
    assert(np.allclose(fr(y0, -t, args), f3(y0, -t, args)))
    assert(np.allclose(gr(y0, -t, args), g3(y0, -t, args)))
    assert(np.all(br(-t) == b3(-t)))


# def test_diagonal_gdg():
#     # Check that the fast formula is correct for diagonal functions.
#     f, g, ts, y0, args = make_example_sde()
{"hexsha": "40394d4d61cf16df3a163915870269ddc768b38b", "size": 2688, "ext": "py", "lang": "Python", "max_stars_repo_path": "jaxsde/tests/test_utils.py", "max_stars_repo_name": "wilsonify/bayesian-sde", "max_stars_repo_head_hexsha": "528f41ac10e65cb8f1ea047a48e3057bb6a777fc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 14, "max_stars_repo_stars_event_min_datetime": "2021-12-04T18:00:28.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-30T23:27:29.000Z", "max_issues_repo_path": "jaxsde/tests/test_utils.py", "max_issues_repo_name": "wilsonify/bayesian-sde", "max_issues_repo_head_hexsha": "528f41ac10e65cb8f1ea047a48e3057bb6a777fc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 1, "max_issues_repo_issues_event_min_datetime": "2022-02-11T03:35:00.000Z", "max_issues_repo_issues_event_max_datetime": "2022-02-11T03:35:00.000Z", "max_forks_repo_path": "jaxsde/tests/test_utils.py", "max_forks_repo_name": "wilsonify/bayesian-sde", "max_forks_repo_head_hexsha": "528f41ac10e65cb8f1ea047a48e3057bb6a777fc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-12-09T17:00:48.000Z", "max_forks_repo_forks_event_max_datetime": "2022-03-04T15:15:33.000Z", "avg_line_length": 32.7804878049, "max_line_length": 87, "alphanum_fraction": 0.6168154762, "include": true, "reason": "import jax,from jax", "num_tokens": 987}
#!/usr/bin/env python3
import pybullet as p
import pybullet_data

import os
import time
import numpy as np
import cv2
import tempfile

#BASEDIR = os.path.dirname(__file__)
from lib.pyb.urdf_printer import URDFPrinter

# W, H, FOVX
#CAMERA_SPECS = (1920, 1080, 64) # https://www.chiefdelphi.com/t/horizontal-fov-of-microsoft-lifecam-cinema/156204/7
CAMERA_SPECS = (224, 224, 45) # https://www.chiefdelphi.com/t/horizontal-fov-of-microsoft-lifecam-cinema/156204/7


class Manipulator:
    """A segmented manipulator loaded into pybullet from a generated URDF.

    The robot has NS sections, each split into NP sub-plates; each sub-plate
    contributes 3 joints, plus one final link that carries the eye camera.
    """

    # Visual presets passed to URDFPrinter (radii/lengths in meters,
    # colors by URDF material name).
    STYLES = [
        {'name': 'wire0', 'plate_radius': 0.0025, 'plate_length': 0.05,
         'plate0_color': 'Red', 'plate_color': 'Black',
         'block1_color': 'Transparent', 'block2_color': 'Transparent', 'camera_color': 'Transparent'},
        {'name': 'wire', 'plate_radius': 0.01, 'plate_length': 0.056,
         'plate0_color': 'Black', 'plate_color': 'Black',
         'block1_color': 'Transparent', 'block2_color': 'Transparent', 'camera_color': 'Transparent'},
        {'name': 'original', 'plate_radius': 0.1, 'plate_length': 0.01,
         'plate0_color': 'Black', 'plate_color': 'Black',
         'block1_color': 'Transparent', 'block2_color': 'Transparent', 'camera_color': 'Black'},
        {'name': 'fat', 'plate_radius': 0.05, 'plate_length': 2*0.028,
         'plate0_color': 'Black', 'plate_color': 'Black',
         'block1_color': 'Transparent', 'block2_color': 'Transparent', 'camera_color': 'Transparent'}
    ]

    def __init__(self, w, NS, NP, style):
        """Generate the URDF into a temp file, load it into world `w`, clean up."""
        self.w = w
        self.NS = NS
        self.NP = NP
        self.style = style

        urdf_file = tempfile.NamedTemporaryFile(mode="w", delete=False)
        URDFPrinter().print_manipulator(urdf_file, self.NS, self.NP, self.style, scale=1.0/0.028/2/self.NP)
        urdf_file.close()
        self.body_id = self.w.loadBody(urdf_file.name)
        os.remove(urdf_file.name)

        # 3 joints per sub-plate, plus the eye link at the very end.
        assert(p.getNumJoints(self.body_id) == self.NS * self.NP * 3 + 1)
        self.eye_link_id = p.getNumJoints(self.body_id) - 1

    def _setJointMotorPosition(self, joint, pos):
        # Teleports the joint (resetJointState), bypassing motor dynamics.
        p.resetJointState(self.body_id, joint, pos)
        #print(joint, pos)

    def _setJointPosition(self, section, pos0, pos1):
        """Distribute the two section angles evenly over its NP sub-plates."""
        j = (section * self.NP) * 3
        pos0 /= self.NP # spread along several axes
        pos1 /= self.NP
        for i in range(self.NP):
            self._setJointMotorPosition(j + 3*i, pos0)
            self._setJointMotorPosition(j + 3*i + 1, pos1)

    def step(self, phis):
        """Apply per-section joint angles; phis has shape (NS, 2)."""
        for i in range(self.NS):
            self._setJointPosition(i, phis[i, 0], phis[i, 1])

    def _print_joints_pos(self, body_id=None):
        """Debug helper: dump joint angles and link poses/orientations."""
        if body_id is None:
            body_id = self.body_id
        for i in range(p.getNumJoints(body_id)):
            js = p.getJointState(body_id, i)
            pos, orn, _, _, _, _ = p.getLinkState(body_id, i)
            rot_matrix = p.getMatrixFromQuaternion(orn)
            rot_matrix = np.array(rot_matrix).reshape(3, 3)
            v = rot_matrix.dot((0, 0, 1))
            print("#J%d %f" % (i, js[0]))
            print("#B%d %s %s" % (i, pos, v))

    def close(self):
        """Remove the robot body from the simulation (idempotent)."""
        if self.body_id is not None:
            p.removeBody(self.body_id)
            self.body_id = None

# --------------------------------------------------------------------

class Camera(object):
    """Abstract pybullet camera; subclasses define its pose via getPO()."""

    def __init__(self, w, specs=CAMERA_SPECS):
        self.w = w
        self.W, self.H, self.FOVX = specs
        aspect = self.W / self.H
        # computeProjectionMatrixFOV takes the *vertical* FOV, so divide
        # the horizontal FOV by the aspect ratio.
        self.projection_matrix = p.computeProjectionMatrixFOV(self.FOVX/aspect, aspect, 0.1, 15)

    def getImages(self, pvu, with_segmask=False):
        """Render from pose (position, view vector, up vector); returns raw tuple."""
        (cam_p, camera_vector, up_vector) = pvu
        view_matrix = p.computeViewMatrix(cam_p, cam_p + 0.1 * camera_vector, up_vector)
        if with_segmask:
            renderer = p.ER_TINY_RENDERER
        else:
            renderer = 0
        imgs = p.getCameraImage(self.W, self.H, view_matrix, self.projection_matrix, renderer=renderer)
        assert((self.W, self.H) == (imgs[0], imgs[1]))
        return imgs

    def getRGBAImagePVU(self, pvu):
        """Render and return an (H, W, 4) float32 RGBA array for pose `pvu`."""
        imgs = self.getImages(pvu)
        rgba = np.reshape(imgs[2], (self.H, self.W, 4)).astype(np.float32)
        return rgba

    # def getBGRImage(self, pvu):
    #    imgs = self.getCameraImages(pvu)
    #    rgba = np.reshape(imgs[2], (self.H, self.W, 4)).astype(np.uint8)
    #    bgr = cv2.merge((rgba[:,:,2], rgba[:,:,1], rgba[:,:,0])) # take BGR from RBGA
    #    return bgr

    def getPO(self):
        """Return (position, orientation quaternion); subclasses must override.

        BUG FIX: the original did ``raise NotImplemented`` -- raising the
        NotImplemented *constant* is a TypeError in Python 3; the abstract
        marker is the NotImplementedError exception.
        """
        raise NotImplementedError

    def getPVU(self):
        """Return (position, view vector, up vector) derived from getPO()."""
        cam_p, cam_o = self.getPO()
        cam_v, cam_u = self.w.orn2vu(cam_o)
        return [cam_p, cam_v, cam_u]

    def getRGBAImage(self):
        """Render an RGBA image from the camera's current pose."""
        pvu = self.getPVU()
        img = self.getRGBAImagePVU(pvu)
        return img

    def getBodyMask(self, body_id):
        """Return a binary (H, W) mask of pixels belonging to `body_id`.

        NOTE(review): assumes body_id != 0; for body_id == 0 the two in-place
        assignments below would mark every pixel -- confirm callers never
        pass the plane's id.
        """
        pvu = self.getPVU()
        imgs = self.getImages(pvu, with_segmask=True)
        segmask = np.reshape(imgs[4], (self.H, self.W)).astype(np.float32)
        #print('body_id=', body_id, 'np.unique(segmask)=', np.unique(segmask))
        segmask[segmask != body_id] = 0
        segmask[segmask == body_id] = 1
        return segmask

    def close(self):
        pass


class FixedCamera(Camera):
    """Camera with a constant, caller-supplied (position, view, up) pose."""

    def __init__(self, w, pvu):
        super(FixedCamera, self).__init__(w)
        self.pvu = pvu

    def getPVU(self):
        return self.pvu


class LinkedCamera(Camera):
    """Camera attached to a body link; pose follows the link each frame."""

    def __init__(self, w, body_id, link_id):
        super(LinkedCamera, self).__init__(w)
        self.body_id = body_id
        self.link_id = link_id

    def getPO(self):
        cam_p, cam_o, _, _, _, _ = p.getLinkState(self.body_id, self.link_id)
        return list(cam_p), list(cam_o)

# --------------------------------------------------------------------

class World(object):
    """Thin wrapper around the pybullet simulation (connection, bodies, stepping)."""

    def __init__(self, gui=False):
        try:
            if gui:
                p.connect(p.GUI)
                p.configureDebugVisualizer(p.COV_ENABLE_GUI, 0)
                #p.configureDebugVisualizer(p.COV_ENABLE_SEGMENTATION_MARK_PREVIEW, 0)
                #p.configureDebugVisualizer(p.COV_ENABLE_DEPTH_BUFFER_PREVIEW, 0)
                #p.configureDebugVisualizer(p.COV_ENABLE_RGB_BUFFER_PREVIEW, 1)
                p.resetDebugVisualizerCamera(cameraDistance=3, cameraYaw=-90, cameraPitch=-10,
                                             cameraTargetPosition=[0, 0, 1])
            else:
                p.connect(p.DIRECT) # don't render

            # load urdf file path (to load 'plane.urdf' from)
            p.setAdditionalSearchPath(pybullet_data.getDataPath())
            self.reset()
        except:
            # Make sure a half-opened connection is torn down, then re-raise.
            p.disconnect()
            raise

    def loadBody(self, file_name, startPos=[0, 0, 0], startOrientationEuler=[0, 0, 0]):
        """Load a fixed-base URDF at the given pose; returns its body id."""
        startOrientation = p.getQuaternionFromEuler(startOrientationEuler)
        bodyId = p.loadURDF(file_name, startPos, startOrientation, useFixedBase=1)
        return bodyId

    def reset(self):
        """Clear the simulation and reload the ground plane."""
        p.resetSimulation()
        self.loadBody("plane.urdf", [0, 0, 0], [0, 0, 0])
        #self._loadBody("chessboard-%s.urdf" % self.chessboard,
        #    #[1, 0, 1], [np.pi/2, -np.pi/2, -np.pi/2])
        #    [0, 0, 3], [np.pi, 0, 0])
        #self._loadBody("urdfs/plane.urdf", [0, 0, 3], [0, np.pi, 0])
        #self._loadBody("urdfs/green-line.urdf", [1.5, 0, 0.5], [np.pi/2, 0, 0])
        self.targetId = None

    def setTarget(self, pos):
        """Place (or move) the target marker body at `pos`."""
        if self.targetId is not None:
            p.removeBody(self.targetId)
            self.targetId = None
        self.targetId = self.loadBody("lib/pyb/urdfs/target.urdf", pos)

    def addHeadposMarker(self, pos):
        """Drop a persistent marker body at `pos` (never removed)."""
        self.loadBody("urdfs/marker.urdf", pos)

    # --------------------------------------------------------------------

    #def getDebugVisualizerCameraRGBAImage(self):
    #    width, height, viewMat, projMat, cameraUp, camForward, horizon, vertical, _, _, dist, camTarget = p.getDebugVisualizerCamera()
    #    width, height = 224, 224
    #    imgs = p.getCameraImage(width, height, viewMat, projMat)
    #    rgba = np.reshape(imgs[2], (height, width, 4)).astype(np.float32)
    #    return rgba

    def orn2vu(self, cam_o):
        """Convert an orientation quaternion to (view vector, up vector)."""
        rot_matrix = p.getMatrixFromQuaternion(cam_o)
        rot_matrix = np.array(rot_matrix).reshape(3, 3)

        # Initial vectors
        init_camera_vector = (0, 0, 1) # z-axis
        init_up_vector = (0, -1, 0) # x-axis

        # Rotated vectors
        camera_vector = rot_matrix.dot(init_camera_vector)
        up_vector = rot_matrix.dot(init_up_vector)

        return camera_vector, up_vector

    def euler2orn(self, alpha, beta, gamma):
        """Euler angles (radians) -> quaternion as a plain list."""
        return list(p.getQuaternionFromEuler([alpha, beta, gamma]))

    def step(self):
        """Advance the physics simulation by one tick."""
        p.stepSimulation()

    def close(self):
        """Disconnect from the pybullet server."""
        p.disconnect()
{"hexsha": "32cce8c4b65dfed48a3fc7a3575438fef507368d", "size": 8802, "ext": "py", "lang": "Python", "max_stars_repo_path": "lib/pyb/pybullet_robot.py", "max_stars_repo_name": "abbbe/eye-on-stick", "max_stars_repo_head_hexsha": "005da329e3565706ea7067cc7ea9e4cd35ca3afc", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "lib/pyb/pybullet_robot.py", "max_issues_repo_name": "abbbe/eye-on-stick", "max_issues_repo_head_hexsha": "005da329e3565706ea7067cc7ea9e4cd35ca3afc", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/pyb/pybullet_robot.py", "max_forks_repo_name": "abbbe/eye-on-stick", "max_forks_repo_head_hexsha": "005da329e3565706ea7067cc7ea9e4cd35ca3afc", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-03-22T14:25:15.000Z", "max_forks_repo_forks_event_max_datetime": "2021-03-24T14:32:17.000Z", "avg_line_length": 35.7804878049, "max_line_length": 218, "alphanum_fraction": 0.5844126335, "include": true, "reason": "import numpy", "num_tokens": 2449}
using Combinatorics: combinations using StatsBase: sample @doc raw""" get_contours(fit::AbstractFit, χsq, parameters_combination::Vector{Int}; npts::Int=20, limits=true, sigma = 1.0) For a given fit `fit` and the ``\chi^2`` function `χsq`, gives an array of parameter arrays, with each array corresponding to a set of parameters obtained from calculating the `MINOS` ``1σ`` contour (try to find `npts` points in the contour) for the two parameters in `parameters_combination`. `parameters_combination` is an `Int` array of the numbers of that two parameters, e.g. it is `[1, 2]` for the first two parameters and `[2, 3]` or the second and third parameters. If `limits` is `true`, then fix one parameter to its bounds from `MINOS` of the best fit and get the values for the other parameters; this runs over all parameters. """ function get_contours(fit::Fit, χsq, parameters_combination::Vector{Int}; npts::Int=20, limits=true, sigma = 1.0) fit.migrad_ok() || migrad(fit); # if migrad has not been run then run it first length(fit.merrors) == 0 && minos(fit) # if minos has not been run then run it first fmin_1σ = fit.fval + 1.0; # χsq from the previous best fit + 1.0 tol = 0.05 # tolerance allowing χ² to be ≤ fmin_1σ + tol # setting the parameters to the the best ones from the previous fit kwdarg = Dict{Symbol, Any}(Symbol(k) => v for (k,v) in fit.fitarg) _args = Symbol.(fit.parameters) # func_argnames(χsq) @views for i in 1: length(_args) # reset the limits of parameters to be the bounds from minos if !kwdarg[Symbol(:fix_, _args[i])] kwdarg[Symbol(:limit_, _args[i])] = get(fit.args, i-1) .+ (fit.merrors[String(_args[i])][3], fit.merrors[String(_args[i])][4]) # (fit.merrors[String(_args[i]), -1], fit.merrors[String(_args[i]), 1]) end end container = Vector{Vector{Float64}}() # consider the upper and lower bounds for each parameter if limits @views for a in _args if !kwdarg[Symbol(:fix_, a)] _fit1 = Minuit(χsq; kwdarg..., a => kwdarg[Symbol(:limit_, a)][1], Symbol("fix_", a) => 
true) _fit2 = Minuit(χsq; kwdarg..., a => kwdarg[Symbol(:limit_, a)][2], Symbol("fix_", a) => true) _fit1.strategy = 1; migrad(_fit1) _fit2.strategy = 1; migrad(_fit2) _fit1.fval < fmin_1σ + tol && push!(container, vcat(_fit1.fval, args(_fit1)) ) _fit2.fval < fmin_1σ + tol && push!(container, vcat(_fit2.fval, args(_fit2)) ) end end end # choose a pair of parameters, and try to compute the MINOS contour of npts points para1, para2 = _args[parameters_combination[1]], _args[parameters_combination[2]] contour_parameters = fit.mncontour(para1, para2, sigma = sigma, numpoints=npts )[3] # for each contour point, get the values of the other parameters for pa in contour_parameters dict = Dict(zip((para1, para2), pa) ) # using dict to overwrite those in kwdarg _fit1 = Minuit(χsq; kwdarg..., dict..., #errordef=1, Symbol("fix_", para1) => true, Symbol("fix_", para2) => true) _fit1.strategy = 1; migrad(_fit1) # filtering only those with χ² in 1σ _fit1.fval < fmin_1σ + tol && push!(container, vcat(_fit1.fval, args(_fit1)) ) end return container end function get_contours(fit::ArrayFit, χsq, parameters_combination::Vector{Int}; npts::Int=20, limits=true, sigma = 1.0) fit.migrad_ok() || migrad(fit); # if migrad has not been run then run it first length(fit.merrors) == 0 && minos(fit) # if minos has not been run then run it first fmin_1σ = fit.fval + 1.0; # χsq from the previous best fit + 1.0 tol = 0.05 # tolerance allowing χ² to be ≤ fmin_1σ + tol # setting the parameters to the the best ones from the previous fit start_array = args(fit) #PyVector{Real}(fit.args) # does not support assignment kwdarg = Dict{Symbol, Any}(Symbol(k) => v for (k,v) in fit.fitarg) _args = Symbol.(fit.parameters) # get names of parameters nargs = length(_args) @views for i in 1: nargs # reset the limits of parameters to be the bounds from minos if !kwdarg[Symbol(:fix_, _args[i])] kwdarg[Symbol(:limit_, _args[i])] = get(fit.args, i-1) .+ (fit.merrors[String(_args[i])][3], fit.merrors[String(_args[i])][4]) 
end end # igrad ? gradfun(par) = gradient(χsq, par) : gradfun = nothing container = Vector{Vector{Float64}}() # consider the upper and lower bounds for each parameter if limits @views for i in 1:nargs # for a in _args if !kwdarg[Symbol(:fix_, _args[i])] a = _args[i] start_array[i] = kwdarg[Symbol(:limit_, a)][1] _fit1 = Minuit(χsq, start_array; name = _args, kwdarg..., Symbol("fix_", a) => true) start_array[i] = kwdarg[Symbol(:limit_, a)][2] _fit2 = Minuit(χsq, start_array; name = _args, kwdarg..., Symbol("fix_", a) => true) _fit1.strategy = 1; migrad(_fit1) _fit2.strategy = 1; migrad(_fit2) _fit1.fval < fmin_1σ + tol && push!(container, vcat(_fit1.fval, args(_fit1)) ) _fit2.fval < fmin_1σ + tol && push!(container, vcat(_fit2.fval, args(_fit2)) ) end end end # choose a pair of parameters, and try to compute the MINOS contour of npts points para1, para2 = _args[parameters_combination[1]], _args[parameters_combination[2]] contour_parameters = fit.mncontour(para1, para2, sigma = sigma, numpoints=npts )[3] # for each contour point, get the values of the other parameters for pa in contour_parameters start_array[parameters_combination[1]], start_array[parameters_combination[2]] = pa # start_array[parameters_combination[2]] = pa[2] _fit1 = Minuit(χsq, start_array; name = _args, kwdarg..., # dict..., #errordef=1, Symbol("fix_", para1) => true, Symbol("fix_", para2) => true) _fit1.strategy = 1; migrad(_fit1) # filtering only those with χ² in 1σ _fit1.fval < fmin_1σ + tol && push!(container, vcat(_fit1.fval, args(_fit1)) ) end return container end @doc raw""" get_contours_all(fit::AbstractFit, χsq; npts=20, limits=true, sigma = 1.0) For a given fit `fit` and the ``\chi^2`` function `χsq`, gives a list of parameters sets which are at the edge of ``1σ`` `MINOS` contours for all combinations of varying parameters. The case of `limits` being `true` runs only once. 
""" function get_contours_all(fit::AbstractFit, χsq; npts=20, limits=true, sigma = 1.0) npara = length(func_argnames(χsq)) container = Vector() push!(container, vcat(fit.fval, args(fit)) ) # the first row set to tbe best fit # npara = fit.narg free_pars_indices = findall(x-> x in fit.list_of_vary_param(), fit.parameters) for arr in combinations(free_pars_indices, 2) # (1:npara, 2) push!(container, get_contours(fit, χsq, arr, npts=npts, limits=limits, sigma = sigma)...) limits = false # run limits only once end return container end """ contour_df(fit::AbstractFit, χsq; npts=20, limits=true, sigma = 1.0) parameters in the form of a dataframe. """ function contour_df(fit::AbstractFit, χsq; npts=20, limits=true, sigma = 1.0) parameters_1sigma = get_contours_all(fit, χsq, npts=npts, limits=limits, sigma = sigma) df0 = DataFrame(parameters_1sigma) argnames = Array{Symbol,1}(undef, fit.narg) @. argnames = Symbol(fit.parameters) try DataFrame( collect.(eachrow(df0)), vcat(:chisq, argnames) ) catch @warn "No parameter sets were found." end end """ get_contours_given_parameter(fit::AbstractFit, χsq, para::T, range) where {T <: Union{Symbol, String}} gives parameter sets in one sigma for a given parameter constrained in a range. If `fit` is an `ArrayFit` and no user-defined names have been given to the parameters, then `para` is `"x0"` or `:x0` for the 1st parameter, `"x1"` or `:x1` for the 2nd parameter, ... 
""" function get_contours_given_parameter(fit::Fit, χsq, para::T, range) where {T <: Union{Symbol, String}} fit.migrad_ok() || migrad(fit); # if migrad has not been run then run it first length(fit.merrors) == 0 && minos(fit) # if minos has not been run then run it first fmin_1σ = fit.fval + 1.0; # χsq from the previous best fit + 1.0 # setting the parameters to the the best ones from the previous fit kwdarg = Dict{Symbol, Any}(Symbol(k) => v for (k,v) in fit.fitarg) _args = Symbol.(fit.parameters) # func_argnames(χsq) @views for i in 1: length(_args) # reset the limits of parameters to be the bounds from minos if !kwdarg[Symbol(:fix_, _args[i])] kwdarg[Symbol(:limit_, _args[i])] = get(fit.args, i-1) .+ (fit.merrors[String(_args[i])][3], fit.merrors[String(_args[i])][4]) # (fit.merrors[String(_args[i]), -1], fit.merrors[String(_args[i]), 1]) end end container = Vector{Vector{Float64}}() for a in range _fit1 = Minuit(χsq; kwdarg..., para => a, Symbol(:fix_, para) => true) _fit1.strategy = 2; _fit1.migrad() _fit1.fval ≤ fmin_1σ && push!(container, vcat(_fit1.fval, args(_fit1)) ) end return container end function get_contours_given_parameter(fit::ArrayFit, χsq, para::T, range) where {T <: Union{Symbol, String}} fit.migrad_ok() || migrad(fit); # if migrad has not been run then run it first length(fit.merrors) == 0 && minos(fit) # if minos has not been run then run it first fmin_1σ = fit.fval + 1.0; # χsq from the previous best fit + 1.0 # setting the parameters to the the best ones from the previous fit start_array = args(fit) #PyVector{Real}(fit.args) # does not support assignment kwdarg = Dict{Symbol, Any}(Symbol(k) => v for (k,v) in fit.fitarg) _args = Symbol.(fit.parameters) nargs = length(_args) @views for i in 1:nargs # reset the limits of parameters to be the bounds from minos if !kwdarg[Symbol(:fix_, _args[i])] kwdarg[Symbol(:limit_, _args[i])] = get(fit.args, i-1) .+ (fit.merrors[String(_args[i])][3], fit.merrors[String(_args[i])][4]) end end container = 
Vector{Vector{Float64}}() for a in range start_array[findfirst(x-> x == Symbol(para), _args)] = a _fit1 = Minuit(χsq, start_array; name = _args, kwdarg..., Symbol(:fix_, para) => true) _fit1.strategy = 2; migrad(_fit1) _fit1.fval ≤ fmin_1σ && push!(container, vcat(_fit1.fval, args(_fit1)) ) end return container end @doc """ contour_df_given_parameter(fit::AbstractFit, χsq, para::T, range; limits = true) where {T <: Union{Symbol, String}} return parameter sets in one sigma for a given parameter constrained in a range as a `DataFrame`. See also [`get_contours_given_parameter`](@ref). """ function contour_df_given_parameter(fit::AbstractFit, χsq, para::T, range; limits = true) where {T <: Union{Symbol, String}} parameters = get_contours_given_parameter(fit, χsq, para, range) df0 = DataFrame(parameters) argnames = Array{Symbol,1}(undef, fit.narg) @. argnames = Symbol(fit.parameters) try DataFrame( collect.(eachrow(df0)), vcat(:chisq, argnames) ) catch @warn "No parameter sets were found in the given range of $para. Try another range." end end @doc raw""" get_contours_samples(fit::AbstractFit, χsq, paras, ranges; nsamples = 100, MNbounds = true) return 1σ parameter sets as an `Array` (the latter returns a `DataFrame`) for given parameters constrained in `ranges`: * if `paras` is a single parameter, then take equally spaced `nsamples` in `ranges` given in the form of `(min, max)`; * if `paras` contain more (``\geq 2``) parameters, then `paras` should be of the form `(:para1, :para2)`, `ranges` should be of the form `((min1, max1), (min2, max2))`, and values for the parameters given in `paras` are randomly sampled in the given `ranges`; * if `MNbounds` is true, then constrain the parameters in the range provided by `MINOS` no matter whether that is valid or not (to be improved by checking the validity) * if `igrad` is true, then use `ForwardDiff.gradient` to compute the gradient. 
* For using array parameters, if no user-defined names have been given to the parameters, `paras` should be given such that `"x0"` or `:x0` for the 1st parameter, `"x1"` or `:x1` for the 2nd parameter, ... """ function get_contours_samples(fit::Fit, χsq, paras, ranges; nsamples = 100, MNbounds = true, igrad = false) fit.migrad_ok() || migrad(fit); # if migrad has not been run then run it first length(fit.merrors) == 0 && minos(fit) # if minos has not been run then run it first fmin_1σ = fit.fval + 1.0; # χsq from the previous best fit + 1.0 # setting the parameters to the the best ones from the previous fit kwdarg = Dict{Symbol, Any}(Symbol(k) => v for (k,v) in fit.fitarg) _args = Symbol.(fit.parameters) if MNbounds @views for i in 1: length(_args) # reset the limits of parameters to be the bounds from minos if !kwdarg[Symbol(:fix_, _args[i])] kwdarg[Symbol(:limit_, _args[i])] = get(fit.args, i-1) .+ (fit.merrors[String(_args[i])][3], fit.merrors[String(_args[i])][4]) # (fit.merrors[String(_args[i]), -1], fit.merrors[String(_args[i]), 1]) end end end container = Vector{Vector{Float64}}() push!(container, vcat(fit.fval, args(fit)) ) # the first row set to tbe best fit # make range for each parameter with length len; if one only 1 parameter @views if isa(ranges[1], Number) sam = LinRange(ranges[1], ranges[end], nsamples) pfix = Symbol(:fix_, paras) => true else r = map(x -> LinRange(x[1], x[end], nsamples), ranges) sam = zip(sample.(r, nsamples)...) pfix = map( x -> Symbol(:fix_, x) => true, paras) end igrad ? gradf(a...) 
= gradient(x->χsq(x...), [a...]) : gradf = nothing @inbounds @simd for p in unique(sam) @views if isa(ranges[1], Number) _fit1 = Minuit(χsq; kwdarg..., paras => p, Symbol(:fix_, paras) => true, grad = gradf) else pdict = Dict(zip(paras, p)) _fit1 = Minuit(χsq; kwdarg..., pdict..., pfix..., grad = gradf) end _fit1.strategy = 1; migrad(_fit1) _fit1.fval ≤ fmin_1σ && push!(container, vcat(_fit1.fval, args(_fit1)) ) end return container end function get_contours_samples(fit::ArrayFit, χsq, paras, ranges; nsamples = 100, MNbounds = true, igrad = false) fit.migrad_ok() || migrad(fit); # if migrad has not been run then run it first length(fit.merrors) == 0 && minos(fit) # if minos has not been run then run it first fmin_1σ = fit.fval + 1.0; # χsq from the previous best fit + 1.0 # setting the parameters to the the best ones from the previous fit start_array = args(fit) kwdarg = Dict{Symbol, Any}(Symbol(k) => v for (k,v) in fit.fitarg) _args = Symbol.(fit.parameters) if MNbounds @views for i in 1: length(_args) # reset the limits of parameters to be the bounds from minos if !kwdarg[Symbol(:fix_, _args[i])] kwdarg[Symbol(:limit_, _args[i])] = get(fit.args, i-1) .+ (fit.merrors[String(_args[i])][3], fit.merrors[String(_args[i])][4]) end end end container = Vector{Vector{Float64}}() push!(container, vcat(fit.fval, args(fit)) ) # the first row set to tbe best fit # make range for each parameter with length len; if one only 1 parameter @views if isa(ranges[1], Number) sam = LinRange(ranges[1], ranges[2], nsamples) pfix = Symbol(:fix_, paras) => true else r = map(x -> LinRange(x[1], x[2], nsamples), ranges) sam = zip(sample.(r, nsamples)...) pfix = map( x -> Symbol(:fix_, x) => true, paras) end igrad ? gradf(a...) 
= gradient(x->χsq(x...), [a...]) : gradf = nothing # if using @inbounds, then crashes julia for p in unique(sam) @views if isa(ranges[1], Number) start_array[findfirst(x-> x == Symbol(paras), _args)] = p _fit1 = Minuit(χsq, start_array; name = _args, kwdarg..., Symbol(:fix_, paras) => true, grad = gradf) else @views for i in eachindex(paras) start_array[findfirst(x-> x == Symbol(paras[i]), _args)] = p[i] end _fit1 = Minuit(χsq, start_array; name = _args, kwdarg..., pfix..., grad = gradf) end _fit1.strategy = 1; migrad(_fit1) _fit1.fval ≤ fmin_1σ && push!(container, vcat(_fit1.fval, args(_fit1)) ) end return container end @doc raw""" contour_df_samples(fit::AbstractFit, χsq, paras, ranges; nsamples = 100, MNbounds=true) gives 1σ parameter sets as a `DataFrame` for given parameters constrained in `ranges`: * if `paras` is a single parameter, then take equally spaced `nsamples` in `ranges` given in the form of `(min, max)`; * if `paras` contain more parameters, then `paras` should be of the form `(:para1, :para2)`, `ranges` should be of the form `((min1, max1), (min2, max2))`; * `paras` can be more than 2. Values for the parameters given in `paras` are randomly sampled in the given `ranges`. * is `MNbounds` is true, then constrain the parameters in the range provided by `MINOS` no matter whether that is valid or not (to be improved by checking the validity) * if `igrad` is true, then use `ForwardDiff.gradient` to compute the gradient """ function contour_df_samples(fit::AbstractFit, χsq, paras, ranges; nsamples = 100, MNbounds=true, igrad = false) parameters = get_contours_samples(fit, χsq, paras, ranges, nsamples = nsamples, MNbounds = MNbounds, igrad = igrad) df0 = DataFrame(parameters) argnames = Array{Symbol,1}(undef, fit.narg) @. argnames = Symbol(fit.parameters) try DataFrame( collect.(eachrow(df0)), vcat(:chisq, argnames) ) catch @warn "No parameter sets were found in the given ranges of $para. Try another ranges." end end
{"hexsha": "9c430ed58a536a833a3acfceff48b206606b0b5c", "size": 18374, "ext": "jl", "lang": "Julia", "max_stars_repo_path": "src/contour.jl", "max_stars_repo_name": "zhanglm79/IMinuit.jl", "max_stars_repo_head_hexsha": "3d5a685d6b955b32aa88dd3c5aa7bbfc83605aca", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 11, "max_stars_repo_stars_event_min_datetime": "2020-07-09T06:44:22.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-17T14:18:40.000Z", "max_issues_repo_path": "src/contour.jl", "max_issues_repo_name": "zhanglm79/IMinuit.jl", "max_issues_repo_head_hexsha": "3d5a685d6b955b32aa88dd3c5aa7bbfc83605aca", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "src/contour.jl", "max_forks_repo_name": "zhanglm79/IMinuit.jl", "max_forks_repo_head_hexsha": "3d5a685d6b955b32aa88dd3c5aa7bbfc83605aca", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2020-11-24T12:21:23.000Z", "max_forks_repo_forks_event_max_datetime": "2020-11-25T09:12:08.000Z", "avg_line_length": 48.9973333333, "max_line_length": 168, "alphanum_fraction": 0.6395450093, "num_tokens": 5553}
library(caret)

# Stratified 75/25 split of the iris data by Species using createDataPartition()
train_idx <- createDataPartition(y = iris$Species, p = 0.75, list = FALSE)

train_set <- iris[train_idx, ]
test_set  <- iris[-train_idx, ]

# Report the resulting set sizes
dim(train_set)
dim(test_set)
{"hexsha": "44a12f656cf12e9af8d82c001e337a9d313905b5", "size": 215, "ext": "r", "lang": "R", "max_stars_repo_path": "machinelearning/PartitioningData.r", "max_stars_repo_name": "KOBForest/KOBForest.github.io", "max_stars_repo_head_hexsha": "48c5ee6641c591ed1d4d6d95e06381c9df092d59", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "machinelearning/PartitioningData.r", "max_issues_repo_name": "KOBForest/KOBForest.github.io", "max_issues_repo_head_hexsha": "48c5ee6641c591ed1d4d6d95e06381c9df092d59", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "machinelearning/PartitioningData.r", "max_forks_repo_name": "KOBForest/KOBForest.github.io", "max_forks_repo_head_hexsha": "48c5ee6641c591ed1d4d6d95e06381c9df092d59", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 15.3571428571, "max_line_length": 66, "alphanum_fraction": 0.7302325581, "num_tokens": 57}
"""Visual check of the lane-detection perspective transform.

For each straight-line test image: undistort it, warp it to a top-down view,
draw the source/destination quadrilaterals on the respective images, and save
a side-by-side comparison figure to the output directory.
"""
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt

from lanedetection_depends import LaneDetection

ld = LaneDetection()

images_format = './test_images/straight_lines'
output_format = './output_images/straight_warped'

for idx in range(1, 3):
    img = cv2.imread(images_format + str(idx) + '.jpg')
    ld.obtain_perspective(img)

    undistimg = ld.cc.undistort_image(img)
    warpedimg = ld.warp_perspective(undistimg)

    # Draw the source quad (red) on the undistorted image and the destination
    # quad on the warped image so the mapping can be inspected visually.
    pts = np.array(ld.src, np.int32)
    undistimg = cv2.polylines(undistimg, [pts], True, (0, 0, 255), 2)
    pts = np.array(ld.dst, np.int32)
    warpedimg = cv2.polylines(warpedimg, [pts], True, (0, 0, 255), 2)

    f, (ax1, ax2) = plt.subplots(1, 2, figsize=(24, 9))
    f.tight_layout()
    # OpenCV images are BGR; convert to RGB for matplotlib display.
    ax1.imshow(cv2.cvtColor(undistimg, cv2.COLOR_BGR2RGB))
    ax1.set_title('Undistorted Image', fontsize=30)
    ax2.imshow(cv2.cvtColor(warpedimg, cv2.COLOR_BGR2RGB))
    ax2.set_title('Undistorted Warped Image', fontsize=30)
    plt.subplots_adjust(left=0.02, right=0.98, top=1, bottom=0)
    plt.savefig(output_format + str(idx) + '.png')
    # Close the figure: otherwise figures accumulate across loop iterations
    # and leak memory (matplotlib keeps them alive until closed).
    plt.close(f)

print(ld.src)
print(ld.dst)
{"hexsha": "9a865f75529ec76943e81c6b6cd1127615d4d21d", "size": 1117, "ext": "py", "lang": "Python", "max_stars_repo_path": "test_perspective.py", "max_stars_repo_name": "yeongseok94/CarND-Advanced-Lane-Lines", "max_stars_repo_head_hexsha": "59367410e6fdecde72bc63073dfb93bd65470fbd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "test_perspective.py", "max_issues_repo_name": "yeongseok94/CarND-Advanced-Lane-Lines", "max_issues_repo_head_hexsha": "59367410e6fdecde72bc63073dfb93bd65470fbd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "test_perspective.py", "max_forks_repo_name": "yeongseok94/CarND-Advanced-Lane-Lines", "max_forks_repo_head_hexsha": "59367410e6fdecde72bc63073dfb93bd65470fbd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 33.8484848485, "max_line_length": 67, "alphanum_fraction": 0.6974037601, "include": true, "reason": "import numpy", "num_tokens": 347}
from typing import Optional, List, Sequence

import pandas as pd
import numpy as np
from scipy import stats

import sha_calc as sha_calc
from gmhazard_calc.im import IM, IMType, to_im_list, to_string_list
from gmhazard_calc import gm_data
from gmhazard_calc import site
from gmhazard_calc import constants
from gmhazard_calc import hazard
from gmhazard_calc import shared
from gmhazard_calc import site_source
from gmhazard_calc import disagg
from .GroundMotionDataset import GMDataset, HistoricalGMDataset
from .GMSResult import GMSResult
from .GCIMResult import BranchUniGCIM, IMEnsembleUniGCIM
from .CausalParamBounds import CausalParamBounds

# Default scaling-factor bounds applied when selecting historical ground motions
SF_LOW, SF_HIGH = 0.3, 3.0


def run_ensemble_gms(
    ensemble: gm_data.Ensemble,
    site_info: site.SiteInfo,
    n_gms: int,
    IMj: IM,
    gm_dataset: GMDataset,
    IMs: Optional[np.ndarray] = None,
    exceedance: Optional[float] = None,
    im_j: Optional[float] = None,
    n_replica: int = 10,
    im_weights: Optional[pd.Series] = None,
    cs_param_bounds: Optional[CausalParamBounds] = None,
) -> GMSResult:
    """
    Performs ensemble based ground motion selection

    Note: Currently only supports Ensembles based on empirical GMMs (i.e. parametric)

    Parameters
    ----------
    ensemble: Ensemble
    site_info: SiteInfo
    n_gms: int
        Number of ground motions to select
    IMj: IM
        Conditioning IM
    gm_dataset: GMDataset
        The GM source (either simulations or historical)
        from which to select ground motions
    IMs: numpy array of strings
        The IMs to consider
    exceedance: float
        Exceedance of interest
        Either exceedance or im_j has to be specified
    im_j: float
        Level/Value of interest of the conditioning IM
    n_replica: int
        Number of times the GM selection process is repeated
    im_weights: Series
        Weighting of the IMs
    cs_param_bounds: CausalParamBounds
        The causal filter parameters to apply pre-ground motion selection

    Returns
    -------
    GMSResult
    """
    # Use all available IMs if none are specified
    # (intersection of ensemble IMs and dataset IMs, excluding the conditioning IM)
    if IMs is None:
        IMs = np.asarray(list(set(ensemble.ims.copy()).intersection(gm_dataset.ims)))
        IMs = IMs[IMs != IMj]

    if im_weights is None:
        im_weights = default_IM_weights(IMj, IMs)
    else:
        im_weights.index = to_im_list(im_weights.index)

    # Sanity checks
    assert np.all(
        np.isin(IMs, im_weights.index)
    ), "IM weights are not specified for all IMs"
    assert np.isclose(np.sum(im_weights), 1.0), "IM weights need to sum to 1.0"

    ensemble.check_im(IMj)
    assert np.all(
        np.isin(IMs, ensemble.ims)
    ), f"Not all of the specified IM types are availble in the ensemble {ensemble.name}"

    assert exceedance is not None or im_j is not None, (
        "Either the exceedance probability or the conditioning "
        "IM level has to be specified"
    )

    # GMS is only implemented for parametric (empirical GMM based) IM ensembles
    assert all(
        [
            ensemble.get_im_ensemble(IMi.im_type).im_data_type
            == constants.IMDataType.parametric
            for IMi in IMs
        ]
    ), "Currently only support GMS for fully parametric ensembles"

    # im_j takes precedence if both are given
    if exceedance is not None and im_j is not None:
        print(
            f"An exceedance level and a conditioning IM level were specified, "
            f"ignoring the exceedance level and using the conditioning IM"
        )
        exceedance = None

    ens_hazard = hazard.run_ensemble_hazard(ensemble, site_info, IMj)
    if im_j is not None and not (
        ens_hazard.im_values.min() < im_j < ens_hazard.im_values.max()
    ):
        raise ValueError(
            "The specified conditioning IM value is not supported (too small or large)"
        )

    # Compute the conditioning IM level using the ensemble hazard
    if exceedance is not None:
        if not (
            ens_hazard.total_hazard.values.min()
            < exceedance
            < ens_hazard.total_hazard.values.max()
        ):
            raise ValueError(
                "The specified conditioning exceedance value is not supported (too small or large)"
            )
        im_j = ens_hazard.exceedance_to_im(exceedance)

    # Compute the combined rupture weights, i.e. P(Rup | IMj = im_j), per branch
    P_Rup_IMj = sha_calc.compute_rupture_weights(
        im_j,
        {
            cur_branch_name: (
                shared.get_IM_params(
                    IMj, cur_branch.get_imdb_ffps(constants.SourceType.fault), site_info
                ),
                cur_branch.flt_rupture_df.set_index("rupture_name").annual_rec_prob,
            )
            for cur_branch_name, cur_branch in ensemble.get_im_ensemble(
                IMj.im_type
            ).branches_dict.items()
        },
    )

    # Compute the adjusted branch weights
    IMj_adj_branch_weights, IMj_hazard_mean = shared.compute_adj_branch_weights(
        ensemble, IMj, im_j, site_info
    )

    # Combine & Apply the branch weights (collapses the per-branch columns)
    P_Rup_IMj = P_Rup_IMj.multiply(IMj_adj_branch_weights, axis=1).sum(axis=1)

    # Compute the correlation matrix between the IM vector and the conditioning IM
    rho = sha_calc.compute_correlation_matrix(np.asarray(to_string_list(IMs)), str(IMj))

    # Get correlated vector (one per replica) used to generate realisations
    v_vectors = sha_calc.generate_correlated_vector(
        n_gms, np.asarray(to_string_list(IMs)), rho, n_replica=n_replica
    )

    # Pre-allocate the realisation IM value array (and array for
    # sigma of selected lnIMi|IMj,Rup distributions, required for residual calculation)
    rel_IM_values = [
        {IMi: np.full(n_gms, np.nan) for IMi in IMs} for ix in range(n_replica)
    ]
    rel_sigma_lnIMi_IMj_Rup = [
        {IMi: np.full(n_gms, np.nan) for IMi in IMs} for ix in range(n_replica)
    ]

    # Get list of ensembles that cover all IMi in IM vector (i.e. variable IMs)
    IMi_gcims = {}
    im_ensembles = list({ensemble.get_im_ensemble(IMi.im_type) for IMi in IMs})

    # Computation of GCIM distribution and random realisation generation
    # Overview of main steps:
    # Iterate over each IMEnsemble (i.e. IMi set) and compute
    #   1) Correlation coefficients
    #   For each IMi in the IMi set:
    #       2) Branch hazard & mean hazard
    #       3) IMi value corresponding to exceedance of IMj=imj
    #   For each branch:
    #       4) Compute lnIMi|IMj,RUp and lnIMi|IMj
    #   5) Generate array of [n_gms, n_replica] random numbers
    #      between 0-1 for branch selection (same across IMi of
    #      the current IMi set)
    #   For each IMi in IMi set:
    #       6) Compute adjusted branch weights, using results from step 3)
    #       7) Compute combined (i.e. across branches) lnIMi|IMj
    #       For each replica_ix in n_replica:
    #           7) Select n_gms random branches using the adjusted
    #              branch weights for IMi
    #           For each of the selected branches:
    #               8) Select random rupture using rupture weights (at IMj=imj)
    #               9) Using current branch & rupture lnIMi|IMj,Rup
    #                  generate random realisation
    for cur_im_ensemble in im_ensembles:
        # Get the relevant IMi for this IMEnsemble
        cur_IMs = IMs[np.isin(IMs, cur_im_ensemble.ims)]

        # Get the correlation coefficients
        corr_coeffs = pd.Series(
            data=[sha_calc.get_im_correlations(str(IMi), str(IMj)) for IMi in cur_IMs],
            index=to_string_list(cur_IMs),
        )

        # Compute the branch hazard for each of the current set of IMi
        cur_branch_hazard = {
            IMi: hazard.run_branches_hazard(ensemble, site_info, IMi)
            for IMi in cur_IMs
        }

        # Get the ensemble mean hazard IM value for each IMi (in the current set)
        # corresponding to the exceedance rate for IMj=imj
        # Needed to calculate the adjusted branch weight
        cur_ens_hazard = {
            IMi: hazard.run_ensemble_hazard(
                ensemble, site_info, IMi, branch_hazard=cur_branch_hazard[IMi]
            )
            for IMi in cur_IMs
        }
        cur_mean_hazard_im_values = pd.Series(
            data=[
                cur_ens_hazard[IMi].exceedance_to_im(IMj_hazard_mean)
                for IMi in cur_IMs
            ],
            index=cur_IMs,
        )

        cur_branch_gcims, cur_adj_branch_weights = {}, {}
        for cur_branch_name, cur_branch in cur_im_ensemble.branches_dict.items():
            # Retrieve the IM parameters (mean and sigma columns per rupture)
            im_df = shared.get_IM_values(
                cur_branch.get_imdb_ffps(constants.SourceType.fault), site_info
            )
            sigma_cols = [f"{IMi}_sigma" for IMi in cur_IMs]

            # Compute lnIMi|IMj, Rup
            cur_lnIMi_IMj_Rup = sha_calc.compute_lnIMi_IMj_Rup(
                im_df[to_string_list(cur_IMs)],
                im_df[sigma_cols].rename(
                    columns={
                        sig_col: str(IMi) for sig_col, IMi in zip(sigma_cols, cur_IMs)
                    }
                ),
                corr_coeffs,
                str(IMj),
                im_j,
            )

            # Compute lnIMi|IMj (marginalised over ruptures using P_Rup_IMj)
            cur_lnIMi_IMj = sha_calc.compute_lnIMi_IMj(
                cur_lnIMi_IMj_Rup, P_Rup_IMj, str(IMj), im_j
            )

            # Create branch GCIM object and save to dictionary
            cur_branch_gcims[cur_branch_name] = {
                IMi: BranchUniGCIM(
                    IMi,
                    IMj,
                    im_j,
                    cur_branch,
                    cur_lnIMi_IMj_Rup[str(IMi)],
                    cur_lnIMi_IMj[str(IMi)],
                )
                for IMi in cur_IMs
            }

        # Pick N_gms random numbers, to select the branches for
        # realisation generation
        # Use the same random number for each IMi in the current set
        # to ensure consistent branch/model selection
        rand_branch_float = np.random.uniform(
            low=0.0, high=1.0, size=(n_gms, n_replica)
        )

        # Combine the branch lnIMi|IMj distributions for each of the current IMs
        # and generate random realisation
        cur_branch_names = np.asarray(list(cur_im_ensemble.branches_dict.keys()))
        for IMi in cur_IMs:
            # Compute the adjusted branch weights, using the
            # ensemble mean exceedance rate for IMj=imj and
            # the corresponding ensemble hazard mean IM value (for each IMi)
            cur_adj_branch_weights[IMi] = pd.Series(
                data=[
                    hazard.run_branch_hazard(
                        cur_branch, site_info, IMi
                    ).im_to_exceedance(cur_mean_hazard_im_values[IMi])
                    * cur_branch.weight
                    / IMj_hazard_mean
                    for cur_name, cur_branch in cur_im_ensemble.branches_dict.items()
                ],
                index=cur_branch_names,
            )

            # Combine the branches lnIMi|IMj to get
            # the target distribution for IMi
            comb_lnIMi_IMj = sha_calc.comb_lnIMi_IMj(
                {
                    cur_name: cur_branch_gcim[IMi].lnIMi_IMj
                    for cur_name, cur_branch_gcim in cur_branch_gcims.items()
                },
                cur_adj_branch_weights[IMi],
            )
            IMi_gcims[IMi] = IMEnsembleUniGCIM(
                cur_im_ensemble,
                IMi,
                IMj,
                im_j,
                comb_lnIMi_IMj,
                {
                    cur_branch_name: cur_data[IMi]
                    for cur_branch_name, cur_data in cur_branch_gcims.items()
                },
            )

            # Generate realisation for current IMi,
            # 1) select random branch
            # 2) select random rupture
            # 3) Apply the mean & sigma of the selected lnIMi|IMj,Rup to the
            #    vector of correlated random numbers
            for replica_ix in range(n_replica):
                # Select n_gms random branches based on IMi adjusted branch weights
                cur_branch_cdf = cur_adj_branch_weights[IMi].sort_values().cumsum()
                cur_sel_branches = sha_calc.query_non_parametric_cdf_invs(
                    rand_branch_float[:, replica_ix],
                    cur_branch_cdf.index.values.astype(str),
                    cur_branch_cdf.values,
                )

                for rel_ix, cur_branch_name in enumerate(cur_sel_branches):
                    # Select random rupture based on rupture contributions at IMj=imj
                    cur_rupture = np.random.choice(
                        P_Rup_IMj.index.values.astype(str), size=1, p=P_Rup_IMj.values
                    )[0]

                    # Apply mean & sigma of selected lnIMi|IMj,Rup to
                    # to correponding value of correlated vector
                    cur_branch_gcim = cur_branch_gcims[cur_branch_name][IMi]
                    rel_IM_values[replica_ix][IMi][rel_ix] = (
                        cur_branch_gcim.lnIMi_IMj_Rup.mu[cur_rupture]
                        + cur_branch_gcim.lnIMi_IMj_Rup.sigma[cur_rupture]
                        * v_vectors[replica_ix].loc[rel_ix, str(IMi)]
                    )
                    rel_sigma_lnIMi_IMj_Rup[replica_ix][IMi][
                        rel_ix
                    ] = cur_branch_gcim.lnIMi_IMj_Rup.sigma[cur_rupture]

    # Convert results to dataframes (one per replica)
    rel_IM_values = [pd.DataFrame(cur_values) for cur_values in rel_IM_values]
    rel_sigma_lnIMi_IMj_Rup = [
        pd.DataFrame(cur_sigma_values) for cur_sigma_values in rel_sigma_lnIMi_IMj_Rup
    ]

    # IM scaling, such that IM_j=im_j for all
    # ground motions in the GM dataset (historical records only)
    sf = None
    if isinstance(gm_dataset, HistoricalGMDataset):
        sf = gm_dataset.compute_scaling_factor(IMj, im_j)

    # Get the (scaled) ground motions IM values that fall
    # within the specified causal parameter bounds
    gms_im_df = gm_dataset.get_im_df(
        site_info,
        np.concatenate((to_string_list(IMs), [str(IMj)])),
        cs_param_bounds=cs_param_bounds,
        sf=sf,
    )
    gms_im_df.columns = to_im_list(gms_im_df.columns)
    assert (
        gms_im_df.shape[0] > 0
    ), "No GMs to select from after applying the causual parameter bounds"
    # After scaling, every candidate GM should sit exactly at the conditioning level
    assert np.allclose(gms_im_df.loc[:, IMj], im_j)

    # Compute residuals and select GMs for each replica
    R_values, sel_gms_ind = [], []
    for replica_ix in range(n_replica):
        # Compute residuals between available GMs and current set of realisations
        # (broadcast: realisations x candidate GMs x IMs)
        cur_sigma_IMi_Rup_IMj = (
            rel_sigma_lnIMi_IMj_Rup[replica_ix].loc[:, IMs].values[:, np.newaxis, :]
        )
        cur_diff = rel_IM_values[replica_ix].loc[:, IMs].values[
            :, np.newaxis, :
        ] - np.log(gms_im_df.loc[:, IMs].values)
        cur_misfit = pd.DataFrame(
            index=rel_IM_values[replica_ix].index,
            data=np.sum(
                im_weights.loc[IMs].values * (cur_diff / cur_sigma_IMi_Rup_IMj) ** 2,
                axis=2,
            ),
        )

        # Select best matching GMs (one candidate GM per realisation;
        # note the same GM may be picked for several realisations)
        cur_selected_gms_ind = gms_im_df.index.values[cur_misfit.idxmin(axis=1).values]

        # Compute the KS test statistic for each IM_i
        # I.e. Check how well the empirical distribution of selected GMs
        # matches with the target distribution (i.e. lnIMi|IMj)
        D = []
        for IMi in IMs:
            cur_d, _ = stats.kstest(
                gms_im_df.loc[cur_selected_gms_ind, IMi].values,
                lambda x: sha_calc.query_non_parametric_cdf(
                    x,
                    IMi_gcims[IMi].lnIMi_IMj.cdf.index.values,
                    IMi_gcims[IMi].lnIMi_IMj.cdf.values,
                ),
            )
            D.append(cur_d)
        D = pd.Series(index=IMs, data=D)

        # Compute the overall residual & save selected ground motions
        R_values.append(np.sum(im_weights * (D ** 2)))
        sel_gms_ind.append(list(cur_selected_gms_ind))

    # Select the best fitting set of ground motions (if multiple replica were run)
    selected_ix = np.argmin(R_values)
    sel_gms_ind, rel_IM_values = sel_gms_ind[selected_ix], rel_IM_values[selected_ix]

    return GMSResult(
        ensemble,
        site_info,
        IMj,
        im_j,
        IMs,
        gms_im_df.loc[sel_gms_ind],
        IMi_gcims,
        rel_IM_values.apply(np.exp),
        gm_dataset,
        cs_param_bounds,
        sf=sf,
    )


def default_IM_weights(IM_j: IM, IMs: np.ndarray) -> pd.Series:
    """
    Returns the default IM weights based on the conditioning IM

    If the conditioning IM (IM_j) is spectral acceleration (SA) the weighting
    is 70% across the SAs and 30% across all other IMs

    Otherwise a uniform weighting distribution is used

    Parameters
    ----------
    IM_j: IM
        Conditioning IM
    IMs: list of IM
        IM types for which to get the default weights

    Returns
    -------
    im_weights: pandas series
        Weights for the specified IM types (sums to 1.0)
    """
    # Use 70% (SA) / 30% (other) weighting if
    # conditioning IM is SA
    if IM_j.is_pSA():
        pSA_mask = np.asarray([cur_im.im_type is IMType.pSA for cur_im in IMs])
        n_pSA_IMs = np.count_nonzero(pSA_mask)
        n_other_IMs = IMs.size - n_pSA_IMs

        if n_other_IMs == 0:
            # Only pSA IMs present: spread the full weight uniformly
            im_weights = np.ones(n_pSA_IMs, dtype=float) / n_pSA_IMs
        else:
            im_weights = np.full(IMs.size, np.nan)
            im_weights[pSA_mask] = (1.0 / n_pSA_IMs) * 0.7
            im_weights[~pSA_mask] = (1.0 / n_other_IMs) * 0.3
    # Otherwise, default to uniform weighting
    else:
        print(
            f"WARNING: Defaulting to uniform IM weighting as the "
            f"conditioning is not SA."
        )
        im_weights = np.ones(IMs.size, dtype=float) / IMs.size

    return pd.Series(data=im_weights, index=IMs)


def default_causal_params(
    ensemble: gm_data.Ensemble,
    site_info: site.SiteInfo,
    IM_j: IM,
    exceedance: Optional[float] = None,
    im_value: Optional[float] = None,
    disagg_data: Optional[disagg.EnsembleDisaggResult] = None,
) -> CausalParamBounds:
    """
    Computes default causal parameters based on
    "Tarbali, K. and Bradley, B.A., 2016.
    The effect of causal parameter bounds in PSHA-based ground motion selection."

    Using criterion AC (Table III)

    Parameters
    ----------
    ensemble: Ensemble
    site_info: SiteInfo
    IM_j: IM
        Conditioning IM
    exceedance : float, optional
        Compute disagg at this exceedance, either the exceedance
        or the im_value parameter has to be given
    im_value: float, optional
        Compute disagg at this im value if required
    disagg_data: DisaggResult, optional
        Computed Disagg data if pre-calculated

    Returns
    -------
    CausalParamBounds
        Carrying the magnitude (Mw), rupture distance (Rrup) and Vs30
        bounds, each as a (lower, upper) pair
    """
    # Calculate disagg if not already specified
    if disagg_data is None:
        disagg_data = disagg.run_ensemble_disagg(
            ensemble,
            site_info,
            IM_j,
            exceedance=exceedance,
            im_value=im_value,
            calc_mean_values=True,
        )

    # Vs30 bounds: +-50% around the site Vs30
    vs_low, vs_high = site_info.vs30 * 0.5, site_info.vs30 * 1.5

    # Combined (fault + distributed-seismicity) disagg contributions
    contr_df = pd.concat(
        (
            disagg_data.fault_disagg_id.contribution,
            disagg_data.ds_disagg_id.contribution,
        )
    )

    # Mw bounds: 1st/99th percentile of the contribution-weighted magnitude
    # distribution, widened to at least +-0.5 around the 10th/90th percentile
    contr_df = pd.merge(
        contr_df.to_frame("contribution"),
        ensemble.rupture_df_id.magnitude.to_frame("magnitude"),
        how="left",
        left_index=True,
        right_index=True,
    ).sort_values("magnitude")
    non_nan_mask = ~contr_df.magnitude.isna()
    mw_low = min(
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.01]),
            contr_df.magnitude.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0],
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.1]),
            contr_df.magnitude.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0]
        - 0.5,
    )
    mw_high = max(
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.99]),
            contr_df.magnitude.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0],
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.90]),
            contr_df.magnitude.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0]
        + 0.5,
    )

    # Get distances (rupture distance per source, matched to the disagg ruptures)
    fault_rrup_disagg_df = site_source.match_ruptures(
        site_source.get_distance_df(ensemble.flt_ssddb_ffp, site_info),
        disagg_data.fault_disagg_id.contribution.copy(),
        constants.SourceType.fault,
    )
    ds_rrup_disagg_df = site_source.match_ruptures(
        site_source.get_distance_df(ensemble.ds_ssddb_ffp, site_info),
        disagg_data.ds_disagg_id.contribution.copy(),
        constants.SourceType.distributed,
    )

    contr_df = pd.merge(
        contr_df,
        pd.concat([fault_rrup_disagg_df.rrup, ds_rrup_disagg_df.rrup], axis=0).to_frame(
            "rrup"
        ),
        how="left",
        left_index=True,
        right_index=True,
    ).sort_values("rrup")
    non_nan_mask = ~contr_df.rrup.isna()

    # Rrup bounds: 1st/99th percentile, widened multiplicatively
    # (x0.5 / x1.5) around the 10th/90th percentile
    rrup_low = min(
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.01]),
            contr_df.rrup.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0],
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.1]),
            contr_df.rrup.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0]
        * 0.5,
    )
    rrup_high = max(
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.99]),
            contr_df.rrup.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0],
        sha_calc.query_non_parametric_cdf_invs(
            np.asarray([0.90]),
            contr_df.rrup.values[non_nan_mask],
            contr_df.contribution.cumsum().values[non_nan_mask],
        )[0]
        * 1.5,
    )

    return CausalParamBounds(
        ensemble,
        site_info,
        IM_j,
        (mw_low, mw_high),
        (rrup_low, rrup_high),
        (vs_low, vs_high),
        sf_bounds=(SF_LOW, SF_HIGH),
        contr_df=contr_df,
        exceedance=exceedance,
        im_value=im_value,
    )
{"hexsha": "fa1c57c182f71a8ee822c407642062143b27c4b8", "size": 22751, "ext": "py", "lang": "Python", "max_stars_repo_path": "calculation/gmhazard_calc/gmhazard_calc/gms/gms.py", "max_stars_repo_name": "ucgmsim/gmhazard", "max_stars_repo_head_hexsha": "d3d90b4c94b3d9605597a3efeccc8523a1e50c0e", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "calculation/gmhazard_calc/gmhazard_calc/gms/gms.py", "max_issues_repo_name": "ucgmsim/gmhazard", "max_issues_repo_head_hexsha": "d3d90b4c94b3d9605597a3efeccc8523a1e50c0e", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 8, "max_issues_repo_issues_event_min_datetime": "2021-10-13T02:33:23.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-29T21:01:08.000Z", "max_forks_repo_path": "calculation/gmhazard_calc/gmhazard_calc/gms/gms.py", "max_forks_repo_name": "ucgmsim/gmhazard", "max_forks_repo_head_hexsha": "d3d90b4c94b3d9605597a3efeccc8523a1e50c0e", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 35.6040688576, "max_line_length": 99, "alphanum_fraction": 0.611137972, "include": true, "reason": "import numpy,from scipy", "num_tokens": 5642}
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from sklearn.cluster import KMeans
from sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet
from sklearn.model_selection import GroupShuffleSplit, GridSearchCV
from sklearn.metrics import r2_score, make_scorer
from sklearn.neighbors import KNeighborsRegressor
from sklearn.ensemble import RandomForestRegressor
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.kernel_ridge import KernelRidge
import seaborn as sns
import pickle
from scipy.spatial.distance import *


def r2_custom(y_true, y_pred, metric='braycurtis', epsilon=1e-10):
    """Pseudo-R^2 for compositional predictions, based on a dissimilarity metric.

    Parameters
    ----------
    y_true, y_pred : (n_samples, p) arrays of relative abundances whose rows sum
        to <= 1; a final column holding the remainder (1 - row sum) is appended
        so each row becomes a full composition.
    metric : any metric name accepted by scipy.spatial.distance.cdist.
    epsilon : small constant guarding against division by zero.

    Returns
    -------
    float : 1 when predictions match exactly; <= 0 when predictions are no
    better than the null model (the mean composition of y_true).
    """
    y_true = np.hstack([y_true, (1 - y_true.sum(axis=1))[:, np.newaxis]])
    y_pred = np.hstack([y_pred, (1 - y_pred.sum(axis=1))[:, np.newaxis]])
    y_null = y_true.mean(axis=0)[np.newaxis, :]
    # Paired (row-wise) distances. diag(cdist) materializes the full n x n
    # matrix; acceptable for the small sample sizes used here.
    d = np.diag(cdist(y_true, y_pred, metric=metric))
    d_null = cdist(y_true, y_null, metric=metric).squeeze()
    return (d_null - d).mean() / (d_null.mean() + epsilon)


def best_alpha_UCB(cv_results):
    """Refit rule for GridSearchCV: pick the candidate maximizing
    mean_test_score + std_test_score (an upper-confidence-bound choice)."""
    UCB = cv_results['mean_test_score'] + cv_results['std_test_score']
    return np.argmax(UCB)


class mh_predict:
    """Predict microbial community composition from carbon-source flux profiles.

    Builds matched feature (X: flux per reaction, replicated per
    inoculum x replicate) and target (Y: relative abundance per taxon at
    `level`) tables, splits carbon sources into train/test sets, and exposes
    one `run_*` method per regression model. Each `run_*` method stores its
    fitted estimator in `self.estimators`, predictions in `self.Y_pred`, and
    custom-R^2 scores in `self.train_score` / `self.test_score`.
    """

    def __init__(self, carbon, community, p_carb=10, p_com=10, level='Family',
                 n_train=10, n_test=10, reduce_dimension=True, test_data=None,
                 norm=None, metric='braycurtis'):
        # Format tables for carbons: one row per carbon source, one column per
        # metabolic reaction flux; `Category` labels each carbon source.
        carbon_table = pd.pivot_table(carbon, values='Flux', columns='Reaction',
                                      index='Carbon_Source', aggfunc=np.sum, fill_value=0)
        carbon_metadata = pd.pivot_table(carbon, values='Category',
                                         index='Carbon_Source', aggfunc='first')
        if norm is not None:
            carbon_table = carbon_table.div(norm, axis=0)

        # Expand flux data to have one row per experiment (inoculum x replicate).
        # Collect the copies and concatenate once: DataFrame.append was removed
        # in pandas 2.0 and the old one-at-a-time append was quadratic.
        frames = []
        for inoc in range(10):
            for rep in range(10):
                X_new = carbon_table.copy()
                X_new['Inoculum'] = inoc
                X_new['Replicate'] = rep
                X_new = X_new.set_index(['Inoculum', 'Replicate'], append=True)
                frames.append(X_new)
        X = pd.concat(frames)

        if reduce_dimension:
            # Reduce dimension of carbon vector using PCA.
            # NOTE(review): this runs AFTER X was expanded above, so X keeps the
            # full (unreduced) flux columns and the PCA-reduced table is only
            # used for its index below -- confirm this ordering is intended.
            PCA_model = PCA(n_components=p_carb).fit(carbon_table)
            carbon_table = pd.DataFrame(PCA_model.transform(carbon_table),
                                        columns=['PC ' + str(k + 1) for k in range(p_carb)],
                                        index=carbon_table.index)

        # Format tables for community: abundance per taxon at the chosen level,
        # indexed by (Carbon_Source, Inoculum, Replicate).
        Y = pd.pivot_table(community, values='Relative_Abundance', columns=level,
                           index=['Carbon_Source', 'Inoculum', 'Replicate'],
                           aggfunc=np.sum, fill_value=0)

        # Find carbon sources for which no flux data is available.
        no_data = list(set(community['Carbon_Source']) - set(carbon_table.index))
        if len(no_data) > 0:
            print('Dropped training CS missing from carbon data: ' + ', '.join(no_data))
            Y = Y.drop(no_data)

        # Keep only top p_com most abundant families, and make sure they are
        # also present in the test data (if supplied).
        chosen = Y.sum().sort_values(ascending=False).index[:p_com]
        if test_data is not None:
            no_data_test = list(set(test_data['Carbon_Source']) - set(carbon_table.index))
            if len(no_data_test) > 0:
                print('Dropped test CS missing from carbon data: ' + ', '.join(no_data_test))
            chosen = list(set(chosen).intersection(set(test_data[level])))
        Y = Y.T.loc[chosen].T

        # Create training and test sets with non-overlapping carbon sources,
        # ensuring that the training set has at least one sugar and one acid.
        go = True
        k = 0
        while go and k < 1000:
            if test_data is None:
                train = np.random.choice(list(set(community['Carbon_Source']) - set(no_data)),
                                         size=n_train, replace=False)
                test = list(set(community['Carbon_Source']) - set(no_data) - set(train))
            else:
                if len(set(test_data['Carbon_Source']) - set(no_data_test)) < len(set(community['Carbon_Source'])):
                    test = np.random.choice(list(set(test_data['Carbon_Source']) - set(no_data_test)),
                                            size=n_test, replace=False)
                    train = list(set(community['Carbon_Source']) - set(no_data) - set(test))
                else:
                    train = np.random.choice(list(set(community['Carbon_Source']) - set(no_data)),
                                             size=n_train, replace=False)
                    test = list(set(test_data['Carbon_Source']) - set(no_data_test) - set(train))
            t = list(carbon_metadata.reindex(train)['Category'])
            k += 1
            # Categories 'F' and 'R' are assumed to be the sugar/acid split
            # mentioned above -- TODO confirm the mapping.
            if 'F' in t and 'R' in t:
                go = False

        # Save test and train data.
        self.Y_train = Y.loc[train]
        self.X_train = X.reindex(self.Y_train.index)
        if test_data is not None:
            self.Y_test = pd.pivot_table(test_data, values='Relative_Abundance', columns=level,
                                         index=['Carbon_Source', 'Inoculum', 'Replicate'],
                                         aggfunc=np.sum, fill_value=0)
            self.Y_test = self.Y_test.drop(no_data_test)
            self.Y_test = self.Y_test.T.loc[chosen].T
            self.Y_test = self.Y_test.loc[test]
        else:
            self.Y_test = Y.loc[test]
        self.X_test = X.reindex(self.Y_test.index)

        # Tag each row with its carbon-source category and reorder the index to
        # (Category, Carbon_Source, Inoculum, Replicate).
        self.X_test = self.X_test.join(carbon_metadata['Category']).set_index('Category', append=True).reorder_levels([3, 0, 1, 2]).astype(float)
        self.Y_test = self.Y_test.join(carbon_metadata['Category']).set_index('Category', append=True).reorder_levels([3, 0, 1, 2]).astype(float)
        self.X_train = self.X_train.join(carbon_metadata['Category']).set_index('Category', append=True).reorder_levels([3, 0, 1, 2]).astype(float)
        self.Y_train = self.Y_train.join(carbon_metadata['Category']).set_index('Category', append=True).reorder_levels([3, 0, 1, 2]).astype(float)
        self.Y_null = self.Y_train.mean().values[np.newaxis, :]

        # Set default hyperparameters and initialize result containers.
        self.metric = metric
        self.scorer = make_scorer(r2_custom, metric=self.metric)
        self.train = train
        self.alpha_lasso = 1e-2
        self.alpha_ridge = 1e2
        self.alpha_net = 1e-2
        self.alpha_kridge = 1e-2
        self.gamma_kridge = 1
        self.l1_ratio_net = 0.1
        self.p_carb = p_carb
        self.p_com = p_com
        self.level = level
        self.train_score = {}
        self.test_score = {}
        self.Y_pred = {}
        self.estimators = {}

    def run_lasso(self, cross_validate=False, plot=False, lb=-3, ub=2, ns=15, n_splits=50):
        """Fit multi-output LASSO. With cross_validate=True, grid-search alpha
        over np.logspace(lb, ub, ns) using grouped shuffle splits (groups =
        carbon source, so no source appears in both CV folds)."""
        if cross_validate:
            params = {'alpha': np.logspace(lb, ub, ns)}
            self.lasso = GridSearchCV(Lasso(max_iter=100000), params,
                                      cv=GroupShuffleSplit(n_splits=n_splits).split(
                                          self.X_train, groups=self.X_train.reset_index()['Carbon_Source']),
                                      refit=best_alpha_UCB, scoring=self.scorer)
            self.lasso.fit(self.X_train, self.Y_train)
            self.lasso.coef_ = self.lasso.best_estimator_.coef_
            self.alpha_lasso = self.lasso.best_estimator_.alpha
            self.estimators['LASSO'] = self.lasso.best_estimator_
            if plot:
                # One curve per CV split: score as a function of alpha.
                for splitname in ['split' + str(k) + '_test_score' for k in range(n_splits)]:
                    plt.semilogx(self.lasso.cv_results_['param_alpha'], self.lasso.cv_results_[splitname])
                plt.ylabel('Performance (' + self.metric + ')')
                plt.xlabel(r'$\alpha$')
                plt.title('Lasso Performance')
                plt.ylim([-0.01, 1.0])
                plt.show()
        else:
            self.lasso = Lasso(alpha=self.alpha_lasso, max_iter=100000)
            self.lasso.fit(self.X_train, self.Y_train)
            self.estimators['LASSO'] = self.lasso
        self.Y_pred['LASSO'] = pd.DataFrame(self.lasso.predict(self.X_test), index=self.Y_test.index, columns=self.Y_test.keys())
        self.train_score['LASSO'] = r2_custom(self.Y_train.values, self.lasso.predict(self.X_train), metric=self.metric)
        self.test_score['LASSO'] = r2_custom(self.Y_test.values, self.lasso.predict(self.X_test), metric=self.metric)

    def run_ridge(self, cross_validate=False, plot=False, lb=1, ub=6, ns=15, n_splits=20):
        """Fit multi-output Ridge regression; see run_lasso for the CV scheme."""
        if cross_validate:
            params = {'alpha': np.logspace(lb, ub, ns)}
            self.ridge = GridSearchCV(Ridge(max_iter=10000), params,
                                      cv=GroupShuffleSplit(n_splits=n_splits).split(
                                          self.X_train, groups=self.X_train.reset_index()['Carbon_Source']),
                                      refit=best_alpha_UCB, scoring=self.scorer)
            self.ridge.fit(self.X_train, self.Y_train)
            self.ridge.coef_ = self.ridge.best_estimator_.coef_
            self.alpha_ridge = self.ridge.best_estimator_.alpha
            self.estimators['Ridge'] = self.ridge.best_estimator_
            if plot:
                for splitname in ['split' + str(k) + '_test_score' for k in range(n_splits)]:
                    plt.semilogx(self.ridge.cv_results_['param_alpha'], self.ridge.cv_results_[splitname])
                plt.ylabel('Performance (' + self.metric + ')')
                plt.xlabel(r'$\alpha$')
                plt.title('Ridge Performance')
                plt.ylim([-0.01, 1.0])
                plt.show()
        else:
            self.ridge = Ridge(alpha=self.alpha_ridge, max_iter=10000)
            self.ridge.fit(self.X_train, self.Y_train)
            self.estimators['Ridge'] = self.ridge
        self.Y_pred['Ridge'] = pd.DataFrame(self.ridge.predict(self.X_test), index=self.Y_test.index, columns=self.Y_test.keys())
        self.train_score['Ridge'] = r2_custom(self.Y_train.values, self.ridge.predict(self.X_train), metric=self.metric)
        self.test_score['Ridge'] = r2_custom(self.Y_test.values, self.ridge.predict(self.X_test), metric=self.metric)

    def run_elastic_net(self, cross_validate=False, plot=False, lb=[-3, -2], ub=[2, 0], ns=5, n_splits=20):
        """Fit multi-output Elastic Net; CV searches a 2-D (alpha, l1_ratio)
        grid, lb/ub give the log-space bounds per parameter."""
        if cross_validate:
            params = {'alpha': np.logspace(lb[0], ub[0], ns),
                      'l1_ratio': np.logspace(lb[1], ub[1], ns)}
            self.net = GridSearchCV(ElasticNet(max_iter=10000), params,
                                    cv=GroupShuffleSplit(n_splits=n_splits).split(
                                        self.X_train, groups=self.X_train.reset_index()['Carbon_Source']),
                                    refit=best_alpha_UCB, scoring=self.scorer)
            self.net.fit(self.X_train, self.Y_train)
            self.net.coef_ = self.net.best_estimator_.coef_
            self.alpha_net = self.net.best_estimator_.alpha
            self.l1_ratio_net = self.net.best_estimator_.l1_ratio
            self.estimators['Elastic Net'] = self.net.best_estimator_
            if plot:
                # Heatmap of mean + std test score (the UCB refit criterion).
                sns.heatmap(pd.pivot_table(pd.DataFrame(self.net.cv_results_), index='param_alpha', columns='param_l1_ratio', values='mean_test_score')
                            + pd.pivot_table(pd.DataFrame(self.net.cv_results_), index='param_alpha', columns='param_l1_ratio', values='std_test_score'))
                plt.title('Elastic Net Performance')
                plt.show()
        else:
            self.net = ElasticNet(alpha=self.alpha_net, l1_ratio=self.l1_ratio_net, max_iter=10000)
            self.net.fit(self.X_train, self.Y_train)
            self.estimators['Elastic Net'] = self.net
        self.Y_pred['Elastic Net'] = pd.DataFrame(self.net.predict(self.X_test), index=self.Y_test.index, columns=self.Y_test.keys())
        self.train_score['Elastic Net'] = r2_custom(self.Y_train.values, self.net.predict(self.X_train), metric=self.metric)
        self.test_score['Elastic Net'] = r2_custom(self.Y_test.values, self.net.predict(self.X_test), metric=self.metric)

    def run_kernel_ridge(self, cross_validate=False, plot=False, lb=[-2, -3], ub=[2, 0], ns=5, n_splits=20, kernel='rbf'):
        """Fit Kernel Ridge regression; CV searches a 2-D (alpha, gamma) grid."""
        if cross_validate:
            params = {'alpha': np.logspace(lb[0], ub[0], ns),
                      'gamma': np.logspace(lb[1], ub[1], ns)}
            self.kridge = GridSearchCV(KernelRidge(kernel=kernel), params,
                                       cv=GroupShuffleSplit(n_splits=n_splits).split(
                                           self.X_train, groups=self.X_train.reset_index()['Carbon_Source']),
                                       refit=best_alpha_UCB, scoring=self.scorer)
            self.kridge.fit(self.X_train, self.Y_train)
            self.alpha_kridge = self.kridge.best_estimator_.alpha
            self.gamma_kridge = self.kridge.best_estimator_.gamma
            self.estimators['Kernel Ridge'] = self.kridge.best_estimator_
            if plot:
                sns.heatmap(pd.pivot_table(pd.DataFrame(self.kridge.cv_results_), index='param_alpha', columns='param_gamma', values='mean_test_score')
                            + pd.pivot_table(pd.DataFrame(self.kridge.cv_results_), index='param_alpha', columns='param_gamma', values='std_test_score'))
                plt.title('Kernel Ridge Performance')
                plt.show()
        else:
            # NOTE(review): `kernel` is not forwarded here, so this path uses
            # KernelRidge's default ('linear') kernel -- confirm intended.
            self.kridge = KernelRidge(alpha=self.alpha_kridge, gamma=self.gamma_kridge)
            self.kridge.fit(self.X_train, self.Y_train)
            self.estimators['Kernel Ridge'] = self.kridge
        self.Y_pred['Kernel Ridge'] = pd.DataFrame(self.kridge.predict(self.X_test), index=self.Y_test.index, columns=self.Y_test.keys())
        self.train_score['Kernel Ridge'] = r2_custom(self.Y_train.values, self.kridge.predict(self.X_train), metric=self.metric)
        self.test_score['Kernel Ridge'] = r2_custom(self.Y_test.values, self.kridge.predict(self.X_test), metric=self.metric)

    def run_linear(self):
        """Fit ordinary least squares (no regularization, no CV)."""
        self.linear = LinearRegression()
        self.linear.fit(self.X_train, self.Y_train)
        self.Y_pred['OLS'] = pd.DataFrame(self.linear.predict(self.X_test), index=self.Y_test.index, columns=self.Y_test.keys())
        self.train_score['OLS'] = r2_custom(self.Y_train.values, self.linear.predict(self.X_train), metric=self.metric)
        self.test_score['OLS'] = r2_custom(self.Y_test.values, self.linear.predict(self.X_test), metric=self.metric)
        self.estimators['OLS'] = self.linear

    def run_knn(self):
        """Fit a 1-nearest-neighbor baseline on per-carbon-source means
        (level 0 of the index), then score on the full replicate-level data."""
        Y_train = self.Y_train.groupby(level=0).mean()
        X_train = self.X_train.groupby(level=0).mean()
        self.knn = KNeighborsRegressor(n_neighbors=1)
        self.knn.fit(X_train, Y_train)
        self.Y_pred['KNN'] = pd.DataFrame(self.knn.predict(self.X_test), index=self.Y_test.index, columns=self.Y_test.keys())
        self.train_score['KNN'] = r2_custom(self.Y_train.values, self.knn.predict(self.X_train), metric=self.metric)
        self.test_score['KNN'] = r2_custom(self.Y_test.values, self.knn.predict(self.X_test), metric=self.metric)
        self.estimators['KNN'] = self.knn

    def run_random_forest(self):
        """Fit a 100-tree random forest regressor."""
        self.forest = RandomForestRegressor(n_estimators=100)
        self.forest.fit(self.X_train, self.Y_train)
        # Index on Y_test.index for consistency with the other run_* methods
        # (X_test was reindexed from Y_test, so the rows are identical).
        self.Y_pred['Random Forest'] = pd.DataFrame(self.forest.predict(self.X_test), index=self.Y_test.index, columns=self.Y_test.keys())
        self.train_score['Random Forest'] = r2_custom(self.Y_train.values, self.forest.predict(self.X_train), metric=self.metric)
        self.test_score['Random Forest'] = r2_custom(self.Y_test.values, self.forest.predict(self.X_test), metric=self.metric)
        self.estimators['Random Forest'] = self.forest

    def regenerate_predictions(self):
        """Recompute predictions and scores for every fitted estimator (useful
        after the test set has been modified in place)."""
        for method in self.estimators.keys():
            self.Y_pred[method] = pd.DataFrame(self.estimators[method].predict(self.X_test), index=self.Y_test.index, columns=self.Y_test.keys())
            self.train_score[method] = r2_custom(self.Y_train.values, self.estimators[method].predict(self.X_train), metric=self.metric)
            self.test_score[method] = r2_custom(self.Y_test.values, self.estimators[method].predict(self.X_test), metric=self.metric)
{"hexsha": "0a7f7bd5d1b94ea6f4989841a9d94573aa549f21", "size": 13851, "ext": "py", "lang": "Python", "max_stars_repo_path": "metabolic_homology/__init__.py", "max_stars_repo_name": "Emergent-Behaviors-in-Biology/metabolic-homology", "max_stars_repo_head_hexsha": "a0cb8030dbc507e5314a25a8c6ced3c50b2d7b29", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 1, "max_stars_repo_stars_event_min_datetime": "2020-05-21T17:05:57.000Z", "max_stars_repo_stars_event_max_datetime": "2020-05-21T17:05:57.000Z", "max_issues_repo_path": "metabolic_homology/__init__.py", "max_issues_repo_name": "Emergent-Behaviors-in-Biology/metabolic-homology", "max_issues_repo_head_hexsha": "a0cb8030dbc507e5314a25a8c6ced3c50b2d7b29", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "metabolic_homology/__init__.py", "max_forks_repo_name": "Emergent-Behaviors-in-Biology/metabolic-homology", "max_forks_repo_head_hexsha": "a0cb8030dbc507e5314a25a8c6ced3c50b2d7b29", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 1, "max_forks_repo_forks_event_min_datetime": "2020-05-21T17:06:04.000Z", "max_forks_repo_forks_event_max_datetime": "2020-05-21T17:06:04.000Z", "avg_line_length": 51.8764044944, "max_line_length": 175, "alphanum_fraction": 0.7436286189, "include": true, "reason": "import numpy,from scipy", "num_tokens": 3763}
// Licensed to the Apache Software Foundation (ASF) under one
// or more contributor license agreements.  See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership.  The ASF licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License.  You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied.  See the License for the
// specific language governing permissions and limitations
// under the License.
#include "kudu/master/ts_manager.h"

#include <time.h>

#include <algorithm>
#include <functional>
#include <limits>
#include <mutex>
#include <ostream>
#include <utility>

#include <boost/optional/optional.hpp>
#include <gflags/gflags.h>
#include <glog/logging.h>

#include "kudu/common/common.pb.h"
#include "kudu/common/wire_protocol.pb.h"
#include "kudu/gutil/map-util.h"
#include "kudu/gutil/port.h"
#include "kudu/gutil/strings/substitute.h"
#include "kudu/master/location_cache.h"
#include "kudu/master/sys_catalog.h"
#include "kudu/master/ts_descriptor.h"
#include "kudu/util/flag_tags.h"
#include "kudu/util/logging.h"
#include "kudu/util/metrics.h"
#include "kudu/util/pb_util.h"
#include "kudu/util/trace.h"

DEFINE_bool(location_mapping_by_uuid, false,
            "Whether the location command is given tablet server identifier "
            "instead of hostname/IP address (for tests only).");
TAG_FLAG(location_mapping_by_uuid, hidden);
TAG_FLAG(location_mapping_by_uuid, unsafe);

METRIC_DEFINE_gauge_int32(server, cluster_replica_skew,
                          "Cluster Replica Skew",
                          kudu::MetricUnit::kTablets,
                          "The difference between the number of replicas on "
                          "the tablet server hosting the most replicas and "
                          "the number of replicas on the tablet server hosting "
                          "the least replicas.",
                          kudu::MetricLevel::kWarn);

using kudu::pb_util::SecureShortDebugString;
using std::lock_guard;
using std::unordered_set;
using std::shared_ptr;
using std::string;
using strings::Substitute;

namespace kudu {
namespace master {

// Visitor that repopulates TSManager::ts_state_by_uuid_ from the persisted
// tserver-state entries in the system catalog (see ReloadTServerStates()).
class TServerStateLoader : public TServerStateVisitor {
 public:
  explicit TServerStateLoader(TSManager* ts_manager)
      : ts_manager_(ts_manager) {}

  Status Visit(const std::string& tserver_id,
               const SysTServerStateEntryPB& metadata) override {
    // The caller (ReloadTServerStates) must hold ts_state_lock_ for writing.
    ts_manager_->ts_state_lock_.AssertAcquiredForWriting();
    TServerStatePB state = metadata.state();
    if (state == TServerStatePB::UNKNOWN_STATE) {
      // Tolerate entries written by a newer version: skip rather than fail.
      LOG(WARNING) << Substitute("ignoring unknown tserver state: $0",
                                 metadata.state());
      return Status::OK();
    }
    DCHECK_NE(TServerStatePB::NONE, state);
    // -1 marks "timestamp unknown" for entries persisted before the
    // timestamp field existed.
    int64_t timestamp = metadata.has_timestamp_secs() ? metadata.timestamp_secs() : -1;
    InsertOrDie(&ts_manager_->ts_state_by_uuid_, tserver_id, { state, timestamp });
    return Status::OK();
  }

 private:
  TSManager* ts_manager_;
};

TSManager::TSManager(LocationCache* location_cache,
                     const scoped_refptr<MetricEntity>& metric_entity)
    : ts_state_lock_(RWMutex::Priority::PREFER_READING),
      location_cache_(location_cache) {
  // Register the cluster_replica_skew gauge; AutoDetach ties the gauge's
  // lifetime to this TSManager so the callback never outlives 'this'.
  METRIC_cluster_replica_skew.InstantiateFunctionGauge(
      metric_entity, [this]() { return this->ClusterSkew(); })
      ->AutoDetach(&metric_detacher_);
}

TSManager::~TSManager() {
}

// Look up the registered descriptor matching both the UUID and the instance
// sequence number; NotFound if the UUID is unknown or the seqno is stale.
Status TSManager::LookupTS(const NodeInstancePB& instance,
                           shared_ptr<TSDescriptor>* ts_desc) const {
  shared_lock<rw_spinlock> l(lock_);
  const shared_ptr<TSDescriptor>* found_ptr =
      FindOrNull(servers_by_id_, instance.permanent_uuid());
  if (!found_ptr) {
    return Status::NotFound("unknown tablet server ID",
                            SecureShortDebugString(instance));
  }
  const shared_ptr<TSDescriptor>& found = *found_ptr;
  if (instance.instance_seqno() != found->latest_seqno()) {
    return Status::NotFound("mismatched instance sequence number",
                            SecureShortDebugString(instance));
  }
  *ts_desc = found;
  return Status::OK();
}

// Look up a descriptor by UUID only; returns false if not registered.
bool TSManager::LookupTSByUUID(const string& uuid,
                               std::shared_ptr<TSDescriptor>* ts_desc) const {
  shared_lock<rw_spinlock> l(lock_);
  return FindCopy(servers_by_id_, uuid, ts_desc);
}

// Register a tablet server (first heartbeat) or re-register a known one.
// Assigns a location via the location cache before taking lock_.
Status TSManager::RegisterTS(const NodeInstancePB& instance,
                             const ServerRegistrationPB& registration,
                             DnsResolver* dns_resolver,
                             std::shared_ptr<TSDescriptor>* desc) {
  // Pre-condition: registration info should contain at least one RPC end-point.
  if (registration.rpc_addresses().empty()) {
    return Status::InvalidArgument(
        "invalid registration: must have at least one RPC address",
        SecureShortDebugString(registration));
  }

  const string& uuid = instance.permanent_uuid();

  // Assign the location for the tablet server outside the lock: assigning
  // a location involves calling the location mapping script which is relatively
  // long and expensive operation.
  boost::optional<string> location;
  if (location_cache_) {
    // In some test scenarios the location is assigned per tablet server UUID.
    // That's the case when multiple (or even all) tablet servers have the same
    // IP address for their RPC endpoint.
    const auto& cmd_arg = PREDICT_FALSE(FLAGS_location_mapping_by_uuid)
        ? uuid : registration.rpc_addresses(0).host();
    TRACE(Substitute("tablet server $0: assigning location", uuid));
    string location_str;
    const auto s = location_cache_->GetLocation(cmd_arg, &location_str);
    TRACE(Substitute(
        "tablet server $0: assigned location '$1'", uuid, location_str));

    // If location resolution fails, log the error and return the status.
    if (!s.ok()) {
      CHECK(!registration.rpc_addresses().empty());
      const auto& addr = registration.rpc_addresses(0);
      KLOG_EVERY_N_SECS(ERROR, 60) << Substitute(
          "Unable to assign location to tablet server $0: $1",
          Substitute("$0 ($1:$2)", uuid, addr.host(), addr.port()),
          s.ToString());
      return s;
    }
    location.emplace(std::move(location_str));
  }

  shared_ptr<TSDescriptor> descriptor;
  bool new_tserver = false;
  {
    lock_guard<rw_spinlock> l(lock_);
    auto* descriptor_ptr = FindOrNull(servers_by_id_, uuid);
    if (descriptor_ptr) {
      // Known UUID: refresh the existing descriptor in place.
      descriptor = *descriptor_ptr;
      RETURN_NOT_OK(descriptor->Register(
          instance, registration, location, dns_resolver));
    } else {
      RETURN_NOT_OK(TSDescriptor::RegisterNew(
          instance, registration, location, dns_resolver, &descriptor));
      InsertOrDie(&servers_by_id_, uuid, descriptor);
      new_tserver = true;
    }
  }
  LOG(INFO) << Substitute("$0 tserver with Master: $1",
                          new_tserver ? "Registered new" : "Re-registered known",
                          descriptor->ToString());
  *desc = std::move(descriptor);
  return Status::OK();
}

void TSManager::GetAllDescriptors(TSDescriptorVector* descs) const {
  descs->clear();
  shared_lock<rw_spinlock> l(lock_);
  AppendValuesFromMap(servers_by_id_, descs);
}

int TSManager::GetCount() const {
  shared_lock<rw_spinlock> l(lock_);
  return servers_by_id_.size();
}

// Number of registered tablet servers that have heartbeated recently enough
// to not be presumed dead.
int TSManager::GetLiveCount() const {
  shared_lock<rw_spinlock> l(lock_);
  int live_count = 0;
  for (const auto& entry : servers_by_id_) {
    const shared_ptr<TSDescriptor>& ts = entry.second;
    if (!ts->PresumedDead()) {
      live_count++;
    }
  }
  return live_count;
}

// UUIDs of tablet servers in maintenance mode: their replica failures should
// not trigger re-replication.
unordered_set<string> TSManager::GetUuidsToIgnoreForUnderreplication() const {
  unordered_set<string> uuids;
  shared_lock<RWMutex> tsl(ts_state_lock_);
  uuids.reserve(ts_state_by_uuid_.size());
  for (const auto& ts_and_state_timestamp : ts_state_by_uuid_) {
    if (ts_and_state_timestamp.second.first == TServerStatePB::MAINTENANCE_MODE) {
      uuids.emplace(ts_and_state_timestamp.first);
    }
  }
  return uuids;
}

TServerStateMap TSManager::GetTServerStates() const {
  shared_lock<RWMutex> tsl(ts_state_lock_);
  return ts_state_by_uuid_;
}

// Descriptors of tablet servers eligible to host new replicas (alive and not
// in maintenance mode). Note the lock order: ts_state_lock_ before lock_.
void TSManager::GetDescriptorsAvailableForPlacement(TSDescriptorVector* descs) const {
  descs->clear();
  shared_lock<RWMutex> tsl(ts_state_lock_);
  shared_lock<rw_spinlock> l(lock_);
  descs->reserve(servers_by_id_.size());
  for (const TSDescriptorMap::value_type& entry : servers_by_id_) {
    const shared_ptr<TSDescriptor>& ts = entry.second;
    if (AvailableForPlacementUnlocked(*ts)) {
      descs->push_back(ts);
    }
  }
}

// Persist and apply a tserver state change (e.g. entering/exiting maintenance
// mode). Setting the state to NONE removes the persisted entry.
Status TSManager::SetTServerState(const string& ts_uuid,
                                  TServerStatePB ts_state,
                                  ChangeTServerStateRequestPB::HandleMissingTS handle_missing_ts,
                                  SysCatalogTable* sys_catalog) {
  lock_guard<RWMutex> l(ts_state_lock_);
  auto existing_state = FindWithDefault(
      ts_state_by_uuid_, ts_uuid, { TServerStatePB::NONE, -1 }).first;
  if (existing_state == ts_state) {
    return Status::OK();
  }
  // If there is no existing state for the tserver, and the tserver hasn't yet
  // been registered, return an error, as appropriate.
  shared_ptr<TSDescriptor> ts_desc;
  if (handle_missing_ts != ChangeTServerStateRequestPB::ALLOW_MISSING_TSERVER &&
      existing_state == TServerStatePB::NONE &&
      !LookupTSByUUID(ts_uuid, &ts_desc)) {
    return Status::NotFound(Substitute("Requested tserver $0 has not been registered", ts_uuid));
  }
  if (ts_state == TServerStatePB::NONE) {
    RETURN_NOT_OK_PREPEND(sys_catalog->RemoveTServerState(ts_uuid),
        Substitute("Failed to remove tserver state for $0", ts_uuid));
    ts_state_by_uuid_.erase(ts_uuid);

    // If exiting maintenance mode, make sure that any replica failures that
    // may have been ignored while in maintenance mode are reprocessed. To do
    // so, request full tablet reports across all tablet servers.
    SetAllTServersNeedFullTabletReports();
    LOG(INFO) << Substitute("Unset tserver state for $0 from $1",
                            ts_uuid, TServerStatePB_Name(existing_state));
    return Status::OK();
  }
  SysTServerStateEntryPB pb;
  pb.set_state(ts_state);
  int64_t timestamp = time(nullptr);
  pb.set_timestamp_secs(timestamp);
  // Write to the system catalog first; update in-memory state only on success.
  RETURN_NOT_OK_PREPEND(sys_catalog->WriteTServerState(ts_uuid, pb),
      Substitute("Failed to set tserver state for $0 to $1",
                 ts_uuid, TServerStatePB_Name(ts_state)));
  LOG(INFO) << Substitute("Set tserver state for $0 to $1",
                          ts_uuid, TServerStatePB_Name(ts_state));
  InsertOrUpdate(&ts_state_by_uuid_, ts_uuid, { ts_state, timestamp });
  return Status::OK();
}

TServerStatePB TSManager::GetTServerStateUnlocked(const string& ts_uuid) const {
  ts_state_lock_.AssertAcquired();
  return FindWithDefault(ts_state_by_uuid_, ts_uuid,
                         { TServerStatePB::NONE, -1 }).first;
}

TServerStatePB TSManager::GetTServerState(const string& ts_uuid) const {
  shared_lock<RWMutex> l(ts_state_lock_);
  return GetTServerStateUnlocked(ts_uuid);
}

// Replace the in-memory tserver-state map with the contents of the system
// catalog (used at master startup / leadership change).
Status TSManager::ReloadTServerStates(SysCatalogTable* sys_catalog) {
  lock_guard<RWMutex> l(ts_state_lock_);
  ts_state_by_uuid_ = {};
  TServerStateLoader loader(this);
  return sys_catalog->VisitTServerStates(&loader);
}

void TSManager::SetAllTServersNeedFullTabletReports() {
  lock_guard<rw_spinlock> l(lock_);
  for (auto& id_and_desc : servers_by_id_) {
    id_and_desc.second->UpdateNeedsFullTabletReport(true);
  }
}

// Remove a tserver's descriptor; refuses to remove a live tserver unless
// 'force_unregister_live_tserver' is set.
Status TSManager::UnregisterTServer(const std::string& ts_uuid,
                                    bool force_unregister_live_tserver) {
  lock_guard<rw_spinlock> l(lock_);
  shared_ptr<TSDescriptor> ts_desc;
  if (!FindCopy(servers_by_id_, ts_uuid, &ts_desc)) {
    return Status::NotFound(Substitute("Requested tserver $0 has not been registered", ts_uuid));
  }
  if (!force_unregister_live_tserver && !ts_desc->PresumedDead()) {
    return Status::IllegalState(Substitute("TServer $0 is not presumed dead.", ts_uuid));
  }
  servers_by_id_.erase(ts_uuid);
  return Status::OK();
}

// Metric callback: max minus min live-replica count over live tablet servers.
int TSManager::ClusterSkew() const {
  int min_count = std::numeric_limits<int>::max();
  int max_count = 0;
  shared_lock<rw_spinlock> l(lock_);
  for (const TSDescriptorMap::value_type& entry : servers_by_id_) {
    const shared_ptr<TSDescriptor>& ts = entry.second;
    if (ts->PresumedDead()) {
      continue;
    }
    int num_live_replicas = ts->num_live_replicas();
    min_count = std::min(min_count, num_live_replicas);
    max_count = std::max(max_count, num_live_replicas);
  }
  return max_count - min_count;
}

bool TSManager::AvailableForPlacementUnlocked(const TSDescriptor& ts) const {
  ts_state_lock_.AssertAcquired();
  // TODO(KUDU-1827): this should also be used when decommissioning a server.
  if (GetTServerStateUnlocked(ts.permanent_uuid()) == TServerStatePB::MAINTENANCE_MODE) {
    return false;
  }
  // If the tablet server has heartbeated recently enough, it is considered
  // alive and available for placement.
  return !ts.PresumedDead();
}

} // namespace master
} // namespace kudu
{"hexsha": "78e9930338aa4367043054e0d278c156c7e0bb4c", "size": 13499, "ext": "cc", "lang": "C++", "max_stars_repo_path": "src/kudu/master/ts_manager.cc", "max_stars_repo_name": "NagithaAbeywickrema/kudu", "max_stars_repo_head_hexsha": "24f85cedcd47e585ea9c308c317da963c5ab8fa9", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "src/kudu/master/ts_manager.cc", "max_issues_repo_name": "NagithaAbeywickrema/kudu", "max_issues_repo_head_hexsha": "24f85cedcd47e585ea9c308c317da963c5ab8fa9", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 1.0, "max_issues_repo_issues_event_min_datetime": "2022-03-04T03:06:56.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-04T03:06:56.000Z", "max_forks_repo_path": "src/kudu/master/ts_manager.cc", "max_forks_repo_name": "shenxingwuying/kudu", "max_forks_repo_head_hexsha": "a8fb42dc34e8f1f876db5b26fc3f5eb3196ce854", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 37.0851648352, "max_line_length": 97, "alphanum_fraction": 0.6987184236, "num_tokens": 3176}
// Software License for MTL
//
// Copyright (c) 2007 The Trustees of Indiana University.
//               2008 Dresden University of Technology and the Trustees of Indiana University.
//               2010 SimuNova UG (haftungsbeschränkt), www.simunova.com.
// All rights reserved.
// Authors: Peter Gottschling and Andrew Lumsdaine
//
// This file is part of the Matrix Template Library
//
// See also license.mtl.txt in the distribution.

#ifndef MTL_TRAITS_EVAL_INCLUDE
#define MTL_TRAITS_EVAL_INCLUDE

#include <boost/numeric/mtl/mtl_fwd.hpp>
#include <boost/numeric/mtl/matrix/crtp_base_matrix.hpp>
#include <boost/numeric/mtl/matrix/mat_expr.hpp>

namespace mtl { namespace traits {

// Trait that turns an expression-template type into something whose value()
// can be used directly: concrete matrices evaluate to themselves (by const
// reference); compound expressions may materialize a temporary.
// Specializations below define the behavior per type; the primary template
// is intentionally empty (unsupported types fail to compile on use).
template <typename T>
struct eval {};

#if 0 // To be done later
template <typename Value, typename Parameter>
struct eval< mtl::vector::dense_vector<Value, Parameter> > {};

template <typename Value1, typename Vector>
struct eval< mtl::vector::scaled_view<Value1, Vector> > {};

template <typename Value1, typename Vector>
struct eval< mtl::vector::rscaled_view<Value1, Vector> > {};
#endif

namespace impl {

    // Common base for self-evaluating types: stores a const reference to the
    // wrapped object and hands it back unchanged from value().
    // NOTE(review): holds a reference, so the eval object must not outlive
    // the expression it wraps.
    template<typename T>
    struct eval_self_ref
    {
	typedef const T&   const_reference;

	explicit eval_self_ref(const T& ref) : ref(ref) {}

	const_reference value() const { return ref; }

	const T& ref;
    };
}

// Concrete matrix types evaluate to themselves.
template <typename Value, typename Parameter>
struct eval< mtl::matrix::dense2D<Value, Parameter> >
    : public impl::eval_self_ref< mtl::matrix::dense2D<Value, Parameter> >
{
    eval(const mtl::matrix::dense2D<Value, Parameter>& ref)
	: impl::eval_self_ref< mtl::matrix::dense2D<Value, Parameter> >(ref) {}
};

template <typename Value, std::size_t Mask, typename Parameter>
struct eval< mtl::matrix::morton_dense<Value, Mask, Parameter> >
    : public impl::eval_self_ref< mtl::matrix::morton_dense<Value, Mask, Parameter> >
{
    eval(const mtl::matrix::morton_dense<Value, Mask, Parameter>& ref)
	: impl::eval_self_ref< mtl::matrix::morton_dense<Value, Mask, Parameter> >(ref) {}
};

template <typename Value, typename Parameter>
struct eval< mtl::matrix::compressed2D<Value, Parameter> >
    : public impl::eval_self_ref< mtl::matrix::compressed2D<Value, Parameter> >
{
    eval(const mtl::matrix::compressed2D<Value, Parameter>& ref)
	: impl::eval_self_ref< mtl::matrix::compressed2D<Value, Parameter> >(ref) {}
};

#if 0 // only dummy
template <typename E1, typename E2>
struct eval< mtl::matrix::mat_mat_asgn_expr<E1, E2> > {};
#endif

// Element-wise expressions also evaluate to themselves (they can be walked
// lazily without materialization).
template <typename E1, typename E2>
struct eval< mtl::matrix::mat_mat_plus_expr<E1, E2> >
    : public impl::eval_self_ref< mtl::matrix::mat_mat_plus_expr<E1, E2> >
{
    eval(const mtl::matrix::mat_mat_plus_expr<E1, E2>& ref)
	: impl::eval_self_ref< mtl::matrix::mat_mat_plus_expr<E1, E2> >(ref) {}
};

template <typename E1, typename E2>
struct eval< mtl::matrix::mat_mat_minus_expr<E1, E2> >
    : public impl::eval_self_ref< mtl::matrix::mat_mat_minus_expr<E1, E2> >
{
    eval(const mtl::matrix::mat_mat_minus_expr<E1, E2>& ref)
	: impl::eval_self_ref< mtl::matrix::mat_mat_minus_expr<E1, E2> >(ref) {}
};

template <typename E1, typename E2>
struct eval< mtl::matrix::mat_mat_ele_times_expr<E1, E2> >
    : public impl::eval_self_ref< mtl::matrix::mat_mat_ele_times_expr<E1, E2> >
{
    eval(const mtl::matrix::mat_mat_ele_times_expr<E1, E2>& ref)
	: impl::eval_self_ref< mtl::matrix::mat_mat_ele_times_expr<E1, E2> >(ref) {}
};

// Matrix product cannot be walked lazily: materialize it into a dense matrix.
template <typename E1, typename E2>
struct eval< mtl::matrix::mat_mat_times_expr<E1, E2> >
{
    // Needs dramatic improvement!!! Only for testing!!!
    // NOTE(review): the result type is hard-coded to dense2D<double>
    // regardless of the operands' value types.
    typedef matrix::dense2D<double> matrix_type;
    typedef const matrix_type&      const_reference;

    explicit eval(const mtl::matrix::mat_mat_times_expr<E1, E2>& expr)
	: prod(expr.first * expr.second)
    {}

    const_reference value() { return prod; }

  private:
    matrix_type prod;
};

// Scaled views: declared but not implemented (empty specializations).
template <typename Value1, typename Matrix>
struct eval< mtl::matrix::scaled_view<Value1, Matrix> > {};

template <typename Value1, typename Matrix>
struct eval< mtl::matrix::rscaled_view<Value1, Matrix> > {};

// Convenience factory deducing the eval specialization from the argument.
template <typename T>
eval<T> inline evaluate(const T& ref)
{
    return eval<T>(ref);
}

} // namespace traits

namespace matrix {
    using mtl::traits::evaluate;
}

namespace vector {
    using mtl::traits::evaluate;
}

} // namespace mtl

#endif // MTL_TRAITS_EVAL_INCLUDE
{"hexsha": "8c4668f75dae1e0ac90ff37fba8c31819e56b6bb", "size": 4418, "ext": "hpp", "lang": "C++", "max_stars_repo_path": "lib/mtl4/boost/numeric/mtl/utility/eval.hpp", "max_stars_repo_name": "spraetor/amdis2", "max_stars_repo_head_hexsha": "53c45c81a65752a8fafbb54f9ae6724a86639dcd", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 2.0, "max_stars_repo_stars_event_min_datetime": "2018-07-04T16:44:04.000Z", "max_stars_repo_stars_event_max_datetime": "2021-01-03T07:26:27.000Z", "max_issues_repo_path": "lib/mtl4/boost/numeric/mtl/utility/eval.hpp", "max_issues_repo_name": "spraetor/amdis2", "max_issues_repo_head_hexsha": "53c45c81a65752a8fafbb54f9ae6724a86639dcd", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "lib/mtl4/boost/numeric/mtl/utility/eval.hpp", "max_forks_repo_name": "spraetor/amdis2", "max_forks_repo_head_hexsha": "53c45c81a65752a8fafbb54f9ae6724a86639dcd", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 25.3908045977, "max_line_length": 94, "alphanum_fraction": 0.7014486193, "num_tokens": 1231}
[STATEMENT] lemma inv_check_left_moving_Suc_nonempty[simp]: "inv_check_left_moving (as, abc_lm_s lm n (Suc (abc_lm_v lm n))) (s, b, Oc # list) ires \<Longrightarrow> b \<noteq> []" [PROOF STATE] proof (prove) goal (1 subgoal): 1. inv_check_left_moving (as, abc_lm_s lm n (Suc (abc_lm_v lm n))) (s, b, Oc # list) ires \<Longrightarrow> b \<noteq> [] [PROOF STEP] apply(auto simp: inv_check_left_moving.simps inv_check_left_moving_in_middle.simps split: if_splits) [PROOF STATE] proof (prove) goal: No subgoals! [PROOF STEP] done
{"llama_tokens": 236, "file": "Universal_Turing_Machine_Abacus", "length": 2}
import time
from humpday.optimizers.dlibcube import dlib_default_cube
from humpday.optimizers.nevergradcube import nevergrad_ngopt8_cube
from humpday.optimizers.scipycube import scipy_slsqp_cube
from humpday.optimizers.ultraoptcube import ultraopt_gbrt_cube
from humpday.optimizers.nloptcube import nlopt_direct_cube, nlopt_isres_cube, nlopt_esch_cube, nlopt_directr_cube
from humpday.objectives.portfolio import markowitz_realized_on_cube, markowitz_analytic_on_cube, make_solution,\
    markowitz_return_on_cube, make_sigma_matrix
from pprint import pprint
from typing import List
import numpy as np


def nice_div(a, b):
    """Human-readable elementwise ratio for printed reports.

    Returns ~1.0 when a and b agree to within 1% of their combined magnitude,
    100.0 when b is essentially zero, and a/b otherwise. Recurses into
    (possibly nested) lists, dividing element by element.
    """
    if isinstance(a, List):
        return [nice_div(ai, bi) for ai, bi in zip(a, b)]
    if abs(a - b) < 0.01 * (abs(a) + abs(b)):
        return 1.0
    return 100. if abs(b) < 1e-6 else a / b


def normalize(x):
    """Scale x so its entries sum to 1; fall back to uniform weights when sum(x) <= 0."""
    total = sum(x)  # hoisted: the original recomputed sum(x) once per element
    if total > 0:
        return [xi / total for xi in x]
    return [1.0 / len(x) for _ in x]


def _timed_run(optimizer, objective, n_dim, n_trials):
    """Run *optimizer* on *objective* once.

    Returns (minimum, argmin, trial_count, wall_seconds).
    """
    start = time.time()
    v, u, t = optimizer(objective, n_dim=n_dim, n_trials=n_trials, with_count=True)
    return v, u, t, time.time() - start


def verify_markowitz(optimizer, n_dim, n_trials):
    """ How good are derivative free solvers?

        This little exercise tasks some of the speedier optimizers with variance minimization
        for a portfolio, and also looks at how consistent the answers are between two runs.
        Some examples of speedy optimizers are suggested by the unused imports.

        Obviously, you're better off doing this with derivs or worse case, quadratic solvers

        Returns a dict of diagnostics: minima, solution ratios, normalized weights,
        trial counts and timings for both runs, plus the covariance diagonals.
    """
    # Two optimizer runs against the realized and analytic objectives.
    v1, u1, t1, tau1 = _timed_run(optimizer, markowitz_realized_on_cube, n_dim, n_trials)
    v2, u2, t2, tau2 = _timed_run(optimizer, markowitz_analytic_on_cube, n_dim, n_trials)

    # Closed-form solution for comparison.
    u3 = make_solution(x_dim=n_dim)
    v3 = markowitz_analytic_on_cube(u3)
    v4 = markowitz_realized_on_cube(u3)

    r = [nice_div(u1j, u2j) for u1j, u2j in zip(u1, u2)]
    r1 = [nice_div(u1j, u3j) for u1j, u3j in zip(u1, u3)]
    r2 = [nice_div(u2j, u3j) for u2j, u3j in zip(u2, u3)]
    nu1 = normalize(u1)
    nu2 = normalize(u2)
    nu3 = normalize(u3)
    results = {'ratio_of_solutions': r, 'ratio_to_analytic_1': r1, 'ratios_to_analytic_2': r2,
               'minimum_1': v1, 'minimum_2': v2, 'minimum_3': v3,  # key was misspelled 'minium_3'
               'minimum_4': v4, 'trials_1': t1, 'trials_2': t2,
               'seconds_1': tau1, 'seconds_2': tau2,
               'weights_1': nu1, 'weights_2': nu2,
               'weight_ratio_1': nice_div(nu1, nu3), 'weight_ratio_2': nice_div(nu2, nu3),
               'diagonals': np.diag(make_sigma_matrix()[:n_dim, :n_dim])}
    return results


def markowitz_return(optimizer, n_dim, n_trials):
    """ Maximizing a different objective, just for fun.

        Runs the same return objective twice to gauge run-to-run consistency, then
        compares against the Markowitz approximation and equal (market) weights.
    """
    v1, u1, t1, tau1 = _timed_run(optimizer, markowitz_return_on_cube, n_dim, n_trials)
    v2, u2, t2, tau2 = _timed_run(optimizer, markowitz_return_on_cube, n_dim, n_trials)

    # Use Markowitz approximation
    u3 = make_solution(x_dim=n_dim)
    v3 = markowitz_return_on_cube(u3)
    # Equal-weight ("market") portfolio as a second reference point.
    u4 = [1 / n_dim for _ in range(n_dim)]
    v4 = markowitz_return_on_cube(u4)

    r = [nice_div(u1j, u2j) for u1j, u2j in zip(u1, u2)]
    r1 = [nice_div(u1j, u3j) for u1j, u3j in zip(u1, u3)]
    r2 = [nice_div(u2j, u3j) for u2j, u3j in zip(u2, u3)]
    nu1 = normalize(u1)
    nu2 = normalize(u2)
    nu3 = normalize(u3)
    results = {'ratio_of_solutions': r, 'ratio_to_analytic_1': r1, 'ratios_to_analytic_2': r2,
               'minimum_1': v1, 'minimum_2': v2, 'minimum_markowitz': v3,
               'minimum_market': v4, 'trials_1': t1, 'trials_2': t2,
               'seconds_1': tau1, 'seconds_2': tau2,
               'weights_1': nu1, 'weights_2': nu2,
               'weight_ratio_1': nice_div(nu1, nu3), 'weight_ratio_2': nice_div(nu2, nu3),
               'diagonals': np.diag(make_sigma_matrix()[:n_dim, :n_dim])}
    return results


if __name__ == '__main__':
    optimizer = nlopt_directr_cube
    results = markowitz_return(optimizer=optimizer, n_dim=5, n_trials=150000)
    pprint(results)
{"hexsha": "7f8cc013af499b20a3f03fa56fa5cedef6fc742f", "size": 4161, "ext": "py", "lang": "Python", "max_stars_repo_path": "examples/markowitz.py", "max_stars_repo_name": "MDCHAMP/humpday", "max_stars_repo_head_hexsha": "45e2cea95ae951d991ebc6c1e98314cc8c726f25", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 53, "max_stars_repo_stars_event_min_datetime": "2021-02-13T01:17:02.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-16T10:07:29.000Z", "max_issues_repo_path": "examples/markowitz.py", "max_issues_repo_name": "MDCHAMP/humpday", "max_issues_repo_head_hexsha": "45e2cea95ae951d991ebc6c1e98314cc8c726f25", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 16, "max_issues_repo_issues_event_min_datetime": "2021-02-13T17:42:06.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-06T10:08:50.000Z", "max_forks_repo_path": "examples/markowitz.py", "max_forks_repo_name": "MDCHAMP/humpday", "max_forks_repo_head_hexsha": "45e2cea95ae951d991ebc6c1e98314cc8c726f25", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 12, "max_forks_repo_forks_event_min_datetime": "2020-12-09T03:16:22.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-23T09:34:00.000Z", "avg_line_length": 36.8230088496, "max_line_length": 113, "alphanum_fraction": 0.6733958183, "include": true, "reason": "import numpy,import scipy", "num_tokens": 1306}
import numpy as np

from brew.metrics.diversity import paired
from brew.metrics.diversity import non_paired


class Diversity(object):
    """Ensemble Diversity Calculator.

    Computes a diversity score for an ensemble of classifiers from the
    "oracle" matrix (per-sample correctness of each ensemble member).

    Attributes
    ----------
    `metric` : function, receives the oracle output and returns float
        Function used to calculate the metric.

    Parameters
    ----------
    metric : {'e', 'kw', 'q', 'p', 'disagreement', 'agreement', 'df'}, optional
        Short code selecting the diversity measure:

        - 'e'            : Entropy Measure e (non-paired)
        - 'kw'           : Kohavi-Wolpert Variance (non-paired)
        - 'q'            : Q Statistics (paired)
        - 'p'            : Correlation Coefficient p (paired)
        - 'disagreement' : Disagreement Measure (paired)
        - 'agreement'    : Agreement Measure (paired)
        - 'df'           : Double Fault Measure (paired)

    See also
    --------
    brew.metrics.diversity.paired: Paired diversity metrics.
    brew.metrics.diversity.non_paired: Non-paired diversity metrics.

    References
    ----------
    Brown, Gavin, et al. "Diversity creation methods: a survey and
    categorisation." Information Fusion 6.1 (2005): 5-20.

    Kuncheva, Ludmila I., and Christopher J. Whitaker. "Measures of
    diversity in classifier ensembles and their relationship with the
    ensemble accuracy." Machine learning 51.2 (2003): 181-207.

    Tang, E. Ke, Ponnuthurai N. Suganthan, and Xin Yao. "An analysis of
    diversity measures." Machine Learning 65.1 (2006): 247-271.
    """

    def __init__(self, metric=''):
        # Dispatch table replaces the original if/elif chain; behavior is
        # unchanged, including the fallback print for unknown codes (which
        # leaves self.metric unset, as before).
        metric_table = {
            'e': non_paired.kuncheva_entropy_measure,
            'kw': non_paired.kuncheva_kw,
            'q': paired.kuncheva_q_statistics,
            'p': paired.kuncheva_correlation_coefficient_p,
            'disagreement': paired.kuncheva_disagreement_measure,
            'agreement': paired.kuncheva_agreement_measure,
            'df': paired.kuncheva_double_fault_measure,
        }
        if metric in metric_table:
            self.metric = metric_table[metric]
        else:
            print('invalid metric')

    def calculate(self, ensemble, X, y):
        # Build the oracle matrix: True where a member's predicted label
        # matches the true label, then hand it to the selected metric.
        labels = ensemble.output(X, mode='labels')
        oracle = np.equal(labels, y[:, np.newaxis])
        return self.metric(oracle)
{"hexsha": "b9ee799f724e237e8b9db559de8a90de4a5520d2", "size": 3440, "ext": "py", "lang": "Python", "max_stars_repo_path": "brew/metrics/diversity/base.py", "max_stars_repo_name": "va26/brew", "max_stars_repo_head_hexsha": "3531560df785fa44b39094f3ffad83d3b795b15b", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 344, "max_stars_repo_stars_event_min_datetime": "2015-10-02T19:35:15.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-23T07:18:50.000Z", "max_issues_repo_path": "brew/metrics/diversity/base.py", "max_issues_repo_name": "va26/brew", "max_issues_repo_head_hexsha": "3531560df785fa44b39094f3ffad83d3b795b15b", "max_issues_repo_licenses": ["MIT"], "max_issues_count": 19, "max_issues_repo_issues_event_min_datetime": "2015-03-24T01:26:51.000Z", "max_issues_repo_issues_event_max_datetime": "2018-07-26T20:11:32.000Z", "max_forks_repo_path": "brew/metrics/diversity/base.py", "max_forks_repo_name": "va26/brew", "max_forks_repo_head_hexsha": "3531560df785fa44b39094f3ffad83d3b795b15b", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 93, "max_forks_repo_forks_event_min_datetime": "2015-03-13T18:23:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-12-11T11:14:38.000Z", "avg_line_length": 34.4, "max_line_length": 102, "alphanum_fraction": 0.624127907, "include": true, "reason": "import numpy", "num_tokens": 932}
from typing import Mapping, Union, Tuple
from interface.MetricBase import MetricBase
from numpy.linalg import norm
from numpy import ravel, inf, ndarray


class AbsValueNorm(MetricBase):
    # Counts the number of changed entries between image and adversarial image.
    # NOTE(review): despite the class name, ord=0 computes the L0 "norm"
    # (count of non-zero differences), not an absolute-value norm — confirm
    # the name against the metric registry before renaming anything.

    def __call__(
        self,
        image: ndarray,
        adversarial_image: ndarray,
        model_metadata: Mapping[str, Union[int, bool, Tuple[int, int]]]
    ) -> float:
        # ravel fully flattens in one call, so a single flatten of each array
        # replaces the original loop that called ravel `channels - 1` times.
        # The loop was also broken for channels == 1: nothing was flattened,
        # and numpy.linalg.norm(..., 0) raises on 2-D input (ord=0 is only
        # defined for vectors). model_metadata is kept for interface parity.
        return norm(ravel(adversarial_image) - ravel(image), 0)


class EuclideanNorm(MetricBase):
    # L2 distance between the adversarial image and the original.

    def __call__(
        self,
        image: ndarray,
        adversarial_image: ndarray,
        model_metadata: Mapping[str, Union[int, bool, Tuple[int, int]]]
    ) -> float:
        return norm(adversarial_image - image, 2)


class InfNorm(MetricBase):
    # L-infinity distance: the largest single-entry perturbation.

    def __call__(
        self,
        image: ndarray,
        adversarial_image: ndarray,
        model_metadata: Mapping[str, Union[int, bool, Tuple[int, int]]]
    ) -> float:
        return norm(adversarial_image - image, inf)
{"hexsha": "180be07fa3359122fae7bbf7dea6c4ca022f503c", "size": 1105, "ext": "py", "lang": "Python", "max_stars_repo_path": "metrics/PNorm.py", "max_stars_repo_name": "Brauntt/Gicaf_new", "max_stars_repo_head_hexsha": "b3617aeb3c3569ca49ac53bb079e95b1ad03f590", "max_stars_repo_licenses": ["MIT"], "max_stars_count": null, "max_stars_repo_stars_event_min_datetime": null, "max_stars_repo_stars_event_max_datetime": null, "max_issues_repo_path": "metrics/PNorm.py", "max_issues_repo_name": "Brauntt/Gicaf_new", "max_issues_repo_head_hexsha": "b3617aeb3c3569ca49ac53bb079e95b1ad03f590", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "metrics/PNorm.py", "max_forks_repo_name": "Brauntt/Gicaf_new", "max_forks_repo_head_hexsha": "b3617aeb3c3569ca49ac53bb079e95b1ad03f590", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 27.625, "max_line_length": 71, "alphanum_fraction": 0.6171945701, "include": true, "reason": "from numpy", "num_tokens": 275}
import numpy

from .. import PulsevizException
from .sampler import Sampler
from ..pulseaudio.simple_client import SampleFormat


def calculate_frequency_weighting(frequency, weighting):
    """
    Calculates the weighting in [dB] for a given frequency.

    'A' and 'C' follow the standard acoustic weighting curves; 'Z' (zero)
    weighting applies no correction.
    """
    if weighting == 'A':
        a = numpy.power(12194.0, 2) * numpy.power(frequency, 4)
        b = (numpy.power(frequency, 2) + numpy.power(20.6, 2))
        c = (numpy.power(frequency, 2) + numpy.power(107.7, 2))
        d = (numpy.power(frequency, 2) + numpy.power(737.9, 2))
        e = (numpy.power(frequency, 2) + numpy.power(12194.0, 2))
        R_A = a / (b * numpy.sqrt(c * d) * e)
        A = 20 * numpy.log10(R_A) + 2.0
        return A
    elif weighting == 'C':
        a = numpy.power(12194.0, 2) * numpy.power(frequency, 2)
        b = (numpy.power(frequency, 2) + numpy.power(20.6, 2))
        c = (numpy.power(frequency, 2) + numpy.power(12194.0, 2))
        R_C = (a / (b * c))
        C = 20 * numpy.log10(R_C) + 0.06
        return C
    elif weighting == 'Z':
        # Z-weighting is flat, i.e. a 0 dB correction at every frequency.
        # (Previously this returned 1.0, which _sample() then applied as a
        # spurious +1 dB offset in log mode / ~1.12x gain in lin mode.)
        return 0.0
    else:
        # Project-wide exception type, consistent with the checks in FFT.
        raise PulsevizException('Unknown weighting type: {0}'.format(weighting))


class FFT(Sampler):
    """
    Applies the Real Discrete Fourier Transformation to the sampled signal
    and optionally multiplies it with a window function first.
    """

    def __init__(self, sample_size, window_size, output='fft', scaling='lin',
                 window_function='rectangle', window_overlap=0.0, weighting='Z',
                 **kwargs):
        kwargs['buffer_size'] = sample_size  # TODO: Explain
        # Overlapping windows: only the non-overlapping fraction of each
        # window is freshly sampled per step.
        kwargs['sample_size'] = int(numpy.floor(window_size * (1.0 - window_overlap)))
        super().__init__(**kwargs)

        self._window_size = window_size
        self._fft_frequencies = numpy.fft.rfftfreq(self.buffer_size,
                                                   1.0 / self._pulseaudio_client.sample_frequency)
        # rfft of a real buffer of length N yields N/2 + 1 bins.
        self._fft_values = numpy.zeros(int(self.buffer_size / 2) + 1, dtype='f')

        if output in ['fft', 'psd']:
            self._output = output
        else:
            raise PulsevizException('Unknown output type: {0}'.format(output))

        if scaling in ['lin', 'log']:
            self._scaling = scaling
        else:
            raise PulsevizException('Unknown scaling type: {0}'.format(scaling))

        # Fixed: the original compared with `is 'rectangle'`, an identity
        # check against a string literal that only works because of CPython
        # interning (and raises SyntaxWarning on modern interpreters).
        if window_function == 'rectangle':
            self._window_function = numpy.ones(self.buffer_size)
        elif window_function == 'hanning':
            self._window_function = numpy.hanning(self.buffer_size)
        else:
            raise PulsevizException('Unknown window function: {0}'.format(window_function))
        self._window_function_sum = numpy.sum(self._window_function)

        if self._pulseaudio_client.sample_format == SampleFormat.PA_SAMPLE_FLOAT32LE:
            self._reference_value = 1.0
        else:
            raise PulsevizException('Chosen sample format is not supported.')

        # Per-bin weighting in dB, precomputed once for the bin frequencies.
        self._fft_weights = numpy.array([calculate_frequency_weighting(f, weighting)
                                         for f in self._fft_frequencies])

    @property
    def frequencies(self):
        """Frequencies [Hz] of the FFT bins."""
        return self._fft_frequencies

    @property
    def values(self):
        """Most recent FFT (or PSD) values, one per bin."""
        return self._fft_values

    def _sample(self):
        super()._sample()
        self._fft_values[:] = numpy.abs(numpy.fft.rfft(self._window_function * self.buffer))

        if self._output == 'fft':
            pass
        elif self._output == 'psd':
            # Reminder for future-self:
            # Our Input signal is clamped between -1.0 and +1.0 yet if we convert the values above ^ into dB
            # we get values way beyond 0dB which makes no sense. Turns out we have to normalize the resulting
            # vector.
            # Thank you my hero: https://dsp.stackexchange.com/a/32080
            # TODO: This can be pre-calculated!
            self._fft_values[:] = numpy.power(self._fft_values * 2.0, 2) \
                / numpy.power(self._window_function_sum * self._reference_value, 2)
        else:
            raise PulsevizException('This should not happen.')

        if self._scaling == 'lin':
            pass
        elif self._scaling == 'log':
            # log10(0) would raise under 'raise'; ignore the divide warning
            # for this one expression, -inf bins are acceptable here.
            numpy.seterr(divide='ignore')
            self._fft_values[:] = 10.0 * numpy.log10(self._fft_values)
            numpy.seterr(all='raise')  # TODO: Use result of numpy.geterr instead?
        else:
            raise PulsevizException('This should not happen.')

        # Apply the frequency weighting: additive in dB for log scaling,
        # as a linear gain factor otherwise.
        if self._scaling == 'log':
            self._fft_values[:] += self._fft_weights
        else:
            self._fft_values[:] *= numpy.power(10, self._fft_weights / 20)  # TODO: Test this.
{"hexsha": "afff9850a7e702fad7a495b9b6ed5615f1a834e5", "size": 4689, "ext": "py", "lang": "Python", "max_stars_repo_path": "pulseviz/dsp/fft.py", "max_stars_repo_name": "drzraf/pulseviz.py", "max_stars_repo_head_hexsha": "25f7b4ab3466e98bca8b8e3cecd533c44837a76c", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 25, "max_stars_repo_stars_event_min_datetime": "2017-06-08T18:06:30.000Z", "max_stars_repo_stars_event_max_datetime": "2022-01-21T02:57:25.000Z", "max_issues_repo_path": "pulseviz/dsp/fft.py", "max_issues_repo_name": "drzraf/pulseviz.py", "max_issues_repo_head_hexsha": "25f7b4ab3466e98bca8b8e3cecd533c44837a76c", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "pulseviz/dsp/fft.py", "max_forks_repo_name": "drzraf/pulseviz.py", "max_forks_repo_head_hexsha": "25f7b4ab3466e98bca8b8e3cecd533c44837a76c", "max_forks_repo_licenses": ["MIT"], "max_forks_count": 2, "max_forks_repo_forks_event_min_datetime": "2021-04-01T02:04:51.000Z", "max_forks_repo_forks_event_max_datetime": "2021-11-15T12:15:57.000Z", "avg_line_length": 38.7520661157, "max_line_length": 110, "alphanum_fraction": 0.5977820431, "include": true, "reason": "import numpy", "num_tokens": 1139}
\documentclass[12pt]{report} \usepackage[T1]{fontenc} \usepackage[utf8]{inputenc} \usepackage{amsmath,amssymb,amsfonts} % Required for some math elements, equation environment and inline $ (math) $ \usepackage{makeidx} \usepackage{graphicx} % Required for the inclusion of images \usepackage{float} \usepackage{txfonts} %\usepackage{times} % Uncomment to use the chosen font \usepackage[skip=5pt,font={small,it},labelfont=bf]{caption} \usepackage{subcaption} \usepackage{siunitx} % Provides the \SI{}{} and \si{} command for typesetting SI units \usepackage{color} \usepackage[english]{babel} %%% SPRAWDZANIE PISOWNI EN %\usepackage[polish]{babel} %%% SPRAWDZANIE PISOWNI PL \usepackage[autostyle]{csquotes} %%% PAGE HEADERS COMMANDS %%% \usepackage{fancyhdr} % Page Headers and Footers \pagestyle{fancy} %page style with FancyHDR package \fancyhf{} % clear the header/footer style \lhead{\leftmark} % hdr left - standard marking (chapter names) \rhead{\thepage} % hdr right - page numbering %\setlength{\textwidth}{14cm} \setlength{\textheight}{20cm} % BIB REF COMMANDS \usepackage[nottoc,notlot,notlof]{tocbibind} \usepackage[backend=bibtex,style=numeric,autocite=plain,sorting=none]{biblatex} \addbibresource{references.bib} % CODE LISTINGS COMMANDS \usepackage{verbatim} % Pre-formatted text environment \usepackage[linesnumbered,ruled,vlined]{algorithm2e} % Algorithms with ctan algorithm2e \usepackage{listings} \definecolor{mygray}{rgb}{0.4,0.4,0.4} \definecolor{mygreen}{rgb}{0,0.6,0.4} \definecolor{myorange}{rgb}{0.8,0.2,0} \lstset{ basicstyle=\footnotesize\sffamily\color{black}, keywordstyle=\color{mygreen}, commentstyle=\color{mygray}, identifierstyle=\color{blue}, stringstyle=\color{myorange}, numbers=left, numbersep=5pt, numberstyle=\tiny\color{mygray}, frame=single, breakatwhitespace=false, breaklines=true, captionpos=t, keepspaces=true, columns=fullflexible, showstringspaces=false, float, tabsize=2, title=\lstname, caption=\lstname, language=C++ } 
%\newtheorem{definition}{Definicja} % przykład nowego środowiska %\newtheorem{example}{Przykład}[chapter] % przykład nowego środowiska %\newtheorem{corollary}{Wniosek}[chapter] % przykład nowego środowiska %%% TO DO notes %\usepackage[textsize=footnotesize]{todonotes} \usepackage[disable]{todonotes} \newcommand{\td}[1]{\todo[inline]{TO DO: #1}} %%% TOC HYPERLINKS %%% \usepackage{hyperref} % remember to use \tableofcontents after title \hypersetup{ breaklinks=true, colorlinks, citecolor=black, filecolor=black, linkcolor=black, urlcolor=black, pdftitle={Generating two-dimensional game maps with use of cellular automata} } \usepackage{breakurl} \usepackage{cleveref} %%% STRONA TYTUŁOWA - DANE \title{Generating two-dimensional game maps with use of cellular automata} \author{Michał Wolski} \begin{document} \maketitle \renewcommand{\contentsname}{Table of contents} \tableofcontents %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \chapter{Introduction} \label{rozdzial.wstep} During recent years, presence of computer games in human lives has increased. The amount of time spent on playing games by the modern society has shown that games are desirable both as a means for entertainment and a medium of expression. However, as the interest in games rises \footnote{The Interactive Software Federation of Europe compiles and publishes statistics which include frequency of gaming in European countries and show that demand for games is on the rise. \url{https://www.isfe.eu/industry-facts/statistics}} and computer games become increasingly complex, the need for game content must also rise. Elements such as believable maps, textures, sound and models (among other types of content) are a necessary resource for production of games. Studies such as \autocite{hendrikx2013procedural} show where the evidence for insufficiency of manual content creation may be found. 
In the study, the authors point to the work of Kelly and McCabe \autocite{kelly2007citygen}, Lefebvre and Neyret \autocite{lefebvre2003pattern}, Smelik et al. \autocite{smelik2009survey} and Iosup \autocite{iosup2009poggi} as sources which reveal game content production as a time-consuming and expensive endeavour. Hence, it is logical to conclude that information contained in studies and statistics on the topic of game development suggests that most projects aimed at creating games or simulations could benefit from seeking new or more efficient automated means of content creation. \subsubsection{Solving the inefficiency issue} In order to provide a solution to the inefficiency of manual content production, formal methods have emerged and are commonly referred to as \textit{procedural generation techniques}, defined by the literature as processes or methods of automatic content creation, through algorithmic mechanisms \autocite{togelius2011search} \autocite{yannakakis2015experience}. Scientific surveys such as \autocite{hendrikx2013procedural} and \autocite{smelik2009survey} show why investigating procedural generation is useful for the game industry, by providing examples of successful methods which can be used to generate content for games. Primary concerns which drive the interest in automated ways to create game content are the rising project costs and increasing development time. In order to reduce the cost of game development, allow for greater replay value or provide a feeling of vastness to the game worlds that designers aim to create, procedural content generation techniques can provide an attractive solution to the problem of content creation. Surveys such as \autocite{hendrikx2013procedural}, \autocite{togelius2011search} and \autocite{de2011survey} show what types of game content can be generated and are a good starting point for seeking methods of procedural generation.
%%%%%%%%%%%%%%% \section{Objectives} This thesis focuses on automated creation of 2-dimensional game maps using a cellular automata approach to generate small map tiles and merge them into a bigger map. Such approach allows for a degree of control to the map designer -- who may want to decide which tiles will be merged and at which locations in the map they will be present. Integrating manual editing and control over process parameters to achieve desired results has been proposed before in the works concerning procedural generation techniques \autocite{bidarra2010integrating}, \autocite{smelik2010integrating}, \autocite{smelik2011declarative}. Beginning experimentation with flat maps on 2-dimensional plane avoids the complexity that may arise when dealing with higher dimensions, hence the main aim is to develop a solution to the problem of automating planar map creation for games. Specifically, maps created by the generator should be planar and their layout must have characteristics of shapes that can be helpful in design of believable environments. These characteristics may include irregularity, asymmetry and imbalance. The goal is to create structures that are interesting because of their unpredictability, somewhat resembling the look of results produced by natural processes like erosion, dissolution, deformation, tectonic fragmentation or weathering. %%%%%%%%%%%%%%% \section{Thesis scope} The research carried out to support this thesis was focused on discovery of generative techniques presented in current literature describing methods of content generation for games. 
Work on preparing the solution has been divided into the following parts, described in the remainder of this thesis: \begin{itemize} \item research on procedural generation of planar maps and method selection \item design of a map generator program and its implementation in C++ \item experiments to find a set of rules that generate satisfying maps \item extending the feature set of the generator \end{itemize} %%%%%%%%%%%%%%% \section{Thesis structure} This thesis includes introduction followed by three chapters. \Cref{rozdzial.teoria} serves as a study on possible mechanisms that could be used for procedural generation and specifically, for creation of 2D maps for games. \Cref{rozdzial.praktyka} describes design and implementation of a solution to the problem along with performed experiments. \Cref{rozdzial.podsumowanie} summarizes the findings and concludes the thesis. %%%%%%%%%%%%%%% \section{Experimental setup} The solution that allowed to carry out experiments in this thesis was implemented using the C++ programming language and compiled with MSVC++ 14.0 compiler, natively included in the Visual Studio 2015 Community Edition IDE. Other tools and libraries used in the project: \begin{itemize} \item Dear ImGui, by Omar Cornut - to easily build an Immediate Mode user interface. Project homepage: \url{https://github.com/ocornut/imgui} \item GLFW 3.2.1 library - to create an OpenGL context and have direct access to texture functions. Project homepage: \url{http://www.glfw.org/} \end{itemize} In order to satisfy requirements for using OpenGL API function calls, it is recommended to use a dedicated graphics processing unit and install updated drivers. For the purposes of development and experiments \textit{nVidia GeForce GTX 560M } was used and drivers were updated to latest available versions. This thesis has been prepared with \LaTeX\space system for document typesetting, included diagrams were drawn with \textit{UMLet} - an open source modelling program. 
%%%%%%%%%%%%%%% \section{Abbreviations and acronyms} The following terms, abbreviations and acronyms have been used in the thesis. \begin{description} \item[ASCII] American Standard Code for Information Interchange - a character encoding standard, used to represent characters in computers and information systems \item[CA] Cellular Automaton (or Automata). A simulation consisting of cell objects. \item[GUI] Graphical User Interface \item[PCG] Procedural Content Generation. An automated process of creation. \item[RPG] Role-Playing Game \end{description} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \chapter{Research on 2D map generation methods} \label{rozdzial.teoria} %%%%%%%%%%%%%%% \section{Maps and cartography} Historically, maps have been used by the human race since ancient times. The need for navigation in the world has been a driving force behind the evolution of maps. Starting with cave paintings and representations of stars on the sky, our kind had the need for capturing an abstract model of a territory, terrain shape, location of useful resources or some other aspect of surrounding environment in a useful way. Making a model of the physical (or fictional) world with maps requires choice of the data types describing locations represented on the map. List of data types visualized with maps has been growing with the evolution of cartography and whenever new technologies have been introduced to the map making crafts. Some examples of data possible to represent on maps are: \begin{itemize} \item physical maps - terrain shape, elevation, forests, bodies of water, etc. 
\item political maps - borders around a territory, districts, states \item climate and weather maps - temperature, humidity, precipitation, wind currents \item geologic maps - terrain features, location of precious resources underground \item star maps - views of the distant cosmic objects measured by solid angles from a fixed point \item route maps - transport links, connections joining points on the modeled territory \end{itemize} Although early maps had the form of drawings or etchings on surface of solid materials, now there are other possibilities of representing the abstract model which a map aims to represent. The rise of digital maps and geographic information systems has opened new possibilities - maps have become dynamic entities, stored digitally, easily updated with new data and not limited to the boundaries of physical model. With digital maps, it is possible to show more than one layer of data, as chosen by the user, whereas physical maps are limited to the data and view scale chosen initially at the time when map was crafted. Despite the limitations, they still can serve well as a medium for storage of geographic information, requiring simpler processes during archival and conservation efforts \autocite{bagrow2017history}. \begin{figure}[h!] \centering \includegraphics[width=0.6\linewidth]{images/journal_netw_talk_map} \caption{Pure data map showing the geography of talk in Great Britain. Authors measured the total talk time via communication networks between areas in Britain and used the data to produce the visualization. Source article: \autocite{10.1371/journal.pone.0014248}} \label{fig:journalnetwtalkmap} \end{figure} Digital technology has also brought interesting methods to the art of crafting maps, allowing for new types of maps to be crafted, which brought previously undiscovered insights into the nature of represented territories. 
For example, with data maps such as the one described by article \autocite{10.1371/journal.pone.0014248} it is possible to draw more useful borders around regions, fitting actual human interaction groups as opposed to those defined by past governments, as shown in figure \cref{fig:journalnetwtalkmap}. Maps like these are generated from vast data sets, collected and stored in databases - an approach that would not be possible without use of digital technology. Since maps can emerge out of pure data, the same approach can be used to create fictional maps, out of generated data. The possibility of visualizing map layers which could not have been created and shown without digital data processing techniques and ease of experimentation with the information and algorithms used to create modern digital maps have also made it apparent that the source data for the layer itself do not necessarily have to measure some aspect of reality, but can be generated using mathematical methods. Such approach effectively allows for creation of fictional maps, representing imaginary territories. Moving on, the next section presents examples of fictional maps and their use in games, physical and digital alike. %%%%%%%%%%%%%%% \section{Maps in games} It is not clear what kind of game was the first one in history to use a map to represent the game world, however two notable examples may easily come into mind: Chess and Go, which are both widely known around the world. Chess, a board tactical war-game developed before 6th century AD, uses black-white board as the map of its world. Although the environment represented by a chess board is very simple, it has some important features and rules. The map is composed of square cells, which are arranged on 8 by 8 grid, effectively creating a rigid boundary around the game world, which according to game rules - cannot be crossed. Each cell has 8 neighbors and can be occupied by only one game piece. 
Another game, originating from ancient China, defines a similar, grid-based game world, effectively making a map of a uniform planar territory. Go is played on $19x19$ board, however smaller board sizes are used as well. In Go, the goal is to capture more territory than the opponent, which is done by placing game pieces on line intersections, one piece per turn. \begin{figure}[H] \centering \begin{subfigure}[t]{0.4\linewidth} \centering \includegraphics[height=4cm]{images/chessboard} \caption{Empty chess board} \end{subfigure} \hspace{1em} \hfill \begin{subfigure}[t]{0.4\linewidth} \centering \includegraphics[height=4cm]{images/goboard} \caption{Go board during gameplay} \end{subfigure} \hspace{1em} \caption{Chess and Go both use planar boards divided into tiles by a square grid - simple maps to represent the environment in which game is played. Sources: Chessboard image - own work; Go board image - \url{https://senseis.xmp.net/?LargeBoards}, distributed under the terms of the Open Content License } \label{fig:neighborhood_types} \end{figure} Another interesting example of a game world map is the multi-player strategy board game Risk, invented in 1957 by Albert Lamorisse, where the map represents a territory divided into regions, which must be captured by players in order to win. Risk game shows how a political world map with imagined region borders can be creatively used in a game, as a resource for the players to fight over. The game of Risk has been since published in many variations. Most of them share the same gameplay goal: to capture more territory than the opponents do, which is an example of how a game might use environments represented by maps as a limited resource for players to acquire. \begin{figure}[H] \centering \includegraphics[width=0.5\linewidth]{images/risk-rulebook} \caption{Risk rule book fragment, containing a photo of the game board. 
The board shows a world map with fictional political borders, dividing the map into regions, which serve as a resource for players to capture. Image source: photo taken from original Risk rule book, copyright Hasbro 1993} \label{fig:acrord32risk1} \end{figure} Modern board games have introduced many new ideas to the design of game boards. One example of such ideas is bringing modularity into the board design, composing pieces of the board similarly to how a jigsaw puzzle is composed of singular pieces. Such arrangement allows for greater value in replaying the game, since the game world can be different at each time the game is played. An example of such game is Carcassonne, published in 2000, designed by Klaus-Jürgen Wrede in Germany. The game of Carcassonne involves an interesting mechanism: rather than having a fixed game board, cards with tiles are used to construct the board during gameplay. The game rules around tile placement can be thought of as an algorithm of procedural generation: only one tile can be placed during each game turn, adjacent to other tiles, forming a connection with features that tiles represent - roads must connect to roads, fields to other fields, and cities to cities. The rules ensure that the players will develop the game board as the game progresses, which leads to an interesting observation: each turn, the players are presented with new territory to consider in their decisions - and since the game requires players to deploy their resources onto the constructed game map in order to accumulate score and eventually win, these decisions may often become quite challenging with increasing complexity of the board layout. There is a web version of Carcassonne available at \url{https://concarneau.herokuapp.com/game}. \begin{figure}[H] \centering \includegraphics[width=0.7\linewidth]{images/carcassonne} \caption{Carcassonne game board during gameplay. 
Players place tiles on gameplay surface once per turn, making sure that each new tile is compatible with surrounding tiles. Image source: \url{https://deerfieldlibrary.org/2016/01/carcassonne-a-modern-board-game-for-adults-teens/}} \label{fig:carcassonne} \end{figure} There are other board games which make use of generative mechanics to create or alter the board layout, before or during gameplay. Some of them rely on randomness, while others introduce rules by which the game world may be altered. An example of such rules is presented by Labyrinth, a board game designed by Max Kobbert and first released by Ravensburger company in 1986. The game board in Labyrinth represents a dungeon composed of tiles representing path elements: corners, straight paths, crossings. Each turn, the game board is altered by shifting an entire row or column of movable tiles in a direction chosen by player, in effect changing the board layout. \begin{figure}[H] \centering \includegraphics[width=0.7\linewidth]{images/labirynth} \caption{Labyrinth game board. Even rows and columns are movable (marked with a yellow triangle on board edges). Image source: \url{https://rulesofplay.co.uk/products/labyrinth} } \label{fig:labyrinth} \end{figure} While it is possible to find or develop interesting mechanics for board games, some more complex game rules and ideas are better implemented using computer simulation, where most of the mundane tasks which do not contribute to gameplay can be automated. Random number generation, board preparation and arrangement, checking player moves against game rules - all those activities are good candidates for automation. 
The other reason to simulate games may be to develop artificial intelligence algorithms which can simulate player behavior at a chosen level of competitive play, effectively providing a way for beginners who want to learn the game they are interested in or for veterans who want to develop their skills further, as has been done for chess and other classic board games. Among modern board games, there are many more examples which involve interesting mechanics, but their description lies beyond the scope of this thesis project. Next section investigates a few examples of computer games and simulations, where maps are used to construct some aspects of gameplay. %%%%%%%%%%%%%%% \section{Interactivity and automation } The evolution of personal computers has allowed players to enjoy a new form of entertainment - video and computer games. Possibility of performing real-time simulations on computers and development of computer graphics rendering techniques have created a new medium of expression in the form of computer software. At the time when early forms of interactive simulations were created, first computer games were also developed. In the past decades, when the earliest computer games have been created, an industry focused on the craft of game development has emerged. The efforts of game designers and developers have lead to creation of multiple game genres and have driven the evolution of mechanics and challenges that modern games can now offer to players. As stated in \cref{rozdzial.wstep}, the context of this thesis does not deal with projections of 3D objects onto a plane, like the fields of geography and cartography do, as shown by works similar to \autocite{snyder1993flattening}. The goal is to generate planar maps, which makes games that include them of particular interest for finding examples of working solutions to the problem. Early examples involving procedurally created maps are Rogue and Nethack, dungeon crawling games developed in 1980s. 
Both examples generate a set of rooms with randomized dimensions, which are then connected to each other by a system of corridors, as shown in \cref{fig:nethack}. \begin{figure}[H] \centering \includegraphics[width=0.7\linewidth]{images/nethack} \caption{An exemplary level, generated by Nethack game rules. Image source: Nethack project web page, \url{https://nethack.org/common/index.html}} \label{fig:nethack} \end{figure} Dungeon layouts generated by Rogue or Nethack could be described as suitable for representing indoor spaces; maps produced by connecting rooms typically do not contain irregular shapes like landmass forms found in depictions of natural terrain. It is, however, possible to use such mechanisms anyway. Generated levels found in Diablo series, Torchlight, Path of Exile and a few other similar action-RPG titles suggest that careful design of map components (textures, tiles, rooms) processed later by a generation mechanism can produce believable environments, regardless of what shapes are shown by a map of their layout. %%%%%%%%%%%%%%% \section{Existing solutions for generating planar maps} There have been scientific surveys conducted on PCG (Procedural Content Generation) methods, which describe approaches to map generation employed in the past by successful game development projects. Contents of three such surveys are summarized in the following paragraphs. Authors of the most general survey on PCG techniques \autocite{hendrikx2013procedural} point to other works for deeper exploration into methods involving generative grammars, genetic algorithms and hybrid approaches for generating indoor spaces. For synthesizing outdoor maps, the survey lists usage of image filtering, tiling, layering, fractals, Voronoi diagrams and cellular automata. In both cases, pseudo-random number generation and hybrid approaches are listed as tools which can be helpful in finding a promising solution. 
Another general survey on PCG methods suitable for games \autocite{de2011survey} explores the subject of environmental content, listing and describing methods for generation of landscapes, continents, cityscapes, road networks and rivers. Terrain generated with methods presented in the survey has properties of naturally occurring land features with surface roughness resembling real environments. Authors list the usage of noise algorithms, L-systems, fractals and combinations of these methods along with modifications based on natural process simulations (e.g. erosion) to generate believable terrains. Described methods are categorized into \textit{assisted} and \textit{non-assisted} techniques, the former of which can be controlled by parametrization of inputs to achieve the desired outcomes and often require human support. Methods listed for generation of roads, rivers and cities involve more advanced and complicated techniques, such as the work of \textit{Chen et al.} \autocite{Chen:2008:IPS:1360612.1360702} on guiding the generation of road graphs with tensor fields, cited by the survey. Most of these methods are not suitable for this thesis project due to their complexity or are specifically designed for 3D environments. Survey \autocite{van2014procedural} focuses specifically on generation of dungeon levels, listing and describing how cellular automata, generative grammars, genetic algorithms and constraint-based methods are used to create maps containing features of indoor spaces. The following sections shortly discuss the possibility of applying approaches found in described surveys to generation of planar maps with irregular shapes. Since the goal of this thesis is to build a generator which performs that process, the following sections focus on summarizing characteristics of cellular automata and generative grammars, as these approaches seem to be most promising for map generation purposes. 
\subsection{Cellular automata} A cellular automaton (CA) is a simulation in which every object in a mathematically defined space is being updated at every step of a simulation. Historically, cellular automata and their properties have been studied since the time of first computers \autocite{Sarkar:2000:BHC:349194.349202}. One of the most complete sources on cellular automata is a book summarizing research carried out by Stephen Wolfram since the 1980s \autocite{wolfram2002new}, where a classification is shown along with examples for each kind of CA. Specifically, 2-dimensional automata operate on a grid of cells with arbitrary discrete dimensions. Each cell in the grid has neighbors, which may be relevant to the simulation rules. Depending on the type of rules which are used by a particular CA, a different type of cell neighborhood may be used. To present this concept concisely, a short list of definitions follows. \begin{description} \item[Cell] Cells are units of state in CA simulation. Depending on CA type, they can be represented by simple values - a binary digit (0 or 1), an integer, a real or complex number with constraints, or other, more complicated value. \item[Cell neighborhood] In a context of a 2D square grid of cells, neighborhood is a collection of cells directly adjacent to the selected one. \item[Von Neumann neighborhood] Includes the cell and its immediate neighbours - one to the north, south, east and west of the cell, as shown in \cref{fig:neighborhood_types}. \item[Moore's neighborhood] Includes 8 closest neighbours of the cell - immediate and diagonal, as shown in \cref{fig:neighborhood_types}. 
\end{description} \begin{figure}[H] \centering \begin{subfigure}[t]{0.4\textwidth} \centering \includegraphics[width=0.5\textwidth]{images/neighborsmoore} \caption{Moore neighborhood} \end{subfigure} \hspace{1em} \hfill \begin{subfigure}[t]{0.4\textwidth} \centering \includegraphics[width=0.5\textwidth]{images/neighborsvonneumann} \caption{Von Neumann neighborhood} \end{subfigure} \hspace{1em} \caption{Two basic types of cell neighborhood. Source: own work} \label{fig:neighborhood_types} \end{figure} Every CA simulation uses rules which drive the process of cell evolution to its next stage. Typically, such rules define how board elements must be changed once a specific cell arrangement is recognized. The set of rules must cover all possible neighborhood patterns of a cell so that the value of each cell in a future step is unambiguous. An example of how rules for 2D cellular automaton operate is presented in \cref{fig:examplecarules}, but does not include all rules that such automaton might use. 
\begin{figure}[H] \centering \begin{subfigure}[t]{0.4\textwidth} \centering \includegraphics[width=0.5\textwidth]{images/rule0dead} \caption{Rule 3L0 - cell is set to empty when no neighbors are present} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.4\textwidth} \centering \includegraphics[width=0.5\textwidth]{images/rule2dead} \caption{Rule 3L0 - cell is set to empty if there are less than three neighbors} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.4\textwidth} \centering \includegraphics[width=0.5\textwidth]{images/rule3alive} \caption{Rule 31 - cell is set to filled if it has three neighbors} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.4\textwidth} \centering \includegraphics[width=0.5\textwidth]{images/rule3alive2} \caption{Rule 31 - same rule, behavior independent of neighbor positioning} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.4\textwidth} \centering \includegraphics[width=0.5\textwidth]{images/rule5alive} \caption{Rule 3M1 - a cell with 5 neighbors is filled} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.4\textwidth} \centering \includegraphics[width=0.5\textwidth]{images/rule6alive} \caption{Rule 3M1 - a cell with 6 neighbors is filled} \end{subfigure} \caption{A subset of rules for an unknown cellular automaton which uses Moore neighborhood. Rules are identified with two digits or symbols: the first one describes what value a sum of neighbors $n$ triggers a rule, which can be a conditional expression like $3L$ meaning $n < 3$ or $3M$, for $n > 3$. The other digit is simply the value to which a cell will be set when the rule is applied. 
Source: own work} \label{fig:examplecarules} \end{figure} Observing the presented subset, it should be intuitive to see rules in \cref{fig:examplecarules} as alternate versions of the same rule that sets the cell state according to a simple test: \begin{itemize} \item if selected cell has less than 3 neighbors, it remains empty \item otherwise, it becomes filled \end{itemize} However, not knowing the full set of rules for this particular automaton, it cannot be concluded if those rules can be reduced in such way. For example, there might be rules for arrangements with 4, 7 or 8 neighbors which fill the cell instead of emptying it or rules that behave differently depending on neighbor positions. Without information about the rule set, one cannot prove which properties of the cell neighborhood are measured during simulation, nor predict the results of automaton evolution. These properties may include the following: \begin{itemize} \item count of cells with state $s$ in neighborhood, \item state of selected cell, \item state of adjacent cells (values associated with each cell), \item positions of neighbors with state $s$ \end{itemize} An example demonstrating such difficulty in reasoning about CA rules and their consequences is one of the classic cellular automata simulations, the Game of Life\footnote{Some Game of Life simulation examples: one at MIT website \url{http://web.mit.edu/jb16/www/6170/gameoflife/gol.html} and another at \url{https://copy.sh/life/}}, invented by an American mathematician John Conway \autocite{conway1970game}. The simulation uses a square grid board, where cells can have one of two possible states: alive or dead. The rule set for Game of Life contains only three rules, based on counting alive neighbors of each cell: \begin{itemize} \item Alive cells with less than 2 alive neighbors become dead. \item Alive cells with 4 or more alive neighbors become dead. \item Dead cells with exactly 3 alive neighbors become alive. 
\end{itemize} \Cref{fig:applygol} shows an example of how these rules affect cells in a $5\times5$ board during one simulation step. Cells outside the grid edges are assumed dead. \begin{figure}[H] \centering \includegraphics[width=0.7\linewidth]{images/applygol} \caption{Illustration of step transition in Game of Life. Alive cells which become dead in next turn are marked red, dead cells that become alive are marked green. Coloured cells were assigned a number equal to the count of their alive neighbors. Source: own work} \label{fig:applygol} \end{figure} Despite being based on simple rules, Game of Life is able to generate complex visual patterns, such as those shown in \cref{fig:p3oscillators}. While many of such cell arrangements have been found accidentally, there are organized efforts focused on finding and cataloging them, such as the Life Wiki at \url{http://www.conwaylife.com/wiki/}. \begin{figure}[H] \centering \includegraphics[width=0.9\linewidth]{images/p3oscillators} \caption{A collection of patterns found in Game of Life, which oscillate with period of 3 simulation steps. Image source: adapted from \url{http://copy.sh/life/?pattern=period3oscillators}} \label{fig:p3oscillators} \end{figure} Cellular automata have been used in map generation mechanisms for games, some notable examples being Dwarf Fortress (2006), Minecraft (Mojang, 2011) and Ultima Ratio Regum (2011). Each of these games uses CA-based algorithms to generate map layers, regional structures or other types of data used to describe the game world. Examples of maps found in Dwarf Fortress and Ultima Ratio Regum show that CA-based approach to map generation can indeed yield sufficient results. Dwarf Fortress starts the generative process with randomized fractals \autocite{adams2015simulation} to define various aspects of the data layers from which the final map is built, but also uses a 3D cellular automaton to simulate behavior of fluids (water, lava). 
Minecraft uses fractal noise algorithms to generate an open world map along with other methods to determine the structure of terrain layers. Also, cellular automata are utilized to simulate fluid flow and possibly to propagate updates among certain world elements (blocks with state, simulated circuits). However, in order to develop a map generator using CA, a rule set which achieves stable board states is needed. Game of Life simulations show quick, but impermanent changes in local areas of the board, while allowing for creation of occasional stable structures. They are, however, somewhat rare, as \cref{fig:stilllife} shows, and may be easily broken by future simulation steps, which makes them insufficient to easily compose larger structures with randomized processes. \begin{figure}[H] \centering \includegraphics[width=0.6\linewidth]{images/stilllife} \caption{A collection of small ($n<8$, $n$ - number of cells in a pattern) still life patterns from Game of Life. Image source: adapted from \url{http://mathworld.wolfram.com/GameofLife.html}} \label{fig:stilllife} \end{figure} One of the possible approaches to map generation which can create irregular shapes is described in the article by L. Johnson, G. Yannakakis and J. Togelius from IT University of Copenhagen \autocite{johnson2010cellular}. Authors describe rules of a cellular automaton which is able to transform a tile filled initially with random distribution of cells into a map tile with desired properties which can later be merged with other tiles into the game map. Authors define types of cells to indicate which areas of the map are accessible (floor cells) and which are not (rock and wall cells). 
To achieve better control over the generation process, authors altered the following parameters: \begin{itemize} \item $r$ - percentage of floor cells in initial state, \item $n$ - count of CA generations (steps) to perform, \item $T$ - neighborhood threshold value which defines inaccessible cells, \item $M$ - Moore neighborhood size. \end{itemize} As described in the article, results depend on automaton rules and parameters selected to transform the tile into derived states. Authors claim that it is possible to create structure for playable maps with this approach. The article contains images of generated tiles which show irregular, cave-like shapes and provides an illustration of a map constructed from them, with a roughly shaped area with a number of narrow connections between grander spaces. \subsection{Shape grammars} Generative grammars are systems of rules and transformations which show ability to synthesize constructs out of a predefined set of symbols. Formal languages have been developed in the field of linguistics where Noam Chomsky laid theory foundations in 1950s. Its further expansion as a branch of applied mathematics has led to creation of many grammar sub types and variations \autocite{Harrison:1978:IFL:578595}. They have been later adapted by computer science for use in syntax analysis algorithms, compilers, debuggers, parsers and other applications. One of specific grammar sub types are shape grammars, applied to a certain class of problems in architecture, decorative arts and industrial design. Research on shape grammars performed by G. Stiny and J. Gips \autocite{Stiny1980IntroductionTS}, \autocite{stiny1971shape} and other scholars has been proven useful to computer scientists for development of interactive design frameworks \autocite{Dang:2015:IDP:2816795.2818069} among other applications. 
Shape grammars can be utilized as a formal foundation for PCG techniques and extended with probabilistic aspects \autocite{sportelli2014} to generate content for games. A domain specific language has also been developed on the foundations of grammar theory \autocite{Chen2018GIGLAD} for such purposes with success, as presented by \cref{fig:gigladomainspecificlanguagefor201904231555971576} adapted from the cited work. \begin{figure}[H] \centering \includegraphics[width=\textwidth]{images/GIGL_A_Domain_Specific_Language_for_2019_04_23_1555971576} \caption{Game levels generated with grammatical PCG. Image source: Work of Tiannan Chen, Stephen J. Guy on Grammatical Item Generation Language (GIGL) \autocite{Chen2018GIGLAD} } \label{fig:gigladomainspecificlanguagefor201904231555971576} \end{figure} As a formal system for shape transformation into more complex constructs, shape grammars are a viable tool for map generation. Despite being limited by sets of shapes and rules which must be prepared before such grammar is applied, results and examples generated by them in cited works show constructs that could be used in generated maps to create interesting game levels. %%%%%%%%%%%%%%% \section{Chosen method: cellular automata} Since this thesis is focused on creation of maps through procedural processes, it is worth noting why cellular automata have been chosen as the method to develop a solution. The following arguments show why CA might be a useful approach: \begin{itemize} \item CA can be successfully applied as map generation engines, as shown by \autocite{johnson2010cellular}, \item 2-dimensional CA can be implemented in a similar way to some image processing algorithms (e.g. erosion, dilation), \item CA are able to model evolution of dynamical systems (e.g. 
fluids), which may be useful to create maps of terrain similar to natural landmass forms, \item amount of repetition in shapes produced by grammars depends on how complex the set of transformations is \autocite{CHOMSKY1959137}, while CA can generate surprising, chaotic results with simple rules \autocite{wolfram1984cellular}. \end{itemize} Although grammars are favorable for clarity of transformation rules they bring to the process of map generation, CA can be a viable alternative, providing irregularity and surprise to possible results, as proven by the work of A. Khalifa and J. Togelius \autocite{Khalifa2017MarahelA} - which is why they have been chosen for experimentation in this thesis. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \chapter{Generating and visualizing maps - proposed solution} \label{rozdzial.praktyka} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% To describe the developed solution, this chapter consists of five sections: definition of required features, design of simple CA simulation and map generator models, followed by implementation in C++ and description of performed experiments. %%%%%%%%%%%%%%% \section{Analysis of requirements for a map generator} The approach chosen in \cref{rozdzial.teoria} can be imagined as a process that consists of several steps. Starting with generation of a board with random cell states, which after several transformations performed by CA rules becomes structured with irregular, island-like features. The resulting board is then used as one of tiles to be merged into the larger map. Each of these steps must be automated while allowing the designer a degree of control over the generation process. Transformations of the board should be applied according to parameters set by the designer, which must be constrained to ranges that do not allow creation of obviously unusable results (e.g. board filled with cells of identical state). 
Usable ranges of parameters can be determined with experimentation. To formalize what the map generator must do, required features and behaviors can now be extracted from the paragraph above, as follows: \subsubsection{Functional requirements} The map generator program shall perform the following functions: \begin{itemize} \item prepare square tiles with random data for transformation, \item simulate a cellular automaton to generate irregular shapes in tiles, \item have a mechanism to merge tiles into a bigger map, \item show tile states graphically, \item have controls to allow tweaking CA parameters. \end{itemize} \subsubsection{Operational requirements} The map generator program shall operate with the following qualities: \begin{itemize} \item real time drawing of tile and map images, whenever their state changes, \item separate interface elements to display tiles and maps, \item simple controls for switching rule sets, \item saving the history of tile changes and allowing tile selection from that sequence. \end{itemize} \subsection{Optional extensions} This section presents requirements not strictly required for a working solution. They may be optionally implemented or pushed into future development. \subsubsection{Tile editing and custom rule sets} A potentially useful feature would be manual editing of tiles before they are placed in the map, which would allow for an even greater degree of control over tile contents. Further expansion could be to provide the designer with tools to make their own rules for tile generation, while also allowing them to define types of single cells composing the tile. \subsubsection{Computation cost of tile generation} Since rules required for tile generation are applied to every cell in the tile grid identically, cells do not need to be updated in sequence. A possible improvement would be implementing cell state updates to be applied in parallel, which may reduce the time needed to compute a simulation step. 
One way to do so would be to apply the findings presented by Reno Fourie in his thesis about applying CUDA technology in 2-dimensional cellular automata simulations \autocite{fourie2015parallel}. \subsubsection{Exporting results of generation} Although exporting maps is not necessary for experimentation and testing, a production version of the program should have such functionality. A map generator without a mechanism for saving the work done by a designer would certainly not be a useful tool. Such a program needs a way to export generated maps to a file format that later can be used by a game engine of choice, possibly with procedures written by other programmers. \subsubsection{Automation} Since the goal of including procedural generation techniques into game production is often motivated by reducing the human effort needed to create assets, including tools for automating the process would be a welcome addition. Level designers would benefit from having a generator that eliminates the need for performing repetitive tasks, while still providing map layouts ready to be filled with other game assets and objects. \subsubsection{Non-binary cell states} Developed solution relies on a 2D cellular automaton with cells containing a binary state, which are sufficient for creation and interpretation of a map defining only which areas are passable and which are not. Game designers may however require additional map features to be generated. At that point, future experiments involving other types of cell state, such as integers or non-discrete numbers, might reveal other interesting results, especially with an interface for rule definition. %%%%%%%%%%%%%%% \section{Design and implementation} In \cref{rozdzial.teoria}, cellular automata have been chosen as a method for generating maps, so it is helpful to find resources on the topic of building cellular automata simulations. 
One of them is chapter 7 in Nature of Code, a book by Daniel Shiffman \autocite{shiffman2012nature}, where one can find a short tutorial on building a CA simulation. Author describes elementary concepts needed to construct a basic CA, explains how to implement a working simulation and provides helpful exercises. Another source, a book by Stephen Wolfram \autocite{wolfram2002new} shows advanced research, exploring concepts and organizing knowledge on cellular automata. \subsection{Basic cellular automaton} The initial step required to implement a generator based on cellular automata is to create a framework for performing CA simulations. As stated in Nature of Code \autocite{shiffman2012nature}, a 2-dimensional CA needs the following key elements: \begin{itemize} \item Cell state - every cell has a state updated on each simulation step, \item Grid - a space on which cells are placed, \item Neighborhood - each cell needs to know the state of its neighbors to update its state. \end{itemize} \subsubsection{Data structures} In order to represent state of a cell, a primitive data type is sufficient. Class model in \cref{fig:boardcell} presents an abstraction that can encapsulate a collection of cell states, along with two operations: retrieving the state of a cell in position $(x,y)$ from a grid and setting its state to a chosen value. \begin{figure}[H] \centering \includegraphics[width=0.8\linewidth]{diagrams/boardcell01} \caption{ Model of a \textit{Board} class, which holds cell states in its block of memory and lets its user change their states. Diagram source: own work} \label{fig:boardcell} \end{figure} \subsubsection{Operations} While the \textit{Board} class is sufficient to represent the grid of cells and their states, the concept of a neighborhood is missing. It can be captured and represented by creating a procedure that selects a cell, and treats its neighbors as if an identifier is assigned to each of them, as shown in table \cref{tab:cellneighbors}. 
\begin{table}[h] \centering \begin{tabular}{| c | c | c |}\hline 0 & 1 & 2 \\ \hline 7 & S & 3 \\ \hline 6 & 5 & 4 \\ \hline \end{tabular} \begin{tabular}{| c | c | c |}\hline $(-1,-1)$ & $( 0,-1)$ & $( 1,-1)$ \\ \hline $(-1, 0)$ & $( 0, 0)$ & $( 1, 0)$ \\ \hline $(-1, 1)$ & $( 0, 1)$ & $( 1, 1)$ \\ \hline \end{tabular} \caption{Table on left: Moore neighborhood, with numbered cells. \textit{S} denotes cell selected as a base for further operations. Table on right: Neighbour positions, relative to selected cell $(0,0)$ Source: own work} \label{tab:cellneighbors} \end{table} In CA simulations used for map generation by \autocite{johnson2010cellular}, summing the values of cells in neighborhood is a required operation. Hence, it is useful to include it into the \textit{Board} abstraction as part of its interface, which yields a class presented on \cref{fig:boardcell2} with added method for computing sum of adjacent cell values around a cell with $(x,y)$ coordinates. The $nht$ parameter can be used to control which cells are treated as neighbors. \begin{figure}[H] \centering \includegraphics[width=0.8\linewidth]{diagrams/boardcell02} \caption{Model of a \textit{Board} class with a method to sum neighbor values. Diagram source: own work} \label{fig:boardcell2} \end{figure} \subsubsection{Implementation} The model presented above is implemented by the \textit{Board} class. It allows to simulate some basic cellular automata based on rules which require a sum of values in cells adjacent to the selected cell to decide its future state. 
\textit{Board} class methods operate as follows: \begin{itemize} \item \textit{CellAt} can access cell at position $(x,y)$ in the grid model and return its value, as shown on \cref{lst:board-cellat}, \item \textit{SetCellAt} works similarly to the previous method, accessing the cell at position $(x,y)$ and writing a new value provided by \textit{newState} parameter, shown on \cref{lst:board-setcellat}, \item \textit{SumMooreNhd} computes a sum of cell values in a Moore neighborhood with radius \textit{rad} at position $(x,y)$, as shown on \cref{lst:board-sumneighbourhood}. \end{itemize} \pagebreak \lstinputlisting[language=c, caption=Accessing cells, label={lst:board-cellat}]{listings/board-cellat.txt} \lstinputlisting[language=c, caption=Setting cell states, label={lst:board-setcellat}]{listings/board-setcellat.txt} \lstinputlisting[language=c, caption=Summing adjacent cell values, label={lst:board-sumneighbourhood}]{listings/board-sumneighbourhood.txt} \subsection{Simulating CA rules} As described in \cref{rozdzial.teoria}, basic CA rules check the values of adjacent cells to a selected cell and determine its subsequent state. Separating the concept of a rule and its application from the cell grid structure simplifies modifying the CA rule set for further usage. \subsubsection{Data structures} Although applying rules to a grid of cells does not require a specialized data structure, a class to encapsulate procedures used to do so separated from \textit{Board} class can be useful for later development. The \textit{Ruleset} class presented in \cref{fig:ruleset-board} is used to produce a new board state by applying rules encoded in its internal functions, which are used by \textit{EvolveState} method. 
\begin{figure}[H] \centering \includegraphics[width=0.9\linewidth]{diagrams/ruleset-board} \caption{Model of \textit{Ruleset} class, which is meant to use a \textit{Board} instance to produce a new state of the cell grid in another \textit{Board} instance by rewriting its cell values. Source: own work} \label{fig:ruleset-board} \end{figure} Such abstraction allows to later add an internal data structure to hold separate rules for each possible neighborhood pattern, if needed. \subsubsection{Operations} As shown in \cref{fig:ruleset-board}, the operation which handles computation of a new cell grid state is named \textit{EvolveState}. It does so by calling one of its internal methods chosen by \textit{rules} parameter. \Cref{lst:evs} shows an implemented method which calls \textit{Board} methods to perform the following steps: \begin{enumerate} \item sum the values of cells adjacent to cell $(x,y)$, \item set the future $(x,y)$ cell state to 1 if the sum is less than 5, \item set the future $(x,y)$ cell state to 0 if the sum is more than 5. \end{enumerate} \lstinputlisting[language=c, caption=Applying rules to cell grid, label=lst:evs]{listings/evolvestate.txt} With basic CA operations organized this way, adding new rules requires implementing a new method in the \textit{Ruleset} class. Choosing a new set of rules is done via passing a correct parameter value to \textit{EvolveState} method, which can be done by binding the function call to user interface. \subsection{Presenting the board state} In order to visualize state of \textit{Board}, all cell values need to be drawn as an image. To achieve this, OpenGL and ImGui libraries are used to create an environment in which it is possible to construct interface elements and render textures. \subsubsection{Data structures} Before the cells can be represented as an image, their states must be converted to color values in one of the formats interpreted by OpenGL functions. 
In order to do so, a class \textit{SimpleTexture2D} has been prepared, as shown in \cref{fig:texture}. \begin{figure}[H] \centering \includegraphics[width=0.9\linewidth]{diagrams/texture} \caption{Model of a \textit{SimpleTexture2D} class, which contains a collection of \textit{Color RGBA} elements. Source: own work} \label{fig:texture} \end{figure} Colors are represented by a 32-bit unsigned integer, with 8 bits dedicated to each color channel (Red, Green, Blue) and additional 8 bits for the opacity (Alpha) channel, defined by type \textit{Color RGBA} as shown in \cref{lst:colourRGBA}. \lstinputlisting[language=c, caption=Definition of ColorRGBA type, label={lst:colourRGBA}]{listings/colourRGBA.txt} In order to allow the program user a degree of control over the generation process, a rudimentary user interface has been prepared. \Cref{fig:uiwindows} shows the model of its classes. The class named \textit{UserInterface} acts as an update and rendering controller for each of the windows represented by the other classes in the model, which can call functions bound to UI controls they contain and show a visual representation of a \textit{Board} or \textit{Map} by using the data encapsulated by \textit{SimpleTexture2D}. \begin{figure}[H] \centering \includegraphics[width=0.8\linewidth]{diagrams/uiwindows} \caption[User Interface model]{UI class model in map generator program. Source: own work} \label{fig:uiwindows} \end{figure} \subsubsection{Operations} Methods provided by \textit{SimpleTexture2D} class allow to: \begin{itemize} \item set individual color values at specified position in the \textit{texelsRGBA} collection, necessary for rendering the \textit{Board} state \item change \textit{texelsRGBA} size, to allow support for \textit{Board}s of different sizes, \item set all color values in the collection to a specified value, \item request rendering of the contained data by OpenGL API. 
\end{itemize} Such set of operations allows to achieve the goal of simple 2-dimensional image rendering, which can be used to visualize the state of all cells contained in one of the \textit{Board} class instances. Drawing cell states in tile generation and map grid windows is achieved by the \textit{Board} class method, \textit{DrawCellsToTexture}, described previously. Additionally, \textit{Map} provides operations for drawing its state depending on the selected view mode - \textit{DrawMap} and \textit{DrawTileAt}. \subsection{Generating and merging tiles into maps} Generating tiles for map construction can be performed with methods implemented by \textit{TileGenerator} and \textit{Ruleset} classes. Internal mechanics of the \textit{TileGenerator} class allow it to initialize cells in the first generation to randomly selected states and then to use \textit{Ruleset} class to generate future states of the board. \textit{Ruleset} provides a set of rules which describe the possible transformations of a cell depending on the state of its neighborhood, as described in previous sections. Tiles prepared by \textit{TileGenerator} can be transferred into the grid of tiles represented by the \textit{Map} class. \subsubsection{Data structures} The map construction process is simulated by \textit{TileGenerator} and \textit{Map} classes. An ordered collection of \textit{Board} states is contained in both of them, but for different purposes. \textit{TileGenerator} uses the collection as a history of \textit{Board} state transformations. \textit{Map} uses its collection of \textit{Board} instances as a tile grid representation. \Cref{fig:tilegeneratormap} shows how these classes relate to each other. \begin{figure}[H] \centering \includegraphics[width=0.9\linewidth]{diagrams/tilegenerator_map} \caption{ A model of interactions between Tile Generator, Board and Map classes. 
Source: own work} \label{fig:tilegeneratormap} \end{figure} The \textit{Map} class represents its state in two ways - as a collection of \textit{Board} states and as a single \textit{Board} instance with dimensions equal to tile width and height multiplied by their count along the Map borders - for example, a square map composed of 25 tiles with 64 by 128 size would have a width of 320 and a height of 640. Such dual representation allows the user to switch between two views onto the map - a tile grid view and a complete map view. \subsubsection{Generating tiles} Initializing the first \textit{Board} contained in \textit{TileGenerator} class with random states enables the generator to use it as a base for further transformations, while also ensuring that each generated tile will be different, which is implemented by \textit{InitGenAllRandom} method contained in \textit{TileGenerator}. After the initial state of the \textit{Board} is prepared, \textit{TileGenerator} can then use \textit{GenerateSteps} method to create \textit{Board} states following the initial one by applying operations defined in and selected by current \textit{Ruleset}. \Cref{lst:rules} shows functions which transform cells, of which the first one applies following rules, where $n$ is a sum of adjacent cell values: \begin{itemize} \item $wall$ state with value 1 is set for cells with $n < 5$, \item $floor$ state with value 0 is set for cells with $n > 5$. \end{itemize} Additional sets of rules should be defined similarly, by adding more methods to the \textit{Ruleset} class. \lstinputlisting[language=c, caption=Tile generation rules, label={lst:rules}]{listings/rules_mapgen.txt} Generated tiles are saved in an ordered collection, to allow viewing the history of generation steps, The rules used to generate these tiles show an interesting property - after a few initial steps, values of most cells begin to alternate between $floor$ and $wall$ states. 
\begin{figure}[H] \centering \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/s_1} \caption{Step 1 - random noise} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/s_2} \caption{Step 2} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/s_3} \caption{Step 3} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/s_4} \caption{Step 4} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/s_5} \caption{Step 5} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/s_6} \caption{Step 6 - partially balanced tile, rough edges} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/s_7} \caption{Step 7} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/s_8} \caption{Step 8 - balanced tile, smooth edges} \end{subfigure} \hspace{1em} \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/s_9} \caption{Step 9 - further transformation} \end{subfigure} \hspace{1em} \label{fig:gensteps} \caption{A sequence of tile evolution steps, starting with a randomized board state. Each step involves computation of future cell states with rules producing wall for $n < 5$ and floor for $n > 5$. Source: own work} \end{figure} Such emergent condition can be thought of as a \textit{balanced state} of a tile, since rules seemingly cannot further modify the tile after a sufficient number of transformations. It is however possible that map designers may decide to use a tile before it becomes \textit{balanced}. 
Furthermore, exploring different sets of rules for tile generation may reveal better rules for creation of useful tiles. Amount of time required to find them can be reduced by providing a user interface for rule definition and management. \subsubsection{Merging tiles} The process of merging generated tiles into a map consists of several steps and begins with the decision to transfer a tile from the generator to map construction window. Copying a tile state from the \textit{TileGenerator} collection to the \textit{Map} grid requires user interaction. Any tile in the window showing current \textit{Map} state can be clicked by the user, which invokes the \textit{TileReplace} method of the \textit{Map} class, effectively copying the tile state visible in tile generation window to map grid. \begin{figure}[H] \centering \begin{subfigure}[t]{0.9\textwidth} \centering \includegraphics[height=4cm]{images/mapnotiles} \caption{Initial state - blank map, no tile was placed} \end{subfigure} \begin{subfigure}[t]{0.9\textwidth} \centering \includegraphics[height=4cm]{images/map4tiles} \caption{Partially filled map - after copying 4 prepared tiles} \end{subfigure} \caption{Tile and map windows used to construct maps from generated tiles. Source: own work} \label{fig:map_merging} \end{figure} Once the designer decides that the tile prepared in the generation window is a sufficient candidate to be used in a map, its state can be copied to map grid by clicking a square slot in map window. Such action overwrites all cell states in selected space with those contained in the prepared tile. \Cref{fig:map_merging} shows two situations, where map designer starts with a blank map and proceeds to fill its grid with some of the tiles that were generated. Map construction process can be then completed when designers switch map representation mode, which is done by function bound to button labeled as \textit{Join tiles into map}. 
Doing so also reveals other buttons provided by map window, which can be used to make further adjustments. Switching the mode effectively copies the cells from each tile formerly placed in the grid into a bigger \textit{Board} and renders its visualization instead of tile grid. %%%%%%%%%%%%%%% \section{Experiments in generating maps with CA} After implementing the map generator, it is possible to carry out experiments to find the optimal parameter ranges for map generation. The first experiment carried out using the generator was to build a 5x5 map out of tiles generated with following number of CA steps: \begin{itemize} \item outer ring (edge) - 2 steps, \item inner ring tiles - 5 steps, \item central tile - 11 steps. \end{itemize} \begin{figure}[H] \centering \includegraphics[height=5cm]{images/ex1} \caption{Map constructed from a 5x5 grid of generated tiles. Source: own work} \label{fig:ex1} \end{figure} The resulting map shown in \cref{fig:ex1} has following properties: \begin{itemize} \item impassable areas are encountered more frequently as distance from central tile increases, \item impassable areas have greater sizes closer to central tile, \item there are no obstacles connecting those areas to form maze-like structures. \end{itemize} Other experimental maps, shown in \cref{fig:ex2} illustrates that controlled placement of tiles produces imbalanced and asymmetric forms. However, a problematic property emerges when tiles placed in the map are packed with wall cells. Edges of those tiles produce straight lines which are not modified at any step of the performed transformations. A possible cause is that the active rule set did not modify cells which have exactly 5 adjacent walls. 
\begin{figure}[th] \centering \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/ex2a} \caption{} \end{subfigure} \hfill \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/ex2b} \caption{} \end{subfigure} \hfill \begin{subfigure}[t]{0.3\textwidth} \centering \includegraphics[height=4cm]{images/ex2c} \caption{} \end{subfigure} \caption{Other experimental maps: 5x5 grid, tile placement controlled by user. Source: own work} \label{fig:ex2} \end{figure} \begin{figure}[h] \centering \includegraphics[width=0.8\textwidth]{images/r2ex1} \caption{Left: one of generated tiles with only one transformation rule: if n > 12, set cell to wall state. Right: construction of experimental map on 5x5 grid. Source: own work} \label{fig:r2ex1} \end{figure} Another experiment, shown in \cref{fig:r2ex1}, uses rules where the radius of Moore neighborhood is increased $(r = 2)$ and walls are placed whenever there are more than 12 adjacent walls to the central cell. In this case, islands produced by transformation are not completely solid, which may be a useful feature to map designers. Comparing \cref{fig:r2ex1} with \cref{fig:ex3rst2} shows how removal or addition of one rule may affect shapes found in generated tiles. Images in this section illustrate the utility of employing CA for generating map assets, however finding the right rules and parameters may require additional trials. \begin{figure}[h] \centering \includegraphics[width=0.8\textwidth]{images/ex3rst2} \caption{Another experiment with map construction. Two rules applied with neighborhood of $r = 2$, placement of walls for $n > 12$, floor for $n < 12$. 
Source: own work} \label{fig:ex3rst2} \end{figure} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \chapter{Conclusions} \label{rozdzial.podsumowanie} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% The aim of this thesis was to provide a software solution which uses cellular automata for constructing maps. Although the goal of creating a program for map construction has been achieved, questions stand: \begin{itemize} \item Are cellular automata useful for generation of interesting, surprising and unique maps? \item What kind of improvements might be necessary to improve the generator and its mechanisms to satisfy map designers? \item Is the developed solution sufficient for use in production projects? \item Which requirements have been met? \end{itemize} %%%%%%%%%%%%%%% \section{Results} Observing the results presented in figures produced by implemented generator reveal that cellular automata can be used for generation of irregular, surprising shapes. Tiles generated by simple rules from random noise have unique layouts, most of them are useful for constructing a map. However, whenever tiles are packed with impassable cells, merging them together produces unnatural, straight edges. A better method for merging tiles is required to eliminate those effects. A degree of control achieved by introducing a graphical user interface (shown in \cref{fig:interfacemapgen}) has provided a visual representation of how each tile evolves and an ability to select tiles with desired shapes for the map being constructed. Widgets of the interface provide control over some of generation parameters, which is useful for experimentation. Including new widgets for controlling other possible parameters and defining rules for the automaton would be vital for exploring what other kinds of shapes can be generated with cellular automata. 
Maps created with implemented generator contain emergent structures of cells shaped like naturally occurring caves, corridors, rock formations or coastlines. \begin{figure}[h] \centering \includegraphics[width=\linewidth]{images/interface_mapgen} \caption{The interface of developed Map Generator program. Source: own work} \label{fig:interfacemapgen} \end{figure} %%%%%%%%%%%%%%% \section{Acknowledgments} The author of this thesis would like to express gratitude with special thanks to those who have encouraged him to continue the project, for their continued support and kindness. %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% %%% KONIEC PRACY %%%%%%%%%%%%% %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \printbibliography[heading=bibintoc] \addcontentsline{toc}{chapter}{List of figures} \listoffigures \addcontentsline{toc}{chapter}{List of tables} \listoftables \addcontentsline{toc}{chapter}{Attachments} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% \chapter*{Attachments} \begin{enumerate} \item \item \item \item \end{enumerate} \end{document}
{"hexsha": "ad27042c7cf0445faf9e6b245a05298096ee498e", "size": 69360, "ext": "tex", "lang": "TeX", "max_stars_repo_path": "docs/mw_engineering_thesis.tex", "max_stars_repo_name": "mikkelist/legendary-telegram", "max_stars_repo_head_hexsha": "19c1d0b308bcc9a6871d4b16a6e716bf901f0bd3", "max_stars_repo_licenses": ["MIT"], "max_stars_count": 4, "max_stars_repo_stars_event_min_datetime": "2018-04-21T22:28:17.000Z", "max_stars_repo_stars_event_max_datetime": "2018-09-13T12:02:46.000Z", "max_issues_repo_path": "docs/mw_engineering_thesis.tex", "max_issues_repo_name": "mikkelist/legendary-telegram", "max_issues_repo_head_hexsha": "19c1d0b308bcc9a6871d4b16a6e716bf901f0bd3", "max_issues_repo_licenses": ["MIT"], "max_issues_count": null, "max_issues_repo_issues_event_min_datetime": null, "max_issues_repo_issues_event_max_datetime": null, "max_forks_repo_path": "docs/mw_engineering_thesis.tex", "max_forks_repo_name": "mikkelist/legendary-telegram", "max_forks_repo_head_hexsha": "19c1d0b308bcc9a6871d4b16a6e716bf901f0bd3", "max_forks_repo_licenses": ["MIT"], "max_forks_count": null, "max_forks_repo_forks_event_min_datetime": null, "max_forks_repo_forks_event_max_datetime": null, "avg_line_length": 77.0666666667, "max_line_length": 1242, "alphanum_fraction": 0.7868800461, "num_tokens": 16365}
#!/usr/bin/env python # Copyright 2016 Vijayaditya Peddinti # 2016 Vimal Manohar # Apache 2.0. from __future__ import division import argparse import errno import logging import os import re import sys import warnings sys.path.insert(0, "steps") import libs.nnet3.report.log_parse as log_parse import libs.common as common_lib try: import matplotlib as mpl mpl.use("Agg") import matplotlib.pyplot as plt import numpy as np from matplotlib.patches import Rectangle # matplotlib issue https://github.com/matplotlib/matplotlib/issues/12513 # plt.subplot() generates a false-positive warninig, suppress it for now. from matplotlib.cbook import MatplotlibDeprecationWarning warnings.filterwarnings( "ignore", category=MatplotlibDeprecationWarning, message="Adding an axes using the same arguments", ) g_plot = True except ImportError: g_plot = False logging.basicConfig( format="%(filename)s:%(lineno)s:%(levelname)s:%(message)s", level=logging.INFO ) logger = logging.getLogger(__name__) def get_args(): parser = argparse.ArgumentParser( prog=sys.argv[0], # By default, prog is set this to filename only. 
formatter_class=type( "", ( argparse.RawDescriptionHelpFormatter, argparse.ArgumentDefaultsHelpFormatter, ), {}, ), description="Parses the training logs and generates a variety of plots.\n" "e.g.: %(prog)s \\\n" " exp/nnet3/tdnn exp/nnet3/tdnn1 exp/nnet3/tdnn2 exp/nnet3/tdnn/report.\n" "The report file 'report.pdf' will be generated in the <output_dir> directory.", ) parser.add_argument( "--start-iter", type=int, metavar="N", default=1, help="Iteration from which plotting will start.", ) parser.add_argument( "--is-chain", type=common_lib.str_to_bool, default="false", metavar="BOOL", help="Set to 'true' if <exp_dir>s contain chain models.", ) parser.add_argument( "--is-rnnlm", type=common_lib.str_to_bool, default="false", metavar="BOOL", help="Set to 'true' if <exp_dir>s contain RNNLM.", ) parser.add_argument( "--output-nodes", type=str, metavar="NODES", action=common_lib.NullstrToNoneAction, help="List of space separated <output-node>:<objective-type> entries, " "one for each output node", ) parser.add_argument( "--comparison-dir", type=str, metavar="DIR", action="append", help="[DEPRECATED] Experiment directories for comparison. " "These will only be used for plots, not tables.", ) parser.add_argument( "exp_dir", nargs="+", help="The first <exp_dir> is the current experiment directory, e.g. " "'exp/nnet3/tdnn'; the rest are up to 6 optional directories of other " "experiments to be graphed on same plots for comparison.", ) parser.add_argument( "output_dir", help="output directory for reports, e.g. 'exp/nnet3/tdnn/report'" ) args = parser.parse_args() if (args.comparison_dir is not None and len(args.comparison_dir) > 6) or ( args.exp_dir is not None and len(args.exp_dir) > 7 ): raise Exception( "Up to 6 comparison directories may be specified. " "If you want to compare with more experiments, you would have to carefully tune " "the plot_colors variable which specified colors used for plotting." 
) assert args.start_iter >= 1 if args.is_chain and args.is_rnnlm: raise Exception("Options --is-chain and --is-rnnlm cannot be both true.") return args g_plot_colors = ["red", "blue", "green", "black", "magenta", "yellow", "cyan"] class LatexReport(object): """Class for writing a Latex report""" def __init__(self, pdf_file): self.pdf_file = pdf_file self.document = [] self.document.append( r""" \documentclass[prl,10pt,twocolumn]{revtex4} \usepackage{graphicx} % Used to import the graphics \begin{document} """ ) def add_figure(self, figure_pdf, title): """we will have keep extending this replacement list based on errors during compilation escaping underscores in the title""" title = r"\texttt{" + re.sub("_", "\_", title) + "}" fig_latex = ( r""" %... \newpage \begin{figure}[h] \begin{center} \caption{""" + title + r"""} \includegraphics[width=\textwidth]{""" + figure_pdf + r"""} \end{center} \end{figure} \clearpage %... """ ) self.document.append(fig_latex) def close(self): self.document.append(r"\end{document}") return self.compile() def compile(self): root, ext = os.path.splitext(self.pdf_file) dir_name = os.path.dirname(self.pdf_file) latex_file = root + ".tex" lat_file = open(latex_file, "w") lat_file.write("\n".join(self.document)) lat_file.close() logger.info("Compiling the LaTeX report.") try: common_lib.execute_command( "pdflatex -interaction=batchmode " "-output-directory={0} {1}".format(dir_name, latex_file) ) except Exception as e: logger.warning( "There was an error compiling LaTeX file %s. " "Check report.log generated by pdflatex in the same directory. %s", latex_file, e, ) return False return True def latex_compliant_name(name_string): """this function is required as latex does not allow all the component names allowed by nnet3. Identified incompatibilities : 1. latex does not allow dot(.) 
in file names """ node_name_string = re.sub("\.", "_dot_", name_string) return node_name_string def generate_acc_logprob_plots( exp_dir, output_dir, plot, key="accuracy", file_basename="accuracy", comparison_dir=None, start_iter=1, latex_report=None, output_name="output", ): assert start_iter >= 1 if plot: fig = plt.figure() plots = [] comparison_dir = [] if comparison_dir is None else comparison_dir dirs = [exp_dir] + comparison_dir index = 0 for dir in dirs: [report, times, data] = log_parse.generate_acc_logprob_report( dir, key, output_name ) if index == 0: # this is the main experiment directory with open("{0}/{1}.log".format(output_dir, file_basename), "w") as f: f.write(report) if plot: color_val = g_plot_colors[index] data = np.array(data) if data.shape[0] == 0: logger.warning( "Couldn't find any rows for the" "accuracy/log-probability plot, not generating it" ) return data = data[data[:, 0] >= start_iter, :] (plot_handle,) = plt.plot( data[:, 0], data[:, 1], color=color_val, linestyle="--", label="train {0}".format(dir), ) plots.append(plot_handle) (plot_handle,) = plt.plot( data[:, 0], data[:, 2], color=color_val, label="valid {0}".format(dir) ) plots.append(plot_handle) index += 1 if plot: plt.xlabel("Iteration") plt.ylabel(key) lgd = plt.legend( handles=plots, loc="lower center", bbox_to_anchor=(0.5, -0.2 + len(dirs) * -0.1), ncol=1, borderaxespad=0.0, ) plt.grid(True) fig.suptitle("{0} plot for {1}".format(key, output_name)) figfile_name = "{0}/{1}_{2}.pdf".format( output_dir, file_basename, latex_compliant_name(output_name) ) plt.savefig(figfile_name, bbox_extra_artists=(lgd,), bbox_inches="tight") if latex_report is not None: latex_report.add_figure( figfile_name, "Plot of {0} vs iterations for {1}".format(key, output_name), ) # The name of five gates of lstmp g_lstm_gate = ["i_t_sigmoid", "f_t_sigmoid", "c_t_tanh", "o_t_sigmoid", "m_t_tanh"] # The "extra" item is a placeholder. 
# As each unit in python plot is
# composed by a legend_handle(linestyle) and a legend_label(description).
# For the unit which doesn't have linestyle, we use the "extra" placeholder.
if g_plot:
    # An invisible rectangle, used purely as a spacer entry in plot legends.
    extra = Rectangle(
        (0, 0), 1, 1, facecolor="w", fill=False, edgecolor="none", linewidth=0
    )


# This function is used to insert a column to the legend, the column_index is 1-based
def insert_a_column_legend(
    legend_handle, legend_label, lp, mp, hp, dir, prefix_length, column_index
):
    """Insert one legend column (placeholder + 5th/50th/95th percentile line
    handles for *dir*) into the legend_handle/legend_label lists in place.

    The directory label is shortened by dropping the common *prefix_length*
    characters shared by all plotted directories.
    """
    handle = [extra, lp, mp, hp]
    label = ["[1]{0}".format(dir[prefix_length:]), "", "", ""]
    # Interleave this column's four rows at positions column_index*row - 1
    # so columns render side by side in the ncol=4 legend layout.
    for row in range(1, 5):
        legend_handle.insert(column_index * row - 1, handle[row - 1])
        legend_label.insert(column_index * row - 1, label[row - 1])


# This function is used to plot a normal nonlinearity component or a gate of lstmp
def plot_a_nonlin_component(
    fig,
    dirs,
    stat_tables_per_component_per_dir,
    component_name,
    common_prefix,
    prefix_length,
    component_type,
    start_iter,
    gate_index=0,
    with_oderiv=0,
):
    """Draw percentile curves for one nonlinearity component into *fig*.

    For each directory in *dirs*, plots the 5th/50th/95th percentile of the
    per-dimension average value (top subplot) and derivative (next subplot)
    against iteration; when with_oderiv is truthy a third subplot for the
    output derivative (oderiv) is added and the stats rows use a wider
    layout (column offsets shift from +5.. to +7..).  *gate_index* selects
    a 10-column slice of the stats row, used for the five LSTM gates.

    Returns the matplotlib legend object (so callers can pass it to
    savefig's bbox_extra_artists).
    """
    fig.clf()
    index = 0
    legend_handle = [extra, extra, extra, extra]
    legend_label = ["", "5th percentile", "50th percentile", "95th percentile"]
    if not with_oderiv:
        for dir in dirs:
            color_val = g_plot_colors[index]
            index += 1
            try:
                iter_stats = stat_tables_per_component_per_dir[dir][component_name]
            except KeyError:
                # this component is not available in this network so lets
                # not just plot it
                # NOTE(review): lp/mp/hp are only bound by a previous loop
                # iteration; if the FIRST dir lacks this component this
                # raises NameError -- confirm callers only pass common
                # component names (generate_nonlin_stats_plots intersects
                # component sets before plotting).
                insert_a_column_legend(
                    legend_handle,
                    legend_label,
                    lp,
                    mp,
                    hp,
                    dir,
                    prefix_length,
                    index + 1,
                )
                continue
            data = np.array(iter_stats)
            # Drop rows before start_iter (column 0 is the iteration number).
            data = data[data[:, 0] >= start_iter, :]
            # Top subplot: value percentiles (dashed = 5th/95th, solid = 50th).
            ax = plt.subplot(211)
            (lp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 5],
                color=color_val,
                linestyle="--",
            )
            (mp,) = ax.plot(
                data[:, 0], data[:, gate_index * 10 + 6], color=color_val, linestyle="-"
            )
            (hp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 7],
                color=color_val,
                linestyle="--",
            )
            insert_a_column_legend(
                legend_handle, legend_label, lp, mp, hp, dir, prefix_length, index + 1
            )
            ax.set_ylabel("Value-{0}".format(component_type))
            ax.grid(True)
            # Bottom subplot: derivative percentiles.
            ax = plt.subplot(212)
            (lp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 8],
                color=color_val,
                linestyle="--",
            )
            (mp,) = ax.plot(
                data[:, 0], data[:, gate_index * 10 + 9], color=color_val, linestyle="-"
            )
            (hp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 10],
                color=color_val,
                linestyle="--",
            )
            ax.set_xlabel("Iteration")
            ax.set_ylabel("Derivative-{0}".format(component_type))
            ax.grid(True)
        lgd = plt.legend(
            legend_handle,
            legend_label,
            loc="lower center",
            # Push the legend further below the axes as more dirs are added.
            bbox_to_anchor=(0.5, -0.5 + len(dirs) * -0.2),
            ncol=4,
            handletextpad=-2,
            title="[1]:{0}".format(common_prefix),
            borderaxespad=0.0,
        )
        plt.grid(True)
    else:
        # Stats rows include oderiv columns: three subplots, offsets start at +7.
        for dir in dirs:
            color_val = g_plot_colors[index]
            index += 1
            try:
                iter_stats = stat_tables_per_component_per_dir[dir][component_name]
            except KeyError:
                # this component is not available in this network so lets
                # not just plot it
                # NOTE(review): same potential NameError on lp/mp/hp as in
                # the branch above if the first dir lacks this component.
                insert_a_column_legend(
                    legend_handle,
                    legend_label,
                    lp,
                    mp,
                    hp,
                    dir,
                    prefix_length,
                    index + 1,
                )
                continue
            data = np.array(iter_stats)
            data = data[data[:, 0] >= start_iter, :]
            # Subplot 1/3: value percentiles.
            ax = plt.subplot(311)
            (lp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 7],
                color=color_val,
                linestyle="--",
            )
            (mp,) = ax.plot(
                data[:, 0], data[:, gate_index * 10 + 8], color=color_val, linestyle="-"
            )
            (hp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 9],
                color=color_val,
                linestyle="--",
            )
            insert_a_column_legend(
                legend_handle, legend_label, lp, mp, hp, dir, prefix_length, index + 1
            )
            ax.set_ylabel("Value-{0}".format(component_type))
            ax.grid(True)
            # Subplot 2/3: derivative percentiles.
            ax = plt.subplot(312)
            (lp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 10],
                color=color_val,
                linestyle="--",
            )
            (mp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 11],
                color=color_val,
                linestyle="-",
            )
            (hp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 12],
                color=color_val,
                linestyle="--",
            )
            ax.set_ylabel("Derivative-{0}".format(component_type))
            ax.grid(True)
            # Subplot 3/3: output-derivative (oderiv) percentiles.
            ax = plt.subplot(313)
            (lp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 13],
                color=color_val,
                linestyle="--",
            )
            (mp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 14],
                color=color_val,
                linestyle="-",
            )
            (hp,) = ax.plot(
                data[:, 0],
                data[:, gate_index * 10 + 15],
                color=color_val,
                linestyle="--",
            )
            ax.set_xlabel("Iteration")
            ax.set_ylabel("Oderivative-{0}".format(component_type))
            ax.grid(True)
        plt.subplots_adjust(top=0.8, hspace=1.0, bottom=-0.2)
        lgd = plt.legend(
            legend_handle,
            legend_label,
            loc="lower center",
            bbox_to_anchor=(0.5, -1.5 + len(dirs) * -0.2),
            ncol=4,
            handletextpad=-2,
            title="[1]:{0}".format(common_prefix),
            borderaxespad=0.0,
        )
        plt.grid(True)
    return lgd


# This function is used to generate the statistic plots of nonlinearity component
# Mainly divided into the following steps:
# 1) With log_parse function, we get the statistics from each directory.
# 2) Convert the collected nonlinearity statistics into the tables. Each table
#    contains all the statistics in each component of each directory.
# 3) The statistics of each component are stored into corresponding log files.
#    Each line of the log file contains the statistics of one iteration.
# 4) Plot the "Per-dimension average-(value, derivative) percentiles" figure
#    for each nonlinearity component.
def generate_nonlin_stats_plots(
    exp_dir, output_dir, plot, comparison_dir=None, start_iter=1, latex_report=None
):
    """Generate nonlinearity-statistics tables and plots.

    Steps:
      1) Parse the progress logs of every experiment dir for nonlinearity
         statistics (via log_parse).
      2) Convert the collected statistics into per-component tables, one row
         per iteration.
      3) Write each component's table of the main experiment dir to
         {output_dir}/nonlinstats_{component}.log.
      4) If ``plot`` is true, draw the "Per-dimension average-(value,
         derivative) percentiles" figure for each component, overlaying all
         experiment dirs, and optionally add it to ``latex_report``.

    Args:
        exp_dir: main experiment directory.
        output_dir: directory where log files and pdf figures are written.
        plot: whether to generate pdf figures (requires matplotlib).
        comparison_dir: optional list of extra experiment dirs to overlay.
        start_iter: first iteration to plot; must be >= 1.
        latex_report: optional LatexReport that figures are appended to.
    """
    assert start_iter >= 1
    comparison_dir = [] if comparison_dir is None else comparison_dir
    dirs = [exp_dir] + comparison_dir
    stats_per_dir = {}
    # becomes 1 when the stats include rms-oderivative columns
    # (15 stats per iteration instead of 10)
    with_oderiv = 0

    for dir in dirs:
        stats_per_component_per_iter = (
            log_parse.parse_progress_logs_for_nonlinearity_stats(dir)
        )
        for key in stats_per_component_per_iter:
            if len(stats_per_component_per_iter[key]["stats"]) == 0:
                logger.warning(
                    "Couldn't find any rows for the "
                    "nonlin stats plot, not generating it"
                )
        stats_per_dir[dir] = stats_per_component_per_iter

    # convert the nonlin stats into tables (one row per iteration,
    # first column is the iteration number)
    stat_tables_per_component_per_dir = {}
    for dir in dirs:
        stats_per_component_per_iter = stats_per_dir[dir]
        stat_tables_per_component = {}
        for component_name in stats_per_component_per_iter.keys():
            comp_stats = stats_per_component_per_iter[component_name]["stats"]
            iter_stats = []
            for iter in sorted(comp_stats):
                iter_stats.append([iter] + comp_stats[iter])
            stat_tables_per_component[component_name] = iter_stats
            # 15 stats plus the iteration column means oderiv-rms is present.
            # (The original code inspected loop variables after the loops
            # finished, which raised NameError when a dir had no stats.)
            if iter_stats and len(iter_stats[-1]) == 16:
                with_oderiv = 1
        stat_tables_per_component_per_dir[dir] = stat_tables_per_component

    # write the tables for the main experiment directory
    main_stat_tables = stat_tables_per_component_per_dir[exp_dir]
    for component_name in main_stat_tables.keys():
        with open(
            "{dir}/nonlinstats_{comp_name}.log".format(
                dir=output_dir, comp_name=component_name
            ),
            "w",
        ) as f:
            if with_oderiv:
                # with oderiv-rms
                f.write(
                    "Iteration\tValueMean\tValueStddev\tDerivMean\tDerivStddev\t"
                    "OderivMean\tOderivStddev\t"
                    "Value_5th\tValue_50th\tValue_95th\t"
                    "Deriv_5th\tDeriv_50th\tDeriv_95th\t"
                    "Oderiv_5th\tOderiv_50th\tOderiv_95th\n"
                )
            else:
                # without oderiv-rms
                f.write(
                    "Iteration\tValueMean\tValueStddev\tDerivMean\tDerivStddev\t"
                    "Value_5th\tValue_50th\tValue_95th\t"
                    "Deriv_5th\tDeriv_50th\tDeriv_95th\n"
                )
            iter_stat_report = []
            for row in main_stat_tables[component_name]:
                iter_stat_report.append("\t".join([str(x) for x in row]))
            f.write("\n".join(iter_stat_report))

    if plot:
        main_component_names = sorted(main_stat_tables)
        plot_component_names = set(main_component_names)
        for dir in dirs:
            component_names = set(stats_per_dir[dir].keys())
            plot_component_names = plot_component_names.intersection(component_names)
        plot_component_names = sorted(plot_component_names)
        if plot_component_names != main_component_names:
            logger.warning(
                "The components in all the neural networks in the "
                "given experiment dirs are not the same, so comparison plots are "
                "provided only for common component names. Make sure that these are "
                "comparable experiments before analyzing these plots."
            )
        fig = plt.figure()
        # legend titles abbreviate the dirs by stripping their common prefix
        common_prefix = os.path.commonprefix(dirs)
        prefix_length = common_prefix.rfind("/")
        common_prefix = common_prefix[0:prefix_length]
        for component_name in main_component_names:
            if stats_per_dir[exp_dir][component_name]["type"] == "LstmNonlinearity":
                # an LstmNonlinearity bundles five gates; one figure per gate
                for i in range(0, 5):
                    component_type = "Lstm-" + g_lstm_gate[i]
                    lgd = plot_a_nonlin_component(
                        fig,
                        dirs,
                        stat_tables_per_component_per_dir,
                        component_name,
                        common_prefix,
                        prefix_length,
                        component_type,
                        start_iter,
                        i,
                        with_oderiv,
                    )
                    fig.suptitle(
                        "Per-dimension average-(value, derivative) percentiles for "
                        "{component_name}-{gate}".format(
                            component_name=component_name, gate=g_lstm_gate[i]
                        )
                    )
                    comp_name = latex_compliant_name(component_name)
                    figfile_name = "{dir}/nonlinstats_{comp_name}_{gate}.pdf".format(
                        dir=output_dir, comp_name=comp_name, gate=g_lstm_gate[i]
                    )
                    fig.savefig(
                        figfile_name, bbox_extra_artists=(lgd,), bbox_inches="tight"
                    )
                    if latex_report is not None:
                        latex_report.add_figure(
                            figfile_name,
                            "Per-dimension average-(value, derivative) percentiles for "
                            "{0}-{1}".format(component_name, g_lstm_gate[i]),
                        )
            else:
                component_type = stats_per_dir[exp_dir][component_name]["type"]
                lgd = plot_a_nonlin_component(
                    fig,
                    dirs,
                    stat_tables_per_component_per_dir,
                    component_name,
                    common_prefix,
                    prefix_length,
                    component_type,
                    start_iter,
                    0,
                    with_oderiv,
                )
                if with_oderiv:
                    fig.suptitle(
                        "Per-dimension average-(value, derivative) and rms-oderivative percentiles for "
                        "{component_name}".format(component_name=component_name)
                    )
                else:
                    fig.suptitle(
                        "Per-dimension average-(value, derivative) percentiles for "
                        "{component_name}".format(component_name=component_name)
                    )
                comp_name = latex_compliant_name(component_name)
                figfile_name = "{dir}/nonlinstats_{comp_name}.pdf".format(
                    dir=output_dir, comp_name=comp_name
                )
                fig.savefig(
                    figfile_name, bbox_extra_artists=(lgd,), bbox_inches="tight"
                )
                if latex_report is not None:
                    if with_oderiv:
                        latex_report.add_figure(
                            figfile_name,
                            "Per-dimension average-(value, derivative) and rms-oderivative percentiles for "
                            "{0}".format(component_name),
                        )
                    else:
                        latex_report.add_figure(
                            figfile_name,
                            "Per-dimension average-(value, derivative) percentiles for "
                            "{0}".format(component_name),
                        )


def generate_clipped_proportion_plots(
    exp_dir, output_dir, plot, comparison_dir=None, start_iter=1, latex_report=None
):
    """Generate clipped-proportion tables and plots.

    Writes {output_dir}/clipped_proportion.log with the per-iteration
    clipped-proportion table of the main experiment dir and, if ``plot`` is
    true, one figure per ClipGradientComponent common to all dirs.

    Args:
        exp_dir: main experiment directory.
        output_dir: directory where log files and pdf figures are written.
        plot: whether to generate pdf figures (requires matplotlib).
        comparison_dir: optional list of extra experiment dirs to overlay.
        start_iter: first iteration to plot; must be >= 1.
        latex_report: optional LatexReport that figures are appended to.
    """
    assert start_iter >= 1
    comparison_dir = [] if comparison_dir is None else comparison_dir
    dirs = [exp_dir] + comparison_dir
    stats_per_dir = {}
    for dir in dirs:
        try:
            stats_per_dir[dir] = log_parse.parse_progress_logs_for_clipped_proportion(
                dir
            )
        except log_parse.MalformedClippedProportionLineException:
            raise
        except common_lib.KaldiCommandException:
            # best-effort: a dir without ClipGradientComponents is simply
            # skipped, it just will not appear in the comparison plots
            logger.warning(
                "Could not extract the clipped proportions for %s, "
                "this might be because there are no ClipGradientComponents.",
                dir,
            )
            continue
        if len(stats_per_dir[dir]) == 0:
            logger.warning(
                "Couldn't find any rows for the "
                "clipped proportion plot, not generating it"
            )
    try:
        main_cp_stats = stats_per_dir[exp_dir]["table"]
    except KeyError:
        logger.warning(
            "The main experiment directory %s does not have clipped proportions. "
            "Not generating clipped proportion plots.",
            exp_dir,
        )
        return

    # dump the table for the main experiment directory
    with open("{dir}/clipped_proportion.log".format(dir=output_dir), "w") as f:
        for row in main_cp_stats:
            f.write("\t".join([str(x) for x in row]) + "\n")

    if plot:
        main_component_names = sorted(
            stats_per_dir[exp_dir]["cp_per_iter_per_component"]
        )
        plot_component_names = set(main_component_names)
        for dir in dirs:
            try:
                component_names = set(stats_per_dir[dir]["cp_per_iter_per_component"])
            except KeyError:
                continue
            plot_component_names = plot_component_names.intersection(component_names)
        plot_component_names = sorted(plot_component_names)
        if plot_component_names != main_component_names:
            logger.warning(
                "The components in all the neural networks in the given "
                "experiment dirs are not the same, so comparison plots are "
                "provided only for common component names. Make sure that these "
                "are comparable experiments before analyzing these plots."
            )
        fig = plt.figure()
        for component_name in main_component_names:
            fig.clf()
            index = 0
            plots = []
            for dir in dirs:
                color_val = g_plot_colors[index]
                index += 1
                try:
                    iter_stats = stats_per_dir[dir]["cp_per_iter_per_component"][
                        component_name
                    ]
                except KeyError:
                    # this component is not available in this network, so
                    # just don't plot it
                    continue
                data = np.array(iter_stats)
                data = data[data[:, 0] >= start_iter, :]
                ax = plt.subplot(111)
                (mp,) = ax.plot(
                    data[:, 0],
                    data[:, 1],
                    color=color_val,
                    label="Clipped Proportion {0}".format(dir),
                )
                plots.append(mp)
                ax.set_ylabel("Clipped Proportion")
                ax.set_ylim([0, 1.2])
                ax.grid(True)
            lgd = plt.legend(
                handles=plots,
                loc="lower center",
                bbox_to_anchor=(0.5, -0.5 + len(dirs) * -0.2),
                ncol=1,
                borderaxespad=0.0,
            )
            plt.grid(True)
            fig.suptitle(
                "Clipped-proportion value at {comp_name}".format(
                    comp_name=component_name
                )
            )
            comp_name = latex_compliant_name(component_name)
            figfile_name = "{dir}/clipped_proportion_{comp_name}.pdf".format(
                dir=output_dir, comp_name=comp_name
            )
            fig.savefig(figfile_name, bbox_extra_artists=(lgd,), bbox_inches="tight")
            if latex_report is not None:
                latex_report.add_figure(
                    figfile_name, "Clipped proportion at {0}".format(component_name)
                )


def generate_parameter_diff_plots(
    exp_dir, output_dir, plot, comparison_dir=None, start_iter=1, latex_report=None
):
    """Generate parameter-change tables and plots.

    Writes {output_dir}/parameter.diff and relative_parameter.diff with
    per-iteration (relative) parameter differences of the main experiment
    dir, and, if ``plot`` is true, one two-panel figure per component.

    Args:
        exp_dir: main experiment directory.
        output_dir: directory where log files and pdf figures are written.
        plot: whether to generate pdf figures (requires matplotlib).
        comparison_dir: optional list of extra experiment dirs to overlay.
        start_iter: first iteration to plot; must be >= 1.
        latex_report: optional LatexReport that figures are appended to.
    """
    assert start_iter >= 1
    comparison_dir = [] if comparison_dir is None else comparison_dir
    dirs = [exp_dir] + comparison_dir
    # maps the progress-log key to the output file it is dumped into
    key_file = {
        "Parameter differences": "parameter.diff",
        "Relative parameter differences": "relative_parameter.diff",
    }
    stats_per_dir = {}
    for dir in dirs:
        stats_per_dir[dir] = {}
        for key in key_file:
            stats_per_dir[dir][key] = log_parse.parse_progress_logs_for_param_diff(
                dir, key
            )

    # write down the stats for the main experiment directory
    for diff_type in key_file:
        with open("{0}/{1}".format(output_dir, key_file[diff_type]), "w") as f:
            diff_per_component_per_iter = stats_per_dir[exp_dir][diff_type][
                "progress_per_component"
            ]
            component_names = stats_per_dir[exp_dir][diff_type]["component_names"]
            max_iter = stats_per_dir[exp_dir][diff_type]["max_iter"]
            f.write(" ".join(["Iteration"] + component_names) + "\n")
            total_missing_iterations = 0
            gave_user_warning = False
            for iter in range(max_iter + 1):
                iter_data = [str(iter)]
                for c in component_names:
                    try:
                        iter_data.append(str(diff_per_component_per_iter[c][iter]))
                    except KeyError:
                        # iteration missing for this component; mark it NA
                        total_missing_iterations += 1
                        iter_data.append("NA")
                if (
                    float(total_missing_iterations) / len(component_names) > 20
                    and not gave_user_warning
                ):
                    logger.warning(
                        "There are more than %.0f missing iterations per component. "
                        "Something might be wrong.",
                        float(total_missing_iterations) / len(component_names),
                    )
                    gave_user_warning = True
                f.write(" ".join(iter_data) + "\n")

    if plot:
        # get the component names common to all dirs
        diff_type = list(key_file.keys())[0]
        main_component_names = sorted(
            stats_per_dir[exp_dir][diff_type]["progress_per_component"]
        )
        plot_component_names = set(main_component_names)
        for dir in dirs:
            try:
                component_names = set(
                    stats_per_dir[dir][diff_type]["progress_per_component"]
                )
            except KeyError:
                continue
            plot_component_names = plot_component_names.intersection(component_names)
        plot_component_names = sorted(plot_component_names)
        if plot_component_names != main_component_names:
            logger.warning(
                "The components in all the neural networks in the "
                "given experiment dirs are not the same, "
                "so comparison plots are provided only for common "
                "component names. "
                "Make sure that these are comparable experiments "
                "before analyzing these plots."
            )
        assert main_component_names
        fig = plt.figure()
        logger.info(
            "Plotting parameter differences for components: "
            + ", ".join(main_component_names)
        )
        for component_name in main_component_names:
            fig.clf()
            index = 0
            plots = []
            for dir in dirs:
                color_val = g_plot_colors[index]
                index += 1
                iter_stats = []
                try:
                    for diff_type in [
                        "Parameter differences",
                        "Relative parameter differences",
                    ]:
                        iter_stats.append(
                            np.array(
                                sorted(
                                    stats_per_dir[dir][diff_type][
                                        "progress_per_component"
                                    ][component_name].items()
                                )
                            )
                        )
                except KeyError as e:
                    # this component is not available in this network, so
                    # just don't plot it -- unless it is missing even in
                    # the main dir, which indicates a real problem
                    if dir == exp_dir:
                        raise Exception(
                            "No parameter differences were available even in the main "
                            "experiment dir for the component {0}. Something went "
                            "wrong: {1}.".format(component_name, e)
                        )
                    continue
                ax = plt.subplot(211)
                (mp,) = ax.plot(
                    iter_stats[0][:, 0],
                    iter_stats[0][:, 1],
                    color=color_val,
                    label="Parameter Differences {0}".format(dir),
                )
                plots.append(mp)
                ax.set_ylabel("Parameter Differences")
                ax.grid(True)
                ax = plt.subplot(212)
                (mp,) = ax.plot(
                    iter_stats[1][:, 0],
                    iter_stats[1][:, 1],
                    color=color_val,
                    label="Relative Parameter " "Differences {0}".format(dir),
                )
                ax.set_xlabel("Iteration")
                ax.set_ylabel("Relative Parameter Differences")
                ax.grid(True)
            lgd = plt.legend(
                handles=plots,
                loc="lower center",
                bbox_to_anchor=(0.5, -0.5 + len(dirs) * -0.2),
                ncol=1,
                borderaxespad=0.0,
            )
            plt.grid(True)
            fig.suptitle(
                "Parameter differences at {comp_name}".format(comp_name=component_name)
            )
            comp_name = latex_compliant_name(component_name)
            figfile_name = "{dir}/param_diff_{comp_name}.pdf".format(
                dir=output_dir, comp_name=comp_name
            )
            fig.savefig(figfile_name, bbox_extra_artists=(lgd,), bbox_inches="tight")
            if latex_report is not None:
                latex_report.add_figure(
                    figfile_name, "Parameter differences at {0}".format(component_name)
                )


def generate_plots(
    exp_dir, output_dir, output_names, comparison_dir=None, start_iter=1
):
    """Run every report generator and optionally compile a LaTeX report.

    Args:
        exp_dir: main experiment directory.
        output_dir: directory for all tables/figures; created if missing.
        output_names: list of (output_name, objective_type) pairs; the
            objective type selects which accuracy/objective plots are drawn.
        comparison_dir: optional list of extra experiment dirs to overlay.
        start_iter: first iteration to plot; must be >= 1.
    """
    try:
        os.makedirs(output_dir)
    except OSError as e:
        # tolerate a pre-existing directory; re-raise anything else
        if e.errno == errno.EEXIST and os.path.isdir(output_dir):
            pass
        else:
            raise
    if g_plot:
        latex_report = LatexReport("{0}/report.pdf".format(output_dir))
    else:
        latex_report = None

    for output_name, objective_type in output_names:
        if objective_type == "linear":
            logger.info("Generating accuracy plots for '%s'", output_name)
            generate_acc_logprob_plots(
                exp_dir,
                output_dir,
                g_plot,
                key="accuracy",
                file_basename="accuracy",
                comparison_dir=comparison_dir,
                start_iter=start_iter,
                latex_report=latex_report,
                output_name=output_name,
            )
            logger.info("Generating log-likelihood plots for '%s'", output_name)
            generate_acc_logprob_plots(
                exp_dir,
                output_dir,
                g_plot,
                key="log-likelihood",
                file_basename="loglikelihood",
                comparison_dir=comparison_dir,
                start_iter=start_iter,
                latex_report=latex_report,
                output_name=output_name,
            )
        elif objective_type == "chain":
            logger.info("Generating log-probability plots for '%s'", output_name)
            generate_acc_logprob_plots(
                exp_dir,
                output_dir,
                g_plot,
                key="log-probability",
                file_basename="log_probability",
                comparison_dir=comparison_dir,
                start_iter=start_iter,
                latex_report=latex_report,
                output_name=output_name,
            )
        elif objective_type == "rnnlm_objective":
            logger.info("Generating RNNLM objective plots for '%s'", output_name)
            generate_acc_logprob_plots(
                exp_dir,
                output_dir,
                g_plot,
                key="rnnlm_objective",
                file_basename="objective",
                comparison_dir=comparison_dir,
                start_iter=start_iter,
                latex_report=latex_report,
                output_name=output_name,
            )
        else:
            logger.info(
                "Generating %s objective plots for '%s'", objective_type, output_name
            )
            generate_acc_logprob_plots(
                exp_dir,
                output_dir,
                g_plot,
                key="objective",
                file_basename="objective",
                comparison_dir=comparison_dir,
                start_iter=start_iter,
                latex_report=latex_report,
                output_name=output_name,
            )

    logger.info("Generating non-linearity stats plots")
    generate_nonlin_stats_plots(
        exp_dir,
        output_dir,
        g_plot,
        comparison_dir=comparison_dir,
        start_iter=start_iter,
        latex_report=latex_report,
    )

    logger.info("Generating clipped-proportion plots")
    generate_clipped_proportion_plots(
        exp_dir,
        output_dir,
        g_plot,
        comparison_dir=comparison_dir,
        start_iter=start_iter,
        latex_report=latex_report,
    )

    logger.info("Generating parameter difference plots")
    generate_parameter_diff_plots(
        exp_dir,
        output_dir,
        g_plot,
        comparison_dir=comparison_dir,
        start_iter=start_iter,
        latex_report=latex_report,
    )

    if g_plot and latex_report is not None:
        has_compiled = latex_report.close()
        if has_compiled:
            logger.info(
                "Report file %s/report.pdf has been generated successfully.", output_dir
            )


def main():
    """Parse command-line args and dispatch to generate_plots()."""
    args = get_args()

    if not g_plot:
        logger.warning(
            "This script requires matplotlib and numpy.\n"
            "... Install these packages to generate plots.\n"
            "... If you are on a cluster where you do not have admin rights, use venv.\n"
            "... Generating text data table files only."
        )

    # build the (output_name, objective_type) pairs; explicit --output-nodes
    # wins, otherwise fall back to the chain/rnnlm/linear defaults
    output_nodes = []
    if args.output_nodes is not None:
        nodes = args.output_nodes.split(" ")
        for n in nodes:
            parts = n.split(":")
            assert len(parts) == 2
            output_nodes.append(tuple(parts))
    elif args.is_chain:
        output_nodes.append(("output", "chain"))
        output_nodes.append(("output-xent", "chain"))
    elif args.is_rnnlm:
        output_nodes.append(("output", "rnnlm_objective"))
    else:
        output_nodes.append(("output", "linear"))

    if args.comparison_dir is not None:
        generate_plots(
            args.exp_dir[0],
            args.output_dir,
            output_nodes,
            comparison_dir=args.comparison_dir,
            start_iter=args.start_iter,
        )
    elif len(args.exp_dir) == 1:
        generate_plots(
            args.exp_dir[0],
            args.output_dir,
            output_nodes,
            start_iter=args.start_iter,
        )
    elif len(args.exp_dir) > 1:
        # first dir is the main experiment, the rest are comparisons
        generate_plots(
            args.exp_dir[0],
            args.output_dir,
            output_nodes,
            comparison_dir=args.exp_dir[1:],
            start_iter=args.start_iter,
        )


if __name__ == "__main__":
    main()
{"hexsha": "bc8fc53fbd3bc68995bf2fbdd4e682fbec8e7546", "size": 40766, "ext": "py", "lang": "Python", "max_stars_repo_path": "scripts/recipes/librispeech/steps/nnet3/report/generate_plots.py", "max_stars_repo_name": "mlcommons/peoples-speech", "max_stars_repo_head_hexsha": "1bfaa7d843e0f664e16bbdbc308f7fa40ac7e10c", "max_stars_repo_licenses": ["Apache-2.0"], "max_stars_count": 62, "max_stars_repo_stars_event_min_datetime": "2021-03-07T06:15:48.000Z", "max_stars_repo_stars_event_max_datetime": "2022-03-24T18:58:57.000Z", "max_issues_repo_path": "scripts/recipes/librispeech/steps/nnet3/report/generate_plots.py", "max_issues_repo_name": "mlcommons/peoples-speech", "max_issues_repo_head_hexsha": "1bfaa7d843e0f664e16bbdbc308f7fa40ac7e10c", "max_issues_repo_licenses": ["Apache-2.0"], "max_issues_count": 59, "max_issues_repo_issues_event_min_datetime": "2021-02-26T21:37:03.000Z", "max_issues_repo_issues_event_max_datetime": "2022-03-24T16:57:12.000Z", "max_forks_repo_path": "scripts/recipes/librispeech/steps/nnet3/report/generate_plots.py", "max_forks_repo_name": "mlcommons/peoples-speech", "max_forks_repo_head_hexsha": "1bfaa7d843e0f664e16bbdbc308f7fa40ac7e10c", "max_forks_repo_licenses": ["Apache-2.0"], "max_forks_count": 9, "max_forks_repo_forks_event_min_datetime": "2021-02-26T21:34:11.000Z", "max_forks_repo_forks_event_max_datetime": "2022-02-09T04:00:50.000Z", "avg_line_length": 35.112833764, "max_line_length": 108, "alphanum_fraction": 0.5260265908, "include": true, "reason": "import numpy", "num_tokens": 8341}