content stringlengths 1 1.04M | input_ids listlengths 1 774k | ratio_char_token float64 0.38 22.9 | token_count int64 1 774k |
|---|---|---|---|
import os
import random
import time
import numpy as np
from memory_profiler import memory_usage
from mpi4py import MPI
from mpi4py.futures import MPICommExecutor
import traceback
from csv_modules.csv_writer import write_in_file
from experiment.utils_general import remove_get_dirs
from general_utils.pdb_utils import get_ignore_pdbs, get_chains_pdb, get_all_pdb_name
from general_utils.download_utils import download_pdb
from general_utils.list_utils import generate_binary_matrix
from pdb_to_mrc.pdb_2_mrc import pdb_to_mrc_chains
from process_mrc.generate import get_mrc_one
from reconstruction.DLX import solve, gen_y_dicc, gen_x_dicc
from reconstruction.semi_exact_cover import get_semi_exact_s
| [
11748,
28686,
198,
11748,
4738,
198,
11748,
640,
198,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
4088,
62,
5577,
5329,
1330,
4088,
62,
26060,
198,
6738,
285,
14415,
19,
9078,
1330,
4904,
40,
198,
6738,
285,
14415,
19,
9078,
13,
69... | 2.978903 | 237 |
import numpy as np
MAX_VOCAB_SIZE = 60702
import pickle
# Precomputed nearest-neighbour tables, one row per word id:
#   dist_mat_list[i][j]  -- distance of word i to its j-th closest word
#   dist_mat_order[i][j] -- vocabulary id of that j-th closest word
# (presumably sorted ascending by distance, entry 0 being the word itself --
#  see pick_most_similar_words below; TODO confirm against the build script)
dist_mat_list = np.load('aux_files/sdist_mat_dic_%d.npy' % (MAX_VOCAB_SIZE))
dist_mat_order = np.load('aux_files/sdist_order_%d.npy' % (MAX_VOCAB_SIZE))
# Dataset object providing inv_dict (id -> word); pickled with the same vocab size.
with open('aux_files/dataset_%d.pkl' %MAX_VOCAB_SIZE, 'rb') as f:
    dataset = pickle.load(f)
# print(np.shape(dist_mat_list))
# print(dist_mat_list[200:205])
# print(np.shape(dist_mat_order))
# print(dist_mat_order[200:205])
# for i in range(60700,60703):
# cnt_i = i
# if i == 0:
# cnt_i = MAX_VOCAB_SIZE
# print(dataset.inv_dict[cnt_i])
# for j in range(101):
# cnt_dist = dist_mat_order[cnt_i][j]
# if dist_mat_order[cnt_i][j] == 0:
# cnt_dist = MAX_VOCAB_SIZE
# print(cnt_dist, dataset.inv_dict[cnt_dist], dist_mat_list[cnt_i][j])
def pick_most_similar_words(src_word, dist_mat_list, dist_mat_order, ret_count=10, threshold=None):
    """
    Return the precomputed nearest neighbours of ``src_word``.

    Row ``src_word`` of ``dist_mat_order`` holds neighbour ids sorted by
    distance and the same row of ``dist_mat_list`` holds the matching
    distances; entry 0 is the word itself, so the true neighbours are
    entries ``1 .. ret_count``.  A zero distance at position ``ret_count``
    appears to mark a padded/invalid row (TODO confirm), in which case a
    pair of empty lists is returned.  When ``threshold`` is given, only
    neighbours with distance strictly below it are kept.

    Returns (neighbour_ids, neighbour_distances).
    """
    neighbour_ids = dist_mat_order[src_word][1:ret_count + 1]
    neighbour_dists = dist_mat_list[src_word][1:ret_count + 1]

    # Padded row -> no usable neighbours.
    if neighbour_dists[-1] == 0:
        return [], []

    if threshold is None:
        return neighbour_ids, neighbour_dists

    keep = np.where(neighbour_dists < threshold)
    return neighbour_ids[keep], neighbour_dists[keep]
print(pick_most_similar_words(100, dist_mat_list, dist_mat_order))
print(pick_most_similar_words(100, dist_mat_list, dist_mat_order, threshold=1.45)) | [
11748,
299,
32152,
355,
45941,
198,
22921,
62,
53,
4503,
6242,
62,
33489,
796,
3126,
36680,
198,
11748,
2298,
293,
198,
17080,
62,
6759,
62,
4868,
796,
45941,
13,
2220,
10786,
14644,
62,
16624,
14,
82,
17080,
62,
6759,
62,
67,
291,
... | 2.1242 | 781 |
#!/usr/bin/env python3
import sys
import argparse
import numpy as np
from generate_traffic import *
from HomaPkt import *
if __name__=="__main__":
load = 0.70
numOfSpines = 4
numOfSubtrees = 2
numOfToRsPerSubtree = 2
numOfServersPerRack = 4
evaluatedPrefix = "1.0."
linkSpeed = 100e6
parser = argparse.ArgumentParser()
parser.add_argument("seed", type=int, help="RNG seed required.")
parser.add_argument("directory", type=str,
help="Directory prefix of pcaps and dumps")
parser.add_argument("--evaluated_prefix", type=str,
help="IP prefix of evaluated region, e.g., 1.0.")
parser.add_argument("--load", type=float,
help="Portion of bisection bandwidth utilized.")
parser.add_argument("--numClusters", type=int,
help="Number clusters to generate traffic for.")
parser.add_argument("--numToRs", type=int,
help="Number of ToR switches/racks per cluster.")
parser.add_argument("--numServers", type=int,
help="Number of servers per rack.")
parser.add_argument("--linkSpeed", type=float,
help="Link speed")
args = parser.parse_args()
seed = args.seed
data_dir = args.directory
if args.evaluated_prefix:
evaluatedPrefix = args.evaluated_prefix
if args.load:
load = args.load
if args.numClusters:
numOfSubtrees = args.numClusters
if args.numToRs:
numOfToRsPerSubtree = args.numToRs
if args.numServers:
numOfServersPerRack = args.numServers
if args.linkSpeed:
linkSpeed = args.linkSpeed
numOfSpines = numOfToRsPerSubtree * numOfToRsPerSubtree
rng = np.random.RandomState(seed=seed)
emulatedRacks = range(numOfToRsPerSubtree, numOfSubtrees*numOfToRsPerSubtree)
traffic_matrix = generate_traffic_matrix(rng, load, linkSpeed,
numOfServersPerRack,
numOfToRsPerSubtree, numOfSubtrees,
numOfSpines, emulatedRacks)
filename = data_dir + '/eval' + str(numOfSubtrees) + '/eval.raw'
out_fct = data_dir + '/fct_c' + str(numOfSubtrees) + '.dat'
with open(filename, 'r') as eval_file, \
open(out_fct, 'w') as fct_file:
parse_fct(eval_file, fct_file, traffic_matrix, numOfSubtrees,
numOfToRsPerSubtree, numOfServersPerRack, evaluatedPrefix)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
25064,
198,
11748,
1822,
29572,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
7716,
62,
9535,
2108,
1330,
1635,
198,
6738,
367,
6086,
47,
21841,
1330,
1635,
628,
... | 2.13217 | 1,203 |
import random
from config import *
import time
import os
from utils import datacheck
import threading
# Module-level id counter; starts at zero.  NOTE(review): nothing in this
# fragment mutates it -- confirm its consumers elsewhere in the file.
id_now = 0
# Candidate level values the generator may draw from (negative levels included;
# 0 deliberately absent -- TODO confirm semantics).
available_level = [-3, -2, -1, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20]
# if __name__ == "__main__":
# id_now = 0
# gen_path = os.path.join("test_data", "auto")
# # print(gen(n_batch=5, batch_size=6, time_interval=30))
# # exit(0)
# if not os.path.exists(gen_path):
# os.mkdir(gen_path)
#
# n = 512
# for i in range(n):
# save(os.path.join(gen_path, autoname()), gen(n_batch=40, batch_size=1, time_interval=2.0))
# # gen(n_batch=40, batch_size=1, time_interval=0.1)
| [
11748,
4738,
198,
6738,
4566,
1330,
1635,
198,
11748,
640,
198,
11748,
28686,
198,
6738,
3384,
4487,
1330,
4818,
4891,
694,
198,
11748,
4704,
278,
198,
198,
312,
62,
2197,
796,
657,
198,
15182,
62,
5715,
796,
25915,
18,
11,
532,
17,
... | 2.077399 | 323 |
#!/usr/bin/env python
import re
import subprocess
import platform
import os
import sys
# Function attributes used as per-function caches for discovered GUIDs.
# NOTE(review): get_ios / get_mac are not defined in this fragment -- these
# lines raise NameError unless the functions exist elsewhere in the file.
get_ios.guid = {}
get_mac.guid = None
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
11748,
302,
198,
11748,
850,
14681,
198,
11748,
3859,
198,
11748,
28686,
198,
11748,
25064,
628,
198,
1136,
62,
4267,
13,
5162,
312,
796,
23884,
198,
198,
1136,
62,
20285,
13,
5162,
31... | 2.886364 | 44 |
from __future__ import absolute_import
from chainer.functions import accuracy
import chainer
import numpy as np
from chainer.links import CLink
from chainer.links.eBNN.link_binary_linear import BinaryLinear
from chainer.links.eBNN.link_softmax_cross_entropy import SoftmaxCrossEntropy
from chainer.utils import binary_util as bu
import math
| [
6738,
11593,
37443,
834,
1330,
4112,
62,
11748,
198,
198,
6738,
6333,
263,
13,
12543,
2733,
1330,
9922,
198,
11748,
6333,
263,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6738,
6333,
263,
13,
28751,
1330,
7852,
676,
198,
6738,
6333,
... | 3.43 | 100 |
from django.contrib.admin.widgets import FilteredSelectMultiple
| [
6738,
42625,
14208,
13,
3642,
822,
13,
28482,
13,
28029,
11407,
1330,
7066,
4400,
17563,
31217,
628
] | 3.823529 | 17 |
import boto3
import functools
import logging
import os
from crhelper import CfnResource
# CloudFormation custom-resource helper (crhelper); handles the CFN
# request/response lifecycle for the decorated create/update/delete handlers.
helper = CfnResource(json_logging=False, log_level='DEBUG',
                     boto_level='CRITICAL')

s3 = boto3.resource("s3")
client = boto3.client("s3")
logger = logging.getLogger()
# Log level comes from the LogLevel environment variable, defaulting to INFO.
logger.setLevel(os.getenv("LogLevel", logging.INFO))
def with_logging(handler):
    """
    Decorator which performs basic logging and makes logger available on context
    """
    # Bug fix: the original body had a dangling @functools.wraps and a
    # `return wrapper` with no `wrapper` defined (a SyntaxError at import
    # time).  Reconstructed as the standard wrapper pattern: log the incoming
    # event, then delegate to the wrapped handler unchanged.
    @functools.wraps(handler)
    def wrapper(event, context):
        log = logging.getLogger(__name__)
        log.info("## HANDLER: %s", getattr(handler, "__name__", repr(handler)))
        log.info("## EVENT: %s", event)
        return handler(event, context)
    return wrapper
@with_logging
@helper.create
@helper.update
@with_logging
@helper.delete
| [
11748,
275,
2069,
18,
198,
11748,
1257,
310,
10141,
198,
11748,
18931,
198,
11748,
28686,
198,
198,
6738,
1067,
2978,
525,
1330,
327,
22184,
26198,
628,
198,
2978,
525,
796,
327,
22184,
26198,
7,
17752,
62,
6404,
2667,
28,
25101,
11,
... | 2.632287 | 223 |
#!/usr/bin/env python3
import basic_plot_functions as bpf
import binning_utils as bu
import predefined_configs as pconf
from collections.abc import Iterable
from collections import defaultdict
from copy import deepcopy
import collections
import datetime as dt
import diag_utils as du
import inspect
import logging
import multiprocessing as mp
import numpy as np
from pathlib import Path
import plot_utils as pu
import re
import os
import stat_utils as su
import StatisticsDatabase as sdb
import var_utils as vu
# Statistics eligible for bootstrap confidence intervals: every sampleable
# aggregate statistic except 'Count'.
bootStrapStats = []
for x in su.sampleableAggStats:
    if x != 'Count': bootStrapStats.append(x)

## plot settings
figureFileType = 'pdf' #['pdf','png']

# Whether interior subplot axes keep their tick/axis labels.
interiorLabels = True


###################################
## Base class for all analysisTypes
###################################
def categoryBinValsAttributes(dfw, fullBinVar, binMethod, options):
    '''
    Utility function for providing an ordered list of
    pairs of binVals and associated labels for
    category binMethods in the context of a DFWrapper

    dfw        -- DFWrapper-like object exposing levels() and uniquevals()
    fullBinVar -- key into vu.varDictAll naming the binning variable
    binMethod  -- key used to look up a preferred value ordering in
                  pconf.binVarConfigs
    options    -- unused in this function body  # NOTE(review): confirm callers

    Returns binValsMap: list of (binVal, title-suffix) pairs, ordered per the
    configured binMethod ordering, restricted to values present in the db.
    '''
    binVar = vu.varDictAll[fullBinVar][1]

    dbSelect1DBinVals = dfw.levels('binVal')
    binUnitss = dfw.uniquevals('binUnits')
    #if (len(binUnitss) == 0 or
    #    len(dbSelect1DBinVals) == 1): return None, None
    assert (len(binUnitss) != 0 and len(dbSelect1DBinVals) > 0), 'ERROR: categoryBinValsAttributes received invalid binVar/binMethod'
    binUnits = binUnitss[0]

    # reorder select1DBinVals to match binMethod definition
    # TODO(JJG): clean up for readability
    tmp = deepcopy(pconf.binVarConfigs.get(
        fullBinVar,{}).get(
        binMethod,{}).get(
        'values', dbSelect1DBinVals))
    select1DBinVals = []
    # configured ordering may be a scalar or an iterable; normalize to a list
    if (not isinstance(tmp, Iterable) or
        isinstance(tmp, str)):
        select1DBinVals += [tmp]
    else:
        select1DBinVals += tmp
    # append db values missing from the configured ordering...
    for Bin in dbSelect1DBinVals:
        if Bin not in select1DBinVals:
            select1DBinVals.append(Bin)
    # ...and drop configured values absent from the db (iterate a copy while
    # removing from the original)
    for Bin in list(select1DBinVals):
        if Bin not in dbSelect1DBinVals:
            select1DBinVals.remove(Bin)

    # build a human-readable title suffix per binVal, appending units only for
    # numeric values with non-missing units
    binTitles = []
    for binVal in select1DBinVals:
        if pu.isfloat(binVal) or pu.isint(binVal):
            t = ' @ '+binVar+'='+binVal
            if binUnits != vu.miss_s:
                t = t+' '+binUnits
        else:
            t = ' @ '+binVal
        binTitles.append(t)

    binValsMap = list(zip(select1DBinVals, binTitles))

    return binValsMap
#=============
# 1-D figures
#=============

class CategoryBinMethodBase(AnalysisBase):
    '''
    Base class used to analyze statistics across binMethods with zero-dimensioned or
    category binValues, e.g., QC flag, named latitude band, cloudiness regime, surface type
    '''
    def innerloops(self,
        dfwDict, diagnosticGroup, myLoc, statName, binValsMap, options,
        nsubplots, nxplots, nyplots):
        '''
        virtual method

        Concrete subclasses (registered in AnalysisTypeDict below) produce
        their figures here.
        '''
        raise NotImplementedError()
# Registered in AnalysisTypeDict as 'CYAxisExpLines'.
class CYAxisExpLines(CategoryBinMethodBase):
    '''
    Creates a timeseries figure between firstCycleDTime and lastCycleDTime
    for each forecast length between fcTDeltaFirst and fcTDeltaLast
      - x-axis: cycle initial time
      -    line: per experiment
      - subplot: combination of DiagSpace variable and binVal
      -    file: combination of binVar, statistic, and FC lead time (if applicable)
    '''

# Registered in AnalysisTypeDict as 'FCAxisExpLines'.
class FCAxisExpLines(CategoryBinMethodBase):
    '''
    Creates a timeseries figure between fcTDeltaFirst and fcTDeltaLast containing
    aggregated statistics for the period between firstCycleDTime and lastCycleDTime
      - x-axis: forecast duration
      -    line: per experiment
      - subplot: combination of DiagSpace variable and binVal
      -    file: combination of binVar and statistic
    '''

# Registered in AnalysisTypeDict as 'FCAxisExpLinesDiffCI'.
class FCAxisExpLinesDiffCI(CategoryBinMethodBase):
    '''
    Similar to FCAxisExpLines, except
      - shows difference between experiment(s) and control
      - control is selected using cntrlExpIndex
      - statistics are narrowed down by bootStrapStats
      - confidence intervals (CI) are shown at each lead time
      -    line+shaded region: per experiment
      - subplot: combination of DiagSpace variable and binVal
      -    file: combination of binVar and statistic
    '''

# Registered in AnalysisTypeDict as 'CYAxisFCLines'.
class CYAxisFCLines(CategoryBinMethodBase):
    '''
    Similar to CYAxisExpLines, except
      each line is for a different forecast lead time and
      each experiment is in a different file
      - x-axis: valid time of forecast
      -    line: per FC lead time
      - subplot: combination of DiagSpace variable and binVal
      -    file: combination of binVar, statistic, and experiment
      - self.MAX_FC_LINES determines number of FC lead time lines to include
    '''
###########################################
## Figures with individual lines per binVal
###########################################

# Registered in AnalysisTypeDict as 'CYAxisBinValLines'.
# NOTE(review): base class BinValLinesAnalysisType is not defined in this
# fragment; confirm it exists elsewhere in the file.
class CYAxisBinValLines(BinValLinesAnalysisType):
    '''
    Similar to CYAxisExpLines, except
      each line is for a different binVal (e.g., latitude band, cloudiness, etc.)
      -    line: binVals for named bins (e.g., NXTro, Tro, SXTro for latitude)
      - subplot: column by experiment, row by DiagSpace variable
      -    file: combination of statistic and forecast length
    '''
# TODO(JJG): implement FCAxisBinValLines similar to FCAxisExpLines
#########################################################
## Figures with binVal on one axis, i.e., 2D and profiles
#########################################################

class OneDimBinMethodBase(AnalysisBase):
    '''
    Base class used to analyze statistics across binMethods with one-dimensional binValues
    that are assigned numerical values, e.g., altitude, pressure, latitude, cloud fraction
    '''
    def innerloops(self,
        dfwDict, diagnosticGroup, myLoc, statName, nVarsLoc, varMapLoc, myBinConfigs, options):
        '''
        virtual method

        Concrete subclasses (registered in AnalysisTypeDict below) produce
        their figures here.
        '''
        raise NotImplementedError()
# Registered in AnalysisTypeDict as 'CYandBinValAxes2D'.
class CYandBinValAxes2D(OneDimBinMethodBase):
    '''
    Creates raster maps with binVar binVals on y-axis
      - only applicable to binned diagnostics (e.g., vertical dimension, latitude, zenith angle)
      - subplot: column by experiment, row by DiagSpace variable
      -    file: combination of binVar, binMethod, statistic, and FC lead time
    '''

# Registered in AnalysisTypeDict as 'FCandBinValAxes2D'.
class FCandBinValAxes2D(OneDimBinMethodBase):
    '''
    Creates raster maps with binVar binVals on y-axis
      - only applicable to binned diagnostics (e.g., vertical dimension, latitude, zenith angle)
      - subplot: column by experiment, row by DiagSpace variable
      -    file: combination of binVar, binMethod, and statistic
    '''

# Registered in AnalysisTypeDict as 'BinValAxisProfile'.
class BinValAxisProfile(OneDimBinMethodBase):
    '''
    Similar to FCandBinValAxes2D, except
      - each vertical column of raster points is plotted as a profile on
        a separate set of axes instead of in 2-D color
      - therefore this is a valid plot even for a single forecast length (omb)
      -    line: per experiment
      - subplot: column by lead time, row by DiagSpace variable
      -    file: combination of binVar, binMethod, and statistic
      - self.MAX_FC_SUBFIGS determines number of FC lead times to include
    '''

# Registered in AnalysisTypeDict as 'BinValAxisProfileDiffCI'.
class BinValAxisProfileDiffCI(OneDimBinMethodBase):
    '''
    Similar to BinValAxisProfile, except
      shows difference between experiment(s) and control
      - control is selected using cntrlExpIndex
      - statistics are narrowed down by bootStrapStats
      - confidence intervals (CI) are shown at each lead time and binVal
      -    line+shaded region: per experiment
      - subplot: column by lead time, row by DiagSpace variable
      -    file: combination of binVar, binMethod, and statistic
      - self.MAX_FC_SUBFIGS determines number of FC lead times to include
    '''

# Registered in AnalysisTypeDict as 'BinValAxisPDF'.
class BinValAxisPDF(AnalysisBase):
    '''
    Similar to BinValAxisProfile, except
      uses Count statistic to analyze a PDF across binVals
      - x-axis: binVal
      -    line: per binMethod
      - subplot: combination of FC lead time and DiagSpace variable
      -    file: per experiment (if applicable)
    '''

# TODO: generalize as a sub-class of OneDimBinMethodBase
# Registered in AnalysisTypeDict as 'BinValAxisStatsComposite'.
class BinValAxisStatsComposite(AnalysisBase):
    '''
    Similar to BinValAxisProfile, except
      all statistics (Count, Mean, RMS, STD) are placed on the same axis
      - x-axis: binVal
      -    line: per statistic
      - subplot: per DiagSpace variable
      -    file: combination of FC lead time, experiment, and binMethod (if applicable)
    '''
#===========================
# Calculate gross statistics
#===========================

# Registered in AnalysisTypeDict as 'GrossValues'.
class GrossValues(AnalysisBase):
    '''
    Calculate gross statistics for specified category binMethods at first forecast length
      NOTE: currently only calculates statistics at self.fcTDeltas[0]
            adjust minimum forecast length in order to calculate
            for non-zero forecast lengths, assuming those lengths
            are present in db
    '''

# Name -> analysis class registry; presumably consumed by a factory that
# instantiates analyses by configured name (factory not in this fragment).
AnalysisTypeDict = {
    #Derived from CategoryBinMethodBase(AnalysisBase)
    'CYAxisExpLines': CYAxisExpLines,
    'FCAxisExpLines': FCAxisExpLines,
    'FCAxisExpLinesDiffCI': FCAxisExpLinesDiffCI,
    'CYAxisFCLines': CYAxisFCLines,
    'CYAxisBinValLines': CYAxisBinValLines,
    #Derived from OneDimBinMethodBase(AnalysisBase)
    'CYandBinValAxes2D': CYandBinValAxes2D,
    'FCandBinValAxes2D': FCandBinValAxes2D,
    'BinValAxisProfile': BinValAxisProfile,
    'BinValAxisProfileDiffCI': BinValAxisProfileDiffCI,
    # TODO(JJG): TwoDimBinMethodBase(AnalysisBase)
    #'BinValAxes2D': BinValAxes2D,
    #Derived from AnalysisBase
    'BinValAxisPDF': BinValAxisPDF,
    'BinValAxisStatsComposite': BinValAxisStatsComposite,
    'GrossValues': GrossValues,
}

# NOTES:
# (1) FCAxis* types require non-zero forecast length
# (2) CYAxis* types require > 1 analysis cycle
# (3) CYAxisFCLines requires (1) and (2)
# (4) *DiffCI types require more than one experiment
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
4096,
62,
29487,
62,
12543,
2733,
355,
275,
79,
69,
198,
11748,
9874,
768,
62,
26791,
355,
809,
198,
11748,
2747,
18156,
62,
11250,
82,
355,
279,
10414,
198,
6738,
17... | 2.750207 | 3,631 |
"""
Test if we can have a list of "allowable levels" and if a user requests
the display of a level not in that list we CANCEL the zoom operation.
Usage: test_displayable_levels.py [-d] [-h] [-t (OSM|GMT)]
"""
import sys
import wx
import pyslip
# initialize the logging system
import pyslip.log as log
# Rebind the pyslip.log module name to a Log instance; a second import of this
# module raises AttributeError (log is already an instance), which we ignore.
try:
    log = log.Log("pyslip.log")
except AttributeError:
    # already set up, ignore exception
    pass

######
# Various constants
######

DemoName = 'pySlip %s - Zoom undo test' % pyslip.__version__
DemoWidth = 1000
DemoHeight = 800
DemoAppSize = (DemoWidth, DemoHeight)

InitViewLevel = 2
InitViewPosition = (100.494167, 13.7525)    # Bangkok

################################################################################
# The main application frame
################################################################################

################################################################################

if __name__ == '__main__':
    import sys
    import getopt
    import traceback

    # print some usage information
    # NOTE(review): usage(), excepthook and TestFrame are referenced below but
    # are not defined in this fragment; confirm they exist elsewhere in the file.

    # our own handler for uncaught exceptions
    sys.excepthook = excepthook

    # decide which tiles to use, default is GMT
    argv = sys.argv[1:]

    try:
        (opts, args) = getopt.getopt(argv, 'ht:', ['help', 'tiles='])
    except getopt.error:
        usage()
        sys.exit(1)

    tile_source = 'GMT'
    for (opt, param) in opts:
        if opt in ['-h', '--help']:
            usage()
            sys.exit(0)
        elif opt in ('-t', '--tiles'):
            tile_source = param
    tile_source = tile_source.lower()

    # set up the appropriate tile source
    if tile_source == 'gmt':
        import pyslip.gmt_local as Tiles
    elif tile_source == 'osm':
        import pyslip.open_street_map as Tiles
    else:
        usage('Bad tile source: %s' % tile_source)
        sys.exit(3)

    # start wxPython app
    app = wx.App()
    TestFrame().Show()
    app.MainLoop()
| [
37811,
198,
14402,
611,
356,
460,
423,
257,
1351,
286,
366,
12154,
540,
2974,
1,
290,
611,
257,
2836,
7007,
198,
1169,
3359,
286,
257,
1241,
407,
287,
326,
1351,
356,
15628,
34,
3698,
262,
19792,
4905,
13,
198,
198,
28350,
25,
1332,... | 2.739437 | 710 |
"""
Ex 20 - the same teacher from the previous challenge wants to raffle off the order of students' school assignments. Make a program that reads the names of the four students and shows the order of the names drawn
"""
from random import shuffle
Est_1 = str(input('Type the first student: '))
Est_2 = str(input('Type the first student: '))
Est_3 = str(input('Type the first student: '))
Est_4 = str(input('Type the first student: '))
order = [Est_1, Est_2, Est_3, Est_4]
print('-' * 30)
shuffle(order)
print(order)
input('Enter to exit')
| [
37811,
198,
3109,
1160,
532,
262,
976,
4701,
422,
262,
2180,
4427,
3382,
284,
374,
30697,
572,
262,
1502,
286,
2444,
6,
1524,
25815,
13,
6889,
257,
1430,
326,
9743,
262,
3891,
286,
262,
1440,
2444,
290,
2523,
262,
1502,
286,
262,
38... | 3.25 | 168 |
"""
Rick Towler
Midwater Assessment and Conservation Engineering
NOAA Alaska Fisheries Science Center
rick.towler@noaa.gov
"""
from PyQt4.QtCore import *
from PyQt4.QtGui import *
from QIVPolygonItem import QIVPolygonItem
from QIVMarkerText import QIVMarkerText
class QIVPolygon(QGraphicsItemGroup):
    """
    QIVPolygon implements open and closed polygon items with simplified vertex
    labeling. The labels are implemented by QIVMarkerText, are non-scaling,
    and provide the ability to justify and offset labels from the vertex anchor.
    If you only need a simple polygon object without labeling, you can use
    QIVPolygonItem directly.

    If a polygon is specified as "open" the last vertex is not connected
    the first and the polygon cannot be filled. You can also think of open
    polygons as polylines. "Closed" polygons do have their last vertex connected
    to the first. Closed polygons can be filled by setting the fill keyword.

    QIVPolygon Arguments:

        vertices - The polygon vertices as:
                    A list of QPoint or QpointF objects defining the vertices
                    A list of [x,y] pairs (i.e. [[x,y],[x,y],[x,y],...]
                    A QRect or QRectF object
        color - a 3 element list or tuple containing the RGB triplet
                specifying the outline color of the polygon
        thickness - A float specifying the outline thickness of the polygon.
        alpha - A integer specifying the opacity of the polygon. 0 is transparent
                and 255 is solid.
        linestyle - '=' for solid, '-' for dashed, and '.' for dotted.
        fill - a 3 element list or tuple containing the RGB triplet
                specifying the fill color of the polygon. Set to None for
                no fill.

    NOTE(review): self.labels, self.polygon and self.view are read by the
    methods below but initialized in __init__, which is not part of this
    fragment -- confirm it sets labels=[] and stores polygon/view.
    """

    def getLabelsFromName(self, labelName):
        '''
        returns a list of QIVMarkerText references that share the name provided in the
        labelName argument.
        '''
        #  collect every attached label whose .name matches
        return [label for label in self.labels if label.name == labelName]


    def _resolveLabelRefs(self, labels):
        '''
        Normalize the flexible "labels" argument accepted by the public label
        methods into a flat list of QIVMarkerText references.  "labels" may be
        a single label reference, a single label name (str), or a list mixing
        the two; a name resolves to every label sharing that name.
        '''
        if not isinstance(labels, list):
            labels = [labels]
        refs = []
        for item in labels:
            if isinstance(item, str):
                #  a label name - resolve to zero or more references
                refs.extend(self.getLabelsFromName(item))
            else:
                #  assume item already is a label reference
                refs.append(item)
        return refs


    def removeLabel(self, labels):
        '''
        removeLabel removes a marker label given the label reference or labelName.
        You can also pass a list of references or names. If the label name is provided,
        all labels with that name will be removed.

        Bug fix: the original removed the name string (or referenced an
        undefined variable) instead of the resolved label reference, so removal
        by name never worked; it also mutated self.labels while iterating it
        when called via removeAllLabels.  Resolving to a separate reference
        list first fixes both.
        '''
        for ref in self._resolveLabelRefs(labels):
            try:
                self.labels.remove(ref)
                self.removeFromGroup(ref)
            except:
                #  bad reference - not in our list of labels
                pass


    def removeAllLabels(self):
        '''
        removeAllLabels is a convenience method to clear all labels associated with this mark.
        '''
        self.removeLabel(self.labels)


    def getLabels(self):
        '''
        getLabels returns the list of labels associated with this mark
        '''
        return self.labels


    def addLabel(self, vertex, text, size=10, font='helvetica', italics=False, weight=-1,
                 color=[0,0,0], alpha=255, halign='left', valign='top', name='QIVPolygonLabel',
                 offset=None):
        """
        Add a label to the polygon at a specified vertex. Labels are children of the polygon.

              vertex (int)  - The 0 based vertex number to attach the label to.
              text (string) - The text to add to the dimension line.
            offset (QPointF) - An offset from your position.  The units are pixels at the
                               image's native resolution. This gets muddled when used with
                               classes that transform coordinates, especially QMapViewer.
                  size (int) - The text size, in point size
               font (string) - A string containing the font family to use.  Either stick
                               to the basics with this (i.e. "times", "helvetica") or
                               consult the QFont docs.
              italics (bool) - Set to true to italicise the font.
                weight (int) - Set to an integer in the range 0-99. 50 is normal, 75 is bold.
                color (list) - A 3 element list or tuple containing the RGB triplet
                               specifying the color of the text.
                 alpha (int) - An integer specifying the opacity of the text. 0 is transparent
                               and 255 is solid.
             halign (string) - Set this value to set the horizontal anchor point. Values are:
                                  'left' - Sets the anchor to the left side of the text
                                  'center' - Sets the anchor to the middle of the text
                                  'right' - Sets the anchor to the right side of the text
             valign (string) - Set this value to set the vertical anchor point. Values are:
                                  'top' - Sets the anchor to the top of the text
                                  'center' - Sets the anchor to the middle of the text
                                  'bottom' - Sets the anchor to the bottom of the text
               name (string) - Set this to the name associated with the text object. The name
                               can be used to differentiate between your text objects.
        """
        #  NOTE: the mutable default color=[0,0,0] is kept for interface
        #  compatibility; it is only passed through, never mutated here.
        if (offset == None) or (offset == []):
            offset = QPointF(0,0)

        #  get the position given the vertex index
        position = self.polygon[vertex]

        #  create a QIVMarkerText associated with the provided mark/line
        textItem = QIVMarkerText(position, text, offset=offset, size=size, font=font, italics=italics,
                                 weight=weight, color=color, alpha=alpha, halign=halign,
                                 valign=valign, name=name, view=self.view)

        #  add the label to our list of labels and to this item group
        self.labels.append(textItem)
        self.addToGroup(textItem)


    def setLabelText(self, labels, text):
        '''
        Sets the label text given the label reference or name and text.
        Accepts a single reference/name or a list of them.
        '''
        for ref in self._resolveLabelRefs(labels):
            try:
                ref.setText(text)
            except:
                #  bad reference - not one of our labels
                pass


    def setLabelVisible(self, labels, show):
        '''
        Sets the label visibility given the label reference or name and the
        visibility state.  Accepts a single reference/name or a list of them.
        '''
        for ref in self._resolveLabelRefs(labels):
            try:
                ref.setVisible(show)
            except:
                #  bad reference - not one of our labels
                pass


    def showLabels(self, labels=None):
        """
        showLabels makes the provided label or labels visible. Labels can be
        a list of label references, a list of label names, or a single reference
        or name. If labels is None, all labels for this mark are visible.
        """
        if (labels == None):
            labels = self.labels
        self.setLabelVisible(labels, True)


    def hideLabels(self, labels=None):
        """
        hideLabels makes the provided label or labels invisible. Labels can be
        a list of label references, a list of label names, or a single reference
        or name. If labels is None, all labels for this mark are hidden.
        """
        if (labels == None):
            labels = self.labels
        self.setLabelVisible(labels, False)


    '''
    The following methods operate on the QIVPolygonItem object. See that
    class for calling details.
    '''
| [
37811,
198,
33048,
24324,
1754,
198,
22622,
7050,
25809,
290,
23702,
14044,
198,
15285,
3838,
12926,
49179,
5800,
3337,
198,
5557,
13,
83,
30014,
31,
3919,
7252,
13,
9567,
198,
37811,
198,
198,
6738,
9485,
48,
83,
19,
13,
48,
83,
1405... | 2.108154 | 5,261 |
from abc import ABC, abstractmethod
import enum
import numpy as np
from logging import Logger
| [
6738,
450,
66,
1330,
9738,
11,
12531,
24396,
198,
11748,
33829,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
18931,
1330,
5972,
1362,
628,
198
] | 3.84 | 25 |
import os
from telapy.api.t2d import Telemac2d
from telapy.api.t3d import Telemac3d
from telapy.api.wac import Tomawac
from telapy.api.sis import Sisyphe
from mpi4py import MPI
import numpy as np
from .apitelemacaws import ApiTelemacAWS
# Module name -> telapy API class used to instantiate the matching study.
modules = {
    "telemac2d":Telemac2d,
    "telemac3d":Telemac3d,
    "tomawac":Tomawac,
    "sisyphe":Sisyphe
}
# Short variable codes -> TELEMAC variable names.
VARNAMES={
    "U":"VELOCITYU",
    "V":"VELOCITYV",
    "H":"WATERDEPTH",
    "S":"FREESURFACE",
    "B":"BOTTOMELEVATION",
}

# NOTE(review): ApiTelemac is not defined in this fragment -- this line raises
# NameError unless the class is defined elsewhere in the file.
ApiTelemac.__doc__=ApiTelemacAWS.__doc__
11748,
28686,
198,
6738,
13632,
12826,
13,
15042,
13,
83,
17,
67,
1330,
1665,
10671,
330,
17,
67,
198,
6738,
13632,
12826,
13,
15042,
13,
83,
18,
67,
1330,
1665,
10671,
330,
18,
67,
198,
6738,
13632,
12826,
13,
15042,
13,
86,
330,
... | 2.13617 | 235 |
""" Analytical expressions of information theoretical quantities. """
from scipy.linalg import det, inv
from numpy import log, prod, absolute, exp, pi, trace, dot, cumsum, \
hstack, ix_, sqrt, eye, diag, array, sum
from ite.shared import compute_h2
def analytical_value_h_shannon(distr, par):
    """ Analytical value of the Shannon entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution ('uniform' or 'normal').
    par : dictionary
        Parameters of the distribution. If distr = 'uniform': par["a"],
        par["b"], par["l"] <- lxU[a,b]. If distr = 'normal' : par["cov"]
        is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Shannon entropy.

    Raises
    ------
    Exception
        If distr is not a supported distribution name.
    """
    if distr == 'uniform':
        # par = {"a": a, "b": b, "l": l}
        # entropy of a box U[a,b], shifted by log|det(l)| for the linear map l
        box_volume = prod(par["b"] - par["a"])
        return log(box_volume) + log(absolute(det(par["l"])))

    if distr == 'normal':
        # par = {"cov": c};  H = 1/2 log((2*pi*e)^d |c|)
        dim = par["cov"].shape[0]
        return 1/2 * log((2 * pi * exp(1))**dim * det(par["cov"]))

    raise Exception('Distribution=?')
def analytical_value_c_cross_entropy(distr1, distr2, par1, par2):
    """ Analytical value of the cross-entropy for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Name of the distributions (only 'normal'/'normal' is supported).
    par1, par2 : dictionaries
        Parameters of the distribution. If distr1 = distr2 =
        'normal': par1["mean"], par1["cov"] and par2["mean"],
        par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    c : float
        Analytical value of the cross-entropy.

    Raises
    ------
    Exception
        If the distribution pair is not supported.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')

    # covariance matrices, expectations:
    m1, c1 = par1['mean'], par1['cov']
    m2, c2 = par2['mean'], par2['cov']
    dim = len(m1)

    precision2 = inv(c2)
    delta = m1 - m2
    # C(p1,p2) = 1/2 [d log(2*pi) + log|c2| + tr(c2^-1 c1) + (m1-m2)' c2^-1 (m1-m2)]
    quad_term = dot(delta, dot(precision2, delta))
    return (dim * log(2*pi) + log(det(c2)) + trace(dot(precision2, c1)) +
            quad_term) / 2
def analytical_value_d_kullback_leibler(distr1, distr2, par1, par2):
    """ Analytical value of the KL divergence for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of the distributions (only 'normal'/'normal' is supported).
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 =
        'normal': par1["mean"], par1["cov"] and par2["mean"],
        par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    d : float
        Analytical value of the Kullback-Leibler divergence.

    Raises
    ------
    Exception
        If the distribution pair is not supported.
    """
    if not (distr1 == 'normal' and distr2 == 'normal'):
        raise Exception('Distribution=?')

    # covariance matrices, expectations:
    m1, c1 = par1['mean'], par1['cov']
    m2, c2 = par2['mean'], par2['cov']
    dim = len(m1)

    precision2 = inv(c2)
    delta = m1 - m2
    # D(p1||p2) = 1/2 [log(|c2|/|c1|) + tr(c2^-1 c1) + (m1-m2)' c2^-1 (m1-m2) - d]
    return (log(det(c2)/det(c1)) + trace(dot(precision2, c1)) +
            dot(delta, dot(precision2, delta)) - dim) / 2
def analytical_value_i_shannon(distr, par):
    """ Closed-form Shannon mutual information for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution; only 'normal' is handled.
    par : dict
        par["ds"] is the vector of component dimensions, par["cov"] the
        (joint) covariance matrix.

    Returns
    -------
    i : float
        Analytical value of the Shannon mutual information.
    """
    if distr != 'normal':
        raise Exception('Distribution=?')

    c, ds = par["cov"], par["ds"]
    # 0, d_1, d_1+d_2, ..., d_1+...+d_{M-1}: first index of each subspace
    starts = cumsum(hstack((0, ds[:-1])))
    prod_marginal_dets = 1
    for start, width in zip(starts, ds):
        block = range(start, start + width)
        prod_marginal_dets *= det(c[ix_(block, block)])
    # I = 1/2 log(prod_m |C_m| / |C|):
    return log(prod_marginal_dets / det(c)) / 2
def analytical_value_h_renyi(distr, alpha, par):
    """ Closed-form Renyi entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    alpha : float, alpha != 1
        Parameter of the Renyi entropy.
    par : dict
        If distr = 'uniform': par["a"], par["b"], par["l"] <- l x U[a,b].
        If distr = 'normal': par["cov"] is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Renyi entropy.

    References
    ----------
    Kai-Sheng Song. Renyi information, loglikelihood and an intrinsic
    distribution measure. Journal of Statistical Planning and Inference
    93: 51-69, 2001.
    """
    if distr == 'uniform':
        # linear-transformation rule of the Renyi entropy applied to
        # l x U[a,b]:
        return log(prod(par["b"] - par["a"])) + log(absolute(det(par["l"])))
    if distr == 'normal':
        cov = par["cov"]
        d = cov.shape[0]
        return (log((2 * pi)**(d / 2) * sqrt(absolute(det(cov))))
                - d * log(alpha) / 2 / (1 - alpha))
    raise Exception('Distribution=?')
def analytical_value_h_tsallis(distr, alpha, par):
    """ Closed-form Tsallis entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution.
    alpha : float, alpha != 1
        Parameter of the Tsallis entropy.
    par : dict
        If distr = 'uniform': par["a"], par["b"], par["l"] <- l x U[a,b].
        If distr = 'normal': par["cov"] is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Tsallis entropy.
    """
    # The Tsallis entropy is a monotone transform of the Renyi entropy:
    renyi_h = analytical_value_h_renyi(distr, alpha, par)
    return (exp((1 - alpha) * renyi_h) - 1) / (1 - alpha)
def analytical_value_k_prob_product(distr1, distr2, rho, par1, par2):
    """ Closed-form probability product kernel value.

    Parameters
    ----------
    distr1, distr2 : str
        Names of the distributions; only ('normal', 'normal') is handled.
    rho : float, >0
        Parameter of the probability product kernel.
    par1, par2 : dict
        par["mean"] and par["cov"] are the mean vector and the covariance
        matrix of the corresponding Gaussian.

    Returns
    -------
    k : float
        Analytical value of the probability product kernel.
    """
    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')

    c1, m1 = par1['cov'], par1['mean']
    c2, m2 = par2['cov'], par2['mean']
    dim = len(m1)
    prec1, prec2 = inv(c1), inv(c2)     # precision matrices
    c12 = inv(prec1 + prec2)
    m12 = dot(prec1, m1) + dot(prec2, m2)
    quad = (dot(m1, dot(prec1, m1)) + dot(m2, dot(prec2, m2))
            - dot(m12, dot(c12, m12)))
    return ((2 * pi)**((1 - 2 * rho) * dim / 2) * rho**(-dim / 2)
            * absolute(det(c12))**(1 / 2)
            * absolute(det(c1))**(-rho / 2)
            * absolute(det(c2))**(-rho / 2)
            * exp(-rho / 2 * quad))
def analytical_value_k_expected(distr1, distr2, kernel, par1, par2):
    """ Analytical value of expected kernel for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Names of the distributions; only ('normal', 'normal') is handled.
    kernel : Kernel class.
        Supported: kernel.name == 'RBF' (with kernel.sigma), or
        kernel.name == 'polynomial' with kernel.exponent in {2, 3} and
        kernel.c == 1.
    par1, par2 : dictionary-s
        Parameters of the distributions. If distr1 = distr2 =
        'normal': par1["mean"], par1["cov"] and par2["mean"],
        par2["cov"] are the means and the covariance matrices.

    Returns
    -------
    k : float
        Analytical value of the expected kernel.

    References
    ----------
    Krikamol Muandet, Kenji Fukumizu, Francesco Dinuzzo, and Bernhard
    Scholkopf. Learning from distributions via support measure machines.
    In Advances in Neural Information Processing Systems (NIPS), pages
    10-18, 2011.
    """
    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']
        if kernel.name == 'RBF':
            dim = len(m1)
            gam = 1 / kernel.sigma ** 2  # RBF precision parameter
            diffm = m1 - m2
            exp_arg = dot(dot(diffm, inv(c1 + c2 + eye(dim) / gam)), diffm)
            k = exp(-exp_arg / 2) / \
                sqrt(absolute(det(gam * c1 + gam * c2 + eye(dim))))
        elif kernel.name == 'polynomial':
            # closed forms are implemented only for (x.y + 1)^2 and
            # (x.y + 1)^3:
            if kernel.exponent == 2:
                if kernel.c == 1:
                    k = (dot(m1, m2) + 1)**2 + sum(c1 * c2) + \
                        dot(m1, dot(c2, m1)) + dot(m2, dot(c1, m2))
                else:
                    raise Exception('The offset of the polynomial kernel' +
                                    ' (c) should be one!')
            elif kernel.exponent == 3:
                if kernel.c == 1:
                    k = (dot(m1, m2) + 1)**3 + \
                        6 * dot(dot(c1, m1), dot(c2, m2)) + \
                        3 * (dot(m1, m2) + 1) * (sum(c1 * c2) +
                                                 dot(m1, dot(c2, m1)) +
                                                 dot(m2, dot(c1, m2)))
                else:
                    raise Exception('The offset of the polynomial kernel' +
                                    ' (c) should be one!')
            else:
                raise Exception('The exponent of the polynomial kernel ' +
                                'should be either 2 or 3!')
        else:
            raise Exception('Kernel=?')
    else:
        raise Exception('Distribution=?')
    return k
def analytical_value_d_mmd(distr1, distr2, kernel, par1, par2):
    """ Closed-form maximum mean discrepancy (MMD).

    Parameters
    ----------
    distr1, distr2 : str
        Names of the distributions.
    kernel: Kernel class.
    par1, par2 : dict
        par["mean"] and par["cov"] are the mean vector and the covariance
        matrix of the corresponding Gaussian.

    Returns
    -------
    d : float
        Analytical value of MMD.
    """
    # MMD^2 = E[k(x,x')] + E[k(y,y')] - 2 E[k(x,y)]:
    within_1 = analytical_value_k_expected(distr1, distr1, kernel, par1, par1)
    within_2 = analytical_value_k_expected(distr2, distr2, kernel, par2, par2)
    between = analytical_value_k_expected(distr1, distr2, kernel, par1, par2)
    return sqrt(within_1 + within_2 - 2 * between)
def analytical_value_h_sharma_mittal(distr, alpha, beta, par):
    """ Closed-form Sharma-Mittal entropy.

    Parameters
    ----------
    distr : str
        Name of the distribution; only 'normal' is handled.
    alpha : float, 0 < alpha != 1
        Parameter of the Sharma-Mittal entropy.
    beta : float, beta != 1
        Parameter of the Sharma-Mittal entropy.
    par : dict
        par["cov"] is the covariance matrix.

    Returns
    -------
    h : float
        Analytical value of the Sharma-Mittal entropy.

    References
    ----------
    Frank Nielsen and Richard Nock. A closed-form expression for the
    Sharma-Mittal entropy of exponential families. Journal of Physics A:
    Mathematical and Theoretical, 45:032003, 2012.
    """
    if distr != 'normal':
        raise Exception('Distribution=?')

    cov = par['cov']
    d = cov.shape[0]
    base = (2 * pi)**(d / 2) * sqrt(absolute(det(cov)))
    return (base**(1 - beta) / alpha**(d * (1 - beta) / (2 * (1 - alpha)))
            - 1) / (1 - beta)
def analytical_value_h_phi(distr, par, c):
    """ Closed-form Phi entropy for the given distribution.

    Parameters
    ----------
    distr : str
        Name of the distribution; only 'uniform' (U[a,b]) is handled.
    par : dict
        par['a'], par['b'] in U[a,b].
    c : float, >=1
        Parameter of the Phi-entropy: phi = lambda x: x**c

    Returns
    -------
    h : float
        Analytical value of the Phi entropy.
    """
    if distr != 'uniform':
        raise Exception('Distribution=?')
    a, b = par['a'], par['b']
    return 1 / (b - a)**c
def analytical_value_d_chi_square(distr1, distr2, par1, par2):
    """ Closed-form (Pearson) chi^2 divergence.

    Parameters
    ----------
    distr1, distr2 : str
        Names of distributions.
    par1, par2 : dict
        If (distr1, distr2) = ('uniform', 'uniform'): distr1 = U[0,a]
        with a = par1['a'], distr2 = U[0,b] with b = par2['a'].
        If (distr1, distr2) = ('normalI', 'normalI'): distr1 = N(m1,I)
        with m1 = par1['mean'], distr2 = N(m2,I) with m2 = par2['mean'].

    Returns
    -------
    d : float
        Analytical value of the (Pearson) chi^2 divergence.

    References
    ----------
    Frank Nielsen and Richard Nock. On the chi square and higher-order chi
    distances for approximating f-divergence. IEEE Signal Processing
    Letters, 2:10-13, 2014.
    """
    if distr1 == 'uniform' and distr2 == 'uniform':
        return prod(par2['a']) / prod(par1['a']) - 1
    if distr1 == 'normalI' and distr2 == 'normalI':
        delta = par2['mean'] - par1['mean']
        return exp(dot(delta, delta)) - 1
    raise Exception('Distribution=?')
def analytical_value_d_l2(distr1, distr2, par1, par2):
    """ Closed-form L2 divergence for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Names of distributions; only ('uniform', 'uniform') is handled.
    par1, par2 : dict
        distr1 = U[0,a] with a = par1['a'], distr2 = U[0,b] with
        b = par2['a'].

    Returns
    -------
    d : float
        Analytical value of the L2 divergence.
    """
    if distr1 != 'uniform' or distr2 != 'uniform':
        raise Exception('Distribution=?')
    return sqrt(1 / prod(par2['a']) - 1 / prod(par1['a']))
def analytical_value_d_renyi(distr1, distr2, alpha, par1, par2):
    """ Closed-form Renyi divergence for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Names of distributions; only ('normal', 'normal') is handled.
    alpha : float, != 1
        Parameter of the Renyi divergence.
    par1, par2 : dict
        par["mean"] and par["cov"] are the mean vector and the covariance
        matrix of the corresponding Gaussian.

    Returns
    -------
    d : float
        Analytical value of the Renyi divergence.

    References
    ----------
    Manuel Gil. On Renyi Divergence Measures for Continuous Alphabet
    Sources. Phd Thesis, Queen's University, 2011.
    """
    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')

    c1, m1 = par1['cov'], par1['mean']
    c2, m2 = par2['cov'], par2['mean']
    blended = alpha * c2 + (1 - alpha) * c1   # alpha-mixture of covariances
    delta = m1 - m2
    return alpha * (dot(dot(delta, inv(blended)), delta) / 2
                    - log(absolute(det(blended))
                          / (det(c1)**(1 - alpha) * det(c2)**alpha))
                    / (2 * alpha * (alpha - 1)))
def analytical_value_d_tsallis(distr1, distr2, alpha, par1, par2):
    """ Closed-form Tsallis divergence for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Names of distributions; only ('normal', 'normal') is handled.
    alpha : float, != 1
        Parameter of the Tsallis divergence.
    par1, par2 : dict
        par["mean"] and par["cov"] are the mean vector and the covariance
        matrix of the corresponding Gaussian.

    Returns
    -------
    d : float
        Analytical value of the Tsallis divergence.
    """
    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    # monotone transform of the Renyi divergence:
    renyi_d = analytical_value_d_renyi(distr1, distr2, alpha, par1, par2)
    return (exp((alpha - 1) * renyi_d) - 1) / (alpha - 1)
def analytical_value_d_sharma_mittal(distr1, distr2, alpha, beta, par1,
                                     par2):
    """ Analytical value of the Sharma-Mittal divergence.

    Parameters
    ----------
    distr1, distr2 : str-s
        Names of distributions; only ('normal', 'normal') is handled.
    alpha : float, 0 < alpha \ne 1
        Parameter of the Sharma-Mittal divergence.
    beta : float, beta \ne 1
        Parameter of the Sharma-Mittal divergence.
    par1, par2 : dictionary-s
        Parameters of distributions.
        If (distr1,distr2) = ('normal','normal'), then distr1 =
        N(m1,c1), where m1 = par1['mean'], c1 = par1['cov'],
        distr2 = N(m2,c2), where m2 = par2['mean'], c2 =
        par2['cov'].

    Returns
    -------
    d : float
        Analytical value of the Sharma-Mittal divergence.

    References
    ----------
    Frank Nielsen and Richard Nock. A closed-form expression for the
    Sharma-Mittal entropy of exponential families. Journal of Physics A:
    Mathematical and Theoretical, 45:032003, 2012.
    """
    if distr1 == 'normal' and distr2 == 'normal':
        # covariance matrices, expectations:
        c1, m1 = par1['cov'], par1['mean']
        c2, m2 = par2['cov'], par2['mean']
        # covariance of the alpha-blended precision matrices:
        c = inv(alpha * inv(c1) + (1 - alpha) * inv(c2))
        diffm = m1 - m2
        # Jensen difference divergence, c2:
        # NOTE(review): `c2` is reassigned below, shadowing the second
        # covariance matrix; harmless since the matrix is no longer
        # needed, but confusing to read.
        j = (log(absolute(det(c1))**alpha * absolute(det(c2))**(1 -
            alpha) /
            absolute(det(c))) + alpha * (1 - alpha) *
            dot(dot(diffm, inv(c)), diffm)) / 2
        c2 = exp(-j)
        d = (c2**((1 - beta) / (1 - alpha)) - 1) / (beta - 1)
    else:
        raise Exception('Distribution=?')
    return d
def analytical_value_d_bregman(distr1, distr2, alpha, par1, par2):
    """ Closed-form Bregman divergence for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Names of distributions; only ('uniform', 'uniform') is handled.
    alpha : float, != 1
        Parameter of the Bregman divergence.
    par1, par2 : dict
        distr1 = U[0,a] with a = par1['a'], distr2 = U[0,b] with
        b = par2['a'].

    Returns
    -------
    d : float
        Analytical value of the Bregman divergence.
    """
    if distr1 != 'uniform' or distr2 != 'uniform':
        raise Exception('Distribution=?')
    a, b = par1['a'], par2['a']
    return (prod(a)**(1 - alpha) - prod(b)**(1 - alpha)) / (alpha - 1)
def analytical_value_d_jensen_renyi(distr1, distr2, w, par1, par2):
    """ Closed-form Jensen-Renyi divergence (alpha = 2).

    Parameters
    ----------
    distr1, distr2 : str
        Names of distributions; only ('normal', 'normal') is handled.
    w : vector, w[i] > 0 (for all i), sum(w) = 1
        Weight used in the Jensen-Renyi divergence.
    par1, par2 : dict
        distr1 = N(m1, s1^2 I) with m1 = par1['mean'], s1 = par1['std'];
        distr2 = N(m2, s2^2 I) with m2 = par2['mean'], s2 = par2['std'].

    Returns
    -------
    d : float
        Analytical value of the Jensen-Renyi divergence.

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
    Gaussians and Applications to Group-Wise Shape Registration. Medical
    Image Computing and Computer-Assisted Intervention, 12: 648-655, 2009.
    """
    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')

    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    # H2(sum_i w_i y_i) - sum_i w_i H2(y_i), with H2 the quadratic Renyi
    # entropy:
    mixture_entropy = compute_h2(w, (m1, m2), (s1, s2))
    component_entropy = (w[0] * compute_h2((1,), (m1,), (s1,))
                         + w[1] * compute_h2((1,), (m2,), (s2,)))
    return mixture_entropy - component_entropy
def analytical_value_i_renyi(distr, alpha, par):
    """ Closed-form Renyi mutual information.

    Parameters
    ----------
    distr : str
        Name of the distribution; only 'normal' is handled.
    alpha : float
        Parameter of the Renyi mutual information.
    par : dict
        par["cov"] is the covariance matrix.

    Returns
    -------
    i : float
        Analytical value of the Renyi mutual information.
    """
    if distr != 'normal':
        raise Exception('Distribution=?')

    c = par["cov"]
    term_joint = -alpha / 2 * log(det(c))
    term_marginals = -(1 - alpha) / 2 * log(prod(diag(c)))
    term_mixture = log(det(alpha * inv(c)
                           + (1 - alpha) * diag(1 / diag(c)))) / 2
    return (term_joint + term_marginals - term_mixture) / (alpha - 1)
def analytical_value_k_ejr1(distr1, distr2, u, par1, par2):
    """ Closed-form Jensen-Renyi kernel-1 (alpha = 2: fixed).

    Parameters
    ----------
    distr1, distr2 : str
        Names of distributions; only ('normal', 'normal') is handled.
    u : float, >0
        Parameter of the Jensen-Renyi kernel-1.
    par1, par2 : dict
        distr1 = N(m1, s1^2 I) with m1 = par1['mean'], s1 = par1['std'];
        distr2 = N(m2, s2^2 I) with m2 = par2['mean'], s2 = par2['std'].

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
    Gaussians and Applications to Group-Wise Shape Registration. Medical
    Image Computing and Computer-Assisted Intervention, 12: 648-655, 2009.
    """
    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')

    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    weights = array([1/2, 1/2])
    # quadratic Renyi entropy of the equal-weight mixture:
    mixture_h = compute_h2(weights, (m1, m2), (s1, s2))
    return exp(-u * mixture_h)
def analytical_value_k_ejr2(distr1, distr2, u, par1, par2):
    """ Closed-form Jensen-Renyi kernel-2 (alpha = 2: fixed).

    Parameters
    ----------
    distr1, distr2 : str
        Names of distributions; only ('normal', 'normal') is handled.
    u : float, >0
        Parameter of the Jensen-Renyi kernel-2.
    par1, par2 : dict
        distr1 = N(m1, s1^2 I) with m1 = par1['mean'], s1 = par1['std'];
        distr2 = N(m2, s2^2 I) with m2 = par2['mean'], s2 = par2['std'].
    """
    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')
    weights = array([1/2, 1/2])
    div = analytical_value_d_jensen_renyi(distr1, distr2, weights, par1, par2)
    return exp(-u * div)
def analytical_value_k_ejt1(distr1, distr2, u, par1, par2):
    """ Closed-form Jensen-Tsallis kernel-1 (alpha = 2: fixed).

    Parameters
    ----------
    distr1, distr2 : str
        Names of distributions; only ('normal', 'normal') is handled.
    u : float, >0
        Parameter of the Jensen-Tsallis kernel-1.
    par1, par2 : dict
        distr1 = N(m1, s1^2 I) with m1 = par1['mean'], s1 = par1['std'];
        distr2 = N(m2, s2^2 I) with m2 = par2['mean'], s2 = par2['std'].

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
    Gaussians and Applications to Group-Wise Shape Registration. Medical
    Image Computing and Computer-Assisted Intervention, 12: 648-655, 2009.
    (Renyi entropy)
    """
    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')

    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    weights = array([1/2, 1/2])
    renyi_h = compute_h2(weights, (m1, m2), (s1, s2))
    # quadratic Renyi entropy -> quadratic Tsallis entropy:
    tsallis_h = 1 - exp(-renyi_h)
    return exp(-u * tsallis_h)
def analytical_value_k_ejt2(distr1, distr2, u, par1, par2):
    """ Closed-form Jensen-Tsallis kernel-2 (alpha = 2: fixed).

    Parameters
    ----------
    distr1, distr2 : str
        Names of distributions; only ('normal', 'normal') is handled.
    u : float, >0
        Parameter of the Jensen-Tsallis kernel-2.
    par1, par2 : dict
        distr1 = N(m1, s1^2 I) with m1 = par1['mean'], s1 = par1['std'];
        distr2 = N(m2, s2^2 I) with m2 = par2['mean'], s2 = par2['std'].

    References
    ----------
    Fei Wang, Tanveer Syeda-Mahmood, Baba C. Vemuri, David Beymer, and
    Anand Rangarajan. Closed-Form Jensen-Renyi Divergence for Mixture of
    Gaussians and Applications to Group-Wise Shape Registration. Medical
    Image Computing and Computer-Assisted Intervention, 12: 648-655, 2009.
    (analytical value of the Jensen-Renyi divergence)
    """
    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')

    m1, s1 = par1['mean'], par1['std']
    m2, s2 = par2['mean'], par2['std']
    w = array([1/2, 1/2])
    # 1 - exp(-H2) converts the quadratic Renyi entropy to the quadratic
    # Tsallis entropy; divergence = Tsallis entropy of the mixture minus
    # the weighted Tsallis entropies of the components:
    mixture = 1 - exp(-compute_h2(w, (m1, m2), (s1, s2)))
    components = (w[0] * (1 - exp(-compute_h2((1, ), (m1, ), (s1,))))
                  + w[1] * (1 - exp(-compute_h2((1,), (m2,), (s2,)))))
    return exp(-u * (mixture - components))
def analytical_value_d_hellinger(distr1, distr2, par1, par2):
    """ Closed-form Hellinger distance for the given distributions.

    Parameters
    ----------
    distr1, distr2 : str
        Names of the distributions; only ('normal', 'normal') is handled.
    par1, par2 : dict
        par["mean"] and par["cov"] are the mean vector and the covariance
        matrix of the corresponding Gaussian.

    Returns
    -------
    d : float
        Analytical value of the Hellinger distance.
    """
    if distr1 != 'normal' or distr2 != 'normal':
        raise Exception('Distribution=?')

    c1, m1 = par1['cov'], par1['mean']
    c2, m2 = par2['cov'], par2['mean']
    # "https://en.wikipedia.org/wiki/Hellinger_distance": Examples:
    delta = m1 - m2
    c_avg = (c1 + c2) / 2
    squared = 1 - (det(c1)**(1/4) * det(c2)**(1/4) / sqrt(det(c_avg))
                   * exp(-dot(delta, dot(inv(c_avg), delta)) / 8))
    return sqrt(squared)
def analytical_value_cond_h_shannon(distr, par):
    """ Closed-form conditional Shannon entropy H(y1|y2).

    Parameters
    ----------
    distr : str
        Name of the distribution; only 'normal' is handled.
    par : dict
        par["cov"] and par["dim1"] are the joint covariance matrix and
        the dimension of y1.

    Returns
    -------
    cond_h : float
        Analytical value of the conditional Shannon entropy.
    """
    if distr != 'normal':
        raise Exception('Distribution=?')
    # chain rule: H(y1|y2) = H(y1, y2) - H(y2)
    h_joint = analytical_value_h_shannon(distr, par)
    c, dim1 = par['cov'], par['dim1']  # joint covariance, dim(y1)
    h_marginal2 = analytical_value_h_shannon(distr, {"cov": c[dim1:, dim1:]})
    return h_joint - h_marginal2
def analytical_value_cond_i_shannon(distr, par):
    """ Analytical value of the conditional Shannon mutual information.

    The last subspace (as laid out by par["ds"]) is the conditioning one.

    Parameters
    ----------
    distr : str-s
        Names of the distributions; 'normal'.
    par : dictionary
        Parameters of the distribution. If distr is 'normal':
        par["cov"] and par["ds"] are the (joint) covariance matrix and
        the vector of subspace dimensions.

    Returns
    -------
    cond_i : float
        Analytical value of the conditional Shannon mutual
        information.
    """
    # initialization:
    ds = par['ds']
    len_ds = len(ds)
    # 0,d_1,d_1+d_2,...,d_1+...+d_M; starting indices of the subspaces:
    cum_ds = cumsum(hstack((0, ds[:-1])))
    # index range of the conditioning (= last) subspace:
    idx_condition = range(cum_ds[len_ds - 1],
                          cum_ds[len_ds - 1] + ds[len_ds - 1])
    if distr == 'normal':
        c = par['cov']
        # h_joint:
        h_joint = analytical_value_h_shannon(distr, par)
        # h_cross: entropy of each (subspace m, conditioning) pair summed
        # over the non-conditioning subspaces. NOTE: `par` is rebound
        # below, so the original argument dict is no longer accessible
        # past this point.
        h_cross = 0
        for m in range(len_ds-1):  # non-conditioning subspaces
            idx_m = range(cum_ds[m], cum_ds[m] + ds[m])
            idx_m_and_condition = hstack((idx_m, idx_condition))
            par = {"cov": c[ix_(idx_m_and_condition, idx_m_and_condition)]}
            h_cross += analytical_value_h_shannon(distr, par)
        # h_condition: entropy of the conditioning subspace alone:
        par = {"cov": c[ix_(idx_condition, idx_condition)]}
        h_condition = analytical_value_h_shannon(distr, par)
        # I = -H(joint) + sum_m H(m, cond) - (M-2) * H(cond):
        cond_i = -h_joint + h_cross - (len_ds - 2) * h_condition
    else:
        raise Exception('Distribution=?')
    return cond_i
| [
37811,
16213,
22869,
14700,
286,
1321,
16200,
17794,
13,
37227,
198,
198,
6738,
629,
541,
88,
13,
75,
1292,
70,
1330,
1062,
11,
800,
198,
6738,
299,
32152,
1330,
2604,
11,
40426,
11,
4112,
11,
1033,
11,
31028,
11,
12854,
11,
16605,
... | 2.045043 | 16,140 |
from IEX_29id.utils.strings import ClearCalcOut
from time import sleep
from epics import caget, caput
from IEX_29id.scans.setup import Scan_FillIn, Scan_Go
from IEX_29id.devices.detectors import cts
def AD_ROI_SetUp(AD, ROInum, xcenter=500, ycenter=500, xsize=50, ysize=50, binX=1, binY=1):
    """Configure ROI plugin `ROInum` of areaDetector IOC prefix `AD`.

    AD = "29id_ps4"
    AD = "29iddMPA"
    """
    # roiNUM=1 MPA_ROI_SetUp(535,539,50,50) center of MCP
    prefix = AD + ':ROI' + str(ROInum) + ':'
    # Min* PVs take the lower-left corner, so convert center -> start:
    caput(prefix + 'MinX', xcenter - xsize / 2.0)
    caput(prefix + 'MinY', ycenter - ysize / 2.0)
    caput(prefix + 'SizeX', xsize)
    caput(prefix + 'SizeY', ysize)
    caput(prefix + 'BinX', binX)
    caput(prefix + 'BinY', binY)
    caput(prefix + 'EnableCallbacks', 'Enable')
    # echo the readback so the operator sees the plugin is enabled:
    print(prefix + ' - ' + caget(prefix + 'EnableCallbacks_RBV', as_string=True))
    # MPA_ROI_Stats(roiNUM)
def AD_OVER_SetUp(AD, ROInum, OVERnum, linewidth=5, shape='Rectangle'):
    """Configure overlay `OVERnum` to outline ROI `ROInum` on IOC `AD`.

    AD = "29id_ps4"
    AD = "29iddMPA"
    shape= 'Cross', 'Rectangle', 'Ellipse','Text'
    """
    overlay = AD + ":Over1:" + str(OVERnum) + ":"
    roi = AD + ":ROI" + str(ROInum) + ":"
    caput(roi + 'EnableCallbacks', 'Enable')
    caput(overlay + "Name", "ROI" + str(ROInum))
    caput(overlay + "Shape", shape)
    # green outline:
    caput(overlay + "Red", 0)
    caput(overlay + "Green", 255)
    caput(overlay + "Blue", 0)
    caput(overlay + 'WidthX', linewidth)
    caput(overlay + 'WidthY', linewidth)
    # link the overlay position/size to the ROI readbacks ("CP" makes the
    # links update on change) so the overlay tracks the ROI:
    caput(overlay + "PositionXLink.DOL", roi + "MinX_RBV CP")
    caput(overlay + "SizeXLink.DOL", roi + "SizeX_RBV CP")
    caput(overlay + "PositionYLink.DOL", roi + "MinY_RBV CP")
    caput(overlay + "SizeYLink.DOL", roi + "SizeY_RBV CP")
    caput(overlay + "Use", "Yes")
# NOTE(review): this is an exact duplicate of AD_ROI_SetUp defined earlier
# in this file; this second definition silently shadows the first.
# Consider removing one copy.
def AD_ROI_SetUp(AD,ROInum,xcenter=500,ycenter=500,xsize=50,ysize=50,binX=1,binY=1):
    """Configure ROI plugin `ROInum` of areaDetector IOC prefix `AD`.

    AD = "29id_ps4"
    AD = "29iddMPA"
    """
    # roiNUM=1 MPA_ROI_SetUp(535,539,50,50) center of MCP
    ADplugin=AD+':ROI'+str(ROInum)+':'
    # Min* PVs take the lower-left corner, so convert center -> start:
    xstart=xcenter-xsize/2.0
    ystart=ycenter-ysize/2.0
    caput(ADplugin+'MinX',xstart)
    caput(ADplugin+'MinY',ystart)
    caput(ADplugin+'SizeX',xsize)
    caput(ADplugin+'SizeY',ysize)
    caput(ADplugin+'BinX',binX)
    caput(ADplugin+'BinY',binY)
    caput(ADplugin+'EnableCallbacks','Enable')
    print(ADplugin+' - '+caget(ADplugin+'EnableCallbacks_RBV',as_string=True))
    #MPA_ROI_Stats(roiNUM)
# NOTE(review): this is an exact duplicate of AD_OVER_SetUp defined earlier
# in this file; this second definition silently shadows the first.
# Consider removing one copy.
def AD_OVER_SetUp(AD,ROInum,OVERnum,linewidth=5,shape='Rectangle'):
    """Configure overlay `OVERnum` to outline ROI `ROInum` on IOC `AD`.

    AD = "29id_ps4"
    AD = "29iddMPA"
    shape= 'Cross', 'Rectangle', 'Ellipse','Text'
    """
    OVER1=AD+":Over1:"+str(OVERnum)+":"
    ROI=AD+":ROI"+str(ROInum)+":"
    caput(ROI+'EnableCallbacks','Enable')
    caput(OVER1+"Name","ROI"+str(ROInum))
    caput(OVER1+"Shape",shape)
    # green outline:
    caput(OVER1+"Red",0)
    caput(OVER1+"Green",255)
    caput(OVER1+"Blue",0)
    caput(OVER1+'WidthX',linewidth)
    caput(OVER1+'WidthY',linewidth)
    # link the overlay position/size to the ROI readbacks ("CP" makes the
    # links update on change) so the overlay tracks the ROI:
    caput(OVER1+"PositionXLink.DOL",ROI+"MinX_RBV CP")
    caput(OVER1+"SizeXLink.DOL",ROI+"SizeX_RBV CP")
    caput(OVER1+"PositionYLink.DOL",ROI+"MinY_RBV CP")
    caput(OVER1+"SizeYLink.DOL",ROI+"SizeY_RBV CP")
    caput(OVER1+"Use","Yes")
| [
6738,
314,
6369,
62,
1959,
312,
13,
26791,
13,
37336,
1330,
11459,
9771,
66,
7975,
198,
6738,
640,
1330,
3993,
198,
6738,
2462,
873,
1330,
269,
363,
316,
11,
1451,
315,
198,
6738,
314,
6369,
62,
1959,
312,
13,
1416,
504,
13,
40406,
... | 1.919036 | 1,618 |
# interpcl: interpolate angular power spectra
#
# author: Nicolas Tessore <n.tessore@ucl.ac.uk>
# license: MIT
'''
Interpolate angular power spectra (:mod:`interpcl`)
===================================================
.. currentmodule:: interpcl
A very small package that does interpolation of angular power spectra for
random fields on the sphere.
Install with pip::
pip install interpcl
Then import into your code::
from interpcl import interpcl
Functionality is absolutely minimal at this point. Please open an issue on
GitHub if you want to see added functionality.
Reference/API
-------------
.. autosummary::
:toctree: api
:nosignatures:
interpcl
'''
__version__ = '2021.5.20'
__all__ = [
'interpcl',
]
import numpy as np
from scipy.interpolate import interp1d
def interpcl(l, cl, lmax=None, dipole=True, monopole=False, **kwargs):
    r'''interpolate angular power spectrum

    Interpolate an angular power spectrum :math:`C(l)` using spline
    interpolation. Given input modes `l`, `cl`, returns the power spectrum
    for all integer modes from 0 to `lmax`, or the highest input mode if
    `lmax` is not given. The dipole is computed if `dipole` is ``True``, or
    set to zero, and similarly for `monopole`.

    Parameters
    ----------
    l, cl : array_like
        Input angular power spectrum. Must be one-dimensional arrays.
    lmax : int, optional
        Highest output mode. If not set, the highest input mode is used.
    dipole : bool, optional
        Compute the dipole (``True``), or set it to zero (``False``).
    monopole : bool, optional
        Compute the monopole (``True``), or set it to zero (``False``).
    **kwargs : dict, optional
        Keyword arguments for :class:`scipy.interpolate.interp1d`.

    Returns
    -------
    clout : array_like
        Interpolated angular power spectrum.
    '''
    # extrapolate outside the input mode range unless the caller overrides:
    fill = kwargs.pop('fill_value', 'extrapolate')
    if lmax is None:
        lmax = np.max(l)
    spline = interp1d(l, cl, fill_value=fill, **kwargs)
    clout = spline(np.arange(lmax + 1))
    # identity comparisons on purpose: only a literal False zeroes the mode
    if dipole is False:
        clout[1] = 0
    if monopole is False:
        clout[0] = 0
    return clout
| [
2,
987,
79,
565,
25,
39555,
378,
32558,
1176,
5444,
430,
198,
2,
198,
2,
1772,
25,
29737,
39412,
382,
1279,
77,
13,
83,
408,
382,
31,
36616,
13,
330,
13,
2724,
29,
198,
2,
5964,
25,
17168,
198,
7061,
6,
198,
198,
9492,
16104,
... | 2.760101 | 792 |
# DO NOT MODIFY THIS FILE
# Run me via: python3 -m unittest test_naive_priority_queue
import unittest
import time
from naive_priority_queue import NaivePriorityQueue
from job import Job
class TestNaivePriorityQueue(unittest.TestCase):
    """
    Initialization
    """
    def test_instantiation(self):
        """
        A NaivePriorityQueue exists.
        """
        # EAFP: constructing the class raises NameError if it is undefined,
        # which we convert into an explicit test failure.
        try:
            NaivePriorityQueue()
        except NameError:
            self.fail("Could not instantiate NaivePriorityQueue.")
# def test_internal(self):
# """
# A NaivePriorityQueue uses a list to store its data.
# """
# pq = NaivePriorityQueue()
# self.assertEqual(list, type(pq.data))
# def test_enqueue_one_internal(self):
# """
# Enqueueing a value adds it to the internal list.
# """
# pq = NaivePriorityQueue()
# j = Job(5, 'The')
# pq.enqueue(j)
# self.assertEqual(j, pq.data[0])
# def test_enqueue_two_internal(self):
# """
# Enqueueing two values results in the first enqueued value being the first
# one in the list, and the second value being the last one in the list.
# """
# pq = NaivePriorityQueue()
# first = Job(5, 'new')
# second = Job(6, 'moon')
# pq.enqueue(first)
# pq.enqueue(second)
# self.assertEqual(first, pq.data[0])
# self.assertEqual(second, pq.data[1])
# def test_enqueue_three_internal(self):
# """
# Enqueueing three values results in the first enqueued value being the first
# one in the list, and the third value being the last one in the list.
# """
# pq = NaivePriorityQueue()
# first = Job(5, 'rode')
# second = Job(6, 'high')
# third = Job(7, 'in')
# pq.enqueue(first)
# pq.enqueue(second)
# pq.enqueue(third)
# self.assertEqual(first, pq.data[0])
# self.assertEqual(second, pq.data[1])
# self.assertEqual(third, pq.data[2])
# def test_dequeue_one(self):
# """
# Dequeuing from a single-element queue returns the single value.
# """
# pq = NaivePriorityQueue()
# j = Job(5, 'the')
# pq.enqueue(j)
# self.assertEqual(j, pq.dequeue())
# def test_dequeue_one_internal(self):
# """
# Dequeuing from a single-element queue removes it from the internal list.
# """
# pq = NaivePriorityQueue()
# job = Job(5, 'crown')
# pq.enqueue(job)
# self.assertEqual(1, len(pq.data))
# _ = pq.dequeue()
# self.assertEqual(0, len(pq.data))
# # Hint: NaivePriorityQueues perform a linear search. Don't optimize.
# def test_dequeue_two(self):
# """
# Dequeuing from a two-element queue returns the one with highest priority.
# """
# pq = NaivePriorityQueue()
# lower_priority = Job(1, 'of')
# higher_priority = Job(3, 'the')
# pq.enqueue(higher_priority)
# pq.enqueue(lower_priority)
# self.assertEqual(higher_priority, pq.dequeue())
# def test_dequeue_two_internal(self):
# """
# Dequeuing from a two-element queue removes the job with the highest
# priority from the list.
# """
# pq = NaivePriorityQueue()
# lower_priority = Job(1, 'metropolis')
# higher_priority = Job(3, 'shining')
# pq.enqueue(higher_priority)
# pq.enqueue(lower_priority)
# _ = pq.dequeue()
# self.assertEqual(lower_priority, pq.data[0])
# self.assertEqual(1, len(pq.data))
# def test_dequeue_three(self):
# """
# Dequeuing from a three-element queue returns the jobs with the highest
# priority.
# """
# pq = NaivePriorityQueue()
# lower_priority = Job(1, 'like')
# middle_priority = Job(3, 'who')
# higher_priority = Job(5, 'on')
# pq.enqueue(higher_priority)
# pq.enqueue(lower_priority)
# pq.enqueue(middle_priority)
# self.assertEqual(higher_priority, pq.dequeue())
# self.assertEqual(middle_priority, pq.dequeue())
# self.assertEqual(lower_priority, pq.dequeue())
# def test_dequeue_three_internal(self):
# """
# Dequeuing from a three-element queue removes each dequeued value from
# the internal list, highest-priority first.
# """
# pq = NaivePriorityQueue()
# lower_priority = Job(1, 'top')
# middle_priority = Job(3, 'of')
# higher_priority = Job(5, 'this')
# pq.enqueue(higher_priority)
# pq.enqueue(lower_priority)
# pq.enqueue(middle_priority)
# _ = pq.dequeue()
# self.assertEqual(lower_priority, pq.data[0])
# _ = pq.dequeue()
# self.assertEqual(lower_priority, pq.data[0])
# """
# Emptiness
# """
# def test_empty(self):
# """
# A queue is initially empty.
# """
# pq = NaivePriorityQueue()
# self.assertTrue(pq.is_empty())
# def test_not_empty(self):
# """
# A queue with one enqueued value is not empty.
# """
# pq = NaivePriorityQueue()
# pq.enqueue(Job(1, 'People'))
# self.assertFalse(pq.is_empty())
# def test_empty_after_dequeue(self):
# """
# A queue with one enqueued value is empty after dequeuing.
# """
# pq = NaivePriorityQueue()
# pq.enqueue(Job(1, 'was'))
# _ = pq.dequeue()
# self.assertTrue(pq.is_empty())
# def test_not_empty_multiple(self):
# """
# A queue with two enqueued values is not empty after dequeuing only one.
# """
# pq = NaivePriorityQueue()
# pq.enqueue(Job(1, 'hustling'))
# pq.enqueue(Job(3, 'arguing and bustling'))
# _ = pq.dequeue()
# self.assertFalse(pq.is_empty())
# def test_initial_dequeue(self):
# """
# Dequeuing from an empty queue returns None.
# """
# pq = NaivePriorityQueue()
# self.assertIsNone(pq.dequeue())
# """
# Algorithmic complexity
# """
# def test_enqueue_efficiency(self):
# """
# Enqueing a value is always O(1).
# """
# time_samples = []
# for _ in range(0, 1000):
# pq = NaivePriorityQueue()
# start_time = time.time()
# pq.enqueue('fake')
# end_time = time.time()
# time_samples.append(end_time - start_time)
# small_average_enqueue_time = sum(time_samples) / float(len(time_samples))
# large_queue = NaivePriorityQueue()
# for _ in range(0, 1000000):
# large_queue.enqueue('fake')
# large_time_samples = []
# for _ in range(0, 1000):
# start_time = time.time()
# large_queue.enqueue('fake')
# end_time = time.time()
# large_time_samples.append(end_time - start_time)
# large_average_enqueue_time = sum(large_time_samples) / float(len(large_time_samples))
# self.assertAlmostEqual(small_average_enqueue_time, large_average_enqueue_time, delta=small_average_enqueue_time)
# # While enqueing naively is efficient... what is the complexity of dequeuing?
# def test_dequeue_efficiency(self):
# """
# Dequeuing a value is O(n).
# """
# print("This test will take a while...") # See the comment below.
# time_samples = []
# for _ in range(0, 1000):
# pq = NaivePriorityQueue()
# pq.enqueue('fake')
# start_time = time.time()
# pq.dequeue()
# end_time = time.time()
# time_samples.append(end_time - start_time)
# small_average_dequeue_time = sum(time_samples) / float(len(time_samples))
# large_queue = NaivePriorityQueue()
# for _ in range(0, 1000000):
# large_queue.enqueue('fake')
# large_time_samples = []
# for _ in range(0, 1000):
# start_time = time.time()
# large_queue.dequeue()
# end_time = time.time()
# large_time_samples.append(end_time - start_time)
# large_average_dequeue_time = sum(large_time_samples) / float(len(large_time_samples))
# self.assertNotAlmostEqual(small_average_dequeue_time, large_average_dequeue_time, delta=small_average_dequeue_time)
# Notice how the last test takes time to "prove."
# By studying *algorithm analysis*, you can prove the efficiency deductively,
# with formal proofs, rather than with long-running tests.
# Allow running this test module directly (python3 test_naive_priority_queue.py)
# in addition to the `python3 -m unittest ...` invocation noted at the top.
if __name__ == '__main__':
    unittest.main()
| [
2,
8410,
5626,
19164,
5064,
56,
12680,
45811,
198,
2,
5660,
502,
2884,
25,
21015,
18,
532,
76,
555,
715,
395,
1332,
62,
2616,
425,
62,
49336,
62,
36560,
198,
198,
11748,
555,
715,
395,
198,
11748,
640,
198,
6738,
24354,
62,
49336,
... | 2.153752 | 4,091 |
import sys
import os
import time
import json
import math
from datetime import datetime
import subprocess
import threading
import numpy as np
from os.path import join, dirname, basename, abspath
from std_msgs.msg import (
Header
)
from sensor_msgs.msg import (
PointCloud2,
PointField,
NavSatStatus,
NavSatFix
)
from geometry_msgs.msg import (
Point,
PointStamped,
PoseStamped,
Pose,
PoseWithCovariance,
PoseWithCovarianceStamped,
TransformStamped
)
from nav_msgs.msg import (
Path,
Odometry
)
import std_msgs.msg as std_msgs
import builtin_interfaces.msg as builtin_msgs
from rclpy.node import Node
import riegl.rdb
from vzi_services.controlservice import ControlService
from vzi_services.interfaceservice import InterfaceService
from vzi_services.projectservice import ProjectService
from vzi_services.scannerservice import ScannerService
from vzi_services.geosysservice import GeoSysService
from riegl_vz_interfaces.msg import (
Voxels
)
from .pose import (
readVop,
readPop,
readAllSopv,
readTpl,
getTransformFromPose,
calcRelativePose,
calcRelativeCovariances,
eulerFromQuaternion, quaternionFromEuler
)
from .tf2_geometry_msgs import (
do_transform_pose
)
from .project import RieglVzProject
from .status import RieglVzStatus
from .geosys import RieglVzGeoSys
from .ssh import RieglVzSSH
from .utils import (
SubProcess,
parseCSV
)
# Absolute path of the directory containing this module, for locating
# bundled resources relative to the source file.
appDir = dirname(abspath(__file__))
| [
11748,
25064,
198,
11748,
28686,
198,
11748,
640,
198,
11748,
33918,
198,
11748,
10688,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
11748,
850,
14681,
198,
11748,
4704,
278,
198,
11748,
299,
32152,
355,
45941,
198,
6738,
28686,
13,
6978... | 2.701275 | 549 |
#!/usr/bin/env python
from KPIForecaster.forecaster import KPIForecaster
from configuration.settings import Conf
from database.sql_connect import SQLDatabase
from datetime import datetime
import pandas as pd
import sys
import os.path
import time
# Directory this script was launched from (argv[0] minus the trailing
# filename).  NOTE(review): if argv[0] contains no "/" this leaves the bare
# script name -- presumably the script is always invoked with a path; confirm.
path = sys.argv[0].rsplit("/", 1)[0]
# Create configuration and Database connection and our KPI Forecaster Object
#conf = Conf(os.path.join(path,"config.json"))
try:
    conf = Conf(os.path.join(path,"config.json"))
except Exception:
    # Fall back to a config.json in the current working directory.  Narrowed
    # from a bare `except:` so SystemExit/KeyboardInterrupt are not swallowed.
    conf = Conf("config.json")
sql = SQLDatabase(conf)
# NOTE(review): this rebinds the imported class name `KPIForecaster` to an
# instance; the rest of the script uses the instance, but the class itself is
# no longer reachable in this module.
KPIForecaster = KPIForecaster(conf, crontab=True)
# When True, the long-horizon forecast is also uploaded to the DWH at the end.
StoreForecast = False
#input_report = pd.read_csv("DAILY_ANOMALY_REPORT_DL_USER_THROUGHPUT_MBPS_2020_12_13.csv")
#del input_report['Unnamed: 0']
# Pull yesterday's KPI measurements and build the per-cell report from them.
input_df = sql.getDailyKPIData()
input_report = KPIForecaster.getYesterdaysReport(input_df)
#input_report = sql.getYesterdaysReport()
# Ensure all columns are uppercase
input_report.columns = [x.upper() for x in input_report.columns]
# Get unique cell IDs
cell_names = input_report.CELL_NAME.unique()
print(f'[INFO] Analysing {len(cell_names)} Models')
# Wall-clock start, used for the timing statistics printed at the end.
T_START = time.time()
# Per-cell last-day analyses and long-horizon forecasts, concatenated below.
appended_data = []
full_forecast = []
# The single KPI this run analyses.
KPI = 'DL_USER_THROUGHPUT_MBPS'
# Iterate through each cell, creating a model, forecast and plot for each
for i,cell_name in enumerate(cell_names):
    df_last_day, last_day = KPIForecaster.getLastDay(input_report, cell = cell_name)
    ret, forecast = KPIForecaster.getForecastData(cell_name, KPI = KPI)
    # Only cells for which a stored forecast exists contribute to the report.
    if ret:
        foreLD, long_forecast = KPIForecaster.analyzeData(forecast, df_last_day, last_day, cell = cell_name)
        print(str(i+1) + " of " + str(len(cell_names)) + " cells processed.")
        appended_data.append(foreLD)
        full_forecast.append(long_forecast)
    #if i == 2:
    #    break
# Concatenate all dataframes from appended_data list
appended_data = pd.concat(appended_data, axis=0)
full_forecast = pd.concat(full_forecast, axis=0)
# Rename columns as per SQL DWH naming convention
appended_data = appended_data.rename({'ds':'START_TIME',
                        'Date':'DATE',
                        'pred_upper_15':'HISTORICAL_UPPER_BOUND',
                        'pred_lower_15':'HISTORICAL_LOWER_BOUND',
                        'Expected_Value':'HISTORICAL_PREDICTION',
                        'Actual_Value':'ACTUAL_VALUE',
                        'Exceeds_Thresh':'EXCEEDS_THRESHOLD',
                        'Under_Thresh':'UNDER_THRESHOLD',
                        'Investigate_Cell':'OUT_OF_RANGE',
                        'Delta':'DELTA_FROM_HIST_PREDICTION',
                        'Delta_from_Bound':'DELTA_FROM_HIST_BOUND'
                        }, axis='columns')
# Change datatypes to string
appended_data['START_TIME'] = appended_data['START_TIME'].astype(str)
appended_data['EXCEEDS_THRESHOLD'] = appended_data['EXCEEDS_THRESHOLD'].astype(str)
appended_data['UNDER_THRESHOLD'] = appended_data['UNDER_THRESHOLD'].astype(str)
appended_data['OUT_OF_RANGE'] = appended_data['OUT_OF_RANGE'].astype(str)
appended_data = appended_data.fillna(0)
# Synthetic join key: cell name + timestamp, used to align with predictions.
appended_data['KEY'] = appended_data['CELL_NAME'] + appended_data['START_TIME']
# Get AI Predictions
predictions = KPIForecaster.getPredictions(input_df)
# Inner join: keep only rows that have both a historical and an AI prediction.
fin = pd.merge(appended_data, predictions, on=['KEY'], how='inner')
# Project the merged frame down to the DWH report columns.  The integer
# column 0 holds the AI prediction emitted by getPredictions().
final = fin[['CELL_NAME',
             'START_TIME',
             'DATE',
             'HISTORICAL_UPPER_BOUND',
             'HISTORICAL_LOWER_BOUND',
             'EXCEEDS_THRESHOLD',
             'UNDER_THRESHOLD',
             'OUT_OF_RANGE',
             'DELTA_FROM_HIST_PREDICTION',
             'DELTA_FROM_HIST_BOUND',
             0,
             'ACTUAL_VALUE',
             'HISTORICAL_PREDICTION']].copy()
final = final.rename({0:'AI_PREDICTION',
                      'DELTA_FROM_HIST_PREDICTION':'PCT_DELTA_FROM_HIST_PREDICTION',
                      'DELTA_FROM_HIST_BOUND':'PCT_DELTA_FROM_HIST_BOUND'}, axis='columns')
# Absolute deltas between the measured value and each kind of prediction.
final['DELTA_FROM_AI_PREDICTION'] = final['ACTUAL_VALUE'] - final['AI_PREDICTION']
final['DELTA_FROM_HIST_PREDICTION'] = final['ACTUAL_VALUE'] - final['HISTORICAL_PREDICTION']
#final = final[['CELL_NAME', 'START_TIME', 'DATE', 'HISTORICAL_UPPER_BOUND', 'HISTORICAL_LOWER_BOUND',
#              '0', '1', '2', '3', '0', '1', '2', '3']]
# Split the timestamp into separate DATE and time-of-day string columns.
final['START_TIME'] = pd.to_datetime(final['START_TIME'])
final['DATE'] = final['START_TIME'].dt.strftime('%m/%d/%Y')
final['START_TIME'] = final['START_TIME'].dt.strftime('%H:%M:%S')
final['START_TIME'] = final['START_TIME'].astype(str)
final['DATE'] = final['DATE'].astype(str)
# Add Maintenance Window filter: flag rows falling in the 00:00-05:00 window.
maintenance_window = ['00:00:00','01:00:00','02:00:00' ,'03:00:00' ,'04:00:00','05:00:00']
final['MAINTENANCE_WINDOW'] = final['START_TIME'].isin(maintenance_window)
# Output Statistics
t0 = time.time()
completion_time = t0-T_START
print("******* Total Time to Produce Reports: " + str(completion_time))
print("******* Average Time Per Model " + str(completion_time/len(cell_names)))
# Persist a dated CSV copy of the anomaly report alongside the script.
path = os.path.join(path,"./Reports/ANOMALY/")
KPIForecaster.makeDir(path)
date = datetime.today().strftime('%Y_%m_%d')
file_name = path + "DAILY_ANOMALY_REPORT_" + KPI + "_" + str(date) + ".csv"
appended_data.to_csv(file_name)
print("[INFO] Analysis Completed.")
print("[INFO] Uploading Report to DWH...")
sql.dumpToDWH(final, "KPI_ANOMALY")
## This should be in Train Model ##
# Optional upload of the full long-horizon forecast (disabled by default
# via StoreForecast above).
if StoreForecast == True:
    full_forecast_df = full_forecast[['CELL_NAME', 'ds',
                                      'pred_upper_15','pred_lower_15','yhat']].copy()
    full_forecast_df = full_forecast_df.rename({'ds':'TIMESTAMP',
                                                'yhat':'PREDICTED',
                                                'pred_upper_15':'UPPER_PREDICTION',
                                                'pred_lower_15':'LOWER_PREDICTION'
                                                }, axis='columns')
    full_forecast_df['TIMESTAMP'] = full_forecast_df['TIMESTAMP'].astype(str)
    sql.dumpToDWH(full_forecast_df, "FORECAST_DATA", if_exists = 'append')
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
6738,
45814,
5064,
382,
17970,
13,
754,
17970,
1330,
45814,
5064,
382,
17970,
198,
6738,
8398,
13,
33692,
1330,
7326,
198,
6738,
6831,
13,
25410,
62,
8443,
1330,
16363,
38105,
198,
6738,... | 2.133813 | 2,780 |
from __future__ import division
# python libs
import sys
# 3rd party
# Collect ImportErrors instead of failing on the first one, so every missing
# third-party dependency can be reported in a single run before exiting.
importerrors = []
try:
    import statsmodels.nonparametric.kde as kde
except ImportError as ie:
    importerrors.append(ie)
try:
    import matplotlib as mpl
except ImportError as ie:
    importerrors.append(ie)
if len(importerrors) != 0:
    for item in importerrors:
        print ('Import Error:', item)
    sys.exit()
from matplotlib.ticker import FuncFormatter, MaxNLocator, MultipleLocator
import matplotlib.pyplot as plt
def plot_kde(data, ax, title=None, color='r', fill_bt=True):
    """
    Plot a smoothed (kernel density estimate) histogram on an axes.

    :type data: numpy array
    :param data: An array containing the data to be plotted
    :type ax: matplotlib.Axes
    :param ax: The Axes object to draw to
    :type title: str
    :param title: The plot title
    :type color: str
    :param color: The color of the histogram line and fill. Note that the fill
                  will be plotted with an alpha of 0.35.
    :type fill_bt: bool
    :param fill_bt: Specify whether to fill the area beneath the histogram line
    """
    estimate = kde.KDEUnivariate(data)
    estimate.fit()
    xs, ys = estimate.support, estimate.density
    ax.plot(xs, ys, color=color, alpha=0.9, linewidth=2.25)
    if fill_bt:
        # translucent fill under the density curve, drawn behind the line
        ax.fill_between(xs, ys, alpha=.35, zorder=1, antialiased=True,
                        color=color)
    if title is not None:
        # lift the title slightly above the axes frame
        title_artist = ax.set_title(title)
        title_artist.set_y(1.05)
def ggplot2_style(ax):
    """
    Styles an axes to appear like ggplot2

    Must be called after all plot and axis manipulation operations have been
    carried out (needs to know final tick spacing)

    :type ax: matplotlib.Axes
    :param ax: The Axes object to restyle in place
    """
    #set the style of the major and minor grid lines, filled blocks
    ax.grid(True, 'major', color='w', linestyle='-', linewidth=1.4)
    ax.grid(True, 'minor', color='0.92', linestyle='-', linewidth=0.7)
    ax.patch.set_facecolor('0.85')
    ax.set_axisbelow(True)

    #set minor tick spacing to 1/2 of the major ticks
    ax.xaxis.set_minor_locator(MultipleLocator( (plt.xticks()[0][1]-plt.xticks()[0][0]) / 2.0 ))
    ax.yaxis.set_minor_locator(MultipleLocator( (plt.yticks()[0][1]-plt.yticks()[0][0]) / 2.0 ))

    #remove axis border
    for child in ax.get_children():
        if isinstance(child, mpl.spines.Spine):
            child.set_alpha(0)

    #restyle the tick lines
    for line in ax.get_xticklines() + ax.get_yticklines():
        line.set_markersize(5)
        line.set_color("gray")
        line.set_markeredgewidth(1.4)

    #remove the minor tick lines
    for line in ax.xaxis.get_ticklines(minor=True) + ax.yaxis.get_ticklines(minor=True):
        line.set_markersize(0)

    #only show bottom left ticks, pointing out of axis
    mpl.rcParams['xtick.direction'] = 'out'
    mpl.rcParams['ytick.direction'] = 'out'
    ax.xaxis.set_ticks_position('bottom')
    ax.yaxis.set_ticks_position('left')

    # fix: compare to None by identity (PEP 8), not with `!= None`
    if ax.legend_ is not None:
        lg = ax.legend_
        lg.get_frame().set_linewidth(0)
        lg.get_frame().set_alpha(0.5)
| [
6738,
11593,
37443,
834,
1330,
7297,
198,
2,
21015,
9195,
82,
198,
11748,
25064,
198,
2,
513,
4372,
2151,
198,
320,
26634,
5965,
796,
17635,
198,
28311,
25,
198,
220,
220,
220,
1330,
9756,
27530,
13,
13159,
17143,
19482,
13,
74,
2934,... | 2.399523 | 1,259 |
from setuptools import setup, find_packages
# Single source of the package version, exposed to setuptools below.
__version__ = '0.2.4'
# Package metadata for python-seafile-api; the only runtime dependency is
# `requests`.
setup(name='python-seafile-api',
      version=__version__,
      license='BSD',
      description='Client interface for Seafile Web API',
      author='Igor Rumyantsev',
      author_email='igorrum@mail.ru',
      url='https://github.com/Widly/python-seafile',
      platforms=['Any'],
      packages=find_packages(),
      install_requires=['requests'],
      classifiers=['Development Status :: 4 - Beta',
                   'License :: OSI Approved :: BSD License',
                   'Operating System :: OS Independent',
                   'Programming Language :: Python'],
      )
| [
6738,
900,
37623,
10141,
1330,
9058,
11,
1064,
62,
43789,
198,
198,
834,
9641,
834,
796,
705,
15,
13,
17,
13,
19,
6,
628,
198,
40406,
7,
3672,
11639,
29412,
12,
325,
1878,
576,
12,
15042,
3256,
198,
220,
220,
220,
220,
220,
2196,
... | 2.341637 | 281 |
# -*- coding:utf-8 -*-
# Author: Kei Choi(hanul93@gmail.com)
import bz2
import kernel
# -------------------------------------------------------------------------
# KavMain 클래스
# -------------------------------------------------------------------------
# ---------------------------------------------------------------------
# init(self, plugins_path)
# 플러그인 엔진을 초기화 한다.
# 인력값 : plugins_path - 플러그인 엔진의 위치
# verbose - 디버그 모드 (True or False)
# 리턴값 : 0 - 성공, 0 이외의 값 - 실패
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# uninit(self)
# 플러그인 엔진을 종료한다.
# 리턴값 : 0 - 성공, 0 이외의 값 - 실패
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# getinfo(self)
# 플러그인 엔진의 주요 정보를 알려준다. (제작자, 버전, ...)
# 리턴값 : 플러그인 엔진 정보
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# format(self, filehandle, filename, filename_ex)
# 파일 포맷을 분석한다.
# 입력값 : filehandle - 파일 핸들
# filename - 파일 이름
# filename_ex - 압축 파일 내부 파일 이름
# 리턴값 : {파일 포맷 분석 정보} or None
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# arclist(self, filename, fileformat)
# 압축 파일 내부의 파일 목록을 얻는다.
# 입력값 : filename - 파일 이름
# fileformat - 파일 포맷 분석 정보
# 리턴값 : [[압축 엔진 ID, 압축된 파일 이름]]
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# unarc(self, arc_engine_id, arc_name, fname_in_arc)
# 입력값 : arc_engine_id - 압축 엔진 ID
# arc_name - 압축 파일
# fname_in_arc - 압축 해제할 파일 이름
# 리턴값 : 압축 해제된 내용 or None
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# arcclose(self)
# 압축 파일 핸들을 닫는다.
# ---------------------------------------------------------------------
# ---------------------------------------------------------------------
# mkarc(self, arc_engine_id, arc_name, file_infos)
# 입력값 : arc_engine_id - 압축 가능 엔진 ID
# arc_name - 최종적으로 압축될 압축 파일 이름
# file_infos - 압축 대상 파일 정보 구조체
# 리턴값 : 압축 성공 여부 (True or False)
# ---------------------------------------------------------------------
| [
2,
532,
9,
12,
19617,
25,
40477,
12,
23,
532,
9,
12,
198,
2,
6434,
25,
3873,
72,
42198,
7,
7637,
377,
6052,
31,
14816,
13,
785,
8,
628,
198,
11748,
275,
89,
17,
198,
11748,
9720,
628,
198,
2,
16529,
45537,
198,
2,
509,
615,
... | 1.904897 | 1,409 |
"""The machinery of importlib: finders, loaders, hooks, etc."""
import _imp
from ._bootstrap import (SOURCE_SUFFIXES, DEBUG_BYTECODE_SUFFIXES,
OPTIMIZED_BYTECODE_SUFFIXES, BYTECODE_SUFFIXES)
from ._bootstrap import BuiltinImporter
from ._bootstrap import FrozenImporter
from ._bootstrap import PathFinder
from ._bootstrap import FileFinder
from ._bootstrap import SourceFileLoader
from ._bootstrap import SourcelessFileLoader
from ._bootstrap import ExtensionFileLoader
# Extension-module filename suffixes (e.g. ".so", ".pyd"), queried from the
# interpreter at import time.
EXTENSION_SUFFIXES = _imp.extension_suffixes()
| [
37811,
464,
20230,
286,
1330,
8019,
25,
1064,
364,
11,
3440,
364,
11,
26569,
11,
3503,
526,
15931,
198,
198,
11748,
4808,
11011,
198,
198,
6738,
47540,
18769,
26418,
1330,
357,
47690,
62,
12564,
5777,
10426,
1546,
11,
16959,
62,
17513,
... | 2.914439 | 187 |
# coding=utf-8
import os
import cv2
import sys
import json
import numpy as np
import shutil
# Run the conversion for both dataset splits.  NOTE(review): `conver` is
# defined elsewhere in this file.  The printed strings are user-facing
# progress messages ("starting conversion, removing invalid annotations",
# "train split done", "val split done") and are kept verbatim.
print('开始转换,并清除无效标注...')
conver('train')
print('训练集转换完毕')
conver('val')
print('验证集转换完毕')
| [
2,
19617,
28,
40477,
12,
23,
198,
11748,
28686,
198,
11748,
269,
85,
17,
198,
11748,
25064,
198,
11748,
33918,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
4423,
346,
628,
198,
198,
4798,
10786,
28156,
222,
34650,
233,
164,
121,
10... | 1.528926 | 121 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""FINet: Fraction Detection Normalization Network for Image Restoration.
"""
from __future__ import annotations
from typing import Optional
import torch
from torch import nn
from torch import Tensor
from onevision.factory import DEBLUR
from onevision.factory import DEHAZE
from onevision.factory import DENOISE
from onevision.factory import DERAIN
from onevision.factory import IMAGE_ENHANCEMENT
from onevision.factory import MODELS
from onevision.models.enhancement.image_enhancer import ImageEnhancer
from onevision.nn import Conv3x3
from onevision.nn import FractionInstanceNorm
from onevision.nn import SAM
from onevision.type import Indexes
from onevision.type import Pretrained
from onevision.type import Tensors
from onevision.utils import console
__all__ = [
"FINet",
"FINetDeBlur",
"FINetDeBlur_x0_5",
"FINetDeHaze",
"FINetDeNoise",
"FINetDeRain",
]
# MARK: - Modules
# MARK: Magic Functions
# MARK: Forward Pass
# MARK: Magic Functions
# MARK: Forward Pass
# MARK: Magic Functions
# MARK: Forward Pass
# MARK: - FINet
# Per-variant hyper-parameter configurations for FINet.  Keys match the model
# names registered with the factories below; values are constructor kwargs
# (channel widths, U-Net depth, LeakyReLU slope, the encoder levels that use
# fractional instance norm, the norm fraction `alpha`, and its selection mode).
cfgs = {
    # De-blur
    "finet_deblur": {
        "in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
        "fin_position_left": 0, "fin_position_right": 4, "alpha": 0.5,
        "selection": "linear",
    },
    # De-blur, half-width variant
    "finet_deblur_x0.5": {
        "in_channels": 3, "out_channels": 32, "depth": 5, "relu_slope": 0.2,
        "fin_position_left": 0, "fin_position_right": 4, "alpha": 0.5,
        "selection": "linear",
    },
    # De-haze
    "finet_dehaze": {
        "in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
        "fin_position_left": 0, "fin_position_right": 4, "alpha": 0.5,
        "selection": "linear",
    },
    # De-noise
    "finet_denoise": {
        "in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
        "fin_position_left": 3, "fin_position_right": 4, "alpha": 0.5,
        "selection": "linear",
    },
    # De-rain
    "finet_derain": {
        "in_channels": 3, "out_channels": 64, "depth": 5, "relu_slope": 0.2,
        "fin_position_left": 0, "fin_position_right": 4, "alpha": 0.0,
        "selection": "linear",
    },
}
@MODELS.register(name="finet")
@IMAGE_ENHANCEMENT.register(name="finet")
@MODELS.register(name="finet_deblur")
@IMAGE_ENHANCEMENT.register(name="finet_deblur")
@DEBLUR.register(name="finet_deblur")
@MODELS.register(name="finet_deblur_x0.5")
@IMAGE_ENHANCEMENT.register(name="finet_deblur_x0.5")
@DEBLUR.register(name="finet_deblur_x0.5")
@MODELS.register(name="finet_dehaze")
@IMAGE_ENHANCEMENT.register(name="finet_dehaze")
@DEHAZE.register(name="finet_dehaze")
@MODELS.register(name="finet_denoise")
@IMAGE_ENHANCEMENT.register(name="finet_denoise")
@DENOISE.register(name="finet_denoise")
@MODELS.register(name="finet_derain")
@IMAGE_ENHANCEMENT.register(name="finet_derain")
@DERAIN.register(name="finet_derain")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
37811,
20032,
316,
25,
376,
7861,
46254,
14435,
1634,
7311,
329,
7412,
36155,
13,
198,
37811,
198,
198,
6738,
1159... | 2.341634 | 1,285 |
from django.db import models
from django.contrib.auth.models import User
class UserDocumentInteraction(models.Model):
"""Tracks an instance of a document being downloaded"""
user = models.ForeignKey(User, on_delete=models.CASCADE)
design_document = models.ForeignKey('core.DesignDocument', on_delete=models.SET_NULL, null=True)
| [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
42625,
14208,
13,
3642,
822,
13,
18439,
13,
27530,
1330,
11787,
198,
198,
4871,
11787,
24941,
9492,
2673,
7,
27530,
13,
17633,
2599,
198,
220,
220,
220,
37227,
2898,
4595,
281,
4554,
... | 3.31068 | 103 |
################################################################################
#
# sage_interface.py
#
# description: interface between Boole and Sage
#
# Converts Boole expressions to Sage symbolic expressions and back.
#
# In the forward direction, the user specifies the symbolic ring, by
# default the_SymbolicRing().
#
# Note: this is meant to be called from Sage.
#
# TODO: associate domain information with sage constants?
# TODO: define function with arity?
# TODO: need to better understand symbolic functions
#
################################################################################
from boole.core.expr import *
import sage
from sage.symbolic.expression_conversions import Converter
from sage.symbolic.ring import the_SymbolicRing
from sage.symbolic.function_factory import function_factory
import operator as _operator
################################################################################
#
# These dictionaries gives the Sage translations of the built-in symbols,
# built-in sorts, and Sage functions for building constants of the built-in
# sorts.
#
################################################################################
# Maps each built-in Boole function symbol (keyed by its name) to a lambda
# that applies the corresponding Sage operation to a list of already-translated
# argument expressions.  Sum/Product fold over arbitrarily many arguments;
# the rest are unary or binary.
_built_in_sage_funs = {
    equals.name: (lambda args: args[0] == args[1]),
    not_equals.name: (lambda args: args[0] != args[1]),
    plus.name: (lambda args: args[0] + args[1]),
    Sum.name: (lambda args: reduce((lambda a, b: a + b), args, 0)),
    times.name: (lambda args: args[0] * args[1]),
    Product.name: (lambda args: reduce((lambda a, b: a * b), args, 1)),
    sub.name: (lambda args: args[0] - args[1]),
    div.name: (lambda args: args[0] / args[1]),
    power.name: (lambda args: pow(args[0], args[1])),
    neg.name: (lambda args: -args[0]),
    absf.name: (lambda args: abs(args[0])),
    less_than.name: (lambda args: args[0] < args[1]),
    less_eq.name: (lambda args: args[0] <= args[1]),
    greater_than.name: (lambda args: args[0] > args[1]),
    greater_eq.name: (lambda args: args[0] >= args[1])
}
# TODO: use these to set the domain
#
#_built_in_sage_sorts = {
# Int.name: z3.IntSort,
# Real.name: z3.RealSort,
# Bool.name: z3.BoolSort
#}
# Maps each built-in Boole sort (keyed by its name) to a converter that turns
# a raw Python value into the corresponding Sage constant.  Reals and booleans
# pass through unchanged; integers are wrapped as Sage Integers.
_built_in_sage_sort_values = {
    Int.name: (lambda val: sage.rings.integer.Integer(val)),
    Real.name: (lambda val: val),
    Bool.name: (lambda val: val)
}
################################################################################
#
# Exceptions associated with the Sage interface
#
################################################################################
class Sage_Interface_Error(Exception):
    """Base class for all errors raised by the Sage interface.
    """

    def __init__(self, mess = ''):
        """
        Arguments:
        -`mess`: a string that represents the error message
        """
        super(Sage_Interface_Error, self).__init__(mess)
class Sage_Unexpected_Type(Sage_Interface_Error):
    """Raised when trying to translate an unexpected type
    between Boole and Sage.
    """
    pass
class Sage_Unexpected_Expression(Sage_Interface_Error):
    """Raised when there is a problem translating an expression
    between Boole and Sage.
    """
    pass
################################################################################
#
# Convert Sage expressions to Boole expressions
#
# for now, put symbolic expressions in the global namespace; later, allow
# user to specify any ring
# also, check global namespace before creating these?
#
################################################################################
class _Expr_Trans(ExprVisitor):
    """Visitor class for translating an expression from Boole
    to Sage.
    """

    def __init__(self, translator):
        """
        Initialize with calling instance of Boole_to_Z3.

        Arguments:
        - `translator`: the translator instance whose state (e.g. symbol
          table) is consulted while visiting expressions.
        """
        # NOTE(review): the docstring above says Boole_to_Z3, but this module
        # translates to Sage -- presumably adapted from the Z3 interface;
        # confirm which translator type is actually passed.
        self.trans = translator
class Boole_to_Sage():
    """
    Translates Boole expressions to a Sage symbolic expression ring,
    creating symbols as necessary.

    For example:

    C = Boole_to_Sage()
    print C(x + y)
    print C(f(x))

    The call of C(x + y) creates Sage variables for x and y.
    The call of C(f(x)) creates a Sage function variable for f,
    but uses the previous x.

    Note: do not use the same name for symbols of different type!
    """

    def handle_function(self, fun, args):
        """
        Apply a Boole function symbol to already-translated arguments.

        fun: Boole function symbol to apply
        args: Sage expressions, already translated
        """
        # NOTE(review): self.symbol_dict is expected to be initialized
        # elsewhere (presumably in __init__) -- confirm.
        # Idiom fix: test dict membership directly instead of `.keys()`.
        if fun.name in self.symbol_dict:
            # previously defined function symbol: reuse it
            sage_fun = self.symbol_dict[fun.name]
            return sage_fun(*args)
        elif fun.name in _built_in_sage_funs:
            # built-in function symbol: lambdas take the argument list whole
            sage_fun = _built_in_sage_funs[fun.name]
            return sage_fun(args)
        else:
            # new function symbol: create it and cache it for later reuse
            sage_fun = function_factory(fun.name)
            self.symbol_dict[fun.name] = sage_fun
            return sage_fun(*args)
################################################################################
#
# Convert Sage expressions to Boole expressions
#
################################################################################
| [
29113,
29113,
14468,
198,
2,
198,
2,
35021,
62,
39994,
13,
9078,
198,
2,
198,
2,
6764,
25,
7071,
1022,
21458,
293,
290,
28733,
198,
2,
198,
2,
1482,
24040,
21458,
293,
14700,
284,
28733,
18975,
14700,
290,
736,
13,
198,
2,
198,
2,... | 2.995319 | 1,709 |
#Create a script that lets the user submit a password until they have satisfied three conditions:
#1. Password contains at least one number
#2. Contains one uppercase letter
#3. It is at least 5 chars long
#Print out message "Passowrd is not fine" if the user didn't create a correct password
# Prompt repeatedly until the password satisfies all three rules from the
# header: at least one digit, at least one uppercase letter, length >= 5.
# (The misspelled "Passowrd" message is the exact string the spec requires.)
while True:
    psw = input("Enter new password: ")
    if any(i.isdigit() for i in psw) and any(i.isupper() for i in psw) and len(psw) >= 5:
        print("Password is fine")
        break
    else:
        print("Passowrd is not fine")
| [
2,
16447,
257,
4226,
326,
8781,
262,
2836,
9199,
257,
9206,
1566,
484,
423,
11378,
1115,
3403,
25,
201,
198,
2,
16,
13,
30275,
4909,
379,
1551,
530,
1271,
201,
198,
2,
17,
13,
49850,
530,
334,
39921,
589,
3850,
201,
198,
2,
18,
... | 2.823834 | 193 |
import argparse
import sys
import cv2
from albumentations import Compose, SmallestMaxSize, CenterCrop
from pietoolbelt.viz import ColormapVisualizer
from segmentation import Segmentation
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Segmentation example')
parser.add_argument('-i', '--image', type=str, help='Path to image to predict', required=False)
parser.add_argument('-w', '--web_cam', help='Use web camera id to predict', action='store_true')
parser.add_argument('-d', '--device', type=str, help='Device', required=False, default='cuda')
if len(sys.argv) < 2:
print('Bad arguments passed', file=sys.stderr)
parser.print_help(file=sys.stderr)
exit(2)
args = parser.parse_args()
if (args.image is None and args.web_cam is None) or (args.image is not None and args.web_cam is not None):
print("Please define one of option: -i or -w")
parser.print_help(file=sys.stderr)
sys.exit(1)
vis = ColormapVisualizer([0.5, 0.5])
seg = Segmentation(accuracy_lvl=Segmentation.Level.LEVEL_2)
seg.set_device(args.device)
data_transform = Compose([SmallestMaxSize(max_size=512, always_apply=True),
CenterCrop(height=512, width=512, always_apply=True)], p=1)
if args.image is not None:
image = cv2.cvtColor(cv2.imread(args.image), cv2.COLOR_BGR2RGB)
image = data_transform(image)
cv2.imwrite('result.jpg', seg.process(image)[0])
elif args.web_cam is not None:
title = "Person segmentation example"
cv2.namedWindow(title, cv2.WINDOW_GUI_NORMAL)
cap = cv2.VideoCapture(0)
while cv2.waitKey(1) & 0xFF != ord('q'):
ret, frame = cap.read()
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
frame = data_transform(image=frame)['image']
img, mask = seg.process(frame)
image = vis.process_img(cv2.cvtColor(img, cv2.COLOR_RGB2BGR), mask)
cv2.imshow(title, image)
cap.release()
cv2.destroyAllWindows()
| [
11748,
1822,
29572,
198,
11748,
25064,
198,
198,
11748,
269,
85,
17,
198,
6738,
435,
65,
1713,
602,
1330,
3082,
577,
11,
10452,
395,
11518,
10699,
11,
3337,
34,
1773,
198,
6738,
279,
1155,
970,
37976,
13,
85,
528,
1330,
1623,
579,
4... | 2.307947 | 906 |
import unittest
import javalang
from coastSHARK.util.complexity_java import ComplexityJava, SourcemeterConversion
BINOP_TEST = """package de.ugoe.cs.coast;
public class BinopTest {
public void test1() {
Boolean a = true;
Boolean b = true;
Boolean c = true;
Boolean d = true;
Boolean e = true;
Boolean f = true;
if (a && b && c || d || e && f) {
// if cc = 1
// sequence cc = 3
}
}
public void test2() {
Boolean a = true;
Boolean b = true;
Boolean c = true;
if (a && !(b && c)) {
// if cc = 1
// sequence cc = 2
}
}
public void test3() {
Boolean a = true;
Boolean b = true;
Boolean c = true;
Boolean d = true;
Boolean e = true;
if (a && b || c && d || e) {
// if cc = 1
// sequence cc = 4
}
}
public void test4() {
Boolean a = true;
Boolean b = true;
if(a == b) {
// if = 1
// cc = 1
} else if (a != b) {
// cc = 1
}
}
public void test5() {
Boolean a = true;
Boolean b = true;
Boolean c = a && b;
}
protected void onActivityResult(int requestCode, int resultCode, Intent data) {
String resultText = "";
if (resultCode != RESULT_OK) {
resultText = "An error occured while contacting OI Safe. Does it allow remote access? (Please check in the settings of OI Safe).";
} else {
if (requestCode == ENCRYPT_REQUEST || requestCode == DECRYPT_REQUEST) {
resultText = data.getStringExtra(CryptoIntents.EXTRA_TEXT);
} else if (requestCode == SET_PASSWORD_REQUEST) {
resultText = "Request to set password sent.";
} else if (requestCode == GET_PASSWORD_REQUEST) {
String uname = data.getStringExtra(CryptoIntents.EXTRA_USERNAME);
String pwd = data.getStringExtra(CryptoIntents.EXTRA_PASSWORD);
resultText = uname + ":" + pwd;
} else if (requestCode == SPOOF_REQUEST) {
resultText = data.getStringExtra("masterKey");
}
}
EditText outputText = (EditText) findViewById(R.id.output_entry);
outputText.setText(resultText, android.widget.TextView.BufferType.EDITABLE);
}
}
"""
NESTING_TEST = """package de.ugoe.cs.coast;
public class NestingTest {
public void myMethod() {
Boolean condition1 = true;
Boolean condition2 = true;
try {
// try does not count towards nesting
if (condition1) {
// +1
for (int i = 0; i < 10; i++) {
// +2 (nesting=1)
while (condition2) {
// +3 (nesting=2)
}
}
}
} catch (ExcepType2 e) {
// +1
if (condition2) {
// +2 (nesting=1)
}
}
}
// sum cc = 9
}
"""
# Sonar does not count default: but we include this in our count
SWITCH_TEST = """package de.ugoe.cs.coast;
public class SwitchTest {
public String getWords(int number) { // mccc = +1
switch (number) {
case 1: // mccc = +1
return "one";
case 2: // mccc = +1
return "a couple";
case 3: // mccc = +1
return "a few";
default:
return "lots";
}
}
// mccc = 4
// cc = 1
}
"""
OVERLOADING_TEST = """package de.ugoe.cs.coast;
public class OverloadingTest {
public void test(long number) {
}
public String test(int number1, int number2) {
}
public boolean test(int number1, int number2, boolean test) {
}
}
"""
PARAM_TEST = """
package de.ugoe.cs.coast;
public class ParamTest {
public java.lang.String writeAll(java.sql.ResultSet rs, boolean includeColumnNames) throws SQLException, IOException {
ResultSetMetaData metadata = rs.getMetaData();
if (includeColumnNames) {
writeColumnNames(metadata);
}
int columnCount = metadata.getColumnCount();
while (rs.next()) {
String[] nextLine = new String[columnCount];
for (int i = 0; i < columnCount; i++) {
nextLine[i] = getColumnValue(rs, metadata.getColumnType(i + 1), i + 1);
}
writeNext(nextLine);
}
}
}
"""
CONSTRUCTOR_TEST = """
package de.ugoe.cs.coast;
public class ParamTest {
public ParamTest(int i) {
}
}
"""
STATIC_NESTED_TEST = """
package de.ugoe.cs.coast;
public class ParamTest {
public void test1() {
}
static class ParamTest2 {
public void test2() {
}
}
}
"""
ANO_TEST = """
package de.ugoe.cs.coast;
public class AnoTest {
// nested anonymous class
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void onReceive(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
};
// anonymous class in method
public void test() {
// we need to ignore this
List<String> passDescriptions4Adapter=new ArrayList<String>();
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void onReceive2(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
};
}
// we want to only count NewDialogInterface and not new AlertDialog, this would otherwise mess with the counting of anonymous classes
public void test2() {
dbHelper = new DBHelper(this);
if (dbHelper.isDatabaseOpen()==false) {
Dialog dbError = new AlertDialog.Builder(this)
.setIcon(android.R.drawable.ic_dialog_alert)
.setTitle(R.string.database_error_title)
.setPositiveButton(android.R.string.ok, new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog, int whichButton) {
finish();
}
})
.setMessage(R.string.database_error_msg)
.create();
dbError.show();
return;
}
}
// anonymous class in method with multiple methods
public void test3() {
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void naTest1(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
public void naTest2() {
}
};
}
// multi layer inline is not counted (only outermost layer)
public void test4() {
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void inTest1(Context context, Intent intent) {
//hallo
BroadcastReceiver mIntentReceiver = new BroadcastReceiver() {
public void sTest1(Context context, Intent intent) {
if (intent.getAction().equals(CryptoIntents.ACTION_CRYPTO_LOGGED_OUT)) {
if (debug) Log.d(TAG,"caught ACTION_CRYPTO_LOGGED_OUT");
startActivity(frontdoor);
}
}
public void sTest2() {
}
};
}
};
}
}
"""
INTERFACE_TEST = """
package de.ugoe.cs.coast;
public interface ITest {
public int getWordSize();
}
"""
INLINE_INTERFACE_TEST = """
package de.ugoe.cs.coast;
public class InterfaceClass {
public interface ITest {
public int getWordSize();
}
}
"""
CC_TEST = """
package de.ugoe.cs.coast;
public class CCTestClass {
public long updatePassword(long Id, PassEntry entry) {
ContentValues args = new ContentValues();
args.put("description", entry.description);
args.put("username", entry.username);
args.put("password", entry.password);
args.put("website", entry.website);
args.put("note", entry.note);
args.put("unique_name", entry.uniqueName);
DateFormat dateFormatter = DateFormat.getDateTimeInstance(DateFormat.DEFAULT, DateFormat.FULL);
Date today = new Date();
String dateOut = dateFormatter.format(today);
args.put("lastdatetimeedit", dateOut);
try {
db.update(TABLE_PASSWORDS, args, "id=" + Id, null);
} catch (SQLException e)
{
Log.d(TAG,"updatePassword: SQLite exception: " + e.getLocalizedMessage());
return -1;
}
return Id;
}
}
"""
LONG_NAME1 = """org.openintents.safe.CategoryList.onCreateContextMenu(Landroid/view/ContextMenu;Landroid/view/View;Landroid/view/ContextMenu$ContextMenuInfo;)Landroid/app/Dialog;"""
LONG_NAME2 = """org.openintents.safe.SearchFragment.getRowsIds(Ljava/util/List;)[J"""
LONG_NAME3 = """org.openintents.safe.RestoreHandler.characters([CII)V"""
LONG_NAME4 = """org.openintents.safe.Import.doInBackground([Ljava/lang/String;)Ljava/lang/String;"""
LONG_NAME5 = """org.openintents.safe.CryptoContentProvider.insert(Landroid/net/Uri;Landroid/content/ContentValues;)Landroid/net/Uri;"""
LONG_NAME6 = """org.openintents.safe.CryptoContentProvider.query(Landroid/net/Uri;[Ljava/lang/String;Ljava/lang/String;[Ljava/lang/String;Ljava/lang/String;)Landroid/database/Cursor;"""
LONG_NAME7 = """org.openintents.distribution.EulaActivity$1.onClick(Landroid/view/View;)V"""
LONG_NAME8 = """org.openintents.distribution.AboutDialog.<init>(Landroid/content/Context;)V"""
LONG_NAME9 = """estreamj.ciphers.trivium.Trivium$Maker.getName()Ljava/lang/String;"""
LONG_NAME10 = """de.guoe.cs.test(D)Ljava/lang/String;"""
LONG_NAME11 = """org.apache.zookeeper.ZKParameterized$RunnerFactory.createRunnerForTestWithParameters(LTestWithParameters;)Lorg.junit.runner.Runner;"""
# LONG_NAME12 = """"""
# LONG_NAME13 = """de.guoe.cs.test(LLString;L)V"""
NESTED_ANO_TEST = """package de.ugoe.cs.coast;
public class NestedAnoTest {
private class importTask {
private class importTask2 {
protected void onPostExecute(String result) {
Dialog about = new AlertDialog.Builder(CategoryList.this)
.setIcon(R.drawable.passicon)
.setTitle(R.string.import_complete)
.setPositiveButton(R.string.yes,
new DialogInterface.OnClickListener() {
public void onClick(int whichButton) {
File csvFile = new File(
importedFilename);
// csvFile.delete();
SecureDelete.delete(csvFile);
importedFilename = "";
}
})
.setNegativeButton(R.string.no,
new DialogInterface.OnClickListener() {
public void onClick(DialogInterface dialog,
int whichButton) {
}
}).setMessage(deleteMsg).create();
about.show();
}
}
}
}
"""
NESTED_NAMED_TEST = """package de.ugoe.cs.coast;
public class NestedNamedTest {
private void puit() {
}
private class importTask {
private void zoot() {
}
private class importTask2 {
private importTask2() {
}
private Void narf() {
}
}
}
}
"""
NESTED_INTERFACE_TEST = """package de.ugoe.cs.coast;
public interface NestedInterfaceTest {
public class TestClass {
public void test1() {
}
}
}
"""
LONG_NAME_CONVERSION_TEST = """package de.ugoe.cs.coast;
public class LongNameConversionTest {
public void test1(String a, long b, int i) {
}
public String[] test2(int[] a, byte[][] b) {
}
public String test3(long a, String[] b, long c) {
}
public void test4(K key, V value) {
}
}
"""
OBJECT_NAME_TEST = """package de.ugoe.cs.coast;
public class ObjectNameTest {
public java.lang.Object test1(Object K, java.lang.Object V) {
}
}
"""
ENUM_TEST = """package de.ugoe.cs.coast;
public enum EnumTest {
PERSISTENT_SEQUENTIAL_WITH_TTL(6, false, true, false, true);
EnumTest() {
}
public void test1(int a) {
}
}
"""
ARRAY_TEST = """package de.ugoe.cs.coast;
public class Pinky {
private bytes[] narf(java.lang.String[][] args, int[] a, float b) {
}
}
"""
VARARGS_TEST = """package de.ugoe.cs.coast;
public class Pinky {
private void narf(int a, String... args) {
}
}
"""
# todo:
# - anonymous class in named inner class
# org.openintents.safe.CategoryList$importTask$2.onClick(Landroid/content/DialogInterface;I)V
# import logging, sys
# log = logging.getLogger()
# log.setLevel(logging.DEBUG)
# i = logging.StreamHandler(sys.stdout)
# e = logging.StreamHandler(sys.stderr)
# i.setLevel(logging.DEBUG)
# e.setLevel(logging.ERROR)
# log.addHandler(i)
# log.addHandler(e)
| [
11748,
555,
715,
395,
198,
11748,
474,
9226,
648,
198,
198,
6738,
7051,
9693,
14175,
13,
22602,
13,
41887,
414,
62,
12355,
1330,
19157,
414,
29584,
11,
8090,
27231,
3103,
9641,
628,
198,
33,
1268,
3185,
62,
51,
6465,
796,
37227,
26495... | 1.913484 | 7,987 |
from Pessoa import Pessoa | [
6738,
350,
408,
12162,
1330,
350,
408,
12162
] | 3.125 | 8 |
from .boost import *
from .logger import *
from .utils import *
| [
6738,
764,
39521,
1330,
1635,
198,
6738,
764,
6404,
1362,
1330,
1635,
198,
6738,
764,
26791,
1330,
1635,
198
] | 3.368421 | 19 |
from django.db import models
from datetime import datetime, timedelta
# Create your models here. | [
6738,
42625,
14208,
13,
9945,
1330,
4981,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
28805,
12514,
198,
2,
13610,
534,
4981,
994,
13
] | 4 | 24 |
import random
import matplotlib.pyplot as plt
trial = 20 # 模拟实验次数
amp = 2.0 # 上下振幅(对称乘除)
cash = 1.0 # 初始现金
days = 200 # 每次模拟实验观察天数
print("\n多次实验,每次实验的最终股价与总资产的对比:\n")
for i in range(trial):
money = value = cash / 2 # 一半买为股票,一半保留现金
price = 1.0 # 初始股票价格
shares = value / price # 初始买的股票数,假定允许买卖分数股数
moneys = [money] # 数组,用来存放每天的现金额
values = [value] # 数组,用来存放每天的股票市值
prices = [price] # 数组,用来存放每天的股票价格
assets = [money + value] # 数组,用来存放每天的总资产
for day in range(1, days):
price = price * amp**random.choice([-1,1]) # 随机决定上涨还是下跌
prices.append(price)
val_tmp = shares * price
delta = (val_tmp - money) / price / 2 # 卖出/买入股值与现金的差值一半对应的股票,保持股值与现金相等
shares = shares - delta
value = shares * price
values.append(value)
money = money + delta * price
moneys.append(money)
assets.append(money + value)
print("第{:2d}次实验结果: Price = {:.2e} Assets = {:.2e} A/P = {:.2e}".format(i+1, prices[days-1],assets[days-1],assets[days-1]/prices[days-1]))
# 把最后一次实验数据用走势图展示出来
plt.plot(range(days), prices, label='Stock Price') # 对价格按日期作图(折线图)
plt.plot(range(days), assets, label='Total Assets') # 对资产按日期作图(折线图)
plt.xlabel('Days') # 横坐标名称
plt.ylabel('Total Assets / Stock Price') # 纵坐标名称
plt.yscale('log') # 纵坐标为对数坐标
plt.legend(loc='best') # 自动选择最佳图例位置
plt.title("Earn Money with Shannon's Strategy") # 图表名称
plt.show() # 显示图形
| [
11748,
4738,
198,
11748,
2603,
29487,
8019,
13,
9078,
29487,
355,
458,
83,
198,
198,
45994,
796,
1160,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
1303,
10545,
101,
94,
162,
233,
253,
22522,
... | 1.20069 | 1,450 |
''' This module will handle the text generation with beam search. '''
import torch
import torch.nn as nn
import torch.nn.functional as F
from transformer.Beam import Beam
from transformer.Models import Transformer
class Translator(object):
''' Load with trained model and handle the beam search '''
def translate_batch(self, src_seq, src_pos):
''' Translation work in one batch '''
def get_inst_idx_to_tensor_position_map(inst_idx_list):
''' Indicate the position of an instance in a tensor. '''
return {inst_idx: tensor_position for tensor_position, inst_idx in enumerate(inst_idx_list)}
def collect_active_part(beamed_tensor, curr_active_inst_idx, n_prev_active_inst, n_bm):
''' Collect tensor parts associated to active instances. '''
_, *d_hs = beamed_tensor.size()
n_curr_active_inst = len(curr_active_inst_idx)
new_shape = (n_curr_active_inst * n_bm, *d_hs)
beamed_tensor = beamed_tensor.view(n_prev_active_inst, -1)
beamed_tensor = beamed_tensor.index_select(0, curr_active_inst_idx)
beamed_tensor = beamed_tensor.view(*new_shape)
return beamed_tensor
def beam_decode_step(
inst_dec_beams, len_dec_seq, src_seq, enc_output, inst_idx_to_position_map, n_bm):
''' Decode and update beam status, and then return active beam idx '''
n_active_inst = len(inst_idx_to_position_map)
dec_seq = prepare_beam_dec_seq(inst_dec_beams, len_dec_seq)
dec_pos = prepare_beam_dec_pos(len_dec_seq, n_active_inst, n_bm)
word_prob = predict_word(dec_seq, dec_pos, src_seq, enc_output, n_active_inst, n_bm)
# Update the beam with predicted word prob information and collect incomplete instances
active_inst_idx_list = collect_active_inst_idx_list(
inst_dec_beams, word_prob, inst_idx_to_position_map)
return active_inst_idx_list
with torch.no_grad():
# -- Encode
src_seq, src_pos = src_seq.to(self.device), src_pos.to(self.device)
src_enc, *_ = self.model.encoder(src_seq, src_pos)
# -- Repeat data for beam search
n_bm = self.opt.beam_size
n_inst, len_s, d_h = src_enc.size()
src_seq = src_seq.repeat(1, n_bm).view(n_inst * n_bm, len_s)
src_enc = src_enc.repeat(1, n_bm, 1).view(n_inst * n_bm, len_s, d_h)
# -- Prepare beams
inst_dec_beams = [Beam(n_bm, device=self.device) for _ in range(n_inst)]
# -- Bookkeeping for active or not
active_inst_idx_list = list(range(n_inst))
inst_idx_to_position_map = get_inst_idx_to_tensor_position_map(active_inst_idx_list)
# -- Decode
for len_dec_seq in range(1, self.model_opt.max_token_seq_len + 1):
active_inst_idx_list = beam_decode_step(
inst_dec_beams, len_dec_seq, src_seq, src_enc, inst_idx_to_position_map, n_bm)
if not active_inst_idx_list:
break # all instances have finished their path to <EOS>
src_seq, src_enc, inst_idx_to_position_map = collate_active_info(
src_seq, src_enc, inst_idx_to_position_map, active_inst_idx_list)
batch_hyp, batch_scores = collect_hypothesis_and_scores(inst_dec_beams, self.opt.n_best)
return batch_hyp, batch_scores
| [
7061,
6,
770,
8265,
481,
5412,
262,
2420,
5270,
351,
15584,
2989,
13,
705,
7061,
201,
198,
201,
198,
11748,
28034,
201,
198,
11748,
28034,
13,
20471,
355,
299,
77,
201,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
201,
198,
67... | 2.04328 | 1,756 |
from classes.data_splitters.DataSplitter import DataSplitter
from classes.handlers.ParamsHandler import ParamsHandler
import numpy as np
import pandas as pd
import os
import random
import copy
| [
6738,
6097,
13,
7890,
62,
35312,
1010,
13,
6601,
26568,
1967,
1330,
6060,
26568,
1967,
198,
6738,
6097,
13,
4993,
8116,
13,
10044,
4105,
25060,
1330,
2547,
4105,
25060,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
35... | 3.679245 | 53 |
import os
# https://stackoverflow.com/questions/10492869/how-to-perform-leet-with-python
from utilitybelt import change_charset
origspace = "abcdefghijklmnopqrstuvwxyz"
keyspace = "abcd3fgh1jklmnopqr57uvwxyz"
# print(change_charset("leetspeak",origspace, keyspace))
reverse_file='invertido.txt'
with open(reverse_file, 'r') as f:
words = f.readlines()
for w in words:
print(change_charset(w.lower(), keyspace, origspace)) | [
11748,
28686,
198,
198,
2,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
14,
940,
2920,
2078,
3388,
14,
4919,
12,
1462,
12,
525,
687,
12,
293,
316,
12,
4480,
12,
29412,
198,
6738,
10361,
37976,
1330,
1487,
62,
354,
945,
... | 2.463277 | 177 |
from __future__ import annotations
from spark_auto_mapper_fhir.fhir_types.uri import FhirUri
from spark_auto_mapper_fhir.value_sets.generic_type import GenericTypeCode
from spark_auto_mapper.type_definitions.defined_types import AutoMapperTextInputType
# This file is auto-generated by generate_classes so do not edit manually
# noinspection PyPep8Naming
class XPathUsageTypeCode(GenericTypeCode):
"""
XPathUsageType
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
How a search parameter relates to the set of elements returned by evaluating
its xpath query.
"""
"""
http://hl7.org/fhir/search-xpath-usage
"""
codeset: FhirUri = "http://hl7.org/fhir/search-xpath-usage"
class XPathUsageTypeCodeValues:
"""
The search parameter is derived directly from the selected nodes based on the
type definitions.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Normal = XPathUsageTypeCode("normal")
"""
The search parameter is derived by a phonetic transform from the selected
nodes.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Phonetic = XPathUsageTypeCode("phonetic")
"""
The search parameter is based on a spatial transform of the selected nodes.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Nearby = XPathUsageTypeCode("nearby")
"""
The search parameter is based on a spatial transform of the selected nodes,
using physical distance from the middle.
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Distance = XPathUsageTypeCode("distance")
"""
The interpretation of the xpath statement is unknown (and can't be automated).
From: http://hl7.org/fhir/search-xpath-usage in valuesets.xml
"""
Other = XPathUsageTypeCode("other")
| [
6738,
11593,
37443,
834,
1330,
37647,
198,
198,
6738,
9009,
62,
23736,
62,
76,
11463,
62,
69,
71,
343,
13,
69,
71,
343,
62,
19199,
13,
9900,
1330,
376,
71,
343,
52,
380,
198,
198,
6738,
9009,
62,
23736,
62,
76,
11463,
62,
69,
71... | 2.871363 | 653 |
# -*- coding: utf-8 -*-
# @Author: yancz1989
# @Date: 2016-06-20 11:00:29
# @Last Modified by: yancz1989
# @Last Modified time: 2016-06-20 11:00:31
import numpy as np
import tunas.core.arch.theano_mod.ops as ops
reload(ops)
from tunas.core.arch.theano_mod.ops import *
np.random.seed(2012310818)
x = theano.shared(np.random.rand(10, 10).astype('float32'))
y = theano.shared(np.random.rand(10, 10).astype('float32'))
ops_scalar = [round, abs, neg, sign, inv, sqrt, square, exp, log,
ceil, floor, sin, cos, diag, diagv, trace, determinant, matinv,
cholesky, fft, ifft, sum, prod, max, min, argmax, argmin, mean, std, unique, where]
ops_binary = [add, sub, mul, div, pow, elmw_max, elmw_min, matmul, batch_matmul, pad, ]
ops_bool = [eq, lt, le, gt, ge, logic_and, logic_or, logic_not, logic_xor]
rand_func = [randn, rand, binomial, shuffle]
activations = [relu, softplus, softmax, tanh, sigmoid, thresholding, clip, linear]
conv = [conv2d, conv3d, max_pool2d, max_pool3d, avg_pool2d, avg_pool3d, window_slides]
loss = [mse, mae, msle, sqr_hinge, hinge, categorical_crossentropy, binary_crossentropy, cosine_proximity]
optimizer = [gd, momentum, rmsprop, adagrad, adadelta, adam, adamax]
funcs = [ops_scalar, ops_binary, ops_bool, rand_func, activations, conv, loss]
for f in ops_scalar:
print(type(f(x))) | [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
2488,
13838,
25,
331,
1192,
89,
25475,
198,
2,
2488,
10430,
25,
220,
220,
1584,
12,
3312,
12,
1238,
1367,
25,
405,
25,
1959,
198,
2,
2488,
5956,
40499,
416,
25,
... | 2.325862 | 580 |
from tool.runners.python import SubmissionPy
| [
6738,
2891,
13,
36740,
13,
29412,
1330,
42641,
20519,
628
] | 4.6 | 10 |
# Generated by Django 2.1.7 on 2019-03-21 01:26
from django.db import migrations, models
import django.db.models.deletion
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
22,
319,
13130,
12,
3070,
12,
2481,
5534,
25,
2075,
198,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
198,
11748,
42625,
14208,
13,
9945,
13,
27530,
13,
2934,
1616,
295,
... | 2.818182 | 44 |
import argparse
import logging
import sys
import pandas as pd
import sklearn
from model_nb_tree_classifier import ModelNBTreeClassifier
if __name__ == "__main__":
run_main()
| [
11748,
1822,
29572,
198,
11748,
18931,
198,
11748,
25064,
198,
198,
11748,
19798,
292,
355,
279,
67,
198,
11748,
1341,
35720,
198,
198,
6738,
2746,
62,
46803,
62,
21048,
62,
4871,
7483,
1330,
9104,
45,
19313,
631,
9487,
7483,
628,
628,
... | 3 | 62 |
import unittest
import numpy
import chainer
from chainer import functions
from chainer import testing
from chainer.testing import attr
import chainerx
if chainerx.is_available():
import chainerx.testing
@testing.parameterize(*testing.product_dict(
[
{'shape': (3, 4)},
{'shape': ()},
],
[
{'in_type': numpy.bool_},
{'in_type': numpy.uint8},
{'in_type': numpy.uint64},
{'in_type': numpy.int8},
{'in_type': numpy.int64},
{'in_type': numpy.float16},
{'in_type': numpy.float32},
{'in_type': numpy.float64},
],
[
{'out_type': numpy.bool_},
{'out_type': numpy.uint8},
{'out_type': numpy.uint64},
{'out_type': numpy.int8},
{'out_type': numpy.int64},
{'out_type': numpy.float16},
{'out_type': numpy.float32},
{'out_type': numpy.float64},
]
))
@testing.inject_backend_tests(
None,
# CPU tests
[
{},
]
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
'cuda_device': [0, 1],
})
# ChainerX tests
+ [
{'use_chainerx': True, 'chainerx_device': 'native:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:0'},
{'use_chainerx': True, 'chainerx_device': 'cuda:1'},
]
)
@attr.chainerx
testing.run_module(__name__, __file__)
| [
11748,
555,
715,
395,
198,
198,
11748,
299,
32152,
198,
198,
11748,
6333,
263,
198,
6738,
6333,
263,
1330,
5499,
198,
6738,
6333,
263,
1330,
4856,
198,
6738,
6333,
263,
13,
33407,
1330,
708,
81,
198,
11748,
6333,
263,
87,
628,
198,
... | 1.9573 | 726 |
"""
Copyright 2021 Cesar Miranda Meza (alias: Mortrack)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# -*- coding: utf-8 -*-
"""
Created on Fri May 1 17:15:51 2020
Last updated on Mon May 24 8:40:00 2021
@author: enginer Cesar Miranda Meza (alias: Mortrack)
"""
from ..linearAlgebra import MortrackLinearAlgebraLibrary as mLAL
# IMPORTANT NOTE: Remember to be careful with input argument variables when
# you call any method or class because it gets altered in
# python ignoring parenting or children logic
"""
DiscreteDistribution()
The Combinations class allows you to get some parameters, through some of its
methods, that describe the dataset characteristics (eg. mean, variance and
standard deviation).
"""
class DiscreteDistribution:
"""
getMean(samplesList="will contain a matrix of rows and columns, were we want to get the Mean of each rows data point samples")
Returns a matrix (containing only 1 column for all rows within this class
local variable "samplesList"), were each row will have its corresponding
mean value.
EXAMPLE CODE:
matrix_x = [
[1,2,3],
[4,5,6],
[1,5,9]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dD = mSL.DiscreteDistribution()
result = dD.getMean(matrix_x)
EXPECTED CODE RESULT:
result =
[[2.0], [5.0], [5.0]]
"""
"""
getVariance(samplesList="will contain a matrix of rows and columns, were we want to get the Variance of each rows data point samples")
Returns a matrix (containing only 1 column for all rows within this class
local variable "samplesList"), were each row will have its corresponding
variance value.
Remember that Variance is also denoted as the square of sigma
EXAMPLE CODE:
matrix_x = [
[1,2,3],
[4,5,6],
[1,5,9],
[1,4,7]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dD = mSL.DiscreteDistribution()
result = dD.getVariance(matrix_x)
EXPECTED CODE RESULT:
result =
[[1.0], [1.0], [16.0], [9.0]]
"""
"""
getStandardDeviation(samplesList="will contain a matrix of rows and columns, were we want to get the Standard Deviation of each rows data point samples")
Returns a matrix (containing only 1 column for all rows within this class
local variable "samplesList"), were each row will have its corresponding
Standard Deviation value.
Remember that Standard Deviation is also denoted as sigma
EXAMPLE CODE:
matrix_x = [
[1,2,3],
[4,5,6],
[1,5,9],
[1,4,7]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dD = mSL.DiscreteDistribution()
result = dD.getStandardDeviation(matrix_x)
EXPECTED CODE RESULT:
result =
[[1.0], [1.0], [16.0], [9.0]]
"""
"""
Tdistribution(desiredTrustInterval="Its a float numeric type value that will represent the desired percentage(%) that you desire for your trust interval")
The Combinations class allows you to get some parameters, through some of its
methods, that describe the dataset characteristics (eg. mean, variance and
standard deviation).
"""
class Tdistribution:
"""
getCriticalValue(numberOfSamples="Must have a whole number that represents the number of samples you want to get the critical value from")
Returns a float numeric value which will represent the Critical Value of
the parameters that you specified (the desired trust interval and the
number of samples)
Remember that the T-distribution considers that your data has a normal
function form tendency.
EXAMPLE CODE:
matrix_x = [
[1,2,3],
[4,5,6],
[1,5,9],
[1,4,7]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
tD = mSL.Tdistribution(desiredTrustInterval=95)
result = tD.getCriticalValue(len(matrix_x[0]))
EXPECTED CODE RESULT:
result =
4.303
"""
class TrustIntervals:
"""
getMeanIntervals(samplesList="Must contain the matrix of the dataset from which you want to get the Mean Intervals",
meanList="Must contain the matrix (containing only 1 column for all rows), were each row will have its corresponding mean value.",
standardDeviationList="Must contain the matrix (containing only 1 column for all rows), were each row will have its corresponding standard deviation value.",
tValue="Must contain a float numeric value that represents the T-Value (Critical Value) required to calculate the mean intervals")
This method returns a matrix with 2 columns:
* Column 1 = negative mean interval values in the corresponding "n" number of rows
* Column 2 = positive mean interval values in the corresponding "n" number of rows
Remember that the T-distribution considers that your data has a normal
function form tendency.
EXAMPLE CODE:
matrix_x = [
[1,2,3],
[4,5,6],
[1,5,9],
[1,4,7]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
tI = mSL.TrustIntervals()
dD = mSL.DiscreteDistribution()
meanList = dD.getMean(matrix_x)
standardDeviationList = dD.getStandardDeviation(matrix_x)
tD = mSL.Tdistribution(desiredTrustInterval=95)
tValue = tD.getCriticalValue(len(matrix_x[0]))
meanIntervalsList = tI.getMeanIntervals(matrix_x, meanList, standardDeviationList, tValue)
negativeMeanIntervalList = []
positiveMeanIntervalList = []
for row in range(0, len(meanIntervalsList)):
temporalRow = []
temporalRow.append(meanIntervalsList[row][0])
negativeMeanIntervalList.append(temporalRow)
temporalRow = []
temporalRow.append(meanIntervalsList[row][1])
positiveMeanIntervalList.append(temporalRow)
EXPECTED CODE RESULT:
negativeMeanIntervalList =
[[-0.48433820832295993],
[2.51566179167704],
[-4.93735283329184],
[-3.453014624968879]]
positiveMeanIntervalList =
[[4.48433820832296],
[7.48433820832296],
[14.93735283329184],
[11.45301462496888]]
"""
"""
getPredictionIntervals(samplesList="Must contain the matrix of the dataset from which you want to get the Prediction Intervals",
meanList="Must contain the matrix (containing only 1 column for all rows), were each row will have its corresponding mean value.",
standardDeviationList="Must contain the matrix (containing only 1 column for all rows), were each row will have its corresponding standard deviation value.",
tValue="Must contain a float numeric value that represents the T-Value (Critical Value) required to calculate the Prediction intervals")
This method returns a matrix with 2 columns:
* Column 1 = negative Prediction interval values in the corresponding "n" number of rows
* Column 2 = positive Prediction interval values in the corresponding "n" number of rows
Remember that the T-distribution considers that your data has a normal
function form tendency.
EXAMPLE CODE:
matrix_x = [
[1,2,3],
[4,5,6],
[1,5,9],
[1,4,7]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
tI = mSL.TrustIntervals()
dD = mSL.DiscreteDistribution()
meanList = dD.getMean(matrix_x)
standardDeviationList = dD.getStandardDeviation(matrix_x)
tD = mSL.Tdistribution(desiredTrustInterval=95)
numberOfSamples = len(matrix_x[0])
tValue = tD.getCriticalValue(numberOfSamples)
predictionIntervalsList = tI.getPredictionIntervals(numberOfSamples, meanList, standardDeviationList, tValue)
negativePredictionIntervalList = []
positivePredictionIntervalList = []
for row in range(0, len(predictionIntervalsList)):
temporalRow = []
temporalRow.append(predictionIntervalsList[row][0])
negativePredictionIntervalList.append(temporalRow)
temporalRow = []
temporalRow.append(predictionIntervalsList[row][1])
positivePredictionIntervalList.append(temporalRow)
EXPECTED CODE RESULT:
negativePredictionIntervalList =
[[-2.968676416645919],
[0.03132358335408103],
[-14.874705666583676],
[-10.906029249937756]]
positivePredictionIntervalList =
[[6.968676416645919],
[9.96867641664592],
[24.874705666583676],
[18.906029249937756]]
"""
"""
Combinations("The sample list you want to work with")
The Combinations class allows you to get the possible combinations within
the values contained in the "samplesList" variable contained within this class.
"""
class Combinations:
    """
    setSamplesList("The new sample list you want to work with")
    This method changes the value of the object's variable "samplesList" to a
    new set of list values that you want to work with through this class's
    methods.
    """
    """
    getPositionCombinationsList()
    Returns all the possible position arrangements of the elements contained
    within a list.
    EXAMPLE CODE:
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        combinations = mSL.Combinations([0,1,2])
        result = combinations.getPositionCombinationsList()
    EXPECTED CODE RESULT:
        result =
        [[0, 1, 2], [1, 0, 2], [1, 2, 0], [0, 2, 1], [2, 0, 1]]
    """
    """
    getCustomizedPermutationList()
    Returns a customized form of permutation of the elements contained within
    a list. See the code example and its expected result to get a better idea
    of how this method works.
    EXAMPLE CODE:
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        combinations = mSL.Combinations([0,1,2])
        result = combinations.getCustomizedPermutationList()
    EXPECTED CODE RESULT:
        result =
        [[], [0], [1], [0, 1], [2], [0, 2], [1, 2], [0, 1, 2]]
    """
"""
DatasetSplitting("x independent variable datapoints to model", "y dependent variable datapoints to model")
The DatasetSplitting library allows you to split your dataset into training and
test set.
"""
class DatasetSplitting:
    """
    getDatasetSplitted(testSize = "the desired size of the test samples. This value must be greater than zero and lower than one",
                       isSplittingRandom = "True if you want samples to be split randomly. False if otherwise is desired")
    This method returns a dataset split into training and test sets.
    CODE EXAMPLE1:
        matrix_y = [
            [14.05],
            [10.55],
            [7.55],
            [14.93],
            [9.48],
            [6.59],
            [16.56],
            [13.63],
            [9.23],
            [15.85],
            [11.75],
            [8.78],
            [22.41],
            [18.55],
            [15.93],
            [21.66],
            [17.98],
            [16.44]
        ]
        matrix_x = [
            [75, 15],
            [100, 15],
            [125, 15],
            [75, 17.5],
            [100, 17.5],
            [125, 17.5],
            [75, 20],
            [100, 20],
            [125, 20],
            [75, 22.5],
            [100, 22.5],
            [125, 22.5],
            [75, 25],
            [100, 25],
            [125, 25],
            [75, 27.5],
            [100, 27.5],
            [125, 27.5]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        dS = mSL.DatasetSplitting(matrix_x, matrix_y)
        datasetSplitResults = dS.getDatasetSplitted(testSize = 0.10, isSplittingRandom = False)
        x_train = datasetSplitResults[0]
        x_test = datasetSplitResults[1]
        y_train = datasetSplitResults[2]
        y_test = datasetSplitResults[3]
    EXPECTED CODE1 RESULT:
        x_train =
        [[125, 15],
         [75, 17.5],
         [100, 17.5],
         [125, 17.5],
         [75, 20],
         [100, 20],
         [125, 20],
         [75, 22.5],
         [100, 22.5],
         [125, 22.5],
         [75, 25],
         [100, 25],
         [125, 25],
         [75, 27.5],
         [100, 27.5],
         [125, 27.5]]
        x_test =
        [[75, 15], [100, 15]]
        y_train =
        [[7.55],
         [14.93],
         [9.48],
         [6.59],
         [16.56],
         [13.63],
         [9.23],
         [15.85],
         [11.75],
         [8.78],
         [22.41],
         [18.55],
         [15.93],
         [21.66],
         [17.98],
         [16.44]]
        y_test =
        [[14.05], [10.55]]
    """
"""
FeatureScaling("datapoints you want to apply Feature Scaling to")
The Feature Scaling library gives several methods to apply feature scaling
techniques to your datasets.
"""
class FeatureScaling:
    """
    getStandarization(preferedMean="the preferred mean value",
                      preferedStandardDeviation="the preferred standard deviation value",
                      isPreferedDataUsed="True to indicate that the preferred values will be used. False to indicate otherwise.")
    This method returns a dataset with the standardization method of Feature
    Scaling applied to it. This method will also return the calculated mean
    and the calculated standard deviation value.
    CODE EXAMPLE1:
        matrix_x = [
            [75, 15],
            [100, 15],
            [125, 15],
            [75, 17.5],
            [100, 17.5],
            [125, 17.5],
            [75, 20],
            [100, 20],
            [125, 20],
            [75, 22.5],
            [100, 22.5],
            [125, 22.5],
            [75, 25],
            [100, 25],
            [125, 25],
            [75, 27.5],
            [100, 27.5],
            [125, 27.5]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        featureScaling = mSL.FeatureScaling(matrix_x)
        normalizedResults = featureScaling.getStandarization()
        preferedMean = normalizedResults[0]
        preferedStandardDeviation = normalizedResults[1]
        normalizedDataPoints = normalizedResults[2]
    EXPECTED CODE1 RESULT:
        preferedMean =
        [[100.0, 21.25]]
        preferedStandardDeviation =
        [[21.004201260420146, 4.393343895967546]]
        normalizedDataPoints =
        [[-1.1902380714238083, -1.422606594884729],
         [0.0, -1.422606594884729],
         [1.1902380714238083, -1.422606594884729],
         [-1.1902380714238083, -0.8535639569308374],
         [0.0, -0.8535639569308374],
         [1.1902380714238083, -0.8535639569308374],
         [-1.1902380714238083, -0.2845213189769458],
         [0.0, -0.2845213189769458],
         [1.1902380714238083, -0.2845213189769458],
         [-1.1902380714238083, 0.2845213189769458],
         [0.0, 0.2845213189769458],
         [1.1902380714238083, 0.2845213189769458],
         [-1.1902380714238083, 0.8535639569308374],
         [0.0, 0.8535639569308374],
         [1.1902380714238083, 0.8535639569308374],
         [-1.1902380714238083, 1.422606594884729],
         [0.0, 1.422606594884729],
         [1.1902380714238083, 1.422606594884729]]
    # ------------------------------------------------------------------------- #
    CODE EXAMPLE2:
        matrix_x = [
            [75, 15],
            [100, 15],
            [125, 15],
            [75, 17.5],
            [100, 17.5],
            [125, 17.5],
            [75, 20],
            [100, 20],
            [125, 20],
            [75, 22.5],
            [100, 22.5],
            [125, 22.5],
            [75, 25],
            [100, 25],
            [125, 25],
            [75, 27.5],
            [100, 27.5],
            [125, 27.5]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        featureScaling = mSL.FeatureScaling(matrix_x)
        mean = [[100, 21.25]]
        standardDeviation = [[21.004201260420146, 4.393343895967546]]
        normalizedResults = featureScaling.getStandarization(preferedMean=mean, preferedStandardDeviation=standardDeviation, isPreferedDataUsed = True)
        preferedMean = normalizedResults[0]
        preferedStandardDeviation = normalizedResults[1]
        normalizedDataPoints = normalizedResults[2]
    EXPECTED CODE2 RESULT:
        preferedMean =
        [[100.0, 21.25]]
        preferedStandardDeviation =
        [[21.004201260420146, 4.393343895967546]]
        normalizedDataPoints =
        [[-1.1902380714238083, -1.422606594884729],
         [0.0, -1.422606594884729],
         [1.1902380714238083, -1.422606594884729],
         [-1.1902380714238083, -0.8535639569308374],
         [0.0, -0.8535639569308374],
         [1.1902380714238083, -0.8535639569308374],
         [-1.1902380714238083, -0.2845213189769458],
         [0.0, -0.2845213189769458],
         [1.1902380714238083, -0.2845213189769458],
         [-1.1902380714238083, 0.2845213189769458],
         [0.0, 0.2845213189769458],
         [1.1902380714238083, 0.2845213189769458],
         [-1.1902380714238083, 0.8535639569308374],
         [0.0, 0.8535639569308374],
         [1.1902380714238083, 0.8535639569308374],
         [-1.1902380714238083, 1.422606594884729],
         [0.0, 1.422606594884729],
         [1.1902380714238083, 1.422606594884729]]
    """
    """
    getReverseStandarization(preferedMean="the preferred mean value",
                             preferedStandardDeviation="the preferred standard deviation value")
    This method returns a dataset with its original datapoint values restored,
    i.e. the values it had before the standardization Feature Scaling method
    was applied.
    CODE EXAMPLE1:
        matrix_x = [
            [-1.1902380714238083, -1.422606594884729],
            [0.0, -1.422606594884729],
            [1.1902380714238083, -1.422606594884729],
            [-1.1902380714238083, -0.8535639569308374],
            [0.0, -0.8535639569308374],
            [1.1902380714238083, -0.8535639569308374],
            [-1.1902380714238083, -0.2845213189769458],
            [0.0, -0.2845213189769458],
            [1.1902380714238083, -0.2845213189769458],
            [-1.1902380714238083, 0.2845213189769458],
            [0.0, 0.2845213189769458],
            [1.1902380714238083, 0.2845213189769458],
            [-1.1902380714238083, 0.8535639569308374],
            [0.0, 0.8535639569308374],
            [1.1902380714238083, 0.8535639569308374],
            [-1.1902380714238083, 1.422606594884729],
            [0.0, 1.422606594884729],
            [1.1902380714238083, 1.422606594884729]
        ]
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        featureScaling = mSL.FeatureScaling(matrix_x)
        mean = [[100, 21.25]]
        standardDeviation = [[21.004201260420146, 4.393343895967546]]
        deNormalizedResults = featureScaling.getReverseStandarization(preferedMean=mean, preferedStandardDeviation=standardDeviation)
        preferedMean = deNormalizedResults[0]
        preferedStandardDeviation = deNormalizedResults[1]
        deNormalizedDataPoints = deNormalizedResults[2]
    EXPECTED CODE1 RESULT:
        preferedMean =
        [[100.0, 21.25]]
        preferedStandardDeviation =
        [[21.004201260420146, 4.393343895967546]]
        deNormalizedDataPoints =
        [[75.0, 15.0],
         [100.0, 15.0],
         [125.0, 15.0],
         [75.0, 17.5],
         [100.0, 17.5],
         [125.0, 17.5],
         [75.0, 20.0],
         [100.0, 20.0],
         [125.0, 20.0],
         [75.0, 22.5],
         [100.0, 22.5],
         [125.0, 22.5],
         [75.0, 25.0],
         [100.0, 25.0],
         [125.0, 25.0],
         [75.0, 27.5],
         [100.0, 27.5],
         [125.0, 27.5]]
    """
    """
    setSamplesList(newSamplesList="the new samples list that you wish to work with")
    This method sets a new value in the object's local variable "samplesList".
    """
"""
The Regression library gives several different types of coeficients to model
a required data. But notice that the arguments of this class are expected to be
the mean values of both the "x" and the "y" values.
Regression("mean values of the x datapoints to model", "mean values of the y datapoints to model")
"""
class Regression:
"""
# ----------------------------------- #
# ----------------------------------- #
# ----- STILL UNDER DEVELOPMENT ----- #
# ----------------------------------- #
# ----------------------------------- #
getGaussianRegression()
Returns the best fitting model to predict the behavior of a dataset through
a Gaussian Regression model that may have any number of independent
variables (x).
Note that if no fitting model is found, then this method will swap the
dependent variables values in such a way that "0"s will be interpretated as
"1"s and vice-versa to then try again to find at least 1 fitting model to
your dataset. If this still doenst work, then this method will return
modeling results will all coefficients with values equal to zero, predicted
accuracy equal to zero and all predicted values will also equal zero.
CODE EXAMPLE:
# We will simulate a dataset that you would normally have in its original form
matrix_x = [
[2, 3],
[3, 2],
[4, 3],
[3, 4],
[1, 3],
[3, 1],
[5, 3],
[3, 5]
]
matrix_y = [
[1],
[1],
[1],
[1],
[0],
[0],
[0],
[0]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getGaussianRegression()
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[39.139277579342206],
[-13.813509557297337],
[2.302251592882884],
[-13.813509557296968],
[2.302251592882836]]
accuracyFromTraining =
99.94999999999685
predictedData =
[[0.9989999999998915],
[0.9990000000000229],
[0.9989999999999554],
[0.9989999999999234],
[0.0009999999999997621],
[0.0010000000000001175],
[0.00099999999999989],
[0.000999999999999915]]
# NOTE:"predictedData" will try to give "1" for positive values and "0"
# for negative values always, regardless if your negative values
# were originally given to the trained model as "-1"s.
coefficientDistribution =
'Coefficients distribution for the Gaussian function is as follows: Gaussian = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2 ))'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getLinearLogisticRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired")
This method returns the best fitting Logistic Regression model to be able
to predict a classification problem that can have any number of
independent variables (x).
CODE EXAMPLE:
matrix_x = [
[0,2],
[1,3],
[2,4],
[3,5],
[4,6],
[5,7],
[6,8],
[7,9],
[8,10],
[9,11]
]
matrix_y = [
[0],
[0],
[1],
[0],
[1],
[1],
[1],
[1],
[1],
[1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getLinearLogisticRegression(evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[4.395207586412653], [5.985854141495452], [-4.395207586412653]]
accuracyFromTraining =
80.02122762886552
predictedData =
[[0.012185988957723588],
[0.05707820342364075],
[0.22900916243958236],
[0.5930846789223594],
[0.8773292738274195],
[0.9722944298625625],
[0.9942264149220237],
[0.9988179452639562],
[0.9997588776328182],
[0.9999508513195541]]
coefficientDistribution =
'Coefficients distribution is as follows: p = (exp(bo + b1*x1 + b2*x2 + ... + bn*xn))/(1 + exp(bo + b1*x1 + b2*x2 + ... + bn*xn))'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getLinearRegression(isClassification="set to True if you are solving a classification problem. False if otherwise")
Returns the best fitting model to predict the behavior of a dataset through
a regular Linear Regression model. Note that this method can only solve
regression problems that have 1 independent variable (x).
CODE EXAMPLE:
matrix_x = [
[0],
[1],
[2],
[3],
[4],
[5],
[6],
[7],
[8],
[9]
]
matrix_y = [
[8.5],
[9.7],
[10.7],
[11.5],
[12.1],
[14],
[13.3],
[16.2],
[17.3],
[17.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getLinearRegression(isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[8.470909090909096], [1.0242424242424237]]
accuracyFromTraining =
97.05959379759686
predictedData =
[[8.470909090909096],
[9.49515151515152],
[10.519393939393943],
[11.543636363636367],
[12.56787878787879],
[13.592121212121214],
[14.616363636363639],
[15.640606060606062],
[16.664848484848484],
[17.689090909090908]]
coefficientDistribution =
'Coefficients distribution is as follows: y = b + m*x'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getMultipleLinearRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
Returns the best fitting model of a regression problem that has any number
of independent variables (x) through the Multiple Linear Regression method.
EXAMPLE CODE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultipleLinearRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[36.094678333151364], [1.030512601856226], [-1.8696429022156238], [0]]
accuracyFromTraining =
94.91286851439088
predictedData =
[[27.97866287863839],
[32.47405344687403],
[26.780769909063693],
[38.27922426742052],
[15.633130663659042],
[26.414492729454558],
[27.942743988094456],
[26.30423186956247],
[32.03534812093171],
[26.162019574015964],
[37.5240060242906],
[32.03387415343133],
[27.937442374564142]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x2 + b3*x3 + ... + bn*xn'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getPolynomialRegression(
orderOfThePolynomial = "whole number to represent the desired order of the polynomial model to find",
evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
Returns the best fitting model of a regression problem that has only 1
independent variable (x) in it, through a polynomial regression solution.
EXAMPLE CODE:
matrix_y = [
[3.4769e-11],
[7.19967e-11],
[1.59797e-10],
[3.79298e-10]
]
matrix_x = [
[-0.7],
[-0.65],
[-0.6],
[-0.55]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getPolynomialRegression(orderOfThePolynomial=3, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[3.468869185343018e-08],
[1.5123521825664843e-07],
[2.2104758041867345e-07],
[1.0817080022072073e-07]]
accuracyFromTraining =
99.99999615014885
predictedData =
[[3.4769003219065136e-11],
[7.199670288280337e-11],
[1.597970024878988e-10],
[3.792980021998557e-10]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x + b2*x^2 + b3*x^3 + ... + bn*x^n'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getMultiplePolynomialRegression(
orderOfThePolynomial = "whole number to represent the desired order of the polynomial model to find",
evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method returns the best fitting model of a dataset to predict its
behavior through a Multiple Polynomial Regression that may have any number
of independent variables (x). This method gets a model by through the
following equation format:
y = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultiplePolynomialRegression(orderOfThePolynomial=4, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[-1.745717777706403e-08],
[0],
[0.07581354676648289],
[-0.00104662847289827],
[3.942075523087618e-06],
[-14.202436859894078],
[0.670002091817878],
[-0.009761974914994198],
[-5.8006065221068606e-15]]
accuracyFromTraining =
91.33822971744071
predictedData =
[[14.401799310251064],
[10.481799480368835],
[7.578466505722503],
[13.96195814877683],
[10.041958318894615],
[7.1386253442482825],
[15.490847097061135],
[11.57084726717892],
[8.667514292532587],
[18.073281006823265],
[14.15328117694105],
[11.249948202294718],
[20.794074729782523],
[16.874074899900307],
[13.970741925253975],
[22.73804311765818],
[18.818043287775964],
[15.914710313129632]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + ... + bn*x1^n + b3*x2 + b4*x2^2 + ... + bn*x2^n + b5*x3 + b6*x3^2 + ... + bn*xn^n'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getCustomizedMultipleSecondOrderPolynomialRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method obtains the best solution of a customized 2nd order model when
using specifically 2 independent variables and were the equation to solve
is the following:
y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2
IMPORTANT NOTE: While the book "Probabilidad y estadistica para ingenieria
& ciencias (Walpole, Myers, Myers, Ye)" describes a model whos accuracy is
89.936% through finding a solution using the same model equation as used in
this method, i was able to achieve an algorithm that finds an even
better solution were i was able to get an accuracy of 90.57% (see code
example).
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleSecondOrderPolynomialRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[40.36892063492269],
[-0.29913333333337394],
[0.0008133333333341963],
[-1.2861238095233603],
[0.047676190476181546],
[0]]
accuracyFromTraining =
90.56977726188016
predictedData =
[[13.944206349214937],
[10.0242063492177],
[7.120873015888202],
[14.602587301596287],
[10.68258730159905],
[7.779253968269552],
[15.856920634929907],
[11.936920634932669],
[9.033587301603172],
[17.707206349215795],
[13.787206349218557],
[10.88387301588906],
[20.153444444453953],
[16.233444444456715],
[13.330111111127216],
[23.19563492064438],
[19.275634920647143],
[16.372301587317644]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + b5*x1*x2'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getCustomizedMultipleThirdOrderPolynomialRegression(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method obtains the best solution of a customized 3rd order model when
using specifically 2 independent variables and were the equation to solve
is the following:
y = bo + b1*x1 + b2*x1^2 + b3*x1^3 + b4*x2 + b5*x2^2 + b6*x2^3 + b7*x1*x2 + b8*x1^2*x2 + b9*x1*x2^2
IMPORTANT NOTE: The same base algorithm used in the method
"getCustomizedMultipleSecondOrderPolynomialRegression()" was applied in
this one. This is important to mention because the algorithm i created in
that method demonstrated to be superior of that one used in the book
"Probabilidad y estadistica para ingenieria & ciencias (Walpole, Myers,
Myers, Ye)". See that method's description to see more information about
this.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleThirdOrderPolynomialRegression(evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
RESULT OF CODE:
modelCoefficients =
[[118.62284443469252],
[2.6850685669390923e-10],
[0],
[2.711111111130216e-06],
[-14.043715503707062],
[0.7156842175145357],
[-0.011482404265578339],
[-0.024609341568850862],
[0],
[0.0006459332618172914]]
accuracyFromTraining =
92.07595419629946
predictedData =
[[14.601310971885873],
[10.5735435991239],
[7.56244289303574],
[14.177873191206809],
[9.924073908458023],
[6.686941292383061],
[15.770722763127356],
[11.492745714709685],
[8.23143533296583],
[18.303384287749555],
[14.203083617980887],
[11.11944961488603],
[20.699382365175477],
[16.978612218373712],
[14.274508738245757],
[21.882241595507075],
[18.742856115990087],
[16.62013730314699]]
coefficientDistribution =
'Coefficients distribution is as follows: y = bo + b1*x1 + b2*x1^2 + b3*x1^3 + b4*x2 + b5*x2^2 + b6*x2^3 + b7*x1*x2 + b8*x1^2*x2 + b9*x1*x2^2'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
predictLinearLogisticRegression(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0,2],
[1,3],
[2,4],
[3,5],
[4,6],
[5,7],
[6,8],
[7,9],
[8,10],
[9,11]
]
matrix_y = [
[0],
[0],
[1],
[0],
[1],
[1],
[1],
[1],
[1],
[1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getLinearLogisticRegression(evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictLinearLogisticRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[0.5],
[0.999978721536189],
[0.9999991162466249],
[0.9999999984756125],
[1.7295081461872963e-11]]
"""
"""
predictGaussianRegression(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# We will simulate a dataset that you would normally have in its original form
matrix_x = [
[2, 3],
[3, 2],
[4, 3],
[3, 4],
[1, 3],
[3, 1],
[5, 3],
[3, 5]
]
matrix_y = [
[1],
[1],
[1],
[1],
[0],
[0],
[0],
[0]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getGaussianRegression()
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictGaussianRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[1.003006010014743e-12],
[0.09993332221727314],
[1.0046799183277663e-17],
[1.0318455659367212e-97],
[1.0083723565531913e-28]]
"""
"""
predictLinearRegression(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0],
[1],
[2],
[3],
[4],
[5],
[6],
[7],
[8],
[9]
]
matrix_y = [
[8.5],
[9.7],
[10.7],
[11.5],
[12.1],
[14],
[13.3],
[16.2],
[17.3],
[17.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getLinearRegression(isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0],
[4],
[6],
[10],
[1]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictLinearRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[8.470909090909096],
[12.56787878787879],
[14.616363636363639],
[18.71333333333333],
[9.49515151515152]]
"""
"""
predictMultipleLinearRegression(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultipleLinearRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1,1],
[4,4,4],
[6,6,6],
[10,10,10],
[1,8,9]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictMultipleLinearRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[34.22503543093558],
[32.73815713171364],
[31.059896530994866],
[27.703375329557314],
[22.168047717282477]]
"""
"""
predictPolynomialRegression(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[3.4769e-11],
[7.19967e-11],
[1.59797e-10],
[3.79298e-10]
]
matrix_x = [
[-0.7],
[-0.65],
[-0.6],
[-0.55]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getPolynomialRegression(orderOfThePolynomial=3, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0],
[4],
[6],
[10],
[1]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictPolynomialRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[3.468869185343018e-08],
[1.1099322065704926e-05],
[3.226470574414124e-05],
[0.000131822599137008],
[5.151422907494728e-07]]
"""
"""
predictMultiplePolynomialRegression(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with",
orderOfThePolynomial="Assign a whole number that represents the order of degree of the Multiple Polynomial equation you want to make predictions with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# "orderOfThePolynomial" = "whole number to represent the desired order of the polynomial model to find"
# "evtfbmip" stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getMultiplePolynomialRegression(orderOfThePolynomial=4, evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictMultiplePolynomialRegression(coefficients=modelCoefficients, orderOfThePolynomial=4)
EXPECTED CODE RESULT:
predictedValues =
[[-13.54219748494156],
[-37.053240090011386],
[-48.742713747779355],
[-60.84907570434054],
[-73.31818590442116]]
"""
"""
predictCustomizedMultipleSecondOrderPolynomialRegression(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleSecondOrderPolynomialRegression(evtfbmip = True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictCustomizedMultipleSecondOrderPolynomialRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[39.13047301587551],
[34.803724444448],
[32.60300063492485],
[29.365301587306917],
[32.832886349211385]]
"""
"""
predictCustomizedMultipleThirdOrderPolynomialRegression(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_y = [
[14.05],
[10.55],
[7.55],
[14.93],
[9.48],
[6.59],
[16.56],
[13.63],
[9.23],
[15.85],
[11.75],
[8.78],
[22.41],
[18.55],
[15.93],
[21.66],
[17.98],
[16.44]
]
matrix_x = [
[75, 15],
[100, 15],
[125, 15],
[75, 17.5],
[100, 17.5],
[125, 17.5],
[75, 20],
[100, 20],
[125, 20],
[75, 22.5],
[100, 22.5],
[125, 22.5],
[75, 25],
[100, 25],
[125, 25],
[75, 27.5],
[100, 27.5],
[125, 27.5]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
modelingResults = regression.getCustomizedMultipleThirdOrderPolynomialRegression(evtfbmip=True, isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# ------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS ----- #
# ------------------------------------- #
predictThisValues = [
[0,1],
[4,4],
[6,6],
[10,10],
[1,8]
]
regression.set_xSamplesList(predictThisValues)
predictedValues = regression.predictCustomizedMultipleThirdOrderPolynomialRegression(coefficients=modelCoefficients)
EXPECTED CODE RESULT:
predictedValues =
[[105.28333074423442],
[72.81181980293967],
[56.899154811293464],
[36.45941710222553],
[46.042387049575304]]
"""
"""
Classification("x independent variable datapoints to model", "y dependent variable datapoints to model")
The Classification library gives several methods to be able to get the best
fitting classification model to predict a determined classification problem.
"""
class Classification:
"""
getSupportVectorMachine(evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired")
This method returns the best fitting Linear Support Vector Machine model to
be able to predict a classification problem of any number of independent
variables (x).
CODE EXAMPLE:
matrix_x = [
[0, 0],
[2, 2],
[4, 3],
[2, 4],
[3, 4],
[4, 4],
[5, 3],
[3, 5],
[4, 6]
]
matrix_y = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getSupportVectorMachine(evtfbmip = True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[[1.5736095873424212], [-0.26050769870994606], [-0.25468164794007475]]
accuracyFromTraining =
88.88888888888889
predictedData = [
[1],
[1],
[-1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
coefficientDistribution =
'Coefficients distribution is as follows: b1*x1 + b2*x2 + ... + bn*xn >= -bo (As a note, remember that true equation representation is: w.x>=c)'
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
getKernelSupportVectorMachine(kernel="you specify here the type of kernel that you want to model with. literally write, in strings, gaussian for a gaussian kernel; polynomial for a polynomial kernel; and linear for a linear kernel",
isPolynomialSVC="True if you want to apply a polynomial SVC. False if otherwise is desired",
orderOfPolynomialSVC="If you apply a polynomial SVC through the argument isPolynomialSVC, you then give a whole number here to indicate the order of degree that you desire in such Polynomial SVC",
orderOfPolynomialKernel="if you selected polynomial kernel in the kernel argument, you then here give a whole number to indicate the order of degree that you desire in such Polynomial Kernel",
evtfbmip="True to indicate to Eliminate Variables To Find Better Model If Possible. False if the contrary is desired")
This method returns the best fitting Kernel Support Vector Machine
model to be able to predict a classification problem of any number of
independent variables (x).
* If "gaussian" kernel is applied. This method will find the best
fitting model of such gaussian kernel through a gaussian regression.
* If "polynomimal" kernel is applied. This method will find the best
fitting model of such polynomial kernel through a Multiple Polynomial
Regression. You can specify the order of degree that you desire for your
Multiple Polynomial Kernel through the argument of this method named as
"orderOfPolynomialKernel".
* If "linear" kernel is applied. This method will find the best fitting
model of such polynomial kernel through a Multiple Linear Regression.
* You can also get a modified SVC by getting a non-linear intersection
plane to split your dataset into 2 specified categories. If you apply
this modified SVC, through "isPolynomialSVC" argument of this method,
you will be able to get a polynomial intersecting plane for your dataset
        whose degree order can be modified through the argument of this method
named as "orderOfPolynomialSVC".
CODE EXAMPLE:
matrix_x = [
[0, 0],
[2, 2],
[4, 3],
[2, 4],
[3, 4],
[4, 4],
[5, 3],
[3, 5],
[4, 6]
]
matrix_y = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getKernelSupportVectorMachine(kernel='gaussian', isPolynomialSVC=True, orderOfPolynomialSVC=2, orderOfPolynomialKernel=3, evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
EXPECTED CODE RESULT:
modelCoefficients =
[
[
[-0.4067247938936074],
[-2.638275880744686],
[0.6025816805607462],
[1.5978782207152165],
[0.0018850313260649898]
],
[
[17.733125277353782],
[-0.41918858713133034],
[-0.07845753695120994],
[-7.126885817943787],
[0.7414460867570138],
[13.371724079069963],
[-16.435714646771032]
]
]
accuracyFromTraining =
100.0
predictedData = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
coefficientDistribution =
[
'Coefficients distribution for the Gaussian Kernel is as follows: kernel = exp(-(bo + b1*x1 + b2*x1^2 + b3*x2 + b4*x2^2 + ... + b_(n-1)*xn + bn*xn^2))',
[
'Coefficients distribution is as follows: b1*x1 + b2*x2 + ... + b_(n-1)*xn + bn*Kernel >= -b_0 --> for linear SVC (As a note, remember that true equation representation is: w.x>=c and that x here represents each one of the coordinates of your independent samples (x))',
'Coefficients distribution is as follows: b1*x1 + ... + b_(n-5)*x_m^m + b_(n-4)*x_(m-1) + ... + b_(n-3)*x_m^m + ... + b_(n-2)*x_m + ... + b_(n-1)*x_m^m + bn*Kernel >= -b_0 --> for polynomial SVC (m stands for the order degree selected for the polynomial SVC and n stands for the number of coefficients used in the polynomial SVC)'
]
]
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution", "matrix x data"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
predictSupportVectorMachine(coefficients="We give the SVC mathematical coefficients that we want to predict with")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0, 0],
[2, 2],
[4, 3],
[2, 4],
[3, 4],
[4, 4],
[5, 3],
[3, 5],
[4, 6]
]
matrix_y = [
[1],
[1],
[1],
[1],
[-1],
[-1],
[-1],
[-1],
[-1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getSupportVectorMachine(evtfbmip = True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# --------------------------------------------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS: CBES WHEN VOLTAGE APPLIED IS POSITIVE ----- #
# --------------------------------------------------------------------------- #
# Visualising the Training set results
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
plt.figure()
# We plot the Background
x1_samples = []
x2_samples = []
for row in range(0, len(matrix_x)):
x1_samples.append(matrix_x[row][0])
x2_samples.append(matrix_x[row][1])
# linspace(start, stop, num=50)
x1_distance = min(x1_samples) - max(x1_samples)
x2_distance = min(x2_samples) - max(x2_samples)
x1_background = np.linspace(min(x1_samples)+x1_distance*0.1, max(x1_samples)-x1_distance*0.1, num=100)
x2_background = np.linspace(min(x2_samples)+x2_distance*0.1, max(x2_samples)-x2_distance*0.1, num=100)
predictThisValues = []
for row in range(0, len(x1_background)):
for row2 in range(0, len(x2_background)):
temporalRow = []
temporalRow.append(x1_background[row])
temporalRow.append(x2_background[row2])
predictThisValues.append(temporalRow)
classification.set_xSamplesList(predictThisValues)
predictedValuesForBg = classification.predictSupportVectorMachine(coefficients=modelCoefficients)
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(predictedValuesForBg)):
temporalRow = []
if (predictedValuesForBg[row][0] == 1):
temporalRow = []
temporalRow.append(predictThisValues[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(predictThisValues[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=10, label='predicted positives (1)', alpha = 0.1)
plt.scatter(negatives_x, negatives_y, c='red', s=10, label='predicted negatives (-1)', alpha = 0.1)
# We plot the predicted values of our currently trained model
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(matrix_y)):
temporalRow = []
if (matrix_y[row][0] == 1):
temporalRow = []
temporalRow.append(matrix_x[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(matrix_x[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=50, label='real positives (1)')
plt.scatter(negatives_x, negatives_y, c='red', s=50, label='real negatives (-1)')
            # Finally, we define the desired title, the labels and the legend for the data
# points
plt.title('Real Results')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.grid()
# We show the graph with all the specifications we just declared.
plt.show()
EXPECTED CODE RESULT:
"A graph will pop and will show the predicted region of the obtained
model and the scattered points of the true/real results to compare
the modeled results vs the real results"
"""
"""
predictKernelSupportVectorMachine(coefficients="We give the kernel and the SVC mathematical coefficients that we want to predict with",
isPolynomialSVC="True if you want to apply a polynomial SVC. False if otherwise is desired",
orderOfPolynomialSVC="If you apply a polynomial SVC through the argument isPolynomialSVC, you then give a whole number here to indicate the order of degree that you desire in such Polynomial SVC",
orderOfPolynomialKernel="if you selected polynomial kernel in the kernel argument, you then here give a whole number to indicate the order of degree that you desire in such Polynomial Kernel",
kernel="you specify here the type of kernel that you want to predict with. literally write, in strings, gaussian for a gaussian kernel; polynomial for a polynomial kernel; and linear for a linear kernel")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
x = [
[2, 3],
[3, 2],
[4, 3],
[3, 4],
[1, 3],
[3, 1],
[5, 3],
[3, 5],
[3, 3]
]
y = [
[1],
[1],
[1],
[1],
[0],
[0],
[0],
[0],
[0]
]
matrix_y = []
for row in range(0, len(y)):
temporalRow = []
if (y[row][0] == 0):
temporalRow.append(-1)
if (y[row][0] == 1):
temporalRow.append(1)
if ((y[row][0]!=0) and (y[row][0]!=1)):
raise Exception('ERROR: The dependent variable y has values different from 0 and 1.')
matrix_y.append(temporalRow)
matrix_x = []
for row in range(0, len(y)):
temporalRow = []
for column in range(0, len(x[0])):
temporalRow.append(x[row][column])
matrix_x.append(temporalRow)
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
classification = mSL.Classification(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = classification.getKernelSupportVectorMachine(kernel='gaussian', isPolynomialSVC=True, orderOfPolynomialSVC=2, orderOfPolynomialKernel=3, evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# --------------------------------------------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS: CBES WHEN VOLTAGE APPLIED IS POSITIVE ----- #
# --------------------------------------------------------------------------- #
# Visualising the Training set results
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
plt.figure()
# We plot the Background
x1_samples = []
x2_samples = []
for row in range(0, len(matrix_x)):
x1_samples.append(matrix_x[row][0])
x2_samples.append(matrix_x[row][1])
# linspace(start, stop, num=50)
x1_distance = min(x1_samples) - max(x1_samples)
x2_distance = min(x2_samples) - max(x2_samples)
x1_background = np.linspace(min(x1_samples)+x1_distance*0.1, max(x1_samples)-x1_distance*0.1, num=100)
x2_background = np.linspace(min(x2_samples)+x2_distance*0.1, max(x2_samples)-x2_distance*0.1, num=100)
predictThisValues = []
for row in range(0, len(x1_background)):
for row2 in range(0, len(x2_background)):
temporalRow = []
temporalRow.append(x1_background[row])
temporalRow.append(x2_background[row2])
predictThisValues.append(temporalRow)
classification.set_xSamplesList(predictThisValues)
predictedValuesForBg = classification.predictKernelSupportVectorMachine(coefficients=modelCoefficients, isPolynomialSVC=True, orderOfPolynomialSVC=2, orderOfPolynomialKernel=3, kernel='gaussian')
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(predictedValuesForBg)):
temporalRow = []
if (predictedValuesForBg[row][0] == 1):
temporalRow = []
temporalRow.append(predictThisValues[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(predictThisValues[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=10, label='predicted positives (1)', alpha = 0.1)
plt.scatter(negatives_x, negatives_y, c='red', s=10, label='predicted negatives (-1)', alpha = 0.1)
# We plot the predicted values of our currently trained model
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(matrix_y)):
temporalRow = []
if (matrix_y[row][0] == 1):
temporalRow = []
temporalRow.append(matrix_x[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(matrix_x[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=50, label='real positives (1)')
plt.scatter(negatives_x, negatives_y, c='red', s=50, label='real negatives (-1)')
            # Finally, we define the desired title, the labels and the legend for the data
# points
plt.title('Real Results')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.grid()
# We show the graph with all the specifications we just declared.
plt.show()
EXPECTED CODE RESULT:
"A graph will pop and will show the predicted region of the obtained
model and the scattered points of the true/real results to compare
the modeled results vs the real results"
"""
"""
predictLinearLogisticClassifier(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with",
threshold="We give a value from 0 to 1 to indicate the threshold that we want to apply to classify the predicted data with the Linear Logistic Classifier")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
matrix_x = [
[0,2],
[1,3],
[2,4],
[3,5],
[4,6],
[5,7],
[6,8],
[7,9],
[8,10],
[9,11]
]
matrix_y = [
[0],
[0],
[1],
[0],
[1],
[1],
[1],
[1],
[1],
[1]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
regression = mSL.Regression(matrix_x, matrix_y)
# evtfbmip stands for "Eliminate Variables To Find Better Model If Possible"
modelingResults = regression.getLinearLogisticRegression(evtfbmip=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
coefficientDistribution = modelingResults[3]
allModeledAccuracies = modelingResults[4]
# --------------------------------------------------------------------------- #
# ----- WE VISUALIZE OUR RESULTS: CBES WHEN VOLTAGE APPLIED IS POSITIVE ----- #
# --------------------------------------------------------------------------- #
# Visualising the Training set results
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.colors import ListedColormap
plt.figure()
# We plot the Background
x1_samples = []
x2_samples = []
for row in range(0, len(matrix_x)):
x1_samples.append(matrix_x[row][0])
x2_samples.append(matrix_x[row][1])
# linspace(start, stop, num=50)
x1_distance = min(x1_samples) - max(x1_samples)
x2_distance = min(x2_samples) - max(x2_samples)
x1_background = np.linspace(min(x1_samples)+x1_distance*0.1, max(x1_samples)-x1_distance*0.1, num=100)
x2_background = np.linspace(min(x2_samples)+x2_distance*0.1, max(x2_samples)-x2_distance*0.1, num=100)
predictThisValues = []
for row in range(0, len(x1_background)):
for row2 in range(0, len(x2_background)):
temporalRow = []
temporalRow.append(x1_background[row])
temporalRow.append(x2_background[row2])
predictThisValues.append(temporalRow)
classification = mSL.Classification(predictThisValues, [])
predictedValuesForBg = classification.predictLinearLogisticClassifier(coefficients=modelCoefficients, threshold=0.5)
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(predictedValuesForBg)):
temporalRow = []
if (predictedValuesForBg[row][0] == 1):
temporalRow = []
temporalRow.append(predictThisValues[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(predictThisValues[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(predictThisValues[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=10, label='predicted positives (1)', alpha = 0.1)
plt.scatter(negatives_x, negatives_y, c='red', s=10, label='predicted negatives (-1)', alpha = 0.1)
# We plot the predicted values of our currently trained model
positives_x = []
positives_y = []
negatives_x = []
negatives_y = []
for row in range(0, len(matrix_y)):
temporalRow = []
if (matrix_y[row][0] == 1):
temporalRow = []
temporalRow.append(matrix_x[row][1])
positives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
positives_x.append(temporalRow)
else:
temporalRow = []
temporalRow.append(matrix_x[row][1])
negatives_y.append(temporalRow)
temporalRow = []
temporalRow.append(matrix_x[row][0])
negatives_x.append(temporalRow)
plt.scatter(positives_x, positives_y, c='green', s=50, label='real positives (1)')
plt.scatter(negatives_x, negatives_y, c='red', s=50, label='real negatives (-1)')
            # Finally, we define the desired title, the labels and the legend for the data
# points
plt.title('Real Results')
plt.xlabel('x1')
plt.ylabel('x2')
plt.legend()
plt.grid()
# We show the graph with all the specifications we just declared.
plt.show()
EXPECTED CODE RESULT:
"A graph will pop and will show the predicted region of the obtained
model and the scattered points of the true/real results to compare
the modeled results vs the real results"
"""
"""
The ReinforcementLearning Class gives several methods to make a model that is
able to learn in real time to predict the best option among the ones you tell
    it it has available. This is very useful when you actually don't have a dataset
to tell your model the expected output values to compare them and train itself
with them.
Regression("independent values (x) or options that your model will have available to pick from")
"""
class ReinforcementLearning:
    """
    getUpperConfidenceBound()
    This method helps you to identify what is the best option (these are called
    as arms in this algorithm) among many, to get the best number of successful
    results when there is actually no possible way to know anything about a
    particular problem that we want to figure out how to solve.
    Unlike the normal method "getRealTimeUpperConfidenceBound()", this method
    cannot solve a problem in real time, since it needs that you already have
    measured several rounds so that then this algorithm studies it to then
    tell you which arm is the best option among all the others.
    This method's advantages:
        * When this algorithm tries to identify the best arm, it only needs
        to know if its current selection was successful or not (0 or 1)
        and it doesn't need to know, in that round, anything about the
        other arms
    This method's disadvantages:
        * This is the method that takes the most time to be able to
        identify the best arm. Just so that you have it in mind, for a
        problem to solve, this algorithm needed around the following
        round samples to start identifying the best arm / option for a
        random problem that I wanted to solve:
          + For 2 arms --> around 950 samples
          + For 3 arms --> around 1400 samples
          + For 4 arms --> around 1200 samples
          + For 5 arms --> around 320 samples
          + For 6 arms --> around 350 samples
          + For 7 arms --> around 400 samples
          + For 8 arms --> around 270 samples
          + For 9 arms --> around 600 samples
          + For 10 arms --> around 600 samples
          As you can see, there is clearly no proportionality alone by the
          number of available arms and it is most likely that the needed
          number of samples, so that this algorithm starts identifying the
          best arm, will most likely depend on the probability of occurrence
          for each option available to be selected by the algorithm. This
          is a great deficit for this algorithm since according to the
          situations where we are supposed to need this algorithm, we are
          supposed to not know such probability of occurrence.
    NOTE: The logic of this algorithm follows the one described and taught by
    the Machine Learning Course "Machine Learning A-Z™: Hands-On Python & R In
    Data Science" taught by " Kirill Eremenko, Hadelin de Ponteves,
    SuperDataScience Team, SuperDataScience Support". I mention this because I
    don't quite agree with how this algorithm works but, even though I haven't
    checked, there is a great chance that this is how other data scientists do
    Upper Confidence Bound.
    CODE EXAMPLE:
        import pandas as pd
        dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
        matrix_y = []
        for row in range(0, len(dataset)):
            temporalRow = []
            for column in range(0, len(dataset.iloc[0])):
                temporalRow.append(dataset.iloc[row,column])
            matrix_y.append(temporalRow)
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        rL = mSL.ReinforcementLearning(matrix_y)
        modelingResults = rL.getUpperConfidenceBound()
        accuracyFromTraining = modelingResults[1]
        historyOfPredictedData = modelingResults[3]
        # ------------------------------------ #
        # ----- WE VISUALIZE OUR RESULTS ----- #
        # ------------------------------------ #
        import matplotlib.pyplot as plt
        import numpy as np
        histogram_x_data = []
        for row in range(0, len(historyOfPredictedData)):
            histogram_x_data.append(historyOfPredictedData[row][0])
        plt.figure()
        plt.hist(histogram_x_data)
        plt.title('Histogram of ads selections by UCB model')
        plt.xlabel('Ads')
        plt.ylabel('Number of times each ad was selected')
        plt.show()
    EXPECTED CODE RESULT:
        "A histogram graph will pop and will show the number of times that the
        algorithm picked each of the available options. The option with the
        highest number of selections by the algorithm is basically going to be
        the best option among them all"
        accuracyFromTraining =
        21.78
        historyOfPredictedData =
        NOTE: We won't show this result because it has 10'000 rows and it's just
        way too long to show here as a demonstration.
    """
    """
    getRealTimeUpperConfidenceBound(currentNumberOfSamples="You have to indicate here the current number of samples that have occured for a particular UCB problem to solve",
                                    sumsOfRewardsForEachArm="You have to indicate here the sums of rewards for each of the available arms for a particular UCB problem to solve",
                                    numberOfSelectionsOfArms="You have to indicate here the number of times that each arm was selected by the algorithm for a particular UCB problem to solve")
    IMPORTANT NOTE: WHEN YOU RUN THIS METHOD TO SOLVE THE VERY FIRST ROUND OF A
                    PARTICULAR UCB PROBLEM, DONT DEFINE ANY VALUES IN THE
                    ARGUMENTS OF THIS METHOD. FOR FURTHER ROUNDS, INPUT IN THE
                    ARGUMENTS THE OUTPUT VALUES OF THE LAST TIME YOU RAN THIS
                    METHOD (SEE CODE EXAMPLE).
    This method helps you to identify what is the best option (these are called
    as arms in this algorithm) among many, to get the best number of successful
    results when theres actually no possible way to know anything about a
    particular problem that we want to figure out how to solve.
    Unlike the normal method "getUpperConfidenceBound()", this method learns in
    real time, while "getUpperConfidenceBound()" expects you to already have
    measured several rounds.
    This methods advantages:
        * When this algorithm tries to identify the best arm, it only needs
        to know if his current selection was successful or not (0 or 1)
        and it doesnt need to know, in that round, anything about the
        other arms
    This methods disadvantages:
        * This is the method that takes the most time to be able to
        identify the best arm. Just so that you have it in mind, for a
        problem to solve, this algorithm needed around the following
        round samples to start identifying the best arm / option for a
        random problem that i wanted to solve:
          + For 2 arms --> around 950 samples
          + For 3 arms --> around 1400 samples
          + For 4 arms --> around 1200 samples
          + For 5 arms --> around 320 samples
          + For 6 arms --> around 350 samples
          + For 7 arms --> around 400 samples
          + For 8 arms --> around 270 samples
          + For 9 arms --> around 600 samples
          + For 10 arms --> around 600 samples
          As you can see, there is clearly no proportionality alone by the
          number of available arms and it is most likely that the needed
          number of samples, so that this algorithm starts identifying the
          best arm, will most likely depend on the probability of occurence
          for each option available to be selected by the algorithm. This
          is a great deficit for this algorithm since according to the
          situations were we are supposed to need this algorithm, we are
          supposed to not know such probability of occurence.
    NOTE: The logic of this algorithm follows the one described and teached by
    the Machine Learning Course "Machine Learning A-Z™: Hands-On Python & R In
    Data Science" teached by " Kirill Eremenko, Hadelin de Ponteves,
    SuperDataScience Team, SuperDataScience Support". I mention this because i
    dont quite agree with how this algorithm works but, even though i havent
    checked, there is a great chance that this is how other data scientists do
    Upper Confidence Bound.
    CODE EXAMPLE:
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        import pandas as pd
        dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
        matrix_y = []
        for row in range(0, len(dataset)):
            temporalRow = []
            for column in range(0, len(dataset.iloc[0])):
                temporalRow.append(dataset.iloc[row,column])
            matrix_y.append(temporalRow)
        # With this for-loop, we will simulate that we are getting the data in
        # real-time and that we are, at the same time, giving it to the algorithm
        numberOfArmsAvailable = len(matrix_y[0])
        for currentSample in range(0, len(matrix_y)):
            rL = mSL.ReinforcementLearning([matrix_y[currentSample]])
            if (currentSample == 0):
                modelingResults = rL.getRealTimeUpperConfidenceBound()
            else:
                modelingResults = rL.getRealTimeUpperConfidenceBound(currentNumberOfSamples, sumsOfRewardsForEachArm, numberOfSelectionsOfArms)
            currentNumberOfSamples = modelingResults[0]
            currentAccuracyFromTraining = modelingResults[1]
            sumsOfRewardsForEachArm = modelingResults[2]
            numberOfSelectionsOfArms = modelingResults[3]
        # ------------------------------------ #
        # ----- WE VISUALIZE OUR RESULTS ----- #
        # ------------------------------------ #
        import matplotlib.pyplot as plt
        import numpy as np
        histogram_x_data = []
        # We now add the real selected options by the algorithm
        for currentArm in range(0, numberOfArmsAvailable):
            for selectedTimes in range(0, numberOfSelectionsOfArms[0][currentArm]):
                histogram_x_data.append(currentArm)
        plt.figure()
        plt.hist(histogram_x_data)
        plt.title('Histogram of ads selections by UCB model')
        plt.xlabel('Ads')
        plt.ylabel('Number of times each ad was selected')
        plt.show()
    EXPECTED CODE RESULT:
        "A histogram graph will pop and will show the number of times that the
        algorithm picked each of the available options. The option with the
        highest number of selections by the algorithm is basically going to be
        the best option among them all"
        currentNumberOfSamples=
        10000
        currentAccuracyFromTraining =
        21.78
        sumsOfRewardsForEachArm =
        [[120, 47, 7, 38, 1675, 1, 27, 236, 20, 7]]
        numberOfSelectionsOfArms =
        [[705, 387, 186, 345, 6323, 150, 292, 1170, 256, 186]]
    """
    """
    getModifiedUpperConfidenceBound()
    This method helps you to identify what is the best option (these are called
    as arms in this algorithm) among many, to get the best number of successful
    results when theres actually no possible way to know anything about a
    particular problem that we want to figure out how to solve.
    Unlike the method "getRealTimeModifiedUpperConfidenceBound()" which learns
    in real-time, this method does not and it requires that you have already
    meassured several rounds to the input them to this method.
    This methods advantages:
        * This method is the fastest of all, so far, to detect the best
        possible arm (option) among all the available ones:
          + For 2 arms --> around 1 sample
          + For 3 arms --> around 1 sample
          + For 4 arms --> around 1 sample
          + For 5 arms --> around 60 samples
          + For 6 arms --> around 60 samples
          + For 7 arms --> around 60 samples
          + For 8 arms --> around 60 samples
          + For 9 arms --> around 60 samples
          + For 10 arms --> around 60 samples
          As you can see, there is clearly no proportionality alone by the
          number of available arms and it is most likely that the needed
          number of samples, so that this algorithm starts identifying the
          best arm, will most likely depend on the probability of occurence
          for each option available to be selected by the algorithm. This
          is a great deficit for this algorithm since according to the
          situations were we are supposed to need this algorithm, we are
          supposed to not know such probability of occurence.
    This methods disadvantages:
        * When this algorithm tries to identify the best arm, it needs to
        know, for each arm (regardless of the one picked by the
        algorithm), if they were a successful pick or not (0 or 1),
        unlike the "getUpperConfidenceBound()" which only needs
        to know if his actual pick was sucessful or not.
    CODE EXAMPLE:
        import pandas as pd
        dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
        matrix_y = []
        for row in range(0, len(dataset)):
            temporalRow = []
            for column in range(0, len(dataset.iloc[0])):
                temporalRow.append(dataset.iloc[row,column])
            matrix_y.append(temporalRow)
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        rL = mSL.ReinforcementLearning(matrix_y)
        modelingResults = rL.getModifiedUpperConfidenceBound()
        accuracyFromTraining = modelingResults[1]
        historyOfPredictedData = modelingResults[3]
        # ------------------------------------ #
        # ----- WE VISUALIZE OUR RESULTS ----- #
        # ------------------------------------ #
        import matplotlib.pyplot as plt
        import numpy as np
        histogram_x_data = []
        # We first add a fake selection for each available option (arms) so that we
        # ensure that they appear in the histogram. Otherwise, if we dont do this and
        # if the algorithm never consideres one or some of the available options, it
        # will plot considering those options never existed.
        numberOfAvailableOptions = len(matrix_y[0])
        for row in range(0, numberOfAvailableOptions):
            histogram_x_data.append(row)
        # We now add the real selected options by the algorithm
        for row in range(0, len(historyOfPredictedData)):
            histogram_x_data.append(historyOfPredictedData[row][0])
        plt.figure()
        plt.hist(histogram_x_data)
        plt.title('Histogram of ads selections by UCB model')
        plt.xlabel('Ads')
        plt.ylabel('Number of times each ad was selected')
        plt.show()
    EXPECTED CODE RESULT:
        "A histogram graph will pop and will show the number of times that the
        algorithm picked each of the available options. The option with the
        highest number of selections by the algorithm is basically going to be
        the best option among them all"
        accuracyFromTraining =
        26.93
        historyOfPredictedData =
        NOTE: We wont show this result because it has 10'000 rows and its just
        way too long to show here as a demonstration.
    """
    """
    getRealTimeModifiedUpperConfidenceBound(currentNumberOfSamples="You have to indicate here the current number of samples that have occured for a particular UCB problem to solve",
                                            sumsOfRewardsForEachSelectedArm="You have to indicate the sums of the rewards for each arm but only for those situations were the algorithm picked each arm",
                                            numberOfSelectionsOfArms="You have to indicate here the number of times that each arm was selected by the algorithm for a particular UCB problem to solve",
                                            trueSumsOfRewardsForEachArm="You have to indicate the real number of times that each arm has been a successful result, regardless of what the algorithm identified",
                                            meanList="You have to indicate the mean list of the rewards obtained for each arm",
                                            standardDeviationList="You have to indicate the standard deviation list of the rewards obtained for each arm")
    IMPORTANT NOTE: WHEN YOU RUN THIS METHOD TO SOLVE THE VERY FIRST ROUND OF A
                    PARTICULAR UCB PROBLEM, DONT DEFINE ANY VALUES IN THE
                    ARGUMENTS OF THIS METHOD. FOR FURTHER ROUNDS, INPUT IN THE
                    ARGUMENTS THE OUTPUT VALUES OF THE LAST TIME YOU RAN THIS
                    METHOD (SEE CODE EXAMPLE).
    This method helps you to identify what is the best option (these are called
    as arms in this algorithm) among many, to get the best number of successful
    results when theres actually no possible way to know anything about a
    particular problem that we want to figure out how to solve.
    Unlike the normal method "getModifiedUpperConfidenceBound()", this method
    learns in real time, while "getModifiedUpperConfidenceBound()" expects you
    to already have measured several rounds.
    This methods advantages:
        * This method is the fastest of all, so far, to detect the best
        possible arm (option) among all the available ones:
          + For 2 arms --> around 1 sample
          + For 3 arms --> around 1 sample
          + For 4 arms --> around 1 sample
          + For 5 arms --> around 60 samples
          + For 6 arms --> around 60 samples
          + For 7 arms --> around 60 samples
          + For 8 arms --> around 60 samples
          + For 9 arms --> around 60 samples
          + For 10 arms --> around 60 samples
          As you can see, there is clearly no proportionality alone by the
          number of available arms and it is most likely that the needed
          number of samples, so that this algorithm starts identifying the
          best arm, will most likely depend on the probability of occurence
          for each option available to be selected by the algorithm. This
          is a great deficit for this algorithm since according to the
          situations were we are supposed to need this algorithm, we are
          supposed to not know such probability of occurence.
    This methods disadvantages:
        * When this algorithm tries to identify the best arm, it needs to
        know, for each arm (regardless of the one picked by the
        algorithm), if they were a successful pick or not (0 or 1),
        unlike the "getRealTimeUpperConfidenceBound()" which only needs
        to know if his actual pick was sucessful or not.
    CODE EXAMPLE:
        from MortrackLibrary.machineLearning import MortrackML_Library as mSL
        import pandas as pd
        dataset = pd.read_csv('Ads_CTR_Optimisation.csv')
        matrix_y = []
        for row in range(0, len(dataset)):
            temporalRow = []
            for column in range(0, len(dataset.iloc[0])):
                temporalRow.append(dataset.iloc[row,column])
            matrix_y.append(temporalRow)
        # With this for-loop, we will simulate that we are getting the data in
        # real-time and that we are, at the same time, giving it to the algorithm
        numberOfArmsAvailable = len(matrix_y[0])
        for currentSample in range(0, len(matrix_y)):
            rL = mSL.ReinforcementLearning([matrix_y[currentSample]])
            if (currentSample == 0):
                modelingResults = rL.getRealTimeModifiedUpperConfidenceBound()
            else:
                modelingResults = rL.getRealTimeModifiedUpperConfidenceBound(currentNumberOfSamples, sumsOfRewardsForEachSelectedArm, numberOfSelectionsOfArms, trueSumsOfRewardsForEachArm, meanList, standardDeviationList)
            currentNumberOfSamples = modelingResults[0]
            currentAccuracyFromTraining = modelingResults[1]
            sumsOfRewardsForEachSelectedArm = modelingResults[2]
            numberOfSelectionsOfArms = modelingResults[3]
            trueSumsOfRewardsForEachArm = modelingResults[4]
            meanList = modelingResults[5]
            standardDeviationList = modelingResults[6]
        # ------------------------------------ #
        # ----- WE VISUALIZE OUR RESULTS ----- #
        # ------------------------------------ #
        import matplotlib.pyplot as plt
        import numpy as np
        histogram_x_data = []
        # We first add a fake selection for each available option (arms) so that we
        # ensure that they appear in the histogram. Otherwise, if we dont do this and
        # if the algorithm never consideres one or some of the available options, it
        # will plot considering those options never existed.
        for row in range(0, numberOfArmsAvailable):
            histogram_x_data.append(row)
        # We now add the real selected options by the algorithm
        for currentArm in range(0, numberOfArmsAvailable):
            for selectedTimes in range(0, numberOfSelectionsOfArms[0][currentArm]):
                histogram_x_data.append(currentArm)
        plt.figure()
        plt.hist(histogram_x_data)
        plt.title('Histogram of ads selections by UCB model')
        plt.xlabel('Ads')
        plt.ylabel('Number of times each ad was selected')
        plt.show()
    EXPECTED CODE RESULT:
        "A histogram graph will pop and will show the number of times that the
        algorithm picked each of the available options. The option with the
        highest number of selections by the algorithm is basically going to be
        the best option among them all"
        currentNumberOfSamples=
        10000
        currentAccuracyFromTraining =
        26.93
        sumsOfRewardsForEachSelectedArm =
        [[3, 0, 0, 0, 2690, 0, 0, 0, 0, 0]]
        numberOfSelectionsOfArms =
        [[25, 0, 0, 0, 9975, 0, 0, 0, 0, 0]]
        trueSumsOfRewardsForEachArm =
        [[1703, 1295, 728, 1196, 2695, 126, 1112, 2091, 952, 489]]
        meanList =
        [[0.1703,
        0.1295,
        0.0728,
        0.1196,
        0.2695,
        0.0126,
        0.1112,
        0.2091,
        0.0952,
        0.0489]]
        standardDeviationList =
        [[1.2506502260503618,
        1.0724240984136193,
        0.7004403369435815,
        0.9286872458865242,
        1.412843221683186,
        0.3047987328938745,
        0.7525852536272276,
        1.2007787911241279,
        1.030718190027389,
        0.5406998109413704]]
    """
"""
The DeepLearning Class gives several methods to make a model through the
concept of how a real neuron works.
DeepLearning("mean values of the x datapoints to model", "mean values of the y datapoints to model")
"""
class DeepLearning:
"""
getReluActivation(x="the instant independent value from which you want to know the dependent ReLU value/result")
This method calculates and returns the ReLU function value of the instant
independent value that you give in the "x" local variable of this method.
"""
"""
getReluActivationDerivative(x="the instant independent value from which you want to know the derivate of the dependent ReLU value/result")
This method calculates and returns the derivate ReLU function value of the
instant independent value that you give in the "x" local variable of this
method.
"""
"""
getTanhActivation(x="the instant independent value from which you want to know the dependent Hyperbolic Tangent (Tanh) value/result")
This method calculates and returns the Hyperbolic Tangent (Tanh) function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getReluActivation(x="the instant independent value from which you want to know the dependent Sigmoid value/result")
This method calculates and returns the Sigmoid function value of the
instant independent value that you give in the "x" local variable of this
method.
"""
"""
getRaiseToTheSecondPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getRaiseToTheSecondPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getRaiseToTheThirdPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getRaiseToTheThirdPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getRaiseToTheFourthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getRaiseToTheFourthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getRaiseToTheFifthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getRaiseToTheFifthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getRaiseToTheSixthPowerActivation(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the Exponentiation function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getRaiseToTheSixthPowerDerivative(x="the instant independent value from which you want to know the dependent Exponentiation value/result")
This method calculates and returns the derivate Exponentiation function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getExponentialActivation(x="the instant independent value from which you want to know the dependent Exponential-Euler value/result")
This method calculates and returns the Exponential-Euler function value of
the instant independent value that you give in the "x" local variable of
this method.
"""
"""
getExponentialDerivative(x="the instant independent value from which you want to know the dependent Exponential-Euler value/result")
This method calculates and returns the derivate Exponential-Euler function
value of the instant independent value that you give in the "x" local
variable of this method.
"""
"""
getSingleArtificialNeuron(activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The available options are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
learningRate="the rate at which you want your neuron to learn (remember that 1=100% learning rate or normal learning rate)",
numberOfEpochs="The number of times you want your neuron to train itself",
stopTrainingIfAcurracy="define the % value that you want the neuron to stop training itself if such accuracy value is surpassed",
isCustomizedInitialWeights="set to True if you will define a customized innitial weight vector for each neuron. False if you want them to be generated randomly",
firstMatrix_w="If you set the input argument of this method isCustomizedInitialWeights to True, then assign here the customized innitial weight vectors you desire for each neuron",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method creates a single Artificial Neuron and, within this method,
such neuron trains itself to learn to predict the input values that it was
given to study by comparing them with the output expected values.
When the neuron finishes its learning process, this method will return the
modeling results.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
modelingResults = dL.getSingleArtificialNeuron(activationFunction='none', learningRate=0.001, numberOfEpochs=100000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
RESULT OF CODE:
modelCoefficients =
[[28.235246103419946],
[1.12749544645359],
[-1.7353168202914326],
[0.7285727543658252]]
accuracyFromTraining =
95.06995458954695
predictedData =
[[28.868494779855514],
[32.80418405006583],
[25.89997715314427],
[38.25484973427189],
[16.295874460357858],
[26.67205741761012],
[27.198762118476985],
[26.859066716794352],
[31.50391014224514],
[26.42881371215305],
[38.14632853395502],
[30.297502725191123],
[26.929105800646223]]
coefficientDistribution =
"
Coefficients distribution is as follows:
modelCoefficients =
[
[Neuron1_bias, Neuron1_weight1, Neuron1_weight2, ... , Neuron1_weightM]
]
"
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
This method is used within the method "getArtificialNeuralNetwork()" to get
the weights of a particular neuron from a variable that contains all the
weights of all neurons (matrix_w).
"""
"""
This method is used within the method "getArtificialNeuralNetwork()" to get
the partial derivative of the Total Error (dEtotal) due respect with the
partial derivative of the corresponding Activation Function (dFz) for a
particular neuron within an Artificial Neural Network.
"""
"""
getArtificialNeuralNetwork(artificialNeuralNetworkDistribution="must contain an array that indicates the distribution of the desired neurons for each layer in columns. If a row-column value equals 1, this will mean that you want a neuron in that position. A 0 means otherwise",
activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The activation functions must be assigned in an array accordingly to the distribution specified in argument input variable artificialNeuralNetworkDistribution. The available activation functions are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
learningRate="the rate at which you want your Artificial Neural Network to learn (remember that 1=100% learning rate or normal learning rate)",
numberOfEpochs="The number of times you want your Artificial Neural Network to train itself",
stopTrainingIfAcurracy="define the % value that you want the neuron to stop training itself if such accuracy value is surpassed",
isCustomizedInitialWeights="set to True if you will define a customized innitial weight vector for each neuron. False if you want them to be generated randomly",
firstMatrix_w="If you set the input argument of this method isCustomizedInitialWeights to True, then assign here the customized innitial weight vectors you desire for each neuron",
isClassification="set to True if you are solving a classification problem. False if otherwise")
This method creates an Artificial Neural Network with a customized desired
number of neurons within it and, within this method, such Artificial Neural
Network trains itself to learn to predict the input values that it was
given to study by comparing them with the output expected values.
When the neuron finishes its learning process, this method will return the
modeling results.
CODE EXAMPLE:
# matrix_y = [expectedResultForOutputNeuron1, expectedResultForOutputNeuron2]
matrix_y = [
[0, 1],
[1, 0],
[1, 0],
[0, 1],
[1, 0]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[0, 0, 0],
[0, 0, 1],
[0, 1, 0],
[0, 1, 1],
[1, 0, 0]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
# We will indicate that we want 2 neurons in Layer1 and 1 neuron in Layer2
aNND = [
[1,1,1],
[1,1,1]
]
aF = [
['relu', 'relu', 'sigmoid'],
['relu', 'relu', 'sigmoid']
]
modelingResults = dL.getArtificialNeuralNetwork(artificialNeuralNetworkDistribution=aNND, activationFunction=aF, learningRate=0.1, numberOfEpochs=10000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=True)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
RESULT OF CODE:
modelCoefficients =
[
[2.133298325032156, -0.45548307884431677, -2.1332978269534664, -2.1332978292080043],
[2.287998188065245, 1.3477978318721369, -1.143999014059006, -1.1439990110690932],
[-0.6930287605411998, 0.41058709282271444, 0.6057943758418374],
[4.6826225603458056e-08, -1.8387485390712266, 2.2017181913306803],
[-4.1791269585765285, -2.5797524896448563, 3.3885776200605955],
[4.181437529101815, 2.5824655964639742, -3.3907451300458136]
]
accuracyFromTraining =
98.94028954483407
predictedData =
[[0.011560111421083964, 0.9884872182827878],
[0.9873319964204451, 0.01262867979045398],
[0.9873319961998808, 0.012628680010459043],
[0.015081447917016324, 0.9849528347708301],
[0.9989106156594524, 0.0010867877109744279]]
coefficientDistribution =
"
Coefficients distribution is as follows:
modelCoefficients =
[
[Neuron1_bias, Neuron1_weight1, Neuron1_weight2, ... , Neuron1_weightM],
[Neuron2_bias, Neuron2_weight1, Neuron2_weight2, ... , Neuron2_weightZ],
[ . , . , . , ... , . ],
[ . , . , . , ... , . ],
[ . , . , . , ... , . ],
[NeuronN_bias, NeuronN_weight1, NeuronN_weight2, ... , NeuronN_weightK],
]
"
allModeledAccuracies["independent variable distribution used to get a model"]["model accuracy", "model coefficients obtained but with original distribution"] =
# NOTE: since this variable contains large amounts of information, it
# will not be displayed but only described on how to use it.
"""
"""
predictSingleArtificialNeuron(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with",
activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The available options are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
isThreshold="Set to True if you want to predict output values of a classification neuron. False if otherwise."
threshold="We give a value from 0 to 1 to indicate the threshold that we want to apply to classify the predicted data with the Linear Logistic Classifier")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
modelingResults = dL.getSingleArtificialNeuron(activationFunction='none', learningRate=0.001, numberOfEpochs=100000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
# -------------------------------------------------------- #
# ----- WE PREDICT SOME DATA WITH OUR CURRENT NEURON ----- #
# -------------------------------------------------------- #
matrix_x = [
[1, 2.3, 3.8],
[3.32, 2.42, 1.4],
[2.22, 3.41, 1.2]
]
dL = mSL.DeepLearning(matrix_x, [])
getPredictedData = dL.predictSingleArtificialNeuron(coefficients=modelCoefficients, activationFunction='none', isThreshold=False, threshold=0.5)
EXPECTED CODE RESULT:
getPredictedData =
[[28.140432977147068], [28.799532314784063], [25.69562041179361]]
"""
"""
predictArtificialNeuralNetwork(coefficients="We give the Linear Logistic mathematical coefficients that we want to predict with",
activationFunction="the name, in lowercaps, of the activation function that you want to apply the neuron. The activation functions must be assigned in an array accordingly to the distribution specified in argument input variable coefficients. The available activation functions are: 'sigmoid', 'relu', 'tanh', 'raiseTo2ndPower', 'raiseTo3rdPower', 'raiseTo4thPower', 'raiseTo5thPower', 'raiseTo6thPower', 'exponential'.",
isThreshold="Set to True if you want to predict output values of a classification neuron. False if otherwise."
threshold="We give a value from 0 to 1 to indicate the threshold that we want to apply to classify the predicted data with the Linear Logistic Classifier")
This method returns the predicting values of the independent input values
that you assign in the local variable of this class: "self.x_samplesList".
The prediction will be made accordingly to the coefficients and
configuration specified in the arguments of this method.
CODE EXAMPLE:
# matrix_y = [expectedResult]
matrix_y = [
[25.5],
[31.2],
[25.9],
[38.4],
[18.4],
[26.7],
[26.4],
[25.9],
[32],
[25.2],
[39.7],
[35.7],
[26.5]
]
# matrix_x = [variable1, variable2, variable3]
matrix_x = [
[1.74, 5.3, 10.8],
[6.32, 5.42, 9.4],
[6.22, 8.41, 7.2],
[10.52, 4.63, 8.5],
[1.19, 11.6, 9.4],
[1.22, 5.85, 9.9],
[4.1, 6.62, 8],
[6.32, 8.72, 9.1],
[4.08, 4.42, 8.7],
[4.15, 7.6, 9.2],
[10.15, 4.83, 9.4],
[1.72, 3.12, 7.6],
[1.7, 5.3, 8.2]
]
from MortrackLibrary.machineLearning import MortrackML_Library as mSL
dL = mSL.DeepLearning(matrix_x, matrix_y)
# We will indicate that we want 2 neurons in Layer1 and 1 neuron in Layer2
aNND = [
[1,1,1],
[0,1,0]
]
aF = [
['none', 'none', 'none'],
['', 'none', '']
]
modelingResults = dL.getArtificialNeuralNetwork(artificialNeuralNetworkDistribution=aNND, activationFunction=aF, learningRate=0.00001, numberOfEpochs=100000, stopTrainingIfAcurracy=99.9, isCustomizedInitialWeights=False, firstMatrix_w=[], isClassification=False)
modelCoefficients = modelingResults[0]
accuracyFromTraining = modelingResults[1]
predictedData = modelingResults[2]
firstMatrix_w = modelingResults[3]
coefficientDistribution = modelingResults[4]
allModeledAccuracies = modelingResults[5]
# -------------------------------------------------------- #
# ----- WE PREDICT SOME DATA WITH OUR CURRENT NEURON ----- #
# -------------------------------------------------------- #
matrix_x = [
[1, 2.3, 3.8],
[3.32, 2.42, 1.4],
[2.22, 3.41, 1.2]
]
# We will indicate that we want 2 neurons in Layer1 and 1 neuron in Layer2
aNND = [
[1,1,1],
[0,1,0]
]
aF = [
['none', 'none', 'none'],
['', 'none', '']
]
dL = mSL.DeepLearning(matrix_x, [])
getPredictedData = dL.predictArtificialNeuralNetwork(coefficients=modelCoefficients, artificialNeuralNetworkDistribution=aNND, activationFunction=aF, isThreshold=False, threshold=0.5)
EXPECTED CODE RESULT:
getPredictedData =
[[28.22084819611869], [28.895166544625255], [25.788001189515317]]
"""
| [
198,
37811,
198,
220,
220,
15069,
33448,
327,
18964,
29575,
2185,
4496,
357,
26011,
25,
10788,
39638,
8,
628,
220,
220,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
220,
220,
345,
743,
407,... | 2.132538 | 66,796 |
import logging
from datetime import timedelta
from django.core.management.base import BaseCommand
from django.utils import timezone
from geotrek.feedback.models import Report
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
6738,
4818,
8079,
1330,
28805,
12514,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
27604,
13,
8692,
1330,
7308,
21575,
198,
6738,
42625,
14208,
13,
26791,
1330,
640,
11340,
198,
198,
6738,
4903,
313,
37818,
13,
12363,
... | 3.540984 | 61 |
import logging
import time
from haipproxy.utils import get_redis_conn
logger = logging.getLogger(__name__)
| [
11748,
18931,
198,
11748,
640,
198,
198,
6738,
387,
3974,
42059,
13,
26791,
1330,
651,
62,
445,
271,
62,
37043,
198,
198,
6404,
1362,
796,
18931,
13,
1136,
11187,
1362,
7,
834,
3672,
834,
8,
628
] | 3.055556 | 36 |
from src.interpreter.function_deco import setupFunctions
from src.interpreter.run import runCode
# stolen from https://stackoverflow.com/questions/287871/how-do-i-print-colored-text-to-the-terminal
# muhahahahahahaha 😈
if __name__ == "__main__":
setupFunctions()
print(Colours.WARNING + "Starting test..." + Colours.ENDC)
testAll()
print()
if Stats.failedTests == 0:
print(Colours.OKGREEN + Colours.BOLD + f"All {Stats.num} tests passed!" + Colours.ENDC)
elif Stats.failedTests < Stats.correctTests:
print(Colours.WARNING + Colours.BOLD + f"{Stats.correctTests} / {Stats.num} passed..." + Colours.ENDC)
else:
print(Colours.FAIL + Colours.BOLD + f"{Stats.correctTests} / {Stats.num} passed..." + Colours.ENDC)
| [
6738,
12351,
13,
3849,
3866,
353,
13,
8818,
62,
12501,
78,
1330,
9058,
24629,
2733,
198,
198,
6738,
12351,
13,
3849,
3866,
353,
13,
5143,
1330,
1057,
10669,
628,
198,
2,
9909,
422,
3740,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
... | 2.55298 | 302 |
import asyncio
from typing import Callable, Any
import json
import logging
import requests
import numpy as np
log = logging.getLogger('pool') | [
11748,
30351,
952,
198,
6738,
19720,
1330,
4889,
540,
11,
4377,
198,
11748,
33918,
198,
11748,
18931,
198,
11748,
7007,
198,
11748,
299,
32152,
355,
45941,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
10786,
7742,
11537
] | 3.736842 | 38 |
#!/usr/bin/env python3
import argparse
import csv
import glob
import subprocess
import sys
import time
import re
parser = argparse.ArgumentParser()
parser.add_argument("--debug", action="store_true")
parser.add_argument("--dry-run", action="store_true")
parser.add_argument("output", type=argparse.FileType('w'))
args = parser.parse_args()
debug = args.debug
output = args.output
dry_run = args.dry_run
outfile = output
architectures = [
"rv64",
"rv32",
"arm64",
"arm32",
"x86_64",
"x86_32",
]
time_re = re.compile(r"\[ OK \] \"VERIFY \((.+)\)\" \((.+)ms cpu\) \((.+)ms real\) \((.+) terms\)")
outfile.write("arch, instr, cputime, realtime, terms\n")
for arch in architectures:
run(arch)
outfile.close()
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
18,
198,
198,
11748,
1822,
29572,
198,
11748,
269,
21370,
198,
11748,
15095,
198,
11748,
850,
14681,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
302,
198,
198,
48610,
796,
1822,
29572,
13... | 2.493333 | 300 |
import json
import math
import os
from typing import List
import cv2
from detectron2.structures import BoxMode
from detectron2.utils.visualizer import Visualizer
from detectron2.data import DatasetCatalog, MetadataCatalog
OBJECT_CATEGORIES = {
"Vehicle": 0,
"Cyclist": 1,
"Pedestrian": 2,
}
DATASET_TRAIN = "zen_2dod_train"
DATASET_VAL = "zen_2dod_val"
if __name__ == "__main__":
# This code is only for debugging / visualization.
DATASET_ROOT = '' # insert the dataset root path here
register_detectron(DATASET_ROOT, split=0, num_splits=3)
dataset = DatasetCatalog.get(DATASET_TRAIN)
for d in dataset:
img = cv2.imread(d["file_name"])
visualizer = Visualizer(
img[:, :, ::-1], scale=1, metadata=MetadataCatalog.get(DATASET_TRAIN)
)
out = visualizer.draw_dataset_dict(d)
cv2.imshow("image", out.get_image()[:, :, ::-1])
cv2.waitKey(0)
break
| [
11748,
33918,
198,
11748,
10688,
198,
11748,
28686,
198,
6738,
19720,
1330,
7343,
198,
198,
11748,
269,
85,
17,
198,
198,
6738,
4886,
1313,
17,
13,
7249,
942,
1330,
8315,
19076,
198,
6738,
4886,
1313,
17,
13,
26791,
13,
41464,
7509,
1... | 2.253555 | 422 |
import sys
from copy import deepcopy
from swift.codegen.lib import ql
from swift.codegen.test.utils import *
@pytest.mark.parametrize("params,expected_local_var", [
(["a", "b", "c"], "x"),
(["a", "x", "c"], "x_"),
(["a", "x", "x_", "c"], "x__"),
(["a", "x", "x_", "x__"], "x___"),
])
@pytest.mark.parametrize("name,expected_article", [
("Argument", "An"),
("Element", "An"),
("Integer", "An"),
("Operator", "An"),
("Unit", "A"),
("Whatever", "A"),
])
if __name__ == '__main__':
sys.exit(pytest.main())
| [
11748,
25064,
198,
6738,
4866,
1330,
2769,
30073,
198,
198,
6738,
14622,
13,
8189,
5235,
13,
8019,
1330,
10662,
75,
198,
6738,
14622,
13,
8189,
5235,
13,
9288,
13,
26791,
1330,
1635,
628,
628,
198,
31,
9078,
9288,
13,
4102,
13,
17143,... | 2.243028 | 251 |
from zoneinfo import ZoneInfo
from datetime import datetime, timezone
from flurry.core import predicate as P
from flurry.core.utils import visit_predicate
from flurry.postgres.postgres import _PostgreSQLSimplifier
DATETIME_A = datetime(2022, 1, 27, 13, 6, 47, 799859, tzinfo=ZoneInfo("UTC"))
DATETIME_B = datetime(2022, 1, 27, 13, 6, 47, 799859, tzinfo=ZoneInfo("EST"))
predicates_to_simplify = {
"empty_or": P.Or(),
"empty_is": P.Is(),
"empty_and": P.And(),
"empty_where": P.Where(),
"simple_and": P.And(P.Where(a=1), P.Where(b=2)),
"simple_or": P.Or(P.Where(a=1), P.Where(b=2)),
"simple_is": P.Is(str, int, float),
"simple_where": P.Where(
a=P.Eq(1),
b=P.NotEq(2),
c=P.Less(3),
d=P.More(4),
e=P.LessEq(5),
f=P.MoreEq(6),
g=P.Between(7, 8),
h=P.OneOf(9, 10),
),
"null_where": P.Where(
a=P.Eq(None),
b=P.NotEq(None),
),
"complex": P.Or(
P.Is(int, str, float),
P.And(
P.Where(a=P.Eq(1)),
P.Where(b=P.NotEq(2)),
P.Where(c=P.Less(3)),
),
P.And(
P.Where(d=P.More(4)),
P.Where(e=P.LessEq(5)),
P.Where(f=P.MoreEq(6)),
),
P.Where(
g=P.Between(7, 8),
h=P.OneOf(9, 10),
),
P.And(P.Is(), P.Where()),
),
"date_and_time": P.Where(
a=P.Eq(DATETIME_A),
b=P.Eq(DATETIME_B),
c=P.NotEq(DATETIME_A),
d=P.NotEq(DATETIME_B),
),
}
| [
6738,
6516,
10951,
1330,
13035,
12360,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,
640,
11340,
628,
198,
6738,
36418,
13,
7295,
1330,
44010,
355,
350,
198,
6738,
36418,
13,
7295,
13,
26791,
1330,
3187,
62,
28764,
5344,
198,
6738,
36418... | 1.7 | 910 |
#!/usr/bin/env python
# coding: utf-8
import sys
from collections import Counter
from rosalind import fasta
ACIDS = "ACGT"
if __name__ == "__main__":
with open("data/rosalind_cons.txt") as f:
cons, matrix = consensus(fasta(f.read()).values())
print(''.join(cons))
for x in ACIDS:
print("{0}: {1}".format(x, ' '.join(map(str, matrix[x]))))
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
19617,
25,
3384,
69,
12,
23,
198,
198,
11748,
25064,
198,
6738,
17268,
1330,
15034,
198,
6738,
686,
21680,
521,
1330,
3049,
64,
628,
198,
2246,
14255,
796,
366,
2246,
19555,
1,
62... | 2.313253 | 166 |
from kivy.uix.stacklayout import StackLayout
from kivy.lang import Builder
import pathlib
from service.widget import Widget
from datetime import datetime, timedelta
Builder.load_file(str(pathlib.Path(__file__).parent.absolute()) + pathlib.os.sep + 'weather3.kv')
| [
6738,
479,
452,
88,
13,
84,
844,
13,
25558,
39786,
1330,
23881,
32517,
198,
6738,
479,
452,
88,
13,
17204,
1330,
35869,
198,
11748,
3108,
8019,
198,
6738,
2139,
13,
42655,
1330,
370,
17484,
198,
6738,
4818,
8079,
1330,
4818,
8079,
11,... | 3.154762 | 84 |
"""Classes for RoboDaniel"""
import importlib
import logging
import re
from data import commands
from datetime import datetime
from groupy import Bot as GroupyBot
from groupy import Group
from groupy import config
class Bot:
"""RoboDaniel bot class"""
def gather_commands(self):
"""gather !command functions and factoids into dicts"""
self.logger.info('gathering !commands...')
# reload command module for when !reload is called
importlib.reload(commands)
r = re.compile('^__')
self.command_dict = {c: getattr(commands, c)
for c in dir(commands)
if not r.match(c)}
# gather factoids
with open('data/factoids.txt') as factoids_file:
self.factoids = {f.split()[0]: ' '.join(f.split()[1:])
for f in factoids_file}
def generate_triggers(self):
"""generate message trigger rules"""
self.logger.info('generating trigger rules...')
with open('data/triggers.txt') as triggers_file:
self.triggers = [(re.compile(t.split()[0]), ' '.join(t.split()[1:]))
for t in triggers_file]
def interpret_command(self, message):
"""decide what to do with a "!command" message"""
# extract the message text, minus the beginning '!'
command = message['text'][1:]
# put a precautionary space before each '@'
# as GroupMe does weird stuff with mentions
command = re.sub('@', ' @', command)
if command in self.factoids:
return [self.factoids[command]]
args = command.split()
if args[0] in self.command_dict:
return self.command_dict[args[0]](args=args[1:],
sender=message['name'],
sender_id=message['user_id'],
attachments=message['attachments'],
bot=self)
else:
self.logger.warning('invalid command: {}'.format(command))
return False
def match_trigger(self, message):
"""attempt to match a message against trigger rules"""
response = None
if message['text'][0] == '!':
# message contains a !command; try to interpret it
self.logger.info('interpreted command: "{}"'.format(message['text']))
response = self.interpret_command(message)
else:
# try each trigger rule
for pattern, trigger in self.triggers:
if pattern.match(message['text']):
# response is triggered
self.logger.info('trigger matched: "{}"'.format(message['text']))
response = [trigger]
break
if response:
# we have a response to send!
logging.info('sending response: "{}"'.format(response))
self.post(*response)
def logmsg(self, message):
"""log a chat message to the appropriate logfile"""
timestamp = datetime.fromtimestamp(message['created_at']).isoformat()
line = '{} {}: {}'.format(timestamp, message['name'], message['text'])
print(line, file=self.chatlog)
def post(self, *message):
"""post a message with optional attachments"""
self.bot.post(*message)
| [
37811,
9487,
274,
329,
39702,
19962,
37811,
198,
198,
11748,
1330,
8019,
198,
11748,
18931,
198,
11748,
302,
198,
6738,
1366,
1330,
9729,
198,
6738,
4818,
8079,
1330,
4818,
8079,
198,
6738,
1448,
88,
1330,
18579,
355,
4912,
88,
20630,
1... | 2.151946 | 1,619 |
# -*- coding: utf-8 -*-
from __future__ import print_function, absolute_import
from builtins import range # pylint: disable=redefined-builtin
import dash_table
import collections
import os
import fnmatch
import glob
import xlsxwriter
from xlsxwriter.utility import xl_rowcol_to_cell
import xlrd
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output, State
#import dash_table_experiments as dt
from .common import generate_table
import pandas as pd
import numpy as np
from . import opti_models
from . import app
import chart_studio.plotly as plt
# pylint: disable=redefined-builtin
# this command is necessary for the app to find the MDL_screens_database
# direcotry when you deploy
script_path = os.path.dirname(os.path.realpath(__file__))
myPath= os.path.join( script_path,'MDL_screens_database')
###############################################################################
def get_controls_var(id, desc, unit, range):
"""
Get controls for each variable.
This includes
* the description
* range
"""
label_reagent = dcc.Input(
id=id + "_label", type='text', value=desc, className="label")
unit_reagent = dcc.Input(
id=id + "_unit", type='text', value=unit, className="label")
range_low = dcc.Input(
id=id + "_low", type='number', value=range[0], className="range")
range_high = dcc.Input(
id=id + "_high", type='number', value=range[1], className="range")
return html.Tr([
html.Td(label_reagent),
html.Td(unit_reagent),
html.Td([range_low, html.Span('to'), range_high])], id=id + "_tr_lhs")
#------------------------------------------------------------------------------
###############################################################################
def get_controls_screen(id, desc, range):
"""
Get screen dimensions nsamples_x and nsamples_y
"""
label = dcc.Input(id = id + "_label", type = 'text', value=desc,
className = 'label')
dimensions_x = dcc.Input(
id=id + "_x", type='number', value=range[0], className="range")
dimensions_y = dcc.Input(
id=id + "_y", type='number', value=range[1], className="range")
return html.Tr([
html.Td(label),
html.Td([dimensions_x, html.Span('\\times'), dimensions_y])
,
# html.Td([
# # html.Span(slider, className="slider")
# # ,
# # html.Span('', id=id + "_weight_label")
# ])
], id=id + "_tr_lhs")
#------------------------------------------------------------------------------
###
# 'code' is the variable that gets as input the value from the user.
# This corresponds to a certain code name of the xlxs files. The program
# use this to to search in the directory for matches.
# First, the characteristics of the variable are set, i.e. how to link the
# variable to the layout-input environment that the user interacts with.
###
code = collections.OrderedDict([
('code_number',
dict(label=['MDL file code'])),
])
NVARS_MAX = 10
###
# inp_nvars: an input variable that is updated with btn_submit and takes the numbers of the reagents
# that are in each hit condition.
###
inp_nvars = html.Tr([
html.Td('Number of reagents: '),
html.Td(
dcc.Input(
id='inp_nvars_lhs',
# type='text',
value=' ',
# max=NVARS_MAX,
# min=1,
className="nvars range"))
])
###
# inp_code_hitwell: two-input variable, caries the values of both the hitwell and the code of the
# screen
###
inp_code_hitwell = html.Tr([
html.Td('Enter screen code (e.g. MD1-40) and hit well (e.g. B1):'),
html.Td(dcc.Input(id='inp_code_lhs',
type='text',
value="MD1-40")),
html.Td(dcc.Input(
id='inp_hitwell_lhs',
type='text',
value="B1")),
html.Div('', id='input_info_lhs')])
btn_submit = html.Tr([html.Td(html.Button('Submit', id = 'submit-button_lhs', className='action-button', n_clicks=0)),
html.Div('', id='submit_info_lhs',style={'width': '50%'}),
])
##############################################################################
lhs_text = """
Latin hypercube sampling (LHS) is a sampling method for searching for optimal
parameters in a high dimensional space. The LHS is a near-random method, i.e.
the optimised condtions are not completely random, instead they obey certain
requirements. These requirements assure that the final sample points
will be spread more evenly across the range. LHS can be used for high-dimension
spaces, i.e. for more than two conditions.
"""
lhs_text_html = [html.P(i) for i in lhs_text.split("\n\n")]
lhs_layout = html.Div( [html.H2("About the Latin Hybercube sampling"),
dcc.Markdown(lhs_text, className="text-container", id="lhs_container",
# **{'data-iframe-height': ''},
style={ 'width': '50%','padding': '20px',
'margin': '10px','justify-content': 'center','align-items': 'center'})])
##############################################################################
# states = label_states + unit_states + low_states + high_states
states = [State('inp_code_lhs', 'value')]
states += [State('inp_hitwell_lhs', 'value')]
@app.callback(
[Output('submit_info_lhs', 'children'),
Output('inp_nvars_lhs', 'value')],
[Input('submit-button_lhs', 'n_clicks')],
states)
#------------------------------------------------------------------------------
###
# This feature is so the user can change the dimensions of the screen, i.e. the
# number of the wells. Initialises by the the dimensions of a common crystallisation
# screen 12x8
###
inp_nsamples = html.Tr([
html.Td('Enter screen dimensions '),
html.Td(
dcc.Input(
id='nsamples_x_lhs', type='number', value=8,
className="nsamples range")),
html.Td(html.Span('x')),
html.Td(
dcc.Input(
id='nsamples_y_lhs', type='number', value=12,
className="nsamples range"))
])
##############################################################################
btn_compute = html.Div([
html.Button('compute using LHS', id='btn_compute_lhs', className='action-button',
n_clicks = 0),
html.Div('', id='compute_info_lhs')
])
###
# Creation of dash app: setting up the layout
###
layout = html.Div(
[
lhs_layout,
html.Table([inp_code_hitwell]),
html.Br(),
html.Table([btn_submit]),
html.Br(),
html.Table([inp_nvars, inp_nsamples]),
html.Br(),
btn_compute,
#graph, hover_info,
],
style={'padding': 20},
id="container_lhs",
# tag for iframe resizer
**{'data-iframe-height': ''},
)
#------------------------------------------------------------------------------
##############################################################################
###
# Using State to share more than one input in the callback.
# ninps: no of inputs
###
# ninps = len(label_states + unit_states + low_states + high_states) + 5
ninps = 5 # no of inputs
states = [State('inp_nvars_lhs', 'value')]
states += [State('nsamples_x_lhs', 'value')]
states += [State('nsamples_y_lhs', 'value')]
states += [State('inp_code_lhs', 'value')]
states += [State('inp_hitwell_lhs', 'value')]
#------------------------------------------------------------------------------
###############################################################################
@app.callback(
dash.dependencies.Output('compute_info_lhs', 'children'),
[dash.dependencies.Input('table_lhs', 'data'),
dash.dependencies.Input('btn_compute_lhs', 'n_clicks'),
], states)
def on_compute(submit_info, n_clicks, *args):
"""Callback for clicking compute button"""
if n_clicks is None :
return ''
df_hit_values = pd.DataFrame(submit_info)
if len(args) != ninps:
raise ValueError("Expected {} arguments".format(ninps))
# parse arguments
hitwell = args[-1]
code_name = args[-2]
nsamples_y = args[-3]
nsamples_x = args[-4]
###
# Count how many columns from each category are on the selected file
###
n_pH = len(df_hit_values.filter(like='pH').columns)
n_units = len(df_hit_values.filter(like='Units').columns)
n_salts = len(df_hit_values.filter(like='Salt').columns)
n_buff = len(df_hit_values.filter(like='Buffer').columns)
n_precip = len(df_hit_values.filter(like='Precipitant').columns)
###
# Only the values of concentration and pH are going to change
###
concentrations = df_hit_values.filter(like='Conc').columns
var = df_hit_values[concentrations].to_numpy()
var = var.T
var_float = var.astype(np.float)
pH = df_hit_values.filter(like='pH').columns
pH = df_hit_values[pH].to_numpy()
###
# In the following lines, the values of the concentration for salt/prec/buffer are assigned.
# The format of the file is crucial in order the following to work.
###
salt_conc = var[0:n_salts]
buff_conc = var[(n_salts):(n_salts+n_buff)]
precip_conc = var[(n_salts+n_buff):(n_salts+n_buff+n_precip)]
# VARY RANGE OF CONCERN --- ATTEMPTS TO MAKE THE RANGE CHANGE
# low_vals = np.array([args[i + NVARS_MAX] for i in range(nvars)])
# high_vals = np.array([args[i + 2 * NVARS_MAX] for i in range(nvars)
# NOTE: check if salt_conc, ph and precip_conc are float arrays. This check is
# important, cause after the user will update the number in the table,
# the values are parsed as str.
pH = pH.astype(float)
pH = pH.T
salt_conc = salt_conc.astype(float)
precip_conc = precip_conc.astype(float)
salt_range = [salt_conc[:]/2, salt_conc[:]*2]
pH_range = [pH[:]-1, pH[:]+1]
precip_range = [precip_conc[:]/4, precip_conc[:]*4]
low_vals = np.concatenate([salt_range[0], pH_range[0], precip_range[0]])
high_vals = np.concatenate([salt_range[1], pH_range[1], precip_range[1]])
nvars = n_salts + n_pH + n_precip
nsamples = nsamples_x*nsamples_y
salts_labels = df_hit_values.filter(like='Salt').columns.values
print('salts_labels',salts_labels)
buff_labels = df_hit_values.filter(like='Buffer').columns.values
print('buff_labels',buff_labels)
perci_labels = df_hit_values.filter(like='Precipitant').columns.values
print('perci_labels',perci_labels)
units_labels = df_hit_values.filter(like='Unit').columns.values
print('unit_labels',units_labels)
reagent_name = np.concatenate([df_hit_values.iloc[0][salts_labels[:]], df_hit_values.iloc[0][buff_labels[:]], df_hit_values.iloc[0][perci_labels[:]] ])
print('reagent_name', reagent_name)
reagent_name = reagent_name.tolist()
reagent_name_1 = reagent_name[0]
reagent_name_2 = reagent_name[1]
labels = reagent_name
labels_array = np.asarray(labels)
dim = len(labels_array)
styling_label_1 = [' ['] * len(labels)
styling_label_2 = [']'] * len(labels)
styling_label_1_array = np.asarray(styling_label_1)
styling_label_2_array = np.asarray(styling_label_2)
unit_name = np.concatenate([df_hit_values.iloc[0][units_labels[:]]])
labels_array_new = ["" for x in range(dim)]
ll = 0
for i in range(dim):
try:
ll = ll+1
counter = labels_array[i] + styling_label_1[i] + unit_name[i] + styling_label_2[i]
labels_array_new[ll-1] = counter
except:
return dcc.Textarea(
placeholder='Enter a value...',
value='An error occurred. Please report at: enquiries@moleculardimensions.com ',
style={'width': '40%'}
)
samples = opti_models.compute_LHS(num_samples=nsamples,
var_LB=low_vals,
var_UB=high_vals)
df = pd.DataFrame(data=samples, columns=labels_array_new)
table = generate_table(df, nsamples_x, nsamples_y, download_link=True)
np.set_printoptions(precision=3)
if n_clicks > 0:
return table
# #------------------------------------------------------------------------------
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
11,
4112,
62,
11748,
198,
6738,
3170,
1040,
1330,
2837,
220,
1303,
279,
2645,
600,
25,
15560,
28,
445,
18156,
12,
18780,
... | 2.53387 | 4,842 |
import json
from collections import OrderedDict
from importlib import import_module
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import render
from fhir.models import SupportedResourceType
from fhir.utils import kickout_400
from fhir.views.utils import check_access_interaction_and_resource_type
from fhir.settings import FHIR_BACKEND_FIND, DF_EXTRA_INFO
| [
11748,
33918,
198,
198,
6738,
17268,
1330,
14230,
1068,
35,
713,
198,
6738,
1330,
8019,
1330,
1330,
62,
21412,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
4023,
1330,
367,
29281,
31077,
198,
6738,
4... | 3.440678 | 118 |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
__author__ = 'Justin'
__mtime__ = '2018-11-05'
"""
import time
import os
from skimage import color
import numpy as np
from skimage import io
import cv2
import torch
from core.util import read_csv_file, get_project_root, get_seeds
from preparation.hsd_transform import hsd2rgb, rgb2hsd
from visdom import Visdom
from core import Random_Gen
from preparation.acd_model import ACD_Model
from torch.autograd import Variable
from core import Block
# Reinhard algorithm
# @staticmethod
# def get_normalization_function(imgCone, params, extract_scale, patch_size, ):
# low_scale = params.GLOBAL_SCALE
# # 在有效检测区域内,均匀抽样
# eff_region = imgCone.get_effective_zone(low_scale)
# sampling_interval = 1000
# seeds = get_seeds(eff_region, low_scale, extract_scale, patch_size, spacingHigh=sampling_interval, margin=-4)
#
# # #不受限制地随机抽样
# # rx2 = int(imgCone.ImageWidth * extract_scale / params.GLOBAL_SCALE)
# # ry2 = int(imgCone.ImageHeight * extract_scale / params.GLOBAL_SCALE)
# # random_gen = Random_Gen("halton")
# #
# # N = 2000
# # # rx1, ry1, rx2, ry2 = self.valid_rect
# # x, y = self.random_gen.generate_random(N, 0, rx2, 0, ry2)
#
# images = []
# for x, y in seeds:
# block = imgCone.get_image_block(extract_scale, x, y, patch_size, patch_size)
# img = block.get_img()
# images.append(img)
#
# normal = HistNormalization("match_hist", hist_target ="hist_templates.npy",
# hist_source = None)
# normal.prepare(images)
#
# return normal
# import tensorflow as tf
# class ACDNormalization_tf(AbstractNormalization):
# def __init__(self, method, **kwarg):
# super(ACDNormalization_tf, self).__init__(method, **kwarg)
# self._pn = 100000
# self._bs = 1500
# self._step_per_epoch = int(self._pn / self._bs)
# self._epoch = int(300 / self._step_per_epoch)
# # self._pn = 100000
# # self._bs = 1500
# # self._step_per_epoch = 20
# # self._epoch = 15
#
# # self.dc_txt = kwarg["dc_txt"]
# # self.w_txt = kwarg["w_txt"]
# # self.template_path = kwarg["template_path"]
# self.dc_txt = "{}/data/{}".format(get_project_root(), kwarg["dc_txt"])
# self.w_txt = "{}/data/{}".format(get_project_root(), kwarg["w_txt"])
# self.template_path = "{}/data/{}".format(get_project_root(), kwarg["template_path"])
# self._template_dc_mat = None
# self._template_w_mat = None
#
# self.input_od = tf.placeholder(dtype=tf.float32, shape=[None, 3])
# self.target, self.cd, self.w = self.acd_model(self.input_od)
# self.init = tf.global_variables_initializer()
#
# # if(not os.path.exists(self.dc_txt) or not os.path.exists(self.w_txt)):
# # self.generate()
# self.generate()
# self._template_dc_mat = np.loadtxt(self.dc_txt)
# self._template_w_mat = np.loadtxt(self.w_txt)
# self.inv = np.linalg.inv(self._template_dc_mat * self._template_w_mat)
#
# def normalize_on_batch(self, src_img):
# img = self.transform(src_img)
# return img
#
# def generate(self):
# template_list = os.listdir(self.template_path)
# temp_images = []
# for i, name in enumerate(template_list):
# # temp_images.append(cv2.imread(os.path.join(self.template_path, name))) # BGR
# # 读入RGB
# temp_images.append(io.imread(os.path.join(self.template_path, name)))
#
# # fit
# st = time.time()
# self.fit(temp_images)
# print('fit time', time.time() - st)
#
# def fit(self, images):
# opt_cd_mat, opt_w_mat = self.extract_adaptive_cd_params(images)
# np.savetxt(self.dc_txt, opt_cd_mat)
# np.savetxt(self.w_txt, opt_w_mat)
#
# def transform(self, images):
#
# od = -np.log((np.asarray(images, np.float) + 1) / 256.0)
# normed_od = np.matmul(od, self.transform_mat)
# normed_images = np.exp(-normed_od) * 256 - 1
#
# return np.maximum(np.minimum(normed_images, 255), 0)/255
#
# def sampling_data(self, images):
# pixels = np.reshape(images, (-1, 3))
# pixels = pixels[:, (2, 1, 0)] # 从RGB变BGR
# pixels = pixels[np.random.choice(pixels.shape[0], min(self._pn * 20, pixels.shape[0]))]
# od = -np.log((np.asarray(pixels, np.float) + 1) / 256.0)
# tmp = np.mean(od, axis=1)
#
# # filter the background pixels (white or black)
# od = od[(tmp > 0.3) & (tmp < -np.log(30 / 256))]
# od = od[np.random.choice(od.shape[0], min(self._pn, od.shape[0]))]
#
# return od
#
# def extract_adaptive_cd_params(self, images):
# """
# :param images: RGB uint8 format in shape of [k, m, n, 3], where
# k is the number of ROIs sampled from a WSI, [m, n] is
# the size of ROI.
# """
# od_data = self.sampling_data(images)
# if self.input_od is None:
# input_od = tf.placeholder(dtype=tf.float32, shape=[None, 3])
# if self.target is None:
# self.target, self.cd, self.w = self.acd_model(input_od)
# if self.init is None:
# self.init = tf.global_variables_initializer()
#
# with tf.Session() as sess:
# with tf.device('/cpu:0'):
# sess.run(self.init)
# for ep in range(self._epoch):
# for step in range(self._step_per_epoch):
# sess.run(self.target, {self.input_od: od_data[step * self._bs:(step + 1) * self._bs]})
# opt_cd = sess.run(self.cd)
# opt_w = sess.run(self.w)
# return opt_cd, opt_w
#
# @staticmethod
# def acd_model(input_od, lambda_p=0.002, lambda_b=10, lambda_e=1, eta=0.6, gamma=0.5):
# """
# Stain matrix estimation via method of
# "Yushan Zheng, et al., Adaptive Color Deconvolution for Histological WSI Normalization."
# """
# init_varphi = np.asarray([[0.6060, 1.2680, 0.7989],
# [1.2383, 1.2540, 0.3927]])
# alpha = tf.Variable(init_varphi[0], dtype='float32')
# beta = tf.Variable(init_varphi[1], dtype='float32')
# w = [tf.Variable(1.0, dtype='float32'), tf.Variable(1.0, dtype='float32'), tf.constant(1.0)]
#
# sca_mat = tf.stack((tf.cos(alpha) * tf.sin(beta), tf.cos(alpha) * tf.cos(beta), tf.sin(alpha)), axis=1)
# cd_mat = tf.matrix_inverse(sca_mat)
#
# s = tf.matmul(input_od, cd_mat) * w
# h, e, b = tf.split(s, (1, 1, 1), axis=1)
#
# l_p1 = tf.reduce_mean(tf.square(b))
# l_p2 = tf.reduce_mean(2 * h * e / (tf.square(h) + tf.square(e)))
# l_b = tf.square((1 - eta) * tf.reduce_mean(h) - eta * tf.reduce_mean(e))
# l_e = tf.square(gamma - tf.reduce_mean(s))
#
# objective = l_p1 + lambda_p * l_p2 + lambda_b * l_b + lambda_e * l_e
#
# tag_dubeg = False
# if tag_dubeg:
# print_op = tf.print(['cd_mat: ', cd_mat])
# print_op2 = tf.print("objective", objective, ['l_p1: ', l_p1], ['l_p2: ', l_p2], ['l_b: ', l_b], ['l_p1: ', l_e])
# with tf.control_dependencies([print_op, print_op2]):
# target = tf.train.AdagradOptimizer(learning_rate=0.05).minimize(objective)
# else:
# target = tf.train.AdagradOptimizer(learning_rate=0.05).minimize(objective)
#
# return target, cd_mat, w
#
# def prepare(self, images):
# self._template_dc_mat = np.loadtxt(self.dc_txt)
# self._template_w_mat = np.loadtxt(self.w_txt)
# if self._template_dc_mat is None:
# raise AssertionError('Run fit function first')
#
# opt_cd_mat, opt_w_mat = self.extract_adaptive_cd_params(images)
# transform_mat = np.matmul(opt_cd_mat * opt_w_mat, self.inv)
#
# # 当输入图像为RGB时
# transform_mat = transform_mat[(2,1,0), :]
# self.transform_mat = transform_mat[:, (2,1,0)]
class ImageNormalizationTool(object):
'''
Lab颜色空间中的L分量用于表示像素的亮度,取值范围是[0,100],表示从纯黑到纯白;
a表示从红色到绿色的范围,取值范围是[127,-128];
b表示从黄色到蓝色的范围,取值范围是[127,-128]。
'''
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
37811,
198,
834,
9800,
834,
796,
705,
33229,
6,
198,
834,
76,
2435,
834,
796,
705,
7908,
12,
1157,
12,
2713,
6,
198... | 1.90072 | 4,442 |
import time
import event_model
from pytest import fixture
import scipy.misc
from xicam.core.data.bluesky_utils import run_from_doc_stream
@fixture
| [
11748,
640,
198,
198,
11748,
1785,
62,
19849,
198,
6738,
12972,
9288,
1330,
29220,
198,
11748,
629,
541,
88,
13,
44374,
198,
198,
6738,
2124,
291,
321,
13,
7295,
13,
7890,
13,
2436,
947,
2584,
62,
26791,
1330,
1057,
62,
6738,
62,
15... | 2.923077 | 52 |
"""
It is well known that if the square root of a natural number is not an integer, then it is irrational. The decimal expansion of such square roots is infinite without any repeating pattern at all.
The square root of two is 1.41421356237309504880..., and the digital sum of the first one hundred decimal digits is 475.
For the first one hundred natural numbers, find the total of the digital sums of the first one hundred decimal digits for all the irrational square roots.
ans: 40886
"""
# cheesed using the decimal class
import decimal
decimal.getcontext().prec = 102
sum = 0
for n in range(1, 101):
n = decimal.Decimal(n).sqrt()
n = str(n)
if "." in n:
for d in n.replace(".","")[:100]:
sum += int(d)
print(sum)
# Babylonian Method
# https://en.wikipedia.org/wiki/Methods_of_computing_square_roots
decimal.getcontext().prec = 110
D = decimal.Decimal
squares = [x**2 for x in range(1,11)]
sum = 0
for n in range(1, 101):
if n not in squares:
sr = D(n)/2
for i in range(10):
sr = (sr + n/sr)/2
for d in str(sr).replace(".","")[:100]:
sum += int(d)
print(sum) | [
37811,
198,
1026,
318,
880,
1900,
326,
611,
262,
6616,
6808,
286,
257,
3288,
1271,
318,
407,
281,
18253,
11,
788,
340,
318,
25086,
13,
383,
32465,
7118,
286,
884,
6616,
11135,
318,
15541,
1231,
597,
20394,
3912,
379,
477,
13,
198,
1... | 2.946237 | 372 |
# -*- coding: utf-8 -*-
"""
Created on Tue Jun 8 09:44:40 2021
@author: limy
"""
import cv2
import numpy as np
import pandas as pd
import os
if __name__ == '__main__':
if not os.path.exists('data_info.csv'):
print('请将data_info.csv文件放在程序根目录!')
else:
data_info = pd.read_csv('data_info.csv')
# 检查csv表格是否异常
if data_info['ori_image_path'].tolist() == [] or data_info['target_image_path'].tolist() == [] or data_info['output_path'].tolist() == [] or data_info['output_file_name'].tolist() == []:
print('请检查data_info.csv文件中内容是否为空!')
else:
ori_image_path = data_info['ori_image_path'].tolist()
for num in range(len(ori_image_path)):
ori_image_path = data_info['ori_image_path'].tolist()[num]
target_image_path = data_info['target_image_path'].tolist()[num]
output_file_path = os.path.join(data_info['output_path'].tolist()[num], data_info['output_file_name'].tolist()[num])
print('您正在使用配准标注平台,请注意:标注时待配准图像和参考图像的标点顺序要一致,若顺序不一致则无法配准!同时每次标注时请标注4个关键点对,不要多也不要少,谢谢您的使用!')
state = 1
while(state):
original_image = cv2.imread(ori_image_path)
ref_win = cv2.imread(ori_image_path)
target_image = cv2.imread(target_image_path)
src_win = cv2.imread(target_image_path)
imagePoints1 = []
imagePoints2 = []
state = annotion_state()
if state == 2:
break
elif state == 0:
if (len(imagePoints1) != len(imagePoints2)) or (len(imagePoints1) == 0 or len(imagePoints2) == 0):
print('标注点对数量不一致请重新标注!')
print('参考图像标注点数量:', len(imagePoints1))
print('待配准图像标注点数量:', len(imagePoints2))
state = 1
elif len(imagePoints1) != 4 or len(imagePoints2) != 4:
print('两次标注点对数量不为4,请重新标注!')
print('参考图像标注点数量:', len(imagePoints1))
print('待配准图像标注点数量:', len(imagePoints2))
state = 1
if len(imagePoints1)==4 and len(imagePoints2)==4:
src_points = np.array(imagePoints2, dtype=np.float32)
den_points = np.array(imagePoints1, dtype=np.float32)
# getPerspectiveTransform可以得到从点集src_points到点集den_points的透视变换矩阵
T = cv2.getPerspectiveTransform(src_points, den_points)
# 进行透视变换
# 注意透视变换第三个参数为变换后图片大小,格式为(高度,宽度)
warp_imgae = cv2.warpPerspective(target_image, T, (original_image.shape[1], original_image.shape[0]), borderValue=[255, 255, 255])
cv2.imshow("transform", warp_imgae)
cv2.imshow("jizhun", ref_win)
cv2.imshow("daipeizhun", src_win)
cv2.imwrite(output_file_path, warp_imgae)
# cv2.imwrite("result.jpg", warp_imgae)
cv2.imwrite(os.path.join(data_info['output_path'].tolist()[0], "src_p.jpg"), src_win)
cv2.imwrite(os.path.join(data_info['output_path'].tolist()[0], "ref_p.jpg"), ref_win)
print('图片已保存到输出目录,请查看!请点击标注窗口,按esc退出此次标注。')
print(output_file_path)
cv2.waitKey()
cv2.destroyAllWindows()
else:
print('您已放弃标注,感谢您的使用!')
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
201,
198,
37811,
201,
198,
41972,
319,
30030,
7653,
220,
807,
7769,
25,
2598,
25,
1821,
33448,
201,
198,
201,
198,
31,
9800,
25,
1761,
88,
201,
198,
37811,
201,
198,
201,
... | 1.339797 | 3,146 |
import unittest
from click.testing import CliRunner
from openvariant.commands.openvar import openvar
| [
11748,
555,
715,
395,
198,
198,
6738,
3904,
13,
33407,
1330,
1012,
72,
49493,
198,
198,
6738,
1280,
25641,
415,
13,
9503,
1746,
13,
9654,
7785,
1330,
1280,
7785,
628
] | 3.466667 | 30 |
import numpy as np
import pandas as pd
from scipy.stats import zscore
from sklearn.linear_model import LogisticRegression, LassoLars
from tqdm import tqdm
import matplotlib.pyplot as plt
import seaborn as sb
def createDataset(n, d=100, d_inf=5, is_classification=True, no_signal=False):
"""
n is number of samples in each of train/test/holdout sets
d is the data dimension (default=100)
d_inf is the number of informative features (default=20)
is_classification is bool stating whether data is for classification
as opposed to regression (default=True)
no_signal is a bool stating whether you want the data randomized so
there's no useful signal (default=False)
"""
# making random inputs, outputs
X = np.random.normal(0, 1, (3*n, d))
y = np.random.normal(0, 1, (3*n, 1))
# thresholding y values for classification
if is_classification:
y = 2.0*((y>0) - 0.5)
# making the first d_inf dimensions informative
if is_classification:
X[:,:d_inf] += y*np.random.normal(1.0, 1.5, X[:,:d_inf].shape)
else:
snr = 0.05
X[:,:d_inf] += snr*y
X = zscore(X, axis=0)
# if you dont want useful signal, randomize the labels
if no_signal:
np.random.shuffle(y)
# Divide into train/test/holdout pairs
outputs = [[X[i::3, :], y[i::3, 0]] for i in range(3)]
return outputs
def thresholdout(train_vals, holdout_vals, tho_scale=1.0):
"""
This is the actual thresholdout algorithm
that takes values from a training-run and a holdout-run
and returns a new set of holdout values
"""
thr = tho_scale
tol = thr / 4
train_vals = np.array(train_vals)
holdout_vals = np.array(holdout_vals)
diffNoise = np.abs(train_vals - holdout_vals) - np.random.normal(0, tol, holdout_vals.shape)
flipIdx = diffNoise > thr
new_holdout_vals = np.copy(train_vals)
new_holdout_vals[flipIdx] = np.copy(holdout_vals)[flipIdx] + np.random.normal(0, tol, new_holdout_vals[flipIdx].shape)
return new_holdout_vals
def repeatexp(n, d, grid_size, reps, tho_scale=0.1, is_classification=True, no_signal=True):
"""
Repeat the experiment multiple times on different
datasets to put errorbars on the graphs
"""
datasetList = ['Train', 'Holdout', 'Test']
colList = ['perm', 'performance', 'dataset']
df_list_std = []
df_list_tho = []
for perm in tqdm(range(reps)):
vals_std, vals_tho = fitModels_paramTuning(n, d, grid_size,
is_classification=is_classification,
tho_scale=tho_scale,
no_signal=no_signal)
for i, ds in enumerate(datasetList):
df_list_std.append((perm, vals_std[i], ds))
df_list_tho.append((perm, vals_tho[i], ds))
df_std = pd.DataFrame(df_list_std, columns=colList)
df_tho = pd.DataFrame(df_list_tho, columns=colList)
return df_std, df_tho
def runExpt_and_makePlots(n, d, grid_size, reps, tho_scale=0.1, is_classification=True):
"""
Run the experiments with and without useful training signal
then make subplots to show how overfitting differs for
standard holdout and thresholdout
n = number of training samples in train/test/holdout sets
d = dimension of data
grid_size = number of steps in parameter grid search
reps = number of times experiment is repeated
is_classification = bool that indicates whether to do classification or regression
"""
args = [n, d, grid_size, reps]
df_std_signal, df_tho_signal = repeatexp(*args,
is_classification=is_classification,
tho_scale=tho_scale,
no_signal=False)
df_std_nosignal, df_tho_nosignal = repeatexp(*args,
is_classification=is_classification,
tho_scale=tho_scale,
no_signal=True)
f, ax = plt.subplots(2, 2, figsize=(8,10), sharex=True, sharey=False)
sb.set_style('whitegrid')
kw_params = {'x':'dataset',
'y':'performance',
'units':'perm'}
sb.barplot(data=df_std_signal,
ax=ax[0,0],
**kw_params)
ax[0,0].set_title('Standard, HAS Signal')
sb.barplot(data=df_tho_signal,
ax=ax[0,1],
**kw_params)
ax[0,1].set_title('Thresholdout, HAS Signal')
sb.barplot(data=df_std_nosignal,
ax=ax[1,0],
**kw_params)
ax[1,0].set_title('Standard, NO Signal')
sb.barplot(data=df_tho_nosignal,
ax=ax[1,1],
**kw_params)
ax[1,1].set_title('Thresholdout, NO Signal')
return f, ax | [
11748,
299,
32152,
355,
45941,
198,
11748,
19798,
292,
355,
279,
67,
198,
198,
6738,
629,
541,
88,
13,
34242,
1330,
1976,
26675,
198,
6738,
1341,
35720,
13,
29127,
62,
19849,
1330,
5972,
2569,
8081,
2234,
11,
406,
28372,
43,
945,
198,... | 2.047735 | 2,472 |
from django.contrib import admin
from .models import Board, Post, Topic
@admin.register(Board)
admin.site.register(Post)
admin.site.register(Topic) | [
6738,
42625,
14208,
13,
3642,
822,
1330,
13169,
198,
6738,
764,
27530,
1330,
5926,
11,
2947,
11,
47373,
628,
198,
31,
28482,
13,
30238,
7,
29828,
8,
628,
198,
28482,
13,
15654,
13,
30238,
7,
6307,
8,
198,
28482,
13,
15654,
13,
30238... | 3.282609 | 46 |
# babysitterWageCalc.py
# A program that accepts a starting time and ending time in hours and minutes
# to calculate the total babysitting bill. The start and end times are in a
# single 24 hour period. Partial hours are prorated.
"""A babysitter charges $2.50 an hour until 9:00 PM when the rate drops to
$1.75 an hour (the children are in bed). Write a program that accepts a
starting time and ending time in hours and minutes and calculates the total
babysitting bill. You may assume that the starting and ending times are in a
single 24-hour period. Partial hours should be prorated."""
from datetime import datetime
main()
| [
2,
46711,
1967,
54,
496,
9771,
66,
13,
9078,
198,
2,
317,
1430,
326,
18178,
257,
3599,
640,
290,
7464,
640,
287,
2250,
290,
2431,
198,
2,
284,
15284,
262,
2472,
46711,
2535,
2855,
13,
383,
923,
290,
886,
1661,
389,
287,
257,
198,
... | 3.919255 | 161 |
import responses
import unittest
import harperdb
import harperdb_testcase
| [
11748,
9109,
198,
11748,
555,
715,
395,
198,
198,
11748,
3971,
525,
9945,
198,
11748,
3971,
525,
9945,
62,
9288,
7442,
628
] | 3.454545 | 22 |
import triangle
from shapely.geometry import Polygon, MultiPolygon, Point
from shapely.ops import unary_union
def triangulate(vertex_list, boundary_vertices=None, boundary_indexes=None):
""" Uses a Python wrapper of Triangle (Shechuck, 1996) to triangulate a set of points. Triangulation is
constrained if bounding vertices are provided; otherwise the triangulation is Delaunay. """
xy_list = [[v.get_x(), v.get_y()] for v in vertex_list]
if boundary_vertices is not None and boundary_indexes is not None:
boundary_points = [[v.get_x(), v.get_y()] for v in boundary_vertices]
unique_points = [point for point in xy_list if point not in boundary_points]
boundary_points.extend(unique_points)
# Constrained
triangulation = triangle.triangulate({'vertices': boundary_points,
'segments': boundary_indexes},
'pCS0') # p: PSLG; C: Exact arithmetic; S_: Steiner point limit
else:
# Delaunay
triangulation = triangle.triangulate({'vertices': xy_list})
return triangulation
def fill_poly_gaps(mqual_poly):
""" Fills gaps in MultiPolygons/Polygons by rebuilding the geometry from the exterior coordinates of each polygon
and then using a bounding rectangle of the new polygons to eliminate any remaining gaps that can occur from
touching polygon edges. """
parts = list()
if mqual_poly.geom_type == 'MultiPolygon':
for geom in mqual_poly.geoms:
p = Polygon(geom.exterior.coords)
parts.append(p)
dissolve_poly = unary_union(parts)
xmin, ymin, xmax, ymax = dissolve_poly.bounds
bounding_rect = Polygon([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]).buffer(1, resolution=1)
geom_list = list()
diff_poly = bounding_rect.difference(dissolve_poly)
for diff_poly_geom in diff_poly.geoms:
geom_list.append(diff_poly_geom)
sorted_geoms = sorted(geom_list, key=lambda k: k.bounds)
fill_poly = bounding_rect.difference(sorted_geoms[0])
poly = fill_poly.buffer(0)
else:
dissolve_poly = unary_union(mqual_poly)
xmin, ymin, xmax, ymax = dissolve_poly.bounds
bounding_rect = Polygon([(xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)]).buffer(1, resolution=1)
diff_poly = bounding_rect.difference(dissolve_poly)
fill_poly = bounding_rect.difference(diff_poly)
poly = fill_poly.buffer(0)
if poly.geom_type == 'MultiPolygon':
final_poly = MultiPolygon(poly)
else:
final_poly = Polygon(poly)
return final_poly
def create_idx(start, end):
""" Creates indexes for boundary vertices so that segments can be created for a constrained triangulation. """
return [[i, i + 1] for i in range(start, end)] + [[end, start]]
def get_boundary_points(poly, point_set, point_tree):
""" Extracts polygon vertex coordinates and returns the coordinates as Vertex objects along with the associated
index list. """
boundary_dict = dict()
if poly.geom_type == 'MultiPolygon':
for geom_index in range(len(poly.geoms)):
boundary_dict[geom_index] = list()
geom = poly.geoms[geom_index]
x, y = geom.exterior.coords.xy
for i in range(len(x) - 1):
point_list = list()
p = Point(x[i], y[i])
p_buffer = p.buffer(0.00000001)
point_tree.get_points_in_polygon(point_tree.get_root(), 0, point_set.get_domain(), p_buffer, point_set,
point_list)
boundary_dict[geom_index].append(point_list[0])
else:
x, y = poly.exterior.coords.xy
boundary_dict[0] = list()
for i in range(len(x) - 1):
point_list = list()
p = Point(x[i], y[i])
p_buffer = p.buffer(0.00000001)
point_tree.get_points_in_polygon(point_tree.get_root(), 0, point_set.get_domain(), p_buffer, point_set,
point_list)
boundary_dict[0].append(point_list[0])
boundary_vertices, length_list = list(), list()
for poly_index in boundary_dict.keys():
poly_length = len(boundary_dict[poly_index])
length_list.append(poly_length-1)
for vertex in boundary_dict[poly_index]:
boundary_vertices.append(vertex)
index_list = list()
for i in range(len(length_list)):
if i == 0:
start = 0
end = length_list[i]
else:
start = sum(length_list[:i]) + i
end = start + length_list[i]
index_list.extend(create_idx(start, end))
return boundary_vertices, index_list
def simplify_mqual(triangulation, mqual_poly):
""" Simplifies MQUAL boundary by taking an input Delaunay triangulation of the source soundings, and removing
triangles whose centroid does not intersect the original MQUAL polygon. The simplified MQUAL boundary will have
vertices that are in the source soundings dataset, which is important for the triangle test during validation.
This process can result in geometries with unwanted gaps, which are eliminated using fill_poly_gaps(). """
delete_triangles, tin_triangles, = list(), list()
# Get each triangle of the TIN
for index, value in enumerate(triangulation['triangles']):
tri_list = list()
for v_id in value:
tri_list.append(v_id)
triangle_points = list()
for vertex_id in tri_list:
vertex = triangulation['vertices'][vertex_id]
triangle_points.append([vertex[0], vertex[1]])
triangle_poly = Polygon(triangle_points)
tri_centroid = triangle_poly.centroid
# Flag triangle if centroid is outside MQUAL polygon
if mqual_poly.intersects(tri_centroid) is False:
delete_triangles.append(triangle_poly)
tin_triangles.append(triangle_poly)
tin_shape = unary_union(tin_triangles)
# Delete triangles from shape, beginning with largest area
sorted_del_triangles = sorted(delete_triangles, key=lambda k: k.area, reverse=True)
for delete_poly in sorted_del_triangles:
x, y = delete_poly.exterior.coords.xy
delete_tri_points = list()
for i in range(len(x) - 1):
delete_tri_points.append(Point(x[i], y[i]))
tin_shape = tin_shape.difference(delete_poly)
# Check to ensure removed triangle does not exclude source soundings from simplified polygon
intersect_check = [point.intersects(tin_shape) for point in delete_tri_points]
if False in intersect_check:
tin_shape = unary_union([tin_shape, delete_poly])
if tin_shape.geom_type == 'MultiPolygon':
final_poly = list()
for geom in tin_shape.geoms:
if mqual_poly.intersects(geom.centroid) is True:
final_poly.append(geom)
poly = MultiPolygon(final_poly).buffer(0)
else:
poly = Polygon(tin_shape.buffer(0))
return poly
def modified_binary_search(sorted_vertices, vertex):
""" Modified binary search algorithm to increase performance when removing soundings during the
label-based generalization. """
right, left = 0, 0
vertices_num = len(sorted_vertices)
while right < vertices_num:
i = (right + vertices_num) // 2
if vertex.get_z() < sorted_vertices[i].get_z():
vertices_num = i
else:
right = i + 1
vertices_num = right - 1
while left < vertices_num:
i = (left + vertices_num) // 2
if vertex.get_z() > sorted_vertices[i].get_z():
left = i + 1
else:
vertices_num = i
if left == right-1:
return left
else:
for idx in range(left, right):
if sorted_vertices[idx] == vertex:
return idx
| [
11748,
22950,
201,
198,
6738,
5485,
306,
13,
469,
15748,
1330,
12280,
14520,
11,
15237,
34220,
14520,
11,
6252,
201,
198,
6738,
5485,
306,
13,
2840,
1330,
555,
560,
62,
24592,
201,
198,
201,
198,
201,
198,
4299,
1333,
648,
5039,
7,
... | 2.209936 | 3,744 |
import os
import random
import string
import subprocess as sp
import sys
import time
import uuid
from scripts.util import timestamp, makedirs, indent
from scripts.globals import god
# FIXME: add support for SLURM
# FIXME: add starcluster cluster name argument (SGE)
# FIXME: proc constraints for SGE submissions?
# TODO: add timeout for hung jobs?
# fully testing job wrapper interface would require some docker finesse -
# not sure how easy it is to spin up a simple starcluster, LSF, or SLURM system, since
# they usually involve multiple nodes
| [
11748,
28686,
198,
11748,
4738,
198,
11748,
4731,
198,
11748,
850,
14681,
355,
599,
198,
11748,
25064,
198,
11748,
640,
198,
11748,
334,
27112,
198,
198,
6738,
14750,
13,
22602,
1330,
41033,
11,
285,
4335,
17062,
11,
33793,
198,
6738,
1... | 3.666667 | 153 |
import bpy
bl_info ={
"name": "Camera",
"author" : "Kayla Man",
"version" : (1,0),
"blender" : (2,91,0),
"location" : " ",
"description" : "creating cameras in Blender",
"warning": "",
"wiki_url": "",
"category": "Camera"
}
import bpy
from bpy.props import PointerProperty, BoolProperty
if __name__ == "__main__":
register()
| [
11748,
275,
9078,
201,
198,
2436,
62,
10951,
796,
90,
201,
198,
220,
220,
220,
366,
3672,
1298,
366,
35632,
1600,
201,
198,
220,
220,
220,
366,
9800,
1,
1058,
366,
37247,
5031,
1869,
1600,
201,
198,
220,
220,
220,
366,
9641,
1,
10... | 2.055556 | 198 |
import sys
import numpy as np
import keras.backend as K
from keras.callbacks import Callback
from keras.models import Model, Sequential
| [
11748,
25064,
201,
198,
11748,
299,
32152,
355,
45941,
201,
198,
11748,
41927,
292,
13,
1891,
437,
355,
509,
201,
198,
6738,
41927,
292,
13,
13345,
10146,
1330,
4889,
1891,
201,
198,
6738,
41927,
292,
13,
27530,
1330,
9104,
11,
24604,
... | 3.108696 | 46 |
from rest_framework.response import Response
from django.core.exceptions import PermissionDenied
from django.utils.translation import ugettext as _
from misago.acl import add_acl
from misago.categories import THREADS_ROOT_NAME
from misago.categories.models import Category
from misago.threads.permissions import can_start_thread
from misago.threads.threadtypes import trees_map
| [
6738,
1334,
62,
30604,
13,
26209,
1330,
18261,
198,
198,
6738,
42625,
14208,
13,
7295,
13,
1069,
11755,
1330,
2448,
3411,
21306,
798,
198,
6738,
42625,
14208,
13,
26791,
13,
41519,
1330,
334,
1136,
5239,
355,
4808,
198,
198,
6738,
2984,... | 3.59434 | 106 |
import pigpio
import time
INTERVAL = 0.1
pi = pigpio.pi()
h = pi.spi_open(0, 1000000, 0)
try:
while True:
print(read_adc_ch0(pi, h) * 3.3)
time.sleep(INTERVAL)
except KeyboardInterrupt:
pass
pi.spi_close(h)
pi.stop()
| [
11748,
12967,
79,
952,
198,
11748,
640,
198,
198,
41358,
23428,
796,
657,
13,
16,
628,
198,
198,
14415,
796,
12967,
79,
952,
13,
14415,
3419,
198,
71,
796,
31028,
13,
2777,
72,
62,
9654,
7,
15,
11,
1802,
2388,
11,
657,
8,
198,
1... | 1.991935 | 124 |
# Imports from 3rd party libraries
import dash
import dash_bootstrap_components as dbc
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
from joblib import load
import pandas as pd
# Imports from this application
from app import app
#Pipeline
pipeline = load('assets/pipeline.joblib')
# 2 column layout. 1st column width = 4/12
# https://dash-bootstrap-components.opensource.faculty.ai/l/components/layout
column1 = dbc.Col(
[
dcc.Markdown(
"""
## Predictions
Select a few options that may apply to the shoes.
The average shoe price is the starting point.
*You will be able to make up shoes(Open toed boot.)
"""
),
dcc.Dropdown(
id='brand',
options = [
{'label': 'Brinley Co.', 'value': 'co.'},
{'label': 'Propet', 'value': 'propet'},
{'label': 'SAS', 'value': 'sas'},
{'label': 'Trotters', 'value': 'trotters'},
{'label': 'Pleaser', 'value': 'pleaser'},
{'label': 'Soda', 'value': 'soda'},
{'label': 'Spring Step', 'value': 'spring'},
{'label': 'Aerosoles', 'value': 'aerosoles'},
{'label': 'Softwalk', 'value': 'softwalk'},
{'label': "L'Artiste", 'value': "l'artiste"},
{'label': 'Ellie Shoes', 'value': 'ellie'},
{'label': 'Drew', 'value': 'drew'},
{'label': 'Steve Madden', 'value': 'madden'},
{'label': "New Balance", 'value': "new"},
{'label': "Toms", 'value': "tom"},
{'label': "Other", 'value': "other"},
],
placeholder="Select a Brand",
value = 'Brand',
className='mb-2',
),
html.Div([
dcc.Markdown("Shoe discontinued?"),
dcc.RadioItems(
id='available',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("On Sale?"),
dcc.RadioItems(
id='has_sale',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Purchased Online?"),
dcc.RadioItems(
id='online',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Was shipping free?"),
dcc.RadioItems(
id='free_shipping',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
],
)
column2 = dbc.Col(
[
html.Div([
dcc.Markdown("Does shoe have heel?"),
dcc.RadioItems(
id='has_heel',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does shoe look like a boot?"),
dcc.RadioItems(
id='is_boot',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Is the bottom flat?"),
dcc.RadioItems(
id='is_flat',
options=[
{'label': 'Yes', 'value': '1'},
{'label': 'No', 'value': '0'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Do the toes show?"),
dcc.RadioItems(
id='open_toe',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does the shoe cut off at the ankle?"),
dcc.RadioItems(
id='ankle_height',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does shoe have accessories?(i.e. straps/lace)"),
dcc.RadioItems(
id='accessories',
options=[
{'label': 'Yes', 'value': '0'},
{'label': 'No', 'value': '1'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
html.Div([
dcc.Markdown("Does box/tag have a description?"),
dcc.RadioItems(
id='has_description',
options=[
{'label': 'Yes', 'value': '1'},
{'label': 'No', 'value': '0'}],
value='1',
labelStyle={'display': 'inline-block'}
)
]),
]
)
column3 = dbc.Col(
[
html.H2('Estimated Price of Shoes', className='mb-5'),
html.Div(id='prediction-content', className='lead')
]
)
@app.callback(
Output('prediction-content', 'children'),[
Input('brand', 'value'),
Input('available', 'value'),
Input('has_sale', 'value'),
Input('online', 'value'),
Input('free_shipping', 'value'),
Input('has_heel', 'value'),
Input('is_boot', 'value'),
Input('is_flat', 'value'),
Input('open_toe', 'value'),
Input('ankle_height', 'value'),
Input('accessories', 'value'),
Input('has_description', 'value'),
],
)
layout = dbc.Row([column1, column2, column3])
| [
2,
1846,
3742,
422,
513,
4372,
2151,
12782,
198,
11748,
14470,
198,
11748,
14470,
62,
18769,
26418,
62,
5589,
3906,
355,
288,
15630,
198,
11748,
14470,
62,
7295,
62,
5589,
3906,
355,
288,
535,
198,
11748,
14470,
62,
6494,
62,
5589,
39... | 1.714427 | 4,041 |
import click
from parsec.cli import pass_context, json_loads
from parsec.decorators import custom_exception, list_output
@click.command('get_ftp_files')
@click.option(
"--deleted",
help="Whether to include deleted files",
is_flag=True
)
@pass_context
@custom_exception
@list_output
def cli(ctx, deleted=False):
"""Get a list of local files.
Output:
A list of dicts with details on individual files on FTP
"""
return ctx.gi.ftpfiles.get_ftp_files(deleted=deleted)
| [
11748,
3904,
198,
6738,
1582,
2363,
13,
44506,
1330,
1208,
62,
22866,
11,
33918,
62,
46030,
198,
6738,
1582,
2363,
13,
12501,
273,
2024,
1330,
2183,
62,
1069,
4516,
11,
1351,
62,
22915,
628,
198,
31,
12976,
13,
21812,
10786,
1136,
62,... | 2.79661 | 177 |
import os
from installed_clients.KBaseReportClient import KBaseReport
from installed_clients.DataFileUtilClient import DataFileUtil
| [
11748,
28686,
198,
198,
6738,
6589,
62,
565,
2334,
13,
42,
14881,
19100,
11792,
1330,
14204,
589,
19100,
198,
6738,
6589,
62,
565,
2334,
13,
6601,
8979,
18274,
346,
11792,
1330,
6060,
8979,
18274,
346,
628
] | 3.722222 | 36 |
import re
from pathlib import Path
from tempfile import TemporaryDirectory
from typing import Dict, List
from ..models import RegistryEndpointModel, RepoModel
DOCKER_LOGIN: str = (
"echo ${SCCI_TARGET_REGISTRY_PASSWORD} | "
"docker login ${SCCI_TARGET_REGISTRY_ADDRESS} --username ${SCCI_TARGET_REGISTRY_USER} --password-stdin"
)
CommandList = List[str]
COMMANDS_BUILD: CommandList = [
"git clone ${SCCI_REPO} ${SCCI_CLONE_DIR}",
"cd ${SCCI_CLONE_DIR}",
"ooil compose",
"docker-compose build",
DOCKER_LOGIN,
"docker tag ${SCCI_IMAGE_NAME}:${SCCI_TAG} ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG}",
"docker push ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG}",
]
COMMANDS_TEST_BASE: CommandList = [
"git clone ${SCCI_REPO} ${SCCI_CLONE_DIR}",
"cd ${SCCI_CLONE_DIR}",
DOCKER_LOGIN,
"docker pull ${SCCI_CI_IMAGE_NAME}:${SCCI_TAG}",
# if user defines extra commands those will be append here
]
COMMANDS_PUSH: CommandList = [
DOCKER_LOGIN,
"docker pull ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG}",
"docker tag ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_TEST_IMAGE}:${SCCI_TAG} ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_RELEASE_IMAGE}:${SCCI_TAG}",
"docker push ${SCCI_TARGET_REGISTRY_ADDRESS}/${SCCI_RELEASE_IMAGE}:${SCCI_TAG}",
]
def validate_commands_list(
commands_list: CommandList, env_vars: Dict[str, str]
) -> None:
"""validation is run at runtime before assembling the gitlab ci spec"""
for command in commands_list:
hits = re.findall(r"\$\{(.*?)\}", command)
for hit in hits:
if hit.startswith("SCCI") and hit not in env_vars:
raise ValueError(
f"env var '{hit}'\ndefined in '{command}'\n "
f"not found default injected env vars '{env_vars}'"
)
| [
11748,
302,
198,
6738,
3108,
8019,
1330,
10644,
198,
6738,
20218,
7753,
1330,
46042,
43055,
198,
6738,
19720,
1330,
360,
713,
11,
7343,
198,
198,
6738,
11485,
27530,
1330,
33432,
12915,
4122,
17633,
11,
1432,
78,
17633,
198,
198,
35,
11... | 2.134463 | 885 |
# -*- coding: utf-8 -*-
# ----------------------------------------------------------------------
# noc.core.text legacy wrappers
# flake8: noqa
# ----------------------------------------------------------------------
# Copyright (C) 2007-2019 The NOC Project
# See LICENSE for details
# ----------------------------------------------------------------------
# Python modules
import warnings
# NOC modules
from noc.core.text import (
parse_table,
strip_html_tags,
xml_to_table,
list_to_ranges,
ranges_to_list,
replace_re_group,
indent,
split_alnum,
find_indented,
parse_kv,
str_dict,
quote_safe_path,
to_seconds,
format_table,
clean_number,
safe_shadow,
ch_escape,
tsv_escape,
parse_table_header,
)
from noc.core.deprecations import RemovedInNOC1905Warning
warnings.warn(
"noc.lib.text is deprecated and will be removed in NOC 19.5. "
"Please replace imports to noc.core.text",
RemovedInNOC1905Warning,
stacklevel=2,
)
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
2,
16529,
23031,
198,
2,
299,
420,
13,
7295,
13,
5239,
10655,
7917,
11799,
198,
2,
781,
539,
23,
25,
645,
20402,
198,
2,
16529,
23031,
198,
2,
15069,
357,
34,
8,
4... | 2.902579 | 349 |
# -*- coding: utf-8 -*-
import json
from xml.etree import ElementTree
import utils
from cloud import Cloud
def toDateTime(c):
"""
FIXME
"""
return None
class TypeAbstractPlatformObject(TypeabstractPlatformObjectBase):
"""
Format: attributes
@id: string
@systemId: string
@label: string
@detail: string
@restUrl: string
"""
class TypeDataItemCollection(TypeabstractPlatformObjectBase):
"""
Format:
@dataItem: list of DataItemReference
"""
class TypeAbstractSearchCriteria(TypeabstractPlatformObjectBase):
"""
Format:
attributes
@pageSize: int
Specify the number of entries per page of the results, if not specified defaults to MAX_PAGE_SIZE
@pageNumber: int
Specify which page of the results to return, if not specified defaults to DEFAULT_PAGE.
Using the pageNumber pagination property affects which found object
is returned by the findOne method. For example, pageNumber=1 returns the
first matching found object, while pageNumber=3 returns the 3rd matching
found object, etc.
@sortAscending: boolean
@sortPropertyName: string
"""
class TypeDataItemCriteria(TypeAbstractSearchCriteria):
"""
Format:
@name: string
@alias: string
@modelId: string
Model system id.
@types: list of DataItemType ("ANALOG" / "DIGITAL" / "STRING")
@readOnly: boolean
@visible: boolean
@forwarded: boolean
@historicalOnly: boolean
e.g, "name"
"""
class TypeHistoricalDataItemValueCriteria(TypeAbstractSearchCriteria):
"""
Format:
@assetId: string
@dataItemIds: list
@ item: string
@startDate: dateTime
@endDate: dateTime
"""
class CurrentDataItemValueCriteria(dict):
"""
Format:
@name: string
@alias: string
@assetId: string
Asset system id.
@types: list
@readOnly: boolean
@visible: boolean
@forwarded: boolean
@historicalOnly: boolean
@pageSize: int
@pageNumber: int
Using the pageNumber pagination property affects which found object
is returned by the findOne method. For example, pageNumber=1 returns the
first matching found object, while pageNumber=3 returns the 3rd matching
found object, etc.
@sortAscending: bool
@sortPropertyName: string
e.g, "name"
"""
class Axeda(Cloud):
"""
Axeda platform REST APIs
https://<host>/artisan/apidocs/v1/
https://<host>/artisan/apidocs/v2/
"""
class Auth(Axeda):
"""
API
https://<host>/services/v1/rest/Auth?_wadl
"""
def login(self, username = None, password = None, timeout = 1800):
"""
Creates a new session (sessionId) for the related authenticated user.
Note that when Axeda Platform creates a session for a user, a timeout is
defined for that session. The session will be valid only while the session
is effective; if the session times out, additional calls to the Web services
will return “access defined” errors. Your code should implement error
handling to ensure the session is still valid.
"""
if not username:
username = self.username
if not password:
password = self.password
if not timeout:
timeout = self.timeout
url = self.url_prefix + 'login?principal.username=' + username + \
'&password=' + password + '&sessionTimeout=' + str(timeout)
if self.json:
headers = { 'Accept': 'application/json' }
else:
headers = None
r = utils.get(url, headers = headers, ssl = self.ssl)
if r.status_code != 200:
return False
if self.json:
self.session_id = str(json.loads(r.content)['wsSessionInfo']['sessionId'])
else:
self.session_id = str(utils.parse_xml(r.content, 'sessionId', self.name_space))
if self.session_id:
return True
else:
return False
def logout(self, sessionid = None):
"""
Ends the session for the related user. Invalidates the specified SessionId
such that it can no longer be used.
"""
if not self.session_id and not sessionid:
return False
url = self.url_prefix + 'logout?sessionid='
if sessionid:
url += sessionid
else:
url += self.session_id
r = utils.get(url, ssl = self.ssl)
if r.status_code != 204:
return False
else:
self.session_id = None
return True
class Scripto(Axeda):
"""
API
https://<host>/services/v1/rest/Scripto/?_wadl
"""
class Asset(Axeda):
"""
Asset Object APIs
https://<host>/services/v2/rest/asset?_wadl
"""
def findOne(self, s):
"""
Finds the first Asset that meets the specified criteria.
"""
self.checkParameter((s,))
if not isinstance(s, TypeAssetCriteria):
assert(False)
url = self.setURL("findOne")
headers = self.setHeaders(json = True)
# FIXME: either mode doesn't working with Axeda but Mashery.
if True:
payload = s.toJson()
else:
if True:
payload = \
'''<?xml version="1.0" encoding="UTF-8"?><AssetCriteria xmlns="http://www.axeda.com/services/v2"><modelNumber>''' + c["modelNumber"] + '''</modelNumber><serialNumber>''' + c["serialNumber"] + '''</serialNumber></AssetCriteria>'''
else: # also work with Mashery
payload = \
'''<v2:AssetCriteria sortAscending="true" sortPropertyName="name" xmlns:v2="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"><v2:modelNumber>''' + c["modelNumber"] + '''</v2:modelNumber><v2:serialNumber>''' + c["serialNumber"] + '''</v2:serialNumber></v2:AssetCriteria>'''
r = self.postRequest(url, headers, payload)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def findByIds(self, asset_id, fast = True):
"""
Finds the specified SDK objects based on the ids provided, and returns an
unordered list of found objects. This method will accept only platform ids,
and does not support aternate ids.
"""
self.checkParameter((asset_id,))
if fast:
url = self.setURL("id/" + str(asset_id))
headers = self.setHeaders(json = False)
r = self.getRequest(url, headers)
else:
url = self.setURL('findByIds')
headers = self.setHeaders(json = False)
if False:
payload = json.dumps({
"id": asset_id
})
else:
payload = \
'''<IdCollection xmlns="http://www.axeda.com/services/v2">
<id>''' + str(asset_id) + '''</id></IdCollection>'''
r = self.postRequest('findByIds', headers, payload)
if r is not None and r.status_code == 200:
return r.content
else:
return None
class DataItem(Axeda):
"""
API
https://<host>/services/v2/rest/dataItem?_wadl
"""
def create(self, name, model_name, type, alias = ""):
"""
Creates a new Data Item.
@name:
@model:
@type:
DIGITAL
ANALOG
STRING
"""
self.checkParameter((name, model_name, type))
if type not in ("DIGITAL", "ANALOG", "STRING"):
assert(False)
url = self.setURL("")
headers = self.setHeaders(json = False)
if False:
payload = json.dumps({
"name": "valvrave_test_string",
"type": "STRING",
})
else:
payload = \
'''<v2:DataItem xmlns:v2="http://www.axeda.com/services/v2">
<v2:name>''' + name + '''</v2:name>
<ns85:model id="''' + model_name + '''" xsi:type="ns85:ModelReference" xmlns:ns85="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/>
<v2:type>''' + type + '''</v2:type>
</v2:DataItem>'''
r = self.putRequest(url, headers, payload)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def save(self, name, model_name, type, alias = ""):
"""
Save a new Data Item.
Note:
This same REST call as a create() invokes a Save operation.
@name:
@model:
@type:
DIGITAL
ANALOG
STRING
Return value:
{
"successful":true,
"totalCount":1,
"succeeded":[
{
"ref":"es2015-Galileo-Gen2||valvrave_test_string3",
"id":"427"
}],
"failures":[]
}
"""
self.checkParameter((name, model_name, type,))
if type not in ("DIGITAL", "ANALOG", "STRING"):
assert(False)
url = self.setURL("")
headers = self.setHeaders(json = False)
if False:
payload = json.dumps({
"name": "valvrave_test_string",
"id": "es2015-Galileo-Gen2||valvrave_test_string",
"type": "STRING",
})
else:
payload = \
'''<v2:DataItem xmlns:v2="http://www.axeda.com/services/v2">
<v2:name>''' + name + '''</v2:name>
<ns85:model id="''' + model_name + '''" xsi:type="ns85:ModelReference" xmlns:ns85="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/>
<v2:type>''' + type + '''</v2:type>
</v2:DataItem>'''
r = self.postRequest(url, headers, payload)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def update(self, dataitem_id, name, model_name, type, alias = ""):
"""
Updates an existing Data Item.
"""
self.checkParameter((dataitem_id, name, model_name, type))
if type not in ("DIGITAL", "ANALOG", "STRING"):
assert(False)
url = self.setURL("id/" + str(dataitem_id))
headers = self.setHeaders(json = False)
if False:
payload = json.dumps(
{
"name": name,
"model": [{
"objType": "ModelReference", "id": model
}],
"type": type
})
else:
payload = \
'''<v2:DataItem xmlns:v2="http://www.axeda.com/services/v2" systemId="''' + str(dataitem_id) + '''">
<v2:name>''' + name + '''</v2:name>
<ns85:model id="''' + model_name + '''" xsi:type="ns85:ModelReference" xmlns:ns85="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"/>
<v2:type>''' + type + '''</v2:type>
<v2:alias>''' + alias + '''</v2:alias>
</v2:DataItem>'''
r = self.postRequest(url, headers, payload)
if r is not None and r.status_code == 200:
return r.content
else:
return None
def delete(self, dataitem_id):
"""
Deletes a data item.
"""
self.checkParameter((dataitem_id,))
url = self.setURL("id/" + str(dataitem_id))
headers = self.setHeaders(json = False)
r = self.deleteRequest(url, headers)
if r is not None and r.status_code == 200:
#return TypeExecutionResult(json.loads(r.content))
return json.loads(r.content)
else:
return None
def find(self, **s):
"""
Finds Data Items based on search criteria.
@criteria: a complete criteria is defined as:
alias: (null)
modelId: (null)
types: ([])
readOnly": (null)
visible": (null)
forwarded: (null)
historicalOnly: (null)
pageSize": (null)
pageNumber: (null)
sortAscending: (null)
sortPropertyName: (null)
"""
url = self.setURL("find")
headers = self.setHeaders(json = True)
if True:
payload = json.dumps(s)
else:
payload = None
r = self.postRequest(url, headers, payload)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def findOne(self, **s):
"""
Returns the first Data Item found that meets specified search criteria.
@criteria: See the class Criteria.
Note:
Essentially this API equals to find() with pageNumber=1.
"""
url = self.setURL("findOne")
headers = self.setHeaders(json = True)
if True:
payload = json.dumps(s)
else:
payload = None
r = self.postRequest(url, headers, payload)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def findByIds(self, dataitem_ids):
"""
Finds the specified SDK objects based on the ids provided, and returns an
unordered list of found objects. This method will accept only platform ids,
and does not support aternate ids.
"""
self.checkParameter((dataitem_ids,))
url = self.setURL("findByIds")
headers = self.setHeaders(json = False)
if False:
payload = json.dumps({
"id": dataitem_id
})
# FIXME: which one
payload = json.dumps({
"dataItem": [{
"systemId": str(dataitem_id)
}]
})
else:
payload = '''<IdCollection xmlns="http://www.axeda.com/services/v2">'''
for i in dataitem_ids:
payload += '<id>' + str(i) + '</id>'
payload += '</IdCollection>'
r = self.postRequest(url, headers, payload)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def findById(self, dataitem_id):
"""
Finds an Data Item based on its platform identifier.
@dataitem_id
"""
self.checkParameter((dataitem_id,))
url = self.setURL("id/" + str(dataitem_id))
headers = self.setHeaders(json = False)
r = self.getRequest(url, headers)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def findByAlternateId(self, name, model_name):
"""
Finds a Data Item based on the alternate identifier.
@alternate_id
The alternate ID of a Data Item takes the following format:
ModelNumber||dataItemName
"""
self.checkParameter((name, model_name))
alternate_id = model_name + "||" + name
url = self.setURL(alternate_id)
headers = self.setHeaders(json = False)
r = self.getRequest(url, headers)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def findCurrentValues(self, criteria):
"""
Returns the current values of the specified Data Items.
Note:
For the findCurrentValues method, the assetId input field is required.
"""
self.checkParameter((criteria,))
c = CurrentDataItemValueCriteria(criteria)
url = self.setURL("findCurrentValues")
headers = self.setHeaders(json = True)
if True:
payload = json.dumps(c)
else:
payload = \
'''<v2:CurrentDataItemValueCriteria sortAscending="true" sortPropertyName="name" xmlns:v2="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<v2:assetId>''' + str(criteria['asset_id']) + '''</v2:assetId></v2:CurrentDataItemValueCriteria>'''
r = self.postRequest(url, headers, payload)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def getSourceDataItems(self, id):
"""
Returns the source Data Items associated with the specified source Data Item.
"""
self.checkParameter((id,))
url = self.setURL("id/" + str(id) + "/sourceDataItems")
headers = self.setHeaders(json = False)
r = self.getRequest(url, headers)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def getTargetDataItems(self, id):
"""
Returns the source Data Items associated with the specified target Data Item.
"""
self.checkParameter((id,))
url = self.setURL("id/" + str(id) + "/targetDataItems")
headers = self.setHeaders(json = False)
r = self.getRequest(url, headers)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def findHistoricalValues(self, **p):
"""
Returns the historical values of the specified Data Items.
"""
url = self.setURL("findHistoricalValues")
headers = self.setHeaders(json = True)
if True:
payload = json.dumps(p)
else:
payload = \
'''<v2:CurrentDataItemValueCriteria sortAscending="true" sortPropertyName="name" xmlns:v2="http://www.axeda.com/services/v2" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
<v2:assetId>''' + str(criteria['asset_id']) + '''</v2:assetId></v2:CurrentDataItemValueCriteria>'''
r = self.postRequest(url, headers, payload)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
def bulkCreate(self, c):
"""
Creates a new Data Item.
@name:
@model:
@type:
DIGITAL
ANALOG
STRING
"""
self.checkParameter((c,))
TypeDataItemCollection(c)
url = self.setURL("bulk/create")
headers = self.setHeaders(json = False)
if False:
payload = json.dumps({
"name": name,
"type": type,
"model": {
"id": id,
},
})
else:
payload = \
'''<DataItemCollection xmlns="http://www.axeda.com/services/v2">
<dataItem xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:type="DataItem">
<name>''' + name + '''</name><model id="''' + model + '''"/><type>''' + type + '''</type>
</dataItem></DataItemCollection>'''
r = self.putRequest(url, headers, payload)
if r is not None and r.status_code == 200:
return json.loads(r.content)
else:
return None
| [
2,
532,
9,
12,
19617,
25,
3384,
69,
12,
23,
532,
9,
12,
198,
198,
11748,
33918,
198,
6738,
35555,
13,
316,
631,
1330,
11703,
27660,
198,
11748,
3384,
4487,
198,
6738,
6279,
1330,
10130,
198,
198,
4299,
284,
10430,
7575,
7,
66,
259... | 2.539484 | 6,319 |
import nuke
| [
11748,
299,
4649,
198
] | 3 | 4 |
#!/usr/bin/env python
"""
Unit tests for Jet Stream delphixpy
"""
import sys
import unittest
import js_branch
import js_container
import js_template
from lib.GetSession import GetSession
class JetStreamBranchTests(unittest.TestCase):
"""
Creates, activates, lists destroys JS Branches
Requirements: Parent VDB named jst3, and child VDB named jst3_cld.
Change template_db and database_name to reflect values in your environment.
"""
@classmethod
@classmethod
# Run the test case
if __name__ == "__main__":
unittest.main(buffer=True)
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
198,
37811,
198,
26453,
5254,
329,
19013,
13860,
1619,
746,
844,
9078,
198,
37811,
198,
198,
11748,
25064,
198,
11748,
555,
715,
395,
198,
198,
11748,
44804,
62,
1671,
3702,
198,
11748,
... | 2.994792 | 192 |
from typing import Union
import numpy as np
import pytest
from numpy.testing import assert_array_almost_equal
from docknet.function.activation_function import relu, sigmoid, tanh
sigmoid_test_cases = [
(np.array([-100., 0., 100]), np.array([0., 0.5, 1.])),
(-100., 0.),
(0., 0.5),
(100., 1.),
(np.array([0.]), np.array([0.5])),
]
@pytest.mark.parametrize("x, expected", sigmoid_test_cases)
relu_test_cases = [
(-1., 0.),
(0., 0.),
(1., 1.),
(5., 5.),
(np.array(0.), np.array(0.)),
(np.array([-1., 0., 1., 5.]), np.array([0., 0., 1., 5.]))
]
@pytest.mark.parametrize("x, expected", relu_test_cases)
tanh_test_cases = [
(-100., -1.),
(0., 0.),
(100., 1.),
(np.array(0.), np.array(0.)),
(np.array([-100., 0., 100.]), np.array([-1., 0., 1.]))
]
@pytest.mark.parametrize("x, expected", tanh_test_cases)
| [
6738,
19720,
1330,
4479,
198,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
9288,
198,
6738,
299,
32152,
13,
33407,
1330,
6818,
62,
18747,
62,
28177,
62,
40496,
198,
198,
6738,
23423,
3262,
13,
8818,
13,
48545,
62,
8818,
1330,
... | 2.184539 | 401 |
from __future__ import print_function
import argparse
import keras
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers import Dense, Dropout, Flatten
from keras.layers import Conv2D, MaxPooling2D
from keras import backend as K
import math
import tensorflow as tf
from kungfu import current_cluster_size, current_rank
from kungfu.tensorflow.initializer import BroadcastGlobalVariablesCallback
from kungfu.tensorflow.optimizers import (SynchronousAveragingOptimizer,
SynchronousSGDOptimizer,
PairAveragingOptimizer)
parser = argparse.ArgumentParser(description='Keras MNIST example.')
parser.add_argument('--kf-optimizer',
type=str,
default='sync-sgd',
help='kungfu optimizer')
args = parser.parse_args()
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
K.set_session(tf.Session(config=config))
batch_size = 128
num_classes = 10
# KungFu: adjust number of epochs based on number of GPUs.
epochs = int(math.ceil(4.0 / current_cluster_size()))
# Input image dimensions
img_rows, img_cols = 28, 28
# The data, shuffled and split between train and test sets
(x_train, y_train), (x_test, y_test) = mnist.load_data()
if K.image_data_format() == 'channels_first':
x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)
x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)
input_shape = (1, img_rows, img_cols)
else:
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
input_shape = (img_rows, img_cols, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(
Conv2D(32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(Conv2D(64, (3, 3), activation='relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(128, activation='relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes, activation='softmax'))
# KungFu: adjust learning rate based on number of GPUs.
opt = keras.optimizers.Adadelta(1.0 * current_cluster_size())
# KungFu: wrap distributed optimizers.
if args.kf_optimizer == 'sync-sgd':
opt = SynchronousSGDOptimizer(opt, with_keras=True)
elif args.kf_optimizer == 'async-sgd':
opt = PairAveragingOptimizer(opt, with_keras=True)
elif args.kf_optimizer == 'sma':
opt = SynchronousAveragingOptimizer(opt, with_keras=True)
else:
raise RuntimeError('unknown optimizer: %s' % name)
model.compile(loss=keras.losses.categorical_crossentropy,
optimizer=opt,
metrics=['accuracy'])
callbacks = [BroadcastGlobalVariablesCallback(with_keras=True)]
# KungFu: save checkpoints only on worker 0 to prevent other workers from corrupting them.
if current_rank() == 0:
callbacks.append(
keras.callbacks.ModelCheckpoint('./checkpoint-{epoch}.h5'))
model.fit(x_train,
y_train,
batch_size=batch_size,
callbacks=callbacks,
epochs=epochs,
verbose=1 if current_rank() == 0 else 0,
validation_data=(x_test, y_test))
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
| [
6738,
11593,
37443,
834,
1330,
3601,
62,
8818,
198,
11748,
1822,
29572,
198,
11748,
41927,
292,
198,
6738,
41927,
292,
13,
19608,
292,
1039,
1330,
285,
77,
396,
198,
6738,
41927,
292,
13,
27530,
1330,
24604,
1843,
198,
6738,
41927,
292,... | 2.42801 | 1,528 |
try:
from parser.benchmark import Benchmark
except:
from benchmark import Benchmark
import json
import collections
def merge_dict(to_update: collections.defaultdict(list),
other: collections.defaultdict(list)):
"""
Merge benchmarks dictionnaries together
>>> from collections import defaultdict
>>> a = defaultdict(list)
>>> b = defaultdict(list)
>>> a[1] = [1,2]
>>> a[2] = [3,4]
>>> b[1] = [3,4]
>>> b[2] = [1,2]
>>> b[3] = [5,6]
>>> merge_dict(a,b)
defaultdict(<class 'list'>, {1: [1, 2, 3, 4], 2: [3, 4, 1, 2], 3: [5, 6]})
"""
for k,v in other.items():
if k in to_update.keys():
to_update[k] += v
else:
to_update[k] = v
return to_update
| [
28311,
25,
198,
220,
220,
220,
422,
30751,
13,
26968,
4102,
1330,
25187,
4102,
198,
16341,
25,
198,
220,
220,
220,
422,
18335,
1330,
25187,
4102,
198,
198,
11748,
33918,
198,
11748,
17268,
628,
198,
198,
4299,
20121,
62,
11600,
7,
146... | 2.184659 | 352 |
# Functions for generating user input events.
# We would like to use QTest for this purpose, but it seems to be broken.
# See: http://stackoverflow.com/questions/16299779/qt-qgraphicsview-unit-testing-how-to-keep-the-mouse-in-a-pressed-state
from ..Qt import QtCore, QtGui, QT_LIB
| [
198,
2,
40480,
329,
15453,
2836,
5128,
2995,
13,
220,
198,
2,
775,
561,
588,
284,
779,
1195,
14402,
329,
428,
4007,
11,
475,
340,
2331,
284,
307,
5445,
13,
198,
2,
4091,
25,
2638,
1378,
25558,
2502,
11125,
13,
785,
14,
6138,
507,
... | 2.854369 | 103 |
from django.shortcuts import render_to_response
from django.template import RequestContext
| [
6738,
42625,
14208,
13,
19509,
23779,
1330,
8543,
62,
1462,
62,
26209,
198,
6738,
42625,
14208,
13,
28243,
1330,
19390,
21947,
628,
198
] | 4.043478 | 23 |
# Copyright 2016 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module to create or update sample toolchain.bazelrc file."""
import os
from string import Template
from util import get_git_root
TPL = os.path.join(get_git_root(), "release", "toolchain.bazelrc.tpl")
def update_toolchain_bazelrc_file(container_configs_list, bazel_version):
"""Creates/updates toolchain.bazelrc file.
Example toolchain.bazelrc file can be found at
configs/ubuntu16_04_clang/1.0/toolchain.bazelrc.
There is one toolchain.bazelrc file per container per config version.
If the file already exists in this repo, the script will delete it and
generate new one.
Args:
container_configs_list: list of ContainerConfigs, the list of
ContainerConfigs to generate configs for.
bazel_version: string, the version of Bazel used to generate the configs.
"""
for container_configs in container_configs_list:
with open(container_configs.get_toolchain_bazelrc_path(),
"w") as toolchain_bazelrc_file:
# Create or update toolchain.bazelrc file.
with open(TPL, "r") as tpl_file:
tpl = Template(tpl_file.read()).substitute(
CONFIG_VERSION=container_configs.version,
BAZEL_VERSION=bazel_version,
PACKAGE=container_configs.package,
PLATFORM=container_configs.platform_target,
)
toolchain_bazelrc_file.write(tpl)
| [
2,
15069,
1584,
383,
347,
41319,
46665,
13,
1439,
2489,
10395,
13,
198,
2,
198,
2,
49962,
739,
262,
24843,
13789,
11,
10628,
362,
13,
15,
357,
1169,
366,
34156,
15341,
198,
2,
345,
743,
407,
779,
428,
2393,
2845,
287,
11846,
351,
... | 2.959153 | 661 |
from fabric.state import env
from fabtools.require.files import (directory, put)
from fabtools.utils import (run_as_root)
from fabric.api import (cd, run)
#alias fabric's env for simple unit-testing of the rainbow api
fabric_env = env
| [
6738,
9664,
13,
5219,
1330,
17365,
198,
6738,
7843,
31391,
13,
46115,
13,
16624,
1330,
357,
34945,
11,
1234,
8,
198,
6738,
7843,
31391,
13,
26791,
1330,
357,
5143,
62,
292,
62,
15763,
8,
198,
6738,
9664,
13,
15042,
1330,
357,
10210,
... | 3.434783 | 69 |
from django.db.models.signals import post_save
| [
6738,
42625,
14208,
13,
9945,
13,
27530,
13,
12683,
874,
1330,
1281,
62,
21928,
628
] | 3.2 | 15 |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.distributions.categorical import Categorical
from torch.distributions.one_hot_categorical import OneHotCategorical
from torch.distributions.relaxed_categorical import RelaxedOneHotCategorical
from torch.distributions.normal import Normal
from torch.distributions.kl import kl_divergence
from models.agent import Agent
import models.encoder as enc
import models.decoder as dec
from models.answerer import FCNet, SimpleClassifier
import models.context_coder as ctx
from misc.vector_quantizer import VectorQuantizerEMA, VectorQuantizer
from misc import utilities as utils
from misc.utilities import gumbel_softmax
import pdb
| [
11748,
28034,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
6738,
28034,
13,
17080,
2455,
507,
13,
66,
2397,
12409,
1330,
327,
2397,
12409,
198,
6738,
28034,
13,
17080,
2455,
507,
... | 3.555556 | 198 |
import rmf_adapter.geometry as geometry
# CIRCLE ======================================================================
# Check construction
| [
11748,
42721,
69,
62,
324,
3429,
13,
469,
15748,
355,
22939,
628,
198,
2,
327,
4663,
29931,
38093,
1421,
28,
198,
2,
6822,
5103,
198
] | 5.72 | 25 |
#!/usr/bin/env python
'''
Created on 23/02/2016
@author: Paul Wang (ppswang@gmail.com)
Generate example data set from CML samples
- need to combine from multiple samples as a way to annonymise patient ID
'''
import os
import HTSeq
import vcf
import sys
# ==============================
# ==============================
BAMDIR = "/data/sacgf/molpath/data/aligned/CML_WES/bam_files"
BAM_FILES = {}
bam_idx = 0
for f in os.listdir(BAMDIR):
if f.endswith(".bam"):
BAM_FILES[bam_idx] = f
bam_idx += 1
# number of variants to get:
VAR_N = 300
VCF_FILE = "1KG_1500_exon_variants_noX.vcf"
invcf = vcf.Reader(filename=VCF_FILE)
SAM1 = "sample1.sam"
SAM2 = "sample2.sam"
HEADER = """@HD VN:1.4 GO:none SO:coordinate
@SQ SN:1 LN:249250621
@SQ SN:2 LN:243199373
@SQ SN:3 LN:198022430
@SQ SN:4 LN:191154276
@SQ SN:5 LN:180915260
@SQ SN:6 LN:171115067
@SQ SN:7 LN:159138663
@SQ SN:8 LN:146364022
@SQ SN:9 LN:141213431
@SQ SN:10 LN:135534747
@SQ SN:11 LN:135006516
@SQ SN:12 LN:133851895
@SQ SN:13 LN:115169878
@SQ SN:14 LN:107349540
@SQ SN:15 LN:102531392
@SQ SN:16 LN:90354753
@SQ SN:17 LN:81195210
@SQ SN:18 LN:78077248
@SQ SN:19 LN:59128983
@SQ SN:20 LN:63025520
@SQ SN:21 LN:48129895
@SQ SN:22 LN:51304566
@SQ SN:X LN:155270560
@SQ SN:Y LN:59373566
@SQ SN:MT LN:16569
@SQ SN:GL000207.1 LN:4262
@SQ SN:GL000226.1 LN:15008
@SQ SN:GL000229.1 LN:19913
@SQ SN:GL000231.1 LN:27386
@SQ SN:GL000210.1 LN:27682
@SQ SN:GL000239.1 LN:33824
@SQ SN:GL000235.1 LN:34474
@SQ SN:GL000201.1 LN:36148
@SQ SN:GL000247.1 LN:36422
@SQ SN:GL000245.1 LN:36651
@SQ SN:GL000197.1 LN:37175
@SQ SN:GL000203.1 LN:37498
@SQ SN:GL000246.1 LN:38154
@SQ SN:GL000249.1 LN:38502
@SQ SN:GL000196.1 LN:38914
@SQ SN:GL000248.1 LN:39786
@SQ SN:GL000244.1 LN:39929
@SQ SN:GL000238.1 LN:39939
@SQ SN:GL000202.1 LN:40103
@SQ SN:GL000234.1 LN:40531
@SQ SN:GL000232.1 LN:40652
@SQ SN:GL000206.1 LN:41001
@SQ SN:GL000240.1 LN:41933
@SQ SN:GL000236.1 LN:41934
@SQ SN:GL000241.1 LN:42152
@SQ SN:GL000243.1 LN:43341
@SQ SN:GL000242.1 LN:43523
@SQ SN:GL000230.1 LN:43691
@SQ SN:GL000237.1 LN:45867
@SQ SN:GL000233.1 LN:45941
@SQ SN:GL000204.1 LN:81310
@SQ SN:GL000198.1 LN:90085
@SQ SN:GL000208.1 LN:92689
@SQ SN:GL000191.1 LN:106433
@SQ SN:GL000227.1 LN:128374
@SQ SN:GL000228.1 LN:129120
@SQ SN:GL000214.1 LN:137718
@SQ SN:GL000221.1 LN:155397
@SQ SN:GL000209.1 LN:159169
@SQ SN:GL000218.1 LN:161147
@SQ SN:GL000220.1 LN:161802
@SQ SN:GL000213.1 LN:164239
@SQ SN:GL000211.1 LN:166566
@SQ SN:GL000199.1 LN:169874
@SQ SN:GL000217.1 LN:172149
@SQ SN:GL000216.1 LN:172294
@SQ SN:GL000215.1 LN:172545
@SQ SN:GL000205.1 LN:174588
@SQ SN:GL000219.1 LN:179198
@SQ SN:GL000224.1 LN:179693
@SQ SN:GL000223.1 LN:180455
@SQ SN:GL000195.1 LN:182896
@SQ SN:GL000212.1 LN:186858
@SQ SN:GL000222.1 LN:186861
@SQ SN:GL000200.1 LN:187035
@SQ SN:GL000193.1 LN:189789
@SQ SN:GL000194.1 LN:191469
@SQ SN:GL000225.1 LN:211173
@SQ SN:GL000192.1 LN:547496
@SQ SN:NC_007605 LN:171823
"""
# write header
sam1_out = open(SAM1, "w")
sam2_out = open(SAM2, "w")
sam1_out.write(HEADER)
sam1_out.write("@RG\tID:sample1\tSM:sample1\tPL:Illumina\n")
sam2_out.write(HEADER)
sam2_out.write("@RG\tID:sample2\tSM:sample2\tPL:Illumina\n")
sample_idx = 0
sample_N = len(BAM_FILES)
var_ct = 0
for var in invcf:
# write SAM1
print var.CHROM, var.POS, var.REF, var.ALT
SAM1_done = False
SAM2_done = False
inbam1 = HTSeq.BAM_Reader(os.path.join(BAMDIR, BAM_FILES[sample_idx]))
sample_idx += 1
sample_idx = sample_idx % sample_N
inbam2 = HTSeq.BAM_Reader(os.path.join(BAMDIR, BAM_FILES[sample_idx]))
sample_idx += 1
sample_idx = sample_idx % sample_N
SAM1_reads = []
SAM2_reads = []
for read in inbam1.fetch(region="%s:%d-%d" % (var.CHROM, var.POS, var.POS)):
if read.pcr_or_optical_duplicate:
continue
if read.proper_pair == False:
continue
SAM1_reads.append(read)
for read in inbam2.fetch(region="%s:%d-%d" % (var.CHROM, var.POS, var.POS)):
if read.pcr_or_optical_duplicate:
continue
if read.proper_pair == False:
continue
SAM2_reads.append(read)
# don't write anything if neither samples are sufficiently covered
if len(SAM1_reads) < 10 or len(SAM2_reads) < 10:
continue
if len(SAM1_reads) > 100 or len(SAM2_reads) > 100:
continue
print "sample 1 writing %d reads" % len(SAM1_reads)
for ct, read in enumerate(SAM1_reads):
# print "%d/%d" % (ct+1, len(SAM1_reads))
sys.stdout.write("\b\b\b\b\b\b\b\b\b\b\b\b%d/%d" % (ct+1, len(SAM1_reads)))
sys.stdout.flush()
# need to replace read group
sam1_out.write(change_RG(read, "sample1") + "\n")
# bits = read.get_sam_line().split("\t")
# for i in range(len(bits)):
# if bits[i].startswith("RG:"):
# bits[i] = "RG:Z:sample1"
# sam1_out.write("\t".join(bits) + "\n")
# get paired mate
mate_pos = read.mate_start
mate_found = False
for read2 in inbam1.fetch(region="%s:%d-%d" % (mate_pos.chrom, mate_pos.pos+1, mate_pos.pos+1)):
if read2.read.name == read.read.name:
mate_found = True
sam1_out.write(change_RG(read2, "sample1") + "\n")
break
if mate_found == False:
print "ERROR: Cannot find mate read"
exit(1)
print "\b\b\b\b\b\b\b\b\b\b\b\bsample 2 writing %d reads" % len(SAM2_reads)
for ct, read in enumerate(SAM2_reads):
# print "%d/%d" % (ct+1, len(SAM2_reads))
sys.stdout.write("\b\b\b\b\b\b\b\b\b\b\b\b%d/%d" % (ct+1, len(SAM2_reads)))
sys.stdout.flush()
# need to replace read group
sam2_out.write(change_RG(read, "sample2") + "\n")
# get paired mate
mate_pos = read.mate_start
mate_found = False
for read2 in inbam2.fetch(region="%s:%d-%d" % (mate_pos.chrom, mate_pos.pos+1, mate_pos.pos+1)):
if read2.read.name == read.read.name:
mate_found = True
sam2_out.write(change_RG(read2, "sample2") + "\n")
break
if mate_found == False:
print "ERROR: Cannot find mate read"
exit(1)
var_ct += 1
print "\b\b\b\b\b\b\b\b\b\b\b\bwrote %d sites" % var_ct
if var_ct >= VAR_N:
break
sam1_out.close()
sam2_out.close()
os.system("samtools view -bS sample1.sam | samtools sort - > sample1.bam")
os.system("samtools index sample1.bam")
os.system("samtools view -bS sample2.sam | samtools sort - > sample2.bam")
os.system("samtools index sample2.bam")
| [
2,
48443,
14629,
14,
8800,
14,
24330,
21015,
198,
7061,
6,
198,
41972,
319,
2242,
14,
2999,
14,
5304,
198,
198,
31,
9800,
25,
3362,
15233,
357,
381,
2032,
648,
31,
14816,
13,
785,
8,
198,
198,
8645,
378,
1672,
1366,
900,
422,
327,... | 1.800217 | 3,689 |
import datetime
import random
import pytest
from flask import url_for
from .create_event_data import flip, fake, create_multiple_events, event_object_factory, \
create_multiple_assets, create_multiple_teams, create_events_assets, create_events_teams, \
create_events_persons, create_events_participants, create_event_images
from .models import Event, EventPerson, EventAsset, EventParticipant, \
EventTeam
from ..assets.models import Asset
from ..images.create_image_data import create_test_images, create_images_events
from ..images.models import Image, ImageEvent
from ..people.models import Person
from ..people.test_people import create_multiple_people
from ..places.models import Country
from ..places.test_places import create_multiple_locations, create_multiple_addresses, create_multiple_areas
from ..teams.models import Team
# ---- Event
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
# ---- Linking tables (asset <-> event)
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
# ---- Linking tables (event <-> team)
@pytest.mark.smoke
@pytest.mark.smoke
# ---- Linking tables (event <-> person)
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
# ---- Linking tables (event <-> participant)
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
@pytest.mark.smoke
| [
11748,
4818,
8079,
198,
11748,
4738,
198,
198,
11748,
12972,
9288,
198,
6738,
42903,
1330,
19016,
62,
1640,
198,
198,
6738,
764,
17953,
62,
15596,
62,
7890,
1330,
14283,
11,
8390,
11,
2251,
62,
48101,
62,
31534,
11,
1785,
62,
15252,
6... | 2.794702 | 604 |
import wave
import numpy as np
import pygame as pg
pg.init()
screen_size = [1000,800]
screen = pg.display.set_mode(screen_size)
wav = wave.open("/home/anjaro/Android/Projects/AnimaWar/android/assets/Sounds/FireBig.wav", mode="r")
nchannels,sampwidth,framerate,nframes,comptype,compname = wav.getparams()
content = wav.readframes(nframes)
types = {
1: np.int8,
2: np.int16,
3: np.int32
}
samples = np.fromstring(content, dtype=types[sampwidth])
#samples = samples[np.where(samples!=0)]
print(len(samples))
isRun = True
while isRun:
ss = []
for e in pg.event.get():
if e.type == pg.QUIT:
isRun = False
elif e.type == pg.KEYDOWN:
if e.key == pg.K_ESCAPE:
isRun = False
screen.fill((250,250,250))
nn = samples.max()
mm = samples.min()
if abs(mm)>abs(nn):
nn = abs(mm)
nn=(screen_size[1])/nn
j = 0
for i in range(len(samples)):
j+=screen_size[0]/len(samples)
ss.append([int(j),int(screen_size[1]//2 + i//nn)])
pg.draw.lines(screen,(30,30,30),0,ss)
pg.display.flip()
isRun = False
while True:
pass | [
11748,
6769,
198,
11748,
299,
32152,
355,
45941,
198,
11748,
12972,
6057,
355,
23241,
198,
6024,
13,
15003,
3419,
198,
9612,
62,
7857,
796,
685,
12825,
11,
7410,
60,
198,
9612,
796,
23241,
13,
13812,
13,
2617,
62,
14171,
7,
9612,
62,
... | 2.105751 | 539 |
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
from nbhelpers import nbhelpers
import boto3
import argparse
batch = boto3.client("batch")
if __name__ == "__main__":
### Command line parser
args, _ = _parse_args()
response = submit_download_data_job(
stack_name = args.stack_name,
job_name = args.job_name,
script = args.script,
cpu = args.cpu,
memory = args.memory,
download_dir = args.download_dir,
download_mode = args.download_mode
)
print(response) | [
2,
15069,
6186,
13,
785,
11,
3457,
13,
393,
663,
29116,
13,
1439,
6923,
33876,
13,
198,
2,
30628,
55,
12,
34156,
12,
33234,
7483,
25,
24843,
12,
17,
13,
15,
198,
198,
6738,
299,
65,
16794,
364,
1330,
299,
65,
16794,
364,
198,
11... | 2.453061 | 245 |
# ----------------------------------------------------------------------------
# Copyright (c) 2019--, gemelli development team.
#
# Distributed under the terms of the Modified BSD License.
#
# The full license is in the file COPYING.txt, distributed with this software.
# ----------------------------------------------------------------------------
from abc import abstractmethod
class _BaseImpute(object):
"""Base class for imputation methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def fit(self):
""" Placeholder for fit this
should be implemetned by sub-method"""
@abstractmethod
def label(self):
""" Placeholder for fit this
should be implemetned by sub-method"""
class _BaseConstruct(object):
"""Base class for transformation/norm methods.
Warning: This class should not be used directly.
Use derived classes instead.
"""
@abstractmethod
def construct(self):
"""
conditional_loading : array-like or list of array-like
The conditional loading vectors
of shape (conditions, r) if there is 1 type
of condition, and a list of such matrices if
there are more than 1 type of condition
feature_loading : array-like
The feature loading vectors
of shape (features, r)
sample_loading : array-like
The sample loading vectors
of shape (samples, r) """
| [
2,
16529,
10541,
198,
2,
15069,
357,
66,
8,
13130,
438,
11,
16840,
23225,
2478,
1074,
13,
198,
2,
198,
2,
4307,
6169,
739,
262,
2846,
286,
262,
40499,
347,
10305,
13789,
13,
198,
2,
198,
2,
383,
1336,
5964,
318,
287,
262,
2393,
... | 3.011742 | 511 |
# Generated by Django 2.1.4 on 2018-12-12 09:27
from django.conf import settings
from django.db import migrations, models
| [
2,
2980,
515,
416,
37770,
362,
13,
16,
13,
19,
319,
2864,
12,
1065,
12,
1065,
7769,
25,
1983,
198,
198,
6738,
42625,
14208,
13,
10414,
1330,
6460,
198,
6738,
42625,
14208,
13,
9945,
1330,
15720,
602,
11,
4981,
628
] | 3.1 | 40 |
import pymel.core as pm
import logging
log = logging.getLogger("ui")
| [
11748,
279,
4948,
417,
13,
7295,
355,
9114,
198,
11748,
18931,
198,
198,
6404,
796,
18931,
13,
1136,
11187,
1362,
7203,
9019,
4943,
198,
220,
220,
220,
220,
220,
220,
220,
220,
198,
220,
220,
220,
220,
220,
220,
220,
220,
220,
220,
... | 1.938776 | 49 |
import sys
import re
import math
from transformers import BertTokenizer, BertModel
import torch.nn as nn
import torch.nn.functional as F
import torch
import numpy as np
import booknlp.common.crf as crf
import booknlp.common.sequence_eval as sequence_eval
from torch.nn import CrossEntropyLoss
| [
11748,
25064,
198,
11748,
302,
198,
11748,
10688,
198,
6738,
6121,
364,
1330,
22108,
30642,
7509,
11,
22108,
17633,
220,
198,
198,
11748,
28034,
13,
20471,
355,
299,
77,
198,
11748,
28034,
13,
20471,
13,
45124,
355,
376,
198,
11748,
280... | 3.402299 | 87 |
from text2chem.chemical_structure import ChemicalStructure
from text2chem.core.default_processing import DefaultProcessing
from text2chem.chemical_data import list_of_elements, name2element, diatomic_molecules
| [
6738,
2420,
17,
15245,
13,
31379,
62,
301,
5620,
1330,
24872,
1273,
5620,
198,
6738,
2420,
17,
15245,
13,
7295,
13,
12286,
62,
36948,
1330,
15161,
18709,
278,
198,
6738,
2420,
17,
15245,
13,
31379,
62,
7890,
1330,
1351,
62,
1659,
62,
... | 3.719298 | 57 |