Dataset columns:

column      dtype          stats
file_name   large_string   lengths 4-140
prefix      large_string   lengths 0-39k
suffix      large_string   lengths 0-36.1k
middle      large_string   lengths 0-29.4k
fim_type    large_string   4 classes
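Each record is a fill-in-the-middle (FIM) sample: a source file split into a prefix, a masked middle, and a suffix, with fim_type labeling what kind of span was masked (the rows below use identifier_name and identifier_body). Concatenating the three spans reproduces the original file. A minimal Python sketch of that reassembly; the row dict here is a hypothetical stand-in, not an actual record from this dataset:

def reassemble(row: dict) -> str:
    # The original file is simply prefix + middle + suffix.
    return row["prefix"] + row["middle"] + row["suffix"]

# Hypothetical miniature record in the same shape as the rows below.
row = {
    "file_name": "variables.go",
    "prefix": "func (e *Variables) ",
    "middle": "OverrideConfiguration",                      # the masked span
    "suffix": "(serviceConfig interface{}) (int, error) { ... }",
    "fim_type": "identifier_name",                          # category of the mask
}

assert row["middle"] in reassemble(row)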
variables.go
/******************************************************************************* * Copyright 2019 Dell Inc. * Copyright 2020 Intel Inc. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except * in compliance with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed under the License * is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express * or implied. See the License for the specific language governing permissions and limitations under * the License. *******************************************************************************/ package environment import ( "fmt" "os" "reflect" "strconv" "strings" "github.com/edgexfoundry/go-mod-core-contracts/v2/clients/logger" "github.com/edgexfoundry/go-mod-core-contracts/v2/models" "github.com/edgexfoundry/go-mod-configuration/v2/pkg/types" "github.com/pelletier/go-toml" ) const ( bootTimeoutSecondsDefault = 60 bootRetrySecondsDefault = 1 defaultConfDirValue = "./res" envKeyConfigUrl = "EDGEX_CONFIGURATION_PROVIDER" envKeyUseRegistry = "EDGEX_USE_REGISTRY" envKeyStartupDuration = "EDGEX_STARTUP_DURATION" envKeyStartupInterval = "EDGEX_STARTUP_INTERVAL" envConfDir = "EDGEX_CONF_DIR" envProfile = "EDGEX_PROFILE" envFile = "EDGEX_CONFIG_FILE" tomlPathSeparator = "." tomlNameSeparator = "-" envNameSeparator = "_" ) // Variables is receiver that holds Variables variables and encapsulates toml.Tree-based configuration field // overrides. Assumes "_" embedded in Variables variable key separates sub-structs; e.g. foo_bar_baz might refer to // // type foo struct { // bar struct { // baz string // } // } type Variables struct { variables map[string]string lc logger.LoggingClient } // NewVariables constructor reads/stores os.Environ() for use by Variables receiver methods. func NewVariables(lc logger.LoggingClient) *Variables { osEnv := os.Environ() e := &Variables{ variables: make(map[string]string, len(osEnv)), lc: lc, } for _, env := range osEnv { // Can not use Split() on '=' since the value may have an '=' in it, so changed to use Index() index := strings.Index(env, "=") if index == -1 { continue } key := env[:index] value := env[index+1:] e.variables[key] = value } return e } // UseRegistry returns whether the envKeyUseRegistry key is set to true and whether the override was used func (e *Variables) UseRegistry() (bool, bool) { value := os.Getenv(envKeyUseRegistry) if len(value) == 0 { return false, false } logEnvironmentOverride(e.lc, "-r/--registry", envKeyUseRegistry, value) return value == "true", true } // OverrideConfiguration method replaces values in the configuration for matching Variables variable keys. // serviceConfig must be pointer to the service configuration. func (e *Variables)
(serviceConfig interface{}) (int, error) { var overrideCount = 0 contents, err := toml.Marshal(reflect.ValueOf(serviceConfig).Elem().Interface()) if err != nil { return 0, err } configTree, err := toml.LoadBytes(contents) if err != nil { return 0, err } // The toml.Tree API keys() only return to top level keys, rather that paths. // It is also missing a GetPaths so have to spin our own paths := e.buildPaths(configTree.ToMap()) // Now that we have all the paths in the config tree, we need to create map of corresponding override names that // could match override environment variable names. overrideNames := e.buildOverrideNames(paths) for envVar, envValue := range e.variables { path, found := overrideNames[envVar] if !found { continue } oldValue := configTree.Get(path) newValue, err := e.convertToType(oldValue, envValue) if err != nil { return 0, fmt.Errorf("environment value override failed for %s=%s: %s", envVar, envValue, err.Error()) } configTree.Set(path, newValue) overrideCount++ logEnvironmentOverride(e.lc, path, envVar, envValue) } // Put the configuration back into the services configuration struct with the overridden values err = configTree.Unmarshal(serviceConfig) if err != nil { return 0, fmt.Errorf("could not marshal toml configTree to configuration: %s", err.Error()) } return overrideCount, nil } // buildPaths create the path strings for all settings in the Config tree's key map func (e *Variables) buildPaths(keyMap map[string]interface{}) []string { var paths []string for key, item := range keyMap { if reflect.TypeOf(item).Kind() != reflect.Map { paths = append(paths, key) continue } subMap := item.(map[string]interface{}) subPaths := e.buildPaths(subMap) for _, path := range subPaths { paths = append(paths, fmt.Sprintf("%s.%s", key, path)) } } return paths } func (e *Variables) buildOverrideNames(paths []string) map[string]string { names := map[string]string{} for _, path := range paths { names[e.getOverrideNameFor(path)] = path } return names } func (_ *Variables) getOverrideNameFor(path string) string { // "." & "-" are the only special character allowed in TOML path not allowed in environment variable Name override := strings.ReplaceAll(path, tomlPathSeparator, envNameSeparator) override = strings.ReplaceAll(override, tomlNameSeparator, envNameSeparator) override = strings.ToUpper(override) return override } // OverrideConfigProviderInfo overrides the Configuration Provider ServiceConfig values // from an Variables variable value (if it exists). 
func (e *Variables) OverrideConfigProviderInfo(configProviderInfo types.ServiceConfig) (types.ServiceConfig, error) { url := os.Getenv(envKeyConfigUrl) if len(url) > 0 { logEnvironmentOverride(e.lc, "Configuration Provider Information", envKeyConfigUrl, url) if err := configProviderInfo.PopulateFromUrl(url); err != nil { return types.ServiceConfig{}, err } } return configProviderInfo, nil } // convertToType attempts to convert the string value to the specified type of the old value func (_ *Variables) convertToType(oldValue interface{}, value string) (newValue interface{}, err error) { switch oldValue.(type) { case []string: newValue = parseCommaSeparatedSlice(value) case []interface{}: newValue = parseCommaSeparatedSlice(value) case string: newValue = value case bool: newValue, err = strconv.ParseBool(value) case int: newValue, err = strconv.ParseInt(value, 10, strconv.IntSize) newValue = int(newValue.(int64)) case int8: newValue, err = strconv.ParseInt(value, 10, 8) newValue = int8(newValue.(int64)) case int16: newValue, err = strconv.ParseInt(value, 10, 16) newValue = int16(newValue.(int64)) case int32: newValue, err = strconv.ParseInt(value, 10, 32) newValue = int32(newValue.(int64)) case int64: newValue, err = strconv.ParseInt(value, 10, 64) case uint: newValue, err = strconv.ParseUint(value, 10, strconv.IntSize) newValue = uint(newValue.(uint64)) case uint8: newValue, err = strconv.ParseUint(value, 10, 8) newValue = uint8(newValue.(uint64)) case uint16: newValue, err = strconv.ParseUint(value, 10, 16) newValue = uint16(newValue.(uint64)) case uint32: newValue, err = strconv.ParseUint(value, 10, 32) newValue = uint32(newValue.(uint64)) case uint64: newValue, err = strconv.ParseUint(value, 10, 64) case float32: newValue, err = strconv.ParseFloat(value, 32) newValue = float32(newValue.(float64)) case float64: newValue, err = strconv.ParseFloat(value, 64) default: err = fmt.Errorf( "configuration type of '%s' is not supported for environment variable override", reflect.TypeOf(oldValue).String()) } return newValue, err } // StartupInfo provides the startup timer values which are applied to the StartupTimer created at boot. type StartupInfo struct { Duration int Interval int } // GetStartupInfo gets the Service StartupInfo values from an Variables variable value (if it exists) // or uses the default values. func GetStartupInfo(serviceKey string) StartupInfo { // lc hasn't be created at the time this info is needed so have to create local client. lc := logger.NewClient(serviceKey, models.InfoLog) startup := StartupInfo{ Duration: bootTimeoutSecondsDefault, Interval: bootRetrySecondsDefault, } // Get the startup timer configuration from environment, if provided. value := os.Getenv(envKeyStartupDuration) if len(value) > 0 { logEnvironmentOverride(lc, "Startup Duration", envKeyStartupDuration, value) if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 { startup.Duration = int(n) } } // Get the startup timer interval, if provided. value = os.Getenv(envKeyStartupInterval) if len(value) > 0 { logEnvironmentOverride(lc, "Startup Interval", envKeyStartupInterval, value) if n, err := strconv.ParseInt(value, 10, 0); err == nil && n > 0 { startup.Interval = int(n) } } return startup } // GetConfDir get the config directory value from an Variables variable value (if it exists) // or uses passed in value or default if previous result in blank. 
func GetConfDir(lc logger.LoggingClient, configDir string) string { envValue := os.Getenv(envConfDir) if len(envValue) > 0 { configDir = envValue logEnvironmentOverride(lc, "-c/-confdir", envConfDir, envValue) } if len(configDir) == 0 { configDir = defaultConfDirValue } return configDir } // GetProfileDir get the profile directory value from an Variables variable value (if it exists) // or uses passed in value or default if previous result in blank. func GetProfileDir(lc logger.LoggingClient, profileDir string) string { envValue := os.Getenv(envProfile) if len(envValue) > 0 { profileDir = envValue logEnvironmentOverride(lc, "-p/-profile", envProfile, envValue) } if len(profileDir) > 0 { profileDir += "/" } return profileDir } // GetConfigFileName gets the configuration filename value from an Variables variable value (if it exists) // or uses passed in value. func GetConfigFileName(lc logger.LoggingClient, configFileName string) string { envValue := os.Getenv(envFile) if len(envValue) > 0 { configFileName = envValue logEnvironmentOverride(lc, "-f/-file", envFile, envValue) } return configFileName } // parseCommaSeparatedSlice converts comma separated list to a string slice func parseCommaSeparatedSlice(value string) (values []interface{}) { // Assumption is environment variable value is comma separated // Whitespace can vary so must be trimmed out result := strings.Split(strings.TrimSpace(value), ",") for _, entry := range result { values = append(values, strings.TrimSpace(entry)) } return values } // logEnvironmentOverride logs that an option or configuration has been override by an environment variable. func logEnvironmentOverride(lc logger.LoggingClient, name string, key string, value string) { lc.Info(fmt.Sprintf("Variables override of '%s' by environment variable: %s=%s", name, key, value)) }
OverrideConfiguration
identifier_name
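The variables.go sample above implements EdgeX-style environment overrides: OverrideConfiguration marshals the service configuration into a TOML tree, enumerates every key path, and getOverrideNameFor maps each path to a candidate environment variable by replacing the TOML separators '.' and '-' with '_' and upper-casing the result. The same mapping, re-expressed as a Python sketch for illustration (the example key paths are illustrative, not taken from a specific EdgeX config file):

def override_name_for(path: str) -> str:
    # Mirror of getOverrideNameFor: '.' and '-' are the only special
    # characters allowed in a TOML path but not in an env variable name.
    return path.replace(".", "_").replace("-", "_").upper()

assert override_name_for("Service.Host") == "SERVICE_HOST"
assert override_name_for("Clients.core-data.Port") == "CLIENTS_CORE_DATA_PORT"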
utils.py
#============================================================================ # UTILS #============================================================================ # Script name: utils.py # Created on: 10/10/2012 # Author: Paula D. Paro Costa # Purpose: Collection of snnipets that may be useful # to someone that likes to play with image processing. # # Updates: # 26/10/2012 - Added 'crop' function # 11/01/2013 - Added 'addcoltofile' function # 18/04/2013 - Added 'visualcheckImageDB' function # 18/04/2013 - Added 'dumpMatrix2File' and 'loadMatrixFromFile' # 07/06/2013 - Added 'applyKernelToPoints' # 03/07/2013 - Added 'cropnscaleImageDB' function. 'crop' function modified # to reflect changes in the library procdb (shapes variable as list # of tuples) # 05/07/2013 - Added functions: 'alignPairShapes', 'RST', 'alignNImages' # 17/07/2013 - Added class 'Eigentextures' # 23/07/2013 - Changed the 'loadMatrixFromFile' function to determine automatically # the size of the matrix if not provided. # # Notice: # Copyright (C) 2013 Paula D. Paro Costa #============================================================================= import numpy as np import numpy.linalg as la import Image,ImageDraw import cv2 import os #================================================ # imdisplay # # Show OpenCV image and waits for ESC key. (CV2) #================================================ def imdisplay(cv2_im): cv2.namedWindow('show') cv2.imshow('show',cv2_im) while True: ch=0xFF & cv2.waitKey() if ch==27: break cv2.imshow('show',cv2_im) cv2.destroyWindow('show') return #================================================ # drawPointsOnImage # # Draw points on a PIL image. # im --> PIL image # x,y --> arrays of point coordinates # radius --> radius of points # zoom --> zoom of displayed image # color --> color of points #================================================ def drawPointsOnImage(im,x,y,radius=5,zoom=1,convert=True,color=(255,255,255)): if convert==True: im=im.convert('RGB') size=im.size size=int(size[0]*zoom),int(size[1]*zoom) #print size x=np.asarray(x) y=np.asarray(y) draw=ImageDraw.Draw(im) for i in range(x.shape[0]): draw.ellipse((x[i]-radius, y[i]-radius, x[i]+radius, y[i]+radius), fill=color) display=im.resize(size) return display #================================================== # centroid # # Calculates the centroid of a shape # sv --> shape vector defined by 'k' points # with coordinates x and y. # sv=[[x0,y0],[x1,y1],...,[xk,yk]] #================================================== def centroid(sv): sv=np.asarray(sv) sv_x=sv[:,0] sv_y=sv[:,1] xc=sum(sv_x)/float(len(sv_x)) yc=sum(sv_y)/float(len(sv_y)) return xc,yc #======================================================================== # alignPairShapes #======================================================================== def alignPairShapes(s1,s2,weights): """ Given two vector shapes, the function applies the minimum squared error to align s2 with s1. The implementation is based on the paper from Cootes et al., "Active Shape Models -- Their Training and Application", 1995 See Appendix A. Key arguments: s1 -- array of tuples representing the first shape vector with n landmarks [(x1,y1),(x2,y2),...,(xn,yn)] s2 -- array of tuples representing the second shape vector with n landmarks weights -- vector of n weights that control how a landmark influences the alignment (greater weight values have greater impact on the alignment). 
Outputs: The coefficients of the affine Rotation, Scaling and Translation (RST) transform ax -- s.cos(theta) ay -- s.sin(theta) tx -- translation in x ty -- translation in y """ s1=np.asarray(s1) s2=np.asarray(s2) x1k=s1[:,0] y1k=s1[:,1] x2k=s2[:,0] y2k=s2[:,1] X1=sum(x1k*weights) X2=sum(x2k*weights) Y1=sum(y1k*weights) Y2=sum(y2k*weights) Z=sum(weights*(pow(x2k,2)+pow(y2k,2))) W=sum(weights) C1=sum(weights*(x1k*x2k+y1k*y2k)) C2=sum(weights*(y1k*x2k-x1k*y2k)) a=np.asarray([[X2,-Y2,W,0],[Y2,X2,0,W],[Z,0,X2,Y2],[0,Z,-Y2,X2]]) b=np.asarray([X1,Y1,C1,C2]) x=np.linalg.solve(a,b) ax=x[0] ay=x[1] tx=x[2] ty=x[3] return ax,ay,tx,ty #=========================================================== # RST #=========================================================== def RST(s,ax,ay,tx,ty): """ Apply rotation, scale and translation to a shape vector, given the coefficients of the affine transformation matrix. Key arguments: s -- array of tuples representing the shape vector with n landmarks [(x1,y1),(x2,y2),...,(xn,yn)] The coefficients of the affine Rotation, Scaling and Translation (RST) transform: ax -- s.cos(theta) ay -- s.sin(theta) tx -- translation in x ty -- translation in y """ svRST=np.asarray(np.zeros(s.shape)) svRST[:,0]=ax*s[:,0]-ay*s[:,1]+tx svRST[:,1]=ay*s[:,0]+ax*s[:,1]+ty return svRST #======================================================================== # alignNImages #======================================================================== def alignNImages(images,shapes,weights,save_aligned_images=True): """ Aligns a set of images according to their shapes. Together with functions 'alignPairShapes' and 'RST' this function implements the shape alignment algorithm used in the Active Shape Model (ASM). For additional references see: "Active Shape Models", Cootes et al., 1995 "Active Appearance Models", Stegmann, 2000, Chapter 4, Section 4.4.2 Key arguments: images -- array of images filenames (N images) shapes -- array of shapes corresponding to each image weights -- vector of weights that control how a landmark influences the alignment (greater weight values have greater impact on the alignment). save_aligned_images -- if True, aligned images are saved in the same folder with "aligned" prefix. """ shapes=np.asarray(shapes) aligned_shapes=np.asarray(np.zeros(shapes.shape)) aligned_shapes.astype(float) print "Starting alignment of "+str(len(images))+"." # Variables initialization it=0 first=True mean_shape=shapes[0] print mean_shape.shape previous_mean_shape=np.asarray(np.zeros((shapes.shape[1],shapes.shape[2]))) ax=np.asarray(np.zeros(shapes.shape[0])) ay=np.asarray(np.zeros(shapes.shape[0])) tx=np.asarray(np.zeros(shapes.shape[0])) ty=np.asarray(np.zeros(shapes.shape[0])) # The "while" loop checks the convergence of the alignment. # The convergence is checked measuring the difference of previous mean_shape # an the last calculated mean shape. 
error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print "error = "+str(error) while (error>0.0001): print sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print 'Iteration ',it it=it+1 previous_mean_shape=np.copy(mean_shape) # Normalizing the mean shape to the first shape axm,aym,txm,tym=alignPairShapes(shapes[0],mean_shape,weights) mean_shape=RST(mean_shape,axm,aym,txm,tym) # Align all shapes to the mean shape for i in range(len(images)): #print 'Aligning shape '+str(i) ax[i],ay[i],tx[i],ty[i]=alignPairShapes(mean_shape,shapes[i],weights) aligned_shapes[i]=RST(shapes[i],ax[i],ay[i],tx[i],ty[i]) # Calculate new mean shape mean_shape=np.add.reduce(aligned_shapes)/float(aligned_shapes.shape[0]) #print mean_shape error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print "error = "+str(error) if save_aligned_images==True: for i in range(len(images)): im=cv2.imread(images[i]) dsize=(im.shape[1],im.shape[0]) T=np.asarray([[ax[i],-ay[i],tx[i]],[ay[i],ax[i],ty[i]]]) im=cv2.warpAffine(im,T,dsize) fileName, fileExtension = os.path.splitext(os.path.basename(images[i])) cv2.imwrite(fileName+'_aligned'+fileExtension,im) return mean_shape,aligned_shapes #================================================== # dist # # Calculates the euclidean distance between two points # given their coordinates (x,y) and (u,v) #================================================== def dist(x,y,u,v): dist=np.sqrt(pow((x-u),2)+pow((y-v),2)) return dist #============================================================ # nearest_point # # Given a set of points defined by the sequence of # coordinates in the vectors 'x' e 'y', the function # returns two vectors that determines the nearest point and # the calculated distance between the corresponding point # and the remaining points. # EXAMPLE: # Consider the set of points: # P1=(10,20) # P2=(11,21) # P3=(100,200) # The input to the function will be: # x=([10,11,100]) # y=([20,21,200]) # P2 is the nearest point to P1 and vice-versa. # P2 is the nearest point of P3. # So, the function will return: # indices: [1,0,1] # distances:[1.41,1.41,199.9] # #============================================================ def nearest_point(x,y): x=np.asarray(x) y=np.asarray(y) number_of_points=x.shape[0] ut1,ut2=np.triu_indices(number_of_points,1) #without the main diagonal distances=dist(x[ut1[:]],y[ut1[:]],x[ut2[:]],y[ut2[:]]) d_matrix=np.array(np.zeros((number_of_points,number_of_points))) d_matrix[ut1[:],ut2[:]]=distances[:] d_matrix[ut2[:],ut1[:]]=distances[:] d_matrix[range(number_of_points),range(number_of_points)]=np.max(distances) min_indices=np.array(np.zeros(number_of_points)) min_dist=np.array(np.zeros(number_of_points)) for i in range(number_of_points): min_indices[i]=int(np.argmin(d_matrix[i,:])) min_dist[i]=d_matrix[i,np.uint8(min_indices[i])] print min_indices[i] return min_indices,min_dist #============================================================ # reticulate # # Creates an 1 channel image/array of dimensions h x w pixels # with a reticulate that is spaced s pixels, with lines l # pixels large. 
(Background is white, net is black) #============================================================ def reticulate(h=302,w=527,s=15,l=2): ret=np.array(np.zeros((h,w))) ret=ret+255 for i in range(l): ret[:,i::s]=0 ret[i::s]=0 return ret #=================================================================== # crop # # im -> image (numpy array) # ox -> column to start crop (column included in cropped image) # oy -> row to start crop (row included in cropped image) # width -> of final image # height -> of final image #=================================================================== def crop(im,ox,oy,width,height): cropped_image=im[oy:(oy+height),ox:(ox+width)] return cropped_image #======================================================================== # addcoltofile # # filename -> the array will be added as a column to this file # a -> array (will be transformed on a 1d array) # sep -> separator string # #======================================================================== def addcoltofile(filename,a,sep): a=np.ravel(np.asarray(a)) try: f=open(filename,'r+') except IOError: try: f=open(filename,'w+r+') except IOError: print "IOError." #return line=f.readline() if line=="": # File is empty for i in range(len(a)): f.write(str(a[i])+'\n') else: EOF=False pointer_to_write=0 pointer_to_read=f.tell() new_line=line.rstrip('\n')+sep+str(a[0])+'\n' #print 'new_line= '+new_line invasion=len(new_line)-len(line) #print 'size of invasion='+str(invasion) #print 'pointer_to_write='+str(pointer_to_write) #print 'pointer_to_read='+str(pointer_to_read) buf="" for i in range(1,len(a)+1): #print EOF if EOF==False: aux=f.read(invasion) buf=buf+aux #print "Invasion read: "+str(aux) aux="" while (aux.find('\n')==-1) and (EOF==False): aux=f.read(1) buf=buf+aux #print 'updated buffer= \n'+buf if aux=="": # Reached EOF EOF=True #print 'EOF' break pointer_to_read=f.tell() f.seek(pointer_to_write) f.write(new_line) pointer_to_write=f.tell() f.seek(pointer_to_read) #print 'pointer_to_read='+str(pointer_to_read) #print 'pointer_to_write='+str(pointer_to_write) if i<(len(a)): x=buf.find('\n') line=buf[0:x+1] #print 'line= '+line new_line=line.rstrip('\n')+sep+str(a[i])+'\n' #print 'new_line= '+new_line invasion=len(new_line) #print 'size of invasion='+str(invasion) buf=buf[x+1::] #print 'buffer without line= \n'+buf else: break f.seek(pointer_to_write) if f.readline()!="": print "Attention!The provided array has less elements than\n" print "the number of lines in the file." 
f.close() return #======================================================================== # visualCheckImageDB # # imagedb -> CSV filename # imagedbtype -> 0 for a complete database (filenames+labels+shape) # 1 for the simple database (filenames+shape) # zoom -> to scale image on screen # # #======================================================================== def visualcheckImageDB(imagedb,imagedbtype=0,zoom=0.5): import procdb if imagedbtype==0: images,shape,labels=procdb.processImageDB(imagedb) else: images,shape=procdb.processImageDB2(imagedb) shape=np.asarray(shape) print shape for i in range(len(images)): im=Image.open(images[i]) im=drawPointsOnImage(im,shape[i,:,0],shape[i,:,1]) im=im.resize((int(im.size[0]*zoom+0.5),int(im.size[1]*zoom+0.5))) print images[i] im.show() raw_input('Press ENTER to proceed to next image...') return #======================================================================== # dumpMatrix2File # # matrix -> numpy 1D or 2D arrays # filename -> name of the file to be created # # #======================================================================== def dumpMatrix2File(matrix,filename): datafile=open(filename,'w') dim=len(matrix.shape) if dim==1: datafile.write(','.join(map(str,matrix))) elif dim==2: for i in range(matrix.shape[0]): datafile.write(','.join(map(str,matrix[i]))) datafile.write('\n') else: print "The matrix is not an 1D or 2D array. The matrix was not saved." datafile.close() return #======================================================================== # loadMatrixFromFile #======================================================================== def loadMatrixFromFile(filename, rows=0,cols=0,sep=','): """ Loads a numpy matrix from a text file, typically a CSV file. Key arguments: filename -- text rows -- (optional) specifies the number of rows of the matrix cols -- (optional) specifies the number of columns of the matrix sep -- separator of the columns, the default is a comma """ datafile=open(filename,'r') if rows!=0 and cols!=0: matrix=np.asarray(np.zeros((rows,cols))) for i in range(rows): aux=datafile.readline() aux=aux.split(sep) matrix[i]=np.asarray(aux,dtype=np.float64) #print 'Reading line '+str(i)+' of file '+filename return matrix else: aux=datafile.readline() aux=aux.split(sep) matrix=np.asarray([aux],dtype=np.float64) aux=datafile.readline() while aux!="": aux=aux.split(sep) #print aux matrix=np.append(matrix,np.asarray([aux],dtype=np.float64),0) aux=datafile.readline() return matrix #======================================================================== # applyKernelToPoints #======================================================================== def applyKernelToPoints(image,pts,kernel,border_type='BLACK'): """ Applies the kernel (multiply and sum the neighborhood) at the specified points of an image. Returns an array of results for each selected point. The algorithm adds a frame to the original image to calculate the result of applying the kernel to the pixels that are at the borders of the original image. Key arguments: image -- numpy array representing an image pts -- array of points [[x1,y1],[x2,y2],...] 
kernel -- numpy array with the weighting elements of the sum border_type -- BLACK (default) (added frame filled with pixels=0) WHITE (added frame filled with pixels=255) ANTIALIAS (infinite texture of replicated copies of the original image) """ pts=np.asarray(pts) image=np.asarray(image) image.shape if len(image.shape)>2: grayscale=False shaperesult=(len(pts),image.shape[2]) elif len(image.shape)==1: image=image.reshape(1,image.shape[0]) shaperesult=len(pts) grayscale=True else: grayscale=True # Kernel dimensions - they are integers krows=kernel.shape[0] kcols=kernel.shape[1] if krows%2==0: # Is even ldrows=(krows/2)-1 udrows=krows/2 else: # Is odd ldrows=krows/2 udrows=krows/2 if kcols%2==0: # Is even ldcols=(kcols/2)-1 udcols=kcols/2 else: # Is odd ldcols=kcols/2 udcols=kcols/2 #------------------------------------ # ADD FRAME TO THE ORIGINAL IMAGE #------------------------------------ dummyM=image.shape[0]+krows-1 dummyN=image.shape[1]+kcols-1 if grayscale==True: dummyimage=np.asarray(np.zeros((dummyM,dummyN))) else: dummyimage=np.asarray(np.zeros((dummyM,dummyN,image.shape[2]))) if border_type=="WHITE": dummyimage=dummyimage+255 elif border_type=="ANTIALIAS": # Fills top border dummyimage[0:ldrows,ldcols:ldcols+image.shape[1]]=image[image.shape[0]-ldrows:image.shape[0],:] # Fills bottom border dummyimage[(ldrows+image.shape[0]):,ldcols:(ldcols+image.shape[1])]=image[0:udrows,:] # Fills left border dummyimage[ldrows:ldrows+image.shape[0],0:ldcols]=image[:,image.shape[1]-ldcols:] # Fills right border dummyimage[ldrows:ldrows+image.shape[0],(ldcols+image.shape[1]):]=image[:,0:udcols] # Fills top, left corner dummyimage[0:ldrows,0:ldcols]=image[image.shape[0]-ldrows,image.shape[1]-ldcols] # Fills bottom, left corner dummyimage[(ldrows+image.shape[0]):,0:ldcols]=image[0:udrows,(image.shape[1]-ldcols):] # Fills top, right corner dummyimage[0:ldrows,(ldcols+image.shape[1]):]=image[(image.shape[0]-ldrows):,0:udcols] # Fills bottom, right corner dummyimage[(ldrows+image.shape[0]):,(ldcols+image.shape[1]):]=image[0:udrows,0:udcols] dummyimage[ldrows:ldrows+image.shape[0],ldcols:ldcols+image.shape[1]]=image result=np.asarray(np.zeros(shaperesult)) pts[:,0]=pts[:,0]+ldrows pts[:,1]=pts[:,1]+ldcols for k in range(len(pts)): total=0 for i in range(-ldrows,udrows+1): for j in range(-ldcols,udcols+1): total=total+dummyimage[i+pts[k,0],j+pts[k,1]]*kernel[i+ldrows,j+ldcols] result[k]=total return result #======================================================================== # cropnscaleImageDB #======================================================================== def cropnscaleImageDB(imagedb,newimagedb,ox,oy,width,height,scale,folder="",verbose=False): """ Applies a crop (region of interest) followed by a scale operation on a set of images listed on an image database. The feature points on the image databased are modified to reflect the operations. Key arguments: imagedb -- filename/path of the image database newimagedb -- name of the file that will be created ox -- x origin of the crop operation oy -- y origin of the crop operation width -- width of the region of interest height -- height of the region of interest scale -- used to resize the region of interest folder -- where the images are going to be saved; if not provided, a new directory is created automatically. 
verbose -- If True provides feedback about the images being processed """ import procdb import os images,shapes,labels=procdb.processImageDB(imagedb) shapes=np.asarray(shapes) #print shapes.shape if verbose==True: print str(len(images))+" images to process." suffix="_"+str(int(width*scale))+"x"+str(int(height*scale)) if folder=="": folder=str(int(width*scale))+"x"+str(int(height*scale)) if not os.path.exists(folder): os.makedirs(folder) else: if not os.path.exists(folder):os.makedirs(folder) newimagedb=open(folder+"/"+newimagedb,'w') for i in range(len(images)): im=cv2.imread(images[i]) im_cropped=crop(im,ox,oy,width,height) newheight=int(height*scale) newwidth=int(width*scale) im_resized=np.asarray(np.zeros((newheight,newwidth))) im_resized=cv2.resize(im_cropped,(newwidth,newheight),im_resized,scale,scale,cv2.INTER_AREA) fileName, fileExtension = os.path.splitext(images[i]) retval=cv2.imwrite(folder+"/"+fileName+suffix+fileExtension,im_resized) if retval==False: print "Problem to save modified image." return False shapes[i,:,0]=shapes[i,:,0]-ox shapes[i,:,1]=shapes[i,:,1]-oy shapes[i]=shapes[i]*scale newshapes='' for j in range(shapes.shape[1]): newshapes=newshapes+',('+str(shapes[i,j,0])+';'+str(shapes[i,j,1])+')' newlabels='' for k in range(len(labels[i])): newlabels=newlabels+','+str(labels[i][k]) newimagedb.write(fileName+suffix+fileExtension+newlabels+newshapes+'\n') if verbose==True: print "Image "+str(i+1)+" successfully processed." newimagedb.close() return True #================================================================================= # Eigentextures #================================================================================= class Eigentextures: ''' This class implements the principal components analysis (PCA) or the eigendecomposition of high dimensional vectors. This class was designed having in mind its use for whole images or parts of images, or simply, textures. If the images contain faces, the algorithm implemented here is equivalent to the Eigenfaces algorithm presented by Matthew Turk and Alex Pentland. The variables names correspond to the variables present in the paper "Eigenfaces for Recognition", Matthew Turk and Alex Pentland, 1991. Key arguments: trainingset - Array of rasterized textures. Each sample corresponds to a row of the array. evr - Explained variance ratio: indicates how much of the overall variance is explained by the corresponding principal component or eigenvector. numpc - This parameter is used to inform how many principal components the function should consider. ''' def __init__(self,trainingset,verbose=False): self.__verbose=verbose self.M=trainingset.shape[0] self.N=trainingset.shape[1] # STEP 1 # Gamma is the matrix which columns are the rasterized pixels of each image of # the training set Gamma=np.transpose(trainingset) # STEP 2 # Compute Psi, that is the average texture over the training set. Psi=Gamma.mean(1) self.Psi=Psi Psi=(Psi.round()).astype(np.int32) Psi=np.reshape(Psi,(Psi.shape[0],1)) # STEP 3 # Subtracts the average face from all samples, creating a zero mean # distribution Phi. self.__Phi=np.asarray(np.zeros(Gamma.shape),dtype=np.int32) self.__Phi=Gamma-Psi del Gamma del trainingset if self.__verbose==True: print "Eigentextures:\tPhi created successfully." # STEP 4 # A minor product of the covariance matrix is calculated. Phi_t=np.transpose(self.__Phi) L=np.dot(Phi_t,self.__Phi) del Phi_t L=L/self.M if self.__verbose==True: print "Eigentextures:\tMinor product generated successfully." 
# STEP 5 # Calculates the eigenvalues(w) and eigenvectors(v) of # the minor product L. self.__w,self.__v=la.eig(L) del L # STEP 6 # Order the eigenvalues and their corresponding eigenvectors # in the descending order. indices=np.argsort(self.__w) indices=indices[::-1] # descending order self.__w=self.__w[indices] self.__v=self.__v[:,indices] # Calculating the explained variance ratio. self.evr=self.__w/np.sum(self.__w) if self.__verbose==True: print "Eigentextures:\tObject created succesfully." return def getEigentextures(self,numpc="all"): # Calculates the eigenvectors of the original covariance matrix if numpc=='all': self.__u=np.asarray(np.zeros((self.N,self.M))) for col in range(self.M): if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u elif numpc>0 and numpc<=self.M: numpc=int(numpc+0.5) self.__u=np.asarray(np.zeros((self.N,numpc))) for col in range(numpc): if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u else: print "Eigentextures:\tInvalid value for numpc." return def getEigentexturesEVR(self,variance=1): # Calculates the eigenvectors of the original covariance matrix if variance>=1: self.__u=np.asarray(np.zeros((self.N,self.M))) for col in range(self.M): if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u elif variance<1 and variance>0: cols=np.where(np.cumsum(self.evr)<=variance)[0] self.__u=np.asarray(np.zeros((self.N,len(cols)))) for col in cols: if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u else: print "Eigentextures:\t Invalid explained value ratio parameter." return def saveEigentextures2File(self,filename,numpc="all"): u=self.getEigentextures(numpc) dumpMatrix2File(u,filename) return def saveEVR2File(self,filename,variance=1): u=self.getEigentexturesEVR(variance) dumpMatrix2File(u,filename) return #================================================================================= # PCA #================================================================================= class PCA: ''' This class is a simple implementation of principal components analysis (PCA) through the computation of the eigenvectors of the covariance matrix of a training set of samples. For high dimensional vectors, see the class Eigentextures. Key arguments: trainingset - Matrix of samples. Each sample corresponds to a row of the array. evr - Explained variance ratio: indicates how much of the overall variance is explained by the corresponding principal component or eigenvector. numpc - This parameter is used to inform how many principal components the function should consider. ''' def __init__(self,trainingset,verbose=False): self.__verbose=verbose self.N=trainingset.shape[0] # number of samples/trials self.M=trainingset.shape[1] # number of dimensions (size of the sample vector) # STEP 2 # Compute Psi, that is the average vector considering the training set Psi=trainingset.mean(0) self.Psi=Psi # STEP 3 # Subtracts the average from all samples, creating a zero mean # distribution Phi. Phi=np.asarray(np.zeros(trainingset.shape)) Phi=trainingset-Psi if self.__verbose==True: print "PCA:\tPhi created successfully." # STEP 4 # Computes the covariance matrix. 
# covariance=1/((N-1)*trainingset_t*trainingset) Phi_t=np.transpose(Phi) # M x N matrix covariance=(np.dot(Phi_t,Phi))/(self.N-1) self.cov=covariance self.__w,self.__v=la.eig(covariance) # The covariance is a positive semi-definite matrix # and all of its eigenvalues are positive. # However, the linalg.eig function may return small and # negative eigenvalues. Before calculating the explained variance ratio, # values below 1e-10 are made equal zero. self.__w=np.where(self.__w<=1e-10,0,self.__w) # Putting eigenvectors in the descending order of eigenvalues indices=np.argsort(self.__w) indices=indices[::-1] self.__w=self.__w[indices] self.__v=self.__v[:,indices] # Calculating the explained variance ratio. self.evr=self.__w/np.sum(self.__w) if self.__verbose==True: print "PCA:\tObject created succesfully." return def getPC(self,numpc="all"): # Calculates the eigenvectors of the original covariance matrix if numpc=='all': return self.__v elif numpc>0 and numpc<=self.N: numpc=int(numpc) return self.__v[:,0:numpc] else: print "PCA:\tInvalid value for numpc." return def getEVR(self,variance=1): # Calculates the eigenvectors of the original covariance matrix if variance>=1: return self.__v elif variance<1 and variance>0: cols=np.where(np.cumsum(self.evr)<=variance)[0] return self.__v[:,cols] else: print "PCA:\t Invalid explained variance ratio parameter." return def savePC2File(self,filename,numpc="all"):
def saveEVR2File(self,filename,variance=1): v=self.getEVR(variance) dumpMatrix2File(v,filename) return
v=self.getPC(numpc) dumpMatrix2File(v,filename) return
identifier_body
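The utils.py sample is an image-processing toolbox whose masked span is the body of PCA.savePC2File. Its PCA class follows the textbook recipe: subtract the mean, form the covariance Phi^T.Phi/(N-1), eigendecompose, sort eigenpairs by descending eigenvalue, clamp tiny negative round-off eigenvalues to zero, and expose the explained variance ratio. A standalone sketch of those steps in modern numpy; the random training set is a made-up stand-in, and eigh is substituted for the original eig since the covariance is symmetric:

import numpy as np

rng = np.random.default_rng(0)
X = rng.normal(size=(50, 5))              # N=50 samples, M=5 dimensions

Phi = X - X.mean(axis=0)                  # zero-mean distribution (Psi removed)
cov = Phi.T @ Phi / (X.shape[0] - 1)      # covariance matrix

w, v = np.linalg.eigh(cov)                # symmetric input -> real eigenpairs
order = np.argsort(w)[::-1]               # descending eigenvalues
w, v = w[order], v[:, order]

w = np.where(w <= 1e-10, 0.0, w)          # clamp round-off negatives, as PCA does
evr = w / w.sum()                         # explained variance ratio (.evr)

keep = np.searchsorted(np.cumsum(evr), 0.95) + 1   # components covering 95%
components = v[:, :keep]                  # analogous to getEVR(variance=0.95)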
utils.py
#============================================================================ # UTILS #============================================================================ # Script name: utils.py # Created on: 10/10/2012 # Author: Paula D. Paro Costa # Purpose: Collection of snnipets that may be useful # to someone that likes to play with image processing. # # Updates: # 26/10/2012 - Added 'crop' function # 11/01/2013 - Added 'addcoltofile' function # 18/04/2013 - Added 'visualcheckImageDB' function # 18/04/2013 - Added 'dumpMatrix2File' and 'loadMatrixFromFile' # 07/06/2013 - Added 'applyKernelToPoints' # 03/07/2013 - Added 'cropnscaleImageDB' function. 'crop' function modified # to reflect changes in the library procdb (shapes variable as list # of tuples) # 05/07/2013 - Added functions: 'alignPairShapes', 'RST', 'alignNImages' # 17/07/2013 - Added class 'Eigentextures' # 23/07/2013 - Changed the 'loadMatrixFromFile' function to determine automatically # the size of the matrix if not provided. # # Notice: # Copyright (C) 2013 Paula D. Paro Costa #============================================================================= import numpy as np import numpy.linalg as la import Image,ImageDraw import cv2 import os #================================================ # imdisplay # # Show OpenCV image and waits for ESC key. (CV2) #================================================ def imdisplay(cv2_im): cv2.namedWindow('show') cv2.imshow('show',cv2_im) while True: ch=0xFF & cv2.waitKey() if ch==27: break cv2.imshow('show',cv2_im) cv2.destroyWindow('show') return #================================================ # drawPointsOnImage # # Draw points on a PIL image. # im --> PIL image # x,y --> arrays of point coordinates # radius --> radius of points # zoom --> zoom of displayed image # color --> color of points #================================================ def drawPointsOnImage(im,x,y,radius=5,zoom=1,convert=True,color=(255,255,255)): if convert==True: im=im.convert('RGB') size=im.size size=int(size[0]*zoom),int(size[1]*zoom) #print size x=np.asarray(x) y=np.asarray(y) draw=ImageDraw.Draw(im) for i in range(x.shape[0]): draw.ellipse((x[i]-radius, y[i]-radius, x[i]+radius, y[i]+radius), fill=color) display=im.resize(size) return display #================================================== # centroid # # Calculates the centroid of a shape # sv --> shape vector defined by 'k' points # with coordinates x and y. # sv=[[x0,y0],[x1,y1],...,[xk,yk]] #================================================== def centroid(sv): sv=np.asarray(sv) sv_x=sv[:,0] sv_y=sv[:,1] xc=sum(sv_x)/float(len(sv_x)) yc=sum(sv_y)/float(len(sv_y)) return xc,yc #======================================================================== # alignPairShapes #======================================================================== def alignPairShapes(s1,s2,weights): """ Given two vector shapes, the function applies the minimum squared error to align s2 with s1. The implementation is based on the paper from Cootes et al., "Active Shape Models -- Their Training and Application", 1995 See Appendix A. Key arguments: s1 -- array of tuples representing the first shape vector with n landmarks [(x1,y1),(x2,y2),...,(xn,yn)] s2 -- array of tuples representing the second shape vector with n landmarks weights -- vector of n weights that control how a landmark influences the alignment (greater weight values have greater impact on the alignment). 
Outputs: The coefficients of the affine Rotation, Scaling and Translation (RST) transform ax -- s.cos(theta) ay -- s.sin(theta) tx -- translation in x ty -- translation in y """ s1=np.asarray(s1) s2=np.asarray(s2) x1k=s1[:,0] y1k=s1[:,1] x2k=s2[:,0] y2k=s2[:,1] X1=sum(x1k*weights) X2=sum(x2k*weights) Y1=sum(y1k*weights) Y2=sum(y2k*weights) Z=sum(weights*(pow(x2k,2)+pow(y2k,2))) W=sum(weights) C1=sum(weights*(x1k*x2k+y1k*y2k)) C2=sum(weights*(y1k*x2k-x1k*y2k)) a=np.asarray([[X2,-Y2,W,0],[Y2,X2,0,W],[Z,0,X2,Y2],[0,Z,-Y2,X2]]) b=np.asarray([X1,Y1,C1,C2]) x=np.linalg.solve(a,b) ax=x[0] ay=x[1] tx=x[2] ty=x[3] return ax,ay,tx,ty #=========================================================== # RST #=========================================================== def RST(s,ax,ay,tx,ty): """ Apply rotation, scale and translation to a shape vector, given the coefficients of the affine transformation matrix. Key arguments: s -- array of tuples representing the shape vector with n landmarks [(x1,y1),(x2,y2),...,(xn,yn)] The coefficients of the affine Rotation, Scaling and Translation (RST) transform: ax -- s.cos(theta) ay -- s.sin(theta) tx -- translation in x ty -- translation in y """ svRST=np.asarray(np.zeros(s.shape)) svRST[:,0]=ax*s[:,0]-ay*s[:,1]+tx svRST[:,1]=ay*s[:,0]+ax*s[:,1]+ty return svRST #======================================================================== # alignNImages #======================================================================== def alignNImages(images,shapes,weights,save_aligned_images=True): """ Aligns a set of images according to their shapes. Together with functions 'alignPairShapes' and 'RST' this function implements the shape alignment algorithm used in the Active Shape Model (ASM). For additional references see: "Active Shape Models", Cootes et al., 1995 "Active Appearance Models", Stegmann, 2000, Chapter 4, Section 4.4.2 Key arguments: images -- array of images filenames (N images) shapes -- array of shapes corresponding to each image weights -- vector of weights that control how a landmark influences the alignment (greater weight values have greater impact on the alignment). save_aligned_images -- if True, aligned images are saved in the same folder with "aligned" prefix. """ shapes=np.asarray(shapes) aligned_shapes=np.asarray(np.zeros(shapes.shape)) aligned_shapes.astype(float) print "Starting alignment of "+str(len(images))+"." # Variables initialization it=0 first=True mean_shape=shapes[0] print mean_shape.shape previous_mean_shape=np.asarray(np.zeros((shapes.shape[1],shapes.shape[2]))) ax=np.asarray(np.zeros(shapes.shape[0])) ay=np.asarray(np.zeros(shapes.shape[0])) tx=np.asarray(np.zeros(shapes.shape[0])) ty=np.asarray(np.zeros(shapes.shape[0])) # The "while" loop checks the convergence of the alignment. # The convergence is checked measuring the difference of previous mean_shape # an the last calculated mean shape. 
error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print "error = "+str(error) while (error>0.0001): print sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print 'Iteration ',it it=it+1 previous_mean_shape=np.copy(mean_shape) # Normalizing the mean shape to the first shape axm,aym,txm,tym=alignPairShapes(shapes[0],mean_shape,weights) mean_shape=RST(mean_shape,axm,aym,txm,tym) # Align all shapes to the mean shape for i in range(len(images)): #print 'Aligning shape '+str(i) ax[i],ay[i],tx[i],ty[i]=alignPairShapes(mean_shape,shapes[i],weights) aligned_shapes[i]=RST(shapes[i],ax[i],ay[i],tx[i],ty[i]) # Calculate new mean shape mean_shape=np.add.reduce(aligned_shapes)/float(aligned_shapes.shape[0]) #print mean_shape error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print "error = "+str(error) if save_aligned_images==True: for i in range(len(images)): im=cv2.imread(images[i]) dsize=(im.shape[1],im.shape[0]) T=np.asarray([[ax[i],-ay[i],tx[i]],[ay[i],ax[i],ty[i]]]) im=cv2.warpAffine(im,T,dsize) fileName, fileExtension = os.path.splitext(os.path.basename(images[i])) cv2.imwrite(fileName+'_aligned'+fileExtension,im) return mean_shape,aligned_shapes #================================================== # dist # # Calculates the euclidean distance between two points # given their coordinates (x,y) and (u,v) #================================================== def dist(x,y,u,v): dist=np.sqrt(pow((x-u),2)+pow((y-v),2)) return dist #============================================================ # nearest_point # # Given a set of points defined by the sequence of # coordinates in the vectors 'x' e 'y', the function # returns two vectors that determines the nearest point and # the calculated distance between the corresponding point # and the remaining points. # EXAMPLE: # Consider the set of points: # P1=(10,20) # P2=(11,21) # P3=(100,200) # The input to the function will be: # x=([10,11,100]) # y=([20,21,200]) # P2 is the nearest point to P1 and vice-versa. # P2 is the nearest point of P3. # So, the function will return: # indices: [1,0,1] # distances:[1.41,1.41,199.9] # #============================================================ def nearest_point(x,y): x=np.asarray(x) y=np.asarray(y) number_of_points=x.shape[0] ut1,ut2=np.triu_indices(number_of_points,1) #without the main diagonal distances=dist(x[ut1[:]],y[ut1[:]],x[ut2[:]],y[ut2[:]]) d_matrix=np.array(np.zeros((number_of_points,number_of_points))) d_matrix[ut1[:],ut2[:]]=distances[:] d_matrix[ut2[:],ut1[:]]=distances[:] d_matrix[range(number_of_points),range(number_of_points)]=np.max(distances) min_indices=np.array(np.zeros(number_of_points)) min_dist=np.array(np.zeros(number_of_points)) for i in range(number_of_points): min_indices[i]=int(np.argmin(d_matrix[i,:])) min_dist[i]=d_matrix[i,np.uint8(min_indices[i])] print min_indices[i] return min_indices,min_dist #============================================================ # reticulate # # Creates an 1 channel image/array of dimensions h x w pixels # with a reticulate that is spaced s pixels, with lines l # pixels large. 
(Background is white, net is black) #============================================================ def reticulate(h=302,w=527,s=15,l=2): ret=np.array(np.zeros((h,w))) ret=ret+255 for i in range(l): ret[:,i::s]=0 ret[i::s]=0 return ret #=================================================================== # crop # # im -> image (numpy array) # ox -> column to start crop (column included in cropped image) # oy -> row to start crop (row included in cropped image) # width -> of final image # height -> of final image #=================================================================== def crop(im,ox,oy,width,height): cropped_image=im[oy:(oy+height),ox:(ox+width)] return cropped_image #======================================================================== # addcoltofile # # filename -> the array will be added as a column to this file # a -> array (will be transformed on a 1d array) # sep -> separator string # #======================================================================== def addcoltofile(filename,a,sep): a=np.ravel(np.asarray(a)) try: f=open(filename,'r+') except IOError: try: f=open(filename,'w+r+') except IOError: print "IOError." #return line=f.readline() if line=="": # File is empty for i in range(len(a)): f.write(str(a[i])+'\n') else: EOF=False pointer_to_write=0 pointer_to_read=f.tell() new_line=line.rstrip('\n')+sep+str(a[0])+'\n' #print 'new_line= '+new_line invasion=len(new_line)-len(line) #print 'size of invasion='+str(invasion) #print 'pointer_to_write='+str(pointer_to_write) #print 'pointer_to_read='+str(pointer_to_read) buf="" for i in range(1,len(a)+1): #print EOF if EOF==False: aux=f.read(invasion) buf=buf+aux #print "Invasion read: "+str(aux) aux="" while (aux.find('\n')==-1) and (EOF==False): aux=f.read(1) buf=buf+aux #print 'updated buffer= \n'+buf if aux=="": # Reached EOF EOF=True #print 'EOF' break pointer_to_read=f.tell() f.seek(pointer_to_write) f.write(new_line) pointer_to_write=f.tell() f.seek(pointer_to_read) #print 'pointer_to_read='+str(pointer_to_read) #print 'pointer_to_write='+str(pointer_to_write) if i<(len(a)): x=buf.find('\n') line=buf[0:x+1] #print 'line= '+line new_line=line.rstrip('\n')+sep+str(a[i])+'\n' #print 'new_line= '+new_line invasion=len(new_line) #print 'size of invasion='+str(invasion) buf=buf[x+1::] #print 'buffer without line= \n'+buf else: break f.seek(pointer_to_write) if f.readline()!="": print "Attention!The provided array has less elements than\n" print "the number of lines in the file." 
f.close() return #======================================================================== # visualCheckImageDB # # imagedb -> CSV filename # imagedbtype -> 0 for a complete database (filenames+labels+shape) # 1 for the simple database (filenames+shape) # zoom -> to scale image on screen # # #======================================================================== def visualcheckImageDB(imagedb,imagedbtype=0,zoom=0.5): import procdb if imagedbtype==0: images,shape,labels=procdb.processImageDB(imagedb) else: images,shape=procdb.processImageDB2(imagedb) shape=np.asarray(shape) print shape for i in range(len(images)): im=Image.open(images[i]) im=drawPointsOnImage(im,shape[i,:,0],shape[i,:,1]) im=im.resize((int(im.size[0]*zoom+0.5),int(im.size[1]*zoom+0.5))) print images[i] im.show() raw_input('Press ENTER to proceed to next image...') return #======================================================================== # dumpMatrix2File # # matrix -> numpy 1D or 2D arrays # filename -> name of the file to be created # # #======================================================================== def dumpMatrix2File(matrix,filename): datafile=open(filename,'w') dim=len(matrix.shape) if dim==1: datafile.write(','.join(map(str,matrix))) elif dim==2: for i in range(matrix.shape[0]): datafile.write(','.join(map(str,matrix[i]))) datafile.write('\n') else: print "The matrix is not an 1D or 2D array. The matrix was not saved." datafile.close() return #======================================================================== # loadMatrixFromFile #======================================================================== def loadMatrixFromFile(filename, rows=0,cols=0,sep=','): """ Loads a numpy matrix from a text file, typically a CSV file. Key arguments: filename -- text rows -- (optional) specifies the number of rows of the matrix cols -- (optional) specifies the number of columns of the matrix sep -- separator of the columns, the default is a comma """ datafile=open(filename,'r') if rows!=0 and cols!=0: matrix=np.asarray(np.zeros((rows,cols))) for i in range(rows): aux=datafile.readline() aux=aux.split(sep) matrix[i]=np.asarray(aux,dtype=np.float64) #print 'Reading line '+str(i)+' of file '+filename return matrix else: aux=datafile.readline() aux=aux.split(sep) matrix=np.asarray([aux],dtype=np.float64) aux=datafile.readline() while aux!="": aux=aux.split(sep) #print aux matrix=np.append(matrix,np.asarray([aux],dtype=np.float64),0) aux=datafile.readline() return matrix #======================================================================== # applyKernelToPoints #======================================================================== def applyKernelToPoints(image,pts,kernel,border_type='BLACK'): """ Applies the kernel (multiply and sum the neighborhood) at the specified points of an image. Returns an array of results for each selected point. The algorithm adds a frame to the original image to calculate the result of applying the kernel to the pixels that are at the borders of the original image. Key arguments: image -- numpy array representing an image pts -- array of points [[x1,y1],[x2,y2],...] 
kernel -- numpy array with the weighting elements of the sum border_type -- BLACK (default) (added frame filled with pixels=0) WHITE (added frame filled with pixels=255) ANTIALIAS (infinite texture of replicated copies of the original image) """ pts=np.asarray(pts) image=np.asarray(image) image.shape if len(image.shape)>2: grayscale=False shaperesult=(len(pts),image.shape[2]) elif len(image.shape)==1: image=image.reshape(1,image.shape[0]) shaperesult=len(pts) grayscale=True else: grayscale=True # Kernel dimensions - they are integers krows=kernel.shape[0] kcols=kernel.shape[1] if krows%2==0: # Is even ldrows=(krows/2)-1 udrows=krows/2 else: # Is odd ldrows=krows/2 udrows=krows/2 if kcols%2==0: # Is even ldcols=(kcols/2)-1 udcols=kcols/2 else: # Is odd ldcols=kcols/2 udcols=kcols/2 #------------------------------------ # ADD FRAME TO THE ORIGINAL IMAGE #------------------------------------ dummyM=image.shape[0]+krows-1 dummyN=image.shape[1]+kcols-1 if grayscale==True: dummyimage=np.asarray(np.zeros((dummyM,dummyN))) else: dummyimage=np.asarray(np.zeros((dummyM,dummyN,image.shape[2]))) if border_type=="WHITE": dummyimage=dummyimage+255 elif border_type=="ANTIALIAS": # Fills top border dummyimage[0:ldrows,ldcols:ldcols+image.shape[1]]=image[image.shape[0]-ldrows:image.shape[0],:] # Fills bottom border dummyimage[(ldrows+image.shape[0]):,ldcols:(ldcols+image.shape[1])]=image[0:udrows,:] # Fills left border dummyimage[ldrows:ldrows+image.shape[0],0:ldcols]=image[:,image.shape[1]-ldcols:] # Fills right border dummyimage[ldrows:ldrows+image.shape[0],(ldcols+image.shape[1]):]=image[:,0:udcols] # Fills top, left corner dummyimage[0:ldrows,0:ldcols]=image[image.shape[0]-ldrows,image.shape[1]-ldcols] # Fills bottom, left corner dummyimage[(ldrows+image.shape[0]):,0:ldcols]=image[0:udrows,(image.shape[1]-ldcols):] # Fills top, right corner dummyimage[0:ldrows,(ldcols+image.shape[1]):]=image[(image.shape[0]-ldrows):,0:udcols] # Fills bottom, right corner dummyimage[(ldrows+image.shape[0]):,(ldcols+image.shape[1]):]=image[0:udrows,0:udcols] dummyimage[ldrows:ldrows+image.shape[0],ldcols:ldcols+image.shape[1]]=image result=np.asarray(np.zeros(shaperesult)) pts[:,0]=pts[:,0]+ldrows pts[:,1]=pts[:,1]+ldcols for k in range(len(pts)): total=0 for i in range(-ldrows,udrows+1): for j in range(-ldcols,udcols+1): total=total+dummyimage[i+pts[k,0],j+pts[k,1]]*kernel[i+ldrows,j+ldcols] result[k]=total return result #======================================================================== # cropnscaleImageDB #======================================================================== def cropnscaleImageDB(imagedb,newimagedb,ox,oy,width,height,scale,folder="",verbose=False): """ Applies a crop (region of interest) followed by a scale operation on a set of images listed on an image database. The feature points on the image databased are modified to reflect the operations. Key arguments: imagedb -- filename/path of the image database newimagedb -- name of the file that will be created ox -- x origin of the crop operation oy -- y origin of the crop operation width -- width of the region of interest height -- height of the region of interest scale -- used to resize the region of interest folder -- where the images are going to be saved; if not provided, a new directory is created automatically. 
verbose -- If True provides feedback about the images being processed """ import procdb import os images,shapes,labels=procdb.processImageDB(imagedb) shapes=np.asarray(shapes) #print shapes.shape if verbose==True: print str(len(images))+" images to process." suffix="_"+str(int(width*scale))+"x"+str(int(height*scale)) if folder=="": folder=str(int(width*scale))+"x"+str(int(height*scale)) if not os.path.exists(folder): os.makedirs(folder) else: if not os.path.exists(folder):os.makedirs(folder) newimagedb=open(folder+"/"+newimagedb,'w') for i in range(len(images)): im=cv2.imread(images[i]) im_cropped=crop(im,ox,oy,width,height) newheight=int(height*scale) newwidth=int(width*scale) im_resized=np.asarray(np.zeros((newheight,newwidth))) im_resized=cv2.resize(im_cropped,(newwidth,newheight),im_resized,scale,scale,cv2.INTER_AREA) fileName, fileExtension = os.path.splitext(images[i]) retval=cv2.imwrite(folder+"/"+fileName+suffix+fileExtension,im_resized) if retval==False: print "Problem to save modified image." return False shapes[i,:,0]=shapes[i,:,0]-ox shapes[i,:,1]=shapes[i,:,1]-oy shapes[i]=shapes[i]*scale newshapes='' for j in range(shapes.shape[1]): newshapes=newshapes+',('+str(shapes[i,j,0])+';'+str(shapes[i,j,1])+')' newlabels='' for k in range(len(labels[i])): newlabels=newlabels+','+str(labels[i][k]) newimagedb.write(fileName+suffix+fileExtension+newlabels+newshapes+'\n') if verbose==True: print "Image "+str(i+1)+" successfully processed." newimagedb.close() return True #================================================================================= # Eigentextures #================================================================================= class Eigentextures: ''' This class implements the principal components analysis (PCA) or the eigendecomposition of high dimensional vectors. This class was designed having in mind its use for whole images or parts of images, or simply, textures. If the images contain faces, the algorithm implemented here is equivalent to the Eigenfaces algorithm presented by Matthew Turk and Alex Pentland. The variables names correspond to the variables present in the paper "Eigenfaces for Recognition", Matthew Turk and Alex Pentland, 1991. Key arguments: trainingset - Array of rasterized textures. Each sample corresponds to a row of the array. evr - Explained variance ratio: indicates how much of the overall variance is explained by the corresponding principal component or eigenvector. numpc - This parameter is used to inform how many principal components the function should consider. ''' def __init__(self,trainingset,verbose=False): self.__verbose=verbose self.M=trainingset.shape[0] self.N=trainingset.shape[1] # STEP 1 # Gamma is the matrix which columns are the rasterized pixels of each image of # the training set Gamma=np.transpose(trainingset) # STEP 2 # Compute Psi, that is the average texture over the training set. Psi=Gamma.mean(1) self.Psi=Psi Psi=(Psi.round()).astype(np.int32) Psi=np.reshape(Psi,(Psi.shape[0],1)) # STEP 3 # Subtracts the average face from all samples, creating a zero mean # distribution Phi. self.__Phi=np.asarray(np.zeros(Gamma.shape),dtype=np.int32) self.__Phi=Gamma-Psi del Gamma del trainingset if self.__verbose==True: print "Eigentextures:\tPhi created successfully." # STEP 4 # A minor product of the covariance matrix is calculated. Phi_t=np.transpose(self.__Phi) L=np.dot(Phi_t,self.__Phi) del Phi_t L=L/self.M if self.__verbose==True: print "Eigentextures:\tMinor product generated successfully." 
# STEP 5 # Calculates the eigenvalues(w) and eigenvectors(v) of # the minor product L. self.__w,self.__v=la.eig(L) del L # STEP 6 # Order the eigenvalues and their corresponding eigenvectors # in the descending order. indices=np.argsort(self.__w) indices=indices[::-1] # descending order self.__w=self.__w[indices] self.__v=self.__v[:,indices] # Calculating the explained variance ratio. self.evr=self.__w/np.sum(self.__w) if self.__verbose==True: print "Eigentextures:\tObject created succesfully." return def getEigentextures(self,numpc="all"): # Calculates the eigenvectors of the original covariance matrix if numpc=='all': self.__u=np.asarray(np.zeros((self.N,self.M))) for col in range(self.M): if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u elif numpc>0 and numpc<=self.M: numpc=int(numpc+0.5) self.__u=np.asarray(np.zeros((self.N,numpc))) for col in range(numpc): if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u else: print "Eigentextures:\tInvalid value for numpc." return def
(self,variance=1):
        # Calculates the eigenvectors of the original covariance matrix
        if variance>=1:
            self.__u=np.asarray(np.zeros((self.N,self.M)))
            for col in range(self.M):
                if self.__verbose==True:
                    print "Calculating eigentexture "+str(col+1)
                h=np.dot(self.__Phi,self.__v[:,col])
                h=h/la.norm(h)
                self.__u[:,col]=h
            return self.__u
        elif variance<1 and variance>0:
            cols=np.where(np.cumsum(self.evr)<=variance)[0]
            self.__u=np.asarray(np.zeros((self.N,len(cols))))
            for col in cols:
                if self.__verbose==True:
                    print "Calculating eigentexture "+str(col+1)
                h=np.dot(self.__Phi,self.__v[:,col])
                h=h/la.norm(h)
                self.__u[:,col]=h
            return self.__u
        else:
            print "Eigentextures:\tInvalid explained variance ratio parameter."
            return

    def saveEigentextures2File(self,filename,numpc="all"):
        u=self.getEigentextures(numpc)
        dumpMatrix2File(u,filename)
        return

    def saveEVR2File(self,filename,variance=1):
        u=self.getEigentexturesEVR(variance)
        dumpMatrix2File(u,filename)
        return

#=================================================================================
# PCA
#=================================================================================
class PCA:
    '''
    This class is a simple implementation of principal components analysis (PCA)
    through the computation of the eigenvectors of the covariance matrix of a
    training set of samples. For high dimensional vectors, see the class
    Eigentextures.

    Key arguments:
    trainingset - Matrix of samples. Each sample corresponds to a row of the array.
    evr - Explained variance ratio: indicates how much of the overall variance
          is explained by the corresponding principal component or eigenvector.
    numpc - This parameter is used to inform how many principal components the
            function should consider.
    '''
    def __init__(self,trainingset,verbose=False):
        self.__verbose=verbose
        self.N=trainingset.shape[0]   # number of samples/trials
        self.M=trainingset.shape[1]   # number of dimensions (size of the sample vector)

        # STEP 2 (numbering kept parallel to Eigentextures; STEP 1, building
        # Gamma, is not needed here)
        # Compute Psi, that is the average vector considering the training set
        Psi=trainingset.mean(0)
        self.Psi=Psi

        # STEP 3
        # Subtracts the average from all samples, creating a zero mean
        # distribution Phi.
        Phi=np.asarray(np.zeros(trainingset.shape))
        Phi=trainingset-Psi
        if self.__verbose==True:
            print "PCA:\tPhi created successfully."

        # STEP 4
        # Computes the covariance matrix:
        # covariance = (1/(N-1)) * Phi_t * Phi
        Phi_t=np.transpose(Phi)   # M x N matrix
        covariance=(np.dot(Phi_t,Phi))/(self.N-1)
        self.cov=covariance
        self.__w,self.__v=la.eig(covariance)

        # The covariance is a positive semi-definite matrix
        # and all of its eigenvalues are non-negative.
        # However, the linalg.eig function may return small and
        # negative eigenvalues. Before calculating the explained variance ratio,
        # values below 1e-10 are set to zero.
        self.__w=np.where(self.__w<=1e-10,0,self.__w)

        # Putting eigenvectors in the descending order of eigenvalues
        indices=np.argsort(self.__w)
        indices=indices[::-1]
        self.__w=self.__w[indices]
        self.__v=self.__v[:,indices]

        # Calculating the explained variance ratio.
        self.evr=self.__w/np.sum(self.__w)
        if self.__verbose==True:
            print "PCA:\tObject created successfully."
        return

    def getPC(self,numpc="all"):
        # Calculates the eigenvectors of the original covariance matrix
        if numpc=='all':
            return self.__v
        elif numpc>0 and numpc<=self.N:
            numpc=int(numpc)
            return self.__v[:,0:numpc]
        else:
            print "PCA:\tInvalid value for numpc."
            return

    def getEVR(self,variance=1):
        # Calculates the eigenvectors of the original covariance matrix
        if variance>=1:
            return self.__v
        elif variance<1 and variance>0:
            cols=np.where(np.cumsum(self.evr)<=variance)[0]
            return self.__v[:,cols]
        else:
            print "PCA:\tInvalid explained variance ratio parameter."
            return

    def savePC2File(self,filename,numpc="all"):
        v=self.getPC(numpc)
        dumpMatrix2File(v,filename)
        return

    def saveEVR2File(self,filename,variance=1):
        v=self.getEVR(variance)
        dumpMatrix2File(v,filename)
        return
getEigentexturesEVR
identifier_name
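The component-selection rule used by getEigentexturesEVR (and by PCA.getEVR) can be exercised on its own. Below is a minimal sketch on synthetic data; `samples` and the 95% threshold are illustrative only, and np.linalg.eigh is swapped in for la.eig (a deliberate substitution, valid here because the covariance matrix is symmetric, so the eigenvalues come back real and sorted).

```python
import numpy as np

# Toy data: 50 samples of 10-dimensional vectors (illustrative only).
rng = np.random.RandomState(0)
samples = rng.rand(50, 10)

# Zero-mean the samples and eigendecompose the covariance matrix,
# mirroring the PCA class above; eigh returns ascending eigenvalues,
# so reverse both arrays to get the descending order the class uses.
phi = samples - samples.mean(0)
w, v = np.linalg.eigh(np.dot(phi.T, phi) / (samples.shape[0] - 1))
w, v = w[::-1], v[:, ::-1]

evr = w / np.sum(w)
# Same selection rule as getEVR/getEigentexturesEVR: keep the leading
# components whose cumulative explained variance stays within 95%.
cols = np.where(np.cumsum(evr) <= 0.95)[0]
print "components kept: " + str(len(cols))
```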
utils.py
#============================================================================
# UTILS
#============================================================================
# Script name: utils.py
# Created on: 10/10/2012
# Author: Paula D. Paro Costa
# Purpose: Collection of snippets that may be useful
#          to someone who likes to play with image processing.
#
# Updates:
#   26/10/2012 - Added 'crop' function
#   11/01/2013 - Added 'addcoltofile' function
#   18/04/2013 - Added 'visualcheckImageDB' function
#   18/04/2013 - Added 'dumpMatrix2File' and 'loadMatrixFromFile'
#   07/06/2013 - Added 'applyKernelToPoints'
#   03/07/2013 - Added 'cropnscaleImageDB' function. 'crop' function modified
#                to reflect changes in the library procdb (shapes variable as
#                list of tuples)
#   05/07/2013 - Added functions: 'alignPairShapes', 'RST', 'alignNImages'
#   17/07/2013 - Added class 'Eigentextures'
#   23/07/2013 - Changed the 'loadMatrixFromFile' function to determine
#                automatically the size of the matrix if not provided.
#
# Notice:
#   Copyright (C) 2013 Paula D. Paro Costa
#=============================================================================
import numpy as np
import numpy.linalg as la
import Image,ImageDraw
import cv2
import os


#================================================
# imdisplay
#
# Shows an OpenCV image and waits for the ESC key. (CV2)
#================================================
def imdisplay(cv2_im):
    cv2.namedWindow('show')
    cv2.imshow('show',cv2_im)
    while True:
        ch=0xFF & cv2.waitKey()
        if ch==27:
            break
        cv2.imshow('show',cv2_im)
    cv2.destroyWindow('show')
    return


#================================================
# drawPointsOnImage
#
# Draws points on a PIL image.
#   im     --> PIL image
#   x,y    --> arrays of point coordinates
#   radius --> radius of the points
#   zoom   --> zoom of the displayed image
#   color  --> color of the points
#================================================
def drawPointsOnImage(im,x,y,radius=5,zoom=1,convert=True,color=(255,255,255)):
    if convert==True:
        im=im.convert('RGB')
    size=im.size
    size=int(size[0]*zoom),int(size[1]*zoom)
    x=np.asarray(x)
    y=np.asarray(y)
    draw=ImageDraw.Draw(im)
    for i in range(x.shape[0]):
        draw.ellipse((x[i]-radius, y[i]-radius, x[i]+radius, y[i]+radius), fill=color)
    display=im.resize(size)
    return display


#==================================================
# centroid
#
# Calculates the centroid of a shape.
#   sv --> shape vector defined by 'k' points
#          with coordinates x and y.
#          sv=[[x0,y0],[x1,y1],...,[xk,yk]]
#==================================================
def centroid(sv):
    sv=np.asarray(sv)
    sv_x=sv[:,0]
    sv_y=sv[:,1]
    xc=sum(sv_x)/float(len(sv_x))
    yc=sum(sv_y)/float(len(sv_y))
    return xc,yc


#========================================================================
# alignPairShapes
#========================================================================
def alignPairShapes(s1,s2,weights):
    """
    Given two shape vectors, applies a minimum squared error criterion
    to align s2 with s1.
    The implementation is based on the paper from Cootes et al.,
    "Active Shape Models -- Their Training and Application", 1995,
    Appendix A.

    Key arguments:
    s1 -- array of tuples representing the first shape vector with n landmarks
          [(x1,y1),(x2,y2),...,(xn,yn)]
    s2 -- array of tuples representing the second shape vector with n landmarks
    weights -- vector of n weights that control how a landmark influences
               the alignment (greater weight values have greater impact
               on the alignment).
Outputs: The coefficients of the affine Rotation, Scaling and Translation (RST) transform ax -- s.cos(theta) ay -- s.sin(theta) tx -- translation in x ty -- translation in y """ s1=np.asarray(s1) s2=np.asarray(s2) x1k=s1[:,0] y1k=s1[:,1] x2k=s2[:,0] y2k=s2[:,1] X1=sum(x1k*weights) X2=sum(x2k*weights) Y1=sum(y1k*weights) Y2=sum(y2k*weights) Z=sum(weights*(pow(x2k,2)+pow(y2k,2))) W=sum(weights) C1=sum(weights*(x1k*x2k+y1k*y2k)) C2=sum(weights*(y1k*x2k-x1k*y2k)) a=np.asarray([[X2,-Y2,W,0],[Y2,X2,0,W],[Z,0,X2,Y2],[0,Z,-Y2,X2]]) b=np.asarray([X1,Y1,C1,C2]) x=np.linalg.solve(a,b) ax=x[0] ay=x[1] tx=x[2] ty=x[3] return ax,ay,tx,ty #=========================================================== # RST #=========================================================== def RST(s,ax,ay,tx,ty): """ Apply rotation, scale and translation to a shape vector, given the coefficients of the affine transformation matrix. Key arguments: s -- array of tuples representing the shape vector with n landmarks [(x1,y1),(x2,y2),...,(xn,yn)] The coefficients of the affine Rotation, Scaling and Translation (RST) transform: ax -- s.cos(theta) ay -- s.sin(theta) tx -- translation in x ty -- translation in y """ svRST=np.asarray(np.zeros(s.shape)) svRST[:,0]=ax*s[:,0]-ay*s[:,1]+tx svRST[:,1]=ay*s[:,0]+ax*s[:,1]+ty return svRST #======================================================================== # alignNImages #======================================================================== def alignNImages(images,shapes,weights,save_aligned_images=True): """ Aligns a set of images according to their shapes. Together with functions 'alignPairShapes' and 'RST' this function implements the shape alignment algorithm used in the Active Shape Model (ASM). For additional references see: "Active Shape Models", Cootes et al., 1995 "Active Appearance Models", Stegmann, 2000, Chapter 4, Section 4.4.2 Key arguments: images -- array of images filenames (N images) shapes -- array of shapes corresponding to each image weights -- vector of weights that control how a landmark influences the alignment (greater weight values have greater impact on the alignment). save_aligned_images -- if True, aligned images are saved in the same folder with "aligned" prefix. """ shapes=np.asarray(shapes) aligned_shapes=np.asarray(np.zeros(shapes.shape)) aligned_shapes.astype(float) print "Starting alignment of "+str(len(images))+"." # Variables initialization it=0 first=True mean_shape=shapes[0] print mean_shape.shape previous_mean_shape=np.asarray(np.zeros((shapes.shape[1],shapes.shape[2]))) ax=np.asarray(np.zeros(shapes.shape[0])) ay=np.asarray(np.zeros(shapes.shape[0])) tx=np.asarray(np.zeros(shapes.shape[0])) ty=np.asarray(np.zeros(shapes.shape[0])) # The "while" loop checks the convergence of the alignment. # The convergence is checked measuring the difference of previous mean_shape # an the last calculated mean shape. 
error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print "error = "+str(error) while (error>0.0001): print sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print 'Iteration ',it it=it+1 previous_mean_shape=np.copy(mean_shape) # Normalizing the mean shape to the first shape axm,aym,txm,tym=alignPairShapes(shapes[0],mean_shape,weights) mean_shape=RST(mean_shape,axm,aym,txm,tym) # Align all shapes to the mean shape for i in range(len(images)): #print 'Aligning shape '+str(i) ax[i],ay[i],tx[i],ty[i]=alignPairShapes(mean_shape,shapes[i],weights) aligned_shapes[i]=RST(shapes[i],ax[i],ay[i],tx[i],ty[i]) # Calculate new mean shape mean_shape=np.add.reduce(aligned_shapes)/float(aligned_shapes.shape[0]) #print mean_shape error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print "error = "+str(error) if save_aligned_images==True: for i in range(len(images)): im=cv2.imread(images[i]) dsize=(im.shape[1],im.shape[0]) T=np.asarray([[ax[i],-ay[i],tx[i]],[ay[i],ax[i],ty[i]]]) im=cv2.warpAffine(im,T,dsize) fileName, fileExtension = os.path.splitext(os.path.basename(images[i])) cv2.imwrite(fileName+'_aligned'+fileExtension,im) return mean_shape,aligned_shapes #================================================== # dist # # Calculates the euclidean distance between two points # given their coordinates (x,y) and (u,v) #================================================== def dist(x,y,u,v): dist=np.sqrt(pow((x-u),2)+pow((y-v),2)) return dist #============================================================ # nearest_point # # Given a set of points defined by the sequence of # coordinates in the vectors 'x' e 'y', the function # returns two vectors that determines the nearest point and # the calculated distance between the corresponding point # and the remaining points. # EXAMPLE: # Consider the set of points: # P1=(10,20) # P2=(11,21) # P3=(100,200) # The input to the function will be: # x=([10,11,100]) # y=([20,21,200]) # P2 is the nearest point to P1 and vice-versa. # P2 is the nearest point of P3. # So, the function will return: # indices: [1,0,1] # distances:[1.41,1.41,199.9] # #============================================================ def nearest_point(x,y): x=np.asarray(x) y=np.asarray(y) number_of_points=x.shape[0] ut1,ut2=np.triu_indices(number_of_points,1) #without the main diagonal distances=dist(x[ut1[:]],y[ut1[:]],x[ut2[:]],y[ut2[:]]) d_matrix=np.array(np.zeros((number_of_points,number_of_points))) d_matrix[ut1[:],ut2[:]]=distances[:] d_matrix[ut2[:],ut1[:]]=distances[:] d_matrix[range(number_of_points),range(number_of_points)]=np.max(distances) min_indices=np.array(np.zeros(number_of_points)) min_dist=np.array(np.zeros(number_of_points)) for i in range(number_of_points): min_indices[i]=int(np.argmin(d_matrix[i,:])) min_dist[i]=d_matrix[i,np.uint8(min_indices[i])] print min_indices[i] return min_indices,min_dist #============================================================ # reticulate # # Creates an 1 channel image/array of dimensions h x w pixels # with a reticulate that is spaced s pixels, with lines l # pixels large. 
(Background is white, net is black)
#============================================================
def reticulate(h=302,w=527,s=15,l=2):
    ret=np.array(np.zeros((h,w)))
    ret=ret+255
    for i in range(l):
        ret[:,i::s]=0
        ret[i::s]=0
    return ret


#===================================================================
# crop
#
#   im     -> image (numpy array)
#   ox     -> column to start crop (column included in cropped image)
#   oy     -> row to start crop (row included in cropped image)
#   width  -> of final image
#   height -> of final image
#===================================================================
def crop(im,ox,oy,width,height):
    cropped_image=im[oy:(oy+height),ox:(ox+width)]
    return cropped_image


#========================================================================
# addcoltofile
#
#   filename -> the array will be added as a column to this file
#   a        -> array (will be transformed into a 1d array)
#   sep      -> separator string
#
#========================================================================
def addcoltofile(filename,a,sep):
    a=np.ravel(np.asarray(a))
    try:
        f=open(filename,'r+')
    except IOError:
        try:
            # 'w+r+' is not a valid mode; 'w+' creates the file
            # for reading and writing.
            f=open(filename,'w+')
        except IOError:
            print "IOError."
            #return

    line=f.readline()
    if line=="":
        # File is empty
        for i in range(len(a)):
            f.write(str(a[i])+'\n')
    else:
        EOF=False
        pointer_to_write=0
        pointer_to_read=f.tell()
        new_line=line.rstrip('\n')+sep+str(a[0])+'\n'
        invasion=len(new_line)-len(line)
        buf=""
        for i in range(1,len(a)+1):
            if EOF==False:
                # Buffer the bytes the widened line is about to overwrite,
                # up to and including the next newline.
                aux=f.read(invasion)
                buf=buf+aux
                aux=""
                while (aux.find('\n')==-1) and (EOF==False):
                    aux=f.read(1)
                    buf=buf+aux
                    if aux=="":
                        # Reached EOF
                        EOF=True
                        break
                pointer_to_read=f.tell()
            f.seek(pointer_to_write)
            f.write(new_line)
            pointer_to_write=f.tell()
            f.seek(pointer_to_read)
            if i<(len(a)):
else: break f.seek(pointer_to_write) if f.readline()!="": print "Attention!The provided array has less elements than\n" print "the number of lines in the file." f.close() return #======================================================================== # visualCheckImageDB # # imagedb -> CSV filename # imagedbtype -> 0 for a complete database (filenames+labels+shape) # 1 for the simple database (filenames+shape) # zoom -> to scale image on screen # # #======================================================================== def visualcheckImageDB(imagedb,imagedbtype=0,zoom=0.5): import procdb if imagedbtype==0: images,shape,labels=procdb.processImageDB(imagedb) else: images,shape=procdb.processImageDB2(imagedb) shape=np.asarray(shape) print shape for i in range(len(images)): im=Image.open(images[i]) im=drawPointsOnImage(im,shape[i,:,0],shape[i,:,1]) im=im.resize((int(im.size[0]*zoom+0.5),int(im.size[1]*zoom+0.5))) print images[i] im.show() raw_input('Press ENTER to proceed to next image...') return #======================================================================== # dumpMatrix2File # # matrix -> numpy 1D or 2D arrays # filename -> name of the file to be created # # #======================================================================== def dumpMatrix2File(matrix,filename): datafile=open(filename,'w') dim=len(matrix.shape) if dim==1: datafile.write(','.join(map(str,matrix))) elif dim==2: for i in range(matrix.shape[0]): datafile.write(','.join(map(str,matrix[i]))) datafile.write('\n') else: print "The matrix is not an 1D or 2D array. The matrix was not saved." datafile.close() return #======================================================================== # loadMatrixFromFile #======================================================================== def loadMatrixFromFile(filename, rows=0,cols=0,sep=','): """ Loads a numpy matrix from a text file, typically a CSV file. Key arguments: filename -- text rows -- (optional) specifies the number of rows of the matrix cols -- (optional) specifies the number of columns of the matrix sep -- separator of the columns, the default is a comma """ datafile=open(filename,'r') if rows!=0 and cols!=0: matrix=np.asarray(np.zeros((rows,cols))) for i in range(rows): aux=datafile.readline() aux=aux.split(sep) matrix[i]=np.asarray(aux,dtype=np.float64) #print 'Reading line '+str(i)+' of file '+filename return matrix else: aux=datafile.readline() aux=aux.split(sep) matrix=np.asarray([aux],dtype=np.float64) aux=datafile.readline() while aux!="": aux=aux.split(sep) #print aux matrix=np.append(matrix,np.asarray([aux],dtype=np.float64),0) aux=datafile.readline() return matrix #======================================================================== # applyKernelToPoints #======================================================================== def applyKernelToPoints(image,pts,kernel,border_type='BLACK'): """ Applies the kernel (multiply and sum the neighborhood) at the specified points of an image. Returns an array of results for each selected point. The algorithm adds a frame to the original image to calculate the result of applying the kernel to the pixels that are at the borders of the original image. Key arguments: image -- numpy array representing an image pts -- array of points [[x1,y1],[x2,y2],...] 
kernel -- numpy array with the weighting elements of the sum border_type -- BLACK (default) (added frame filled with pixels=0) WHITE (added frame filled with pixels=255) ANTIALIAS (infinite texture of replicated copies of the original image) """ pts=np.asarray(pts) image=np.asarray(image) image.shape if len(image.shape)>2: grayscale=False shaperesult=(len(pts),image.shape[2]) elif len(image.shape)==1: image=image.reshape(1,image.shape[0]) shaperesult=len(pts) grayscale=True else: grayscale=True # Kernel dimensions - they are integers krows=kernel.shape[0] kcols=kernel.shape[1] if krows%2==0: # Is even ldrows=(krows/2)-1 udrows=krows/2 else: # Is odd ldrows=krows/2 udrows=krows/2 if kcols%2==0: # Is even ldcols=(kcols/2)-1 udcols=kcols/2 else: # Is odd ldcols=kcols/2 udcols=kcols/2 #------------------------------------ # ADD FRAME TO THE ORIGINAL IMAGE #------------------------------------ dummyM=image.shape[0]+krows-1 dummyN=image.shape[1]+kcols-1 if grayscale==True: dummyimage=np.asarray(np.zeros((dummyM,dummyN))) else: dummyimage=np.asarray(np.zeros((dummyM,dummyN,image.shape[2]))) if border_type=="WHITE": dummyimage=dummyimage+255 elif border_type=="ANTIALIAS": # Fills top border dummyimage[0:ldrows,ldcols:ldcols+image.shape[1]]=image[image.shape[0]-ldrows:image.shape[0],:] # Fills bottom border dummyimage[(ldrows+image.shape[0]):,ldcols:(ldcols+image.shape[1])]=image[0:udrows,:] # Fills left border dummyimage[ldrows:ldrows+image.shape[0],0:ldcols]=image[:,image.shape[1]-ldcols:] # Fills right border dummyimage[ldrows:ldrows+image.shape[0],(ldcols+image.shape[1]):]=image[:,0:udcols] # Fills top, left corner dummyimage[0:ldrows,0:ldcols]=image[image.shape[0]-ldrows,image.shape[1]-ldcols] # Fills bottom, left corner dummyimage[(ldrows+image.shape[0]):,0:ldcols]=image[0:udrows,(image.shape[1]-ldcols):] # Fills top, right corner dummyimage[0:ldrows,(ldcols+image.shape[1]):]=image[(image.shape[0]-ldrows):,0:udcols] # Fills bottom, right corner dummyimage[(ldrows+image.shape[0]):,(ldcols+image.shape[1]):]=image[0:udrows,0:udcols] dummyimage[ldrows:ldrows+image.shape[0],ldcols:ldcols+image.shape[1]]=image result=np.asarray(np.zeros(shaperesult)) pts[:,0]=pts[:,0]+ldrows pts[:,1]=pts[:,1]+ldcols for k in range(len(pts)): total=0 for i in range(-ldrows,udrows+1): for j in range(-ldcols,udcols+1): total=total+dummyimage[i+pts[k,0],j+pts[k,1]]*kernel[i+ldrows,j+ldcols] result[k]=total return result #======================================================================== # cropnscaleImageDB #======================================================================== def cropnscaleImageDB(imagedb,newimagedb,ox,oy,width,height,scale,folder="",verbose=False): """ Applies a crop (region of interest) followed by a scale operation on a set of images listed on an image database. The feature points on the image databased are modified to reflect the operations. Key arguments: imagedb -- filename/path of the image database newimagedb -- name of the file that will be created ox -- x origin of the crop operation oy -- y origin of the crop operation width -- width of the region of interest height -- height of the region of interest scale -- used to resize the region of interest folder -- where the images are going to be saved; if not provided, a new directory is created automatically. 
verbose -- If True provides feedback about the images being processed """ import procdb import os images,shapes,labels=procdb.processImageDB(imagedb) shapes=np.asarray(shapes) #print shapes.shape if verbose==True: print str(len(images))+" images to process." suffix="_"+str(int(width*scale))+"x"+str(int(height*scale)) if folder=="": folder=str(int(width*scale))+"x"+str(int(height*scale)) if not os.path.exists(folder): os.makedirs(folder) else: if not os.path.exists(folder):os.makedirs(folder) newimagedb=open(folder+"/"+newimagedb,'w') for i in range(len(images)): im=cv2.imread(images[i]) im_cropped=crop(im,ox,oy,width,height) newheight=int(height*scale) newwidth=int(width*scale) im_resized=np.asarray(np.zeros((newheight,newwidth))) im_resized=cv2.resize(im_cropped,(newwidth,newheight),im_resized,scale,scale,cv2.INTER_AREA) fileName, fileExtension = os.path.splitext(images[i]) retval=cv2.imwrite(folder+"/"+fileName+suffix+fileExtension,im_resized) if retval==False: print "Problem to save modified image." return False shapes[i,:,0]=shapes[i,:,0]-ox shapes[i,:,1]=shapes[i,:,1]-oy shapes[i]=shapes[i]*scale newshapes='' for j in range(shapes.shape[1]): newshapes=newshapes+',('+str(shapes[i,j,0])+';'+str(shapes[i,j,1])+')' newlabels='' for k in range(len(labels[i])): newlabels=newlabels+','+str(labels[i][k]) newimagedb.write(fileName+suffix+fileExtension+newlabels+newshapes+'\n') if verbose==True: print "Image "+str(i+1)+" successfully processed." newimagedb.close() return True #================================================================================= # Eigentextures #================================================================================= class Eigentextures: ''' This class implements the principal components analysis (PCA) or the eigendecomposition of high dimensional vectors. This class was designed having in mind its use for whole images or parts of images, or simply, textures. If the images contain faces, the algorithm implemented here is equivalent to the Eigenfaces algorithm presented by Matthew Turk and Alex Pentland. The variables names correspond to the variables present in the paper "Eigenfaces for Recognition", Matthew Turk and Alex Pentland, 1991. Key arguments: trainingset - Array of rasterized textures. Each sample corresponds to a row of the array. evr - Explained variance ratio: indicates how much of the overall variance is explained by the corresponding principal component or eigenvector. numpc - This parameter is used to inform how many principal components the function should consider. ''' def __init__(self,trainingset,verbose=False): self.__verbose=verbose self.M=trainingset.shape[0] self.N=trainingset.shape[1] # STEP 1 # Gamma is the matrix which columns are the rasterized pixels of each image of # the training set Gamma=np.transpose(trainingset) # STEP 2 # Compute Psi, that is the average texture over the training set. Psi=Gamma.mean(1) self.Psi=Psi Psi=(Psi.round()).astype(np.int32) Psi=np.reshape(Psi,(Psi.shape[0],1)) # STEP 3 # Subtracts the average face from all samples, creating a zero mean # distribution Phi. self.__Phi=np.asarray(np.zeros(Gamma.shape),dtype=np.int32) self.__Phi=Gamma-Psi del Gamma del trainingset if self.__verbose==True: print "Eigentextures:\tPhi created successfully." # STEP 4 # A minor product of the covariance matrix is calculated. Phi_t=np.transpose(self.__Phi) L=np.dot(Phi_t,self.__Phi) del Phi_t L=L/self.M if self.__verbose==True: print "Eigentextures:\tMinor product generated successfully." 
# STEP 5 # Calculates the eigenvalues(w) and eigenvectors(v) of # the minor product L. self.__w,self.__v=la.eig(L) del L # STEP 6 # Order the eigenvalues and their corresponding eigenvectors # in the descending order. indices=np.argsort(self.__w) indices=indices[::-1] # descending order self.__w=self.__w[indices] self.__v=self.__v[:,indices] # Calculating the explained variance ratio. self.evr=self.__w/np.sum(self.__w) if self.__verbose==True: print "Eigentextures:\tObject created succesfully." return def getEigentextures(self,numpc="all"): # Calculates the eigenvectors of the original covariance matrix if numpc=='all': self.__u=np.asarray(np.zeros((self.N,self.M))) for col in range(self.M): if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u elif numpc>0 and numpc<=self.M: numpc=int(numpc+0.5) self.__u=np.asarray(np.zeros((self.N,numpc))) for col in range(numpc): if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u else: print "Eigentextures:\tInvalid value for numpc." return def getEigentexturesEVR(self,variance=1): # Calculates the eigenvectors of the original covariance matrix if variance>=1: self.__u=np.asarray(np.zeros((self.N,self.M))) for col in range(self.M): if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u elif variance<1 and variance>0: cols=np.where(np.cumsum(self.evr)<=variance)[0] self.__u=np.asarray(np.zeros((self.N,len(cols)))) for col in cols: if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u else: print "Eigentextures:\t Invalid explained value ratio parameter." return def saveEigentextures2File(self,filename,numpc="all"): u=self.getEigentextures(numpc) dumpMatrix2File(u,filename) return def saveEVR2File(self,filename,variance=1): u=self.getEigentexturesEVR(variance) dumpMatrix2File(u,filename) return #================================================================================= # PCA #================================================================================= class PCA: ''' This class is a simple implementation of principal components analysis (PCA) through the computation of the eigenvectors of the covariance matrix of a training set of samples. For high dimensional vectors, see the class Eigentextures. Key arguments: trainingset - Matrix of samples. Each sample corresponds to a row of the array. evr - Explained variance ratio: indicates how much of the overall variance is explained by the corresponding principal component or eigenvector. numpc - This parameter is used to inform how many principal components the function should consider. ''' def __init__(self,trainingset,verbose=False): self.__verbose=verbose self.N=trainingset.shape[0] # number of samples/trials self.M=trainingset.shape[1] # number of dimensions (size of the sample vector) # STEP 2 # Compute Psi, that is the average vector considering the training set Psi=trainingset.mean(0) self.Psi=Psi # STEP 3 # Subtracts the average from all samples, creating a zero mean # distribution Phi. Phi=np.asarray(np.zeros(trainingset.shape)) Phi=trainingset-Psi if self.__verbose==True: print "PCA:\tPhi created successfully." # STEP 4 # Computes the covariance matrix. 
# covariance=1/((N-1)*trainingset_t*trainingset) Phi_t=np.transpose(Phi) # M x N matrix covariance=(np.dot(Phi_t,Phi))/(self.N-1) self.cov=covariance self.__w,self.__v=la.eig(covariance) # The covariance is a positive semi-definite matrix # and all of its eigenvalues are positive. # However, the linalg.eig function may return small and # negative eigenvalues. Before calculating the explained variance ratio, # values below 1e-10 are made equal zero. self.__w=np.where(self.__w<=1e-10,0,self.__w) # Putting eigenvectors in the descending order of eigenvalues indices=np.argsort(self.__w) indices=indices[::-1] self.__w=self.__w[indices] self.__v=self.__v[:,indices] # Calculating the explained variance ratio. self.evr=self.__w/np.sum(self.__w) if self.__verbose==True: print "PCA:\tObject created succesfully." return def getPC(self,numpc="all"): # Calculates the eigenvectors of the original covariance matrix if numpc=='all': return self.__v elif numpc>0 and numpc<=self.N: numpc=int(numpc) return self.__v[:,0:numpc] else: print "PCA:\tInvalid value for numpc." return def getEVR(self,variance=1): # Calculates the eigenvectors of the original covariance matrix if variance>=1: return self.__v elif variance<1 and variance>0: cols=np.where(np.cumsum(self.evr)<=variance)[0] return self.__v[:,cols] else: print "PCA:\t Invalid explained variance ratio parameter." return def savePC2File(self,filename,numpc="all"): v=self.getPC(numpc) dumpMatrix2File(v,filename) return def saveEVR2File(self,filename,variance=1): v=self.getEVR(variance) dumpMatrix2File(v,filename) return
x=buf.find('\n') line=buf[0:x+1] #print 'line= '+line new_line=line.rstrip('\n')+sep+str(a[i])+'\n' #print 'new_line= '+new_line invasion=len(new_line) #print 'size of invasion='+str(invasion) buf=buf[x+1::] #print 'buffer without line= \n'+buf
conditional_block
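The buffered in-place rewrite inside addcoltofile (whose inner conditional is this record's masked middle) is intricate: it carves the next line out of the read-ahead buffer and builds the widened replacement line. For contrast, here is a minimal sketch of the same end result using a simple read-all/write-all strategy, assuming the whole file fits in memory; `addcoltofile_simple` is a hypothetical helper for illustration, not part of utils.py.

```python
# Hypothetical, simplified variant of addcoltofile: appends one value of
# `a` as a new column to each line of `filename`, loading the file whole
# instead of rewriting it in place.
def addcoltofile_simple(filename, a, sep):
    values = [str(v) for v in a]
    try:
        with open(filename, 'r') as f:
            lines = f.read().splitlines()
    except IOError:
        lines = []
    if not lines:
        # Empty or missing file: each value becomes its own line.
        out = values
    else:
        out = [line + sep + value for line, value in zip(lines, values)]
        if len(values) < len(lines):
            # Same warning as the original function.
            print "Attention! The provided array has fewer elements " \
                  "than the number of lines in the file."
            out += lines[len(values):]
    with open(filename, 'w') as f:
        f.write('\n'.join(out) + '\n')
```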
utils.py
#============================================================================ # UTILS #============================================================================ # Script name: utils.py # Created on: 10/10/2012 # Author: Paula D. Paro Costa # Purpose: Collection of snnipets that may be useful # to someone that likes to play with image processing. # # Updates: # 26/10/2012 - Added 'crop' function # 11/01/2013 - Added 'addcoltofile' function # 18/04/2013 - Added 'visualcheckImageDB' function # 18/04/2013 - Added 'dumpMatrix2File' and 'loadMatrixFromFile' # 07/06/2013 - Added 'applyKernelToPoints' # 03/07/2013 - Added 'cropnscaleImageDB' function. 'crop' function modified # to reflect changes in the library procdb (shapes variable as list # of tuples) # 05/07/2013 - Added functions: 'alignPairShapes', 'RST', 'alignNImages' # 17/07/2013 - Added class 'Eigentextures' # 23/07/2013 - Changed the 'loadMatrixFromFile' function to determine automatically # the size of the matrix if not provided. # # Notice: # Copyright (C) 2013 Paula D. Paro Costa #============================================================================= import numpy as np import numpy.linalg as la import Image,ImageDraw import cv2 import os #================================================ # imdisplay # # Show OpenCV image and waits for ESC key. (CV2) #================================================ def imdisplay(cv2_im): cv2.namedWindow('show') cv2.imshow('show',cv2_im) while True: ch=0xFF & cv2.waitKey() if ch==27: break cv2.imshow('show',cv2_im) cv2.destroyWindow('show') return #================================================ # drawPointsOnImage # # Draw points on a PIL image. # im --> PIL image # x,y --> arrays of point coordinates # radius --> radius of points # zoom --> zoom of displayed image # color --> color of points #================================================ def drawPointsOnImage(im,x,y,radius=5,zoom=1,convert=True,color=(255,255,255)): if convert==True: im=im.convert('RGB') size=im.size size=int(size[0]*zoom),int(size[1]*zoom) #print size x=np.asarray(x) y=np.asarray(y) draw=ImageDraw.Draw(im) for i in range(x.shape[0]): draw.ellipse((x[i]-radius, y[i]-radius, x[i]+radius, y[i]+radius), fill=color) display=im.resize(size) return display #================================================== # centroid # # Calculates the centroid of a shape # sv --> shape vector defined by 'k' points # with coordinates x and y. # sv=[[x0,y0],[x1,y1],...,[xk,yk]] #================================================== def centroid(sv): sv=np.asarray(sv) sv_x=sv[:,0] sv_y=sv[:,1] xc=sum(sv_x)/float(len(sv_x)) yc=sum(sv_y)/float(len(sv_y)) return xc,yc #======================================================================== # alignPairShapes #======================================================================== def alignPairShapes(s1,s2,weights): """ Given two vector shapes, the function applies the minimum squared error to align s2 with s1. The implementation is based on the paper from Cootes et al., "Active Shape Models -- Their Training and Application", 1995 See Appendix A. Key arguments: s1 -- array of tuples representing the first shape vector with n landmarks [(x1,y1),(x2,y2),...,(xn,yn)] s2 -- array of tuples representing the second shape vector with n landmarks weights -- vector of n weights that control how a landmark influences the alignment (greater weight values have greater impact on the alignment). 
Outputs: The coefficients of the affine Rotation, Scaling and Translation (RST) transform ax -- s.cos(theta) ay -- s.sin(theta) tx -- translation in x ty -- translation in y """ s1=np.asarray(s1) s2=np.asarray(s2) x1k=s1[:,0] y1k=s1[:,1] x2k=s2[:,0] y2k=s2[:,1] X1=sum(x1k*weights) X2=sum(x2k*weights) Y1=sum(y1k*weights) Y2=sum(y2k*weights) Z=sum(weights*(pow(x2k,2)+pow(y2k,2))) W=sum(weights) C1=sum(weights*(x1k*x2k+y1k*y2k)) C2=sum(weights*(y1k*x2k-x1k*y2k)) a=np.asarray([[X2,-Y2,W,0],[Y2,X2,0,W],[Z,0,X2,Y2],[0,Z,-Y2,X2]]) b=np.asarray([X1,Y1,C1,C2]) x=np.linalg.solve(a,b) ax=x[0] ay=x[1] tx=x[2] ty=x[3] return ax,ay,tx,ty #=========================================================== # RST #=========================================================== def RST(s,ax,ay,tx,ty): """ Apply rotation, scale and translation to a shape vector, given the coefficients of the affine transformation matrix. Key arguments: s -- array of tuples representing the shape vector with n landmarks [(x1,y1),(x2,y2),...,(xn,yn)] The coefficients of the affine Rotation, Scaling and Translation (RST) transform: ax -- s.cos(theta) ay -- s.sin(theta) tx -- translation in x ty -- translation in y """ svRST=np.asarray(np.zeros(s.shape)) svRST[:,0]=ax*s[:,0]-ay*s[:,1]+tx svRST[:,1]=ay*s[:,0]+ax*s[:,1]+ty return svRST #======================================================================== # alignNImages #======================================================================== def alignNImages(images,shapes,weights,save_aligned_images=True): """ Aligns a set of images according to their shapes. Together with functions 'alignPairShapes' and 'RST' this function implements the shape alignment algorithm used in the Active Shape Model (ASM). For additional references see: "Active Shape Models", Cootes et al., 1995 "Active Appearance Models", Stegmann, 2000, Chapter 4, Section 4.4.2 Key arguments: images -- array of images filenames (N images) shapes -- array of shapes corresponding to each image weights -- vector of weights that control how a landmark influences the alignment (greater weight values have greater impact on the alignment). save_aligned_images -- if True, aligned images are saved in the same folder with "aligned" prefix. """ shapes=np.asarray(shapes) aligned_shapes=np.asarray(np.zeros(shapes.shape)) aligned_shapes.astype(float) print "Starting alignment of "+str(len(images))+"." # Variables initialization it=0 first=True mean_shape=shapes[0] print mean_shape.shape previous_mean_shape=np.asarray(np.zeros((shapes.shape[1],shapes.shape[2]))) ax=np.asarray(np.zeros(shapes.shape[0])) ay=np.asarray(np.zeros(shapes.shape[0])) tx=np.asarray(np.zeros(shapes.shape[0])) ty=np.asarray(np.zeros(shapes.shape[0])) # The "while" loop checks the convergence of the alignment. # The convergence is checked measuring the difference of previous mean_shape # an the last calculated mean shape. 
error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print "error = "+str(error) while (error>0.0001): print sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print 'Iteration ',it it=it+1 previous_mean_shape=np.copy(mean_shape) # Normalizing the mean shape to the first shape axm,aym,txm,tym=alignPairShapes(shapes[0],mean_shape,weights) mean_shape=RST(mean_shape,axm,aym,txm,tym) # Align all shapes to the mean shape for i in range(len(images)): #print 'Aligning shape '+str(i) ax[i],ay[i],tx[i],ty[i]=alignPairShapes(mean_shape,shapes[i],weights) aligned_shapes[i]=RST(shapes[i],ax[i],ay[i],tx[i],ty[i]) # Calculate new mean shape mean_shape=np.add.reduce(aligned_shapes)/float(aligned_shapes.shape[0]) #print mean_shape error=sum(abs(np.ravel(previous_mean_shape)-np.ravel(mean_shape))) print "error = "+str(error) if save_aligned_images==True: for i in range(len(images)): im=cv2.imread(images[i]) dsize=(im.shape[1],im.shape[0]) T=np.asarray([[ax[i],-ay[i],tx[i]],[ay[i],ax[i],ty[i]]]) im=cv2.warpAffine(im,T,dsize) fileName, fileExtension = os.path.splitext(os.path.basename(images[i])) cv2.imwrite(fileName+'_aligned'+fileExtension,im) return mean_shape,aligned_shapes #================================================== # dist # # Calculates the euclidean distance between two points # given their coordinates (x,y) and (u,v) #================================================== def dist(x,y,u,v): dist=np.sqrt(pow((x-u),2)+pow((y-v),2)) return dist #============================================================ # nearest_point # # Given a set of points defined by the sequence of # coordinates in the vectors 'x' e 'y', the function # returns two vectors that determines the nearest point and # the calculated distance between the corresponding point # and the remaining points. # EXAMPLE: # Consider the set of points: # P1=(10,20) # P2=(11,21) # P3=(100,200) # The input to the function will be: # x=([10,11,100]) # y=([20,21,200]) # P2 is the nearest point to P1 and vice-versa. # P2 is the nearest point of P3. # So, the function will return: # indices: [1,0,1] # distances:[1.41,1.41,199.9] # #============================================================ def nearest_point(x,y): x=np.asarray(x) y=np.asarray(y) number_of_points=x.shape[0] ut1,ut2=np.triu_indices(number_of_points,1) #without the main diagonal distances=dist(x[ut1[:]],y[ut1[:]],x[ut2[:]],y[ut2[:]]) d_matrix=np.array(np.zeros((number_of_points,number_of_points))) d_matrix[ut1[:],ut2[:]]=distances[:] d_matrix[ut2[:],ut1[:]]=distances[:] d_matrix[range(number_of_points),range(number_of_points)]=np.max(distances) min_indices=np.array(np.zeros(number_of_points)) min_dist=np.array(np.zeros(number_of_points)) for i in range(number_of_points): min_indices[i]=int(np.argmin(d_matrix[i,:])) min_dist[i]=d_matrix[i,np.uint8(min_indices[i])] print min_indices[i] return min_indices,min_dist #============================================================ # reticulate # # Creates an 1 channel image/array of dimensions h x w pixels # with a reticulate that is spaced s pixels, with lines l # pixels large. (Background is white, net is black)
def reticulate(h=302,w=527,s=15,l=2): ret=np.array(np.zeros((h,w))) ret=ret+255 for i in range(l): ret[:,i::s]=0 ret[i::s]=0 return ret #=================================================================== # crop # # im -> image (numpy array) # ox -> column to start crop (column included in cropped image) # oy -> row to start crop (row included in cropped image) # width -> of final image # height -> of final image #=================================================================== def crop(im,ox,oy,width,height): cropped_image=im[oy:(oy+height),ox:(ox+width)] return cropped_image #======================================================================== # addcoltofile # # filename -> the array will be added as a column to this file # a -> array (will be transformed on a 1d array) # sep -> separator string # #======================================================================== def addcoltofile(filename,a,sep): a=np.ravel(np.asarray(a)) try: f=open(filename,'r+') except IOError: try: f=open(filename,'w+r+') except IOError: print "IOError." #return line=f.readline() if line=="": # File is empty for i in range(len(a)): f.write(str(a[i])+'\n') else: EOF=False pointer_to_write=0 pointer_to_read=f.tell() new_line=line.rstrip('\n')+sep+str(a[0])+'\n' #print 'new_line= '+new_line invasion=len(new_line)-len(line) #print 'size of invasion='+str(invasion) #print 'pointer_to_write='+str(pointer_to_write) #print 'pointer_to_read='+str(pointer_to_read) buf="" for i in range(1,len(a)+1): #print EOF if EOF==False: aux=f.read(invasion) buf=buf+aux #print "Invasion read: "+str(aux) aux="" while (aux.find('\n')==-1) and (EOF==False): aux=f.read(1) buf=buf+aux #print 'updated buffer= \n'+buf if aux=="": # Reached EOF EOF=True #print 'EOF' break pointer_to_read=f.tell() f.seek(pointer_to_write) f.write(new_line) pointer_to_write=f.tell() f.seek(pointer_to_read) #print 'pointer_to_read='+str(pointer_to_read) #print 'pointer_to_write='+str(pointer_to_write) if i<(len(a)): x=buf.find('\n') line=buf[0:x+1] #print 'line= '+line new_line=line.rstrip('\n')+sep+str(a[i])+'\n' #print 'new_line= '+new_line invasion=len(new_line) #print 'size of invasion='+str(invasion) buf=buf[x+1::] #print 'buffer without line= \n'+buf else: break f.seek(pointer_to_write) if f.readline()!="": print "Attention!The provided array has less elements than\n" print "the number of lines in the file." 
f.close() return #======================================================================== # visualCheckImageDB # # imagedb -> CSV filename # imagedbtype -> 0 for a complete database (filenames+labels+shape) # 1 for the simple database (filenames+shape) # zoom -> to scale image on screen # # #======================================================================== def visualcheckImageDB(imagedb,imagedbtype=0,zoom=0.5): import procdb if imagedbtype==0: images,shape,labels=procdb.processImageDB(imagedb) else: images,shape=procdb.processImageDB2(imagedb) shape=np.asarray(shape) print shape for i in range(len(images)): im=Image.open(images[i]) im=drawPointsOnImage(im,shape[i,:,0],shape[i,:,1]) im=im.resize((int(im.size[0]*zoom+0.5),int(im.size[1]*zoom+0.5))) print images[i] im.show() raw_input('Press ENTER to proceed to next image...') return #======================================================================== # dumpMatrix2File # # matrix -> numpy 1D or 2D arrays # filename -> name of the file to be created # # #======================================================================== def dumpMatrix2File(matrix,filename): datafile=open(filename,'w') dim=len(matrix.shape) if dim==1: datafile.write(','.join(map(str,matrix))) elif dim==2: for i in range(matrix.shape[0]): datafile.write(','.join(map(str,matrix[i]))) datafile.write('\n') else: print "The matrix is not an 1D or 2D array. The matrix was not saved." datafile.close() return #======================================================================== # loadMatrixFromFile #======================================================================== def loadMatrixFromFile(filename, rows=0,cols=0,sep=','): """ Loads a numpy matrix from a text file, typically a CSV file. Key arguments: filename -- text rows -- (optional) specifies the number of rows of the matrix cols -- (optional) specifies the number of columns of the matrix sep -- separator of the columns, the default is a comma """ datafile=open(filename,'r') if rows!=0 and cols!=0: matrix=np.asarray(np.zeros((rows,cols))) for i in range(rows): aux=datafile.readline() aux=aux.split(sep) matrix[i]=np.asarray(aux,dtype=np.float64) #print 'Reading line '+str(i)+' of file '+filename return matrix else: aux=datafile.readline() aux=aux.split(sep) matrix=np.asarray([aux],dtype=np.float64) aux=datafile.readline() while aux!="": aux=aux.split(sep) #print aux matrix=np.append(matrix,np.asarray([aux],dtype=np.float64),0) aux=datafile.readline() return matrix #======================================================================== # applyKernelToPoints #======================================================================== def applyKernelToPoints(image,pts,kernel,border_type='BLACK'): """ Applies the kernel (multiply and sum the neighborhood) at the specified points of an image. Returns an array of results for each selected point. The algorithm adds a frame to the original image to calculate the result of applying the kernel to the pixels that are at the borders of the original image. Key arguments: image -- numpy array representing an image pts -- array of points [[x1,y1],[x2,y2],...] 
kernel -- numpy array with the weighting elements of the sum border_type -- BLACK (default) (added frame filled with pixels=0) WHITE (added frame filled with pixels=255) ANTIALIAS (infinite texture of replicated copies of the original image) """ pts=np.asarray(pts) image=np.asarray(image) image.shape if len(image.shape)>2: grayscale=False shaperesult=(len(pts),image.shape[2]) elif len(image.shape)==1: image=image.reshape(1,image.shape[0]) shaperesult=len(pts) grayscale=True else: grayscale=True # Kernel dimensions - they are integers krows=kernel.shape[0] kcols=kernel.shape[1] if krows%2==0: # Is even ldrows=(krows/2)-1 udrows=krows/2 else: # Is odd ldrows=krows/2 udrows=krows/2 if kcols%2==0: # Is even ldcols=(kcols/2)-1 udcols=kcols/2 else: # Is odd ldcols=kcols/2 udcols=kcols/2 #------------------------------------ # ADD FRAME TO THE ORIGINAL IMAGE #------------------------------------ dummyM=image.shape[0]+krows-1 dummyN=image.shape[1]+kcols-1 if grayscale==True: dummyimage=np.asarray(np.zeros((dummyM,dummyN))) else: dummyimage=np.asarray(np.zeros((dummyM,dummyN,image.shape[2]))) if border_type=="WHITE": dummyimage=dummyimage+255 elif border_type=="ANTIALIAS": # Fills top border dummyimage[0:ldrows,ldcols:ldcols+image.shape[1]]=image[image.shape[0]-ldrows:image.shape[0],:] # Fills bottom border dummyimage[(ldrows+image.shape[0]):,ldcols:(ldcols+image.shape[1])]=image[0:udrows,:] # Fills left border dummyimage[ldrows:ldrows+image.shape[0],0:ldcols]=image[:,image.shape[1]-ldcols:] # Fills right border dummyimage[ldrows:ldrows+image.shape[0],(ldcols+image.shape[1]):]=image[:,0:udcols] # Fills top, left corner dummyimage[0:ldrows,0:ldcols]=image[image.shape[0]-ldrows,image.shape[1]-ldcols] # Fills bottom, left corner dummyimage[(ldrows+image.shape[0]):,0:ldcols]=image[0:udrows,(image.shape[1]-ldcols):] # Fills top, right corner dummyimage[0:ldrows,(ldcols+image.shape[1]):]=image[(image.shape[0]-ldrows):,0:udcols] # Fills bottom, right corner dummyimage[(ldrows+image.shape[0]):,(ldcols+image.shape[1]):]=image[0:udrows,0:udcols] dummyimage[ldrows:ldrows+image.shape[0],ldcols:ldcols+image.shape[1]]=image result=np.asarray(np.zeros(shaperesult)) pts[:,0]=pts[:,0]+ldrows pts[:,1]=pts[:,1]+ldcols for k in range(len(pts)): total=0 for i in range(-ldrows,udrows+1): for j in range(-ldcols,udcols+1): total=total+dummyimage[i+pts[k,0],j+pts[k,1]]*kernel[i+ldrows,j+ldcols] result[k]=total return result #======================================================================== # cropnscaleImageDB #======================================================================== def cropnscaleImageDB(imagedb,newimagedb,ox,oy,width,height,scale,folder="",verbose=False): """ Applies a crop (region of interest) followed by a scale operation on a set of images listed on an image database. The feature points on the image databased are modified to reflect the operations. Key arguments: imagedb -- filename/path of the image database newimagedb -- name of the file that will be created ox -- x origin of the crop operation oy -- y origin of the crop operation width -- width of the region of interest height -- height of the region of interest scale -- used to resize the region of interest folder -- where the images are going to be saved; if not provided, a new directory is created automatically. 
verbose -- If True provides feedback about the images being processed """ import procdb import os images,shapes,labels=procdb.processImageDB(imagedb) shapes=np.asarray(shapes) #print shapes.shape if verbose==True: print str(len(images))+" images to process." suffix="_"+str(int(width*scale))+"x"+str(int(height*scale)) if folder=="": folder=str(int(width*scale))+"x"+str(int(height*scale)) if not os.path.exists(folder): os.makedirs(folder) else: if not os.path.exists(folder):os.makedirs(folder) newimagedb=open(folder+"/"+newimagedb,'w') for i in range(len(images)): im=cv2.imread(images[i]) im_cropped=crop(im,ox,oy,width,height) newheight=int(height*scale) newwidth=int(width*scale) im_resized=np.asarray(np.zeros((newheight,newwidth))) im_resized=cv2.resize(im_cropped,(newwidth,newheight),im_resized,scale,scale,cv2.INTER_AREA) fileName, fileExtension = os.path.splitext(images[i]) retval=cv2.imwrite(folder+"/"+fileName+suffix+fileExtension,im_resized) if retval==False: print "Problem to save modified image." return False shapes[i,:,0]=shapes[i,:,0]-ox shapes[i,:,1]=shapes[i,:,1]-oy shapes[i]=shapes[i]*scale newshapes='' for j in range(shapes.shape[1]): newshapes=newshapes+',('+str(shapes[i,j,0])+';'+str(shapes[i,j,1])+')' newlabels='' for k in range(len(labels[i])): newlabels=newlabels+','+str(labels[i][k]) newimagedb.write(fileName+suffix+fileExtension+newlabels+newshapes+'\n') if verbose==True: print "Image "+str(i+1)+" successfully processed." newimagedb.close() return True #================================================================================= # Eigentextures #================================================================================= class Eigentextures: ''' This class implements the principal components analysis (PCA) or the eigendecomposition of high dimensional vectors. This class was designed having in mind its use for whole images or parts of images, or simply, textures. If the images contain faces, the algorithm implemented here is equivalent to the Eigenfaces algorithm presented by Matthew Turk and Alex Pentland. The variables names correspond to the variables present in the paper "Eigenfaces for Recognition", Matthew Turk and Alex Pentland, 1991. Key arguments: trainingset - Array of rasterized textures. Each sample corresponds to a row of the array. evr - Explained variance ratio: indicates how much of the overall variance is explained by the corresponding principal component or eigenvector. numpc - This parameter is used to inform how many principal components the function should consider. ''' def __init__(self,trainingset,verbose=False): self.__verbose=verbose self.M=trainingset.shape[0] self.N=trainingset.shape[1] # STEP 1 # Gamma is the matrix which columns are the rasterized pixels of each image of # the training set Gamma=np.transpose(trainingset) # STEP 2 # Compute Psi, that is the average texture over the training set. Psi=Gamma.mean(1) self.Psi=Psi Psi=(Psi.round()).astype(np.int32) Psi=np.reshape(Psi,(Psi.shape[0],1)) # STEP 3 # Subtracts the average face from all samples, creating a zero mean # distribution Phi. self.__Phi=np.asarray(np.zeros(Gamma.shape),dtype=np.int32) self.__Phi=Gamma-Psi del Gamma del trainingset if self.__verbose==True: print "Eigentextures:\tPhi created successfully." # STEP 4 # A minor product of the covariance matrix is calculated. Phi_t=np.transpose(self.__Phi) L=np.dot(Phi_t,self.__Phi) del Phi_t L=L/self.M if self.__verbose==True: print "Eigentextures:\tMinor product generated successfully." 
# STEP 5 # Calculates the eigenvalues(w) and eigenvectors(v) of # the minor product L. self.__w,self.__v=la.eig(L) del L # STEP 6 # Order the eigenvalues and their corresponding eigenvectors # in the descending order. indices=np.argsort(self.__w) indices=indices[::-1] # descending order self.__w=self.__w[indices] self.__v=self.__v[:,indices] # Calculating the explained variance ratio. self.evr=self.__w/np.sum(self.__w) if self.__verbose==True: print "Eigentextures:\tObject created succesfully." return def getEigentextures(self,numpc="all"): # Calculates the eigenvectors of the original covariance matrix if numpc=='all': self.__u=np.asarray(np.zeros((self.N,self.M))) for col in range(self.M): if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u elif numpc>0 and numpc<=self.M: numpc=int(numpc+0.5) self.__u=np.asarray(np.zeros((self.N,numpc))) for col in range(numpc): if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u else: print "Eigentextures:\tInvalid value for numpc." return def getEigentexturesEVR(self,variance=1): # Calculates the eigenvectors of the original covariance matrix if variance>=1: self.__u=np.asarray(np.zeros((self.N,self.M))) for col in range(self.M): if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u elif variance<1 and variance>0: cols=np.where(np.cumsum(self.evr)<=variance)[0] self.__u=np.asarray(np.zeros((self.N,len(cols)))) for col in cols: if self.__verbose==True: print "Calculating eigentexture "+str(col+1) h=np.dot(self.__Phi,self.__v[:,col]) h=h/la.norm(h) self.__u[:,col]=h return self.__u else: print "Eigentextures:\t Invalid explained value ratio parameter." return def saveEigentextures2File(self,filename,numpc="all"): u=self.getEigentextures(numpc) dumpMatrix2File(u,filename) return def saveEVR2File(self,filename,variance=1): u=self.getEigentexturesEVR(variance) dumpMatrix2File(u,filename) return #================================================================================= # PCA #================================================================================= class PCA: ''' This class is a simple implementation of principal components analysis (PCA) through the computation of the eigenvectors of the covariance matrix of a training set of samples. For high dimensional vectors, see the class Eigentextures. Key arguments: trainingset - Matrix of samples. Each sample corresponds to a row of the array. evr - Explained variance ratio: indicates how much of the overall variance is explained by the corresponding principal component or eigenvector. numpc - This parameter is used to inform how many principal components the function should consider. ''' def __init__(self,trainingset,verbose=False): self.__verbose=verbose self.N=trainingset.shape[0] # number of samples/trials self.M=trainingset.shape[1] # number of dimensions (size of the sample vector) # STEP 2 # Compute Psi, that is the average vector considering the training set Psi=trainingset.mean(0) self.Psi=Psi # STEP 3 # Subtracts the average from all samples, creating a zero mean # distribution Phi. Phi=np.asarray(np.zeros(trainingset.shape)) Phi=trainingset-Psi if self.__verbose==True: print "PCA:\tPhi created successfully." # STEP 4 # Computes the covariance matrix. 
		# covariance = (1/(N-1)) * Phi_t * Phi
		Phi_t=np.transpose(Phi) # M x N matrix
		covariance=(np.dot(Phi_t,Phi))/(self.N-1)
		self.cov=covariance

		self.__w,self.__v=la.eig(covariance)

		# The covariance is a positive semi-definite matrix
		# and all of its eigenvalues are non-negative.
		# However, the linalg.eig function may return small
		# negative eigenvalues due to numerical error. Before calculating
		# the explained variance ratio, values below 1e-10 are set to zero.
		self.__w=np.where(self.__w<=1e-10,0,self.__w)

		# Putting eigenvectors in descending order of their eigenvalues
		indices=np.argsort(self.__w)
		indices=indices[::-1]
		self.__w=self.__w[indices]
		self.__v=self.__v[:,indices]

		# Calculating the explained variance ratio.
		self.evr=self.__w/np.sum(self.__w)

		if self.__verbose==True:
			print "PCA:\tObject created successfully."
		return

	def getPC(self,numpc="all"):
		# Returns the requested number of principal components
		# (eigenvectors of the covariance matrix)
		if numpc=='all':
			return self.__v
		elif numpc>0 and numpc<=self.N:
			numpc=int(numpc)
			return self.__v[:,0:numpc]
		else:
			print "PCA:\tInvalid value for numpc."
			return

	def getEVR(self,variance=1):
		# Returns the principal components that together explain
		# the requested fraction of the overall variance
		if variance>=1:
			return self.__v
		elif variance<1 and variance>0:
			cols=np.where(np.cumsum(self.evr)<=variance)[0]
			return self.__v[:,cols]
		else:
			print "PCA:\tInvalid explained variance ratio parameter."
			return

	def savePC2File(self,filename,numpc="all"):
		v=self.getPC(numpc)
		dumpMatrix2File(v,filename)
		return

	def saveEVR2File(self,filename,variance=1):
		v=self.getEVR(variance)
		dumpMatrix2File(v,filename)
		return
#============================================================
random_line_split
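The Eigentextures class above relies on Turk and Pentland's minor-product trick: rather than eigendecomposing the huge N x N covariance matrix, it decomposes the small M x M matrix Phi^T Phi and lifts those eigenvectors back into image space. A minimal sketch of that step, assuming only numpy (the function name is illustrative, not part of the class):

import numpy as np

def minor_product_eigentextures(Phi):
    # Phi is N x M: one zero-mean, rasterized sample per column, with N >> M.
    M = Phi.shape[1]
    L = np.dot(Phi.T, Phi) / M            # small M x M minor product
    w, v = np.linalg.eig(L)               # eigenpairs of the minor product
    order = np.argsort(w)[::-1]           # sort by descending eigenvalue
    w, v = w[order], v[:, order]
    U = np.dot(Phi, v)                    # lift each eigenvector to length-N space
    U = U / np.linalg.norm(U, axis=0)     # normalize columns to unit length
    return w, U                           # U[:, :k] spans the top-k eigentextures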
main.rs
use std::env;

use tokio::stream::StreamExt;
use twilight::{
    cache::{
        twilight_cache_inmemory::config::{EventType, InMemoryConfigBuilder},
        InMemoryCache,
    },
    gateway::cluster::{config::ShardScheme, Cluster, ClusterConfig},
    gateway::shard::Event,
    http::Client as HttpClient,
    model::{
        channel::{Channel, Message},
        gateway::GatewayIntents,
        id::{ChannelId, GuildId, UserId},
        user::CurrentUser,
    },
};

mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;

use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};

#[tokio::main]
async fn main() -> Result<()> {
    dotenv::dotenv().ok();
    let token = env::var("DISCORD_TOKEN")?;

    // This is also the default.
    let scheme = ShardScheme::Auto;

    let config = ClusterConfig::builder(&token)
        .shard_scheme(scheme)
        // Use intents to only listen to message and reaction events
        .intents(Some(
            GatewayIntents::GUILD_MESSAGES
                | GatewayIntents::DIRECT_MESSAGES
                | GatewayIntents::GUILD_MESSAGE_REACTIONS,
        ))
        .build();

    // Start up the cluster
    let cluster = Cluster::new(config);
    cluster.up().await?;

    // The HTTP client is separate from the gateway,
    // so start up a new one
    let http = HttpClient::new(&token);

    // Since we only care about messages and reactions, make
    // the cache only cache message- and reaction-related events
    let cache_config = InMemoryConfigBuilder::new()
        .event_types(
            EventType::MESSAGE_CREATE
                | EventType::MESSAGE_DELETE
                | EventType::MESSAGE_DELETE_BULK
                | EventType::MESSAGE_UPDATE
                | EventType::REACTION_ADD
                | EventType::REACTION_REMOVE,
        )
        .build();
    let cache = InMemoryCache::from(cache_config);

    let mut events = cluster.events().await;

    let current_user = http.current_user().await?;

    // Process each event as it arrives in the event stream
    while let Some(event) = events.next().await {
        // Update the cache
        cache.update(&event.1).await.expect("Cache failed, OhNoe!");

        // Handle the event
        handle_event(event, http.clone(), &current_user).await?;
    }

    Ok(())
}

/// Checks if the specified channel is a private message channel
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool> {
    match http.channel(channel_id).await?.unwrap() {
        Channel::Private(_) => Ok(true),
        _ => Ok(false)
    }
}

async fn handle_event(
    event: (u64, Event),
    http: HttpClient,
    current_user: &CurrentUser
) -> Result<()> {
    match event {
        (_, Event::MessageCreate(msg)) => {
            // Don't send replies to yourself
            if msg.author.id != current_user.id {
                if is_pm(&http, msg.channel_id).await? {
                    handle_pm(&msg, &http).await?;
                }
                else {
                    handle_potential_command(&msg, http, current_user)
                        .await?;
                }
            }
        }
        (_, Event::ReactionAdd(reaction)) => {
            if !is_pm(&http, reaction.channel_id).await? {
                handle_reaction_add(&reaction, http, &current_user).await?;
            }
        }
        (_, Event::ReactionRemove(reaction)) => {
            if !is_pm(&http, reaction.channel_id).await?
{ handle_reaction_remove(&reaction, http).await?; } } (id, Event::ShardConnected(_)) => { println!("Connected on shard {}", id); } _ => {} } Ok(()) } async fn handle_pm( msg: &Message, http: &HttpClient, ) -> Result<()> { handle_add_theme(http, msg).await?; Ok(()) } async fn handle_potential_command( msg: &Message, http: HttpClient, current_user: &CurrentUser ) -> Result<()> { let mut words = msg.content.split_ascii_whitespace(); match words.next() { Some("!help") => { send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to call for help in non-guild"), ).await?; } Some("!createchannels") => { handle_create_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to create channels in non-guild"), msg.author.id, current_user.id, http ).await?; }, Some("!renamechannels") => { handle_rename_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.author.id, current_user.id, http ).await?; }, Some("!removechannels") => { handle_remove_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to remove channels in non-guild"), msg.author.id, http ).await?; }, Some("!clearassociations") => { handle_clear_channel_associations( msg.channel_id, msg.guild_id.expect("Tried to clear channel associations in non-guild"), msg.author.id, http, ).await?; } Some("!role") => { handle_give_role( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to get role in non-guild"), &msg.author, http ).await?; }, Some("!leave") => { handle_remove_role( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to leave role in non-guild"), &msg.author, http ).await?; }, Some("!generatetheme") => { handle_generate_theme( msg.channel_id, msg.guild_id.expect("Tried to generate theme in non-guild"), &msg.author, http ).await?; } Some("!showallthemes") => { handle_show_all_themes( msg.channel_id, msg.guild_id.expect("Tried to show all themes in non-guild"), &msg.author, http ).await?; } Some("!showthemecount") =>
Some("!setroleassign") => { handle_set_reaction_message( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to set role assignment message in non-guild"), &msg.author, http, msg, ReactionMessageType::RoleAssign, ).await?; } Some(s) if s.chars().next() == Some('!') => { send_message(&http, msg.channel_id, msg.author.id, format!("Unrecognised command `{}`.", s) ).await?; send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to issue a command in non-guild"), ).await?; } // Not a command and probably not for us Some(_) => { // Check if we were mentioned if msg.mentions.contains_key(&current_user.id) { send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to mention us in non-guild"), ).await?; } } None => {} } Ok(()) } async fn send_help_message( http: HttpClient, channel_id: ChannelId, user_id: UserId, guild_id: GuildId, ) -> Result<()> { let standard_message = //"Send me a PM to submit theme ideas.\n\n\ "Get a role to signify one of your skill sets with the command `!role <role name>`\n\ and leave a role with `!leave <role name>`.\n\n\ You can also ask for text and voice channels for your game \ with the command `!createchannels <game name>`\n\ and rename them with `!renamechannels <new game name>`."; let organizer_message = format!( "Since you have the **{}** role, you also have access to the \ following commands:\n\ - `!generatetheme` to generate a theme.\n\ - `!showallthemes` to view all the theme ideas that have been submitted.\n\ - `!showthemecount` to see the number of theme ideas that have been submitted.\n\ - `!removechannels <mention of user>` to remove a user's created channel.\n\ - `!clearassociations` to clear all user–channel associations.\n\ - `!setroleassign <mention of channel with the message> <message ID>` to \ set the server's role assignment message.", ORGANIZER ); let help_message = if has_role(&http, guild_id, user_id, ORGANIZER).await? { format!("{}\n\n{}", standard_message, organizer_message) } else { standard_message.to_string() }; send_message(&http, channel_id, user_id, help_message).await?; Ok(()) }
{ handle_show_theme_count( msg.channel_id, msg.guild_id.expect("Tried to show theme idea count in non-guild"), &msg.author, http ).await?; }
conditional_block
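The handle_potential_command function above dispatches on the first whitespace-separated token of the message and falls back to a help reply for unknown "!" commands. The same table-driven pattern, sketched in Python with hypothetical handler names (not part of the bot):

def dispatch_command(content, handlers, on_unknown):
    # handlers maps a command token such as "!role" to a callable taking the
    # remaining words; on_unknown handles unrecognised "!" commands.
    words = content.split()
    if not words:
        return None                      # empty message: nothing to do
    command, args = words[0], words[1:]
    if not command.startswith("!"):
        return None                      # not a command, probably not for us
    return handlers.get(command, on_unknown)(args)

# Hypothetical usage:
#   dispatch_command("!role artist", {"!role": give_role}, send_help)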
main.rs
use std::env;

use tokio::stream::StreamExt;
use twilight::{
    cache::{
        twilight_cache_inmemory::config::{EventType, InMemoryConfigBuilder},
        InMemoryCache,
    },
    gateway::cluster::{config::ShardScheme, Cluster, ClusterConfig},
    gateway::shard::Event,
    http::Client as HttpClient,
    model::{
        channel::{Channel, Message},
        gateway::GatewayIntents,
        id::{ChannelId, GuildId, UserId},
        user::CurrentUser,
    },
};

mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;

use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};

#[tokio::main]
async fn main() -> Result<()> {
    dotenv::dotenv().ok();
    let token = env::var("DISCORD_TOKEN")?;

    // This is also the default.
    let scheme = ShardScheme::Auto;

    let config = ClusterConfig::builder(&token)
        .shard_scheme(scheme)
        // Use intents to only listen to message and reaction events
        .intents(Some(
            GatewayIntents::GUILD_MESSAGES
                | GatewayIntents::DIRECT_MESSAGES
                | GatewayIntents::GUILD_MESSAGE_REACTIONS,
        ))
        .build();

    // Start up the cluster
    let cluster = Cluster::new(config);
    cluster.up().await?;

    // The HTTP client is separate from the gateway,
    // so start up a new one
    let http = HttpClient::new(&token);

    // Since we only care about messages and reactions, make
    // the cache only cache message- and reaction-related events
    let cache_config = InMemoryConfigBuilder::new()
        .event_types(
            EventType::MESSAGE_CREATE
                | EventType::MESSAGE_DELETE
                | EventType::MESSAGE_DELETE_BULK
                | EventType::MESSAGE_UPDATE
                | EventType::REACTION_ADD
                | EventType::REACTION_REMOVE,
        )
        .build();
    let cache = InMemoryCache::from(cache_config);

    let mut events = cluster.events().await;

    let current_user = http.current_user().await?;

    // Process each event as it arrives in the event stream
    while let Some(event) = events.next().await {
        // Update the cache
        cache.update(&event.1).await.expect("Cache failed, OhNoe!");

        // Handle the event
        handle_event(event, http.clone(), &current_user).await?;
    }

    Ok(())
}

/// Checks if the specified channel is a private message channel
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool> {
    match http.channel(channel_id).await?.unwrap() {
        Channel::Private(_) => Ok(true),
        _ => Ok(false)
    }
}

async fn handle_event(
    event: (u64, Event),
    http: HttpClient,
    current_user: &CurrentUser
) -> Result<()> {
    match event {
        (_, Event::MessageCreate(msg)) => {
            // Don't send replies to yourself
            if msg.author.id != current_user.id {
                if is_pm(&http, msg.channel_id).await? {
                    handle_pm(&msg, &http).await?;
                }
                else {
                    handle_potential_command(&msg, http, current_user)
                        .await?;
                }
            }
        }
        (_, Event::ReactionAdd(reaction)) => {
            if !is_pm(&http, reaction.channel_id).await? {
                handle_reaction_add(&reaction, http, &current_user).await?;
            }
        }
        (_, Event::ReactionRemove(reaction)) => {
            if !is_pm(&http, reaction.channel_id).await?
{ handle_reaction_remove(&reaction, http).await?; } } (id, Event::ShardConnected(_)) => { println!("Connected on shard {}", id); } _ => {} } Ok(()) } async fn handle_pm( msg: &Message, http: &HttpClient, ) -> Result<()> { handle_add_theme(http, msg).await?; Ok(()) } async fn handle_potential_command( msg: &Message, http: HttpClient, current_user: &CurrentUser ) -> Result<()> { let mut words = msg.content.split_ascii_whitespace(); match words.next() { Some("!help") => { send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to call for help in non-guild"), ).await?; } Some("!createchannels") => { handle_create_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to create channels in non-guild"), msg.author.id, current_user.id, http ).await?; }, Some("!renamechannels") => { handle_rename_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.author.id, current_user.id, http ).await?; }, Some("!removechannels") => { handle_remove_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to remove channels in non-guild"), msg.author.id, http ).await?; }, Some("!clearassociations") => { handle_clear_channel_associations( msg.channel_id, msg.guild_id.expect("Tried to clear channel associations in non-guild"), msg.author.id, http, ).await?; } Some("!role") => { handle_give_role( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to get role in non-guild"), &msg.author, http ).await?; }, Some("!leave") => { handle_remove_role( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to leave role in non-guild"), &msg.author, http ).await?; }, Some("!generatetheme") => { handle_generate_theme( msg.channel_id, msg.guild_id.expect("Tried to generate theme in non-guild"), &msg.author, http ).await?; } Some("!showallthemes") => { handle_show_all_themes( msg.channel_id, msg.guild_id.expect("Tried to show all themes in non-guild"), &msg.author, http ).await?; } Some("!showthemecount") => { handle_show_theme_count( msg.channel_id, msg.guild_id.expect("Tried to show theme idea count in non-guild"), &msg.author, http ).await?; } Some("!setroleassign") => { handle_set_reaction_message( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to set role assignment message in non-guild"), &msg.author, http, msg, ReactionMessageType::RoleAssign, ).await?; } Some(s) if s.chars().next() == Some('!') => { send_message(&http, msg.channel_id, msg.author.id, format!("Unrecognised command `{}`.", s) ).await?; send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to issue a command in non-guild"), ).await?; } // Not a command and probably not for us Some(_) => { // Check if we were mentioned if msg.mentions.contains_key(&current_user.id) { send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to mention us in non-guild"), ).await?; } } None => {} } Ok(()) } async fn
( http: HttpClient, channel_id: ChannelId, user_id: UserId, guild_id: GuildId, ) -> Result<()> { let standard_message = //"Send me a PM to submit theme ideas.\n\n\ "Get a role to signify one of your skill sets with the command `!role <role name>`\n\ and leave a role with `!leave <role name>`.\n\n\ You can also ask for text and voice channels for your game \ with the command `!createchannels <game name>`\n\ and rename them with `!renamechannels <new game name>`."; let organizer_message = format!( "Since you have the **{}** role, you also have access to the \ following commands:\n\ - `!generatetheme` to generate a theme.\n\ - `!showallthemes` to view all the theme ideas that have been submitted.\n\ - `!showthemecount` to see the number of theme ideas that have been submitted.\n\ - `!removechannels <mention of user>` to remove a user's created channel.\n\ - `!clearassociations` to clear all user–channel associations.\n\ - `!setroleassign <mention of channel with the message> <message ID>` to \ set the server's role assignment message.", ORGANIZER ); let help_message = if has_role(&http, guild_id, user_id, ORGANIZER).await? { format!("{}\n\n{}", standard_message, organizer_message) } else { standard_message.to_string() }; send_message(&http, channel_id, user_id, help_message).await?; Ok(()) }
send_help_message
identifier_name
main.rs
use std::env;

use tokio::stream::StreamExt;
use twilight::{
    cache::{
        twilight_cache_inmemory::config::{EventType, InMemoryConfigBuilder},
        InMemoryCache,
    },
    gateway::cluster::{config::ShardScheme, Cluster, ClusterConfig},
    gateway::shard::Event,
    http::Client as HttpClient,
    model::{
        channel::{Channel, Message},
        gateway::GatewayIntents,
        id::{ChannelId, GuildId, UserId},
        user::CurrentUser,
    },
};

mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;

use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};

#[tokio::main]
async fn main() -> Result<()> {
    dotenv::dotenv().ok();
    let token = env::var("DISCORD_TOKEN")?;

    // This is also the default.
    let scheme = ShardScheme::Auto;

    let config = ClusterConfig::builder(&token)
        .shard_scheme(scheme)
        // Use intents to only listen to message and reaction events
        .intents(Some(
            GatewayIntents::GUILD_MESSAGES
                | GatewayIntents::DIRECT_MESSAGES
                | GatewayIntents::GUILD_MESSAGE_REACTIONS,
        ))
        .build();

    // Start up the cluster
    let cluster = Cluster::new(config);
    cluster.up().await?;

    // The HTTP client is separate from the gateway,
    // so start up a new one
    let http = HttpClient::new(&token);

    // Since we only care about messages and reactions, make
    // the cache only cache message- and reaction-related events
    let cache_config = InMemoryConfigBuilder::new()
        .event_types(
            EventType::MESSAGE_CREATE
                | EventType::MESSAGE_DELETE
                | EventType::MESSAGE_DELETE_BULK
                | EventType::MESSAGE_UPDATE
                | EventType::REACTION_ADD
                | EventType::REACTION_REMOVE,
        )
        .build();
    let cache = InMemoryCache::from(cache_config);

    let mut events = cluster.events().await;

    let current_user = http.current_user().await?;

    // Process each event as it arrives in the event stream
    while let Some(event) = events.next().await {
        // Update the cache
        cache.update(&event.1).await.expect("Cache failed, OhNoe!");

        // Handle the event
        handle_event(event, http.clone(), &current_user).await?;
    }

    Ok(())
}

/// Checks if the specified channel is a private message channel
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool>
async fn handle_event( event: (u64, Event), http: HttpClient, current_user: &CurrentUser ) -> Result<()> { match event { (_, Event::MessageCreate(msg)) => { // Don't send replies to yourself if msg.author.id != current_user.id { if is_pm(&http, msg.channel_id).await? { handle_pm(&msg, &http).await?; } else { handle_potential_command(&msg, http, current_user) .await?; } } } (_, Event::ReactionAdd(reaction)) => { if !is_pm(&http, reaction.channel_id).await? { handle_reaction_add(&reaction, http, &current_user).await?; } } (_, Event::ReactionRemove(reaction)) => { if !is_pm(&http, reaction.channel_id).await? { handle_reaction_remove(&reaction, http).await?; } } (id, Event::ShardConnected(_)) => { println!("Connected on shard {}", id); } _ => {} } Ok(()) } async fn handle_pm( msg: &Message, http: &HttpClient, ) -> Result<()> { handle_add_theme(http, msg).await?; Ok(()) } async fn handle_potential_command( msg: &Message, http: HttpClient, current_user: &CurrentUser ) -> Result<()> { let mut words = msg.content.split_ascii_whitespace(); match words.next() { Some("!help") => { send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to call for help in non-guild"), ).await?; } Some("!createchannels") => { handle_create_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to create channels in non-guild"), msg.author.id, current_user.id, http ).await?; }, Some("!renamechannels") => { handle_rename_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.author.id, current_user.id, http ).await?; }, Some("!removechannels") => { handle_remove_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to remove channels in non-guild"), msg.author.id, http ).await?; }, Some("!clearassociations") => { handle_clear_channel_associations( msg.channel_id, msg.guild_id.expect("Tried to clear channel associations in non-guild"), msg.author.id, http, ).await?; } Some("!role") => { handle_give_role( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to get role in non-guild"), &msg.author, http ).await?; }, Some("!leave") => { handle_remove_role( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to leave role in non-guild"), &msg.author, http ).await?; }, Some("!generatetheme") => { handle_generate_theme( msg.channel_id, msg.guild_id.expect("Tried to generate theme in non-guild"), &msg.author, http ).await?; } Some("!showallthemes") => { handle_show_all_themes( msg.channel_id, msg.guild_id.expect("Tried to show all themes in non-guild"), &msg.author, http ).await?; } Some("!showthemecount") => { handle_show_theme_count( msg.channel_id, msg.guild_id.expect("Tried to show theme idea count in non-guild"), &msg.author, http ).await?; } Some("!setroleassign") => { handle_set_reaction_message( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to set role assignment message in non-guild"), &msg.author, http, msg, ReactionMessageType::RoleAssign, ).await?; } Some(s) if s.chars().next() == Some('!') => { send_message(&http, msg.channel_id, msg.author.id, format!("Unrecognised command `{}`.", s) ).await?; send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to issue a command in non-guild"), ).await?; } // Not a command and probably not for us Some(_) => { // Check if we were mentioned if msg.mentions.contains_key(&current_user.id) { send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to mention us in non-guild"), ).await?; 
} } None => {} } Ok(()) } async fn send_help_message( http: HttpClient, channel_id: ChannelId, user_id: UserId, guild_id: GuildId, ) -> Result<()> { let standard_message = //"Send me a PM to submit theme ideas.\n\n\ "Get a role to signify one of your skill sets with the command `!role <role name>`\n\ and leave a role with `!leave <role name>`.\n\n\ You can also ask for text and voice channels for your game \ with the command `!createchannels <game name>`\n\ and rename them with `!renamechannels <new game name>`."; let organizer_message = format!( "Since you have the **{}** role, you also have access to the \ following commands:\n\ - `!generatetheme` to generate a theme.\n\ - `!showallthemes` to view all the theme ideas that have been submitted.\n\ - `!showthemecount` to see the number of theme ideas that have been submitted.\n\ - `!removechannels <mention of user>` to remove a user's created channel.\n\ - `!clearassociations` to clear all user–channel associations.\n\ - `!setroleassign <mention of channel with the message> <message ID>` to \ set the server's role assignment message.", ORGANIZER ); let help_message = if has_role(&http, guild_id, user_id, ORGANIZER).await? { format!("{}\n\n{}", standard_message, organizer_message) } else { standard_message.to_string() }; send_message(&http, channel_id, user_id, help_message).await?; Ok(()) }
{ match http.channel(channel_id).await?.unwrap() { Channel::Private(_) => Ok(true), _ => Ok(false) } }
identifier_body
main.rs
use std::env;

use tokio::stream::StreamExt;
use twilight::{
    cache::{
        twilight_cache_inmemory::config::{EventType, InMemoryConfigBuilder},
        InMemoryCache,
    },
    gateway::cluster::{config::ShardScheme, Cluster, ClusterConfig},
    gateway::shard::Event,
    http::Client as HttpClient,
    model::{
        channel::{Channel, Message},
        gateway::GatewayIntents,
        id::{ChannelId, GuildId, UserId},
        user::CurrentUser,
    },
};

mod channel;
mod reaction;
mod role;
mod roles;
mod state;
mod theme;
mod utils;

use channel::{handle_create_channels, handle_remove_channels, handle_clear_channel_associations, handle_rename_channels};
use reaction::{handle_reaction_add, handle_reaction_remove, handle_set_reaction_message, ReactionMessageType};
use role::{handle_give_role, handle_remove_role, has_role};
use roles::ORGANIZER;
use theme::{handle_add_theme, handle_generate_theme, handle_show_all_themes, handle_show_theme_count};
use utils::{Result, send_message};

#[tokio::main]
async fn main() -> Result<()> {
    dotenv::dotenv().ok();
    let token = env::var("DISCORD_TOKEN")?;

    // This is also the default.
    let scheme = ShardScheme::Auto;

    let config = ClusterConfig::builder(&token)
        .shard_scheme(scheme)
        // Use intents to only listen to message and reaction events
        .intents(Some(
            GatewayIntents::GUILD_MESSAGES
                | GatewayIntents::DIRECT_MESSAGES
                | GatewayIntents::GUILD_MESSAGE_REACTIONS,
        ))
        .build();

    // Start up the cluster
    let cluster = Cluster::new(config);
    cluster.up().await?;

    // The HTTP client is separate from the gateway,
    // so start up a new one
    let http = HttpClient::new(&token);

    // Since we only care about messages and reactions, make
    // the cache only cache message- and reaction-related events
    let cache_config = InMemoryConfigBuilder::new()
        .event_types(
            EventType::MESSAGE_CREATE
                | EventType::MESSAGE_DELETE
                | EventType::MESSAGE_DELETE_BULK
                | EventType::MESSAGE_UPDATE
                | EventType::REACTION_ADD
                | EventType::REACTION_REMOVE,
        )
        .build();
    let cache = InMemoryCache::from(cache_config);

    let mut events = cluster.events().await;

    let current_user = http.current_user().await?;

    // Process each event as it arrives in the event stream
    while let Some(event) = events.next().await {
        // Update the cache
        cache.update(&event.1).await.expect("Cache failed, OhNoe!");

        // Handle the event
        handle_event(event, http.clone(), &current_user).await?;
    }

    Ok(())
}

/// Checks if the specified channel is a private message channel
async fn is_pm(http: &HttpClient, channel_id: ChannelId) -> Result<bool> {
    match http.channel(channel_id).await?.unwrap() {
        Channel::Private(_) => Ok(true),
        _ => Ok(false)
    }
}

async fn handle_event(
    event: (u64, Event),
    http: HttpClient,
    current_user: &CurrentUser
) -> Result<()> {
    match event {
        (_, Event::MessageCreate(msg)) => {
            // Don't send replies to yourself
            if msg.author.id != current_user.id {
                if is_pm(&http, msg.channel_id).await? {
                    handle_pm(&msg, &http).await?;
                }
                else {
                    handle_potential_command(&msg, http, current_user)
                        .await?;
                }
            }
        }
        (_, Event::ReactionAdd(reaction)) => {
            if !is_pm(&http, reaction.channel_id).await? {
                handle_reaction_add(&reaction, http, &current_user).await?;
            }
        }
        (_, Event::ReactionRemove(reaction)) => {
            if !is_pm(&http, reaction.channel_id).await?
{ handle_reaction_remove(&reaction, http).await?; } } (id, Event::ShardConnected(_)) => { println!("Connected on shard {}", id); } _ => {} } Ok(()) } async fn handle_pm( msg: &Message, http: &HttpClient, ) -> Result<()> { handle_add_theme(http, msg).await?; Ok(()) } async fn handle_potential_command( msg: &Message, http: HttpClient, current_user: &CurrentUser ) -> Result<()> { let mut words = msg.content.split_ascii_whitespace(); match words.next() { Some("!help") => { send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to call for help in non-guild"), ).await?; } Some("!createchannels") => { handle_create_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to create channels in non-guild"), msg.author.id, current_user.id, http ).await?; }, Some("!renamechannels") => { handle_rename_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.author.id, current_user.id, http ).await?; }, Some("!removechannels") => { handle_remove_channels( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to remove channels in non-guild"), msg.author.id, http ).await?; }, Some("!clearassociations") => { handle_clear_channel_associations( msg.channel_id, msg.guild_id.expect("Tried to clear channel associations in non-guild"), msg.author.id, http, ).await?; } Some("!role") => { handle_give_role( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to get role in non-guild"), &msg.author, http ).await?; }, Some("!leave") => { handle_remove_role( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to leave role in non-guild"), &msg.author, http ).await?; }, Some("!generatetheme") => { handle_generate_theme( msg.channel_id, msg.guild_id.expect("Tried to generate theme in non-guild"), &msg.author, http ).await?; } Some("!showallthemes") => { handle_show_all_themes( msg.channel_id, msg.guild_id.expect("Tried to show all themes in non-guild"), &msg.author, http ).await?; } Some("!showthemecount") => { handle_show_theme_count( msg.channel_id, msg.guild_id.expect("Tried to show theme idea count in non-guild"), &msg.author, http ).await?; } Some("!setroleassign") => { handle_set_reaction_message( &words.collect::<Vec<_>>(), msg.channel_id, msg.guild_id.expect("Tried to set role assignment message in non-guild"), &msg.author, http, msg, ReactionMessageType::RoleAssign, ).await?; } Some(s) if s.chars().next() == Some('!') => { send_message(&http, msg.channel_id, msg.author.id, format!("Unrecognised command `{}`.", s) ).await?; send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to issue a command in non-guild"), ).await?; } // Not a command and probably not for us Some(_) => { // Check if we were mentioned if msg.mentions.contains_key(&current_user.id) { send_help_message( http, msg.channel_id, msg.author.id, msg.guild_id.expect("Tried to mention us in non-guild"), ).await?; } } None => {} } Ok(()) } async fn send_help_message( http: HttpClient, channel_id: ChannelId, user_id: UserId, guild_id: GuildId, ) -> Result<()> { let standard_message = //"Send me a PM to submit theme ideas.\n\n\ "Get a role to signify one of your skill sets with the command `!role <role name>`\n\ and leave a role with `!leave <role name>`.\n\n\ You can also ask for text and voice channels for your game \ with the command `!createchannels <game name>`\n\ and rename them with `!renamechannels <new game name>`."; let organizer_message = format!( "Since you have the **{}** role, you also have access to 
the \ following commands:\n\ - `!generatetheme` to generate a theme.\n\ - `!showallthemes` to view all the theme ideas that have been submitted.\n\ - `!showthemecount` to see the number of theme ideas that have been submitted.\n\ - `!removechannels <mention of user>` to remove a user's created channel.\n\ - `!clearassociations` to clear all user–channel associations.\n\ - `!setroleassign <mention of channel with the message> <message ID>` to \ set the server's role assignment message.", ORGANIZER ); let help_message =
}; send_message(&http, channel_id, user_id, help_message).await?; Ok(()) }
if has_role(&http, guild_id, user_id, ORGANIZER).await? { format!("{}\n\n{}", standard_message, organizer_message) } else { standard_message.to_string()
random_line_split
instance.py
""" Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import annotations import asyncio from enum import Enum import logging import ssl from tempfile import TemporaryDirectory from typing import ( Any, Dict, Optional, Tuple, TYPE_CHECKING, ) import aiohttp from cryptography.hazmat.backends import default_backend from cryptography.x509 import load_pem_x509_certificate from google.auth.credentials import Credentials from google.cloud.sql.connector.exceptions import ( AutoIAMAuthNotSupported, CloudSQLIPTypeError, CredentialsTypeError, TLSVersionError, ) from google.cloud.sql.connector.rate_limiter import AsyncRateLimiter from google.cloud.sql.connector.refresh_utils import ( _get_ephemeral, _get_metadata, _is_valid, _seconds_until_refresh, ) from google.cloud.sql.connector.utils import _auth_init, write_to_file from google.cloud.sql.connector.version import __version__ as version if TYPE_CHECKING: import datetime logger = logging.getLogger(name=__name__) APPLICATION_NAME = "cloud-sql-python-connector" class IPTypes(Enum): PUBLIC: str = "PRIMARY" PRIVATE: str = "PRIVATE" PSC: str = "PSC" class InstanceMetadata: ip_addrs: Dict[str, Any] context: ssl.SSLContext database_version: str expiration: datetime.datetime def __init__( self, ephemeral_cert: str, database_version: str, ip_addrs: Dict[str, Any], private_key: bytes, server_ca_cert: str, expiration: datetime.datetime, enable_iam_auth: bool, ) -> None: self.ip_addrs = ip_addrs self.database_version = database_version self.context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) # update ssl.PROTOCOL_TLS_CLIENT default self.context.check_hostname = False # verify OpenSSL version supports TLSv1.3 if ssl.HAS_TLSv1_3: # force TLSv1.3 if supported by client self.context.minimum_version = ssl.TLSVersion.TLSv1_3 # fallback to TLSv1.2 for older versions of OpenSSL else: if enable_iam_auth: raise TLSVersionError( f"Your current version of OpenSSL ({ssl.OPENSSL_VERSION}) does not " "support TLSv1.3, which is required to use IAM Authentication.\n" "Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support." ) logger.warning( "TLSv1.3 is not supported with your version of OpenSSL " f"({ssl.OPENSSL_VERSION}), falling back to TLSv1.2\n" "Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support." ) self.context.minimum_version = ssl.TLSVersion.TLSv1_2 self.expiration = expiration # tmpdir and its contents are automatically deleted after the CA cert # and ephemeral cert are loaded into the SSLcontext. The values # need to be written to files in order to be loaded by the SSLContext with TemporaryDirectory() as tmpdir: ca_filename, cert_filename, key_filename = write_to_file( tmpdir, server_ca_cert, ephemeral_cert, private_key ) self.context.load_cert_chain(cert_filename, keyfile=key_filename) self.context.load_verify_locations(cafile=ca_filename) def get_preferred_ip(self, ip_type: IPTypes) -> str: """Returns the first IP address for the instance, according to the preference supplied by ip_type. 
    If no IP addresses with the given preference are found, an error is raised."""
        if ip_type.value in self.ip_addrs:
            return self.ip_addrs[ip_type.value]
        raise CloudSQLIPTypeError(
            "Cloud SQL instance does not have any IP addresses matching "
            f"preference: {ip_type.value})"
        )


class Instance:
    """A class to manage the details of the connection to a Cloud SQL
    instance, including refreshing the credentials.

    :param instance_connection_string:
        The Google Cloud SQL Instance's connection string.
    :type instance_connection_string: str

    :param user_agent_string:
        The user agent string to append to SQLAdmin API requests
    :type user_agent_string: str

    :param credentials:
        Credentials object used to authenticate connections to Cloud SQL server.
        If not specified, Application Default Credentials are used.
    :type credentials: google.auth.credentials.Credentials

    :param enable_iam_auth:
        Enables automatic IAM database authentication for Postgres or MySQL
        instances.
    :type enable_iam_auth: bool

    :param loop:
        A new event loop for the refresh function to run in.
    :type loop: asyncio.AbstractEventLoop

    :param quota_project:
        The Project ID for an existing Google Cloud project. The project specified
        is used for quota and billing purposes. If not specified, defaults to
        project sourced from environment.
    :type quota_project: str

    :param sqladmin_api_endpoint:
        Base URL to use when calling the Cloud SQL Admin API endpoint.
        Defaults to "https://sqladmin.googleapis.com", this argument should
        only be used in development.
    :type sqladmin_api_endpoint: str
    """

    # asyncio.AbstractEventLoop is used because the default loop,
    # SelectorEventLoop, is usable on both Unix and Windows but has limited
    # functionality on Windows. It is recommended to use ProactorEventLoop
    # while developing on Windows.
    # Link to Github issue:
    # https://github.com/GoogleCloudPlatform/cloud-sql-python-connector/issues/22
    _loop: asyncio.AbstractEventLoop

    _enable_iam_auth: bool

    __client_session: Optional[aiohttp.ClientSession] = None

    @property
    def _client_session(self) -> aiohttp.ClientSession:
        if self.__client_session is None:
            headers = {
                "x-goog-api-client": self._user_agent_string,
                "User-Agent": self._user_agent_string,
                "Content-Type": "application/json",
            }
            if self._quota_project:
                headers["x-goog-user-project"] = self._quota_project
            self.__client_session = aiohttp.ClientSession(headers=headers)
        return self.__client_session

    _credentials: Optional[Credentials] = None
    _keys: asyncio.Future

    _instance_connection_string: str
    _user_agent_string: str
    _sqladmin_api_endpoint: str
    _instance: str
    _project: str
    _region: str

    _refresh_rate_limiter: AsyncRateLimiter
    _refresh_in_progress: asyncio.locks.Event
    _current: asyncio.Task  # task wraps coroutine that returns InstanceMetadata
    _next: asyncio.Task  # task wraps coroutine that returns another task

    def __init__(
        self,
        instance_connection_string: str,
        driver_name: str,
        keys: asyncio.Future,
        loop: asyncio.AbstractEventLoop,
        credentials: Optional[Credentials] = None,
        enable_iam_auth: bool = False,
        quota_project: Optional[str] = None,
        sqladmin_api_endpoint: str = "https://sqladmin.googleapis.com",
    ) -> None:
        # Validate connection string
        connection_string_split = instance_connection_string.split(":")

        if len(connection_string_split) == 3:
            self._instance_connection_string = instance_connection_string
            self._project = connection_string_split[0]
            self._region = connection_string_split[1]
            self._instance = connection_string_split[2]
        else:
            raise ValueError(
                "Arg `instance_connection_string` must have "
                "format: PROJECT:REGION:INSTANCE, "
                f"got
{instance_connection_string}." ) self._enable_iam_auth = enable_iam_auth self._user_agent_string = f"{APPLICATION_NAME}/{version}+{driver_name}" self._quota_project = quota_project self._sqladmin_api_endpoint = sqladmin_api_endpoint self._loop = loop self._keys = keys # validate credentials type if not isinstance(credentials, Credentials) and credentials is not None: raise CredentialsTypeError( "Arg credentials must be type 'google.auth.credentials.Credentials' " "or None (to use Application Default Credentials)" ) self._credentials = _auth_init(credentials) self._refresh_rate_limiter = AsyncRateLimiter( max_capacity=2, rate=1 / 30, loop=self._loop ) self._refresh_in_progress = asyncio.locks.Event() self._current = self._schedule_refresh(0) self._next = self._current async def
(self) -> None:
        """
        Forces a new refresh attempt immediately to be used for future connection attempts.
        """
        # if next refresh is not already in progress, cancel it and schedule new one immediately
        if not self._refresh_in_progress.is_set():
            self._next.cancel()
            self._next = self._schedule_refresh(0)
        # block all sequential connection attempts on the next refresh result if current is invalid
        if not await _is_valid(self._current):
            self._current = self._next

    async def _perform_refresh(self) -> InstanceMetadata:
        """Retrieves instance metadata and ephemeral certificate from the
        Cloud SQL Instance.

        :rtype: InstanceMetadata
        :returns: A dataclass containing a string representing the ephemeral certificate, a dict
            containing the instance's IP addresses, a string representing a PEM-encoded private key
            and a string representing a PEM-encoded certificate authority.
        """
        self._refresh_in_progress.set()
        logger.debug(
            f"['{self._instance_connection_string}']: Entered _perform_refresh"
        )

        try:
            await self._refresh_rate_limiter.acquire()
            priv_key, pub_key = await self._keys

            logger.debug(f"['{self._instance_connection_string}']: Creating context")

            metadata_task = self._loop.create_task(
                _get_metadata(
                    self._client_session,
                    self._sqladmin_api_endpoint,
                    self._credentials,
                    self._project,
                    self._region,
                    self._instance,
                )
            )

            ephemeral_task = self._loop.create_task(
                _get_ephemeral(
                    self._client_session,
                    self._sqladmin_api_endpoint,
                    self._credentials,
                    self._project,
                    self._instance,
                    pub_key,
                    self._enable_iam_auth,
                )
            )
            try:
                metadata = await metadata_task
                # check if automatic IAM database authn is supported for database engine
                if self._enable_iam_auth and not metadata[
                    "database_version"
                ].startswith(("POSTGRES", "MYSQL")):
                    raise AutoIAMAuthNotSupported(
                        f"'{metadata['database_version']}' does not support automatic IAM authentication. It is only supported with Cloud SQL Postgres or MySQL instances."
                    )
            except Exception:
                # cancel ephemeral cert task if exception occurs before it is awaited
                ephemeral_task.cancel()
                raise

            ephemeral_cert = await ephemeral_task

            x509 = load_pem_x509_certificate(
                ephemeral_cert.encode("UTF-8"), default_backend()
            )
            expiration = x509.not_valid_after

            if self._enable_iam_auth:
                if self._credentials is not None:
                    token_expiration: datetime.datetime = self._credentials.expiry
                if expiration > token_expiration:
                    expiration = token_expiration

        except aiohttp.ClientResponseError as e:
            logger.debug(
                f"['{self._instance_connection_string}']: Error occurred during _perform_refresh."
            )
            if e.status == 403:
                e.message = "Forbidden: Authenticated IAM principal does not seem authorized to make API requests. Verify 'Cloud SQL Admin API' is enabled within your GCP project and 'Cloud SQL Client' role has been granted to IAM principal."
            raise

        except Exception:
            logger.debug(
                f"['{self._instance_connection_string}']: Error occurred during _perform_refresh."
            )
            raise

        finally:
            self._refresh_in_progress.clear()

        return InstanceMetadata(
            ephemeral_cert,
            metadata["database_version"],
            metadata["ip_addresses"],
            priv_key,
            metadata["server_ca_cert"],
            expiration,
            self._enable_iam_auth,
        )

    def _schedule_refresh(self, delay: int) -> asyncio.Task:
        """
        Schedule task to sleep and then perform refresh to get InstanceMetadata.

        :type delay: int
        :param delay:
            Time in seconds to sleep before running _perform_refresh.

        :rtype: asyncio.Task
        :returns: A Task representing the scheduled _perform_refresh.
""" async def _refresh_task(self: Instance, delay: int) -> InstanceMetadata: """ A coroutine that sleeps for the specified amount of time before running _perform_refresh. """ refresh_task: asyncio.Task try: logger.debug(f"['{self._instance_connection_string}']: Entering sleep") if delay > 0: await asyncio.sleep(delay) refresh_task = self._loop.create_task(self._perform_refresh()) refresh_data = await refresh_task except asyncio.CancelledError: logger.debug( f"['{self._instance_connection_string}']: Schedule refresh task cancelled." ) raise # bad refresh attempt except Exception as e: logger.exception( f"['{self._instance_connection_string}']: " "An error occurred while performing refresh. " "Scheduling another refresh attempt immediately", exc_info=e, ) # check if current metadata is invalid (expired), # don't want to replace valid metadata with invalid refresh if not await _is_valid(self._current): self._current = refresh_task # schedule new refresh attempt immediately self._next = self._schedule_refresh(0) raise # if valid refresh, replace current with valid metadata and schedule next refresh self._current = refresh_task # calculate refresh delay based on certificate expiration delay = _seconds_until_refresh(refresh_data.expiration) self._next = self._schedule_refresh(delay) return refresh_data # schedule refresh task and return it scheduled_task = self._loop.create_task(_refresh_task(self, delay)) return scheduled_task async def connect_info( self, ip_type: IPTypes, ) -> Tuple[InstanceMetadata, str]: """Retrieve instance metadata and ip address required for making connection to Cloud SQL instance. :type ip_type: IPTypes :param ip_type: Enum specifying whether to look for public or private IP address. :rtype instance_data: InstanceMetadata :returns: Instance metadata for Cloud SQL instance. :rtype ip_address: str :returns: A string representing the IP address of the given Cloud SQL instance. """ logger.debug( f"['{self._instance_connection_string}']: Entered connect_info method" ) instance_data: InstanceMetadata instance_data = await self._current ip_address: str = instance_data.get_preferred_ip(ip_type) return instance_data, ip_address async def close(self) -> None: """Cleanup function to make sure ClientSession is closed and tasks have finished to have a graceful exit. """ logger.debug( f"['{self._instance_connection_string}']: Waiting for _current to be cancelled" ) self._current.cancel() logger.debug( f"['{self._instance_connection_string}']: Waiting for _next to be cancelled" ) self._next.cancel() logger.debug( f"['{self._instance_connection_string}']: Waiting for _client_session to close" ) await self._client_session.close()
force_refresh
identifier_name
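force_refresh above leans on the connector's two-task bookkeeping: _current always points at the latest usable metadata task, _next at the scheduled replacement. A simplified, self-contained sketch of that pattern (not the connector's actual API; it must be instantiated inside a running event loop):

import asyncio

class Refresher:
    def __init__(self, fetch):
        self._fetch = fetch                           # async callable returning fresh data
        self._current = asyncio.ensure_future(fetch())
        self._next = self._current

    async def force_refresh(self):
        if self._next is not self._current:           # drop any already-scheduled attempt
            self._next.cancel()
        self._next = asyncio.ensure_future(self._fetch())
        try:
            await self._current                       # is the current result still usable?
        except Exception:
            self._current = self._next                # no: block callers on the new attempt

    async def get(self):
        return await self._current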
instance.py
""" Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import annotations import asyncio from enum import Enum import logging import ssl from tempfile import TemporaryDirectory from typing import ( Any, Dict, Optional, Tuple, TYPE_CHECKING, ) import aiohttp from cryptography.hazmat.backends import default_backend from cryptography.x509 import load_pem_x509_certificate from google.auth.credentials import Credentials from google.cloud.sql.connector.exceptions import ( AutoIAMAuthNotSupported, CloudSQLIPTypeError, CredentialsTypeError, TLSVersionError, ) from google.cloud.sql.connector.rate_limiter import AsyncRateLimiter from google.cloud.sql.connector.refresh_utils import ( _get_ephemeral, _get_metadata, _is_valid, _seconds_until_refresh, ) from google.cloud.sql.connector.utils import _auth_init, write_to_file from google.cloud.sql.connector.version import __version__ as version if TYPE_CHECKING: import datetime logger = logging.getLogger(name=__name__) APPLICATION_NAME = "cloud-sql-python-connector" class IPTypes(Enum): PUBLIC: str = "PRIMARY" PRIVATE: str = "PRIVATE" PSC: str = "PSC" class InstanceMetadata: ip_addrs: Dict[str, Any] context: ssl.SSLContext database_version: str expiration: datetime.datetime def __init__( self, ephemeral_cert: str, database_version: str, ip_addrs: Dict[str, Any], private_key: bytes, server_ca_cert: str, expiration: datetime.datetime, enable_iam_auth: bool, ) -> None: self.ip_addrs = ip_addrs self.database_version = database_version self.context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) # update ssl.PROTOCOL_TLS_CLIENT default self.context.check_hostname = False # verify OpenSSL version supports TLSv1.3 if ssl.HAS_TLSv1_3: # force TLSv1.3 if supported by client self.context.minimum_version = ssl.TLSVersion.TLSv1_3 # fallback to TLSv1.2 for older versions of OpenSSL else: if enable_iam_auth: raise TLSVersionError( f"Your current version of OpenSSL ({ssl.OPENSSL_VERSION}) does not " "support TLSv1.3, which is required to use IAM Authentication.\n" "Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support." ) logger.warning( "TLSv1.3 is not supported with your version of OpenSSL " f"({ssl.OPENSSL_VERSION}), falling back to TLSv1.2\n" "Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support." ) self.context.minimum_version = ssl.TLSVersion.TLSv1_2 self.expiration = expiration # tmpdir and its contents are automatically deleted after the CA cert # and ephemeral cert are loaded into the SSLcontext. The values # need to be written to files in order to be loaded by the SSLContext with TemporaryDirectory() as tmpdir: ca_filename, cert_filename, key_filename = write_to_file( tmpdir, server_ca_cert, ephemeral_cert, private_key ) self.context.load_cert_chain(cert_filename, keyfile=key_filename) self.context.load_verify_locations(cafile=ca_filename) def get_preferred_ip(self, ip_type: IPTypes) -> str: """Returns the first IP address for the instance, according to the preference supplied by ip_type. 
    If no IP addresses with the given preference are found, an error is raised."""
        if ip_type.value in self.ip_addrs:
            return self.ip_addrs[ip_type.value]
        raise CloudSQLIPTypeError(
            "Cloud SQL instance does not have any IP addresses matching "
            f"preference: {ip_type.value})"
        )


class Instance:
    """A class to manage the details of the connection to a Cloud SQL
    instance, including refreshing the credentials.

    :param instance_connection_string:
        The Google Cloud SQL Instance's connection string.
    :type instance_connection_string: str

    :param user_agent_string:
        The user agent string to append to SQLAdmin API requests
    :type user_agent_string: str

    :param credentials:
        Credentials object used to authenticate connections to Cloud SQL server.
        If not specified, Application Default Credentials are used.
    :type credentials: google.auth.credentials.Credentials

    :param enable_iam_auth:
        Enables automatic IAM database authentication for Postgres or MySQL
        instances.
    :type enable_iam_auth: bool

    :param loop:
        A new event loop for the refresh function to run in.
    :type loop: asyncio.AbstractEventLoop

    :param quota_project:
        The Project ID for an existing Google Cloud project. The project specified
        is used for quota and billing purposes. If not specified, defaults to
        project sourced from environment.
    :type quota_project: str

    :param sqladmin_api_endpoint:
        Base URL to use when calling the Cloud SQL Admin API endpoint.
        Defaults to "https://sqladmin.googleapis.com", this argument should
        only be used in development.
    :type sqladmin_api_endpoint: str
    """

    # asyncio.AbstractEventLoop is used because the default loop,
    # SelectorEventLoop, is usable on both Unix and Windows but has limited
    # functionality on Windows. It is recommended to use ProactorEventLoop
    # while developing on Windows.
    # Link to Github issue:
    # https://github.com/GoogleCloudPlatform/cloud-sql-python-connector/issues/22
    _loop: asyncio.AbstractEventLoop

    _enable_iam_auth: bool

    __client_session: Optional[aiohttp.ClientSession] = None

    @property
    def _client_session(self) -> aiohttp.ClientSession:
        if self.__client_session is None:
            headers = {
                "x-goog-api-client": self._user_agent_string,
                "User-Agent": self._user_agent_string,
                "Content-Type": "application/json",
            }
            if self._quota_project:
                headers["x-goog-user-project"] = self._quota_project
            self.__client_session = aiohttp.ClientSession(headers=headers)
        return self.__client_session

    _credentials: Optional[Credentials] = None
    _keys: asyncio.Future

    _instance_connection_string: str
    _user_agent_string: str
    _sqladmin_api_endpoint: str
    _instance: str
    _project: str
    _region: str

    _refresh_rate_limiter: AsyncRateLimiter
    _refresh_in_progress: asyncio.locks.Event
    _current: asyncio.Task  # task wraps coroutine that returns InstanceMetadata
    _next: asyncio.Task  # task wraps coroutine that returns another task

    def __init__(
        self,
        instance_connection_string: str,
        driver_name: str,
        keys: asyncio.Future,
        loop: asyncio.AbstractEventLoop,
        credentials: Optional[Credentials] = None,
        enable_iam_auth: bool = False,
        quota_project: Optional[str] = None,
        sqladmin_api_endpoint: str = "https://sqladmin.googleapis.com",
    ) -> None:
        # Validate connection string
        connection_string_split = instance_connection_string.split(":")

        if len(connection_string_split) == 3:
            self._instance_connection_string = instance_connection_string
            self._project = connection_string_split[0]
            self._region = connection_string_split[1]
            self._instance = connection_string_split[2]
        else:
            raise ValueError(
                "Arg `instance_connection_string` must have "
                "format: PROJECT:REGION:INSTANCE, "
                f"got
{instance_connection_string}."
            )

        self._enable_iam_auth = enable_iam_auth

        self._user_agent_string = f"{APPLICATION_NAME}/{version}+{driver_name}"
        self._quota_project = quota_project
        self._sqladmin_api_endpoint = sqladmin_api_endpoint
        self._loop = loop
        self._keys = keys
        # validate credentials type
        if not isinstance(credentials, Credentials) and credentials is not None:
            raise CredentialsTypeError(
                "Arg credentials must be type 'google.auth.credentials.Credentials' "
                "or None (to use Application Default Credentials)"
            )
        self._credentials = _auth_init(credentials)

        self._refresh_rate_limiter = AsyncRateLimiter(
            max_capacity=2, rate=1 / 30, loop=self._loop
        )
        self._refresh_in_progress = asyncio.locks.Event()
        self._current = self._schedule_refresh(0)
        self._next = self._current

    async def force_refresh(self) -> None:
        """
        Forces a new refresh attempt immediately to be used for future connection attempts.
        """
        # if next refresh is not already in progress, cancel it and schedule new one immediately
        if not self._refresh_in_progress.is_set():
            self._next.cancel()
            self._next = self._schedule_refresh(0)
        # block all sequential connection attempts on the next refresh result if current is invalid
        if not await _is_valid(self._current):
            self._current = self._next

    async def _perform_refresh(self) -> InstanceMetadata:
        """Retrieves instance metadata and ephemeral certificate from the
        Cloud SQL Instance.

        :rtype: InstanceMetadata
        :returns: A dataclass containing a string representing the ephemeral certificate, a dict
            containing the instance's IP addresses, a string representing a PEM-encoded private key
            and a string representing a PEM-encoded certificate authority.
        """
        self._refresh_in_progress.set()
        logger.debug(
            f"['{self._instance_connection_string}']: Entered _perform_refresh"
        )

        try:
            await self._refresh_rate_limiter.acquire()
            priv_key, pub_key = await self._keys

            logger.debug(f"['{self._instance_connection_string}']: Creating context")

            metadata_task = self._loop.create_task(
                _get_metadata(
                    self._client_session,
                    self._sqladmin_api_endpoint,
                    self._credentials,
                    self._project,
                    self._region,
                    self._instance,
                )
            )

            ephemeral_task = self._loop.create_task(
                _get_ephemeral(
                    self._client_session,
                    self._sqladmin_api_endpoint,
                    self._credentials,
                    self._project,
                    self._instance,
                    pub_key,
                    self._enable_iam_auth,
                )
            )
            try:
                metadata = await metadata_task
                # check if automatic IAM database authn is supported for database engine
                if self._enable_iam_auth and not metadata[
                    "database_version"
                ].startswith(("POSTGRES", "MYSQL")):
                    raise AutoIAMAuthNotSupported(
                        f"'{metadata['database_version']}' does not support automatic IAM authentication. It is only supported with Cloud SQL Postgres or MySQL instances."
                    )
            except Exception:
                # cancel ephemeral cert task if exception occurs before it is awaited
                ephemeral_task.cancel()
                raise

            ephemeral_cert = await ephemeral_task

            x509 = load_pem_x509_certificate(
                ephemeral_cert.encode("UTF-8"), default_backend()
            )
            expiration = x509.not_valid_after

            if self._enable_iam_auth:
                if self._credentials is not None:
                    token_expiration: datetime.datetime = self._credentials.expiry
                if expiration > token_expiration:
                    expiration = token_expiration

        except aiohttp.ClientResponseError as e:
            logger.debug(
                f"['{self._instance_connection_string}']: Error occurred during _perform_refresh."
            )
            if e.status == 403:
                e.message = "Forbidden: Authenticated IAM principal does not seem authorized to make API requests. Verify 'Cloud SQL Admin API' is enabled within your GCP project and 'Cloud SQL Client' role has been granted to IAM principal."
            raise

        except Exception:
            logger.debug(
                f"['{self._instance_connection_string}']: Error occurred during _perform_refresh."
            )
            raise

        finally:
            self._refresh_in_progress.clear()

        return InstanceMetadata(
            ephemeral_cert,
            metadata["database_version"],
            metadata["ip_addresses"],
            priv_key,
            metadata["server_ca_cert"],
            expiration,
            self._enable_iam_auth,
        )

    def _schedule_refresh(self, delay: int) -> asyncio.Task:
        """
        Schedule task to sleep and then perform refresh to get InstanceMetadata.

        :type delay: int
        :param delay:
            Time in seconds to sleep before running _perform_refresh.

        :rtype: asyncio.Task
        :returns: A Task representing the scheduled _perform_refresh.
        """

        async def _refresh_task(self: Instance, delay: int) -> InstanceMetadata:
            """
            A coroutine that sleeps for the specified amount of time before
            running _perform_refresh.
            """
            refresh_task: asyncio.Task

            try:
                logger.debug(f"['{self._instance_connection_string}']: Entering sleep")

                if delay > 0:
refresh_task = self._loop.create_task(self._perform_refresh()) refresh_data = await refresh_task except asyncio.CancelledError: logger.debug( f"['{self._instance_connection_string}']: Schedule refresh task cancelled." ) raise # bad refresh attempt except Exception as e: logger.exception( f"['{self._instance_connection_string}']: " "An error occurred while performing refresh. " "Scheduling another refresh attempt immediately", exc_info=e, ) # check if current metadata is invalid (expired), # don't want to replace valid metadata with invalid refresh if not await _is_valid(self._current): self._current = refresh_task # schedule new refresh attempt immediately self._next = self._schedule_refresh(0) raise # if valid refresh, replace current with valid metadata and schedule next refresh self._current = refresh_task # calculate refresh delay based on certificate expiration delay = _seconds_until_refresh(refresh_data.expiration) self._next = self._schedule_refresh(delay) return refresh_data # schedule refresh task and return it scheduled_task = self._loop.create_task(_refresh_task(self, delay)) return scheduled_task async def connect_info( self, ip_type: IPTypes, ) -> Tuple[InstanceMetadata, str]: """Retrieve instance metadata and ip address required for making connection to Cloud SQL instance. :type ip_type: IPTypes :param ip_type: Enum specifying whether to look for public or private IP address. :rtype instance_data: InstanceMetadata :returns: Instance metadata for Cloud SQL instance. :rtype ip_address: str :returns: A string representing the IP address of the given Cloud SQL instance. """ logger.debug( f"['{self._instance_connection_string}']: Entered connect_info method" ) instance_data: InstanceMetadata instance_data = await self._current ip_address: str = instance_data.get_preferred_ip(ip_type) return instance_data, ip_address async def close(self) -> None: """Cleanup function to make sure ClientSession is closed and tasks have finished to have a graceful exit. """ logger.debug( f"['{self._instance_connection_string}']: Waiting for _current to be cancelled" ) self._current.cancel() logger.debug( f"['{self._instance_connection_string}']: Waiting for _next to be cancelled" ) self._next.cancel() logger.debug( f"['{self._instance_connection_string}']: Waiting for _client_session to close" ) await self._client_session.close()
await asyncio.sleep(delay)
conditional_block
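The row above exercises the connector's sleep-then-refresh pattern: a scheduled task sleeps out most of a certificate's lifetime, performs the refresh, and then schedules its successor. Below is a minimal runnable sketch of that pattern; the fetch_metadata coroutine and the fixed 10-second delay are illustrative stand-ins for the real certificate fetch and _seconds_until_refresh, not part of the connector.

import asyncio

# Hypothetical stand-in for the real metadata/certificate fetch.
async def fetch_metadata() -> dict:
    await asyncio.sleep(0)  # pretend this is a network call
    return {"cert": "...", "expiration": "..."}

class Refresher:
    """Sleep-then-refresh loop: each refresh schedules its successor."""

    def start(self) -> None:
        # the first refresh runs immediately (delay=0)
        self.current = asyncio.ensure_future(self._refresh_task(0))

    async def _refresh_task(self, delay: float) -> dict:
        if delay > 0:
            await asyncio.sleep(delay)  # wait out most of the cert lifetime
        data = await fetch_metadata()
        # the fixed 10s stands in for _seconds_until_refresh(expiration)
        self.next = asyncio.ensure_future(self._refresh_task(10))
        return data

async def main() -> None:
    r = Refresher()
    r.start()
    print(await r.current)  # metadata from the immediate first refresh
    r.next.cancel()         # stop the chain for this demo

asyncio.run(main())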
instance.py
""" Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import annotations import asyncio from enum import Enum import logging import ssl from tempfile import TemporaryDirectory from typing import ( Any, Dict, Optional, Tuple, TYPE_CHECKING, ) import aiohttp from cryptography.hazmat.backends import default_backend from cryptography.x509 import load_pem_x509_certificate from google.auth.credentials import Credentials from google.cloud.sql.connector.exceptions import ( AutoIAMAuthNotSupported, CloudSQLIPTypeError, CredentialsTypeError, TLSVersionError, ) from google.cloud.sql.connector.rate_limiter import AsyncRateLimiter from google.cloud.sql.connector.refresh_utils import ( _get_ephemeral, _get_metadata, _is_valid, _seconds_until_refresh, ) from google.cloud.sql.connector.utils import _auth_init, write_to_file from google.cloud.sql.connector.version import __version__ as version if TYPE_CHECKING: import datetime logger = logging.getLogger(name=__name__) APPLICATION_NAME = "cloud-sql-python-connector" class IPTypes(Enum): PUBLIC: str = "PRIMARY" PRIVATE: str = "PRIVATE" PSC: str = "PSC" class InstanceMetadata: ip_addrs: Dict[str, Any] context: ssl.SSLContext database_version: str expiration: datetime.datetime def __init__( self, ephemeral_cert: str, database_version: str, ip_addrs: Dict[str, Any], private_key: bytes, server_ca_cert: str, expiration: datetime.datetime, enable_iam_auth: bool,
# update ssl.PROTOCOL_TLS_CLIENT default self.context.check_hostname = False # verify OpenSSL version supports TLSv1.3 if ssl.HAS_TLSv1_3: # force TLSv1.3 if supported by client self.context.minimum_version = ssl.TLSVersion.TLSv1_3 # fallback to TLSv1.2 for older versions of OpenSSL else: if enable_iam_auth: raise TLSVersionError( f"Your current version of OpenSSL ({ssl.OPENSSL_VERSION}) does not " "support TLSv1.3, which is required to use IAM Authentication.\n" "Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support." ) logger.warning( "TLSv1.3 is not supported with your version of OpenSSL " f"({ssl.OPENSSL_VERSION}), falling back to TLSv1.2\n" "Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support." ) self.context.minimum_version = ssl.TLSVersion.TLSv1_2 self.expiration = expiration # tmpdir and its contents are automatically deleted after the CA cert # and ephemeral cert are loaded into the SSLcontext. The values # need to be written to files in order to be loaded by the SSLContext with TemporaryDirectory() as tmpdir: ca_filename, cert_filename, key_filename = write_to_file( tmpdir, server_ca_cert, ephemeral_cert, private_key ) self.context.load_cert_chain(cert_filename, keyfile=key_filename) self.context.load_verify_locations(cafile=ca_filename) def get_preferred_ip(self, ip_type: IPTypes) -> str: """Returns the first IP address for the instance, according to the preference supplied by ip_type. If no IP addressess with the given preference are found, an error is raised.""" if ip_type.value in self.ip_addrs: return self.ip_addrs[ip_type.value] raise CloudSQLIPTypeError( "Cloud SQL instance does not have any IP addresses matching " f"preference: {ip_type.value})" ) class Instance: """A class to manage the details of the connection to a Cloud SQL instance, including refreshing the credentials. :param instance_connection_string: The Google Cloud SQL Instance's connection string. :type instance_connection_string: str :param user_agent_string: The user agent string to append to SQLAdmin API requests :type user_agent_string: str :type credentials: google.auth.credentials.Credentials :param credentials Credentials object used to authenticate connections to Cloud SQL server. If not specified, Application Default Credentials are used. :param enable_iam_auth Enables automatic IAM database authentication for Postgres or MySQL instances. :type enable_iam_auth: bool :param loop: A new event loop for the refresh function to run in. :type loop: asyncio.AbstractEventLoop :type quota_project: str :param quota_project The Project ID for an existing Google Cloud project. The project specified is used for quota and billing purposes. If not specified, defaults to project sourced from environment. :type sqladmin_api_endpoint: str :param sqladmin_api_endpoint: Base URL to use when calling the Cloud SQL Admin API endpoint. Defaults to "https://sqladmin.googleapis.com", this argument should only be used in development. """ # asyncio.AbstractEventLoop is used because the default loop, # SelectorEventLoop, is usable on both Unix and Windows but has limited # functionality on Windows. It is recommended to use ProactorEventLoop # while developing on Windows. 
# Link to Github issue: # https://github.com/GoogleCloudPlatform/cloud-sql-python-connector/issues/22 _loop: asyncio.AbstractEventLoop _enable_iam_auth: bool __client_session: Optional[aiohttp.ClientSession] = None @property def _client_session(self) -> aiohttp.ClientSession: if self.__client_session is None: headers = { "x-goog-api-client": self._user_agent_string, "User-Agent": self._user_agent_string, "Content-Type": "application/json", } if self._quota_project: headers["x-goog-user-project"] = self._quota_project self.__client_session = aiohttp.ClientSession(headers=headers) return self.__client_session _credentials: Optional[Credentials] = None _keys: asyncio.Future _instance_connection_string: str _user_agent_string: str _sqladmin_api_endpoint: str _instance: str _project: str _region: str _refresh_rate_limiter: AsyncRateLimiter _refresh_in_progress: asyncio.locks.Event _current: asyncio.Task # task wraps coroutine that returns InstanceMetadata _next: asyncio.Task # task wraps coroutine that returns another task def __init__( self, instance_connection_string: str, driver_name: str, keys: asyncio.Future, loop: asyncio.AbstractEventLoop, credentials: Optional[Credentials] = None, enable_iam_auth: bool = False, quota_project: str = None, sqladmin_api_endpoint: str = "https://sqladmin.googleapis.com", ) -> None: # Validate connection string connection_string_split = instance_connection_string.split(":") if len(connection_string_split) == 3: self._instance_connection_string = instance_connection_string self._project = connection_string_split[0] self._region = connection_string_split[1] self._instance = connection_string_split[2] else: raise ValueError( "Arg `instance_connection_string` must have " "format: PROJECT:REGION:INSTANCE, " f"got {instance_connection_string}." ) self._enable_iam_auth = enable_iam_auth self._user_agent_string = f"{APPLICATION_NAME}/{version}+{driver_name}" self._quota_project = quota_project self._sqladmin_api_endpoint = sqladmin_api_endpoint self._loop = loop self._keys = keys # validate credentials type if not isinstance(credentials, Credentials) and credentials is not None: raise CredentialsTypeError( "Arg credentials must be type 'google.auth.credentials.Credentials' " "or None (to use Application Default Credentials)" ) self._credentials = _auth_init(credentials) self._refresh_rate_limiter = AsyncRateLimiter( max_capacity=2, rate=1 / 30, loop=self._loop ) self._refresh_in_progress = asyncio.locks.Event() self._current = self._schedule_refresh(0) self._next = self._current async def force_refresh(self) -> None: """ Forces a new refresh attempt immediately to be used for future connection attempts. """ # if next refresh is not already in progress, cancel it and schedule new one immediately if not self._refresh_in_progress.is_set(): self._next.cancel() self._next = self._schedule_refresh(0) # block all sequential connection attempts on the next refresh result if current is invalid if not await _is_valid(self._current): self._current = self._next async def _perform_refresh(self) -> InstanceMetadata: """Retrieves instance metadata and ephemeral certificate from the Cloud SQL Instance. :rtype: InstanceMetadata :returns: A dataclass containing a string representing the ephemeral certificate, a dict containing the instances IP adresses, a string representing a PEM-encoded private key and a string representing a PEM-encoded certificate authority. 
""" self._refresh_in_progress.set() logger.debug( f"['{self._instance_connection_string}']: Entered _perform_refresh" ) try: await self._refresh_rate_limiter.acquire() priv_key, pub_key = await self._keys logger.debug(f"['{self._instance_connection_string}']: Creating context") metadata_task = self._loop.create_task( _get_metadata( self._client_session, self._sqladmin_api_endpoint, self._credentials, self._project, self._region, self._instance, ) ) ephemeral_task = self._loop.create_task( _get_ephemeral( self._client_session, self._sqladmin_api_endpoint, self._credentials, self._project, self._instance, pub_key, self._enable_iam_auth, ) ) try: metadata = await metadata_task # check if automatic IAM database authn is supported for database engine if self._enable_iam_auth and not metadata[ "database_version" ].startswith(("POSTGRES", "MYSQL")): raise AutoIAMAuthNotSupported( f"'{metadata['database_version']}' does not support automatic IAM authentication. It is only supported with Cloud SQL Postgres or MySQL instances." ) except Exception: # cancel ephemeral cert task if exception occurs before it is awaited ephemeral_task.cancel() raise ephemeral_cert = await ephemeral_task x509 = load_pem_x509_certificate( ephemeral_cert.encode("UTF-8"), default_backend() ) expiration = x509.not_valid_after if self._enable_iam_auth: if self._credentials is not None: token_expiration: datetime.datetime = self._credentials.expiry if expiration > token_expiration: expiration = token_expiration except aiohttp.ClientResponseError as e: logger.debug( f"['{self._instance_connection_string}']: Error occurred during _perform_refresh." ) if e.status == 403: e.message = "Forbidden: Authenticated IAM principal does not seeem authorized to make API request. Verify 'Cloud SQL Admin API' is enabled within your GCP project and 'Cloud SQL Client' role has been granted to IAM principal." raise except Exception: logger.debug( f"['{self._instance_connection_string}']: Error occurred during _perform_refresh." ) raise finally: self._refresh_in_progress.clear() return InstanceMetadata( ephemeral_cert, metadata["database_version"], metadata["ip_addresses"], priv_key, metadata["server_ca_cert"], expiration, self._enable_iam_auth, ) def _schedule_refresh(self, delay: int) -> asyncio.Task: """ Schedule task to sleep and then perform refresh to get InstanceMetadata. :type delay: int :param delay Time in seconds to sleep before running _perform_refresh. :rtype: asyncio.Task :returns: A Task representing the scheduled _perform_refresh. """ async def _refresh_task(self: Instance, delay: int) -> InstanceMetadata: """ A coroutine that sleeps for the specified amount of time before running _perform_refresh. """ refresh_task: asyncio.Task try: logger.debug(f"['{self._instance_connection_string}']: Entering sleep") if delay > 0: await asyncio.sleep(delay) refresh_task = self._loop.create_task(self._perform_refresh()) refresh_data = await refresh_task except asyncio.CancelledError: logger.debug( f"['{self._instance_connection_string}']: Schedule refresh task cancelled." ) raise # bad refresh attempt except Exception as e: logger.exception( f"['{self._instance_connection_string}']: " "An error occurred while performing refresh. 
" "Scheduling another refresh attempt immediately", exc_info=e, ) # check if current metadata is invalid (expired), # don't want to replace valid metadata with invalid refresh if not await _is_valid(self._current): self._current = refresh_task # schedule new refresh attempt immediately self._next = self._schedule_refresh(0) raise # if valid refresh, replace current with valid metadata and schedule next refresh self._current = refresh_task # calculate refresh delay based on certificate expiration delay = _seconds_until_refresh(refresh_data.expiration) self._next = self._schedule_refresh(delay) return refresh_data # schedule refresh task and return it scheduled_task = self._loop.create_task(_refresh_task(self, delay)) return scheduled_task async def connect_info( self, ip_type: IPTypes, ) -> Tuple[InstanceMetadata, str]: """Retrieve instance metadata and ip address required for making connection to Cloud SQL instance. :type ip_type: IPTypes :param ip_type: Enum specifying whether to look for public or private IP address. :rtype instance_data: InstanceMetadata :returns: Instance metadata for Cloud SQL instance. :rtype ip_address: str :returns: A string representing the IP address of the given Cloud SQL instance. """ logger.debug( f"['{self._instance_connection_string}']: Entered connect_info method" ) instance_data: InstanceMetadata instance_data = await self._current ip_address: str = instance_data.get_preferred_ip(ip_type) return instance_data, ip_address async def close(self) -> None: """Cleanup function to make sure ClientSession is closed and tasks have finished to have a graceful exit. """ logger.debug( f"['{self._instance_connection_string}']: Waiting for _current to be cancelled" ) self._current.cancel() logger.debug( f"['{self._instance_connection_string}']: Waiting for _next to be cancelled" ) self._next.cancel() logger.debug( f"['{self._instance_connection_string}']: Waiting for _client_session to close" ) await self._client_session.close()
) -> None: self.ip_addrs = ip_addrs self.database_version = database_version self.context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
random_line_split
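The InstanceMetadata constructor in the row above builds an ssl.SSLContext that prefers TLSv1.3 and only falls back to TLSv1.2 when the local OpenSSL build lacks support (raising instead when IAM authentication requires TLSv1.3). A standalone sketch of just that version-negotiation logic, with certificate loading omitted:

import ssl

# Sketch of the TLS minimum-version logic above: prefer TLSv1.3, fall back
# to TLSv1.2 on older OpenSSL builds unless the caller requires TLSv1.3.
# No certificates are loaded; this only shows the version negotiation.
def make_client_context(require_tls13: bool) -> ssl.SSLContext:
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT)
    ctx.check_hostname = False  # server certs are matched via the CA, not hostname
    if ssl.HAS_TLSv1_3:
        ctx.minimum_version = ssl.TLSVersion.TLSv1_3
    elif require_tls13:
        raise RuntimeError(f"OpenSSL {ssl.OPENSSL_VERSION} lacks TLSv1.3 support")
    else:
        ctx.minimum_version = ssl.TLSVersion.TLSv1_2
    return ctx

print(make_client_context(require_tls13=False).minimum_version)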
instance.py
""" Copyright 2019 Google LLC Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from __future__ import annotations import asyncio from enum import Enum import logging import ssl from tempfile import TemporaryDirectory from typing import ( Any, Dict, Optional, Tuple, TYPE_CHECKING, ) import aiohttp from cryptography.hazmat.backends import default_backend from cryptography.x509 import load_pem_x509_certificate from google.auth.credentials import Credentials from google.cloud.sql.connector.exceptions import ( AutoIAMAuthNotSupported, CloudSQLIPTypeError, CredentialsTypeError, TLSVersionError, ) from google.cloud.sql.connector.rate_limiter import AsyncRateLimiter from google.cloud.sql.connector.refresh_utils import ( _get_ephemeral, _get_metadata, _is_valid, _seconds_until_refresh, ) from google.cloud.sql.connector.utils import _auth_init, write_to_file from google.cloud.sql.connector.version import __version__ as version if TYPE_CHECKING: import datetime logger = logging.getLogger(name=__name__) APPLICATION_NAME = "cloud-sql-python-connector" class IPTypes(Enum):
class InstanceMetadata: ip_addrs: Dict[str, Any] context: ssl.SSLContext database_version: str expiration: datetime.datetime def __init__( self, ephemeral_cert: str, database_version: str, ip_addrs: Dict[str, Any], private_key: bytes, server_ca_cert: str, expiration: datetime.datetime, enable_iam_auth: bool, ) -> None: self.ip_addrs = ip_addrs self.database_version = database_version self.context = ssl.SSLContext(ssl.PROTOCOL_TLS_CLIENT) # update ssl.PROTOCOL_TLS_CLIENT default self.context.check_hostname = False # verify OpenSSL version supports TLSv1.3 if ssl.HAS_TLSv1_3: # force TLSv1.3 if supported by client self.context.minimum_version = ssl.TLSVersion.TLSv1_3 # fallback to TLSv1.2 for older versions of OpenSSL else: if enable_iam_auth: raise TLSVersionError( f"Your current version of OpenSSL ({ssl.OPENSSL_VERSION}) does not " "support TLSv1.3, which is required to use IAM Authentication.\n" "Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support." ) logger.warning( "TLSv1.3 is not supported with your version of OpenSSL " f"({ssl.OPENSSL_VERSION}), falling back to TLSv1.2\n" "Upgrade your OpenSSL version to 1.1.1 for TLSv1.3 support." ) self.context.minimum_version = ssl.TLSVersion.TLSv1_2 self.expiration = expiration # tmpdir and its contents are automatically deleted after the CA cert # and ephemeral cert are loaded into the SSLcontext. The values # need to be written to files in order to be loaded by the SSLContext with TemporaryDirectory() as tmpdir: ca_filename, cert_filename, key_filename = write_to_file( tmpdir, server_ca_cert, ephemeral_cert, private_key ) self.context.load_cert_chain(cert_filename, keyfile=key_filename) self.context.load_verify_locations(cafile=ca_filename) def get_preferred_ip(self, ip_type: IPTypes) -> str: """Returns the first IP address for the instance, according to the preference supplied by ip_type. If no IP addressess with the given preference are found, an error is raised.""" if ip_type.value in self.ip_addrs: return self.ip_addrs[ip_type.value] raise CloudSQLIPTypeError( "Cloud SQL instance does not have any IP addresses matching " f"preference: {ip_type.value})" ) class Instance: """A class to manage the details of the connection to a Cloud SQL instance, including refreshing the credentials. :param instance_connection_string: The Google Cloud SQL Instance's connection string. :type instance_connection_string: str :param user_agent_string: The user agent string to append to SQLAdmin API requests :type user_agent_string: str :type credentials: google.auth.credentials.Credentials :param credentials Credentials object used to authenticate connections to Cloud SQL server. If not specified, Application Default Credentials are used. :param enable_iam_auth Enables automatic IAM database authentication for Postgres or MySQL instances. :type enable_iam_auth: bool :param loop: A new event loop for the refresh function to run in. :type loop: asyncio.AbstractEventLoop :type quota_project: str :param quota_project The Project ID for an existing Google Cloud project. The project specified is used for quota and billing purposes. If not specified, defaults to project sourced from environment. :type sqladmin_api_endpoint: str :param sqladmin_api_endpoint: Base URL to use when calling the Cloud SQL Admin API endpoint. Defaults to "https://sqladmin.googleapis.com", this argument should only be used in development. 
""" # asyncio.AbstractEventLoop is used because the default loop, # SelectorEventLoop, is usable on both Unix and Windows but has limited # functionality on Windows. It is recommended to use ProactorEventLoop # while developing on Windows. # Link to Github issue: # https://github.com/GoogleCloudPlatform/cloud-sql-python-connector/issues/22 _loop: asyncio.AbstractEventLoop _enable_iam_auth: bool __client_session: Optional[aiohttp.ClientSession] = None @property def _client_session(self) -> aiohttp.ClientSession: if self.__client_session is None: headers = { "x-goog-api-client": self._user_agent_string, "User-Agent": self._user_agent_string, "Content-Type": "application/json", } if self._quota_project: headers["x-goog-user-project"] = self._quota_project self.__client_session = aiohttp.ClientSession(headers=headers) return self.__client_session _credentials: Optional[Credentials] = None _keys: asyncio.Future _instance_connection_string: str _user_agent_string: str _sqladmin_api_endpoint: str _instance: str _project: str _region: str _refresh_rate_limiter: AsyncRateLimiter _refresh_in_progress: asyncio.locks.Event _current: asyncio.Task # task wraps coroutine that returns InstanceMetadata _next: asyncio.Task # task wraps coroutine that returns another task def __init__( self, instance_connection_string: str, driver_name: str, keys: asyncio.Future, loop: asyncio.AbstractEventLoop, credentials: Optional[Credentials] = None, enable_iam_auth: bool = False, quota_project: str = None, sqladmin_api_endpoint: str = "https://sqladmin.googleapis.com", ) -> None: # Validate connection string connection_string_split = instance_connection_string.split(":") if len(connection_string_split) == 3: self._instance_connection_string = instance_connection_string self._project = connection_string_split[0] self._region = connection_string_split[1] self._instance = connection_string_split[2] else: raise ValueError( "Arg `instance_connection_string` must have " "format: PROJECT:REGION:INSTANCE, " f"got {instance_connection_string}." ) self._enable_iam_auth = enable_iam_auth self._user_agent_string = f"{APPLICATION_NAME}/{version}+{driver_name}" self._quota_project = quota_project self._sqladmin_api_endpoint = sqladmin_api_endpoint self._loop = loop self._keys = keys # validate credentials type if not isinstance(credentials, Credentials) and credentials is not None: raise CredentialsTypeError( "Arg credentials must be type 'google.auth.credentials.Credentials' " "or None (to use Application Default Credentials)" ) self._credentials = _auth_init(credentials) self._refresh_rate_limiter = AsyncRateLimiter( max_capacity=2, rate=1 / 30, loop=self._loop ) self._refresh_in_progress = asyncio.locks.Event() self._current = self._schedule_refresh(0) self._next = self._current async def force_refresh(self) -> None: """ Forces a new refresh attempt immediately to be used for future connection attempts. """ # if next refresh is not already in progress, cancel it and schedule new one immediately if not self._refresh_in_progress.is_set(): self._next.cancel() self._next = self._schedule_refresh(0) # block all sequential connection attempts on the next refresh result if current is invalid if not await _is_valid(self._current): self._current = self._next async def _perform_refresh(self) -> InstanceMetadata: """Retrieves instance metadata and ephemeral certificate from the Cloud SQL Instance. 
:rtype: InstanceMetadata
        :returns: A dataclass containing a string representing the ephemeral
            certificate, a dict containing the instance's IP addresses, a string
            representing a PEM-encoded private key and a string representing a
            PEM-encoded certificate authority.
        """
        self._refresh_in_progress.set()
        logger.debug(
            f"['{self._instance_connection_string}']: Entered _perform_refresh"
        )
        try:
            await self._refresh_rate_limiter.acquire()
            priv_key, pub_key = await self._keys

            logger.debug(f"['{self._instance_connection_string}']: Creating context")

            metadata_task = self._loop.create_task(
                _get_metadata(
                    self._client_session,
                    self._sqladmin_api_endpoint,
                    self._credentials,
                    self._project,
                    self._region,
                    self._instance,
                )
            )

            ephemeral_task = self._loop.create_task(
                _get_ephemeral(
                    self._client_session,
                    self._sqladmin_api_endpoint,
                    self._credentials,
                    self._project,
                    self._instance,
                    pub_key,
                    self._enable_iam_auth,
                )
            )
            try:
                metadata = await metadata_task
                # check if automatic IAM database authn is supported for database engine
                if self._enable_iam_auth and not metadata[
                    "database_version"
                ].startswith(("POSTGRES", "MYSQL")):
                    raise AutoIAMAuthNotSupported(
                        f"'{metadata['database_version']}' does not support "
                        "automatic IAM authentication. It is only supported with "
                        "Cloud SQL Postgres or MySQL instances."
                    )
            except Exception:
                # cancel ephemeral cert task if exception occurs before it is awaited
                ephemeral_task.cancel()
                raise

            ephemeral_cert = await ephemeral_task

            x509 = load_pem_x509_certificate(
                ephemeral_cert.encode("UTF-8"), default_backend()
            )
            expiration = x509.not_valid_after

            if self._enable_iam_auth:
                if self._credentials is not None:
                    token_expiration: datetime.datetime = self._credentials.expiry
                if expiration > token_expiration:
                    expiration = token_expiration

        except aiohttp.ClientResponseError as e:
            logger.debug(
                f"['{self._instance_connection_string}']: Error occurred during "
                "_perform_refresh."
            )
            if e.status == 403:
                e.message = (
                    "Forbidden: Authenticated IAM principal does not seem "
                    "authorized to make API request. Verify 'Cloud SQL Admin API' "
                    "is enabled within your GCP project and 'Cloud SQL Client' "
                    "role has been granted to IAM principal."
                )
            raise

        except Exception:
            logger.debug(
                f"['{self._instance_connection_string}']: Error occurred during "
                "_perform_refresh."
            )
            raise

        finally:
            self._refresh_in_progress.clear()

        return InstanceMetadata(
            ephemeral_cert,
            metadata["database_version"],
            metadata["ip_addresses"],
            priv_key,
            metadata["server_ca_cert"],
            expiration,
            self._enable_iam_auth,
        )

    def _schedule_refresh(self, delay: int) -> asyncio.Task:
        """
        Schedule a task to sleep and then perform a refresh to get InstanceMetadata.

        :type delay: int
        :param delay: Time in seconds to sleep before running _perform_refresh.

        :rtype: asyncio.Task
        :returns: A Task representing the scheduled _perform_refresh.
        """

        async def _refresh_task(self: Instance, delay: int) -> InstanceMetadata:
            """
            A coroutine that sleeps for the specified amount of time before
            running _perform_refresh.
            """
            refresh_task: asyncio.Task
            try:
                logger.debug(f"['{self._instance_connection_string}']: Entering sleep")
                if delay > 0:
                    await asyncio.sleep(delay)
                refresh_task = self._loop.create_task(self._perform_refresh())
                refresh_data = await refresh_task
            except asyncio.CancelledError:
                logger.debug(
                    f"['{self._instance_connection_string}']: Schedule refresh task cancelled."
                )
                raise
            # bad refresh attempt
            except Exception as e:
                logger.exception(
                    f"['{self._instance_connection_string}']: "
                    "An error occurred while performing refresh. 
" "Scheduling another refresh attempt immediately", exc_info=e, ) # check if current metadata is invalid (expired), # don't want to replace valid metadata with invalid refresh if not await _is_valid(self._current): self._current = refresh_task # schedule new refresh attempt immediately self._next = self._schedule_refresh(0) raise # if valid refresh, replace current with valid metadata and schedule next refresh self._current = refresh_task # calculate refresh delay based on certificate expiration delay = _seconds_until_refresh(refresh_data.expiration) self._next = self._schedule_refresh(delay) return refresh_data # schedule refresh task and return it scheduled_task = self._loop.create_task(_refresh_task(self, delay)) return scheduled_task async def connect_info( self, ip_type: IPTypes, ) -> Tuple[InstanceMetadata, str]: """Retrieve instance metadata and ip address required for making connection to Cloud SQL instance. :type ip_type: IPTypes :param ip_type: Enum specifying whether to look for public or private IP address. :rtype instance_data: InstanceMetadata :returns: Instance metadata for Cloud SQL instance. :rtype ip_address: str :returns: A string representing the IP address of the given Cloud SQL instance. """ logger.debug( f"['{self._instance_connection_string}']: Entered connect_info method" ) instance_data: InstanceMetadata instance_data = await self._current ip_address: str = instance_data.get_preferred_ip(ip_type) return instance_data, ip_address async def close(self) -> None: """Cleanup function to make sure ClientSession is closed and tasks have finished to have a graceful exit. """ logger.debug( f"['{self._instance_connection_string}']: Waiting for _current to be cancelled" ) self._current.cancel() logger.debug( f"['{self._instance_connection_string}']: Waiting for _next to be cancelled" ) self._next.cancel() logger.debug( f"['{self._instance_connection_string}']: Waiting for _client_session to close" ) await self._client_session.close()
PUBLIC: str = "PRIMARY" PRIVATE: str = "PRIVATE" PSC: str = "PSC"
identifier_body
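Instance.__init__ in the row above validates the PROJECT:REGION:INSTANCE connection string by splitting on ':' and requiring exactly three parts. A self-contained sketch of that check (split_connection_string is a made-up helper name for illustration):

from typing import Tuple

# Minimal sketch of the PROJECT:REGION:INSTANCE validation in Instance.__init__;
# split_connection_string is a hypothetical helper, not part of the connector.
def split_connection_string(conn: str) -> Tuple[str, str, str]:
    parts = conn.split(":")
    if len(parts) != 3:
        raise ValueError(
            "Arg `instance_connection_string` must have format "
            f"PROJECT:REGION:INSTANCE, got {conn}."
        )
    project, region, instance = parts
    return project, region, instance

print(split_connection_string("my-project:europe-west1:my-db"))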
statutes_parse.py
import itertools
from collections import Counter

from regex import regex

from quantlaw.de_extract.statutes_abstract import StatutesProcessor
from quantlaw.de_extract.statutes_parse_patterns import (
    numb_pattern,
    pre_numb_pattern,
    sgb_dict,
    split_citation_into_parts_pattern,
    split_citation_into_range_parts_pattern,
    split_unit_number_pattern,
    unit_patterns,
)
from quantlaw.de_extract.stemming import stem_law_name


class StringCaseException(Exception):
    """
    Exception is raised if a unit in a reference cannot be parsed. In this case it
    is often an issue of upper or lower case formatting.
    """

    pass


class NoUnitMatched(Exception):
    """
    Exception is raised if a unit in a reference cannot be parsed.
    """

    pass


class StatutesParser(StatutesProcessor):
    """
    Class to parse the content of a reference area identified by StatutesExtractor
    """

    def parse_main(self, main_text: str) -> list:
        """
        Parses a string containing a reference to a specific section within a given
        law. E.g. "§ 123 Abs. 4 Satz 5 und 6". The parsed information is formatted
        into lists nested in lists nested in lists.

        The outer list is a list of references. References are lists of path
        components. A path component is e.g. "Abs. 4". A path component is
        represented by a list with two elements: the first contains the unit, the
        second the value.

        The example above would be represented as
        `[[['§', '123'], ['Abs', '4'], ['Satz', '5']],
        [['§', '123'], ['Abs', '4'], ['Satz', '6']]]`.

        Args:
            main_text: string to parse

        Returns: The parsed reference.
        """

        citation = self.fix_errors_in_citation(main_text.strip())

        enum_parts = self.split_citation_into_enum_parts(citation)

        reference_paths = []
        for enum_part in enum_parts:
            for string in enum_part:
                splitted_citation_part_list = list(self.split_citation_part(string))
                if len(splitted_citation_part_list):
                    reference_paths.append(splitted_citation_part_list)
                else:
                    print(f"Empty citation part in {citation} in part {string}")

        reference_paths = self.split_parts_accidently_joined(reference_paths)

        for reference_path in reference_paths[1:]:
            prev_reference_path = reference_paths[
                reference_paths.index(reference_path) - 1
            ]
            self.infer_units(reference_path, prev_reference_path)

        return reference_paths

    def parse_law(self, law_text: str, match_type: str, current_lawid: str = None):
        """
        Parses the law information from a reference found by StatutesMatchWithMainArea

        Args:
            law_text: E.g. "BGB"
            match_type: E.g. "dict"

        Returns: The key of the parsed law.
        """
        if match_type == "dict":
            lawname_stem = stem_law_name(law_text)
            match = self.match_law_name(lawname_stem)
            return self.laws_lookup[match]

        elif match_type == "sgb":
            lawid = sgb_dict[stem_law_name(law_text)]
            if type(lawid) is tuple:
                asse
else: return lawid elif match_type == "internal": if current_lawid is None: raise Exception("Current law id must be set for internal reference") return current_lawid else: return None # match_type: ignore or unknown @staticmethod def stem_unit(unit: str): """ Brings a unit into a standard format. E.g. removes abbreviations, grammatical differences spelling errors, etc. Args: unit: A string containing a unit that should be converted into a standard format. Returns: Unit in a standard format as string. E.g. §, Art, Nr, Halbsatz, Anhang, ... """ for unit_pattern in unit_patterns: if regex.fullmatch(unit_pattern, unit): return unit_patterns[unit_pattern] raise NoUnitMatched(unit) @staticmethod def is_unit(token: str): """ Returns: True if the token is a unit """ return regex.fullmatch("|".join(unit_patterns.keys()), token) @staticmethod def is_pre_numb(token: str): """ Returns: True if the token is a number that comes *before* the unit. E.g. '*erster* Halbsatz' """ return pre_numb_pattern.fullmatch( token, ) @staticmethod def is_numb(token: str): """ Returns: True if the token is a 'numeric' value of the reference. """ return numb_pattern.fullmatch( token, ) @staticmethod def fix_errors_in_citation(citation): """ Fix some common inconsistencies in the references such as double spaces. """ result = regex.sub(r"\s+", " ", citation) result = regex.sub(r"§(?=\d)", "§ ", result) result = regex.sub(r",\sbis\s", " bis ", result) return result @staticmethod def split_citation_into_enum_parts(citation): """ A citation can contain references to multiple parts of the law. E.g. '§§ 20 und 35' or 'Art. 3 Abs. 1 Satz 1, Abs. 3 Satz 1'. The citation is split into parts so that each referenced section of the law is separated. E.g. '§§ 20' and '35' resp. 'Art. 3 Abs. 1 Satz 1' and 'Abs. 3 Satz 1'. However, ranges are not spit: E.g. "§§ 1 bis 10" will not be split. """ enum_parts = split_citation_into_parts_pattern.split( citation, ) # Split range enum_parts = [ split_citation_into_range_parts_pattern.split(part) for part in enum_parts ] return enum_parts @staticmethod def split_parts_accidently_joined(reference_paths): """ Reformats the parsed references to separate accitently joined references. E.g. the original referehence "§ 123 § 126" will not be split by split_citation_into_enum_parts because the separation is falsly not indicated by a ',', 'or' etc. It come from the unit '§' that it can be inferred that the citation contains references to two parts of statutes. This function accounts for the case that the unit '§' or 'Art' appears twice in the same reference path and split the path into several elements. """ new_reference_paths = [] main_unit = ( "Art" if Counter([part[0] for part in itertools.chain(*reference_paths)]).get( "Art" ) else "§" ) for reference_path in reference_paths: temp_path = [] for part in reference_path: if part[0] == main_unit: if len(temp_path): new_reference_paths.append(temp_path) temp_path = [] temp_path.append(part) new_reference_paths.append(temp_path) return new_reference_paths @staticmethod def infer_units(reference_path, prev_reference_path): """ In some cases of an enumeration a numeric value is not directed prefixed by the corresponding unit. E.g. "§ 123 Abs. 1 S. 2, 3 S. 4". In this case "3" is not prefixed with its unit. Instead it can be inferred by looking at the whole citation that it is next higher unit of "S.", hence "Abs.". These inferred units are added to parsed data. 
""" prev_path_units = [o[0] for o in prev_reference_path] if reference_path[0][0]: pass elif len(reference_path) > 1: try: prev_unit_index = prev_path_units.index(reference_path[1][0]) # if not prev_unit_index > 0: # print(f'Infer unit error: {citation}') reference_path[0][0] = prev_path_units[prev_unit_index - 1] except ValueError: reference_path[0][0] = prev_path_units[-1] else: reference_path[0][0] = prev_path_units[-1] try: prev_unit_index = prev_path_units.index(reference_path[0][0]) reference_path[0:0] = prev_reference_path[:prev_unit_index] except Exception: reference_path[0:0] = prev_reference_path @staticmethod def split_citation_part(string: str): """ A string a tokenizes. Tokens are identified as units or values. Pairs are built to connect the units with their respective values. If the unit cannot be indentified (and must be inferred later) None is returned. Args: string: A string that is part of a reference and cites *one* part a statute. Retruns: As a generator tuples are returned, each containing the unit (or None) and the respecive value. """ # Tokenization # fmt: off string = regex.sub( r"(" r"\d+(?>\.\d+)?[a-z]?|" r"\b[ivx]+|" r"\b[a-z]\)?" r")" r"(\sff?\.|\sff\b)", r"\1ff.", string, flags=regex.IGNORECASE, ) # fmt: on tokens = split_unit_number_pattern.split( string, ) # Building pairs of units with their resp. values while len(tokens) > 0: token = tokens.pop(0) if StatutesParser.is_unit(token): if len(tokens) > 0: unit = StatutesParser.stem_unit(token) token = tokens.pop(0) numb = token assert StatutesParser.is_numb(numb), numb else: # when citation ends with unit print( f"Citation {string} ends with unit {token}. Ignoring last unit." ) break elif StatutesParser.is_pre_numb(token): numb = token token = tokens.pop(0) if not StatutesParser.is_unit(token): print(token, "is not a unit in", string) continue # to fix citation "§ 30 DRITTER ABSCHNITT" # Last part in now ignored, # but reference areas can still be improved. unit = StatutesParser.stem_unit(token) elif StatutesParser.is_numb(token): unit = None numb = token else: raise StringCaseException(token, "in", string) numb = regex.sub(r"(ff?\.|ff|\))$", "", numb) yield [unit, numb]
rt len(lawid) == 2 if lawid[0] in self.laws_lookup.values(): return lawid[0] elif lawid[1] in self.laws_lookup.values(): return lawid[1] else: return lawid[1]
conditional_block
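The middle of the row above is the tuple-resolution branch of parse_law for the "sgb" match type: a stemmed law name can map either to a single law id or to a pair of candidates, and a pair is resolved against the known laws, defaulting to the second candidate. A toy re-creation follows; both dicts are made-up data, not the real sgb_dict or laws_lookup.

# Toy re-creation of the parse_law "sgb" branch with invented dictionary data.
sgb_dict = {"sgb 5": ("SGB-5", "SGB-V"), "sgb 2": "SGB-2"}
known_law_ids = {"SGB-V", "SGB-2"}

def resolve_sgb(stem: str) -> str:
    lawid = sgb_dict[stem]
    if isinstance(lawid, tuple):
        assert len(lawid) == 2
        # prefer whichever candidate is a known law, defaulting to the second
        if lawid[0] in known_law_ids:
            return lawid[0]
        return lawid[1]
    return lawid

print(resolve_sgb("sgb 5"))  # -> SGB-V
print(resolve_sgb("sgb 2"))  # -> SGB-2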
statutes_parse.py
import itertools
from collections import Counter

from regex import regex

from quantlaw.de_extract.statutes_abstract import StatutesProcessor
from quantlaw.de_extract.statutes_parse_patterns import (
    numb_pattern,
    pre_numb_pattern,
    sgb_dict,
    split_citation_into_parts_pattern,
    split_citation_into_range_parts_pattern,
    split_unit_number_pattern,
    unit_patterns,
)
from quantlaw.de_extract.stemming import stem_law_name


class StringCaseException(Exception):
    """
    Exception is raised if a unit in a reference cannot be parsed. In this case it
    is often an issue of upper or lower case formatting.
    """

    pass


class NoUnitMatched(Exception):
    """
    Exception is raised if a unit in a reference cannot be parsed.
    """

    pass


class StatutesParser(StatutesProcessor):
    """
    Class to parse the content of a reference area identified by StatutesExtractor
    """
def parse_main(self, main_text: str) -> list: """ Parses a string containing a reference to a specific section within a given law. E.g. "§ 123 Abs. 4 Satz 5 und 6". The parsed informtaion is formatted into lists nested in lists nested in lists. The outer list is a list of references. References are lists of path components. A path component is e.g. "Abs. 4". A path component is represented by a list with two elements: The first contains the unit the second the value. The example above would be represented as `[[['§', '123'], ['Abs', '4'], ['Satz', '5']], [['§', '123'], ['Abs', '4'], ['Satz', '6']]]`. Args: main_text: string to parse Returns: The parsed reference. """ citation = self.fix_errors_in_citation(main_text.strip()) enum_parts = self.split_citation_into_enum_parts(citation) reference_paths = [] for enum_part in enum_parts: for string in enum_part: splitted_citation_part_list = list(self.split_citation_part(string)) if len(splitted_citation_part_list): reference_paths.append(splitted_citation_part_list) else: print(f"Empty citation part in {citation} in part {string}") reference_paths = self.split_parts_accidently_joined(reference_paths) for reference_path in reference_paths[1:]: prev_reference_path = reference_paths[ reference_paths.index(reference_path) - 1 ] self.infer_units(reference_path, prev_reference_path) return reference_paths def parse_law(self, law_text: str, match_type: str, current_lawid: str = None): """ Parses the law information from a references found by StatutesMatchWithMainArea Args: main_text: E.g. "§ 123 Abs. 4 und 5 Nr. 6" law_text: E.g. "BGB" match_type: E.g. "dict" Returns: The key of a parse law. """ if match_type == "dict": lawname_stem = stem_law_name(law_text) match = self.match_law_name(lawname_stem) return self.laws_lookup[match] elif match_type == "sgb": lawid = sgb_dict[stem_law_name(law_text)] if type(lawid) is tuple: assert len(lawid) == 2 if lawid[0] in self.laws_lookup.values(): return lawid[0] elif lawid[1] in self.laws_lookup.values(): return lawid[1] else: return lawid[1] else: return lawid elif match_type == "internal": if current_lawid is None: raise Exception("Current law id must be set for internal reference") return current_lawid else: return None # match_type: ignore or unknown @staticmethod def stem_unit(unit: str): """ Brings a unit into a standard format. E.g. removes abbreviations, grammatical differences spelling errors, etc. Args: unit: A string containing a unit that should be converted into a standard format. Returns: Unit in a standard format as string. E.g. §, Art, Nr, Halbsatz, Anhang, ... """ for unit_pattern in unit_patterns: if regex.fullmatch(unit_pattern, unit): return unit_patterns[unit_pattern] raise NoUnitMatched(unit) @staticmethod def is_unit(token: str): """ Returns: True if the token is a unit """ return regex.fullmatch("|".join(unit_patterns.keys()), token) @staticmethod def is_pre_numb(token: str): """ Returns: True if the token is a number that comes *before* the unit. E.g. '*erster* Halbsatz' """ return pre_numb_pattern.fullmatch( token, ) @staticmethod def is_numb(token: str): """ Returns: True if the token is a 'numeric' value of the reference. """ return numb_pattern.fullmatch( token, ) @staticmethod def fix_errors_in_citation(citation): """ Fix some common inconsistencies in the references such as double spaces. 
""" result = regex.sub(r"\s+", " ", citation) result = regex.sub(r"§(?=\d)", "§ ", result) result = regex.sub(r",\sbis\s", " bis ", result) return result @staticmethod def split_citation_into_enum_parts(citation): """ A citation can contain references to multiple parts of the law. E.g. '§§ 20 und 35' or 'Art. 3 Abs. 1 Satz 1, Abs. 3 Satz 1'. The citation is split into parts so that each referenced section of the law is separated. E.g. '§§ 20' and '35' resp. 'Art. 3 Abs. 1 Satz 1' and 'Abs. 3 Satz 1'. However, ranges are not spit: E.g. "§§ 1 bis 10" will not be split. """ enum_parts = split_citation_into_parts_pattern.split( citation, ) # Split range enum_parts = [ split_citation_into_range_parts_pattern.split(part) for part in enum_parts ] return enum_parts @staticmethod def split_parts_accidently_joined(reference_paths): """ Reformats the parsed references to separate accitently joined references. E.g. the original referehence "§ 123 § 126" will not be split by split_citation_into_enum_parts because the separation is falsly not indicated by a ',', 'or' etc. It come from the unit '§' that it can be inferred that the citation contains references to two parts of statutes. This function accounts for the case that the unit '§' or 'Art' appears twice in the same reference path and split the path into several elements. """ new_reference_paths = [] main_unit = ( "Art" if Counter([part[0] for part in itertools.chain(*reference_paths)]).get( "Art" ) else "§" ) for reference_path in reference_paths: temp_path = [] for part in reference_path: if part[0] == main_unit: if len(temp_path): new_reference_paths.append(temp_path) temp_path = [] temp_path.append(part) new_reference_paths.append(temp_path) return new_reference_paths @staticmethod def infer_units(reference_path, prev_reference_path): """ In some cases of an enumeration a numeric value is not directed prefixed by the corresponding unit. E.g. "§ 123 Abs. 1 S. 2, 3 S. 4". In this case "3" is not prefixed with its unit. Instead it can be inferred by looking at the whole citation that it is next higher unit of "S.", hence "Abs.". These inferred units are added to parsed data. """ prev_path_units = [o[0] for o in prev_reference_path] if reference_path[0][0]: pass elif len(reference_path) > 1: try: prev_unit_index = prev_path_units.index(reference_path[1][0]) # if not prev_unit_index > 0: # print(f'Infer unit error: {citation}') reference_path[0][0] = prev_path_units[prev_unit_index - 1] except ValueError: reference_path[0][0] = prev_path_units[-1] else: reference_path[0][0] = prev_path_units[-1] try: prev_unit_index = prev_path_units.index(reference_path[0][0]) reference_path[0:0] = prev_reference_path[:prev_unit_index] except Exception: reference_path[0:0] = prev_reference_path @staticmethod def split_citation_part(string: str): """ A string a tokenizes. Tokens are identified as units or values. Pairs are built to connect the units with their respective values. If the unit cannot be indentified (and must be inferred later) None is returned. Args: string: A string that is part of a reference and cites *one* part a statute. Retruns: As a generator tuples are returned, each containing the unit (or None) and the respecive value. """ # Tokenization # fmt: off string = regex.sub( r"(" r"\d+(?>\.\d+)?[a-z]?|" r"\b[ivx]+|" r"\b[a-z]\)?" r")" r"(\sff?\.|\sff\b)", r"\1ff.", string, flags=regex.IGNORECASE, ) # fmt: on tokens = split_unit_number_pattern.split( string, ) # Building pairs of units with their resp. 
values
        while len(tokens) > 0:
            token = tokens.pop(0)
            if StatutesParser.is_unit(token):
                if len(tokens) > 0:
                    unit = StatutesParser.stem_unit(token)
                    token = tokens.pop(0)
                    numb = token
                    assert StatutesParser.is_numb(numb), numb
                else:  # when citation ends with unit
                    print(
                        f"Citation {string} ends with unit {token}. Ignoring last unit."
                    )
                    break

            elif StatutesParser.is_pre_numb(token):
                numb = token
                token = tokens.pop(0)
                if not StatutesParser.is_unit(token):
                    print(token, "is not a unit in", string)
                    continue  # to fix citation "§ 30 DRITTER ABSCHNITT"
                    # Last part is now ignored,
                    # but reference areas can still be improved.
                unit = StatutesParser.stem_unit(token)

            elif StatutesParser.is_numb(token):
                unit = None
                numb = token

            else:
                raise StringCaseException(token, "in", string)
            numb = regex.sub(r"(ff?\.|ff|\))$", "", numb)
            yield [unit, numb]
random_line_split
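The pairing loop at the end of the row above walks the token list and attaches each numeric value to its preceding unit, emitting None for bare numbers whose unit must be inferred later. A toy version with hard-coded unit detection in place of the real regex patterns:

# Toy version of the unit/number pairing loop in split_citation_part.
UNITS = {"§", "Abs", "Satz", "Nr"}

def pair_tokens(tokens: list) -> list:
    pairs = []
    while tokens:
        token = tokens.pop(0)
        if token in UNITS:          # a unit is followed by its number
            if not tokens:
                break               # citation ends with a dangling unit
            pairs.append([token, tokens.pop(0)])
        else:                       # bare number: unit gets inferred later
            pairs.append([None, token])
    return pairs

print(pair_tokens(["§", "123", "Abs", "4", "5"]))
# -> [['§', '123'], ['Abs', '4'], [None, '5']]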
statutes_parse.py
import itertools from collections import Counter from regex import regex from quantlaw.de_extract.statutes_abstract import StatutesProcessor from quantlaw.de_extract.statutes_parse_patterns import ( numb_pattern, pre_numb_pattern, sgb_dict, split_citation_into_parts_pattern, split_citation_into_range_parts_pattern, split_unit_number_pattern, unit_patterns, ) from quantlaw.de_extract.stemming import stem_law_name class StringCaseException(Exception): """ Exception is raised if a unit in a reference cannot be parsed. In this case it is often an issue of upper oder lower case formatting. """ pass class NoUnitMatched(Exception): """ Exception is raised if a unit in a refren cannot be parsed. """ pass class StatutesParser(StatutesProcessor): """ Class to parse the content of a reference area identified by StatutesExtractor """ def parse_main(self, main_text: str) -> list: """ Parses a string containing a reference to a specific section within a given law. E.g. "§ 123 Abs. 4 Satz 5 und 6". The parsed informtaion is formatted into lists nested in lists nested in lists. The outer list is a list of references. References are lists of path components. A path component is e.g. "Abs. 4". A path component is represented by a list with two elements: The first contains the unit the second the value. The example above would be represented as `[[['§', '123'], ['Abs', '4'], ['Satz', '5']], [['§', '123'], ['Abs', '4'], ['Satz', '6']]]`. Args: main_text: string to parse Returns: The parsed reference. """ citation = self.fix_errors_in_citation(main_text.strip()) enum_parts = self.split_citation_into_enum_parts(citation) reference_paths = [] for enum_part in enum_parts: for string in enum_part: splitted_citation_part_list = list(self.split_citation_part(string)) if len(splitted_citation_part_list): reference_paths.append(splitted_citation_part_list) else: print(f"Empty citation part in {citation} in part {string}") reference_paths = self.split_parts_accidently_joined(reference_paths) for reference_path in reference_paths[1:]: prev_reference_path = reference_paths[ reference_paths.index(reference_path) - 1 ] self.infer_units(reference_path, prev_reference_path) return reference_paths def parse_law(self, law_text: str, match_type: str, current_lawid: str = None): """ Parses the law information from a references found by StatutesMatchWithMainArea Args: main_text: E.g. "§ 123 Abs. 4 und 5 Nr. 6" law_text: E.g. "BGB" match_type: E.g. "dict" Returns: The key of a parse law. """ if match_type == "dict": lawname_stem = stem_law_name(law_text) match = self.match_law_name(lawname_stem) return self.laws_lookup[match] elif match_type == "sgb": lawid = sgb_dict[stem_law_name(law_text)] if type(lawid) is tuple: assert len(lawid) == 2 if lawid[0] in self.laws_lookup.values(): return lawid[0] elif lawid[1] in self.laws_lookup.values(): return lawid[1] else: return lawid[1] else: return lawid elif match_type == "internal": if current_lawid is None: raise Exception("Current law id must be set for internal reference") return current_lawid else: return None # match_type: ignore or unknown @staticmethod def stem_unit(unit: str): """ Brings a unit into a standard format. E.g. removes abbreviations, grammatical differences spelling errors, etc. Args: unit: A string containing a unit that should be converted into a standard format. Returns: Unit in a standard format as string. E.g. §, Art, Nr, Halbsatz, Anhang, ... 
""" for unit_pattern in unit_patterns: if regex.fullmatch(unit_pattern, unit): return unit_patterns[unit_pattern] raise NoUnitMatched(unit) @staticmethod def is_unit(token: str): """ Returns: True if the token is a unit """ return regex.fullmatch("|".join(unit_patterns.keys()), token) @staticmethod def is_pre_numb(token: str): """ Returns: True if the token is a number that comes *before* the unit. E.g. '*erster* Halbsatz' """ return pre_numb_pattern.fullmatch( token, ) @staticmethod def is_nu
n: str): """ Returns: True if the token is a 'numeric' value of the reference. """ return numb_pattern.fullmatch( token, ) @staticmethod def fix_errors_in_citation(citation): """ Fix some common inconsistencies in the references such as double spaces. """ result = regex.sub(r"\s+", " ", citation) result = regex.sub(r"§(?=\d)", "§ ", result) result = regex.sub(r",\sbis\s", " bis ", result) return result @staticmethod def split_citation_into_enum_parts(citation): """ A citation can contain references to multiple parts of the law. E.g. '§§ 20 und 35' or 'Art. 3 Abs. 1 Satz 1, Abs. 3 Satz 1'. The citation is split into parts so that each referenced section of the law is separated. E.g. '§§ 20' and '35' resp. 'Art. 3 Abs. 1 Satz 1' and 'Abs. 3 Satz 1'. However, ranges are not spit: E.g. "§§ 1 bis 10" will not be split. """ enum_parts = split_citation_into_parts_pattern.split( citation, ) # Split range enum_parts = [ split_citation_into_range_parts_pattern.split(part) for part in enum_parts ] return enum_parts @staticmethod def split_parts_accidently_joined(reference_paths): """ Reformats the parsed references to separate accitently joined references. E.g. the original referehence "§ 123 § 126" will not be split by split_citation_into_enum_parts because the separation is falsly not indicated by a ',', 'or' etc. It come from the unit '§' that it can be inferred that the citation contains references to two parts of statutes. This function accounts for the case that the unit '§' or 'Art' appears twice in the same reference path and split the path into several elements. """ new_reference_paths = [] main_unit = ( "Art" if Counter([part[0] for part in itertools.chain(*reference_paths)]).get( "Art" ) else "§" ) for reference_path in reference_paths: temp_path = [] for part in reference_path: if part[0] == main_unit: if len(temp_path): new_reference_paths.append(temp_path) temp_path = [] temp_path.append(part) new_reference_paths.append(temp_path) return new_reference_paths @staticmethod def infer_units(reference_path, prev_reference_path): """ In some cases of an enumeration a numeric value is not directed prefixed by the corresponding unit. E.g. "§ 123 Abs. 1 S. 2, 3 S. 4". In this case "3" is not prefixed with its unit. Instead it can be inferred by looking at the whole citation that it is next higher unit of "S.", hence "Abs.". These inferred units are added to parsed data. """ prev_path_units = [o[0] for o in prev_reference_path] if reference_path[0][0]: pass elif len(reference_path) > 1: try: prev_unit_index = prev_path_units.index(reference_path[1][0]) # if not prev_unit_index > 0: # print(f'Infer unit error: {citation}') reference_path[0][0] = prev_path_units[prev_unit_index - 1] except ValueError: reference_path[0][0] = prev_path_units[-1] else: reference_path[0][0] = prev_path_units[-1] try: prev_unit_index = prev_path_units.index(reference_path[0][0]) reference_path[0:0] = prev_reference_path[:prev_unit_index] except Exception: reference_path[0:0] = prev_reference_path @staticmethod def split_citation_part(string: str): """ A string a tokenizes. Tokens are identified as units or values. Pairs are built to connect the units with their respective values. If the unit cannot be indentified (and must be inferred later) None is returned. Args: string: A string that is part of a reference and cites *one* part a statute. Retruns: As a generator tuples are returned, each containing the unit (or None) and the respecive value. 
""" # Tokenization # fmt: off string = regex.sub( r"(" r"\d+(?>\.\d+)?[a-z]?|" r"\b[ivx]+|" r"\b[a-z]\)?" r")" r"(\sff?\.|\sff\b)", r"\1ff.", string, flags=regex.IGNORECASE, ) # fmt: on tokens = split_unit_number_pattern.split( string, ) # Building pairs of units with their resp. values while len(tokens) > 0: token = tokens.pop(0) if StatutesParser.is_unit(token): if len(tokens) > 0: unit = StatutesParser.stem_unit(token) token = tokens.pop(0) numb = token assert StatutesParser.is_numb(numb), numb else: # when citation ends with unit print( f"Citation {string} ends with unit {token}. Ignoring last unit." ) break elif StatutesParser.is_pre_numb(token): numb = token token = tokens.pop(0) if not StatutesParser.is_unit(token): print(token, "is not a unit in", string) continue # to fix citation "§ 30 DRITTER ABSCHNITT" # Last part in now ignored, # but reference areas can still be improved. unit = StatutesParser.stem_unit(token) elif StatutesParser.is_numb(token): unit = None numb = token else: raise StringCaseException(token, "in", string) numb = regex.sub(r"(ff?\.|ff|\))$", "", numb) yield [unit, numb]
mb(toke
identifier_name
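infer_units, shown in full in the rows above, fills in the missing unit of a bare number by locating the next-higher unit in the previous reference path and prepending the shared prefix. A simplified worked example follows; the helper handles only the common two-element case, not all branches of the original.

# Simplified worked example of the unit inference: in "§ 123 Abs. 1 S. 2, 3 S. 4"
# the bare "3" inherits the unit one level above "Satz" from the previous path.
def infer_unit(path: list, prev_path: list) -> list:
    prev_units = [unit for unit, _ in prev_path]
    if path[0][0] is None and len(path) > 1 and path[1][0] in prev_units:
        idx = prev_units.index(path[1][0])
        path[0][0] = prev_units[idx - 1]   # next higher unit
        path[0:0] = prev_path[:idx - 1]    # prepend the shared higher-level parts
    return path

prev = [["§", "123"], ["Abs", "1"], ["Satz", "2"]]
print(infer_unit([[None, "3"], ["Satz", "4"]], prev))
# -> [['§', '123'], ['Abs', '3'], ['Satz', '4']]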
statutes_parse.py
import itertools from collections import Counter from regex import regex from quantlaw.de_extract.statutes_abstract import StatutesProcessor from quantlaw.de_extract.statutes_parse_patterns import ( numb_pattern, pre_numb_pattern, sgb_dict, split_citation_into_parts_pattern, split_citation_into_range_parts_pattern, split_unit_number_pattern, unit_patterns, ) from quantlaw.de_extract.stemming import stem_law_name class StringCaseException(Exception): """ Exception is raised if a unit in a reference cannot be parsed. In this case it is often an issue of upper oder lower case formatting. """ pass class NoUnitMatched(Exception): """ Exception is raised if a unit in a refren cannot be parsed. """ pass class StatutesParser(StatutesProcessor): """ Class to parse the content of a reference area identified by StatutesExtractor """ def parse_main(self, main_text: str) -> list: """ Parses a string containing a reference to a specific section within a given law. E.g. "§ 123 Abs. 4 Satz 5 und 6". The parsed informtaion is formatted into lists nested in lists nested in lists. The outer list is a list of references. References are lists of path components. A path component is e.g. "Abs. 4". A path component is represented by a list with two elements: The first contains the unit the second the value. The example above would be represented as `[[['§', '123'], ['Abs', '4'], ['Satz', '5']], [['§', '123'], ['Abs', '4'], ['Satz', '6']]]`. Args: main_text: string to parse Returns: The parsed reference. """ citation = self.fix_errors_in_citation(main_text.strip()) enum_parts = self.split_citation_into_enum_parts(citation) reference_paths = [] for enum_part in enum_parts: for string in enum_part: splitted_citation_part_list = list(self.split_citation_part(string)) if len(splitted_citation_part_list): reference_paths.append(splitted_citation_part_list) else: print(f"Empty citation part in {citation} in part {string}") reference_paths = self.split_parts_accidently_joined(reference_paths) for reference_path in reference_paths[1:]: prev_reference_path = reference_paths[ reference_paths.index(reference_path) - 1 ] self.infer_units(reference_path, prev_reference_path) return reference_paths def parse_law(self, law_text: str, match_type: str, current_lawid: str = None): """ Parses the law information from a references found by StatutesMatchWithMainArea Args: main_text: E.g. "§ 123 Abs. 4 und 5 Nr. 6" law_text: E.g. "BGB" match_type: E.g. "dict" Returns: The key of a parse law. """ if match_type == "dict": lawname_stem = stem_law_name(law_text) match = self.match_law_name(lawname_stem) return self.laws_lookup[match] elif match_type == "sgb": lawid = sgb_dict[stem_law_name(law_text)] if type(lawid) is tuple: assert len(lawid) == 2 if lawid[0] in self.laws_lookup.values(): return lawid[0] elif lawid[1] in self.laws_lookup.values(): return lawid[1] else: return lawid[1] else: return lawid elif match_type == "internal": if current_lawid is None: raise Exception("Current law id must be set for internal reference") return current_lawid else: return None # match_type: ignore or unknown @staticmethod def stem_unit(unit: str): """ Brings a unit into a standard format. E.g. removes abbreviations, grammatical differences spelling errors, etc. Args: unit: A string containing a unit that should be converted into a standard format. Returns: Unit in a standard format as string. E.g. §, Art, Nr, Halbsatz, Anhang, ... 
""" for unit_pattern in unit_patterns: if regex.fullmatch(unit_pattern, unit): return unit_patterns[unit_pattern] raise NoUnitMatched(unit) @staticmethod def is_unit(token: str): """ Returns: True if the token is a unit """ return regex.fullmatch("|".join(unit_patterns.keys()), token) @staticmethod def is_pre_numb(token: str): """ Returns: True if the token is a number that comes *before* the unit. E.g. '*erster* Halbsatz' """ return pre_numb_pattern.fullmatch( token, ) @staticmethod def is_numb(token: str): """ Returns: True if the token is a 'numeric' value of the reference. """ return numb_pattern.fullmatch( token, ) @staticmethod def fix_errors_in_citation(citation): """ Fix some common inconsistencies in the references such as double spaces. """ result = regex.sub(r"\s+", " ", citation) result = regex.sub(r"§(?=\d)", "§ ", result) result = regex.sub(r",\sbis\s", " bis ", result) return result @staticmethod def split_citation_into_enum_parts(citation): """
method def split_parts_accidently_joined(reference_paths): """ Reformats the parsed references to separate accidentally joined references. E.g. the original reference "§ 123 § 126" will not be split by split_citation_into_enum_parts because the separation is falsely not indicated by a ',', 'or' etc. From the repeated unit '§' it can be inferred that the citation contains references to two parts of statutes. This function accounts for the case that the unit '§' or 'Art' appears twice in the same reference path and splits the path into several elements. """ new_reference_paths = [] main_unit = ( "Art" if Counter([part[0] for part in itertools.chain(*reference_paths)]).get( "Art" ) else "§" ) for reference_path in reference_paths: temp_path = [] for part in reference_path: if part[0] == main_unit: if len(temp_path): new_reference_paths.append(temp_path) temp_path = [] temp_path.append(part) new_reference_paths.append(temp_path) return new_reference_paths @staticmethod def infer_units(reference_path, prev_reference_path): """ In some cases of an enumeration a numeric value is not directly prefixed by the corresponding unit. E.g. "§ 123 Abs. 1 S. 2, 3 S. 4". In this case "3" is not prefixed with its unit. Instead it can be inferred by looking at the whole citation that it is the next higher unit of "S.", hence "Abs.". These inferred units are added to the parsed data. """ prev_path_units = [o[0] for o in prev_reference_path] if reference_path[0][0]: pass elif len(reference_path) > 1: try: prev_unit_index = prev_path_units.index(reference_path[1][0]) # if not prev_unit_index > 0: # print(f'Infer unit error: {citation}') reference_path[0][0] = prev_path_units[prev_unit_index - 1] except ValueError: reference_path[0][0] = prev_path_units[-1] else: reference_path[0][0] = prev_path_units[-1] try: prev_unit_index = prev_path_units.index(reference_path[0][0]) reference_path[0:0] = prev_reference_path[:prev_unit_index] except Exception: reference_path[0:0] = prev_reference_path @staticmethod def split_citation_part(string: str): """ The string is tokenized. Tokens are identified as units or values. Pairs are built to connect the units with their respective values. If the unit cannot be identified (and must be inferred later) None is returned. Args: string: A string that is part of a reference and cites *one* part of a statute. Returns: As a generator pairs are yielded, each containing the unit (or None) and the respective value. """ # Tokenization # fmt: off string = regex.sub( r"(" r"\d+(?>\.\d+)?[a-z]?|" r"\b[ivx]+|" r"\b[a-z]\)?" r")" r"(\sff?\.|\sff\b)", r"\1ff.", string, flags=regex.IGNORECASE, ) # fmt: on tokens = split_unit_number_pattern.split( string, ) # Building pairs of units with their resp. values while len(tokens) > 0: token = tokens.pop(0) if StatutesParser.is_unit(token): if len(tokens) > 0: unit = StatutesParser.stem_unit(token) token = tokens.pop(0) numb = token assert StatutesParser.is_numb(numb), numb else: # when citation ends with unit print( f"Citation {string} ends with unit {token}. Ignoring last unit." ) break elif StatutesParser.is_pre_numb(token): numb = token token = tokens.pop(0) if not StatutesParser.is_unit(token): print(token, "is not a unit in", string) continue # to fix citation "§ 30 DRITTER ABSCHNITT" # Last part is now ignored, # but reference areas can still be improved.
unit = StatutesParser.stem_unit(token) elif StatutesParser.is_numb(token): unit = None numb = token else: raise StringCaseException(token, "in", string) numb = regex.sub(r"(ff?\.|ff|\))$", "", numb) yield [unit, numb]
A citation can contain references to multiple parts of the law. E.g. '§§ 20 und 35' or 'Art. 3 Abs. 1 Satz 1, Abs. 3 Satz 1'. The citation is split into parts so that each referenced section of the law is separated. E.g. '§§ 20' and '35' resp. 'Art. 3 Abs. 1 Satz 1' and 'Abs. 3 Satz 1'. However, ranges are not split: E.g. "§§ 1 bis 10" will not be split. """ enum_parts = split_citation_into_parts_pattern.split( citation, ) # Split range enum_parts = [ split_citation_into_range_parts_pattern.split(part) for part in enum_parts ] return enum_parts @static
identifier_body
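The infer_units logic restored in this record is compact but subtle. Below is a standalone sketch of the inference it describes, using the docstring's own example "§ 123 Abs. 1 S. 2, 3 S. 4"; note the real method additionally prepends the leading components of the previous path.

# Previous path: "§ 123 Abs. 1 Satz 2"; the current path starts with a bare "3".
prev_path = [["§", "123"], ["Abs", "1"], ["Satz", "2"]]
curr_path = [[None, "3"], ["Satz", "4"]]

prev_units = [unit for unit, _ in prev_path]                  # ['§', 'Abs', 'Satz']
follower = curr_path[1][0]                                    # 'Satz'
curr_path[0][0] = prev_units[prev_units.index(follower) - 1]  # one level above 'Satz'

print(curr_path)  # [['Abs', '3'], ['Satz', '4']]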
matrix.py
import maya.cmds as mc import maya.OpenMaya as OpenMaya import mathUtils import math class MissingPluginError(Exception): pass def getMatrix(transform,local=False,time=None): ''' @param transform: Transform object to get world matrix from @type transform: str @param local: Get local space matrix instead of the world space matrix @type local: bool @param time: The frame to get the transform's world matrix for. If left at default, will use the current frame. @type time: int or float ''' # Check transform if not mc.objExists(transform): raise Exception('Object "'+transform+'" does not exist!!') # Define Matrix attribute matAttr = 'worldMatrix[0]' if local: matAttr = 'matrix' # Get time mat = OpenMaya.MMatrix() if time != None: mat = mc.getAttr(transform+'.'+matAttr,t=time) else: mat = mc.getAttr(transform+'.'+matAttr) # Build Matrix matrix = buildMatrix(translate=(mat[12],mat[13],mat[14]),xAxis=(mat[0],mat[1],mat[2]),yAxis=(mat[4],mat[5],mat[6]),zAxis=(mat[8],mat[9],mat[10])) # Return result return matrix def buildMatrix(translate=(0,0,0),xAxis=(1,0,0),yAxis=(0,1,0),zAxis=(0,0,1)): ''' Build a transformation matrix based on the input vectors @param translate: Translate values for the matrix @type translate: tuple/list @param xAxis: xAxis of the matrix @type xAxis: tuple/list @param yAxis: yAxis of the matrix @type yAxis: tuple/list @param zAxis: zAxis of the matrix @type zAxis: tuple/list ''' # Create transformation matrix from input vectors matrix = OpenMaya.MMatrix() values = [] OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, xAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, xAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, xAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, yAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, yAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, yAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, zAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, zAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, zAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, translate[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, translate[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, translate[2]) return matrix def vectorMatrixMultiply(vector,matrix,transformAsPoint=False,invertMatrix=False): ''' Transform a vector (or point) by a given transformation matrix. @param vector: Vector or point to be transformed @type vector: tuple/list @param matrix: MMatrix object to provide the transformation @type matrix: OpenMaya.MMatrix @param transformAsPoint: Transform the vector as a point @type transformAsPoint: bool @param invertMatrix: Use the matrix inverse to transform the vector @type invertMatrix: bool ''' # Create MPoint/MVector object for transformation if transformAsPoint: vector = OpenMaya.MPoint(vector[0],vector[1],vector[2],1.0) else: vector = OpenMaya.MVector(vector[0],vector[1],vector[2]) # Check input is of type MMatrix if type(matrix) != OpenMaya.MMatrix: raise Exception('Matrix input variable is not of expected type! Expecting MMatrix, received '+str(type(matrix))+'!!') # Transform vector if matrix != OpenMaya.MMatrix.identity: if invertMatrix: matrix = matrix.inverse() vector *= matrix # Return new vector return [vector.x,vector.y,vector.z] def getTranslation(matrix): ''' Return the translation component of a matrix.
@param matrix: Matrix to extract translation from @type matrix: maya.OpenMaya.MMatrix ''' x = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],0) y = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],1) z = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],2) return (x,y,z) def getRotation(matrix,rotationOrder='xyz'): ''' Return the rotation component of a matrix as euler (XYZ) values. @param matrix: Matrix to extract rotation from @type matrix: maya.OpenMaya.MMatrix @param rotationOrder: Rotation order of the matrix @type rotationOrder: str or int ''' # Calculate radian constant radian = 180.0/math.pi # Check rotation order if type(rotationOrder) == str: rotationOrder = rotationOrder.lower() rotateOrder = {'xyz':0,'yzx':1,'zxy':2,'xzy':3,'yxz':4,'zyx':5} if not rotateOrder.has_key(rotationOrder): raise Exception('Invalid rotation order supplied!') rotationOrder = rotateOrder[rotationOrder] else: rotationOrder = int(rotationOrder) # Get transformation matrix transformMatrix = OpenMaya.MTransformationMatrix(matrix) # Get Euler rotation from matrix eulerRot = transformMatrix.eulerRotation() # Reorder rotation eulerRot.reorderIt(rotationOrder) # Return XYZ rotation values return (eulerRot.x*radian,eulerRot.y*radian,eulerRot.z*radian) def buildRotation(aimVector,upVector=(0,1,0),aimAxis='x',upAxis='y'): ''' Build rotation matrix from the specified inputs @param aimVector: Aim vector for construction of rotation matrix (worldSpace) @type aimVector: tuple or list @param upVector: Up vector for construction of rotation matrix (worldSpace) @type upVector: tuple or list @param aimAxis: Aim axis for construction of rotation matrix @type aimAxis: str @param upAxis: Up axis for construction of rotation matrix @type upAxis: str ''' # Check negative axis negAim = False negUp = False if aimAxis[0] == '-': aimAxis = aimAxis[1] negAim = True if upAxis[0] == '-': upAxis = upAxis[1] negUp = True # Check valid axis axisList = ['x','y','z'] if not axisList.count(aimAxis): raise Exception('Aim axis is not valid!') if not axisList.count(upAxis): raise Exception('Up axis is not valid!') if aimAxis == upAxis: raise Exception('Aim and Up axis must be unique!') # Determine cross axis axisList.remove(aimAxis) axisList.remove(upAxis) crossAxis = axisList[0] # Normalize aimVector aimVector = mathUtils.normalizeVector(aimVector) if negAim: aimVector = (-aimVector[0],-aimVector[1],-aimVector[2]) # Normalize upVector upVector = mathUtils.normalizeVector(upVector) if negUp: upVector = (-upVector[0],-upVector[1],-upVector[2]) # Get cross product vector crossVector = (0,0,0) if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'): crossVector = mathUtils.crossProduct(upVector,aimVector) else: crossVector = mathUtils.crossProduct(aimVector,upVector) # Recalculate upVector (orthogonalize) if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'): upVector = mathUtils.crossProduct(aimVector,crossVector) else: upVector = mathUtils.crossProduct(crossVector,aimVector) # Build axis dictionary axisDict={aimAxis: aimVector,upAxis: upVector,crossAxis: crossVector} # Build rotation matrix mat = buildMatrix(xAxis=axisDict['x'],yAxis=axisDict['y'],zAxis=axisDict['z']) # Return rotation matrix return mat def inverseTransform(source,destination,translate=True,rotate=True,scale=True): ''' Apply the inverse of a specified transform to another target transform.
@param source: The source transform that will supply the transformation @type source: str @param destination: The destination transform that will receive the inverse transformation @type destination: str @param translate: Apply inverse translate to destination transform @type translate: bool @param rotate: Apply inverse rotation to destination transform @type rotate: bool @param scale: Apply inverse scale to destination transform @type scale: bool ''' # ========== # - Checks - # ========== if not mc.objExists(source): raise Exception('Transform "'+source+'" does not exist!!') if not mc.objExists(destination): raise Exception('Transform "'+destination+'" does not exist!!') # Load decomposeMatrix plugin if not mc.pluginInfo('decomposeMatrix',q=True,l=True): try: mc.loadPlugin('decomposeMatrix') except: raise MissingPluginError('Unable to load "decomposeMatrix" plugin!!') # ================================= # - Apply Inverse Transformations - # ================================= # Create and name decomposeMatrix node dcm = mc.createNode('decomposeMatrix',n=source+'_decomposeMatrix') # Make connections mc.connectAttr(source+'.inverseMatrix',dcm+'.inputMatrix',f=True) if translate: mc.connectAttr(dcm+'.outputTranslate',destination+'.translate',f=True) if rotate: mc.connectAttr(dcm+'.outputRotate',destination+'.rotate',f=True) if scale: mc.connectAttr(dcm+'.outputScale',destination+'.scale',f=True) # ================= # - Return Result - # ================= return dcm def fromList(valueList): ''' Create matrix from value list. @param valueList: List of matrix values @type valueList: list ''' # Check Value List if len(valueList) != 16: raise Exception('Invalid value list! Expecting 16 elements, found '+str(len(valueList))) # Create transformation matrix from input values matrix = OpenMaya.MMatrix() OpenMaya.MScriptUtil.createMatrixFromList(valueList,matrix) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, valueList[0]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, valueList[1]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, valueList[2]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 3, valueList[3]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, valueList[4]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, valueList[5]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, valueList[6]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 3, valueList[7]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, valueList[8]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, valueList[9]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, valueList[10]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 3, valueList[11]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, valueList[12]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, valueList[13]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, valueList[14]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 3, valueList[15]) # Return Result return matrix def asList(matrix):
def printMatrix(matrix): ''' Print the specified matrix values to the script editor @param matrix: Matrix to print @type matrix: maya.OpenMaya.MMatrix ''' print ('%.3f' % matrix(0,0))+', '+('%.3f' % matrix(0,1))+', '+('%.3f' % matrix(0,2))+', '+('%.3f' % matrix(0,3)) print ('%.3f' % matrix(1,0))+', '+('%.3f' % matrix(1,1))+', '+('%.3f' % matrix(1,2))+', '+('%.3f' % matrix(1,3)) print ('%.3f' % matrix(2,0))+', '+('%.3f' % matrix(2,1))+', '+('%.3f' % matrix(2,2))+', '+('%.3f' % matrix(2,3)) print ('%.3f' % matrix(3,0))+', '+('%.3f' % matrix(3,1))+', '+('%.3f' % matrix(3,2))+', '+('%.3f' % matrix(3,3))
''' Return the specified matrix as a list @param matrix: Matrix to return list for @type matrix: maya.OpenMaya.MMatrix ''' return [ matrix(0,0),matrix(0,1),matrix(0,2),matrix(0,3), matrix(1,0),matrix(1,1),matrix(1,2),matrix(1,3), matrix(2,0),matrix(2,1),matrix(2,2),matrix(2,3), matrix(3,0),matrix(3,1),matrix(3,2),matrix(3,3), ]
identifier_body
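The buildMatrix/getTranslation pair in this record assumes a row-major layout with the basis axes in rows 0-2 and the translation in row 3. Here is a pure-Python sketch of that layout with no Maya dependency; build_matrix and get_translation are illustrative names, not OpenMaya calls.

# Row-major 4x4 transform: axes in rows 0-2, translation in row 3.
def build_matrix(translate=(0, 0, 0), x=(1, 0, 0), y=(0, 1, 0), z=(0, 0, 1)):
    return [
        [x[0], x[1], x[2], 0.0],
        [y[0], y[1], y[2], 0.0],
        [z[0], z[1], z[2], 0.0],
        [translate[0], translate[1], translate[2], 1.0],
    ]

def get_translation(matrix):
    # Translation lives in the first three entries of row 3.
    return tuple(matrix[3][:3])

m = build_matrix(translate=(1.0, 2.0, 3.0))
assert get_translation(m) == (1.0, 2.0, 3.0)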
matrix.py
import maya.cmds as mc import maya.OpenMaya as OpenMaya import mathUtils import math class MissingPluginError(Exception): pass def getMatrix(transform,local=False,time=None): ''' @param transform: Transform object to get world matrix from @type transform: str @param local: Get local space matrix instead of the world space matrix @type local: bool @param time: The frame to get the transform's world matrix for. If left at default, will use the current frame. @type time: int or float ''' # Check transform if not mc.objExists(transform): raise Exception('Object "'+transform+'" does not exist!!') # Define Matrix attribute matAttr = 'worldMatrix[0]' if local: matAttr = 'matrix' # Get time mat = OpenMaya.MMatrix() if time != None: mat = mc.getAttr(transform+'.'+matAttr,t=time) else: mat = mc.getAttr(transform+'.'+matAttr) # Build Matrix matrix = buildMatrix(translate=(mat[12],mat[13],mat[14]),xAxis=(mat[0],mat[1],mat[2]),yAxis=(mat[4],mat[5],mat[6]),zAxis=(mat[8],mat[9],mat[10])) # Return result return matrix def buildMatrix(translate=(0,0,0),xAxis=(1,0,0),yAxis=(0,1,0),zAxis=(0,0,1)): ''' Build a transformation matrix based on the input vectors @param translate: Translate values for the matrix @type translate: tuple/list @param xAxis: xAxis of the matrix @type xAxis: tuple/list @param yAxis: yAxis of the matrix @type yAxis: tuple/list @param zAxis: zAxis of the matrix @type zAxis: tuple/list ''' # Create transformation matrix from input vectors matrix = OpenMaya.MMatrix() values = [] OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, xAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, xAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, xAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, yAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, yAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, yAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, zAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, zAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, zAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, translate[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, translate[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, translate[2]) return matrix def vectorMatrixMultiply(vector,matrix,transformAsPoint=False,invertMatrix=False): ''' Transform a vector (or point) by a given transformation matrix. @param vector: Vector or point to be transformed @type vector: tuple/list @param matrix: MMatrix object to provide the transformation @type matrix: OpenMaya.MMatrix @param transformAsPoint: Transform the vector as a point @type transformAsPoint: bool @param invertMatrix: Use the matrix inverse to transform the vector @type invertMatrix: bool ''' # Create MPoint/MVector object for transformation if transformAsPoint: vector = OpenMaya.MPoint(vector[0],vector[1],vector[2],1.0) else: vector = OpenMaya.MVector(vector[0],vector[1],vector[2]) # Check input is of type MMatrix if type(matrix) != OpenMaya.MMatrix: raise Exception('Matrix input variable is not of expected type! Expecting MMatrix, received '+str(type(matrix))+'!!') # Transform vector if matrix != OpenMaya.MMatrix.identity: if invertMatrix: matrix = matrix.inverse() vector *= matrix # Return new vector return [vector.x,vector.y,vector.z] def getTranslation(matrix): ''' Return the translation component of a matrix.
@param matrix: Matrix to extract translation from @type matrix: maya.OpenMaya.MMatrix ''' x = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],0) y = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],1) z = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],2) return (x,y,z) def getRotation(matrix,rotationOrder='xyz'): ''' Return the rotation component of a matrix as euler (XYZ) values. @param matrix: Matrix to extract rotation from @type matrix: maya.OpenMaya.MMatrix @param rotationOrder: Rotation order of the matrix @type rotationOrder: str or int ''' # Calculate radian constant radian = 180.0/math.pi # Check rotation order if type(rotationOrder) == str: rotationOrder = rotationOrder.lower() rotateOrder = {'xyz':0,'yzx':1,'zxy':2,'xzy':3,'yxz':4,'zyx':5} if not rotateOrder.has_key(rotationOrder): raise Exception('Invalid rotation order supplied!') rotationOrder = rotateOrder[rotationOrder] else: rotationOrder = int(rotationOrder) # Get transformation matrix transformMatrix = OpenMaya.MTransformationMatrix(matrix) # Get Euler rotation from matrix eulerRot = transformMatrix.eulerRotation() # Reorder rotation eulerRot.reorderIt(rotationOrder) # Return XYZ rotation values return (eulerRot.x*radian,eulerRot.y*radian,eulerRot.z*radian) def buildRotation(aimVector,upVector=(0,1,0),aimAxis='x',upAxis='y'): ''' Build rotation matrix from the specified inputs @param aimVector: Aim vector for construction of rotation matrix (worldSpace) @type aimVector: tuple or list @param upVector: Up vector for construction of rotation matrix (worldSpace) @type upVector: tuple or list @param aimAxis: Aim axis for construction of rotation matrix @type aimAxis: str @param upAxis: Up axis for construction of rotation matrix @type upAxis: str ''' # Check negative axis negAim = False negUp = False if aimAxis[0] == '-': aimAxis = aimAxis[1] negAim = True if upAxis[0] == '-': upAxis = upAxis[1] negUp = True # Check valid axis axisList = ['x','y','z'] if not axisList.count(aimAxis): raise Exception('Aim axis is not valid!') if not axisList.count(upAxis): raise Exception('Up axis is not valid!') if aimAxis == upAxis: raise Exception('Aim and Up axis must be unique!') # Determine cross axis axisList.remove(aimAxis) axisList.remove(upAxis) crossAxis = axisList[0] # Normalize aimVector aimVector = mathUtils.normalizeVector(aimVector) if negAim: aimVector = (-aimVector[0],-aimVector[1],-aimVector[2]) # Normalize upVector upVector = mathUtils.normalizeVector(upVector) if negUp: upVector = (-upVector[0],-upVector[1],-upVector[2]) # Get cross product vector crossVector = (0,0,0) if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'): crossVector = mathUtils.crossProduct(upVector,aimVector) else: crossVector = mathUtils.crossProduct(aimVector,upVector) # Recalculate upVector (orthogonalize) if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'): upVector = mathUtils.crossProduct(aimVector,crossVector) else: upVector = mathUtils.crossProduct(crossVector,aimVector) # Build axis dictionary axisDict={aimAxis: aimVector,upAxis: upVector,crossAxis: crossVector} # Build rotation matrix mat = buildMatrix(xAxis=axisDict['x'],yAxis=axisDict['y'],zAxis=axisDict['z']) # Return rotation matrix return mat def inverseTransform(source,destination,translate=True,rotate=True,scale=True): ''' Apply the inverse of a specified transform to another target transform.
@param source: The source transform that will supply the transformation @type source: str @param destination: The destination transform that will receive the inverse transformation @type destination: str @param translate: Apply inverse translate to destination transform @type translate: bool @param rotate: Apply inverse rotation to destination transform @type rotate: bool @param scale: Apply inverse scale to destination transform @type scale: bool ''' # ========== # - Checks - # ========== if not mc.objExists(source): raise Exception('Transform "'+source+'" does not exist!!') if not mc.objExists(destination): raise Exception('Transform "'+destination+'" does not exist!!') # Load decomposeMatrix plugin if not mc.pluginInfo('decomposeMatrix',q=True,l=True): try: mc.loadPlugin('decomposeMatrix') except: raise MissingPluginError('Unable to load "decomposeMatrix" plugin!!') # ================================= # - Apply Inverse Transformations - # ================================= # Create and name decomposeMatrix node dcm = mc.createNode('decomposeMatrix',n=source+'_decomposeMatrix') # Make connections mc.connectAttr(source+'.inverseMatrix',dcm+'.inputMatrix',f=True) if translate: mc.connectAttr(dcm+'.outputTranslate',destination+'.translate',f=True) if rotate: mc.connectAttr(dcm+'.outputRotate',destination+'.rotate',f=True) if scale: mc.connectAttr(dcm+'.outputScale',destination+'.scale',f=True) # ================= # - Return Result - # ================= return dcm def fromList(valueList): ''' Create matrix from value list. @param valueList: List of matrix values @type valueList: list ''' # Check Value List if len(valueList) != 16: raise Exception('Invalid value list! Expecting 16 elements, found '+str(len(valueList))) # Create transformation matrix from input values matrix = OpenMaya.MMatrix() OpenMaya.MScriptUtil.createMatrixFromList(valueList,matrix) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, valueList[0]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, valueList[1])
#OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, valueList[5]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, valueList[6]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 3, valueList[7]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, valueList[8]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, valueList[9]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, valueList[10]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 3, valueList[11]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, valueList[12]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, valueList[13]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, valueList[14]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 3, valueList[15]) # Return Result return matrix def asList(matrix): ''' Return the specified matrix as a list @param matrix: Matrix to return list for @type matrix: maya.OpenMaya.MMatrix ''' return [ matrix(0,0),matrix(0,1),matrix(0,2),matrix(0,3), matrix(1,0),matrix(1,1),matrix(1,2),matrix(1,3), matrix(2,0),matrix(2,1),matrix(2,2),matrix(2,3), matrix(3,0),matrix(3,1),matrix(3,2),matrix(3,3), ] def printMatrix(matrix): ''' Print the specified matrix values to the script editor @param matrix: Matrix to print @type matrix: maya.OpenMaya.MMatrix ''' print ('%.3f' % matrix(0,0))+', '+('%.3f' % matrix(0,1))+', '+('%.3f' % matrix(0,2))+', '+('%.3f' % matrix(0,3)) print ('%.3f' % matrix(1,0))+', '+('%.3f' % matrix(1,1))+', '+('%.3f' % matrix(1,2))+', '+('%.3f' % matrix(1,3)) print ('%.3f' % matrix(2,0))+', '+('%.3f' % matrix(2,1))+', '+('%.3f' % matrix(2,2))+', '+('%.3f' % matrix(2,3)) print ('%.3f' % matrix(3,0))+', '+('%.3f' % matrix(3,1))+', '+('%.3f' % matrix(3,2))+', '+('%.3f' % matrix(3,3))
#OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, valueList[2]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 3, valueList[3]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, valueList[4])
random_line_split
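fromList in this record expects exactly 16 values, read row by row. Below is a dependency-free sketch of that round trip; from_list and as_list are hypothetical nested-list counterparts of the OpenMaya versions, not part of the Maya API.

# 16 flat values <-> 4x4 nested rows, read row by row.
def from_list(values):
    if len(values) != 16:
        raise ValueError("expected 16 elements, got %d" % len(values))
    return [values[i * 4:(i + 1) * 4] for i in range(4)]

def as_list(matrix):
    return [value for row in matrix for value in row]

values = list(range(16))
assert as_list(from_list(values)) == values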
matrix.py
import maya.cmds as mc import maya.OpenMaya as OpenMaya import mathUtils import math class MissingPluginError(Exception): pass def getMatrix(transform,local=False,time=None): ''' @param transform: Transform object to get world matrix from @type transform: str @param local: Get local space matrix instead of the world space matrix @type local: bool @param time: The frame to get the transform's world matrix for. If left at default, will use the current frame. @type time: int or float ''' # Check transform if not mc.objExists(transform): raise Exception('Object "'+transform+'" does not exist!!') # Define Matrix attribute matAttr = 'worldMatrix[0]' if local: matAttr = 'matrix' # Get time mat = OpenMaya.MMatrix() if time != None: mat = mc.getAttr(transform+'.'+matAttr,t=time) else: mat = mc.getAttr(transform+'.'+matAttr) # Build Matrix matrix = buildMatrix(translate=(mat[12],mat[13],mat[14]),xAxis=(mat[0],mat[1],mat[2]),yAxis=(mat[4],mat[5],mat[6]),zAxis=(mat[8],mat[9],mat[10])) # Return result return matrix def buildMatrix(translate=(0,0,0),xAxis=(1,0,0),yAxis=(0,1,0),zAxis=(0,0,1)): ''' Build a transformation matrix based on the input vectors @param translate: Translate values for the matrix @type translate: tuple/list @param xAxis: xAxis of the matrix @type xAxis: tuple/list @param yAxis: yAxis of the matrix @type yAxis: tuple/list @param zAxis: zAxis of the matrix @type zAxis: tuple/list ''' # Create transformation matrix from input vectors matrix = OpenMaya.MMatrix() values = [] OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, xAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, xAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, xAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, yAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, yAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, yAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, zAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, zAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, zAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, translate[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, translate[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, translate[2]) return matrix def vectorMatrixMultiply(vector,matrix,transformAsPoint=False,invertMatrix=False): ''' Transform a vector (or point) by a given transformation matrix. @param vector: Vector or point to be transformed @type vector: tuple/list @param matrix: MMatrix object to provide the transformation @type matrix: OpenMaya.MMatrix @param transformAsPoint: Transform the vector as a point @type transformAsPoint: bool @param invertMatrix: Use the matrix inverse to transform the vector @type invertMatrix: bool ''' # Create MPoint/MVector object for transformation if transformAsPoint: vector = OpenMaya.MPoint(vector[0],vector[1],vector[2],1.0) else: vector = OpenMaya.MVector(vector[0],vector[1],vector[2]) # Check input is of type MMatrix if type(matrix) != OpenMaya.MMatrix: raise Exception('Matrix input variable is not of expected type! Expecting MMatrix, received '+str(type(matrix))+'!!') # Transform vector if matrix != OpenMaya.MMatrix.identity: if invertMatrix: matrix = matrix.inverse() vector *= matrix # Return new vector return [vector.x,vector.y,vector.z] def getTranslation(matrix): ''' Return the translation component of a matrix.
@param matrix: Matrix to extract translation from @type matrix: maya.OpenMaya.MMatrix ''' x = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],0) y = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],1) z = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],2) return (x,y,z) def getRotation(matrix,rotationOrder='xyz'): ''' Return the rotation component of a matrix as euler (XYZ) values. @param matrix: Matrix to extract rotation from @type matrix: maya.OpenMaya.MMatrix @param rotationOrder: Rotation order of the matrix @type rotationOrder: str or int ''' # Calculate radian constant radian = 180.0/math.pi # Check rotation order if type(rotationOrder) == str: rotationOrder = rotationOrder.lower() rotateOrder = {'xyz':0,'yzx':1,'zxy':2,'xzy':3,'yxz':4,'zyx':5} if not rotateOrder.has_key(rotationOrder): raise Exception('Invalid rotation order supplied!') rotationOrder = rotateOrder[rotationOrder] else: rotationOrder = int(rotationOrder) # Get transformation matrix transformMatrix = OpenMaya.MTransformationMatrix(matrix) # Get Euler rotation from matrix eulerRot = transformMatrix.eulerRotation() # Reorder rotation eulerRot.reorderIt(rotationOrder) # Return XYZ rotation values return (eulerRot.x*radian,eulerRot.y*radian,eulerRot.z*radian) def buildRotation(aimVector,upVector=(0,1,0),aimAxis='x',upAxis='y'): ''' Build rotation matrix from the specified inputs @param aimVector: Aim vector for construction of rotation matrix (worldSpace) @type aimVector: tuple or list @param upVector: Up vector for construction of rotation matrix (worldSpace) @type upVector: tuple or list @param aimAxis: Aim axis for construction of rotation matrix @type aimAxis: str @param upAxis: Up axis for construction of rotation matrix @type upAxis: str ''' # Check negative axis negAim = False negUp = False if aimAxis[0] == '-': aimAxis = aimAxis[1] negAim = True if upAxis[0] == '-': upAxis = upAxis[1] negUp = True # Check valid axis axisList = ['x','y','z'] if not axisList.count(aimAxis): raise Exception('Aim axis is not valid!') if not axisList.count(upAxis): raise Exception('Up axis is not valid!') if aimAxis == upAxis: raise Exception('Aim and Up axis must be unique!') # Determine cross axis axisList.remove(aimAxis) axisList.remove(upAxis) crossAxis = axisList[0] # Normalize aimVector aimVector = mathUtils.normalizeVector(aimVector) if negAim: aimVector = (-aimVector[0],-aimVector[1],-aimVector[2]) # Normalize upVector upVector = mathUtils.normalizeVector(upVector) if negUp: upVector = (-upVector[0],-upVector[1],-upVector[2]) # Get cross product vector crossVector = (0,0,0) if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'): crossVector = mathUtils.crossProduct(upVector,aimVector) else: crossVector = mathUtils.crossProduct(aimVector,upVector) # Recalculate upVector (orthogonalize) if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'): upVector = mathUtils.crossProduct(aimVector,crossVector) else: upVector = mathUtils.crossProduct(crossVector,aimVector) # Build axis dictionary axisDict={aimAxis: aimVector,upAxis: upVector,crossAxis: crossVector} # Build rotation matrix mat = buildMatrix(xAxis=axisDict['x'],yAxis=axisDict['y'],zAxis=axisDict['z']) # Return rotation matrix return mat def inverseTransform(source,destination,translate=True,rotate=True,scale=True): ''' Apply the inverse of a specified transform to another target transform.
@param source: The source transform that will supply the transformation @type source: str @param destination: The destination transform that will receive the inverse transformation @type destination: str @param translate: Apply inverse translate to destination transform @type translate: bool @param rotate: Apply inverse rotation to destination transform @type rotate: bool @param scale: Apply inverse scale to destination transform @type scale: bool ''' # ========== # - Checks - # ========== if not mc.objExists(source): raise Exception('Transform "'+source+'" does not exist!!') if not mc.objExists(destination): raise Exception('Transform "'+destination+'" does not exist!!') # Load decomposeMatrix plugin if not mc.pluginInfo('decomposeMatrix',q=True,l=True): try: mc.loadPlugin('decomposeMatrix') except: raise MissingPluginError('Unable to load "decomposeMatrix" plugin!!') # ================================= # - Apply Inverse Transformations - # ================================= # Create and name decomposeMatrix node dcm = mc.createNode('decomposeMatrix',n=source+'_decomposeMatrix') # Make connections mc.connectAttr(source+'.inverseMatrix',dcm+'.inputMatrix',f=True) if translate: mc.connectAttr(dcm+'.outputTranslate',destination+'.translate',f=True) if rotate: mc.connectAttr(dcm+'.outputRotate',destination+'.rotate',f=True) if scale: mc.connectAttr(dcm+'.outputScale',destination+'.scale',f=True) # ================= # - Return Result - # ================= return dcm def fromList(valueList): ''' Create matrix from value list. @param valueList: List of matrix values @type valueList: list ''' # Check Value List if len(valueList) != 16: raise Exception('Invalid value list! Expecting 16 elements, found '+str(len(valueList))) # Create transformation matrix from input values matrix = OpenMaya.MMatrix() OpenMaya.MScriptUtil.createMatrixFromList(valueList,matrix) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, valueList[0]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, valueList[1]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, valueList[2]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 3, valueList[3]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, valueList[4]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, valueList[5]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, valueList[6]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 3, valueList[7]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, valueList[8]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, valueList[9]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, valueList[10]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 3, valueList[11]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, valueList[12]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, valueList[13]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, valueList[14]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 3, valueList[15]) # Return Result return matrix def
(matrix): ''' Return the specified matrix as a list @param matrix: Matrix to return list for @type matrix: maya.OpenMaya.MMatrix ''' return [ matrix(0,0),matrix(0,1),matrix(0,2),matrix(0,3), matrix(1,0),matrix(1,1),matrix(1,2),matrix(1,3), matrix(2,0),matrix(2,1),matrix(2,2),matrix(2,3), matrix(3,0),matrix(3,1),matrix(3,2),matrix(3,3), ] def printMatrix(matrix): ''' Print the specified matrix values to the script editor @param matrix: Matrix to print @type matrix: maya.OpenMaya.MMatrix ''' print ('%.3f' % matrix(0,0))+', '+('%.3f' % matrix(0,1))+', '+('%.3f' % matrix(0,2))+', '+('%.3f' % matrix(0,3)) print ('%.3f' % matrix(1,0))+', '+('%.3f' % matrix(1,1))+', '+('%.3f' % matrix(1,2))+', '+('%.3f' % matrix(1,3)) print ('%.3f' % matrix(2,0))+', '+('%.3f' % matrix(2,1))+', '+('%.3f' % matrix(2,2))+', '+('%.3f' % matrix(2,3)) print ('%.3f' % matrix(3,0))+', '+('%.3f' % matrix(3,1))+', '+('%.3f' % matrix(3,2))+', '+('%.3f' % matrix(3,3))
asList
identifier_name
matrix.py
import maya.cmds as mc import maya.OpenMaya as OpenMaya import mathUtils import math class MissingPluginError(Exception): pass def getMatrix(transform,local=False,time=None): ''' @param transform: Transform object to get world matrix from @type transform: str @param local: Get local space matrix instead of the world space matrix @type local: bool @param time: The frame to get the transform's world matrix for. If left at default, will use the current frame. @type time: int or float ''' # Check transform if not mc.objExists(transform): raise Exception('Object "'+transform+'" does not exist!!') # Define Matrix attribute matAttr = 'worldMatrix[0]' if local: matAttr = 'matrix' # Get time mat = OpenMaya.MMatrix() if time != None: mat = mc.getAttr(transform+'.'+matAttr,t=time) else: mat = mc.getAttr(transform+'.'+matAttr) # Build Matrix matrix = buildMatrix(translate=(mat[12],mat[13],mat[14]),xAxis=(mat[0],mat[1],mat[2]),yAxis=(mat[4],mat[5],mat[6]),zAxis=(mat[8],mat[9],mat[10])) # Return result return matrix def buildMatrix(translate=(0,0,0),xAxis=(1,0,0),yAxis=(0,1,0),zAxis=(0,0,1)): ''' Build a transformation matrix based on the input vectors @param translate: Translate values for the matrix @type translate: tuple/list @param xAxis: xAxis of the matrix @type xAxis: tuple/list @param yAxis: yAxis of the matrix @type yAxis: tuple/list @param zAxis: zAxis of the matrix @type zAxis: tuple/list ''' # Create transformation matrix from input vectors matrix = OpenMaya.MMatrix() values = [] OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, xAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, xAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, xAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, yAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, yAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, yAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, zAxis[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, zAxis[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, zAxis[2]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, translate[0]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, translate[1]) OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, translate[2]) return matrix def vectorMatrixMultiply(vector,matrix,transformAsPoint=False,invertMatrix=False): ''' Transform a vector (or point) by a given transformation matrix. @param vector: Vector or point to be transformed @type vector: tuple/list @param matrix: MMatrix object to provide the transformation @type matrix: OpenMaya.MMatrix @param transformAsPoint: Transform the vector as a point @type transformAsPoint: bool @param invertMatrix: Use the matrix inverse to transform the vector @type invertMatrix: bool ''' # Create MPoint/MVector object for transformation if transformAsPoint: vector = OpenMaya.MPoint(vector[0],vector[1],vector[2],1.0) else: vector = OpenMaya.MVector(vector[0],vector[1],vector[2]) # Check input is of type MMatrix if type(matrix) != OpenMaya.MMatrix: raise Exception('Matrix input variable is not of expected type! Expecting MMatrix, received '+str(type(matrix))+'!!') # Transform vector if matrix != OpenMaya.MMatrix.identity: if invertMatrix: matrix = matrix.inverse() vector *= matrix # Return new vector return [vector.x,vector.y,vector.z] def getTranslation(matrix): ''' Return the translation component of a matrix.
@param matrix: Matrix to extract translation from @type matrix: maya.OpenMaya.MMatrix ''' x = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],0) y = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],1) z = OpenMaya.MScriptUtil.getDoubleArrayItem(matrix[3],2) return (x,y,z) def getRotation(matrix,rotationOrder='xyz'): ''' Return the rotation component of a matrix as euler (XYZ) values. @param matrix: Matrix to extract rotation from @type matrix: maya.OpenMaya.MMatrix @param rotationOrder: Rotation order of the matrix @type rotationOrder: str or int ''' # Calculate radian constant radian = 180.0/math.pi # Check rotation order if type(rotationOrder) == str: rotationOrder = rotationOrder.lower() rotateOrder = {'xyz':0,'yzx':1,'zxy':2,'xzy':3,'yxz':4,'zyx':5} if not rotateOrder.has_key(rotationOrder): raise Exception('Invalid rotation order supplied!') rotationOrder = rotateOrder[rotationOrder] else: rotationOrder = int(rotationOrder) # Get transformation matrix transformMatrix = OpenMaya.MTransformationMatrix(matrix) # Get Euler rotation from matrix eulerRot = transformMatrix.eulerRotation() # Reorder rotation eulerRot.reorderIt(rotationOrder) # Return XYZ rotation values return (eulerRot.x*radian,eulerRot.y*radian,eulerRot.z*radian) def buildRotation(aimVector,upVector=(0,1,0),aimAxis='x',upAxis='y'): ''' Build rotation matrix from the specified inputs @param aimVector: Aim vector for construction of rotation matrix (worldSpace) @type aimVector: tuple or list @param upVector: Up vector for construction of rotation matrix (worldSpace) @type upVector: tuple or list @param aimAxis: Aim axis for construction of rotation matrix @type aimAxis: str @param upAxis: Up axis for construction of rotation matrix @type upAxis: str ''' # Check negative axis negAim = False negUp = False if aimAxis[0] == '-': aimAxis = aimAxis[1] negAim = True if upAxis[0] == '-': upAxis = upAxis[1] negUp = True # Check valid axis axisList = ['x','y','z'] if not axisList.count(aimAxis): raise Exception('Aim axis is not valid!') if not axisList.count(upAxis): raise Exception('Up axis is not valid!') if aimAxis == upAxis: raise Exception('Aim and Up axis must be unique!') # Determine cross axis axisList.remove(aimAxis) axisList.remove(upAxis) crossAxis = axisList[0] # Normalize aimVector aimVector = mathUtils.normalizeVector(aimVector) if negAim: aimVector = (-aimVector[0],-aimVector[1],-aimVector[2]) # Normalize upVector upVector = mathUtils.normalizeVector(upVector) if negUp: upVector = (-upVector[0],-upVector[1],-upVector[2]) # Get cross product vector crossVector = (0,0,0) if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'): crossVector = mathUtils.crossProduct(upVector,aimVector) else: crossVector = mathUtils.crossProduct(aimVector,upVector) # Recalculate upVector (orthogonalize) if (aimAxis == 'x' and upAxis == 'z') or (aimAxis == 'z' and upAxis == 'y'):
else: upVector = mathUtils.crossProduct(crossVector,aimVector) # Build axis dictionary axisDict={aimAxis: aimVector,upAxis: upVector,crossAxis: crossVector} # Build rotation matrix mat = buildMatrix(xAxis=axisDict['x'],yAxis=axisDict['y'],zAxis=axisDict['z']) # Return rotation matrix return mat def inverseTransform(source,destination,translate=True,rotate=True,scale=True): ''' Apply the inverse of a specified transform to another target transform. @param source: The source transform that will supply the transformation @type source: str @param destination: The destination transform that will receive the inverse transformation @type destination: str @param translate: Apply inverse translate to destination transform @type translate: bool @param rotate: Apply inverse rotation to destination transform @type rotate: bool @param scale: Apply inverse scale to destination transform @type scale: bool ''' # ========== # - Checks - # ========== if not mc.objExists(source): raise Exception('Transform "'+source+'" does not exist!!') if not mc.objExists(destination): raise Exception('Transform "'+destination+'" does not exist!!') # Load decomposeMatrix plugin if not mc.pluginInfo('decomposeMatrix',q=True,l=True): try: mc.loadPlugin('decomposeMatrix') except: raise MissingPluginError('Unable to load "decomposeMatrix" plugin!!') # ================================= # - Apply Inverse Transformations - # ================================= # Create and name decomposeMatrix node dcm = mc.createNode('decomposeMatrix',n=source+'_decomposeMatrix') # Make connections mc.connectAttr(source+'.inverseMatrix',dcm+'.inputMatrix',f=True) if translate: mc.connectAttr(dcm+'.outputTranslate',destination+'.translate',f=True) if rotate: mc.connectAttr(dcm+'.outputRotate',destination+'.rotate',f=True) if scale: mc.connectAttr(dcm+'.outputScale',destination+'.scale',f=True) # ================= # - Return Result - # ================= return dcm def fromList(valueList): ''' Create matrix from value list. @param valueList: List of matrix values @type valueList: list ''' # Check Value List if len(valueList) != 16: raise Exception('Invalid value list! 
Expecting 16 elements, found '+str(len(valueList))) # Create transformation matrix from input values matrix = OpenMaya.MMatrix() OpenMaya.MScriptUtil.createMatrixFromList(valueList,matrix) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 0, valueList[0]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 1, valueList[1]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 2, valueList[2]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[0], 3, valueList[3]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 0, valueList[4]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 1, valueList[5]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 2, valueList[6]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[1], 3, valueList[7]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 0, valueList[8]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 1, valueList[9]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 2, valueList[10]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[2], 3, valueList[11]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 0, valueList[12]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 1, valueList[13]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 2, valueList[14]) #OpenMaya.MScriptUtil.setDoubleArray(matrix[3], 3, valueList[15]) # Return Result return matrix def asList(matrix): ''' Return the specified matrix as a list @param matrix: Matrix to return list for @type matrix: maya.OpenMaya.MMatrix ''' return [ matrix(0,0),matrix(0,1),matrix(0,2),matrix(0,3), matrix(1,0),matrix(1,1),matrix(1,2),matrix(1,3), matrix(2,0),matrix(2,1),matrix(2,2),matrix(2,3), matrix(3,0),matrix(3,1),matrix(3,2),matrix(3,3), ] def printMatrix(matrix): ''' Print the specified matrix values to the script editor @param matrix: Matrix to print @type matrix: maya.OpenMaya.MMatrix ''' print ('%.3f' % matrix(0,0))+', '+('%.3f' % matrix(0,1))+', '+('%.3f' % matrix(0,2))+', '+('%.3f' % matrix(0,3)) print ('%.3f' % matrix(1,0))+', '+('%.3f' % matrix(1,1))+', '+('%.3f' % matrix(1,2))+', '+('%.3f' % matrix(1,3)) print ('%.3f' % matrix(2,0))+', '+('%.3f' % matrix(2,1))+', '+('%.3f' % matrix(2,2))+', '+('%.3f' % matrix(2,3)) print ('%.3f' % matrix(3,0))+', '+('%.3f' % matrix(3,1))+', '+('%.3f' % matrix(3,2))+', '+('%.3f' % matrix(3,3))
upVector = mathUtils.crossProduct(aimVector,crossVector)
conditional_block
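The conditional block masked in this record sits inside buildRotation's orthogonalization step: the up vector is recomputed from two cross products so it ends up perpendicular to the aim vector. Here is a minimal sketch of what appears to be the aimAxis='x', upAxis='y' branch, with normalize/cross re-implemented in place of the assumed mathUtils helpers.

import math

def normalize(v):
    m = math.sqrt(sum(c * c for c in v))
    return tuple(c / m for c in v)

def cross(a, b):
    return (a[1] * b[2] - a[2] * b[1],
            a[2] * b[0] - a[0] * b[2],
            a[0] * b[1] - a[1] * b[0])

aim = normalize((1.0, 1.0, 0.0))               # x axis
side = cross(aim, normalize((0.0, 1.0, 0.0)))  # z axis from aim x up
up = cross(side, aim)                          # recomputed, now orthogonal to aim
assert abs(sum(a * u for a, u in zip(aim, up))) < 1e-9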
main.rs
use futures::future::join_all; use generational_arena::{Arena, Index}; use nalgebra::Vector2; use std::env::args; use std::error::Error; use std::net::SocketAddr; use std::num::Wrapping; use std::time::Duration; use std::time::SystemTime; use tokio::net::{TcpListener, TcpStream, UdpSocket}; use tokio::prelude::*; use tokio::sync::mpsc::{channel, Sender}; use tokio::time::interval; use game; #[derive(Debug)] struct Player { player: game::Player, tcp_tx: Sender<game::TcpClientMessage>, random_bytes: [u8; game::NUM_RANDOM_BYTES], udp_addr: Option<SocketAddr>, input: Vector2<f64>, angle: f64, firing: bool, fire_counter: f64, } #[derive(Debug)] struct Bullet { bullet: game::Bullet, velocity: Vector2<f64>, lifetime: f64, } fn accept( players: &mut Arena<Player>, bullets: &Arena<Bullet>, stream: TcpStream, mut internal_tcp_tx: Sender<(Index, Option<game::TcpServerMessage>)>, tick_rate: u32, tick_zero: SystemTime, tick: game::Tick, ) { println!("connection!"); let (tx, mut rx) = channel(4); let idx = match players.try_insert(Player { player: game::Player { id: 0, // Set below. radius: 1.0, position: Vector2::new(0.0, 0.0), velocity: Vector2::new(0.0, 0.0), }, tcp_tx: tx, udp_addr: None, random_bytes: rand::random(), input: Vector2::new(0.0, 0.0), angle: 0.0, firing: false, fire_counter: 0.0, }) { Ok(idx) => idx, Err(_) => { println!("rejecting connection; too many players"); return; } };
// Set the user ID to some combination of the arena index and generation. let id = idx.into_raw_parts().0 as u8; players[idx].player.id = id; // TODO(jack) Broadcast PlayerLeft messages. // Broadcast PlayerJoined messages. let mut tcp_txs: Vec<_> = players .iter() .filter(|(other_idx, _)| *other_idx != idx) .map(|(_, p)| p.tcp_tx.clone()) .collect(); let msg = game::TcpClientMessage::PlayerJoined(id); tokio::spawn(async move { let join_handles = tcp_txs.iter_mut().map(|tcp_tx| tcp_tx.send(msg.clone())); join_all(join_handles).await; }); // Start tasks to read-from / write-to the TCP socket. let (mut reader, mut writer) = stream.into_split(); tokio::spawn(async move { loop { const MAX_PACKET_SIZE_PLUS_ONE: usize = 64; let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE]; let num_bytes = match reader.read(&mut buf).await { Ok(0) => break, Ok(MAX_PACKET_SIZE_PLUS_ONE) => break, Err(err) => { eprintln!("{}", err); break; } Ok(num_bytes) => num_bytes, }; match bincode::deserialize(&buf[..num_bytes]) { Ok(msg) => match internal_tcp_tx.send((idx, Some(msg))).await { Ok(_) => (), Err(_) => break, }, Err(err) => { eprintln!("{}", err); break; } }; } // One consequence of every client publishing TCP packets to the same channel // is that we don't know when any one disconnects. // We signal it here with a `None`. internal_tcp_tx.send((idx, None)).await.ok(); }); let random_bytes = players[idx].random_bytes.clone(); let update = game::WorldUpdate { tick, players: players.iter().map(|(_, p)| p.player).collect(), bullets: bullets.iter().map(|(_, b)| b.bullet).collect(), }; tokio::spawn(async move { // Send the init packet. // For now, this will just include a random sequence of bytes. // We'll then wait for the random sequence of bytes via UDP to identify the client's external port number. let bytes = bincode::serialize(&game::TcpClientMessage::Init(game::ClientInit { id, random_bytes, update, tick_rate: tick_rate as u8, tick_zero, })) .unwrap(); if let Err(err) = writer.write_all(&bytes[..]).await { eprintln!("{}", err); return; } println!("wrote init message"); loop { match rx.recv().await { Some(msg) => { if let Err(_) = writer .write_all(bincode::serialize(&msg).unwrap().as_slice()) .await { break; } } None => break, }; } }); } fn step(players: &mut Arena<Player>, bullets: &mut Arena<Bullet>, dt: f64) { // Apply player impulse. for (_, player) in players.iter_mut() { let acceleration = 64.0; let max_velocity = 16.0; let friction = 16.0; // Acceleration ranges from `friction` to `friction + acceleration`, // and is inversely proportional to the projection of the current velocity onto the input vector. let acceleration_index = player.player.velocity.dot(&player.input) / max_velocity; let acceleration_index = if acceleration_index < 0.0 { 0.0 } else { acceleration_index.sqrt() }; let adjusted_acceleration = friction + acceleration * (1.0 - acceleration_index); player.player.velocity += adjusted_acceleration * dt * player.input; let dampened_velocity_unclamped = player.player.velocity.magnitude() - dt * friction; let dampened_velocity = if dampened_velocity_unclamped < 0.0 { 0.0 } else { dampened_velocity_unclamped }; let velocity_unit = player .player .velocity .try_normalize(0.0) .unwrap_or(Vector2::new(0.0, 0.0)); player.player.velocity = dampened_velocity * velocity_unit; player.player.position += dt * player.player.velocity; } // Remove expired bullets.
let bullets_to_remove: Vec<_> = bullets .iter() .filter(|(_, b)| b.lifetime > 1.0) .map(|(idx, _)| idx) .collect(); for idx in bullets_to_remove.iter() { bullets.remove(*idx); } // Fire bullets. for (_, player) in players.iter_mut().filter(|(_, p)| p.firing) { let rof = 30.0; player.fire_counter += rof * dt; if player.fire_counter >= 1.0 { player.fire_counter %= 1.0; let idx = match bullets.try_insert(Bullet { bullet: game::Bullet { id: 0, player_id: player.player.id, position: player.player.position, angle: player.angle, radius: 0.5, }, velocity: 32.0 * Vector2::new(player.angle.cos(), player.angle.sin()), lifetime: 0.0, }) { Ok(idx) => idx, Err(_) => { eprintln!("too many bullets!"); break; } }; // Set the user ID to the arena index. let raw_parts = idx.into_raw_parts(); bullets[idx].bullet.id = ((raw_parts.0 & 10) | ((raw_parts.1 as usize) << 10)) as u16; } } // Update bullets. for (_, bullet) in bullets.iter_mut() { bullet.bullet.position += dt * bullet.velocity; bullet.lifetime += dt; } // Manage collisions. // We have to collect the idxs to avoid borrowing `players`. let idxs: Vec<_> = players.iter().map(|(idx, _)| idx).collect(); let idx_pairs = idxs .iter() .map(|a| idxs.iter().map(move |b| (a, b))) .flatten() .filter(|(a, b)| a.into_raw_parts().0 < b.into_raw_parts().0); for (a, b) in idx_pairs { let (a, b) = players.get2_mut(*a, *b); let a = a.unwrap(); let b = b.unwrap(); let distance = match a.player.position - b.player.position { v if v.x == 0.0 && v.y == 0.0 => Vector2::new(0.001, 0.001), v => v, }; let max_distance = a.player.radius + b.player.radius; if distance.magnitude_squared() >= max_distance.powi(2) { continue; // No collision. } let displacement_unit = distance.try_normalize(0.0).unwrap(); let displacement = displacement_unit * (max_distance - distance.magnitude()); a.player.position += 0.5 * displacement; b.player.position += -0.5 * displacement; let momentum = a.player.velocity.magnitude() + b.player.velocity.magnitude(); let elasticity = 2.0; a.player.velocity = 0.5 * elasticity * momentum * displacement_unit; b.player.velocity = -0.5 * elasticity * momentum * displacement_unit; } } #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { let port: u32 = match args().nth(1).and_then(|s| s.parse().ok()) { Some(port) => port, None => { eprintln!("Usage: {} PORT", args().nth(0).unwrap()); return Ok(()); } }; let mut players = Arena::with_capacity(16); let mut bullets = Arena::with_capacity(1024); let tick_rate = 60; let mut ticker = interval(Duration::from_secs(1) / tick_rate); let snapshot_rate = 10; let mut snapshot_ticker = interval(Duration::from_secs(1) / snapshot_rate); let mut tcp_listener = TcpListener::bind(format!("0.0.0.0:{}", port)).await?; let mut udp_socket = UdpSocket::bind(format!("0.0.0.0:{}", port)).await?; // tcp_rx is our global receiver for TCP events. // This means that each player holds a copy of tcp_tx which packets are passed to. let (tcp_tx, mut tcp_rx) = channel(4); let mut tick = Wrapping(0); let tick_zero = SystemTime::now(); loop { const MAX_PACKET_SIZE_PLUS_ONE: usize = 64; let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE]; // TODO(jack) Redesign this select! call to execute as little code linearly as possible. tokio::select! { _ = ticker.tick() => { // Update game state. let dt = 1.0 / tick_rate as f64; // TODO(jack) Measure actual elapsed time. step(&mut players, &mut bullets, dt); tick = tick + Wrapping(1); }, _ = snapshot_ticker.tick() => { // Broadcast. 
let update = game::WorldUpdate { tick: tick.0, players: players.iter().map(|(_, p)| p.player).collect(), bullets: bullets.iter().map(|(_, b)| b.bullet).collect(), }; let bytes = bincode::serialize(&game::UdpClientMessage::WorldUpdate(update)).unwrap(); for (_, player) in players.iter().filter(|(_, p)| p.udp_addr.is_some()) { udp_socket.send_to(&bytes, player.udp_addr.unwrap()).await?; } }, accept_result = tcp_listener.accept() => match accept_result { Ok((stream, _)) => accept(&mut players, &bullets, stream, tcp_tx.clone(), tick_rate, tick_zero, tick.0), Err(err) => { eprintln!("{}", err); break }, }, // TODO(jack) TCP messages from the client should end up in a channel. result = tcp_rx.recv() => match result { Some((idx, None)) => { println!("disconnection!"); let id = players[idx].player.id; // Broadcast that a player left. let mut tcp_txs: Vec<_> = players .iter() .filter(|(other_idx, _)| *other_idx != idx) .map(|(_, p)| p.tcp_tx.clone()) .collect(); tokio::spawn(async move { let msg = game::TcpClientMessage::PlayerLeft(id); let join_handles = tcp_txs .iter_mut() .map(|tcp_tx| tcp_tx.send(msg.clone())); // TODO(jack) Can we do this without allocations? join_all(join_handles).await; }); players.remove(idx); }, Some((idx, Some(msg))) => println!("{:?}: {:?}", idx, msg), None => break, }, result = udp_socket.recv_from(&mut buf) => match result { Ok((0, _)) => break, Ok((MAX_PACKET_SIZE_PLUS_ONE, _)) => break, Ok((num_bytes, socket_addr)) => { let bytes = &buf[..num_bytes]; let msg: game::UdpServerMessage = match bincode::deserialize(&bytes) { Ok(msg) => msg, Err(err) => { eprintln!("{}", err); continue }, }; match msg { game::UdpServerMessage::Init(game::ServerInit { random_bytes }) => { println!("received init message: {:?}", random_bytes); if let Some((_, player)) = players.iter_mut().find(|(_, player)| player.random_bytes == random_bytes) { player.udp_addr = Some(socket_addr); println!("{:?}", player.udp_addr); } }, game::UdpServerMessage::PlayerInput(inputs) => { let (_, player) = match players.iter_mut().find(|(_, player)| player.udp_addr.is_some() && player.udp_addr.unwrap() == socket_addr) { Some((idx, player)) => (idx, player), None => continue, }; // TODO(jack) Apply the inputs according to their tick. // Right now, we're just taking the most recent one. if inputs.len() == 0 { continue } let input = inputs.iter().last().unwrap(); player.input = Vector2::new( (input.right as i32 - input.left as i32) as f64, (input.down as i32 - input.up as i32) as f64, ).try_normalize(0.0).unwrap_or(Vector2::new(0.0, 0.0)); player.angle = input.angle; // TODO(jack) We probably just want to compose input in the player struct. if input.mouse_left { player.firing = true; } else { player.firing = false; player.fire_counter = 0.0; } }, }; }, Err(err) => { eprintln!("{}", err); break }, } } } Ok(()) }
random_line_split
main.rs
use futures::future::join_all;
use generational_arena::{Arena, Index};
use nalgebra::Vector2;
use std::env::args;
use std::error::Error;
use std::net::SocketAddr;
use std::num::Wrapping;
use std::time::Duration;
use std::time::SystemTime;
use tokio::net::{TcpListener, TcpStream, UdpSocket};
use tokio::prelude::*;
use tokio::sync::mpsc::{channel, Sender};
use tokio::time::interval;

use game;

#[derive(Debug)]
struct Player {
    player: game::Player,
    tcp_tx: Sender<game::TcpClientMessage>,
    random_bytes: [u8; game::NUM_RANDOM_BYTES],
    udp_addr: Option<SocketAddr>,
    input: Vector2<f64>,
    angle: f64,
    firing: bool,
    fire_counter: f64,
}

#[derive(Debug)]
struct Bullet {
    bullet: game::Bullet,
    velocity: Vector2<f64>,
    lifetime: f64,
}

fn accept(
    players: &mut Arena<Player>,
    bullets: &Arena<Bullet>,
    stream: TcpStream,
    mut internal_tcp_tx: Sender<(Index, Option<game::TcpServerMessage>)>,
    tick_rate: u32,
    tick_zero: SystemTime,
    tick: game::Tick,
) {
    println!("connection!");
    let (tx, mut rx) = channel(4);
    let idx = match players.try_insert(Player {
        player: game::Player {
            id: 0, // Set below.
            radius: 1.0,
            position: Vector2::new(0.0, 0.0),
            velocity: Vector2::new(0.0, 0.0),
        },
        tcp_tx: tx,
        udp_addr: None,
        random_bytes: rand::random(),
        input: Vector2::new(0.0, 0.0),
        angle: 0.0,
        firing: false,
        fire_counter: 0.0,
    }) {
        Ok(idx) => idx,
        Err(_) => {
            println!("rejecting connection; too many players");
            return;
        }
    };

    // Set the user ID to some combination of the arena index and generation.
    let id = idx.into_raw_parts().0 as u8;
    players[idx].player.id = id;

    // TODO(jack) Broadcast PlayerLeft messages.

    // Broadcast PlayerJoined messages.
    let mut tcp_txs: Vec<_> = players
        .iter()
        .filter(|(other_idx, _)| *other_idx != idx)
        .map(|(_, p)| p.tcp_tx.clone())
        .collect();
    let msg = game::TcpClientMessage::PlayerJoined(id);
    tokio::spawn(async move {
        let join_handles = tcp_txs.iter_mut().map(|tcp_tx| tcp_tx.send(msg.clone()));
        join_all(join_handles).await;
    });

    // Start tasks to read-from / write-to the TCP socket.

    let (mut reader, mut writer) = stream.into_split();

    tokio::spawn(async move {
        loop {
            const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
            let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE];
            let num_bytes = match reader.read(&mut buf).await {
                Ok(0) => break,
                Ok(MAX_PACKET_SIZE_PLUS_ONE) => break,
                Err(err) => {
                    eprintln!("{}", err);
                    break;
                }
                Ok(num_bytes) => num_bytes,
            };
            match bincode::deserialize(&buf[..num_bytes]) {
                Ok(msg) => match internal_tcp_tx.send((idx, Some(msg))).await {
                    Ok(_) => (),
                    Err(_) => break,
                },
                Err(err) => {
                    eprintln!("{}", err);
                    break;
                }
            };
        }

        // One consequence of every client publishing TCP packets to the same channel
        // is that we don't know when any one disconnects.
        // We signal it here with a `None`.
        internal_tcp_tx.send((idx, None)).await.ok();
    });

    let random_bytes = players[idx].random_bytes.clone();
    let update = game::WorldUpdate {
        tick,
        players: players.iter().map(|(_, p)| p.player).collect(),
        bullets: bullets.iter().map(|(_, b)| b.bullet).collect(),
    };
    tokio::spawn(async move {
        // Send the init packet.
        // For now, this will just include a random sequence of bytes.
        // We'll then wait for the random sequence of bytes via UDP to identify the client's external port number.
let bytes = bincode::serialize(&game::TcpClientMessage::Init(game::ClientInit { id, random_bytes, update, tick_rate: tick_rate as u8, tick_zero, })) .unwrap(); if let Err(err) = writer.write_all(&bytes[..]).await { eprintln!("{}", err); return; } println!("wrote init message"); loop { match rx.recv().await { Some(msg) => { if let Err(_) = writer .write_all(bincode::serialize(&msg).unwrap().as_slice()) .await { break; } } None => break, }; } }); } fn step(players: &mut Arena<Player>, bullets: &mut Arena<Bullet>, dt: f64)
#[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { let port: u32 = match args().nth(1).and_then(|s| s.parse().ok()) { Some(port) => port, None => { eprintln!("Usage: {} PORT", args().nth(0).unwrap()); return Ok(()); } }; let mut players = Arena::with_capacity(16); let mut bullets = Arena::with_capacity(1024); let tick_rate = 60; let mut ticker = interval(Duration::from_secs(1) / tick_rate); let snapshot_rate = 10; let mut snapshot_ticker = interval(Duration::from_secs(1) / snapshot_rate); let mut tcp_listener = TcpListener::bind(format!("0.0.0.0:{}", port)).await?; let mut udp_socket = UdpSocket::bind(format!("0.0.0.0:{}", port)).await?; // tcp_rx is our global receiver for TCP events. // This means that each player holds a copy of tcp_tx which packets are passed to. let (tcp_tx, mut tcp_rx) = channel(4); let mut tick = Wrapping(0); let tick_zero = SystemTime::now(); loop { const MAX_PACKET_SIZE_PLUS_ONE: usize = 64; let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE]; // TODO(jack) Redesign this select! call to execute as little code linearly as possible. tokio::select! { _ = ticker.tick() => { // Update game state. let dt = 1.0 / tick_rate as f64; // TODO(jack) Measure actual elapsed time. step(&mut players, &mut bullets, dt); tick = tick + Wrapping(1); }, _ = snapshot_ticker.tick() => { // Broadcast. let update = game::WorldUpdate { tick: tick.0, players: players.iter().map(|(_, p)| p.player).collect(), bullets: bullets.iter().map(|(_, b)| b.bullet).collect(), }; let bytes = bincode::serialize(&game::UdpClientMessage::WorldUpdate(update)).unwrap(); for (_, player) in players.iter().filter(|(_, p)| p.udp_addr.is_some()) { udp_socket.send_to(&bytes, player.udp_addr.unwrap()).await?; } }, accept_result = tcp_listener.accept() => match accept_result { Ok((stream, _)) => accept(&mut players, &bullets, stream, tcp_tx.clone(), tick_rate, tick_zero, tick.0), Err(err) => { eprintln!("{}", err); break }, }, // TODO(jack) TCP messages from the client should end up in a channel. result = tcp_rx.recv() => match result { Some((idx, None)) => { println!("disconnection!"); let id = players[idx].player.id; // Broadcast that a player left. let mut tcp_txs: Vec<_> = players .iter() .filter(|(other_idx, _)| *other_idx != idx) .map(|(_, p)| p.tcp_tx.clone()) .collect(); tokio::spawn(async move { let msg = game::TcpClientMessage::PlayerLeft(id); let join_handles = tcp_txs .iter_mut() .map(|tcp_tx| tcp_tx.send(msg.clone())); // TODO(jack) Can we do this without allocations? 
join_all(join_handles).await; }); players.remove(idx); }, Some((idx, Some(msg))) => println!("{:?}: {:?}", idx, msg), None => break, }, result = udp_socket.recv_from(&mut buf) => match result { Ok((0, _)) => break, Ok((MAX_PACKET_SIZE_PLUS_ONE, _)) => break, Ok((num_bytes, socket_addr)) => { let bytes = &buf[..num_bytes]; let msg: game::UdpServerMessage = match bincode::deserialize(&bytes) { Ok(msg) => msg, Err(err) => { eprintln!("{}", err); continue }, }; match msg { game::UdpServerMessage::Init(game::ServerInit { random_bytes }) => { println!("received init message: {:?}", random_bytes); if let Some((_, player)) = players.iter_mut().find(|(_, player)| player.random_bytes == random_bytes) { player.udp_addr = Some(socket_addr); println!("{:?}", player.udp_addr); } }, game::UdpServerMessage::PlayerInput(inputs) => { let (_, player) = match players.iter_mut().find(|(_, player)| player.udp_addr.is_some() && player.udp_addr.unwrap() == socket_addr) { Some((idx, player)) => (idx, player), None => continue, }; // TODO(jack) Apply the inputs according to their tick. // Right now, we're just taking the most recent one. if inputs.len() == 0 { continue } let input = inputs.iter().last().unwrap(); player.input = Vector2::new( (input.right as i32 - input.left as i32) as f64, (input.down as i32 - input.up as i32) as f64, ).try_normalize(0.0).unwrap_or(Vector2::new(0.0, 0.0)); player.angle = input.angle; // TODO(jack) We probably just want to compose input in the player struct. if input.mouse_left { player.firing = true; } else { player.firing = false; player.fire_counter = 0.0; } }, }; }, Err(err) => { eprintln!("{}", err); break }, } } } Ok(()) }
{
    // Apply player impulse.
    for (_, player) in players.iter_mut() {
        let acceleration = 64.0;
        let max_velocity = 16.0;
        let friction = 16.0;

        // Acceleration ranges from `friction` to `friction + acceleration`,
        // and decreases as the current velocity's projection onto the input vector grows.
        let acceleration_index = player.player.velocity.dot(&player.input) / max_velocity;
        let acceleration_index = if acceleration_index < 0.0 {
            0.0
        } else {
            acceleration_index.sqrt()
        };
        let adjusted_acceleration = friction + acceleration * (1.0 - acceleration_index);
        player.player.velocity += adjusted_acceleration * dt * player.input;

        let dampened_velocity_unclamped = player.player.velocity.magnitude() - dt * friction;
        let dampened_velocity = if dampened_velocity_unclamped < 0.0 {
            0.0
        } else {
            dampened_velocity_unclamped
        };
        let velocity_unit = player
            .player
            .velocity
            .try_normalize(0.0)
            .unwrap_or(Vector2::new(0.0, 0.0));
        player.player.velocity = dampened_velocity * velocity_unit;

        player.player.position += dt * player.player.velocity;
    }

    // Remove expired bullets.
    let bullets_to_remove: Vec<_> = bullets
        .iter()
        .filter(|(_, b)| b.lifetime > 1.0)
        .map(|(idx, _)| idx)
        .collect();
    for idx in bullets_to_remove.iter() {
        bullets.remove(*idx);
    }

    // Fire bullets.
    for (_, player) in players.iter_mut().filter(|(_, p)| p.firing) {
        let rof = 30.0;
        player.fire_counter += rof * dt;
        if player.fire_counter >= 1.0 {
            player.fire_counter %= 1.0;
            let idx = match bullets.try_insert(Bullet {
                bullet: game::Bullet {
                    id: 0,
                    player_id: player.player.id,
                    position: player.player.position,
                    angle: player.angle,
                    radius: 0.5,
                },
                velocity: 32.0 * Vector2::new(player.angle.cos(), player.angle.sin()),
                lifetime: 0.0,
            }) {
                Ok(idx) => idx,
                Err(_) => {
                    eprintln!("too many bullets!");
                    break;
                }
            };
            // Pack the bullet ID from the arena index (low 10 bits) and the generation (high bits).
            let raw_parts = idx.into_raw_parts();
            bullets[idx].bullet.id =
                ((raw_parts.0 & ((1 << 10) - 1)) | ((raw_parts.1 as usize) << 10)) as u16;
        }
    }

    // Update bullets.
    for (_, bullet) in bullets.iter_mut() {
        bullet.bullet.position += dt * bullet.velocity;
        bullet.lifetime += dt;
    }

    // Manage collisions.
    // We have to collect the idxs to avoid borrowing `players`.
    let idxs: Vec<_> = players.iter().map(|(idx, _)| idx).collect();
    let idx_pairs = idxs
        .iter()
        .map(|a| idxs.iter().map(move |b| (a, b)))
        .flatten()
        .filter(|(a, b)| a.into_raw_parts().0 < b.into_raw_parts().0);
    for (a, b) in idx_pairs {
        let (a, b) = players.get2_mut(*a, *b);
        let a = a.unwrap();
        let b = b.unwrap();
        let distance = match a.player.position - b.player.position {
            v if v.x == 0.0 && v.y == 0.0 => Vector2::new(0.001, 0.001),
            v => v,
        };
        let max_distance = a.player.radius + b.player.radius;
        if distance.magnitude_squared() >= max_distance.powi(2) {
            continue; // No collision.
        }
        let displacement_unit = distance.try_normalize(0.0).unwrap();
        let displacement = displacement_unit * (max_distance - distance.magnitude());
        a.player.position += 0.5 * displacement;
        b.player.position += -0.5 * displacement;
        let momentum = a.player.velocity.magnitude() + b.player.velocity.magnitude();
        let elasticity = 2.0;
        a.player.velocity = 0.5 * elasticity * momentum * displacement_unit;
        b.player.velocity = -0.5 * elasticity * momentum * displacement_unit;
    }
}
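// Hypothetical helpers (not in the original) spelling out the bullet-ID
// packing used above: the low 10 bits carry the arena slot index and the
// remaining high bits carry the generation. Note that a u16 leaves only
// 6 bits of generation, so long-lived servers will see generations wrap.
fn pack_bullet_id(index: usize, generation: u64) -> u16 {
    ((index & ((1 << 10) - 1)) | ((generation as usize) << 10)) as u16
}

fn unpack_bullet_id(id: u16) -> (usize, u64) {
    ((id as usize) & ((1 << 10) - 1), (id as u64) >> 10)
}

#[test]
fn bullet_id_roundtrips() {
    // Slot 3, generation 5 should survive a pack/unpack round trip.
    assert_eq!(unpack_bullet_id(pack_bullet_id(3, 5)), (3, 5));
}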
identifier_body
main.rs
use futures::future::join_all; use generational_arena::{Arena, Index}; use nalgebra::Vector2; use std::env::args; use std::error::Error; use std::net::SocketAddr; use std::num::Wrapping; use std::time::Duration; use std::time::SystemTime; use tokio::net::{TcpListener, TcpStream, UdpSocket}; use tokio::prelude::*; use tokio::sync::mpsc::{channel, Sender}; use tokio::time::interval; use game; #[derive(Debug)] struct Player { player: game::Player, tcp_tx: Sender<game::TcpClientMessage>, random_bytes: [u8; game::NUM_RANDOM_BYTES], udp_addr: Option<SocketAddr>, input: Vector2<f64>, angle: f64, firing: bool, fire_counter: f64, } #[derive(Debug)] struct Bullet { bullet: game::Bullet, velocity: Vector2<f64>, lifetime: f64, } fn
(
    players: &mut Arena<Player>,
    bullets: &Arena<Bullet>,
    stream: TcpStream,
    mut internal_tcp_tx: Sender<(Index, Option<game::TcpServerMessage>)>,
    tick_rate: u32,
    tick_zero: SystemTime,
    tick: game::Tick,
) {
    println!("connection!");
    let (tx, mut rx) = channel(4);
    let idx = match players.try_insert(Player {
        player: game::Player {
            id: 0, // Set below.
            radius: 1.0,
            position: Vector2::new(0.0, 0.0),
            velocity: Vector2::new(0.0, 0.0),
        },
        tcp_tx: tx,
        udp_addr: None,
        random_bytes: rand::random(),
        input: Vector2::new(0.0, 0.0),
        angle: 0.0,
        firing: false,
        fire_counter: 0.0,
    }) {
        Ok(idx) => idx,
        Err(_) => {
            println!("rejecting connection; too many players");
            return;
        }
    };

    // Set the user ID to some combination of the arena index and generation.
    let id = idx.into_raw_parts().0 as u8;
    players[idx].player.id = id;

    // TODO(jack) Broadcast PlayerLeft messages.

    // Broadcast PlayerJoined messages.
    let mut tcp_txs: Vec<_> = players
        .iter()
        .filter(|(other_idx, _)| *other_idx != idx)
        .map(|(_, p)| p.tcp_tx.clone())
        .collect();
    let msg = game::TcpClientMessage::PlayerJoined(id);
    tokio::spawn(async move {
        let join_handles = tcp_txs.iter_mut().map(|tcp_tx| tcp_tx.send(msg.clone()));
        join_all(join_handles).await;
    });

    // Start tasks to read-from / write-to the TCP socket.

    let (mut reader, mut writer) = stream.into_split();

    tokio::spawn(async move {
        loop {
            const MAX_PACKET_SIZE_PLUS_ONE: usize = 64;
            let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE];
            let num_bytes = match reader.read(&mut buf).await {
                Ok(0) => break,
                Ok(MAX_PACKET_SIZE_PLUS_ONE) => break,
                Err(err) => {
                    eprintln!("{}", err);
                    break;
                }
                Ok(num_bytes) => num_bytes,
            };
            match bincode::deserialize(&buf[..num_bytes]) {
                Ok(msg) => match internal_tcp_tx.send((idx, Some(msg))).await {
                    Ok(_) => (),
                    Err(_) => break,
                },
                Err(err) => {
                    eprintln!("{}", err);
                    break;
                }
            };
        }

        // One consequence of every client publishing TCP packets to the same channel
        // is that we don't know when any one disconnects.
        // We signal it here with a `None`.
        internal_tcp_tx.send((idx, None)).await.ok();
    });

    let random_bytes = players[idx].random_bytes.clone();
    let update = game::WorldUpdate {
        tick,
        players: players.iter().map(|(_, p)| p.player).collect(),
        bullets: bullets.iter().map(|(_, b)| b.bullet).collect(),
    };
    tokio::spawn(async move {
        // Send the init packet.
        // For now, this will just include a random sequence of bytes.
        // We'll then wait for the random sequence of bytes via UDP to identify the client's external port number.
        let bytes = bincode::serialize(&game::TcpClientMessage::Init(game::ClientInit {
            id,
            random_bytes,
            update,
            tick_rate: tick_rate as u8,
            tick_zero,
        }))
        .unwrap();
        if let Err(err) = writer.write_all(&bytes[..]).await {
            eprintln!("{}", err);
            return;
        }
        println!("wrote init message");

        loop {
            match rx.recv().await {
                Some(msg) => {
                    if let Err(_) = writer
                        .write_all(bincode::serialize(&msg).unwrap().as_slice())
                        .await
                    {
                        break;
                    }
                }
                None => break,
            };
        }
    });
}

fn step(players: &mut Arena<Player>, bullets: &mut Arena<Bullet>, dt: f64) {
    // Apply player impulse.
    for (_, player) in players.iter_mut() {
        let acceleration = 64.0;
        let max_velocity = 16.0;
        let friction = 16.0;

        // Acceleration ranges from `friction` to `friction + acceleration`,
        // and decreases as the current velocity's projection onto the input vector grows.
        let acceleration_index = player.player.velocity.dot(&player.input) / max_velocity;
        let acceleration_index = if acceleration_index < 0.0 {
            0.0
        } else {
            acceleration_index.sqrt()
        };
        let adjusted_acceleration = friction + acceleration * (1.0 - acceleration_index);
        player.player.velocity += adjusted_acceleration * dt * player.input;

        let dampened_velocity_unclamped = player.player.velocity.magnitude() - dt * friction;
        let dampened_velocity = if dampened_velocity_unclamped < 0.0 {
            0.0
        } else {
            dampened_velocity_unclamped
        };
        let velocity_unit = player
            .player
            .velocity
            .try_normalize(0.0)
            .unwrap_or(Vector2::new(0.0, 0.0));
        player.player.velocity = dampened_velocity * velocity_unit;

        player.player.position += dt * player.player.velocity;
    }

    // Remove expired bullets.
    let bullets_to_remove: Vec<_> = bullets
        .iter()
        .filter(|(_, b)| b.lifetime > 1.0)
        .map(|(idx, _)| idx)
        .collect();
    for idx in bullets_to_remove.iter() {
        bullets.remove(*idx);
    }

    // Fire bullets.
    for (_, player) in players.iter_mut().filter(|(_, p)| p.firing) {
        let rof = 30.0;
        player.fire_counter += rof * dt;
        if player.fire_counter >= 1.0 {
            player.fire_counter %= 1.0;
            let idx = match bullets.try_insert(Bullet {
                bullet: game::Bullet {
                    id: 0,
                    player_id: player.player.id,
                    position: player.player.position,
                    angle: player.angle,
                    radius: 0.5,
                },
                velocity: 32.0 * Vector2::new(player.angle.cos(), player.angle.sin()),
                lifetime: 0.0,
            }) {
                Ok(idx) => idx,
                Err(_) => {
                    eprintln!("too many bullets!");
                    break;
                }
            };
            // Pack the bullet ID from the arena index (low 10 bits) and the generation (high bits).
            let raw_parts = idx.into_raw_parts();
            bullets[idx].bullet.id =
                ((raw_parts.0 & ((1 << 10) - 1)) | ((raw_parts.1 as usize) << 10)) as u16;
        }
    }

    // Update bullets.
    for (_, bullet) in bullets.iter_mut() {
        bullet.bullet.position += dt * bullet.velocity;
        bullet.lifetime += dt;
    }

    // Manage collisions.
    // We have to collect the idxs to avoid borrowing `players`.
    let idxs: Vec<_> = players.iter().map(|(idx, _)| idx).collect();
    let idx_pairs = idxs
        .iter()
        .map(|a| idxs.iter().map(move |b| (a, b)))
        .flatten()
        .filter(|(a, b)| a.into_raw_parts().0 < b.into_raw_parts().0);
    for (a, b) in idx_pairs {
        let (a, b) = players.get2_mut(*a, *b);
        let a = a.unwrap();
        let b = b.unwrap();
        let distance = match a.player.position - b.player.position {
            v if v.x == 0.0 && v.y == 0.0 => Vector2::new(0.001, 0.001),
            v => v,
        };
        let max_distance = a.player.radius + b.player.radius;
        if distance.magnitude_squared() >= max_distance.powi(2) {
            continue; // No collision.
} let displacement_unit = distance.try_normalize(0.0).unwrap(); let displacement = displacement_unit * (max_distance - distance.magnitude()); a.player.position += 0.5 * displacement; b.player.position += -0.5 * displacement; let momentum = a.player.velocity.magnitude() + b.player.velocity.magnitude(); let elasticity = 2.0; a.player.velocity = 0.5 * elasticity * momentum * displacement_unit; b.player.velocity = -0.5 * elasticity * momentum * displacement_unit; } } #[tokio::main] async fn main() -> Result<(), Box<dyn Error>> { let port: u32 = match args().nth(1).and_then(|s| s.parse().ok()) { Some(port) => port, None => { eprintln!("Usage: {} PORT", args().nth(0).unwrap()); return Ok(()); } }; let mut players = Arena::with_capacity(16); let mut bullets = Arena::with_capacity(1024); let tick_rate = 60; let mut ticker = interval(Duration::from_secs(1) / tick_rate); let snapshot_rate = 10; let mut snapshot_ticker = interval(Duration::from_secs(1) / snapshot_rate); let mut tcp_listener = TcpListener::bind(format!("0.0.0.0:{}", port)).await?; let mut udp_socket = UdpSocket::bind(format!("0.0.0.0:{}", port)).await?; // tcp_rx is our global receiver for TCP events. // This means that each player holds a copy of tcp_tx which packets are passed to. let (tcp_tx, mut tcp_rx) = channel(4); let mut tick = Wrapping(0); let tick_zero = SystemTime::now(); loop { const MAX_PACKET_SIZE_PLUS_ONE: usize = 64; let mut buf: [u8; MAX_PACKET_SIZE_PLUS_ONE] = [0; MAX_PACKET_SIZE_PLUS_ONE]; // TODO(jack) Redesign this select! call to execute as little code linearly as possible. tokio::select! { _ = ticker.tick() => { // Update game state. let dt = 1.0 / tick_rate as f64; // TODO(jack) Measure actual elapsed time. step(&mut players, &mut bullets, dt); tick = tick + Wrapping(1); }, _ = snapshot_ticker.tick() => { // Broadcast. let update = game::WorldUpdate { tick: tick.0, players: players.iter().map(|(_, p)| p.player).collect(), bullets: bullets.iter().map(|(_, b)| b.bullet).collect(), }; let bytes = bincode::serialize(&game::UdpClientMessage::WorldUpdate(update)).unwrap(); for (_, player) in players.iter().filter(|(_, p)| p.udp_addr.is_some()) { udp_socket.send_to(&bytes, player.udp_addr.unwrap()).await?; } }, accept_result = tcp_listener.accept() => match accept_result { Ok((stream, _)) => accept(&mut players, &bullets, stream, tcp_tx.clone(), tick_rate, tick_zero, tick.0), Err(err) => { eprintln!("{}", err); break }, }, // TODO(jack) TCP messages from the client should end up in a channel. result = tcp_rx.recv() => match result { Some((idx, None)) => { println!("disconnection!"); let id = players[idx].player.id; // Broadcast that a player left. let mut tcp_txs: Vec<_> = players .iter() .filter(|(other_idx, _)| *other_idx != idx) .map(|(_, p)| p.tcp_tx.clone()) .collect(); tokio::spawn(async move { let msg = game::TcpClientMessage::PlayerLeft(id); let join_handles = tcp_txs .iter_mut() .map(|tcp_tx| tcp_tx.send(msg.clone())); // TODO(jack) Can we do this without allocations? 
join_all(join_handles).await; }); players.remove(idx); }, Some((idx, Some(msg))) => println!("{:?}: {:?}", idx, msg), None => break, }, result = udp_socket.recv_from(&mut buf) => match result { Ok((0, _)) => break, Ok((MAX_PACKET_SIZE_PLUS_ONE, _)) => break, Ok((num_bytes, socket_addr)) => { let bytes = &buf[..num_bytes]; let msg: game::UdpServerMessage = match bincode::deserialize(&bytes) { Ok(msg) => msg, Err(err) => { eprintln!("{}", err); continue }, }; match msg { game::UdpServerMessage::Init(game::ServerInit { random_bytes }) => { println!("received init message: {:?}", random_bytes); if let Some((_, player)) = players.iter_mut().find(|(_, player)| player.random_bytes == random_bytes) { player.udp_addr = Some(socket_addr); println!("{:?}", player.udp_addr); } }, game::UdpServerMessage::PlayerInput(inputs) => { let (_, player) = match players.iter_mut().find(|(_, player)| player.udp_addr.is_some() && player.udp_addr.unwrap() == socket_addr) { Some((idx, player)) => (idx, player), None => continue, }; // TODO(jack) Apply the inputs according to their tick. // Right now, we're just taking the most recent one. if inputs.len() == 0 { continue } let input = inputs.iter().last().unwrap(); player.input = Vector2::new( (input.right as i32 - input.left as i32) as f64, (input.down as i32 - input.up as i32) as f64, ).try_normalize(0.0).unwrap_or(Vector2::new(0.0, 0.0)); player.angle = input.angle; // TODO(jack) We probably just want to compose input in the player struct. if input.mouse_left { player.firing = true; } else { player.firing = false; player.fire_counter = 0.0; } }, }; }, Err(err) => { eprintln!("{}", err); break }, } } } Ok(()) }
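// One possible answer to the "without allocations?" TODO above, sketched as a
// hypothetical helper rather than a change to the server: keep the single Vec
// of cloned senders, but send sequentially inside the spawned task instead of
// building a second collection of futures for join_all. The trade-off is that
// a slow receiver now delays the ones after it, whereas join_all polls all
// sends concurrently.
async fn broadcast(
    tcp_txs: &mut [tokio::sync::mpsc::Sender<game::TcpClientMessage>],
    msg: game::TcpClientMessage,
) {
    for tcp_tx in tcp_txs.iter_mut() {
        // Ignore send errors from clients that have already disconnected.
        let _ = tcp_tx.send(msg.clone()).await;
    }
}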
accept
identifier_name
Loan Eligibility Prediction_Benchmark_ML_Algorithm.py
# coding: utf-8

# # Task 10: Benchmark Top ML Algorithms
#
# This task tests your ability to use different ML algorithms when solving a specific problem.
#
# ### Dataset
# Predict Loan Eligibility for Dream Housing Finance company
#
# Dream Housing Finance company deals in all kinds of home loans, with a presence across urban, semi-urban and rural areas. A customer first applies for a home loan, after which the company validates the customer's eligibility.
#
# The company wants to automate the loan eligibility process (in real time) based on the customer details provided while filling out the online application form. These details are Gender, Marital Status, Education, Number of Dependents, Income, Loan Amount, Credit History and others. To automate this process, they have provided a dataset to identify the customer segments that are eligible for a loan, so that they can specifically target these customers.
#
# Train: https://raw.githubusercontent.com/subashgandyer/datasets/main/loan_train.csv
#
# Test: https://raw.githubusercontent.com/subashgandyer/datasets/main/loan_test.csv

# ## Task Requirements
# ### You can have the following Classification models built using different ML algorithms
# - Decision Tree
# ### Use GridSearchCV for finding the best model with the best hyperparameters
# - ### Build models
# - ### Create Parameter Grid
# - ### Run GridSearchCV
# - ### Choose the best model with the best hyperparameter
# - ### Give the best accuracy
# - ### Also, benchmark the best accuracy that you could get for every classification algorithm asked above

# #### Your final output will be something like this:
# - Best algorithm accuracy
# - Best hyperparameter accuracy for every algorithm
#
# **Table 1 (Algorithm wise best model with best hyperparameter)**
#
# Algorithm | Accuracy | Hyperparameters
# - DT
# - KNN
# - LR
# - SVM
# - RF
# - anyother
#
# **Table 2 (Best overall)**
#
# Algorithm | Accuracy | Hyperparameters
#
#
# ### Submission
# - Submit Notebook containing all saved ran code with outputs
# - Document with the above two tables

# In[1]:

import pandas as pd
import matplotlib.pyplot as plt

# In[4]:

data = pd.read_csv('https://raw.githubusercontent.com/subashgandyer/datasets/main/loan_train.csv')
data

# In[5]:

import seaborn as sns
sns.set_theme(style="darkgrid")
ax = sns.countplot(x="Loan_Status", data=data)

# Data is imbalanced

# Checking missing values

# In[6]:

data.isna().sum()

# In[7]:

data['Gender'] = data['Gender'].fillna('U')
data['Married'] = data['Married'].fillna('U')
data['Self_Employed'] = data['Self_Employed'].fillna('U')

# In[8]:

data.isna().sum()

# In[9]:

from numpy import NaN
data[['LoanAmount', 'Loan_Amount_Term', 'Credit_History']] = data[['LoanAmount', 'Loan_Amount_Term', 'Credit_History']].replace(0, NaN)

# In[10]:

data.fillna(data.mean(), inplace=True)
data

# In[11]:

data.info()

# In[12]:

data.Dependents.value_counts()

# # Handling Categorical Variable

# In[13]:

from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
xm = data.apply(LabelEncoder().fit_transform)
xm

# In[14]:

X = xm.drop(['Loan_Status'], axis=1)
X

# In[15]:

y_new = xm.iloc[:, 12]
y_new

# In[16]:

test = pd.read_csv('https://raw.githubusercontent.com/subashgandyer/datasets/main/loan_test.csv')

# In[17]:

test.isna().sum()

# In[18]:

test['Gender'] = test['Gender'].fillna('U')
test['Self_Employed'] = test['Self_Employed'].fillna('U')

# In[19]:

test.isna().sum()

# In[20]:

from numpy import NaN
test[['LoanAmount', 'Loan_Amount_Term', 'Credit_History']] = test[['LoanAmount', 'Loan_Amount_Term', 'Credit_History']].replace(0, NaN)

# In[21]:

test.fillna(test.mean(), inplace=True)
test.isna().sum()

# In[22]:

test.Dependents.value_counts()

# In[23]:

from sklearn.preprocessing import LabelEncoder
labelencoder_X = LabelEncoder()
xm_new = test.apply(LabelEncoder().fit_transform)
xm_new

# In[24]:

X.columns

# In[25]:

X_train_new = X[['Loan_ID', 'Gender', 'Married', 'Dependents', 'Education',
                 'Self_Employed', 'ApplicantIncome', 'CoapplicantIncome', 'LoanAmount',
                 'Loan_Amount_Term', 'Credit_History', 'Property_Area']]
X_train_new
y_train_new = xm.iloc[:, 12]
y_train_new

# In[26]:

X_train_new

# In[27]:

y_train_new

# In[28]:

X_test_new = xm_new
X_test_new

# In[29]:

from sklearn.model_selection import train_test_split
#X_train, X_test, y_train, y_test = train_test_split(X, y_new, test_size = 0.30, random_state=5)

# In[30]:

n = 247
# Dropping last n rows using drop
y_new.drop(y_new.tail(n).index, inplace=True)
# Printing dataframe
print(y_new)

# In[31]:

print(X_train_new.shape)
print(X_test_new.shape)
print(y_train_new.shape)
print(y_new.shape)

# In[32]:

# Note: y_test_new is just the truncated training labels, since the provided
# test set ships without a Loan_Status column.
y_test_new = y_new
y_test_new

# # Model Building

# In[33]:

from sklearn.neighbors import KNeighborsClassifier
knn_classifier = KNeighborsClassifier()

# In[34]:
knn_classifier.fit(X_train_new, y_train_new) # In[35]: knn_predictions = knn_classifier.predict(X_test_new) # In[37]: print(knn_classifier.score(X_test_new, y_test_new)) print(knn_classifier.score(X_train_new, y_train_new)) # # knn_classifier # In[38]: from sklearn.model_selection import GridSearchCV # In[39]: grid_params= {'n_neighbors':[3,5,11,19],'weights':['uniform','distance'],'metric':['euclidean','manhattan'] } # In[40]: gridsearch= GridSearchCV(knn_classifier,grid_params, verbose=1,cv=3,n_jobs=-1) # In[41]: gs_results=gridsearch.fit(X_train_new, y_train_new) # In[42]: gs_results.best_score_ # In[43]: gs_results.best_estimator_ # In[44]: gs_results.best_params_ # # Random Forest With GridsearchCv # In[45]: from sklearn.ensemble import RandomForestClassifier # Create the parameter grid based on the results of random search param_grid = { 'bootstrap': [True], 'max_depth': [80, 90, 100, 110], 'max_features': [2, 3], 'min_samples_leaf': [3, 4, 5], 'min_samples_split': [8, 10, 12], 'n_estimators': [100, 200, 300, 1000] } # Create a based model rf = RandomForestClassifier() # Instantiate the grid search model grid_search = GridSearchCV(estimator = rf, param_grid = param_grid, cv = 3, n_jobs = -1, verbose = 2) # In[46]: rf_results=grid_search.fit(X_train_new, y_train_new) # In[58]: rf_results.best_score_ # In[47]: rf_results.best_params_ # # Decision Tree with GridSearchCv # In[48]: from sklearn.tree import DecisionTreeClassifier from sklearn.model_selection import GridSearchCV params = {'max_leaf_nodes': list(range(2, 100)), 'min_samples_split': [2, 3, 4]} grid_search_cv = GridSearchCV(DecisionTreeClassifier(random_state=42), params, verbose=1, cv=3) dt=grid_search_cv.fit(X_train_new, y_train_new) # In[49]: grid_search_cv.best_params_ # In[50]: grid_search_cv.best_score_ # # Logistic Regression # In[51]: from sklearn.linear_model import LogisticRegression import numpy as np model=LogisticRegression() # In[52]: from sklearn.model_selection import RepeatedStratifiedKFold # Create grid search object solvers = ['newton-cg', 'lbfgs', 'liblinear'] penalty = ['l2'] c_values = [100, 10, 1.0, 0.1, 0.01] # define grid search grid = dict(solver=solvers,penalty=penalty,C=c_values) cv = RepeatedStratifiedKFold(n_splits=10, n_repeats=3, random_state=1) grid_search = GridSearchCV(estimator=model, param_grid=grid, n_jobs=-1, cv=cv, scoring='accuracy',error_score=0) lg = grid_search.fit(X_train_new, y_train_new) # In[54]: lg.best_score_ # In[53]: lg.best_params_ # # svm # In[ ]: from sklearn.svm import SVC from sklearn.metrics import classification_report, confusion_matrix import matplotlib.pyplot as plt get_ipython().run_line_magic('matplotlib', 'inline') # In[ ]: from sklearn.model_selection import GridSearchCV param_grid = { 'C':[0.1,1,100,1000],'kernel':['rbf','poly','sigmoid','linear'],'degree':[1,2,3,4,5,6],'gamma': [1, 0.1, 0.01, 0.001, 0.0001]} grid = GridSearchCV(SVC(),param_grid) # In[ ]: grid.fit(X_train_new,y_train_new) # In[59]: grid.best_score_ # # Naivebayes # In[55]: import numpy as np param_grid_nb = { 'var_smoothing': np.logspace(0,-9, num=100) } # In[56]: from sklearn.naive_bayes import GaussianNB from sklearn.model_selection import GridSearchCV nbModel_grid = GridSearchCV(estimator=GaussianNB(), param_grid=param_grid_nb, verbose=1, cv=10, n_jobs=-1) nbModel_grid.fit(X_train_new, y_train_new) print(nbModel_grid.best_estimator_) ... 
# Fitting 10 folds for each of 100 candidates, totalling 1000 fits
# GaussianNB(priors=None, var_smoothing=1.0)

# In[57]:

print(nbModel_grid.best_score_)
print(nbModel_grid.best_params_)

# # AdaBoost Classifier

# In[ ]:

from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
adb = AdaBoostClassifier(DecisionTreeClassifier(min_samples_split=10, max_depth=4), n_estimators=10, learning_rate=0.6)
adb.fit(X_train_new, y_train_new)
print("score on test: " + str(adb.score(X_test_new, y_test_new)))
print("score on train: " + str(adb.score(X_train_new, y_train_new)))

# # BaggingClassifier

# In[61]:

from sklearn.ensemble import BaggingClassifier
from sklearn.tree import DecisionTreeClassifier
# max_samples: each tree is trained on a 50% sample of the training set
# max_features: fraction of features used by each tree (1.0 = all features)
# n_estimators: number of decision trees
bg = BaggingClassifier(DecisionTreeClassifier(), max_samples=0.5, max_features=1.0, n_estimators=10)
bg.fit(X_train_new, y_train_new)
print("score on test: " + str(bg.score(X_test_new, y_test_new)))
print("score on train: " + str(bg.score(X_train_new, y_train_new)))

# # Voting Classifier

# In[62]:

from sklearn.ensemble import VotingClassifier
# Hard-voting ensemble over the tuned models above:
# 1) KNN grid search = gs_results
# 2) logistic regression = lg
# 3) random forest = rf_results
# 4) decision tree = dt
# 5) bagging = bg
# 6) AdaBoost = adb
evc = VotingClassifier(estimators=[('gs_results', gs_results), ('lg', lg), ('rf_results', rf_results), ('dt', dt), ('bg', bg), ('adb', adb)], voting='hard')
evc.fit(X_train_new, y_train_new)
print("score on test: " + str(evc.score(X_test_new, y_test_new)))
print("score on train: " + str(evc.score(X_train_new, y_train_new)))
# - KNN # - Logistic Regression # - SVM # - Random Forest # - Any other algorithm of your choice
random_line_split
main.rs
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![forbid(unsafe_code)] use std::{ borrow::Cow, fs::File, io::Write, os::unix::prelude::PermissionsExt, path::{Path, PathBuf}, }; use autocxx_engine::{get_clang_path, make_clang_args, preprocess}; use autocxx_parser::IncludeCppConfig; use clap::{crate_authors, crate_version, Arg, ArgMatches, Command}; use indexmap::IndexSet; use indoc::indoc; use itertools::Itertools; use quote::ToTokens; use regex::Regex; use tempfile::TempDir; static LONG_HELP: &str = indoc! {" Command line utility to minimize autocxx bug cases. This is a wrapper for creduce. Example command-line: autocxx-reduce file -I my-inc-dir -h my-header -d 'generate!(\"MyClass\")' -k -- --n 64 "}; fn main() { // Assemble some defaults for command line arguments let current_exe = std::env::current_exe().unwrap(); let our_dir = current_exe.parent().unwrap(); let default_gen_cmd = our_dir.join("autocxx-gen").to_str().unwrap().to_string(); let rust_libs_path1 = our_dir.to_str().unwrap().to_string(); let rust_libs_path2 = our_dir.join("deps").to_str().unwrap().to_string(); let default_rlibs = &[rust_libs_path1.as_str(), rust_libs_path2.as_str()]; let matches = Command::new("autocxx-reduce") .version(crate_version!()) .author(crate_authors!()) .about("Reduce a C++ test case") .long_about(LONG_HELP) .subcommand(Command::new("file") .about("reduce a header file") .arg( Arg::new("inc") .short('I') .long("inc") .multiple_occurrences(true) .number_of_values(1) .value_name("INCLUDE DIRS") .help("include path") .takes_value(true), ) .arg( Arg::new("define") .short('D') .long("define") .multiple_occurrences(true) .number_of_values(1) .value_name("DEFINE") .help("macro definition") .takes_value(true), ) .arg( Arg::new("header") .long("header") .multiple_occurrences(true) .number_of_values(1) .required(true) .value_name("HEADER") .help("header file name") .takes_value(true), ) .arg( Arg::new("directive") .short('d') .long("directive") .multiple_occurrences(true) .number_of_values(1) .value_name("DIRECTIVE") .help("directives to put within include_cpp!") .takes_value(true), ) ) .subcommand(Command::new("repro") .about("reduce a repro case JSON file") .arg( Arg::new("repro") .short('r') .long("repro") .required(true) .value_name("REPRODUCTION CASE JSON") .help("reproduction case JSON file name") .takes_value(true), ) .arg( Arg::new("header") .long("header") .multiple_occurrences(true) .number_of_values(1) .value_name("HEADER") .help("header file name; specify to resume a part-completed run") .takes_value(true), ) ) .arg( Arg::new("problem") .short('p') .long("problem") .required(true) .value_name("PROBLEM") .help("problem string we're looking for... 
may be in logs, or in generated C++, or generated .rs")
                .takes_value(true),
        )
        .arg(
            Arg::new("creduce")
                .long("creduce")
                .value_name("PATH")
                .help("creduce binary location")
                .default_value("creduce")
                .takes_value(true),
        )
        .arg(
            Arg::new("output")
                .short('o')
                .long("output")
                .value_name("OUTPUT")
                .help("where to write minimized output")
                .takes_value(true),
        )
        .arg(
            Arg::new("gen-cmd")
                .short('g')
                .long("gen-cmd")
                .value_name("GEN-CMD")
                .help("where to find autocxx-gen")
                .default_value(&default_gen_cmd)
                .takes_value(true),
        )
        .arg(
            Arg::new("rustc")
                .long("rustc")
                .value_name("RUSTC")
                .help("where to find rustc")
                .default_value("rustc")
                .takes_value(true),
        )
        .arg(
            Arg::new("rlibs")
                .long("rlibs")
                .value_name("LIBDIR")
                .help("where to find rlibs/rmetas for cxx and autocxx")
                .default_values(default_rlibs)
                .multiple_values(true)
                .takes_value(true),
        )
        .arg(
            Arg::new("keep")
                .short('k')
                .long("keep-dir")
                .help("keep the temporary directory for debugging purposes"),
        )
        .arg(
            Arg::new("clang-args")
                .short('c')
                .long("clang-arg")
                .multiple_occurrences(true)
                .value_name("CLANG_ARG")
                .help("Extra arguments to pass to Clang"),
        )
        .arg(
            Arg::new("creduce-args")
                .long("creduce-arg")
                .multiple_occurrences(true)
                .value_name("CREDUCE_ARG")
                .help("Extra arguments to pass to creduce"),
        )
        .arg(
            Arg::new("no-precompile")
                .long("no-precompile")
                .help("Do not precompile the C++ header before passing to autocxxgen"),
        )
        .arg(
            Arg::new("no-postcompile")
                .long("no-postcompile")
                .help("Do not post-compile the C++ generated by autocxxgen"),
        )
        .arg(
            Arg::new("no-rustc")
                .long("no-rustc")
                .help("Do not compile the rust generated by autocxxgen"),
        )
        .arg(
            Arg::new("suppress-cxx-inclusions")
                .long("suppress-cxx-inclusions")
                .takes_value(true)
                .possible_value("yes")
                .possible_value("no")
                .possible_value("auto")
                .default_value("auto")
                .help("Whether the preprocessed header already includes cxx.h. If so, we'll try to suppress the natural behavior of cxx to include duplicate definitions of some of the types within gen0.cc.")
        )
        .arg_required_else_help(true)
        .get_matches();
    run(matches).unwrap();
}

fn run(matches: ArgMatches) -> Result<(), std::io::Error> {
    let keep_tmp = matches.is_present("keep");
    let tmp_dir = TempDir::new()?;
    let r = do_run(matches, &tmp_dir);
    if keep_tmp {
        println!(
            "Keeping temp dir created at: {}",
            tmp_dir.into_path().to_str().unwrap()
        );
    }
    r
}

#[derive(serde_derive::Deserialize)]
struct
{ config: String, header: String, } fn do_run(matches: ArgMatches, tmp_dir: &TempDir) -> Result<(), std::io::Error> { let rs_path = tmp_dir.path().join("input.rs"); let concat_path = tmp_dir.path().join("concat.h"); match matches.subcommand_matches("repro") { None => { let submatches = matches.subcommand_matches("file").unwrap(); let incs: Vec<_> = submatches .values_of("inc") .unwrap_or_default() .map(PathBuf::from) .collect(); let defs: Vec<_> = submatches.values_of("define").unwrap_or_default().collect(); let headers: Vec<_> = submatches.values_of("header").unwrap_or_default().collect(); assert!(!headers.is_empty()); let listing_path = tmp_dir.path().join("listing.h"); create_concatenated_header(&headers, &listing_path)?; announce_progress(&format!( "Preprocessing {listing_path:?} to {concat_path:?}" )); preprocess(&listing_path, &concat_path, &incs, &defs)?; let directives: Vec<_> = std::iter::once("#include \"concat.h\"\n".to_string()) .chain( submatches .values_of("directive") .unwrap_or_default() .map(|s| format!("{s}\n")), ) .collect(); create_rs_file(&rs_path, &directives)?; } Some(submatches) => { let case: ReproCase = serde_json::from_reader(File::open(PathBuf::from( submatches.value_of("repro").unwrap(), ))?) .unwrap(); // Replace the headers in the config let mut config: IncludeCppConfig = syn::parse_str(&case.config).unwrap(); config.replace_included_headers("concat.h"); create_file( &rs_path, &format!("autocxx::include_cpp!({});", config.to_token_stream()), )?; if let Some(header) = submatches.value_of("header") { std::fs::copy(PathBuf::from(header), &concat_path)?; } else { create_file(&concat_path, &case.header)? } } } let suppress_cxx_classes = match matches.value_of("suppress-cxx-inclusions").unwrap() { "yes" => true, "no" => false, "auto" => detect_cxx_h(&concat_path)?, _ => panic!("unexpected value"), }; let cxx_suppressions = if suppress_cxx_classes { get_cxx_suppressions() } else { Vec::new() }; let extra_clang_args: Vec<_> = matches .values_of("clang-args") .unwrap_or_default() .map(Cow::Borrowed) .chain(cxx_suppressions.into_iter().map(Cow::Owned)) .collect(); let extra_clang_args: Vec<&str> = extra_clang_args.iter().map(|s| s.as_ref()).collect_vec(); let gen_cmd = matches.value_of("gen-cmd").unwrap(); if !Path::new(gen_cmd).exists() { panic!( "autocxx-gen not found in {gen_cmd}. hint: autocxx-reduce --gen-cmd /path/to/autocxx-gen" ); } run_sample_gen_cmd(gen_cmd, &rs_path, tmp_dir.path(), &extra_clang_args)?; // Create and run an interestingness test which does not filter its output through grep. 
let demo_interestingness_test_dir = tmp_dir.path().join("demo-interestingness-test"); std::fs::create_dir(&demo_interestingness_test_dir).unwrap(); let interestingness_test = demo_interestingness_test_dir.join("test-demo.sh"); create_interestingness_test( &matches, gen_cmd, &interestingness_test, None, &rs_path, &extra_clang_args, )?; let demo_dir_concat_path = demo_interestingness_test_dir.join("concat.h"); std::fs::copy(&concat_path, demo_dir_concat_path).unwrap(); run_demo_interestingness_test(&demo_interestingness_test_dir, &interestingness_test).unwrap(); // Now the main interestingness test let interestingness_test = tmp_dir.path().join("test.sh"); create_interestingness_test( &matches, gen_cmd, &interestingness_test, Some(matches.value_of("problem").unwrap()), &rs_path, &extra_clang_args, )?; run_creduce( matches.value_of("creduce").unwrap(), &interestingness_test, &concat_path, matches.values_of("creduce-args").unwrap_or_default(), ); announce_progress("creduce completed"); let output_path = matches.value_of("output"); match output_path { None => print_minimized_case(&concat_path)?, Some(output_path) => { std::fs::copy(&concat_path, PathBuf::from(output_path))?; } }; Ok(()) } /// Try to detect whether the preprocessed source code already contains /// a preprocessed version of cxx.h. This is hard because all the comments /// and preprocessor symbols may have been removed, and in fact if we're /// part way through reduction, parts of the code may have been removed too. fn detect_cxx_h(concat_path: &Path) -> Result<bool, std::io::Error> { let haystack = std::fs::read_to_string(concat_path)?; Ok(["class Box", "class Vec", "class Slice"] .iter() .all(|needle| haystack.contains(needle))) } fn announce_progress(msg: &str) { println!("=== {msg} ==="); } fn print_minimized_case(concat_path: &Path) -> Result<(), std::io::Error> { announce_progress("Completed. Minimized test case:"); let contents = std::fs::read_to_string(concat_path)?; println!("{contents}"); Ok(()) } /// Arguments we pass to creduce if supported. This pass always seems to cause a crash /// as far as I can tell, so always exclude it. It may be environment-dependent, /// of course, but as I'm the primary user of this tool I am ruthlessly removing it. const REMOVE_PASS_LINE_MARKERS: &[&str] = &["--remove-pass", "pass_line_markers", "*"]; const SKIP_INITIAL_PASSES: &[&str] = &["--skip-initial-passes"]; fn creduce_supports_remove_pass(creduce_cmd: &str) -> bool { let cmd = std::process::Command::new(creduce_cmd) .arg("--help") .output(); let msg = match cmd { Err(error) => panic!("failed to run creduce. creduce_cmd = {creduce_cmd}. hint: autocxx-reduce --creduce /path/to/creduce. 
error = {error}"), Ok(result) => result.stdout }; let msg = std::str::from_utf8(&msg).unwrap(); msg.contains("--remove-pass") } fn run_creduce<'a>( creduce_cmd: &str, interestingness_test: &'a Path, concat_path: &'a Path, creduce_args: impl Iterator<Item = &'a str>, ) { announce_progress("creduce"); let args = std::iter::once(interestingness_test.to_str().unwrap()) .chain(std::iter::once(concat_path.to_str().unwrap())) .chain(creduce_args) .chain( if creduce_supports_remove_pass(creduce_cmd) { REMOVE_PASS_LINE_MARKERS } else { SKIP_INITIAL_PASSES } .iter() .copied(), ) .collect::<Vec<_>>(); println!("Command: {} {}", creduce_cmd, args.join(" ")); std::process::Command::new(creduce_cmd) .args(args) .status() .expect("failed to creduce"); } fn run_sample_gen_cmd( gen_cmd: &str, rs_file: &Path, tmp_dir: &Path, extra_clang_args: &[&str], ) -> Result<(), std::io::Error> { let args = format_gen_cmd(rs_file, tmp_dir.to_str().unwrap(), extra_clang_args); let args = args.collect::<Vec<_>>(); let args_str = args.join(" "); announce_progress(&format!("Running sample gen cmd: {gen_cmd} {args_str}")); std::process::Command::new(gen_cmd).args(args).status()?; Ok(()) } fn run_demo_interestingness_test(demo_dir: &Path, test: &Path) -> Result<(), std::io::Error> { announce_progress(&format!( "Running demo interestingness test in {}", demo_dir.to_string_lossy() )); std::process::Command::new(test) .current_dir(demo_dir) .status()?; Ok(()) } fn format_gen_cmd<'a>( rs_file: &Path, dir: &str, extra_clang_args: &'a [&str], ) -> impl Iterator<Item = String> + 'a { let args = [ "-o".to_string(), dir.to_string(), "-I".to_string(), dir.to_string(), rs_file.to_str().unwrap().to_string(), "--gen-rs-include".to_string(), "--gen-cpp".to_string(), "--suppress-system-headers".to_string(), "--".to_string(), ] .to_vec(); args.into_iter() .chain(extra_clang_args.iter().map(|s| s.to_string())) } fn create_interestingness_test( matches: &ArgMatches, gen_cmd: &str, test_path: &Path, problem: Option<&str>, rs_file: &Path, extra_clang_args: &[&str], ) -> Result<(), std::io::Error> { announce_progress("Creating interestingness test"); let precompile = !matches.is_present("no-precompile"); let postcompile = !matches.is_present("no-postcompile"); let rustc = !matches.is_present("no-rustc"); let rustc_path = matches.value_of("rustc").unwrap(); let rust_libs_path: Vec<String> = matches .get_many::<String>("rlibs") .expect("No rlib path specified") .cloned() .collect(); // Ensure we refer to the input header by relative path // because creduce will invoke us in some other directory with // a copy thereof. let mut args = format_gen_cmd(rs_file, "$(pwd)", extra_clang_args); let args = args.join(" "); let precompile_step = make_compile_step(precompile, "concat.h", extra_clang_args); // For the compile afterwards, we have to avoid including any system headers. // We rely on equivalent content being hermetically inside concat.h. 
let postcompile_step = make_compile_step(postcompile, "gen0.cc", extra_clang_args); let rustc_step = if rustc { let rust_libs_path = rust_libs_path.iter().map(|p| format!(" -L{p}")).join(" "); format!("{rustc_path} --extern cxx --extern autocxx {rust_libs_path} --crate-type rlib --emit=metadata --edition=2021 autocxx-ffi-default-gen.rs 2>&1") } else { "echo Skipping rustc".to_string() }; // -q below to exit immediately as soon as a match is found, to avoid // extra compile/codegen steps let problem_grep = problem .map(|problem| format!("| grep -q \"{problem}\" >/dev/null 2>&1")) .unwrap_or_default(); // We formerly had a 'trap' below but it seems to have caused problems // (trap \"if [[ \\$? -eq 139 ]]; then echo Segfault; fi\" CHLD; {} {} 2>&1 && cat autocxx-ffi-default-gen.rs && cat autocxxgen*.h && {} && {} 2>&1 ) {} let content = format!( indoc! {" #!/bin/bash set -e echo Precompile {} echo Move mv concat.h concat-body.h (echo \"#ifndef __CONCAT_H__\"; echo \"#define __CONCAT_H__\"; echo '#include \"concat-body.h\"'; echo \"#endif\") > concat.h echo Codegen ({} {} 2>&1 && cat autocxx-ffi-default-gen.rs && cat autocxxgen*.h && {} && {} 2>&1) {} echo Remove rm concat.h echo Swap back mv concat-body.h concat.h echo Done "}, precompile_step, gen_cmd, args, rustc_step, postcompile_step, problem_grep ); println!("Interestingness test:\n{content}"); { let mut file = File::create(test_path)?; file.write_all(content.as_bytes())?; } let mut perms = std::fs::metadata(test_path)?.permissions(); perms.set_mode(0o700); std::fs::set_permissions(test_path, perms)?; Ok(()) } fn make_compile_step(enabled: bool, file: &str, extra_clang_args: &[&str]) -> String { if enabled { format!( "{} {} -c {}", get_clang_path(), make_clang_args(&[PathBuf::from(".")], extra_clang_args).join(" "), file, ) } else { "echo 'Skipping compilation'".into() } } fn create_rs_file(rs_path: &Path, directives: &[String]) -> Result<(), std::io::Error> { announce_progress("Creating Rust input file"); let mut file = File::create(rs_path)?; file.write_all("use autocxx::include_cpp;\ninclude_cpp! (\n".as_bytes())?; for directive in directives { file.write_all(directive.as_bytes())?; } file.write_all(");\n".as_bytes())?; Ok(()) } fn create_concatenated_header(headers: &[&str], listing_path: &Path) -> Result<(), std::io::Error> { announce_progress("Creating preprocessed header"); let mut file = File::create(listing_path)?; for header in headers { file.write_all(format!("#include \"{header}\"\n").as_bytes())?; } Ok(()) } fn create_file(path: &Path, content: &str) -> Result<(), std::io::Error> { let mut file = File::create(path)?; write!(file, "{content}")?; Ok(()) } fn get_cxx_suppressions() -> Vec<String> { let defines: IndexSet<_> = Regex::new(r"\bCXXBRIDGE1_\w+\b") .unwrap() .find_iter(cxx_gen::HEADER) .map(|m| m.as_str()) .collect(); // for uniqueness defines.into_iter().map(|def| format!("-D{def}")).collect() } #[test] fn test_get_cxx_suppressions() { let defines = get_cxx_suppressions(); assert!(defines.contains(&"-DCXXBRIDGE1_RUST_BITCOPY_T".to_string())); assert!(defines.contains(&"-DCXXBRIDGE1_RUST_STR".to_string())); }
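// A self-contained illustration (on a made-up header snippet, not the real
// cxx_gen::HEADER) of the define-extraction idea behind get_cxx_suppressions
// above; it reuses the Regex and IndexSet imports from the top of this file.
// The IndexSet deduplicates while preserving first-seen order.
#[test]
fn illustrate_define_extraction() {
    let header = "#ifndef CXXBRIDGE1_RUST_STR\n#define CXXBRIDGE1_RUST_STR\n\
                  #ifndef CXXBRIDGE1_RUST_BOX\n#define CXXBRIDGE1_RUST_BOX\n";
    let defines: IndexSet<_> = Regex::new(r"\bCXXBRIDGE1_\w+\b")
        .unwrap()
        .find_iter(header)
        .map(|m| m.as_str())
        .collect();
    let flags: Vec<String> = defines.into_iter().map(|def| format!("-D{def}")).collect();
    // Each symbol appears twice in the input but yields a single -D flag.
    assert_eq!(flags, vec!["-DCXXBRIDGE1_RUST_STR", "-DCXXBRIDGE1_RUST_BOX"]);
}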
ReproCase
identifier_name
main.rs
// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // https://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or https://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![forbid(unsafe_code)] use std::{ borrow::Cow, fs::File, io::Write, os::unix::prelude::PermissionsExt, path::{Path, PathBuf}, }; use autocxx_engine::{get_clang_path, make_clang_args, preprocess}; use autocxx_parser::IncludeCppConfig; use clap::{crate_authors, crate_version, Arg, ArgMatches, Command}; use indexmap::IndexSet; use indoc::indoc; use itertools::Itertools; use quote::ToTokens; use regex::Regex; use tempfile::TempDir; static LONG_HELP: &str = indoc! {" Command line utility to minimize autocxx bug cases. This is a wrapper for creduce. Example command-line: autocxx-reduce file -I my-inc-dir -h my-header -d 'generate!(\"MyClass\")' -k -- --n 64 "}; fn main() { // Assemble some defaults for command line arguments let current_exe = std::env::current_exe().unwrap(); let our_dir = current_exe.parent().unwrap(); let default_gen_cmd = our_dir.join("autocxx-gen").to_str().unwrap().to_string(); let rust_libs_path1 = our_dir.to_str().unwrap().to_string(); let rust_libs_path2 = our_dir.join("deps").to_str().unwrap().to_string(); let default_rlibs = &[rust_libs_path1.as_str(), rust_libs_path2.as_str()]; let matches = Command::new("autocxx-reduce") .version(crate_version!()) .author(crate_authors!()) .about("Reduce a C++ test case") .long_about(LONG_HELP) .subcommand(Command::new("file") .about("reduce a header file") .arg( Arg::new("inc") .short('I') .long("inc") .multiple_occurrences(true) .number_of_values(1) .value_name("INCLUDE DIRS") .help("include path") .takes_value(true), ) .arg( Arg::new("define") .short('D') .long("define") .multiple_occurrences(true) .number_of_values(1) .value_name("DEFINE") .help("macro definition") .takes_value(true), ) .arg( Arg::new("header") .long("header") .multiple_occurrences(true) .number_of_values(1) .required(true) .value_name("HEADER") .help("header file name") .takes_value(true), ) .arg( Arg::new("directive") .short('d') .long("directive") .multiple_occurrences(true) .number_of_values(1) .value_name("DIRECTIVE") .help("directives to put within include_cpp!") .takes_value(true), ) ) .subcommand(Command::new("repro") .about("reduce a repro case JSON file") .arg( Arg::new("repro") .short('r') .long("repro") .required(true) .value_name("REPRODUCTION CASE JSON") .help("reproduction case JSON file name") .takes_value(true), ) .arg( Arg::new("header") .long("header") .multiple_occurrences(true) .number_of_values(1) .value_name("HEADER") .help("header file name; specify to resume a part-completed run") .takes_value(true), ) ) .arg( Arg::new("problem") .short('p') .long("problem") .required(true) .value_name("PROBLEM") .help("problem string we're looking for... 
may be in logs, or in generated C++, or generated .rs")
                .takes_value(true),
        )
        .arg(
            Arg::new("creduce")
                .long("creduce")
                .value_name("PATH")
                .help("creduce binary location")
                .default_value("creduce")
                .takes_value(true),
        )
        .arg(
            Arg::new("output")
                .short('o')
                .long("output")
                .value_name("OUTPUT")
                .help("where to write minimized output")
                .takes_value(true),
        )
        .arg(
            Arg::new("gen-cmd")
                .short('g')
                .long("gen-cmd")
                .value_name("GEN-CMD")
                .help("where to find autocxx-gen")
                .default_value(&default_gen_cmd)
                .takes_value(true),
        )
        .arg(
            Arg::new("rustc")
                .long("rustc")
                .value_name("RUSTC")
                .help("where to find rustc")
                .default_value("rustc")
                .takes_value(true),
        )
        .arg(
            Arg::new("rlibs")
                .long("rlibs")
                .value_name("LIBDIR")
                .help("where to find rlibs/rmetas for cxx and autocxx")
                .default_values(default_rlibs)
                .multiple_values(true)
                .takes_value(true),
        )
        .arg(
            Arg::new("keep")
                .short('k')
                .long("keep-dir")
                .help("keep the temporary directory for debugging purposes"),
        )
        .arg(
            Arg::new("clang-args")
                .short('c')
                .long("clang-arg")
                .multiple_occurrences(true)
                .value_name("CLANG_ARG")
                .help("Extra arguments to pass to Clang"),
        )
        .arg(
            Arg::new("creduce-args")
                .long("creduce-arg")
                .multiple_occurrences(true)
                .value_name("CREDUCE_ARG")
                .help("Extra arguments to pass to creduce"),
        )
        .arg(
            Arg::new("no-precompile")
                .long("no-precompile")
                .help("Do not precompile the C++ header before passing to autocxxgen"),
        )
        .arg(
            Arg::new("no-postcompile")
                .long("no-postcompile")
                .help("Do not post-compile the C++ generated by autocxxgen"),
        )
        .arg(
            Arg::new("no-rustc")
                .long("no-rustc")
                .help("Do not compile the rust generated by autocxxgen"),
        )
        .arg(
            Arg::new("suppress-cxx-inclusions")
                .long("suppress-cxx-inclusions")
                .takes_value(true)
                .possible_value("yes")
                .possible_value("no")
                .possible_value("auto")
                .default_value("auto")
                .help("Whether the preprocessed header already includes cxx.h.
If so, we'll try to suppress the natural behavior of cxx to include duplicate definitions of some of the types within gen0.cc.") ) .arg_required_else_help(true) .get_matches(); run(matches).unwrap(); } fn run(matches: ArgMatches) -> Result<(), std::io::Error> { let keep_tmp = matches.is_present("keep"); let tmp_dir = TempDir::new()?; let r = do_run(matches, &tmp_dir); if keep_tmp { println!( "Keeping temp dir created at: {}", tmp_dir.into_path().to_str().unwrap() ); } r } #[derive(serde_derive::Deserialize)] struct ReproCase { config: String, header: String, } fn do_run(matches: ArgMatches, tmp_dir: &TempDir) -> Result<(), std::io::Error> { let rs_path = tmp_dir.path().join("input.rs"); let concat_path = tmp_dir.path().join("concat.h"); match matches.subcommand_matches("repro") { None => { let submatches = matches.subcommand_matches("file").unwrap(); let incs: Vec<_> = submatches .values_of("inc") .unwrap_or_default() .map(PathBuf::from) .collect(); let defs: Vec<_> = submatches.values_of("define").unwrap_or_default().collect(); let headers: Vec<_> = submatches.values_of("header").unwrap_or_default().collect(); assert!(!headers.is_empty()); let listing_path = tmp_dir.path().join("listing.h"); create_concatenated_header(&headers, &listing_path)?; announce_progress(&format!( "Preprocessing {listing_path:?} to {concat_path:?}" )); preprocess(&listing_path, &concat_path, &incs, &defs)?; let directives: Vec<_> = std::iter::once("#include \"concat.h\"\n".to_string()) .chain( submatches .values_of("directive") .unwrap_or_default() .map(|s| format!("{s}\n")), ) .collect(); create_rs_file(&rs_path, &directives)?; } Some(submatches) => { let case: ReproCase = serde_json::from_reader(File::open(PathBuf::from( submatches.value_of("repro").unwrap(), ))?) .unwrap(); // Replace the headers in the config let mut config: IncludeCppConfig = syn::parse_str(&case.config).unwrap(); config.replace_included_headers("concat.h"); create_file( &rs_path, &format!("autocxx::include_cpp!({});", config.to_token_stream()), )?; if let Some(header) = submatches.value_of("header") { std::fs::copy(PathBuf::from(header), &concat_path)?; } else { create_file(&concat_path, &case.header)? } } } let suppress_cxx_classes = match matches.value_of("suppress-cxx-inclusions").unwrap() { "yes" => true, "no" => false, "auto" => detect_cxx_h(&concat_path)?, _ => panic!("unexpected value"), }; let cxx_suppressions = if suppress_cxx_classes { get_cxx_suppressions() } else { Vec::new() }; let extra_clang_args: Vec<_> = matches .values_of("clang-args") .unwrap_or_default() .map(Cow::Borrowed) .chain(cxx_suppressions.into_iter().map(Cow::Owned)) .collect(); let extra_clang_args: Vec<&str> = extra_clang_args.iter().map(|s| s.as_ref()).collect_vec(); let gen_cmd = matches.value_of("gen-cmd").unwrap(); if !Path::new(gen_cmd).exists() { panic!( "autocxx-gen not found in {gen_cmd}. hint: autocxx-reduce --gen-cmd /path/to/autocxx-gen" ); } run_sample_gen_cmd(gen_cmd, &rs_path, tmp_dir.path(), &extra_clang_args)?; // Create and run an interestingness test which does not filter its output through grep. 
let demo_interestingness_test_dir = tmp_dir.path().join("demo-interestingness-test"); std::fs::create_dir(&demo_interestingness_test_dir).unwrap(); let interestingness_test = demo_interestingness_test_dir.join("test-demo.sh"); create_interestingness_test( &matches, gen_cmd, &interestingness_test, None, &rs_path, &extra_clang_args, )?; let demo_dir_concat_path = demo_interestingness_test_dir.join("concat.h"); std::fs::copy(&concat_path, demo_dir_concat_path).unwrap(); run_demo_interestingness_test(&demo_interestingness_test_dir, &interestingness_test).unwrap(); // Now the main interestingness test let interestingness_test = tmp_dir.path().join("test.sh"); create_interestingness_test( &matches, gen_cmd, &interestingness_test, Some(matches.value_of("problem").unwrap()), &rs_path, &extra_clang_args, )?; run_creduce( matches.value_of("creduce").unwrap(), &interestingness_test, &concat_path, matches.values_of("creduce-args").unwrap_or_default(), ); announce_progress("creduce completed"); let output_path = matches.value_of("output"); match output_path { None => print_minimized_case(&concat_path)?, Some(output_path) => { std::fs::copy(&concat_path, PathBuf::from(output_path))?; } }; Ok(()) } /// Try to detect whether the preprocessed source code already contains /// a preprocessed version of cxx.h. This is hard because all the comments /// and preprocessor symbols may have been removed, and in fact if we're /// part way through reduction, parts of the code may have been removed too. fn detect_cxx_h(concat_path: &Path) -> Result<bool, std::io::Error>
fn announce_progress(msg: &str) { println!("=== {msg} ==="); } fn print_minimized_case(concat_path: &Path) -> Result<(), std::io::Error> { announce_progress("Completed. Minimized test case:"); let contents = std::fs::read_to_string(concat_path)?; println!("{contents}"); Ok(()) } /// Arguments we pass to creduce if supported. This pass always seems to cause a crash /// as far as I can tell, so always exclude it. It may be environment-dependent, /// of course, but as I'm the primary user of this tool I am ruthlessly removing it. const REMOVE_PASS_LINE_MARKERS: &[&str] = &["--remove-pass", "pass_line_markers", "*"]; const SKIP_INITIAL_PASSES: &[&str] = &["--skip-initial-passes"]; fn creduce_supports_remove_pass(creduce_cmd: &str) -> bool { let cmd = std::process::Command::new(creduce_cmd) .arg("--help") .output(); let msg = match cmd { Err(error) => panic!("failed to run creduce. creduce_cmd = {creduce_cmd}. hint: autocxx-reduce --creduce /path/to/creduce. error = {error}"), Ok(result) => result.stdout }; let msg = std::str::from_utf8(&msg).unwrap(); msg.contains("--remove-pass") } fn run_creduce<'a>( creduce_cmd: &str, interestingness_test: &'a Path, concat_path: &'a Path, creduce_args: impl Iterator<Item = &'a str>, ) { announce_progress("creduce"); let args = std::iter::once(interestingness_test.to_str().unwrap()) .chain(std::iter::once(concat_path.to_str().unwrap())) .chain(creduce_args) .chain( if creduce_supports_remove_pass(creduce_cmd) { REMOVE_PASS_LINE_MARKERS } else { SKIP_INITIAL_PASSES } .iter() .copied(), ) .collect::<Vec<_>>(); println!("Command: {} {}", creduce_cmd, args.join(" ")); std::process::Command::new(creduce_cmd) .args(args) .status() .expect("failed to creduce"); } fn run_sample_gen_cmd( gen_cmd: &str, rs_file: &Path, tmp_dir: &Path, extra_clang_args: &[&str], ) -> Result<(), std::io::Error> { let args = format_gen_cmd(rs_file, tmp_dir.to_str().unwrap(), extra_clang_args); let args = args.collect::<Vec<_>>(); let args_str = args.join(" "); announce_progress(&format!("Running sample gen cmd: {gen_cmd} {args_str}")); std::process::Command::new(gen_cmd).args(args).status()?; Ok(()) } fn run_demo_interestingness_test(demo_dir: &Path, test: &Path) -> Result<(), std::io::Error> { announce_progress(&format!( "Running demo interestingness test in {}", demo_dir.to_string_lossy() )); std::process::Command::new(test) .current_dir(demo_dir) .status()?; Ok(()) } fn format_gen_cmd<'a>( rs_file: &Path, dir: &str, extra_clang_args: &'a [&str], ) -> impl Iterator<Item = String> + 'a { let args = [ "-o".to_string(), dir.to_string(), "-I".to_string(), dir.to_string(), rs_file.to_str().unwrap().to_string(), "--gen-rs-include".to_string(), "--gen-cpp".to_string(), "--suppress-system-headers".to_string(), "--".to_string(), ] .to_vec(); args.into_iter() .chain(extra_clang_args.iter().map(|s| s.to_string())) } fn create_interestingness_test( matches: &ArgMatches, gen_cmd: &str, test_path: &Path, problem: Option<&str>, rs_file: &Path, extra_clang_args: &[&str], ) -> Result<(), std::io::Error> { announce_progress("Creating interestingness test"); let precompile = !matches.is_present("no-precompile"); let postcompile = !matches.is_present("no-postcompile"); let rustc = !matches.is_present("no-rustc"); let rustc_path = matches.value_of("rustc").unwrap(); let rust_libs_path: Vec<String> = matches .get_many::<String>("rlibs") .expect("No rlib path specified") .cloned() .collect(); // Ensure we refer to the input header by relative path // because creduce will invoke us in some other directory 
with // a copy thereof. let mut args = format_gen_cmd(rs_file, "$(pwd)", extra_clang_args); let args = args.join(" "); let precompile_step = make_compile_step(precompile, "concat.h", extra_clang_args); // For the compile afterwards, we have to avoid including any system headers. // We rely on equivalent content being hermetically inside concat.h. let postcompile_step = make_compile_step(postcompile, "gen0.cc", extra_clang_args); let rustc_step = if rustc { let rust_libs_path = rust_libs_path.iter().map(|p| format!(" -L{p}")).join(" "); format!("{rustc_path} --extern cxx --extern autocxx {rust_libs_path} --crate-type rlib --emit=metadata --edition=2021 autocxx-ffi-default-gen.rs 2>&1") } else { "echo Skipping rustc".to_string() }; // -q below to exit immediately as soon as a match is found, to avoid // extra compile/codegen steps let problem_grep = problem .map(|problem| format!("| grep -q \"{problem}\" >/dev/null 2>&1")) .unwrap_or_default(); // We formerly had a 'trap' below but it seems to have caused problems // (trap \"if [[ \\$? -eq 139 ]]; then echo Segfault; fi\" CHLD; {} {} 2>&1 && cat autocxx-ffi-default-gen.rs && cat autocxxgen*.h && {} && {} 2>&1 ) {} let content = format!( indoc! {" #!/bin/bash set -e echo Precompile {} echo Move mv concat.h concat-body.h (echo \"#ifndef __CONCAT_H__\"; echo \"#define __CONCAT_H__\"; echo '#include \"concat-body.h\"'; echo \"#endif\") > concat.h echo Codegen ({} {} 2>&1 && cat autocxx-ffi-default-gen.rs && cat autocxxgen*.h && {} && {} 2>&1) {} echo Remove rm concat.h echo Swap back mv concat-body.h concat.h echo Done "}, precompile_step, gen_cmd, args, rustc_step, postcompile_step, problem_grep ); println!("Interestingness test:\n{content}"); { let mut file = File::create(test_path)?; file.write_all(content.as_bytes())?; } let mut perms = std::fs::metadata(test_path)?.permissions(); perms.set_mode(0o700); std::fs::set_permissions(test_path, perms)?; Ok(()) } fn make_compile_step(enabled: bool, file: &str, extra_clang_args: &[&str]) -> String { if enabled { format!( "{} {} -c {}", get_clang_path(), make_clang_args(&[PathBuf::from(".")], extra_clang_args).join(" "), file, ) } else { "echo 'Skipping compilation'".into() } } fn create_rs_file(rs_path: &Path, directives: &[String]) -> Result<(), std::io::Error> { announce_progress("Creating Rust input file"); let mut file = File::create(rs_path)?; file.write_all("use autocxx::include_cpp;\ninclude_cpp! (\n".as_bytes())?; for directive in directives { file.write_all(directive.as_bytes())?; } file.write_all(");\n".as_bytes())?; Ok(()) } fn create_concatenated_header(headers: &[&str], listing_path: &Path) -> Result<(), std::io::Error> { announce_progress("Creating preprocessed header"); let mut file = File::create(listing_path)?; for header in headers { file.write_all(format!("#include \"{header}\"\n").as_bytes())?; } Ok(()) } fn create_file(path: &Path, content: &str) -> Result<(), std::io::Error> { let mut file = File::create(path)?; write!(file, "{content}")?; Ok(()) } fn get_cxx_suppressions() -> Vec<String> { let defines: IndexSet<_> = Regex::new(r"\bCXXBRIDGE1_\w+\b") .unwrap() .find_iter(cxx_gen::HEADER) .map(|m| m.as_str()) .collect(); // for uniqueness defines.into_iter().map(|def| format!("-D{def}")).collect() } #[test] fn test_get_cxx_suppressions() { let defines = get_cxx_suppressions(); assert!(defines.contains(&"-DCXXBRIDGE1_RUST_BITCOPY_T".to_string())); assert!(defines.contains(&"-DCXXBRIDGE1_RUST_STR".to_string())); }
{ let haystack = std::fs::read_to_string(concat_path)?; Ok(["class Box", "class Vec", "class Slice"] .iter() .all(|needle| haystack.contains(needle))) }
identifier_body
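// --- Illustrative addition (not from the original source): a minimal sketch of a
// test exercising the detect_cxx_h() heuristic defined above. It assumes only the
// `tempfile` crate, which this file already imports; the test name is hypothetical.
#[test]
fn test_detect_cxx_h_sketch() {
    let dir = tempfile::TempDir::new().unwrap();
    let concat = dir.path().join("concat.h");
    // All three marker classes present -> the heuristic should report true.
    std::fs::write(&concat, "class Box {}; class Vec {}; class Slice {};").unwrap();
    assert!(detect_cxx_h(&concat).unwrap());
    // Only one marker class present -> the heuristic should report false.
    std::fs::write(&concat, "class Box {};").unwrap();
    assert!(!detect_cxx_h(&concat).unwrap());
}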
cn.js
import React from "react"; import Layout from '@theme/Layout'; import Video from '@site/src/components/Video'; import Features from '@site/src/components/Features'; import FeaturesWide from '@site/src/components/FeaturesWide'; import FAQ from '@site/src/components/FAQ'; import Customers from '@site/src/components/Customers'; import * as Icons from '@site/src/components/HeroIcons/outline'; export default function
() { return ( <Layout title="华炎魔方,华炎办公,审批王,低代码,零代码,快速开发工具,企业PaaS平台" description="华炎魔方是一款随需应变的管理软件开发工具,旨在通过其强大的敏捷性、灵活性和开放性帮助企业创新、扩展和集成企业业务系统。基于该平台,您可以快速创建智能化、移动化的企业应用。" keywords={["低代码,低代码开发,低代码开发平台,开源低代码开发平台,快速开发平台,快速开发工具,paas,零代码,零代码开发,零代码开发平台"]} > <section className="flex bg-cover bg-no-repeat bg-gray-100"> <div className="mx-auto max-w-screen-xl px-4 sm:px-6 lg:px-8 my-16"> <div className="lg:grid lg:grid-cols-12 lg:gap-8"> <div className="sm:text-center md:max-w-2xl md:mx-auto lg:col-span-6 lg:text-left"> {/* <div className="text-sm font-semibold uppercase tracking-wide text-gray-700 sm:text-base lg:text-sm xl:text-base"> 新一代低代码开发平台 </div> */} <h2 className="mt-1 text-4xl tracking-tight leading-10 font-extrabold text-gray-900 sm:leading-none sm:text-4xl lg:text-5xl xl:text-6xl"> 高效搭建企业应用的 <br className="hidden md:inline"/> <span className="text-green-700">神奇魔方</span> </h2> <p className="mt-3 text-base text-gray-700 sm:mt-5 sm:text-xl lg:text-lg xl:text-xl"> 华炎魔方基于商业智能和模型驱动,即使是不懂编程的业务人员,也能轻松便捷地创建智能化、移动化的企业应用。 </p> <div className="mt-5 sm:mt-8 sm:flex sm:justify-center lg:justify-start"> <div className="rounded-md shadow"> <a href="http://oss.steedos.com/apps/pdfviewer/web/viewer.html?file=http://oss.steedos.com/docs/%E5%8D%8E%E7%82%8E%E9%AD%94%E6%96%B9%E6%8A%80%E6%9C%AF%E7%99%BD%E7%9A%AE%E4%B9%A6.pdf" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-white bg-green-600 hover:bg-green-500 focus:outline-none focus:border-green-700 focus:shadow-outline-teal transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10 hover:text-white"> 技术白皮书 </a> </div> <div className="mt-3 sm:mt-0 sm:ml-3"> <a href="/form/trial/" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-green-700 bg-green-100 hover:text-green-600 hover:bg-green-50 focus:outline-none focus:shadow-outline-teal focus:border-green-300 transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10"> 预约演示 </a> </div> </div> </div> <div className="mt-12 relative sm:max-w-lg sm:mx-auto lg:mt-0 lg:max-w-none lg:mx-0 lg:col-span-6 lg:flex lg:items-center"> <div className="relative mx-auto w-full lg:max-w-md"> <div className="relative block w-full rounded-lg shadow-lg overflow-hidden focus:outline-none focus:shadow-outline"> <Video poster="https://www-steedos-com.oss-accelerate.aliyuncs.com/videos/creator/steedos-guide.jpg" autoplay={false} urls={[ {name:"高清", url:"https://www-steedos-com.oss-accelerate.aliyuncs.com/videos/creator/steedos-guide.mp4"}, ]}/> </div> </div> </div> </div> </div> </section> <section className="flex bg-cover bg-no-repeat bg-gray-50"> <div className="mx-auto max-w-screen-xl px-4 sm:px-6 lg:px-8 my-16"> <div className="relative"> <div className="lg:grid lg:grid-flow-row-dense lg:grid-cols-2 lg:gap-8 lg:items-center"> <div className="lg:col-start-2"> <h3 className="text-2xl font-extrabold text-gray-900 tracking-tight sm:text-3xl"> 轻松搭建、快速创新,赋能每个员工 </h3> <p className="mt-3 text-lg text-gray-500"> </p> <dl className="mt-10 space-y-10"> <div className="relative"> <dt> <svg className="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true"> <path strokeLinecap="round" strokeLinejoin="round" strokeWidth="2" d="M5 13l4 4L19 7" /> </svg> <p className="ml-9 text-lg leading-6 text-gray-700">只需点击鼠标,就能进行应用系统的编码和设计,帮助业务人员和IT部门融合在一起,在一个平台上轻松协作。</p> </dt> <dd className="mt-2 ml-9 
text-base text-gray-500"> </dd> </div> <div className="relative"> <dt> <svg className="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true"> <path strokeLinecap="round" strokeLinejoin="round" strokeWidth="2" d="M5 13l4 4L19 7" /> </svg> <p className="ml-9 text-lg leading-6 text-gray-700">基于领先的人工智能和现代计算技术,管理每个部门的业务并实现自动化处理,加速企业数字化转型。</p> </dt> <dd className="mt-2 ml-9 text-base text-gray-500"> </dd> </div> <div className="relative"> <dt> <svg className="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true"> <path strokeLinecap="round" strokeLinejoin="round" strokeWidth="2" d="M5 13l4 4L19 7" /> </svg> <p className="ml-9 text-lg leading-6 text-gray-700">提供开箱即用的安全性和治理功能,您可以在受信任、完全开源的平台上自信地扩展和构建。</p> </dt> <dd className="mt-2 ml-9 text-base text-gray-500"> </dd> </div> </dl> </div> <div className="mt-10 -mx-4 relative lg:mt-0 lg:col-start-1"> <svg className="absolute left-1/2 transform -translate-x-1/2 translate-y-16 lg:hidden" width="784" height="404" fill="none" viewBox="0 0 784 404" aria-hidden="true"> <defs> <pattern id="e80155a9-dfde-425a-b5ea-1f6fadd20131" x="0" y="0" width="20" height="20" patternUnits="userSpaceOnUse"> <rect x="0" y="0" width="4" height="4" className="text-gray-200" fill="currentColor"></rect> </pattern> </defs> <rect width="784" height="404" fill="url(#e80155a9-dfde-425a-b5ea-1f6fadd20131)"></rect> </svg> <img className="relative mx-auto" width="490" src="https://images.unsplash.com/photo-1520333789090-1afc82db536a?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=2102&q=80" alt=""/> </div> </div> </div> </div> </section> <div className="relative bg-gray-50 pt-16 pb-20 px-4 sm:px-6 lg:pt-24 lg:pb-28 lg:px-8"> <div className="absolute inset-0"> <div className="bg-white h-1/3 sm:h-2/3"></div> </div> <div className="relative max-w-7xl mx-auto"> <div className="text-center"> <h2 className="text-3xl tracking-tight font-extrabold text-gray-900 sm:text-4xl"> 企业级低代码的核心特点 </h2> <p className="mt-3 max-w-2xl mx-auto text-xl text-gray-500 sm:mt-4"> 助力企业数字化转型 </p> </div> <div className="mt-12 max-w-lg mx-auto grid gap-5 lg:grid-cols-3 lg:max-w-none"> <div className="flex flex-col rounded-lg shadow-lg overflow-hidden"> <div className="flex-shrink-0"> <img className="h-48 w-full object-cover" src="https://images.unsplash.com/photo-1496128858413-b36217c2ce36?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1679&q=80" alt=""/> </div> <div className="flex-1 bg-white p-6 flex flex-col justify-between"> <div className="flex-1"> <a href="#" className="block mt-2"> <p className="text-xl font-semibold text-gray-900"> 可视化设计 </p> <p className="mt-3 text-base text-gray-500"> 可通过图形界面(而不是传统的计算机编程)来创建业务应用程序,通过拖拽式的UI组件及可视化模型来代替一部分甚至全部的编码工作。 </p> </a> </div> </div> </div> <div className="flex flex-col rounded-lg shadow-lg overflow-hidden"> <div className="flex-shrink-0"> <img className="h-48 w-full object-cover" src="https://images.unsplash.com/photo-1547586696-ea22b4d4235d?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1679&q=80" alt=""/> </div> <div className="flex-1 bg-white p-6 flex flex-col justify-between"> <div className="flex-1"> <a href="#" className="block mt-2"> <p className="text-xl font-semibold text-gray-900"> 快速构建 </p> <p className="mt-3 text-base text-gray-500"> 使用大量的组件和封装的接口进行开发,使得低代码能够提升30%以上的开发效率,并大幅降低开发成本。可在短时间内搭建业务系统,快速实现企业数字化转型。 </p> </a> </div> </div> </div> <div className="flex flex-col rounded-lg shadow-lg overflow-hidden"> <div className="flex-shrink-0"> <img className="h-48 w-full object-cover" 
src="https://images.unsplash.com/photo-1492724441997-5dc865305da7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1679&q=80" alt=""/> </div> <div className="flex-1 bg-white p-6 flex flex-col justify-between"> <div className="flex-1"> <a href="#" className="block mt-2"> <p className="text-xl font-semibold text-gray-900"> 灵活复用 </p> <p className="mt-3 text-base text-gray-500"> 企业变化是不可避免的,低代码开发平台使更改应用程序以及让它们适应新要求变得非常容易。无论是简单的审批表单还是复杂的业务需求,用户都无需编写代码即可实现。 </p> </a> </div> </div> </div> </div> </div> </div> <div className="bg-gray-200"> <div className="max-w-screen-xl mx-auto text-center py-10 px-4 sm:px-6 lg:py-12 lg:px-8"> <h2 className="text-3xl leading-9 font-extrabold tracking-tight text-gray-900 sm:text-4xl sm:leading-10"> 开放源码、随心定制 </h2> <p className="mt-4 text-xl leading-7 text-gray-500 lg:mx-auto"> 华炎协同办公解决方案基于低代码技术、完全开源。可以根据业务需求随心调整、无缝迭代,帮助业务加速创新,提升竞争力。 </p> <div className="mt-8 flex justify-center justify-start"> <div className="rounded-md shadow"> <a href="https://github.com/steedos/steedos-project-oa" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-white bg-green-600 hover:bg-green-500 hover:text-white focus:outline-none focus:border-green-700 focus:shadow-outline-green transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10"> 查看源码 </a> </div> <div className="ml-3 rounded-md shadow"> <a href="/form/trial" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-green-600 bg-white hover:text-green-500 focus:outline-none focus:border-green-300 focus:shadow-outline-green transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10"> 预约产品演示 </a> </div> </div> </div> </div> <Customers/> </Layout> ); }
Landing
identifier_name
cn.js
import React from "react"; import Layout from '@theme/Layout'; import Video from '@site/src/components/Video'; import Features from '@site/src/components/Features'; import FeaturesWide from '@site/src/components/FeaturesWide'; import FAQ from '@site/src/components/FAQ'; import Customers from '@site/src/components/Customers'; import * as Icons from '@site/src/components/HeroIcons/outline'; export default function Landing()
cn.js
import React from "react"; import Layout from '@theme/Layout'; import Video from '@site/src/components/Video'; import Features from '@site/src/components/Features'; import FeaturesWide from '@site/src/components/FeaturesWide'; import FAQ from '@site/src/components/FAQ'; import Customers from '@site/src/components/Customers'; import * as Icons from '@site/src/components/HeroIcons/outline'; export default function Landing() { return ( <Layout title="华炎魔方,华炎办公,审批王,低代码,零代码,快速开发工具,企业PaaS平台" description="华炎魔方是一款随需应变的管理软件开发工具,旨在通过其强大的敏捷性、灵活性和开放性帮助企业创新、扩展和集成企业业务系统。基于该平台,您可以快速创建智能化、移动化的企业应用。" keywords={["低代码,低代码开发,低代码开发平台,开源低代码开发平台,快速开发平台,快速开发工具,paas,零代码,零代码开发,零代码开发平台"]} > <section className="flex bg-cover bg-no-repeat bg-gray-100"> <div className="mx-auto max-w-screen-xl px-4 sm:px-6 lg:px-8 my-16"> <div className="lg:grid lg:grid-cols-12 lg:gap-8"> <div className="sm:text-center md:max-w-2xl md:mx-auto lg:col-span-6 lg:text-left"> {/* <div className="text-sm font-semibold uppercase tracking-wide text-gray-700 sm:text-base lg:text-sm xl:text-base"> 新一代低代码开发平台 </div> */} <h2 className="mt-1 text-4xl tracking-tight leading-10 font-extrabold text-gray-900 sm:leading-none sm:text-4xl lg:text-5xl xl:text-6xl"> 高效搭建企业应用的 <br className="hidden md:inline"/> <span className="text-green-700">神奇魔方</span> </h2> <p className="mt-3 text-base text-gray-700 sm:mt-5 sm:text-xl lg:text-lg xl:text-xl"> 华炎魔方基于商业智能和模型驱动,即使是不懂编程的业务人员,也能轻松便捷地创建智能化、移动化的企业应用。 </p> <div className="mt-5 sm:mt-8 sm:flex sm:justify-center lg:justify-start"> <div className="rounded-md shadow"> <a href="http://oss.steedos.com/apps/pdfviewer/web/viewer.html?file=http://oss.steedos.com/docs/%E5%8D%8E%E7%82%8E%E9%AD%94%E6%96%B9%E6%8A%80%E6%9C%AF%E7%99%BD%E7%9A%AE%E4%B9%A6.pdf" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-white bg-green-600 hover:bg-green-500 focus:outline-none focus:border-green-700 focus:shadow-outline-teal transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10 hover:text-white"> 技术白皮书 </a> </div> <div className="mt-3 sm:mt-0 sm:ml-3"> <a href="/form/trial/" target="_blank" className="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-green-700 bg-green-100 hover:text-green-600 hover:bg-green-50 focus:outline-none focus:shadow-outline-teal focus:border-green-300 transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10"> 预约演示 </a> </div> </div> </div> <div className="mt-12 relative sm:max-w-lg sm:mx-auto lg:mt-0 lg:max-w-none lg:mx-0 lg:col-span-6 lg:flex lg:items-center"> <div className="relative mx-auto w-full lg:max-w-md"> <div className="relative block w-full rounded-lg shadow-lg overflow-hidden focus:outline-none focus:shadow-outline"> <Video poster="https://www-steedos-com.oss-accelerate.aliyuncs.com/videos/creator/steedos-guide.jpg" autoplay={false}
</div> </div> </div> </div> </div> </section> <section className="flex bg-cover bg-no-repeat bg-gray-50"> <div className="mx-auto max-w-screen-xl px-4 sm:px-6 lg:px-8 my-16"> <div class="relative"> <div class="lg:grid lg:grid-flow-row-dense lg:grid-cols-2 lg:gap-8 lg:items-center"> <div class="lg:col-start-2"> <h3 class="text-2xl font-extrabold text-gray-900 tracking-tight sm:text-3xl"> 轻松搭建、快速创新,赋能每个员工 </h3> <p class="mt-3 text-lg text-gray-500"> </p> <dl class="mt-10 space-y-10"> <div class="relative"> <dt> <svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true"> <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" /> </svg> <p class="ml-9 text-lg leading-6 text-gray-700">只需点击鼠标,就能进行应用系统的编码和设计,帮助业务人员和IT部门融合在一起,在一个平台上轻松协作。</p> </dt> <dd class="mt-2 ml-9 text-base text-gray-500"> </dd> </div> <div class="relative"> <dt> <svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true"> <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" /> </svg> <p class="ml-9 text-lg leading-6 text-gray-700">基于领先的人工智能和现代计算技术,管理每个部门的业务并实现自动化处理,加速企业数字化转型。</p> </dt> <dd class="mt-2 ml-9 text-base text-gray-500"> </dd> </div> <div class="relative"> <dt> <svg class="absolute h-6 w-6 text-green-500" xmlns="http://www.w3.org/2000/svg" fill="none" viewBox="0 0 24 24" stroke="currentColor" aria-hidden="true"> <path stroke-linecap="round" stroke-linejoin="round" stroke-width="2" d="M5 13l4 4L19 7" /> </svg> <p class="ml-9 text-lg leading-6 text-gray-700">提供开箱即用的安全性和治理功能,您可以在受信任、完全开源的平台上自信地扩展和构建。</p> </dt> <dd class="mt-2 ml-9 text-base text-gray-500"> </dd> </div> </dl> </div> <div class="mt-10 -mx-4 relative lg:mt-0 lg:col-start-1"> <svg class="absolute left-1/2 transform -translate-x-1/2 translate-y-16 lg:hidden" width="784" height="404" fill="none" viewBox="0 0 784 404" aria-hidden="true"> <defs> <pattern id="e80155a9-dfde-425a-b5ea-1f6fadd20131" x="0" y="0" width="20" height="20" patternUnits="userSpaceOnUse"> <rect x="0" y="0" width="4" height="4" class="text-gray-200" fill="currentColor"></rect> </pattern> </defs> <rect width="784" height="404" fill="url(#e80155a9-dfde-425a-b5ea-1f6fadd20131)"></rect> </svg> <img class="relative mx-auto" width="490" src="https://images.unsplash.com/photo-1520333789090-1afc82db536a?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=2102&q=80" alt=""/> </div> </div> </div> </div> </section> <div class="relative bg-gray-50 pt-16 pb-20 px-4 sm:px-6 lg:pt-24 lg:pb-28 lg:px-8"> <div class="absolute inset-0"> <div class="bg-white h-1/3 sm:h-2/3"></div> </div> <div class="relative max-w-7xl mx-auto"> <div class="text-center"> <h2 class="text-3xl tracking-tight font-extrabold text-gray-900 sm:text-4xl"> 企业级低代码的核心特点 </h2> <p class="mt-3 max-w-2xl mx-auto text-xl text-gray-500 sm:mt-4"> 助力企业数字化转型 </p> </div> <div class="mt-12 max-w-lg mx-auto grid gap-5 lg:grid-cols-3 lg:max-w-none"> <div class="flex flex-col rounded-lg shadow-lg overflow-hidden"> <div class="flex-shrink-0"> <img class="h-48 w-full object-cover" src="https://images.unsplash.com/photo-1496128858413-b36217c2ce36?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1679&q=80" alt=""/> </div> <div class="flex-1 bg-white p-6 flex flex-col justify-between"> <div class="flex-1"> <a href="#" class="block mt-2"> <p class="text-xl font-semibold 
text-gray-900"> 可视化设计 </p> <p class="mt-3 text-base text-gray-500"> 可通过图形界面(而不是传统的计算机编程)来创建业务应用程序,通过拖拽式的UI组件及可视化模型来代替一部分甚至全部的编码工作。 </p> </a> </div> </div> </div> <div class="flex flex-col rounded-lg shadow-lg overflow-hidden"> <div class="flex-shrink-0"> <img class="h-48 w-full object-cover" src="https://images.unsplash.com/photo-1547586696-ea22b4d4235d?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1679&q=80" alt=""/> </div> <div class="flex-1 bg-white p-6 flex flex-col justify-between"> <div class="flex-1"> <a href="#" class="block mt-2"> <p class="text-xl font-semibold text-gray-900"> 快速构建 </p> <p class="mt-3 text-base text-gray-500"> 使用大量的组件和封装的接口进行开发,使得低代码能够提升30%以上的开发效率,并大幅降低开发成本。可在短时间内搭建业务系统,快速实现企业数字化转型。 </p> </a> </div> </div> </div> <div class="flex flex-col rounded-lg shadow-lg overflow-hidden"> <div class="flex-shrink-0"> <img class="h-48 w-full object-cover" src="https://images.unsplash.com/photo-1492724441997-5dc865305da7?ixlib=rb-1.2.1&ixid=eyJhcHBfaWQiOjEyMDd9&auto=format&fit=crop&w=1679&q=80" alt=""/> </div> <div class="flex-1 bg-white p-6 flex flex-col justify-between"> <div class="flex-1"> <a href="#" class="block mt-2"> <p class="text-xl font-semibold text-gray-900"> 灵活复用 </p> <p class="mt-3 text-base text-gray-500"> 企业变化是不可避免的,低代码开发平台使更改应用程序以及让它们适应新要求变得非常容易。无论是简单的审批表单还是复杂的业务需求,用户都无需编写代码即可实现。 </p> </a> </div> </div> </div> </div> </div> </div> <div class="bg-gray-200"> <div class="max-w-screen-xl mx-auto text-center py-10 px-4 sm:px-6 lg:py-12 lg:px-8"> <h2 class="text-3xl leading-9 font-extrabold tracking-tight text-gray-900 sm:text-4xl sm:leading-10"> 开放源码、随心定制 </h2> <p class="mt-4 text-xl leading-7 text-gray-500 lg:mx-auto"> 华炎协同办公解决方案基于低代码技术、完全开源。可以根据业务需求随心调整、无缝迭代,帮助业务加速创新,提升竞争力。 </p> <div class="mt-8 flex flex justify-center justify-start"> <div class="rounded-md shadow"> <a href="https://github.com/steedos/steedos-project-oa" target="_blank" class="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-white bg-green-600 hover:bg-green-500 hover:text-white focus:outline-none focus:border-green-700 focus:shadow-outline-green transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10"> 查看源码 </a> </div> <div class="ml-3 rounded-md shadow"> <a href="/form/trial" target="_blank" class="w-full flex items-center justify-center px-8 py-3 border border-transparent text-base leading-6 font-medium rounded-md text-green-600 bg-white hover:text-green-500 focus:outline-none focus:border-green-300 focus:shadow-outline-green transition duration-150 ease-in-out md:py-4 md:text-lg md:px-10"> 预约产品演示 </a> </div> </div> </div> </div> <Customers/> </Layout> ); }
urls={[ {name:"高清", url:"https://www-steedos-com.oss-accelerate.aliyuncs.com/videos/creator/steedos-guide.mp4"}, ]}/>
random_line_split
model_transformer.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=g-explicit-length-test
"""Apply graph transformations to a tf.keras model."""

import collections
import copy
import re

import tensorflow as tf

from tensorflow_model_optimization.python.core.quantization.keras.graph_transformations import transforms as transforms_mod

LayerNode = transforms_mod.LayerNode

keras = tf.keras
K = tf.keras.backend


class ModelTransformer(object):
  """Matches patterns to apply transforms in a tf.keras model graph."""

  def __init__(self, model, transforms, candidate_layers=None,
               layer_metadata=None):
    """Construct ModelTransformer.

    Args:
      model: Keras model to be transformed.
      transforms: List of transforms to be applied to the model.
      candidate_layers: Names of layers which may be transformed. Only layers
        whose names are in candidate_layers are matched against patterns. The
        default is that all layers may be transformed.
      layer_metadata: Dictionary of metadata associated with each layer in the
        model. The keys are layer names.
    """
    if not self._is_sequential_or_functional_model(model):
      raise ValueError(
          'Only tf.keras sequential or functional models can be transformed.')

    if layer_metadata is None:
      layer_metadata = {}

    self.model = model
    self.transforms = transforms
    self.candidate_layers = candidate_layers
    self.layer_metadata = layer_metadata

  def _is_sequential_or_functional_model(self, model):
    return self._is_functional_model(model) or isinstance(
        model, keras.Sequential)

  def _is_functional_model(self, model):
    return isinstance(model, keras.Model) \
        and not isinstance(model, keras.Sequential) \
        and model._is_graph_network  # pylint: disable=protected-access

  def _inbound_node_generator(self, layer):
    for inbound_node in layer['inbound_nodes']:
      if (isinstance(inbound_node, list) and len(inbound_node) > 0 and
          isinstance(inbound_node[0], str)):
        # TODO(tfmot): The case for the SlicingOpLambda.
        yield [inbound_node]
      else:
        yield inbound_node

  def _get_inbound_layer_names(self, layer):
    """Return all the inbound connection layer names for the layer."""
    inbound_layer_names = []
    for inbound_node in self._inbound_node_generator(layer):
      # TODO(b/197935452): temporary fix when the input is a dictionary of
      # tensors. A comprehensive solution may be needed.
      if isinstance(inbound_node, dict):
        inbound_node = inbound_node.values()
      for connection_info in inbound_node:
        # Positional input argument case.
        inbound_layer_names.append(connection_info[0])
        # **kwarg argument case.
        inbound_layer_names += [
            value[0] for value in connection_info[3].values()
            if isinstance(value, list)
        ]
    return inbound_layer_names

  def _get_consuming_layers(self, check_layer):
    """Returns all the layers which are out nodes from the layer."""
    consuming_layers = []
    check_layer_name = check_layer['config']['name']
    for layer in self._config['layers']:
      if check_layer_name in self._get_inbound_layer_names(layer):
        consuming_layers.append(layer)
    return consuming_layers

  def _get_output_consumers(self, check_layer):
    """Returns the model output entries which consume tensors from the layer."""
    output_consumers = []
    for output_layer in self._config['output_layers']:
      if output_layer[0] == check_layer['config']['name']:
        output_consumers.append(output_layer)
    return output_consumers

  def _get_layers(self, layer_names):
    return [
        layer for layer in self._config['layers']
        if layer['config']['name'] in layer_names
    ]

  def _get_layer_weights(self, layer_name):
    return self._layer_weights_map.get(layer_name, {})

  def _get_layer_names_and_weights(self, layer_name):
    return self._layer_names_and_weights_map.get(layer_name, {})

  def _get_layer_metadata(self, layer_name):
    return self._layer_metadata_map.get(layer_name, {})

  def _match_pattern(self, target, pattern):
    return re.match('^' + pattern + '$', target) is not None

  def _match_layer(self, layer, pattern):
    """Check if a specific layer matches the pattern."""

    if self.candidate_layers and \
        layer['config']['name'] not in self.candidate_layers:
      return False

    if not self._match_pattern(layer['class_name'], pattern.class_name):
      return False

    layer_config = layer['config']
    for key, value in pattern.config.items():
      # Either the provided value should equal the config value, or
      # be a regex match to str(value).
      if not (self._match_pattern(str(layer_config.get(key)), str(value)) or
              layer_config.get(key) == value):
        return False

    return True

  def _is_match_supported(self, layer, is_head_node):
    """Check if ModelTransformer supports transformations given the number of inputs and outputs at a layer.

    Args:
      layer: layer for pattern matching. Must come from a Functional model.
      is_head_node: whether this is the head node (e.g. in A -> B, B is the
        head node).

    Returns:
      whether match is supported.
    """
    inbound_nodes = layer['inbound_nodes']
    if len(inbound_nodes) > 1:
      # `layer` is re-used for more than 1 connection from previous layers. If
      # a pattern matches one set of inputs and is replaced, it will break the
      # other connection.
      #
      # Note that theoretically it's possible to have multiple connections
      # have exactly the same pattern, and in that case the transform might be
      # applied. But that's a very complicated edge case not worth handling.
      return False

    # If a layer has multiple inbound nodes, it will produce multiple outbound
    # connections as well. Hence no need to explicitly check that.

    consuming_layers = self._get_consuming_layers(layer)
    output_consumers = self._get_output_consumers(layer)
    if len(consuming_layers) + len(output_consumers) > 1:
      # Even if a layer has only 1 incoming connection, multiple layers may
      # still consume the output. Having multiple consumers is only supported
      # for the head node, and not intermediate layers. Replacing intermediate
      # nodes with >1 consumer will lead to dangling nodes.
      #
      # Note that theoretically, intermediate layers can be supported, as a
      # part of a general layer transform tool. This is not supported given no
      # motivating use case.
      if not is_head_node:
        return False

    return True

  def _get_input_layer_names(self, layer):
    """Get the names of a layer's input layers."""
    if self._is_functional_model(self.model):
      inbound_nodes = layer['inbound_nodes']
      return [connection_info[0] for connection_info in inbound_nodes[0]]
    else:  # Sequential model.
      layers = self._config['layers']
      i = layers.index(layer)
      if i == 0:
        # First layer has no inputs.
        return []
      else:
        return [layers[i - 1]['config']['name']]

  def _match_layer_with_inputs(self, layer, pattern, is_head_node):
    """Match pattern at this layer, and continue to match at its inputs."""
    if not self._match_layer(layer, pattern):
      return None

    if self._is_functional_model(
        self.model) and not self._is_match_supported(layer, is_head_node):
      return None

    if len(pattern.inputs) == 0:
      # Leaf layer in pattern.
      return LayerNode(
          layer, self._get_layer_weights(layer['config']['name']), [],
          self._get_layer_metadata(layer['config']['name']),
          self._get_layer_names_and_weights(layer['config']['name']))

    # There is a possible edge case where a single layer may output multiple
    # tensors and multiple tensors from that layer may be used by the
    # connection. Ignoring those for now.
    input_layer_names = self._get_input_layer_names(layer)
    input_layers = self._get_layers(input_layer_names)

    if len(input_layers) != len(pattern.inputs):
      # The number of inputs this layer takes is different from the number of
      # inputs in the pattern.
      #
      # This path currently has the limitation that it requires an exact
      # number of inputs to match a pattern. For example, if a user wants to
      # match 2 Convs -> Concat and 3 Convs -> Concat, they would need to
      # write 2 different patterns.
      return None

    # Inbound layers can have a different order from the list of input
    # patterns.
    # TODO(pulkitb): Fix by checking all permutations.
    input_match_layer_nodes = []
    for input_layer, pattern_ in zip(input_layers, pattern.inputs):
      match_layer_node = self._match_layer_with_inputs(
          input_layer, pattern_, is_head_node=False)
      if not match_layer_node:
        return None
      input_match_layer_nodes.append(match_layer_node)

    return LayerNode(layer, self._get_layer_weights(layer['config']['name']),
                     input_match_layer_nodes,
                     self._get_layer_metadata(layer['config']['name']),
                     self._get_layer_names_and_weights(layer['config']['name']))

  def _find_pattern(self, pattern, matched_layers=None):
    for layer in self._config['layers']:
      if matched_layers and layer['config']['name'] in matched_layers:
        continue
      match_layer = self._match_layer_with_inputs(
          layer, pattern, is_head_node=True)
      if match_layer:
        return match_layer
    return None

  def _get_leaf_layers(self, match_layer):
    """Return leaf layers from this sub-graph tree."""
    if not match_layer.input_layers:
      return [match_layer.layer]

    # If 2 different layers point to the same input, or if a layer uses the
    # same input multiple times, the input layer can be repeated. But it
    # preserves a bit of structure.
    leaf_layers = []
    for inp in match_layer.input_layers:
      leaf_layers.extend(self._get_leaf_layers(inp))
    return leaf_layers

  def _get_layer_names(self, layer_node):
    result = [layer_node.layer['config']['name']]
    for input_layer in layer_node.input_layers:
      result.extend(self._get_layer_names(input_layer))
    return result

  def _remove_layers(self, layers_to_remove, layers_to_remove_names):
    # Remove layers.
    for layer_to_remove in layers_to_remove:
      self._config['layers'].remove(layer_to_remove)

    # Remove entries from the weight and metadata maps,
    # now that the layers have been removed.
    for layer_name in layers_to_remove_names:
      self._layer_weights_map.pop(layer_name, None)
      self._layer_names_and_weights_map.pop(layer_name, None)
      self._layer_metadata_map.pop(layer_name, None)

  def _replace(self, match_layer_node, replacement_layer_node):
    """Replace the tree or chain of match_layer_node with replacement_layer_node."""
    if self._is_functional_model(self.model):
      self._replace_functional(match_layer_node, replacement_layer_node)
    else:
      self._replace_sequential(match_layer_node, replacement_layer_node)

  def _replace_functional(self, match_layer_node, replacement_layer_node):
    """Functional model: replace the tree of match_layer_node with replacement_layer_node."""
    # 1. Point all consumers of the head of the matching sub-tree to the head
    # replacement layer.
    #
    # There are some assumptions baked in. The head layer only has 1 inbound
    # and outbound node. The resulting number and shape of tensors from the
    # replaced layer should equal the original layer.
    consuming_layers = self._get_consuming_layers(match_layer_node.layer)
    match_name = match_layer_node.layer['config']['name']
    replacement_name = replacement_layer_node.layer['config']['name']

    def _replace_layer_name_for_connection_info(connection_info, match_name,
                                                replacement_name):
      if connection_info[0] == match_name:
        connection_info[0] = replacement_name
      for key in connection_info[3]:
        if isinstance(connection_info[3][key], list):
          if connection_info[3][key][0] == match_name:
            connection_info[3][key][0] = replacement_name

    for consumer in consuming_layers:
      for inbound_node in self._inbound_node_generator(consumer):
        if isinstance(inbound_node, dict):
          inbound_node = inbound_node.values()
        for connection_info in inbound_node:
          _replace_layer_name_for_connection_info(connection_info, match_name,
                                                  replacement_name)

    output_consumers = self._get_output_consumers(match_layer_node.layer)
    for output_consumer in output_consumers:
      output_consumer[0] = replacement_layer_node.layer['config']['name']

    # 2. Create inbound nodes for the replacement layers. This connects all
    # the replacement layers.

    def _assign_inbounds_for_replacement(layer_node):
      """_assign_inbounds_for_replacement."""
      if not layer_node.input_layers:
        return

      layer_node.layer['inbound_nodes'] = [[]]
      for input_layer in layer_node.input_layers:
        # inbound_nodes can be specific tensors from multiple inbound
        # connections. We make the following assumptions.
        # - Only 1 inbound node for each replacement layer.
        # - Only 1 tensor output from the previous layer which is connected.
        # - call() method during construction does not have any args.
        # These are reasonable assumptions for almost all cases we are
        # interested in.
        layer_node.layer['inbound_nodes'][0].append(
            [input_layer.layer['config']['name'], 0, 0, {}])

        _assign_inbounds_for_replacement(input_layer)

    _assign_inbounds_for_replacement(replacement_layer_node)

    # 3. Connect the leaves of the replacement_layers to the inbound_nodes of
    # the leaves in the original layer.
    original_leaf_layers = self._get_leaf_layers(match_layer_node)
    original_inbound_nodes = [
        layer['inbound_nodes'] for layer in original_leaf_layers
    ]

    replacement_leaf_layers = self._get_leaf_layers(replacement_layer_node)

    # The original pattern and the replacement pattern can potentially have a
    # different number of leaf nodes and differences in how they consume these
    # input layers. Matching them would require sophisticated hackery to
    # recreate the new layers with the original input structure.
    # Given our existing transforms, we can assume they match.
    if len(original_leaf_layers) != len(replacement_leaf_layers):
      raise RuntimeError('Different size of leaf layers not supported yet.')

    for original_inbound_nodes, replacement_leaf_layer in zip(
        original_inbound_nodes, replacement_leaf_layers):
      replacement_leaf_layer['inbound_nodes'] = original_inbound_nodes

    # 4. Remove the original matched layers.
    layers_to_remove_names = self._get_layer_names(match_layer_node)
    layers_to_remove = self._get_layers(layers_to_remove_names)
    self._remove_layers(layers_to_remove, layers_to_remove_names)

    # 5. Add in the new layers.
    def _add_replacement_layer(layer_node):
      """Recursively add new layers."""
      self._config['layers'].append(layer_node.layer)
      layer_name = layer_node.layer['config']['name']
      # TODO(b/184603494): Remove weight map structure from model_transformer.
      if layer_node.weights:
        self._layer_weights_map[layer_name] = layer_node.weights
      if layer_node.names_and_weights:
        self._layer_names_and_weights_map[
            layer_name] = layer_node.names_and_weights
      if layer_node.metadata:
        self._layer_metadata_map[layer_name] = layer_node.metadata
      if self.candidate_layers:
        self.candidate_layers.add(layer_name)

      for input_layer in layer_node.input_layers:
        _add_replacement_layer(input_layer)

    _add_replacement_layer(replacement_layer_node)

  def _replace_sequential(self, match_layer_node, replacement_layer_node):
    """Sequential model: replace the chain of match_layer_node with replacement_layer_node."""
    # 1. Remove the original matched layers.
    layers_to_remove_names = self._get_layer_names(match_layer_node)
    layers_to_remove = self._get_layers(layers_to_remove_names)

    # These variables are needed when adding the new layers
    # and must be set before _remove_layers removes them.
    first_layer_removed = layers_to_remove[0]
    first_layer_removed_index = self._config['layers'].index(
        first_layer_removed)

    self._remove_layers(layers_to_remove, layers_to_remove_names)

    # 2. Add in the new layers.
    def _get_replacement_nodes(replacement_node):
      """Get list of replacement nodes in Sequential order."""
      replacement_nodes = []
      for input_layer in replacement_node.input_layers:
        replacement_nodes.extend(_get_replacement_nodes(input_layer))
      replacement_nodes.append(replacement_node)
      return replacement_nodes

    def _add_replacement_nodes(first_layer_removed_index, replacement_nodes):
      """Add replacement nodes to Sequential model."""
      # Potentially insert nodes into the middle of the model.
      i = first_layer_removed_index
      for replacement_node in replacement_nodes:
        self._config['layers'].insert(i, replacement_node.layer)
        layer_name = replacement_node.layer['config']['name']
        if replacement_node.weights:
          self._layer_weights_map[layer_name] = replacement_node.weights
        if replacement_node.names_and_weights:
          self._layer_names_and_weights_map[
              layer_name] = replacement_node.names_and_weights
        if replacement_node.metadata:
          self._layer_metadata_map[layer_name] = replacement_node.metadata
        if self.candidate_layers:
          self.candidate_layers.add(layer_name)
        i += 1

    replacement_nodes = _get_replacement_nodes(replacement_layer_node)
    _add_replacement_nodes(first_layer_removed_index, replacement_nodes)

  def _weight_name(self, name):
    """Extracts the weight name by removing the layer from the TF variable name.

    For example, returns 'kernel:0' for 'dense_2/kernel:0'.

    Args:
      name: TensorFlow variable name.

    Returns:
      Extracted weight name.
    """
    return name.split('/')[-1]

  def _get_keras_layer_weights(self, keras_layer):
    """Returns a map of weight name, weight matrix.

    Keeps keras ordering."""
    weights_map = collections.OrderedDict()
    for weight_tensor, weight_numpy in \
        zip(keras_layer.weights, keras_layer.get_weights()):
      weights_map[self._weight_name(weight_tensor.name)] = weight_numpy

    if len(weights_map) != len(keras_layer.weights):
      # The case where the variable identifier is not unique. Fall back to
      # the weight list instead of the weights map.
      return None

    return weights_map

  def _get_keras_layer_names_and_weights(self, keras_layer):
    return zip([weight.name for weight in keras_layer.weights],
               keras_layer.get_weights())

  def _set_layer_weights(self, layer, weights_map):
    """Sets the values of weights in a Keras layer."""
    weight_value_tuples = []
    for weight_tensor in layer.weights:
      weight_name = self._weight_name(weight_tensor.name)
      if weight_name in weights_map:
        weight_value_tuples.append((weight_tensor, weights_map[weight_name]))

    K.batch_set_value(weight_value_tuples)
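  # Illustrative note (added; not part of the upstream file): the map built
  # by _get_keras_layer_weights keys each weight by its layer-local name,
  # e.g. {'kernel:0': <ndarray>, 'bias:0': <ndarray>} for a Dense layer, so
  # _set_layer_weights can restore values onto a replacement layer even when
  # the scoped variable names differ (e.g. 'dense_1/kernel:0' in the original
  # vs 'dense_quant/kernel:0' after a swap).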
  def _set_layer_names_and_weights(self, layer, names_and_weights):
    layer.set_weights([weight for _, weight in names_and_weights])

  def _name(self, obj):
    return obj.__class__.__name__

  def _get_matched_layers(self, transform):
    return self._transform_matched_layers_map.get(self._name(transform), [])

  def _store_successful_match(self, transform, layer_node):
    if self._name(transform) not in self._transform_matched_layers_map:
      self._transform_matched_layers_map[self._name(transform)] = []
    self._transform_matched_layers_map[self._name(transform)].append(
        layer_node.layer['config']['name'])

  def transform(self):
    """Transforms the Keras model by applying all the specified transforms.

    This is the main entry point function used to apply the transformations
    to the Keras model. Not suitable for multi-threaded use: it creates and
    manipulates internal state.

    Returns:
      (Keras model after transformation, Updated layer metadata map)
    """
    # Gets a serialized dict representation of the model, containing all its
    # layers, their connections and configuration. This is the main structure
    # which is used to understand model structure, and also manipulate it.
    #
    # config = {
    #   'input_layers': [ ... ],
    #   'layers': [{
    #     'inbound_nodes': [INPUT CONFIG OF LAYER],
    #     'name': 'LAYER_NAME',
    #     'config': { LAYER_CONFIG }
    #   }, {
    #     ...
    #   }],
    #   'output_layers': [ ... ],
    #   'name': 'MODEL_NAME',
    # }

    # Ensures old Keras serialization format.
    self.model.use_legacy_config = True

    self._config = self.model.get_config()

    # Stores a map of Transform -> List of layer names matched by the
    # transform. The same transform should not match+replace the same layer
    # more than once, to prevent infinite loops.
    self._transform_matched_layers_map = {}

    self._layer_weights_map = {}
    self._layer_names_and_weights_map = {}
    for layer in self.model.layers:
      self._layer_weights_map[layer.name] = self._get_keras_layer_weights(layer)
      self._layer_names_and_weights_map[
          layer.name] = self._get_keras_layer_names_and_weights(layer)

    # Maintains a current mutable copy of the metadata through transformation.
    self._layer_metadata_map = copy.deepcopy(self.layer_metadata)

    # We run an infinite loop and keep applying transformations as long as
    # patterns are found. This allows recursive pattern matching where a
    # modification by one transform may lead to another match.
    #
    # TODO(pulkitb): This leads to infinite loops with poor patterns which may
    # match their replacement. Add counters with limits to fix it.
    while True:
      match_found = False
      for transform in self.transforms:
        # A transform may find multiple instances of a pattern in the model.
        # Keep finding and replacing till done.
        while True:
          match_layer_node = self._find_pattern(
              transform.pattern(), self._get_matched_layers(transform))

          # Pattern did not match any layer. Move to the next transform.
          if not match_layer_node:
            break

          self._store_successful_match(transform, match_layer_node)

          # Copying the match_layer_node ensures the replacement code can
          # freely modify the match.
          replacement_layer_node = transform.replacement(
              copy.deepcopy(match_layer_node))

          # If equal, the matched layers are being replaced with exactly the
          # same set of layers that were matched with the same config. For
          # Transforms that may inadvertently do this, we can end up in an
          # infinite loop. Move on if no meaningful change has been made.
          if match_layer_node == replacement_layer_node:
            continue

          match_found = True
          self._replace(match_layer_node, replacement_layer_node)

      # None of the transforms found a pattern. We can stop now.
      if not match_found:
        break

    custom_objects = {}
    for transform in self.transforms:
      custom_objects.update(transform.custom_objects())

    # Reconstruct the model from the config, using the cloned layers.
    if self._is_functional_model(self.model):
      transformed_model = keras.Model.from_config(self._config, custom_objects)
    else:
      transformed_model = keras.Sequential.from_config(self._config,
                                                       custom_objects)

    for layer in transformed_model.layers:
      weights = self._layer_weights_map.get(layer.name)
      if weights:
        self._set_layer_weights(layer, weights)
      else:
        names_and_weights = self._layer_names_and_weights_map.get(layer.name)
        if names_and_weights:
          self._set_layer_names_and_weights(layer, names_and_weights)

    # Ensures old Keras serialization format.
    transformed_model.use_legacy_config = True

    return transformed_model, copy.deepcopy(self._layer_metadata_map)
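A minimal usage sketch of the API above, added for illustration only: the `SetDenseUseBias` transform, its matched config value, and the toy model are assumptions, not part of the upstream file. `pattern()` describes the serialized layer to match, and `replacement()` returns the LayerNode to substitute; this exercises the Sequential path of ModelTransformer.

# Usage sketch (illustrative; assumes it is appended to the module above,
# so `transforms_mod`, `keras`, and `ModelTransformer` are already in scope).
class SetDenseUseBias(transforms_mod.Transform):
  """Example transform: force `use_bias=True` on Dense layers that disable it."""

  def pattern(self):
    # Match any Dense layer whose serialized config has use_bias=False.
    return transforms_mod.LayerPattern('Dense', {'use_bias': False}, [])

  def replacement(self, match_layer):
    # match_layer is a deep-copied LayerNode; edit its serialized config.
    match_layer.layer['config']['use_bias'] = True
    return match_layer


if __name__ == '__main__':
  model = keras.Sequential(
      [keras.layers.Dense(4, use_bias=False, input_shape=(8,))])
  transformed_model, _ = ModelTransformer(
      model, [SetDenseUseBias()]).transform()
  print(transformed_model.layers[0].get_config()['use_bias'])  # True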
model_transformer.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-explicit-length-test """Apply graph transformations to a tf.keras model.""" import collections import copy import re import tensorflow as tf from tensorflow_model_optimization.python.core.quantization.keras.graph_transformations import transforms as transforms_mod LayerNode = transforms_mod.LayerNode keras = tf.keras K = tf.keras.backend class ModelTransformer(object): """Matches patterns to apply transforms in a tf.keras model graph.""" def __init__(self, model, transforms, candidate_layers=None, layer_metadata=None): """Construct ModelTransformer. Args: model: Keras model to be transformed. transforms: List of transforms to be applied to the model. candidate_layers: Names of layers which may be transformed. Only layers whose names are in candidate_layers are matched against patterns. The default is that all layers may be transformed. layer_metadata: Dictionary of metadata associated with each layer in the model. The keys are layer names. """ if not self._is_sequential_or_functional_model(model): raise ValueError( 'Only tf.keras sequential or functional models can be transformed.') if layer_metadata is None: layer_metadata = {} self.model = model self.transforms = transforms self.candidate_layers = candidate_layers self.layer_metadata = layer_metadata def _is_sequential_or_functional_model(self, model): return ModelTransformer._is_functional_model(self, model) or isinstance( model, keras.Sequential) def _is_functional_model(self, model): return isinstance(model, keras.Model) \ and not isinstance(model, keras.Sequential) \ and model._is_graph_network # pylint: disable=protected-access def _inbound_node_generator(self, layer): for inbound_node in layer['inbound_nodes']: if (isinstance(inbound_node, list) and len(inbound_node) > 0 and isinstance(inbound_node[0], str)): # TODO(tfmot): The case for the SlicingOpLambda. yield [inbound_node] else: yield inbound_node def _get_inbound_layer_names(self, layer): """Return all the inbound connection layer names for the layer.""" inbound_layer_names = [] for inbound_node in self._inbound_node_generator(layer): # TODO(b/197935452): temporary fix when the input is a dictionary of # tensors. A comprehensive solution may be needed. if isinstance(inbound_node, dict): inbound_node = inbound_node.values() for connection_info in inbound_node: # input argument case. inbound_layer_names.append(connection_info[0]) # **kwarg argument case. 
inbound_layer_names += [ value[0] for value in connection_info[3].values() if isinstance( value, list) ] return inbound_layer_names def _get_consuming_layers(self, check_layer): """Returns all the layers which are out nodes from the layer.""" consuming_layers = [] check_layer_name = check_layer['config']['name'] for layer in self._config['layers']: if check_layer_name in self._get_inbound_layer_names(layer): consuming_layers.append(layer) return consuming_layers def _get_output_consumers(self, check_layer): """Returns if any tensors from the layer are outputs of the model.""" output_consumers = [] for output_layer in self._config['output_layers']: if output_layer[0] == check_layer['config']['name']: output_consumers.append(output_layer) return output_consumers def _get_layers(self, layer_names): return [ layer for layer in self._config['layers'] if layer['config']['name'] in layer_names ] def _get_layer_weights(self, layer_name): return self._layer_weights_map.get(layer_name, {}) def _get_layer_names_and_weights(self, layer_name): return self._layer_names_and_weights_map.get(layer_name, {}) def _get_layer_metadata(self, layer_name): return self._layer_metadata_map.get(layer_name, {}) def _match_pattern(self, target, pattern): return re.match('^' + pattern + '$', target) is not None def _match_layer(self, layer, pattern): """Check if specific layer matches the pattern.""" if self.candidate_layers and \ layer['config']['name'] not in self.candidate_layers: return False if not self._match_pattern(layer['class_name'], pattern.class_name): return False layer_config = layer['config'] for key, value in pattern.config.items(): # Either the provided value should equal the config value, or # be a regex match to str(value). if not (self._match_pattern(str(layer_config.get(key)), str(value)) or \ layer_config.get(key) == value): return False return True def _is_match_supported(self, layer, is_head_node): """Check if ModelTransformer supports transformations given number of inputs and outputs at a layer. Args: layer: layer for pattern matching. Must come from a Functional model. is_head_node: whether this is the head node (e.g. in A -> B , B is the head node). Returns: whether match is supported. """ inbound_nodes = layer['inbound_nodes'] if len(inbound_nodes) > 1: # `layer` is re-used for more than 1 connection from previous layers. If # a pattern matches one set of inputs and is replaced, it will break the # other connection. # # Note that theoretically it's possible to have multiple connections have # exactly the same pattern, and in that case the transform might be # applied. But that's a very complicated edge case not worth handling. return False # If a layer has multiple inbound nodes, it will produce multiple outbound # connections as well. Hence no need to explicitly check that. consuming_layers = self._get_consuming_layers(layer) output_consumers = self._get_output_consumers(layer) if len(consuming_layers) + len(output_consumers) > 1: # Even if a layer has only 1 incoming connection, multiple layers may # still consume the output. Having multiple consumers is only supported # for the head node, and not intermediate layers. Replacing intermediate # nodes with >1 consumer will lead to dangling nodes. # # Note that theoretically, intermediate layers can supported, as a part # of a general layer transform tool. This is not supported given no # motivating use case. 
if not is_head_node: return False return True def _get_input_layer_names(self, layer): """Get the names of a layer's input layers.""" if self._is_functional_model(self.model): inbound_nodes = layer['inbound_nodes'] return [connection_info[0] for connection_info in inbound_nodes[0]] else: # Sequential model. layers = self._config['layers'] i = layers.index(layer) if i == 0: # First layer has no inputs. return [] else: return [layers[i - 1]['config']['name']] def _match_layer_with_inputs(self, layer, pattern, is_head_node): """Match pattern at this layer, and continue to match at its inputs.""" if not self._match_layer(layer, pattern): return None if self._is_functional_model( self.model) and not self._is_match_supported(layer, is_head_node): return None if len(pattern.inputs) == 0: # Leaf layer in pattern. return LayerNode( layer, self._get_layer_weights(layer['config']['name']), [], self._get_layer_metadata(layer['config']['name']), self._get_layer_names_and_weights(layer['config']['name'])) # There is a possible edge case where a single layer may output multiple # tensors and multiple tensors from that layer may be used by the # connection. Ignoring those for now. input_layer_names = self._get_input_layer_names(layer) input_layers = self._get_layers(input_layer_names) if len(input_layers) != len(pattern.inputs): # Number of inputs this layer takes is different from the number of # inputs in the pattern. # # This path currently has the limitation that it requires an exact number # of inputs to match a pattern. For example, if a user wants to match # 2 Convs -> Concat and 3 Convs -> Concat, they would need to write # 2 different patterns. return None # Inbound layers can have different order from the list of input patterns. # TODO(pulkitb): Fix by checking all permutations. input_match_layer_nodes = [] for input_layer, pattern_ in zip(input_layers, pattern.inputs): match_layer_node = self._match_layer_with_inputs( input_layer, pattern_, is_head_node=False) if not match_layer_node: return None input_match_layer_nodes.append(match_layer_node) return LayerNode(layer, self._get_layer_weights(layer['config']['name']), input_match_layer_nodes, self._get_layer_metadata(layer['config']['name']), self._get_layer_names_and_weights(layer['config']['name'])) def _find_pattern(self, pattern, matched_layers=None): for layer in self._config['layers']: if matched_layers and layer['config']['name'] in matched_layers: continue match_layer = self._match_layer_with_inputs( layer, pattern, is_head_node=True) if match_layer: return match_layer return None def _get_leaf_layers(self, match_layer): """Return leaf layers from this sub-graph tree.""" if not match_layer.input_layers: return [match_layer.layer] # If 2 different layers point to the same input, or if a layer uses the # same input multiple times, the input layer can be repeated. But it # preserves a bit of structure. leaf_layers = [] for inp in match_layer.input_layers: leaf_layers.extend(self._get_leaf_layers(inp)) return leaf_layers def _get_layer_names(self, layer_node): result = [layer_node.layer['config']['name']] for input_layer in layer_node.input_layers: result.extend(self._get_layer_names(input_layer)) return result def _remove_layers(self, layers_to_remove, layers_to_remove_names): # Remove layers. for layer_to_remove in layers_to_remove: self._config['layers'].remove(layer_to_remove) # Remove entry from weight and metadata maps, # now that layer has been removed. 
for layer_name in layers_to_remove_names: self._layer_weights_map.pop(layer_name, None) self._layer_names_and_weights_map.pop(layer_name, None) self._layer_metadata_map.pop(layer_name, None) def _replace(self, match_layer_node, replacement_layer_node): """Replace the tree or chain of match_layer_node with replacement_layer_node.""" if self._is_functional_model(self.model): self._replace_functional(match_layer_node, replacement_layer_node) else: self._replace_sequential(match_layer_node, replacement_layer_node) def _replace_functional(self, match_layer_node, replacement_layer_node): """Functional model: replace the tree of match_layer_node with replacement_layer_node.""" # 1. Point all consumers of the head of the matching sub-tree to the head # replacement layer. # # There are some assumptions baked in. The head layer only has 1 inbound and # outbound node. The resulting number and shape of tensors from the # replaced layer should equal the original layer. consuming_layers = self._get_consuming_layers(match_layer_node.layer) match_name = match_layer_node.layer['config']['name'] replacement_name = replacement_layer_node.layer['config']['name'] def _replace_layer_name_for_connection_info(connection_info, match_name, replacement_name): if connection_info[0] == match_name: connection_info[0] = replacement_name for key in connection_info[3]: if isinstance(connection_info[3][key], list): if connection_info[3][key][0] == match_name: connection_info[3][key][0] = replacement_name for consumer in consuming_layers: for inbound_node in self._inbound_node_generator(consumer): if isinstance(inbound_node, dict): inbound_node = inbound_node.values() for connection_info in inbound_node: _replace_layer_name_for_connection_info(connection_info, match_name, replacement_name) output_consumers = self._get_output_consumers(match_layer_node.layer) for output_consumer in output_consumers: output_consumer[0] = replacement_layer_node.layer['config']['name'] # 2. Create inbound nodes for the replacement layers. This connects all # the replacement layers. def _assign_inbounds_for_replacement(layer_node): """_assign_inbounds_for_replacement.""" if not layer_node.input_layers: return layer_node.layer['inbound_nodes'] = [[]] for input_layer in layer_node.input_layers: # inbound_nodes can be specific tensors from multiple inbound # connections. We make the following assumptions. # - Only 1 inbound node for each replacement layer. # - Only 1 tensor output from the previous layer which is connected. # - call() method during construction does not have any args. # These are reasonable assumptions for almost all case we are # interested in. layer_node.layer['inbound_nodes'][0].append( [input_layer.layer['config']['name'], 0, 0, {}]) _assign_inbounds_for_replacement(input_layer) _assign_inbounds_for_replacement(replacement_layer_node) # 3. Connect the leaves of the replacement_layers to the inbound_nodes of # the leaves in the original layer. original_leaf_layers = self._get_leaf_layers(match_layer_node) original_inbound_nodes = [ layer['inbound_nodes'] for layer in original_leaf_layers ] replacement_leaf_layers = self._get_leaf_layers(replacement_layer_node) # The original pattern and the replacement pattern can potentially have # different number of leaf nodes and differences in how they consume these # input layers. Matching them will require sophisticated hackery to recreate # the new layers with the original input structure. # Given our existing transforms, we can assume they match. 
if len(original_leaf_layers) != len(replacement_leaf_layers): raise RuntimeError('Different size of leaf layers not supported yet.') for original_inbound_nodes, replacement_leaf_layer in zip( original_inbound_nodes, replacement_leaf_layers): replacement_leaf_layer['inbound_nodes'] = original_inbound_nodes # 4. Remove the original matched layers layers_to_remove_names = self._get_layer_names(match_layer_node) layers_to_remove = self._get_layers(layers_to_remove_names) self._remove_layers(layers_to_remove, layers_to_remove_names) # 5. Add in the new layers. def _add_replacement_layer(layer_node): """Recursively add new layers.""" self._config['layers'].append(layer_node.layer) layer_name = layer_node.layer['config']['name'] # TODO(b/184603494): Remove weight map structure from model_transformer. if layer_node.weights: self._layer_weights_map[layer_name] = layer_node.weights if layer_node.names_and_weights: self._layer_names_and_weights_map[ layer_name] = layer_node.names_and_weights if layer_node.metadata: self._layer_metadata_map[layer_name] = layer_node.metadata if self.candidate_layers: self.candidate_layers.add(layer_name) for input_layer in layer_node.input_layers: _add_replacement_layer(input_layer) _add_replacement_layer(replacement_layer_node) def _replace_sequential(self, match_layer_node, replacement_layer_node): """Sequential model: replace the chain of match_layer_node with replacement_layer_node.""" # 1. Remove the original matched layers. layers_to_remove_names = self._get_layer_names(match_layer_node) layers_to_remove = self._get_layers(layers_to_remove_names) # These variables are needed when adding the new layers # and must be set before _remove_layers removes them. first_layer_removed = layers_to_remove[0] first_layer_removed_index = self._config['layers'].index( first_layer_removed) self._remove_layers(layers_to_remove, layers_to_remove_names) # 2. Add in the new layers. def _get_replacement_nodes(replacement_node): """Get list of replacement nodes in Sequential order.""" replacement_nodes = [] for input_layer in replacement_node.input_layers:
replacement_nodes.append(replacement_node) return replacement_nodes def _add_replacement_nodes(first_layer_removed_index, replacement_nodes): """Add replacement nodes to Sequential model.""" # Potentially insert nodes into middle of model. i = first_layer_removed_index for replacement_node in replacement_nodes: self._config['layers'].insert(i, replacement_node.layer) layer_name = replacement_node.layer['config']['name'] if replacement_node.weights: self._layer_weights_map[layer_name] = replacement_node.weights if replacement_node.names_and_weights: self._layer_names_and_weights_map[ layer_name] = replacement_node.names_and_weights if replacement_node.metadata: self._layer_metadata_map[layer_name] = replacement_node.metadata if self.candidate_layers: self.candidate_layers.add(layer_name) i += 1 replacement_nodes = _get_replacement_nodes(replacement_layer_node) _add_replacement_nodes(first_layer_removed_index, replacement_nodes) def _weight_name(self, name): """Extracts the weight name by removing layer from TF variable name. For example, returns 'kernel:0' for 'dense_2/kernel:0'. Args: name: TensorFlow variable name. Returns: Extracted weight name. """ return name.split('/')[-1] def _get_keras_layer_weights(self, keras_layer): """Returns a map of weight name, weight matrix. Keeps keras ordering.""" weights_map = collections.OrderedDict() for weight_tensor, weight_numpy in \ zip(keras_layer.weights, keras_layer.get_weights()): weights_map[self._weight_name(weight_tensor.name)] = weight_numpy if len(weights_map) != len(keras_layer.weights): # The case that variable identifier is not unique. It's a fallback that # uses weight list instead of the weights map. return None return weights_map def _get_keras_layer_names_and_weights(self, keras_layer): return zip([weight.name for weight in keras_layer.weights], keras_layer.get_weights()) def _set_layer_weights(self, layer, weights_map): """Sets the values of weights in a Keras layer.""" weight_value_tuples = [] for weight_tensor in layer.weights: weight_name = self._weight_name(weight_tensor.name) if weight_name in weights_map: weight_value_tuples.append((weight_tensor, weights_map[weight_name])) K.batch_set_value(weight_value_tuples) def _set_layer_names_and_weights(self, layer, names_and_weights): layer.set_weights([weight for _, weight in names_and_weights]) def _name(self, obj): return obj.__class__.__name__ def _get_matched_layers(self, transform): return self._transform_matched_layers_map.get(self._name(transform), []) def _store_successful_match(self, transform, layer_node): if self._name(transform) not in self._transform_matched_layers_map: self._transform_matched_layers_map[self._name(transform)] = [] self._transform_matched_layers_map[self._name(transform)].append( layer_node.layer['config']['name']) def transform(self): """Transforms the Keras model by applying all the specified transforms. This is the main entry point function used to apply the transformations to the Keras model. Not suitable for multi-threaded use. Creates and manipulates internal state. Returns: (Keras model after transformation, Updated layer metadata map) """ # Gets a serialized dict representation of the model, containing all its # layers, their connections and configuration. This is the main structure # which is used to understand model structure, and also manipulate it. # # config = { # 'input_layers': [ ... ], # 'layers': [{ # 'inbound_nodes': [INPUT CONFIG OF LAYER], # 'name': 'LAYER_NAME', # 'config': { LAYER_CONFIG } # }, { # ... # }], # 'output_layers': [ ... 
], # 'name': 'MODEL_NAME', # # Ensures old Keras serialization format self.model.use_legacy_config = True self._config = self.model.get_config() # Stores map of Transform -> List of layer names matched by transform. # Same transform should not match+replace the same layer more than once # to prevent infinite loops. self._transform_matched_layers_map = {} self._layer_weights_map = {} self._layer_names_and_weights_map = {} for layer in self.model.layers: self._layer_weights_map[layer.name] = self._get_keras_layer_weights(layer) self._layer_names_and_weights_map[ layer.name] = self._get_keras_layer_names_and_weights(layer) # Maintains a current mutable copy of the metadata through transformation. self._layer_metadata_map = copy.deepcopy(self.layer_metadata) # We run an infinite loop and keep applying transformations as long as # patterns are found. This allows recursive pattern matching where a # modification by one transform may lead to another match. # # TODO(pulkitb): This leads to infinite loops with poor patterns which may # match their replacement. Add counters with limits to fix it. while True: match_found = False for transform in self.transforms: # A transform may find multiple instances of a pattern in the model. # Keep finding and replacing till done. while True: match_layer_node = self._find_pattern( transform.pattern(), self._get_matched_layers(transform)) # Pattern did not match any layer. Move to next transform. if not match_layer_node: break self._store_successful_match(transform, match_layer_node) # Copying the match_layer_node ensures the replacement code can # freely modify the match. replacement_layer_node = transform.replacement( copy.deepcopy(match_layer_node)) # If equal, the matched layers are being replaced with exactly the # same set of layers that were matched with the same config. # For Transforms, that may inadvertently do this we can end up in # an infinite loop. Move on if no meaningful change has been made. if match_layer_node == replacement_layer_node: continue match_found = True self._replace(match_layer_node, replacement_layer_node) # None of the transforms found a pattern. We can stop now. if not match_found: break custom_objects = {} for transform in self.transforms: custom_objects.update(transform.custom_objects()) # Reconstruct model from the config, using the cloned layers. if self._is_functional_model(self.model): transformed_model = keras.Model.from_config(self._config, custom_objects) else: transformed_model = keras.Sequential.from_config(self._config, custom_objects) for layer in transformed_model.layers: weights = self._layer_weights_map.get(layer.name) if weights: self._set_layer_weights(layer, weights) else: names_and_weights = self._layer_names_and_weights_map.get(layer.name) if names_and_weights: self._set_layer_names_and_weights(layer, names_and_weights) # Ensures old Keras serialization format transformed_model.use_legacy_config = True return transformed_model, copy.deepcopy(self._layer_metadata_map)
replacement_nodes.extend(_get_replacement_nodes(input_layer))
conditional_block
model_transformer.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-explicit-length-test """Apply graph transformations to a tf.keras model.""" import collections import copy import re import tensorflow as tf from tensorflow_model_optimization.python.core.quantization.keras.graph_transformations import transforms as transforms_mod LayerNode = transforms_mod.LayerNode keras = tf.keras K = tf.keras.backend class ModelTransformer(object): """Matches patterns to apply transforms in a tf.keras model graph.""" def __init__(self, model, transforms, candidate_layers=None, layer_metadata=None): """Construct ModelTransformer. Args: model: Keras model to be transformed. transforms: List of transforms to be applied to the model. candidate_layers: Names of layers which may be transformed. Only layers whose names are in candidate_layers are matched against patterns. The default is that all layers may be transformed. layer_metadata: Dictionary of metadata associated with each layer in the model. The keys are layer names. """ if not self._is_sequential_or_functional_model(model): raise ValueError( 'Only tf.keras sequential or functional models can be transformed.') if layer_metadata is None: layer_metadata = {} self.model = model self.transforms = transforms self.candidate_layers = candidate_layers self.layer_metadata = layer_metadata def _is_sequential_or_functional_model(self, model): return ModelTransformer._is_functional_model(self, model) or isinstance( model, keras.Sequential) def _is_functional_model(self, model): return isinstance(model, keras.Model) \ and not isinstance(model, keras.Sequential) \ and model._is_graph_network # pylint: disable=protected-access def _inbound_node_generator(self, layer): for inbound_node in layer['inbound_nodes']: if (isinstance(inbound_node, list) and len(inbound_node) > 0 and isinstance(inbound_node[0], str)): # TODO(tfmot): The case for the SlicingOpLambda. yield [inbound_node] else: yield inbound_node def _get_inbound_layer_names(self, layer): """Return all the inbound connection layer names for the layer.""" inbound_layer_names = [] for inbound_node in self._inbound_node_generator(layer): # TODO(b/197935452): temporary fix when the input is a dictionary of # tensors. A comprehensive solution may be needed. if isinstance(inbound_node, dict): inbound_node = inbound_node.values() for connection_info in inbound_node: # input argument case. inbound_layer_names.append(connection_info[0]) # **kwarg argument case. 
        inbound_layer_names += [
            value[0] for value in connection_info[3].values()
            if isinstance(value, list)
        ]
    return inbound_layer_names

  def _get_consuming_layers(self, check_layer):
    """Returns all the layers which are out nodes from the layer."""
    consuming_layers = []
    check_layer_name = check_layer['config']['name']
    for layer in self._config['layers']:
      if check_layer_name in self._get_inbound_layer_names(layer):
        consuming_layers.append(layer)
    return consuming_layers

  def _get_output_consumers(self, check_layer):
    """Returns the model output entries that consume tensors from the layer."""
    output_consumers = []
    for output_layer in self._config['output_layers']:
      if output_layer[0] == check_layer['config']['name']:
        output_consumers.append(output_layer)
    return output_consumers

  def _get_layers(self, layer_names):
    return [
        layer for layer in self._config['layers']
        if layer['config']['name'] in layer_names
    ]

  def _get_layer_weights(self, layer_name):
    return self._layer_weights_map.get(layer_name, {})

  def _get_layer_names_and_weights(self, layer_name):
    return self._layer_names_and_weights_map.get(layer_name, {})

  def _get_layer_metadata(self, layer_name):
    return self._layer_metadata_map.get(layer_name, {})

  def _match_pattern(self, target, pattern):
    return re.match('^' + pattern + '$', target) is not None

  def _match_layer(self, layer, pattern):
    """Check if specific layer matches the pattern."""

    if self.candidate_layers and \
        layer['config']['name'] not in self.candidate_layers:
      return False

    if not self._match_pattern(layer['class_name'], pattern.class_name):
      return False

    layer_config = layer['config']
    for key, value in pattern.config.items():
      # Either the provided value should equal the config value, or
      # be a regex match to str(value).
      if not (self._match_pattern(str(layer_config.get(key)), str(value)) or
              layer_config.get(key) == value):
        return False

    return True

  def _is_match_supported(self, layer, is_head_node):
    """Check if ModelTransformer supports transformations given the number of
    inputs and outputs at a layer.

    Args:
      layer: layer for pattern matching. Must come from a Functional model.
      is_head_node: whether this is the head node (e.g. in A -> B, B is the
        head node).

    Returns:
      whether match is supported.
    """
    inbound_nodes = layer['inbound_nodes']
    if len(inbound_nodes) > 1:
      # `layer` is re-used for more than 1 connection from previous layers. If
      # a pattern matches one set of inputs and is replaced, it will break the
      # other connection.
      #
      # Note that theoretically it's possible to have multiple connections
      # with exactly the same pattern, and in that case the transform might be
      # applied. But that's a very complicated edge case not worth handling.
      return False

    # If a layer has multiple inbound nodes, it will produce multiple outbound
    # connections as well. Hence no need to explicitly check that.

    consuming_layers = self._get_consuming_layers(layer)
    output_consumers = self._get_output_consumers(layer)
    if len(consuming_layers) + len(output_consumers) > 1:
      # Even if a layer has only 1 incoming connection, multiple layers may
      # still consume the output. Having multiple consumers is only supported
      # for the head node, and not intermediate layers. Replacing intermediate
      # nodes with >1 consumer will lead to dangling nodes.
      #
      # Note that theoretically, intermediate layers can be supported, as part
      # of a general layer transform tool. This is not supported given no
      # motivating use case.
      if not is_head_node:
        return False

    return True

  def _get_input_layer_names(self, layer):
def _match_layer_with_inputs(self, layer, pattern, is_head_node): """Match pattern at this layer, and continue to match at its inputs.""" if not self._match_layer(layer, pattern): return None if self._is_functional_model( self.model) and not self._is_match_supported(layer, is_head_node): return None if len(pattern.inputs) == 0: # Leaf layer in pattern. return LayerNode( layer, self._get_layer_weights(layer['config']['name']), [], self._get_layer_metadata(layer['config']['name']), self._get_layer_names_and_weights(layer['config']['name'])) # There is a possible edge case where a single layer may output multiple # tensors and multiple tensors from that layer may be used by the # connection. Ignoring those for now. input_layer_names = self._get_input_layer_names(layer) input_layers = self._get_layers(input_layer_names) if len(input_layers) != len(pattern.inputs): # Number of inputs this layer takes is different from the number of # inputs in the pattern. # # This path currently has the limitation that it requires an exact number # of inputs to match a pattern. For example, if a user wants to match # 2 Convs -> Concat and 3 Convs -> Concat, they would need to write # 2 different patterns. return None # Inbound layers can have different order from the list of input patterns. # TODO(pulkitb): Fix by checking all permutations. input_match_layer_nodes = [] for input_layer, pattern_ in zip(input_layers, pattern.inputs): match_layer_node = self._match_layer_with_inputs( input_layer, pattern_, is_head_node=False) if not match_layer_node: return None input_match_layer_nodes.append(match_layer_node) return LayerNode(layer, self._get_layer_weights(layer['config']['name']), input_match_layer_nodes, self._get_layer_metadata(layer['config']['name']), self._get_layer_names_and_weights(layer['config']['name'])) def _find_pattern(self, pattern, matched_layers=None): for layer in self._config['layers']: if matched_layers and layer['config']['name'] in matched_layers: continue match_layer = self._match_layer_with_inputs( layer, pattern, is_head_node=True) if match_layer: return match_layer return None def _get_leaf_layers(self, match_layer): """Return leaf layers from this sub-graph tree.""" if not match_layer.input_layers: return [match_layer.layer] # If 2 different layers point to the same input, or if a layer uses the # same input multiple times, the input layer can be repeated. But it # preserves a bit of structure. leaf_layers = [] for inp in match_layer.input_layers: leaf_layers.extend(self._get_leaf_layers(inp)) return leaf_layers def _get_layer_names(self, layer_node): result = [layer_node.layer['config']['name']] for input_layer in layer_node.input_layers: result.extend(self._get_layer_names(input_layer)) return result def _remove_layers(self, layers_to_remove, layers_to_remove_names): # Remove layers. for layer_to_remove in layers_to_remove: self._config['layers'].remove(layer_to_remove) # Remove entry from weight and metadata maps, # now that layer has been removed. 
for layer_name in layers_to_remove_names: self._layer_weights_map.pop(layer_name, None) self._layer_names_and_weights_map.pop(layer_name, None) self._layer_metadata_map.pop(layer_name, None) def _replace(self, match_layer_node, replacement_layer_node): """Replace the tree or chain of match_layer_node with replacement_layer_node.""" if self._is_functional_model(self.model): self._replace_functional(match_layer_node, replacement_layer_node) else: self._replace_sequential(match_layer_node, replacement_layer_node) def _replace_functional(self, match_layer_node, replacement_layer_node): """Functional model: replace the tree of match_layer_node with replacement_layer_node.""" # 1. Point all consumers of the head of the matching sub-tree to the head # replacement layer. # # There are some assumptions baked in. The head layer only has 1 inbound and # outbound node. The resulting number and shape of tensors from the # replaced layer should equal the original layer. consuming_layers = self._get_consuming_layers(match_layer_node.layer) match_name = match_layer_node.layer['config']['name'] replacement_name = replacement_layer_node.layer['config']['name'] def _replace_layer_name_for_connection_info(connection_info, match_name, replacement_name): if connection_info[0] == match_name: connection_info[0] = replacement_name for key in connection_info[3]: if isinstance(connection_info[3][key], list): if connection_info[3][key][0] == match_name: connection_info[3][key][0] = replacement_name for consumer in consuming_layers: for inbound_node in self._inbound_node_generator(consumer): if isinstance(inbound_node, dict): inbound_node = inbound_node.values() for connection_info in inbound_node: _replace_layer_name_for_connection_info(connection_info, match_name, replacement_name) output_consumers = self._get_output_consumers(match_layer_node.layer) for output_consumer in output_consumers: output_consumer[0] = replacement_layer_node.layer['config']['name'] # 2. Create inbound nodes for the replacement layers. This connects all # the replacement layers. def _assign_inbounds_for_replacement(layer_node): """_assign_inbounds_for_replacement.""" if not layer_node.input_layers: return layer_node.layer['inbound_nodes'] = [[]] for input_layer in layer_node.input_layers: # inbound_nodes can be specific tensors from multiple inbound # connections. We make the following assumptions. # - Only 1 inbound node for each replacement layer. # - Only 1 tensor output from the previous layer which is connected. # - call() method during construction does not have any args. # These are reasonable assumptions for almost all case we are # interested in. layer_node.layer['inbound_nodes'][0].append( [input_layer.layer['config']['name'], 0, 0, {}]) _assign_inbounds_for_replacement(input_layer) _assign_inbounds_for_replacement(replacement_layer_node) # 3. Connect the leaves of the replacement_layers to the inbound_nodes of # the leaves in the original layer. original_leaf_layers = self._get_leaf_layers(match_layer_node) original_inbound_nodes = [ layer['inbound_nodes'] for layer in original_leaf_layers ] replacement_leaf_layers = self._get_leaf_layers(replacement_layer_node) # The original pattern and the replacement pattern can potentially have # different number of leaf nodes and differences in how they consume these # input layers. Matching them will require sophisticated hackery to recreate # the new layers with the original input structure. # Given our existing transforms, we can assume they match. 
if len(original_leaf_layers) != len(replacement_leaf_layers): raise RuntimeError('Different size of leaf layers not supported yet.') for original_inbound_nodes, replacement_leaf_layer in zip( original_inbound_nodes, replacement_leaf_layers): replacement_leaf_layer['inbound_nodes'] = original_inbound_nodes # 4. Remove the original matched layers layers_to_remove_names = self._get_layer_names(match_layer_node) layers_to_remove = self._get_layers(layers_to_remove_names) self._remove_layers(layers_to_remove, layers_to_remove_names) # 5. Add in the new layers. def _add_replacement_layer(layer_node): """Recursively add new layers.""" self._config['layers'].append(layer_node.layer) layer_name = layer_node.layer['config']['name'] # TODO(b/184603494): Remove weight map structure from model_transformer. if layer_node.weights: self._layer_weights_map[layer_name] = layer_node.weights if layer_node.names_and_weights: self._layer_names_and_weights_map[ layer_name] = layer_node.names_and_weights if layer_node.metadata: self._layer_metadata_map[layer_name] = layer_node.metadata if self.candidate_layers: self.candidate_layers.add(layer_name) for input_layer in layer_node.input_layers: _add_replacement_layer(input_layer) _add_replacement_layer(replacement_layer_node) def _replace_sequential(self, match_layer_node, replacement_layer_node): """Sequential model: replace the chain of match_layer_node with replacement_layer_node.""" # 1. Remove the original matched layers. layers_to_remove_names = self._get_layer_names(match_layer_node) layers_to_remove = self._get_layers(layers_to_remove_names) # These variables are needed when adding the new layers # and must be set before _remove_layers removes them. first_layer_removed = layers_to_remove[0] first_layer_removed_index = self._config['layers'].index( first_layer_removed) self._remove_layers(layers_to_remove, layers_to_remove_names) # 2. Add in the new layers. def _get_replacement_nodes(replacement_node): """Get list of replacement nodes in Sequential order.""" replacement_nodes = [] for input_layer in replacement_node.input_layers: replacement_nodes.extend(_get_replacement_nodes(input_layer)) replacement_nodes.append(replacement_node) return replacement_nodes def _add_replacement_nodes(first_layer_removed_index, replacement_nodes): """Add replacement nodes to Sequential model.""" # Potentially insert nodes into middle of model. i = first_layer_removed_index for replacement_node in replacement_nodes: self._config['layers'].insert(i, replacement_node.layer) layer_name = replacement_node.layer['config']['name'] if replacement_node.weights: self._layer_weights_map[layer_name] = replacement_node.weights if replacement_node.names_and_weights: self._layer_names_and_weights_map[ layer_name] = replacement_node.names_and_weights if replacement_node.metadata: self._layer_metadata_map[layer_name] = replacement_node.metadata if self.candidate_layers: self.candidate_layers.add(layer_name) i += 1 replacement_nodes = _get_replacement_nodes(replacement_layer_node) _add_replacement_nodes(first_layer_removed_index, replacement_nodes) def _weight_name(self, name): """Extracts the weight name by removing layer from TF variable name. For example, returns 'kernel:0' for 'dense_2/kernel:0'. Args: name: TensorFlow variable name. Returns: Extracted weight name. """ return name.split('/')[-1] def _get_keras_layer_weights(self, keras_layer): """Returns a map of weight name, weight matrix. 
Keeps keras ordering.""" weights_map = collections.OrderedDict() for weight_tensor, weight_numpy in \ zip(keras_layer.weights, keras_layer.get_weights()): weights_map[self._weight_name(weight_tensor.name)] = weight_numpy if len(weights_map) != len(keras_layer.weights): # The case that variable identifier is not unique. It's a fallback that # uses weight list instead of the weights map. return None return weights_map def _get_keras_layer_names_and_weights(self, keras_layer): return zip([weight.name for weight in keras_layer.weights], keras_layer.get_weights()) def _set_layer_weights(self, layer, weights_map): """Sets the values of weights in a Keras layer.""" weight_value_tuples = [] for weight_tensor in layer.weights: weight_name = self._weight_name(weight_tensor.name) if weight_name in weights_map: weight_value_tuples.append((weight_tensor, weights_map[weight_name])) K.batch_set_value(weight_value_tuples) def _set_layer_names_and_weights(self, layer, names_and_weights): layer.set_weights([weight for _, weight in names_and_weights]) def _name(self, obj): return obj.__class__.__name__ def _get_matched_layers(self, transform): return self._transform_matched_layers_map.get(self._name(transform), []) def _store_successful_match(self, transform, layer_node): if self._name(transform) not in self._transform_matched_layers_map: self._transform_matched_layers_map[self._name(transform)] = [] self._transform_matched_layers_map[self._name(transform)].append( layer_node.layer['config']['name']) def transform(self): """Transforms the Keras model by applying all the specified transforms. This is the main entry point function used to apply the transformations to the Keras model. Not suitable for multi-threaded use. Creates and manipulates internal state. Returns: (Keras model after transformation, Updated layer metadata map) """ # Gets a serialized dict representation of the model, containing all its # layers, their connections and configuration. This is the main structure # which is used to understand model structure, and also manipulate it. # # config = { # 'input_layers': [ ... ], # 'layers': [{ # 'inbound_nodes': [INPUT CONFIG OF LAYER], # 'name': 'LAYER_NAME', # 'config': { LAYER_CONFIG } # }, { # ... # }], # 'output_layers': [ ... ], # 'name': 'MODEL_NAME', # # Ensures old Keras serialization format self.model.use_legacy_config = True self._config = self.model.get_config() # Stores map of Transform -> List of layer names matched by transform. # Same transform should not match+replace the same layer more than once # to prevent infinite loops. self._transform_matched_layers_map = {} self._layer_weights_map = {} self._layer_names_and_weights_map = {} for layer in self.model.layers: self._layer_weights_map[layer.name] = self._get_keras_layer_weights(layer) self._layer_names_and_weights_map[ layer.name] = self._get_keras_layer_names_and_weights(layer) # Maintains a current mutable copy of the metadata through transformation. self._layer_metadata_map = copy.deepcopy(self.layer_metadata) # We run an infinite loop and keep applying transformations as long as # patterns are found. This allows recursive pattern matching where a # modification by one transform may lead to another match. # # TODO(pulkitb): This leads to infinite loops with poor patterns which may # match their replacement. Add counters with limits to fix it. while True: match_found = False for transform in self.transforms: # A transform may find multiple instances of a pattern in the model. # Keep finding and replacing till done. 
        while True:
          match_layer_node = self._find_pattern(
              transform.pattern(), self._get_matched_layers(transform))

          # Pattern did not match any layer. Move to next transform.
          if not match_layer_node:
            break

          self._store_successful_match(transform, match_layer_node)

          # Copying the match_layer_node ensures the replacement code can
          # freely modify the match.
          replacement_layer_node = transform.replacement(
              copy.deepcopy(match_layer_node))

          # If equal, the matched layers are being replaced with exactly the
          # same set of layers that were matched with the same config.
          # For Transforms that may inadvertently do this, we can end up in
          # an infinite loop. Move on if no meaningful change has been made.
          if match_layer_node == replacement_layer_node:
            continue

          match_found = True
          self._replace(match_layer_node, replacement_layer_node)

      # None of the transforms found a pattern. We can stop now.
      if not match_found:
        break

    custom_objects = {}
    for transform in self.transforms:
      custom_objects.update(transform.custom_objects())

    # Reconstruct model from the config, using the cloned layers.
    if self._is_functional_model(self.model):
      transformed_model = keras.Model.from_config(self._config, custom_objects)
    else:
      transformed_model = keras.Sequential.from_config(self._config,
                                                       custom_objects)

    for layer in transformed_model.layers:
      weights = self._layer_weights_map.get(layer.name)
      if weights:
        self._set_layer_weights(layer, weights)
      else:
        names_and_weights = self._layer_names_and_weights_map.get(layer.name)
        if names_and_weights:
          self._set_layer_names_and_weights(layer, names_and_weights)

    # Ensures old Keras serialization format
    transformed_model.use_legacy_config = True
    return transformed_model, copy.deepcopy(self._layer_metadata_map)
"""Get the names of a layer's input layers.""" if self._is_functional_model(self.model): inbound_nodes = layer['inbound_nodes'] return [connection_info[0] for connection_info in inbound_nodes[0]] else: # Sequential model. layers = self._config['layers'] i = layers.index(layer) if i == 0: # First layer has no inputs. return [] else: return [layers[i - 1]['config']['name']]
identifier_body
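The middle above reads connection names out of the serialized 'inbound_nodes' entries. A quick way to see that structure is the sketch below; it assumes a TF/Keras version that still emits the legacy list-of-lists config format, which is why transform() pins use_legacy_config.

import tensorflow as tf

inp = tf.keras.Input(shape=(4,), name='in')
x = tf.keras.layers.Dense(8, name='d1')(inp)
model = tf.keras.Model(inp, tf.keras.layers.Dense(2, name='d2')(x))

for layer in model.get_config()['layers']:
  # Legacy format: each connection is [layer_name, node_index, tensor_index,
  # kwargs], so connection_info[0] is the input layer's name.
  print(layer['config']['name'], '<-', layer['inbound_nodes'])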
model_transformer.py
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # pylint: disable=g-explicit-length-test """Apply graph transformations to a tf.keras model.""" import collections import copy import re import tensorflow as tf from tensorflow_model_optimization.python.core.quantization.keras.graph_transformations import transforms as transforms_mod LayerNode = transforms_mod.LayerNode keras = tf.keras K = tf.keras.backend class ModelTransformer(object): """Matches patterns to apply transforms in a tf.keras model graph.""" def __init__(self, model, transforms, candidate_layers=None, layer_metadata=None): """Construct ModelTransformer. Args: model: Keras model to be transformed. transforms: List of transforms to be applied to the model. candidate_layers: Names of layers which may be transformed. Only layers whose names are in candidate_layers are matched against patterns. The default is that all layers may be transformed. layer_metadata: Dictionary of metadata associated with each layer in the model. The keys are layer names. """ if not self._is_sequential_or_functional_model(model): raise ValueError( 'Only tf.keras sequential or functional models can be transformed.') if layer_metadata is None: layer_metadata = {} self.model = model self.transforms = transforms self.candidate_layers = candidate_layers self.layer_metadata = layer_metadata def _is_sequential_or_functional_model(self, model): return ModelTransformer._is_functional_model(self, model) or isinstance( model, keras.Sequential) def _is_functional_model(self, model): return isinstance(model, keras.Model) \ and not isinstance(model, keras.Sequential) \ and model._is_graph_network # pylint: disable=protected-access def _inbound_node_generator(self, layer): for inbound_node in layer['inbound_nodes']: if (isinstance(inbound_node, list) and len(inbound_node) > 0 and isinstance(inbound_node[0], str)): # TODO(tfmot): The case for the SlicingOpLambda. yield [inbound_node] else: yield inbound_node def _get_inbound_layer_names(self, layer): """Return all the inbound connection layer names for the layer.""" inbound_layer_names = [] for inbound_node in self._inbound_node_generator(layer): # TODO(b/197935452): temporary fix when the input is a dictionary of # tensors. A comprehensive solution may be needed. if isinstance(inbound_node, dict): inbound_node = inbound_node.values() for connection_info in inbound_node: # input argument case. inbound_layer_names.append(connection_info[0]) # **kwarg argument case. 
        inbound_layer_names += [
            value[0] for value in connection_info[3].values()
            if isinstance(value, list)
        ]
    return inbound_layer_names

  def _get_consuming_layers(self, check_layer):
    """Returns all the layers which are out nodes from the layer."""
    consuming_layers = []
    check_layer_name = check_layer['config']['name']
    for layer in self._config['layers']:
      if check_layer_name in self._get_inbound_layer_names(layer):
        consuming_layers.append(layer)
    return consuming_layers

  def _get_output_consumers(self, check_layer):
    """Returns the model output entries that consume tensors from the layer."""
    output_consumers = []
    for output_layer in self._config['output_layers']:
      if output_layer[0] == check_layer['config']['name']:
        output_consumers.append(output_layer)
    return output_consumers

  def _get_layers(self, layer_names):
    return [
        layer for layer in self._config['layers']
        if layer['config']['name'] in layer_names
    ]

  def _get_layer_weights(self, layer_name):
    return self._layer_weights_map.get(layer_name, {})

  def _get_layer_names_and_weights(self, layer_name):
    return self._layer_names_and_weights_map.get(layer_name, {})

  def _get_layer_metadata(self, layer_name):
    return self._layer_metadata_map.get(layer_name, {})

  def _match_pattern(self, target, pattern):
    return re.match('^' + pattern + '$', target) is not None

  def _match_layer(self, layer, pattern):
    """Check if specific layer matches the pattern."""

    if self.candidate_layers and \
        layer['config']['name'] not in self.candidate_layers:
      return False

    if not self._match_pattern(layer['class_name'], pattern.class_name):
      return False

    layer_config = layer['config']
    for key, value in pattern.config.items():
      # Either the provided value should equal the config value, or
      # be a regex match to str(value).
      if not (self._match_pattern(str(layer_config.get(key)), str(value)) or
              layer_config.get(key) == value):
        return False

    return True

  def _is_match_supported(self, layer, is_head_node):
    """Check if ModelTransformer supports transformations given the number of
    inputs and outputs at a layer.

    Args:
      layer: layer for pattern matching. Must come from a Functional model.
      is_head_node: whether this is the head node (e.g. in A -> B, B is the
        head node).

    Returns:
      whether match is supported.
    """
    inbound_nodes = layer['inbound_nodes']
    if len(inbound_nodes) > 1:
      # `layer` is re-used for more than 1 connection from previous layers. If
      # a pattern matches one set of inputs and is replaced, it will break the
      # other connection.
      #
      # Note that theoretically it's possible to have multiple connections
      # with exactly the same pattern, and in that case the transform might be
      # applied. But that's a very complicated edge case not worth handling.
      return False

    # If a layer has multiple inbound nodes, it will produce multiple outbound
    # connections as well. Hence no need to explicitly check that.

    consuming_layers = self._get_consuming_layers(layer)
    output_consumers = self._get_output_consumers(layer)
    if len(consuming_layers) + len(output_consumers) > 1:
      # Even if a layer has only 1 incoming connection, multiple layers may
      # still consume the output. Having multiple consumers is only supported
      # for the head node, and not intermediate layers. Replacing intermediate
      # nodes with >1 consumer will lead to dangling nodes.
      #
      # Note that theoretically, intermediate layers can be supported, as part
      # of a general layer transform tool. This is not supported given no
      # motivating use case.
if not is_head_node: return False return True def _get_input_layer_names(self, layer): """Get the names of a layer's input layers.""" if self._is_functional_model(self.model): inbound_nodes = layer['inbound_nodes'] return [connection_info[0] for connection_info in inbound_nodes[0]] else: # Sequential model. layers = self._config['layers'] i = layers.index(layer) if i == 0: # First layer has no inputs. return [] else: return [layers[i - 1]['config']['name']] def _match_layer_with_inputs(self, layer, pattern, is_head_node): """Match pattern at this layer, and continue to match at its inputs.""" if not self._match_layer(layer, pattern): return None if self._is_functional_model( self.model) and not self._is_match_supported(layer, is_head_node): return None if len(pattern.inputs) == 0: # Leaf layer in pattern. return LayerNode( layer, self._get_layer_weights(layer['config']['name']), [], self._get_layer_metadata(layer['config']['name']), self._get_layer_names_and_weights(layer['config']['name'])) # There is a possible edge case where a single layer may output multiple # tensors and multiple tensors from that layer may be used by the # connection. Ignoring those for now. input_layer_names = self._get_input_layer_names(layer) input_layers = self._get_layers(input_layer_names) if len(input_layers) != len(pattern.inputs): # Number of inputs this layer takes is different from the number of # inputs in the pattern. # # This path currently has the limitation that it requires an exact number # of inputs to match a pattern. For example, if a user wants to match # 2 Convs -> Concat and 3 Convs -> Concat, they would need to write # 2 different patterns. return None # Inbound layers can have different order from the list of input patterns. # TODO(pulkitb): Fix by checking all permutations. input_match_layer_nodes = [] for input_layer, pattern_ in zip(input_layers, pattern.inputs): match_layer_node = self._match_layer_with_inputs( input_layer, pattern_, is_head_node=False) if not match_layer_node: return None input_match_layer_nodes.append(match_layer_node) return LayerNode(layer, self._get_layer_weights(layer['config']['name']), input_match_layer_nodes, self._get_layer_metadata(layer['config']['name']), self._get_layer_names_and_weights(layer['config']['name'])) def _find_pattern(self, pattern, matched_layers=None): for layer in self._config['layers']: if matched_layers and layer['config']['name'] in matched_layers: continue match_layer = self._match_layer_with_inputs( layer, pattern, is_head_node=True) if match_layer: return match_layer return None def _get_leaf_layers(self, match_layer): """Return leaf layers from this sub-graph tree.""" if not match_layer.input_layers: return [match_layer.layer] # If 2 different layers point to the same input, or if a layer uses the # same input multiple times, the input layer can be repeated. But it # preserves a bit of structure. leaf_layers = [] for inp in match_layer.input_layers: leaf_layers.extend(self._get_leaf_layers(inp)) return leaf_layers def _get_layer_names(self, layer_node): result = [layer_node.layer['config']['name']] for input_layer in layer_node.input_layers: result.extend(self._get_layer_names(input_layer)) return result def _remove_layers(self, layers_to_remove, layers_to_remove_names): # Remove layers. for layer_to_remove in layers_to_remove: self._config['layers'].remove(layer_to_remove) # Remove entry from weight and metadata maps, # now that layer has been removed. 
for layer_name in layers_to_remove_names: self._layer_weights_map.pop(layer_name, None) self._layer_names_and_weights_map.pop(layer_name, None) self._layer_metadata_map.pop(layer_name, None) def _replace(self, match_layer_node, replacement_layer_node): """Replace the tree or chain of match_layer_node with replacement_layer_node.""" if self._is_functional_model(self.model): self._replace_functional(match_layer_node, replacement_layer_node) else: self._replace_sequential(match_layer_node, replacement_layer_node) def _replace_functional(self, match_layer_node, replacement_layer_node): """Functional model: replace the tree of match_layer_node with replacement_layer_node.""" # 1. Point all consumers of the head of the matching sub-tree to the head # replacement layer. # # There are some assumptions baked in. The head layer only has 1 inbound and # outbound node. The resulting number and shape of tensors from the # replaced layer should equal the original layer. consuming_layers = self._get_consuming_layers(match_layer_node.layer) match_name = match_layer_node.layer['config']['name'] replacement_name = replacement_layer_node.layer['config']['name'] def _replace_layer_name_for_connection_info(connection_info, match_name, replacement_name): if connection_info[0] == match_name: connection_info[0] = replacement_name for key in connection_info[3]: if isinstance(connection_info[3][key], list): if connection_info[3][key][0] == match_name: connection_info[3][key][0] = replacement_name for consumer in consuming_layers: for inbound_node in self._inbound_node_generator(consumer): if isinstance(inbound_node, dict): inbound_node = inbound_node.values() for connection_info in inbound_node: _replace_layer_name_for_connection_info(connection_info, match_name, replacement_name) output_consumers = self._get_output_consumers(match_layer_node.layer) for output_consumer in output_consumers: output_consumer[0] = replacement_layer_node.layer['config']['name'] # 2. Create inbound nodes for the replacement layers. This connects all # the replacement layers. def _assign_inbounds_for_replacement(layer_node): """_assign_inbounds_for_replacement.""" if not layer_node.input_layers: return layer_node.layer['inbound_nodes'] = [[]] for input_layer in layer_node.input_layers: # inbound_nodes can be specific tensors from multiple inbound # connections. We make the following assumptions. # - Only 1 inbound node for each replacement layer. # - Only 1 tensor output from the previous layer which is connected. # - call() method during construction does not have any args. # These are reasonable assumptions for almost all case we are # interested in. layer_node.layer['inbound_nodes'][0].append( [input_layer.layer['config']['name'], 0, 0, {}]) _assign_inbounds_for_replacement(input_layer) _assign_inbounds_for_replacement(replacement_layer_node) # 3. Connect the leaves of the replacement_layers to the inbound_nodes of # the leaves in the original layer. original_leaf_layers = self._get_leaf_layers(match_layer_node) original_inbound_nodes = [ layer['inbound_nodes'] for layer in original_leaf_layers ] replacement_leaf_layers = self._get_leaf_layers(replacement_layer_node) # The original pattern and the replacement pattern can potentially have # different number of leaf nodes and differences in how they consume these # input layers. Matching them will require sophisticated hackery to recreate # the new layers with the original input structure. # Given our existing transforms, we can assume they match. 
if len(original_leaf_layers) != len(replacement_leaf_layers): raise RuntimeError('Different size of leaf layers not supported yet.') for original_inbound_nodes, replacement_leaf_layer in zip( original_inbound_nodes, replacement_leaf_layers): replacement_leaf_layer['inbound_nodes'] = original_inbound_nodes # 4. Remove the original matched layers layers_to_remove_names = self._get_layer_names(match_layer_node) layers_to_remove = self._get_layers(layers_to_remove_names) self._remove_layers(layers_to_remove, layers_to_remove_names) # 5. Add in the new layers. def _add_replacement_layer(layer_node): """Recursively add new layers.""" self._config['layers'].append(layer_node.layer) layer_name = layer_node.layer['config']['name'] # TODO(b/184603494): Remove weight map structure from model_transformer. if layer_node.weights: self._layer_weights_map[layer_name] = layer_node.weights if layer_node.names_and_weights: self._layer_names_and_weights_map[ layer_name] = layer_node.names_and_weights if layer_node.metadata: self._layer_metadata_map[layer_name] = layer_node.metadata if self.candidate_layers: self.candidate_layers.add(layer_name) for input_layer in layer_node.input_layers: _add_replacement_layer(input_layer) _add_replacement_layer(replacement_layer_node) def _replace_sequential(self, match_layer_node, replacement_layer_node): """Sequential model: replace the chain of match_layer_node with replacement_layer_node.""" # 1. Remove the original matched layers. layers_to_remove_names = self._get_layer_names(match_layer_node) layers_to_remove = self._get_layers(layers_to_remove_names) # These variables are needed when adding the new layers # and must be set before _remove_layers removes them. first_layer_removed = layers_to_remove[0] first_layer_removed_index = self._config['layers'].index( first_layer_removed) self._remove_layers(layers_to_remove, layers_to_remove_names) # 2. Add in the new layers. def _get_replacement_nodes(replacement_node): """Get list of replacement nodes in Sequential order.""" replacement_nodes = [] for input_layer in replacement_node.input_layers: replacement_nodes.extend(_get_replacement_nodes(input_layer)) replacement_nodes.append(replacement_node) return replacement_nodes def _add_replacement_nodes(first_layer_removed_index, replacement_nodes): """Add replacement nodes to Sequential model.""" # Potentially insert nodes into middle of model. i = first_layer_removed_index for replacement_node in replacement_nodes: self._config['layers'].insert(i, replacement_node.layer) layer_name = replacement_node.layer['config']['name'] if replacement_node.weights: self._layer_weights_map[layer_name] = replacement_node.weights if replacement_node.names_and_weights: self._layer_names_and_weights_map[ layer_name] = replacement_node.names_and_weights if replacement_node.metadata: self._layer_metadata_map[layer_name] = replacement_node.metadata if self.candidate_layers: self.candidate_layers.add(layer_name) i += 1 replacement_nodes = _get_replacement_nodes(replacement_layer_node) _add_replacement_nodes(first_layer_removed_index, replacement_nodes) def _weight_name(self, name): """Extracts the weight name by removing layer from TF variable name. For example, returns 'kernel:0' for 'dense_2/kernel:0'. Args: name: TensorFlow variable name. Returns: Extracted weight name. """ return name.split('/')[-1] def _get_keras_layer_weights(self, keras_layer): """Returns a map of weight name, weight matrix. 
Keeps keras ordering.""" weights_map = collections.OrderedDict() for weight_tensor, weight_numpy in \ zip(keras_layer.weights, keras_layer.get_weights()): weights_map[self._weight_name(weight_tensor.name)] = weight_numpy if len(weights_map) != len(keras_layer.weights): # The case that variable identifier is not unique. It's a fallback that # uses weight list instead of the weights map. return None return weights_map def _get_keras_layer_names_and_weights(self, keras_layer): return zip([weight.name for weight in keras_layer.weights], keras_layer.get_weights()) def
(self, layer, weights_map): """Sets the values of weights in a Keras layer.""" weight_value_tuples = [] for weight_tensor in layer.weights: weight_name = self._weight_name(weight_tensor.name) if weight_name in weights_map: weight_value_tuples.append((weight_tensor, weights_map[weight_name])) K.batch_set_value(weight_value_tuples) def _set_layer_names_and_weights(self, layer, names_and_weights): layer.set_weights([weight for _, weight in names_and_weights]) def _name(self, obj): return obj.__class__.__name__ def _get_matched_layers(self, transform): return self._transform_matched_layers_map.get(self._name(transform), []) def _store_successful_match(self, transform, layer_node): if self._name(transform) not in self._transform_matched_layers_map: self._transform_matched_layers_map[self._name(transform)] = [] self._transform_matched_layers_map[self._name(transform)].append( layer_node.layer['config']['name']) def transform(self): """Transforms the Keras model by applying all the specified transforms. This is the main entry point function used to apply the transformations to the Keras model. Not suitable for multi-threaded use. Creates and manipulates internal state. Returns: (Keras model after transformation, Updated layer metadata map) """ # Gets a serialized dict representation of the model, containing all its # layers, their connections and configuration. This is the main structure # which is used to understand model structure, and also manipulate it. # # config = { # 'input_layers': [ ... ], # 'layers': [{ # 'inbound_nodes': [INPUT CONFIG OF LAYER], # 'name': 'LAYER_NAME', # 'config': { LAYER_CONFIG } # }, { # ... # }], # 'output_layers': [ ... ], # 'name': 'MODEL_NAME', # # Ensures old Keras serialization format self.model.use_legacy_config = True self._config = self.model.get_config() # Stores map of Transform -> List of layer names matched by transform. # Same transform should not match+replace the same layer more than once # to prevent infinite loops. self._transform_matched_layers_map = {} self._layer_weights_map = {} self._layer_names_and_weights_map = {} for layer in self.model.layers: self._layer_weights_map[layer.name] = self._get_keras_layer_weights(layer) self._layer_names_and_weights_map[ layer.name] = self._get_keras_layer_names_and_weights(layer) # Maintains a current mutable copy of the metadata through transformation. self._layer_metadata_map = copy.deepcopy(self.layer_metadata) # We run an infinite loop and keep applying transformations as long as # patterns are found. This allows recursive pattern matching where a # modification by one transform may lead to another match. # # TODO(pulkitb): This leads to infinite loops with poor patterns which may # match their replacement. Add counters with limits to fix it. while True: match_found = False for transform in self.transforms: # A transform may find multiple instances of a pattern in the model. # Keep finding and replacing till done. while True: match_layer_node = self._find_pattern( transform.pattern(), self._get_matched_layers(transform)) # Pattern did not match any layer. Move to next transform. if not match_layer_node: break self._store_successful_match(transform, match_layer_node) # Copying the match_layer_node ensures the replacement code can # freely modify the match. replacement_layer_node = transform.replacement( copy.deepcopy(match_layer_node)) # If equal, the matched layers are being replaced with exactly the # same set of layers that were matched with the same config. 
          # For Transforms that may inadvertently do this, we can end up in
          # an infinite loop. Move on if no meaningful change has been made.
          if match_layer_node == replacement_layer_node:
            continue

          match_found = True
          self._replace(match_layer_node, replacement_layer_node)

      # None of the transforms found a pattern. We can stop now.
      if not match_found:
        break

    custom_objects = {}
    for transform in self.transforms:
      custom_objects.update(transform.custom_objects())

    # Reconstruct model from the config, using the cloned layers.
    if self._is_functional_model(self.model):
      transformed_model = keras.Model.from_config(self._config, custom_objects)
    else:
      transformed_model = keras.Sequential.from_config(self._config,
                                                       custom_objects)

    for layer in transformed_model.layers:
      weights = self._layer_weights_map.get(layer.name)
      if weights:
        self._set_layer_weights(layer, weights)
      else:
        names_and_weights = self._layer_names_and_weights_map.get(layer.name)
        if names_and_weights:
          self._set_layer_names_and_weights(layer, names_and_weights)

    # Ensures old Keras serialization format
    transformed_model.use_legacy_config = True
    return transformed_model, copy.deepcopy(self._layer_metadata_map)
_set_layer_weights
identifier_name
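The identifier filled in above is the weight-restoring half of the naming convention in _weight_name. A standalone sketch of that round trip follows, assuming a TF version whose variable names carry the 'layer/weight:0' prefix; when they do not, the code above falls back to the names_and_weights path.

import tensorflow as tf

def weight_name(name):
  return name.split('/')[-1]  # 'dense_2/kernel:0' -> 'kernel:0'

layer = tf.keras.layers.Dense(3)
layer.build((None, 4))

# Short-name -> value map, then one batched assignment, mirroring
# _get_keras_layer_weights and _set_layer_weights.
weights_map = {weight_name(w.name): v
               for w, v in zip(layer.weights, layer.get_weights())}
tf.keras.backend.batch_set_value(
    [(w, weights_map[weight_name(w.name)]) for w in layer.weights])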
generate_samples.py
import bpy
from math import radians, pi, sin, cos
import random
import os, glob
from scipy import ndimage, misc
from skimage import draw, color
import numpy as np

def getChildren(objs):
    ## returns children of a blender object
    result = []
    for o in bpy.data.objects:
        p = o.parent
        if p:
            for target in objs:
                if p.name == target.name:
                    result.append(o)
                    break
    return result

def place_objects_rand_on_bg(objects, img_list, step, tree_nodes): # not used in current version
    ## load background images and adjust blender scene
    # bg_img_path = img_list[random.randint(0, len(img_list)-1)] # randomized images
    bg_img_path = img_list[step % len(img_list)-1] # unrandomized
    print(bg_img_path)
    tree_nodes["Image"].image = bpy.data.images.load(bg_img_path)
    bg_size = np.asarray(bpy.data.images[bg_img_path.split('/')[-1]].size[:], dtype='float64')
    print(bg_size)
    bpy.data.scenes["Scene"].render.resolution_x = bg_size[0]
    bpy.data.scenes["Scene"].render.resolution_y = bg_size[1]

    # random list of object positions
    OD = bpy.data.worlds["World"]["obj_distance"]
    object_positions = [(OD,0),(OD,OD),(0,OD),(-OD,OD),(-OD,0),(-OD,-OD),(0,-OD),(OD,-OD),(0,0)]
    random.shuffle(object_positions)
    object_positions = np.asarray(object_positions, 'float32')

    # hide all objects
    for obj in objects:
        obj.hide_render = True
        # obj.hide = True
    for child in getChildren(objects):
        child.hide_render = True

    # choose objects from the list; make visible, move and rotate
    objects_order = list(range(len(objects)))
    random.shuffle(objects_order)
    n_objects = random.randint(1, len(objects)-1)
    obj_center = np.asarray([0,0], 'float32')
    for i in range(n_objects):
        obj = objects[objects_order[i]]
        ## unhide
        obj.hide_render = False
        for child in getChildren([obj]):
            child.hide_render = False
        ## random rotation
        rotation_min = obj['rotation_range'][0]
        rotation_range = obj['rotation_range'][1] - rotation_min
        rotation_angle = rotation_min + rotation_range*random.random()
        obj.rotation_euler[2] = radians(rotation_angle)
        ## position
        pos = object_positions[i]
        obj.location[0] = pos[0]
        obj.location[1] = pos[1]
        obj_center += pos
    # center_obj = objects[objects_order[0]]
    obj_center /= n_objects

    # Ground Texture to background for more realistic reflections
    bpy.data.images["ground.jpg"].filepath = bg_img_path

    # the chosen background path is returned as well so main() can hand it to save_label
    return obj_center, bg_size, bg_img_path

def cyclic_arrangement(objects, camera, cam_dist, step, step_count, img_list):
    ## hides, reveals and rotates the objects and moves the camera so every object is visible in the same number of images from a diverse range of viewpoints
    MAX_CAM_STEPS = 20 ## the max number of camera steps to go from the lowest to the highest position and start again.
lowest and highest position are defined by cam_pos_range BACKGROUND_REFLECTIONS = True # Image texture onto background for more realistic reflections if BACKGROUND_REFLECTIONS: bg_img_path = img_list[step % len(img_list)-1] # unrandomized bpy.data.images["ground.jpg"].filepath = bg_img_path # hide all objects for obj in objects: obj.hide_render = True # obj.hide = True for child in getChildren(objects): child.hide_render = True # get the current object when every object should be visible in the same number of render steps # (step_count/len(objects) is the number of steps for each object) steps_per_obj = step_count/len(objects) obj = objects[int(step/steps_per_obj)] # visibility and rotation of object obj.hide_render = False for child in getChildren([obj]): child.hide_render = False # obj.hide = False rotation_min = obj['rotation_range'][0] rotation_range = obj['rotation_range'][1] - rotation_min rotation_angle = rotation_min + (step % steps_per_obj) * (rotation_range / steps_per_obj) obj.rotation_euler[2] = radians(rotation_angle) # cam placement cam_steps = min(steps_per_obj, MAX_CAM_STEPS) cam_pos_min = obj['cam_pos_range'][0] cam_pos_range = obj['cam_pos_range'][1] - cam_pos_min camera.location[0] = cam_dist*cos(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps)) camera.location[1] = 0 camera.location[2] = cam_dist*sin(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps)) def random_cam_placement(camera, focus, target_obj): # not used in current version ## places the camera randomly x_range = camera["x_range"] #[.6,3.0] y_range = camera["y_range"] #[-0.25,0.25] z_range = camera["z_range"] #[.45,1.60] # x_rot_range = camera["x_rot_range"] #[-0.15,0.15] # y_rot_range = camera["y_rot_range"] #[-0.15,0.15] # z_rot_range = camera["z_rot_range"] #[-0.15,0.15] rand_pos = np.random.rand(3) # rand_rot = np.random.rand(3) camera.location[0] =x_range[1]-(x_range[1]-x_range[0])*rand_pos[0] camera.location[1] =y_range[1]-(y_range[1]-y_range[0])*rand_pos[1] camera.location[2] =z_range[1]-(z_range[1]-z_range[0])*rand_pos[2] # place the camera target target_obj.location[0] = focus[0] target_obj.location[1] = focus[1] def shape_key_adjustments(objects): ## iterates all shape keys of all objects and sets them to a random value for obj in objects: if obj.data.shape_keys: keys = obj.data.shape_keys.key_blocks if len(keys): for i, k in enumerate(keys): if i: k.value = random.random() def texture_adjustments(): ## iterates all materials in the blender file and applies random adjustments based on naming conventions textures = bpy.data.materials #['rand_plastic'] for t in textures: # random color for rand if "rand" in t.name: tex_nodes = t.node_tree.nodes for n in tex_nodes: if "RGB" in n.name: if random.random() < .3: #30% chance for grey else random color
else: n.outputs[0].default_value = [random.random(), random.random(), random.random(), 1] if "rand" in n.name: n.outputs[0].default_value = random.random() if "switch" in n.name: n.outputs[0].default_value = random.randint(0,1) # random shift for shift if "shift" in t.name: tex_nodes = t.node_tree.nodes for n in tex_nodes: if "Mapping" in n.name: n.translation = [random.random()*10,random.random()*10,0] # random mix for mix if "mix" in t.name: tex_nodes = t.node_tree.nodes for n in tex_nodes: if "noise_mix" in n.name: n.inputs[0].default_value = .35 + random.random()*.65 def save_label(output_path, bg_size, objects, bg_img_path=None, read_classes=True, segmentation=False): if segmentation: ## Save the segmentation image classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[0],bg_size[1],-1) # One value per pixel classImg = classImg[::-1,:,0] # classImg = np.array( [ [ pixel[0] for pixel in row ] for row in classImg ] ) if not os.path.exists(output_path): os.makedirs(output_path) misc.toimage(classImg, cmin=0, cmax=255).save(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png') # misc.imsave(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png', classImg) else: f_label = open(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.txt', 'w') if read_classes and os.path.isfile(bg_img_path[:-4] + '.txt'): bg_annotation = open(bg_img_path[:-4] + '.txt', 'r') f_label.write(bg_annotation.read()) bg_annotation.close() classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[1],bg_size[0],-1) # One value per pixel classImg = classImg[::-1,:,0] # YOLO style boundingboxes for i in range(len(objects)): # Finding non zero values mask = (classImg == i+1) rows = np.any(mask, axis=1) cols = np.any(mask, axis=0) if rows.any(): # min and max indices for bounding box ymin, ymax = np.where(rows)[0][[0, -1]] xmin, xmax = np.where(cols)[0][[0, -1]] print(ymin, ymax, xmin, xmax) x = ((xmin + xmax)/2)/ bg_size[0] width = (xmax - xmin) / bg_size[0] y = ((ymin + ymax)/2)/ bg_size[1] height = (ymax - ymin) / bg_size[1] if (width*height)>.005: print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height)) print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height), file = f_label) f_label.close() def main(): # SEGMENTATION = bpy.context.scene.sg_label_mode == "sgSegment" # RENDER_CROPPED = bpy.context.scene.sg_render_mode == "sgCropped" SEGMENTATION = False RENDER_CROPPED = True cam = bpy.context.scene.sg_cam objects = bpy.context.scene.sg_objectGroup.objects # objects = [bpy.data.objects[obj] for obj in objectsList] sun = bpy.context.scene.sg_sun ground = bpy.context.scene.sg_ground lamp_sun = bpy.data.lamps[sun.name] cam_target = bpy.context.scene.sg_cam_target tree_nodes = bpy.context.scene.node_tree.nodes bg_size = bpy.context.scene.sg_img_size cam_dist = bpy.context.scene.sg_cam_dist compositing_node_group = bpy.data.scenes["Scene"].node_tree step_count = bpy.context.scene.sg_nSamples bg_path = bpy.context.scene.sg_backgroundPath.replace("//","") output_path = tree_nodes['File Output'].base_path.replace('//','./') img_list = sorted(glob.glob(bg_path+"*.png")+glob.glob(bg_path+"*.jpg")) if RENDER_CROPPED: output_path += 'rgba/' elif SEGMENTATION: output_path+="SegmentationClass/" else: output_path+="rgb/" # Initial settings ground.cycles.is_shadow_catcher = True for i, o in enumerate(objects): if not 'class' in o: print(o.name, 'has no class yet. 
Set to 1.')
            o["class"] = 1
        if not 'rotation_range' in o:
            print(o.name, 'has no rotation range yet. Set to [0,360].')
            o["rotation_range"] = (0,360)
        if not 'cam_pos_range' in o:
            print(o.name, 'has no camera position range yet. Set to [0,90].')
            o["cam_pos_range"] = (0,90)
        o.pass_index = i + 1

    ## adjustments for cropped rendering
    if RENDER_CROPPED:
        ## place objects in the center of the scene
        for o in objects:
            o.location[0] = 0
            o.location[1] = 0
        cam_target.location[0] = 0
        cam_target.location[1] = 0
        bpy.data.scenes["Scene"].render.resolution_x = bg_size[0]
        bpy.data.scenes["Scene"].render.resolution_y = bg_size[1]
        c_nodes = compositing_node_group.nodes
        ## remove all links from render layers
        for out in c_nodes["Render Layers"].outputs:
            for l in out.links:
                compositing_node_group.links.remove(l)
        compositing_node_group.links.new(c_nodes["Render Layers"].outputs["Image"], c_nodes["File Output"].inputs[3])
        compositing_node_group.links.new(c_nodes["Render Layers"].outputs["IndexOB"], c_nodes["Viewer"].inputs[0])

    for step in range(0, step_count):
        if RENDER_CROPPED:
            cyclic_arrangement(objects, cam, cam_dist, step, step_count, img_list)
        else:
            # Object placement; place_objects_rand_on_bg also returns the
            # chosen background path, which save_label needs below
            obj_center, bg_size, bg_img_path = place_objects_rand_on_bg(objects, img_list, step, tree_nodes)
            # random camera position
            random_cam_placement(cam, obj_center, cam_target)

        # random changes to textures
        texture_adjustments()
        # random changes to shape keys
        shape_key_adjustments(objects)

        # randomize light angle
        sun.rotation_euler[2] = random.random()*pi
        sun.rotation_euler[1] = random.random()*pi/2
        # randomize light strength
        sun.data.node_tree.nodes['Emission'].inputs[1].default_value = random.random()*7 + .8
        lamp_sun.shadow_soft_size = random.random()*.3+.015

        ## Rendering
        bpy.ops.render.render(write_still=True)

        # save Label
        if RENDER_CROPPED:
            save_label(output_path, bg_size, objects, read_classes=False, segmentation=SEGMENTATION)
        else:
            save_label(output_path, bg_size, objects, bg_img_path=bg_img_path, segmentation=SEGMENTATION)

        bpy.data.scenes['Scene'].frame_current += 1

    # hide all objects again
    for obj in objects:
        obj.hide_render = True

    bpy.data.scenes['Scene'].frame_current = 0

if __name__ == "__main__":
    main()
c = random.random()
                        n.outputs[0].default_value = [c, c, c, 1]
conditional_block
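The middle above completes the grey branch of the texture color randomization. Restated below without bpy so the rule is testable on its own; the random_rgba helper name is mine, while the 30% grey chance comes from the source.

import random

def random_rgba(grey_chance=0.3):
    # 30% chance for grey, else a fully random color (alpha fixed at 1).
    if random.random() < grey_chance:
        c = random.random()
        return [c, c, c, 1]
    return [random.random(), random.random(), random.random(), 1]

print(random_rgba())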
generate_samples.py
import bpy
from math import radians, pi, sin, cos
import random
import os, glob
from scipy import ndimage, misc
from skimage import draw, color
import numpy as np

def getChildren(objs):
    ## returns children of a blender object
    result = []
    for o in bpy.data.objects:
        p = o.parent
        if p:
            for target in objs:
                if p.name == target.name:
                    result.append(o)
                    break
    return result

def place_objects_rand_on_bg(objects, img_list, step, tree_nodes): # not used in current version
    ## load background images and adjust blender scene
    # bg_img_path = img_list[random.randint(0, len(img_list)-1)] # randomized images
    bg_img_path = img_list[step % len(img_list)-1] # unrandomized
    print(bg_img_path)
    tree_nodes["Image"].image = bpy.data.images.load(bg_img_path)
    bg_size = np.asarray(bpy.data.images[bg_img_path.split('/')[-1]].size[:], dtype='float64')
    print(bg_size)
    bpy.data.scenes["Scene"].render.resolution_x = bg_size[0]
    bpy.data.scenes["Scene"].render.resolution_y = bg_size[1]

    # random list of object positions
    OD = bpy.data.worlds["World"]["obj_distance"]
    object_positions = [(OD,0),(OD,OD),(0,OD),(-OD,OD),(-OD,0),(-OD,-OD),(0,-OD),(OD,-OD),(0,0)]
    random.shuffle(object_positions)
    object_positions = np.asarray(object_positions, 'float32')

    # hide all objects
    for obj in objects:
        obj.hide_render = True
        # obj.hide = True
    for child in getChildren(objects):
        child.hide_render = True

    # choose objects from the list; make visible, move and rotate
    objects_order = list(range(len(objects)))
    random.shuffle(objects_order)
    n_objects = random.randint(1, len(objects)-1)
    obj_center = np.asarray([0,0], 'float32')
    for i in range(n_objects):
        obj = objects[objects_order[i]]
        ## unhide
        obj.hide_render = False
        for child in getChildren([obj]):
            child.hide_render = False
        ## random rotation
        rotation_min = obj['rotation_range'][0]
        rotation_range = obj['rotation_range'][1] - rotation_min
        rotation_angle = rotation_min + rotation_range*random.random()
        obj.rotation_euler[2] = radians(rotation_angle)
        ## position
        pos = object_positions[i]
        obj.location[0] = pos[0]
        obj.location[1] = pos[1]
        obj_center += pos
    # center_obj = objects[objects_order[0]]
    obj_center /= n_objects

    # Ground Texture to background for more realistic reflections
    bpy.data.images["ground.jpg"].filepath = bg_img_path

    # the chosen background path is returned as well so main() can hand it to save_label
    return obj_center, bg_size, bg_img_path

def cyclic_arrangement(objects, camera, cam_dist, step, step_count, img_list):
    ## hides, reveals and rotates the objects and moves the camera so every object is visible in the same number of images from a diverse range of viewpoints
    MAX_CAM_STEPS = 20 ## the max number of camera steps to go from the lowest to the highest position and start again.
lowest and highest position are defined by cam_pos_range BACKGROUND_REFLECTIONS = True # Image texture onto background for more realistic reflections if BACKGROUND_REFLECTIONS: bg_img_path = img_list[step % len(img_list)-1] # unrandomized bpy.data.images["ground.jpg"].filepath = bg_img_path # hide all objects for obj in objects: obj.hide_render = True # obj.hide = True for child in getChildren(objects): child.hide_render = True # get the current object when every object should be visible in the same number of render steps # (step_count/len(objects) is the number of steps for each object) steps_per_obj = step_count/len(objects) obj = objects[int(step/steps_per_obj)] # visibility and rotation of object obj.hide_render = False for child in getChildren([obj]): child.hide_render = False # obj.hide = False rotation_min = obj['rotation_range'][0] rotation_range = obj['rotation_range'][1] - rotation_min rotation_angle = rotation_min + (step % steps_per_obj) * (rotation_range / steps_per_obj) obj.rotation_euler[2] = radians(rotation_angle) # cam placement cam_steps = min(steps_per_obj, MAX_CAM_STEPS) cam_pos_min = obj['cam_pos_range'][0] cam_pos_range = obj['cam_pos_range'][1] - cam_pos_min camera.location[0] = cam_dist*cos(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps)) camera.location[1] = 0 camera.location[2] = cam_dist*sin(radians(cam_pos_min + (step%cam_steps)*cam_pos_range/cam_steps)) def random_cam_placement(camera, focus, target_obj): # not used in current version ## places the camera randomly x_range = camera["x_range"] #[.6,3.0] y_range = camera["y_range"] #[-0.25,0.25] z_range = camera["z_range"] #[.45,1.60] # x_rot_range = camera["x_rot_range"] #[-0.15,0.15] # y_rot_range = camera["y_rot_range"] #[-0.15,0.15] # z_rot_range = camera["z_rot_range"] #[-0.15,0.15] rand_pos = np.random.rand(3) # rand_rot = np.random.rand(3) camera.location[0] =x_range[1]-(x_range[1]-x_range[0])*rand_pos[0] camera.location[1] =y_range[1]-(y_range[1]-y_range[0])*rand_pos[1] camera.location[2] =z_range[1]-(z_range[1]-z_range[0])*rand_pos[2] # place the camera target target_obj.location[0] = focus[0] target_obj.location[1] = focus[1] def shape_key_adjustments(objects): ## iterates all shape keys of all objects and sets them to a random value for obj in objects: if obj.data.shape_keys: keys = obj.data.shape_keys.key_blocks if len(keys): for i, k in enumerate(keys): if i: k.value = random.random() def texture_adjustments(): ## iterates all materials in the blender file and applies random adjustments based on naming conventions textures = bpy.data.materials #['rand_plastic'] for t in textures: # random color for rand if "rand" in t.name: tex_nodes = t.node_tree.nodes for n in tex_nodes: if "RGB" in n.name: if random.random() < .3: #30% chance for grey else random color c=random.random() n.outputs[0].default_value = [c, c, c, 1] else: n.outputs[0].default_value = [random.random(), random.random(), random.random(), 1] if "rand" in n.name: n.outputs[0].default_value = random.random() if "switch" in n.name: n.outputs[0].default_value = random.randint(0,1) # random shift for shift if "shift" in t.name: tex_nodes = t.node_tree.nodes for n in tex_nodes: if "Mapping" in n.name: n.translation = [random.random()*10,random.random()*10,0] # random mix for mix if "mix" in t.name: tex_nodes = t.node_tree.nodes for n in tex_nodes: if "noise_mix" in n.name: n.inputs[0].default_value = .35 + random.random()*.65 def save_label(output_path, bg_size, objects, bg_img_path=None, read_classes=True, segmentation=False): 
if segmentation: ## Save the segmentation image classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[0],bg_size[1],-1) # One value per pixel classImg = classImg[::-1,:,0] # classImg = np.array( [ [ pixel[0] for pixel in row ] for row in classImg ] ) if not os.path.exists(output_path): os.makedirs(output_path) misc.toimage(classImg, cmin=0, cmax=255).save(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png') # misc.imsave(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.png', classImg) else: f_label = open(output_path + str(bpy.data.scenes['Scene'].frame_current).zfill(4) + '.txt', 'w') if read_classes and os.path.isfile(bg_img_path[:-4] + '.txt'): bg_annotation = open(bg_img_path[:-4] + '.txt', 'r') f_label.write(bg_annotation.read()) bg_annotation.close() classImg = np.array(bpy.data.images['Viewer Node'].pixels[:]).reshape(bg_size[1],bg_size[0],-1) # One value per pixel classImg = classImg[::-1,:,0] # YOLO style boundingboxes for i in range(len(objects)): # Finding non zero values mask = (classImg == i+1) rows = np.any(mask, axis=1) cols = np.any(mask, axis=0) if rows.any(): # min and max indices for bounding box ymin, ymax = np.where(rows)[0][[0, -1]] xmin, xmax = np.where(cols)[0][[0, -1]] print(ymin, ymax, xmin, xmax) x = ((xmin + xmax)/2)/ bg_size[0] width = (xmax - xmin) / bg_size[0] y = ((ymin + ymax)/2)/ bg_size[1] height = (ymax - ymin) / bg_size[1] if (width*height)>.005: print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height)) print("{} {} {} {} {}".format(objects[i]['class'],x,y,width,height), file = f_label) f_label.close() def main(): # SEGMENTATION = bpy.context.scene.sg_label_mode == "sgSegment" # RENDER_CROPPED = bpy.context.scene.sg_render_mode == "sgCropped" SEGMENTATION = False RENDER_CROPPED = True cam = bpy.context.scene.sg_cam objects = bpy.context.scene.sg_objectGroup.objects # objects = [bpy.data.objects[obj] for obj in objectsList] sun = bpy.context.scene.sg_sun ground = bpy.context.scene.sg_ground lamp_sun = bpy.data.lamps[sun.name] cam_target = bpy.context.scene.sg_cam_target tree_nodes = bpy.context.scene.node_tree.nodes bg_size = bpy.context.scene.sg_img_size cam_dist = bpy.context.scene.sg_cam_dist compositing_node_group = bpy.data.scenes["Scene"].node_tree step_count = bpy.context.scene.sg_nSamples bg_path = bpy.context.scene.sg_backgroundPath.replace("//","") output_path = tree_nodes['File Output'].base_path.replace('//','./') img_list = sorted(glob.glob(bg_path+"*.png")+glob.glob(bg_path+"*.jpg")) if RENDER_CROPPED: output_path += 'rgba/' elif SEGMENTATION: output_path+="SegmentationClass/" else: output_path+="rgb/" # Initial settings ground.cycles.is_shadow_catcher = True for i, o in enumerate(objects): if not 'class' in o: print(o.name, 'has no class yet. Set to 1.') o["class"] = 1 if not 'rotation_range' in o:
o["cam_pos_range"] = (0,90) o.pass_index = i + 1 ## adjustments for cropped rendering if RENDER_CROPPED: ## place objects in the center of the scene for o in objects: o.location[0]=0 o.location[1]=0 cam_target.location[0]=0 cam_target.location[1]=0 bpy.data.scenes["Scene"].render.resolution_x = bg_size[0] bpy.data.scenes["Scene"].render.resolution_y = bg_size[1] c_nodes = compositing_node_group.nodes ## remove all links from render layers for out in c_nodes["Render Layers"].outputs: for l in out.links: compositing_node_group.links.remove(l) compositing_node_group.links.new(c_nodes["Render Layers"].outputs["Image"],c_nodes["File Output"].inputs[3]) compositing_node_group.links.new(c_nodes["Render Layers"].outputs["IndexOB"],c_nodes["Viewer"].inputs[0]) for step in range(0, step_count): if RENDER_CROPPED: cyclic_arangement(objects, cam, cam_dist, step, step_count, img_list) else: # Object placement obj_center, bg_size = place_objects_rand_on_bg(objects, img_list, step, tree_nodes) # random camera position random_cam_placement(cam, obj_center, cam_target) # random changes to textures texture_adjustments() # random changes to shape keys shape_key_adjustments(objects) # randomize light angle sun.rotation_euler[2] = random.random()*pi sun.rotation_euler[1] = random.random()*pi/2 # randomize light strength sun.data.node_tree.nodes['Emission'].inputs[1].default_value = random.random()*7 + .8 lamp_sun.shadow_soft_size = random.random()*.3+.015 ## Rendering bpy.ops.render.render( write_still=True ) # save Label if RENDER_CROPPED: save_label(output_path, bg_size, objects, read_classes=False, segmentation=SEGMENTATION) else: save_label(output_path, bg_size, objects, bg_img_path=bg_img_path, segmentation=SEGMENTATION) bpy.data.scenes['Scene'].frame_current += 1 # hide all objects again for obj in objects: obj.hide_render = True bpy.data.scenes['Scene'].frame_current = 0 if __name__ == "__main__": main()
            print(o.name, 'has no rotation range yet. Set to [0,360].')
            o["rotation_range"] = (0, 360)
        if 'cam_pos_range' not in o:
            print(o.name, 'has no camera position range yet. Set to [0,90].')
random_line_split
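The scheduling arithmetic in cyclic_arangement above (steps_per_obj, cam_steps, and the two modulo sweeps) is the core of the sampling strategy and easy to get wrong. Below is a minimal standalone sketch of the same index math, written in Go; the counts (stepCount=12, numObjects=3) and the rotation/camera ranges are illustrative assumptions, not values taken from the script.

package main

import "fmt"

func main() {
	const (
		stepCount   = 12 // total renders (sg_nSamples in the script above); assumed value
		numObjects  = 3  // objects in the render group; assumed value
		maxCamSteps = 20 // mirrors MAX_CAM_STEPS
	)
	rotMin, rotMax := 0.0, 360.0 // rotation_range of the current object (assumed)
	camMin, camMax := 0.0, 90.0  // cam_pos_range of the current object (assumed)

	// each object is visible for step_count/len(objects) consecutive renders
	stepsPerObj := float64(stepCount) / float64(numObjects)
	camSteps := stepsPerObj // camera sweeps its range in at most maxCamSteps steps
	if camSteps > maxCamSteps {
		camSteps = maxCamSteps
	}
	for step := 0; step < stepCount; step++ {
		obj := int(float64(step) / stepsPerObj) // index of the only visible object
		rot := rotMin + float64(step%int(stepsPerObj))*(rotMax-rotMin)/stepsPerObj
		cam := camMin + float64(step%int(camSteps))*(camMax-camMin)/camSteps
		fmt.Printf("step %2d: object %d, rotation %5.1f deg, camera elevation %4.1f deg\n",
			step, obj, rot, cam)
	}
}

Running the sketch shows each object receiving the same number of renders while its rotation and the camera elevation advance in lockstep, which is exactly the even-coverage property the function's comment promises.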
generate_samples.py
texture_adjustments
identifier_name
generate_samples.py
    for obj in objects:
        if obj.data.shape_keys:
            keys = obj.data.shape_keys.key_blocks
            if len(keys):
                for i, k in enumerate(keys):
                    if i:
                        k.value = random.random()
identifier_body
kogitoapp_types.go
// Copyright 2019 Red Hat, Inc. and/or its affiliates
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package v1alpha1

import (
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// KogitoAppCRDName is the name of the KogitoApp CRD in the cluster.
const KogitoAppCRDName = "kogitoapps.app.kiegroup.org"

// KogitoAppSpec defines the desired state of KogitoApp.
// +k8s:openapi-gen=true
type KogitoAppSpec struct {
	KogitoServiceSpec `json:",inline"`
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Runtime"
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:label"
	// +kubebuilder:validation:Enum=quarkus;springboot
	Runtime RuntimeType `json:"runtime,omitempty"`

	// S2I Build configuration.
	// Default value: nil
	Build *KogitoAppBuildObject `json:"build"`

	// Kubernetes Service configuration.
	// Default value: nil
	Service KogitoAppServiceObject `json:"service,omitempty"`

	// Annotates the pods managed by the operator with the required metadata for Istio to set up its sidecars, enabling the mesh. Defaults to false.
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Istio"
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
	EnableIstio bool `json:"enableIstio,omitempty"`

	// Set this property to true to tell the operator to deploy an instance of Infinispan via the Infinispan Operator and
	// configure this service to connect to the deployed server.
	// For the Quarkus runtime, it sets QUARKUS_INFINISPAN_CLIENT_* environment variables. For Spring Boot, these variables start with SPRING_INFINISPAN_CLIENT_*.
	// More info: https://github.com/kiegroup/kogito-cloud-operator#kogito-services.
	// Set to false or ignore it if your service does not need persistence or if you are going to configure the persistence infrastructure yourself.
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Persistence"
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
	EnablePersistence bool `json:"enablePersistence,omitempty"`

	// Set this property to true to tell the operator to deploy an instance of Kafka via the Strimzi Operator and configure this service with
	// the proper information to connect to the Kafka cluster.
	// The Kafka cluster service endpoint will be injected in the Kogito Service container via an environment variable named "KAFKA_BOOTSTRAP_SERVERS", e.g.: kafka-kogito:9092.
	// Set to false or ignore it if your service does not need messaging or if you are going to configure the messaging infrastructure yourself.
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Events"
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
	EnableEvents bool `json:"enableEvents,omitempty"`
}

// GetRuntime ...
func (k *KogitoAppSpec) GetRuntime() RuntimeType {
	return k.Runtime
}

// GetBuild ...
func (k *KogitoAppSpec) GetBuild() *KogitoAppBuildObject {
	if k == nil {
		return nil
	}
	return k.Build
}

// IsGitURIEmpty checks if the provided Git URI is empty or not.
func (k *KogitoAppSpec) IsGitURIEmpty() bool {
	if k == nil {
		return true
	}
	if k.Build == nil {
		return true
	}
	return len(k.Build.GitSource.URI) == 0
}

// KogitoAppBuildObject Data to define how to build an application from source.
// +k8s:openapi-gen=true
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Service Build"
type KogitoAppBuildObject struct {
	Incremental bool `json:"incremental,omitempty"`

	// Environment variables used during build time.
	// +listType=atomic
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Build Env Variables"
	Envs []corev1.EnvVar `json:"envs,omitempty"`

	// Information about the git repository where the Kogito App source code resides.
	// If set, the operator will use the source-to-image build strategy.
	// +optional
	GitSource GitSource `json:"gitSource,omitempty"`

	// WebHook secrets for build configs.
	// +listType=atomic
	// +optional
	Webhooks []WebhookSecret `json:"webhooks,omitempty"`

	// +optional
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Images Version"
	// Image version for the Kogito official images used during the build. E.g.: 0.6.0. Defaults to the current Operator version.
	ImageVersion string `json:"imageVersion,omitempty"`

	// Custom image used by the source-to-image process to build the Kogito Service binaries. Takes precedence over the ImageVersion attribute.
	// +optional
	// +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))`
	ImageS2ITag string `json:"imageS2ITag,omitempty"`

	// Custom image used by the source-to-image process to build the final Kogito Service image. Takes precedence over the ImageVersion attribute.
	// +optional
	// +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))`
	ImageRuntimeTag string `json:"imageRuntimeTag,omitempty"`

	// Native indicates if the Kogito Service built should be compiled to run on native mode when Runtime is Quarkus. For more information, see https://www.graalvm.org/docs/reference-manual/aot-compilation/.
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Native Build"
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
	Native bool `json:"native,omitempty"`

	// Resources for S2I builder pods.
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:resourceRequirements"
	Resources corev1.ResourceRequirements `json:"resources,omitempty"`

	// Internal Maven Mirror to be used during source-to-image builds to considerably increase build speed.
	MavenMirrorURL string `json:"mavenMirrorURL,omitempty"`

	// Artifact contains override information for building the Maven artifact.
	// +optional
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Final Artifact"
	Artifact Artifact `json:"artifact,omitempty"`

	// If set to true, prints the logs for downloading/uploading of Maven dependencies. Defaults to false.
	// +optional
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch"
	EnableMavenDownloadOutput bool `json:"enableMavenDownloadOutput,omitempty"`
}

// AddEnvironmentVariable adds a new environment variable to the build environment variables.
func (k *KogitoAppBuildObject) AddEnvironmentVariable(name, value string) {
	env := corev1.EnvVar{
		Name:  name,
		Value: value,
	}
	k.Envs = append(k.Envs, env)
}

// AddResourceRequest adds a new resource request. Works also on an uninitialized Requests field.
func (k *KogitoAppBuildObject) AddResourceRequest(name, value string) {
	if k.Resources.Requests == nil {
		k.Resources.Requests = corev1.ResourceList{}
	}
	k.Resources.Requests[corev1.ResourceName(name)] = resource.MustParse(value)
}

// AddResourceLimit adds a new resource limit. Works also on an uninitialized Limits field.
func (k *KogitoAppBuildObject) AddResourceLimit(name, value string) {
	if k.Resources.Limits == nil {
		k.Resources.Limits = corev1.ResourceList{}
	}
	k.Resources.Limits[corev1.ResourceName(name)] = resource.MustParse(value)
}

// KogitoAppServiceObject Data to define the service of the Kogito application.
// +k8s:openapi-gen=true
type KogitoAppServiceObject struct {
	// Labels for the application service.
	Labels map[string]string `json:"labels,omitempty"`
}

// GitSource Git coordinates to locate the source code to build.
// +k8s:openapi-gen=true
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Git Source"
type GitSource struct {
	// Git URI for the s2i source.
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git URI"
	URI string `json:"uri"`

	// Branch to use in the Git repository.
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Reference"
	Reference string `json:"reference,omitempty"`

	// Context/subdirectory where the code is located, relative to the repo root.
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Context"
	ContextDir string `json:"contextDir,omitempty"`
}

// WebhookType literal type to distinguish between different types of webhooks.
type WebhookType string

const (
	// GitHubWebhook GitHub webhook.
	GitHubWebhook WebhookType = "GitHub"
	// GenericWebhook Generic webhook.
	GenericWebhook WebhookType = "Generic"
)

// WebhookSecret Secret to use for a given webhook.
// +k8s:openapi-gen=true
type WebhookSecret struct {
	// WebHook type, either GitHub or Generic.
	// +kubebuilder:validation:Enum=GitHub;Generic
	Type WebhookType `json:"type,omitempty"`
	// Secret value for the webhook.
	Secret string `json:"secret,omitempty"`
}

// KogitoAppStatus defines the observed state of KogitoApp.
// +k8s:openapi-gen=true
type KogitoAppStatus struct {
	ConditionsMeta `json:",inline"`

	// External URL for the service.
	// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Route"
	// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.x-descriptors="urn:alm:descriptor:org.w3:link"
	Route string `json:"route,omitempty"`

	// History of service deployments status.
	// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Deployments"
	// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses"
	Deployments Deployments `json:"deployments"`

	// History of service builds status.
	// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true
	// +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Builds"
	Builds Builds `json:"builds"`
}

// RuntimeType - type of runtime the service is built with.
type RuntimeType string

const (
	// QuarkusRuntimeType - the service is built with the Quarkus runtime.
	QuarkusRuntimeType RuntimeType = "quarkus"
	// SpringbootRuntimeType - the service is built with the Spring Boot runtime.
	SpringbootRuntimeType RuntimeType = "springboot"
)

// Deployments ...
// +k8s:openapi-gen=true
type Deployments struct {
	// Deployments are ready to serve requests.
	// +listType=set
	Ready []string `json:"ready,omitempty"`
	// Deployments are starting.
	// +listType=set
	Starting []string `json:"starting,omitempty"`
	// Deployments are not starting and the next step is unclear.
	// +listType=set
	Stopped []string `json:"stopped,omitempty"`
	// Deployments failed.
	// +listType=set
	Failed []string `json:"failed,omitempty"`
}

// Builds ...
// +k8s:openapi-gen=true
type Builds struct {
	// Builds are being created.
	// +listType=set
	New []string `json:"new,omitempty"`
	// Builds are about to start running.
	// +listType=set
	Pending []string `json:"pending,omitempty"`
	// Builds are running.
	// +listType=set
	Running []string `json:"running,omitempty"`
	// Builds have executed and succeeded.
	// +listType=set
	Complete []string `json:"complete,omitempty"`
	// Builds have executed and failed.
	// +listType=set
	Failed []string `json:"failed,omitempty"`
	// Builds have been prevented from executing by an error.
	// +listType=set
	Error []string `json:"error,omitempty"`
	// Builds have been stopped from executing.
	// +listType=set
	Cancelled []string `json:"cancelled,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// KogitoApp is a project prescription running a Kogito service (it's meant to be used to Build and Deploy the application at the same time).
// KogitoApp is under deprecation, please use a combination of KogitoRuntime and KogitoBuild instead. See: https://issues.redhat.com/browse/KOGITO-1998
// +k8s:openapi-gen=true
// +kubebuilder:resource:path=kogitoapps,scope=Namespaced
// +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="Number of replicas set for this service"
// +kubebuilder:printcolumn:name="Runtime",type="string",JSONPath=".spec.runtime",description="Runtime used to build the service"
// +kubebuilder:printcolumn:name="Enable Persistence",type="boolean",JSONPath=".spec.enablePersistence",description="Indicates if persistence is enabled"
// +kubebuilder:printcolumn:name="Enable Events",type="boolean",JSONPath=".spec.enableEvents",description="Indicates if events is enabled"
// +kubebuilder:printcolumn:name="Image Version",type="string",JSONPath=".spec.build.imageVersion",description="Build image version"
// +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".status.route",description="External URI to access this service"
// +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Application"
// +operator-sdk:gen-csv:customresourcedefinitions.resources="DeploymentConfigs,apps.openshift.io/v1"
// +operator-sdk:gen-csv:customresourcedefinitions.resources="ImageStreams,image.openshift.io/v1"
// +operator-sdk:gen-csv:customresourcedefinitions.resources="BuildConfigs,build.openshift.io/v1"
// +operator-sdk:gen-csv:customresourcedefinitions.resources="Routes,route.openshift.io/v1"
// +operator-sdk:gen-csv:customresourcedefinitions.resources="ConfigMaps,v1"
// +operator-sdk:gen-csv:customresourcedefinitions.resources="Services,v1"
// +operator-sdk:gen-csv:customresourcedefinitions.resources="ServiceMonitors,monitoring.coreos.com/v1"
type KogitoApp struct {
	metav1.TypeMeta   `json:",inline"`
	metav1.ObjectMeta `json:"metadata,omitempty"`

	Spec   KogitoAppSpec   `json:"spec,omitempty"`
	Status KogitoAppStatus `json:"status,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// KogitoAppList contains a list of KogitoApp.
type KogitoAppList struct {
	metav1.TypeMeta `json:",inline"`
	metav1.ListMeta `json:"metadata,omitempty"`
	// +listType=atomic
	Items []KogitoApp `json:"items"`
}

func init() {
	SchemeBuilder.Register(&KogitoApp{}, &KogitoAppList{})
}

// Artifact contains override information for building the Maven artifact.
// +optional
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Final Artifact"
type Artifact struct {
	// Indicates the unique identifier of the organization or group that created the project.
	// +optional
	GroupID string `json:"groupId,omitempty"`
	// Indicates the unique base name of the primary artifact being generated.
	// +optional
	ArtifactID string `json:"artifactId,omitempty"`
	// Indicates the version of the artifact generated by the project.
	// +optional
	Version string `json:"version,omitempty"`
}
	// The name of the runtime used, either Quarkus or SpringBoot.
	// Default value: quarkus.
	// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true
random_line_split
kogitoapp_types.go
(name, value string) { if k.Resources.Limits == nil { k.Resources.Limits = corev1.ResourceList{} } k.Resources.Limits[corev1.ResourceName(name)] = resource.MustParse(value) } // KogitoAppServiceObject Data to define the service of the Kogito application. // +k8s:openapi-gen=true type KogitoAppServiceObject struct { // Labels for the application service. Labels map[string]string `json:"labels,omitempty"` } // GitSource Git coordinates to locate the source code to build. // +k8s:openapi-gen=true // +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Git Source" type GitSource struct { // Git URI for the s2i source. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git URI" URI string `json:"uri"` // Branch to use in the Git repository. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Reference" Reference string `json:"reference,omitempty"` // Context/subdirectory where the code is located, relative to the repo root. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Context" ContextDir string `json:"contextDir,omitempty"` } // WebhookType literal type to distinguish between different types of webhooks. type WebhookType string const ( // GitHubWebhook GitHub webhook. GitHubWebhook WebhookType = "GitHub" // GenericWebhook Generic webhook. GenericWebhook WebhookType = "Generic" ) // WebhookSecret Secret to use for a given webhook. // +k8s:openapi-gen=true type WebhookSecret struct { // WebHook type, either GitHub or Generic. // +kubebuilder:validation:Enum=GitHub;Generic Type WebhookType `json:"type,omitempty"` // Secret value for webhook Secret string `json:"secret,omitempty"` } // KogitoAppStatus defines the observed state of KogitoApp. // +k8s:openapi-gen=true type KogitoAppStatus struct { ConditionsMeta `json:",inline"` // External URL for the service. // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Route" // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.x-descriptors="urn:alm:descriptor:org.w3:link" Route string `json:"route,omitempty"` // History of service deployments status. // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Deployments" // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses" Deployments Deployments `json:"deployments"` // History of service builds status. // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Builds" Builds Builds `json:"builds"` } // RuntimeType - type of condition. type RuntimeType string const ( // QuarkusRuntimeType - The KogitoApp is deployed. QuarkusRuntimeType RuntimeType = "quarkus" // SpringbootRuntimeType - The KogitoApp is being provisioned. SpringbootRuntimeType RuntimeType = "springboot" ) // Deployments ... // +k8s:openapi-gen=true type Deployments struct { // Deployments are ready to serve requests. 
// +listType=set Ready []string `json:"ready,omitempty"` // Deployments are starting // +listType=set Starting []string `json:"starting,omitempty"` // Deployments are not starting and the next step is unclear. // +listType=set Stopped []string `json:"stopped,omitempty"` // Deployments failed // +listType=set Failed []string `json:"failed,omitempty"` } // Builds ... // +k8s:openapi-gen=true type Builds struct { // Builds are being created. // +listType=set New []string `json:"new,omitempty"` // Builds are about to start running. // +listType=set Pending []string `json:"pending,omitempty"` // Builds are running. // +listType=set Running []string `json:"running,omitempty"` // Builds have executed and succeeded. // +listType=set Complete []string `json:"complete,omitempty"` // Builds have executed and failed. // +listType=set Failed []string `json:"failed,omitempty"` // Builds have been prevented from executing by an error. // +listType=set Error []string `json:"error,omitempty"` // Builds have been stopped from executing. // +listType=set Cancelled []string `json:"cancelled,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KogitoApp is a project prescription running a Kogito service (it's meant to be used to Build and Deploy the application at the same time). // KogitoApp is under deprecation, please use a combination of KogitoRuntime and KogitoBuild instead. See: https://issues.redhat.com/browse/KOGITO-1998 // +k8s:openapi-gen=true // +kubebuilder:resource:path=kogitoapps,scope=Namespaced // +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="Number of replicas set for this service" // +kubebuilder:printcolumn:name="Runtime",type="string",JSONPath=".spec.runtime",description="Runtime used to build the service" // +kubebuilder:printcolumn:name="Enable Persistence",type="boolean",JSONPath=".spec.enablePersistence",description="Indicates if persistence is enabled" // +kubebuilder:printcolumn:name="Enable Events",type="boolean",JSONPath=".spec.enableEvents",description="Indicates if events is enabled" // +kubebuilder:printcolumn:name="Image Version",type="string",JSONPath=".spec.build.imageVersion",description="Build image version" // +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".status.route",description="External URI to access this service" // +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Application" // +operator-sdk:gen-csv:customresourcedefinitions.resources="DeploymentConfigs,apps.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="ImageStreams,image.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="BuildConfigs,build.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="Routes,route.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="ConfigMaps,v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="Services,v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="ServiceMonitors,monitoring.coreos.com/v1" type KogitoApp struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec KogitoAppSpec `json:"spec,omitempty"` Status KogitoAppStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KogitoAppList contains a list of KogitoApp. 
type KogitoAppList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` // +listType=atomic Items []KogitoApp `json:"items"` } func init() { SchemeBuilder.Register(&KogitoApp{}, &KogitoAppList{}) } // Artifact contains override information for building the Maven artifact. // +optional // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Final Artifact" type Artifact struct { // Indicates the unique identifier of the organization or group that created the project. // +optional GroupID string `json:"groupId,omitempty"` // Indicates the unique base name of the primary artifact being generated. // +optional ArtifactID string `json:"artifactId,omitempty"` // Indicates the version of the artifact generated by the project. // +optional Version string `json:"version,omitempty"` }
AddResourceLimit
identifier_name
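A minimal usage sketch of the resource helpers shown in this sample. The values are hypothetical and the import path is my assumption based on the repository referenced in the comments; it only illustrates how the two helpers are meant to be called:

package main

import (
	"fmt"

	// Assumed import path for the v1alpha1 package defined in this file.
	"github.com/kiegroup/kogito-cloud-operator/pkg/apis/app/v1alpha1"
)

func main() {
	build := &v1alpha1.KogitoAppBuildObject{}
	// Both helpers lazily initialize the underlying ResourceList,
	// so calling them on a zero-value struct is safe.
	build.AddResourceRequest("cpu", "500m")
	build.AddResourceLimit("memory", "1Gi") // resource.MustParse panics on malformed quantities
	fmt.Println(build.Resources)
}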
kogitoapp_types.go
// Copyright 2019 Red Hat, Inc. and/or its affiliates // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1alpha1 import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // KogitoAppCRDName is the name of the KogitoApp CRD in the cluster. const KogitoAppCRDName = "kogitoapps.app.kiegroup.org" // KogitoAppSpec defines the desired state of KogitoApp. // +k8s:openapi-gen=true type KogitoAppSpec struct { KogitoServiceSpec `json:",inline"` // The name of the runtime used, either Quarkus or SpringBoot. // Default value: quarkus. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Runtime" // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:label" // +kubebuilder:validation:Enum=quarkus;springboot Runtime RuntimeType `json:"runtime,omitempty"` // S2I Build configuration. // Default value: nil Build *KogitoAppBuildObject `json:"build"` // Kubernetes Service configuration. // Default value: nil Service KogitoAppServiceObject `json:"service,omitempty"` // Annotates the pods managed by the operator with the required metadata for Istio to set up its sidecars, enabling the mesh. Defaults to false. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Istio" // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch" EnableIstio bool `json:"enableIstio,omitempty"` // Set this property to true to tell the operator to deploy an instance of Infinispan via the Infinispan Operator and // configure this service to connect to the deployed server. // For Quarkus runtime, it sets QUARKUS_INFINISPAN_CLIENT_* environment variables. For Spring Boot, these variables start with SPRING_INFINISPAN_CLIENT_*. // More info: https://github.com/kiegroup/kogito-cloud-operator#kogito-services. // Set to false or ignore it if your service does not need persistence or if you are going to configure the persistence infrastructure yourself. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Persistence" // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch" EnablePersistence bool `json:"enablePersistence,omitempty"` // Set this property to true to tell the operator to deploy an instance of Kafka via the Strimzi Operator and configure this service with // the proper information to connect to the Kafka cluster. // The Kafka cluster service endpoint will be injected in the Kogito Service container via an environment variable named "KAFKA_BOOTSTRAP_SERVERS" e.g.: kafka-kogito:9092. 
// Set to false or ignore it if your service does not need messaging or if you are going to configure the messaging infrastructure yourself. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Events" // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch" EnableEvents bool `json:"enableEvents,omitempty"` } // GetRuntime ... func (k *KogitoAppSpec) GetRuntime() RuntimeType { return k.Runtime } // GetBuild ... func (k *KogitoAppSpec) GetBuild() *KogitoAppBuildObject { if k == nil { return nil } return k.Build } // IsGitURIEmpty checks if the provided Git URI is empty or not. func (k *KogitoAppSpec) IsGitURIEmpty() bool { if k == nil { return true } if k.Build == nil { return true } return len(k.Build.GitSource.URI) == 0 } // KogitoAppBuildObject Data to define how to build an application from source. // +k8s:openapi-gen=true // +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Service Build" type KogitoAppBuildObject struct { Incremental bool `json:"incremental,omitempty"` // Environment variables used during build time. // +listType=atomic // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Build Env Variables" Envs []corev1.EnvVar `json:"envs,omitempty"` // Information about the git repository where the Kogito App source code resides. // If set, the operator will use the source-to-image (S2I) build strategy. // +optional GitSource GitSource `json:"gitSource,omitempty"` // WebHook secrets for build configs. // +listType=atomic // +optional Webhooks []WebhookSecret `json:"webhooks,omitempty"` // +optional // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Images Version" // Image version for the Kogito official images used during the build. E.g.: 0.6.0. Defaults to the current Operator version. ImageVersion string `json:"imageVersion,omitempty"` // Custom image used by the source-to-image process to build the Kogito Service binaries. Takes precedence over the ImageVersion attribute. // +optional // +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))` ImageS2ITag string `json:"imageS2ITag,omitempty"` // Custom image used by the source-to-image process to build the final Kogito Service image. Takes precedence over the ImageVersion attribute. // +optional // +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))` ImageRuntimeTag string `json:"imageRuntimeTag,omitempty"` // Native indicates whether the Kogito Service build should be compiled to run in native mode when Runtime is Quarkus. For more information, see https://www.graalvm.org/docs/reference-manual/aot-compilation/. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Native Build" // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch" Native bool `json:"native,omitempty"` // Resources for S2I builder pods. 
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:resourceRequirements" Resources corev1.ResourceRequirements `json:"resources,omitempty"` // Internal Maven Mirror to be used during source-to-image builds to considerably increase build speed. MavenMirrorURL string `json:"mavenMirrorURL,omitempty"` // Artifact contains override information for building the Maven artifact. // +optional // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Final Artifact" Artifact Artifact `json:"artifact,omitempty"` // If set to true, prints the logs for downloading/uploading of Maven dependencies. Defaults to false. // +optional // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch" EnableMavenDownloadOutput bool `json:"enableMavenDownloadOutput,omitempty"` } // AddEnvironmentVariable adds a new environment variable to the build environment variables. func (k *KogitoAppBuildObject) AddEnvironmentVariable(name, value string) { env := corev1.EnvVar{ Name: name, Value: value, } k.Envs = append(k.Envs, env) } // AddResourceRequest adds a new resource request. Works also on an uninitialized Requests field. func (k *KogitoAppBuildObject) AddResourceRequest(name, value string) { if k.Resources.Requests == nil { k.Resources.Requests = corev1.ResourceList{} } k.Resources.Requests[corev1.ResourceName(name)] = resource.MustParse(value) } // AddResourceLimit adds a new resource limit. Works also on an uninitialized Limits field. func (k *KogitoAppBuildObject) AddResourceLimit(name, value string) { if k.Resources.Limits == nil
k.Resources.Limits[corev1.ResourceName(name)] = resource.MustParse(value) } // KogitoAppServiceObject Data to define the service of the Kogito application. // +k8s:openapi-gen=true type KogitoAppServiceObject struct { // Labels for the application service. Labels map[string]string `json:"labels,omitempty"` } // GitSource Git coordinates to locate the source code to build. // +k8s:openapi-gen=true // +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Git Source" type GitSource struct { // Git URI for the s2i source. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git URI" URI string `json:"uri"` // Branch to use in the Git repository. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Reference" Reference string `json:"reference,omitempty"` // Context/subdirectory where the code is located, relative to the repo root. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Context" ContextDir string `json:"contextDir,omitempty"` } // WebhookType literal type to distinguish between different types of webhooks. type WebhookType string const ( // GitHubWebhook GitHub webhook. GitHubWebhook WebhookType = "GitHub" // GenericWebhook Generic webhook. GenericWebhook WebhookType = "Generic" ) // WebhookSecret Secret to use for a given webhook. // +k8s:openapi-gen=true type WebhookSecret struct { // WebHook type, either GitHub or Generic. // +kubebuilder:validation:Enum=GitHub;Generic Type WebhookType `json:"type,omitempty"` // Secret value for the webhook. Secret string `json:"secret,omitempty"` } // KogitoAppStatus defines the observed state of KogitoApp. // +k8s:openapi-gen=true type KogitoAppStatus struct { ConditionsMeta `json:",inline"` // External URL for the service. // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Route" // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.x-descriptors="urn:alm:descriptor:org.w3:link" Route string `json:"route,omitempty"` // History of service deployments status. // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Deployments" // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses" Deployments Deployments `json:"deployments"` // History of service builds status. // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Builds" Builds Builds `json:"builds"` } // RuntimeType - type of runtime used by the KogitoApp. type RuntimeType string const ( // QuarkusRuntimeType - the Quarkus runtime. QuarkusRuntimeType RuntimeType = "quarkus" // SpringbootRuntimeType - the Spring Boot runtime. SpringbootRuntimeType RuntimeType = "springboot" ) // Deployments ... // +k8s:openapi-gen=true type Deployments struct { // Deployments are ready to serve requests. // +listType=set Ready []string `json:"ready,omitempty"` // Deployments are starting. // +listType=set Starting []string `json:"starting,omitempty"` // Deployments are not starting and the next step is unclear. 
// +listType=set Stopped []string `json:"stopped,omitempty"` // Deployments failed. // +listType=set Failed []string `json:"failed,omitempty"` } // Builds ... // +k8s:openapi-gen=true type Builds struct { // Builds are being created. // +listType=set New []string `json:"new,omitempty"` // Builds are about to start running. // +listType=set Pending []string `json:"pending,omitempty"` // Builds are running. // +listType=set Running []string `json:"running,omitempty"` // Builds have executed and succeeded. // +listType=set Complete []string `json:"complete,omitempty"` // Builds have executed and failed. // +listType=set Failed []string `json:"failed,omitempty"` // Builds have been prevented from executing by an error. // +listType=set Error []string `json:"error,omitempty"` // Builds have been stopped from executing. // +listType=set Cancelled []string `json:"cancelled,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KogitoApp is a project prescription running a Kogito service (it's meant to be used to Build and Deploy the application at the same time). // KogitoApp is deprecated; please use a combination of KogitoRuntime and KogitoBuild instead. See: https://issues.redhat.com/browse/KOGITO-1998 // +k8s:openapi-gen=true // +kubebuilder:resource:path=kogitoapps,scope=Namespaced // +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="Number of replicas set for this service" // +kubebuilder:printcolumn:name="Runtime",type="string",JSONPath=".spec.runtime",description="Runtime used to build the service" // +kubebuilder:printcolumn:name="Enable Persistence",type="boolean",JSONPath=".spec.enablePersistence",description="Indicates if persistence is enabled" // +kubebuilder:printcolumn:name="Enable Events",type="boolean",JSONPath=".spec.enableEvents",description="Indicates if events are enabled" // +kubebuilder:printcolumn:name="Image Version",type="string",JSONPath=".spec.build.imageVersion",description="Build image version" // +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".status.route",description="External URI to access this service" // +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Application" // +operator-sdk:gen-csv:customresourcedefinitions.resources="DeploymentConfigs,apps.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="ImageStreams,image.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="BuildConfigs,build.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="Routes,route.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="ConfigMaps,v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="Services,v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="ServiceMonitors,monitoring.coreos.com/v1" type KogitoApp struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec KogitoAppSpec `json:"spec,omitempty"` Status KogitoAppStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KogitoAppList contains a list of KogitoApp. 
// +optional // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Final Artifact" type Artifact struct { // Indicates the unique identifier of the organization or group that created the project. // +optional GroupID string `json:"groupId,omitempty"` // Indicates the unique base name of the primary artifact being generated. // +optional ArtifactID string `json:"artifactId,omitempty"` // Indicates the version of the artifact generated by the project. // +optional Version string `json:"version,omitempty"` }
{ k.Resources.Limits = corev1.ResourceList{} }
conditional_block
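The masked conditional block above exists because assigning into a nil map panics in Go, so the helpers lazily allocate the ResourceList on first write. A self-contained sketch of the same pattern with a plain map:

package main

import "fmt"

type resources struct {
	limits map[string]string
}

// addLimit mirrors AddResourceLimit: allocate the map on first write.
func (r *resources) addLimit(name, value string) {
	if r.limits == nil {
		r.limits = map[string]string{} // without this, r.limits[name] = value would panic
	}
	r.limits[name] = value
}

func main() {
	var r resources
	r.addLimit("memory", "1Gi")
	fmt.Println(r.limits) // map[memory:1Gi]
}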
kogitoapp_types.go
// Copyright 2019 Red Hat, Inc. and/or its affiliates // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package v1alpha1 import ( corev1 "k8s.io/api/core/v1" "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ) // KogitoAppCRDName is the name of the KogitoApp CRD in the cluster. const KogitoAppCRDName = "kogitoapps.app.kiegroup.org" // KogitoAppSpec defines the desired state of KogitoApp. // +k8s:openapi-gen=true type KogitoAppSpec struct { KogitoServiceSpec `json:",inline"` // The name of the runtime used, either Quarkus or SpringBoot. // Default value: quarkus. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Runtime" // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:label" // +kubebuilder:validation:Enum=quarkus;springboot Runtime RuntimeType `json:"runtime,omitempty"` // S2I Build configuration. // Default value: nil Build *KogitoAppBuildObject `json:"build"` // Kubernetes Service configuration. // Default value: nil Service KogitoAppServiceObject `json:"service,omitempty"` // Annotates the pods managed by the operator with the required metadata for Istio to set up its sidecars, enabling the mesh. Defaults to false. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Istio" // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch" EnableIstio bool `json:"enableIstio,omitempty"` // Set this property to true to tell the operator to deploy an instance of Infinispan via the Infinispan Operator and // configure this service to connect to the deployed server. // For Quarkus runtime, it sets QUARKUS_INFINISPAN_CLIENT_* environment variables. For Spring Boot, these variables start with SPRING_INFINISPAN_CLIENT_*. // More info: https://github.com/kiegroup/kogito-cloud-operator#kogito-services. // Set to false or ignore it if your service does not need persistence or if you are going to configure the persistence infrastructure yourself. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Persistence" // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch" EnablePersistence bool `json:"enablePersistence,omitempty"` // Set this property to true to tell the operator to deploy an instance of Kafka via the Strimzi Operator and configure this service with // the proper information to connect to the Kafka cluster. // The Kafka cluster service endpoint will be injected in the Kogito Service container via an environment variable named "KAFKA_BOOTSTRAP_SERVERS" e.g.: kafka-kogito:9092. 
// Set to false or ignore it if your service does not need messaging or if you are going to configure the messaging infrastructure yourself. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Enable Events" // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch" EnableEvents bool `json:"enableEvents,omitempty"` } // GetRuntime ... func (k *KogitoAppSpec) GetRuntime() RuntimeType { return k.Runtime } // GetBuild ... func (k *KogitoAppSpec) GetBuild() *KogitoAppBuildObject { if k == nil { return nil } return k.Build } // IsGitURIEmpty checks if the provided Git URI is empty or not. func (k *KogitoAppSpec) IsGitURIEmpty() bool { if k == nil { return true } if k.Build == nil { return true } return len(k.Build.GitSource.URI) == 0 } // KogitoAppBuildObject Data to define how to build an application from source. // +k8s:openapi-gen=true // +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Service Build" type KogitoAppBuildObject struct { Incremental bool `json:"incremental,omitempty"` // Environment variables used during build time. // +listType=atomic // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Build Env Variables" Envs []corev1.EnvVar `json:"envs,omitempty"` // Information about the git repository where the Kogito App source code resides. // If set, the operator will use the source-to-image (S2I) build strategy. // +optional GitSource GitSource `json:"gitSource,omitempty"` // WebHook secrets for build configs. // +listType=atomic // +optional Webhooks []WebhookSecret `json:"webhooks,omitempty"` // +optional // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Images Version" // Image version for the Kogito official images used during the build. E.g.: 0.6.0. Defaults to the current Operator version. ImageVersion string `json:"imageVersion,omitempty"` // Custom image used by the source-to-image process to build the Kogito Service binaries. Takes precedence over the ImageVersion attribute. // +optional // +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))` ImageS2ITag string `json:"imageS2ITag,omitempty"` // Custom image used by the source-to-image process to build the final Kogito Service image. Takes precedence over the ImageVersion attribute. // +optional // +kubebuilder:validation:Pattern=`(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))` ImageRuntimeTag string `json:"imageRuntimeTag,omitempty"` // Native indicates whether the Kogito Service build should be compiled to run in native mode when Runtime is Quarkus. For more information, see https://www.graalvm.org/docs/reference-manual/aot-compilation/. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Native Build" // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch" Native bool `json:"native,omitempty"` // Resources for S2I builder pods. 
// +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:resourceRequirements" Resources corev1.ResourceRequirements `json:"resources,omitempty"` // Internal Maven Mirror to be used during source-to-image builds to considerably increase build speed. MavenMirrorURL string `json:"mavenMirrorURL,omitempty"` // Artifact contains override information for building the Maven artifact. // +optional // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Final Artifact" Artifact Artifact `json:"artifact,omitempty"` // If set to true, prints the logs for downloading/uploading of Maven dependencies. Defaults to false. // +optional // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:booleanSwitch" EnableMavenDownloadOutput bool `json:"enableMavenDownloadOutput,omitempty"` } // AddEnvironmentVariable adds a new environment variable to the build environment variables. func (k *KogitoAppBuildObject) AddEnvironmentVariable(name, value string) { env := corev1.EnvVar{ Name: name, Value: value, } k.Envs = append(k.Envs, env) } // AddResourceRequest adds a new resource request. Works also on an uninitialized Requests field. func (k *KogitoAppBuildObject) AddResourceRequest(name, value string)
// AddResourceLimit adds a new resource limit. Works also on an uninitialized Limits field. func (k *KogitoAppBuildObject) AddResourceLimit(name, value string) { if k.Resources.Limits == nil { k.Resources.Limits = corev1.ResourceList{} } k.Resources.Limits[corev1.ResourceName(name)] = resource.MustParse(value) } // KogitoAppServiceObject Data to define the service of the Kogito application. // +k8s:openapi-gen=true type KogitoAppServiceObject struct { // Labels for the application service. Labels map[string]string `json:"labels,omitempty"` } // GitSource Git coordinates to locate the source code to build. // +k8s:openapi-gen=true // +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Git Source" type GitSource struct { // Git URI for the s2i source. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git URI" URI string `json:"uri"` // Branch to use in the Git repository. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Reference" Reference string `json:"reference,omitempty"` // Context/subdirectory where the code is located, relative to the repo root. // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Git Context" ContextDir string `json:"contextDir,omitempty"` } // WebhookType literal type to distinguish between different types of webhooks. type WebhookType string const ( // GitHubWebhook GitHub webhook. GitHubWebhook WebhookType = "GitHub" // GenericWebhook Generic webhook. GenericWebhook WebhookType = "Generic" ) // WebhookSecret Secret to use for a given webhook. // +k8s:openapi-gen=true type WebhookSecret struct { // WebHook type, either GitHub or Generic. // +kubebuilder:validation:Enum=GitHub;Generic Type WebhookType `json:"type,omitempty"` // Secret value for the webhook. Secret string `json:"secret,omitempty"` } // KogitoAppStatus defines the observed state of KogitoApp. // +k8s:openapi-gen=true type KogitoAppStatus struct { ConditionsMeta `json:",inline"` // External URL for the service. // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Route" // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.x-descriptors="urn:alm:descriptor:org.w3:link" Route string `json:"route,omitempty"` // History of service deployments status. // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Deployments" // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.x-descriptors="urn:alm:descriptor:com.tectonic.ui:podStatuses" Deployments Deployments `json:"deployments"` // History of service builds status. // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.statusDescriptors.displayName="Builds" Builds Builds `json:"builds"` } // RuntimeType - type of runtime used by the KogitoApp. type RuntimeType string const ( // QuarkusRuntimeType - the Quarkus runtime. QuarkusRuntimeType RuntimeType = "quarkus" // SpringbootRuntimeType - the Spring Boot runtime. SpringbootRuntimeType RuntimeType = "springboot" ) // Deployments ... 
// +k8s:openapi-gen=true type Deployments struct { // Deployments are ready to serve requests. // +listType=set Ready []string `json:"ready,omitempty"` // Deployments are starting. // +listType=set Starting []string `json:"starting,omitempty"` // Deployments are not starting and the next step is unclear. // +listType=set Stopped []string `json:"stopped,omitempty"` // Deployments failed. // +listType=set Failed []string `json:"failed,omitempty"` } // Builds ... // +k8s:openapi-gen=true type Builds struct { // Builds are being created. // +listType=set New []string `json:"new,omitempty"` // Builds are about to start running. // +listType=set Pending []string `json:"pending,omitempty"` // Builds are running. // +listType=set Running []string `json:"running,omitempty"` // Builds have executed and succeeded. // +listType=set Complete []string `json:"complete,omitempty"` // Builds have executed and failed. // +listType=set Failed []string `json:"failed,omitempty"` // Builds have been prevented from executing by an error. // +listType=set Error []string `json:"error,omitempty"` // Builds have been stopped from executing. // +listType=set Cancelled []string `json:"cancelled,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KogitoApp is a project prescription running a Kogito service (it's meant to be used to Build and Deploy the application at the same time). // KogitoApp is deprecated; please use a combination of KogitoRuntime and KogitoBuild instead. See: https://issues.redhat.com/browse/KOGITO-1998 // +k8s:openapi-gen=true // +kubebuilder:resource:path=kogitoapps,scope=Namespaced // +kubebuilder:printcolumn:name="Replicas",type="integer",JSONPath=".spec.replicas",description="Number of replicas set for this service" // +kubebuilder:printcolumn:name="Runtime",type="string",JSONPath=".spec.runtime",description="Runtime used to build the service" // +kubebuilder:printcolumn:name="Enable Persistence",type="boolean",JSONPath=".spec.enablePersistence",description="Indicates if persistence is enabled" // +kubebuilder:printcolumn:name="Enable Events",type="boolean",JSONPath=".spec.enableEvents",description="Indicates if events are enabled" // +kubebuilder:printcolumn:name="Image Version",type="string",JSONPath=".spec.build.imageVersion",description="Build image version" // +kubebuilder:printcolumn:name="Endpoint",type="string",JSONPath=".status.route",description="External URI to access this service" // +operator-sdk:gen-csv:customresourcedefinitions.displayName="Kogito Application" // +operator-sdk:gen-csv:customresourcedefinitions.resources="DeploymentConfigs,apps.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="ImageStreams,image.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="BuildConfigs,build.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="Routes,route.openshift.io/v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="ConfigMaps,v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="Services,v1" // +operator-sdk:gen-csv:customresourcedefinitions.resources="ServiceMonitors,monitoring.coreos.com/v1" type KogitoApp struct { metav1.TypeMeta `json:",inline"` metav1.ObjectMeta `json:"metadata,omitempty"` Spec KogitoAppSpec `json:"spec,omitempty"` Status KogitoAppStatus `json:"status,omitempty"` } // +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object // KogitoAppList contains a list of KogitoApp. 
type KogitoAppList struct { metav1.TypeMeta `json:",inline"` metav1.ListMeta `json:"metadata,omitempty"` // +listType=atomic Items []KogitoApp `json:"items"` } func init() { SchemeBuilder.Register(&KogitoApp{}, &KogitoAppList{}) } // Artifact contains override information for building the Maven artifact. // +optional // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors=true // +operator-sdk:gen-csv:customresourcedefinitions.specDescriptors.displayName="Final Artifact" type Artifact struct { // Indicates the unique identifier of the organization or group that created the project. // +optional GroupID string `json:"groupId,omitempty"` // Indicates the unique base name of the primary artifact being generated. // +optional ArtifactID string `json:"artifactId,omitempty"` // Indicates the version of the artifact generated by the project. // +optional Version string `json:"version,omitempty"` }
{ if k.Resources.Requests == nil { k.Resources.Requests = corev1.ResourceList{} } k.Resources.Requests[corev1.ResourceName(name)] = resource.MustParse(value) }
identifier_body
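The +kubebuilder:validation:Pattern markers on ImageS2ITag and ImageRuntimeTag above constrain custom images to a registry/namespace/name:tag shape. A quick way to sanity-check a candidate value against that pattern locally; note that anchoring with \A...\z is my addition, since CRD validation applies its own matching semantics:

package main

import (
	"fmt"
	"regexp"
)

// Same pattern as the kubebuilder marker, anchored for a full-string check.
var imageTagRe = regexp.MustCompile(`\A(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)+[a-z0-9][a-z0-9-]{0,61}[a-z0-9](?::[0-9]{0,5})?/([a-z0-9-]+)/([a-z0-9-]+):(([a-z0-9\.-]+))\z`)

func main() {
	fmt.Println(imageTagRe.MatchString("quay.io/kiegroup/kogito-quarkus-ubi8-s2i:0.6.0")) // true
	fmt.Println(imageTagRe.MatchString("no-registry/name:tag"))                           // false: a registry hostname and two path segments are required
}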
block_stream.rs
use anyhow::Error; use async_stream::stream; use futures03::Stream; use std::fmt; use std::sync::Arc; use thiserror::Error; use tokio::sync::mpsc::{self, Receiver, Sender}; use super::{Block, BlockPtr, Blockchain}; use crate::anyhow::Result; use crate::components::store::{BlockNumber, DeploymentLocator}; use crate::data::subgraph::UnifiedMappingApiVersion; use crate::firehose::{self, FirehoseEndpoint}; use crate::substreams_rpc::response::Message; use crate::{prelude::*, prometheus::labels}; pub struct BufferedBlockStream<C: Blockchain> {
stream: Box<dyn BlockStream<C>>, size_hint: usize, ) -> Box<dyn BlockStream<C>> { let (sender, receiver) = mpsc::channel::<Result<BlockStreamEvent<C>, Error>>(size_hint); crate::spawn(async move { BufferedBlockStream::stream_blocks(stream, sender).await }); Box::new(BufferedBlockStream::new(receiver)) } pub fn new(mut receiver: Receiver<Result<BlockStreamEvent<C>, Error>>) -> Self { let inner = stream! { loop { let event = match receiver.recv().await { Some(evt) => evt, None => return, }; yield event } }; Self { inner: Box::pin(inner), } } pub async fn stream_blocks( mut stream: Box<dyn BlockStream<C>>, sender: Sender<Result<BlockStreamEvent<C>, Error>>, ) -> Result<(), Error> { while let Some(event) = stream.next().await { match sender.send(event).await { Ok(_) => continue, Err(err) => { return Err(anyhow!( "buffered blockstream channel is closed, stopping. Err: {}", err )) } } } Ok(()) } } impl<C: Blockchain> BlockStream<C> for BufferedBlockStream<C> {} impl<C: Blockchain> Stream for BufferedBlockStream<C> { type Item = Result<BlockStreamEvent<C>, Error>; fn poll_next( mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll<Option<Self::Item>> { self.inner.poll_next_unpin(cx) } } pub trait BlockStream<C: Blockchain>: Stream<Item = Result<BlockStreamEvent<C>, Error>> + Unpin + Send { } /// BlockRefetcher abstraction allows a chain to decide if a block must be refetched after a dynamic data source was added #[async_trait] pub trait BlockRefetcher<C: Blockchain>: Send + Sync { fn required(&self, chain: &C) -> bool; async fn get_block( &self, chain: &C, logger: &Logger, cursor: FirehoseCursor, ) -> Result<C::Block, Error>; } /// BlockStreamBuilder is an abstraction that would separate the logic for building streams from the blockchain trait #[async_trait] pub trait BlockStreamBuilder<C: Blockchain>: Send + Sync { async fn build_firehose( &self, chain: &C, deployment: DeploymentLocator, block_cursor: FirehoseCursor, start_blocks: Vec<BlockNumber>, subgraph_current_block: Option<BlockPtr>, filter: Arc<C::TriggerFilter>, unified_api_version: UnifiedMappingApiVersion, ) -> Result<Box<dyn BlockStream<C>>>; async fn build_polling( &self, chain: &C, deployment: DeploymentLocator, start_blocks: Vec<BlockNumber>, subgraph_current_block: Option<BlockPtr>, filter: Arc<C::TriggerFilter>, unified_api_version: UnifiedMappingApiVersion, ) -> Result<Box<dyn BlockStream<C>>>; } #[derive(Debug, Clone)] pub struct FirehoseCursor(Option<String>); impl FirehoseCursor { #[allow(non_upper_case_globals)] pub const None: Self = FirehoseCursor(None); pub fn is_none(&self) -> bool { self.0.is_none() } } impl fmt::Display for FirehoseCursor { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { f.write_str(self.0.as_deref().unwrap_or("")) } } impl From<String> for FirehoseCursor { fn from(cursor: String) -> Self { // Treat a cursor of "" as None, not absolutely necessary for correctness since the firehose // treats both as the same, but makes it a little clearer. 
if cursor.is_empty() { FirehoseCursor::None } else { FirehoseCursor(Some(cursor)) } } } impl From<Option<String>> for FirehoseCursor { fn from(cursor: Option<String>) -> Self { match cursor { None => FirehoseCursor::None, Some(s) => FirehoseCursor::from(s), } } } impl AsRef<Option<String>> for FirehoseCursor { fn as_ref(&self) -> &Option<String> { &self.0 } } #[derive(Debug)] pub struct BlockWithTriggers<C: Blockchain> { pub block: C::Block, pub trigger_data: Vec<C::TriggerData>, } impl<C: Blockchain> Clone for BlockWithTriggers<C> where C::TriggerData: Clone, { fn clone(&self) -> Self { Self { block: self.block.clone(), trigger_data: self.trigger_data.clone(), } } } impl<C: Blockchain> BlockWithTriggers<C> { /// Creates a BlockWithTriggers structure, which holds /// the trigger data ordered and without any duplicates. pub fn new(block: C::Block, mut trigger_data: Vec<C::TriggerData>, logger: &Logger) -> Self { // This is where triggers get sorted. trigger_data.sort(); let old_len = trigger_data.len(); // This is removing the duplicate triggers in the case of multiple // data sources fetching the same event/call/etc. trigger_data.dedup(); let new_len = trigger_data.len(); if new_len != old_len { debug!( logger, "Trigger data had duplicate triggers"; "block_number" => block.number(), "block_hash" => block.hash().hash_hex(), "old_length" => old_len, "new_length" => new_len, ); } Self { block, trigger_data, } } pub fn trigger_count(&self) -> usize { self.trigger_data.len() } pub fn ptr(&self) -> BlockPtr { self.block.ptr() } pub fn parent_ptr(&self) -> Option<BlockPtr> { self.block.parent_ptr() } } #[async_trait] pub trait TriggersAdapter<C: Blockchain>: Send + Sync { // Return the block that is `offset` blocks before the block pointed to // by `ptr` from the local cache. An offset of 0 means the block itself, // an offset of 1 means the block's parent, etc. If the block is not in // the local cache, return `None`. async fn ancestor_block( &self, ptr: BlockPtr, offset: BlockNumber, ) -> Result<Option<C::Block>, Error>; // Returns a sequence of blocks in increasing order of block number. // Each block will include all of its triggers that match the given `filter`. // The sequence may omit blocks that contain no triggers, // but all returned blocks must be part of the same chain, starting at `chain_base`. // At least one block will be returned, even if it contains no triggers. // `step_size` is the suggested number of blocks to be scanned. async fn scan_triggers( &self, from: BlockNumber, to: BlockNumber, filter: &C::TriggerFilter, ) -> Result<Vec<BlockWithTriggers<C>>, Error>; // Used for reprocessing blocks when creating a data source. async fn triggers_in_block( &self, logger: &Logger, block: C::Block, filter: &C::TriggerFilter, ) -> Result<BlockWithTriggers<C>, Error>; /// Return `true` if the block with the given hash and number is on the /// main chain, i.e., the chain going back from the current chain head. async fn is_on_main_chain(&self, ptr: BlockPtr) -> Result<bool, Error>; /// Get pointer to parent of `block`. This is called when reverting `block`. async fn parent_ptr(&self, block: &BlockPtr) -> Result<Option<BlockPtr>, Error>; } #[async_trait] pub trait FirehoseMapper<C: Blockchain>: Send + Sync { async fn to_block_stream_event( &self, logger: &Logger, response: &firehose::Response, adapter: &Arc<dyn TriggersAdapter<C>>, filter: &C::TriggerFilter, ) -> Result<BlockStreamEvent<C>, FirehoseError>; /// Returns the [BlockPtr] value for the given block number. 
This is the block pointer /// of the longest chain according to the Firehose view of the blockchain state. /// /// This is a thin wrapper around [FirehoseEndpoint#block_ptr_for_number] to make /// it chain agnostic and callable from chain agnostic [FirehoseBlockStream]. async fn block_ptr_for_number( &self, logger: &Logger, endpoint: &Arc<FirehoseEndpoint>, number: BlockNumber, ) -> Result<BlockPtr, Error>; /// Returns the closest final block ptr to the block ptr received. /// On probabilistic chains like Ethereum, finality is determined by /// the confirmations threshold configured for the Firehose stack (currently /// hard-coded to 200). /// /// On some other chains, like NEAR, the actual final block number is determined /// from the block itself since it contains information about which block number /// is final against the current block. /// /// To take an example, assuming we are on Ethereum, the final block pointer /// for block #10212 would be the determined final block #10012 (10212 - 200 = 10012). async fn final_block_ptr_for( &self, logger: &Logger, endpoint: &Arc<FirehoseEndpoint>, block: &C::Block, ) -> Result<BlockPtr, Error>; } #[async_trait] pub trait SubstreamsMapper<C: Blockchain>: Send + Sync { async fn to_block_stream_event( &self, logger: &Logger, response: Option<Message>, // adapter: &Arc<dyn TriggersAdapter<C>>, // filter: &C::TriggerFilter, ) -> Result<Option<BlockStreamEvent<C>>, SubstreamsError>; } #[derive(Error, Debug)] pub enum FirehoseError { /// We were unable to decode the received block payload into the chain specific Block struct (e.g. chain_ethereum::pb::Block) #[error("received gRPC block payload cannot be decoded: {0}")] DecodingError(#[from] prost::DecodeError), /// Some unknown error occurred #[error("unknown error")] UnknownError(#[from] anyhow::Error), } #[derive(Error, Debug)] pub enum SubstreamsError { #[error("response is missing the clock information")] MissingClockError, #[error("invalid undo message")] InvalidUndoError, /// We were unable to decode the received block payload into the chain specific Block struct (e.g. chain_ethereum::pb::Block) #[error("received gRPC block payload cannot be decoded: {0}")] DecodingError(#[from] prost::DecodeError), /// Some unknown error occurred #[error("unknown error")] UnknownError(#[from] anyhow::Error), #[error("multiple module output error")] MultipleModuleOutputError, #[error("module output was not available (none) or wrong data provided")] ModuleOutputNotPresentOrUnexpected, #[error("unexpected store delta output")] UnexpectedStoreDeltaOutput, } #[derive(Debug)] pub enum BlockStreamEvent<C: Blockchain> { // The payload is the block the subgraph should revert to, so it becomes the new subgraph head. 
Revert(BlockPtr, FirehoseCursor), ProcessBlock(BlockWithTriggers<C>, FirehoseCursor), } impl<C: Blockchain> Clone for BlockStreamEvent<C> where C::TriggerData: Clone, { fn clone(&self) -> Self { match self { Self::Revert(arg0, arg1) => Self::Revert(arg0.clone(), arg1.clone()), Self::ProcessBlock(arg0, arg1) => Self::ProcessBlock(arg0.clone(), arg1.clone()), } } } #[derive(Clone)] pub struct BlockStreamMetrics { pub deployment_head: Box<Gauge>, pub deployment_failed: Box<Gauge>, pub reverted_blocks: Gauge, pub stopwatch: StopwatchMetrics, } impl BlockStreamMetrics { pub fn new( registry: Arc<MetricsRegistry>, deployment_id: &DeploymentHash, network: String, shard: String, stopwatch: StopwatchMetrics, ) -> Self { let reverted_blocks = registry .new_deployment_gauge( "deployment_reverted_blocks", "Track the last reverted block for a subgraph deployment", deployment_id.as_str(), ) .expect("Failed to create `deployment_reverted_blocks` gauge"); let labels = labels! { String::from("deployment") => deployment_id.to_string(), String::from("network") => network, String::from("shard") => shard }; let deployment_head = registry .new_gauge( "deployment_head", "Track the head block number for a deployment", labels.clone(), ) .expect("failed to create `deployment_head` gauge"); let deployment_failed = registry .new_gauge( "deployment_failed", "Boolean gauge to indicate whether the deployment has failed (1 == failed)", labels, ) .expect("failed to create `deployment_failed` gauge"); Self { deployment_head, deployment_failed, reverted_blocks, stopwatch, } } } /// Notifications about the chain head advancing. The block ingestor sends /// an update on this stream whenever the head of the underlying chain /// changes. The updates have no payload, receivers should call /// `Store::chain_head_ptr` to check what the latest block is. pub type ChainHeadUpdateStream = Box<dyn Stream<Item = ()> + Send + Unpin>; pub trait ChainHeadUpdateListener: Send + Sync + 'static { /// Subscribe to chain head updates for the given network. 
fn subscribe(&self, network: String, logger: Logger) -> ChainHeadUpdateStream; } #[cfg(test)] mod test { use std::{collections::HashSet, task::Poll}; use anyhow::Error; use futures03::{Stream, StreamExt, TryStreamExt}; use crate::{ blockchain::mock::{MockBlock, MockBlockchain}, ext::futures::{CancelableError, SharedCancelGuard, StreamExtension}, }; use super::{ BlockStream, BlockStreamEvent, BlockWithTriggers, BufferedBlockStream, FirehoseCursor, }; #[derive(Debug)] struct TestStream { number: u64, } impl BlockStream<MockBlockchain> for TestStream {} impl Stream for TestStream { type Item = Result<BlockStreamEvent<MockBlockchain>, Error>; fn poll_next( mut self: std::pin::Pin<&mut Self>, _cx: &mut std::task::Context<'_>, ) -> std::task::Poll<Option<Self::Item>> { self.number += 1; Poll::Ready(Some(Ok(BlockStreamEvent::ProcessBlock( BlockWithTriggers::<MockBlockchain> { block: MockBlock { number: self.number - 1, }, trigger_data: vec![], }, FirehoseCursor::None, )))) } } #[tokio::test] async fn consume_stream() { let initial_block = 100; let buffer_size = 5; let stream = Box::new(TestStream { number: initial_block, }); let guard = SharedCancelGuard::new(); let mut stream = BufferedBlockStream::spawn_from_stream(stream, buffer_size) .map_err(CancelableError::Error) .cancelable(&guard, || Err(CancelableError::Cancel)); let mut blocks = HashSet::<MockBlock>::new(); let mut count = 0; loop { match stream.next().await { None if blocks.is_empty() => panic!("None before blocks"), Some(Err(CancelableError::Cancel)) => { assert!(guard.is_canceled(), "Guard shouldn't be called yet"); break; } Some(Ok(BlockStreamEvent::ProcessBlock(block_triggers, _))) => { let block = block_triggers.block; blocks.insert(block.clone()); count += 1; if block.number > initial_block + buffer_size as u64 { guard.cancel(); } } _ => panic!("Should not happen"), }; } assert!( blocks.len() > buffer_size, "should consume at least a full buffer, consumed {}", count ); assert_eq!(count, blocks.len(), "should not have duplicated blocks"); } }
inner: Pin<Box<dyn Stream<Item = Result<BlockStreamEvent<C>, Error>> + Send>>, } impl<C: Blockchain + 'static> BufferedBlockStream<C> { pub fn spawn_from_stream(
random_line_split
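BufferedBlockStream above decouples the block producer from its consumer through a bounded mpsc channel: a spawned task drains the inner stream into the channel, and the channel's capacity (size_hint) is how far the producer may run ahead before backpressure applies. A rough Go analogue of that same pattern, illustrative only and not this crate's API:

package main

import "fmt"

// bufferStream drains src into a bounded channel, as spawn_from_stream does
// with an mpsc channel: the goroutine blocks once size elements are queued.
func bufferStream(src <-chan int, size int) <-chan int {
	out := make(chan int, size)
	go func() {
		defer close(out) // closing signals end-of-stream, like the sender being dropped
		for v := range src {
			out <- v
		}
	}()
	return out
}

func main() {
	src := make(chan int)
	go func() {
		defer close(src)
		for i := 0; i < 10; i++ {
			src <- i
		}
	}()
	for v := range bufferStream(src, 5) {
		fmt.Println(v)
	}
}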
block_stream.rs
use anyhow::Error; use async_stream::stream; use futures03::Stream; use std::fmt; use std::sync::Arc; use thiserror::Error; use tokio::sync::mpsc::{self, Receiver, Sender}; use super::{Block, BlockPtr, Blockchain}; use crate::anyhow::Result; use crate::components::store::{BlockNumber, DeploymentLocator}; use crate::data::subgraph::UnifiedMappingApiVersion; use crate::firehose::{self, FirehoseEndpoint}; use crate::substreams_rpc::response::Message; use crate::{prelude::*, prometheus::labels}; pub struct BufferedBlockStream<C: Blockchain> { inner: Pin<Box<dyn Stream<Item = Result<BlockStreamEvent<C>, Error>> + Send>>, } impl<C: Blockchain + 'static> BufferedBlockStream<C> { pub fn spawn_from_stream( stream: Box<dyn BlockStream<C>>, size_hint: usize, ) -> Box<dyn BlockStream<C>> { let (sender, receiver) = mpsc::channel::<Result<BlockStreamEvent<C>, Error>>(size_hint); crate::spawn(async move { BufferedBlockStream::stream_blocks(stream, sender).await }); Box::new(BufferedBlockStream::new(receiver)) } pub fn new(mut receiver: Receiver<Result<BlockStreamEvent<C>, Error>>) -> Self { let inner = stream! { loop { let event = match receiver.recv().await { Some(evt) => evt, None => return, }; yield event } }; Self { inner: Box::pin(inner), } } pub async fn stream_blocks( mut stream: Box<dyn BlockStream<C>>, sender: Sender<Result<BlockStreamEvent<C>, Error>>, ) -> Result<(), Error> { while let Some(event) = stream.next().await { match sender.send(event).await { Ok(_) => continue, Err(err) => { return Err(anyhow!( "buffered blockstream channel is closed, stopping. Err: {}", err )) } } } Ok(()) } } impl<C: Blockchain> BlockStream<C> for BufferedBlockStream<C> {} impl<C: Blockchain> Stream for BufferedBlockStream<C> { type Item = Result<BlockStreamEvent<C>, Error>; fn poll_next( mut self: Pin<&mut Self>, cx: &mut std::task::Context<'_>, ) -> std::task::Poll<Option<Self::Item>> { self.inner.poll_next_unpin(cx) } } pub trait BlockStream<C: Blockchain>: Stream<Item = Result<BlockStreamEvent<C>, Error>> + Unpin + Send { } /// BlockRefetcher abstraction allows a chain to decide if a block must be refetched after a dynamic data source was added #[async_trait] pub trait BlockRefetcher<C: Blockchain>: Send + Sync { fn required(&self, chain: &C) -> bool; async fn get_block( &self, chain: &C, logger: &Logger, cursor: FirehoseCursor, ) -> Result<C::Block, Error>; } /// BlockStreamBuilder is an abstraction that would separate the logic for building streams from the blockchain trait #[async_trait] pub trait BlockStreamBuilder<C: Blockchain>: Send + Sync { async fn build_firehose( &self, chain: &C, deployment: DeploymentLocator, block_cursor: FirehoseCursor, start_blocks: Vec<BlockNumber>, subgraph_current_block: Option<BlockPtr>, filter: Arc<C::TriggerFilter>, unified_api_version: UnifiedMappingApiVersion, ) -> Result<Box<dyn BlockStream<C>>>; async fn build_polling( &self, chain: &C, deployment: DeploymentLocator, start_blocks: Vec<BlockNumber>, subgraph_current_block: Option<BlockPtr>, filter: Arc<C::TriggerFilter>, unified_api_version: UnifiedMappingApiVersion, ) -> Result<Box<dyn BlockStream<C>>>; } #[derive(Debug, Clone)] pub struct FirehoseCursor(Option<String>); impl FirehoseCursor { #[allow(non_upper_case_globals)] pub const None: Self = FirehoseCursor(None); pub fn is_none(&self) -> bool { self.0.is_none() } } impl fmt::Display for FirehoseCursor { fn fmt(&self, f: &mut fmt::Formatter) -> Result<(), fmt::Error> { f.write_str(self.0.as_deref().unwrap_or("")) } } impl From<String> for FirehoseCursor { fn 
from(cursor: String) -> Self { // Treat a cursor of "" as None, not absolutely necessary for correctness since the firehose // treats both as the same, but makes it a little clearer. if cursor.is_empty() { FirehoseCursor::None } else { FirehoseCursor(Some(cursor)) } } } impl From<Option<String>> for FirehoseCursor { fn from(cursor: Option<String>) -> Self { match cursor { None => FirehoseCursor::None, Some(s) => FirehoseCursor::from(s), } } } impl AsRef<Option<String>> for FirehoseCursor { fn as_ref(&self) -> &Option<String> { &self.0 } } #[derive(Debug)] pub struct BlockWithTriggers<C: Blockchain> { pub block: C::Block, pub trigger_data: Vec<C::TriggerData>, } impl<C: Blockchain> Clone for BlockWithTriggers<C> where C::TriggerData: Clone, { fn clone(&self) -> Self { Self { block: self.block.clone(), trigger_data: self.trigger_data.clone(), } } } impl<C: Blockchain> BlockWithTriggers<C> { /// Creates a BlockWithTriggers structure, which holds /// the trigger data ordered and without any duplicates. pub fn
(block: C::Block, mut trigger_data: Vec<C::TriggerData>, logger: &Logger) -> Self { // This is where triggers get sorted. trigger_data.sort(); let old_len = trigger_data.len(); // This is removing the duplicate triggers in the case of multiple // data sources fetching the same event/call/etc. trigger_data.dedup(); let new_len = trigger_data.len(); if new_len != old_len { debug!( logger, "Trigger data had duplicate triggers"; "block_number" => block.number(), "block_hash" => block.hash().hash_hex(), "old_length" => old_len, "new_length" => new_len, ); } Self { block, trigger_data, } } pub fn trigger_count(&self) -> usize { self.trigger_data.len() } pub fn ptr(&self) -> BlockPtr { self.block.ptr() } pub fn parent_ptr(&self) -> Option<BlockPtr> { self.block.parent_ptr() } } #[async_trait] pub trait TriggersAdapter<C: Blockchain>: Send + Sync { // Return the block that is `offset` blocks before the block pointed to // by `ptr` from the local cache. An offset of 0 means the block itself, // an offset of 1 means the block's parent, etc. If the block is not in // the local cache, return `None`. async fn ancestor_block( &self, ptr: BlockPtr, offset: BlockNumber, ) -> Result<Option<C::Block>, Error>; // Returns a sequence of blocks in increasing order of block number. // Each block will include all of its triggers that match the given `filter`. // The sequence may omit blocks that contain no triggers, // but all returned blocks must be part of the same chain, starting at `chain_base`. // At least one block will be returned, even if it contains no triggers. // `step_size` is the suggested number of blocks to be scanned. async fn scan_triggers( &self, from: BlockNumber, to: BlockNumber, filter: &C::TriggerFilter, ) -> Result<Vec<BlockWithTriggers<C>>, Error>; // Used for reprocessing blocks when creating a data source. async fn triggers_in_block( &self, logger: &Logger, block: C::Block, filter: &C::TriggerFilter, ) -> Result<BlockWithTriggers<C>, Error>; /// Return `true` if the block with the given hash and number is on the /// main chain, i.e., the chain going back from the current chain head. async fn is_on_main_chain(&self, ptr: BlockPtr) -> Result<bool, Error>; /// Get pointer to parent of `block`. This is called when reverting `block`. async fn parent_ptr(&self, block: &BlockPtr) -> Result<Option<BlockPtr>, Error>; } #[async_trait] pub trait FirehoseMapper<C: Blockchain>: Send + Sync { async fn to_block_stream_event( &self, logger: &Logger, response: &firehose::Response, adapter: &Arc<dyn TriggersAdapter<C>>, filter: &C::TriggerFilter, ) -> Result<BlockStreamEvent<C>, FirehoseError>; /// Returns the [BlockPtr] value for the given block number. This is the block pointer /// of the longest chain according to the Firehose view of the blockchain state. /// /// This is a thin wrapper around [FirehoseEndpoint#block_ptr_for_number] to make /// it chain agnostic and callable from chain agnostic [FirehoseBlockStream]. async fn block_ptr_for_number( &self, logger: &Logger, endpoint: &Arc<FirehoseEndpoint>, number: BlockNumber, ) -> Result<BlockPtr, Error>; /// Returns the closest final block ptr to the block ptr received. /// On probabilistic chains like Ethereum, finality is determined by /// the confirmations threshold configured for the Firehose stack (currently /// hard-coded to 200). /// /// On some other chains, like NEAR, the actual final block number is determined /// from the block itself since it contains information about which block number /// is final against the current block. 
/// /// To take an example, assuming we are on Ethereum, the final block pointer /// for block #10212 would be the determined final block #10012 (10212 - 200 = 10012). async fn final_block_ptr_for( &self, logger: &Logger, endpoint: &Arc<FirehoseEndpoint>, block: &C::Block, ) -> Result<BlockPtr, Error>; } #[async_trait] pub trait SubstreamsMapper<C: Blockchain>: Send + Sync { async fn to_block_stream_event( &self, logger: &Logger, response: Option<Message>, // adapter: &Arc<dyn TriggersAdapter<C>>, // filter: &C::TriggerFilter, ) -> Result<Option<BlockStreamEvent<C>>, SubstreamsError>; } #[derive(Error, Debug)] pub enum FirehoseError { /// We were unable to decode the received block payload into the chain specific Block struct (e.g. chain_ethereum::pb::Block) #[error("received gRPC block payload cannot be decoded: {0}")] DecodingError(#[from] prost::DecodeError), /// Some unknown error occurred #[error("unknown error")] UnknownError(#[from] anyhow::Error), } #[derive(Error, Debug)] pub enum SubstreamsError { #[error("response is missing the clock information")] MissingClockError, #[error("invalid undo message")] InvalidUndoError, /// We were unable to decode the received block payload into the chain specific Block struct (e.g. chain_ethereum::pb::Block) #[error("received gRPC block payload cannot be decoded: {0}")] DecodingError(#[from] prost::DecodeError), /// Some unknown error occurred #[error("unknown error")] UnknownError(#[from] anyhow::Error), #[error("multiple module output error")] MultipleModuleOutputError, #[error("module output was not available (none) or wrong data provided")] ModuleOutputNotPresentOrUnexpected, #[error("unexpected store delta output")] UnexpectedStoreDeltaOutput, } #[derive(Debug)] pub enum BlockStreamEvent<C: Blockchain> { // The payload is the block the subgraph should revert to, so it becomes the new subgraph head. Revert(BlockPtr, FirehoseCursor), ProcessBlock(BlockWithTriggers<C>, FirehoseCursor), } impl<C: Blockchain> Clone for BlockStreamEvent<C> where C::TriggerData: Clone, { fn clone(&self) -> Self { match self { Self::Revert(arg0, arg1) => Self::Revert(arg0.clone(), arg1.clone()), Self::ProcessBlock(arg0, arg1) => Self::ProcessBlock(arg0.clone(), arg1.clone()), } } } #[derive(Clone)] pub struct BlockStreamMetrics { pub deployment_head: Box<Gauge>, pub deployment_failed: Box<Gauge>, pub reverted_blocks: Gauge, pub stopwatch: StopwatchMetrics, } impl BlockStreamMetrics { pub fn new( registry: Arc<MetricsRegistry>, deployment_id: &DeploymentHash, network: String, shard: String, stopwatch: StopwatchMetrics, ) -> Self { let reverted_blocks = registry .new_deployment_gauge( "deployment_reverted_blocks", "Track the last reverted block for a subgraph deployment", deployment_id.as_str(), ) .expect("Failed to create `deployment_reverted_blocks` gauge"); let labels = labels! { String::from("deployment") => deployment_id.to_string(), String::from("network") => network, String::from("shard") => shard }; let deployment_head = registry .new_gauge( "deployment_head", "Track the head block number for a deployment", labels.clone(), ) .expect("failed to create `deployment_head` gauge"); let deployment_failed = registry .new_gauge( "deployment_failed", "Boolean gauge to indicate whether the deployment has failed (1 == failed)", labels, ) .expect("failed to create `deployment_failed` gauge"); Self { deployment_head, deployment_failed, reverted_blocks, stopwatch, } } } /// Notifications about the chain head advancing. 
/// an update on this stream whenever the head of the underlying chain
/// changes. The updates have no payload; receivers should call
/// `Store::chain_head_ptr` to check what the latest block is.
pub type ChainHeadUpdateStream = Box<dyn Stream<Item = ()> + Send + Unpin>;

pub trait ChainHeadUpdateListener: Send + Sync + 'static {
    /// Subscribe to chain head updates for the given network.
    fn subscribe(&self, network: String, logger: Logger) -> ChainHeadUpdateStream;
}

#[cfg(test)]
mod test {
    use std::{collections::HashSet, task::Poll};

    use anyhow::Error;
    use futures03::{Stream, StreamExt, TryStreamExt};

    use crate::{
        blockchain::mock::{MockBlock, MockBlockchain},
        ext::futures::{CancelableError, SharedCancelGuard, StreamExtension},
    };

    use super::{
        BlockStream, BlockStreamEvent, BlockWithTriggers, BufferedBlockStream, FirehoseCursor,
    };

    #[derive(Debug)]
    struct TestStream {
        number: u64,
    }

    impl BlockStream<MockBlockchain> for TestStream {}

    impl Stream for TestStream {
        type Item = Result<BlockStreamEvent<MockBlockchain>, Error>;

        fn poll_next(
            mut self: std::pin::Pin<&mut Self>,
            _cx: &mut std::task::Context<'_>,
        ) -> std::task::Poll<Option<Self::Item>> {
            self.number += 1;
            Poll::Ready(Some(Ok(BlockStreamEvent::ProcessBlock(
                BlockWithTriggers::<MockBlockchain> {
                    block: MockBlock {
                        number: self.number - 1,
                    },
                    trigger_data: vec![],
                },
                FirehoseCursor::None,
            ))))
        }
    }

    #[tokio::test]
    async fn consume_stream() {
        let initial_block = 100;
        let buffer_size = 5;
        let stream = Box::new(TestStream {
            number: initial_block,
        });
        let guard = SharedCancelGuard::new();

        let mut stream = BufferedBlockStream::spawn_from_stream(stream, buffer_size)
            .map_err(CancelableError::Error)
            .cancelable(&guard, || Err(CancelableError::Cancel));

        let mut blocks = HashSet::<MockBlock>::new();
        let mut count = 0;
        loop {
            match stream.next().await {
                None if blocks.is_empty() => panic!("None before blocks"),
                Some(Err(CancelableError::Cancel)) => {
                    assert!(
                        guard.is_canceled(),
                        "stream should only yield Cancel after the guard was canceled"
                    );
                    break;
                }
                Some(Ok(BlockStreamEvent::ProcessBlock(block_triggers, _))) => {
                    let block = block_triggers.block;
                    blocks.insert(block.clone());
                    count += 1;

                    if block.number > initial_block + buffer_size as u64 {
                        guard.cancel();
                    }
                }
                _ => panic!("Should not happen"),
            };
        }

        assert!(
            blocks.len() > buffer_size,
            "should consume at least a full buffer, consumed {}",
            count
        );
        assert_eq!(count, blocks.len(), "should not have duplicated blocks");
    }
}
MPO.py
from __future__ import division

import pandas_datareader as pdr
from pandas_datareader.quandl import QuandlReader
from collections import defaultdict
import numpy as np
from numpy import dot, sqrt
import pandas as pd
import datetime
from random import uniform
from scipy.stats import linregress
from scipy.optimize import minimize, fsolve
from scipy.interpolate import splrep, splev
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import re
import plotly.offline as offline
import pickle
from time import sleep

"""
Mess:
Market returns in pct:
    self.market_returns = self.data[self.market_indecies].pct_change().mean(axis=0)
Covariance in pct:
    self.cov_matrix = self.data.pct_change().cov()
"""

"""
TODO:
- generate and plot the Sharpe ratio of a whole bunch of random portfolio weight combinations
- calculate and plot the capital market line - page 333 in the Python finance book
- first derivative of the efficient frontier??
- ADD required return
- ADD sanity check to see if window size and window move match
- ADD option to save
- make the line and the markers the same color in the plot
"""

class Calculation_pack():
    "L4NT0W"
    def __init__(self, stock_ticks=None, stock_names=None, market_indecies=None,
                 start=datetime.datetime(1997, 1, 1), end=datetime.date.today(),
                 risk_free_rate=0.0, window_size=None, window_move=None,
                 source="quandl", online=False, n_sim=0, annotations=None,
                 name_of_data="develop", stack_windows=None, required_return=None,
                 auto_open=False):

        self.auto_open = auto_open
        self.stock_ticks = stock_ticks
        self.market_indecies = market_indecies
        # Concatenate different market indices to get a "real" market index.
        # Possibly do this with different weights.
        self.start = start
        self.end = end
        self.risk_free_rate = risk_free_rate
        self.window_move = window_move
        self.window_size = window_size
        self.source = source
        self.online = online
        self.n_sim = n_sim
        self.stock_names = stock_names
        self.save_data = True
        self.name_of_data = name_of_data
        self.stack_windows = stack_windows  # i.e. stack all windows in one plot
        self.annotations = annotations
        self.required_return = required_return

        def logic_gate():
            self.plot_as_windows = True if self.window_size and self.window_move else False
            if self.plot_as_windows:
                self.CAPMs = list()
                if self.required_return:
                    self.CMLpw_weights = list()
            if not self.plot_as_windows:
                self.stack_windows = None  # don't stack if there is only one window
            self.plot_CML = True if self.risk_free_rate > 0 else False
            self.plot_simulation = True if self.n_sim > 0 and not self.stack_windows else False
            if self.stack_windows:
                self.annotations = False

        def sanity_check():
            pass
            # TODO:
            # if no stock names are given, use the stock ticks
            # maximum 20 moving windows
            # n_sim max 80,000 if offline and 30,000 if online
            # start must be before end
            # MAKE SURE ALL STRING INPUT IS CORRECT and that start/end is in datetime format

        logic_gate()
        sanity_check()

    def get_monthly_data(self):
        # TODO: add a better module for data management and auto-naming of files
        if self.source == "pickle":
            self.data = pickle.load(open("{}.p".format(self.name_of_data), "rb"))

        else:
            if self.source in ["google", "yahoo"]:  # DEPRECATED
                raw_data = pdr.DataReader(self.market_indecies + self.stock_ticks,
                                          self.source, self.start, self.end)
                adj_data = raw_data["Close"]

            if self.source == "quandl":
                adj_data = defaultdict()
                for ticker in self.stock_ticks + self.market_indecies:
                    data = QuandlReader(symbols=ticker, start=self.start, end=self.end).read()
                    if "AdjClose" in data.columns:
                        adj_data[ticker] = data["AdjClose"]
                    elif "IndexValue" in data.columns:
                        adj_data[ticker] = data["IndexValue"]
                    sleep(0.4)
                adj_data = pd.DataFrame(adj_data)

            self.data = adj_data.groupby(pd.Grouper(freq='MS')).mean()  # adjusted monthly data

            if self.save_data:
                pickle.dump(self.data, open("{}.p".format(self.name_of_data), "wb"))
        # TODO: TEST FOR EMPTY DATA!

    def calculate_log_change(self):
        self.log_change_data = (np.log(self.data) - np.log(self.data).shift(1)).dropna()

    def assign_data_window(self, opperation_type=None):
        """The inelegance here: first the backtest weights must be calculated with one
        range of data held out, and then the backtest expected return is calculated on
        the held-out data. This assign-data method is fairly ad hoc."""
        if opperation_type == "backtest_weights":
            df1 = self.log_change_data
            df2 = self.log_change_data[self.start:self.end]
            # removes the window from the dataframe, i.e. hold-one-out
            self.data_window = pd.concat([df1, df2]).drop_duplicates(keep=False)
        elif opperation_type == "windows":
            self.data_window = self.log_change_data[self.start:self.end]

        else:
            self.data_window = self.log_change_data

    def calculate_covariance_and_var(self):
        self.cov_matrix = self.data_window.cov() * 12
        self.var = pd.Series(np.diag(self.cov_matrix), index=[self.cov_matrix.columns])

    def CAPM_prediction(self):
        rf = self.risk_free_rate
        b = self.var.drop(self.market_indecies)
        Rm = self.market_returns_yr
        # CAPM times the weights of the market portfolio
        self.CAPM = sum((rf + b*(Rm-rf).values) * (self.Wmp))/100
        if self.plot_as_windows:
            self.CAPMs.append(self.CAPM)

    def calculate_beta(self):
        # can be done with linalg: cov_matrix * var
        # getting beta as covar/var
        d = defaultdict(list)
        for index in self.market_indecies:
            var = self.cov_matrix.loc[index, index]
            for tick in self.stock_ticks:
                covar = self.cov_matrix.loc[index, tick]
                d[index] += [covar/var]
        self.beta1 = pd.DataFrame(data=d, index=self.stock_ticks)

    def calculate_regress_params(self):
        # getting alpha and beta with linear regression
        a = defaultdict(list)
        b = defaultdict(list)

        for market in self.market_indecies:
            for tick in self.stock_ticks:
                slope, intercept, _, _, _ = linregress(self.data_window[tick],
                                                       self.data_window[market])
                a[market] += [intercept]
                b[market] += [slope]

        self.alfa = pd.DataFrame(data=a, index=self.stock_ticks)
        self.beta = pd.DataFrame(data=b, index=self.stock_ticks)

    def calculate_expected_market_return(self):
        # using the plain mean value
        self.market_returns = self.data_window[self.market_indecies].mean()
        # scaling monthly log-returns to yearly simple returns via the exponential
        self.market_returns_yr = np.exp(self.market_returns*12)-1

    def calculate_exp_return(self):
        # # Using CAPM
        # self.exp_return = self.risk_free_rate + (self.market_returns-self.risk_free_rate) * self.beta
        # using the plain mean value
        self.exp_return = self.data_window[self.stock_ticks].mean()
        self.exp_return_yr = np.exp(self.exp_return*12)-1

    def solve_elements_for_plot(self):
        """Operations"""
        def quad_var(W, C):
            return np.sqrt(dot(dot(W.T, C), W))  # quadratic expression for portfolio risk

        def exp_return(W, R):
            return np.dot(W.T, R).sum()  # expected portfolio return

        def exp_return1(W, R, rf):
            return rf + np.dot(W.T, (R-rf)).sum()

        def sharpe_ratio(Fr, Vf, rf):  # returns Sr
            return (Fr - rf) / Vf

        def CML(Vf, rf, Sr):
            # risk_free_rate + risk * Sharpe_ratio_of_the_market_portfolio
            return Vf * Sr + rf

        def qsolve(R, Fr, C):
            """
            where:
            R is the vector of expected returns
            Fr is the range of expected returns on the EFF
            C is the var-covariance matrix
            """
            # TODO: add options to short and borrow
            W = R*0 + 1/len(R)  # initialize equal percentage weights

            # Bounds (inequality constraints)
            b = [(0., 1.) for i in W]  # weights between 0%..100% - no borrowing, no shorting

            def fitness(W, C, r):
                Pv = quad_var(W, C)
                return Pv

            Vf, Wf = [], []

            # Equality constraints
            h = ({'type': 'eq', 'fun': lambda W: sum(W)-1.},  # sum of weights = 100%
                 {'type': 'eq', 'fun': lambda W: exp_return(W, R) - r})  # equalizes portfolio return to r

            for r in Fr:
                # For a given level of return r, find the weights which minimize portfolio variance.
                optimized = minimize(fitness, W, args=(C, r),
                                     method='SLSQP',  # Sequential Least SQuares Programming
                                     constraints=h, bounds=b)
                X = optimized.x
                Wf.append(X)
                Vx = quad_var(X, C)
                Vf.append(Vx)
            return Vf, Wf
        R = self.exp_return_yr.values
        Fr = np.linspace(min(R), max(R), num=100)
        C = self.cov_matrix.iloc[:-1, :-1].values  # cov matrix without the market index

        Vf, Wf = qsolve(R, Fr, C)

        rf = self.risk_free_rate
        self.EFFsr = sharpe_ratio(Fr, Vf, rf)  # Sharpe ratio for portfolios on the efficient frontier

        # FRONTIER
        self.frontier_exp_return = Fr  # Y axis of the EFF
        self.frontier_risk = Vf  # X axis of the EFF
        self.frontier_weights = [[round(w*100, 2) for w in ws] for ws in Wf]  # TODO: might be done directly in pandas

        # MARKET PORTFOLIO
        idxmax = np.argmax(self.EFFsr)  # index of the "market" portfolio
        MPsr = self.EFFsr[idxmax]  # Sharpe ratio of the "market" portfolio, i.e. the slope of the CML
        self.Wmp = self.frontier_weights[idxmax]  # weights of the market portfolio
        self.marketPx = Vf[idxmax]  # "market" portfolio x and y
        self.marketPy = Fr[idxmax]

        # MINIMUM RISK PORTFOLIO
        idxmin = self.idxmin = np.argmin(Vf)  # index of the minimum risk portfolio
        self.minriskPx = Vf[idxmin]
        self.minriskPy = Fr[idxmin]

        if self.plot_CML:
            # CAPITAL MARKET LINE
            self.CMLx = np.linspace(0, max(Vf), num=100)
            self.CMLy = [CML(x, rf, MPsr) for x in self.CMLx]

            def qsolve1(CMLy, CMLx, C, R):
                # TODO: make one Qsolver with intuitively changeable constraints
                W = R*0 + 1/len(R)  # initialize equal percentage weights
                rf = self.risk_free_rate

                # Bounds (inequality constraints)
                b = [(0., 2.) for i in W]  # weights between 0%..200% - leverage allowed, no shorting

                def fitness(W, x, y, rf, ri, re):
                    Pv = sharpe_ratio(x, y, rf)
                    return - Pv.sum()

                Vf, Wf = [], []

                # Equality constraints
                h = ({'type': 'eq', 'fun': lambda W: sum(W)-1.},  # sum of weights = 100%
                     {'type': 'eq', 'fun': lambda W: quad_var(W, C) - ri},  # equalizes portfolio risk to ri
                     {'type': 'eq', 'fun': lambda W: exp_return1(W, R, rf) - re})  # equalizes portfolio return to re

                for ri, re in zip(CMLx, CMLy):
                    # For a given level of return, find the weights which minimize portfolio variance.
                    optimized = minimize(fitness, W, args=(CMLx, CMLy, rf, ri, re),
                                         method='SLSQP',  # Sequential Least SQuares Programming
                                         constraints=h, bounds=b)
                    X = optimized.x
                    Wf.append(X)
                    Vx = quad_var(X, C)
                    Vf.append(Vx)
                return Vf, Wf

            R1 = self.exp_return_yr
            R1["Rf"] = self.risk_free_rate
            R1 = R1.values

            C1 = self.cov_matrix.iloc[:-1, :-1]
            C1["Rf"] = 0.0
            C1.loc["Rf"] = 0.0
            C1 = C1.values

            _, Wcml = qsolve1(self.CMLy, self.CMLx, C1, R1)
            self.CML_weights = [[round(w*100, 2) for w in ws] for ws in Wcml]

            # portfolio on the CML with rr as its return
            if self.required_return:
                # DANGER! Mess ahead
                rr = self.required_return
                risk = (rr-rf)/MPsr
                self.CMLPx = risk
                self.CMLPy = rr
                _, CMLpw = qsolve1(np.array([rr]), np.array([risk]), C1, R1)
                self.CMLpw = [round(w*100, 2) for w in CMLpw[0]]  # FIXME: why this index?
                if self.plot_as_windows:
                    self.CMLpw_weights.append(self.CMLpw)

        if self.plot_simulation:
            def MCsimulation(R, C, rf):
                returns, volatility, ratio = [], [], []
                for single_portfolio in range(self.n_sim):
                    W = np.random.normal(scale=4, size=len(self.stock_ticks))**2
                    W /= np.sum(W)
                    ret = exp_return(W, R)
                    vol = quad_var(W, C)
                    returns.append(ret)
                    volatility.append(vol)
                    ratio.append(sharpe_ratio(ret, vol, rf))
                self.MCx = volatility
                self.MCy = returns
                self.MCsr = ratio

            MCsimulation(R, C, rf)
            # TODO: plot a 100% allocation in one stock, for every stock

    def prepare_plot(self):
        def weights_in_text(n):
            if n == "EFF":
                PD = pd.DataFrame(self.frontier_weights, columns=self.stock_names)
                T = [re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", PD.iloc[i].to_string()))+"%"
                     for i in PD.index]
            if n == "EFP":
                T = "Efficient portfolio<br>"+"".join(["{0}: {1}%<br>".format(name, weight)
                                                       for name, weight in zip(self.stock_names, self.Wmp)])
            if n == "CML":
                PD = pd.DataFrame(self.CML_weights, columns=self.stock_names+["Risk-free rate"])
                T = [re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", PD.iloc[i].to_string()))+"%"
                     for i in PD.index]
            if n == "CMLp":
                PD = pd.DataFrame(self.CMLpw, index=self.stock_names+["Risk-free rate"])
                T = [re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", PD.to_string()))+"%"]
            return T

        def annotations(strings, placements):
            # TODO: better annotations
            annotations = list()
            for s, p in zip(strings, placements):
                d = dict(
                    x=p[0],
                    y=p[1],
                    xref='paper',
                    yref='paper',
                    text=s,
                    showarrow=True,
                    arrowhead=20
                    )
                annotations.append(d)
            return annotations

        start = "{0}-{1}-{2}".format(self.start.day, self.start.month, self.start.year)
        end = "{0}-{1}-{2}".format(self.end.day, self.end.month, self.end.year)
        self.name = name = "{0} - {1}".format(start, end)

        data = list()

        EFF = go.Scatter(
            x = self.frontier_risk[self.idxmin:],
            y = self.frontier_exp_return[self.idxmin:],
            mode = 'markers+lines',
            legendgroup = name if self.stack_windows else None,
            showlegend = True,
            marker = dict(size=5, symbol="circle"),  # , color=[1 for _ in self.frontier_risk[self.idxmin:]], colorscale="Electric"),
            text = weights_in_text("EFF")[self.idxmin:],
            name = "Efficient frontier:<br>{}".format(name) if not self.stack_windows else name
            )

        EFP = go.Scatter(
            x = [self.marketPx],
            y = [self.marketPy],
            mode = 'markers',
            legendgroup = name if self.stack_windows else None,
            showlegend = False if self.stack_windows else True,
            marker = dict(size=15, symbol="circle"),  # color=[1], colorscale="Electric"),
            name = "Market/Efficient portfolio"
            )

        MVP = go.Scatter(
            x = [self.minriskPx],
            y = [self.minriskPy],
            mode = "markers",
            legendgroup = name if self.stack_windows else None,
            showlegend = False if self.stack_windows else True,
            marker = dict(size=15, symbol="diamond-x"),  # , color=[1], colorscale="Electric"),
            name = "Minimum variance portfolio"
            )

        if self.plot_CML:
            CML = go.Scatter(
                x = self.CMLx,
                y = self.CMLy,
                mode = 'lines+markers',
                legendgroup = name if self.stack_windows else None,
                text = weights_in_text("CML"),
                name = "Capital market line"
                # marker = make the color of points outside the efficient frontier a different color
                )
            data.append(CML)

            if self.required_return:
                CMLp = go.Scatter(
                    x = [self.CMLPx],
                    y = [self.CMLPy],
                    mode = "markers",
                    legendgroup = name if self.stack_windows else None,
                    text = weights_in_text("CMLp"),
                    name = "optimal allocation<br>with required return of {0}%".format(self.required_return*100),
                    marker = dict(size=15, symbol="diamond-x")
                    )
                data.append(CMLp)

        if self.plot_simulation:
            MonteCarlo = go.Scatter(
                x = self.MCx,
                y = self.MCy,
                mode = "markers",
                marker = dict(size=6,
                              colorscale="Electric",
                              color=self.MCsr,
                              showscale=True,
                              colorbar=dict(title="Sharpe Ratio", titleside="right")),
                name = "MonteCarlo simulated portfolios"
                )
            data.append(MonteCarlo)
        data += [EFF, EFP, MVP]

        title = "Efficient Frontier"
        if not self.plot_as_windows:
            title = "{0}<br>from {1} to {2}".format(title, start, end)

        self.layout = go.Layout(
            # TODO: better (less hacky) annotations
            annotations = annotations([weights_in_text("EFP")], [(0.2, 0.8)]) if self.annotations else annotations("", (0, 0)),
            legend = dict(
                x=1.2,
                y=1.2,
                traceorder='grouped',
                tracegroupgap=20,
                font=dict(
                    family='sans-serif',
                    size=20,
                    color='#000'
                    ),
                bgcolor='#E2E2E2',
                bordercolor='#FFFFFF',
                borderwidth=2
                ),
            title = title,
            showlegend = True,
            font = dict(
                size=20,
                color='#000'
                ),
            hovermode = 'closest',
            yaxis = dict(title="Portfolio Return"),
            xaxis = dict(title="Portfolio Risk (std. dev.)"),
            height = 1000,
            width = 1200,
            )

        self.plot_data += data

    def execute_plot(self):
        fig = go.Figure(data=self.plot_data, layout=self.layout)

        if self.online:
            plotly.tools.set_credentials_file(username="TheVizWiz",
                                              api_key="92x5KNp4VDPBDGNtLR2l")
            py.plot(fig, filename='efficient_frontier')

        if not self.online:
            name = self.name_of_data + self.name
            plot_url = offline.plot(fig, image='png', auto_open=self.auto_open,
                                    image_filename=name, output_type='file',
                                    image_width=1200, image_height=1000,
                                    filename="figures/{0}.html".format(name))  # TODO: run a sys call to create the folder

        self.plot_data = list()  # clear the plot data once the plot is made

    def with_moving_windows(self, operation):
        def func_wrapper():
            time = self.end - self.start
            # self.absolute_start = self.start
            # self.absolute_end = self.end
            window = datetime.timedelta(days=self.window_size)
            window_m = datetime.timedelta(days=self.window_move)
            while time-window >= datetime.timedelta(1):
                self.end = self.start + window
                operation()
                self.start = self.start + window_m
                time -= window_m
        return func_wrapper

    def prepare_data(self):
        self.get_monthly_data()
        self.calculate_log_change()

    def analyze_data(self):
        self.calculate_covariance_and_var()
        self.calculate_expected_market_return()
        self.calculate_beta()
        self.calculate_regress_params()
        self.calculate_exp_return()
        self.solve_elements_for_plot()
        self.CAPM_prediction()

    def run_backtest(self):
        # cross-validation of the model (WARNING: NOT PRETTY! - gaffer-taped together at the last moment)
        # TODO: can't run backtest after run_pack. Why? Fix!
        self.window_size = 365
        self.window_move = 365
        self.market_portfolios = list()
        self.expected_portfolio_returns = list()
        self.prepare_data()

        def one_window():
            self.assign_data_window("windows")
            self.analyze_data()
            self.expected_portfolio_returns.append(self.exp_return_yr)
            self.assign_data_window("backtest_weights")
            self.analyze_data()
            self.market_portfolios.append(self.Wmp)

        self.with_moving_windows(one_window)()

        self.backtest_results = [(i*x).sum() for i, x in zip(self.expected_portfolio_returns,
                                                             self.market_portfolios)]

    def run_pack(self):
        self.plot_data = list()
        self.prepare_data()

        def one_window():
            self.assign_data_window("windows")
            self.analyze_data()
            self.prepare_plot()
            if not self.stack_windows:
                self.execute_plot()

        if self.plot_as_windows:
            self.with_moving_windows(one_window)()
            if self.stack_windows:
                self.execute_plot()
        else:
            one_window()

if __name__ == '__main__':
    CP = Calculation_pack(
        stock_ticks = ["WIKI/AAPL", "WIKI/ABC", "WIKI/AGN",
                       "WIKI/ADP", "WIKI/ADSK", "WIKI/IBM", "WIKI/GE"],
        stock_names = "APL ABC AGN ADP ADSK IBM GE".split(),
        # stock_ticks=["NASDAQOMX/NQDK4000DKK", "NASDAQOMX/NQDE", "NASDAQOMX/NQJP2000JPY",
        #              "NASDAQOMX/NQHK2000HKD", "NASDAQOMX/NQGB", "NASDAQOMX/NQSE",
        #              "NASDAQOMX/NQFI"],
        market_indecies = ["GOOGL"],
        start = datetime.datetime(1999, 1, 1),
        end = datetime.datetime(2018, 1, 1),
        risk_free_rate = 0.03,
        source = "pickle",
        name_of_data = "USA",
        n_sim = 10000,
        # online = True,
        # window_size=3650,
        # window_move=365,
        # stack_windows = True,
        annotations = True,
        auto_open = True,
        required_return = 0.177
        )

    CP.run_pack()
    # CP
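The frontier construction above (`qsolve` inside `solve_elements_for_plot`) is easier to study in isolation. Below is a minimal, self-contained sketch of the same SLSQP approach on synthetic data; the three assets, their expected returns `R`, and the covariance matrix `C` are invented for illustration and are not values produced by the script.

```python
# Minimal sketch of the mean-variance frontier solve used in qsolve above.
# All numbers here (returns R, covariance C) are assumptions for illustration.
import numpy as np
from scipy.optimize import minimize

R = np.array([0.08, 0.12, 0.15])            # assumed expected yearly returns
C = np.array([[0.04, 0.01, 0.00],
              [0.01, 0.09, 0.02],
              [0.00, 0.02, 0.16]])           # assumed yearly covariance matrix

def port_risk(W):
    """Portfolio standard deviation for weight vector W."""
    return np.sqrt(W @ C @ W)

frontier = []
for r in np.linspace(R.min(), R.max(), 25):
    cons = ({'type': 'eq', 'fun': lambda W: W.sum() - 1.0},    # fully invested
            {'type': 'eq', 'fun': lambda W, r=r: W @ R - r})   # hit target return r
    res = minimize(port_risk, np.full(len(R), 1.0 / len(R)),
                   method='SLSQP', bounds=[(0.0, 1.0)] * len(R),
                   constraints=cons)
    frontier.append((port_risk(res.x), r))   # (risk, return) point on the frontier

print(min(frontier)[0])                      # risk of the minimum-variance portfolio
```

Each solve pins the portfolio return to a target `r` and minimizes risk under the same fully-invested, long-only bounds as `qsolve`, tracing the frontier from the minimum-variance portfolio upward.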
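`calculate_log_change` and `calculate_exp_return` together turn monthly closes into an annualized simple return: monthly log-changes are averaged, scaled by 12, and mapped back with the exponential. A small sketch of that conversion, with made-up prices:

```python
# Sketch of the annualization used in calculate_exp_return /
# calculate_expected_market_return above. Prices are assumptions for illustration.
import numpy as np

prices = np.array([100.0, 102.0, 101.0, 105.0, 107.0, 110.0])  # assumed monthly closes
log_ret = np.diff(np.log(prices))      # monthly log-changes, as in calculate_log_change
mu_monthly = log_ret.mean()
yearly = np.exp(mu_monthly * 12) - 1   # here roughly 0.257, i.e. about 25.7% per year
```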
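`MCsimulation` draws random non-negative weights by squaring normal samples and normalizing them to sum to one, then scores each random portfolio by its Sharpe ratio. A standalone sketch under the same assumed `R` and `C` as in the frontier sketch above:

```python
# Sketch of the Monte Carlo portfolio cloud from MCsimulation above.
# R, C and rf are assumptions carried over from the frontier sketch.
import numpy as np

rng = np.random.default_rng(0)
R = np.array([0.08, 0.12, 0.15])
C = np.array([[0.04, 0.01, 0.00],
              [0.01, 0.09, 0.02],
              [0.00, 0.02, 0.16]])
rf = 0.03

rets, vols, sharpes = [], [], []
for _ in range(5000):
    W = rng.normal(scale=4, size=len(R)) ** 2   # squaring keeps weights non-negative
    W /= W.sum()                                # normalize to a fully invested portfolio
    ret, vol = W @ R, np.sqrt(W @ C @ W)
    rets.append(ret)
    vols.append(vol)
    sharpes.append((ret - rf) / vol)
```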
MPO.py
from __future__ import division
import pandas_datareader as pdr
from pandas_datareader.quandl import QuandlReader
from collections import defaultdict
import numpy as np
from numpy import dot, sqrt
import pandas as pd
import datetime
from random import uniform
from scipy.stats import linregress
from scipy.optimize import minimize, fsolve
from scipy.interpolate import splrep, splev
import plotly
import plotly.plotly as py
import plotly.graph_objs as go
import re
import plotly.offline as offline
import pickle
from time import sleep

"""
Mess:

Market returns in pct:
    self.market_returns = self.data[self.market_indecies].pct_change().mean(axis=0)

Covariance in pct:
    self.cov_matrix = self.data.pct_change().cov()
"""

"""
TODO:
- generate and plot the Sharpe ratio of a whole bunch of random portfolio weight combinations
- calculate and plot the capital market line - page 333 in the Python finance book
- first derivative of the efficient frontier??
- ADD required return
- ADD a sanity check to see if window size and window move match
- ADD an option to save
- make the line and the markers the same colour in the plot
"""


class Calcualtion_pack():
    "L4NT0W"

    def __init__(self, stock_ticks=None, stock_names=None, market_indecies=None,
                 start=datetime.datetime(1997, 1, 1), end=datetime.date.today(),
                 risk_free_rate=0.0, window_size=None, window_move=None,
                 source="quandl", online=False, n_sim=0, annotations=None,
                 name_of_data="develop", stack_windows=None, required_return=None,
                 auto_open=False):
        self.auto_open = auto_open
        self.stock_ticks = stock_ticks
        # Concatenate different market indices to get a "real" market.
        # Possibly do this with different weights.
        self.market_indecies = market_indecies
        self.start = start
        self.end = end
        self.risk_free_rate = risk_free_rate
        self.window_move = window_move
        self.window_size = window_size
        self.source = source
        self.online = online
        self.n_sim = n_sim
        self.stock_names = stock_names
        self.save_data = True
        self.name_of_data = name_of_data
        self.stack_windows = stack_windows  # i.e. only one plot
        self.annotations = annotations
        self.required_return = required_return

        def logic_gate():
            self.plot_as_windows = True if self.window_size and self.window_move else False
            if self.plot_as_windows:
                self.CAPMs = list()
                if self.required_return:
                    self.CMLpw_weights = list()
            if not self.plot_as_windows:
                self.stack_windows = None  # don't stack if there is only one window
            self.plot_CML = True if self.risk_free_rate > 0 else False
            self.plot_simulation = True if self.n_sim > 0 and not self.stack_windows else False
            if self.stack_windows:
                self.annotations = False

        def sanity_check():
            pass
            # TODO:
            # - if there are no stock names, use the stock ticks
            # - maximum 20 moving windows
            # - n_sim max 80,000 if offline and 30,000 if online
            # - start must be before end
            # - MAKE SURE ALL STRING INPUT IS CORRECT and that start/end are datetimes

        logic_gate()
        sanity_check()

    def get_monthly_data(self):
        # TODO: add a better module for data management and auto-naming of files
        if self.source == "pickle":
            self.data = pickle.load(open("{}.p".format(self.name_of_data), "rb"))
        else:
            if self.source in ["google", "yahoo"]:  # DEPRECATED
                raw_data = pdr.DataReader(self.market_indecies + self.stock_ticks,
                                          self.source, self.start, self.end)
                adj_data = raw_data["Close"]
            if self.source == "quandl":
                adj_data = defaultdict()
                for ticker in self.stock_ticks + self.market_indecies:
                    data = QuandlReader(symbols=ticker, start=self.start, end=self.end).read()
                    if "AdjClose" in data.columns:
                        adj_data[ticker] = data["AdjClose"]
                    elif "IndexValue" in data.columns:
                        adj_data[ticker] = data["IndexValue"]
                    sleep(0.4)
                adj_data = pd.DataFrame(adj_data)
            self.data = adj_data.groupby(pd.Grouper(freq='MS')).mean()  # adjusted monthly data
            if self.save_data:
                pickle.dump(self.data, open("{}.p".format(self.name_of_data), "wb"))
            # TODO: TEST FOR EMPTY DATA!

    def calculate_log_change(self):
        self.log_change_data = (np.log(self.data) - np.log(self.data).shift(1)).dropna()

    def assign_data_window(self, opperation_type=None):
        """The inelegance here: the backtest weights first need to be calculated
        with one range of data held out, followed by a calculation of the backtest
        expected return on the held-out data. This assign-data method is fairly ad hoc."""
        if opperation_type == "backtest_weights":
            df1 = self.log_change_data
            df2 = self.log_change_data[self.start:self.end]
            # Remove the window from the dataframe, i.e. hold one out:
            self.data_window = pd.concat([df1, df2]).drop_duplicates(keep=False)
        elif opperation_type == "windows":
            self.data_window = self.log_change_data[self.start:self.end]
        else:
            self.data_window = self.log_change_data
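The hold-one-out trick above works because pd.concat followed by drop_duplicates(keep=False) cancels every row that appears twice. A minimal sketch on made-up data (dates and values are illustrative only):

import pandas as pd

idx = pd.date_range("2000-01-31", periods=6, freq="M")
df = pd.DataFrame({"x": range(6)}, index=idx)
window = df["2000-03-31":"2000-05-31"]              # the slice to hold out
held_out = pd.concat([df, window]).drop_duplicates(keep=False)
# held_out keeps rows 0, 1 and 5: the window rows occur twice and are dropped

Note this relies on the held-out rows having values that do not coincide with any row outside the window; genuinely duplicated observations elsewhere would be dropped too.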
    def calculate_covariance_and_var(self):
        self.cov_matrix = self.data_window.cov() * 12
        self.var = pd.Series(np.diag(self.cov_matrix), index=[self.cov_matrix.columns])

    def CAPM_prediction(self):
        rf = self.risk_free_rate
        b = self.var.drop(self.market_indecies)
        Rm = self.market_returns_yr
        # CAPM times the weights of the market portfolio:
        self.CAPM = sum((rf + b * (Rm - rf).values) * (self.Wmp)) / 100
        if self.plot_as_windows:
            self.CAPMs.append(self.CAPM)

    def calculate_beta(self):
        # Could be done with linalg: cov_matrix * var.
        # Getting beta as covar/var:
        d = defaultdict(list)
        for index in self.market_indecies:
            var = self.cov_matrix.loc[index, index]
            for tick in self.stock_ticks:
                covar = self.cov_matrix.loc[index, tick]
                d[index] += [covar / var]
        self.beta1 = pd.DataFrame(data=d, index=self.stock_ticks)

    def calculate_regress_params(self):
        # Getting alpha and beta with linear regression;
        # regress the stock on the market so that the slope is beta:
        a = defaultdict(list)
        b = defaultdict(list)
        for market in self.market_indecies:
            for tick in self.stock_ticks:
                slope, intercept, _, _, _ = linregress(self.data_window[market],
                                                       self.data_window[tick])
                a[market] += [intercept]
                b[market] += [slope]
        self.alfa = pd.DataFrame(data=a, index=self.stock_ticks)
        self.beta = pd.DataFrame(data=b, index=self.stock_ticks)
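calculate_beta and calculate_regress_params should agree on beta, since the regression slope of stock on market is exactly cov/var. A quick self-contained check on synthetic returns (all numbers hypothetical):

import numpy as np
from scipy.stats import linregress

rng = np.random.RandomState(0)
market = rng.normal(size=500)                          # fake market returns
stock = 0.8 * market + rng.normal(scale=0.5, size=500) # fake stock returns
beta_cov = np.cov(market, stock)[0, 1] / market.var(ddof=1)
beta_reg = linregress(market, stock)[0]                # slope of stock on market
assert abs(beta_cov - beta_reg) < 1e-9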
    def calculate_expected_market_return(self):
        # Using the plain mean value:
        self.market_returns = self.data_window[self.market_indecies].mean()
        # Scaling to yearly using Euler's number:
        self.market_returns_yr = np.exp(self.market_returns * 12) - 1

    def calculate_exp_return(self):
        # # Using CAPM:
        # self.exp_return = self.risk_free_rate + (self.market_returns - self.risk_free_rate) * self.beta
        # Using the plain mean value:
        self.exp_return = self.data_window[self.stock_ticks].mean()
        self.exp_return_yr = np.exp(self.exp_return * 12) - 1

    def solve_elements_for_plot(self):
        """Operations"""

        def quad_var(W, C):
            # Quadratic expression to calculate portfolio risk.
            return np.sqrt(dot(dot(W.T, C), W))

        def exp_return(W, R):
            # Expected portfolio return.
            return np.dot(W.T, R).sum()

        def exp_return1(W, R, rf):
            return rf + np.dot(W.T, (R - rf)).sum()

        def sharpe_ratio(Fr, Vf, rf):
            return (Fr - rf) / Vf

        def CML(Vf, rf, Sr):
            # risk_free_rate + risk * Sharpe ratio of the market portfolio
            return Vf * Sr + rf

        def qsolve(R, Fr, C):
            """
            R is the vector of expected returns,
            Fr is the range of expected returns on the EFF,
            C is the var-covariance matrix.
            """
            # TODO: add options to short and borrow
            W = R * 0 + 1 / len(R)  # initialise equal percent weights
            # Bounds (inequality constraints):
            b = [(0., 1.) for i in W]  # weights between 0%..100% - no borrowing, no shorting

            def fitness(W, C, r):
                Pv = quad_var(W, C)
                return Pv

            Vf, Wf = [], []
            # Equality constraints:
            h = ({'type': 'eq', 'fun': lambda W: sum(W) - 1.},           # sum of weights = 100%
                 {'type': 'eq', 'fun': lambda W: exp_return(W, R) - r})  # pins the portfolio return to r
            for r in Fr:
                # For a given level of return r, find the weights which minimise portfolio variance.
                optimized = minimize(fitness, W, args=(C, r),
                                     method='SLSQP',  # Sequential Least SQuares Programming
                                     constraints=h, bounds=b)
                X = optimized.x
                Wf.append(X)
                Vx = quad_var(X, C)
                Vf.append(Vx)
            return Vf, Wf

        R = self.exp_return_yr.values
        Fr = np.linspace(min(R), max(R), num=100)
        C = self.cov_matrix.iloc[:-1, :-1].values  # covariance matrix without the market index
        Vf, Wf = qsolve(R, Fr, C)
        rf = self.risk_free_rate
        # Sharpe ratio for the portfolios on the efficient frontier:
        self.EFFsr = sharpe_ratio(Fr, Vf, rf)

        # FRONTIER
        self.frontier_exp_return = Fr  # y axis of the EFF
        self.frontier_risk = Vf        # x axis of the EFF
        # TODO: might be done directly in pandas
        self.frontier_weights = [[round(w * 100, 2) for w in ws] for ws in Wf]

        # MARKET PORTFOLIO
        idxmax = np.argmax(self.EFFsr)            # index of the "market" portfolio
        MPsr = self.EFFsr[idxmax]                 # its Sharpe ratio, i.e. the slope of the CML
        self.Wmp = self.frontier_weights[idxmax]  # weights of the market portfolio
        self.marketPx = Vf[idxmax]                # "market" portfolio x and y
        self.marketPy = Fr[idxmax]

        # MINIMUM RISK PORTFOLIO
        idxmin = self.idxmin = np.argmin(Vf)      # index of the minimum risk portfolio
        self.minriskPx = Vf[idxmin]
        self.minriskPy = Fr[idxmin]

        if self.plot_CML:
            # CAPITAL MARKET LINE
            self.CMLx = np.linspace(0, max(Vf), num=100)
            self.CMLy = [CML(x, rf, MPsr) for x in self.CMLx]

            def qsolve1(CMLy, CMLx, C, R):
                # TODO: make one qsolver with intuitively changeable constraints
                W = R * 0 + 1 / len(R)  # initialise equal percent weights
                rf = self.risk_free_rate
                # Bounds (inequality constraints):
                b = [(0., 2.) for i in W]  # weights between 0%..200% - no shorting

                def fitness(W, x, y, rf, ri, re):
                    Pv = sharpe_ratio(x, y, rf)
                    return -Pv.sum()

                Vf, Wf = [], []
                # Equality constraints:
                h = ({'type': 'eq', 'fun': lambda W: sum(W) - 1.},                 # sum of weights = 100%
                     {'type': 'eq', 'fun': lambda W: quad_var(W, C) - ri},         # pins the portfolio risk to ri
                     {'type': 'eq', 'fun': lambda W: exp_return1(W, R, rf) - re})  # pins the portfolio return to re
                for ri, re in zip(CMLx, CMLy):
                    optimized = minimize(fitness, W, args=(CMLx, CMLy, rf, ri, re),
                                         method='SLSQP',
                                         constraints=h, bounds=b)
                    X = optimized.x
                    Wf.append(X)
                    Vx = quad_var(X, C)
                    Vf.append(Vx)
                return Vf, Wf

            R1 = self.exp_return_yr
            R1["Rf"] = self.risk_free_rate
            R1 = R1.values
            C1 = self.cov_matrix.iloc[:-1, :-1]
            C1["Rf"] = 0.0
            C1.loc["Rf"] = 0.0
            C1 = C1.values
            _, Wcml = qsolve1(self.CMLy, self.CMLx, C1, R1)
            self.CML_weights = [[round(w * 100, 2) for w in ws] for ws in Wcml]

            if self.required_return:
                # DANGER! Mess ahead: the portfolio on the CML with rr as return.
                rr = self.required_return
                risk = (rr - rf) / MPsr
                self.CMLPx = risk
                self.CMLPy = rr
                _, CMLpw = qsolve1(np.array([rr]), np.array([risk]), C1, R1)
                self.CMLpw = [round(w * 100, 2) for w in CMLpw[0]]  # FIX: why the index?
                if self.plot_as_windows:
                    self.CMLpw_weights.append(self.CMLpw)

        if self.plot_simulation:
            def MCsimulation(R, C, rf):
                returns, volatility, ratio = [], [], []
                for single_portfolio in range(self.n_sim):
                    # Squared normals, normalised: non-negative weights summing to one.
                    W = np.random.normal(scale=4, size=len(self.stock_ticks)) ** 2
                    W /= np.sum(W)
                    ret = exp_return(W, R)
                    vol = quad_var(W, C)
                    returns.append(ret)
                    volatility.append(vol)
                    ratio.append(sharpe_ratio(ret, vol, rf))
                self.MCx = volatility
                self.MCy = returns
                self.MCsr = ratio

            MCsimulation(R, C, rf)
            # TODO: plot 100% in one stock for every stock
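qsolve above traces the frontier by re-running an equality-constrained SLSQP solve for each target return. Stripped to a single target on a hypothetical three-asset universe (all numbers made up):

import numpy as np
from scipy.optimize import minimize

C_toy = np.array([[0.04, 0.01, 0.00],
                  [0.01, 0.09, 0.02],
                  [0.00, 0.02, 0.16]])               # made-up covariance matrix
R_toy = np.array([0.05, 0.08, 0.12])                 # made-up expected returns
r_target = 0.09

res = minimize(lambda W: np.sqrt(W @ C_toy @ W),     # portfolio risk
               np.full(3, 1 / 3),                    # start from equal weights
               method='SLSQP',
               bounds=[(0.0, 1.0)] * 3,              # long-only, no borrowing
               constraints=({'type': 'eq', 'fun': lambda W: W.sum() - 1.0},
                            {'type': 'eq', 'fun': lambda W: W @ R_toy - r_target}))
print(res.x.round(3))                                # minimum-risk weights hitting the 9% target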
    def prepare_plot(self):

        def weights_in_text(n):
            if n == "EFF":
                PD = pd.DataFrame(self.frontier_weights, columns=self.stock_names)
                T = [re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", PD.iloc[i].to_string())) + "%"
                     for i in PD.index]
            if n == "EFP":
                T = "Efficient portfolio<br>" + "".join(
                    ["{0}: {1}%<br>".format(name, weight)
                     for name, weight in zip(self.stock_names, self.Wmp)])
            if n == "CML":
                PD = pd.DataFrame(self.CML_weights, columns=self.stock_names + ["Risk-free rate"])
                T = [re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", PD.iloc[i].to_string())) + "%"
                     for i in PD.index]
            if n == "CMLp":
                PD = pd.DataFrame(self.CMLpw, index=self.stock_names + ["Risk-free rate"])
                T = [re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", PD.to_string())) + "%"]
            return T

        def annotations(strings, placements):
            # TODO: better annotations
            annotations = list()
            for s, p in zip(strings, placements):
                d = dict(
                    x=p[0], y=p[1],
                    xref='paper', yref='paper',
                    text=s,
                    showarrow=True,
                    arrowhead=20
                )
                annotations.append(d)
            return annotations

        start = "{0}-{1}-{2}".format(self.start.day, self.start.month, self.start.year)
        end = "{0}-{1}-{2}".format(self.end.day, self.end.month, self.end.year)
        self.name = name = "{0} - {1}".format(start, end)

        data = list()

        EFF = go.Scatter(
            x=self.frontier_risk[self.idxmin:],
            y=self.frontier_exp_return[self.idxmin:],
            mode='markers+lines',
            legendgroup=name if self.stack_windows else None,
            showlegend=True,
            marker=dict(size=5, symbol="circle"),
            # color=[1 for _ in self.frontier_risk[self.idxmin:]], colorscale="Electric"
            text=weights_in_text("EFF")[self.idxmin:],
            name="Efficient frontier:<br>{}".format(name) if not self.stack_windows else name
        )

        EFP = go.Scatter(
            x=[self.marketPx],
            y=[self.marketPy],
            mode='markers',
            legendgroup=name if self.stack_windows else None,
            showlegend=False if self.stack_windows else True,
            marker=dict(size=15, symbol="circle"),  # color=[1], colorscale="Electric"
            name="Market/Efficient portfolio"
        )

        MVP = go.Scatter(
            x=[self.minriskPx],
            y=[self.minriskPy],
            mode="markers",
            legendgroup=name if self.stack_windows else None,
            showlegend=False if self.stack_windows else True,
            marker=dict(size=15, symbol="diamond-x"),  # color=[1], colorscale="Electric"
            name="minimum variance portfolio"
        )

        if self.plot_CML:
            CML = go.Scatter(
                x=self.CMLx,
                y=self.CMLy,
                mode='lines+markers',
                legendgroup=name if self.stack_windows else None,
                text=weights_in_text("CML"),
                name="Capital market line"
                # marker: make the colour outside the efficient frontier's span a different colour
            )
            data.append(CML)

            if self.required_return:
                CMLp = go.Scatter(
                    x=[self.CMLPx],
                    y=[self.CMLPy],
                    mode="markers",
                    legendgroup=name if self.stack_windows else None,
                    text=weights_in_text("CMLp"),
                    name="optimal allocation<br>with required return of {0}%".format(self.required_return * 100),
                    marker=dict(size=15, symbol="diamond-x")
                )
                data.append(CMLp)

        if self.plot_simulation:
            MonteCarlo = go.Scatter(
                x=self.MCx,
                y=self.MCy,
                mode="markers",
                marker=dict(size=6, colorscale="Electric", color=self.MCsr, showscale=True,
                            colorbar=dict(title="Sharpe Ratio", titleside="right")),
                name="MonteCarlo Simulated portfolios"
            )
            data.append(MonteCarlo)

        data += [EFF, EFP, MVP]

        title = "Efficient Frontier"
        if not self.plot_as_windows:
            title = "{0}<br>from {1} to {2}".format(title, start, end)

        self.layout = go.Layout(
            # TODO: better (less hacky) annotations
            annotations=annotations([weights_in_text("EFP")], [(0.2, 0.8)]) if self.annotations else annotations("", (0, 0)),
            legend=dict(
                x=1.2,
                y=1.2,
                traceorder='grouped',
                tracegroupgap=20,
                font=dict(family='sans-serif', size=20, color='#000'),
                bgcolor='#E2E2E2',
                bordercolor='#FFFFFF',
                borderwidth=2
            ),
            title=title,
            showlegend=True,
            font=dict(size=20, color='#000'),
            hovermode='closest',
            yaxis=dict(title="Portfolio Return"),
            xaxis=dict(title="Portfolio Variance"),
            height=1000,
            width=1200,
        )

        self.plot_data += data
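weights_in_text leans on pandas' to_string plus two regex passes to turn a row of weights into "<br>"-separated hover text. The same chain in isolation (toy weights, hypothetical tickers):

import re
import pandas as pd

row = pd.Series([33.3, 33.3, 33.4], index=["AAA", "BBB", "CCC"])
text = re.sub(r'\n', "% <br>", re.sub(r'[ ]+', " ", row.to_string())) + "%"
print(text)  # AAA 33.3% <br>BBB 33.3% <br>CCC 33.4%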
    def execute_plot(self):
        fig = go.Figure(data=self.plot_data, layout=self.layout)
        if self.online:
            plotly.tools.set_credentials_file(username="TheVizWiz", api_key="92x5KNp4VDPBDGNtLR2l")
            py.plot(fig, filename='efficent_frontier')
        if not self.online:
            name = self.name_of_data + self.name
            plot_url = offline.plot(fig, image='png', auto_open=self.auto_open,
                                    image_filename=name, output_type='file',
                                    image_width=1200, image_height=1000,
                                    filename="figures/{0}.html".format(name))
            # TODO: run some sys call to create the figures folder
        self.plot_data = list()  # clear the plot data once the plot is made
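execute_plot targets the plotly 2.x/3.x API (plotly.plotly for the hosted service, plotly.offline for a local HTML file). The offline path in isolation, with arbitrary toy data and file name; note the figures/ directory used above must already exist, which is what the TODO is about:

import plotly.graph_objs as go
import plotly.offline as offline

trace = go.Scatter(x=[0.10, 0.15, 0.22], y=[0.05, 0.08, 0.12],
                   mode='markers+lines', name='toy frontier')
layout = go.Layout(title='toy plot',
                   xaxis=dict(title='Portfolio Variance'),
                   yaxis=dict(title='Portfolio Return'))
offline.plot(go.Figure(data=[trace], layout=layout),
             filename='toy.html', auto_open=False)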
    def with_moving_windows(self, operation):
        def func_wrapper():
            time = self.end - self.start
            # self.absolute_start = self.start
            # self.absolute_end = self.end
            window = datetime.timedelta(days=self.window_size)
            window_m = datetime.timedelta(days=self.window_move)
            while time - window >= datetime.timedelta(1):
                self.end = self.start + window
                operation()
                self.start = self.start + window_m
                time -= window_m
        return func_wrapper

    def prepare_data(self):
        self.get_monthly_data()
        self.calculate_log_change()

    def analyze_data(self):
        self.calculate_covariance_and_var()
        self.calculate_expected_market_return()
        self.calculate_beta()
        self.calculate_regress_params()
        self.calculate_exp_return()
        self.solve_elements_for_plot()
        self.CAPM_prediction()

    def run_backtest(self):
        # Cross-validation of the model (WARNING: NOT PRETTY - gaffa-taped in at the last moment).
        # TODO: can't run backtest after run_pack - why? fix!
        self.window_size = 365
        self.window_move = 365
        self.market_portfolios = list()
        self.expected_portfolio_returns = list()
        self.prepare_data()

        def one_window():
            self.assign_data_window("windows")
            self.analyze_data()
            self.expected_portfolio_returns.append(self.exp_return_yr)
            self.assign_data_window("backtest_weights")
            self.analyze_data()
            self.market_portfolios.append(self.Wmp)

        self.with_moving_windows(one_window)()
        self.backtest_results = [(i * x).sum() for i, x in
                                 zip(self.expected_portfolio_returns, self.market_portfolios)]

    def run_pack(self):
        self.plot_data = list()
        self.prepare_data()

        def one_window():
            self.assign_data_window("windows")
            self.analyze_data()
            self.prepare_plot()
            if not self.stack_windows:
                self.execute_plot()

        if self.plot_as_windows:
            self.with_moving_windows(one_window)()
            if self.stack_windows:
                self.execute_plot()
        else:
            one_window()


if __name__ == '__main__':
    CP = Calcualtion_pack(
        stock_ticks=["WIKI/AAPL", "WIKI/ABC", "WIKI/AGN", "WIKI/ADP",
                     "WIKI/ADSK", "WIKI/IBM", "WIKI/GE"],
        stock_names="APL ABC AGN ADP ADSK IBM GE".split(),
        # stock_ticks=["NASDAQOMX/NQDK4000DKK", "NASDAQOMX/NQDE", "NASDAQOMX/NQJP2000JPY",
        #              "NASDAQOMX/NQHK2000HKD", "NASDAQOMX/NQGB", "NASDAQOMX/NQSE",
        #              "NASDAQOMX/NQFI"],
        market_indecies=["GOOGL"],
        start=datetime.datetime(1999, 1, 1),
        end=datetime.datetime(2018, 1, 1),
        risk_free_rate=0.03,
        source="pickle",
        name_of_data="USA",
        n_sim=10000,
        # online=True,
        # window_size=3650,
        # window_move=365,
        # stack_windows=True,
        annotations=True,
        auto_open=True,
        required_return=0.177
    )
    CP.run_pack()
    # CP
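with_moving_windows is a closure over the instance's mutable start/end: each call of the wrapper walks the window forward until it no longer fits. The same pattern in isolation (hypothetical class, arbitrary dates and window sizes):

import datetime

class Windows:
    def __init__(self, start, end, size_days, move_days):
        self.start, self.end = start, end
        self.size = datetime.timedelta(days=size_days)
        self.move = datetime.timedelta(days=move_days)

    def with_moving_windows(self, operation):
        def func_wrapper():
            remaining = self.end - self.start
            while remaining - self.size >= datetime.timedelta(1):
                self.end = self.start + self.size    # shrink end to the window edge
                operation()
                self.start = self.start + self.move  # slide the window forward
                remaining -= self.move
        return func_wrapper

w = Windows(datetime.datetime(2000, 1, 1), datetime.datetime(2004, 1, 1), 365, 365)
w.with_moving_windows(lambda: print(w.start.date(), '->', w.end.date()))()
# Prints four year-long windows. Note that start/end are mutated in place and never
# restored, which may be why the TODO above notes that run_backtest misbehaves after run_pack.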
my.js
var yt = yt || {};

// lazy-load images
yt.loadImg = function ($imgs, time) {
    var _time = 0;
    time = time || 200;
    $imgs.each(function () {
        var $that = $(this);
        if ($that.data('hasload')) {
            return false;
        }
        setTimeout(function () {
            $that.fadeOut(0);
            $that.attr('src', $that.data('src'));
            $that.attr('data-hasload', 'true');
            $that.fadeIn(500);
        }, _time);
        _time += time;
    });
};

// detect a mobile (WAP) environment
yt.isWap = function () {
    var s = navigator.userAgent.toLowerCase();
    var ipad = s.match(/ipad/i) == "ipad"
        , iphone = s.match(/iphone os/i) == "iphone os"
        , midp = s.match(/midp/i) == "midp"
        , uc7 = s.match(/rv:1.2.3.4/i) == "rv:1.2.3.4"
        , uc = s.match(/ucweb/i) == "ucweb"
        , android = s.match(/android/i) == "android"
        , ce = s.match(/windows ce/i) == "windows ce"
        , wm = s.match(/windows mobile/i) == "windows mobile";
    if (iphone || midp || uc7 || uc || android || ce || wm || ipad) {
        return true;
    }
    return false;
};

// swiper bindings
yt.app = function () {
    var $swiperContainer = $("#swiper-container1"),
        $pages = $("#wrapper").children(),
        $as = $("#nav li a"),
        $lis = $("#nav li"),
        $win = $(window),
        slideCount = $pages.length,
        nowIndex = 0,
        acn = "animation",
        mySwiper;
    var params = {
        selectorClassName: "swiper-container",
        animationClassName: acn,
        animationElm: $("." + acn)
    };
    var setCssText = function (prop, value) {
        return prop + ': ' + value + '; ';
    };
    /*
     * insertCss(rule)
     * Appends a css rule to the bottom of the document <head>.
     * rule: the css text to insert.
     */
    var insertCss = function (rule) {
        var head = document.head || document.getElementsByTagName('head')[0],
            style;
        if (!!head.getElementsByTagName('style').length) {
            style = head.getElementsByTagName('style')[0];
            if (style.styleSheet) {
                style.styleSheet.cssText = rule;
            } else {
                style.innerHTML = '';
                style.appendChild(document.createTextNode(rule));
            }
        } else {
            style = document.createElement('style');
            style.type = 'text/css';
            if (style.styleSheet) {
                style.styleSheet.cssText = rule;
            } else {
                style.appendChild(document.createTextNode(rule));
            }
            head.appendChild(style);
        }
    };
    var setAnimationStyle = function () {
        var cssText = '';
        cssText += '.' + params.animationClassName + '{'
            + setCssText('display', 'none')
            + '}'
            + '.touchstart .' + params.animationClassName + '{'
            + setCssText('-webkit-animation-duration', '0 !important')
            + setCssText('-webkit-animation-delay', '0 !important')
            + setCssText('-webkit-animation-iteration-count', '1 !important')
            + '}';
        var index = mySwiper.activeIndex,
            _index = index + 1,
            $ans = $pages.eq(index).find('.' + params.animationClassName);
        $ans.each(function () {
            var obj = $(this);
            var _className = obj.attr('data-item'),
                _animation = obj.attr('data-animation'),
                _duration = ((obj.attr('data-duration') / 1000) || 1) + 's',
                _timing = obj.attr('data-timing-function') || 'ease',
                _delay = ((obj.attr('data-delay') || 0) / 1000) + 's',
                _count = obj.attr('data-iteration-count') || 1;
            var _t = '.' + params.selectorClassName + ' .page-' + _index + ' .' + _className;
            cssText += _t + '{'
                + setCssText('display', 'block !important')
                + setCssText('-webkit-animation-name', _animation)
                + setCssText('-webkit-animation-duration', _duration)
                + setCssText('-webkit-animation-timing-function', _timing)
                + setCssText('-webkit-animation-delay', _delay)
                + setCssText('-webkit-animation-fill-mode', 'both')
                + setCssText('-webkit-animation-iteration-count', _count)
                + '}';
        });
        return cssText;
    };
    // apply the animations
    var setAms = function () {
        insertCss(setAnimationStyle());
    };
    // set up the layout
    var setLayout = function () {
        var $wrapers = $("#swiper-container1 .wraper"),
            $wraper1 = $("#wraper1"),
            isWap = yt.isWap(),
            w = 720,
            h = 1135;
        var sl = function () {
            var _w = $wraper1.width(),
                h = $win.height(),
                _h = isWap && _w < h ? $win.height() : _w * 1135 / 720;
            $wrapers.height(_h);
            if ($win.height() < 300) {
                $(".cn-slidetips").hide();
            } else {
                $(".cn-slidetips").show();
            }
        };
        sl();
        $win.resize(sl);
    };
    // slide-change handler
    var onSlideChangeTime = 0;
    var onSlideChange = function () {
        if (onSlideChangeTime > 1) {
            return;
        }
        var index = mySwiper.activeIndex;
        if (nowIndex == index && mySwiper.touches['abs'] < 50) {
            return;
        }
        onSlideChangeTime = 20;
        setAms();
        nowIndex = index || 0;
        //history.pushState(null, null, "index.html?p=" + (nowIndex + 1));
        // run the animation countdown
        var timer = setInterval(function () {
            onSlideChangeTime -= 1;
            if (onSlideChangeTime == 0) {
                clearInterval(timer);
            }
        }, 1);
    };
    // touch-end handler
    var onTouchEnd = function () {
        var index = mySwiper.index;
        if (nowIndex == slideCount - 1 && +mySwiper.touches['diff'] < -50) {
            return mySwiper.swipeTo(0);
        }
    };
    // slide-change-end handler
    var onSlideChangeEnd = function () {
        if (mySwiper.activeIndex == 9) {
            $("#ewm").show();
        } else {
            $("#ewm").hide();
        }
        $(".swiper-slide").eq(mySwiper.activeIndex).find("img").each(function () {
            if ($(this).attr("date-src")) {
                $(this).attr("src", $(this).attr("date-src"));
            }
        });
        onSlideChange();
    };
    // bind the main swiper
    var bindSwiper = function () {
        mySwiper = $swiperContainer.swiper({
            onTouchEnd: onTouchEnd,
            onSlideChangeEnd: onSlideChangeEnd,
            //mousewheelControl:true,
            mode: 'vertical'
        });
    };
    // scroll to the next screen
    var bindNext = function () {
        $(".next").on("click", function () {
            mySwiper.activeIndex = mySwiper.activeIndex || 0;
            var index = mySwiper.activeIndex == slideCount - 1 ? 0 : (mySwiper.activeIndex || 0) + 1;
            mySwiper.swipeTo(index);
        });
    };
    // initialise
    bindSwiper();
    bindNext();
    setLayout();
    setAms();
};

// initialise
yt.init = function () {
    window.onload = function () {
        $("#loading").hide();
        setTimeout(yt.app);
        // set the number of available draws
        $("#count").val(2);
        $("#zdxz").val(1);
    };
};
yt.init();

/***************************************/

// swap the prize image
function jpqh(type) {
    if (type == 'left') {
        if ($('.jp img').attr('zdy') == 'jp1') {
            $('.jp img').attr('src', 'images/jp2.png?vid=1.0');
            $('.jp img').attr('zdy', 'jp2');
        } else {
            $('.jp img').attr('src', 'images/jp1.png');
            $('.jp img').attr('zdy', 'jp1');
        }
    } else {
        if ($('.jp img').attr('zdy') == 'jp1') {
            $('.jp img').attr('src', 'images/jp2.png?vid=1.0');
            $('.jp img').attr('zdy', 'jp2');
        } else {
            $('.jp img').attr('src', 'images/jp1.png');
            $('.jp img').attr('zdy', 'jp1');
        }
    }
}

// toggle between the rules tab and the prize tab
function qie(obj) {
    if ($(obj).text() == '活动规则') {
        // activity rules
        $("#jpgz").css('display', 'inline');
        $("#jpzs").css('display', 'none');
        $('#my2').css('background-color', '#e1e1e1');
        $('#my1').css('background-color', '#ffffff');
    } else {
        // prize display
        $("#jpgz").css('display', 'none');
        $("#jpzs").css('display', 'inline');
        $('#my1').css('background-color', '#e1e1e1');
        $('#my2').css('background-color', '#ffffff');
    }
}

// all draws used up
function cs() {
    var _tan = $(".tan");
    var _bg = $(".bg");
    _bg.fadeIn();
    _tan.fadeIn();
    _bg.click(function () {
        _bg.fadeOut();
        _tan.fadeOut();
    });
}

$('#fximg').click(function () {
    var _fxbg = $('.fxbg');
    var _tan = $(".tan");
    var _bg = $(".bg");
    _fxbg.fadeIn();
    _bg.fadeOut();
    _tan.fadeOut();
});

// share succeeded
function fxsuccess() {
    var _fxbg = $(".fxbg");
    _fxbg.fadeIn();
}

$("#zp").click(function () {
    var type = 0;
    var count = $("#count").val();
    if ($("#zdxz").val() == 0) {
        // alert($("#zdxz").val());
        return false;
    }
    var endTag = true;
    // activity ended
    //tanprd('<p>活动结束后</p><p>会有专人通知您的了!</p>');
    // if(count<1){
    //     cs('');
    //     return;
    // }
    $.ajax({
        async: false,
        url: 'server.php',
        data: {act: 'start'},
        type: 'post',
        dataType: 'json',
        success: function (result) {
            if (result.errcode != 0) {
                if (result.errcode == 1003) {
                    cs();
                } else {
                    common('<p>系统提示</p>', '<p>' + result.errmsg + '</p>');
                }
                endTag = false;
            } else {
                type = result.prize;
            }
        }
    });
    if (!endTag) {
        return false;
    }
    switch (type) {
        case 1:
            rotateFunc(1, 0, '箭牌智能坐便器AKB1130');
            count--;
            break;
        case 2:
            rotateFunc(0, 60, '谢谢参与');
            count--;
            break;
        case 3:
            rotateFunc(2, 120, '箭牌卫浴智能马桶盖AK1002');
            count--;
            break;
        case 4:
            rotateFunc(0, 180, '');
            count--;
            break;
        case 5:
            rotateFunc(3, 240, '箭牌卫浴品牌优质毛巾');
            count--;
            break;
        default:
            rotateFunc(0, 300, '');
            count--;
    }
    $("#count").val(count);
    $("#zdxz").val(0);
});

// awards: prize id; angle: the wheel angle for that prize
var rotateFunc = function (awards, angle, text) {
    var _zdpic = $('#zd');
    _zdpic.stopRotate();
    _zdpic.rotate({
        angle: 0,
        duration: 4000,
        // angle is each prize's position on the wheel image; the added 1800
        // degrees make the pointer spin five full turns before landing
        animateTo: angle + 1800,
        callback: function () {
            tanprd(awards);
        }
    });
};

setInterval(function () {
    $("#zdxz").val(1);
}, 8000);

function tanprd(type) {
    var _title = $("#tanprd2 .product-tilte");
    var _con = $("#tanprd2 .product-con");
    var _btn = $("#tanprd2 .product-btn");
    //var _countcon = $("#tanprd2 .product-count");
    //var count = 1; // remaining draw count
    var _product = $('#tanprd2');
    var _bg = $(".bg");
    switch (type) {
        case 1:
            _product.fadeIn();
            _bg.fadeIn();
            _title.html('<p>恭喜您!</p>');
            _con.html('<p>获得了特等奖</p><p>箭牌智能坐便器</p><p>AKB1130!</p>');
            _btn.html('<img src="images/btn3.png" width="50%" onclick="message(\'1\')">');
            //_countcon.html('<p>您还剩有<font>' + count + '</font>次抽奖机会!</p>');
            break;
        case 2:
            _product.fadeIn();
            _bg.fadeIn();
            _title.html('<p>恭喜您!</p>');
            _con.html('<p>获得了优秀奖</p><p>箭牌卫浴智能马桶盖</p><p>AK1002!</p>');
            _btn.html('<img src="images/btn3.png" width="50%" onclick="message(\'2\')">');
            //_countcon.html('<p>您还剩有<font>' + count + '</font>次抽奖机会!</p>');
            break;
        case 3:
            _product.fadeIn();
            _bg.fadeIn();
            _title.html('<p>恭喜您!</p>');
            _con.html('<p>获得了参与奖</p><p>箭牌卫浴品牌优质毛巾</p>');
            _btn.html('<img src="images/btn3.png" width="50%" onclick="message(\'3\')">');
            //_countcon.html('<p>您还剩有<font>' + count + '</font>次抽奖机会!</p>');
            break;
        case 0:
            cs();
            // _title.html('<p>很遗憾。</p>');
            // _con.html('<p>感谢您的参与,下次中</p><p>奖机会一定属于您!</p>');
            // _btn.html('<a href="index.php"><img src="images/btn4.png" width="50%"></a>');
            // _bg.click(function(){ _bg.fadeOut(); _product.fadeOut(); });
            break;
        default:
    }
}

// generic popup
function common(title, text) {
    var _title = $("#tanprd .product-tilte");
    var _con = $("#tanprd .product-con");
    var _btn = $("#tanprd .product-btn");
    var _product = $('#tanprd');
    var _bg = $(".bg");
    _product.fadeIn();
    _bg.fadeIn();
    _title.html(title);
    _con.html(text);
    _btn.html('<a id="combtn"><img src="images/btn2.png" width="50%"></a>');
    // _bg.click(function(){ _bg.fadeOut(); _product.fadeOut(); });
    $("#combtn").click(function () {
        _bg.fadeOut();
        _product.fadeOut();
    });
}

// fill in personal details
function message(type) {
    var _type = type; // prize type
    var _msg = $('#messageid');
    var _bg = $(".bg");
    var _product = $('#tanprd2');
    _product.fadeOut();
    _msg.fadeIn();
    _bg.fadeIn();
}

function zh(obj) {
    var _msg = $('#messageid');
    // var _product = $('#tanprd');
    // var _bg = $(".bg");
    var errTag = true;
    var name = $("#name").val();
    var phone = $("#phone").val();
    var address = $("#address").val();
    console.log(name);
    $.ajax({
        async: false,
        url: 'server.php',
        data: {act: 'addinfo', name: name, phone: phone, address: address},
        type: 'post',
        dataType: 'json',
        success: function (result) {
            if (result.errcode != 0) {
                alert(result.errmsg);
                errTag = false;
            } else {
                return false;
            }
        }
    });
    if (!errTag) {
        return false;
    }
    _msg.fadeOut();
    //_product.fadeIn();
    common('<p>谢谢您参与!</p>', '<p>活动结束后</p><p>会有专人通知您的了!</p>');
    // $("#end").click(function(){ _bg.fadeOut(); _product.fadeOut(); });
}

function producttag(type) {
    var _bg = $(".bg");
    var _tag = $("#tag");
    _bg.fadeIn();
    _tag.fadeIn();
    var _tagpic = $("#tag .prd-tag-pic");
    var _tagtitle = $("#tag .prd-tag-title");
    var _tagcon = $("#tag .prd-tag-con");
    var _tagtype = $("#tag .prd-tag-type");
    var _close = $("#tag .prd-tag-close");
    switch (type) {
        case '1':
            _tagpic.find('img').attr('src', 'images/product1-1.png');
            _tagtitle.html('<p>3D奈丽实木浴室柜</p>');
            _tagcon.html('<p>箭牌卫浴将3D奈丽一体成形技术应用与浴室柜,突破传统的制造技术,是产品造型更加优美、性能更加稳定同时简约流畅的线条又极高提升了浴室柜的空间装饰价值,典雅高尚。</p>');
            _tagtype.html('<p><span class="prd-tag-bg">材质</span> 3D奈丽板</p>\
                <p><span class="prd-tag-bg">型号</span> APGM10L4136-B</p>\
                <p><span class="prd-tag-bg">规格</span> 盆尺寸:702*409*198mm</p>\
                <p class="prd-tag-sj">主柜尺寸:1010*545*820mm</p> <p class="prd-tag-sj">镜柜尺寸:950*160*650mm</p>');
            // $('.prd-tag-up').css('display','none');
            // $('.prd-tag-down').css('display','none');
            break;
        case '2':
            _tagpic.find('img').attr('src', 'images/product1-2.png');
            _tagtitle.html('<p>气泡按摩浴缸</p>');
            _tagcon.html('<p>经过整整一天紧张的工作后,每个人都应该在一个舒适浴室中让自己回到从容,得到抚慰。韵.格雅系列包括了高质量的多功能浴室家具和令人难以割舍的无缝对接浴缸等多种产品,完全可以满足你的全方位需要。</p>');
            _tagtype.html('<p><span class="prd-tag-bg">材质</span> 压克力</p>\
                <p><span class="prd-tag-bg">型号</span> AQ1601UQ</p>\
                <p><span class="prd-tag-bg">规格</span> L1600*W800*H680mm</p>');
            // $('.prd-tag-up').css('display','none');
            // $('.prd-tag-down').css('display','none');
            break;
        case '3':
            _tagpic.find('img').attr('src', 'images/product1-3.png');
            _tagtitle.html('<p>喷射虹吸式连体坐便器</p>');
            _tagcon.html('<p>韵.格雅系列将纯粹的几何美学发挥得淋漓尽致,所有角落均变得柔滑,反映它蕴藏的美学思想。陶瓷产品超薄盖板、环保节水尖端技术的应用,使其功能品质也得到了最佳呈现。</p>');
            _tagtype.html('<p><span class="prd-tag-bg">材质</span> 陶瓷</p>\
                <p><span class="prd-tag-bg">型号</span> AB1170</p>\
                <p><span class="prd-tag-bg">规格</span> L686*W370*H730mm</p>\
                <p class="prd-tag-sj">坑距280/390mm</p>');
            // $('.prd-tag-up').css('display','none');
            // $('.prd-tag-down').css('display','none');
            break;
        case '4':
            _tagpic.find('img').attr('src', 'images/product2-1.png');
            _tagtitle.html('<p>3D奈丽实木浴室柜(诺曼系列)</p>');
            _tagcon.html('<p><div class="prd-tag-yuan">1</div>产品时尚而又极具个性,可与任何现代家居装修风格搭配;诺曼系列采用突破传统的一体柜脚设计,产品整体设计一气呵成,直指追求时尚个性的内心,时尚的配色与经典立体花纹相得益彰,点缀经典款镶钻拉手,诺曼系列就是要带给大家华丽、个性、永恒的时尚;</p>\
                <p><div class="prd-tag-yuan">2</div>产品采用箭牌全新的3D奈丽一体成形技术,呈现完美而又充满个性的外观;</p>\
                <p><div class="prd-tag-yuan">3</div>进口全橡胶实木为基材,环保油漆工艺技术;</p>\
                <p><div class="prd-tag-yuan">4</div>精致的立体花纹面,纯手工打磨,保证产品的经典外观,价值非凡;</p>\
                <p><div class="prd-tag-yuan">5</div>配套柜盆、镜柜,置物功能丰富;</p>');
            _tagtype.html('<p><span class="prd-tag-bg">材质</span> 3D奈丽板</p>\
                <p><span class="prd-tag-bg">型号</span> APGM8L3218-B</p>\
                <p><span class="prd-tag-bg">规格</span> 柜盆:805x502x188mm</p>\
                <p class="prd-tag-sj">主柜:780*490*780mm</p>\
                <p class="prd-tag-sj">镜柜:790*180*680mm</p>');
            // $('.prd-tag-up').css('display','inline');
            // $('.prd-tag-down').css('display','inline');
            break;
        case '5':
            _tagpic.find('img').attr('src', 'images/product2-2.png');
            _tagtitle.html('<p>气泡按摩浴缸</p>');
            _tagcon.html('<p><div class="prd-tag-yuan">1</div>一体化全裙边设计;</p>\
                <p><div class="prd-tag-yuan">2</div>简约溢水孔设计;</p>\
                <p><div class="prd-tag-yuan">3</div>直线,亦可与墙面无缝贴近,无需要定制左右裙;</p>\
                <p><div class="prd-tag-yuan">4</div>弧度充当靠垫,躺着舒适,人性化设计</p>');
            _tagtype.html('<p><span class="prd-tag-bg">材质</span> 压克力</p>\
                <p><span class="prd-tag-bg">型号</span> AQ16807TQ</p>\
                <p><span class="prd-tag-bg">规格</span> L1600xW800xH700mm</p>');
            // $('.prd-tag-up').css('display','none');
            // $('.prd-tag-down').css('display','none');
            break;
        case '6':
            _tagpic.find('img').attr('src', 'images/product2-3.png');
            _tagtitle.html('<p>智能座便器</p>');
            _tagcon.html('<p>舒乐一体超感坐便器,以时尚为创意远点,宛若一朵绽放于艺术上的鲜花。它简约圆滑的设计,勾勒出优雅曼妙的外形,配先进金的洁体科技,以一体化的人性方式呈现出人性科技与尖端设计的完美融合。</p>\
                <p><div class="prd-tag-yuan">1</div>无水箱设计,超薄盖板,产品小尺寸设计、更加整体时尚;</p>\
                <p><div class="prd-tag-yuan">2</div>即热式加热,使用更方便;</p>\
                <p><div class="prd-tag-yuan">3</div>不锈钢喷枪,三档可调;</p>\
                <p><div class="prd-tag-yuan">4</div>内部模块化设计,不同的款式内部结构、模块全部通用,方便维护;</p>\
                <p><div class="prd-tag-yuan">5</div>低水压下冲洗也非常干净,并具备防虹吸功能;</p>\
                <p><div class="prd-tag-yuan">6</div>妇洗、洗便、座圈加热、烘干、除臭、节能、遥控器功能;</p>');
            _tagtype.html('<p><span class="prd-tag-bg">材质</span> 陶瓷</p>\
                <p><span class="prd-tag-bg">型号</span> AKB1130</p>\
                <p><span class="prd-tag-bg">规格</span> 尺寸:L600xW404xH533mm</p>\
                <p class="prd-tag-sj">坑距:290/390mm</p>');
            // $('.prd-tag-up').css('display','inline');
            // $('.prd-tag-down').css('display','inline');
            break;
        default:
    }
    _close.click(function () {
        _bg.fadeOut();
        _tag.fadeOut();
    });
}
random_line_split
my.js
var yt = yt || {}; //加载图片 yt.loadImg = function ($imgs, time) { var _time = 0; time = time || 200; $imgs.each(function () { var $that = $(this); if ($that.data('hasload')) { return false; } setTimeout(function () { $that.fadeOut(0); $that.attr('src', $that.data('src')); $that.attr('data-hasload', 'true'); $that.fadeIn(500); }, _time); _time += time; }); }; //wap端环境 yt.isWap = function () { var s = navigator.userAgent.toLowerCase(); var ipad = s.match(/ipad/i) == "ipad" , iphone = s.match(/iphone os/i) == "iphone os" , midp = s.match(/midp/i) == "midp" , uc7 = s.match(/rv:1.2.3.4/i) == "rv:1.2.3.4" , uc = s.match(/ucweb/i) == "ucweb" , android = s.match(/android/i) == "android" , ce = s.match(/windows ce/i) == "windows ce" , wm = s.match(/windows mobile/i) == "windows mobile"; if (iphone || midp || uc7 || uc || android || ce || wm || ipad) { return true;
lse; }; //滑动绑定 yt.app = function () { var $swiperContainer = $("#swiper-container1"), $pages = $("#wrapper").children(), $as = $("#nav li a"), $lis = $("#nav li"), $win =$(window), slideCount = $pages.length, nowIndex = 0, acn = "animation", mySwiper; var params = { selectorClassName: "swiper-container", animationClassName: acn, animationElm: $("." + acn) }; var setCssText = function (prop, value) { return prop + ': ' + value + '; '; }; /* * insertCss(rule) * 向文档<head>底部插入css rule操作 * rule: 传入的css text * */ var insertCss = function (rule) { var head = document.head || document.getElementsByTagName('head')[0], style; if (!!head.getElementsByTagName('style').length) { style = head.getElementsByTagName('style')[0]; if (style.styleSheet) { style.styleSheet.cssText = rule; } else { style.innerHTML = ''; style.appendChild(document.createTextNode(rule)); } } else { style = document.createElement('style'); style.type = 'text/css'; if (style.styleSheet) { style.styleSheet.cssText = rule; } else { style.appendChild(document.createTextNode(rule)); } head.appendChild(style); } }; var setAnimationStyle=function() { var cssText = ''; cssText += '.' + params.animationClassName + '{' + setCssText('display', 'none') + '}' + '.touchstart .' + params.animationClassName + '{' + setCssText('-webkit-animation-duration', '0 !important') + setCssText('-webkit-animation-delay', '0 !important') + setCssText('-webkit-animation-iteration-count', '1 !important') + '}'; var index = mySwiper.activeIndex, _index = index + 1, $ans = $pages.eq(index).find('.' + params.animationClassName); $ans.each(function () { var obj = $(this); _className = obj.attr('data-item'), _animation = obj.attr('data-animation'), _duration = ((obj.attr('data-duration') / 1000) || 1) + 's', _timing = obj.attr('data-timing-function') || 'ease', _delay = ((obj.attr('data-delay') || 0) / 1000) + 's', _count = obj.attr('data-iteration-count') || 1; var _t = '.' + params.selectorClassName + ' .page-' + _index + ' .' 
+ _className; cssText += _t + '{' + setCssText('display', 'block !important') + setCssText('-webkit-animation-name', _animation) + setCssText('-webkit-animation-duration', _duration) + setCssText('-webkit-animation-timing-function', _timing) + setCssText('-webkit-animation-delay', _delay) + setCssText('-webkit-animation-fill-mode', 'both') + setCssText('-webkit-animation-iteration-count', _count) + '}'; }); return cssText; }; //设置动画 var setAms = function () { insertCss(setAnimationStyle()); }; //设置布局 var setLayout = function () { var $wrapers = $("#swiper-container1 .wraper"), $wraper1 = $("#wraper1"), isWap=yt.isWap(), w = 720, h = 1135; var sl = function () { var _w = $wraper1.width(), h = $win.height(), _h = isWap && _w<h?$win.height():_w * 1135 / 720; $wrapers.height(_h); if($win.height()<300){ $(".cn-slidetips").hide(); }else{ $(".cn-slidetips").show(); } }; sl(); $win.resize(sl); }; //滑动绑定函数 var onSlideChangeTime = 0; var onSlideChange = function () { if (onSlideChangeTime>1) { return; } var index = mySwiper.activeIndex; if (nowIndex == index && mySwiper.touches['abs'] < 50) { return; } onSlideChangeTime = 20; setAms(); nowIndex = index || 0; //history.pushState(null, null, "index.html?p=" + (nowIndex + 1)); //执行动画 var timer=setInterval(function () { onSlideChangeTime -= 1; if (onSlideChangeTime == 0) { clearInterval(timer); } },1); }; //触摸结束绑定 var onTouchEnd = function () { var index = mySwiper.index; if (nowIndex == slideCount-1 && +mySwiper.touches['diff'] <-50) { return mySwiper.swipeTo(0); } }; //滑动结束绑定 var onSlideChangeEnd = function () { if(mySwiper.activeIndex==9){ $("#ewm").show(); }else{ $("#ewm").hide(); } $(".swiper-slide").eq(mySwiper.activeIndex).find("img").each(function(){ if($(this).attr("date-src")){ $(this).attr("src",$(this).attr("date-src")); } }); onSlideChange(); }; //绑定滑动主函数 var bindSwiper = function () { mySwiper = $swiperContainer.swiper({ onTouchEnd: onTouchEnd, onSlideChangeEnd: onSlideChangeEnd, //mousewheelControl:true, mode: 'vertical' }); }; //滚到下一个屏 var bindNext = function () { $(".next").on("click", function () { mySwiper.activeIndex = mySwiper.activeIndex || 0; var index = mySwiper.activeIndex == slideCount - 1 ? 
0 : (mySwiper.activeIndex||0) + 1; mySwiper.swipeTo(index); }); }; //初始化 bindSwiper(); bindNext(); setLayout(); setAms(); }; //初始化 yt.init = function () { window.onload = function () { $("#loading").hide(); setTimeout(yt.app); //设置可抽奖次数 $("#count").val(2); $("#zdxz").val(1); }; }; yt.init(); /***************************************/ function jpqh(type){ if(type=='left'){ if($('.jp img').attr('zdy')=='jp1') { $('.jp img').attr('src','images/jp2.png?vid=1.0'); $('.jp img').attr('zdy','jp2'); } else{ $('.jp img').attr('src','images/jp1.png'); $('.jp img').attr('zdy','jp1'); } } else{ if($('.jp img').attr('zdy')=='jp1') { $('.jp img').attr('src','images/jp2.png?vid=1.0'); $('.jp img').attr('zdy','jp2'); } else{ $('.jp img').attr('src','images/jp1.png'); $('.jp img').attr('zdy','jp1'); } } } function qie(obj){ if($(obj).text()=='活动规则'){ //活动规则: $("#jpgz").css('display','inline'); $("#jpzs").css('display','none'); $('#my2').css('background-color','#e1e1e1'); $('#my1').css('background-color','#ffffff'); } else{ //奖品展示 $("#jpgz").css('display','none'); $("#jpzs").css('display','inline'); $('#my1').css('background-color','#e1e1e1'); $('#my2').css('background-color','#ffffff'); } } // 次数用完 function cs(){ var _tan=$(".tan"); var _bg = $(".bg"); _bg.fadeIn(); _tan.fadeIn(); _bg.click(function(){ _bg.fadeOut(); _tan.fadeOut(); }); } $('#fximg').click(function(){ var _fxbg=$('.fxbg'); var _tan=$(".tan"); var _bg = $(".bg"); _fxbg.fadeIn(); _bg.fadeOut(); _tan.fadeOut(); }) //分享成功 function fxsuccess(){ var _fxbg=$(".fxbg"); _fxbg.fadeIn(); } $("#zp").click(function(){ var type=0; var count=$("#count").val(); if($("#zdxz").val()==0){ // alert($("#zdxz").val()); return false; } var endTag = true; //活动结束 //tanprd('<p>活动结束后</p><p>会有专人通知您的了!</p>'); // if(count<1){ // cs(''); // return; // } $.ajax({ async:false, url:'server.php', data:{act:'start'}, type:'post', dataType:'json', success:function(result){ if(result.errcode!=0) { if(result.errcode == 1003) { cs(); } else { common('<p>系统提示</p>','<p>'+result.errmsg+'</p>'); } endTag = false; } else { type=result.prize; } } }); if(!endTag) { return false; } switch (type) { case 1: rotateFunc(1,0,'箭牌智能坐便器AKB1130'); count--; break; case 2: rotateFunc(0,60,'谢谢参与'); count--; break; case 3: rotateFunc(2,120,'箭牌卫浴智能马桶盖AK1002'); count--; break; case 4: rotateFunc(0,180,''); count--; break; case 5: rotateFunc(3,240,'箭牌卫浴品牌优质毛巾'); count--; break; default: rotateFunc(0,300,''); count--; } $("#count").val(count); $("#zdxz").val(0); }); var rotateFunc = function(awards,angle,text){ //awards:奖项,angle:奖项对应的角度 var _zdpic=$('#zd'); _zdpic.stopRotate(); _zdpic.rotate({ angle: 0, duration: 4000, animateTo: angle + 1800, //angle是图片上各奖项对应的角度,1440是让指针固定旋转4圈 callback: function(){ tanprd(awards); } }); }; setInterval(function(){ $("#zdxz").val(1); },8000); function tanprd(type){ var _title=$("#tanprd2 .product-tilte"); var _con=$("#tanprd2 .product-con"); var _btn=$("#tanprd2 .product-btn"); //var _countcon=$("#tanprd2 .product-count"); //var count=1;//剩余抽奖次数 var _product=$('#tanprd2'); var _bg = $(".bg"); switch(type){ case 1: _product.fadeIn(); _bg.fadeIn(); _title.html('<p>恭喜您!</p>'); _con.html('<p>获得了特等奖</p><p>箭牌智能坐便器</p><p>AKB1130!</p>'); _btn.html('<img src="images/btn3.png" width="50%" onclick="message(\'1\')">'); //_countcon.html('<p>您还剩有<font>'+count+'</font>次抽奖机会!</p>'); break; case 2: _product.fadeIn(); _bg.fadeIn(); _title.html('<p>恭喜您!</p>'); _con.html('<p>获得了优秀奖</p><p>箭牌卫浴智能马桶盖</p><p>AK1002!</p>'); _btn.html('<img src="images/btn3.png" width="50%" 
onclick="message(\'2\')">'); //_countcon.html('<p>您还剩有<font>'+count+'</font>次抽奖机会!</p>'); break; case 3: _product.fadeIn(); _bg.fadeIn(); _title.html('<p>恭喜您!</p>'); _con.html('<p>获得了参与奖</p><p>箭牌卫浴品牌优质毛巾</p>'); _btn.html('<img src="images/btn3.png" width="50%" onclick="message(\'3\')">'); //_countcon.html('<p>您还剩有<font>'+count+'</font>次抽奖机会!</p>'); break; case 0: cs(); // _title.html('<p>很遗憾。</p>'); // _con.html('<p>感谢您的参与,下次中</p><p>奖机会一定属于您!</p>'); // _btn.html('<a href="index.php"><img src="images/btn4.png" width="50%"></a>'); // _bg.click(function(){ // _bg.fadeOut(); // _product.fadeOut(); // }); break; default: } } //公共弹窗 function common(title,text){ var _title=$("#tanprd .product-tilte"); var _con=$("#tanprd .product-con"); var _btn=$("#tanprd .product-btn"); var _product=$('#tanprd'); var _bg = $(".bg"); _product.fadeIn(); _bg.fadeIn(); _title.html(title); _con.html(text); _btn.html('<a id="combtn"><img src="images/btn2.png" width="50%"></a>'); // _bg.click(function(){ // _bg.fadeOut(); // _product.fadeOut(); // }); $("#combtn").click(function(){ _bg.fadeOut(); _product.fadeOut(); }); } //填写个人资料 function message(type){ var _type=type;//奖项 var _msg=$('#messageid'); var _bg = $(".bg"); var _product=$('#tanprd2'); _product.fadeOut(); _msg.fadeIn(); _bg.fadeIn(); } function zh(obj){ var _msg=$('#messageid'); // var _product=$('#tanprd'); // var _bg = $(".bg"); var errTag=true; var name = $("#name").val(); var phone = $("#phone").val(); var address = $("#address").val(); console.log(name); $.ajax({ async:false, url:'server.php', data:{act:'addinfo',name:name,phone:phone,address:address}, type:'post', dataType:'json', success:function(result){ if(result.errcode!=0) { alert(result.errmsg); errTag = false; } else { return false; } } }); if(!errTag) { return false; } _msg.fadeOut(); //_product.fadeIn(); common('<p>谢谢您参与!</p>','<p>活动结束后</p><p>会有专人通知您的了!</p>') // $("#end").click(function(){ // _bg.fadeOut(); // _product.fadeOut(); // }); } function producttag(type){ var _bg = $(".bg"); var _tag=$("#tag"); _bg.fadeIn(); _tag.fadeIn(); var _tagpic=$("#tag .prd-tag-pic"); var _tagtitle=$("#tag .prd-tag-title"); var _tagcon=$("#tag .prd-tag-con"); var _tagtype=$("#tag .prd-tag-type"); var _close=$("#tag .prd-tag-close"); switch(type){ case '1': _tagpic.find('img').attr('src','images/product1-1.png'); _tagtitle.html('<p>3D奈丽实木浴室柜</p>'); _tagcon.html('<p>箭牌卫浴将3D奈丽一体成形技术应用与浴室柜,突破传统的制造技术,是产品造型更加优美、性能更加稳定同时简约流畅的线条又极高提升了浴室柜的空间装饰价值,典雅高尚。</p>'); _tagtype.html('<p><span class="prd-tag-bg">材质</span> 3D奈丽板</p>\ <p><span class="prd-tag-bg">型号</span> APGM10L4136-B</p>\ <p><span class="prd-tag-bg">规格</span> 盆尺寸:702*409*198mm</p>\ <p class="prd-tag-sj">主柜尺寸:1010*545*820mm</p> <p class="prd-tag-sj">镜柜尺寸:950*160*650mm</p>'); // $('.prd-tag-up').css('display','none'); // $('.prd-tag-down').css('display','none'); break; case '2': _tagpic.find('img').attr('src','images/product1-2.png'); _tagtitle.html('<p>气泡按摩浴缸</p>'); _tagcon.html('<p>经过整整一天紧张的工作后,每个人都应该在一个舒适浴室中让自己回到从容,得到抚慰。韵.格雅系列包括了高质量的多功能浴室家具和令人难以割舍的无缝对接浴缸等多种产品,完全可以满足你的全方位需要。</p>'); _tagtype.html('<p><span class="prd-tag-bg">材质</span> 压克力</p>\ <p><span class="prd-tag-bg">型号</span> AQ1601UQ</p>\ <p><span class="prd-tag-bg">规格</span> L1600*W800*H680mm</p>'); // $('.prd-tag-up').css('display','none'); // $('.prd-tag-down').css('display','none'); break; case '3': _tagpic.find('img').attr('src','images/product1-3.png'); _tagtitle.html('<p>喷射虹吸式连体坐便器</p>'); _tagcon.html('<p>韵.格雅系列将纯粹的几何美学发挥得淋漓尽致,所有角落均变得柔滑,反映它蕴藏的美学思想。陶瓷产品超薄盖板、环保节水尖端技术的应用,使其功能品质也得到了最佳呈现。</p>'); 
_tagtype.html('<p><span class="prd-tag-bg">材质</span> 陶瓷</p>\ <p><span class="prd-tag-bg">型号</span> AB1170</p>\ <p><span class="prd-tag-bg">规格</span> L686*W370*H730mm</p>\ <p class="prd-tag-sj">坑距280/390mm</p>'); // $('.prd-tag-up').css('display','none'); // $('.prd-tag-down').css('display','none'); break; case '4': _tagpic.find('img').attr('src','images/product2-1.png'); _tagtitle.html('<p>3D奈丽实木浴室柜(诺曼系列)</p>'); _tagcon.html('<p><div class="prd-tag-yuan">1</div>产品时尚而又极具个性,可与任何现代家居装修风格搭配;诺曼系列采用突破传统的一体柜脚设计,产品整体设计一气呵成,直指追求时尚个性的内心,时尚的配色与经典立体花纹相得益彰,点缀经典款镶钻拉手,诺曼系列就是要带给大家华丽、个性、永恒的时尚;</p>\ <p><div class="prd-tag-yuan">2</div>产品采用箭牌全新的3D奈丽一体成形技术,呈现完美而又充满个性的外观;</p>\ <p><div class="prd-tag-yuan">3</div>进口全橡胶实木为基材,环保油漆工艺技术;</p>\ <p><div class="prd-tag-yuan">4</div>精致的立体花纹面,纯手工打磨,保证产品的经典外观,价值非凡;</p>\ <p><div class="prd-tag-yuan">5</div>配套柜盆、镜柜,置物功能丰富;</p>'); _tagtype.html('<p><span class="prd-tag-bg">材质</span> 3D奈丽板</p>\ <p><span class="prd-tag-bg">型号</span> APGM8L3218-B</p>\ <p><span class="prd-tag-bg">规格</span> 柜盆:805x502x188mm</p>\ <p class="prd-tag-sj">主柜:780*490*780mm</p>\ <p class="prd-tag-sj">镜柜:790*180*680mm</p>'); // $('.prd-tag-up').css('display','inline'); // $('.prd-tag-down').css('display','inline'); break; case '5': _tagpic.find('img').attr('src','images/product2-2.png'); _tagtitle.html('<p>气泡按摩浴缸</p>'); _tagcon.html('<p><p><div class="prd-tag-yuan">1</div>一体化全裙边设计;</p>\ <p><div class="prd-tag-yuan">2</div>简约溢水孔设计;</p>\ <p><div class="prd-tag-yuan">3</div>直线,亦可与墙面无缝贴近,无需要定制左右裙;</p>\ <p><div class="prd-tag-yuan">4</div>弧度充当靠垫,躺着舒适,人性化设计</p>'); _tagtype.html('<p><span class="prd-tag-bg">材质</span> 压克力</p>\ <p><span class="prd-tag-bg">型号</span> AQ16807TQ</p>\ <p><span class="prd-tag-bg">规格</span> L1600xW800xH700mm</p>'); // $('.prd-tag-up').css('display','none'); // $('.prd-tag-down').css('display','none'); break; case '6': _tagpic.find('img').attr('src','images/product2-3.png'); _tagtitle.html('<p>智能座便器</p>'); _tagcon.html('<p>舒乐一体超感坐便器,以时尚为创意远点,宛若一朵绽放于艺术上的鲜花。它简约圆滑的设计,勾勒出优雅曼妙的外形,配先进金的洁体科技,以一体化的人性方式呈现出人性科技与尖端设计的完美融合。</p>\ <p><div class="prd-tag-yuan">1</div>无水箱设计,超薄盖板,产品小尺寸设计、更加整体时尚;</p>\ <p><div class="prd-tag-yuan">2</div>即热式加热,使用更方便;</p>\ <p><div class="prd-tag-yuan">3</div>不锈钢喷枪,三档可调;</p>\ <p><div class="prd-tag-yuan">4</div>内部模块化设计,不同的款式内部结构、模块全部通用,方便维护;</p>\ <p><div class="prd-tag-yuan">5</div>低水压下冲洗也非常干净,并具备防虹吸功能;</p>\ <p><div class="prd-tag-yuan">6</div>妇洗、洗便、座圈加热、烘干、除臭、节能、遥控器功能;</p>'); _tagtype.html('<p><span class="prd-tag-bg">材质</span> 陶瓷</p>\ <p><span class="prd-tag-bg">型号</span> AKB1130</p>\ <p><span class="prd-tag-bg">规格</span> 尺寸:L600xW404xH533mm</p>\ <p class="prd-tag-sj">坑距:290/390mm</p>'); // $('.prd-tag-up').css('display','inline'); // $('.prd-tag-down').css('display','inline'); break; default: } _close.click(function(){ _bg.fadeOut(); _tag.fadeOut(); }); }
client_dns.go
// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package client

import (
	"context"
	"fmt"
	"strings"
	"time"

	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors"
	"github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests"
	"github.com/aliyun/alibaba-cloud-sdk-go/services/alidns"
	"github.com/gardener/gardener/pkg/utils"
	"golang.org/x/time/rate"
	"sigs.k8s.io/controller-runtime/pkg/log"

	"github.com/gardener/gardener-extension-provider-alicloud/pkg/alicloud"
)

const (
	domainsCacheTTL     = 1 * time.Hour
	rateLimiterCacheTTL = 1 * time.Hour
)

// RateLimiterWaitError is an error to be reported if waiting for an Aliyun DNS rate limiter fails.
// This can only happen if the wait time would exceed the configured wait timeout.
type RateLimiterWaitError struct {
	Cause error
}

func (e *RateLimiterWaitError) Error() string {
	return fmt.Sprintf("could not wait for client-side aliyun dns rate limiter: %+v", e.Cause)
}

// NewDNSClient creates a new DNS client with given region, accessKeyID, and accessKeySecret.
func (f *clientFactory) NewDNSClient(region, accessKeyID, accessKeySecret string) (DNS, error) {
	client, err := alidns.NewClientWithAccessKey(region, accessKeyID, accessKeySecret)
	if err != nil {
		return nil, err
	}

	return &dnsClient{
		Client:                 *client,
		accessKeyID:            accessKeyID,
		domainsCache:           f.domainsCache,
		domainsCacheMutex:      &f.domainsCacheMutex,
		RateLimiter:            f.getRateLimiter(accessKeyID),
		RateLimiterWaitTimeout: f.waitTimeout,
		Logger:                 log.Log.WithName("ali-dnsclient"),
	}, nil
}

func (f *clientFactory) getRateLimiter(accessKeyID string) *rate.Limiter {
	// cache.Expiring Get and Set methods are concurrency-safe.
	// However, if a rate limiter is not present in the cache, it may happen that multiple rate limiters are created
	// at the same time for the same access key id, and the desired QPS is exceeded, so use a mutex to guard against this.
	f.rateLimitersMutex.Lock()
	defer f.rateLimitersMutex.Unlock()

	// Get a rate limiter from the cache, or create a new one if not present
	var rateLimiter *rate.Limiter
	if v, ok := f.rateLimiters.Get(accessKeyID); ok {
		rateLimiter = v.(*rate.Limiter)
	} else {
		rateLimiter = rate.NewLimiter(f.limit, f.burst)
	}
	// Set should be called on every Get with cache.Expiring to refresh the TTL
	f.rateLimiters.Set(accessKeyID, rateLimiter, rateLimiterCacheTTL)
	return rateLimiter
}

// GetDomainNames returns a map of all domain names mapped to their composite domain names.
func (d *dnsClient) GetDomainNames(ctx context.Context) (map[string]string, error) {
	domains, err := d.getDomainsWithCache(ctx)
	if err != nil {
		return nil, err
	}
	domainNames := make(map[string]string)
	for _, domain := range domains {
		domainNames[domain.DomainName] = CompositeDomainName(domain.DomainName, domain.DomainId)
	}
	return domainNames, nil
}
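A minimal standalone sketch of the per-access-key limiter sharing that getRateLimiter implements, simplified to a plain map with no TTL-based expiry; the pool type and names below are illustrative, not part of this package:

package pool

import (
	"sync"

	"golang.org/x/time/rate"
)

// limiterPool hands out one shared *rate.Limiter per key, so every client
// created for the same access key ID draws from the same QPS budget.
type limiterPool struct {
	mu       sync.Mutex
	limiters map[string]*rate.Limiter
	limit    rate.Limit
	burst    int
}

func (p *limiterPool) get(key string) *rate.Limiter {
	p.mu.Lock()
	defer p.mu.Unlock()
	if l, ok := p.limiters[key]; ok {
		return l
	}
	l := rate.NewLimiter(p.limit, p.burst)
	p.limiters[key] = l
	return l
}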
// GetDomainName returns the composite domain name of the domain with the given domain id.
func (d *dnsClient) GetDomainName(ctx context.Context, domainId string) (string, error) {
	domains, err := d.getDomainsWithCache(ctx)
	if err != nil {
		return "", err
	}
	domain, ok := domains[domainId]
	if !ok {
		return "", fmt.Errorf("DNS domain with id %s not found", domainId)
	}
	return CompositeDomainName(domain.DomainName, domain.DomainId), nil
}

// CreateOrUpdateDomainRecords creates or updates the domain records with the given domain name, name, record type,
// values, and ttl.
// * For each element in values that has an existing domain record, the existing record is updated if needed.
// * For each element in values that doesn't have an existing domain record, a new domain record is created.
// * For each existing domain record that doesn't have a corresponding element in values, the existing record is deleted.
func (d *dnsClient) CreateOrUpdateDomainRecords(ctx context.Context, domainName, name, recordType string, values []string, ttl int64) error {
	domainName, _ = DomainNameAndId(domainName)
	rr, err := getRR(name, domainName)
	if err != nil {
		return err
	}
	records, err := d.getDomainRecords(ctx, domainName, rr, recordType)
	if err != nil {
		return err
	}
	for _, value := range values {
		if record, ok := records[value]; ok {
			// Only update the existing domain record if the current TTL value is different from the given one.
			// At this point we know that rr, recordType, and value are the same.
			if record.TTL != ttl {
				if err := d.updateDomainRecord(ctx, record.RecordId, rr, recordType, value, ttl); err != nil {
					return err
				}
			}
		} else {
			if err := d.createDomainRecord(ctx, domainName, rr, recordType, value, ttl); err != nil {
				return err
			}
		}
	}
	for value, record := range records {
		if !utils.ValueExists(value, values) {
			if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil {
				return err
			}
		}
	}
	return nil
}

// DeleteDomainRecords deletes the domain records with the given domain name, name, and record type.
func (d *dnsClient) DeleteDomainRecords(ctx context.Context, domainName, name, recordType string) error {
	domainName, _ = DomainNameAndId(domainName)
	rr, err := getRR(name, domainName)
	if err != nil {
		return err
	}
	records, err := d.getDomainRecords(ctx, domainName, rr, recordType)
	if err != nil {
		return err
	}
	for _, record := range records {
		if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil {
			return err
		}
	}
	return nil
}

func (d *dnsClient) getDomainsWithCache(ctx context.Context) (map[string]alidns.Domain, error) {
	// cache.Expiring Get and Set methods are concurrency-safe.
	// However, if an accessKeyID is not present in the cache and multiple DNSRecords are reconciled at the same time,
	// it may happen that getDomains is called multiple times instead of just once, so use a mutex to guard against this.
	// It is ok to use a shared mutex here as long as the number of accessKeyIDs using custom domains is low.
	// This may need to be revisited with a larger number of such accessKeyIDs to avoid them blocking each other
	// during the (potentially long-running) call to getDomains.
	d.domainsCacheMutex.Lock()
	defer d.domainsCacheMutex.Unlock()

	if v, ok := d.domainsCache.Get(d.accessKeyID); ok {
		return v.(map[string]alidns.Domain), nil
	}
	domains, err := d.getDomains(ctx)
	if err != nil {
		return nil, err
	}
	d.domainsCache.Set(d.accessKeyID, domains, domainsCacheTTL)
	return domains, nil
}
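A hypothetical caller-side sketch of the CreateOrUpdateDomainRecords reconciliation semantics above; the domain, record name, values, and TTL are made up, and only the method signature comes from this file (same-package sketch, so the existing imports apply):

// ensureWWWRecords drives the desired-state reconciliation: after the call,
// "www.example.com" has exactly the two given A records with TTL 300. Stale
// values are deleted, new ones created, and TTL drift is corrected in place.
func ensureWWWRecords(ctx context.Context, dns DNS) error {
	return dns.CreateOrUpdateDomainRecords(ctx, "example.com:dns-123", "www.example.com", "A",
		[]string{"1.2.3.4", "5.6.7.8"}, 300)
}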
// getDomains returns all domains.
func (d *dnsClient) getDomains(ctx context.Context) (map[string]alidns.Domain, error) {
	if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
		return nil, err
	}

	domains := make(map[string]alidns.Domain)
	pageSize, pageNumber := 20, 1
	req := alidns.CreateDescribeDomainsRequest()
	req.PageSize = requests.NewInteger(pageSize)
	for {
		req.PageNumber = requests.NewInteger(pageNumber)
		resp, err := d.Client.DescribeDomains(req)
		if err != nil {
			return nil, err
		}
		for _, domain := range resp.Domains.Domain {
			domains[domain.DomainId] = getDomainFromResponse(domain)
		}
		if resp.PageNumber*int64(pageSize) >= resp.TotalCount {
			break
		}
		pageNumber++
	}
	return domains, nil
}

func getDomainFromResponse(domainFromResp alidns.DomainInDescribeDomains) alidns.Domain {
	return alidns.Domain{
		DomainId:        domainFromResp.DomainId,
		DomainName:      domainFromResp.DomainName,
		AliDomain:       domainFromResp.AliDomain,
		CreateTimestamp: domainFromResp.CreateTimestamp,
		//ExpireTimestamp: ,
		InstanceEndTime: domainFromResp.InstanceEndTime,
		CreateTime:      domainFromResp.CreateTime,
		// SourceProtocol: domainFromResp.SourceProtocol,
		GroupName:   domainFromResp.GroupName,
		VersionCode: domainFromResp.VersionCode,
		// UpdateTimestamp: domainFromResp.UpdateTimestamp,
		RecordCount:     domainFromResp.RecordCount,
		InstanceExpired: domainFromResp.InstanceExpired,
		ResourceGroupId: domainFromResp.ResourceGroupId,
		// CacheTtlMin: domainFromResp.CacheTtlMin,
		InstanceId: domainFromResp.InstanceId,
		//ExpireTime:,
		GroupId: domainFromResp.GroupId,
		// SourceEdns: domainFromResp.SourceEdns,
		RegistrantEmail: domainFromResp.RegistrantEmail,
		VersionName:     domainFromResp.VersionName,
		// UpdateTime: domainFromResp.UpdateTime,
		Remark: domainFromResp.Remark,
		// CacheTtlMax: domainFromResp.CacheTtlMax,
		PunyCode: domainFromResp.PunyCode,
		Starmark: domainFromResp.Starmark,
		// DnsServers: domainFromResp.DnsServers,
		Tags: domainFromResp.Tags,
		// SourceDnsServers: domainFromResp.SourceDnsServers,
	}
}
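The DescribeDomains paging loop above stops once PageNumber*pageSize reaches TotalCount, and getDomainRecords below uses the same rule; a tiny same-package sketch of that termination condition (illustrative helper, not part of this package):

// pagesNeeded mirrors the loop's exit condition. Like the real loop, it
// always fetches at least one page, even when totalCount is 0.
func pagesNeeded(totalCount int64, pageSize int) int {
	pageNumber := 1
	for int64(pageNumber)*int64(pageSize) < totalCount {
		pageNumber++
	}
	return pageNumber
}

// pagesNeeded(45, 20) == 3: pages 1 and 2 cover 40 domains, page 3 the last 5.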
// getDomainRecords returns the domain records with the given domain name, rr, and record type.
func (d *dnsClient) getDomainRecords(ctx context.Context, domainName, rr, recordType string) (map[string]alidns.Record, error) {
	if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
		return nil, err
	}

	records := make(map[string]alidns.Record)
	pageSize, pageNumber := 20, 1
	req := alidns.CreateDescribeDomainRecordsRequest()
	req.PageSize = requests.NewInteger(pageSize)
	for {
		req.PageNumber = requests.NewInteger(pageNumber)
		req.DomainName = domainName
		req.RRKeyWord = rr
		req.TypeKeyWord = recordType
		resp, err := d.Client.DescribeDomainRecords(req)
		if err != nil {
			return nil, err
		}
		for _, record := range resp.DomainRecords.Record {
			records[record.Value] = record
		}
		if resp.PageNumber*int64(pageSize) >= resp.TotalCount {
			break
		}
		pageNumber++
	}
	return records, nil
}

func (d *dnsClient) createDomainRecord(ctx context.Context, domainName, rr, recordType, value string, ttl int64) error {
	if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
		return err
	}
	req := alidns.CreateAddDomainRecordRequest()
	req.DomainName = domainName
	req.RR = rr
	req.Type = recordType
	req.Value = value
	req.TTL = requests.NewInteger(int(ttl))
	_, err := d.Client.AddDomainRecord(req)
	return err
}

func (d *dnsClient) updateDomainRecord(ctx context.Context, id, rr, recordType, value string, ttl int64) error {
	if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
		return err
	}
	req := alidns.CreateUpdateDomainRecordRequest()
	req.RecordId = id
	req.RR = rr
	req.Type = recordType
	req.Value = value
	req.TTL = requests.NewInteger(int(ttl))
	_, err := d.Client.UpdateDomainRecord(req)
	return err
}
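deleteDomainRecord and the other mutating helpers all gate on waitForAliDNSRateLimiter, defined just below; a minimal same-package sketch of the underlying golang.org/x/time/rate pattern, where Wait returns an error up front if the deadline would expire before a token becomes available (the helper name is illustrative):

func waitWithTimeout(ctx context.Context, limiter *rate.Limiter, timeout time.Duration) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	// Wait blocks until the limiter grants a token, or fails immediately if
	// that cannot happen before timeoutCtx's deadline.
	return limiter.Wait(timeoutCtx)
}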
func (d *dnsClient) deleteDomainRecord(ctx context.Context, id string) error {
	if err := d.waitForAliDNSRateLimiter(ctx); err != nil {
		return err
	}
	req := alidns.CreateDeleteDomainRecordRequest()
	req.RecordId = id
	if _, err := d.Client.DeleteDomainRecord(req); err != nil && !isDomainRecordDoesNotExistError(err) {
		return err
	}
	return nil
}

func (d *dnsClient) waitForAliDNSRateLimiter(ctx context.Context) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, d.RateLimiterWaitTimeout)
	defer cancel()
	t := time.Now()
	if err := d.RateLimiter.Wait(timeoutCtx); err != nil {
		return &RateLimiterWaitError{Cause: err}
	}
	if waitDuration := time.Since(t); waitDuration.Seconds() > 1/float64(d.RateLimiter.Limit()) {
		d.Logger.Info("Waited for client-side aliyun DNS rate limiter", "waitDuration", waitDuration.String())
	}
	return nil
}

func getRR(name, domainName string) (string, error) {
	if name == domainName {
		return "@", nil
	}
	suffix := "." + domainName
	if !strings.HasSuffix(name, suffix) {
		return "", fmt.Errorf("name %s does not match domain name %s", name, domainName)
	}
	return strings.TrimSuffix(name, suffix), nil
}

func isDomainRecordDoesNotExistError(err error) bool {
	if serverError, ok := err.(*errors.ServerError); ok {
		if serverError.ErrorCode() == alicloud.ErrorCodeDomainRecordNotBelongToUser {
			return true
		}
	}
	return false
}

// CompositeDomainName composes and returns a composite domain name from the given domain name and id,
// in the format <domainName>:<domainId>
func CompositeDomainName(domainName, domainId string) string {
	if domainId != "" {
		return domainName + ":" + domainId
	}
	return domainName
}

// DomainNameAndId decomposes the given composite domain name in the format <domainName>:<domainId>
// into its constituent domain name and id.
func DomainNameAndId(compositeDomainName string) (string, string) {
	if parts := strings.Split(compositeDomainName, ":"); len(parts) == 2 {
		return parts[0], parts[1]
	}
	return compositeDomainName, ""
}

// IsThrottlingError returns true if the error is a throttling error.
func IsThrottlingError(err error) bool {
	if alierr, ok := err.(errors.Error); ok && strings.Contains(alierr.Message(), "Throttling") {
		return true
	}
	return false
}
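Finally, a same-package, test-style sketch of the name helpers above; the expected outputs follow directly from the string operations, and the id "dns-123" is made up:

func ExampleCompositeDomainName() {
	composite := CompositeDomainName("example.com", "dns-123")
	fmt.Println(composite) // example.com:dns-123

	name, id := DomainNameAndId(composite)
	fmt.Println(name, id) // example.com dns-123

	rr, _ := getRR("www.example.com", "example.com")
	fmt.Println(rr) // www

	rr, _ = getRR("example.com", "example.com")
	fmt.Println(rr) // @
}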
identifier_body
client_dns.go
// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package client import ( "context" "fmt" "strings" "time" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/services/alidns" "github.com/gardener/gardener/pkg/utils" "golang.org/x/time/rate" "sigs.k8s.io/controller-runtime/pkg/log" "github.com/gardener/gardener-extension-provider-alicloud/pkg/alicloud" ) const ( domainsCacheTTL = 1 * time.Hour rateLimiterCacheTTL = 1 * time.Hour ) // RateLimiterWaitError is an error to be reported if waiting for a aliyun dns rate limiter fails. // This can only happen if the wait time would exceed the configured wait timeout. type RateLimiterWaitError struct { Cause error } func (e *RateLimiterWaitError) Error() string { return fmt.Sprintf("could not wait for client-side aliyun dns rate limiter: %+v", e.Cause) } // NewDNSClient creates a new DNS client with given region, accessKeyID, and accessKeySecret. func (f *clientFactory) NewDNSClient(region, accessKeyID, accessKeySecret string) (DNS, error) { client, err := alidns.NewClientWithAccessKey(region, accessKeyID, accessKeySecret) if err != nil { return nil, err } return &dnsClient{ Client: *client, accessKeyID: accessKeyID, domainsCache: f.domainsCache, domainsCacheMutex: &f.domainsCacheMutex, RateLimiter: f.getRateLimiter(accessKeyID), RateLimiterWaitTimeout: f.waitTimeout, Logger: log.Log.WithName("ali-dnsclient"), }, nil } func (f *clientFactory) getRateLimiter(accessKeyID string) *rate.Limiter { // cache.Expiring Get and Set methods are concurrency-safe // However, if f rate limiter is not present in the cache, it may happen that multiple rate limiters are created // at the same time for the same access key id, and the desired QPS is exceeded, so use f mutex to guard against this f.rateLimitersMutex.Lock() defer f.rateLimitersMutex.Unlock() // Get f rate limiter from the cache, or create f new one if not present var rateLimiter *rate.Limiter if v, ok := f.rateLimiters.Get(accessKeyID); ok { rateLimiter = v.(*rate.Limiter) } else { rateLimiter = rate.NewLimiter(f.limit, f.burst) } // Set should be called on every Get with cache.Expiring to refresh the TTL f.rateLimiters.Set(accessKeyID, rateLimiter, rateLimiterCacheTTL) return rateLimiter } // GetDomainNames returns a map of all domain names mapped to their composite domain names. func (d *dnsClient) GetDomainNames(ctx context.Context) (map[string]string, error) { domains, err := d.getDomainsWithCache(ctx) if err != nil { return nil, err } domainNames := make(map[string]string) for _, domain := range domains { domainNames[domain.DomainName] = CompositeDomainName(domain.DomainName, domain.DomainId) } return domainNames, nil } // GetDomainName returns the composite domain name of the domain with the given domain id. 
func (d *dnsClient) GetDomainName(ctx context.Context, domainId string) (string, error) { domains, err := d.getDomainsWithCache(ctx) if err != nil { return "", err } domain, ok := domains[domainId] if !ok { return "", fmt.Errorf("DNS domain with id %s not found", domainId) } return CompositeDomainName(domain.DomainName, domain.DomainId), nil } // CreateOrUpdateDomainRecords creates or updates the domain records with the given domain name, name, record type, // values, and ttl. // * For each element in values that has an existing domain record, the existing record is updated if needed. // * For each element in values that doesn't have an existing domain record, a new domain record is created. // * For each existing domain record that doesn't have a corresponding element in values, the existing record is deleted. func (d *dnsClient) CreateOrUpdateDomainRecords(ctx context.Context, domainName, name, recordType string, values []string, ttl int64) error { domainName, _ = DomainNameAndId(domainName) rr, err := getRR(name, domainName) if err != nil { return err } records, err := d.getDomainRecords(ctx, domainName, rr, recordType) if err != nil { return err } for _, value := range values { if record, ok := records[value]; ok { // Only update the existing domain record if the current TTL value is different from the given one // At this point we know that rr, recordType, and value are the same if record.TTL != ttl { if err := d.updateDomainRecord(ctx, record.RecordId, rr, recordType, value, ttl); err != nil { return err } } } else { if err := d.createDomainRecord(ctx, domainName, rr, recordType, value, ttl); err != nil { return err } } } for value, record := range records { if !utils.ValueExists(value, values) { if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil { return err } } } return nil } // DeleteDomainRecords deletes the domain records with the given domain name, name and record type. func (d *dnsClient) DeleteDomainRecords(ctx context.Context, domainName, name, recordType string) error { domainName, _ = DomainNameAndId(domainName) rr, err := getRR(name, domainName) if err != nil { return err } records, err := d.getDomainRecords(ctx, domainName, rr, recordType) if err != nil { return err } for _, record := range records { if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil { return err } } return nil } func (d *dnsClient) getDomainsWithCache(ctx context.Context) (map[string]alidns.Domain, error) { // cache.Expiring Get and Set methods are concurrency-safe. // However, if an accessKeyID is not present in the cache and multiple DNSRecords are reconciled at the same time, // it may happen that getDomains is called multiple times instead of just one, so use a mutex to guard against this. // It is ok to use a shared mutex here as far as the number of accessKeyIDs using custom domains is low. // This may need to be revisited with a larger number of such accessKeyIDs to avoid them blocking each other // during the (potentially long-running) call to getDomains. d.domainsCacheMutex.Lock() defer d.domainsCacheMutex.Unlock() if v, ok := d.domainsCache.Get(d.accessKeyID); ok { return v.(map[string]alidns.Domain), nil } domains, err := d.getDomains(ctx) if err != nil { return nil, err } d.domainsCache.Set(d.accessKeyID, domains, domainsCacheTTL) return domains, nil } // getDomains returns all domains. 
func (d *dnsClient) getDomains(ctx context.Context) (map[string]alidns.Domain, error) { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return nil, err } domains := make(map[string]alidns.Domain) pageSize, pageNumber := 20, 1 req := alidns.CreateDescribeDomainsRequest() req.PageSize = requests.NewInteger(pageSize) for { req.PageNumber = requests.NewInteger(pageNumber) resp, err := d.Client.DescribeDomains(req) if err != nil { return nil, err } for _, domain := range resp.Domains.Domain { domains[domain.DomainId] = getDomainFromResponse(domain) } if resp.PageNumber*int64(pageSize) >= resp.TotalCount { break } pageNumber++ } return domains, nil } func getDomainFromResponse(domainFromResp alidns.DomainInDescribeDomains) alidns.Domain { return alidns.Domain{ DomainId: domainFromResp.DomainId, DomainName: domainFromResp.DomainName, AliDomain: domainFromResp.AliDomain, CreateTimestamp: domainFromResp.CreateTimestamp, //ExpireTimestamp: , InstanceEndTime: domainFromResp.InstanceEndTime, CreateTime: domainFromResp.CreateTime, // SourceProtocol: domainFromResp.SourceProtocol, GroupName: domainFromResp.GroupName, VersionCode: domainFromResp.VersionCode, // UpdateTimestamp: domainFromResp.UpdateTimestamp, RecordCount: domainFromResp.RecordCount, InstanceExpired: domainFromResp.InstanceExpired, ResourceGroupId: domainFromResp.ResourceGroupId, // CacheTtlMin: domainFromResp.CacheTtlMin, InstanceId: domainFromResp.InstanceId, //ExpireTime:, GroupId: domainFromResp.GroupId, // SourceEdns: domainFromResp.SourceEdns, RegistrantEmail: domainFromResp.RegistrantEmail, VersionName: domainFromResp.VersionName, // UpdateTime: domainFromResp.UpdateTime, Remark: domainFromResp.Remark, // CacheTtlMax: domainFromResp.CacheTtlMax, PunyCode: domainFromResp.PunyCode, Starmark: domainFromResp.Starmark, // DnsServers: domainFromResp.DnsServers, Tags: domainFromResp.Tags, // SourceDnsServers: domainFromResp.SourceDnsServers, } } // getDomainRecords returns the domain records with the given domain name, rr, and record type. func (d *dnsClient) getDomainRecords(ctx context.Context, domainName, rr, recordType string) (map[string]alidns.Record, error) {
records := make(map[string]alidns.Record) pageSize, pageNumber := 20, 1 req := alidns.CreateDescribeDomainRecordsRequest() req.PageSize = requests.NewInteger(pageSize) for { req.PageNumber = requests.NewInteger(pageNumber) req.DomainName = domainName req.RRKeyWord = rr req.TypeKeyWord = recordType resp, err := d.Client.DescribeDomainRecords(req) if err != nil { return nil, err } for _, record := range resp.DomainRecords.Record { records[record.Value] = record } if resp.PageNumber*int64(pageSize) >= resp.TotalCount { break } pageNumber++ } return records, nil } func (d *dnsClient) createDomainRecord(ctx context.Context, domainName, rr, recordType, value string, ttl int64) error { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return err } req := alidns.CreateAddDomainRecordRequest() req.DomainName = domainName req.RR = rr req.Type = recordType req.Value = value req.TTL = requests.NewInteger(int(ttl)) _, err := d.Client.AddDomainRecord(req) return err } func (d *dnsClient) updateDomainRecord(ctx context.Context, id, rr, recordType, value string, ttl int64) error { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return err } req := alidns.CreateUpdateDomainRecordRequest() req.RecordId = id req.RR = rr req.Type = recordType req.Value = value req.TTL = requests.NewInteger(int(ttl)) _, err := d.Client.UpdateDomainRecord(req) return err } func (d *dnsClient) deleteDomainRecord(ctx context.Context, id string) error { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return err } req := alidns.CreateDeleteDomainRecordRequest() req.RecordId = id if _, err := d.Client.DeleteDomainRecord(req); err != nil && !isDomainRecordDoesNotExistError(err) { return err } return nil } func (d *dnsClient) waitForAliDNSRateLimiter(ctx context.Context) error { timeoutCtx, cancel := context.WithTimeout(ctx, d.RateLimiterWaitTimeout) defer cancel() t := time.Now() if err := d.RateLimiter.Wait(timeoutCtx); err != nil { return &RateLimiterWaitError{Cause: err} } if waitDuration := time.Since(t); waitDuration.Seconds() > 1/float64(d.RateLimiter.Limit()) { d.Logger.Info("Waited for client-side aliyun DNS rate limiter", "waitDuration", waitDuration.String()) } return nil } func getRR(name, domainName string) (string, error) { if name == domainName { return "@", nil } suffix := "." + domainName if !strings.HasSuffix(name, suffix) { return "", fmt.Errorf("name %s does not match domain name %s", name, domainName) } return strings.TrimSuffix(name, suffix), nil } func isDomainRecordDoesNotExistError(err error) bool { if serverError, ok := err.(*errors.ServerError); ok { if serverError.ErrorCode() == alicloud.ErrorCodeDomainRecordNotBelongToUser { return true } } return false } // CompositeDomainName composes and returns a composite domain name from the given domain name and id, // in the format <domainName>:<domainId> func CompositeDomainName(domainName, domainId string) string { if domainId != "" { return domainName + ":" + domainId } return domainName } // DomainNameAndId decomposes the given composite domain name in the format <domainName>:<domainId> // into its constituent domain name and id. func DomainNameAndId(compositeDomainName string) (string, string) { if parts := strings.Split(compositeDomainName, ":"); len(parts) == 2 { return parts[0], parts[1] } return compositeDomainName, "" } // IsThrottlingError returns true if the error is a throttling error. 
func IsThrottlingError(err error) bool { if alierr, ok := err.(errors.Error); ok && strings.Contains(alierr.Message(), "Throttling") { return true } return false }
if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return nil, err }
random_line_split
client_dns.go
// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package client import ( "context" "fmt" "strings" "time" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/services/alidns" "github.com/gardener/gardener/pkg/utils" "golang.org/x/time/rate" "sigs.k8s.io/controller-runtime/pkg/log" "github.com/gardener/gardener-extension-provider-alicloud/pkg/alicloud" ) const ( domainsCacheTTL = 1 * time.Hour rateLimiterCacheTTL = 1 * time.Hour ) // RateLimiterWaitError is an error to be reported if waiting for a aliyun dns rate limiter fails. // This can only happen if the wait time would exceed the configured wait timeout. type RateLimiterWaitError struct { Cause error } func (e *RateLimiterWaitError) Error() string { return fmt.Sprintf("could not wait for client-side aliyun dns rate limiter: %+v", e.Cause) } // NewDNSClient creates a new DNS client with given region, accessKeyID, and accessKeySecret. func (f *clientFactory) NewDNSClient(region, accessKeyID, accessKeySecret string) (DNS, error) { client, err := alidns.NewClientWithAccessKey(region, accessKeyID, accessKeySecret) if err != nil { return nil, err } return &dnsClient{ Client: *client, accessKeyID: accessKeyID, domainsCache: f.domainsCache, domainsCacheMutex: &f.domainsCacheMutex, RateLimiter: f.getRateLimiter(accessKeyID), RateLimiterWaitTimeout: f.waitTimeout, Logger: log.Log.WithName("ali-dnsclient"), }, nil } func (f *clientFactory) getRateLimiter(accessKeyID string) *rate.Limiter { // cache.Expiring Get and Set methods are concurrency-safe // However, if f rate limiter is not present in the cache, it may happen that multiple rate limiters are created // at the same time for the same access key id, and the desired QPS is exceeded, so use f mutex to guard against this f.rateLimitersMutex.Lock() defer f.rateLimitersMutex.Unlock() // Get f rate limiter from the cache, or create f new one if not present var rateLimiter *rate.Limiter if v, ok := f.rateLimiters.Get(accessKeyID); ok { rateLimiter = v.(*rate.Limiter) } else { rateLimiter = rate.NewLimiter(f.limit, f.burst) } // Set should be called on every Get with cache.Expiring to refresh the TTL f.rateLimiters.Set(accessKeyID, rateLimiter, rateLimiterCacheTTL) return rateLimiter } // GetDomainNames returns a map of all domain names mapped to their composite domain names. func (d *dnsClient) GetDomainNames(ctx context.Context) (map[string]string, error) { domains, err := d.getDomainsWithCache(ctx) if err != nil { return nil, err } domainNames := make(map[string]string) for _, domain := range domains { domainNames[domain.DomainName] = CompositeDomainName(domain.DomainName, domain.DomainId) } return domainNames, nil } // GetDomainName returns the composite domain name of the domain with the given domain id. 
func (d *dnsClient) GetDomainName(ctx context.Context, domainId string) (string, error) { domains, err := d.getDomainsWithCache(ctx) if err != nil { return "", err } domain, ok := domains[domainId] if !ok { return "", fmt.Errorf("DNS domain with id %s not found", domainId) } return CompositeDomainName(domain.DomainName, domain.DomainId), nil } // CreateOrUpdateDomainRecords creates or updates the domain records with the given domain name, name, record type, // values, and ttl. // * For each element in values that has an existing domain record, the existing record is updated if needed. // * For each element in values that doesn't have an existing domain record, a new domain record is created. // * For each existing domain record that doesn't have a corresponding element in values, the existing record is deleted. func (d *dnsClient) CreateOrUpdateDomainRecords(ctx context.Context, domainName, name, recordType string, values []string, ttl int64) error { domainName, _ = DomainNameAndId(domainName) rr, err := getRR(name, domainName) if err != nil { return err } records, err := d.getDomainRecords(ctx, domainName, rr, recordType) if err != nil { return err } for _, value := range values { if record, ok := records[value]; ok { // Only update the existing domain record if the current TTL value is different from the given one // At this point we know that rr, recordType, and value are the same if record.TTL != ttl { if err := d.updateDomainRecord(ctx, record.RecordId, rr, recordType, value, ttl); err != nil { return err } } } else { if err := d.createDomainRecord(ctx, domainName, rr, recordType, value, ttl); err != nil { return err } } } for value, record := range records { if !utils.ValueExists(value, values) { if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil { return err } } } return nil } // DeleteDomainRecords deletes the domain records with the given domain name, name and record type. func (d *dnsClient) DeleteDomainRecords(ctx context.Context, domainName, name, recordType string) error { domainName, _ = DomainNameAndId(domainName) rr, err := getRR(name, domainName) if err != nil { return err } records, err := d.getDomainRecords(ctx, domainName, rr, recordType) if err != nil { return err } for _, record := range records { if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil { return err } } return nil } func (d *dnsClient) getDomainsWithCache(ctx context.Context) (map[string]alidns.Domain, error) { // cache.Expiring Get and Set methods are concurrency-safe. // However, if an accessKeyID is not present in the cache and multiple DNSRecords are reconciled at the same time, // it may happen that getDomains is called multiple times instead of just one, so use a mutex to guard against this. // It is ok to use a shared mutex here as far as the number of accessKeyIDs using custom domains is low. // This may need to be revisited with a larger number of such accessKeyIDs to avoid them blocking each other // during the (potentially long-running) call to getDomains. d.domainsCacheMutex.Lock() defer d.domainsCacheMutex.Unlock() if v, ok := d.domainsCache.Get(d.accessKeyID); ok { return v.(map[string]alidns.Domain), nil } domains, err := d.getDomains(ctx) if err != nil { return nil, err } d.domainsCache.Set(d.accessKeyID, domains, domainsCacheTTL) return domains, nil } // getDomains returns all domains. 
func (d *dnsClient) getDomains(ctx context.Context) (map[string]alidns.Domain, error) { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return nil, err } domains := make(map[string]alidns.Domain) pageSize, pageNumber := 20, 1 req := alidns.CreateDescribeDomainsRequest() req.PageSize = requests.NewInteger(pageSize) for { req.PageNumber = requests.NewInteger(pageNumber) resp, err := d.Client.DescribeDomains(req) if err != nil { return nil, err } for _, domain := range resp.Domains.Domain { domains[domain.DomainId] = getDomainFromResponse(domain) } if resp.PageNumber*int64(pageSize) >= resp.TotalCount { break } pageNumber++ } return domains, nil } func getDomainFromResponse(domainFromResp alidns.DomainInDescribeDomains) alidns.Domain { return alidns.Domain{ DomainId: domainFromResp.DomainId, DomainName: domainFromResp.DomainName, AliDomain: domainFromResp.AliDomain, CreateTimestamp: domainFromResp.CreateTimestamp, //ExpireTimestamp: , InstanceEndTime: domainFromResp.InstanceEndTime, CreateTime: domainFromResp.CreateTime, // SourceProtocol: domainFromResp.SourceProtocol, GroupName: domainFromResp.GroupName, VersionCode: domainFromResp.VersionCode, // UpdateTimestamp: domainFromResp.UpdateTimestamp, RecordCount: domainFromResp.RecordCount, InstanceExpired: domainFromResp.InstanceExpired, ResourceGroupId: domainFromResp.ResourceGroupId, // CacheTtlMin: domainFromResp.CacheTtlMin, InstanceId: domainFromResp.InstanceId, //ExpireTime:, GroupId: domainFromResp.GroupId, // SourceEdns: domainFromResp.SourceEdns, RegistrantEmail: domainFromResp.RegistrantEmail, VersionName: domainFromResp.VersionName, // UpdateTime: domainFromResp.UpdateTime, Remark: domainFromResp.Remark, // CacheTtlMax: domainFromResp.CacheTtlMax, PunyCode: domainFromResp.PunyCode, Starmark: domainFromResp.Starmark, // DnsServers: domainFromResp.DnsServers, Tags: domainFromResp.Tags, // SourceDnsServers: domainFromResp.SourceDnsServers, } } // getDomainRecords returns the domain records with the given domain name, rr, and record type. 
func (d *dnsClient) getDomainRecords(ctx context.Context, domainName, rr, recordType string) (map[string]alidns.Record, error) { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return nil, err } records := make(map[string]alidns.Record) pageSize, pageNumber := 20, 1 req := alidns.CreateDescribeDomainRecordsRequest() req.PageSize = requests.NewInteger(pageSize) for { req.PageNumber = requests.NewInteger(pageNumber) req.DomainName = domainName req.RRKeyWord = rr req.TypeKeyWord = recordType resp, err := d.Client.DescribeDomainRecords(req) if err != nil { return nil, err } for _, record := range resp.DomainRecords.Record { records[record.Value] = record } if resp.PageNumber*int64(pageSize) >= resp.TotalCount { break } pageNumber++ } return records, nil } func (d *dnsClient) createDomainRecord(ctx context.Context, domainName, rr, recordType, value string, ttl int64) error { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return err } req := alidns.CreateAddDomainRecordRequest() req.DomainName = domainName req.RR = rr req.Type = recordType req.Value = value req.TTL = requests.NewInteger(int(ttl)) _, err := d.Client.AddDomainRecord(req) return err } func (d *dnsClient) updateDomainRecord(ctx context.Context, id, rr, recordType, value string, ttl int64) error { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return err } req := alidns.CreateUpdateDomainRecordRequest() req.RecordId = id req.RR = rr req.Type = recordType req.Value = value req.TTL = requests.NewInteger(int(ttl)) _, err := d.Client.UpdateDomainRecord(req) return err } func (d *dnsClient) deleteDomainRecord(ctx context.Context, id string) error { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return err } req := alidns.CreateDeleteDomainRecordRequest() req.RecordId = id if _, err := d.Client.DeleteDomainRecord(req); err != nil && !isDomainRecordDoesNotExistError(err) { return err } return nil } func (d *dnsClient) waitForAliDNSRateLimiter(ctx context.Context) error { timeoutCtx, cancel := context.WithTimeout(ctx, d.RateLimiterWaitTimeout) defer cancel() t := time.Now() if err := d.RateLimiter.Wait(timeoutCtx); err != nil { return &RateLimiterWaitError{Cause: err} } if waitDuration := time.Since(t); waitDuration.Seconds() > 1/float64(d.RateLimiter.Limit()) { d.Logger.Info("Waited for client-side aliyun DNS rate limiter", "waitDuration", waitDuration.String()) } return nil } func getRR(name, domainName string) (string, error) { if name == domainName { return "@", nil } suffix := "." + domainName if !strings.HasSuffix(name, suffix) { return "", fmt.Errorf("name %s does not match domain name %s", name, domainName) } return strings.TrimSuffix(name, suffix), nil } func isDomainRecordDoesNotExistError(err error) bool { if serverError, ok := err.(*errors.ServerError); ok { if serverError.ErrorCode() == alicloud.ErrorCodeDomainRecordNotBelongToUser
} return false } // CompositeDomainName composes and returns a composite domain name from the given domain name and id, // in the format <domainName>:<domainId> func CompositeDomainName(domainName, domainId string) string { if domainId != "" { return domainName + ":" + domainId } return domainName } // DomainNameAndId decomposes the given composite domain name in the format <domainName>:<domainId> // into its constituent domain name and id. func DomainNameAndId(compositeDomainName string) (string, string) { if parts := strings.Split(compositeDomainName, ":"); len(parts) == 2 { return parts[0], parts[1] } return compositeDomainName, "" } // IsThrottlingError returns true if the error is a throttling error. func IsThrottlingError(err error) bool { if alierr, ok := err.(errors.Error); ok && strings.Contains(alierr.Message(), "Throttling") { return true } return false }
{ return true }
conditional_block
client_dns.go
// Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package client import ( "context" "fmt" "strings" "time" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/errors" "github.com/aliyun/alibaba-cloud-sdk-go/sdk/requests" "github.com/aliyun/alibaba-cloud-sdk-go/services/alidns" "github.com/gardener/gardener/pkg/utils" "golang.org/x/time/rate" "sigs.k8s.io/controller-runtime/pkg/log" "github.com/gardener/gardener-extension-provider-alicloud/pkg/alicloud" ) const ( domainsCacheTTL = 1 * time.Hour rateLimiterCacheTTL = 1 * time.Hour ) // RateLimiterWaitError is an error to be reported if waiting for a aliyun dns rate limiter fails. // This can only happen if the wait time would exceed the configured wait timeout. type RateLimiterWaitError struct { Cause error } func (e *RateLimiterWaitError) Error() string { return fmt.Sprintf("could not wait for client-side aliyun dns rate limiter: %+v", e.Cause) } // NewDNSClient creates a new DNS client with given region, accessKeyID, and accessKeySecret. func (f *clientFactory) NewDNSClient(region, accessKeyID, accessKeySecret string) (DNS, error) { client, err := alidns.NewClientWithAccessKey(region, accessKeyID, accessKeySecret) if err != nil { return nil, err } return &dnsClient{ Client: *client, accessKeyID: accessKeyID, domainsCache: f.domainsCache, domainsCacheMutex: &f.domainsCacheMutex, RateLimiter: f.getRateLimiter(accessKeyID), RateLimiterWaitTimeout: f.waitTimeout, Logger: log.Log.WithName("ali-dnsclient"), }, nil } func (f *clientFactory) getRateLimiter(accessKeyID string) *rate.Limiter { // cache.Expiring Get and Set methods are concurrency-safe // However, if f rate limiter is not present in the cache, it may happen that multiple rate limiters are created // at the same time for the same access key id, and the desired QPS is exceeded, so use f mutex to guard against this f.rateLimitersMutex.Lock() defer f.rateLimitersMutex.Unlock() // Get f rate limiter from the cache, or create f new one if not present var rateLimiter *rate.Limiter if v, ok := f.rateLimiters.Get(accessKeyID); ok { rateLimiter = v.(*rate.Limiter) } else { rateLimiter = rate.NewLimiter(f.limit, f.burst) } // Set should be called on every Get with cache.Expiring to refresh the TTL f.rateLimiters.Set(accessKeyID, rateLimiter, rateLimiterCacheTTL) return rateLimiter } // GetDomainNames returns a map of all domain names mapped to their composite domain names. func (d *dnsClient) GetDomainNames(ctx context.Context) (map[string]string, error) { domains, err := d.getDomainsWithCache(ctx) if err != nil { return nil, err } domainNames := make(map[string]string) for _, domain := range domains { domainNames[domain.DomainName] = CompositeDomainName(domain.DomainName, domain.DomainId) } return domainNames, nil } // GetDomainName returns the composite domain name of the domain with the given domain id. 
func (d *dnsClient) GetDomainName(ctx context.Context, domainId string) (string, error) { domains, err := d.getDomainsWithCache(ctx) if err != nil { return "", err } domain, ok := domains[domainId] if !ok { return "", fmt.Errorf("DNS domain with id %s not found", domainId) } return CompositeDomainName(domain.DomainName, domain.DomainId), nil } // CreateOrUpdateDomainRecords creates or updates the domain records with the given domain name, name, record type, // values, and ttl. // * For each element in values that has an existing domain record, the existing record is updated if needed. // * For each element in values that doesn't have an existing domain record, a new domain record is created. // * For each existing domain record that doesn't have a corresponding element in values, the existing record is deleted. func (d *dnsClient) CreateOrUpdateDomainRecords(ctx context.Context, domainName, name, recordType string, values []string, ttl int64) error { domainName, _ = DomainNameAndId(domainName) rr, err := getRR(name, domainName) if err != nil { return err } records, err := d.getDomainRecords(ctx, domainName, rr, recordType) if err != nil { return err } for _, value := range values { if record, ok := records[value]; ok { // Only update the existing domain record if the current TTL value is different from the given one // At this point we know that rr, recordType, and value are the same if record.TTL != ttl { if err := d.updateDomainRecord(ctx, record.RecordId, rr, recordType, value, ttl); err != nil { return err } } } else { if err := d.createDomainRecord(ctx, domainName, rr, recordType, value, ttl); err != nil { return err } } } for value, record := range records { if !utils.ValueExists(value, values) { if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil { return err } } } return nil } // DeleteDomainRecords deletes the domain records with the given domain name, name and record type. func (d *dnsClient) DeleteDomainRecords(ctx context.Context, domainName, name, recordType string) error { domainName, _ = DomainNameAndId(domainName) rr, err := getRR(name, domainName) if err != nil { return err } records, err := d.getDomainRecords(ctx, domainName, rr, recordType) if err != nil { return err } for _, record := range records { if err := d.deleteDomainRecord(ctx, record.RecordId); err != nil { return err } } return nil } func (d *dnsClient) getDomainsWithCache(ctx context.Context) (map[string]alidns.Domain, error) { // cache.Expiring Get and Set methods are concurrency-safe. // However, if an accessKeyID is not present in the cache and multiple DNSRecords are reconciled at the same time, // it may happen that getDomains is called multiple times instead of just once, so use a mutex to guard against this. // It is ok to use a shared mutex here as long as the number of accessKeyIDs using custom domains is low. // This may need to be revisited with a larger number of such accessKeyIDs to avoid them blocking each other // during the (potentially long-running) call to getDomains. d.domainsCacheMutex.Lock() defer d.domainsCacheMutex.Unlock() if v, ok := d.domainsCache.Get(d.accessKeyID); ok { return v.(map[string]alidns.Domain), nil } domains, err := d.getDomains(ctx) if err != nil { return nil, err } d.domainsCache.Set(d.accessKeyID, domains, domainsCacheTTL) return domains, nil } // getDomains returns all domains. 
func (d *dnsClient) getDomains(ctx context.Context) (map[string]alidns.Domain, error) { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return nil, err } domains := make(map[string]alidns.Domain) pageSize, pageNumber := 20, 1 req := alidns.CreateDescribeDomainsRequest() req.PageSize = requests.NewInteger(pageSize) for { req.PageNumber = requests.NewInteger(pageNumber) resp, err := d.Client.DescribeDomains(req) if err != nil { return nil, err } for _, domain := range resp.Domains.Domain { domains[domain.DomainId] = getDomainFromResponse(domain) } if resp.PageNumber*int64(pageSize) >= resp.TotalCount { break } pageNumber++ } return domains, nil } func getDomainFromResponse(domainFromResp alidns.DomainInDescribeDomains) alidns.Domain { return alidns.Domain{ DomainId: domainFromResp.DomainId, DomainName: domainFromResp.DomainName, AliDomain: domainFromResp.AliDomain, CreateTimestamp: domainFromResp.CreateTimestamp, //ExpireTimestamp: , InstanceEndTime: domainFromResp.InstanceEndTime, CreateTime: domainFromResp.CreateTime, // SourceProtocol: domainFromResp.SourceProtocol, GroupName: domainFromResp.GroupName, VersionCode: domainFromResp.VersionCode, // UpdateTimestamp: domainFromResp.UpdateTimestamp, RecordCount: domainFromResp.RecordCount, InstanceExpired: domainFromResp.InstanceExpired, ResourceGroupId: domainFromResp.ResourceGroupId, // CacheTtlMin: domainFromResp.CacheTtlMin, InstanceId: domainFromResp.InstanceId, //ExpireTime:, GroupId: domainFromResp.GroupId, // SourceEdns: domainFromResp.SourceEdns, RegistrantEmail: domainFromResp.RegistrantEmail, VersionName: domainFromResp.VersionName, // UpdateTime: domainFromResp.UpdateTime, Remark: domainFromResp.Remark, // CacheTtlMax: domainFromResp.CacheTtlMax, PunyCode: domainFromResp.PunyCode, Starmark: domainFromResp.Starmark, // DnsServers: domainFromResp.DnsServers, Tags: domainFromResp.Tags, // SourceDnsServers: domainFromResp.SourceDnsServers, } } // getDomainRecords returns the domain records with the given domain name, rr, and record type. func (d *dnsClient)
(ctx context.Context, domainName, rr, recordType string) (map[string]alidns.Record, error) { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return nil, err } records := make(map[string]alidns.Record) pageSize, pageNumber := 20, 1 req := alidns.CreateDescribeDomainRecordsRequest() req.PageSize = requests.NewInteger(pageSize) for { req.PageNumber = requests.NewInteger(pageNumber) req.DomainName = domainName req.RRKeyWord = rr req.TypeKeyWord = recordType resp, err := d.Client.DescribeDomainRecords(req) if err != nil { return nil, err } for _, record := range resp.DomainRecords.Record { records[record.Value] = record } if resp.PageNumber*int64(pageSize) >= resp.TotalCount { break } pageNumber++ } return records, nil } func (d *dnsClient) createDomainRecord(ctx context.Context, domainName, rr, recordType, value string, ttl int64) error { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return err } req := alidns.CreateAddDomainRecordRequest() req.DomainName = domainName req.RR = rr req.Type = recordType req.Value = value req.TTL = requests.NewInteger(int(ttl)) _, err := d.Client.AddDomainRecord(req) return err } func (d *dnsClient) updateDomainRecord(ctx context.Context, id, rr, recordType, value string, ttl int64) error { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return err } req := alidns.CreateUpdateDomainRecordRequest() req.RecordId = id req.RR = rr req.Type = recordType req.Value = value req.TTL = requests.NewInteger(int(ttl)) _, err := d.Client.UpdateDomainRecord(req) return err } func (d *dnsClient) deleteDomainRecord(ctx context.Context, id string) error { if err := d.waitForAliDNSRateLimiter(ctx); err != nil { return err } req := alidns.CreateDeleteDomainRecordRequest() req.RecordId = id if _, err := d.Client.DeleteDomainRecord(req); err != nil && !isDomainRecordDoesNotExistError(err) { return err } return nil } func (d *dnsClient) waitForAliDNSRateLimiter(ctx context.Context) error { timeoutCtx, cancel := context.WithTimeout(ctx, d.RateLimiterWaitTimeout) defer cancel() t := time.Now() if err := d.RateLimiter.Wait(timeoutCtx); err != nil { return &RateLimiterWaitError{Cause: err} } if waitDuration := time.Since(t); waitDuration.Seconds() > 1/float64(d.RateLimiter.Limit()) { d.Logger.Info("Waited for client-side aliyun DNS rate limiter", "waitDuration", waitDuration.String()) } return nil } func getRR(name, domainName string) (string, error) { if name == domainName { return "@", nil } suffix := "." + domainName if !strings.HasSuffix(name, suffix) { return "", fmt.Errorf("name %s does not match domain name %s", name, domainName) } return strings.TrimSuffix(name, suffix), nil } func isDomainRecordDoesNotExistError(err error) bool { if serverError, ok := err.(*errors.ServerError); ok { if serverError.ErrorCode() == alicloud.ErrorCodeDomainRecordNotBelongToUser { return true } } return false } // CompositeDomainName composes and returns a composite domain name from the given domain name and id, // in the format <domainName>:<domainId> func CompositeDomainName(domainName, domainId string) string { if domainId != "" { return domainName + ":" + domainId } return domainName } // DomainNameAndId decomposes the given composite domain name in the format <domainName>:<domainId> // into its constituent domain name and id. 
func DomainNameAndId(compositeDomainName string) (string, string) { if parts := strings.Split(compositeDomainName, ":"); len(parts) == 2 { return parts[0], parts[1] } return compositeDomainName, "" } // IsThrottlingError returns true if the error is a throttling error. func IsThrottlingError(err error) bool { if alierr, ok := err.(errors.Error); ok && strings.Contains(alierr.Message(), "Throttling") { return true } return false }
getDomainRecords
identifier_name
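The client above wraps rate.Limiter.Wait in a context with a deadline and surfaces a dedicated error type when the wait would exceed the timeout. That pattern is reusable outside this client; the following is a minimal Go sketch of the same idea, where waitWithTimeout and the QPS/burst values are illustrative choices, not part of the original code:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

// waitWithTimeout blocks until the limiter grants a token or the timeout
// elapses; on timeout it returns a wrapped error, mirroring how
// RateLimiterWaitError wraps the underlying wait failure.
func waitWithTimeout(ctx context.Context, limiter *rate.Limiter, timeout time.Duration) error {
	timeoutCtx, cancel := context.WithTimeout(ctx, timeout)
	defer cancel()
	if err := limiter.Wait(timeoutCtx); err != nil {
		return fmt.Errorf("could not wait for rate limiter: %w", err)
	}
	return nil
}

func main() {
	// 5 requests per second with a burst of 1 -- illustrative values only.
	limiter := rate.NewLimiter(rate.Limit(5), 1)
	for i := 0; i < 3; i++ {
		if err := waitWithTimeout(context.Background(), limiter, 2*time.Second); err != nil {
			fmt.Println("giving up:", err)
			return
		}
		fmt.Println("request", i, "allowed")
	}
}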
lib.rs
// Copyright 2018 Benjamin Fry <benjaminfry@me.com> // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. #![recursion_limit = "128"] extern crate proc_macro; extern crate proc_macro2; #[macro_use] extern crate syn; #[macro_use] extern crate quote; use proc_macro2::{Ident, Span, TokenStream}; use quote::ToTokens; use syn::punctuated::Punctuated; use syn::token::Comma; fn create_function_params(num_args: usize) -> TokenStream { let mut tokens = TokenStream::new(); for i in 0..num_args { let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site()); tokens.extend(quote!( #arg_name, )); } tokens } fn extract_arg_data(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream { let mut get_args_stream = TokenStream::new(); for (i, arg) in inputs.iter().enumerate() { let arg_type: &syn::Type = match *arg { syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => { panic!("self functions not supported") } syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"), syn::FnArg::Captured(ref captured) => &captured.ty, syn::FnArg::Ignored(ref ty) => ty, }; let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site()); let arg_error = format!("unsupported function argument type for {}", arg_name); let get_arg = quote!( let #arg_name: #arg_type = pg_extend::pg_datum::TryFromPgDatum::try_from( pg_extend::pg_datum::PgDatum::from_raw( *args.next().expect("wrong number of args passed into get_args for args?"), args_null.next().expect("wrong number of args passed into get_args for args_null?") ), ) .expect(#arg_error); ); get_args_stream.extend(get_arg); } get_args_stream } fn sql_param_list(num_args: usize) -> String { let mut tokens = String::new(); if num_args == 0 { return tokens; } let arg_name = |num: usize| format!("{{sql_{}}}", num); for i in 0..(num_args - 1) { let arg_name = arg_name(i); tokens.push_str(&format!("{},", arg_name)); } let arg_name = arg_name(num_args - 1); tokens.push_str(&arg_name); tokens } fn sql_param_types(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream { let mut tokens = TokenStream::new(); for (i, arg) in inputs.iter().enumerate() { let arg_type: &syn::Type = match *arg { syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => { panic!("self functions not supported") } syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"), syn::FnArg::Captured(ref captured) => &captured.ty, syn::FnArg::Ignored(ref ty) => ty, }; let sql_name = Ident::new(&format!("sql_{}", i), Span::call_site()); let sql_param = quote!( #sql_name = pg_extend::pg_type::PgType::from_rust::<#arg_type>().as_str(), ); tokens.extend(sql_param); } tokens } fn sql_return_type(outputs: &syn::ReturnType) -> TokenStream { let ty = match outputs { syn::ReturnType::Default => quote!(()), syn::ReturnType::Type(_, ty) => quote!(#ty), }; quote!(pg_extend::pg_type::PgType::from_rust::<#ty>().return_stmt()) } fn impl_info_for_fdw(item: &syn::Item) -> TokenStream { let typ = if let syn::Item::Struct(typ) = item { typ } else { panic!("Annotation only supported on structs") }; let mut decl = item.clone().into_token_stream(); let struct_name = &typ.ident; let func_name = syn::Ident::new( &format!("fdw_{}", struct_name), Span::call_site(), ); let info_fn = get_info_fn(&func_name); let fdw_fn = quote!( #[no_mangle] pub extern "C" fn 
#func_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum { pg_extend::pg_fdw::ForeignWrapper::<#struct_name>::into_datum() } ); let create_sql_name = syn::Ident::new(&format!("{}_pg_create_stmt", struct_name), Span::call_site()); let sql_stmt = format!( " CREATE OR REPLACE FUNCTION {0}() RETURNS fdw_handler AS '{{library_path}}', '{1}' LANGUAGE C STRICT; CREATE FOREIGN DATA WRAPPER {0} handler {0} NO VALIDATOR; ", struct_name, func_name, ); // declare a function that can be used to output a create statement for the externed function // all create statements will be put into a common module for access let create_sql_def = quote!( #[allow(unused)] pub fn #create_sql_name(library_path: &str) -> String { use pg_extend::pg_type::PgTypeInfo; format!( #sql_stmt, library_path = library_path ) } ); decl.extend(info_fn); decl.extend(create_sql_def); decl.extend(fdw_fn); decl } fn get_info_fn(func_name: &syn::Ident) -> TokenStream { let func_info_name = syn::Ident::new( &format!("pg_finfo_{}", func_name), Span::call_site(), ); // create the postgres info quote!( #[no_mangle] pub extern "C" fn #func_info_name () -> &'static pg_extend::pg_sys::Pg_finfo_record { const my_finfo: pg_extend::pg_sys::Pg_finfo_record = pg_extend::pg_sys::Pg_finfo_record { api_version: 1 }; &my_finfo } ) } fn impl_info_for_fn(item: &syn::Item) -> TokenStream { let func = if let syn::Item::Fn(func) = item { func } else { panic!("annotation only supported on functions"); }; let func_name = &func.ident; let func_decl = &func.decl; if func_decl.variadic.is_some() { panic!("variadic functions (...) not supported") } //let generics = &func_decl.generics; let inputs = &func_decl.inputs; let output = &func_decl.output; //let func_block = &func.block; // declare the function let mut function = item.clone().into_token_stream(); let func_wrapper_name = syn::Ident::new(&format!("pg_{}", func_name), Span::call_site()); let func_info = get_info_fn(&func_wrapper_name); // join the function information in function.extend(func_info); let get_args_from_datums = extract_arg_data(inputs); let func_params = create_function_params(inputs.len()); // wrap the original function in a pg_wrapper function let func_wrapper = quote!( #[no_mangle] pub extern "C" fn #func_wrapper_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum { use std::panic; let func_info: &mut pg_extend::pg_sys::FunctionCallInfoData = unsafe { func_call_info .as_mut() .expect("func_call_info was unexpectedly NULL") }; // guard the Postgres process against the panic, and give us an opportunity to cleanup let panic_result = panic::catch_unwind(|| { // extract the argument list let (mut args, mut args_null) = pg_extend::get_args(func_info); // arbitrary Datum conversions occur here, and could panic // so this is inside the catch unwind #get_args_from_datums // this is the meat of the function call into the extension code let result = #func_name(#func_params); // arbitrary Rust code could panic, so this is guarded pg_extend::pg_datum::PgDatum::from(result) }); // see if we caught a panic match panic_result { Ok(result) => { // in addition to the null case, we should handle result types probably let isnull: pg_extend::pg_bool::Bool = result.is_null().into(); func_info.isnull = isnull.into(); // return the datum result.into_datum() } Err(err) => { // ensure the return value is null func_info.isnull = pg_extend::pg_bool::Bool::from(true).into(); // TODO: anything else to clean up before resuming the panic? 
panic::resume_unwind(err) } } } ); let create_sql_name = syn::Ident::new(&format!("{}_pg_create_stmt", func_name), Span::call_site()); let sql_params = sql_param_list(inputs.len()); let sql_param_types = sql_param_types(inputs); let sql_return = sql_return_type(output); // ret and library_path are replacements at runtime let sql_stmt = format!( // FIXME: Add/remove STRICT keywords based on Option<> arguments. "CREATE or REPLACE FUNCTION {}({}) {{ret}} AS '{{library_path}}', '{}' LANGUAGE C STRICT;", func_name, sql_params, func_wrapper_name, ); // declare a function that can be used to output a create statement for the externed function // all create statements will be put into a common module for access let create_sql_def = quote!( #[allow(unused)] pub fn #create_sql_name(library_path: &str) -> String { use pg_extend::pg_type::PgTypeInfo; format!( #sql_stmt, #sql_param_types ret = #sql_return, library_path = library_path ) } ); function.extend(func_wrapper); function.extend(create_sql_def); function } /// An attribute macro for wrapping Rust functions with boilerplate for defining and /// calling conventions between Postgres and Rust. /// /// This mimics the C macro for defining functions /// /// ```c /// #define PG_FUNCTION_INFO_V1(funcname) \ /// extern Datum funcname(PG_FUNCTION_ARGS); \ /// extern PGDLLEXPORT const Pg_finfo_record * CppConcat(pg_finfo_,funcname)(void); \ /// const Pg_finfo_record * \ /// CppConcat(pg_finfo_,funcname) (void) \ /// { \ /// static const Pg_finfo_record my_finfo = { 1 }; \ /// return &my_finfo; \ /// } \ /// ``` /// /// # Returns /// /// The result of this macro will be to produce a new function wrapping the one annotated but prepended with /// `pg_` to distinguish them and also declares a function for Postgres to get the Function information; /// /// For example: if the signature `fn add_one(value: i32) -> i32` is annotated, two functions will be produced, /// the wrapper function with a signature of: /// /// ```rust,no_run /// extern crate pg_extend; /// use pg_extend::pg_sys; /// /// #[no_mangle] /// pub extern "C" fn pg_add_one(func_call_info: pg_sys::FunctionCallInfo) -> pg_sys::Datum /// # { /// # unimplemented!() /// # } /// ``` /// /// and the info function with a signature of: /// /// ```rust,no_run /// extern crate pg_extend; /// use pg_extend::pg_sys; /// /// #[no_mangle] /// pub extern "C" fn pg_finfo_pg_add_one() -> &'static pg_sys::Pg_finfo_record /// # { /// # unimplemented!() /// # } /// ``` /// #[proc_macro_attribute] #[allow(clippy::needless_pass_by_value)] pub fn pg_extern( _attr: proc_macro::TokenStream, item: proc_macro::TokenStream, ) -> proc_macro::TokenStream { // get a usable token stream let ast: syn::Item = parse_macro_input!(item as syn::Item); // Build the impl let expanded: TokenStream = impl_info_for_fn(&ast); // Return the generated impl proc_macro::TokenStream::from(expanded) } /// An attribute macro for wrapping Rust structs with boilerplate for defining and exposing a foreign data wrapper /// This is mostly a slimmed down version of pg_extern, with none of the data argument handling. #[proc_macro_attribute] #[allow(clippy::needless_pass_by_value)] pub fn
( _attr: proc_macro::TokenStream, item: proc_macro::TokenStream, ) -> proc_macro::TokenStream { // get a usable token stream let ast: syn::Item = parse_macro_input!(item as syn::Item); // Build the impl let expanded: TokenStream = impl_info_for_fdw(&ast); // Return the generated impl proc_macro::TokenStream::from(expanded) }
pg_foreignwrapper
identifier_name
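The macro in this record renders a two-stage SQL template: placeholder names like {library_path} survive macro expansion and are only filled in at runtime. The same two-stage templating can be sketched in Go (the document's other language); the wrapper and handler names below are made up for illustration and are not the pg_extend API:

package main

import "fmt"

// createStmt renders the CREATE FUNCTION / CREATE FOREIGN DATA WRAPPER pair
// for a handler, filling in the shared-library path at runtime, analogous to
// the generated <struct>_pg_create_stmt functions.
func createStmt(wrapperName, handlerFunc, libraryPath string) string {
	return fmt.Sprintf(
		"CREATE OR REPLACE FUNCTION %[1]s() RETURNS fdw_handler AS '%[3]s', '%[2]s' LANGUAGE C STRICT;\n"+
			"CREATE FOREIGN DATA WRAPPER %[1]s handler %[1]s NO VALIDATOR;",
		wrapperName, handlerFunc, libraryPath)
}

func main() {
	// "my_wrapper", "fdw_MyWrapper", and the .so path are hypothetical.
	fmt.Println(createStmt("my_wrapper", "fdw_MyWrapper", "/usr/lib/postgresql/my_ext.so"))
}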
lib.rs
// Copyright 2018 Benjamin Fry <benjaminfry@me.com> // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. #![recursion_limit = "128"] extern crate proc_macro; extern crate proc_macro2; #[macro_use] extern crate syn; #[macro_use] extern crate quote; use proc_macro2::{Ident, Span, TokenStream}; use quote::ToTokens; use syn::punctuated::Punctuated; use syn::token::Comma; fn create_function_params(num_args: usize) -> TokenStream { let mut tokens = TokenStream::new(); for i in 0..num_args { let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site()); tokens.extend(quote!( #arg_name, )); } tokens } fn extract_arg_data(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream { let mut get_args_stream = TokenStream::new(); for (i, arg) in inputs.iter().enumerate() { let arg_type: &syn::Type = match *arg { syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => { panic!("self functions not supported") } syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"), syn::FnArg::Captured(ref captured) => &captured.ty, syn::FnArg::Ignored(ref ty) => ty, }; let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site()); let arg_error = format!("unsupported function argument type for {}", arg_name); let get_arg = quote!( let #arg_name: #arg_type = pg_extend::pg_datum::TryFromPgDatum::try_from( pg_extend::pg_datum::PgDatum::from_raw( *args.next().expect("wrong number of args passed into get_args for args?"), args_null.next().expect("wrong number of args passed into get_args for args_null?") ), ) .expect(#arg_error); ); get_args_stream.extend(get_arg); } get_args_stream } fn sql_param_list(num_args: usize) -> String { let mut tokens = String::new(); if num_args == 0 { return tokens; } let arg_name = |num: usize| format!("{{sql_{}}}", num); for i in 0..(num_args - 1) { let arg_name = arg_name(i); tokens.push_str(&format!("{},", arg_name)); } let arg_name = arg_name(num_args - 1); tokens.push_str(&arg_name); tokens } fn sql_param_types(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream { let mut tokens = TokenStream::new(); for (i, arg) in inputs.iter().enumerate() { let arg_type: &syn::Type = match *arg { syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => { panic!("self functions not supported") } syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"), syn::FnArg::Captured(ref captured) => &captured.ty, syn::FnArg::Ignored(ref ty) => ty, }; let sql_name = Ident::new(&format!("sql_{}", i), Span::call_site()); let sql_param = quote!( #sql_name = pg_extend::pg_type::PgType::from_rust::<#arg_type>().as_str(), ); tokens.extend(sql_param); } tokens } fn sql_return_type(outputs: &syn::ReturnType) -> TokenStream
fn impl_info_for_fdw(item: &syn::Item) -> TokenStream { let typ = if let syn::Item::Struct(typ) = item { typ } else { panic!("Annotation only supported on structs") }; let mut decl = item.clone().into_token_stream(); let struct_name = &typ.ident; let func_name = syn::Ident::new( &format!("fdw_{}", struct_name), Span::call_site(), ); let info_fn = get_info_fn(&func_name); let fdw_fn = quote!( #[no_mangle] pub extern "C" fn #func_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum { pg_extend::pg_fdw::ForeignWrapper::<#struct_name>::into_datum() } ); let create_sql_name = syn::Ident::new(&format!("{}_pg_create_stmt", struct_name), Span::call_site()); let sql_stmt = format!( " CREATE OR REPLACE FUNCTION {0}() RETURNS fdw_handler AS '{{library_path}}', '{1}' LANGUAGE C STRICT; CREATE FOREIGN DATA WRAPPER {0} handler {0} NO VALIDATOR; ", struct_name, func_name, ); // declare a function that can be used to output a create statement for the externed function // all create statements will be put into a common module for access let create_sql_def = quote!( #[allow(unused)] pub fn #create_sql_name(library_path: &str) -> String { use pg_extend::pg_type::PgTypeInfo; format!( #sql_stmt, library_path = library_path ) } ); decl.extend(info_fn); decl.extend(create_sql_def); decl.extend(fdw_fn); decl } fn get_info_fn(func_name: &syn::Ident) -> TokenStream { let func_info_name = syn::Ident::new( &format!("pg_finfo_{}", func_name), Span::call_site(), ); // create the postgres info quote!( #[no_mangle] pub extern "C" fn #func_info_name () -> &'static pg_extend::pg_sys::Pg_finfo_record { const my_finfo: pg_extend::pg_sys::Pg_finfo_record = pg_extend::pg_sys::Pg_finfo_record { api_version: 1 }; &my_finfo } ) } fn impl_info_for_fn(item: &syn::Item) -> TokenStream { let func = if let syn::Item::Fn(func) = item { func } else { panic!("annotation only supported on functions"); }; let func_name = &func.ident; let func_decl = &func.decl; if func_decl.variadic.is_some() { panic!("variadic functions (...) 
not supported") } //let generics = &func_decl.generics; let inputs = &func_decl.inputs; let output = &func_decl.output; //let func_block = &func.block; // declare the function let mut function = item.clone().into_token_stream(); let func_wrapper_name = syn::Ident::new(&format!("pg_{}", func_name), Span::call_site()); let func_info = get_info_fn(&func_wrapper_name); // join the function information in function.extend(func_info); let get_args_from_datums = extract_arg_data(inputs); let func_params = create_function_params(inputs.len()); // wrap the original function in a pg_wrapper function let func_wrapper = quote!( #[no_mangle] pub extern "C" fn #func_wrapper_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum { use std::panic; let func_info: &mut pg_extend::pg_sys::FunctionCallInfoData = unsafe { func_call_info .as_mut() .expect("func_call_info was unexpectedly NULL") }; // guard the Postgres process against the panic, and give us an oportunity to cleanup let panic_result = panic::catch_unwind(|| { // extract the argument list let (mut args, mut args_null) = pg_extend::get_args(func_info); // arbitrary Datum conversions occur here, and could panic // so this is inside the catch unwind #get_args_from_datums // this is the meat of the function call into the extension code let result = #func_name(#func_params); // arbitrary Rust code could panic, so this is guarded pg_extend::pg_datum::PgDatum::from(result) }); // see if we caught a panic match panic_result { Ok(result) => { // in addition to the null case, we should handle result types probably let isnull: pg_extend::pg_bool::Bool = result.is_null().into(); func_info.isnull = isnull.into(); // return the datum result.into_datum() } Err(err) => { // ensure the return value is null func_info.isnull = pg_extend::pg_bool::Bool::from(true).into(); // TODO: anything else to cean up before resuming the panic? panic::resume_unwind(err) } } } ); let create_sql_name = syn::Ident::new(&format!("{}_pg_create_stmt", func_name), Span::call_site()); let sql_params = sql_param_list(inputs.len()); let sql_param_types = sql_param_types(inputs); let sql_return = sql_return_type(output); // ret and library_path are replacements at runtime let sql_stmt = format!( // FIXME: Add/remove STRICT keywords based on Option<> arguments. "CREATE or REPLACE FUNCTION {}({}) {{ret}} AS '{{library_path}}', '{}' LANGUAGE C STRICT;", func_name, sql_params, func_wrapper_name, ); // declare a function that can be used to output a create statement for the externed function // all create statements will be put into a common module for access let create_sql_def = quote!( #[allow(unused)] pub fn #create_sql_name(library_path: &str) -> String { use pg_extend::pg_type::PgTypeInfo; format!( #sql_stmt, #sql_param_types ret = #sql_return, library_path = library_path ) } ); function.extend(func_wrapper); function.extend(create_sql_def); function } /// An attribute macro for wrapping Rust functions with boiler plate for defining and /// calling conventions between Postgres and Rust. 
/// /// This mimics the C macro for defining functions /// /// ```c /// #define PG_FUNCTION_INFO_V1(funcname) \ /// extern Datum funcname(PG_FUNCTION_ARGS); \ /// extern PGDLLEXPORT const Pg_finfo_record * CppConcat(pg_finfo_,funcname)(void); \ /// const Pg_finfo_record * \ /// CppConcat(pg_finfo_,funcname) (void) \ /// { \ /// static const Pg_finfo_record my_finfo = { 1 }; \ /// return &my_finfo; \ /// } \ /// ``` /// /// # Returns /// /// The result of this macro will be to produce a new function wrapping the one annotated but prepended with /// `pg_` to distinguish them and also declares a function for Postgres to get the Function information; /// /// For example: if the signature `fn add_one(value: i32) -> i32` is annotated, two functions will be produced, /// the wrapper function with a signature of: /// /// ```rust,no_run /// extern crate pg_extend; /// use pg_extend::pg_sys; /// /// #[no_mangle] /// pub extern "C" fn pg_add_one(func_call_info: pg_sys::FunctionCallInfo) -> pg_sys::Datum /// # { /// # unimplemented!() /// # } /// ``` /// /// and the info function with a signature of: /// /// ```rust,no_run /// extern crate pg_extend; /// use pg_extend::pg_sys; /// /// #[no_mangle] /// pub extern "C" fn pg_finfo_pg_add_one() -> &'static pg_sys::Pg_finfo_record /// # { /// # unimplemented!() /// # } /// ``` /// #[proc_macro_attribute] #[allow(clippy::needless_pass_by_value)] pub fn pg_extern( _attr: proc_macro::TokenStream, item: proc_macro::TokenStream, ) -> proc_macro::TokenStream { // get a usable token stream let ast: syn::Item = parse_macro_input!(item as syn::Item); // Build the impl let expanded: TokenStream = impl_info_for_fn(&ast); // Return the generated impl proc_macro::TokenStream::from(expanded) } /// An attribute macro for wrapping Rust structs with boilerplate for defining and exposing a foreign data wrapper /// This is mostly a slimmed down version of pg_extern, with none of the data argument handling. #[proc_macro_attribute] #[allow(clippy::needless_pass_by_value)] pub fn pg_foreignwrapper( _attr: proc_macro::TokenStream, item: proc_macro::TokenStream, ) -> proc_macro::TokenStream { // get a usable token stream let ast: syn::Item = parse_macro_input!(item as syn::Item); // Build the impl let expanded: TokenStream = impl_info_for_fdw(&ast); // Return the generated impl proc_macro::TokenStream::from(expanded) }
{ let ty = match outputs { syn::ReturnType::Default => quote!(()), syn::ReturnType::Type(_, ty) => quote!(#ty), }; quote!(pg_extend::pg_type::PgType::from_rust::<#ty>().return_stmt()) }
identifier_body
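The body isolated in this record resolves a host-language return type to a SQL RETURNS clause at macro-expansion time. As a rough Go analogy (the type table below is an illustrative stand-in, not pg_extend's PgType::from_rust mapping), the same lookup can be done with reflection:

package main

import (
	"fmt"
	"reflect"
)

// pgType maps a handful of Go kinds to Postgres type names; the table is a
// hypothetical example of a host-type-to-SQL-type lookup.
func pgType(t reflect.Type) string {
	switch t.Kind() {
	case reflect.Int32:
		return "integer"
	case reflect.Int64:
		return "bigint"
	case reflect.Float64:
		return "double precision"
	case reflect.String:
		return "text"
	default:
		return "bytea" // fallback for unmapped types
	}
}

// returnStmt renders the RETURNS clause for a function's return type,
// analogous to return_stmt in the record above.
func returnStmt(t reflect.Type) string {
	return "RETURNS " + pgType(t)
}

func main() {
	fmt.Println(returnStmt(reflect.TypeOf(int32(0)))) // RETURNS integer
	fmt.Println(returnStmt(reflect.TypeOf("")))       // RETURNS text
}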
lib.rs
// Copyright 2018 Benjamin Fry <benjaminfry@me.com> // // Licensed under the Apache License, Version 2.0, <LICENSE-APACHE or // http://apache.org/licenses/LICENSE-2.0> or the MIT license <LICENSE-MIT or // http://opensource.org/licenses/MIT>, at your option. This file may not be // copied, modified, or distributed except according to those terms. #![recursion_limit = "128"] extern crate proc_macro; extern crate proc_macro2; #[macro_use] extern crate syn; #[macro_use] extern crate quote; use proc_macro2::{Ident, Span, TokenStream}; use quote::ToTokens; use syn::punctuated::Punctuated; use syn::token::Comma; fn create_function_params(num_args: usize) -> TokenStream { let mut tokens = TokenStream::new(); for i in 0..num_args { let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site()); tokens.extend(quote!( #arg_name, )); } tokens } fn extract_arg_data(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream { let mut get_args_stream = TokenStream::new(); for (i, arg) in inputs.iter().enumerate() { let arg_type: &syn::Type = match *arg { syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => { panic!("self functions not supported") } syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"), syn::FnArg::Captured(ref captured) => &captured.ty, syn::FnArg::Ignored(ref ty) => ty, }; let arg_name = Ident::new(&format!("arg_{}", i), Span::call_site()); let arg_error = format!("unsupported function argument type for {}", arg_name); let get_arg = quote!( let #arg_name: #arg_type = pg_extend::pg_datum::TryFromPgDatum::try_from( pg_extend::pg_datum::PgDatum::from_raw( *args.next().expect("wrong number of args passed into get_args for args?"), args_null.next().expect("wrong number of args passed into get_args for args_null?") ), ) .expect(#arg_error); ); get_args_stream.extend(get_arg); } get_args_stream } fn sql_param_list(num_args: usize) -> String { let mut tokens = String::new(); if num_args == 0 { return tokens; } let arg_name = |num: usize| format!("{{sql_{}}}", num); for i in 0..(num_args - 1) { let arg_name = arg_name(i); tokens.push_str(&format!("{},", arg_name)); } let arg_name = arg_name(num_args - 1); tokens.push_str(&arg_name); tokens } fn sql_param_types(inputs: &Punctuated<syn::FnArg, Comma>) -> TokenStream { let mut tokens = TokenStream::new(); for (i, arg) in inputs.iter().enumerate() { let arg_type: &syn::Type = match *arg { syn::FnArg::SelfRef(_) | syn::FnArg::SelfValue(_) => { panic!("self functions not supported") } syn::FnArg::Inferred(_) => panic!("inferred function parameters not supported"), syn::FnArg::Captured(ref captured) => &captured.ty, syn::FnArg::Ignored(ref ty) => ty, }; let sql_name = Ident::new(&format!("sql_{}", i), Span::call_site()); let sql_param = quote!( #sql_name = pg_extend::pg_type::PgType::from_rust::<#arg_type>().as_str(), ); tokens.extend(sql_param); } tokens } fn sql_return_type(outputs: &syn::ReturnType) -> TokenStream { let ty = match outputs { syn::ReturnType::Default => quote!(()), syn::ReturnType::Type(_, ty) => quote!(#ty), }; quote!(pg_extend::pg_type::PgType::from_rust::<#ty>().return_stmt()) } fn impl_info_for_fdw(item: &syn::Item) -> TokenStream { let typ = if let syn::Item::Struct(typ) = item { typ } else { panic!("Annotation only supported on structs") }; let mut decl = item.clone().into_token_stream(); let struct_name = &typ.ident; let func_name = syn::Ident::new( &format!("fdw_{}", struct_name), Span::call_site(), ); let info_fn = get_info_fn(&func_name); let fdw_fn = quote!( #[no_mangle] pub extern "C" fn 
#func_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum { pg_extend::pg_fdw::ForeignWrapper::<#struct_name>::into_datum() } ); let create_sql_name = syn::Ident::new(&format!("{}_pg_create_stmt", struct_name), Span::call_site()); let sql_stmt = format!( " CREATE OR REPLACE FUNCTION {0}() RETURNS fdw_handler AS '{{library_path}}', '{1}' LANGUAGE C STRICT; CREATE FOREIGN DATA WRAPPER {0} handler {0} NO VALIDATOR; ", struct_name, func_name, ); // declare a function that can be used to output a create statement for the externed function // all create statements will be put into a common module for access let create_sql_def = quote!( #[allow(unused)] pub fn #create_sql_name(library_path: &str) -> String { use pg_extend::pg_type::PgTypeInfo; format!( #sql_stmt, library_path = library_path ) } ); decl.extend(info_fn); decl.extend(create_sql_def); decl.extend(fdw_fn); decl } fn get_info_fn(func_name: &syn::Ident) -> TokenStream { let func_info_name = syn::Ident::new( &format!("pg_finfo_{}", func_name), Span::call_site(), ); // create the postgres info quote!( #[no_mangle] pub extern "C" fn #func_info_name () -> &'static pg_extend::pg_sys::Pg_finfo_record { const my_finfo: pg_extend::pg_sys::Pg_finfo_record = pg_extend::pg_sys::Pg_finfo_record { api_version: 1 }; &my_finfo } ) } fn impl_info_for_fn(item: &syn::Item) -> TokenStream { let func = if let syn::Item::Fn(func) = item { func } else { panic!("annotation only supported on functions"); }; let func_name = &func.ident; let func_decl = &func.decl; if func_decl.variadic.is_some() { panic!("variadic functions (...) not supported") } //let generics = &func_decl.generics; let inputs = &func_decl.inputs; let output = &func_decl.output; //let func_block = &func.block; // declare the function let mut function = item.clone().into_token_stream(); let func_wrapper_name = syn::Ident::new(&format!("pg_{}", func_name), Span::call_site()); let func_info = get_info_fn(&func_wrapper_name); // join the function information in function.extend(func_info); let get_args_from_datums = extract_arg_data(inputs); let func_params = create_function_params(inputs.len()); // wrap the original function in a pg_wrapper function let func_wrapper = quote!( #[no_mangle] pub extern "C" fn #func_wrapper_name (func_call_info: pg_extend::pg_sys::FunctionCallInfo) -> pg_extend::pg_sys::Datum { use std::panic; let func_info: &mut pg_extend::pg_sys::FunctionCallInfoData = unsafe { func_call_info .as_mut() .expect("func_call_info was unexpectedly NULL") };
// arbitrary Datum conversions occur here, and could panic // so this is inside the catch unwind #get_args_from_datums // this is the meat of the function call into the extension code let result = #func_name(#func_params); // arbitrary Rust code could panic, so this is guarded pg_extend::pg_datum::PgDatum::from(result) }); // see if we caught a panic match panic_result { Ok(result) => { // in addition to the null case, we should handle result types probably let isnull: pg_extend::pg_bool::Bool = result.is_null().into(); func_info.isnull = isnull.into(); // return the datum result.into_datum() } Err(err) => { // ensure the return value is null func_info.isnull = pg_extend::pg_bool::Bool::from(true).into(); // TODO: anything else to clean up before resuming the panic? panic::resume_unwind(err) } } } ); let create_sql_name = syn::Ident::new(&format!("{}_pg_create_stmt", func_name), Span::call_site()); let sql_params = sql_param_list(inputs.len()); let sql_param_types = sql_param_types(inputs); let sql_return = sql_return_type(output); // ret and library_path are replacements at runtime let sql_stmt = format!( // FIXME: Add/remove STRICT keywords based on Option<> arguments. "CREATE or REPLACE FUNCTION {}({}) {{ret}} AS '{{library_path}}', '{}' LANGUAGE C STRICT;", func_name, sql_params, func_wrapper_name, ); // declare a function that can be used to output a create statement for the externed function // all create statements will be put into a common module for access let create_sql_def = quote!( #[allow(unused)] pub fn #create_sql_name(library_path: &str) -> String { use pg_extend::pg_type::PgTypeInfo; format!( #sql_stmt, #sql_param_types ret = #sql_return, library_path = library_path ) } ); function.extend(func_wrapper); function.extend(create_sql_def); function } /// An attribute macro for wrapping Rust functions with boilerplate for defining and /// calling conventions between Postgres and Rust. 
/// /// This mimics the C macro for defining functions /// /// ```c /// #define PG_FUNCTION_INFO_V1(funcname) \ /// extern Datum funcname(PG_FUNCTION_ARGS); \ /// extern PGDLLEXPORT const Pg_finfo_record * CppConcat(pg_finfo_,funcname)(void); \ /// const Pg_finfo_record * \ /// CppConcat(pg_finfo_,funcname) (void) \ /// { \ /// static const Pg_finfo_record my_finfo = { 1 }; \ /// return &my_finfo; \ /// } \ /// ``` /// /// # Returns /// /// The result of this macro will be to produce a new function wrapping the one annotated but prepended with /// `pg_` to distinguish them and also declares a function for Postgres to get the Function information; /// /// For example: if the signature `fn add_one(value: i32) -> i32` is annotated, two functions will be produced, /// the wrapper function with a signature of: /// /// ```rust,no_run /// extern crate pg_extend; /// use pg_extend::pg_sys; /// /// #[no_mangle] /// pub extern "C" fn pg_add_one(func_call_info: pg_sys::FunctionCallInfo) -> pg_sys::Datum /// # { /// # unimplemented!() /// # } /// ``` /// /// and the info function with a signature of: /// /// ```rust,no_run /// extern crate pg_extend; /// use pg_extend::pg_sys; /// /// #[no_mangle] /// pub extern "C" fn pg_finfo_pg_add_one() -> &'static pg_sys::Pg_finfo_record /// # { /// # unimplemented!() /// # } /// ``` /// #[proc_macro_attribute] #[allow(clippy::needless_pass_by_value)] pub fn pg_extern( _attr: proc_macro::TokenStream, item: proc_macro::TokenStream, ) -> proc_macro::TokenStream { // get a usable token stream let ast: syn::Item = parse_macro_input!(item as syn::Item); // Build the impl let expanded: TokenStream = impl_info_for_fn(&ast); // Return the generated impl proc_macro::TokenStream::from(expanded) } /// An attribute macro for wrapping Rust structs with boilerplate for defining and exposing a foreign data wrapper /// This is mostly a slimmed down version of pg_extern, with none of the data argument handling. #[proc_macro_attribute] #[allow(clippy::needless_pass_by_value)] pub fn pg_foreignwrapper( _attr: proc_macro::TokenStream, item: proc_macro::TokenStream, ) -> proc_macro::TokenStream { // get a usable token stream let ast: syn::Item = parse_macro_input!(item as syn::Item); // Build the impl let expanded: TokenStream = impl_info_for_fdw(&ast); // Return the generated impl proc_macro::TokenStream::from(expanded) }
// guard the Postgres process against the panic, and give us an opportunity to cleanup let panic_result = panic::catch_unwind(|| { // extract the argument list let (mut args, mut args_null) = pg_extend::get_args(func_info);
random_line_split
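The span isolated by this split guards the host Postgres process with panic::catch_unwind, cleans up, and then resumes the panic. The closest Go counterpart (a sketch with hypothetical function names; Go recovers and re-panics rather than resuming an unwind) uses a deferred recover at the boundary:

package main

import "fmt"

// callGuarded runs fn, performs cleanup if it panics, and then re-raises the
// panic -- the defer/recover counterpart of catch_unwind + resume_unwind.
func callGuarded(fn func() string, cleanup func()) string {
	defer func() {
		if err := recover(); err != nil {
			cleanup()  // e.g. mark the return value as NULL
			panic(err) // re-raise, as resume_unwind does
		}
	}()
	return fn()
}

func main() {
	defer func() {
		if err := recover(); err != nil {
			fmt.Println("caught at top level:", err)
		}
	}()
	callGuarded(
		func() string { panic("argument conversion failed") },
		func() { fmt.Println("cleanup: returning NULL") },
	)
}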
biogrid-class.ts
/** * @summary defines the main grid or control centre in the microgrid where all components of the grid * are connected to the grid. * @author Lev Stambler <levst@google.com> * @author Roland Naijuka <rnaijuka@google.com> * * Created at : 6/26/2020, 3:33:10 PM * Last modified : 7/29/2020, 9:43:17 AM */ import { Grid, GridAction, GridOptions, Town, TownSize, ItemPosition, Energy, Battery, GridItem, Distance, Power, } from '@biogrid/grid-simulator'; import * as bioconstants from '../config/bio-constants'; import { BioBattery, BiogridState, Building, SolarPanel, SolarPanelParams, } from '@biogrid/biogrid-simulator'; import { Graph } from 'graphlib'; import { EnergySource } from '../bioenergy-source/bioenergy-source'; import { BatteryParams } from '../biobattery'; export interface BiogridOptions extends GridOptions { numberOfSmallBatteryCells: number; numberOfLargeBatteryCells: number; numberOfSolarPanels: number; startDate?: Date; } export class Biogrid implements Grid { // TODO create a singleton for the Biogrid not BiogridState private state: BiogridState; // The date for when the simulation begins // Used in initializing the Solar Panels private startDate: Date; // All details for the batteries in the grid // The small batteries in the grid, will approximately have a maxCapacity of 13,500KJ private smallBatteries: Battery[]; // The large batteries in the grid, will approximately have a maxCapacity of 540,000KJ private largeBatteries: Battery[]; // A dictionary with the position as its key // Used to keep track of whether an item is already placed in a position private itemInPosition: { [positionString: string]: boolean } = {}; // All details for the source of energy private solarPanels: EnergySource[]; // Holds the efficiency of the grid private efficiency: number; constructor(private town: Town, opts: BiogridOptions) { const todayMidnight = new Date(); todayMidnight.setHours(0); this.startDate = opts.startDate || todayMidnight; // Batteries const smallBatteryPositions = this.createGridItemPositions( town.getTownSize(), opts.numberOfSmallBatteryCells ); const largeBatteryPositions = this.createGridItemPositions( town.getTownSize(), opts.numberOfLargeBatteryCells ); this.smallBatteries = this.createBatteries( smallBatteryPositions, bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY ); this.largeBatteries = this.createBatteries( largeBatteryPositions, bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ); // Energy Source const solarPanelPositions = this.createGridItemPositions( town.getTownSize(), opts.numberOfSolarPanels ); this.solarPanels = this.createSolarPanels(solarPanelPositions); this.state = new BiogridState(this.createGridItems(), town.getTownSize()); // Set the efficiency to 0 at the beginning this.efficiency = 0; } private createGridItems(): GridItem[] { return [ ...this.smallBatteries, ...this.largeBatteries, ...this.town.getEnergyUsers(), ...this.solarPanels, ]; } getTownSize() { return this.town.getTownSize(); } getSystemState() { return this.state; } getEfficiency() { // Round off the efficiency to 3 dps return this.efficiency.toFixed(3); } getJsonGraphDetails() { return this.state.getJsonGraph(); } private createBatteries( positions: ItemPosition[], gridItemName: string ): Battery[] { const batteryResistance = gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ? bioconstants.RESISTANCE.LARGE_BATTERY : bioconstants.RESISTANCE.SMALL_BATTERY; const maxCapacity = gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ? 
bioconstants.LARGE_BATTERY.MAX_CAPACITY : bioconstants.SMALL_BATTERY.MAX_CAPACITY; const initEnergy = gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ? bioconstants.LARGE_BATTERY.DEFAULT_START_ENERGY : bioconstants.SMALL_BATTERY.DEFAULT_START_ENERGY; return positions.map( (position, index) => new BioBattery({ x: position.x, y: position.y, gridItemName: `${gridItemName}-${index}`, gridItemResistance: batteryResistance, energyInKiloWattHour: initEnergy, maxCapacity, } as BatteryParams) ); } /** * This method creates a list of solar panels placed depending on their positions * @param positions holds the positions where the solar panels are going to be placed */ // TODO pass a list of equal length to hold the area for the solar panels private createSolarPanels(positions: ItemPosition[]): EnergySource[] { return positions.map( (position, index) => new SolarPanel({ x: position.x, y: position.y, efficiency: 0.75, areaSquareMeters: bioconstants.SOLAR_PANEL.AREA, gridItemName: `${bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL}-${index}`, date: this.startDate, } as SolarPanelParams) ); } /** * Drain the energy users according to the time of day */ updateEnergyUsage(date: Date) { this.town.getEnergyUsers().forEach((energyUser) => { energyUser.decreaseEnergyAccordingToTimeOfDay(date); }); } /** * This method takes the results of the brain and then it changes the state graph as suggested by the brain. * The results of the brain are in the form of an object key:value pair, with the receiver gridItemName as key and supplier gridItemName as value * @param action holds the results from the brain * @returns the current state with a new graph which includes the changes that were suggested by the brain */ takeAction(action: GridAction) { const powerEdges: { v: string; w: string; power: Power }[] = []; // Set new efficiency this.efficiency = action.getEfficiency(); // RETURN a new BiogridState const allSupplyingPaths = action.getSupplyingPaths(); this.state.resetPowerOnEdges(); const clonedGraph = this.state.cloneStateGraph(); for (const supplyPath in allSupplyingPaths) { const oldGridItem = this.state.getGridItem(supplyPath); // take energy from the supplying grid item and transfer it to the energy user const supplyingGridItem = this.state.getGridItem( allSupplyingPaths[supplyPath] ); const typeOldGridItem = this.getGridItemType(oldGridItem); const energyUser = oldGridItem as Building | BioBattery; const energyUserReq = energyUser.getMaxCapacity() - energyUser.getEnergyInKilowattHour(); const typeSupplyingGridItem = this.getGridItemType(supplyingGridItem); if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.ENERGY_USER) { if ( typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY || typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY ) { const battery = supplyingGridItem as BioBattery; battery.supplyPower(energyUserReq); clonedGraph.setNode(battery.gridItemName, battery); } else if ( typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL ) { const solarpanel = supplyingGridItem as SolarPanel; solarpanel.supplyPower(energyUserReq); clonedGraph.setNode(solarpanel.gridItemName, solarpanel); } else { continue; } (energyUser as Building).increaseEnergy(energyUserReq); clonedGraph.setNode(energyUser.gridItemName, energyUser); } else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY) { if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) { const battery = supplyingGridItem as BioBattery; battery.supplyPower(energyUserReq); 
clonedGraph.setNode(battery.gridItemName, battery); } else if ( typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL ) { const solarpanel = supplyingGridItem as SolarPanel; solarpanel.supplyPower(energyUserReq); clonedGraph.setNode(solarpanel.gridItemName, solarpanel); } else { continue; } (energyUser as BioBattery).startCharging(energyUserReq); clonedGraph.setNode(energyUser.gridItemName, energyUser); } else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) { if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL) { const solarpanel = supplyingGridItem as SolarPanel; solarpanel.supplyPower(energyUserReq); } else { continue; } (energyUser as BioBattery).startCharging(energyUserReq); clonedGraph.setNode(energyUser.gridItemName, energyUser); } powerEdges.push({ v: supplyingGridItem.gridItemName, w: energyUser.gridItemName, // Convert kilowatthours into kilowatts power: energyUserReq / bioconstants.TIME.DISCRETE_UNIT_HOURS, }); } this.state.setnewStateGraph(clonedGraph); powerEdges.forEach((powerEdge) => { this.state.setPowerBetweenNodes( powerEdge.v, powerEdge.w, powerEdge.power ); }); return this.state; } private getGridItemType(gridItem: GridItem): string { if ( gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.ENERGY_USER) ) { return bioconstants.GRID_ITEM_NAMES.ENERGY_USER; } else if ( gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY) ) { return bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY; } else if ( gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) ) { return bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY; } else if ( gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL) ) { return bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL; } return bioconstants.GRID_ITEM_NAMES.GRID; } /** * A simplified algorithm to (mostly) evenly space out batteries throughout the square town * Split the town into rows and columns and then place a battery in the center of each cell * TODO: have a smart algorithm for placement, see https://github.com/googleinterns/step141-2020/issues/42 */ private createGridItemPositions( townSize: TownSize, numberOfGridItems: number ): ItemPosition[] { const cols = Math.ceil(numberOfGridItems / townSize.width); const rows = Math.ceil(numberOfGridItems / cols); const positions: ItemPosition[] = []; for (let i = 0; i < numberOfGridItems; i++) { const newPositionUnverified = { x: this.roundToGridDistance( (((i % cols) + 0.5) / cols) * townSize.width ), y: this.roundToGridDistance( ((Math.floor(i / cols) + 0.5) / rows) * townSize.height ), }; const newPosition = this.findNearestUnoccupiedPosition( newPositionUnverified, townSize ); positions.push(newPosition); this.itemInPosition[this.formatItemPosition(newPosition)] = true; } return positions; } /** * Find the nearest unoccupied position to {@code pos} by looking in a spiral with pos at its center * First the space immediately right of pos is checked, then the one above, then to the left, then below, then two spaces to the right, two up, and so on */ private findNearestUnoccupiedPosition( pos: ItemPosition, townSize: TownSize ): ItemPosition { let radius = bioconstants.GRID_DISTANCES.INCREMENTS_KM; let angle = 0; let outOfBoundsCount = 0; let xOffset = 0, yOffset = 0; let newPos = { x: pos.x + xOffset, y: pos.y + yOffset }; // If {@code outOfBoundsCount} is greater than 3, then that means the upwards, left, right, and down // Are all out of bounds. 
Thus there is nowhere left to place the item while ( (this.positionOutOfBounds(newPos, townSize) || this.positionOccupied(newPos)) && outOfBoundsCount <= 3 )
if (outOfBoundsCount > 3) { throw new Error( `There are too many items on the grid. New items could not be placed with a minimum distance of ${bioconstants.GRID_DISTANCES.INCREMENTS_KM} km apart` ); } return newPos; } private positionOutOfBounds(pos: ItemPosition, townSize: TownSize): boolean { return pos.x > townSize.width || pos.y > townSize.height; } private roundToGridDistance(distance: Distance): Distance { return ( Math.floor(distance / bioconstants.GRID_DISTANCES.INCREMENTS_KM) * bioconstants.GRID_DISTANCES.INCREMENTS_KM ); } private positionOccupied(pos: ItemPosition): boolean { return this.itemInPosition[this.formatItemPosition(pos)]; } /** * Convert an item position into a string */ private formatItemPosition(pos: ItemPosition): string { return `${pos.x}, ${pos.y}`; } }
{ if (this.positionOutOfBounds(newPos, townSize)) { outOfBoundsCount++; } switch (angle) { case 0: yOffset = 0; xOffset = radius; break; case 90: xOffset = 0; yOffset = radius; break; case 180: xOffset = -1 * radius; yOffset = 0; break; case 270: xOffset = 0; yOffset = -1 * radius; break; } newPos = { x: pos.x + xOffset, y: pos.y + yOffset }; // Increment the angle by 90 degrees angle = angle + 90; if (angle === 360) { radius += bioconstants.GRID_DISTANCES.INCREMENTS_KM; // Reset the angle angle = 0; } }
conditional_block
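The conditional block isolated above walks a spiral (right, up, left, down, then a larger radius) until it finds a free, in-bounds slot, giving up once all four directions have left the town. A compact Go sketch of the same probe order, with a 1 km increment and a simple occupancy map standing in for the class state (both illustrative assumptions):

package main

import "fmt"

type pos struct{ x, y float64 }

const incrementKM = 1.0 // stands in for GRID_DISTANCES.INCREMENTS_KM

func outOfBounds(p pos, width, height float64) bool { return p.x > width || p.y > height }

// nearestFree mirrors findNearestUnoccupiedPosition: probe right, up, left,
// down at the current radius, grow the radius after a full turn, and give up
// once out-of-bounds probes have been seen more than three times.
func nearestFree(p pos, width, height float64, occupied map[pos]bool) (pos, error) {
	radius, angle, oob := incrementKM, 0, 0
	cand := p
	for (outOfBounds(cand, width, height) || occupied[cand]) && oob <= 3 {
		if outOfBounds(cand, width, height) {
			oob++
		}
		switch angle {
		case 0:
			cand = pos{p.x + radius, p.y}
		case 90:
			cand = pos{p.x, p.y + radius}
		case 180:
			cand = pos{p.x - radius, p.y}
		case 270:
			cand = pos{p.x, p.y - radius}
		}
		if angle += 90; angle == 360 {
			radius += incrementKM
			angle = 0
		}
	}
	if oob > 3 {
		return pos{}, fmt.Errorf("no free position within bounds")
	}
	return cand, nil
}

func main() {
	occupied := map[pos]bool{{2, 2}: true}
	p, err := nearestFree(pos{2, 2}, 5, 5, occupied)
	fmt.Println(p, err) // expect {3 2} <nil>: the slot one increment to the right
}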
biogrid-class.ts
/** * @summary defines the main grid or control centre in the microgrid where all components of the grid * are connected to the grid. * @author Lev Stambler <levst@google.com> * @author Roland Naijuka <rnaijuka@google.com> * * Created at : 6/26/2020, 3:33:10 PM * Last modified : 7/29/2020, 9:43:17 AM */ import { Grid, GridAction, GridOptions, Town, TownSize, ItemPosition, Energy, Battery, GridItem, Distance, Power, } from '@biogrid/grid-simulator'; import * as bioconstants from '../config/bio-constants'; import { BioBattery, BiogridState, Building, SolarPanel, SolarPanelParams, } from '@biogrid/biogrid-simulator'; import { Graph } from 'graphlib'; import { EnergySource } from '../bioenergy-source/bioenergy-source'; import { BatteryParams } from '../biobattery'; export interface BiogridOptions extends GridOptions { numberOfSmallBatteryCells: number; numberOfLargeBatteryCells: number; numberOfSolarPanels: number; startDate?: Date; } export class Biogrid implements Grid { // TODO create a singleton for the Biogrid not BiogridState private state: BiogridState; // The date for when the simulation begins // Used in initializing the Solar Panels private startDate: Date; // All details for the batteries in the grid // The small batteries in the grid, will approximately have a maxCapacity of 13,500KJ private smallBatteries: Battery[]; // The large batteries in the grid, will approximately have a maxCapacity of 540,000KJ private largeBatteries: Battery[]; // A dictionary with the position as its key // Used to keep track of whether an item is already placed in a position private itemInPosition: { [positionString: string]: boolean } = {}; // All details for the source of energy private solarPanels: EnergySource[]; // Holds the efficiency of the grid private efficiency: number; constructor(private town: Town, opts: BiogridOptions) { const todayMidnight = new Date(); todayMidnight.setHours(0); this.startDate = opts.startDate || todayMidnight; // Batteries const smallBatteryPositions = this.createGridItemPositions( town.getTownSize(), opts.numberOfSmallBatteryCells ); const largeBatteryPositions = this.createGridItemPositions( town.getTownSize(), opts.numberOfLargeBatteryCells ); this.smallBatteries = this.createBatteries( smallBatteryPositions, bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY ); this.largeBatteries = this.createBatteries( largeBatteryPositions, bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ); // Energy Source const solarPanelPositions = this.createGridItemPositions( town.getTownSize(), opts.numberOfSolarPanels ); this.solarPanels = this.createSolarPanels(solarPanelPositions); this.state = new BiogridState(this.createGridItems(), town.getTownSize()); // Set the efficiency to 0 at the beginning this.efficiency = 0; } private createGridItems(): GridItem[] { return [ ...this.smallBatteries, ...this.largeBatteries, ...this.town.getEnergyUsers(), ...this.solarPanels, ]; } getTownSize() { return this.town.getTownSize(); } getSystemState() { return this.state; } getEfficiency() { // Round off the efficiency to 3 dps return this.efficiency.toFixed(3); } getJsonGraphDetails()
private createBatteries( positions: ItemPosition[], gridItemName: string ): Battery[] { const batteryResistance = gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ? bioconstants.RESISTANCE.LARGE_BATTERY : bioconstants.RESISTANCE.SMALL_BATTERY; const maxCapacity = gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ? bioconstants.LARGE_BATTERY.MAX_CAPACITY : bioconstants.SMALL_BATTERY.MAX_CAPACITY; const initEnergy = gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ? bioconstants.LARGE_BATTERY.DEFAULT_START_ENERGY : bioconstants.SMALL_BATTERY.DEFAULT_START_ENERGY; return positions.map( (position, index) => new BioBattery({ x: position.x, y: position.y, gridItemName: `${gridItemName}-${index}`, gridItemResistance: batteryResistance, energyInKiloWattHour: initEnergy, maxCapacity, } as BatteryParams) ); } /** * This method creates a list of solar panels placed depending on their positions * @param positions holds the positions where the solar panels are going to be placed */ // TODO pass a list of equal length to hold the area for the solar panels private createSolarPanels(positions: ItemPosition[]): EnergySource[] { return positions.map( (position, index) => new SolarPanel({ x: position.x, y: position.y, efficiency: 0.75, areaSquareMeters: bioconstants.SOLAR_PANEL.AREA, gridItemName: `${bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL}-${index}`, date: this.startDate, } as SolarPanelParams) ); } /** * Drain the energy users according to the time of day */ updateEnergyUsage(date: Date) { this.town.getEnergyUsers().forEach((energyUser) => { energyUser.decreaseEnergyAccordingToTimeOfDay(date); }); } /** * This method takes the results of the brain and then it changes the state graph as suggested by the brain. * The results of the brain are in the form of an object key:value pair, with the receiver gridItemName as key and supplier gridItemName as value * @param action holds the results from the brain * @returns the current state with a new graph which includes the changes that were suggested by the brain */ takeAction(action: GridAction) { const powerEdges: { v: string; w: string; power: Power }[] = []; // Set new efficiency this.efficiency = action.getEfficiency(); // RETURN a new BiogridState const allSupplyingPaths = action.getSupplyingPaths(); this.state.resetPowerOnEdges(); const clonedGraph = this.state.cloneStateGraph(); for (const supplyPath in allSupplyingPaths) { const oldGridItem = this.state.getGridItem(supplyPath); // take energy from the supplying grid item and transfer it to the energy user const supplyingGridItem = this.state.getGridItem( allSupplyingPaths[supplyPath] ); const typeOldGridItem = this.getGridItemType(oldGridItem); const energyUser = oldGridItem as Building | BioBattery; const energyUserReq = energyUser.getMaxCapacity() - energyUser.getEnergyInKilowattHour(); const typeSupplyingGridItem = this.getGridItemType(supplyingGridItem); if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.ENERGY_USER) { if ( typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY || typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY ) { const battery = supplyingGridItem as BioBattery; battery.supplyPower(energyUserReq); clonedGraph.setNode(battery.gridItemName, battery); } else if ( typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL ) { const solarpanel = supplyingGridItem as SolarPanel; solarpanel.supplyPower(energyUserReq); clonedGraph.setNode(solarpanel.gridItemName, solarpanel); } else { continue; } (energyUser as 
Building).increaseEnergy(energyUserReq); clonedGraph.setNode(energyUser.gridItemName, energyUser); } else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY) { if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) { const battery = supplyingGridItem as BioBattery; battery.supplyPower(energyUserReq); clonedGraph.setNode(battery.gridItemName, battery); } else if ( typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL ) { const solarpanel = supplyingGridItem as SolarPanel; solarpanel.supplyPower(energyUserReq); clonedGraph.setNode(solarpanel.gridItemName, solarpanel); } else { continue; } (energyUser as BioBattery).startCharging(energyUserReq); clonedGraph.setNode(energyUser.gridItemName, energyUser); } else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) { if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL) { const solarpanel = supplyingGridItem as SolarPanel; solarpanel.supplyPower(energyUserReq); } else { continue; } (energyUser as BioBattery).startCharging(energyUserReq); clonedGraph.setNode(energyUser.gridItemName, energyUser); } powerEdges.push({ v: supplyingGridItem.gridItemName, w: energyUser.gridItemName, // Convert kilowatthours into kilowatts power: energyUserReq / bioconstants.TIME.DISCRETE_UNIT_HOURS, }); } this.state.setnewStateGraph(clonedGraph); powerEdges.forEach((powerEdge) => { this.state.setPowerBetweenNodes( powerEdge.v, powerEdge.w, powerEdge.power ); }); return this.state; } private getGridItemType(gridItem: GridItem): string { if ( gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.ENERGY_USER) ) { return bioconstants.GRID_ITEM_NAMES.ENERGY_USER; } else if ( gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY) ) { return bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY; } else if ( gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) ) { return bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY; } else if ( gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL) ) { return bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL; } return bioconstants.GRID_ITEM_NAMES.GRID; } /** * A simplified algorithm to (mostly) evenly space out batteries throughout the square town * Split the town into rows and columns and then place a battery in the center of each cell * TODO: have a smart algorithm for placement, see https://github.com/googleinterns/step141-2020/issues/42 */ private createGridItemPositions( townSize: TownSize, numberOfGridItems: number ): ItemPosition[] { const cols = Math.ceil(numberOfGridItems / townSize.width); const rows = Math.ceil(numberOfGridItems / cols); const positions: ItemPosition[] = []; for (let i = 0; i < numberOfGridItems; i++) { const newPositionUnverified = { x: this.roundToGridDistance( (((i % cols) + 0.5) / cols) * townSize.width ), y: this.roundToGridDistance( ((Math.floor(i / cols) + 0.5) / rows) * townSize.height ), }; const newPosition = this.findNearestUnoccupiedPosition( newPositionUnverified, townSize ); positions.push(newPosition); this.itemInPosition[this.formatItemPosition(newPosition)] = true; } return positions; } /** * Find the nearest unoccupied position to {@code pos} by looking looking in a spiral with pos at its center * First the space immediately right of pos is checked, then the one above, then to the left, then below, then two right spaces out, two right up, etc */ private findNearestUnoccupiedPosition( pos: ItemPosition, townSize: TownSize ): ItemPosition { let radius = 
bioconstants.GRID_DISTANCES.INCREMENTS_KM; let angle = 0; let outOfBoundsCount = 0; let xOffset = 0, yOffset = 0; let newPos = { x: pos.x + xOffset, y: pos.y + yOffset }; // If {@code outOfBoundsCount} is greater than 3, then that means the upwards, left, right, and down // Are all out of bounds. Thus there is no where left to place the item while ( (this.positionOutOfBounds(newPos, townSize) || this.positionOccupied(newPos)) && outOfBoundsCount <= 3 ) { if (this.positionOutOfBounds(newPos, townSize)) { outOfBoundsCount++; } switch (angle) { case 0: yOffset = 0; xOffset = radius; break; case 90: xOffset = 0; yOffset = radius; break; case 180: xOffset = -1 * radius; yOffset = 0; break; case 270: xOffset = 0; yOffset = -1 * radius; break; } newPos = { x: pos.x + xOffset, y: pos.y + yOffset }; // Increment the angle by 90 degrees angle = angle + 90; if (angle === 360) { radius += bioconstants.GRID_DISTANCES.INCREMENTS_KM; // Reset the angle angle = 0; } } if (outOfBoundsCount > 3) { throw new Error( `There are too many items on the grid. New items could not be placed with a minimum distance of ${bioconstants.GRID_DISTANCES.INCREMENTS_KM} km apart` ); } return newPos; } private positionOutOfBounds(pos: ItemPosition, townSize: TownSize): boolean { return pos.x > townSize.width || pos.y > townSize.height; } private roundToGridDistance(distance: Distance): Distance { return ( Math.floor(distance / bioconstants.GRID_DISTANCES.INCREMENTS_KM) * bioconstants.GRID_DISTANCES.INCREMENTS_KM ); } private positionOccupied(pos: ItemPosition): boolean { return this.itemInPosition[this.formatItemPosition(pos)]; } /** * Convert an item into a string */ private formatItemPosition(pos: ItemPosition): string { return `${pos.x}, ${pos.y}`; } }
{ return this.state.getJsonGraph(); }
identifier_body
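The record above masks the body of `getJsonGraphDetails` as the `middle`: the `prefix` ends at the method signature and the `suffix` resumes at the next declaration. A minimal sketch of how such an `identifier_body` split could be produced; the `FimExample` shape and `makeIdentifierBodyExample` helper are illustrative assumptions, not the dataset's actual tooling:

```typescript
// Hypothetical record shape mirroring the columns in this dump.
interface FimExample {
  fileName: string;
  prefix: string;
  middle: string;
  suffix: string;
  fimType: 'identifier_body' | 'identifier_name' | 'random_line_split';
}

// Cut the chosen function body out of the source as the middle.
// bodyStart/bodyEnd are assumed to come from a parser (e.g. an AST walk).
function makeIdentifierBodyExample(
  fileName: string,
  source: string,
  bodyStart: number, // index of the "{" that opens the body
  bodyEnd: number // index just past the matching "}"
): FimExample {
  return {
    fileName,
    prefix: source.slice(0, bodyStart),
    middle: source.slice(bodyStart, bodyEnd),
    suffix: source.slice(bodyEnd),
    fimType: 'identifier_body',
  };
}
```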
biogrid-class.ts
/**
 * @summary defines the main grid or control centre in the microgrid where all components of the grid
 * are connected to the grid.
 * @author Lev Stambler <levst@google.com>
 * @author Roland Naijuka <rnaijuka@google.com>
 *
 * Created at : 6/26/2020, 3:33:10 PM
 * Last modified : 7/29/2020, 9:43:17 AM
 */
import {
  Grid,
  GridAction,
  GridOptions,
  Town,
  TownSize,
  ItemPosition,
  Energy,
  Battery,
  GridItem,
  Distance,
  Power,
} from '@biogrid/grid-simulator';
import * as bioconstants from '../config/bio-constants';
import {
  BioBattery,
  BiogridState,
  Building,
  SolarPanel,
  SolarPanelParams,
} from '@biogrid/biogrid-simulator';
import { Graph } from 'graphlib';
import { EnergySource } from '../bioenergy-source/bioenergy-source';
import { BatteryParams } from '../biobattery';

export interface BiogridOptions extends GridOptions {
  numberOfSmallBatteryCells: number;
  numberOfLargeBatteryCells: number;
  numberOfSolarPanels: number;
  startDate?: Date;
}

export class Biogrid implements Grid {
  // TODO create a singleton for the Biogrid not BiogridState
  private state: BiogridState;
  // The date for when the simulation begins
  // Used in initializing the Solar Panels
  private startDate: Date;

  // All details for the batteries in the grid
  // The small batteries in the grid, will approximately have a maxCapacity of 13,500KJ
  private smallBatteries: Battery[];
  // The large batteries in the grid, will approximately have a maxCapacity of 540,000KJ
  private largeBatteries: Battery[];

  // A dictionary with the position as its key
  // Used to keep track of whether an item is already placed in a position
  private itemInPosition: { [positionString: string]: boolean } = {};

  // All details for the source of energy
  private solarPanels: EnergySource[];

  // Holds the efficiency of the grid
  private efficiency: number;

  constructor(private town: Town, opts: BiogridOptions) {
    const todayMidnight = new Date();
    todayMidnight.setHours(0);
    this.startDate = opts.startDate || todayMidnight;

    // Batteries
    const smallBatteryPositions = this.createGridItemPositions(
      town.getTownSize(),
      opts.numberOfSmallBatteryCells
    );
    const largeBatteryPositions = this.createGridItemPositions(
      town.getTownSize(),
      opts.numberOfLargeBatteryCells
    );
    this.smallBatteries = this.createBatteries(
      smallBatteryPositions,
      bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY
    );
    this.largeBatteries = this.createBatteries(
      largeBatteryPositions,
      bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
    );

    // Energy Source
    const solarPanelPositions = this.createGridItemPositions(
      town.getTownSize(),
      opts.numberOfSolarPanels
    );
    this.solarPanels = this.createSolarPanels(solarPanelPositions);

    this.state = new BiogridState(this.createGridItems(), town.getTownSize());
    // Set the efficiency to 0 at the beginning
    this.efficiency = 0;
  }

  private createGridItems(): GridItem[] {
    return [
      ...this.smallBatteries,
      ...this.largeBatteries,
      ...this.town.getEnergyUsers(),
      ...this.solarPanels,
    ];
  }

  getTownSize() {
    return this.town.getTownSize();
  }

  getSystemState() {
    return this.state;
  }

  getEfficiency() {
    // Round off the efficiency to 3 decimal places
    return this.efficiency.toFixed(3);
  }

  getJsonGraphDetails() {
    return this.state.getJsonGraph();
  }

  private createBatteries(
    positions: ItemPosition[],
    gridItemName: string
  ): Battery[] {
    const batteryResistance =
      gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
        ? bioconstants.RESISTANCE.LARGE_BATTERY
        : bioconstants.RESISTANCE.SMALL_BATTERY;
    const maxCapacity =
      gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
        ? bioconstants.LARGE_BATTERY.MAX_CAPACITY
        : bioconstants.SMALL_BATTERY.MAX_CAPACITY;
    const initEnergy =
      gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
        ? bioconstants.LARGE_BATTERY.DEFAULT_START_ENERGY
        : bioconstants.SMALL_BATTERY.DEFAULT_START_ENERGY;
    return positions.map(
      (position, index) =>
        new BioBattery({
          x: position.x,
          y: position.y,
          gridItemName: `${gridItemName}-${index}`,
          gridItemResistance: batteryResistance,
          energyInKiloWattHour: initEnergy,
          maxCapacity,
        } as BatteryParams)
    );
  }

  /**
   * This method creates a list of solar panels placed depending on their positions
   * @param positions holds the positions where the solar panels are going to be placed
   */
  // TODO pass a list of equal length to hold the area for the solar panels
  private createSolarPanels(positions: ItemPosition[]): EnergySource[] {
    return positions.map(
      (position, index) =>
        new SolarPanel({
          x: position.x,
          y: position.y,
          efficiency: 0.75,
          areaSquareMeters: bioconstants.SOLAR_PANEL.AREA,
          gridItemName: `${bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL}-${index}`,
          date: this.startDate,
        } as SolarPanelParams)
    );
  }

  /**
   * Drain the energy users according to the time of day
   */
  updateEnergyUsage(date: Date) {
    this.town.getEnergyUsers().forEach((energyUser) => {
      energyUser.decreaseEnergyAccordingToTimeOfDay(date);
    });
  }

  /**
   * This method takes the results of the brain and then changes the state graph as suggested by the brain.
   * The results of the brain are in the form of an object of key:value pairs, with the receiver gridItemName as key and the supplier gridItemName as value
   * @param action holds the results from the brain
   * @returns the current state with a new graph which includes the changes that were suggested by the brain
   */
  takeAction(action: GridAction) {
    const powerEdges: { v: string; w: string; power: Power }[] = [];
    // Set new efficiency
    this.efficiency = action.getEfficiency();
    // RETURN a new BiogridState
    const allSupplyingPaths = action.getSupplyingPaths();
    this.state.resetPowerOnEdges();
    const clonedGraph = this.state.cloneStateGraph();
    for (const supplyPath in allSupplyingPaths) {
      const oldGridItem = this.state.getGridItem(supplyPath);
      // take energy from the supplying grid item and transfer it to the energy user
      const supplyingGridItem = this.state.getGridItem(
        allSupplyingPaths[supplyPath]
      );
      const typeOldGridItem = this.getGridItemType(oldGridItem);
      const energyUser = oldGridItem as Building | BioBattery;
      const energyUserReq =
        energyUser.getMaxCapacity() - energyUser.getEnergyInKilowattHour();
      const typeSupplyingGridItem = this.getGridItemType(supplyingGridItem);
      if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.ENERGY_USER) {
        if (
          typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ||
          typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY
        ) {
          const battery = supplyingGridItem as BioBattery;
          battery.supplyPower(energyUserReq);
          clonedGraph.setNode(battery.gridItemName, battery);
        } else if (
          typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
        ) {
          const solarpanel = supplyingGridItem as SolarPanel;
          solarpanel.supplyPower(energyUserReq);
          clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
        } else {
          continue;
        }
        (energyUser as Building).increaseEnergy(energyUserReq);
        clonedGraph.setNode(energyUser.gridItemName, energyUser);
      } else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY) {
        if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
          const battery = supplyingGridItem as BioBattery;
          battery.supplyPower(energyUserReq);
          clonedGraph.setNode(battery.gridItemName, battery);
        } else if (
          typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
        ) {
          const solarpanel = supplyingGridItem as SolarPanel;
          solarpanel.supplyPower(energyUserReq);
          clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
        } else {
          continue;
        }
        (energyUser as BioBattery).startCharging(energyUserReq);
        clonedGraph.setNode(energyUser.gridItemName, energyUser);
      } else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
        if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL) {
          const solarpanel = supplyingGridItem as SolarPanel;
          solarpanel.supplyPower(energyUserReq);
        } else {
          continue;
        }
        (energyUser as BioBattery).startCharging(energyUserReq);
        clonedGraph.setNode(energyUser.gridItemName, energyUser);
      }
      powerEdges.push({
        v: supplyingGridItem.gridItemName,
        w: energyUser.gridItemName,
        // Convert kilowatt-hours into kilowatts
        power: energyUserReq / bioconstants.TIME.DISCRETE_UNIT_HOURS,
      });
    }
    this.state.setnewStateGraph(clonedGraph);
    powerEdges.forEach((powerEdge) => {
      this.state.setPowerBetweenNodes(
        powerEdge.v,
        powerEdge.w,
        powerEdge.power
      );
    });
    return this.state;
  }

  private getGridItemType(gridItem: GridItem): string {
    if (
      gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.ENERGY_USER)
    ) {
      return bioconstants.GRID_ITEM_NAMES.ENERGY_USER;
    } else if (
      gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY)
    ) {
      return bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY;
    } else if (
      gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY)
    ) {
      return bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY;
    } else if (
      gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL)
    ) {
      return bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL;
    }
    return bioconstants.GRID_ITEM_NAMES.GRID;
  }

  /**
   * A simplified algorithm to (mostly) evenly space out batteries throughout the square town
   * Split the town into rows and columns and then place a battery in the center of each cell
   * TODO: have a smart algorithm for placement, see https://github.com/googleinterns/step141-2020/issues/42
   */
  private createGridItemPositions(
    const cols = Math.ceil(numberOfGridItems / townSize.width);
    const rows = Math.ceil(numberOfGridItems / cols);
    const positions: ItemPosition[] = [];
    for (let i = 0; i < numberOfGridItems; i++) {
      const newPositionUnverified = {
        x: this.roundToGridDistance(
          (((i % cols) + 0.5) / cols) * townSize.width
        ),
        y: this.roundToGridDistance(
          ((Math.floor(i / cols) + 0.5) / rows) * townSize.height
        ),
      };
      const newPosition = this.findNearestUnoccupiedPosition(
        newPositionUnverified,
        townSize
      );
      positions.push(newPosition);
      this.itemInPosition[this.formatItemPosition(newPosition)] = true;
    }
    return positions;
  }

  /**
   * Find the nearest unoccupied position to {@code pos} by looking in a spiral with pos at its center
   * First the space immediately right of pos is checked, then the one above, then to the left, then below,
   * then two spaces to the right, two above, and so on
   */
  private findNearestUnoccupiedPosition(
    pos: ItemPosition,
    townSize: TownSize
  ): ItemPosition {
    let radius = bioconstants.GRID_DISTANCES.INCREMENTS_KM;
    let angle = 0;
    let outOfBoundsCount = 0;
    let xOffset = 0,
      yOffset = 0;
    let newPos = { x: pos.x + xOffset, y: pos.y + yOffset };
    // If {@code outOfBoundsCount} is greater than 3, then the positions above, left, right, and below
    // are all out of bounds. Thus there is nowhere left to place the item
    while (
      (this.positionOutOfBounds(newPos, townSize) ||
        this.positionOccupied(newPos)) &&
      outOfBoundsCount <= 3
    ) {
      if (this.positionOutOfBounds(newPos, townSize)) {
        outOfBoundsCount++;
      }
      switch (angle) {
        case 0:
          yOffset = 0;
          xOffset = radius;
          break;
        case 90:
          xOffset = 0;
          yOffset = radius;
          break;
        case 180:
          xOffset = -1 * radius;
          yOffset = 0;
          break;
        case 270:
          xOffset = 0;
          yOffset = -1 * radius;
          break;
      }
      newPos = { x: pos.x + xOffset, y: pos.y + yOffset };
      // Increment the angle by 90 degrees
      angle = angle + 90;
      if (angle === 360) {
        radius += bioconstants.GRID_DISTANCES.INCREMENTS_KM;
        // Reset the angle
        angle = 0;
      }
    }
    if (outOfBoundsCount > 3) {
      throw new Error(
        `There are too many items on the grid. New items could not be placed with a minimum distance of ${bioconstants.GRID_DISTANCES.INCREMENTS_KM} km apart`
      );
    }
    return newPos;
  }

  private positionOutOfBounds(pos: ItemPosition, townSize: TownSize): boolean {
    // Negative coordinates are also out of bounds, since the spiral search
    // can step left of or below the origin
    return (
      pos.x < 0 || pos.y < 0 || pos.x > townSize.width || pos.y > townSize.height
    );
  }

  private roundToGridDistance(distance: Distance): Distance {
    return (
      Math.floor(distance / bioconstants.GRID_DISTANCES.INCREMENTS_KM) *
      bioconstants.GRID_DISTANCES.INCREMENTS_KM
    );
  }

  private positionOccupied(pos: ItemPosition): boolean {
    return this.itemInPosition[this.formatItemPosition(pos)];
  }

  /**
   * Convert an item position into a string
   */
  private formatItemPosition(pos: ItemPosition): string {
    return `${pos.x}, ${pos.y}`;
  }
}
townSize: TownSize, numberOfGridItems: number ): ItemPosition[] {
random_line_split
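This second record splits the same file at an arbitrary line boundary (the masked middle is the tail of the `createGridItemPositions` signature) rather than at a syntactic unit. Separately, the placement arithmetic in `createGridItemPositions` is compact enough to check by hand; a worked sketch with hypothetical inputs (a 10 km × 10 km town, 4 items, 0.5 km grid increments stand in for the real `bioconstants` values):

```typescript
// Assumed stand-ins for townSize and bioconstants.GRID_DISTANCES.INCREMENTS_KM.
const townSize = { width: 10, height: 10 };
const numberOfGridItems = 4;
const INCREMENTS_KM = 0.5;

const cols = Math.ceil(numberOfGridItems / townSize.width); // 1 column
const rows = Math.ceil(numberOfGridItems / cols); // 4 rows
const roundToGridDistance = (d: number) =>
  Math.floor(d / INCREMENTS_KM) * INCREMENTS_KM;

for (let i = 0; i < numberOfGridItems; i++) {
  // Each item lands at the centre of its row/column cell, snapped down to the grid.
  const x = roundToGridDistance((((i % cols) + 0.5) / cols) * townSize.width); // always 5
  const y = roundToGridDistance(
    ((Math.floor(i / cols) + 0.5) / rows) * townSize.height
  ); // 1, 3.5, 6, 8.5
  console.log({ x, y });
}
```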
biogrid-class.ts
/**
 * @summary defines the main grid or control centre in the microgrid where all components of the grid
 * are connected to the grid.
 * @author Lev Stambler <levst@google.com>
 * @author Roland Naijuka <rnaijuka@google.com>
 *
 * Created at : 6/26/2020, 3:33:10 PM
 * Last modified : 7/29/2020, 9:43:17 AM
 */
import {
  Grid,
  GridAction,
  GridOptions,
  Town,
  TownSize,
  ItemPosition,
  Energy,
  Battery,
  GridItem,
  Distance,
  Power,
} from '@biogrid/grid-simulator';
import * as bioconstants from '../config/bio-constants';
import {
  BioBattery,
  BiogridState,
  Building,
  SolarPanel,
  SolarPanelParams,
} from '@biogrid/biogrid-simulator';
import { Graph } from 'graphlib';
import { EnergySource } from '../bioenergy-source/bioenergy-source';
import { BatteryParams } from '../biobattery';

export interface BiogridOptions extends GridOptions {
  numberOfSmallBatteryCells: number;
  numberOfLargeBatteryCells: number;
  numberOfSolarPanels: number;
  startDate?: Date;
}

export class Biogrid implements Grid {
  // TODO create a singleton for the Biogrid not BiogridState
  private state: BiogridState;
  // The date for when the simulation begins
  // Used in initializing the Solar Panels
  private startDate: Date;

  // All details for the batteries in the grid
  // The small batteries in the grid, will approximately have a maxCapacity of 13,500KJ
  private smallBatteries: Battery[];
  // The large batteries in the grid, will approximately have a maxCapacity of 540,000KJ
  private largeBatteries: Battery[];

  // A dictionary with the position as its key
  // Used to keep track of whether an item is already placed in a position
  private itemInPosition: { [positionString: string]: boolean } = {};

  // All details for the source of energy
  private solarPanels: EnergySource[];

  // Holds the efficiency of the grid
  private efficiency: number;

  constructor(private town: Town, opts: BiogridOptions) {
    const todayMidnight = new Date();
    todayMidnight.setHours(0);
    this.startDate = opts.startDate || todayMidnight;

    // Batteries
    const smallBatteryPositions = this.createGridItemPositions(
      town.getTownSize(),
      opts.numberOfSmallBatteryCells
    );
    const largeBatteryPositions = this.createGridItemPositions(
      town.getTownSize(),
      opts.numberOfLargeBatteryCells
    );
    this.smallBatteries = this.createBatteries(
      smallBatteryPositions,
      bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY
    );
    this.largeBatteries = this.createBatteries(
      largeBatteryPositions,
      bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
    );

    // Energy Source
    const solarPanelPositions = this.createGridItemPositions(
      town.getTownSize(),
      opts.numberOfSolarPanels
    );
    this.solarPanels = this.createSolarPanels(solarPanelPositions);

    this.state = new BiogridState(this.createGridItems(), town.getTownSize());
    // Set the efficiency to 0 at the beginning
    this.efficiency = 0;
  }

  private createGridItems(): GridItem[] {
    return [
      ...this.smallBatteries,
      ...this.largeBatteries,
      ...this.town.getEnergyUsers(),
      ...this.solarPanels,
    ];
  }

  getTownSize() {
    return this.town.getTownSize();
  }

  getSystemState() {
    return this.state;
  }

  getEfficiency() {
    // Round off the efficiency to 3 decimal places
    return this.efficiency.toFixed(3);
  }

  getJsonGraphDetails() {
    return this.state.getJsonGraph();
  }

  private createBatteries(
    positions: ItemPosition[],
    gridItemName: string
  ): Battery[] {
    const batteryResistance =
      gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
        ? bioconstants.RESISTANCE.LARGE_BATTERY
        : bioconstants.RESISTANCE.SMALL_BATTERY;
    const maxCapacity =
      gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
        ? bioconstants.LARGE_BATTERY.MAX_CAPACITY
        : bioconstants.SMALL_BATTERY.MAX_CAPACITY;
    const initEnergy =
      gridItemName === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY
        ? bioconstants.LARGE_BATTERY.DEFAULT_START_ENERGY
        : bioconstants.SMALL_BATTERY.DEFAULT_START_ENERGY;
    return positions.map(
      (position, index) =>
        new BioBattery({
          x: position.x,
          y: position.y,
          gridItemName: `${gridItemName}-${index}`,
          gridItemResistance: batteryResistance,
          energyInKiloWattHour: initEnergy,
          maxCapacity,
        } as BatteryParams)
    );
  }

  /**
   * This method creates a list of solar panels placed depending on their positions
   * @param positions holds the positions where the solar panels are going to be placed
   */
  // TODO pass a list of equal length to hold the area for the solar panels
  private createSolarPanels(positions: ItemPosition[]): EnergySource[] {
    return positions.map(
      (position, index) =>
        new SolarPanel({
          x: position.x,
          y: position.y,
          efficiency: 0.75,
          areaSquareMeters: bioconstants.SOLAR_PANEL.AREA,
          gridItemName: `${bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL}-${index}`,
          date: this.startDate,
        } as SolarPanelParams)
    );
  }

  /**
   * Drain the energy users according to the time of day
   */
  updateEnergyUsage(date: Date) {
    this.town.getEnergyUsers().forEach((energyUser) => {
      energyUser.decreaseEnergyAccordingToTimeOfDay(date);
    });
  }

  /**
   * This method takes the results of the brain and then changes the state graph as suggested by the brain.
   * The results of the brain are in the form of an object of key:value pairs, with the receiver gridItemName as key and the supplier gridItemName as value
   * @param action holds the results from the brain
   * @returns the current state with a new graph which includes the changes that were suggested by the brain
   */
  takeAction(action: GridAction) {
    const powerEdges: { v: string; w: string; power: Power }[] = [];
    // Set new efficiency
    this.efficiency = action.getEfficiency();
    // RETURN a new BiogridState
    const allSupplyingPaths = action.getSupplyingPaths();
    this.state.resetPowerOnEdges();
    const clonedGraph = this.state.cloneStateGraph();
    for (const supplyPath in allSupplyingPaths) {
      const oldGridItem = this.state.getGridItem(supplyPath);
      // take energy from the supplying grid item and transfer it to the energy user
      const supplyingGridItem = this.state.getGridItem(
        allSupplyingPaths[supplyPath]
      );
      const typeOldGridItem = this.getGridItemType(oldGridItem);
      const energyUser = oldGridItem as Building | BioBattery;
      const energyUserReq =
        energyUser.getMaxCapacity() - energyUser.getEnergyInKilowattHour();
      const typeSupplyingGridItem = this.getGridItemType(supplyingGridItem);
      if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.ENERGY_USER) {
        if (
          typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY ||
          typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY
        ) {
          const battery = supplyingGridItem as BioBattery;
          battery.supplyPower(energyUserReq);
          clonedGraph.setNode(battery.gridItemName, battery);
        } else if (
          typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
        ) {
          const solarpanel = supplyingGridItem as SolarPanel;
          solarpanel.supplyPower(energyUserReq);
          clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
        } else {
          continue;
        }
        (energyUser as Building).increaseEnergy(energyUserReq);
        clonedGraph.setNode(energyUser.gridItemName, energyUser);
      } else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY) {
        if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
          const battery = supplyingGridItem as BioBattery;
          battery.supplyPower(energyUserReq);
          clonedGraph.setNode(battery.gridItemName, battery);
        } else if (
          typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL
        ) {
          const solarpanel = supplyingGridItem as SolarPanel;
          solarpanel.supplyPower(energyUserReq);
          clonedGraph.setNode(solarpanel.gridItemName, solarpanel);
        } else {
          continue;
        }
        (energyUser as BioBattery).startCharging(energyUserReq);
        clonedGraph.setNode(energyUser.gridItemName, energyUser);
      } else if (typeOldGridItem === bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY) {
        if (typeSupplyingGridItem === bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL) {
          const solarpanel = supplyingGridItem as SolarPanel;
          solarpanel.supplyPower(energyUserReq);
        } else {
          continue;
        }
        (energyUser as BioBattery).startCharging(energyUserReq);
        clonedGraph.setNode(energyUser.gridItemName, energyUser);
      }
      powerEdges.push({
        v: supplyingGridItem.gridItemName,
        w: energyUser.gridItemName,
        // Convert kilowatt-hours into kilowatts
        power: energyUserReq / bioconstants.TIME.DISCRETE_UNIT_HOURS,
      });
    }
    this.state.setnewStateGraph(clonedGraph);
    powerEdges.forEach((powerEdge) => {
      this.state.setPowerBetweenNodes(
        powerEdge.v,
        powerEdge.w,
        powerEdge.power
      );
    });
    return this.state;
  }

  private getGridItemType(gridItem: GridItem): string {
    if (
      gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.ENERGY_USER)
    ) {
      return bioconstants.GRID_ITEM_NAMES.ENERGY_USER;
    } else if (
      gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY)
    ) {
      return bioconstants.GRID_ITEM_NAMES.SMALL_BATTERY;
    } else if (
      gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY)
    ) {
      return bioconstants.GRID_ITEM_NAMES.LARGE_BATTERY;
    } else if (
      gridItem.gridItemName.includes(bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL)
    ) {
      return bioconstants.GRID_ITEM_NAMES.SOLAR_PANEL;
    }
    return bioconstants.GRID_ITEM_NAMES.GRID;
  }

  /**
   * A simplified algorithm to (mostly) evenly space out batteries throughout the square town
   * Split the town into rows and columns and then place a battery in the center of each cell
   * TODO: have a smart algorithm for placement, see https://github.com/googleinterns/step141-2020/issues/42
   */
  private createGridItemPositions(
    townSize: TownSize,
    numberOfGridItems: number
  ): ItemPosition[] {
    const cols = Math.ceil(numberOfGridItems / townSize.width);
    const rows = Math.ceil(numberOfGridItems / cols);
    const positions: ItemPosition[] = [];
    for (let i = 0; i < numberOfGridItems; i++) {
      const newPositionUnverified = {
        x: this.roundToGridDistance(
          (((i % cols) + 0.5) / cols) * townSize.width
        ),
        y: this.roundToGridDistance(
          ((Math.floor(i / cols) + 0.5) / rows) * townSize.height
        ),
      };
      const newPosition = this.findNearestUnoccupiedPosition(
        newPositionUnverified,
        townSize
      );
      positions.push(newPosition);
      this.itemInPosition[this.formatItemPosition(newPosition)] = true;
    }
    return positions;
  }

  /**
   * Find the nearest unoccupied position to {@code pos} by looking in a spiral with pos at its center
   * First the space immediately right of pos is checked, then the one above, then to the left, then below,
   * then two spaces to the right, two above, and so on
   */
  private findNearestUnoccupiedPosition(
    pos: ItemPosition,
    townSize: TownSize
  ): ItemPosition {
    let radius = bioconstants.GRID_DISTANCES.INCREMENTS_KM;
    let angle = 0;
    let outOfBoundsCount = 0;
    let xOffset = 0,
      yOffset = 0;
    let newPos = { x: pos.x + xOffset, y: pos.y + yOffset };
    // If {@code outOfBoundsCount} is greater than 3, then the positions above, left, right, and below
    // are all out of bounds. Thus there is nowhere left to place the item
    while (
      (this.positionOutOfBounds(newPos, townSize) ||
        this.positionOccupied(newPos)) &&
      outOfBoundsCount <= 3
    ) {
      if (this.positionOutOfBounds(newPos, townSize)) {
        outOfBoundsCount++;
      }
      switch (angle) {
        case 0:
          yOffset = 0;
          xOffset = radius;
          break;
        case 90:
          xOffset = 0;
          yOffset = radius;
          break;
        case 180:
          xOffset = -1 * radius;
          yOffset = 0;
          break;
        case 270:
          xOffset = 0;
          yOffset = -1 * radius;
          break;
      }
      newPos = { x: pos.x + xOffset, y: pos.y + yOffset };
      // Increment the angle by 90 degrees
      angle = angle + 90;
      if (angle === 360) {
        radius += bioconstants.GRID_DISTANCES.INCREMENTS_KM;
        // Reset the angle
        angle = 0;
      }
    }
    if (outOfBoundsCount > 3) {
      throw new Error(
        `There are too many items on the grid. New items could not be placed with a minimum distance of ${bioconstants.GRID_DISTANCES.INCREMENTS_KM} km apart`
      );
    }
    return newPos;
  }

  private positionOutOfBounds(pos: ItemPosition, townSize: TownSize): boolean {
    // Negative coordinates are also out of bounds, since the spiral search
    // can step left of or below the origin
    return (
      pos.x < 0 || pos.y < 0 || pos.x > townSize.width || pos.y > townSize.height
    );
  }

  private roundToGridDistance(distance: Distance): Distance {
    return (
      Math.floor(distance / bioconstants.GRID_DISTANCES.INCREMENTS_KM) *
      bioconstants.GRID_DISTANCES.INCREMENTS_KM
    );
  }

  private
(pos: ItemPosition): boolean {
    return this.itemInPosition[this.formatItemPosition(pos)];
  }

  /**
   * Convert an item position into a string
   */
  private formatItemPosition(pos: ItemPosition): string {
    return `${pos.x}, ${pos.y}`;
  }
}
positionOccupied
identifier_name
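In this record only the identifier `positionOccupied` is masked: the prefix ends at `private` and the suffix resumes at the parameter list. One detail of the surrounding code worth making explicit is the unit conversion `takeAction` performs when it records power on an edge; a hedged sketch with assumed constants (the 1-hour step stands in for `bioconstants.TIME.DISCRETE_UNIT_HOURS`, and the 13.5 kWh demand is hypothetical):

```typescript
// Energy (kWh) delivered over one simulation time step corresponds to a power draw (kW).
const DISCRETE_UNIT_HOURS = 1; // assumed time step
const energyUserReqKwh = 13.5; // assumed: energy a small battery still needs to fill up

const powerKw = energyUserReqKwh / DISCRETE_UNIT_HOURS; // 13.5 kW over the step
```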
sync.rs
// Copyright 2017 The xi-editor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Architecture for synchronizing a CRDT with the ledger. Separated into a //! module so that it is easier to add other sync stores later. use std::io::Write; use std::sync::mpsc::{Receiver, RecvError, Sender}; use log; use apps_ledger_services_public::*; use fidl::{self, Future, Promise}; use fuchsia::read_entire_vmo; use magenta::{Channel, ChannelOpts, HandleBase}; use serde_json; use super::ledger::{self, ledger_crash_callback}; use tabs::{BufferContainerRef, BufferIdentifier}; use xi_rope::engine::Engine; // TODO switch these to bincode fn state_to_buf(state: &Engine) -> Vec<u8> { serde_json::to_vec(state).unwrap() } fn buf_to_state(buf: &[u8]) -> Result<Engine, serde_json::Error> { serde_json::from_slice(buf) } /// Stores state needed by the container to perform synchronization. pub struct SyncStore { page: Page_Proxy, key: Vec<u8>, updates: Sender<SyncMsg>, transaction_pending: bool, buffer: BufferIdentifier, } impl SyncStore { /// - `page` is a reference to the Ledger page to store data under. /// - `key` is the key the `Syncable` managed by this `SyncStore` will be stored under. /// This example only supports storing things under a single key per page. /// - `updates` is a channel to a `SyncUpdater` that will handle events. /// /// Returns a sync store and schedules the loading of initial /// state and subscribes to state updates for this document. pub fn new( mut page: Page_Proxy, key: Vec<u8>, updates: Sender<SyncMsg>, buffer: BufferIdentifier, ) -> SyncStore { let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap(); let watcher_client = PageWatcher_Client::from_handle(s1.into_handle()); let watcher_client_ptr = ::fidl::InterfacePtr { inner: watcher_client, version: PageWatcher_Metadata::VERSION }; let watcher = PageWatcherServer { updates: updates.clone(), buffer: buffer.clone() }; let _ = fidl::Server::new(watcher, s2).spawn(); let (mut snap, snap_request) = PageSnapshot_new_pair(); page.get_snapshot(snap_request, Some(key.clone()), Some(watcher_client_ptr)) .with(ledger_crash_callback); let initial_state_chan = updates.clone(); let initial_buffer = buffer.clone(); snap.get(key.clone()).with(move |raw_res| { match raw_res.map(|res| ledger::value_result(res)) { Ok(Ok(Some(buf))) => { initial_state_chan .send(SyncMsg::NewState { buffer: initial_buffer, new_buf: buf, done: None, }) .unwrap(); } Ok(Ok(None)) => (), // No initial state saved yet Err(err) => error!("FIDL failed on initial response: {:?}", err), Ok(Err(err)) => error!("Ledger failed to retrieve key: {:?}", err), } }); SyncStore { page, key, updates, buffer, transaction_pending: false } } /// Called whenever this app changed its own state and would like to /// persist the changes to the ledger. Changes can't be committed /// immediately since we have to wait for PageWatcher changes that may not /// have arrived yet. 
pub fn state_changed(&mut self) { if !self.transaction_pending { self.transaction_pending = true; let ready_future = self.page.start_transaction(); let done_chan = self.updates.clone(); let buffer = self.buffer.clone(); ready_future.with(move |res| match res { Ok(ledger::OK) => { done_chan.send(SyncMsg::TransactionReady { buffer }).unwrap(); } Ok(err_status) => error!("Ledger failed to start transaction: {:?}", err_status), Err(err) => error!("FIDL failed on starting transaction: {:?}", err), }); } } /// Should be called in SyncContainer::transaction_ready to persist the current state. pub fn commit_transaction(&mut self, state: &Engine) { assert!(self.transaction_pending, "must call state_changed (and wait) before commit"); self.page.put(self.key.clone(), state_to_buf(state)).with(ledger_crash_callback); self.page.commit().with(ledger_crash_callback); self.transaction_pending = false; } } /// All the different asynchronous events the updater thread needs to listen for and act on pub enum SyncMsg { NewState { buffer: BufferIdentifier, new_buf: Vec<u8>, done: Option<Promise<Option<PageSnapshot_Server>, fidl::Error>>, }, TransactionReady { buffer: BufferIdentifier, }, /// Shut down the updater thread Stop, } /// We want to be able to register to receive events from inside the /// `SyncStore`/`SyncContainer` but from there we don't have access to the /// Mutex that holds the container, so we give channel Senders to all the /// futures so that they can all trigger events in one place that does have /// the right reference. /// /// Additionally, the individual `Editor`s aren't wrapped in a `Mutex` so we /// have to hold a `BufferContainerRef` and use `BufferIdentifier`s with one /// `SyncUpdater` for all buffers. pub struct SyncUpdater<W: Write> { container_ref: BufferContainerRef<W>, chan: Receiver<SyncMsg>, } impl<W: Write + Send + 'static> SyncUpdater<W> { pub fn new(container_ref: BufferContainerRef<W>, chan: Receiver<SyncMsg>) -> SyncUpdater<W>
/// Run this in a thread; it will return when it encounters an error
    /// reading the channel or when the `Stop` message is received.
    pub fn work(&self) -> Result<(), RecvError> {
        loop {
            let msg = self.chan.recv()?;
            match msg {
                SyncMsg::Stop => return Ok(()),
                SyncMsg::TransactionReady { buffer } => {
                    let mut container = self.container_ref.lock();
                    // if the buffer was closed, hopefully the page connection was as well, which I hope aborts transactions
                    if let Some(mut editor) = container.editor_for_buffer_mut(&buffer) {
                        editor.transaction_ready();
                    }
                }
                SyncMsg::NewState { new_buf, done, buffer } => {
                    let mut container = self.container_ref.lock();
                    match (container.editor_for_buffer_mut(&buffer), buf_to_state(&new_buf)) {
                        (Some(mut editor), Ok(new_state)) => {
                            editor.merge_new_state(new_state);
                            if let Some(promise) = done {
                                promise.set_ok(None);
                            }
                        }
                        (None, _) => (), // buffer was closed
                        (_, Err(err)) => error!("Ledger was set to invalid state: {:?}", err),
                    }
                }
            }
        }
    }
}

struct PageWatcherServer {
    updates: Sender<SyncMsg>,
    buffer: BufferIdentifier,
}

impl PageWatcher for PageWatcherServer {
    fn on_change(
        &mut self,
        page_change: PageChange,
        result_state: ResultState,
    ) -> Future<Option<PageSnapshot_Server>, fidl::Error> {
        let (future, done) = Future::make_promise();

        let value_opt = page_change.changes.get(0).and_then(|c| c.value.as_ref());
        if let (ledger::RESULT_COMPLETED, Some(value_vmo)) = (result_state, value_opt) {
            let new_buf = read_entire_vmo(value_vmo).expect("failed to read key Vmo");
            self.updates
                .send(SyncMsg::NewState { buffer: self.buffer.clone(), new_buf, done: Some(done) })
                .unwrap();
        } else {
            error!("Xi state corrupted, should have one key but has multiple.");
            // I don't think this should be a FIDL-level error, so set okay
            done.set_ok(None);
        }

        future
    }
}

impl PageWatcher_Stub for PageWatcherServer {
    // Use default dispatching, but we could override it here.
}
impl_fidl_stub!(PageWatcherServer: PageWatcher_Stub);

// ============= Conflict resolution

pub fn start_conflict_resolver_factory(ledger: &mut Ledger_Proxy, key: Vec<u8>) {
    let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
    let resolver_client = ConflictResolverFactory_Client::from_handle(s1.into_handle());
    let resolver_client_ptr = ::fidl::InterfacePtr {
        inner: resolver_client,
        version: ConflictResolverFactory_Metadata::VERSION,
    };

    let _ = fidl::Server::new(ConflictResolverFactoryServer { key }, s2).spawn();

    ledger.set_conflict_resolver_factory(Some(resolver_client_ptr)).with(ledger_crash_callback);
}

struct ConflictResolverFactoryServer {
    key: Vec<u8>,
}

impl ConflictResolverFactory for ConflictResolverFactoryServer {
    fn get_policy(&mut self, _page_id: Vec<u8>) -> Future<MergePolicy, ::fidl::Error> {
        Future::done(Ok(MergePolicy_Custom))
    }

    /// Our resolvers are the same for every page
    fn new_conflict_resolver(&mut self, _page_id: Vec<u8>, resolver: ConflictResolver_Server) {
        let _ = fidl::Server::new(
            ConflictResolverServer { key: self.key.clone() },
            resolver.into_channel(),
        )
        .spawn();
    }
}

impl ConflictResolverFactory_Stub for ConflictResolverFactoryServer {
    // Use default dispatching, but we could override it here.
} impl_fidl_stub!(ConflictResolverFactoryServer: ConflictResolverFactory_Stub); fn state_from_snapshot<F>( snapshot: ::fidl::InterfacePtr<PageSnapshot_Client>, key: Vec<u8>, done: F, ) where F: Send + FnOnce(Result<Option<Engine>, ()>) + 'static, { assert_eq!(PageSnapshot_Metadata::VERSION, snapshot.version); let mut snapshot_proxy = PageSnapshot_new_Proxy(snapshot.inner); // TODO get a reference when too big snapshot_proxy.get(key).with(move |raw_res| { let state = match raw_res.map(|res| ledger::value_result(res)) { // the .ok() has the behavior of acting like invalid state is empty // and thus deleting invalid state and overwriting it with good state Ok(Ok(Some(buf))) => Ok(buf_to_state(&buf).ok()), Ok(Ok(None)) => { info!("No state in conflicting page"); Ok(None) } Err(err) => { warn!("FIDL failed on initial response: {:?}", err); Err(()) } Ok(Err(err)) => { warn!("Ledger failed to retrieve key: {:?}", err); Err(()) } }; done(state); }); } struct ConflictResolverServer { key: Vec<u8>, } impl ConflictResolver for ConflictResolverServer { fn resolve( &mut self, left: ::fidl::InterfacePtr<PageSnapshot_Client>, right: ::fidl::InterfacePtr<PageSnapshot_Client>, _common_version: Option<::fidl::InterfacePtr<PageSnapshot_Client>>, result_provider: ::fidl::InterfacePtr<MergeResultProvider_Client>, ) { // TODO in the futures-rs future, do this in parallel with Future combinators let key2 = self.key.clone(); state_from_snapshot(left, self.key.clone(), move |e1_opt| { let key3 = key2.clone(); state_from_snapshot(right, key2, move |e2_opt| { let result_opt = match (e1_opt, e2_opt) { (Ok(Some(mut e1)), Ok(Some(e2))) => { e1.merge(&e2); Some(e1) } // one engine didn't exist yet, I'm not sure if Ledger actually generates a conflict in this case (Ok(Some(e)), Ok(None)) | (Ok(None), Ok(Some(e))) => Some(e), // failed to get one of the engines, we can't do the merge properly (Err(()), _) | (_, Err(())) => None, // if state is invalid or missing on both sides, can't merge (Ok(None), Ok(None)) => None, }; if let Some(out_state) = result_opt { let buf = state_to_buf(&out_state); // TODO use a reference here when buf is too big let new_value = Some(Box::new(BytesOrReference::Bytes(buf))); let merged = MergedValue { key: key3, source: ValueSource_New, new_value, priority: Priority_Eager, }; assert_eq!(MergeResultProvider_Metadata::VERSION, result_provider.version); let mut result_provider_proxy = MergeResultProvider_new_Proxy(result_provider.inner); result_provider_proxy.merge(vec![merged]); result_provider_proxy.done().with(ledger_crash_callback); } }); }); } } impl ConflictResolver_Stub for ConflictResolverServer { // Use default dispatching, but we could override it here. } impl_fidl_stub!(ConflictResolverServer: ConflictResolver_Stub);
{ SyncUpdater { container_ref, chan } }
identifier_body
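The masked middle here is the one-line body of `SyncUpdater::new`. The surrounding doc comments describe the event-loop design: every future gets a channel `Sender`, and a single `work` loop serializes all ledger events for all buffers. A language-neutral sketch of that dispatch pattern, written in TypeScript to match the other sketches in this section (the names and shapes are illustrative, not from xi-editor):

```typescript
// Illustrative message type mirroring SyncMsg (Stop / TransactionReady / NewState).
type SyncMsg =
  | { kind: 'NewState'; buffer: string; newBuf: Uint8Array; done?: () => void }
  | { kind: 'TransactionReady'; buffer: string }
  | { kind: 'Stop' };

interface Editor {
  transactionReady(): void;
  mergeNewState(buf: Uint8Array): void;
}

// One receiver drains events for every buffer, as SyncUpdater::work does.
function work(recv: () => SyncMsg, editors: Map<string, Editor>): void {
  for (;;) {
    const msg = recv();
    switch (msg.kind) {
      case 'Stop':
        return; // shut down the updater thread
      case 'TransactionReady':
        // the editor may be gone if the buffer was closed; skip silently
        editors.get(msg.buffer)?.transactionReady();
        break;
      case 'NewState':
        editors.get(msg.buffer)?.mergeNewState(msg.newBuf);
        msg.done?.(); // acknowledge the page watcher
        break;
    }
  }
}
```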
sync.rs
// Copyright 2017 The xi-editor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Architecture for synchronizing a CRDT with the ledger. Separated into a //! module so that it is easier to add other sync stores later. use std::io::Write; use std::sync::mpsc::{Receiver, RecvError, Sender}; use log; use apps_ledger_services_public::*; use fidl::{self, Future, Promise}; use fuchsia::read_entire_vmo; use magenta::{Channel, ChannelOpts, HandleBase}; use serde_json; use super::ledger::{self, ledger_crash_callback}; use tabs::{BufferContainerRef, BufferIdentifier}; use xi_rope::engine::Engine; // TODO switch these to bincode fn state_to_buf(state: &Engine) -> Vec<u8> { serde_json::to_vec(state).unwrap() } fn buf_to_state(buf: &[u8]) -> Result<Engine, serde_json::Error> { serde_json::from_slice(buf) } /// Stores state needed by the container to perform synchronization. pub struct SyncStore { page: Page_Proxy, key: Vec<u8>, updates: Sender<SyncMsg>, transaction_pending: bool, buffer: BufferIdentifier, } impl SyncStore { /// - `page` is a reference to the Ledger page to store data under. /// - `key` is the key the `Syncable` managed by this `SyncStore` will be stored under. /// This example only supports storing things under a single key per page. /// - `updates` is a channel to a `SyncUpdater` that will handle events. /// /// Returns a sync store and schedules the loading of initial /// state and subscribes to state updates for this document. pub fn new( mut page: Page_Proxy, key: Vec<u8>, updates: Sender<SyncMsg>, buffer: BufferIdentifier, ) -> SyncStore { let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap(); let watcher_client = PageWatcher_Client::from_handle(s1.into_handle()); let watcher_client_ptr = ::fidl::InterfacePtr { inner: watcher_client, version: PageWatcher_Metadata::VERSION }; let watcher = PageWatcherServer { updates: updates.clone(), buffer: buffer.clone() }; let _ = fidl::Server::new(watcher, s2).spawn(); let (mut snap, snap_request) = PageSnapshot_new_pair(); page.get_snapshot(snap_request, Some(key.clone()), Some(watcher_client_ptr)) .with(ledger_crash_callback); let initial_state_chan = updates.clone(); let initial_buffer = buffer.clone(); snap.get(key.clone()).with(move |raw_res| { match raw_res.map(|res| ledger::value_result(res)) { Ok(Ok(Some(buf))) => { initial_state_chan .send(SyncMsg::NewState { buffer: initial_buffer, new_buf: buf, done: None, }) .unwrap(); } Ok(Ok(None)) => (), // No initial state saved yet Err(err) => error!("FIDL failed on initial response: {:?}", err), Ok(Err(err)) => error!("Ledger failed to retrieve key: {:?}", err), } }); SyncStore { page, key, updates, buffer, transaction_pending: false } } /// Called whenever this app changed its own state and would like to /// persist the changes to the ledger. Changes can't be committed /// immediately since we have to wait for PageWatcher changes that may not /// have arrived yet. 
pub fn state_changed(&mut self) {
        if !self.transaction_pending {
            self.transaction_pending = true;
            let ready_future = self.page.start_transaction();
            let done_chan = self.updates.clone();
            let buffer = self.buffer.clone();
            ready_future.with(move |res| match res {
                Ok(ledger::OK) => {
                    done_chan.send(SyncMsg::TransactionReady { buffer }).unwrap();
                }
                Ok(err_status) => error!("Ledger failed to start transaction: {:?}", err_status),
                Err(err) => error!("FIDL failed on starting transaction: {:?}", err),
            });
        }
    }

    /// Should be called in SyncContainer::transaction_ready to persist the current state.
    pub fn commit_transaction(&mut self, state: &Engine) {
        assert!(self.transaction_pending, "must call state_changed (and wait) before commit");
        self.page.put(self.key.clone(), state_to_buf(state)).with(ledger_crash_callback);
        self.page.commit().with(ledger_crash_callback);
        self.transaction_pending = false;
    }
}

/// All the different asynchronous events the updater thread needs to listen for and act on
pub enum SyncMsg {
    NewState {
        buffer: BufferIdentifier,
        new_buf: Vec<u8>,
        done: Option<Promise<Option<PageSnapshot_Server>, fidl::Error>>,
    },
    TransactionReady {
        buffer: BufferIdentifier,
    },
    /// Shut down the updater thread
    Stop,
}

/// We want to be able to register to receive events from inside the
/// `SyncStore`/`SyncContainer` but from there we don't have access to the
/// Mutex that holds the container, so we give channel Senders to all the
/// futures so that they can all trigger events in one place that does have
/// the right reference.
///
/// Additionally, the individual `Editor`s aren't wrapped in a `Mutex` so we
/// have to hold a `BufferContainerRef` and use `BufferIdentifier`s with one
/// `SyncUpdater` for all buffers.
pub struct SyncUpdater<W: Write> {
    container_ref: BufferContainerRef<W>,
    chan: Receiver<SyncMsg>,
}

impl<W: Write + Send + 'static> SyncUpdater<W> {
    pub fn new(container_ref: BufferContainerRef<W>, chan: Receiver<SyncMsg>) -> SyncUpdater<W> {
        SyncUpdater { container_ref, chan }
    }

    /// Run this in a thread; it will return when it encounters an error
    /// reading the channel or when the `Stop` message is received.
pub fn work(&self) -> Result<(), RecvError> { loop { let msg = self.chan.recv()?; match msg { SyncMsg::Stop => return Ok(()), SyncMsg::TransactionReady { buffer } => { let mut container = self.container_ref.lock(); // if the buffer was closed, hopefully the page connection was as well, which I hope aborts transactions if let Some(mut editor) = container.editor_for_buffer_mut(&buffer) { editor.transaction_ready(); } } SyncMsg::NewState { new_buf, done, buffer } => { let mut container = self.container_ref.lock(); match (container.editor_for_buffer_mut(&buffer), buf_to_state(&new_buf)) { (Some(mut editor), Ok(new_state)) => { editor.merge_new_state(new_state); if let Some(promise) = done { promise.set_ok(None); } } (None, _) => (), // buffer was closed (_, Err(err)) => error!("Ledger was set to invalid state: {:?}", err), } } } } } } struct PageWatcherServer { updates: Sender<SyncMsg>, buffer: BufferIdentifier, } impl PageWatcher for PageWatcherServer { fn on_change( &mut self, page_change: PageChange, result_state: ResultState, ) -> Future<Option<PageSnapshot_Server>, fidl::Error> { let (future, done) = Future::make_promise(); let value_opt = page_change.changes.get(0).and_then(|c| c.value.as_ref()); if let (ledger::RESULT_COMPLETED, Some(value_vmo)) = (result_state, value_opt) { let new_buf = read_entire_vmo(value_vmo).expect("failed to read key Vmo"); self.updates .send(SyncMsg::NewState { buffer: self.buffer.clone(), new_buf, done: Some(done) }) .unwrap(); } else { error!("Xi state corrupted, should have one key but has multiple."); // I don't think this should be a FIDL-level error, so set okay done.set_ok(None); } future } } impl PageWatcher_Stub for PageWatcherServer { // Use default dispatching, but we could override it here. } impl_fidl_stub!(PageWatcherServer: PageWatcher_Stub); // ============= Conflict resolution pub fn
(ledger: &mut Ledger_Proxy, key: Vec<u8>) { let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap(); let resolver_client = ConflictResolverFactory_Client::from_handle(s1.into_handle()); let resolver_client_ptr = ::fidl::InterfacePtr { inner: resolver_client, version: ConflictResolverFactory_Metadata::VERSION, }; let _ = fidl::Server::new(ConflictResolverFactoryServer { key }, s2).spawn(); ledger.set_conflict_resolver_factory(Some(resolver_client_ptr)).with(ledger_crash_callback); } struct ConflictResolverFactoryServer { key: Vec<u8>, } impl ConflictResolverFactory for ConflictResolverFactoryServer { fn get_policy(&mut self, _page_id: Vec<u8>) -> Future<MergePolicy, ::fidl::Error> { Future::done(Ok(MergePolicy_Custom)) } /// Our resolvers are the same for every page fn new_conflict_resolver(&mut self, _page_id: Vec<u8>, resolver: ConflictResolver_Server) { let _ = fidl::Server::new( ConflictResolverServer { key: self.key.clone() }, resolver.into_channel(), ) .spawn(); } } impl ConflictResolverFactory_Stub for ConflictResolverFactoryServer { // Use default dispatching, but we could override it here. } impl_fidl_stub!(ConflictResolverFactoryServer: ConflictResolverFactory_Stub); fn state_from_snapshot<F>( snapshot: ::fidl::InterfacePtr<PageSnapshot_Client>, key: Vec<u8>, done: F, ) where F: Send + FnOnce(Result<Option<Engine>, ()>) + 'static, { assert_eq!(PageSnapshot_Metadata::VERSION, snapshot.version); let mut snapshot_proxy = PageSnapshot_new_Proxy(snapshot.inner); // TODO get a reference when too big snapshot_proxy.get(key).with(move |raw_res| { let state = match raw_res.map(|res| ledger::value_result(res)) { // the .ok() has the behavior of acting like invalid state is empty // and thus deleting invalid state and overwriting it with good state Ok(Ok(Some(buf))) => Ok(buf_to_state(&buf).ok()), Ok(Ok(None)) => { info!("No state in conflicting page"); Ok(None) } Err(err) => { warn!("FIDL failed on initial response: {:?}", err); Err(()) } Ok(Err(err)) => { warn!("Ledger failed to retrieve key: {:?}", err); Err(()) } }; done(state); }); } struct ConflictResolverServer { key: Vec<u8>, } impl ConflictResolver for ConflictResolverServer { fn resolve( &mut self, left: ::fidl::InterfacePtr<PageSnapshot_Client>, right: ::fidl::InterfacePtr<PageSnapshot_Client>, _common_version: Option<::fidl::InterfacePtr<PageSnapshot_Client>>, result_provider: ::fidl::InterfacePtr<MergeResultProvider_Client>, ) { // TODO in the futures-rs future, do this in parallel with Future combinators let key2 = self.key.clone(); state_from_snapshot(left, self.key.clone(), move |e1_opt| { let key3 = key2.clone(); state_from_snapshot(right, key2, move |e2_opt| { let result_opt = match (e1_opt, e2_opt) { (Ok(Some(mut e1)), Ok(Some(e2))) => { e1.merge(&e2); Some(e1) } // one engine didn't exist yet, I'm not sure if Ledger actually generates a conflict in this case (Ok(Some(e)), Ok(None)) | (Ok(None), Ok(Some(e))) => Some(e), // failed to get one of the engines, we can't do the merge properly (Err(()), _) | (_, Err(())) => None, // if state is invalid or missing on both sides, can't merge (Ok(None), Ok(None)) => None, }; if let Some(out_state) = result_opt { let buf = state_to_buf(&out_state); // TODO use a reference here when buf is too big let new_value = Some(Box::new(BytesOrReference::Bytes(buf))); let merged = MergedValue { key: key3, source: ValueSource_New, new_value, priority: Priority_Eager, }; assert_eq!(MergeResultProvider_Metadata::VERSION, result_provider.version); let mut result_provider_proxy = 
MergeResultProvider_new_Proxy(result_provider.inner); result_provider_proxy.merge(vec![merged]); result_provider_proxy.done().with(ledger_crash_callback); } }); }); } } impl ConflictResolver_Stub for ConflictResolverServer { // Use default dispatching, but we could override it here. } impl_fidl_stub!(ConflictResolverServer: ConflictResolver_Stub);
start_conflict_resolver_factory
identifier_name
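In this record the masked middle is just the function name `start_conflict_resolver_factory`. The conflict-resolution logic further down the same file reduces to a small decision table over the two loaded engine states; a sketch of that table (TypeScript with illustrative types only — `merge` stands in for the CRDT merge on `xi_rope::engine::Engine`):

```typescript
// Loaded models a snapshot read: it can fail outright, succeed with no state,
// or succeed with an engine.
type Loaded<E> = { ok: true; engine: E | null } | { ok: false };

function resolveConflict<E extends { merge(other: E): void }>(
  left: Loaded<E>,
  right: Loaded<E>
): E | null {
  // failed to get one of the engines: we can't do the merge properly
  if (!left.ok || !right.ok) return null;
  if (left.engine && right.engine) {
    left.engine.merge(right.engine); // merge both histories
    return left.engine;
  }
  // only one side had state (or neither, in which case this stays null)
  return left.engine ?? right.engine;
}
```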
sync.rs
// Copyright 2017 The xi-editor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Architecture for synchronizing a CRDT with the ledger. Separated into a //! module so that it is easier to add other sync stores later. use std::io::Write; use std::sync::mpsc::{Receiver, RecvError, Sender}; use log; use apps_ledger_services_public::*; use fidl::{self, Future, Promise}; use fuchsia::read_entire_vmo; use magenta::{Channel, ChannelOpts, HandleBase}; use serde_json; use super::ledger::{self, ledger_crash_callback}; use tabs::{BufferContainerRef, BufferIdentifier}; use xi_rope::engine::Engine; // TODO switch these to bincode fn state_to_buf(state: &Engine) -> Vec<u8> { serde_json::to_vec(state).unwrap() } fn buf_to_state(buf: &[u8]) -> Result<Engine, serde_json::Error> { serde_json::from_slice(buf) } /// Stores state needed by the container to perform synchronization. pub struct SyncStore { page: Page_Proxy, key: Vec<u8>, updates: Sender<SyncMsg>, transaction_pending: bool, buffer: BufferIdentifier, } impl SyncStore { /// - `page` is a reference to the Ledger page to store data under. /// - `key` is the key the `Syncable` managed by this `SyncStore` will be stored under. /// This example only supports storing things under a single key per page. /// - `updates` is a channel to a `SyncUpdater` that will handle events. /// /// Returns a sync store and schedules the loading of initial /// state and subscribes to state updates for this document. pub fn new( mut page: Page_Proxy, key: Vec<u8>, updates: Sender<SyncMsg>, buffer: BufferIdentifier, ) -> SyncStore { let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap();
let watcher = PageWatcherServer { updates: updates.clone(), buffer: buffer.clone() };
        let _ = fidl::Server::new(watcher, s2).spawn();

        let (mut snap, snap_request) = PageSnapshot_new_pair();
        page.get_snapshot(snap_request, Some(key.clone()), Some(watcher_client_ptr))
            .with(ledger_crash_callback);

        let initial_state_chan = updates.clone();
        let initial_buffer = buffer.clone();
        snap.get(key.clone()).with(move |raw_res| {
            match raw_res.map(|res| ledger::value_result(res)) {
                Ok(Ok(Some(buf))) => {
                    initial_state_chan
                        .send(SyncMsg::NewState {
                            buffer: initial_buffer,
                            new_buf: buf,
                            done: None,
                        })
                        .unwrap();
                }
                Ok(Ok(None)) => (), // No initial state saved yet
                Err(err) => error!("FIDL failed on initial response: {:?}", err),
                Ok(Err(err)) => error!("Ledger failed to retrieve key: {:?}", err),
            }
        });

        SyncStore { page, key, updates, buffer, transaction_pending: false }
    }

    /// Called whenever this app changed its own state and would like to
    /// persist the changes to the ledger. Changes can't be committed
    /// immediately since we have to wait for PageWatcher changes that may not
    /// have arrived yet.
    pub fn state_changed(&mut self) {
        if !self.transaction_pending {
            self.transaction_pending = true;
            let ready_future = self.page.start_transaction();
            let done_chan = self.updates.clone();
            let buffer = self.buffer.clone();
            ready_future.with(move |res| match res {
                Ok(ledger::OK) => {
                    done_chan.send(SyncMsg::TransactionReady { buffer }).unwrap();
                }
                Ok(err_status) => error!("Ledger failed to start transaction: {:?}", err_status),
                Err(err) => error!("FIDL failed on starting transaction: {:?}", err),
            });
        }
    }

    /// Should be called in SyncContainer::transaction_ready to persist the current state.
    pub fn commit_transaction(&mut self, state: &Engine) {
        assert!(self.transaction_pending, "must call state_changed (and wait) before commit");
        self.page.put(self.key.clone(), state_to_buf(state)).with(ledger_crash_callback);
        self.page.commit().with(ledger_crash_callback);
        self.transaction_pending = false;
    }
}

/// All the different asynchronous events the updater thread needs to listen for and act on
pub enum SyncMsg {
    NewState {
        buffer: BufferIdentifier,
        new_buf: Vec<u8>,
        done: Option<Promise<Option<PageSnapshot_Server>, fidl::Error>>,
    },
    TransactionReady {
        buffer: BufferIdentifier,
    },
    /// Shut down the updater thread
    Stop,
}

/// We want to be able to register to receive events from inside the
/// `SyncStore`/`SyncContainer` but from there we don't have access to the
/// Mutex that holds the container, so we give channel Senders to all the
/// futures so that they can all trigger events in one place that does have
/// the right reference.
///
/// Additionally, the individual `Editor`s aren't wrapped in a `Mutex` so we
/// have to hold a `BufferContainerRef` and use `BufferIdentifier`s with one
/// `SyncUpdater` for all buffers.
pub struct SyncUpdater<W: Write> {
    container_ref: BufferContainerRef<W>,
    chan: Receiver<SyncMsg>,
}

impl<W: Write + Send + 'static> SyncUpdater<W> {
    pub fn new(container_ref: BufferContainerRef<W>, chan: Receiver<SyncMsg>) -> SyncUpdater<W> {
        SyncUpdater { container_ref, chan }
    }

    /// Run this in a thread; it will return when it encounters an error
    /// reading the channel or when the `Stop` message is received.
pub fn work(&self) -> Result<(), RecvError> { loop { let msg = self.chan.recv()?; match msg { SyncMsg::Stop => return Ok(()), SyncMsg::TransactionReady { buffer } => { let mut container = self.container_ref.lock(); // if the buffer was closed, hopefully the page connection was as well, which I hope aborts transactions if let Some(mut editor) = container.editor_for_buffer_mut(&buffer) { editor.transaction_ready(); } } SyncMsg::NewState { new_buf, done, buffer } => { let mut container = self.container_ref.lock(); match (container.editor_for_buffer_mut(&buffer), buf_to_state(&new_buf)) { (Some(mut editor), Ok(new_state)) => { editor.merge_new_state(new_state); if let Some(promise) = done { promise.set_ok(None); } } (None, _) => (), // buffer was closed (_, Err(err)) => error!("Ledger was set to invalid state: {:?}", err), } } } } } } struct PageWatcherServer { updates: Sender<SyncMsg>, buffer: BufferIdentifier, } impl PageWatcher for PageWatcherServer { fn on_change( &mut self, page_change: PageChange, result_state: ResultState, ) -> Future<Option<PageSnapshot_Server>, fidl::Error> { let (future, done) = Future::make_promise(); let value_opt = page_change.changes.get(0).and_then(|c| c.value.as_ref()); if let (ledger::RESULT_COMPLETED, Some(value_vmo)) = (result_state, value_opt) { let new_buf = read_entire_vmo(value_vmo).expect("failed to read key Vmo"); self.updates .send(SyncMsg::NewState { buffer: self.buffer.clone(), new_buf, done: Some(done) }) .unwrap(); } else { error!("Xi state corrupted, should have one key but has multiple."); // I don't think this should be a FIDL-level error, so set okay done.set_ok(None); } future } } impl PageWatcher_Stub for PageWatcherServer { // Use default dispatching, but we could override it here. } impl_fidl_stub!(PageWatcherServer: PageWatcher_Stub); // ============= Conflict resolution pub fn start_conflict_resolver_factory(ledger: &mut Ledger_Proxy, key: Vec<u8>) { let (s1, s2) = Channel::create(ChannelOpts::Normal).unwrap(); let resolver_client = ConflictResolverFactory_Client::from_handle(s1.into_handle()); let resolver_client_ptr = ::fidl::InterfacePtr { inner: resolver_client, version: ConflictResolverFactory_Metadata::VERSION, }; let _ = fidl::Server::new(ConflictResolverFactoryServer { key }, s2).spawn(); ledger.set_conflict_resolver_factory(Some(resolver_client_ptr)).with(ledger_crash_callback); } struct ConflictResolverFactoryServer { key: Vec<u8>, } impl ConflictResolverFactory for ConflictResolverFactoryServer { fn get_policy(&mut self, _page_id: Vec<u8>) -> Future<MergePolicy, ::fidl::Error> { Future::done(Ok(MergePolicy_Custom)) } /// Our resolvers are the same for every page fn new_conflict_resolver(&mut self, _page_id: Vec<u8>, resolver: ConflictResolver_Server) { let _ = fidl::Server::new( ConflictResolverServer { key: self.key.clone() }, resolver.into_channel(), ) .spawn(); } } impl ConflictResolverFactory_Stub for ConflictResolverFactoryServer { // Use default dispatching, but we could override it here. 
} impl_fidl_stub!(ConflictResolverFactoryServer: ConflictResolverFactory_Stub); fn state_from_snapshot<F>( snapshot: ::fidl::InterfacePtr<PageSnapshot_Client>, key: Vec<u8>, done: F, ) where F: Send + FnOnce(Result<Option<Engine>, ()>) + 'static, { assert_eq!(PageSnapshot_Metadata::VERSION, snapshot.version); let mut snapshot_proxy = PageSnapshot_new_Proxy(snapshot.inner); // TODO get a reference when too big snapshot_proxy.get(key).with(move |raw_res| { let state = match raw_res.map(|res| ledger::value_result(res)) { // the .ok() has the behavior of acting like invalid state is empty // and thus deleting invalid state and overwriting it with good state Ok(Ok(Some(buf))) => Ok(buf_to_state(&buf).ok()), Ok(Ok(None)) => { info!("No state in conflicting page"); Ok(None) } Err(err) => { warn!("FIDL failed on initial response: {:?}", err); Err(()) } Ok(Err(err)) => { warn!("Ledger failed to retrieve key: {:?}", err); Err(()) } }; done(state); }); } struct ConflictResolverServer { key: Vec<u8>, } impl ConflictResolver for ConflictResolverServer { fn resolve( &mut self, left: ::fidl::InterfacePtr<PageSnapshot_Client>, right: ::fidl::InterfacePtr<PageSnapshot_Client>, _common_version: Option<::fidl::InterfacePtr<PageSnapshot_Client>>, result_provider: ::fidl::InterfacePtr<MergeResultProvider_Client>, ) { // TODO in the futures-rs future, do this in parallel with Future combinators let key2 = self.key.clone(); state_from_snapshot(left, self.key.clone(), move |e1_opt| { let key3 = key2.clone(); state_from_snapshot(right, key2, move |e2_opt| { let result_opt = match (e1_opt, e2_opt) { (Ok(Some(mut e1)), Ok(Some(e2))) => { e1.merge(&e2); Some(e1) } // one engine didn't exist yet, I'm not sure if Ledger actually generates a conflict in this case (Ok(Some(e)), Ok(None)) | (Ok(None), Ok(Some(e))) => Some(e), // failed to get one of the engines, we can't do the merge properly (Err(()), _) | (_, Err(())) => None, // if state is invalid or missing on both sides, can't merge (Ok(None), Ok(None)) => None, }; if let Some(out_state) = result_opt { let buf = state_to_buf(&out_state); // TODO use a reference here when buf is too big let new_value = Some(Box::new(BytesOrReference::Bytes(buf))); let merged = MergedValue { key: key3, source: ValueSource_New, new_value, priority: Priority_Eager, }; assert_eq!(MergeResultProvider_Metadata::VERSION, result_provider.version); let mut result_provider_proxy = MergeResultProvider_new_Proxy(result_provider.inner); result_provider_proxy.merge(vec![merged]); result_provider_proxy.done().with(ledger_crash_callback); } }); }); } } impl ConflictResolver_Stub for ConflictResolverServer { // Use default dispatching, but we could override it here. } impl_fidl_stub!(ConflictResolverServer: ConflictResolver_Stub);
let watcher_client = PageWatcher_Client::from_handle(s1.into_handle()); let watcher_client_ptr = ::fidl::InterfacePtr { inner: watcher_client, version: PageWatcher_Metadata::VERSION };
random_line_split
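The SyncStore in the row above gates commits on an asynchronous handshake: state_changed marks a transaction pending and asks the ledger to start one, a TransactionReady message arrives once it has, and only then does commit_transaction persist the state and clear the flag. A minimal Python sketch of that gating pattern, with hypothetical names and a plain queue standing in for the FIDL channel (illustration only, not the Rust code's API):

import queue

class GatedStore:
    def __init__(self):
        self.transaction_pending = False
        self.events = queue.Queue()          # plays the role of the SyncMsg channel

    def state_changed(self):
        if not self.transaction_pending:     # at most one transaction in flight
            self.transaction_pending = True
            # the real code calls page.start_transaction() asynchronously;
            # this sketch posts the "ready" event immediately
            self.events.put("transaction_ready")

    def commit_transaction(self, state):
        assert self.transaction_pending, "must call state_changed (and wait) before commit"
        # persist `state` here, then allow the next transaction
        self.transaction_pending = False

store = GatedStore()
store.state_changed()
store.events.get()                           # wait for TransactionReady
store.commit_transaction({"rev": 1})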
test_rnn2rnn_power.py
# encoding: utf-8 """ @author : zhirui zhou @contact: evilpsycho42@gmail.com @time : 2020/5/13 10:17 """ from deepseries.model.rnn2rnn import RNN2RNN from deepseries.train import Learner from deepseries.dataset import TimeSeries, FeatureStore, Seq2SeqDataLoader import numpy as np from torch.optim import Adam import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import torch from deepseries.model import BasicSeq2Seq from deepseries.dataset import Property, TimeSeries, Seq2SeqDataLoader from deepseries.nn.loss import MSELoss, RMSELoss from deepseries.train import Learner from deepseries.optim import ReduceCosineAnnealingLR import deepseries.functional as F from deepseries.analysis import SeriesAnalysisModel from torch.optim import Adam from torch import nn import matplotlib as mpl from sklearn.metrics import mean_absolute_error import chinese_calendar as calendar import datetime as dt def normalize(x, axis, fill_zero=True): mu = np.nanmean(x, axis, keepdims=True) std = np.nanstd(x, axis, keepdims=True) x_norm = (x - mu) / std if fill_zero: x_norm = np.nan_to_num(x_norm) return x_norm, mu, std power = pd.read_csv('./data/df.csv', parse_dates=['data_time'])[['data_time', 'cid', 'value']] power_15min = power.set_index("data_time").groupby("cid").resample("15min").sum().reset_index() power_15min = power_15min.pivot(index='cid', columns='data_time', values='value') power_daily = power.set_index("data_time").groupby("cid").resample("1D").sum().reset_index() power_daily = power_daily.pivot(index='cid', columns='data_time', values='value') xy_15min = power_15min.values.reshape(62, -1, 4*24) # (62, 1082, 96) xy_daily = power_daily.values N_TEST = 30 N_VALID = 2 DEC_LEN = 2 ENC_LEN = 7 drop_before = 1000 starts, ends = F.get_valid_start_end(np.isnan(xy_daily)) corr_7 = F.batch_autocorr(xy_daily, 7, starts, ends, 1.05, use_smooth=False, smooth_offset=None) corr_14 = F.batch_autocorr(xy_daily, 14, starts, ends, 1.05, use_smooth=False, smooth_offset=None) corr_365 = F.batch_autocorr(xy_daily, 365, starts, ends, 1.05, use_smooth=True, smooth_offset=2) xy_daily_auto_corr = np.concatenate([corr_7, corr_14, corr_365], 1) xy_daily_auto_corr = normalize(xy_daily_auto_corr, 0)[0] xy_lags = normalize(F.make_lags(xy_daily, [7, 14, 365])[:, :, drop_before:], axis=2)[0].transpose([0, 2, 1]) weights = (~np.bitwise_or(np.isnan(xy_15min), xy_15min == 0)).astype("float32")[:, drop_before:] # weights[:, :, np.where((power.columns >= "2020-02-01") & (power.columns < "2020-03-01"), 1, 0)] = 0.01 # weights = weights * xy_mean / xy_mean.mean() # weights = weights.transpose([0, 2, 1]) xy_cat = np.expand_dims(np.arange(len(weights)), 1) def get_holiday_features(dts): select_holidays = ["Spring Festival", "National Day", "Labour Day", "New Year's Day", "Mid-autumn Festival", "Tomb-sweeping Day"] def _get_holidays(x): is_holiday, holiday_name = calendar.get_holiday_detail(x) if holiday_name in select_holidays and is_holiday: return holiday_name holidays = pd.get_dummies(pd.Series(dts).apply(lambda x: _get_holidays(x))) holidays['sick'] = np.where((power_daily.columns >= "2020-02-01") & (power_daily.columns < "2020-03-01"), 1, 0) holidays.index = dts return holidays def holiday_apply(x, holidays, func): result = pd.DataFrame() for h in holidays.columns: result[h] = x.loc[:, holidays[h].values.astype(bool)].agg(func, axis=1).values return result holidays = get_holiday_features(power_daily.columns) xy_holiday_mean = holiday_apply(power_daily, holidays, np.mean).values xy_holiday_mean = 
normalize(xy_holiday_mean, 0)[0] xy_weekday = pd.get_dummies(power_daily.columns.weekday).values xy_hour = pd.get_dummies(power_daily.columns.hour).values xy_month = pd.get_dummies(power_daily.columns.month).values xy_date = np.concatenate([xy_weekday, xy_hour, xy_month, holidays], 1)[drop_before:] xy_date = np.repeat(np.expand_dims(xy_date, 0), xy_daily.shape[0], axis=0) xy, xy_mean, xy_std = normalize(xy_15min[:, drop_before:], 1) class ForwardSpliter:
spliter = ForwardSpliter() train_idx, valid_idx = spliter.split(np.arange(xy.shape[1]), ENC_LEN, N_TEST + N_VALID) valid_idx, test_idx = spliter.split(valid_idx, ENC_LEN, N_TEST) train_xy = TimeSeries(xy[:, train_idx]) valid_xy = TimeSeries(xy[:, valid_idx]) trn_weight = TimeSeries(weights[:, train_idx]) val_weight = TimeSeries(weights[:, valid_idx]) trn_enc_cat = [Property(xy_cat)] val_enc_cat = [Property(xy_cat)] trn_dec_cat = [Property(xy_cat)] val_dec_cat = [Property(xy_cat)] trn_enc_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)] val_enc_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)] trn_dec_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)] val_dec_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)] train_frame = Seq2SeqDataLoader(train_xy, batch_size=8, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True, mode='train', time_free_space=0, enc_num_feats=trn_enc_num, enc_cat_feats=trn_enc_cat, dec_num_feats=trn_dec_num, dec_cat_feats=trn_dec_cat, weights=trn_weight, seq_last=False) valid_frame = Seq2SeqDataLoader(valid_xy, batch_size=64, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True, mode='valid', time_free_space=0, time_interval=48, enc_num_feats=val_enc_num, enc_cat_feats=val_enc_cat, dec_num_feats=val_dec_num, dec_cat_feats=val_dec_cat, seq_last=False) model = RNN2RNN(96, hidden_size=256, compress_size=128, enc_num_size=40, enc_cat_size=[(62, 4)], dec_num_size=40, dec_cat_size=[(62, 4)], residual=True, beta1=.0, beta2=.0, attn_heads=1, attn_size=128, num_layers=1, dropout=0.0, rnn_type='GRU') opt = Adam(model.parameters(), 0.001) loss_fn = MSELoss() model.cuda() lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=5e-5) learner = Learner(model, opt, './power_preds', verbose=20, lr_scheduler=lr_scheduler) learner.fit(300, train_frame, valid_frame, patient=128, start_save=1, early_stopping=True) learner.load(299) learner.model.eval() preds = [] trues = [] for batch in valid_frame: batch[0].pop('dec_x') preds.append(learner.model(**batch[0])[0]) trues.append(batch[1]) trues = torch.cat(trues, 2).squeeze().cpu().numpy() * xy_std + xy_mean preds = torch.cat(preds, 2).squeeze().detach().cpu().numpy() * xy_std + xy_mean k = 42 plt.plot(trues[k].reshape(-1)) plt.plot(preds[k].reshape(-1), label='preds') plt.legend() test_xy = torch.as_tensor(xy[:, test_idx]).float().cuda() test_xy_num_feats = torch.as_tensor( np.concatenate([xy_date[:, test_idx], np.repeat(np.expand_dims(xy_holiday_mean, 1), len(test_idx), 1), xy_lags[:, test_idx], np.repeat(np.expand_dims(xy_daily_auto_corr, 1), len(test_idx), 1)], axis=2)).float().cuda() test_xy_cat_feats = torch.as_tensor(np.repeat(np.expand_dims(xy_cat, 1), test_xy.shape[1], 1)).long().cuda() def plot(x_true, y_true, y_pred): enc_ticks = np.arange(x_true.shape[1]) dec_ticks = np.arange(y_pred.shape[1]) + x_true.shape[1] for idx in range(x_true.shape[0]): plt.figure(figsize=(12, 3)) plt.plot(enc_ticks, x_true[idx]) plt.plot(dec_ticks, y_pred[idx], label='pred') plt.plot(dec_ticks, y_true[idx], label='true') plt.title(idx) plt.legend() def wmape(y_hat, y): scores = [] for day in range(int(y.shape[0] / 24)): scores.append(np.abs(y[day * 24: (day + 1) * 24] - y_hat[day * 24: (day + 1) * 24]).sum() / np.sum( y[day * 24: 
(day + 1) * 24])) return scores def metric(y_true, y_pred): scores = {} for idx, name in enumerate(power_daily.index): scores[name] = wmape(y_pred[idx], y_true[idx]) return pd.DataFrame(scores) def predict(learner, xy, x_num, x_cat, y_num, y_cat): preds = [] days = int(xy.shape[1] - ENC_LEN - DEC_LEN + 1) for step in range(days): step_pred = learner.model( xy[:, step: step + ENC_LEN], enc_num=x_num[:, step: step + ENC_LEN], dec_num=y_num[:, step + ENC_LEN: step + ENC_LEN + DEC_LEN], enc_cat=x_cat[:, step: step + ENC_LEN], dec_cat=y_cat[:, step + ENC_LEN: step + ENC_LEN + DEC_LEN], dec_len=DEC_LEN )[0].cpu().detach().numpy() preds.append(step_pred[:, [1]]) preds = np.concatenate(preds, axis=1) preds = preds.squeeze() * xy_std + xy_mean x_true = xy[:, :ENC_LEN + 1].cpu().numpy().squeeze() * xy_std + xy_mean y_true = xy[:, ENC_LEN + 1:].cpu().numpy().squeeze() * xy_std + xy_mean return x_true, y_true, preds norm_data = pd.read_csv("./data/20200315_20200415.csv").drop(['Unnamed: 0', 'model_name'], axis=1) norm_data = norm_data[norm_data.contributor_id.isin(power_daily.index)].reset_index(drop=True) norm_data['data_time'] = pd.to_datetime(norm_data.data_time) norm_data = norm_data.set_index("data_time").groupby("contributor_id").resample('15min')[['forecast_pwr', 'value']].sum().reset_index() norm_true = norm_data.pivot(index='contributor_id', columns='data_time', values='value').iloc[:, 48:] norm_pred = norm_data.pivot(index='contributor_id', columns='data_time', values='forecast_pwr').iloc[:, 48:] x_true, y_true, y_pred = predict(learner, test_xy, test_xy_num_feats, test_xy_cat_feats, test_xy_num_feats, test_xy_cat_feats) metric(y_true, y_pred).mean().rename("wave").describe() scores = pd.DataFrame([metric(y_true, y_pred).mean().rename("wave"), metric(norm_true.values, norm_pred.values).mean().rename("v1")]).T.dropna() scores.describe() # attn 1 mean: 0.19 50%: 0.154 # attn 3 mean: 0.187 50%: 0.156 # attn 0 mean: 0.188 50%: 0.159 # add reg mean: 0.187 50% 0.156 # 128 compress attn: 0.180 50% 0.15 # 2 layer: 0.189 0.153 # use 12 dim: 0.187 0.150 # no res : 0.179 0.154 # enc 14 day: 0.188 0.162 # enc 2 day : 0.19 0.157 plot(x_true.reshape(62, -1), y_true.reshape(62, -1), y_pred.reshape(62, -1))
def split(self, time_idx, enc_len, valid_size): if valid_size < 1: valid_size = int(np.floor(len(time_idx) * valid_size)) valid_idx = time_idx[-(valid_size + enc_len):] train_idx = time_idx[:-valid_size] return train_idx, valid_idx
identifier_body
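The identifier_body middle above is a walk-forward split: a fractional valid_size is interpreted as a share of the series length, and the validation slice keeps enc_len extra leading steps so encoder windows can span the train/validation boundary. A standalone check of that same logic with made-up sizes:

import numpy as np

def split(time_idx, enc_len, valid_size):
    # Same logic as ForwardSpliter.split in the rows above.
    if valid_size < 1:
        valid_size = int(np.floor(len(time_idx) * valid_size))
    valid_idx = time_idx[-(valid_size + enc_len):]
    train_idx = time_idx[:-valid_size]
    return train_idx, valid_idx

train, valid = split(np.arange(10), enc_len=3, valid_size=2)
# train -> [0..7], valid -> [5..9]: the slices overlap by exactly enc_len steps.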
test_rnn2rnn_power.py
# encoding: utf-8 """ @author : zhirui zhou @contact: evilpsycho42@gmail.com @time : 2020/5/13 10:17 """ from deepseries.model.rnn2rnn import RNN2RNN from deepseries.train import Learner from deepseries.dataset import TimeSeries, FeatureStore, Seq2SeqDataLoader import numpy as np from torch.optim import Adam import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import torch from deepseries.model import BasicSeq2Seq from deepseries.dataset import Property, TimeSeries, Seq2SeqDataLoader from deepseries.nn.loss import MSELoss, RMSELoss from deepseries.train import Learner from deepseries.optim import ReduceCosineAnnealingLR import deepseries.functional as F from deepseries.analysis import SeriesAnalysisModel from torch.optim import Adam from torch import nn import matplotlib as mpl from sklearn.metrics import mean_absolute_error import chinese_calendar as calendar import datetime as dt def
(x, axis, fill_zero=True): mu = np.nanmean(x, axis, keepdims=True) std = np.nanstd(x, axis, keepdims=True) x_norm = (x - mu) / std if fill_zero: x_norm = np.nan_to_num(x_norm) return x_norm, mu, std power = pd.read_csv('./data/df.csv', parse_dates=['data_time'])[['data_time', 'cid', 'value']] power_15min = power.set_index("data_time").groupby("cid").resample("15min").sum().reset_index() power_15min = power_15min.pivot(index='cid', columns='data_time', values='value') power_daily = power.set_index("data_time").groupby("cid").resample("1D").sum().reset_index() power_daily = power_daily.pivot(index='cid', columns='data_time', values='value') xy_15min = power_15min.values.reshape(62, -1, 4*24) # (62, 1082, 96) xy_daily = power_daily.values N_TEST = 30 N_VALID = 2 DEC_LEN = 2 ENC_LEN = 7 drop_before = 1000 starts, ends = F.get_valid_start_end(np.isnan(xy_daily)) corr_7 = F.batch_autocorr(xy_daily, 7, starts, ends, 1.05, use_smooth=False, smooth_offset=None) corr_14 = F.batch_autocorr(xy_daily, 14, starts, ends, 1.05, use_smooth=False, smooth_offset=None) corr_365 = F.batch_autocorr(xy_daily, 365, starts, ends, 1.05, use_smooth=True, smooth_offset=2) xy_daily_auto_corr = np.concatenate([corr_7, corr_14, corr_365], 1) xy_daily_auto_corr = normalize(xy_daily_auto_corr, 0)[0] xy_lags = normalize(F.make_lags(xy_daily, [7, 14, 365])[:, :, drop_before:], axis=2)[0].transpose([0, 2, 1]) weights = (~np.bitwise_or(np.isnan(xy_15min), xy_15min == 0)).astype("float32")[:, drop_before:] # weights[:, :, np.where((power.columns >= "2020-02-01") & (power.columns < "2020-03-01"), 1, 0)] = 0.01 # weights = weights * xy_mean / xy_mean.mean() # weights = weights.transpose([0, 2, 1]) xy_cat = np.expand_dims(np.arange(len(weights)), 1) def get_holiday_features(dts): select_holidays = ["Spring Festival", "National Day", "Labour Day", "New Year's Day", "Mid-autumn Festival", "Tomb-sweeping Day"] def _get_holidays(x): is_holiday, holiday_name = calendar.get_holiday_detail(x) if holiday_name in select_holidays and is_holiday: return holiday_name holidays = pd.get_dummies(pd.Series(dts).apply(lambda x: _get_holidays(x))) holidays['sick'] = np.where((power_daily.columns >= "2020-02-01") & (power_daily.columns < "2020-03-01"), 1, 0) holidays.index = dts return holidays def holiday_apply(x, holidays, func): result = pd.DataFrame() for h in holidays.columns: result[h] = x.loc[:, holidays[h].values.astype(bool)].agg(func, axis=1).values return result holidays = get_holiday_features(power_daily.columns) xy_holiday_mean = holiday_apply(power_daily, holidays, np.mean).values xy_holiday_mean = normalize(xy_holiday_mean, 0)[0] xy_weekday = pd.get_dummies(power_daily.columns.weekday).values xy_hour = pd.get_dummies(power_daily.columns.hour).values xy_month = pd.get_dummies(power_daily.columns.month).values xy_date = np.concatenate([xy_weekday, xy_hour, xy_month, holidays], 1)[drop_before:] xy_date = np.repeat(np.expand_dims(xy_date, 0), xy_daily.shape[0], axis=0) xy, xy_mean, xy_std = normalize(xy_15min[:, drop_before:], 1) class ForwardSpliter: def split(self, time_idx, enc_len, valid_size): if valid_size < 1: valid_size = int(np.floor(len(time_idx) * valid_size)) valid_idx = time_idx[-(valid_size + enc_len):] train_idx = time_idx[:-valid_size] return train_idx, valid_idx spliter = ForwardSpliter() train_idx, valid_idx = spliter.split(np.arange(xy.shape[1]), ENC_LEN, N_TEST + N_VALID) valid_idx, test_idx = spliter.split(valid_idx, ENC_LEN, N_TEST) train_xy = TimeSeries(xy[:, train_idx]) valid_xy = TimeSeries(xy[:, valid_idx]) 
trn_weight = TimeSeries(weights[:, train_idx]) val_weight = TimeSeries(weights[:, valid_idx]) trn_enc_cat = [Property(xy_cat)] val_enc_cat = [Property(xy_cat)] trn_dec_cat = [Property(xy_cat)] val_dec_cat = [Property(xy_cat)] trn_enc_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)] val_enc_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)] trn_dec_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)] val_dec_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)] train_frame = Seq2SeqDataLoader(train_xy, batch_size=8, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True, mode='train', time_free_space=0, enc_num_feats=trn_enc_num, enc_cat_feats=trn_enc_cat, dec_num_feats=trn_dec_num, dec_cat_feats=trn_dec_cat, weights=trn_weight, seq_last=False) valid_frame = Seq2SeqDataLoader(valid_xy, batch_size=64, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True, mode='valid', time_free_space=0, time_interval=48, enc_num_feats=val_enc_num, enc_cat_feats=val_enc_cat, dec_num_feats=val_dec_num, dec_cat_feats=val_dec_cat, seq_last=False) model = RNN2RNN(96, hidden_size=256, compress_size=128, enc_num_size=40, enc_cat_size=[(62, 4)], dec_num_size=40, dec_cat_size=[(62, 4)], residual=True, beta1=.0, beta2=.0, attn_heads=1, attn_size=128, num_layers=1, dropout=0.0, rnn_type='GRU') opt = Adam(model.parameters(), 0.001) loss_fn = MSELoss() model.cuda() lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=5e-5) learner = Learner(model, opt, './power_preds', verbose=20, lr_scheduler=lr_scheduler) learner.fit(300, train_frame, valid_frame, patient=128, start_save=1, early_stopping=True) learner.load(299) learner.model.eval() preds = [] trues = [] for batch in valid_frame: batch[0].pop('dec_x') preds.append(learner.model(**batch[0])[0]) trues.append(batch[1]) trues = torch.cat(trues, 2).squeeze().cpu().numpy() * xy_std + xy_mean preds = torch.cat(preds, 2).squeeze().detach().cpu().numpy() * xy_std + xy_mean k = 42 plt.plot(trues[k].reshape(-1)) plt.plot(preds[k].reshape(-1), label='preds') plt.legend() test_xy = torch.as_tensor(xy[:, test_idx]).float().cuda() test_xy_num_feats = torch.as_tensor( np.concatenate([xy_date[:, test_idx], np.repeat(np.expand_dims(xy_holiday_mean, 1), len(test_idx), 1), xy_lags[:, test_idx], np.repeat(np.expand_dims(xy_daily_auto_corr, 1), len(test_idx), 1)], axis=2)).float().cuda() test_xy_cat_feats = torch.as_tensor(np.repeat(np.expand_dims(xy_cat, 1), test_xy.shape[1], 1)).long().cuda() def plot(x_true, y_true, y_pred): enc_ticks = np.arange(x_true.shape[1]) dec_ticks = np.arange(y_pred.shape[1]) + x_true.shape[1] for idx in range(x_true.shape[0]): plt.figure(figsize=(12, 3)) plt.plot(enc_ticks, x_true[idx]) plt.plot(dec_ticks, y_pred[idx], label='pred') plt.plot(dec_ticks, y_true[idx], label='true') plt.title(idx) plt.legend() def wmape(y_hat, y): scores = [] for day in range(int(y.shape[0] / 24)): scores.append(np.abs(y[day * 24: (day + 1) * 24] - y_hat[day * 24: (day + 1) * 24]).sum() / np.sum( y[day * 24: (day + 1) * 24])) return scores def metric(y_true, y_pred): scores = {} for idx, name in enumerate(power_daily.index): scores[name] = wmape(y_pred[idx], y_true[idx]) return pd.DataFrame(scores) def predict(learner, xy, x_num, x_cat, y_num, y_cat): preds = [] 
days = int(xy.shape[1] - ENC_LEN - DEC_LEN + 1) for step in range(days): step_pred = learner.model( xy[:, step: step + ENC_LEN], enc_num=x_num[:, step: step + ENC_LEN], dec_num=y_num[:, step + ENC_LEN: step + ENC_LEN + DEC_LEN], enc_cat=x_cat[:, step: step + ENC_LEN], dec_cat=y_cat[:, step + ENC_LEN: step + ENC_LEN + DEC_LEN], dec_len=DEC_LEN )[0].cpu().detach().numpy() preds.append(step_pred[:, [1]]) preds = np.concatenate(preds, axis=1) preds = preds.squeeze() * xy_std + xy_mean x_true = xy[:, :ENC_LEN + 1].cpu().numpy().squeeze() * xy_std + xy_mean y_true = xy[:, ENC_LEN + 1:].cpu().numpy().squeeze() * xy_std + xy_mean return x_true, y_true, preds norm_data = pd.read_csv("./data/20200315_20200415.csv").drop(['Unnamed: 0', 'model_name'], axis=1) norm_data = norm_data[norm_data.contributor_id.isin(power_daily.index)].reset_index(drop=True) norm_data['data_time'] = pd.to_datetime(norm_data.data_time) norm_data = norm_data.set_index("data_time").groupby("contributor_id").resample('15min')[['forecast_pwr', 'value']].sum().reset_index() norm_true = norm_data.pivot(index='contributor_id', columns='data_time', values='value').iloc[:, 48:] norm_pred = norm_data.pivot(index='contributor_id', columns='data_time', values='forecast_pwr').iloc[:, 48:] x_true, y_true, y_pred = predict(learner, test_xy, test_xy_num_feats, test_xy_cat_feats, test_xy_num_feats, test_xy_cat_feats) metric(y_true, y_pred).mean().rename("wave").describe() scores = pd.DataFrame([metric(y_true, y_pred).mean().rename("wave"), metric(norm_true.values, norm_pred.values).mean().rename("v1")]).T.dropna() scores.describe() # attn 1 mean: 0.19 50%: 0.154 # attn 3 mean: 0.187 50%: 0.156 # attn 0 mean: 0.188 50%: 0.159 # add reg mean: 0.187 50% 0.156 # 128 compress attn: 0.180 50% 0.15 # 2 layer: 0.189 0.153 # use 12 dim: 0.187 0.150 # no res : 0.179 0.154 # enc 14 day: 0.188 0.162 # enc 2 day : 0.19 0.157 plot(x_true.reshape(62, -1), y_true.reshape(62, -1), y_pred.reshape(62, -1))
normalize
identifier_name
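For reference, the normalize helper these rows revolve around computes a per-axis z-score with NaN-aware statistics and zero-fills the NaNs afterwards. A tiny worked example with made-up numbers (the function body is copied from the rows above):

import numpy as np

def normalize(x, axis, fill_zero=True):
    mu = np.nanmean(x, axis, keepdims=True)
    std = np.nanstd(x, axis, keepdims=True)
    x_norm = (x - mu) / std
    if fill_zero:
        x_norm = np.nan_to_num(x_norm)
    return x_norm, mu, std

x = np.array([[1.0, 2.0, 3.0],
              [np.nan, 4.0, 6.0]])
x_norm, mu, std = normalize(x, axis=1)
# mu  -> [[2.0], [5.0]]   (nanmean keeps dims and ignores the NaN)
# std -> [[~0.816], [1.0]]
# x_norm is zero-mean per row; the NaN cell becomes 0.0 via nan_to_num.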
test_rnn2rnn_power.py
# encoding: utf-8 """ @author : zhirui zhou @contact: evilpsycho42@gmail.com @time : 2020/5/13 10:17 """ from deepseries.model.rnn2rnn import RNN2RNN from deepseries.train import Learner from deepseries.dataset import TimeSeries, FeatureStore, Seq2SeqDataLoader import numpy as np from torch.optim import Adam import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import torch from deepseries.model import BasicSeq2Seq from deepseries.dataset import Property, TimeSeries, Seq2SeqDataLoader from deepseries.nn.loss import MSELoss, RMSELoss from deepseries.train import Learner from deepseries.optim import ReduceCosineAnnealingLR import deepseries.functional as F from deepseries.analysis import SeriesAnalysisModel from torch.optim import Adam from torch import nn import matplotlib as mpl from sklearn.metrics import mean_absolute_error import chinese_calendar as calendar import datetime as dt def normalize(x, axis, fill_zero=True): mu = np.nanmean(x, axis, keepdims=True) std = np.nanstd(x, axis, keepdims=True) x_norm = (x - mu) / std if fill_zero: x_norm = np.nan_to_num(x_norm) return x_norm, mu, std power = pd.read_csv('./data/df.csv', parse_dates=['data_time'])[['data_time', 'cid', 'value']] power_15min = power.set_index("data_time").groupby("cid").resample("15min").sum().reset_index() power_15min = power_15min.pivot(index='cid', columns='data_time', values='value') power_daily = power.set_index("data_time").groupby("cid").resample("1D").sum().reset_index() power_daily = power_daily.pivot(index='cid', columns='data_time', values='value') xy_15min = power_15min.values.reshape(62, -1, 4*24) # (62, 1082, 96) xy_daily = power_daily.values N_TEST = 30 N_VALID = 2 DEC_LEN = 2 ENC_LEN = 7 drop_before = 1000 starts, ends = F.get_valid_start_end(np.isnan(xy_daily)) corr_7 = F.batch_autocorr(xy_daily, 7, starts, ends, 1.05, use_smooth=False, smooth_offset=None) corr_14 = F.batch_autocorr(xy_daily, 14, starts, ends, 1.05, use_smooth=False, smooth_offset=None) corr_365 = F.batch_autocorr(xy_daily, 365, starts, ends, 1.05, use_smooth=True, smooth_offset=2) xy_daily_auto_corr = np.concatenate([corr_7, corr_14, corr_365], 1) xy_daily_auto_corr = normalize(xy_daily_auto_corr, 0)[0] xy_lags = normalize(F.make_lags(xy_daily, [7, 14, 365])[:, :, drop_before:], axis=2)[0].transpose([0, 2, 1]) weights = (~np.bitwise_or(np.isnan(xy_15min), xy_15min == 0)).astype("float32")[:, drop_before:] # weights[:, :, np.where((power.columns >= "2020-02-01") & (power.columns < "2020-03-01"), 1, 0)] = 0.01 # weights = weights * xy_mean / xy_mean.mean() # weights = weights.transpose([0, 2, 1]) xy_cat = np.expand_dims(np.arange(len(weights)), 1) def get_holiday_features(dts): select_holidays = ["Spring Festival", "National Day", "Labour Day", "New Year's Day", "Mid-autumn Festival", "Tomb-sweeping Day"] def _get_holidays(x): is_holiday, holiday_name = calendar.get_holiday_detail(x) if holiday_name in select_holidays and is_holiday: return holiday_name holidays = pd.get_dummies(pd.Series(dts).apply(lambda x: _get_holidays(x))) holidays['sick'] = np.where((power_daily.columns >= "2020-02-01") & (power_daily.columns < "2020-03-01"), 1, 0) holidays.index = dts return holidays def holiday_apply(x, holidays, func): result = pd.DataFrame() for h in holidays.columns: result[h] = x.loc[:, holidays[h].values.astype(bool)].agg(func, axis=1).values return result holidays = get_holiday_features(power_daily.columns) xy_holiday_mean = holiday_apply(power_daily, holidays, np.mean).values xy_holiday_mean = 
normalize(xy_holiday_mean, 0)[0] xy_weekday = pd.get_dummies(power_daily.columns.weekday).values xy_hour = pd.get_dummies(power_daily.columns.hour).values xy_month = pd.get_dummies(power_daily.columns.month).values xy_date = np.concatenate([xy_weekday, xy_hour, xy_month, holidays], 1)[drop_before:] xy_date = np.repeat(np.expand_dims(xy_date, 0), xy_daily.shape[0], axis=0) xy, xy_mean, xy_std = normalize(xy_15min[:, drop_before:], 1) class ForwardSpliter: def split(self, time_idx, enc_len, valid_size): if valid_size < 1: valid_size = int(np.floor(len(time_idx) * valid_size)) valid_idx = time_idx[-(valid_size + enc_len):] train_idx = time_idx[:-valid_size] return train_idx, valid_idx spliter = ForwardSpliter() train_idx, valid_idx = spliter.split(np.arange(xy.shape[1]), ENC_LEN, N_TEST + N_VALID) valid_idx, test_idx = spliter.split(valid_idx, ENC_LEN, N_TEST) train_xy = TimeSeries(xy[:, train_idx]) valid_xy = TimeSeries(xy[:, valid_idx]) trn_weight = TimeSeries(weights[:, train_idx]) val_weight = TimeSeries(weights[:, valid_idx]) trn_enc_cat = [Property(xy_cat)] val_enc_cat = [Property(xy_cat)] trn_dec_cat = [Property(xy_cat)] val_dec_cat = [Property(xy_cat)] trn_enc_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)] val_enc_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)] trn_dec_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)] val_dec_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)] train_frame = Seq2SeqDataLoader(train_xy, batch_size=8, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True,
valid_frame = Seq2SeqDataLoader(valid_xy, batch_size=64, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True, mode='valid', time_free_space=0, time_interval=48, enc_num_feats=val_enc_num, enc_cat_feats=val_enc_cat, dec_num_feats=val_dec_num, dec_cat_feats=val_dec_cat, seq_last=False) model = RNN2RNN(96, hidden_size=256, compress_size=128, enc_num_size=40, enc_cat_size=[(62, 4)], dec_num_size=40, dec_cat_size=[(62, 4)], residual=True, beta1=.0, beta2=.0, attn_heads=1, attn_size=128, num_layers=1, dropout=0.0, rnn_type='GRU') opt = Adam(model.parameters(), 0.001) loss_fn = MSELoss() model.cuda() lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=5e-5) learner = Learner(model, opt, './power_preds', verbose=20, lr_scheduler=lr_scheduler) learner.fit(300, train_frame, valid_frame, patient=128, start_save=1, early_stopping=True) learner.load(299) learner.model.eval() preds = [] trues = [] for batch in valid_frame: batch[0].pop('dec_x') preds.append(learner.model(**batch[0])[0]) trues.append(batch[1]) trues = torch.cat(trues, 2).squeeze().cpu().numpy() * xy_std + xy_mean preds = torch.cat(preds, 2).squeeze().detach().cpu().numpy() * xy_std + xy_mean k = 42 plt.plot(trues[k].reshape(-1)) plt.plot(preds[k].reshape(-1), label='preds') plt.legend() test_xy = torch.as_tensor(xy[:, test_idx]).float().cuda() test_xy_num_feats = torch.as_tensor( np.concatenate([xy_date[:, test_idx], np.repeat(np.expand_dims(xy_holiday_mean, 1), len(test_idx), 1), xy_lags[:, test_idx], np.repeat(np.expand_dims(xy_daily_auto_corr, 1), len(test_idx), 1)], axis=2)).float().cuda() test_xy_cat_feats = torch.as_tensor(np.repeat(np.expand_dims(xy_cat, 1), test_xy.shape[1], 1)).long().cuda() def plot(x_true, y_true, y_pred): enc_ticks = np.arange(x_true.shape[1]) dec_ticks = np.arange(y_pred.shape[1]) + x_true.shape[1] for idx in range(x_true.shape[0]): plt.figure(figsize=(12, 3)) plt.plot(enc_ticks, x_true[idx]) plt.plot(dec_ticks, y_pred[idx], label='pred') plt.plot(dec_ticks, y_true[idx], label='true') plt.title(idx) plt.legend() def wmape(y_hat, y): scores = [] for day in range(int(y.shape[0] / 24)): scores.append(np.abs(y[day * 24: (day + 1) * 24] - y_hat[day * 24: (day + 1) * 24]).sum() / np.sum( y[day * 24: (day + 1) * 24])) return scores def metric(y_true, y_pred): scores = {} for idx, name in enumerate(power_daily.index): scores[name] = wmape(y_pred[idx], y_true[idx]) return pd.DataFrame(scores) def predict(learner, xy, x_num, x_cat, y_num, y_cat): preds = [] days = int(xy.shape[1] - ENC_LEN - DEC_LEN + 1) for step in range(days): step_pred = learner.model( xy[:, step: step + ENC_LEN], enc_num=x_num[:, step: step + ENC_LEN], dec_num=y_num[:, step + ENC_LEN: step + ENC_LEN + DEC_LEN], enc_cat=x_cat[:, step: step + ENC_LEN], dec_cat=y_cat[:, step + ENC_LEN: step + ENC_LEN + DEC_LEN], dec_len=DEC_LEN )[0].cpu().detach().numpy() preds.append(step_pred[:, [1]]) preds = np.concatenate(preds, axis=1) preds = preds.squeeze() * xy_std + xy_mean x_true = xy[:, :ENC_LEN + 1].cpu().numpy().squeeze() * xy_std + xy_mean y_true = xy[:, ENC_LEN + 1:].cpu().numpy().squeeze() * xy_std + xy_mean return x_true, y_true, preds norm_data = pd.read_csv("./data/20200315_20200415.csv").drop(['Unnamed: 0', 'model_name'], axis=1) norm_data = norm_data[norm_data.contributor_id.isin(power_daily.index)].reset_index(drop=True) norm_data['data_time'] = pd.to_datetime(norm_data.data_time) norm_data = norm_data.set_index("data_time").groupby("contributor_id").resample('15min')[['forecast_pwr', 'value']].sum().reset_index() norm_true = 
norm_data.pivot(index='contributor_id', columns='data_time', values='value').iloc[:, 48:] norm_pred = norm_data.pivot(index='contributor_id', columns='data_time', values='forecast_pwr').iloc[:, 48:] x_true, y_true, y_pred = predict(learner, test_xy, test_xy_num_feats, test_xy_cat_feats, test_xy_num_feats, test_xy_cat_feats) metric(y_true, y_pred).mean().rename("wave").describe() scores = pd.DataFrame([metric(y_true, y_pred).mean().rename("wave"), metric(norm_true.values, norm_pred.values).mean().rename("v1")]).T.dropna() scores.describe() # attn 1 mean: 0.19 50%: 0.154 # attn 3 mean: 0.187 50%: 0.156 # attn 0 mean: 0.188 50%: 0.159 # add reg mean: 0.187 50% 0.156 # 128 compress attn: 0.180 50% 0.15 # 2 layer: 0.189 0.153 # use 12 dim: 0.187 0.150 # no res : 0.179 0.154 # enc 14 day: 0.188 0.162 # enc 2 day : 0.19 0.157 plot(x_true.reshape(62, -1), y_true.reshape(62, -1), y_pred.reshape(62, -1))
mode='train', time_free_space=0, enc_num_feats=trn_enc_num, enc_cat_feats=trn_enc_cat, dec_num_feats=trn_dec_num, dec_cat_feats=trn_dec_cat, weights=trn_weight, seq_last=False)
random_line_split
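The wmape metric used in this script scores each 24-step "day" by its summed absolute error divided by the summed actuals. A quick worked example with made-up series (the function body mirrors the one in the rows above):

import numpy as np

def wmape(y_hat, y):
    scores = []
    for day in range(int(y.shape[0] / 24)):
        scores.append(np.abs(y[day * 24:(day + 1) * 24] - y_hat[day * 24:(day + 1) * 24]).sum()
                      / np.sum(y[day * 24:(day + 1) * 24]))
    return scores

y = np.ones(48)                                    # two toy "days" of 24 steps
y_hat = np.concatenate([np.full(24, 1.1), np.full(24, 0.8)])
print(wmape(y_hat, y))                             # -> approximately [0.1, 0.2]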
test_rnn2rnn_power.py
# encoding: utf-8 """ @author : zhirui zhou @contact: evilpsycho42@gmail.com @time : 2020/5/13 10:17 """ from deepseries.model.rnn2rnn import RNN2RNN from deepseries.train import Learner from deepseries.dataset import TimeSeries, FeatureStore, Seq2SeqDataLoader import numpy as np from torch.optim import Adam import pandas as pd import numpy as np import matplotlib.pyplot as plt import seaborn as sns import torch from deepseries.model import BasicSeq2Seq from deepseries.dataset import Property, TimeSeries, Seq2SeqDataLoader from deepseries.nn.loss import MSELoss, RMSELoss from deepseries.train import Learner from deepseries.optim import ReduceCosineAnnealingLR import deepseries.functional as F from deepseries.analysis import SeriesAnalysisModel from torch.optim import Adam from torch import nn import matplotlib as mpl from sklearn.metrics import mean_absolute_error import chinese_calendar as calendar import datetime as dt def normalize(x, axis, fill_zero=True): mu = np.nanmean(x, axis, keepdims=True) std = np.nanstd(x, axis, keepdims=True) x_norm = (x - mu) / std if fill_zero: x_norm = np.nan_to_num(x_norm) return x_norm, mu, std power = pd.read_csv('./data/df.csv', parse_dates=['data_time'])[['data_time', 'cid', 'value']] power_15min = power.set_index("data_time").groupby("cid").resample("15min").sum().reset_index() power_15min = power_15min.pivot(index='cid', columns='data_time', values='value') power_daily = power.set_index("data_time").groupby("cid").resample("1D").sum().reset_index() power_daily = power_daily.pivot(index='cid', columns='data_time', values='value') xy_15min = power_15min.values.reshape(62, -1, 4*24) # (62, 1082, 96) xy_daily = power_daily.values N_TEST = 30 N_VALID = 2 DEC_LEN = 2 ENC_LEN = 7 drop_before = 1000 starts, ends = F.get_valid_start_end(np.isnan(xy_daily)) corr_7 = F.batch_autocorr(xy_daily, 7, starts, ends, 1.05, use_smooth=False, smooth_offset=None) corr_14 = F.batch_autocorr(xy_daily, 14, starts, ends, 1.05, use_smooth=False, smooth_offset=None) corr_365 = F.batch_autocorr(xy_daily, 365, starts, ends, 1.05, use_smooth=True, smooth_offset=2) xy_daily_auto_corr = np.concatenate([corr_7, corr_14, corr_365], 1) xy_daily_auto_corr = normalize(xy_daily_auto_corr, 0)[0] xy_lags = normalize(F.make_lags(xy_daily, [7, 14, 365])[:, :, drop_before:], axis=2)[0].transpose([0, 2, 1]) weights = (~np.bitwise_or(np.isnan(xy_15min), xy_15min == 0)).astype("float32")[:, drop_before:] # weights[:, :, np.where((power.columns >= "2020-02-01") & (power.columns < "2020-03-01"), 1, 0)] = 0.01 # weights = weights * xy_mean / xy_mean.mean() # weights = weights.transpose([0, 2, 1]) xy_cat = np.expand_dims(np.arange(len(weights)), 1) def get_holiday_features(dts): select_holidays = ["Spring Festival", "National Day", "Labour Day", "New Year's Day", "Mid-autumn Festival", "Tomb-sweeping Day"] def _get_holidays(x): is_holiday, holiday_name = calendar.get_holiday_detail(x) if holiday_name in select_holidays and is_holiday: return holiday_name holidays = pd.get_dummies(pd.Series(dts).apply(lambda x: _get_holidays(x))) holidays['sick'] = np.where((power_daily.columns >= "2020-02-01") & (power_daily.columns < "2020-03-01"), 1, 0) holidays.index = dts return holidays def holiday_apply(x, holidays, func): result = pd.DataFrame() for h in holidays.columns:
return result holidays = get_holiday_features(power_daily.columns) xy_holiday_mean = holiday_apply(power_daily, holidays, np.mean).values xy_holiday_mean = normalize(xy_holiday_mean, 0)[0] xy_weekday = pd.get_dummies(power_daily.columns.weekday).values xy_hour = pd.get_dummies(power_daily.columns.hour).values xy_month = pd.get_dummies(power_daily.columns.month).values xy_date = np.concatenate([xy_weekday, xy_hour, xy_month, holidays], 1)[drop_before:] xy_date = np.repeat(np.expand_dims(xy_date, 0), xy_daily.shape[0], axis=0) xy, xy_mean, xy_std = normalize(xy_15min[:, drop_before:], 1) class ForwardSpliter: def split(self, time_idx, enc_len, valid_size): if valid_size < 1: valid_size = int(np.floor(len(time_idx) * valid_size)) valid_idx = time_idx[-(valid_size + enc_len):] train_idx = time_idx[:-valid_size] return train_idx, valid_idx spliter = ForwardSpliter() train_idx, valid_idx = spliter.split(np.arange(xy.shape[1]), ENC_LEN, N_TEST + N_VALID) valid_idx, test_idx = spliter.split(valid_idx, ENC_LEN, N_TEST) train_xy = TimeSeries(xy[:, train_idx]) valid_xy = TimeSeries(xy[:, valid_idx]) trn_weight = TimeSeries(weights[:, train_idx]) val_weight = TimeSeries(weights[:, valid_idx]) trn_enc_cat = [Property(xy_cat)] val_enc_cat = [Property(xy_cat)] trn_dec_cat = [Property(xy_cat)] val_dec_cat = [Property(xy_cat)] trn_enc_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)] val_enc_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)] trn_dec_num = [TimeSeries(xy_date[:, train_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, train_idx]), Property(xy_daily_auto_corr)] val_dec_num = [TimeSeries(xy_date[:, valid_idx]), Property(xy_holiday_mean), TimeSeries(xy_lags[:, valid_idx]), Property(xy_daily_auto_corr)] train_frame = Seq2SeqDataLoader(train_xy, batch_size=8, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True, mode='train', time_free_space=0, enc_num_feats=trn_enc_num, enc_cat_feats=trn_enc_cat, dec_num_feats=trn_dec_num, dec_cat_feats=trn_dec_cat, weights=trn_weight, seq_last=False) valid_frame = Seq2SeqDataLoader(valid_xy, batch_size=64, enc_lens=ENC_LEN, dec_lens=DEC_LEN, use_cuda=True, mode='valid', time_free_space=0, time_interval=48, enc_num_feats=val_enc_num, enc_cat_feats=val_enc_cat, dec_num_feats=val_dec_num, dec_cat_feats=val_dec_cat, seq_last=False) model = RNN2RNN(96, hidden_size=256, compress_size=128, enc_num_size=40, enc_cat_size=[(62, 4)], dec_num_size=40, dec_cat_size=[(62, 4)], residual=True, beta1=.0, beta2=.0, attn_heads=1, attn_size=128, num_layers=1, dropout=0.0, rnn_type='GRU') opt = Adam(model.parameters(), 0.001) loss_fn = MSELoss() model.cuda() lr_scheduler = ReduceCosineAnnealingLR(opt, 64, eta_min=5e-5) learner = Learner(model, opt, './power_preds', verbose=20, lr_scheduler=lr_scheduler) learner.fit(300, train_frame, valid_frame, patient=128, start_save=1, early_stopping=True) learner.load(299) learner.model.eval() preds = [] trues = [] for batch in valid_frame: batch[0].pop('dec_x') preds.append(learner.model(**batch[0])[0]) trues.append(batch[1]) trues = torch.cat(trues, 2).squeeze().cpu().numpy() * xy_std + xy_mean preds = torch.cat(preds, 2).squeeze().detach().cpu().numpy() * xy_std + xy_mean k = 42 plt.plot(trues[k].reshape(-1)) plt.plot(preds[k].reshape(-1), label='preds') plt.legend() test_xy = torch.as_tensor(xy[:, test_idx]).float().cuda() test_xy_num_feats = torch.as_tensor( 
np.concatenate([xy_date[:, test_idx], np.repeat(np.expand_dims(xy_holiday_mean, 1), len(test_idx), 1), xy_lags[:, test_idx], np.repeat(np.expand_dims(xy_daily_auto_corr, 1), len(test_idx), 1)], axis=2)).float().cuda() test_xy_cat_feats = torch.as_tensor(np.repeat(np.expand_dims(xy_cat, 1), test_xy.shape[1], 1)).long().cuda() def plot(x_true, y_true, y_pred): enc_ticks = np.arange(x_true.shape[1]) dec_ticks = np.arange(y_pred.shape[1]) + x_true.shape[1] for idx in range(x_true.shape[0]): plt.figure(figsize=(12, 3)) plt.plot(enc_ticks, x_true[idx]) plt.plot(dec_ticks, y_pred[idx], label='pred') plt.plot(dec_ticks, y_true[idx], label='true') plt.title(idx) plt.legend() def wmape(y_hat, y): scores = [] for day in range(int(y.shape[0] / 24)): scores.append(np.abs(y[day * 24: (day + 1) * 24] - y_hat[day * 24: (day + 1) * 24]).sum() / np.sum( y[day * 24: (day + 1) * 24])) return scores def metric(y_true, y_pred): scores = {} for idx, name in enumerate(power_daily.index): scores[name] = wmape(y_pred[idx], y_true[idx]) return pd.DataFrame(scores) def predict(learner, xy, x_num, x_cat, y_num, y_cat): preds = [] days = int(xy.shape[1] - ENC_LEN - DEC_LEN + 1) for step in range(days): step_pred = learner.model( xy[:, step: step + ENC_LEN], enc_num=x_num[:, step: step + ENC_LEN], dec_num=y_num[:, step + ENC_LEN: step + ENC_LEN + DEC_LEN], enc_cat=x_cat[:, step: step + ENC_LEN], dec_cat=y_cat[:, step + ENC_LEN: step + ENC_LEN + DEC_LEN], dec_len=DEC_LEN )[0].cpu().detach().numpy() preds.append(step_pred[:, [1]]) preds = np.concatenate(preds, axis=1) preds = preds.squeeze() * xy_std + xy_mean x_true = xy[:, :ENC_LEN + 1].cpu().numpy().squeeze() * xy_std + xy_mean y_true = xy[:, ENC_LEN + 1:].cpu().numpy().squeeze() * xy_std + xy_mean return x_true, y_true, preds norm_data = pd.read_csv("./data/20200315_20200415.csv").drop(['Unnamed: 0', 'model_name'], axis=1) norm_data = norm_data[norm_data.contributor_id.isin(power_daily.index)].reset_index(drop=True) norm_data['data_time'] = pd.to_datetime(norm_data.data_time) norm_data = norm_data.set_index("data_time").groupby("contributor_id").resample('15min')[['forecast_pwr', 'value']].sum().reset_index() norm_true = norm_data.pivot(index='contributor_id', columns='data_time', values='value').iloc[:, 48:] norm_pred = norm_data.pivot(index='contributor_id', columns='data_time', values='forecast_pwr').iloc[:, 48:] x_true, y_true, y_pred = predict(learner, test_xy, test_xy_num_feats, test_xy_cat_feats, test_xy_num_feats, test_xy_cat_feats) metric(y_true, y_pred).mean().rename("wave").describe() scores = pd.DataFrame([metric(y_true, y_pred).mean().rename("wave"), metric(norm_true.values, norm_pred.values).mean().rename("v1")]).T.dropna() scores.describe() # attn 1 mean: 0.19 50%: 0.154 # attn 3 mean: 0.187 50%: 0.156 # attn 0 mean: 0.188 50%: 0.159 # add reg mean: 0.187 50% 0.156 # 128 compress attn: 0.180 50% 0.15 # 2 layer: 0.189 0.153 # use 12 dim: 0.187 0.150 # no res : 0.179 0.154 # enc 14 day: 0.188 0.162 # enc 2 day : 0.19 0.157 plot(x_true.reshape(62, -1), y_true.reshape(62, -1), y_pred.reshape(62, -1))
result[h] = x.loc[:, holidays[h].values.astype(bool)].agg(func, axis=1).values
conditional_block
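The conditional_block middle above fills one column of holiday aggregates per holiday: it masks the date columns flagged for that holiday and applies the aggregation row-wise. A toy illustration with made-up data (the function body is copied from the rows above):

import numpy as np
import pandas as pd

def holiday_apply(x, holidays, func):
    result = pd.DataFrame()
    for h in holidays.columns:
        result[h] = x.loc[:, holidays[h].values.astype(bool)].agg(func, axis=1).values
    return result

dates = pd.date_range("2020-01-01", periods=3)
x = pd.DataFrame([[10.0, 20.0, 30.0]], columns=dates)
holidays = pd.DataFrame({"h1": [1, 0, 1]}, index=dates)
print(holiday_apply(x, holidays, np.mean))         # h1 -> 20.0, the mean over the flagged dates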
authconn_internal.py
# -*- coding: utf-8 -*-

# Copyright 2018 Telefonica S.A.
# Copyright 2018 ALTRAN Innovación S.L.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact: esousa@whitestack.com or glavado@whitestack.com
##

"""
AuthconnInternal implements the connector for
OSM Internal Authentication Backend and leverages the RBAC model
"""

__author__ = "Pedro de la Cruz Ramos <pdelacruzramos@altran.com>"
__date__ = "$06-jun-2019 11:16:08$"

from authconn import Authconn, AuthException
from osm_common.dbbase import DbException
from base_topic import BaseTopic

import logging
from time import time
from http import HTTPStatus
from uuid import uuid4
from hashlib import sha256
from copy import deepcopy
from random import choice as random_choice


class AuthconnInternal(Authconn):
    def __init__(self, config, db, token_cache):
        Authconn.__init__(self, config)
        self.logger = logging.getLogger("nbi.authenticator.internal")

        # Get Configuration
        # self.xxx = config.get("xxx", "default")

        self.db = db
        self.token_cache = token_cache

        # To be Confirmed
        self.auth = None
        self.sess = None

    # def create_token (self, user, password, projects=[], project=None, remote=None):
    # Not Required

    # def authenticate_with_user_password(self, user, password, project=None, remote=None):
    # Not Required

    # def authenticate_with_token(self, token, project=None, remote=None):
    # Not Required

    # def get_user_project_list(self, token):
    # Not Required

    # def get_user_role_list(self, token):
    # Not Required

    # def create_user(self, user, password):
    # Not Required

    # def change_password(self, user, new_password):
    # Not Required

    # def delete_user(self, user_id):
    # Not Required

    # def get_user_list(self, filter_q={}):
    # Not Required

    # def get_project_list(self, filter_q={}):
    # Not required

    # def create_project(self, project):
    # Not required

    # def delete_project(self, project_id):
    # Not required

    # def assign_role_to_user(self, user, project, role):
    # Not required in Phase 1

    # def remove_role_from_user(self, user, project, role):
    # Not required in Phase 1

    def validate_token(self, token):
        """
        Check if the token is valid.

        :param token: token to validate
        :return: dictionary with information associated with the token:
            "_id": token id
            "project_id": project id
            "project_name": project name
            "user_id": user id
            "username": user name
            "roles": list with dict containing {name, id}
            "expires": expiration date
        If the token is not valid an exception is raised.
        """
        try:
            if not token:
                raise AuthException("Needed a token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)

            # try to get from cache first
            now = time()
            token_info = self.token_cache.get(token)
            if token_info and token_info["expires"] < now:
                # delete token. MUST be done with care, as another thread may have already deleted it.
Do not use del
                self.token_cache.pop(token, None)
                token_info = None

            # get from database if not in cache
            if not token_info:
                token_info = self.db.get_one("tokens", {"_id": token})
                if token_info["expires"] < now:
                    raise AuthException("Expired Token or Authorization HTTP header",
                                        http_code=HTTPStatus.UNAUTHORIZED)

            return token_info
        except DbException as e:
            if e.http_code == HTTPStatus.NOT_FOUND:
                raise AuthException("Invalid Token or Authorization HTTP header",
                                    http_code=HTTPStatus.UNAUTHORIZED)
            else:
                raise
        except AuthException:
            if self.config["global"].get("test.user_not_authorized"):
                return {"id": "fake-token-id-for-test",
                        "project_id": self.config["global"].get("test.project_not_authorized", "admin"),
                        "username": self.config["global"]["test.user_not_authorized"], "admin": True}
            else:
                raise
        except Exception:
            self.logger.exception("Error during token validation using internal backend")
            raise AuthException("Error during token validation using internal backend",
                                http_code=HTTPStatus.UNAUTHORIZED)

    def revoke_token(self, token):
        """
        Invalidate a token.

        :param token: token to be revoked
        """
        try:
            self.token_cache.pop(token, None)
            self.db.del_one("tokens", {"_id": token})
            return True
        except DbException as e:
            if e.http_code == HTTPStatus.NOT_FOUND:
                raise AuthException("Token '{}' not found".format(token), http_code=HTTPStatus.NOT_FOUND)
            else:
                # raise
                msg = "Error during token revocation using internal backend"
                self.logger.exception(msg)
                raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED)

    def authenticate(self, user, password, project=None, token_info=None):
        """
        Authenticate a user using username/password or previous token_info plus project; it creates a new token

        :param user: user: name, id or None
        :param password: password or None
        :param project: name, id, or None. If None, the first project found will be used to get a scoped token
        :param token_info: previous token_info to obtain authorization
        :param remote: remote host information
        :return: the scoped token info or raises an exception. The token is a dictionary with:
            _id: token string id,
            username: username,
            project_id: scoped_token project_id,
            project_name: scoped_token project_name,
            expires: epoch time when it expires,
        """
        now = time()
        user_content = None

        try:
            # Try using username/password
            if user:
                user_rows = self.db.get_list("users", {BaseTopic.id_field("users", user): user})
                if user_rows:
                    user_content = user_rows[0]
                    salt = user_content["_admin"]["salt"]
                    shadow_password = sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest()
                    if shadow_password != user_content["password"]:
                        user_content = None
                if not user_content:
                    raise AuthException("Invalid username/password", http_code=HTTPStatus.UNAUTHORIZED)
            elif token_info:
                user_rows = self.db.get_list("users", {"username": token_info["username"]})
                if user_rows:
                    user_content = user_rows[0]
                else:
                    raise AuthException("Invalid token", http_code=HTTPStatus.UNAUTHORIZED)
            else:
                raise AuthException("Provide credentials: username/password or Authorization Bearer token",
                                    http_code=HTTPStatus.UNAUTHORIZED)

            token_id = ''.join(random_choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')
                               for _ in range(0, 32))

            # TODO when user contains project_role_mappings with project_id, project_name this checking to
proj = self.db.get_one("projects", {BaseTopic.id_field("projects", project): project})
            if proj["_id"] not in user_content["projects"] and proj["name"] not in user_content["projects"]:
                raise AuthException("project {} not allowed for this user".format(project),
                                    http_code=HTTPStatus.UNAUTHORIZED)

            # TODO remove admin, this will be used by roles RBAC
            if proj["name"] == "admin":
                token_admin = True
            else:
                token_admin = proj.get("admin", False)

            # TODO add token roles - PROVISIONAL. Get this list from user_content["project_role_mappings"]
            role_id = self.db.get_one("roles", {"name": "system_admin"})["_id"]
            roles_list = [{"name": "system_admin", "id": role_id}]

            new_token = {"issued_at": now, "expires": now + 3600,
                         "_id": token_id, "id": token_id,
                         "project_id": proj["_id"], "project_name": proj["name"],
                         "username": user_content["username"], "user_id": user_content["_id"],
                         "admin": token_admin, "roles": roles_list,
                         }

            self.token_cache[token_id] = new_token
            self.db.create("tokens", new_token)
            return deepcopy(new_token)
        except Exception as e:
            msg = "Error during user authentication using internal backend: {}".format(e)
            self.logger.exception(msg)
            raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED)

    def get_role_list(self):
        """
        Get role list.

        :return: returns the list of roles.
        """
        try:
            role_list = self.db.get_list("roles")
            roles = [{"name": role["name"], "_id": role["_id"]} for role in role_list]   # if role.name != "service" ?
            return roles
        except Exception:
            raise AuthException("Error during role listing using internal backend", http_code=HTTPStatus.UNAUTHORIZED)

    def create_role(self, role):
        """
        Create a role.

        :param role: role name.
        :raises AuthconnOperationException: if role creation failed.
        """
        # try:
        # TODO: Check that role name does not exist ?
        return str(uuid4())
        # except Exception:
        #     raise AuthconnOperationException("Error during role creation using internal backend")
        # except Conflict as ex:
        #     self.logger.info("Duplicate entry: %s", str(ex))

    def delete_role(self, role_id):
        """
        Delete a role.

        :param role_id: role identifier.
        :raises AuthconnOperationException: if role deletion failed.
        """
        # try:
        # TODO: Check that role exists ?
        return True
        # except Exception:
        #     raise AuthconnOperationException("Error during role deletion using internal backend")
# database will not be needed if not project: project = user_content["projects"][0] # To allow project names in project_id
random_line_split
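The authenticate() method above verifies credentials by hashing the candidate password together with the user's stored salt and comparing against the stored shadow. A standalone sketch of that exact check (the salt and password values here are made up):

from hashlib import sha256

salt = "1f2d3c4b"
stored_shadow = sha256("s3cret".encode('utf-8') + salt.encode('utf-8')).hexdigest()

def check_password(candidate, salt, shadow):
    # Same construction as in authenticate(): sha256(password + salt), hex-encoded.
    return sha256(candidate.encode('utf-8') + salt.encode('utf-8')).hexdigest() == shadow

assert check_password("s3cret", salt, stored_shadow)
assert not check_password("wrong", salt, stored_shadow)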
authconn_internal.py
# -*- coding: utf-8 -*-

# Copyright 2018 Telefonica S.A.
# Copyright 2018 ALTRAN Innovación S.L.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
# For those usages not covered by the Apache License, Version 2.0 please
# contact: esousa@whitestack.com or glavado@whitestack.com
##

"""
AuthconnInternal implements the connector for
OSM Internal Authentication Backend and leverages the RBAC model
"""

__author__ = "Pedro de la Cruz Ramos <pdelacruzramos@altran.com>"
__date__ = "$06-jun-2019 11:16:08$"

from authconn import Authconn, AuthException
from osm_common.dbbase import DbException
from base_topic import BaseTopic

import logging
from time import time
from http import HTTPStatus
from uuid import uuid4
from hashlib import sha256
from copy import deepcopy
from random import choice as random_choice


class AuthconnInternal(Authconn):
    def _
self, config, db, token_cache):
        Authconn.__init__(self, config)
        self.logger = logging.getLogger("nbi.authenticator.internal")

        # Get Configuration
        # self.xxx = config.get("xxx", "default")

        self.db = db
        self.token_cache = token_cache

        # To be Confirmed
        self.auth = None
        self.sess = None

    # def create_token (self, user, password, projects=[], project=None, remote=None):
    # Not Required

    # def authenticate_with_user_password(self, user, password, project=None, remote=None):
    # Not Required

    # def authenticate_with_token(self, token, project=None, remote=None):
    # Not Required

    # def get_user_project_list(self, token):
    # Not Required

    # def get_user_role_list(self, token):
    # Not Required

    # def create_user(self, user, password):
    # Not Required

    # def change_password(self, user, new_password):
    # Not Required

    # def delete_user(self, user_id):
    # Not Required

    # def get_user_list(self, filter_q={}):
    # Not Required

    # def get_project_list(self, filter_q={}):
    # Not required

    # def create_project(self, project):
    # Not required

    # def delete_project(self, project_id):
    # Not required

    # def assign_role_to_user(self, user, project, role):
    # Not required in Phase 1

    # def remove_role_from_user(self, user, project, role):
    # Not required in Phase 1

    def validate_token(self, token):
        """
        Check if the token is valid.

        :param token: token to validate
        :return: dictionary with information associated with the token:
            "_id": token id
            "project_id": project id
            "project_name": project name
            "user_id": user id
            "username": user name
            "roles": list with dict containing {name, id}
            "expires": expiration date
        If the token is not valid an exception is raised.
        """
        try:
            if not token:
                raise AuthException("Needed a token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED)

            # try to get from cache first
            now = time()
            token_info = self.token_cache.get(token)
            if token_info and token_info["expires"] < now:
                # delete token. MUST be done with care, as another thread may have already deleted it. Do not use del
                self.token_cache.pop(token, None)
                token_info = None

            # get from database if not in cache
            if not token_info:
                token_info = self.db.get_one("tokens", {"_id": token})
                if token_info["expires"] < now:
                    raise AuthException("Expired Token or Authorization HTTP header",
                                        http_code=HTTPStatus.UNAUTHORIZED)

            return token_info
        except DbException as e:
            if e.http_code == HTTPStatus.NOT_FOUND:
                raise AuthException("Invalid Token or Authorization HTTP header",
                                    http_code=HTTPStatus.UNAUTHORIZED)
            else:
                raise
        except AuthException:
            if self.config["global"].get("test.user_not_authorized"):
                return {"id": "fake-token-id-for-test",
                        "project_id": self.config["global"].get("test.project_not_authorized", "admin"),
                        "username": self.config["global"]["test.user_not_authorized"], "admin": True}
            else:
                raise
        except Exception:
            self.logger.exception("Error during token validation using internal backend")
            raise AuthException("Error during token validation using internal backend",
                                http_code=HTTPStatus.UNAUTHORIZED)

    def revoke_token(self, token):
        """
        Invalidate a token.
:param token: token to be revoked """ try: self.token_cache.pop(token, None) self.db.del_one("tokens", {"_id": token}) return True except DbException as e: if e.http_code == HTTPStatus.NOT_FOUND: raise AuthException("Token '{}' not found".format(token), http_code=HTTPStatus.NOT_FOUND) else: # raise msg = "Error during token revocation using internal backend" self.logger.exception(msg) raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED) def authenticate(self, user, password, project=None, token_info=None): """ Authenticate a user using username/password or previous token_info plus project; its creates a new token :param user: user: name, id or None :param password: password or None :param project: name, id, or None. If None first found project will be used to get an scope token :param token_info: previous token_info to obtain authorization :param remote: remote host information :return: the scoped token info or raises an exception. The token is a dictionary with: _id: token string id, username: username, project_id: scoped_token project_id, project_name: scoped_token project_name, expires: epoch time when it expires, """ now = time() user_content = None try: # Try using username/password if user: user_rows = self.db.get_list("users", {BaseTopic.id_field("users", user): user}) if user_rows: user_content = user_rows[0] salt = user_content["_admin"]["salt"] shadow_password = sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest() if shadow_password != user_content["password"]: user_content = None if not user_content: raise AuthException("Invalid username/password", http_code=HTTPStatus.UNAUTHORIZED) elif token_info: user_rows = self.db.get_list("users", {"username": token_info["username"]}) if user_rows: user_content = user_rows[0] else: raise AuthException("Invalid token", http_code=HTTPStatus.UNAUTHORIZED) else: raise AuthException("Provide credentials: username/password or Authorization Bearer token", http_code=HTTPStatus.UNAUTHORIZED) token_id = ''.join(random_choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for _ in range(0, 32)) # TODO when user contained project_role_mappings with project_id,project_ name this checking to # database will not be needed if not project: project = user_content["projects"][0] # To allow project names in project_id proj = self.db.get_one("projects", {BaseTopic.id_field("projects", project): project}) if proj["_id"] not in user_content["projects"] and proj["name"] not in user_content["projects"]: raise AuthException("project {} not allowed for this user".format(project), http_code=HTTPStatus.UNAUTHORIZED) # TODO remove admin, this vill be used by roles RBAC if proj["name"] == "admin": token_admin = True else: token_admin = proj.get("admin", False) # TODO add token roles - PROVISIONAL. 
Get this list from user_content["project_role_mappings"] role_id = self.db.get_one("roles", {"name": "system_admin"})["_id"] roles_list = [{"name": "system_admin", "id": role_id}] new_token = {"issued_at": now, "expires": now + 3600, "_id": token_id, "id": token_id, "project_id": proj["_id"], "project_name": proj["name"], "username": user_content["username"], "user_id": user_content["_id"], "admin": token_admin, "roles": roles_list, } self.token_cache[token_id] = new_token self.db.create("tokens", new_token) return deepcopy(new_token) except Exception as e: msg = "Error during user authentication using internal backend: {}".format(e) self.logger.exception(msg) raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED) def get_role_list(self): """ Get role list. :return: returns the list of roles. """ try: role_list = self.db.get_list("roles") roles = [{"name": role["name"], "_id": role["_id"]} for role in role_list] # if role.name != "service" ? return roles except Exception: raise AuthException("Error during role listing using internal backend", http_code=HTTPStatus.UNAUTHORIZED) def create_role(self, role): """ Create a role. :param role: role name. :raises AuthconnOperationException: if role creation failed. """ # try: # TODO: Check that role name does not exist ? return str(uuid4()) # except Exception: # raise AuthconnOperationException("Error during role creation using internal backend") # except Conflict as ex: # self.logger.info("Duplicate entry: %s", str(ex)) def delete_role(self, role_id): """ Delete a role. :param role_id: role identifier. :raises AuthconnOperationException: if role deletion failed. """ # try: # TODO: Check that role exists ? return True # except Exception: # raise AuthconnOperationException("Error during role deletion using internal backend")
_init__(
identifier_name
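The validate_token method in the record above follows a cache-aside pattern: look the token up in an in-memory cache first, evict it if expired, and fall back to the database. Below is a minimal self-contained sketch of that pattern; the TokenStore class and the db_get_one callable are illustrative assumptions, not part of the NBI code, and unlike the original the sketch also writes the database result back into the cache.

from time import time

class TokenStore:
    def __init__(self, db_get_one):
        self._cache = {}                  # token id -> token_info dict (assumption)
        self._db_get_one = db_get_one     # stand-in for self.db.get_one

    def validate(self, token):
        now = time()
        token_info = self._cache.get(token)
        if token_info and token_info["expires"] < now:
            # pop() tolerates a concurrent delete; del would raise KeyError
            self._cache.pop(token, None)
            token_info = None
        if not token_info:
            token_info = self._db_get_one("tokens", {"_id": token})
            if token_info["expires"] < now:
                raise ValueError("expired token")
            self._cache[token] = token_info   # write-back, which the original omits
        return token_info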
authconn_internal.py
# -*- coding: utf-8 -*- # Copyright 2018 Telefonica S.A. # Copyright 2018 ALTRAN Innovación S.L. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # For those usages not covered by the Apache License, Version 2.0 please # contact: esousa@whitestack.com or glavado@whitestack.com ## """ AuthconnInternal implements the connector for the OSM Internal Authentication Backend and leverages the RBAC model """ __author__ = "Pedro de la Cruz Ramos <pdelacruzramos@altran.com>" __date__ = "$06-jun-2019 11:16:08$" from authconn import Authconn, AuthException from osm_common.dbbase import DbException from base_topic import BaseTopic import logging from time import time from http import HTTPStatus from uuid import uuid4 from hashlib import sha256 from copy import deepcopy from random import choice as random_choice class AuthconnInternal(Authconn): def __init__(self, config, db, token_cache): Authconn.__init__(self, config) self.logger = logging.getLogger("nbi.authenticator.internal") # Get Configuration # self.xxx = config.get("xxx", "default") self.db = db self.token_cache = token_cache # To be Confirmed self.auth = None self.sess = None # def create_token (self, user, password, projects=[], project=None, remote=None): # Not Required # def authenticate_with_user_password(self, user, password, project=None, remote=None): # Not Required # def authenticate_with_token(self, token, project=None, remote=None): # Not Required # def get_user_project_list(self, token): # Not Required # def get_user_role_list(self, token): # Not Required # def create_user(self, user, password): # Not Required # def change_password(self, user, new_password): # Not Required # def delete_user(self, user_id): # Not Required # def get_user_list(self, filter_q={}): # Not Required # def get_project_list(self, filter_q={}): # Not required # def create_project(self, project): # Not required # def delete_project(self, project_id): # Not required # def assign_role_to_user(self, user, project, role): # Not required in Phase 1 # def remove_role_from_user(self, user, project, role): # Not required in Phase 1 def validate_token(self, token): """ Check if the token is valid. :param token: token to validate :return: dictionary with information associated with the token: "_id": token id "project_id": project id "project_name": project name "user_id": user id "username": user name "roles": list with dict containing {name, id} "expires": expiration date If the token is not valid an exception is raised. """ try: if not token: raise AuthException("Needed a token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED) # try to get from cache first now = time() token_info = self.token_cache.get(token) if token_info and token_info["expires"] < now: # delete token. MUST be done with care, as another thread may have already deleted it. 
Do not use del self.token_cache.pop(token, None) token_info = None # get from database if not in cache if not token_info: token_info = self.db.get_one("tokens", {"_id": token}) if token_info["expires"] < now: raise AuthException("Expired Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED) return token_info except DbException as e: if e.http_code == HTTPStatus.NOT_FOUND: raise AuthException("Invalid Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED) else: raise except AuthException: if self.config["global"].get("test.user_not_authorized"): return {"id": "fake-token-id-for-test", "project_id": self.config["global"].get("test.project_not_authorized", "admin"), "username": self.config["global"]["test.user_not_authorized"], "admin": True} else: raise except Exception: self.logger.exception("Error during token validation using internal backend") raise AuthException("Error during token validation using internal backend", http_code=HTTPStatus.UNAUTHORIZED) def revoke_token(self, token): "
def authenticate(self, user, password, project=None, token_info=None): """ Authenticate a user using username/password or previous token_info plus project; its creates a new token :param user: user: name, id or None :param password: password or None :param project: name, id, or None. If None first found project will be used to get an scope token :param token_info: previous token_info to obtain authorization :param remote: remote host information :return: the scoped token info or raises an exception. The token is a dictionary with: _id: token string id, username: username, project_id: scoped_token project_id, project_name: scoped_token project_name, expires: epoch time when it expires, """ now = time() user_content = None try: # Try using username/password if user: user_rows = self.db.get_list("users", {BaseTopic.id_field("users", user): user}) if user_rows: user_content = user_rows[0] salt = user_content["_admin"]["salt"] shadow_password = sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest() if shadow_password != user_content["password"]: user_content = None if not user_content: raise AuthException("Invalid username/password", http_code=HTTPStatus.UNAUTHORIZED) elif token_info: user_rows = self.db.get_list("users", {"username": token_info["username"]}) if user_rows: user_content = user_rows[0] else: raise AuthException("Invalid token", http_code=HTTPStatus.UNAUTHORIZED) else: raise AuthException("Provide credentials: username/password or Authorization Bearer token", http_code=HTTPStatus.UNAUTHORIZED) token_id = ''.join(random_choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for _ in range(0, 32)) # TODO when user contained project_role_mappings with project_id,project_ name this checking to # database will not be needed if not project: project = user_content["projects"][0] # To allow project names in project_id proj = self.db.get_one("projects", {BaseTopic.id_field("projects", project): project}) if proj["_id"] not in user_content["projects"] and proj["name"] not in user_content["projects"]: raise AuthException("project {} not allowed for this user".format(project), http_code=HTTPStatus.UNAUTHORIZED) # TODO remove admin, this vill be used by roles RBAC if proj["name"] == "admin": token_admin = True else: token_admin = proj.get("admin", False) # TODO add token roles - PROVISIONAL. Get this list from user_content["project_role_mappings"] role_id = self.db.get_one("roles", {"name": "system_admin"})["_id"] roles_list = [{"name": "system_admin", "id": role_id}] new_token = {"issued_at": now, "expires": now + 3600, "_id": token_id, "id": token_id, "project_id": proj["_id"], "project_name": proj["name"], "username": user_content["username"], "user_id": user_content["_id"], "admin": token_admin, "roles": roles_list, } self.token_cache[token_id] = new_token self.db.create("tokens", new_token) return deepcopy(new_token) except Exception as e: msg = "Error during user authentication using internal backend: {}".format(e) self.logger.exception(msg) raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED) def get_role_list(self): """ Get role list. :return: returns the list of roles. """ try: role_list = self.db.get_list("roles") roles = [{"name": role["name"], "_id": role["_id"]} for role in role_list] # if role.name != "service" ? return roles except Exception: raise AuthException("Error during role listing using internal backend", http_code=HTTPStatus.UNAUTHORIZED) def create_role(self, role): """ Create a role. :param role: role name. 
:raises AuthconnOperationException: if role creation failed. """ # try: # TODO: Check that role name does not exist ? return str(uuid4()) # except Exception: # raise AuthconnOperationException("Error during role creation using internal backend") # except Conflict as ex: # self.logger.info("Duplicate entry: %s", str(ex)) def delete_role(self, role_id): """ Delete a role. :param role_id: role identifier. :raises AuthconnOperationException: if role deletion failed. """ # try: # TODO: Check that role exists ? return True # except Exception: # raise AuthconnOperationException("Error during role deletion using internal backend")
"" Invalidate a token. :param token: token to be revoked """ try: self.token_cache.pop(token, None) self.db.del_one("tokens", {"_id": token}) return True except DbException as e: if e.http_code == HTTPStatus.NOT_FOUND: raise AuthException("Token '{}' not found".format(token), http_code=HTTPStatus.NOT_FOUND) else: # raise msg = "Error during token revocation using internal backend" self.logger.exception(msg) raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED)
identifier_body
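The authenticate method in this record verifies credentials by hashing the supplied password together with the per-user salt and comparing the result against the stored digest. A small standalone sketch of that scheme follows; make_shadow and check_password are hypothetical helper names, while the sha256(password + salt) construction matches the code above. For production use, hmac.compare_digest would guard the comparison against timing attacks.

from hashlib import sha256
from os import urandom

def make_shadow(password, salt):
    # Same construction as the code above: hex digest of sha256(password + salt)
    return sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest()

def check_password(password, salt, stored_shadow):
    return make_shadow(password, salt) == stored_shadow

salt = urandom(8).hex()
stored = make_shadow("s3cret", salt)
assert check_password("s3cret", salt, stored)
assert not check_password("wrong", salt, stored)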
authconn_internal.py
# -*- coding: utf-8 -*- # Copyright 2018 Telefonica S.A. # Copyright 2018 ALTRAN Innovación S.L. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. # # For those usages not covered by the Apache License, Version 2.0 please # contact: esousa@whitestack.com or glavado@whitestack.com ## """ AuthconnInternal implements implements the connector for OSM Internal Authentication Backend and leverages the RBAC model """ __author__ = "Pedro de la Cruz Ramos <pdelacruzramos@altran.com>" __date__ = "$06-jun-2019 11:16:08$" from authconn import Authconn, AuthException from osm_common.dbbase import DbException from base_topic import BaseTopic import logging from time import time from http import HTTPStatus from uuid import uuid4 from hashlib import sha256 from copy import deepcopy from random import choice as random_choice class AuthconnInternal(Authconn): def __init__(self, config, db, token_cache): Authconn.__init__(self, config) self.logger = logging.getLogger("nbi.authenticator.internal") # Get Configuration # self.xxx = config.get("xxx", "default") self.db = db self.token_cache = token_cache # To be Confirmed self.auth = None self.sess = None # def create_token (self, user, password, projects=[], project=None, remote=None): # Not Required # def authenticate_with_user_password(self, user, password, project=None, remote=None): # Not Required # def authenticate_with_token(self, token, project=None, remote=None): # Not Required # def get_user_project_list(self, token): # Not Required # def get_user_role_list(self, token): # Not Required # def create_user(self, user, password): # Not Required # def change_password(self, user, new_password): # Not Required # def delete_user(self, user_id): # Not Required # def get_user_list(self, filter_q={}): # Not Required # def get_project_list(self, filter_q={}): # Not required # def create_project(self, project): # Not required # def delete_project(self, project_id): # Not required # def assign_role_to_user(self, user, project, role): # Not required in Phase 1 # def remove_role_from_user(self, user, project, role): # Not required in Phase 1 def validate_token(self, token): """ Check if the token is valid. :param token: token to validate :return: dictionary with information associated with the token: "_id": token id "project_id": project id "project_name": project name "user_id": user id "username": user name "roles": list with dict containing {name, id} "expires": expiration date If the token is not valid an exception is raised. """ try: if not token: raise AuthException("Needed a token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED) # try to get from cache first now = time() token_info = self.token_cache.get(token) if token_info and token_info["expires"] < now: # delete token. MUST be done with care, as another thread maybe already delete it. 
Do not use del self.token_cache.pop(token, None) token_info = None # get from database if not in cache if not token_info: token_info = self.db.get_one("tokens", {"_id": token}) if token_info["expires"] < now: raise AuthException("Expired Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED) return token_info except DbException as e: if e.http_code == HTTPStatus.NOT_FOUND: raise AuthException("Invalid Token or Authorization HTTP header", http_code=HTTPStatus.UNAUTHORIZED) else: raise except AuthException: if self.config["global"].get("test.user_not_authorized"): return {"id": "fake-token-id-for-test", "project_id": self.config["global"].get("test.project_not_authorized", "admin"), "username": self.config["global"]["test.user_not_authorized"], "admin": True} else: raise except Exception: self.logger.exception("Error during token validation using internal backend") raise AuthException("Error during token validation using internal backend", http_code=HTTPStatus.UNAUTHORIZED) def revoke_token(self, token): """ Invalidate a token. :param token: token to be revoked """ try: self.token_cache.pop(token, None) self.db.del_one("tokens", {"_id": token}) return True except DbException as e: if e.http_code == HTTPStatus.NOT_FOUND: r
else: # raise msg = "Error during token revocation using internal backend" self.logger.exception(msg) raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED) def authenticate(self, user, password, project=None, token_info=None): """ Authenticate a user using username/password or previous token_info plus project; its creates a new token :param user: user: name, id or None :param password: password or None :param project: name, id, or None. If None first found project will be used to get an scope token :param token_info: previous token_info to obtain authorization :param remote: remote host information :return: the scoped token info or raises an exception. The token is a dictionary with: _id: token string id, username: username, project_id: scoped_token project_id, project_name: scoped_token project_name, expires: epoch time when it expires, """ now = time() user_content = None try: # Try using username/password if user: user_rows = self.db.get_list("users", {BaseTopic.id_field("users", user): user}) if user_rows: user_content = user_rows[0] salt = user_content["_admin"]["salt"] shadow_password = sha256(password.encode('utf-8') + salt.encode('utf-8')).hexdigest() if shadow_password != user_content["password"]: user_content = None if not user_content: raise AuthException("Invalid username/password", http_code=HTTPStatus.UNAUTHORIZED) elif token_info: user_rows = self.db.get_list("users", {"username": token_info["username"]}) if user_rows: user_content = user_rows[0] else: raise AuthException("Invalid token", http_code=HTTPStatus.UNAUTHORIZED) else: raise AuthException("Provide credentials: username/password or Authorization Bearer token", http_code=HTTPStatus.UNAUTHORIZED) token_id = ''.join(random_choice('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789') for _ in range(0, 32)) # TODO when user contained project_role_mappings with project_id,project_ name this checking to # database will not be needed if not project: project = user_content["projects"][0] # To allow project names in project_id proj = self.db.get_one("projects", {BaseTopic.id_field("projects", project): project}) if proj["_id"] not in user_content["projects"] and proj["name"] not in user_content["projects"]: raise AuthException("project {} not allowed for this user".format(project), http_code=HTTPStatus.UNAUTHORIZED) # TODO remove admin, this vill be used by roles RBAC if proj["name"] == "admin": token_admin = True else: token_admin = proj.get("admin", False) # TODO add token roles - PROVISIONAL. Get this list from user_content["project_role_mappings"] role_id = self.db.get_one("roles", {"name": "system_admin"})["_id"] roles_list = [{"name": "system_admin", "id": role_id}] new_token = {"issued_at": now, "expires": now + 3600, "_id": token_id, "id": token_id, "project_id": proj["_id"], "project_name": proj["name"], "username": user_content["username"], "user_id": user_content["_id"], "admin": token_admin, "roles": roles_list, } self.token_cache[token_id] = new_token self.db.create("tokens", new_token) return deepcopy(new_token) except Exception as e: msg = "Error during user authentication using internal backend: {}".format(e) self.logger.exception(msg) raise AuthException(msg, http_code=HTTPStatus.UNAUTHORIZED) def get_role_list(self): """ Get role list. :return: returns the list of roles. """ try: role_list = self.db.get_list("roles") roles = [{"name": role["name"], "_id": role["_id"]} for role in role_list] # if role.name != "service" ? 
return roles except Exception: raise AuthException("Error during role listing using internal backend", http_code=HTTPStatus.UNAUTHORIZED) def create_role(self, role): """ Create a role. :param role: role name. :raises AuthconnOperationException: if role creation failed. """ # try: # TODO: Check that role name does not exist ? return str(uuid4()) # except Exception: # raise AuthconnOperationException("Error during role creation using internal backend") # except Conflict as ex: # self.logger.info("Duplicate entry: %s", str(ex)) def delete_role(self, role_id): """ Delete a role. :param role_id: role identifier. :raises AuthconnOperationException: if role deletion failed. """ # try: # TODO: Check that role exists ? return True # except Exception: # raise AuthconnOperationException("Error during role deletion using internal backend")
aise AuthException("Token '{}' not found".format(token), http_code=HTTPStatus.NOT_FOUND)
conditional_block
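Token ids in this file are built by sampling 32 characters from an alphanumeric alphabet with random.choice. A sketch of that construction is below, together with the secrets-module variant usually preferred for security tokens; the function names and the ALPHABET constant are illustrative.

import secrets
import string
from random import choice as random_choice

ALPHABET = string.ascii_letters + string.digits   # same 62 characters as the literal above

def token_id_random():
    # The shape used above: 32 characters drawn with random.choice
    return ''.join(random_choice(ALPHABET) for _ in range(32))

def token_id_secrets():
    # secrets.choice uses a CSPRNG, which is preferable for tokens
    return ''.join(secrets.choice(ALPHABET) for _ in range(32))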
setup-cluster-images.py
#!/usr/bin/env python3 """ Usage: setup-cluster-images image-archive [num_nodes [targetdir]] image-archive - zip file as downloaded from raspberry-pi.org num_nodes - number of nodes in the cluster [4] node_prefix - prefix for the cluster nodes [gg] targetdir - destination directory [current directory] """ import sys from _sha256 import sha256 from contextlib import contextmanager from logging import info, basicConfig, debug, DEBUG from os import chdir, getcwd, makedirs, mkdir, geteuid, chmod, chown, stat, unlink, \ listdir, rename from shutil import rmtree, copy2 from subprocess import check_output, check_call from tempfile import mkdtemp from urllib import request from zipfile import ZipFile from os.path import join, abspath, isdir, dirname, isfile # Number of raspberries in the cluster BASE_IP = '192.168.8.2' NODE_COUNT = 4 # prefix for the node names. # nodes will be named <prefix>-master, <prefix>-node1, <prefix>-node2, ... NODE_PREFIX = 'gg' USR_LOCAL_BIN=join('usr', 'local', 'bin') SETUP_NODE_SH=join(USR_LOCAL_BIN, 'setup_node.sh') CFSSL_PROGS_SHA256 = """ 0725a1cca3857392158807b543b75dc6388e2102e8a189792c4da7ac19f750b5 cfssl-bundle 48685e849565cd7d27ac2daf68faa835a5151fd3feac87c6715bcb92d58dc280 cfssl-certinfo 4106c11c61aa9e98b1967adab6db711d2b50a0f02f844329e9ad44f199bdf135 cfssl-newkey 71e41ef447f49ad236d75ec42152625c6fcf6c37122784740bd19b0a7c399560 cfssl-scan 11c708acaf48a69abf6f896f5c6158f7547a3c1bf44e14ca3b3ab440c1f808f1 cfssl e138102329d96f5a67aa168034a256a8376febf4ecde7b8e837c3f2e08b1cd19 cfssljson dac738390bc346b94c497b62a82f75cb05f0dafd5dad7d9dd63dedb9bc31092a mkbundle d53bbc0d2ac2d57c089d4f730d9e7b2d365701adc1bb417153a5f70a16bd10d6 multirootca """ # Shell script to setup the necessary software for kubernetes # FIXME - howto add a static IP # TODO - add static certificates # TODO - add kubeadm call for master PKG_SETUP = """\ #!/bin/sh setup_params="$1" setup_machine_id() { sudo rm -f /etc/machine-id /var/lib/dbus/machine-id sudo dbus-uuidgen --ensure=/etc/machine-id } setup_static_ip() { } set -e nodename=`awk -F= '/^nodename=/ { print $2 }' "$setup_params"` ipaddress=`awk -F= '/^ip=/ { print $2 }' "$setup_params"` sudo hostname "$nodename" setup_static_ip "$ipaddress" curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y sudo apt-get install -y policykit-1 docker-ce setup_machine_id sudo dphys-swapfile swapoff sudo dphys-swapfile uninstall sudo update-rc.d dphys-swapfile remove echo "Getting kubernetes packages" sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni sudo /usr/bin/raspi-config --expand-rootfs """ SETUP_SCRIPT = """ if [[ -e /boot/setup.txt ]] ; then tmp=`mktemp` mv /boot/setup.txt "$tmp" sh -x "/%s" "$tmp" >/boot/setup.log 2>&1 rm -f "$tmp" fi """ % SETUP_NODE_SH def absjoin(*params): return abspath(join(*params)) # FIXME - add comments to the methods class ClusterSetup: def __call__(self, archive, node_names, targetdir, ipbase): targetinfo = stat(targetdir) with self._mktemp(): info('Download cfssl') cfssldir = abspath('cfssl') self._download_cfssl(cfssldir) ipaddress = ipbase for name in node_names: node_image = absjoin(targetdir, '%s.img' % name) info('prepare image for node %s in %s' % (name, node_image)) info('Unpacking archive %s' % archive) self._unzip(archive, node_image) try: self._prepare_node_image(node_image, name, node_names[0], ipaddress, cfssldir) except Exception as e: 
unlink(node_image)
                raise
            chown(node_image, targetinfo.st_uid, targetinfo.st_gid)
            ipaddress = self._increment_ip(ipaddress)
        info('done')

    def _setup_cgroups(self):
        debug('setup cgroups in %s' % getcwd())
        with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline:
            cmdline.write('cgroup_enable=cpuset cgroup_memory=1')

    def _enable_ssh(self):
        debug('enable ssh in %s' % getcwd())
        with open(absjoin('boot', 'ssh'), 'w') as ssh:
            ssh.write('')

    def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl):
        with self._mount(image):
            self._setup_nodename(master, nodename)
            self._enable_ssh()
            self._setup_cgroups()
            debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN))
            self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN))
            self._init_first_boot(ipadddress, nodename)

    def _copytree(self, srcdir, dstdir):
        for f in listdir(srcdir):
            copy2(absjoin(srcdir, f), dstdir)

    def _setup_nodename(self, master, nodename):
        debug('setup nodename %s in %s' % (nodename, getcwd()))
        with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname:
            print(nodename, file=hostname)
        with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts:
            print('127.0.1.1 %(nodename)s' % locals(), file=hosts)
            if nodename != master:
                print('10.0.0.1 %(master)s' % locals(), file=hosts)

    def _init_first_boot(self, ipadddress, nodename):
        debug('Prepare first boot in %s' % getcwd())
        with self._executable(absjoin('system', SETUP_NODE_SH)) as fname:
            self.create_setup_script(fname)
        with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal:
            self.setup_rclocal(rclocal)
        self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress, nodename)

    def create_setup_script(self, setup_node_sh):
        with open(setup_node_sh, 'x') as setup_node:
            print(PKG_SETUP % locals(), file=setup_node)

    def setup_rclocal(self, rc_local):
        with open(rc_local, 'r+') as script:
            script.write(self._edit(script.read(), SETUP_SCRIPT))

    def _create_setup_txt(self, fname, ipadddress, nodename):
        with open(fname, 'w') as setup:
            print('nodename=%s' % nodename, file=setup)
            print('ip=%s' % ipadddress, file=setup)

    def _edit(self, setup_script, setup_node_sh):
        lines = [l.rstrip() for l in setup_script.splitlines()]
        if 'exit 0' in lines:
            exit_line = lines.index('exit 0')
            lines.insert(exit_line, setup_node_sh)
        else:
            lines.append(setup_node_sh)
            lines.append('exit 0')
        return '\n'.join(lines)

    def _download_cfssl(self, dstdir):
        if not isdir(dstdir):
            makedirs(dstdir)
        for line in CFSSL_PROGS_SHA256.splitlines():
            if line:
                checksum, fname = line.split()
                dstfile = absjoin(dstdir, fname)
                self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' % fname,
                               dstfile, checksum)
                chmod(dstfile, 0o755)

    def _download(self, url, dstfile, checksum):
        request.urlretrieve(url, dstfile)
        m = sha256()
        with open(dstfile, 'rb') as f:
            m.update(f.read())
        if checksum != m.hexdigest():
            raise RuntimeError('Checksum of %s does not match!'
% dstfile) @staticmethod def _unzip(archive, dst_image): with ZipFile(archive) as image_archive: for name in image_archive.namelist(): if name.endswith('.img'): image = image_archive.extract(name, dirname(dst_image)) if isfile(dst_image): unlink(dst_image) rename(image, dst_image) return dst_image raise RuntimeError('No image file contained in archive %s' % archive) @contextmanager def _mktemp(self): here = getcwd() tempdir = mkdtemp() try: chdir(tempdir) yield tempdir, here finally: chdir(here) rmtree(tempdir) @contextmanager def _mount(self, image): with self._kpartx(abspath(image)) as nodes: with self._mktemp() as (here, cwd): for d in nodes.keys(): mkdir(d) boot = abspath('boot') system = abspath('system') with self._mounted(nodes['boot'], boot) as boot: with self._mounted(nodes['system'], system) as system: chdir(here) yield boot, system @contextmanager def _kpartx(self, image): output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image), universal_newlines=True) # $ sudo kpartx -a -v -s 2018-03-13-raspbian-stretch-lite.img # add map loop1p1 (252:7): 0 85611 linear /dev/loop1 8192 # add map loop1p2 (252:8): 0 3530752 linear /dev/loop1 98304 try: nodes = [] for l in output.splitlines(): if l: fields = l.split() nodes.append((fields[2], fields[5])) assert len(nodes) == 2 # sort nodes by size - the smaller node is 'boot' nodes.sort(key=lambda t: t[1], reverse=True) yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': '/dev/mapper/%s' % nodes[1][0]} finally: check_call(('sudo', 'kpartx', '-d', '-s', image)) @contextmanager def _mounted(self, mapping, mountpoint): try: debug('mount %s on %s' % (mapping, mountpoint)) check_call(('sudo', 'mount', mapping, mountpoint)) yield mountpoint finally: check_call(('sudo', 'umount', mountpoint)) @contextmanager def _executable(self, param): yield param chmod(param, 0o755) def _increment_ip(self, ipbase): octets = [int(o) for o in ipbase.split('.')] octets[3] += 1 return '.'.join([str(o) for o in octets]) def _check_ip(param): octets = [int(o) for o in param.split('.')] for o in octets: if 0 <= o <= 255:
raise RuntimeError('Invalid IP address: %s' % param) return param def main(*args): targetdir = getcwd() if len(args) < 4 else args[3] nodenames = prepare_names( NODE_COUNT if len(args) < 2 else int(args[1]), NODE_PREFIX if len(args) < 3 else args[2]) ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4]) raspbian_archive = abspath(args[0]) setup = ClusterSetup() setup(raspbian_archive, nodenames, targetdir, ipaddress) if __name__ == '__main__': def prepare_names(num_nodes, prefix): result = [prefix + '-master'] for i in range(1, num_nodes): result += ['%s-node-%d' % (prefix, i)] return tuple(result) if len(sys.argv) < 2: exit(__doc__) if geteuid() != 0: exit("You must be root to use this software") basicConfig(level=DEBUG) try: main(*sys.argv[1:]) except RuntimeError as e: exit('\n'.join((str(e), __doc__)))
continue
conditional_block
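_check_ip and _increment_ip in this record do the octet arithmetic by hand; the stdlib ipaddress module gives the same behavior with real validation. The sketch below is an assumed equivalent, not the script's own code: IPv4Address rejects malformed addresses and out-of-range octets, and its integer arithmetic carries into the next octet, whereas the hand-rolled increment would produce an invalid .256 past the last octet.

import ipaddress

def check_ip(param):
    # IPv4Address raises ValueError for anything that is not a valid
    # dotted-quad address, covering the manual 0..255 range check above
    ipaddress.IPv4Address(param)
    return param

def increment_ip(ipbase):
    # IPv4Address supports integer arithmetic directly
    return str(ipaddress.IPv4Address(ipbase) + 1)

assert increment_ip('192.168.8.2') == '192.168.8.3'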
setup-cluster-images.py
#!/usr/bin/env python3 """ Usage: setup-cluster-images image-archive [num_nodes [targetdir]] image-archive - zip file as downloaded from raspberry-pi.org num_nodes - number of nodes in the cluster [4] node_prefix - prefix for the cluster nodes [gg] targetdir - destination directory [current directory] """ import sys from _sha256 import sha256 from contextlib import contextmanager from logging import info, basicConfig, debug, DEBUG from os import chdir, getcwd, makedirs, mkdir, geteuid, chmod, chown, stat, unlink, \ listdir, rename from shutil import rmtree, copy2 from subprocess import check_output, check_call from tempfile import mkdtemp from urllib import request from zipfile import ZipFile from os.path import join, abspath, isdir, dirname, isfile # Number of raspberries in the cluster BASE_IP = '192.168.8.2' NODE_COUNT = 4 # prefix for the node names. # nodes will be named <prefix>-master, <prefix>-node1, <prefix>-node2, ... NODE_PREFIX = 'gg' USR_LOCAL_BIN=join('usr', 'local', 'bin') SETUP_NODE_SH=join(USR_LOCAL_BIN, 'setup_node.sh') CFSSL_PROGS_SHA256 = """ 0725a1cca3857392158807b543b75dc6388e2102e8a189792c4da7ac19f750b5 cfssl-bundle 48685e849565cd7d27ac2daf68faa835a5151fd3feac87c6715bcb92d58dc280 cfssl-certinfo 4106c11c61aa9e98b1967adab6db711d2b50a0f02f844329e9ad44f199bdf135 cfssl-newkey 71e41ef447f49ad236d75ec42152625c6fcf6c37122784740bd19b0a7c399560 cfssl-scan 11c708acaf48a69abf6f896f5c6158f7547a3c1bf44e14ca3b3ab440c1f808f1 cfssl e138102329d96f5a67aa168034a256a8376febf4ecde7b8e837c3f2e08b1cd19 cfssljson dac738390bc346b94c497b62a82f75cb05f0dafd5dad7d9dd63dedb9bc31092a mkbundle d53bbc0d2ac2d57c089d4f730d9e7b2d365701adc1bb417153a5f70a16bd10d6 multirootca """ # Shell script to setup the necessary software for kubernetes # FIXME - howto add a static IP # TODO - add static certificates # TODO - add kubeadm call for master PKG_SETUP = """\ #!/bin/sh setup_params="$1" setup_machine_id() { sudo rm -f /etc/machine-id /var/lib/dbus/machine-id sudo dbus-uuidgen --ensure=/etc/machine-id } setup_static_ip() { } set -e nodename=`awk -F= '/^nodename=/ { print $2 }' "$setup_params"` ipaddress=`awk -F= '/^ip=/ { print $2 }' "$setup_params"` sudo hostname "$nodename" setup_static_ip "$ipaddress" curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y sudo apt-get install -y policykit-1 docker-ce setup_machine_id sudo dphys-swapfile swapoff sudo dphys-swapfile uninstall sudo update-rc.d dphys-swapfile remove echo "Getting kubernetes packages" sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni sudo /usr/bin/raspi-config --expand-rootfs """ SETUP_SCRIPT = """ if [[ -e /boot/setup.txt ]] ; then tmp=`mktemp` mv /boot/setup.txt "$tmp" sh -x "/%s" "$tmp" >/boot/setup.log 2>&1 rm -f "$tmp" fi """ % SETUP_NODE_SH def absjoin(*params): return abspath(join(*params)) # FIXME - add comments to the methods class ClusterSetup: def __call__(self, archive, node_names, targetdir, ipbase): targetinfo = stat(targetdir) with self._mktemp(): info('Download cfssl') cfssldir = abspath('cfssl') self._download_cfssl(cfssldir) ipaddress = ipbase for name in node_names: node_image = absjoin(targetdir, '%s.img' % name) info('prepare image for node %s in %s' % (name, node_image)) info('Unpacking archive %s' % archive) self._unzip(archive, node_image) try: self._prepare_node_image(node_image, name, node_names[0], ipaddress, cfssldir) except Exception as e: 
unlink(node_image) raise chown(node_image, targetinfo.st_uid, targetinfo.st_gid) ipaddress = self._increment_ip(ipaddress) info('done') def _setup_cgroups(self): debug('setup cgrops in %s' % getcwd()) with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline: cmdline.write('cgroup_enable=cpuset cgroup_memory=1') def _enable_ssh(self): debug('enable ssh in %s' % getcwd()) with open(absjoin('boot', 'ssh'), 'w') as ssh: ssh.write('') def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl): with self._mount(image): self._setup_nodename(master, nodename) self._enable_ssh() self._setup_cgroups() debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN)) self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN)) self._init_first_boot(ipadddress, nodename) def _copytree(self, srcdir, dstdir): for f in listdir(srcdir): copy2(absjoin(srcdir, f), dstdir) def _setup_nodename(self, master, nodename): debug('setup nodename %s in %s' % (nodename, getcwd())) with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname: print(nodename, file=hostname) with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts: print('127.0.1.1 %(nodename)s' % locals(), file=hosts) if nodename != master: print('10.0.0.1 %(master)s' % locals(), file=hosts) def _init_first_boot(self, ipadddress, nodename): debug('Prepare first boot in %s' % getcwd()) with self._executable(absjoin('system', SETUP_NODE_SH)) as fname: self.create_setup_script(fname) with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal: self.setup_rclocal(rclocal) self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress, nodename) def create_setup_script(self, setup_node_sh): with open(setup_node_sh, 'x') as setup_node: print(PKG_SETUP % locals(), file=setup_node) def setup_rclocal(self, rc_local): with open(rc_local, 'r+') as script: script.write(self._edit(script.read(), SETUP_SCRIPT)) def _create_setup_txt(self, fname, ipadddress, nodename): with open(fname, 'w') as setup: print('nodename=%s' % nodename, file=setup) print('ip=%s' % ipadddress, file=setup) def _edit(self, setup_script, setup_node_sh): lines = [l.rstrip() for l in setup_script.splitlines()] if 'exit 0' in lines: exit_line = lines.index('exit 0') lines.insert(exit_line, setup_node_sh) else: lines.append(setup_node_sh) lines.append('exit 0') return '\n'.join(lines) def _download_cfssl(self, dstdir): if not isdir(dstdir): makedirs(dstdir) for line in CFSSL_PROGS_SHA256.splitlines(): if line: checksum, fname = line.split() dstfile = absjoin(dstdir, fname) self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' % fname, dstfile, checksum) chmod(dstfile, 0o755) def _download(self, url, dstfile, checksum): request.urlretrieve(url, dstfile) m = sha256() with open(dstfile, 'rb') as f: hash = m.update(f.read()) if checksum != m.hexdigest(): raise RuntimeError('Checksum of %s does not match!' 
% dstfile) @staticmethod def _unzip(archive, dst_image): with ZipFile(archive) as image_archive: for name in image_archive.namelist(): if name.endswith('.img'): image = image_archive.extract(name, dirname(dst_image)) if isfile(dst_image): unlink(dst_image) rename(image, dst_image) return dst_image raise RuntimeError('No image file contained in archive %s' % archive) @contextmanager def _mktemp(self): here = getcwd() tempdir = mkdtemp() try: chdir(tempdir) yield tempdir, here finally: chdir(here) rmtree(tempdir) @contextmanager def _mount(self, image): with self._kpartx(abspath(image)) as nodes: with self._mktemp() as (here, cwd): for d in nodes.keys(): mkdir(d) boot = abspath('boot') system = abspath('system') with self._mounted(nodes['boot'], boot) as boot: with self._mounted(nodes['system'], system) as system: chdir(here) yield boot, system @contextmanager def _kpartx(self, image): output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image), universal_newlines=True) # $ sudo kpartx -a -v -s 2018-03-13-raspbian-stretch-lite.img # add map loop1p1 (252:7): 0 85611 linear /dev/loop1 8192 # add map loop1p2 (252:8): 0 3530752 linear /dev/loop1 98304 try: nodes = [] for l in output.splitlines(): if l: fields = l.split() nodes.append((fields[2], fields[5])) assert len(nodes) == 2 # sort nodes by size - the smaller node is 'boot' nodes.sort(key=lambda t: t[1], reverse=True) yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': '/dev/mapper/%s' % nodes[1][0]} finally: check_call(('sudo', 'kpartx', '-d', '-s', image)) @contextmanager def _mounted(self, mapping, mountpoint): try: debug('mount %s on %s' % (mapping, mountpoint)) check_call(('sudo', 'mount', mapping, mountpoint)) yield mountpoint finally: check_call(('sudo', 'umount', mountpoint)) @contextmanager def _executable(self, param): yield param chmod(param, 0o755) def _increment_ip(self, ipbase): octets = [int(o) for o in ipbase.split('.')] octets[3] += 1 return '.'.join([str(o) for o in octets]) def _check_ip(param): octets = [int(o) for o in param.split('.')] for o in octets: if 0 <= o <= 255: continue raise RuntimeError('Invalid IP address: %s' % param) return param def main(*args):
if __name__ == '__main__': def prepare_names(num_nodes, prefix): result = [prefix + '-master'] for i in range(1, num_nodes): result += ['%s-node-%d' % (prefix, i)] return tuple(result) if len(sys.argv) < 2: exit(__doc__) if geteuid() != 0: exit("You must be root to use this software") basicConfig(level=DEBUG) try: main(*sys.argv[1:]) except RuntimeError as e: exit('\n'.join((str(e), __doc__)))
targetdir = getcwd() if len(args) < 4 else args[3] nodenames = prepare_names( NODE_COUNT if len(args) < 2 else int(args[1]), NODE_PREFIX if len(args) < 3 else args[2]) ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4]) raspbian_archive = abspath(args[0]) setup = ClusterSetup() setup(raspbian_archive, nodenames, targetdir, ipaddress)
identifier_body
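_download in this file verifies each cfssl binary against a pinned SHA-256 digest after fetching it. A compact sketch of that verify-after-download step follows; verified_download is a hypothetical name. Note that hashlib's update() returns None, so binding its result to a variable (as some copies of the original do with hash = m.update(...)) is a harmless no-op; only hexdigest() carries the result.

from hashlib import sha256
from urllib import request

def verified_download(url, dstfile, checksum):
    request.urlretrieve(url, dstfile)
    m = sha256()
    with open(dstfile, 'rb') as f:
        m.update(f.read())     # update() mutates m in place and returns None
    if m.hexdigest() != checksum:
        raise RuntimeError('Checksum of %s does not match!' % dstfile)
    return dstfile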
setup-cluster-images.py
#!/usr/bin/env python3 """ Usage: setup-cluster-images image-archive [num_nodes [targetdir]] image-archive - zip file as downloaded from raspberry-pi.org num_nodes - number of nodes in the cluster [4] node_prefix - prefix for the cluster nodes [gg] targetdir - destination directory [current directory] """ import sys from _sha256 import sha256 from contextlib import contextmanager from logging import info, basicConfig, debug, DEBUG from os import chdir, getcwd, makedirs, mkdir, geteuid, chmod, chown, stat, unlink, \ listdir, rename from shutil import rmtree, copy2 from subprocess import check_output, check_call from tempfile import mkdtemp from urllib import request from zipfile import ZipFile from os.path import join, abspath, isdir, dirname, isfile # Number of raspberries in the cluster BASE_IP = '192.168.8.2' NODE_COUNT = 4 # prefix for the node names. # nodes will be named <prefix>-master, <prefix>-node1, <prefix>-node2, ... NODE_PREFIX = 'gg' USR_LOCAL_BIN=join('usr', 'local', 'bin') SETUP_NODE_SH=join(USR_LOCAL_BIN, 'setup_node.sh') CFSSL_PROGS_SHA256 = """ 0725a1cca3857392158807b543b75dc6388e2102e8a189792c4da7ac19f750b5 cfssl-bundle 48685e849565cd7d27ac2daf68faa835a5151fd3feac87c6715bcb92d58dc280 cfssl-certinfo 4106c11c61aa9e98b1967adab6db711d2b50a0f02f844329e9ad44f199bdf135 cfssl-newkey 71e41ef447f49ad236d75ec42152625c6fcf6c37122784740bd19b0a7c399560 cfssl-scan 11c708acaf48a69abf6f896f5c6158f7547a3c1bf44e14ca3b3ab440c1f808f1 cfssl e138102329d96f5a67aa168034a256a8376febf4ecde7b8e837c3f2e08b1cd19 cfssljson dac738390bc346b94c497b62a82f75cb05f0dafd5dad7d9dd63dedb9bc31092a mkbundle d53bbc0d2ac2d57c089d4f730d9e7b2d365701adc1bb417153a5f70a16bd10d6 multirootca """ # Shell script to setup the necessary software for kubernetes # FIXME - howto add a static IP # TODO - add static certificates # TODO - add kubeadm call for master PKG_SETUP = """\ #!/bin/sh setup_params="$1" setup_machine_id() { sudo rm -f /etc/machine-id /var/lib/dbus/machine-id sudo dbus-uuidgen --ensure=/etc/machine-id } setup_static_ip() { } set -e nodename=`awk -F= '/^nodename=/ { print $2 }' "$setup_params"` ipaddress=`awk -F= '/^ip=/ { print $2 }' "$setup_params"` sudo hostname "$nodename" setup_static_ip "$ipaddress" curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y sudo apt-get install -y policykit-1 docker-ce setup_machine_id sudo dphys-swapfile swapoff sudo dphys-swapfile uninstall sudo update-rc.d dphys-swapfile remove echo "Getting kubernetes packages" sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni sudo /usr/bin/raspi-config --expand-rootfs """ SETUP_SCRIPT = """ if [[ -e /boot/setup.txt ]] ; then tmp=`mktemp` mv /boot/setup.txt "$tmp" sh -x "/%s" "$tmp" >/boot/setup.log 2>&1 rm -f "$tmp" fi """ % SETUP_NODE_SH def absjoin(*params): return abspath(join(*params)) # FIXME - add comments to the methods class ClusterSetup: def __call__(self, archive, node_names, targetdir, ipbase): targetinfo = stat(targetdir) with self._mktemp(): info('Download cfssl') cfssldir = abspath('cfssl') self._download_cfssl(cfssldir) ipaddress = ipbase for name in node_names: node_image = absjoin(targetdir, '%s.img' % name) info('prepare image for node %s in %s' % (name, node_image)) info('Unpacking archive %s' % archive) self._unzip(archive, node_image) try: self._prepare_node_image(node_image, name, node_names[0], ipaddress, cfssldir) except Exception as e: 
unlink(node_image) raise chown(node_image, targetinfo.st_uid, targetinfo.st_gid) ipaddress = self._increment_ip(ipaddress) info('done') def _setup_cgroups(self): debug('setup cgrops in %s' % getcwd()) with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline: cmdline.write('cgroup_enable=cpuset cgroup_memory=1') def _enable_ssh(self): debug('enable ssh in %s' % getcwd()) with open(absjoin('boot', 'ssh'), 'w') as ssh: ssh.write('') def
(self, image, nodename, master, ipadddress, cfssl): with self._mount(image): self._setup_nodename(master, nodename) self._enable_ssh() self._setup_cgroups() debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN)) self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN)) self._init_first_boot(ipadddress, nodename) def _copytree(self, srcdir, dstdir): for f in listdir(srcdir): copy2(absjoin(srcdir, f), dstdir) def _setup_nodename(self, master, nodename): debug('setup nodename %s in %s' % (nodename, getcwd())) with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname: print(nodename, file=hostname) with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts: print('127.0.1.1 %(nodename)s' % locals(), file=hosts) if nodename != master: print('10.0.0.1 %(master)s' % locals(), file=hosts) def _init_first_boot(self, ipadddress, nodename): debug('Prepare first boot in %s' % getcwd()) with self._executable(absjoin('system', SETUP_NODE_SH)) as fname: self.create_setup_script(fname) with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal: self.setup_rclocal(rclocal) self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress, nodename) def create_setup_script(self, setup_node_sh): with open(setup_node_sh, 'x') as setup_node: print(PKG_SETUP % locals(), file=setup_node) def setup_rclocal(self, rc_local): with open(rc_local, 'r+') as script: script.write(self._edit(script.read(), SETUP_SCRIPT)) def _create_setup_txt(self, fname, ipadddress, nodename): with open(fname, 'w') as setup: print('nodename=%s' % nodename, file=setup) print('ip=%s' % ipadddress, file=setup) def _edit(self, setup_script, setup_node_sh): lines = [l.rstrip() for l in setup_script.splitlines()] if 'exit 0' in lines: exit_line = lines.index('exit 0') lines.insert(exit_line, setup_node_sh) else: lines.append(setup_node_sh) lines.append('exit 0') return '\n'.join(lines) def _download_cfssl(self, dstdir): if not isdir(dstdir): makedirs(dstdir) for line in CFSSL_PROGS_SHA256.splitlines(): if line: checksum, fname = line.split() dstfile = absjoin(dstdir, fname) self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' % fname, dstfile, checksum) chmod(dstfile, 0o755) def _download(self, url, dstfile, checksum): request.urlretrieve(url, dstfile) m = sha256() with open(dstfile, 'rb') as f: hash = m.update(f.read()) if checksum != m.hexdigest(): raise RuntimeError('Checksum of %s does not match!' 
% dstfile) @staticmethod def _unzip(archive, dst_image): with ZipFile(archive) as image_archive: for name in image_archive.namelist(): if name.endswith('.img'): image = image_archive.extract(name, dirname(dst_image)) if isfile(dst_image): unlink(dst_image) rename(image, dst_image) return dst_image raise RuntimeError('No image file contained in archive %s' % archive) @contextmanager def _mktemp(self): here = getcwd() tempdir = mkdtemp() try: chdir(tempdir) yield tempdir, here finally: chdir(here) rmtree(tempdir) @contextmanager def _mount(self, image): with self._kpartx(abspath(image)) as nodes: with self._mktemp() as (here, cwd): for d in nodes.keys(): mkdir(d) boot = abspath('boot') system = abspath('system') with self._mounted(nodes['boot'], boot) as boot: with self._mounted(nodes['system'], system) as system: chdir(here) yield boot, system @contextmanager def _kpartx(self, image): output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image), universal_newlines=True) # $ sudo kpartx -a -v -s 2018-03-13-raspbian-stretch-lite.img # add map loop1p1 (252:7): 0 85611 linear /dev/loop1 8192 # add map loop1p2 (252:8): 0 3530752 linear /dev/loop1 98304 try: nodes = [] for l in output.splitlines(): if l: fields = l.split() nodes.append((fields[2], fields[5])) assert len(nodes) == 2 # sort nodes by size - the smaller node is 'boot' nodes.sort(key=lambda t: t[1], reverse=True) yield {'boot': '/dev/mapper/%s' % nodes[0][0], 'system': '/dev/mapper/%s' % nodes[1][0]} finally: check_call(('sudo', 'kpartx', '-d', '-s', image)) @contextmanager def _mounted(self, mapping, mountpoint): try: debug('mount %s on %s' % (mapping, mountpoint)) check_call(('sudo', 'mount', mapping, mountpoint)) yield mountpoint finally: check_call(('sudo', 'umount', mountpoint)) @contextmanager def _executable(self, param): yield param chmod(param, 0o755) def _increment_ip(self, ipbase): octets = [int(o) for o in ipbase.split('.')] octets[3] += 1 return '.'.join([str(o) for o in octets]) def _check_ip(param): octets = [int(o) for o in param.split('.')] for o in octets: if 0 <= o <= 255: continue raise RuntimeError('Invalid IP address: %s' % param) return param def main(*args): targetdir = getcwd() if len(args) < 4 else args[3] nodenames = prepare_names( NODE_COUNT if len(args) < 2 else int(args[1]), NODE_PREFIX if len(args) < 3 else args[2]) ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4]) raspbian_archive = abspath(args[0]) setup = ClusterSetup() setup(raspbian_archive, nodenames, targetdir, ipaddress) if __name__ == '__main__': def prepare_names(num_nodes, prefix): result = [prefix + '-master'] for i in range(1, num_nodes): result += ['%s-node-%d' % (prefix, i)] return tuple(result) if len(sys.argv) < 2: exit(__doc__) if geteuid() != 0: exit("You must be root to use this software") basicConfig(level=DEBUG) try: main(*sys.argv[1:]) except RuntimeError as e: exit('\n'.join((str(e), __doc__)))
_prepare_node_image
identifier_name
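The mounting helpers in this record pair every setup call with a guaranteed teardown via @contextmanager and try/finally, so partitions get unmounted even when image preparation fails. A stripped-down sketch of that pattern, assuming a generic mounted() rename of the _mounted helper: the sketch mounts before entering the try block, so a failed mount does not trigger a spurious umount, whereas the original places the mount call inside the try.

from contextlib import contextmanager
from subprocess import check_call

@contextmanager
def mounted(device, mountpoint):
    check_call(('sudo', 'mount', device, mountpoint))
    try:
        yield mountpoint
    finally:
        # Runs on success and on any exception raised inside the with-block
        check_call(('sudo', 'umount', mountpoint))

# Usage sketch:
# with mounted('/dev/mapper/loop0p1', '/mnt/boot') as boot:
#     ...edit files under boot...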
setup-cluster-images.py
#!/usr/bin/env python3 """ Usage: setup-cluster-images image-archive [num_nodes [targetdir]] image-archive - zip file as downloaded from raspberry-pi.org num_nodes - number of nodes in the cluster [4] node_prefix - prefix for the cluster nodes [gg] targetdir - destination directory [current directory] """ import sys from _sha256 import sha256 from contextlib import contextmanager from logging import info, basicConfig, debug, DEBUG from os import chdir, getcwd, makedirs, mkdir, geteuid, chmod, chown, stat, unlink, \ listdir, rename from shutil import rmtree, copy2 from subprocess import check_output, check_call from tempfile import mkdtemp from urllib import request from zipfile import ZipFile from os.path import join, abspath, isdir, dirname, isfile # Number of raspberries in the cluster BASE_IP = '192.168.8.2' NODE_COUNT = 4 # prefix for the node names. # nodes will be named <prefix>-master, <prefix>-node1, <prefix>-node2, ... NODE_PREFIX = 'gg' USR_LOCAL_BIN=join('usr', 'local', 'bin') SETUP_NODE_SH=join(USR_LOCAL_BIN, 'setup_node.sh') CFSSL_PROGS_SHA256 = """ 0725a1cca3857392158807b543b75dc6388e2102e8a189792c4da7ac19f750b5 cfssl-bundle 48685e849565cd7d27ac2daf68faa835a5151fd3feac87c6715bcb92d58dc280 cfssl-certinfo 4106c11c61aa9e98b1967adab6db711d2b50a0f02f844329e9ad44f199bdf135 cfssl-newkey 71e41ef447f49ad236d75ec42152625c6fcf6c37122784740bd19b0a7c399560 cfssl-scan 11c708acaf48a69abf6f896f5c6158f7547a3c1bf44e14ca3b3ab440c1f808f1 cfssl e138102329d96f5a67aa168034a256a8376febf4ecde7b8e837c3f2e08b1cd19 cfssljson dac738390bc346b94c497b62a82f75cb05f0dafd5dad7d9dd63dedb9bc31092a mkbundle d53bbc0d2ac2d57c089d4f730d9e7b2d365701adc1bb417153a5f70a16bd10d6 multirootca """ # Shell script to setup the necessary software for kubernetes # FIXME - howto add a static IP # TODO - add static certificates # TODO - add kubeadm call for master PKG_SETUP = """\ #!/bin/sh setup_params="$1" setup_machine_id() { sudo rm -f /etc/machine-id /var/lib/dbus/machine-id sudo dbus-uuidgen --ensure=/etc/machine-id } setup_static_ip() { } set -e nodename=`awk -F= '/^nodename=/ { print $2 }' "$setup_params"` ipaddress=`awk -F= '/^ip=/ { print $2 }' "$setup_params"` sudo hostname "$nodename" setup_static_ip "$ipaddress" curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add - echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list sudo apt-get update -y sudo apt-get install -y policykit-1 docker-ce setup_machine_id sudo dphys-swapfile swapoff sudo dphys-swapfile uninstall sudo update-rc.d dphys-swapfile remove echo "Getting kubernetes packages" sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni sudo /usr/bin/raspi-config --expand-rootfs """ SETUP_SCRIPT = """ if [[ -e /boot/setup.txt ]] ; then tmp=`mktemp` mv /boot/setup.txt "$tmp" sh -x "/%s" "$tmp" >/boot/setup.log 2>&1 rm -f "$tmp" fi """ % SETUP_NODE_SH def absjoin(*params): return abspath(join(*params)) # FIXME - add comments to the methods class ClusterSetup: def __call__(self, archive, node_names, targetdir, ipbase): targetinfo = stat(targetdir) with self._mktemp(): info('Download cfssl') cfssldir = abspath('cfssl') self._download_cfssl(cfssldir) ipaddress = ipbase for name in node_names: node_image = absjoin(targetdir, '%s.img' % name) info('prepare image for node %s in %s' % (name, node_image)) info('Unpacking archive %s' % archive) self._unzip(archive, node_image) try: self._prepare_node_image(node_image, name, node_names[0], ipaddress, cfssldir) except Exception as e: 
unlink(node_image)
                raise
            chown(node_image, targetinfo.st_uid, targetinfo.st_gid)
def _setup_cgroups(self): debug('setup cgrops in %s' % getcwd()) with open(absjoin('boot', 'cmdline.txt'), 'a') as cmdline: cmdline.write('cgroup_enable=cpuset cgroup_memory=1') def _enable_ssh(self): debug('enable ssh in %s' % getcwd()) with open(absjoin('boot', 'ssh'), 'w') as ssh: ssh.write('') def _prepare_node_image(self, image, nodename, master, ipadddress, cfssl): with self._mount(image): self._setup_nodename(master, nodename) self._enable_ssh() self._setup_cgroups() debug('install cfssl to %s' % absjoin('system', USR_LOCAL_BIN)) self._copytree(cfssl, absjoin('system', USR_LOCAL_BIN)) self._init_first_boot(ipadddress, nodename) def _copytree(self, srcdir, dstdir): for f in listdir(srcdir): copy2(absjoin(srcdir, f), dstdir) def _setup_nodename(self, master, nodename): debug('setup nodename %s in %s' % (nodename, getcwd())) with open(absjoin('system', 'etc', 'hostname'), 'w') as hostname: print(nodename, file=hostname) with open(absjoin('system', 'etc', 'hosts'), 'w') as hosts: print('127.0.1.1 %(nodename)s' % locals(), file=hosts) if nodename != master: print('10.0.0.1 %(master)s' % locals(), file=hosts) def _init_first_boot(self, ipadddress, nodename): debug('Prepare first boot in %s' % getcwd()) with self._executable(absjoin('system', SETUP_NODE_SH)) as fname: self.create_setup_script(fname) with self._executable(absjoin('system', 'etc', 'rc.local')) as rclocal: self.setup_rclocal(rclocal) self._create_setup_txt(absjoin('boot', 'setup.txt'), ipadddress, nodename) def create_setup_script(self, setup_node_sh): with open(setup_node_sh, 'x') as setup_node: print(PKG_SETUP % locals(), file=setup_node) def setup_rclocal(self, rc_local): with open(rc_local, 'r+') as script: script.write(self._edit(script.read(), SETUP_SCRIPT)) def _create_setup_txt(self, fname, ipadddress, nodename): with open(fname, 'w') as setup: print('nodename=%s' % nodename, file=setup) print('ip=%s' % ipadddress, file=setup) def _edit(self, setup_script, setup_node_sh): lines = [l.rstrip() for l in setup_script.splitlines()] if 'exit 0' in lines: exit_line = lines.index('exit 0') lines.insert(exit_line, setup_node_sh) else: lines.append(setup_node_sh) lines.append('exit 0') return '\n'.join(lines) def _download_cfssl(self, dstdir): if not isdir(dstdir): makedirs(dstdir) for line in CFSSL_PROGS_SHA256.splitlines(): if line: checksum, fname = line.split() dstfile = absjoin(dstdir, fname) self._download('https://pkg.cfssl.org/R1.2/%s_linux-arm' % fname, dstfile, checksum) chmod(dstfile, 0o755) def _download(self, url, dstfile, checksum): request.urlretrieve(url, dstfile) m = sha256() with open(dstfile, 'rb') as f: hash = m.update(f.read()) if checksum != m.hexdigest(): raise RuntimeError('Checksum of %s does not match!' 
                               % dstfile)

    @staticmethod
    def _unzip(archive, dst_image):
        with ZipFile(archive) as image_archive:
            for name in image_archive.namelist():
                if name.endswith('.img'):
                    image = image_archive.extract(name, dirname(dst_image))
                    if isfile(dst_image):
                        unlink(dst_image)
                    rename(image, dst_image)
                    return dst_image
            raise RuntimeError('No image file contained in archive %s'
                               % archive)

    @contextmanager
    def _mktemp(self):
        here = getcwd()
        tempdir = mkdtemp()
        try:
            chdir(tempdir)
            yield tempdir, here
        finally:
            chdir(here)
            rmtree(tempdir)

    @contextmanager
    def _mount(self, image):
        with self._kpartx(abspath(image)) as nodes:
            # _mktemp yields (temporary directory, previous working directory)
            with self._mktemp() as (tmpdir, _origin):
                for d in nodes.keys():
                    mkdir(d)
                boot = abspath('boot')
                system = abspath('system')
                with self._mounted(nodes['boot'], boot) as boot:
                    with self._mounted(nodes['system'], system) as system:
                        # work from the directory holding both mount points
                        chdir(tmpdir)
                        yield boot, system

    @contextmanager
    def _kpartx(self, image):
        output = check_output(('sudo', 'kpartx', '-a', '-v', '-s', image),
                              universal_newlines=True)
        # $ sudo kpartx -a -v -s 2018-03-13-raspbian-stretch-lite.img
        # add map loop1p1 (252:7): 0 85611 linear /dev/loop1 8192
        # add map loop1p2 (252:8): 0 3530752 linear /dev/loop1 98304
        try:
            nodes = []
            for l in output.splitlines():
                if l:
                    fields = l.split()
                    nodes.append((fields[2], fields[5]))
            assert len(nodes) == 2
            # sort the partitions numerically by size, ascending - the
            # smaller partition is 'boot'
            nodes.sort(key=lambda t: int(t[1]))
            yield {'boot': '/dev/mapper/%s' % nodes[0][0],
                   'system': '/dev/mapper/%s' % nodes[1][0]}
        finally:
            check_call(('sudo', 'kpartx', '-d', '-s', image))

    @contextmanager
    def _mounted(self, mapping, mountpoint):
        try:
            debug('mount %s on %s' % (mapping, mountpoint))
            check_call(('sudo', 'mount', mapping, mountpoint))
            yield mountpoint
        finally:
            check_call(('sudo', 'umount', mountpoint))

    @contextmanager
    def _executable(self, param):
        yield param
        chmod(param, 0o755)  # make the file executable once it is written

    def _increment_ip(self, ipbase):
        # NOTE: no carry handling - incrementing the last octet past 255
        # produces an invalid address
        octets = [int(o) for o in ipbase.split('.')]
        octets[3] += 1
        return '.'.join([str(o) for o in octets])


def _check_ip(param):
    octets = [int(o) for o in param.split('.')]
    if len(octets) != 4:
        raise RuntimeError('Invalid IP address: %s' % param)
    for o in octets:
        if 0 <= o <= 255:
            continue
        raise RuntimeError('Invalid IP address: %s' % param)
    return param


def main(*args):
    targetdir = getcwd() if len(args) < 4 else args[3]
    nodenames = prepare_names(
        NODE_COUNT if len(args) < 2 else int(args[1]),
        NODE_PREFIX if len(args) < 3 else args[2])
    ipaddress = BASE_IP if len(args) < 5 else _check_ip(args[4])
    raspbian_archive = abspath(args[0])
    setup = ClusterSetup()
    setup(raspbian_archive, nodenames, targetdir, ipaddress)


if __name__ == '__main__':
    def prepare_names(num_nodes, prefix):
        result = [prefix + '-master']
        for i in range(1, num_nodes):
            result += ['%s-node-%d' % (prefix, i)]
        return tuple(result)

    if len(sys.argv) < 2:
        exit(__doc__)
    if geteuid() != 0:
        exit("You must be root to use this software")
    basicConfig(level=DEBUG)
    try:
        main(*sys.argv[1:])
    except RuntimeError as e:
        exit('\n'.join((str(e), __doc__)))
ipaddress = self._increment_ip(ipaddress) info('done')
random_line_split
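The `_download` helper in the script above reads each downloaded binary into memory in one go before hashing it. Below is a minimal sketch of a streaming alternative that feeds the digest in fixed-size blocks; the function name and block size are illustrative, not part of the original script.

from hashlib import sha256
from urllib import request


def download_verified(url, dstfile, checksum, blocksize=65536):
    """Download url to dstfile and verify its SHA-256 checksum in blocks."""
    request.urlretrieve(url, dstfile)
    digest = sha256()
    with open(dstfile, 'rb') as f:
        # hash the file block by block instead of loading it all at once
        for block in iter(lambda: f.read(blocksize), b''):
            digest.update(block)
    if digest.hexdigest() != checksum:
        raise RuntimeError('Checksum of %s does not match!' % dstfile)

The check itself is unchanged; only the memory profile differs, which matters when the file is large relative to the host's RAM.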
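Similarly, `_increment_ip` above bumps only the last octet, so counting past x.y.z.255 produces an invalid address (noted in a comment there). A sketch of a carry-safe variant built on the standard `ipaddress` module; the function name is illustrative:

from ipaddress import IPv4Address


def increment_ip(ip):
    """Return the next IPv4 address, carrying across octets
    (e.g. 192.168.8.255 -> 192.168.9.0)."""
    return str(IPv4Address(ip) + 1)

Constructing an `IPv4Address` also validates the input string, so a separate `_check_ip`-style octet check becomes unnecessary.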
lib.rs
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Helper crate for secure and convenient configuration of the Exonum nodes.
//!
//! `exonum-cli` supports a multi-stage configuration process designed with safety in mind. It
//! involves 4 steps (or stages) and allows configuring and running multiple blockchain nodes
//! without the need to exchange private keys between administrators.
//!
//! # How to Run the Network
//!
//! 1. Generate the common (template) part of the nodes configuration using the
//! `generate-template` command. The generated `.toml` file must be spread among all the nodes
//! and must be used in the following configuration step.
//! 2. Generate the public and secret (private) parts of the node configuration using the
//! `generate-config` command. At this step, Exonum generates a master key from which the
//! consensus and service validator keys are derived. The master key is stored in an encrypted
//! file. The consensus secret key is used for communication between the nodes, while the
//! service secret key is used mainly to sign transactions generated by the node. Both secret
//! keys may be encrypted with a password. The public part of the node configuration must be
//! spread among all nodes, while the secret part must be accessible by the node administrator
//! only.
//! 3. Generate the final node configuration using the `finalize` command. Exonum combines the
//! secret part of the node configuration with the public configurations of every other node,
//! producing a single configuration file with all the necessary node and network settings.
//! 4. Use the `run` command and provide it with the final node configuration file produced at
//! the previous step. If the secret keys are protected with passwords, the user needs to enter
//! the passwords. The running node automatically connects to the other nodes in the network
//! using the IP addresses from the public parts of the node configurations.
//!
//! ## Additional Commands
//!
//! `exonum-cli` also supports additional CLI commands for performing maintenance actions by node
//! administrators and easier debugging.
//!
//! * The `run-dev` command automatically generates a network configuration with a single node
//! and runs it. This command can be useful for fast testing of services during development.
//! * The `maintenance` command allows clearing the node's consensus messages with `clear-cache`
//! and restarting the node's service migration script with `restart-migration`.
//!
//! ## How to Extend Parameters
//!
//! `exonum-cli` allows extending the list of parameters for any command and even adding new CLI
//! commands with arbitrary behavior. To do so, implement a structure with a list of additional
//! parameters and use the `flatten` macro attribute of the [`serde`][serde] and
//! [`structopt`][structopt] libraries.
//!
//! ```ignore
//! #[derive(Serialize, Deserialize, StructOpt)]
//! struct MyRunCommand {
//!     #[serde(flatten)]
//!     #[structopt(flatten)]
//!     default: Run,
//!     /// My awesome parameter
//!     secret_number: i32,
//! }
//! ```
//!
//! 
You can also create your own list of commands by implementing an enum with a similar principle:
//!
//! ```ignore
//! #[derive(StructOpt)]
//! enum MyCommands {
//!     #[structopt(name = "run")]
//!     DefaultRun(Run),
//!     #[structopt(name = "my-run")]
//!     MyAwesomeRun(MyRunCommand),
//! }
//! ```
//!
//! While implementing custom behavior for your commands, you may use the
//! [`StandardResult`](./command/enum.StandardResult.html) enum for
//! accessing the node configuration files created and filled by the standard Exonum commands.
//!
//! [serde]: https://crates.io/crates/serde
//! [structopt]: https://crates.io/crates/structopt

#![deny(missing_docs)]

pub use crate::config_manager::DefaultConfigManager;
pub use structopt;

use exonum::{
    blockchain::config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams},
    merkledb::RocksDB,
    runtime::{RuntimeInstance, WellKnownRuntime},
};
use exonum_explorer_service::ExplorerFactory;
use exonum_node::{Node, NodeBuilder as CoreNodeBuilder};
use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory};
use exonum_supervisor::{Supervisor, SupervisorConfig};
use exonum_system_api::SystemApiPlugin;
use structopt::StructOpt;
use tempfile::TempDir;

use std::{env, ffi::OsString, iter, path::PathBuf};

use crate::command::{run::NodeRunConfig, Command, ExonumCommand, StandardResult};

pub mod command;
pub mod config;
pub mod io;
pub mod password;

mod config_manager;

/// Rust-specific node builder used for constructing a node with a list
/// of provided services.
#[derive(Debug)]
pub struct NodeBuilder {
    rust_runtime: RustRuntimeBuilder,
    external_runtimes: Vec<RuntimeInstance>,
    builtin_instances: Vec<InstanceInitParams>,
    args: Option<Vec<OsString>>,
    temp_dir: Option<TempDir>,
}

impl Default for NodeBuilder {
    fn default() -> Self {
        Self::new()
    }
}

impl NodeBuilder {
    /// Creates a new builder.
    pub fn new() -> Self {
        Self {
            rust_runtime: RustRuntimeBuilder::new()
                .with_factory(Supervisor)
                .with_factory(ExplorerFactory),
            external_runtimes: vec![],
            builtin_instances: vec![],
            args: None,
            temp_dir: None,
        }
    }

    /// Creates a new builder with the provided command-line arguments. The path
    /// to the current executable **does not** need to be specified as the first argument.
    #[doc(hidden)] // unstable
    pub fn with_args<I>(args: I) -> Self
    where
        I: IntoIterator,
        I::Item: Into<OsString>,
    {
        let mut this = Self::new();
        let executable = env::current_exe()
            .map(PathBuf::into_os_string)
            .unwrap_or_else(|_| "node".into());
        let all_args = iter::once(executable)
            .chain(args.into_iter().map(Into::into))
            .collect();
        this.args = Some(all_args);
        this
    }

    /// Creates a single-node development network with default settings. The node stores
    /// its data in a temporary directory, which is automatically removed when the node is stopped.
    ///
    /// # Return value
    ///
    /// Returns an error if the temporary directory cannot be created.
    pub fn development_node() -> Result<Self, failure::Error> {
        let temp_dir = TempDir::new()?;
        let mut this = Self::with_args(vec![
            OsString::from("run-dev"),
            OsString::from("--artifacts-dir"),
            temp_dir.path().into(),
        ]);
        this.temp_dir = Some(temp_dir);
        Ok(this)
    }

    /// Adds new Rust service to the list of available services.
    pub fn with_rust_service(mut self, service: impl ServiceFactory) -> Self {
        self.rust_runtime = self.rust_runtime.with_factory(service);
        self
    }

    /// Adds a new `Runtime` to the list of available runtimes.
    ///
    /// Note that you don't have to add the Rust runtime, since it is included by default.
    
pub fn with_external_runtime(mut self, runtime: impl WellKnownRuntime) -> Self { self.external_runtimes.push(runtime.into()); self } /// Adds a service instance that will be available immediately after creating a genesis block. /// /// For Rust services, the service factory needs to be separately supplied /// via [`with_rust_service`](#method.with_rust_service). pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self { self.builtin_instances.push(instance.into()); self } /// Adds a default Rust service instance that will be available immediately after creating a /// genesis block. pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self { self.with_instance(service.default_instance()) .with_rust_service(service) } /// Executes a command received from the command line. /// /// # Return value /// /// Returns: /// /// - `Ok(Some(_))` if the command lead to the node creation /// - `Ok(None)` if the command executed successfully and did not lead to node creation /// - `Err(_)` if an error occurred during command execution #[doc(hidden)] // unstable pub fn execute_command(self) -> Result<Option<Node>, failure::Error> { let command = if let Some(args) = self.args
else { Command::from_args() }; if let StandardResult::Run(run_config) = command.execute()? { let genesis_config = Self::genesis_config(&run_config, self.builtin_instances); let db_options = &run_config.node_config.private_config.database; let database = RocksDB::open(run_config.db_path, db_options)?; let node_config_path = run_config.node_config_path.to_string_lossy(); let config_manager = DefaultConfigManager::new(node_config_path.into_owned()); let rust_runtime = self.rust_runtime; let node_config = run_config.node_config.into(); let node_keys = run_config.node_keys; let mut node_builder = CoreNodeBuilder::new(database, node_config, node_keys) .with_genesis_config(genesis_config) .with_config_manager(config_manager) .with_plugin(SystemApiPlugin) .with_runtime_fn(|channel| rust_runtime.build(channel.endpoints_sender())); for runtime in self.external_runtimes { node_builder = node_builder.with_runtime(runtime); } Ok(Some(node_builder.build())) } else { Ok(None) } } /// Configures the node using parameters provided by user from stdin and then runs it. pub fn run(mut self) -> Result<(), failure::Error> { // Store temporary directory until the node is done. let _temp_dir = self.temp_dir.take(); if let Some(node) = self.execute_command()? { node.run() } else { Ok(()) } } fn genesis_config( run_config: &NodeRunConfig, default_instances: Vec<InstanceInitParams>, ) -> GenesisConfig { let mut builder = GenesisConfigBuilder::with_consensus_config( run_config.node_config.public_config.consensus.clone(), ); // Add builtin services to genesis config. builder = builder .with_artifact(Supervisor.artifact_id()) .with_instance(Self::supervisor_service(&run_config)) .with_artifact(ExplorerFactory.artifact_id()) .with_instance(ExplorerFactory.default_instance()); // Add default instances. for instance in default_instances { builder = builder .with_artifact(instance.instance_spec.artifact.clone()) .with_instance(instance) } builder.build() } fn supervisor_service(run_config: &NodeRunConfig) -> InstanceInitParams { let mode = run_config .node_config .public_config .general .supervisor_mode .clone(); Supervisor::builtin_instance(SupervisorConfig { mode }) } }
{ Command::from_iter(args) }
conditional_block
lib.rs
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Helper crate for secure and convenient configuration of the Exonum nodes.
//!
//! `exonum-cli` supports a multi-stage configuration process designed with safety in mind. It
//! involves 4 steps (or stages) and allows configuring and running multiple blockchain nodes
//! without the need to exchange private keys between administrators.
//!
//! # How to Run the Network
//!
//! 1. Generate the common (template) part of the nodes configuration using the
//! `generate-template` command. The generated `.toml` file must be spread among all the nodes
//! and must be used in the following configuration step.
//! 2. Generate the public and secret (private) parts of the node configuration using the
//! `generate-config` command. At this step, Exonum generates a master key from which the
//! consensus and service validator keys are derived. The master key is stored in an encrypted
//! file. The consensus secret key is used for communication between the nodes, while the
//! service secret key is used mainly to sign transactions generated by the node. Both secret
//! keys may be encrypted with a password. The public part of the node configuration must be
//! spread among all nodes, while the secret part must be accessible by the node administrator
//! only.
//! 3. Generate the final node configuration using the `finalize` command. Exonum combines the
//! secret part of the node configuration with the public configurations of every other node,
//! producing a single configuration file with all the necessary node and network settings.
//! 4. Use the `run` command and provide it with the final node configuration file produced at
//! the previous step. If the secret keys are protected with passwords, the user needs to enter
//! the passwords. The running node automatically connects to the other nodes in the network
//! using the IP addresses from the public parts of the node configurations.
//!
//! ## Additional Commands
//!
//! `exonum-cli` also supports additional CLI commands for performing maintenance actions by node
//! administrators and easier debugging.
//!
//! * The `run-dev` command automatically generates a network configuration with a single node
//! and runs it. This command can be useful for fast testing of services during development.
//! * The `maintenance` command allows clearing the node's consensus messages with `clear-cache`
//! and restarting the node's service migration script with `restart-migration`.
//!
//! ## How to Extend Parameters
//!
//! `exonum-cli` allows extending the list of parameters for any command and even adding new CLI
//! commands with arbitrary behavior. To do so, implement a structure with a list of additional
//! parameters and use the `flatten` macro attribute of the [`serde`][serde] and
//! [`structopt`][structopt] libraries.
//!
//! ```ignore
//! #[derive(Serialize, Deserialize, StructOpt)]
//! struct MyRunCommand {
//!     #[serde(flatten)]
//!     #[structopt(flatten)]
//!     default: Run,
//!     /// My awesome parameter
//!     secret_number: i32,
//! }
//! ```
//!
//! 
You can also create your own list of commands by implementing an enum with a similar principle:
//!
//! ```ignore
//! #[derive(StructOpt)]
//! enum MyCommands {
//!     #[structopt(name = "run")]
//!     DefaultRun(Run),
//!     #[structopt(name = "my-run")]
//!     MyAwesomeRun(MyRunCommand),
//! }
//! ```
//!
//! While implementing custom behavior for your commands, you may use the
//! [`StandardResult`](./command/enum.StandardResult.html) enum for
//! accessing the node configuration files created and filled by the standard Exonum commands.
//!
//! [serde]: https://crates.io/crates/serde
//! [structopt]: https://crates.io/crates/structopt

#![deny(missing_docs)]

pub use crate::config_manager::DefaultConfigManager;
pub use structopt;

use exonum::{
    blockchain::config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams},
    merkledb::RocksDB,
    runtime::{RuntimeInstance, WellKnownRuntime},
};
use exonum_explorer_service::ExplorerFactory;
use exonum_node::{Node, NodeBuilder as CoreNodeBuilder};
use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory};
use exonum_supervisor::{Supervisor, SupervisorConfig};
use exonum_system_api::SystemApiPlugin;
use structopt::StructOpt;
use tempfile::TempDir;

use std::{env, ffi::OsString, iter, path::PathBuf};

use crate::command::{run::NodeRunConfig, Command, ExonumCommand, StandardResult};

pub mod command;
pub mod config;
pub mod io;
pub mod password;

mod config_manager;

/// Rust-specific node builder used for constructing a node with a list
/// of provided services.
#[derive(Debug)]
pub struct NodeBuilder {
    rust_runtime: RustRuntimeBuilder,
    external_runtimes: Vec<RuntimeInstance>,
    builtin_instances: Vec<InstanceInitParams>,
    args: Option<Vec<OsString>>,
    temp_dir: Option<TempDir>,
}

impl Default for NodeBuilder {
    fn default() -> Self {
        Self::new()
    }
}

impl NodeBuilder {
    /// Creates a new builder.
    pub fn new() -> Self {
        Self {
            rust_runtime: RustRuntimeBuilder::new()
                .with_factory(Supervisor)
                .with_factory(ExplorerFactory),
            external_runtimes: vec![],
            builtin_instances: vec![],
            args: None,
            temp_dir: None,
        }
    }

    /// Creates a new builder with the provided command-line arguments. The path
    /// to the current executable **does not** need to be specified as the first argument.
    #[doc(hidden)] // unstable
    pub fn with_args<I>(args: I) -> Self
    where
        I: IntoIterator,
        I::Item: Into<OsString>,
    {
        let mut this = Self::new();
        let executable = env::current_exe()
            .map(PathBuf::into_os_string)
            .unwrap_or_else(|_| "node".into());
        let all_args = iter::once(executable)
            .chain(args.into_iter().map(Into::into))
            .collect();
        this.args = Some(all_args);
        this
    }

    /// Creates a single-node development network with default settings. The node stores
    /// its data in a temporary directory, which is automatically removed when the node is stopped.
    ///
    /// # Return value
    ///
    /// Returns an error if the temporary directory cannot be created.
    pub fn development_node() -> Result<Self, failure::Error>
/// Adds new Rust service to the list of available services. pub fn with_rust_service(mut self, service: impl ServiceFactory) -> Self { self.rust_runtime = self.rust_runtime.with_factory(service); self } /// Adds a new `Runtime` to the list of available runtimes. /// /// Note that you don't have to add the Rust runtime, since it is included by default. pub fn with_external_runtime(mut self, runtime: impl WellKnownRuntime) -> Self { self.external_runtimes.push(runtime.into()); self } /// Adds a service instance that will be available immediately after creating a genesis block. /// /// For Rust services, the service factory needs to be separately supplied /// via [`with_rust_service`](#method.with_rust_service). pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self { self.builtin_instances.push(instance.into()); self } /// Adds a default Rust service instance that will be available immediately after creating a /// genesis block. pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self { self.with_instance(service.default_instance()) .with_rust_service(service) } /// Executes a command received from the command line. /// /// # Return value /// /// Returns: /// /// - `Ok(Some(_))` if the command lead to the node creation /// - `Ok(None)` if the command executed successfully and did not lead to node creation /// - `Err(_)` if an error occurred during command execution #[doc(hidden)] // unstable pub fn execute_command(self) -> Result<Option<Node>, failure::Error> { let command = if let Some(args) = self.args { Command::from_iter(args) } else { Command::from_args() }; if let StandardResult::Run(run_config) = command.execute()? { let genesis_config = Self::genesis_config(&run_config, self.builtin_instances); let db_options = &run_config.node_config.private_config.database; let database = RocksDB::open(run_config.db_path, db_options)?; let node_config_path = run_config.node_config_path.to_string_lossy(); let config_manager = DefaultConfigManager::new(node_config_path.into_owned()); let rust_runtime = self.rust_runtime; let node_config = run_config.node_config.into(); let node_keys = run_config.node_keys; let mut node_builder = CoreNodeBuilder::new(database, node_config, node_keys) .with_genesis_config(genesis_config) .with_config_manager(config_manager) .with_plugin(SystemApiPlugin) .with_runtime_fn(|channel| rust_runtime.build(channel.endpoints_sender())); for runtime in self.external_runtimes { node_builder = node_builder.with_runtime(runtime); } Ok(Some(node_builder.build())) } else { Ok(None) } } /// Configures the node using parameters provided by user from stdin and then runs it. pub fn run(mut self) -> Result<(), failure::Error> { // Store temporary directory until the node is done. let _temp_dir = self.temp_dir.take(); if let Some(node) = self.execute_command()? { node.run() } else { Ok(()) } } fn genesis_config( run_config: &NodeRunConfig, default_instances: Vec<InstanceInitParams>, ) -> GenesisConfig { let mut builder = GenesisConfigBuilder::with_consensus_config( run_config.node_config.public_config.consensus.clone(), ); // Add builtin services to genesis config. builder = builder .with_artifact(Supervisor.artifact_id()) .with_instance(Self::supervisor_service(&run_config)) .with_artifact(ExplorerFactory.artifact_id()) .with_instance(ExplorerFactory.default_instance()); // Add default instances. 
for instance in default_instances { builder = builder .with_artifact(instance.instance_spec.artifact.clone()) .with_instance(instance) } builder.build() } fn supervisor_service(run_config: &NodeRunConfig) -> InstanceInitParams { let mode = run_config .node_config .public_config .general .supervisor_mode .clone(); Supervisor::builtin_instance(SupervisorConfig { mode }) } }
{ let temp_dir = TempDir::new()?; let mut this = Self::with_args(vec![ OsString::from("run-dev"), OsString::from("--artifacts-dir"), temp_dir.path().into(), ]); this.temp_dir = Some(temp_dir); Ok(this) }
identifier_body
lib.rs
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Helper crate for secure and convenient configuration of the Exonum nodes.
//!
//! `exonum-cli` supports a multi-stage configuration process designed with safety in mind. It
//! involves 4 steps (or stages) and allows configuring and running multiple blockchain nodes
//! without the need to exchange private keys between administrators.
//!
//! # How to Run the Network
//!
//! 1. Generate the common (template) part of the nodes configuration using the
//! `generate-template` command. The generated `.toml` file must be spread among all the nodes
//! and must be used in the following configuration step.
//! 2. Generate the public and secret (private) parts of the node configuration using the
//! `generate-config` command. At this step, Exonum generates a master key from which the
//! consensus and service validator keys are derived. The master key is stored in an encrypted
//! file. The consensus secret key is used for communication between the nodes, while the
//! service secret key is used mainly to sign transactions generated by the node. Both secret
//! keys may be encrypted with a password. The public part of the node configuration must be
//! spread among all nodes, while the secret part must be accessible by the node administrator
//! only.
//! 3. Generate the final node configuration using the `finalize` command. Exonum combines the
//! secret part of the node configuration with the public configurations of every other node,
//! producing a single configuration file with all the necessary node and network settings.
//! 4. Use the `run` command and provide it with the final node configuration file produced at
//! the previous step. If the secret keys are protected with passwords, the user needs to enter
//! the passwords. The running node automatically connects to the other nodes in the network
//! using the IP addresses from the public parts of the node configurations.
//!
//! ## Additional Commands
//!
//! `exonum-cli` also supports additional CLI commands for performing maintenance actions by node
//! administrators and easier debugging.
//!
//! * The `run-dev` command automatically generates a network configuration with a single node
//! and runs it. This command can be useful for fast testing of services during development.
//! * The `maintenance` command allows clearing the node's consensus messages with `clear-cache`
//! and restarting the node's service migration script with `restart-migration`.
//!
//! ## How to Extend Parameters
//!
//! `exonum-cli` allows extending the list of parameters for any command and even adding new CLI
//! commands with arbitrary behavior. To do so, implement a structure with a list of additional
//! parameters and use the `flatten` macro attribute of the [`serde`][serde] and
//! [`structopt`][structopt] libraries.
//!
//! ```ignore
//! #[derive(Serialize, Deserialize, StructOpt)]
//! struct MyRunCommand {
//!     #[serde(flatten)]
//!     #[structopt(flatten)]
//!     default: Run,
//!     /// My awesome parameter
//!     secret_number: i32,
//! }
//! ```
//!
//! 
You can also create your own list of commands by implementing an enum with a similar principle:
//!
//! ```ignore
//! #[derive(StructOpt)]
//! enum MyCommands {
//!     #[structopt(name = "run")]
//!     DefaultRun(Run),
//!     #[structopt(name = "my-run")]
//!     MyAwesomeRun(MyRunCommand),
//! }
//! ```
//!
//! While implementing custom behavior for your commands, you may use the
//! [`StandardResult`](./command/enum.StandardResult.html) enum for
//! accessing the node configuration files created and filled by the standard Exonum commands.
//!
//! [serde]: https://crates.io/crates/serde
//! [structopt]: https://crates.io/crates/structopt

#![deny(missing_docs)]

pub use crate::config_manager::DefaultConfigManager;
pub use structopt;

use exonum::{
    blockchain::config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams},
    merkledb::RocksDB,
    runtime::{RuntimeInstance, WellKnownRuntime},
};
use exonum_explorer_service::ExplorerFactory;
use exonum_node::{Node, NodeBuilder as CoreNodeBuilder};
use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory};
use exonum_supervisor::{Supervisor, SupervisorConfig};
use exonum_system_api::SystemApiPlugin;
use structopt::StructOpt;
use std::{env, ffi::OsString, iter, path::PathBuf}; use crate::command::{run::NodeRunConfig, Command, ExonumCommand, StandardResult}; pub mod command; pub mod config; pub mod io; pub mod password; mod config_manager; /// Rust-specific node builder used for constructing a node with a list /// of provided services. #[derive(Debug)] pub struct NodeBuilder { rust_runtime: RustRuntimeBuilder, external_runtimes: Vec<RuntimeInstance>, builtin_instances: Vec<InstanceInitParams>, args: Option<Vec<OsString>>, temp_dir: Option<TempDir>, } impl Default for NodeBuilder { fn default() -> Self { Self::new() } } impl NodeBuilder { /// Creates a new builder. pub fn new() -> Self { Self { rust_runtime: RustRuntimeBuilder::new() .with_factory(Supervisor) .with_factory(ExplorerFactory), external_runtimes: vec![], builtin_instances: vec![], args: None, temp_dir: None, } } /// Creates a new builder with the provided command-line arguments. The path /// to the current executable **does not** need to be specified as the first argument. #[doc(hidden)] // unstable pub fn with_args<I>(args: I) -> Self where I: IntoIterator, I::Item: Into<OsString>, { let mut this = Self::new(); let executable = env::current_exe() .map(PathBuf::into_os_string) .unwrap_or_else(|_| "node".into()); let all_args = iter::once(executable) .chain(args.into_iter().map(Into::into)) .collect(); this.args = Some(all_args); this } /// Creates a single-node development network with default settings. The node stores /// its data in a temporary directory, which is automatically removed when the node is stopped. /// /// # Return value /// /// Returns an error if the temporary directory cannot be created. pub fn development_node() -> Result<Self, failure::Error> { let temp_dir = TempDir::new()?; let mut this = Self::with_args(vec![ OsString::from("run-dev"), OsString::from("--artifacts-dir"), temp_dir.path().into(), ]); this.temp_dir = Some(temp_dir); Ok(this) } /// Adds new Rust service to the list of available services. pub fn with_rust_service(mut self, service: impl ServiceFactory) -> Self { self.rust_runtime = self.rust_runtime.with_factory(service); self } /// Adds a new `Runtime` to the list of available runtimes. /// /// Note that you don't have to add the Rust runtime, since it is included by default. pub fn with_external_runtime(mut self, runtime: impl WellKnownRuntime) -> Self { self.external_runtimes.push(runtime.into()); self } /// Adds a service instance that will be available immediately after creating a genesis block. /// /// For Rust services, the service factory needs to be separately supplied /// via [`with_rust_service`](#method.with_rust_service). pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self { self.builtin_instances.push(instance.into()); self } /// Adds a default Rust service instance that will be available immediately after creating a /// genesis block. pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self { self.with_instance(service.default_instance()) .with_rust_service(service) } /// Executes a command received from the command line. 
/// /// # Return value /// /// Returns: /// /// - `Ok(Some(_))` if the command lead to the node creation /// - `Ok(None)` if the command executed successfully and did not lead to node creation /// - `Err(_)` if an error occurred during command execution #[doc(hidden)] // unstable pub fn execute_command(self) -> Result<Option<Node>, failure::Error> { let command = if let Some(args) = self.args { Command::from_iter(args) } else { Command::from_args() }; if let StandardResult::Run(run_config) = command.execute()? { let genesis_config = Self::genesis_config(&run_config, self.builtin_instances); let db_options = &run_config.node_config.private_config.database; let database = RocksDB::open(run_config.db_path, db_options)?; let node_config_path = run_config.node_config_path.to_string_lossy(); let config_manager = DefaultConfigManager::new(node_config_path.into_owned()); let rust_runtime = self.rust_runtime; let node_config = run_config.node_config.into(); let node_keys = run_config.node_keys; let mut node_builder = CoreNodeBuilder::new(database, node_config, node_keys) .with_genesis_config(genesis_config) .with_config_manager(config_manager) .with_plugin(SystemApiPlugin) .with_runtime_fn(|channel| rust_runtime.build(channel.endpoints_sender())); for runtime in self.external_runtimes { node_builder = node_builder.with_runtime(runtime); } Ok(Some(node_builder.build())) } else { Ok(None) } } /// Configures the node using parameters provided by user from stdin and then runs it. pub fn run(mut self) -> Result<(), failure::Error> { // Store temporary directory until the node is done. let _temp_dir = self.temp_dir.take(); if let Some(node) = self.execute_command()? { node.run() } else { Ok(()) } } fn genesis_config( run_config: &NodeRunConfig, default_instances: Vec<InstanceInitParams>, ) -> GenesisConfig { let mut builder = GenesisConfigBuilder::with_consensus_config( run_config.node_config.public_config.consensus.clone(), ); // Add builtin services to genesis config. builder = builder .with_artifact(Supervisor.artifact_id()) .with_instance(Self::supervisor_service(&run_config)) .with_artifact(ExplorerFactory.artifact_id()) .with_instance(ExplorerFactory.default_instance()); // Add default instances. for instance in default_instances { builder = builder .with_artifact(instance.instance_spec.artifact.clone()) .with_instance(instance) } builder.build() } fn supervisor_service(run_config: &NodeRunConfig) -> InstanceInitParams { let mode = run_config .node_config .public_config .general .supervisor_mode .clone(); Supervisor::builtin_instance(SupervisorConfig { mode }) } }
use tempfile::TempDir;
random_line_split
lib.rs
// Copyright 2020 The Exonum Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Helper crate for secure and convenient configuration of the Exonum nodes.
//!
//! `exonum-cli` supports a multi-stage configuration process designed with safety in mind. It
//! involves 4 steps (or stages) and allows configuring and running multiple blockchain nodes
//! without the need to exchange private keys between administrators.
//!
//! # How to Run the Network
//!
//! 1. Generate the common (template) part of the nodes configuration using the
//! `generate-template` command. The generated `.toml` file must be spread among all the nodes
//! and must be used in the following configuration step.
//! 2. Generate the public and secret (private) parts of the node configuration using the
//! `generate-config` command. At this step, Exonum generates a master key from which the
//! consensus and service validator keys are derived. The master key is stored in an encrypted
//! file. The consensus secret key is used for communication between the nodes, while the
//! service secret key is used mainly to sign transactions generated by the node. Both secret
//! keys may be encrypted with a password. The public part of the node configuration must be
//! spread among all nodes, while the secret part must be accessible by the node administrator
//! only.
//! 3. Generate the final node configuration using the `finalize` command. Exonum combines the
//! secret part of the node configuration with the public configurations of every other node,
//! producing a single configuration file with all the necessary node and network settings.
//! 4. Use the `run` command and provide it with the final node configuration file produced at
//! the previous step. If the secret keys are protected with passwords, the user needs to enter
//! the passwords. The running node automatically connects to the other nodes in the network
//! using the IP addresses from the public parts of the node configurations.
//!
//! ## Additional Commands
//!
//! `exonum-cli` also supports additional CLI commands for performing maintenance actions by node
//! administrators and easier debugging.
//!
//! * The `run-dev` command automatically generates a network configuration with a single node
//! and runs it. This command can be useful for fast testing of services during development.
//! * The `maintenance` command allows clearing the node's consensus messages with `clear-cache`
//! and restarting the node's service migration script with `restart-migration`.
//!
//! ## How to Extend Parameters
//!
//! `exonum-cli` allows extending the list of parameters for any command and even adding new CLI
//! commands with arbitrary behavior. To do so, implement a structure with a list of additional
//! parameters and use the `flatten` macro attribute of the [`serde`][serde] and
//! [`structopt`][structopt] libraries.
//!
//! ```ignore
//! #[derive(Serialize, Deserialize, StructOpt)]
//! struct MyRunCommand {
//!     #[serde(flatten)]
//!     #[structopt(flatten)]
//!     default: Run,
//!     /// My awesome parameter
//!     secret_number: i32,
//! }
//! ```
//!
//! 
You can also create your own list of commands by implementing an enum with a similar principle:
//!
//! ```ignore
//! #[derive(StructOpt)]
//! enum MyCommands {
//!     #[structopt(name = "run")]
//!     DefaultRun(Run),
//!     #[structopt(name = "my-run")]
//!     MyAwesomeRun(MyRunCommand),
//! }
//! ```
//!
//! While implementing custom behavior for your commands, you may use the
//! [`StandardResult`](./command/enum.StandardResult.html) enum for
//! accessing the node configuration files created and filled by the standard Exonum commands.
//!
//! [serde]: https://crates.io/crates/serde
//! [structopt]: https://crates.io/crates/structopt

#![deny(missing_docs)]

pub use crate::config_manager::DefaultConfigManager;
pub use structopt;

use exonum::{
    blockchain::config::{GenesisConfig, GenesisConfigBuilder, InstanceInitParams},
    merkledb::RocksDB,
    runtime::{RuntimeInstance, WellKnownRuntime},
};
use exonum_explorer_service::ExplorerFactory;
use exonum_node::{Node, NodeBuilder as CoreNodeBuilder};
use exonum_rust_runtime::{DefaultInstance, RustRuntimeBuilder, ServiceFactory};
use exonum_supervisor::{Supervisor, SupervisorConfig};
use exonum_system_api::SystemApiPlugin;
use structopt::StructOpt;
use tempfile::TempDir;

use std::{env, ffi::OsString, iter, path::PathBuf};

use crate::command::{run::NodeRunConfig, Command, ExonumCommand, StandardResult};

pub mod command;
pub mod config;
pub mod io;
pub mod password;

mod config_manager;

/// Rust-specific node builder used for constructing a node with a list
/// of provided services.
#[derive(Debug)]
pub struct NodeBuilder {
    rust_runtime: RustRuntimeBuilder,
    external_runtimes: Vec<RuntimeInstance>,
    builtin_instances: Vec<InstanceInitParams>,
    args: Option<Vec<OsString>>,
    temp_dir: Option<TempDir>,
}

impl Default for NodeBuilder {
    fn default() -> Self {
        Self::new()
    }
}

impl NodeBuilder {
    /// Creates a new builder.
    pub fn new() -> Self {
        Self {
            rust_runtime: RustRuntimeBuilder::new()
                .with_factory(Supervisor)
                .with_factory(ExplorerFactory),
            external_runtimes: vec![],
            builtin_instances: vec![],
            args: None,
            temp_dir: None,
        }
    }

    /// Creates a new builder with the provided command-line arguments. The path
    /// to the current executable **does not** need to be specified as the first argument.
    #[doc(hidden)] // unstable
    pub fn with_args<I>(args: I) -> Self
    where
        I: IntoIterator,
        I::Item: Into<OsString>,
    {
        let mut this = Self::new();
        let executable = env::current_exe()
            .map(PathBuf::into_os_string)
            .unwrap_or_else(|_| "node".into());
        let all_args = iter::once(executable)
            .chain(args.into_iter().map(Into::into))
            .collect();
        this.args = Some(all_args);
        this
    }

    /// Creates a single-node development network with default settings. The node stores
    /// its data in a temporary directory, which is automatically removed when the node is stopped.
    ///
    /// # Return value
    ///
    /// Returns an error if the temporary directory cannot be created.
    pub fn development_node() -> Result<Self, failure::Error> {
        let temp_dir = TempDir::new()?;
        let mut this = Self::with_args(vec![
            OsString::from("run-dev"),
            OsString::from("--artifacts-dir"),
            temp_dir.path().into(),
        ]);
        this.temp_dir = Some(temp_dir);
        Ok(this)
    }

    /// Adds new Rust service to the list of available services.
    pub fn with_rust_service(mut self, service: impl ServiceFactory) -> Self {
        self.rust_runtime = self.rust_runtime.with_factory(service);
        self
    }

    /// Adds a new `Runtime` to the list of available runtimes.
    ///
    /// Note that you don't have to add the Rust runtime, since it is included by default.
    
pub fn with_external_runtime(mut self, runtime: impl WellKnownRuntime) -> Self { self.external_runtimes.push(runtime.into()); self } /// Adds a service instance that will be available immediately after creating a genesis block. /// /// For Rust services, the service factory needs to be separately supplied /// via [`with_rust_service`](#method.with_rust_service). pub fn with_instance(mut self, instance: impl Into<InstanceInitParams>) -> Self { self.builtin_instances.push(instance.into()); self } /// Adds a default Rust service instance that will be available immediately after creating a /// genesis block. pub fn with_default_rust_service(self, service: impl DefaultInstance) -> Self { self.with_instance(service.default_instance()) .with_rust_service(service) } /// Executes a command received from the command line. /// /// # Return value /// /// Returns: /// /// - `Ok(Some(_))` if the command lead to the node creation /// - `Ok(None)` if the command executed successfully and did not lead to node creation /// - `Err(_)` if an error occurred during command execution #[doc(hidden)] // unstable pub fn execute_command(self) -> Result<Option<Node>, failure::Error> { let command = if let Some(args) = self.args { Command::from_iter(args) } else { Command::from_args() }; if let StandardResult::Run(run_config) = command.execute()? { let genesis_config = Self::genesis_config(&run_config, self.builtin_instances); let db_options = &run_config.node_config.private_config.database; let database = RocksDB::open(run_config.db_path, db_options)?; let node_config_path = run_config.node_config_path.to_string_lossy(); let config_manager = DefaultConfigManager::new(node_config_path.into_owned()); let rust_runtime = self.rust_runtime; let node_config = run_config.node_config.into(); let node_keys = run_config.node_keys; let mut node_builder = CoreNodeBuilder::new(database, node_config, node_keys) .with_genesis_config(genesis_config) .with_config_manager(config_manager) .with_plugin(SystemApiPlugin) .with_runtime_fn(|channel| rust_runtime.build(channel.endpoints_sender())); for runtime in self.external_runtimes { node_builder = node_builder.with_runtime(runtime); } Ok(Some(node_builder.build())) } else { Ok(None) } } /// Configures the node using parameters provided by user from stdin and then runs it. pub fn run(mut self) -> Result<(), failure::Error> { // Store temporary directory until the node is done. let _temp_dir = self.temp_dir.take(); if let Some(node) = self.execute_command()? { node.run() } else { Ok(()) } } fn genesis_config( run_config: &NodeRunConfig, default_instances: Vec<InstanceInitParams>, ) -> GenesisConfig { let mut builder = GenesisConfigBuilder::with_consensus_config( run_config.node_config.public_config.consensus.clone(), ); // Add builtin services to genesis config. builder = builder .with_artifact(Supervisor.artifact_id()) .with_instance(Self::supervisor_service(&run_config)) .with_artifact(ExplorerFactory.artifact_id()) .with_instance(ExplorerFactory.default_instance()); // Add default instances. for instance in default_instances { builder = builder .with_artifact(instance.instance_spec.artifact.clone()) .with_instance(instance) } builder.build() } fn
(run_config: &NodeRunConfig) -> InstanceInitParams { let mode = run_config .node_config .public_config .general .supervisor_mode .clone(); Supervisor::builtin_instance(SupervisorConfig { mode }) } }
supervisor_service
identifier_name
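The crate docs repeated in the records above describe a four-stage setup (`generate-template`, `generate-config`, `finalize`, `run`). Below is a rough sketch of driving those stages from a script, here in Python via subprocess; the subcommand names come from the docs, while the binary name, flags, and file paths are hypothetical placeholders rather than the crate's actual CLI surface.

from subprocess import check_call

CLI = './my-exonum-node'  # hypothetical binary built on top of exonum-cli

# 1. Common (template) part of the configuration, shared among all nodes.
check_call([CLI, 'generate-template', 'common.toml'])

# 2. Public and secret parts for this node (may prompt for key passwords).
check_call([CLI, 'generate-config', 'common.toml', 'node1/'])

# 3. Combine this node's secret part with every node's public part.
check_call([CLI, 'finalize', 'node1/sec.toml', 'node1/node.toml'])

# 4. Run the node with the finalized configuration file.
check_call([CLI, 'run', 'node1/node.toml'])

Check the `--help` output of a binary built with `exonum-cli` for the exact arguments each subcommand expects.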
main.py
"""This program displays a customizable list of items by priority value, with priority 1 being the highest. Allows the user to add, edit, mark complete, show completed (hidden), and remove items. Stores the list of items in a .txt file located where this program's main.py file is. All changes are automatically saved to the .txt file. Also includes a fun technical knowledge demonstration using numbers and text responses. The program will create a new save file if none exists, and prompts for save file overwrite if data cannot be read successfully. Menu navigation is accomplished through numeric inputs due to the text-only interface and tedium of typing out each word accurately and repeatedly.""" __author__ = 'Jordan Kooyman' # 1/26/21 - 4/15/2021 To-Do List Program - Integration Project for COP 1500 # Spring 2021 # Configurable settings saved to a separate file (?) # Ability to load a different data or config file (?) # Color code items by group (?) # Add a basic calculator to meet math (and string?) command requirements (?) # TODO: Implement a group system that shows all groups combined, just one # group, or all categorized by group, and group names - be able to change # group names (new function) - all functions support groups (individual or # combined) import random # Random number generation used as random verification number when # overwriting the save file in the event of a failure to load from the save # file class ListItem: # Create a class object that will store the data for each # entry in the list (custom variable) """A custom object that stores four pieces of data representing each entry in the todo list. Contains the text of the todo list entry, the priority of the entry, the group code (NYI), and the visibility of the entry""" def __init__(self, text, priority, group, visible): # From w3schools.com self.text = text self.priority = priority self.group = group self.visible = visible def concept_demonstration(): """The purpose of this function is to prompt the user for numbers and strings and manipulate them to demonstrate programming fluency with string and integer operations. 
:returns nothing""" number = clean_input("Please enter a positive number") number2 = clean_input("Please enter a number") while number2 == 0: # Rejects a 0 if it is input as the second number print("Error: Cannot Divide by 0") number2 = clean_input("Please enter a different number") color = input("Please enter a color\n") thing = input("Please enter a thing\n") thing2 = thing + ' ' # Adding space so that when thing is repeated, it # has a space in between # Raise the first number to the second number location = input("Please enter a location\n") print(str(number) + " raised to the power of " + str(number2) + " is " + str(number ** number2)) # Multiply the two numbers print("{0} multiplied by {1} is {2}".format(str(number), str(number2), str(number * number2))) # Divide the first number by the second number print("{0} divided by {1} is {2}".format(str(number), str(number2), str(number / number2))) # Find the modulus of the two numbers print("The remainder from dividing {0} by {1} is {2}".format(str(number), str(number2), str(number % number2)) ) # Divide the first number by the second and round it down (floor it) print("{0} divided by {1} rounded down is {2}".format(str(number), str(number2), str(number // number2 ))) # Add the two numbers print("{0} plus {1} is {2}".format(str(number), str(number2), str(number + number2))) # Subtract the second number from the first number print("{0} minus {1} is {2}".format(str(number), str(number2), str(number - number2))) if number > 1: # if the first number entered is greater than 1 print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing, location, thing2 * int(number - 1) + thing)) # Combine two strings with + (no added space), repeat a string x # number of times with * (must use an integer) (I have the minus 1 # and + thing to get the spacing to look proper and still repeat # number amount of times) -if a negative number is used when # multiplying a string, it does nothing (but does not crash) - but # it is still handled in the other statement with some added user # shaming elif number < 0: # if the first number entered is negative print("The {0} at {1} yelled '{2}'\nYou entered a negative number " "when a positive number was requested, so you made the {3} " "mute. Good Job.".format(color + ' ' + thing, location, thing2 * int(number), thing)) # Same as above, expect that it will print nothing in the yelled # section if the first number entered is negative else: # if the first number entered is 0 or 1 (because of the int() # function removing a decimal) print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing, location, thing * int(number))) # this is to prevent errant spaces or showing the phrase too many times return def cascade_list(priority_to_cascade_from, todo_list): """The purpose of this function is to decrement the priority number of every item in the provided todo list greater than the priority number provided. :param priority_to_cascade_from: the number that is inserted by moving everything equal to or greater than up by one :param todo_list: the list of ListItem objects to check in""" for item in todo_list: if item.priority >= priority_to_cascade_from: item.priority += 1 return def check_priority_overlap(priority_to_check, todo_list): """The purpose of this function is to check if the user's priority number input overlaps with a priority number already in the list, and if it does, prompts the user whether they want to keep it, change it, or move everything in the list that has a larger priority value up by one. 
    :param priority_to_check: the number to check for overlap with
    :param todo_list: the list of ListItem objects to check in
    :returns the priority value, either changed or the original input"""
    overlap = False
    for item in todo_list:
        if item.priority == priority_to_check:
            overlap = True
    if overlap:
        answer = 0
        while answer > 3 or answer < 1:
            answer = clean_input("The priority number you entered overlaps "
                                 "with another entry's priority. Enter:\n1 to "
                                 "change priority number\n2 to leave as is "
                                 "with overlap\n3 to push all priority numbers"
                                 " below this entry down by 1")
            if answer > 3 or answer < 1:
                print("Invalid Option Selected\nPlease Try Again")
        if answer == 1:
            priority_to_check = check_priority_overlap(
                int(clean_input("New Priority:")), todo_list)
            # change the priority value input
        elif answer == 3:
            cascade_list(priority_to_check, todo_list)
    return priority_to_check


def sorting(list_object):  # Takes in a ListItem object and returns the
    # priority value - from w3schools.com
    """The purpose of this function is to take in a ListItem custom object
    and return the priority value stored in it to be used in sorting.

    :param list_object: one ListItem object
    :returns the priority value stored in the ListItem object"""
    return list_object.priority


def print_list(save_file_location, my_list, to_save=False, show_hidden=False):
    # Prints out the To-Do list from the common list variable and saves list
    # to the .txt file
    """The purpose of this function is to take in the location of the save
    file, the todo list variable, whether or not to save, and whether or not
    to show hidden and print out the todo list variable, skipping items
    marked as hidden unless it is told to show hidden, and saving the todo
    list to the file in the save file location if it is told to save.

    :param save_file_location: the file path to get to the .txt save file
    :param my_list: the list of ListItem objects to check in
    :param to_save: whether or not to save the list of items to the file,
    default is false
    :param show_hidden: whether or not to display the hidden list items,
    default is false
    :returns nothing"""
    my_list.sort(key=sorting)  # Uses a custom function to be able to get the
    # right value to sort by
    print("To-Do:")
    for entry in my_list:  # Walk over every entry in the (sorted) list
        if entry.visible and not show_hidden:  # Only print visible items
            # when show_hidden is false
            print(entry.priority, entry.text, sep='.\t')
        elif show_hidden:  # Print everything when show_hidden is true
            if entry.visible:
                print(entry.priority, entry.text, sep='.\t')
            else:
                print("{0}.~\t{1}".format(entry.priority, entry.text)
                      )  # Indicate hidden items with a ~
        # Printing the item priority with a dot, then the item, with a tab
        # separating them
    if to_save:
        save_list(my_list, save_file_location)
    return


def divider(size=100):  # Draws a dividing line to go between sections
    # (default 100 characters long)
    """The purpose of this function is to print a dashed line across the
    screen with a specified length.
:param size: how many characters long the line should be, default is 100 :returns nothing""" for i in range(size): print('-', end='') # Prints out a single dash, no newline afterwards # (the end= sets the last character to blank print('') # Print out a newline (using the default ending of a print # statement being a newline return def clean_input(prompt='Error'): # A special input function that will reject a # user's input of text when a number is requested -- if no prompt is # specified in the program, it will display "Error" """The purpose of this function is to prompt the user for a numerical input and only accept a numerical input, rejects no input and text input. :param prompt: the prompt the user sees, default is Error :returns the user input as a float""" text = True phrase = '0' while text: phrase = input(prompt + '\n') try: # Adapted from an example in the ThinkPython textbook (15.7) - # Checks whether the input is a number, positive or negative. If # not, rejects the input and user gets to try again float(phrase) text = False except ValueError: print("Error: Non-Numeric Entry Detected") # if phrase.isnumeric(): # Checks for a positive number (negative # rejected as well as text) - replaced with superior form from textbook # example # return float(phrase) # Return the number the user entered # else: # print("Error: Non-Numeric Entry Detected") return float(phrase) # Return the number the user entered def load_from_file(save_location): # This is a function for readability - # opens txt file in read mode and loads it """The purpose of this function is to open the .txt save file and read the contents into memory in the form of a list of custom ListItem objects. :param save_location: the location the save file is stored in :returns a list of ListItem objects that is populated with the data from the save file""" # into an array (list) of ListItem variables data_file_r = open(save_location, "r") # Open txt file in read mode list_item = ["Text", -1, 2, True] # Item, Item Priority, group, is visible todo = [] # make a list of lists temp = 1 # Temporary counter variable to reconstruct lists from .txt file line_counter = 1 try: for item in data_file_r: # loop through each line in the file, one at # a time - from w3schools.com if (line_counter - 1) % 5 != 0 and line_counter > 0: cleaned_item = "" for character_index in range(len( item)): # Loop through each character in the extracted # string if character_index != len( item) - 1: # if it is not the last character, add # it to the cleaned string cleaned_item += item[character_index] # Add every character to a # but \n if temp == 1: # Item Text list_item[0] = cleaned_item temp = 2 elif temp == 2: # Item Priority list_item[1] = int(cleaned_item) temp = 3 elif temp == 3: # Item Group list_item[2] = int(cleaned_item) temp = 4 elif temp == 4: # Is Visible if cleaned_item == "False": list_item[3] = False else: # Assume the item is visible if the text is not # False list_item[3] = True todo.insert(0, ListItem(list_item[0], list_item[1], list_item[2], list_item[3])) temp = 1 else: # If some error occurred and a condition outside of the # possible four is met, restart temp = 1 line_counter += 1 except ValueError: print("An error has occurred trying to load the file") result = int(clean_input( "Please enter a 2 to overwrite the current save file and start " "over or any other number to exit the program")) if result == 2: key = random.randint(2, 9) # Generate a random integer between 2 # and 9 to be used as a second dynamic check if key == 2: key = 1 # 
def load_from_file(save_location):  # This is a function for readability -
    # opens the txt file in read mode and loads it into an array (list) of
    # ListItem variables
    """The purpose of this function is to open the .txt save file and read
    the contents into memory in the form of a list of custom ListItem
    objects.
    :param save_location: the location the save file is stored in
    :returns a list of ListItem objects that is populated with the data
    from the save file"""
    data_file_r = open(save_location, "r")  # Open txt file in read mode
    list_item = ["Text", -1, 2, True]  # Item, Item Priority, group, is visible
    todo = []  # make a list to hold the ListItem objects
    temp = 1  # Temporary counter variable to reconstruct lists from .txt file
    line_counter = 1
    try:
        for item in data_file_r:  # loop through each line in the file, one
            # at a time - from w3schools.com
            if (line_counter - 1) % 5 != 0 and line_counter > 0:
                cleaned_item = ""
                for character_index in range(len(item)):
                    # Loop through each character in the extracted string
                    if character_index != len(item) - 1:
                        # if it is not the last character (the trailing
                        # newline), add it to the cleaned string
                        cleaned_item += item[character_index]
                if temp == 1:  # Item Text
                    list_item[0] = cleaned_item
                    temp = 2
                elif temp == 2:  # Item Priority
                    list_item[1] = int(cleaned_item)
                    temp = 3
                elif temp == 3:  # Item Group
                    list_item[2] = int(cleaned_item)
                    temp = 4
                elif temp == 4:  # Is Visible
                    if cleaned_item == "False":
                        list_item[3] = False
                    else:  # Assume the item is visible if the text is not
                        # False
                        list_item[3] = True
                    todo.insert(0, ListItem(list_item[0], list_item[1],
                                            list_item[2], list_item[3]))
                    temp = 1
                else:  # If some error occurred and a condition outside of
                    # the possible four is met, restart
                    temp = 1
            line_counter += 1
    except ValueError:
        print("An error has occurred trying to load the file")
        result = int(clean_input(
            "Please enter a 2 to overwrite the current save file and start "
            "over or any other number to exit the program"))
        if result == 2:
            key = random.randint(2, 9)  # Generate a random integer between
            # 2 and 9 to be used as a second dynamic check
            if key == 2:
                key = 1  # If the random number is 2, set it to 1 so that
                # the same number (2) cannot be used as the verification
                # number
            result2 = int(clean_input("Are you sure you want to delete all "
                                      "of your saved data\nEnter {0} to "
                                      "proceed, or anything else to "
                                      "cancel".format(str(key))))
            if result2 == key:
                data_file_w = open(save_location, "w")
                data_file_w.close()
                todo = []
                print("Save Data Erased")
                return todo  # Return an empty list if the file load failed
            else:
                print("Program Exiting")
                quit(1)
        else:
            print("Program Exiting")
            quit(1)  # Exit the program with the exit code of 1
    data_file_r.close()
    # All the list functions above referenced from w3schools.com. What is
    # happening above: opening the file, initializing a list to hold all
    # four pieces of data, then after pulling the data from the file and
    # storing it in the list, it is copied (not referenced) into my main
    # list of ListItem objects
    return todo


def save_list(todo_list, save_location):
    """The purpose of this function is to save a list of ListItem objects to
    a specified location in a .txt file with the first line of the document
    being an explanation of the file format being used.
    :param todo_list: the list of ListItem objects to save to the save file
    :param save_location: the location to create or overwrite the save file
    :returns nothing"""
    data_file_w = open(save_location, "w")  # open the save file and clear
    # the data from it
    data_file_w.write("Warning: The Todo-List Program will not be able to "
                      "load this save file if it is incorrectly modified. "
                      "Modify at your own risk. The structure is Entry "
                      "Text, Entry Priority as a number, Entry Group as a "
                      "number (Not Yet Utilized, but necessary), and Entry "
                      "Visibility as a boolean, each on a separate line, a "
                      "single line gap in between, and the "
                      "very first line is skipped\n")
    for item in todo_list:
        data_file_w.write("{0}\n{1}\n{2}\n{3}\n\n".format(item.text,
                                                          str(item.priority),
                                                          str(item.group),
                                                          str(item.visible)))
    data_file_w.close()
    return
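# For reference, a sketch of the on-disk layout save_list produces and
# load_from_file expects: the warning header on line one, then four lines
# per entry (text, priority, group, visibility) followed by one blank line.
# The two entries shown here are invented for illustration:
#
#   Warning: The Todo-List Program will not be able to load this save ...
#   Buy groceries
#   1
#   0
#   True
#
#   Call the bank
#   2
#   0
#   False
#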
def add_item(todo_list):
    """The purpose of this function is to prompt the user for the two fields
    of necessary information to make a new entry in the todo list, the item
    name and priority, checking if the priority overlaps with an existing
    entry in the todo list.
    :param todo_list: the list of ListItem objects to add a new ListItem
    object to
    :returns nothing"""
    text = input("Please enter the name of the new item\n")
    priority = check_priority_overlap(
        int(clean_input("Please enter the priority of this item")),
        todo_list)
    # group = int(clean_input("Please enter the group number of this item"))
    group = 0  # Set the group value to zero, group system NYI
    visible = True
    todo_list.insert(0, ListItem(text, priority, group, visible))  # Join
    # the inputs into a new ListItem added to the front of the overall list
    return


def select_item(todo_list, prompt='Error'):  # Ask the user
    # which item from the list is to be modified
    """The purpose of this function is to display a list of all items in the
    todo list and number each individually to allow the user to select an
    item to modify or delete. Hidden items are shown wrapped in ~ markers
    rather than skipped, so the numbering stays continuous
    :param todo_list: the list of ListItem objects to display
    :param prompt: the prompt to display to the user, default is Error
    :returns the user selected item's index in a computer friendly form (
    starting at 0 instead of 1)"""
    valid = False
    index = 0
    while not valid:
        counter = 1  # counter for index printing
        for item in todo_list:
            if item.visible:
                print(counter, item.text, sep='\t')
            else:
                print(counter, "~ {0} ~".format(item.text), sep='\t')
            counter += 1
            # Print the item number, then the item, with a tab separating
            # them
        index = int(clean_input(prompt))
        if index < counter:
            valid = True
        else:
            print("Invalid Input: Number is too big")
    return index - 1


def remove_item(todo_list):
    """The purpose of this function is to delete a ListItem object from a
    list of ListItem objects by prompting the user for the index and
    verifying they want to delete the item.
    :param todo_list: the list of ListItem objects from which to remove one
    object
    :returns nothing"""
    item = select_item(todo_list, "Please enter the item number you wish to "
                                  "remove\nEnter a negative number or zero "
                                  "to cancel")
    if item >= 0:  # 0, not 1, because the index returned is shifted to be
        # computer friendly
        todo_list.pop(item)
    return


def mark_complete(todo_list):
    """The purpose of this function is to mark a selected ListItem object as
    hidden and not to be printed unless specified, apart from selecting
    items.
    :param todo_list: the list of ListItem objects to modify
    :returns nothing"""
    item = select_item(todo_list, "Please enter the item number you wish to "
                                  "Mark Completed and hide from the "
                                  "list\nEnter a negative number or zero to "
                                  "cancel")
    if item >= 0:
        todo_list[item].visible = False
    return
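# Illustrative sketch only (this helper is hypothetical and never called by
# the program): select_item prints 1-based item numbers but returns a
# 0-based index, and a cancel entry of zero or a negative number comes back
# negative, which is why every caller guards with `if item >= 0`.
def _select_item_sketch(todo_list):
    item = select_item(todo_list, "Pick an item number (0 to cancel)")
    if item >= 0:
        return todo_list[item]
    return None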
def edit_item(todo_list):
    """The purpose of this function is to edit a ListItem object in the list
    of ListItem objects, changing either the name or priority
    :param todo_list: the list of ListItem objects that gets one object
    modified
    :returns nothing"""
    item = select_item(todo_list, "Please enter the item number you wish to "
                                  "edit\nEnter a negative number or zero to "
                                  "cancel")
    if item >= 0:
        while True:
            value = clean_input("Which value would you like to edit? Enter:"
                                "\n1 for the Item Text (Currently: {0})\n2 "
                                "for the Item Priority (Currently: {1})\n3 "
                                "to Cancel and Exit".format(
                                    todo_list[item].text,
                                    str(todo_list[item].priority)))
            if value == 1:  # Item Text Change
                print("The Current Text is: {0}".format(todo_list[item].text))
                todo_list[item].text = input("New Text:\n")
            elif value == 2:  # Item Priority Change
                print("The Current Priority is: {0}".format(
                    str(todo_list[item].priority)))
                todo_list[item].priority = check_priority_overlap(
                    int(clean_input("New Priority:")), todo_list)
            # elif value == 3:  # Item Group Change
            #     print(f"The Current Group is: {todo_list[item].group}")
            #     todo_list[item].group = int(clean_input("New Group Number:"))
            elif value == 3:  # Exit Changing Menu
                break
            else:
                print("Invalid Input - Please Try Again")
    return


def check_list_status(todo_list):  # Checks if the list is completely hidden
    # (2), completely empty (1), or neither (0)
    """The purpose of this function is to check whether there are visible
    items in the list, the entire list is hidden, or the list contains no
    more ListItem objects
    :param todo_list: the list of ListItem objects to check
    :returns which condition using integer codes"""
    if len(todo_list) == 0:
        state = 1  # Empty List
    else:
        state = 2  # Entirely Hidden List
        for item_index in range(len(todo_list)):
            if todo_list[item_index].visible:  # If an item is visible, then
                # they are not all hidden
                state = 0  # Neither
    return state
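# Illustrative sketch of the integer codes returned by check_list_status
# (the ListItem values here are made up): 1 for an empty list, 2 when every
# item is hidden, and 0 when at least one item is visible.
def _check_list_status_sketch():
    assert check_list_status([]) == 1
    hidden = ListItem("done", 1, 0, False)
    assert check_list_status([hidden]) == 2
    visible = ListItem("todo", 2, 0, True)
    assert check_list_status([hidden, visible]) == 0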
def menu_loop(todo_list, save_file_location):
    """The purpose of this function is to repeatedly display the todo list
    and the user prompts menu until the program is closed
    :param todo_list: the list of ListItem objects to display or modify
    :param save_file_location: where the .txt save file is located for saving
    :returns nothing"""
    show_hidden = False
    selection = 0
    invalid_input = False
    while selection != 6:
        if invalid_input:
            invalid_input = False
        else:
            print_list(save_file_location, todo_list, True, show_hidden)
            divider(137 + 17)  # Length of the prompt statement below
        list_status = check_list_status(todo_list)
        if list_status == 0:  # No Issues
            selection = int(clean_input("Please enter: 1 for Add Item, 2 for "
                                        "Remove Item, 3 for Edit Item, "
                                        "4 for Mark Item Complete, "
                                        "5 for Toggle Hidden, and 6 for "
                                        "Exit, 7 for Concept "
                                        "Demonstration\n"))
        elif list_status == 1:  # Empty List - No Remove, Edit, Mark, or
            # Toggle
            selection = int(clean_input("Please enter: 1 for Add Item, and 6 "
                                        "for Exit, 7 for Concept "
                                        "Demonstration\n"))
        else:  # Entirely Hidden List
            selection = int(clean_input("Please enter: 1 for Add Item, 5 for "
                                        "Toggle Hidden, and 6 for Exit, "
                                        "7 for Concept Demonstration\n"))
        # Uses the clean_input function above to get a number from the
        # user, converting it to an int so a decimal won't register as an
        # invalid input in the following steps
        print("")  # Blank print statement to add an extra blank line after
        # user input before displaying the response
        if selection == 1:  # Add Item - modify the list variable, then save
            # to file
            add_item(todo_list)
        elif selection == 2:  # Remove Item - modify the list variable, then
            # save to file
            if list_status == 0:
                remove_item(todo_list)
            elif list_status == 2:
                print("Invalid Command: The Todo List has no visible items "
                      "to remove")
            else:
                print("Invalid Command: The Todo List has no items to remove")
        elif selection == 3:  # Edit Item - modify the list variable, then
            # save to file
            if list_status == 0:
                edit_item(todo_list)
            elif list_status == 2:
                print("Invalid Command: The Todo List has no visible items "
                      "to edit")
            else:
                print("Invalid Command: The Todo List has no items to edit")
        elif selection == 4:  # Mark Item Complete - modify the list
            # variable, then save to file
            if list_status == 0:
                mark_complete(todo_list)
            elif list_status == 2:
                print("Invalid Command: The Todo List has no visible items "
                      "to mark complete")
            else:
                print("Invalid Command: The Todo List has no items to mark "
                      "complete")
        elif selection == 5:  # Toggle Hidden - flip the display flag used by
            # print_list
            if list_status == 0 or list_status == 2:
                if show_hidden:
                    print("No longer showing hidden items")
                    show_hidden = False
                else:
                    print("Now showing hidden items")
                    show_hidden = True
            else:
                print("Invalid Command: The Todo List has no items to show "
                      "or hide")
        elif selection == 6:  # Exit Program
            print("Now Closing")
        elif selection == 7:  # Extra section to demonstrate proficiency with
            # topics covered in class - Sprint 1
            concept_demonstration()
        else:
            invalid_input = True
            print("Invalid Input\nPlease Try Again")


def main():
    """The purpose of this function is to ensure the save file exists at the
    specified save file location, load the save file into memory, display a
    welcome message with a divider, then start the menu loop until the
    program is closed
    :returns nothing"""
    save_file_location = "Item_List.txt"
    data_file_a = open(save_file_location, "a")  # Open Item_List.txt in
    # append mode so that if the file already exists nothing happens, but if
    # it does not exist it gets created - from w3schools.com
    data_file_a.close()  # Close the file, I now know it exists
    loaded_list = load_from_file(save_file_location)
    print("Welcome to the To-Do List - Version: 0.1.2")
    divider(42)  # Length of the welcome statement above
    menu_loop(loaded_list, save_file_location)


if __name__ == "__main__":
    main()
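# A rough sketch of one menu pass (display abridged, entries invented):
#
#   To-Do:
#   1.  Buy groceries
#   --------------------------------------------------------------- ...
#   Please enter: 1 for Add Item, 2 for Remove Item, 3 for Edit Item, ...
#
# Note that every non-error pass through menu_loop calls
# print_list(..., to_save=True, ...), so changes persist to the save file
# without an explicit save command.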
select_item
identifier_name
main.py
"""This program displays a customizable list of items by priority value, with priority 1 being the highest. Allows the user to add, edit, mark complete, show completed (hidden), and remove items. Stores the list of items in a .txt file located where this program's main.py file is. All changes are automatically saved to the .txt file. Also includes a fun technical knowledge demonstration using numbers and text responses. The program will create a new save file if none exists, and prompts for save file overwrite if data cannot be read successfully. Menu navigation is accomplished through numeric inputs due to the text-only interface and tedium of typing out each word accurately and repeatedly.""" __author__ = 'Jordan Kooyman' # 1/26/21 - 4/15/2021 To-Do List Program - Integration Project for COP 1500 # Spring 2021 # Configurable settings saved to a separate file (?) # Ability to load a different data or config file (?) # Color code items by group (?) # Add a basic calculator to meet math (and string?) command requirements (?) # TODO: Implement a group system that shows all groups combined, just one # group, or all categorized by group, and group names - be able to change # group names (new function) - all functions support groups (individual or # combined) import random # Random number generation used as random verification number when # overwriting the save file in the event of a failure to load from the save # file class ListItem: # Create a class object that will store the data for each # entry in the list (custom variable) """A custom object that stores four pieces of data representing each entry in the todo list. Contains the text of the todo list entry, the priority of the entry, the group code (NYI), and the visibility of the entry""" def __init__(self, text, priority, group, visible): # From w3schools.com self.text = text self.priority = priority self.group = group self.visible = visible def concept_demonstration(): """The purpose of this function is to prompt the user for numbers and strings and manipulate them to demonstrate programming fluency with string and integer operations. 
:returns nothing""" number = clean_input("Please enter a positive number") number2 = clean_input("Please enter a number") while number2 == 0: # Rejects a 0 if it is input as the second number print("Error: Cannot Divide by 0") number2 = clean_input("Please enter a different number") color = input("Please enter a color\n") thing = input("Please enter a thing\n") thing2 = thing + ' ' # Adding space so that when thing is repeated, it # has a space in between # Raise the first number to the second number location = input("Please enter a location\n") print(str(number) + " raised to the power of " + str(number2) + " is " + str(number ** number2)) # Multiply the two numbers print("{0} multiplied by {1} is {2}".format(str(number), str(number2), str(number * number2))) # Divide the first number by the second number print("{0} divided by {1} is {2}".format(str(number), str(number2), str(number / number2))) # Find the modulus of the two numbers print("The remainder from dividing {0} by {1} is {2}".format(str(number), str(number2), str(number % number2)) ) # Divide the first number by the second and round it down (floor it) print("{0} divided by {1} rounded down is {2}".format(str(number), str(number2), str(number // number2 ))) # Add the two numbers print("{0} plus {1} is {2}".format(str(number), str(number2), str(number + number2))) # Subtract the second number from the first number print("{0} minus {1} is {2}".format(str(number), str(number2), str(number - number2))) if number > 1: # if the first number entered is greater than 1 print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing, location, thing2 * int(number - 1) + thing)) # Combine two strings with + (no added space), repeat a string x # number of times with * (must use an integer) (I have the minus 1 # and + thing to get the spacing to look proper and still repeat # number amount of times) -if a negative number is used when # multiplying a string, it does nothing (but does not crash) - but # it is still handled in the other statement with some added user # shaming elif number < 0: # if the first number entered is negative print("The {0} at {1} yelled '{2}'\nYou entered a negative number " "when a positive number was requested, so you made the {3} " "mute. Good Job.".format(color + ' ' + thing, location, thing2 * int(number), thing)) # Same as above, expect that it will print nothing in the yelled # section if the first number entered is negative else: # if the first number entered is 0 or 1 (because of the int() # function removing a decimal) print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing, location, thing * int(number))) # this is to prevent errant spaces or showing the phrase too many times return def cascade_list(priority_to_cascade_from, todo_list): """The purpose of this function is to decrement the priority number of every item in the provided todo list greater than the priority number provided. :param priority_to_cascade_from: the number that is inserted by moving everything equal to or greater than up by one :param todo_list: the list of ListItem objects to check in""" for item in todo_list: if item.priority >= priority_to_cascade_from: item.priority += 1 return def check_priority_overlap(priority_to_check, todo_list): """The purpose of this function is to check if the user's priority number input overlaps with a priority number already in the list, and if it does, prompts the user whether they want to keep it, change it, or move everything in the list that has a larger priority value up by one. 
:param priority_to_check: the number to check for overlap with :param todo_list: the list of ListItem objects to check in :returns the priority value, either changed or the original input""" overlap = False for item in todo_list: if item.priority == priority_to_check: overlap = True if overlap: answer = 0 while answer > 3 or answer < 1: answer = clean_input("The priority number you entered overlaps " "with another entry's priority. Enter:\n1 to " "change priority number\n2 to leave as is " "with overlap\n3 to push all priority numbers" " below this entry down by 1") if answer > 3 or answer < 1: print("Invalid Option Selected\nPlease Try Again") if answer == 1: priority_to_check = check_priority_overlap( int(clean_input("New Priority:")), todo_list) # change the priority value input elif answer == 3: cascade_list(priority_to_check, todo_list) return priority_to_check def sorting(list_object): # Takes in a ListItem object and returns the # priority value - from w3schools.com """The purpose of this function is to take in a ListItem custom object and return the priority value stored in it to be used in sorting. :param list_object: one ListItem object :returns the priority value stored in the ListItem object""" return list_object.priority def print_list(save_file_location, my_list, to_save=False, show_hidden=False): # Prints out the To-Do list from the common list variable and saves list # to the .txt file """The purpose of this function is to take in the location of the save file, the todo list variable, whether or not to save, and whether or not to show hidden and print out the todo list variable, skipping items marked as hidden unless it is told to show hidden, and saving the todo list to the file in the save file location if it is told to save. :param save_file_location: the file path to get to the .txt save file :param my_list: the list of ListItem objects to check in :param to_save: whether or not to save the list of items to the file, default is false :param show_hidden: whether or not to display the hidden list items, default it false :returns nothing""" my_list.sort(key=sorting) # Uses a custom function to be able to get the # right value to sort by print("To-Do:") for item_index in my_list: # The range needs to be the length of the list # being printed if item_index.visible and not show_hidden: # Only print visible items # if show hidden is false print(item_index.priority, item_index.text, sep='.\t') elif show_hidden: # Print everything is show hidden is trues if item_index.visible: print(item_index.priority, item_index.text, sep='.\t') else: print("{0}.~\t{1}".format(item_index.priority, item_index.text) ) # Indicate hidden items # Printing the item priority with a dot, then the item, with a tab # separating them if to_save: save_list(my_list, save_file_location) return def divider(size=100): # Draws a dividing line to go between sections # (default 100 characters long) """The purpose of this function is to print a dashed line across the screen with a specified length. 
:param size: how many characters long the line should be, default is 100 :returns nothing""" for i in range(size): print('-', end='') # Prints out a single dash, no newline afterwards # (the end= sets the last character to blank print('') # Print out a newline (using the default ending of a print # statement being a newline return def clean_input(prompt='Error'): # A special input function that will reject a # user's input of text when a number is requested -- if no prompt is # specified in the program, it will display "Error" """The purpose of this function is to prompt the user for a numerical input and only accept a numerical input, rejects no input and text input. :param prompt: the prompt the user sees, default is Error :returns the user input as a float""" text = True phrase = '0' while text: phrase = input(prompt + '\n') try: # Adapted from an example in the ThinkPython textbook (15.7) - # Checks whether the input is a number, positive or negative. If # not, rejects the input and user gets to try again float(phrase) text = False except ValueError: print("Error: Non-Numeric Entry Detected") # if phrase.isnumeric(): # Checks for a positive number (negative # rejected as well as text) - replaced with superior form from textbook # example # return float(phrase) # Return the number the user entered # else: # print("Error: Non-Numeric Entry Detected") return float(phrase) # Return the number the user entered def load_from_file(save_location): # This is a function for readability - # opens txt file in read mode and loads it """The purpose of this function is to open the .txt save file and read the contents into memory in the form of a list of custom ListItem objects. :param save_location: the location the save file is stored in :returns a list of ListItem objects that is populated with the data from the save file""" # into an array (list) of ListItem variables data_file_r = open(save_location, "r") # Open txt file in read mode list_item = ["Text", -1, 2, True] # Item, Item Priority, group, is visible todo = [] # make a list of lists temp = 1 # Temporary counter variable to reconstruct lists from .txt file line_counter = 1 try: for item in data_file_r: # loop through each line in the file, one at # a time - from w3schools.com if (line_counter - 1) % 5 != 0 and line_counter > 0: cleaned_item = "" for character_index in range(len( item)): # Loop through each character in the extracted # string if character_index != len( item) - 1: # if it is not the last character, add # it to the cleaned string cleaned_item += item[character_index] # Add every character to a # but \n if temp == 1: # Item Text list_item[0] = cleaned_item temp = 2 elif temp == 2: # Item Priority list_item[1] = int(cleaned_item) temp = 3 elif temp == 3: # Item Group list_item[2] = int(cleaned_item) temp = 4 elif temp == 4: # Is Visible if cleaned_item == "False": list_item[3] = False else: # Assume the item is visible if the text is not # False list_item[3] = True todo.insert(0, ListItem(list_item[0], list_item[1], list_item[2], list_item[3])) temp = 1 else: # If some error occurred and a condition outside of the # possible four is met, restart temp = 1 line_counter += 1 except ValueError: print("An error has occurred trying to load the file") result = int(clean_input( "Please enter a 2 to overwrite the current save file and start " "over or any other number to exit the program")) if result == 2: key = random.randint(2, 9) # Generate a random integer between 2 # and 9 to be used as a second dynamic check if key == 2: key = 1 # 
If the random number is 2, set it to one so that # the same number (2) cannot be used as the verification number result2 = int(clean_input("Are you sure you want to delete all " "of your saved data\nEnter {0} to " "proceed, or anything else to " "cancel".format(str(key)))) if result2 == key: data_file_w = open("C:Item_List.txt", "w") data_file_w.close() todo = [] print("Save Data Erased") return todo # Return an empty list if file load failed else: print("Program Exiting") quit(1) else: print("Program Exiting") quit(1) # Exit the program with the exit code of 1 data_file_r.close() # All the list functions above referenced from w3schools.com What is # happening above: Opening the file, initializing a list to hold all # four pieces of data, then after pulling the data from the file and # storing in the list, it is copied (not referenced) into my main list # of ListItem objects return todo def save_list(todo_list, save_location): """The purpose of this function is to save a list of ListItem objects to a specified location in a .txt file with the first line of the document being an explanation of the file format being used. :param todo_list: the list of ListItem objects to save to the save file :param save_location: the location to create or overwrite the save file :returns nothing""" data_file_w = open(save_location, "w") # open the save file and clear the data from it data_file_w.write("Warning: The Todo-List Program will not be able to " "load this save file if it is incorrectly modified. " "Modify at your own risk. The structure is Entry " "Text, Entry Priority as a number, Entry Group as a " "number (Not Yet Utilized, but necessary), and Entry " "Visibility as a boolean, each on a separate line, a " "single line gap in between, and the " "very first line is skipped\n") for item in todo_list: data_file_w.write("{0}\n{1}\n{2}\n{3}\n\n".format(item.text, str(item.priority), str(item.group), str(item.visible))) data_file_w.close() return def add_item(todo_list): """The purpose of this function is to prompt the user for the two fields of necessary information to make a new entry in the todo list, the item name and priority, checking if the priority overlaps with an existing entry in the todo list. :param todo_list: the list of ListItem objects to add a new ListItem object to :returns nothing""" text = input("Please enter the name of the new item\n") priority = check_priority_overlap( int(clean_input("Please enter the priority of this item")), todo_list) # group = int(clean_input("Please enter the group number of this item")) group = 0 # Set the group value to zero, group system NYI visible = True todo_list.insert(0, ListItem(text, priority, group, visible)) # Join # the inputs to be added to the overall list return def select_item(todo_list, prompt='Error'): # Ask the user # which item from the list is to be modified
def remove_item(todo_list): """The purpose of this function is to delete a ListItem object from a list of ListItem objects by prompting the user for the index and verifying they want to delete the item. :param todo_list: the list of ListItem objects from which to remove one object :returns nothing""" item = select_item(todo_list, "Please enter the item number you wish to " "remove\nEnter a negative number or zero " "to cancel") if item >= 0: # 0, not 1 because the index returned is shifted to be # computer friendly todo_list.pop(item) return def mark_complete(todo_list): """The purpose of this function is to mark a selectedListItem object as hidden and not to be printed unless specified, apart from selecting items. :param todo_list: the list of ListItem objects to modify :returns nothing""" item = select_item(todo_list, "Please enter the item number you wish to " "Mark Completed and hide from the " "list\nEnter a negative number or zero to " "cancel") if item >= 0: todo_list[item].visible = False return def edit_item(todo_list): """The purpose of this function is to edit a ListItem object in the list of ListItem objects, changing either the name or priority :param todo_list: the list of ListItem objects that gets one object modified :returns nothing""" item = select_item(todo_list, "Please enter the item number you wish to " "edit\nEnter a negative number or zero to " "cancel") if item >= 0: while True: value = clean_input("Which value would you like to edit? Enter:\n1" " for the Item Text (Currently: {0})\n2 for " "the Item Priority (Currently: {1})\n3 to " "Cancel and Exit".format(todo_list[item].text, str(todo_list[item]. priority))) if value == 1: # Item Text Change print("The Current Text is: {0}".format(todo_list[item].text)) todo_list[item].text = input("New Text:\n") elif value == 2: # Item Priority Change print("The Current Priority is: {0}".format(str(todo_list[item] .priority))) todo_list[item].priority = check_priority_overlap( int(clean_input("New Priority:")), todo_list) # elif value == 3: # Item Group Change # print(f"The Current Group is: {todo_list[item].group}") # todo_list[item].group = int(clean_input("New Group Number:")) elif value == 3: # Exit Changing Menu break else: print("Invalid Input - Please Try Again") return def check_list_status(todo_list): # Checks if the list is completely hidden # (2), completely empty (1), or neither (0) """The purpose of this function is to check whether there are visible items in the list, the entire list is hidden, or the list contains no more ListItem objects :param todo_list: the list of ListItem objects to check :returns which condition using integer codes""" if len(todo_list) == 0: state = 1 # Empty List else: state = 2 # Entirely Hidden List for item_index in range(len(todo_list)): if todo_list[item_index].visible: # If an item is visible, then # they are not all hidden state = 0 # Neither return state def menu_loop(todo_list, save_file_location): """The purpose of this function is to repeatedly display the todo list and user prompts menu until the program is closed :param todo_list: the list of ListItem objects to display or modify :param save_file_location: where the .txt save file is located for saving :returns nothing""" show_hidden = False selection = 0 invalid_input = False while selection != 6: if invalid_input: invalid_input = False else: print_list(save_file_location, todo_list, True, show_hidden) divider(137 + 17) # Length of prompt statement below list_status = check_list_status(todo_list) if list_status == 0: # No 
Issues selection = int(clean_input("Please enter: 1 for Add Item, 2 for " "Remove Item, 3 for Edit Item, " "4 for Mark Item Complete, " "5 for Toggle Hidden, and 6 for " "Exit, 7 for Concept " "Demonstration\n")) elif list_status == 1: # Empty List - No Remove, Edit, Mark, or Toggle selection = int(clean_input("Please enter: 1 for Add Item, and 6 " "for Exit, 7 for Concept " "Demonstration\n")) else: # Entirely Hidden List selection = int(clean_input("Please enter: 1 for Add Item, 5 for " "Toggle Hidden, and 6 for Exit, " "7 for Concept Demonstration\n")) # Uses the clean_input function above to get a number from the # user, converting it to an int so a decimal won't return an # invalid input in the following steps print("") # Blank Print statement to add an extra blank line after # user input before displaying response if selection == 1: # Add Item - modify the list variable, then save # to file add_item(todo_list) elif selection == 2: # Remove Item - modify the list variable, then # save to file if list_status == 0: remove_item(todo_list) elif list_status == 2: print("Invalid Command: The Todo List has no visible items " "to remove") else: print("Invalid Command: The Todo List has no items to remove") elif selection == 3: # Edit Item - modify the list variable, then save # to file if list_status == 0: edit_item(todo_list) elif list_status == 2: print("Invalid Command: The Todo List has no visible items " "to edit") else: print("Invalid Command: The Todo List has no items to edit") elif selection == 4: # Mark Item Complete - modify the list variable, # then save to file if list_status == 0: mark_complete(todo_list) elif list_status == 2: print("Invalid Command: The Todo List has no visible items " "to mark complete") else: print("Invalid Command: The Todo List has no items to mark " "complete") elif selection == 5: # Show Hidden - modify the list variable, then # save to file if list_status == 0 or list_status == 2: if show_hidden: print("No longer showing hidden items") show_hidden = False else: print("Now showing hidden items") show_hidden = True else: print("Invalid Command: The Todo List has no items to show or " "hide") elif selection == 6: # Exit Program print("Now Closing") elif selection == 7: # Extra section to demonstrate proficiency with # topics covered in class - Sprint 1 concept_demonstration() else: invalid_input = True print("Invalid Input\nPlease Try Again") def main(): """The purpose of this function is to ensure the save file exists at the specified save file location, load the save file into memory, display a welcome message with a divider, then start the menu loop until the program is closed :returns nothing""" save_file_location = "Item_List.txt" data_file_a = open(save_file_location, "a") # Opens ItemList.txt which # is accessible in the file variable, in append mode (using this so that # if the file exists, nothing happens, but if it does not exist, it gets # created from w3schools.com data_file_a.close() # Close the file, I now know it exists loaded_list = load_from_file(save_file_location) print("Welcome to the To-Do List - Version: 0.1.2") divider(42) # Length of welcome statement above menu_loop(loaded_list, save_file_location) if __name__ == "__main__": main()
"""The purpose of this function is to display a list of all items in the todo list and number each individually to allow the user to select an item to modify or delete. The available numbers may skip some if some items are hidden :param todo_list: the list of ListItem objects to display :param prompt: the prompt to display to the user, default is Error :returns the user selected item's index in a computer friendly form ( starting at 0 instead of 1)""" valid = False index = 0 while not valid: counter = 1 # counter for index printing for item in todo_list: # The range needs to be the length of the list # being printed if item.visible: print(counter, item.text, sep='\t') else: print(counter, "~ {0} ~".format(item.text), sep='\t') counter += 1 # Printing the item number, then the item, with a tab separating # them index = int(clean_input(prompt)) if index < counter: valid = True else: print("Invalid Input: Number is too big") return index - 1
identifier_body
main.py
"""This program displays a customizable list of items by priority value, with priority 1 being the highest. Allows the user to add, edit, mark complete, show completed (hidden), and remove items. Stores the list of items in a .txt file located where this program's main.py file is. All changes are automatically saved to the .txt file. Also includes a fun technical knowledge demonstration using numbers and text responses. The program will create a new save file if none exists, and prompts for save file overwrite if data cannot be read successfully. Menu navigation is accomplished through numeric inputs due to the text-only interface and tedium of typing out each word accurately and repeatedly.""" __author__ = 'Jordan Kooyman' # 1/26/21 - 4/15/2021 To-Do List Program - Integration Project for COP 1500 # Spring 2021 # Configurable settings saved to a separate file (?) # Ability to load a different data or config file (?) # Color code items by group (?) # Add a basic calculator to meet math (and string?) command requirements (?) # TODO: Implement a group system that shows all groups combined, just one # group, or all categorized by group, and group names - be able to change # group names (new function) - all functions support groups (individual or # combined) import random # Random number generation used as random verification number when # overwriting the save file in the event of a failure to load from the save # file class ListItem: # Create a class object that will store the data for each # entry in the list (custom variable) """A custom object that stores four pieces of data representing each entry in the todo list. Contains the text of the todo list entry, the priority of the entry, the group code (NYI), and the visibility of the entry""" def __init__(self, text, priority, group, visible): # From w3schools.com self.text = text self.priority = priority self.group = group self.visible = visible def concept_demonstration(): """The purpose of this function is to prompt the user for numbers and strings and manipulate them to demonstrate programming fluency with string and integer operations. 
:returns nothing""" number = clean_input("Please enter a positive number") number2 = clean_input("Please enter a number") while number2 == 0: # Rejects a 0 if it is input as the second number print("Error: Cannot Divide by 0") number2 = clean_input("Please enter a different number") color = input("Please enter a color\n") thing = input("Please enter a thing\n") thing2 = thing + ' ' # Adding space so that when thing is repeated, it # has a space in between # Raise the first number to the second number location = input("Please enter a location\n") print(str(number) + " raised to the power of " + str(number2) + " is " + str(number ** number2)) # Multiply the two numbers print("{0} multiplied by {1} is {2}".format(str(number), str(number2), str(number * number2))) # Divide the first number by the second number print("{0} divided by {1} is {2}".format(str(number), str(number2), str(number / number2))) # Find the modulus of the two numbers print("The remainder from dividing {0} by {1} is {2}".format(str(number), str(number2), str(number % number2)) ) # Divide the first number by the second and round it down (floor it) print("{0} divided by {1} rounded down is {2}".format(str(number), str(number2), str(number // number2 ))) # Add the two numbers print("{0} plus {1} is {2}".format(str(number), str(number2), str(number + number2))) # Subtract the second number from the first number print("{0} minus {1} is {2}".format(str(number), str(number2), str(number - number2))) if number > 1: # if the first number entered is greater than 1 print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing, location, thing2 * int(number - 1) + thing)) # Combine two strings with + (no added space), repeat a string x # number of times with * (must use an integer) (I have the minus 1 # and + thing to get the spacing to look proper and still repeat # number amount of times) -if a negative number is used when # multiplying a string, it does nothing (but does not crash) - but # it is still handled in the other statement with some added user # shaming elif number < 0: # if the first number entered is negative print("The {0} at {1} yelled '{2}'\nYou entered a negative number " "when a positive number was requested, so you made the {3} " "mute. Good Job.".format(color + ' ' + thing, location, thing2 * int(number), thing)) # Same as above, expect that it will print nothing in the yelled # section if the first number entered is negative else: # if the first number entered is 0 or 1 (because of the int() # function removing a decimal) print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing, location, thing * int(number))) # this is to prevent errant spaces or showing the phrase too many times return def cascade_list(priority_to_cascade_from, todo_list): """The purpose of this function is to decrement the priority number of every item in the provided todo list greater than the priority number provided. :param priority_to_cascade_from: the number that is inserted by moving everything equal to or greater than up by one :param todo_list: the list of ListItem objects to check in""" for item in todo_list: if item.priority >= priority_to_cascade_from: item.priority += 1 return def check_priority_overlap(priority_to_check, todo_list): """The purpose of this function is to check if the user's priority number input overlaps with a priority number already in the list, and if it does, prompts the user whether they want to keep it, change it, or move everything in the list that has a larger priority value up by one. 
:param priority_to_check: the number to check for overlap with :param todo_list: the list of ListItem objects to check in :returns the priority value, either changed or the original input""" overlap = False for item in todo_list: if item.priority == priority_to_check: overlap = True if overlap: answer = 0 while answer > 3 or answer < 1: answer = clean_input("The priority number you entered overlaps " "with another entry's priority. Enter:\n1 to " "change priority number\n2 to leave as is " "with overlap\n3 to push all priority numbers" " below this entry down by 1") if answer > 3 or answer < 1: print("Invalid Option Selected\nPlease Try Again") if answer == 1: priority_to_check = check_priority_overlap( int(clean_input("New Priority:")), todo_list) # change the priority value input elif answer == 3: cascade_list(priority_to_check, todo_list) return priority_to_check def sorting(list_object): # Takes in a ListItem object and returns the # priority value - from w3schools.com """The purpose of this function is to take in a ListItem custom object and return the priority value stored in it to be used in sorting. :param list_object: one ListItem object :returns the priority value stored in the ListItem object""" return list_object.priority def print_list(save_file_location, my_list, to_save=False, show_hidden=False): # Prints out the To-Do list from the common list variable and saves list # to the .txt file """The purpose of this function is to take in the location of the save file, the todo list variable, whether or not to save, and whether or not to show hidden and print out the todo list variable, skipping items marked as hidden unless it is told to show hidden, and saving the todo list to the file in the save file location if it is told to save. :param save_file_location: the file path to get to the .txt save file :param my_list: the list of ListItem objects to check in :param to_save: whether or not to save the list of items to the file, default is false :param show_hidden: whether or not to display the hidden list items, default it false :returns nothing""" my_list.sort(key=sorting) # Uses a custom function to be able to get the # right value to sort by print("To-Do:") for item_index in my_list: # The range needs to be the length of the list # being printed if item_index.visible and not show_hidden: # Only print visible items # if show hidden is false print(item_index.priority, item_index.text, sep='.\t') elif show_hidden: # Print everything is show hidden is trues if item_index.visible: print(item_index.priority, item_index.text, sep='.\t') else: print("{0}.~\t{1}".format(item_index.priority, item_index.text) ) # Indicate hidden items # Printing the item priority with a dot, then the item, with a tab # separating them if to_save: save_list(my_list, save_file_location) return def divider(size=100): # Draws a dividing line to go between sections # (default 100 characters long) """The purpose of this function is to print a dashed line across the screen with a specified length. 
:param size: how many characters long the line should be, default is 100 :returns nothing""" for i in range(size): print('-', end='') # Prints out a single dash, no newline afterwards # (the end= sets the last character to blank print('') # Print out a newline (using the default ending of a print # statement being a newline return def clean_input(prompt='Error'): # A special input function that will reject a # user's input of text when a number is requested -- if no prompt is # specified in the program, it will display "Error" """The purpose of this function is to prompt the user for a numerical input and only accept a numerical input, rejects no input and text input. :param prompt: the prompt the user sees, default is Error :returns the user input as a float""" text = True phrase = '0' while text: phrase = input(prompt + '\n') try: # Adapted from an example in the ThinkPython textbook (15.7) - # Checks whether the input is a number, positive or negative. If # not, rejects the input and user gets to try again float(phrase) text = False except ValueError: print("Error: Non-Numeric Entry Detected") # if phrase.isnumeric(): # Checks for a positive number (negative # rejected as well as text) - replaced with superior form from textbook # example # return float(phrase) # Return the number the user entered # else: # print("Error: Non-Numeric Entry Detected") return float(phrase) # Return the number the user entered def load_from_file(save_location): # This is a function for readability - # opens txt file in read mode and loads it """The purpose of this function is to open the .txt save file and read the contents into memory in the form of a list of custom ListItem objects. :param save_location: the location the save file is stored in :returns a list of ListItem objects that is populated with the data from the save file""" # into an array (list) of ListItem variables data_file_r = open(save_location, "r") # Open txt file in read mode list_item = ["Text", -1, 2, True] # Item, Item Priority, group, is visible todo = [] # make a list of lists temp = 1 # Temporary counter variable to reconstruct lists from .txt file line_counter = 1 try: for item in data_file_r: # loop through each line in the file, one at # a time - from w3schools.com if (line_counter - 1) % 5 != 0 and line_counter > 0: cleaned_item = "" for character_index in range(len( item)): # Loop through each character in the extracted # string if character_index != len( item) - 1: # if it is not the last character, add # it to the cleaned string cleaned_item += item[character_index] # Add every character to a # but \n if temp == 1: # Item Text list_item[0] = cleaned_item temp = 2 elif temp == 2: # Item Priority list_item[1] = int(cleaned_item) temp = 3 elif temp == 3: # Item Group list_item[2] = int(cleaned_item) temp = 4 elif temp == 4: # Is Visible if cleaned_item == "False": list_item[3] = False else: # Assume the item is visible if the text is not # False list_item[3] = True todo.insert(0, ListItem(list_item[0], list_item[1], list_item[2], list_item[3])) temp = 1 else: # If some error occurred and a condition outside of the # possible four is met, restart temp = 1 line_counter += 1 except ValueError: print("An error has occurred trying to load the file") result = int(clean_input( "Please enter a 2 to overwrite the current save file and start " "over or any other number to exit the program")) if result == 2: key = random.randint(2, 9) # Generate a random integer between 2 # and 9 to be used as a second dynamic check if key == 2: key = 1 # 
If the random number is 2, set it to one so that # the same number (2) cannot be used as the verification number result2 = int(clean_input("Are you sure you want to delete all " "of your saved data\nEnter {0} to " "proceed, or anything else to " "cancel".format(str(key)))) if result2 == key: data_file_w = open("C:Item_List.txt", "w") data_file_w.close() todo = [] print("Save Data Erased") return todo # Return an empty list if file load failed else: print("Program Exiting") quit(1) else: print("Program Exiting") quit(1) # Exit the program with the exit code of 1 data_file_r.close() # All the list functions above referenced from w3schools.com What is # happening above: Opening the file, initializing a list to hold all # four pieces of data, then after pulling the data from the file and # storing in the list, it is copied (not referenced) into my main list # of ListItem objects return todo def save_list(todo_list, save_location): """The purpose of this function is to save a list of ListItem objects to a specified location in a .txt file with the first line of the document being an explanation of the file format being used. :param todo_list: the list of ListItem objects to save to the save file :param save_location: the location to create or overwrite the save file :returns nothing""" data_file_w = open(save_location, "w") # open the save file and clear the data from it data_file_w.write("Warning: The Todo-List Program will not be able to " "load this save file if it is incorrectly modified. " "Modify at your own risk. The structure is Entry " "Text, Entry Priority as a number, Entry Group as a " "number (Not Yet Utilized, but necessary), and Entry " "Visibility as a boolean, each on a separate line, a " "single line gap in between, and the " "very first line is skipped\n") for item in todo_list: data_file_w.write("{0}\n{1}\n{2}\n{3}\n\n".format(item.text, str(item.priority), str(item.group), str(item.visible))) data_file_w.close() return def add_item(todo_list): """The purpose of this function is to prompt the user for the two fields of necessary information to make a new entry in the todo list, the item name and priority, checking if the priority overlaps with an existing entry in the todo list. :param todo_list: the list of ListItem objects to add a new ListItem object to :returns nothing""" text = input("Please enter the name of the new item\n") priority = check_priority_overlap( int(clean_input("Please enter the priority of this item")), todo_list) # group = int(clean_input("Please enter the group number of this item")) group = 0 # Set the group value to zero, group system NYI visible = True todo_list.insert(0, ListItem(text, priority, group, visible)) # Join # the inputs to be added to the overall list return def select_item(todo_list, prompt='Error'): # Ask the user # which item from the list is to be modified """The purpose of this function is to display a list of all items in the todo list and number each individually to allow the user to select an item to modify or delete. 
The available numbers may skip some if some items are hidden :param todo_list: the list of ListItem objects to display :param prompt: the prompt to display to the user, default is Error :returns the user selected item's index in a computer friendly form ( starting at 0 instead of 1)""" valid = False index = 0 while not valid: counter = 1 # counter for index printing for item in todo_list: # The range needs to be the length of the list # being printed if item.visible: print(counter, item.text, sep='\t') else: print(counter, "~ {0} ~".format(item.text), sep='\t') counter += 1 # Printing the item number, then the item, with a tab separating # them index = int(clean_input(prompt)) if index < counter: valid = True else: print("Invalid Input: Number is too big") return index - 1 def remove_item(todo_list): """The purpose of this function is to delete a ListItem object from a list of ListItem objects by prompting the user for the index and verifying they want to delete the item. :param todo_list: the list of ListItem objects from which to remove one object :returns nothing""" item = select_item(todo_list, "Please enter the item number you wish to " "remove\nEnter a negative number or zero " "to cancel") if item >= 0: # 0, not 1 because the index returned is shifted to be # computer friendly todo_list.pop(item) return def mark_complete(todo_list): """The purpose of this function is to mark a selectedListItem object as hidden and not to be printed unless specified, apart from selecting items. :param todo_list: the list of ListItem objects to modify :returns nothing""" item = select_item(todo_list, "Please enter the item number you wish to " "Mark Completed and hide from the " "list\nEnter a negative number or zero to " "cancel") if item >= 0: todo_list[item].visible = False return def edit_item(todo_list): """The purpose of this function is to edit a ListItem object in the list of ListItem objects, changing either the name or priority :param todo_list: the list of ListItem objects that gets one object modified :returns nothing""" item = select_item(todo_list, "Please enter the item number you wish to " "edit\nEnter a negative number or zero to " "cancel") if item >= 0: while True: value = clean_input("Which value would you like to edit? Enter:\n1" " for the Item Text (Currently: {0})\n2 for " "the Item Priority (Currently: {1})\n3 to " "Cancel and Exit".format(todo_list[item].text, str(todo_list[item]. 
priority))) if value == 1: # Item Text Change print("The Current Text is: {0}".format(todo_list[item].text)) todo_list[item].text = input("New Text:\n") elif value == 2: # Item Priority Change print("The Current Priority is: {0}".format(str(todo_list[item] .priority))) todo_list[item].priority = check_priority_overlap( int(clean_input("New Priority:")), todo_list) # elif value == 3: # Item Group Change # print(f"The Current Group is: {todo_list[item].group}") # todo_list[item].group = int(clean_input("New Group Number:")) elif value == 3: # Exit Changing Menu break else: print("Invalid Input - Please Try Again") return def check_list_status(todo_list): # Checks if the list is completely hidden # (2), completely empty (1), or neither (0) """The purpose of this function is to check whether there are visible items in the list, the entire list is hidden, or the list contains no more ListItem objects :param todo_list: the list of ListItem objects to check :returns which condition using integer codes""" if len(todo_list) == 0: state = 1 # Empty List else: state = 2 # Entirely Hidden List for item_index in range(len(todo_list)): if todo_list[item_index].visible: # If an item is visible, then # they are not all hidden state = 0 # Neither return state def menu_loop(todo_list, save_file_location): """The purpose of this function is to repeatedly display the todo list and user prompts menu until the program is closed :param todo_list: the list of ListItem objects to display or modify :param save_file_location: where the .txt save file is located for saving :returns nothing""" show_hidden = False selection = 0
invalid_input = False while selection != 6: if invalid_input: invalid_input = False else: print_list(save_file_location, todo_list, True, show_hidden) divider(137 + 17) # Length of prompt statement below list_status = check_list_status(todo_list) if list_status == 0: # No Issues selection = int(clean_input("Please enter: 1 for Add Item, 2 for " "Remove Item, 3 for Edit Item, " "4 for Mark Item Complete, " "5 for Toggle Hidden, and 6 for " "Exit, 7 for Concept " "Demonstration\n")) elif list_status == 1: # Empty List - No Remove, Edit, Mark, or Toggle selection = int(clean_input("Please enter: 1 for Add Item, and 6 " "for Exit, 7 for Concept " "Demonstration\n")) else: # Entirely Hidden List selection = int(clean_input("Please enter: 1 for Add Item, 5 for " "Toggle Hidden, and 6 for Exit, " "7 for Concept Demonstration\n")) # Uses the clean_input function above to get a number from the # user, converting it to an int so a decimal won't return an # invalid input in the following steps print("") # Blank Print statement to add an extra blank line after # user input before displaying response if selection == 1: # Add Item - modify the list variable, then save # to file add_item(todo_list) elif selection == 2: # Remove Item - modify the list variable, then # save to file if list_status == 0: remove_item(todo_list) elif list_status == 2: print("Invalid Command: The Todo List has no visible items " "to remove") else: print("Invalid Command: The Todo List has no items to remove") elif selection == 3: # Edit Item - modify the list variable, then save # to file if list_status == 0: edit_item(todo_list) elif list_status == 2: print("Invalid Command: The Todo List has no visible items " "to edit") else: print("Invalid Command: The Todo List has no items to edit") elif selection == 4: # Mark Item Complete - modify the list variable, # then save to file if list_status == 0: mark_complete(todo_list) elif list_status == 2: print("Invalid Command: The Todo List has no visible items " "to mark complete") else: print("Invalid Command: The Todo List has no items to mark " "complete") elif selection == 5: # Show Hidden - modify the list variable, then # save to file if list_status == 0 or list_status == 2: if show_hidden: print("No longer showing hidden items") show_hidden = False else: print("Now showing hidden items") show_hidden = True else: print("Invalid Command: The Todo List has no items to show or " "hide") elif selection == 6: # Exit Program print("Now Closing") elif selection == 7: # Extra section to demonstrate proficiency with # topics covered in class - Sprint 1 concept_demonstration() else: invalid_input = True print("Invalid Input\nPlease Try Again") def main(): """The purpose of this function is to ensure the save file exists at the specified save file location, load the save file into memory, display a welcome message with a divider, then start the menu loop until the program is closed :returns nothing""" save_file_location = "Item_List.txt" data_file_a = open(save_file_location, "a") # Opens ItemList.txt which # is accessible in the file variable, in append mode (using this so that # if the file exists, nothing happens, but if it does not exist, it gets # created from w3schools.com data_file_a.close() # Close the file, I now know it exists loaded_list = load_from_file(save_file_location) print("Welcome to the To-Do List - Version: 0.1.2") divider(42) # Length of welcome statement above menu_loop(loaded_list, save_file_location) if __name__ == "__main__": main()
random_line_split
main.py
"""This program displays a customizable list of items by priority value, with priority 1 being the highest. Allows the user to add, edit, mark complete, show completed (hidden), and remove items. Stores the list of items in a .txt file located where this program's main.py file is. All changes are automatically saved to the .txt file. Also includes a fun technical knowledge demonstration using numbers and text responses. The program will create a new save file if none exists, and prompts for save file overwrite if data cannot be read successfully. Menu navigation is accomplished through numeric inputs due to the text-only interface and tedium of typing out each word accurately and repeatedly.""" __author__ = 'Jordan Kooyman' # 1/26/21 - 4/15/2021 To-Do List Program - Integration Project for COP 1500 # Spring 2021 # Configurable settings saved to a separate file (?) # Ability to load a different data or config file (?) # Color code items by group (?) # Add a basic calculator to meet math (and string?) command requirements (?) # TODO: Implement a group system that shows all groups combined, just one # group, or all categorized by group, and group names - be able to change # group names (new function) - all functions support groups (individual or # combined) import random # Random number generation used as random verification number when # overwriting the save file in the event of a failure to load from the save # file class ListItem: # Create a class object that will store the data for each # entry in the list (custom variable) """A custom object that stores four pieces of data representing each entry in the todo list. Contains the text of the todo list entry, the priority of the entry, the group code (NYI), and the visibility of the entry""" def __init__(self, text, priority, group, visible): # From w3schools.com self.text = text self.priority = priority self.group = group self.visible = visible def concept_demonstration(): """The purpose of this function is to prompt the user for numbers and strings and manipulate them to demonstrate programming fluency with string and integer operations. 
:returns nothing"""
    number = clean_input("Please enter a positive number")
    number2 = clean_input("Please enter a number")
    while number2 == 0:  # Rejects a 0 if it is input as the second number
        print("Error: Cannot Divide by 0")
        number2 = clean_input("Please enter a different number")
    color = input("Please enter a color\n")
    thing = input("Please enter a thing\n")
    thing2 = thing + ' '  # Adding space so that when thing is repeated, it
    # has a space in between
    location = input("Please enter a location\n")
    # Raise the first number to the power of the second number
    print(str(number) + " raised to the power of " + str(number2) + " is " +
          str(number ** number2))
    # Multiply the two numbers
    print("{0} multiplied by {1} is {2}".format(str(number), str(number2),
                                                str(number * number2)))
    # Divide the first number by the second number
    print("{0} divided by {1} is {2}".format(str(number), str(number2),
                                             str(number / number2)))
    # Find the modulus of the two numbers
    print("The remainder from dividing {0} by {1} is {2}".format(
        str(number), str(number2), str(number % number2)))
    # Divide the first number by the second and round it down (floor it)
    print("{0} divided by {1} rounded down is {2}".format(
        str(number), str(number2), str(number // number2)))
    # Add the two numbers
    print("{0} plus {1} is {2}".format(str(number), str(number2),
                                       str(number + number2)))
    # Subtract the second number from the first number
    print("{0} minus {1} is {2}".format(str(number), str(number2),
                                        str(number - number2)))
    if number > 1:  # if the first number entered is greater than 1
        print("The {0} at {1} yelled '{2}'".format(
            color + ' ' + thing, location, thing2 * int(number - 1) + thing))
        # Combine two strings with + (no added space), repeat a string x
        # number of times with * (must use an integer) (I have the minus 1
        # and + thing to get the spacing to look proper and still repeat
        # number amount of times) - if a negative number is used when
        # multiplying a string, it does nothing (but does not crash) - but
        # it is still handled in the other statement with some added user
        # shaming
    elif number < 0:  # if the first number entered is negative
        print("The {0} at {1} yelled '{2}'\nYou entered a negative number "
              "when a positive number was requested, so you made the {3} "
              "mute. Good Job.".format(color + ' ' + thing, location,
                                       thing2 * int(number), thing))
        # Same as above, except that it will print nothing in the yelled
        # section if the first number entered is negative
    else:  # if the first number entered is 0 or 1 (because of the int()
        # function removing a decimal)
        print("The {0} at {1} yelled '{2}'".format(color + ' ' + thing,
                                                   location,
                                                   thing * int(number)))
        # this is to prevent errant spaces or showing the phrase too many
        # times
    return


def cascade_list(priority_to_cascade_from, todo_list):
    """The purpose of this function is to increment the priority number of
    every item in the provided todo list greater than or equal to the
    priority number provided.

    :param priority_to_cascade_from: the number that is inserted by moving
    everything equal to or greater than up by one
    :param todo_list: the list of ListItem objects to check in"""
    for item in todo_list:
        if item.priority >= priority_to_cascade_from:
            item.priority += 1
    return


def check_priority_overlap(priority_to_check, todo_list):
    """The purpose of this function is to check if the user's priority
    number input overlaps with a priority number already in the list, and if
    it does, prompts the user whether they want to keep it, change it, or
    move everything in the list that has a larger priority value up by one.
:param priority_to_check: the number to check for overlap with
    :param todo_list: the list of ListItem objects to check in
    :returns the priority value, either changed or the original input"""
    overlap = False
    for item in todo_list:
        if item.priority == priority_to_check:
            overlap = True
    if overlap:
        answer = 0
        while answer > 3 or answer < 1:
            answer = clean_input("The priority number you entered overlaps "
                                 "with another entry's priority. Enter:\n1 to "
                                 "change priority number\n2 to leave as is "
                                 "with overlap\n3 to push all priority numbers"
                                 " below this entry down by 1")
            if answer > 3 or answer < 1:
                print("Invalid Option Selected\nPlease Try Again")
        if answer == 1:
            priority_to_check = check_priority_overlap(
                int(clean_input("New Priority:")), todo_list)
            # change the priority value input
        elif answer == 3:
            cascade_list(priority_to_check, todo_list)
    return priority_to_check


def sorting(list_object):  # Takes in a ListItem object and returns the
    # priority value - from w3schools.com
    """The purpose of this function is to take in a ListItem custom object
    and return the priority value stored in it to be used in sorting.

    :param list_object: one ListItem object
    :returns the priority value stored in the ListItem object"""
    return list_object.priority


def print_list(save_file_location, my_list, to_save=False, show_hidden=False):
    # Prints out the To-Do list from the common list variable and saves list
    # to the .txt file
    """The purpose of this function is to take in the location of the save
    file, the todo list variable, whether or not to save, and whether or not
    to show hidden and print out the todo list variable, skipping items
    marked as hidden unless it is told to show hidden, and saving the todo
    list to the file in the save file location if it is told to save.

    :param save_file_location: the file path to get to the .txt save file
    :param my_list: the list of ListItem objects to print and save
    :param to_save: whether or not to save the list of items to the file,
    default is false
    :param show_hidden: whether or not to display the hidden list items,
    default is false
    :returns nothing"""
    my_list.sort(key=sorting)  # Uses a custom function to be able to get the
    # right value to sort by
    print("To-Do:")
    for item_index in my_list:  # Loop over every ListItem in the list
        # being printed
        if item_index.visible and not show_hidden:  # Only print visible items
            # if show hidden is false
            print(item_index.priority, item_index.text, sep='.\t')
        elif show_hidden:  # Print everything if show hidden is true
            if item_index.visible:
                print(item_index.priority, item_index.text, sep='.\t')
            else:
                print("{0}.~\t{1}".format(item_index.priority,
                                          item_index.text))
                # Indicate hidden items
    # Printing the item priority with a dot, then the item, with a tab
    # separating them
    if to_save:
        save_list(my_list, save_file_location)
    return


def divider(size=100):  # Draws a dividing line to go between sections
    # (default 100 characters long)
    """The purpose of this function is to print a dashed line across the
    screen with a specified length.
:param size: how many characters long the line should be, default is 100
    :returns nothing"""
    for i in range(size):
        print('-', end='')  # Prints out a single dash, no newline afterwards
        # (the end= sets the last character to blank)
    print('')  # Print out a newline (using the default ending of a print
    # statement being a newline)
    return


def clean_input(prompt='Error'):  # A special input function that will reject a
    # user's input of text when a number is requested -- if no prompt is
    # specified in the program, it will display "Error"
    """The purpose of this function is to prompt the user for a numerical
    input and only accept a numerical input, rejects no input and text
    input.

    :param prompt: the prompt the user sees, default is Error
    :returns the user input as a float"""
    text = True
    phrase = '0'
    while text:
        phrase = input(prompt + '\n')
        try:  # Adapted from an example in the ThinkPython textbook (15.7) -
            # Checks whether the input is a number, positive or negative. If
            # not, rejects the input and user gets to try again
            float(phrase)
            text = False
        except ValueError:
            print("Error: Non-Numeric Entry Detected")
    # if phrase.isnumeric():  # Checks for a positive number (negative
    # rejected as well as text) - replaced with superior form from textbook
    # example
    #     return float(phrase)  # Return the number the user entered
    # else:
    #     print("Error: Non-Numeric Entry Detected")
    return float(phrase)  # Return the number the user entered


def load_from_file(save_location):  # This is a function for readability -
    # opens txt file in read mode and loads it
    """The purpose of this function is to open the .txt save file and read
    the contents into memory in the form of a list of custom ListItem
    objects.

    :param save_location: the location the save file is stored in
    :returns a list of ListItem objects that is populated with the data from
    the save file"""
    # into an array (list) of ListItem variables
    data_file_r = open(save_location, "r")  # Open txt file in read mode
    list_item = ["Text", -1, 2, True]  # Item, Item Priority, group, is visible
    todo = []  # make a list of lists
    temp = 1  # Temporary counter variable to reconstruct lists from .txt file
    line_counter = 1
    try:
        for item in data_file_r:  # loop through each line in the file, one at
            # a time - from w3schools.com
            if (line_counter - 1) % 5 != 0 and line_counter > 0:
                cleaned_item = ""
                for character_index in range(len(
                        item)):  # Loop through each character in the extracted
                    # string
                    if character_index != len(
                            item) - 1:  # if it is not the last character, add
                        # it to the cleaned string
                        cleaned_item += item[character_index]
                        # Add every character except the trailing \n
                if temp == 1:  # Item Text
                    list_item[0] = cleaned_item
                    temp = 2
                elif temp == 2:  # Item Priority
                    list_item[1] = int(cleaned_item)
                    temp = 3
                elif temp == 3:  # Item Group
                    list_item[2] = int(cleaned_item)
                    temp = 4
                elif temp == 4:  # Is Visible
                    if cleaned_item == "False":
                        list_item[3] = False
                    else:  # Assume the item is visible if the text is not
                        # False
                        list_item[3] = True
                    todo.insert(0, ListItem(list_item[0], list_item[1],
                                            list_item[2], list_item[3]))
                    temp = 1
                else:  # If some error occurred and a condition outside of the
                    # possible four is met, restart
                    temp = 1
            line_counter += 1
    except ValueError:
        print("An error has occurred trying to load the file")
        result = int(clean_input(
            "Please enter a 2 to overwrite the current save file and start "
            "over or any other number to exit the program"))
        if result == 2:
            key = random.randint(2, 9)  # Generate a random integer between 2
            # and 9 to be used as a second dynamic check
            if key == 2:
                key = 1  #
If the random number is 2, set it to one so that
                # the same number (2) cannot be used as the verification number
            result2 = int(clean_input("Are you sure you want to delete all "
                                      "of your saved data\nEnter {0} to "
                                      "proceed, or anything else to "
                                      "cancel".format(str(key))))
            if result2 == key:
                data_file_w = open(save_location, "w")  # Overwrite the save
                # file at the location this program loaded it from
                data_file_w.close()
                todo = []
                print("Save Data Erased")
                return todo  # Return an empty list if file load failed
            else:
                print("Program Exiting")
                quit(1)
        else:
            print("Program Exiting")
            quit(1)  # Exit the program with the exit code of 1
    data_file_r.close()
    # All the list functions above referenced from w3schools.com. What is
    # happening above: Opening the file, initializing a list to hold all
    # four pieces of data, then after pulling the data from the file and
    # storing in the list, it is copied (not referenced) into my main list
    # of ListItem objects
    return todo


def save_list(todo_list, save_location):
    """The purpose of this function is to save a list of ListItem objects to
    a specified location in a .txt file with the first line of the document
    being an explanation of the file format being used.

    :param todo_list: the list of ListItem objects to save to the save file
    :param save_location: the location to create or overwrite the save file
    :returns nothing"""
    data_file_w = open(save_location, "w")
    # open the save file and clear the data from it
    data_file_w.write("Warning: The Todo-List Program will not be able to "
                      "load this save file if it is incorrectly modified. "
                      "Modify at your own risk. The structure is Entry "
                      "Text, Entry Priority as a number, Entry Group as a "
                      "number (Not Yet Utilized, but necessary), and Entry "
                      "Visibility as a boolean, each on a separate line, a "
                      "single line gap in between, and the "
                      "very first line is skipped\n")
    for item in todo_list:
        data_file_w.write("{0}\n{1}\n{2}\n{3}\n\n".format(item.text,
                                                          str(item.priority),
                                                          str(item.group),
                                                          str(item.visible)))
    data_file_w.close()
    return


def add_item(todo_list):
    """The purpose of this function is to prompt the user for the two fields
    of necessary information to make a new entry in the todo list, the item
    name and priority, checking if the priority overlaps with an existing
    entry in the todo list.

    :param todo_list: the list of ListItem objects to add a new ListItem
    object to
    :returns nothing"""
    text = input("Please enter the name of the new item\n")
    priority = check_priority_overlap(
        int(clean_input("Please enter the priority of this item")), todo_list)
    # group = int(clean_input("Please enter the group number of this item"))
    group = 0  # Set the group value to zero, group system NYI
    visible = True
    todo_list.insert(0, ListItem(text, priority, group, visible))  # Join
    # the inputs to be added to the overall list
    return


def select_item(todo_list, prompt='Error'):  # Ask the user
    # which item from the list is to be modified
    """The purpose of this function is to display a list of all items in the
    todo list and number each individually to allow the user to select an
    item to modify or delete.
The available numbers may skip some if some items are hidden

    :param todo_list: the list of ListItem objects to display
    :param prompt: the prompt to display to the user, default is Error
    :returns the user selected item's index in a computer friendly form
    (starting at 0 instead of 1)"""
    valid = False
    index = 0
    while not valid:
        counter = 1  # counter for index printing
        for item in todo_list:  # Loop over every ListItem in the list
            # being printed
            if item.visible:
                print(counter, item.text, sep='\t')
            else:
                print(counter, "~ {0} ~".format(item.text), sep='\t')
            counter += 1
        # Printing the item number, then the item, with a tab separating
        # them
        index = int(clean_input(prompt))
        if index < counter:
            valid = True
        else:
            print("Invalid Input: Number is too big")
    return index - 1


def remove_item(todo_list):
    """The purpose of this function is to delete a ListItem object from a
    list of ListItem objects by prompting the user for the index and
    verifying they want to delete the item.

    :param todo_list: the list of ListItem objects from which to remove one
    object
    :returns nothing"""
    item = select_item(todo_list, "Please enter the item number you wish to "
                                  "remove\nEnter a negative number or zero "
                                  "to cancel")
    if item >= 0:  # 0, not 1 because the index returned is shifted to be
        # computer friendly
        todo_list.pop(item)
    return


def mark_complete(todo_list):
    """The purpose of this function is to mark a selected ListItem object as
    hidden and not to be printed unless specified, apart from selecting
    items.

    :param todo_list: the list of ListItem objects to modify
    :returns nothing"""
    item = select_item(todo_list, "Please enter the item number you wish to "
                                  "Mark Completed and hide from the "
                                  "list\nEnter a negative number or zero to "
                                  "cancel")
    if item >= 0:
        todo_list[item].visible = False
    return


def edit_item(todo_list):
    """The purpose of this function is to edit a ListItem object in the list
    of ListItem objects, changing either the name or priority

    :param todo_list: the list of ListItem objects that gets one object
    modified
    :returns nothing"""
    item = select_item(todo_list, "Please enter the item number you wish to "
                                  "edit\nEnter a negative number or zero to "
                                  "cancel")
    if item >= 0:
        while True:
            value = clean_input("Which value would you like to edit? Enter:\n1"
                                " for the Item Text (Currently: {0})\n2 for "
                                "the Item Priority (Currently: {1})\n3 to "
                                "Cancel and Exit".format(todo_list[item].text,
                                                         str(todo_list[item].
priority))) if value == 1: # Item Text Change print("The Current Text is: {0}".format(todo_list[item].text)) todo_list[item].text = input("New Text:\n") elif value == 2: # Item Priority Change print("The Current Priority is: {0}".format(str(todo_list[item] .priority))) todo_list[item].priority = check_priority_overlap( int(clean_input("New Priority:")), todo_list) # elif value == 3: # Item Group Change # print(f"The Current Group is: {todo_list[item].group}") # todo_list[item].group = int(clean_input("New Group Number:")) elif value == 3: # Exit Changing Menu break else: print("Invalid Input - Please Try Again") return def check_list_status(todo_list): # Checks if the list is completely hidden # (2), completely empty (1), or neither (0) """The purpose of this function is to check whether there are visible items in the list, the entire list is hidden, or the list contains no more ListItem objects :param todo_list: the list of ListItem objects to check :returns which condition using integer codes""" if len(todo_list) == 0: state = 1 # Empty List else: state = 2 # Entirely Hidden List for item_index in range(len(todo_list)):
return state def menu_loop(todo_list, save_file_location): """The purpose of this function is to repeatedly display the todo list and user prompts menu until the program is closed :param todo_list: the list of ListItem objects to display or modify :param save_file_location: where the .txt save file is located for saving :returns nothing""" show_hidden = False selection = 0 invalid_input = False while selection != 6: if invalid_input: invalid_input = False else: print_list(save_file_location, todo_list, True, show_hidden) divider(137 + 17) # Length of prompt statement below list_status = check_list_status(todo_list) if list_status == 0: # No Issues selection = int(clean_input("Please enter: 1 for Add Item, 2 for " "Remove Item, 3 for Edit Item, " "4 for Mark Item Complete, " "5 for Toggle Hidden, and 6 for " "Exit, 7 for Concept " "Demonstration\n")) elif list_status == 1: # Empty List - No Remove, Edit, Mark, or Toggle selection = int(clean_input("Please enter: 1 for Add Item, and 6 " "for Exit, 7 for Concept " "Demonstration\n")) else: # Entirely Hidden List selection = int(clean_input("Please enter: 1 for Add Item, 5 for " "Toggle Hidden, and 6 for Exit, " "7 for Concept Demonstration\n")) # Uses the clean_input function above to get a number from the # user, converting it to an int so a decimal won't return an # invalid input in the following steps print("") # Blank Print statement to add an extra blank line after # user input before displaying response if selection == 1: # Add Item - modify the list variable, then save # to file add_item(todo_list) elif selection == 2: # Remove Item - modify the list variable, then # save to file if list_status == 0: remove_item(todo_list) elif list_status == 2: print("Invalid Command: The Todo List has no visible items " "to remove") else: print("Invalid Command: The Todo List has no items to remove") elif selection == 3: # Edit Item - modify the list variable, then save # to file if list_status == 0: edit_item(todo_list) elif list_status == 2: print("Invalid Command: The Todo List has no visible items " "to edit") else: print("Invalid Command: The Todo List has no items to edit") elif selection == 4: # Mark Item Complete - modify the list variable, # then save to file if list_status == 0: mark_complete(todo_list) elif list_status == 2: print("Invalid Command: The Todo List has no visible items " "to mark complete") else: print("Invalid Command: The Todo List has no items to mark " "complete") elif selection == 5: # Show Hidden - modify the list variable, then # save to file if list_status == 0 or list_status == 2: if show_hidden: print("No longer showing hidden items") show_hidden = False else: print("Now showing hidden items") show_hidden = True else: print("Invalid Command: The Todo List has no items to show or " "hide") elif selection == 6: # Exit Program print("Now Closing") elif selection == 7: # Extra section to demonstrate proficiency with # topics covered in class - Sprint 1 concept_demonstration() else: invalid_input = True print("Invalid Input\nPlease Try Again") def main(): """The purpose of this function is to ensure the save file exists at the specified save file location, load the save file into memory, display a welcome message with a divider, then start the menu loop until the program is closed :returns nothing""" save_file_location = "Item_List.txt" data_file_a = open(save_file_location, "a") # Opens ItemList.txt which # is accessible in the file variable, in append mode (using this so that # if the file exists, nothing happens, but 
if it does not exist, it gets
    # created - from w3schools.com
    data_file_a.close()  # Close the file, I now know it exists
    loaded_list = load_from_file(save_file_location)
    print("Welcome to the To-Do List - Version: 0.1.2")
    divider(42)  # Length of welcome statement above
    menu_loop(loaded_list, save_file_location)


if __name__ == "__main__":
    main()
if todo_list[item_index].visible:  # If an item is visible, then
            # they are not all hidden
            state = 0  # Neither
conditional_block
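For readability, the conditional_block example above reassembles as follows: the middle is spliced back between the prefix (which ends at the for-loop header of check_list_status) and the suffix (which begins at return state). This is a reconstruction of text already present in the row, not new logic:

    def check_list_status(todo_list):
        # 1 = empty list, 2 = entirely hidden list, 0 = neither
        if len(todo_list) == 0:
            state = 1  # Empty List
        else:
            state = 2  # Entirely Hidden List
            for item_index in range(len(todo_list)):
                if todo_list[item_index].visible:
                    # A visible item means they are not all hidden
                    state = 0
        return state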
routes.py
""" Analysis dashboards module. """ try: from collections.abc import Iterable except ImportError: from collections import Iterable import copy from datetime import datetime, timedelta import json import logging import re import numpy as np import pandas as pd from flask_login import login_required from flask import render_template, request from sqlalchemy import and_ from app.dashboards import blueprint from cropcore import queries from cropcore.utils import ( download_csv, parse_date_range_argument, query_result_to_array, ) from cropcore.structure import SQLA as db from cropcore.structure import ( SensorClass, TypeClass, ReadingsAegisIrrigationClass, ReadingsEnergyClass, ReadingsAranetCO2Class, ReadingsAranetTRHClass, ReadingsAranetAirVelocityClass, ) from cropcore.constants import CONST_MAX_RECORDS, CONST_TIMESTAMP_FORMAT # Temperature constants TEMP_BINS = { "Propagation": [0.0, 20.0, 23.0, 25.0, 144.0], "FrontFarm": [0.0, 18.0, 21.0, 25.0, 144.0], "Fridge": [0.0, 20.0, 23.0, 25.0, 144.0], "MidFarm": [0.0, 20.0, 23.0, 25.0, 144.0], "BackFarm": [0.0, 20.0, 25.0, 28.0, 144.0], "Tunnel": [0.0, 20.0, 25.0, 28.0, 144.0], "R&D": [0.0, 20.0, 23.0, 25.0, 144.0], } # TODO Read these from the database. SENSOR_CATEORIES = { 18: "MidFarm", 19: "Tunnel", 20: "Propagation", 21: "FrontFarm", 22: "BackFarm", 23: "MidFarm", 24: "R&D", 25: "R&D", 26: "Fridge", 27: "MidFarm", 48: "Propagation", 49: "R&D", } # Ventilation constants CONST_SFP = 2.39 # specific fan power CONST_VTOT = 20337.0 # total volume – m3 DEFAULT_SENSOR_TYPE = "Aranet T&RH" # Some data that varies based on sensor type. # DATA_COLUMNS_BY_SENSOR_TYPE names the class for the readings table. DATA_TABLES_BY_SENSOR_TYPE = { "Aranet T&RH": lambda: queries.trh_with_vpd(db.session).subquery().c, "Aranet CO2": lambda: ReadingsAranetCO2Class, "Aranet Air Velocity": lambda: ReadingsAranetAirVelocityClass, "Aegis II": lambda: ReadingsAegisIrrigationClass, } # DATA_COLUMNS_BY_SENSOR_TYPE names the columns of that table that we want to plot as # data, and gives them human friendly names to display on the UI. # TODO Could the below data be read from the database? DATA_COLUMNS_BY_SENSOR_TYPE = { "Aranet T&RH": [ {"column_name": "temperature", "ui_name": "Temperature (°C)"}, {"column_name": "humidity", "ui_name": "Humidity (%)"}, {"column_name": "vpd", "ui_name": "VPD (Pa)"}, ], "Aranet CO2": [ {"column_name": "co2", "ui_name": "CO2 (ppm)"}, ], "Aranet Air Velocity": [ {"column_name": "air_velocity", "ui_name": "Air velocity (m/s)"}, ], "Aegis II": [ {"column_name": "temperature", "ui_name": "Temperature (°C)"}, {"column_name": "pH", "ui_name": "pH"}, {"column_name": "dissolved_oxygen", "ui_name": "Dissolved oxygen (%)"}, {"column_name": "conductivity", "ui_name": "Conductivity (μS)"}, {"column_name": "turbidity", "ui_name": "Turbidity"}, {"column_name": "peroxide", "ui_name": "Peroxide (ppm)"}, ], } # The above constants are defined in terms of names of the sensor_types. The code # operates in terms of ids rather than names, so we wrap the above dictionaries into # functions. def get_sensor_type_name(sensor_type_id): """Gi
f get_sensor_type_id(sensor_type_name):
    """Given a sensor type name, get the ID of the sensor type from the database."""
    query = db.session.query(
        TypeClass.id,
    ).filter(TypeClass.sensor_type == sensor_type_name)
    sensor_id = db.session.execute(query).fetchone()
    if isinstance(sensor_id, Iterable):
        sensor_id = sensor_id[0]
    return sensor_id


def get_table_by_sensor_type(sensor_type_id):
    """Return the SQLAlchemy table/subquery corresponding to a given sensor type ID."""
    # Because of how global constants work in Flask, DATA_TABLES_BY_SENSOR_TYPE has
    # functions that return the relevant table/subquery, rather than the
    # tables/subqueries themselves. Hence the calls like `value()` and setting
    # `value = lambda: None`
    global DATA_TABLES_BY_SENSOR_TYPE
    if sensor_type_id in DATA_TABLES_BY_SENSOR_TYPE:
        return DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id]()
    else:
        sensor_type_name = get_sensor_type_name(sensor_type_id)
        if sensor_type_name in DATA_TABLES_BY_SENSOR_TYPE:
            value = DATA_TABLES_BY_SENSOR_TYPE[sensor_type_name]
        else:
            value = lambda: None
        DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id] = value
        return value()


def get_columns_by_sensor_type(sensor_type_id):
    """Return the names of the data columns in the table corresponding to a given
    sensor type ID.

    By "data columns" we mean the ones that depend on the sensor type and hold the
    actual data, e.g. temperature and humidity, but not timestamp. The return values
    are dictionaries with two keys, "column_name" for the name by which the database
    knows this column, and "ui_name" for a nice human-readable name fit for a UI.
    """
    global DATA_COLUMNS_BY_SENSOR_TYPE
    if sensor_type_id in DATA_COLUMNS_BY_SENSOR_TYPE:
        return DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id]
    else:
        sensor_type_name = get_sensor_type_name(sensor_type_id)
        if sensor_type_name in DATA_COLUMNS_BY_SENSOR_TYPE:
            value = DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_name]
        else:
            value = None
        DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id] = value
        return value


def get_default_sensor_type():
    """Get the ID of the default sensor type."""
    return get_sensor_type_id(DEFAULT_SENSOR_TYPE)


def is_valid_sensor_type(sensor_type_id):
    """Return True if we have the necessary metadata about the table and its columns
    needed for fetching and plotting data for the given sensor type, otherwise False.
    """
    return (
        get_table_by_sensor_type(sensor_type_id) is not None
        and get_columns_by_sensor_type(sensor_type_id) is not None
    )


# # # DONE WITH GLOBAL CONSTANTS AND SENSOR TYPE METADATA, BEGIN MAIN CONTENT # # #


def resample(df, bins, dt_from, dt_to):
    """
    Resamples (adds missing date/temperature bin combinations) to a dataframe.
Arguments:
        df: dataframe with temperature assigned to bins
        bins: temperature bins as a list
        dt_from: date range from
        dt_to: date range to
    Returns:
        bins_list: a list of temperature bins
        df_list: a list of df corresponding to temperature bins
    """
    bins_list = []
    for i in range(len(bins) - 1):
        bins_list.append("(%.1f, %.1f]" % (bins[i], bins[i + 1]))

    date_min = min(df["date"].min(), dt_from)
    date_max = max(df["date"].max(), dt_to)
    for n in range(int((date_max - date_min).days) + 1):
        day = date_min + timedelta(n)
        for temp_range in bins_list:
            if len(df[(df["date"] == day) & (df["temp_bin"] == temp_range)].index) == 0:
                df2 = pd.DataFrame(
                    {"date": [day], "temp_bin": [temp_range], "temp_cnt": [0]}
                )
                df = df.append(df2)
    df = df.sort_values(by=["date", "temp_bin"], ascending=True)
    df.reset_index(inplace=True, drop=True)

    df_list = []
    for bin_range in bins_list:
        df_bin = df[df["temp_bin"] == bin_range]
        del df_bin["temp_bin"]
        df_bin.reset_index(inplace=True, drop=True)
        df_list.append(df_bin)
    return bins_list, df_list


def lights_energy_use(dt_from_, dt_to_):
    """
    Energy use from Carpenter's place (with lights - called Clapham in the database)

    Arguments:
        dt_from_: date range from
        dt_to_: date range to
    Returns:
        lights_results_df - a pandas dataframe with mean lights on values
    """
    dt_from = pd.to_datetime(dt_from_.date()) + timedelta(hours=14)
    dt_to = pd.to_datetime(dt_to_.date()) + timedelta(days=1, hours=15)
    d_from = pd.to_datetime(dt_from_.date())
    d_to = pd.to_datetime(dt_to_.date())

    col_ec = "electricity_consumption"
    sensor_device_id = "Clapham"
    lights_on_cols = []

    # getting energy data for the analysis
    query = db.session.query(
        ReadingsEnergyClass.timestamp,
        ReadingsEnergyClass.electricity_consumption,
    ).filter(
        and_(
            SensorClass.device_id == sensor_device_id,
            ReadingsEnergyClass.sensor_id == SensorClass.id,
            ReadingsEnergyClass.timestamp >= dt_from,
            ReadingsEnergyClass.timestamp <= dt_to,
        )
    )
    df = pd.read_sql(query.statement, query.session.bind)

    if df.empty:
        return pd.DataFrame({"date": [], "mean_lights_on": []})

    # Resetting index
    df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)

    # grouping data by date-hour
    energy_hour = (
        df.groupby(
            by=[
                df["timestamp"].map(
                    lambda x: pd.to_datetime(
                        "%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour),
                        format="%Y-%m-%d-%H",
                    )
                ),
            ]
        )["electricity_consumption"]
        .sum()
        .reset_index()
    )

    # Sorting and resetting index
    energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)

    # energy dates. Energy date starts from 3pm each day and lasts for 24 hours
    energy_hour.loc[
        energy_hour["timestamp"].dt.hour < 15, "energy_date"
    ] = pd.to_datetime((energy_hour["timestamp"] + timedelta(days=-1)).dt.date)
    energy_hour.loc[
        energy_hour["timestamp"].dt.hour >= 15, "energy_date"
    ] = pd.to_datetime(energy_hour["timestamp"].dt.date)

    # Classification of lights being on
    # Lights ON 1: Lights turn on at 4pm and turn off at 9am, as scheduled.
    energy_hour["lights_on_1"] = energy_hour["timestamp"].apply(
        lambda x: 1 if (x.hour >= 17 or x.hour < 10) else 0
    )
    lights_on_cols.append("lights_on_1")

    # Lights ON 2: Lights are calculated by estimating the lighting use as between
    # the minima of two consecutive days. The lights are considered on when the
    # energy use is above the day's first quartile of lighting of this difference.
# energy_hour['lights_on_2'] = 0 # lights_on_cols.append('lights_on_2') # Lights ON 3: Lights are assumed to be on if the energy demand is over 30 kW # (max load of the extraction fan) energy_hour["lights_on_3"] = energy_hour[col_ec].apply( lambda x: 1 if (x > 30.0) else 0 ) lights_on_cols.append("lights_on_3") # Lights ON 4: Lights are assumed to turn on at the time of largest energy use # increase in the day, and turn off at the time of largest energy decrease of # the day. # estimating energy difference energy_hour["dE"] = energy_hour[col_ec] - energy_hour[col_ec].shift(1) energy_hour["dE"] = energy_hour["dE"].fillna(0.0) # finding max increase and min decrease energy_hour["dE_min"] = energy_hour.groupby("energy_date")["dE"].transform("min") energy_hour["dE_max"] = energy_hour.groupby("energy_date")["dE"].transform("max") energy_hour.loc[ np.isclose(energy_hour["dE_max"], energy_hour["dE"]), "lights_on_4" ] = 1 energy_hour.loc[ np.isclose(energy_hour["dE_min"], energy_hour["dE"]), "lights_on_4" ] = 0 # repeat last? prev_row_value = None for df_index in energy_hour.index: if df_index > 0: if np.isnan(energy_hour.loc[df_index, "lights_on_4"]) and not np.isnan( prev_row_value ): energy_hour.loc[df_index, "lights_on_4"] = prev_row_value prev_row_value = energy_hour.loc[df_index, "lights_on_4"] lights_on_cols.append("lights_on_4") # Lights ON 5: Lights are assumed on if the energy use is over 0.9 # times the days' energy use mean, and the energy demand is over 30 kW. energy_hour["energy_date_mean"] = energy_hour.groupby("energy_date")[ col_ec ].transform("mean") energy_hour["lights_on_5"] = np.where( (energy_hour[col_ec] > 30.0) & (energy_hour[col_ec] > 0.9 * energy_hour["energy_date_mean"]), 1, 0, ) lights_on_cols.append("lights_on_5") # getting the mean value of lights on per day energy_date_df = energy_hour.loc[ (energy_hour["energy_date"] >= d_from) & (energy_hour["energy_date"] <= d_to) ] energy_date_df = ( energy_date_df.groupby(by=["energy_date"])[lights_on_cols].sum().reset_index() ) energy_date_df["mean_lights_on"] = energy_date_df[lights_on_cols].sum(axis=1) / len( lights_on_cols ) energy_date_df["date"] = energy_date_df["energy_date"].dt.strftime("%Y-%m-%d") lights_results_df = energy_date_df[["date", "mean_lights_on"]] return lights_results_df def ventilation_energy_use(dt_from, dt_to): """ In our data this is called Carpenter’s Place. This reading only counts energy use for the second extraction fan. 
Arguments:
        dt_from: date range from
        dt_to: date range to
    Returns:
        ventilation_results_df - a pandas dataframe with ventilation analysis results
    """
    sensor_device_id = "1a Carpenters Place"

    # getting energy data for the analysis
    query = db.session.query(
        ReadingsEnergyClass.timestamp,
        ReadingsEnergyClass.electricity_consumption,
    ).filter(
        and_(
            SensorClass.device_id == sensor_device_id,
            ReadingsEnergyClass.sensor_id == SensorClass.id,
            ReadingsEnergyClass.timestamp >= dt_from,
            ReadingsEnergyClass.timestamp <= dt_to,
        )
    )
    df = pd.read_sql(query.statement, query.session.bind)

    if df.empty:
        return pd.DataFrame({"timestamp": [], "ach": []})

    # Resetting index
    df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)

    # grouping data by date-hour
    energy_hour = (
        df.groupby(
            by=[
                df["timestamp"].map(
                    lambda x: "%04d-%02d-%02d %02d:00"
                    % (x.year, x.month, x.day, x.hour)
                ),
            ]
        )["electricity_consumption"]
        .sum()
        .reset_index()
    )

    # Sorting and resetting index
    energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)

    # Calculating air exchange per hour
    energy_hour["ach"] = (
        energy_hour["electricity_consumption"]
        / CONST_SFP
        * 3600.0
        / (CONST_VTOT / 2.0)
    )
    ventilation_results_df = energy_hour[["timestamp", "ach"]]
    return ventilation_results_df


def aranet_trh_analysis(dt_from, dt_to):
    """
    Performs data analysis for Aranet Temperature+Relative Humidity sensors.

    Arguments:
        dt_from: date range from
        dt_to: date range to
    Returns:
        sensor_names: a list of sensor names
        sensor_temp_ranges: json data with temperature ranges
    """
    logging.info(
        "Calling aranet_trh_analysis with parameters %s %s"
        % (
            dt_from.strftime(CONST_TIMESTAMP_FORMAT),
            dt_to.strftime(CONST_TIMESTAMP_FORMAT),
        )
    )
    query = db.session.query(
        ReadingsAranetTRHClass.timestamp,
        ReadingsAranetTRHClass.sensor_id,
        SensorClass.name,
        ReadingsAranetTRHClass.temperature,
        ReadingsAranetTRHClass.humidity,
    ).filter(
        and_(
            ReadingsAranetTRHClass.sensor_id == SensorClass.id,
            ReadingsAranetTRHClass.timestamp >= dt_from,
            ReadingsAranetTRHClass.timestamp <= dt_to,
        )
    )

    df = pd.read_sql(query.statement, query.session.bind)
    logging.info("Total number of records found: %d" % (len(df.index)))
    return temperature_range_analysis(df, dt_from, dt_to)


def temperature_range_analysis(temp_df, dt_from, dt_to):
    """
    Performs temperature range analysis on a given pandas dataframe.

    Arguments:
        temp_df: dataframe of temperature readings to analyse
        dt_from: date range from
        dt_to: date range to
    Returns:
        sensor_names: a list of sensor names
        sensor_temp_ranges: json data with temperature ranges
    """
    df = copy.deepcopy(temp_df)
    df_unique_sensors = df[["sensor_id", "name"]].drop_duplicates(["sensor_id", "name"])
    sensor_ids = df_unique_sensors["sensor_id"].tolist()
    sensor_names = df_unique_sensors["name"].tolist()
    # extracting date from datetime
    df["date"] = pd.to_datetime(df["timestamp"].dt.date)
    # Resetting index
    df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)
    data_by_sensor_id = {}
    for sensor_name, sensor_id in zip(sensor_names, sensor_ids):
        df_sensor = df[df["sensor_id"] == sensor_id]
        # grouping data by date-hour and sensor id
        sensor_grp = df_sensor.groupby(
            by=[
                df_sensor.timestamp.map(
                    lambda x: "%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour)
                ),
                "date",
            ]
        )
        # estimating hourly temperature mean values
        sensor_grp_temp = sensor_grp["temperature"].mean().reset_index()
        try:
            bins = TEMP_BINS[SENSOR_CATEGORIES[sensor_id]]
        except KeyError:
            logging.error(
                f"Don't know how to categorise or bin sensor {sensor_id} "
                "in the dashboard."
) continue # binning temperature values sensor_grp_temp["temp_bin"] = pd.cut(sensor_grp_temp["temperature"], bins) # converting bins to str sensor_grp_temp["temp_bin"] = sensor_grp_temp["temp_bin"].astype(str) # get bin counts for each sensor-day combination sensor_grp_date = sensor_grp_temp.groupby(by=["date", "temp_bin"]) sensor_cnt = sensor_grp_date["temperature"].count().reset_index() sensor_cnt.rename(columns={"temperature": "temp_cnt"}, inplace=True) # Adding missing date/temp_bin combos bins_list, df_list = resample(sensor_cnt, bins, dt_from, dt_to) data_by_sensor_id[sensor_id] = { "name": sensor_name, "bins": bins_list, "data": [ { "date": df["date"].dt.strftime("%Y-%m-%d").to_list(), "count": df["temp_cnt"].to_list(), } for df in df_list ], } return len(data_by_sensor_id.keys()), json.dumps(data_by_sensor_id) def fetch_sensor_data(dt_from, dt_to, sensor_type, sensor_ids): sensor_type_name = get_sensor_type_name(sensor_type) if not is_valid_sensor_type(sensor_type): raise ValueError(f"Don't know how to fetch data for sensor type {sensor_type}") data_table = get_table_by_sensor_type(sensor_type) data_table_columns = [ getattr(data_table, column["column_name"]) for column in get_columns_by_sensor_type(sensor_type) ] query = db.session.query( data_table.timestamp, data_table.sensor_id, SensorClass.name, *data_table_columns, ).filter( and_( data_table.sensor_id == SensorClass.id, data_table.timestamp >= dt_from, data_table.timestamp <= dt_to, data_table.sensor_id.in_(sensor_ids), ) ) df = pd.read_sql(query.statement, query.session.bind) if sensor_type_name == "Aranet T&RH": # Rounding to two decimal places, because our precision isn't infinite, and # long floats look really ugly on the front end. df.loc[:, "vpd"] = df.loc[:, "vpd"].round(2) return df @blueprint.route("/aranet_trh_dashboard") @login_required def aranet_trh_dashboard(): dt_from, dt_to = parse_date_range_argument(request.args.get("range")) num_sensors, temperature_bins_json = aranet_trh_analysis(dt_from, dt_to) return render_template( "aranet_trh_dashboard.html", num_sensors=num_sensors, temperature_bins_json=temperature_bins_json, dt_from=dt_from.strftime("%B %d, %Y"), dt_to=dt_to.strftime("%B %d, %Y"), ) @blueprint.route("/energy_dashboard") @login_required def energy_dashboard(): dt_from, dt_to = parse_date_range_argument(request.args.get("range")) energy_data = {} # lights-on analysis lights_results_df = lights_energy_use(dt_from, dt_to) # ventilation analysis ventilation_results_df = ventilation_energy_use(dt_from, dt_to) # jsonify energy_data["data"] = ( "[" + lights_results_df.to_json(orient="records") + "," + ventilation_results_df.to_json(orient="records") + "]" ) return render_template( "energy_dashboard.html", energy_data=energy_data, dt_from=dt_from.strftime("%B %d, %Y"), dt_to=dt_to.strftime("%B %d, %Y"), ) # # # TIMESERIES DASHBOARD # # # def add_mean_over_sensors(sensor_type, sensor_ids, df, roll_window_minutes=10): """Take the dataframe for timeseries, and add data for a new "sensor" that's the mean of all the ones in the data """ if len(df) == 0: return df df_mean = df.groupby("timestamp").mean() df_mean.loc[:, "sensor_id"] = "mean" df_mean.loc[:, "name"] = "mean" # The sensor data comes with a 10 minute frequency. However, the sensors may be # "phase shifted" with respect to each other, e.g. one may have data for 00 and 10, # while another may have 05 and 15. A 10 minute rolling mean smooths out these # differences. 
roll_window = timedelta(minutes=roll_window_minutes)
    for column in get_columns_by_sensor_type(sensor_type):
        column_name = column["column_name"]
        df_mean[column_name] = df_mean[column_name].rolling(roll_window).mean()
    df_mean = df_mean.reset_index()
    df = pd.concat((df_mean, df), axis=0)
    return df


def fetch_all_sensor_types():
    """Get all sensor types from the CROP database, for which we know how to render
    the timeseries dashboard.

    Arguments:
        None
    Returns:
        List of dictionaries with keys "id" (int) and "sensor_type" (str).
    """
    query = db.session.query(
        TypeClass.id,
        TypeClass.sensor_type,
    )
    sensor_types = db.session.execute(query).fetchall()
    sensor_types = query_result_to_array(sensor_types)
    sensor_types = [st for st in sensor_types if is_valid_sensor_type(st["id"])]
    return sensor_types


def fetch_all_sensors(sensor_type):
    """Get all sensors of a given sensor type from the CROP database.

    Arguments:
        sensor_type: The database ID (primary key) of the sensor type.
    Returns:
        Dict keyed by sensor "id", of dictionaries with keys "id" (int),
        "aranet_code" (str), and "name" (str), in ascending "id" order.
    """
    query = db.session.query(
        SensorClass.id,
        SensorClass.aranet_code,
        SensorClass.name,
    ).filter(SensorClass.type_id == sensor_type)
    sensors = db.session.execute(query).fetchall()
    sensors = query_result_to_array(sensors)
    sensors = {s["id"]: s for s in sorted(sensors, key=lambda x: x["id"])}
    return sensors


@blueprint.route("/timeseries_dashboard", methods=["GET", "POST"])
@login_required
def timeseries_dashboard():
    # Read query string
    dt_from = request.args.get("startDate")
    dt_to = request.args.get("endDate")
    sensor_ids = request.args.get("sensorIds")
    if sensor_ids is not None:
        # sensor_ids is passed as a comma-separated (or space or semicolon, although
        # those aren't currently used) string of ints, split it into a list of ints.
        sensor_ids = tuple(map(int, re.split(r"[ ;,]+", sensor_ids.rstrip(" ,;"))))
    sensor_type = request.args.get("sensorType")
    if sensor_type is None:
        sensor_type = get_default_sensor_type()
    else:
        sensor_type = int(sensor_type)

    # Get the data from the database that will be required in all scenarios for how the
    # page might be rendered.
    sensor_types = fetch_all_sensor_types()
    all_sensors = fetch_all_sensors(sensor_type)

    # If we don't have the information necessary to plot data for sensors, just render
    # the selector version of the page.
    if (
        dt_from is None
        or dt_to is None
        or sensor_ids is None
        or not is_valid_sensor_type(sensor_type)
    ):
        today = datetime.today()
        dt_from = today - timedelta(days=7)
        dt_to = today
        return render_template(
            "timeseries_dashboard.html",
            sensor_type=sensor_type,
            sensor_types=sensor_types,
            all_sensors=all_sensors,
            sensor_ids=sensor_ids,
            dt_from=dt_from,
            dt_to=dt_to,
            data=dict(),
            summaries=dict(),
            data_columns=[],
        )

    # Convert datetime strings to objects and make dt_to run to the end of the day in
    # question.
    dt_from = datetime.strptime(dt_from, "%Y%m%d")
    dt_to = (
        datetime.strptime(dt_to, "%Y%m%d")
        + timedelta(days=1)
        + timedelta(milliseconds=-1)
    )

    df = fetch_sensor_data(dt_from, dt_to, sensor_type, sensor_ids)

    if request.method == "POST":
        df = df.sort_values("timestamp")
        return download_csv(df, "timeseries")

    data_keys = list(sensor_ids)
    if len(sensor_ids) > 1:
        df = add_mean_over_sensors(sensor_type, sensor_ids, df)
        # Insert at start, to make "mean" be the first one displayed on the page.
data_keys.insert(0, "mean")

    data_columns = get_columns_by_sensor_type(sensor_type)
    data_dict = dict()
    summary_dict = dict()
    for key in data_keys:
        df_key = (
            df[df["sensor_id"] == key]
            .drop(columns=["sensor_id", "name"])
            .sort_values("timestamp")
        )
        # We first call to_json and then json.loads just to get the data into a nice
        # nested dictionary that a final json.dumps can deal with.
        data_dict[key] = json.loads(df_key.to_json(orient="records", date_format="iso"))
        # Round the summary stats to two decimals, for nice front end presentation.
        summary_dict[key] = json.loads(df_key.describe().round(2).to_json())

    return render_template(
        "timeseries_dashboard.html",
        sensor_type=sensor_type,
        sensor_types=sensor_types,
        all_sensors=all_sensors,
        sensor_ids=sensor_ids,
        dt_from=dt_from,
        dt_to=dt_to,
        data=data_dict,
        summaries=summary_dict,
        data_columns=data_columns,
    )
ven a sensor type ID, get the name of the sensor type from the database."""
    query = db.session.query(
        TypeClass.sensor_type,
    ).filter(TypeClass.id == sensor_type_id)
    sensor_name = db.session.execute(query).fetchone()
    if isinstance(sensor_name, Iterable):
        sensor_name = sensor_name[0]
    return sensor_name


de
identifier_body
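Likewise, splicing the identifier_body middle above back between its prefix (which ends mid-docstring at """Gi) and suffix (which begins at f get_sensor_type_id, completing the trailing de into the next def) yields the complete helper, reconstructed here for readability from text already in the row:

    def get_sensor_type_name(sensor_type_id):
        """Given a sensor type ID, get the name of the sensor type from the database."""
        query = db.session.query(
            TypeClass.sensor_type,
        ).filter(TypeClass.id == sensor_type_id)
        sensor_name = db.session.execute(query).fetchone()
        if isinstance(sensor_name, Iterable):
            sensor_name = sensor_name[0]
        return sensor_name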
routes.py
""" Analysis dashboards module. """ try: from collections.abc import Iterable except ImportError: from collections import Iterable import copy from datetime import datetime, timedelta import json import logging import re import numpy as np import pandas as pd from flask_login import login_required from flask import render_template, request from sqlalchemy import and_ from app.dashboards import blueprint from cropcore import queries from cropcore.utils import ( download_csv, parse_date_range_argument, query_result_to_array, ) from cropcore.structure import SQLA as db from cropcore.structure import ( SensorClass, TypeClass, ReadingsAegisIrrigationClass, ReadingsEnergyClass, ReadingsAranetCO2Class, ReadingsAranetTRHClass, ReadingsAranetAirVelocityClass, ) from cropcore.constants import CONST_MAX_RECORDS, CONST_TIMESTAMP_FORMAT # Temperature constants TEMP_BINS = { "Propagation": [0.0, 20.0, 23.0, 25.0, 144.0], "FrontFarm": [0.0, 18.0, 21.0, 25.0, 144.0], "Fridge": [0.0, 20.0, 23.0, 25.0, 144.0], "MidFarm": [0.0, 20.0, 23.0, 25.0, 144.0], "BackFarm": [0.0, 20.0, 25.0, 28.0, 144.0], "Tunnel": [0.0, 20.0, 25.0, 28.0, 144.0], "R&D": [0.0, 20.0, 23.0, 25.0, 144.0], } # TODO Read these from the database. SENSOR_CATEORIES = { 18: "MidFarm", 19: "Tunnel", 20: "Propagation", 21: "FrontFarm", 22: "BackFarm", 23: "MidFarm", 24: "R&D", 25: "R&D", 26: "Fridge", 27: "MidFarm", 48: "Propagation", 49: "R&D", } # Ventilation constants CONST_SFP = 2.39 # specific fan power CONST_VTOT = 20337.0 # total volume – m3 DEFAULT_SENSOR_TYPE = "Aranet T&RH" # Some data that varies based on sensor type. # DATA_COLUMNS_BY_SENSOR_TYPE names the class for the readings table. DATA_TABLES_BY_SENSOR_TYPE = { "Aranet T&RH": lambda: queries.trh_with_vpd(db.session).subquery().c, "Aranet CO2": lambda: ReadingsAranetCO2Class, "Aranet Air Velocity": lambda: ReadingsAranetAirVelocityClass, "Aegis II": lambda: ReadingsAegisIrrigationClass, } # DATA_COLUMNS_BY_SENSOR_TYPE names the columns of that table that we want to plot as # data, and gives them human friendly names to display on the UI. # TODO Could the below data be read from the database? DATA_COLUMNS_BY_SENSOR_TYPE = { "Aranet T&RH": [ {"column_name": "temperature", "ui_name": "Temperature (°C)"}, {"column_name": "humidity", "ui_name": "Humidity (%)"}, {"column_name": "vpd", "ui_name": "VPD (Pa)"}, ], "Aranet CO2": [ {"column_name": "co2", "ui_name": "CO2 (ppm)"}, ], "Aranet Air Velocity": [ {"column_name": "air_velocity", "ui_name": "Air velocity (m/s)"}, ], "Aegis II": [ {"column_name": "temperature", "ui_name": "Temperature (°C)"}, {"column_name": "pH", "ui_name": "pH"}, {"column_name": "dissolved_oxygen", "ui_name": "Dissolved oxygen (%)"}, {"column_name": "conductivity", "ui_name": "Conductivity (μS)"}, {"column_name": "turbidity", "ui_name": "Turbidity"}, {"column_name": "peroxide", "ui_name": "Peroxide (ppm)"}, ], } # The above constants are defined in terms of names of the sensor_types. The code # operates in terms of ids rather than names, so we wrap the above dictionaries into # functions. 
def get_sensor_type_name(sensor_type_id):
    """Given a sensor type ID, get the name of the sensor type from the database."""
    query = db.session.query(
        TypeClass.sensor_type,
    ).filter(TypeClass.id == sensor_type_id)
    sensor_name = db.session.execute(query).fetchone()
    if isinstance(sensor_name, Iterable):
        sensor_name = sensor_name[0]
    return sensor_name


def get_sensor_type_id(sensor_type_name):
    """Given a sensor type name, get the ID of the sensor type from the database."""
    query = db.session.query(
        TypeClass.id,
    ).filter(TypeClass.sensor_type == sensor_type_name)
    sensor_id = db.session.execute(query).fetchone()
    if isinstance(sensor_id, Iterable):
        sensor_id = sensor_id[0]
    return sensor_id


def get_table_by_sensor_type(sensor_type_id):
    """Return the SQLAlchemy table/subquery corresponding to a given sensor type ID."""
    # Because of how global constants work in Flask, DATA_TABLES_BY_SENSOR_TYPE has
    # functions that return the relevant table/subquery, rather than the
    # tables/subqueries themselves. Hence the calls like `value()` and setting
    # `value = lambda: None`
    global DATA_TABLES_BY_SENSOR_TYPE
    if sensor_type_id in DATA_TABLES_BY_SENSOR_TYPE:
        return DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id]()
    else:
        sensor_type_name = get_sensor_type_name(sensor_type_id)
        if sensor_type_name in DATA_TABLES_BY_SENSOR_TYPE:
            value = DATA_TABLES_BY_SENSOR_TYPE[sensor_type_name]
        else:
            value = lambda: None
        DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id] = value
        return value()


def get_columns_by_sensor_type(sensor_type_id):
    """Return the names of the data columns in the table corresponding to a given
    sensor type ID.

    By "data columns" we mean the ones that depend on the sensor type and hold the
    actual data, e.g. temperature and humidity, but not timestamp. The return values
    are dictionaries with two keys, "column_name" for the name by which the database
    knows this column, and "ui_name" for a nice human-readable name fit for a UI.
    """
    global DATA_COLUMNS_BY_SENSOR_TYPE
    if sensor_type_id in DATA_COLUMNS_BY_SENSOR_TYPE:
        return DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id]
    else:
        sensor_type_name = get_sensor_type_name(sensor_type_id)
        if sensor_type_name in DATA_COLUMNS_BY_SENSOR_TYPE:
            value = DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_name]
        else:
            value = None
        DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id] = value
        return value


def get_default_sensor_type():
    """Get the ID of the default sensor type."""
    return get_sensor_type_id(DEFAULT_SENSOR_TYPE)


def is_valid_sensor_type(sensor_type_id):
    """Return True if we have the necessary metadata about the table and its columns
    needed for fetching and plotting data for the given sensor type, otherwise False.
    """
    return (
        get_table_by_sensor_type(sensor_type_id) is not None
        and get_columns_by_sensor_type(sensor_type_id) is not None
    )


# # # DONE WITH GLOBAL CONSTANTS AND SENSOR TYPE METADATA, BEGIN MAIN CONTENT # # #


def resample(df, bins, dt_from, dt_to):
    """
    Resamples (adds missing date/temperature bin combinations) to a dataframe.
Arguments:
        df: dataframe with temperature assigned to bins
        bins: temperature bins as a list
        dt_from: date range from
        dt_to: date range to
    Returns:
        bins_list: a list of temperature bins
        df_list: a list of df corresponding to temperature bins
    """
    bins_list = []
    for i in range(len(bins) - 1):
        bins_list.append("(%.1f, %.1f]" % (bins[i], bins[i + 1]))

    date_min = min(df["date"].min(), dt_from)
    date_max = max(df["date"].max(), dt_to)
    for n in range(int((date_max - date_min).days) + 1):
        day = date_min + timedelta(n)
        for temp_range in bins_list:
            if len(df[(df["date"] == day) & (df["temp_bin"] == temp_range)].index) == 0:
                df2 = pd.DataFrame(
                    {"date": [day], "temp_bin": [temp_range], "temp_cnt": [0]}
                )
                df = df.append(df2)
    df = df.sort_values(by=["date", "temp_bin"], ascending=True)
    df.reset_index(inplace=True, drop=True)

    df_list = []
    for bin_range in bins_list:
        df_bin = df[df["temp_bin"] == bin_range]
        del df_bin["temp_bin"]
        df_bin.reset_index(inplace=True, drop=True)
        df_list.append(df_bin)
    return bins_list, df_list


def lights_energy_use(dt_from_, dt_to_):
    """
    Energy use from Carpenter's place (with lights - called Clapham in the database)

    Arguments:
        dt_from_: date range from
        dt_to_: date range to
    Returns:
        lights_results_df - a pandas dataframe with mean lights on values
    """
    dt_from = pd.to_datetime(dt_from_.date()) + timedelta(hours=14)
    dt_to = pd.to_datetime(dt_to_.date()) + timedelta(days=1, hours=15)
    d_from = pd.to_datetime(dt_from_.date())
    d_to = pd.to_datetime(dt_to_.date())

    col_ec = "electricity_consumption"
    sensor_device_id = "Clapham"
    lights_on_cols = []

    # getting energy data for the analysis
    query = db.session.query(
        ReadingsEnergyClass.timestamp,
        ReadingsEnergyClass.electricity_consumption,
    ).filter(
        and_(
            SensorClass.device_id == sensor_device_id,
            ReadingsEnergyClass.sensor_id == SensorClass.id,
            ReadingsEnergyClass.timestamp >= dt_from,
            ReadingsEnergyClass.timestamp <= dt_to,
        )
    )
    df = pd.read_sql(query.statement, query.session.bind)

    if df.empty:
        return pd.DataFrame({"date": [], "mean_lights_on": []})

    # Resetting index
    df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)

    # grouping data by date-hour
    energy_hour = (
        df.groupby(
            by=[
                df["timestamp"].map(
                    lambda x: pd.to_datetime(
                        "%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour),
                        format="%Y-%m-%d-%H",
                    )
                ),
            ]
        )["electricity_consumption"]
        .sum()
        .reset_index()
    )

    # Sorting and resetting index
    energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)

    # energy dates. Energy date starts from 3pm each day and lasts for 24 hours
    energy_hour.loc[
        energy_hour["timestamp"].dt.hour < 15, "energy_date"
    ] = pd.to_datetime((energy_hour["timestamp"] + timedelta(days=-1)).dt.date)
    energy_hour.loc[
        energy_hour["timestamp"].dt.hour >= 15, "energy_date"
    ] = pd.to_datetime(energy_hour["timestamp"].dt.date)

    # Classification of lights being on
    # Lights ON 1: Lights turn on at 4pm and turn off at 9am, as scheduled.
    energy_hour["lights_on_1"] = energy_hour["timestamp"].apply(
        lambda x: 1 if (x.hour >= 17 or x.hour < 10) else 0
    )
    lights_on_cols.append("lights_on_1")

    # Lights ON 2: Lights are calculated by estimating the lighting use as between
    # the minima of two consecutive days. The lights are considered on when the
    # energy use is above the day's first quartile of lighting of this difference.
# energy_hour['lights_on_2'] = 0 # lights_on_cols.append('lights_on_2') # Lights ON 3: Lights are assumed to be on if the energy demand is over 30 kW # (max load of the extraction fan) energy_hour["lights_on_3"] = energy_hour[col_ec].apply( lambda x: 1 if (x > 30.0) else 0 ) lights_on_cols.append("lights_on_3") # Lights ON 4: Lights are assumed to turn on at the time of largest energy use # increase in the day, and turn off at the time of largest energy decrease of # the day. # estimating energy difference energy_hour["dE"] = energy_hour[col_ec] - energy_hour[col_ec].shift(1) energy_hour["dE"] = energy_hour["dE"].fillna(0.0) # finding max increase and min decrease energy_hour["dE_min"] = energy_hour.groupby("energy_date")["dE"].transform("min") energy_hour["dE_max"] = energy_hour.groupby("energy_date")["dE"].transform("max") energy_hour.loc[ np.isclose(energy_hour["dE_max"], energy_hour["dE"]), "lights_on_4" ] = 1 energy_hour.loc[ np.isclose(energy_hour["dE_min"], energy_hour["dE"]), "lights_on_4" ] = 0 # repeat last? prev_row_value = None for df_index in energy_hour.index: if df_index > 0: if np.isnan(energy_hour.loc[df_index, "lights_on_4"]) and not np.isnan( prev_row_value ): energy_hour.loc[df_index, "lights_on_4"] = prev_row_value prev_row_value = energy_hour.loc[df_index, "lights_on_4"] lights_on_cols.append("lights_on_4") # Lights ON 5: Lights are assumed on if the energy use is over 0.9 # times the days' energy use mean, and the energy demand is over 30 kW. energy_hour["energy_date_mean"] = energy_hour.groupby("energy_date")[ col_ec ].transform("mean") energy_hour["lights_on_5"] = np.where( (energy_hour[col_ec] > 30.0) & (energy_hour[col_ec] > 0.9 * energy_hour["energy_date_mean"]), 1, 0, ) lights_on_cols.append("lights_on_5") # getting the mean value of lights on per day energy_date_df = energy_hour.loc[ (energy_hour["energy_date"] >= d_from) & (energy_hour["energy_date"] <= d_to) ] energy_date_df = ( energy_date_df.groupby(by=["energy_date"])[lights_on_cols].sum().reset_index() ) energy_date_df["mean_lights_on"] = energy_date_df[lights_on_cols].sum(axis=1) / len( lights_on_cols ) energy_date_df["date"] = energy_date_df["energy_date"].dt.strftime("%Y-%m-%d") lights_results_df = energy_date_df[["date", "mean_lights_on"]] return lights_results_df def ventilation_energy_use(dt_from, dt_to): """ In our data this is called Carpenter’s Place. This reading only counts energy use for the second extraction fan. 
Arguments:
        dt_from: date range from
        dt_to: date range to
    Returns:
        ventilation_results_df - a pandas dataframe with ventilation analysis results
    """
    sensor_device_id = "1a Carpenters Place"

    # getting energy data for the analysis
    query = db.session.query(
        ReadingsEnergyClass.timestamp,
        ReadingsEnergyClass.electricity_consumption,
    ).filter(
        and_(
            SensorClass.device_id == sensor_device_id,
            ReadingsEnergyClass.sensor_id == SensorClass.id,
            ReadingsEnergyClass.timestamp >= dt_from,
            ReadingsEnergyClass.timestamp <= dt_to,
        )
    )
    df = pd.read_sql(query.statement, query.session.bind)

    if df.empty:
        return pd.DataFrame({"timestamp": [], "ach": []})

    # Resetting index
    df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)

    # grouping data by date-hour
    energy_hour = (
        df.groupby(
            by=[
                df["timestamp"].map(
                    lambda x: "%04d-%02d-%02d %02d:00"
                    % (x.year, x.month, x.day, x.hour)
                ),
            ]
        )["electricity_consumption"]
        .sum()
        .reset_index()
    )

    # Sorting and resetting index
    energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True)

    # Calculating air exchange per hour
    energy_hour["ach"] = (
        energy_hour["electricity_consumption"]
        / CONST_SFP
        * 3600.0
        / (CONST_VTOT / 2.0)
    )
    ventilation_results_df = energy_hour[["timestamp", "ach"]]
    return ventilation_results_df


def aranet_trh_analysis(dt_from, dt_to):
    """
    Performs data analysis for Aranet Temperature+Relative Humidity sensors.

    Arguments:
        dt_from: date range from
        dt_to: date range to
    Returns:
        sensor_names: a list of sensor names
        sensor_temp_ranges: json data with temperature ranges
    """
    logging.info(
        "Calling aranet_trh_analysis with parameters %s %s"
        % (
            dt_from.strftime(CONST_TIMESTAMP_FORMAT),
            dt_to.strftime(CONST_TIMESTAMP_FORMAT),
        )
    )
    query = db.session.query(
        ReadingsAranetTRHClass.timestamp,
        ReadingsAranetTRHClass.sensor_id,
        SensorClass.name,
        ReadingsAranetTRHClass.temperature,
        ReadingsAranetTRHClass.humidity,
    ).filter(
        and_(
            ReadingsAranetTRHClass.sensor_id == SensorClass.id,
            ReadingsAranetTRHClass.timestamp >= dt_from,
            ReadingsAranetTRHClass.timestamp <= dt_to,
        )
    )

    df = pd.read_sql(query.statement, query.session.bind)
    logging.info("Total number of records found: %d" % (len(df.index)))
    return temperature_range_analysis(df, dt_from, dt_to)


def tempera
f, dt_from, dt_to):
    """
    Performs temperature range analysis on a given pandas dataframe.

    Arguments:
        temp_df: a pandas dataframe with temperature readings
        dt_from: date range from
        dt_to: date range to
    Returns:
        num_sensors: the number of sensors with data
        sensor_temp_ranges: JSON data with temperature ranges
    """
    df = copy.deepcopy(temp_df)
    df_unique_sensors = df[["sensor_id", "name"]].drop_duplicates(["sensor_id", "name"])
    sensor_ids = df_unique_sensors["sensor_id"].tolist()
    sensor_names = df_unique_sensors["name"].tolist()

    # extracting date from datetime
    df["date"] = pd.to_datetime(df["timestamp"].dt.date)

    # Sorting and resetting index (sort_values returns a copy, so reassign)
    df = df.sort_values(by=["timestamp"], ascending=True).reset_index(drop=True)

    data_by_sensor_id = {}
    for sensor_name, sensor_id in zip(sensor_names, sensor_ids):
        df_sensor = df[df["sensor_id"] == sensor_id]
        # grouping data by date-hour and sensor id
        sensor_grp = df_sensor.groupby(
            by=[
                df_sensor.timestamp.map(
                    lambda x: "%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour)
                ),
                "date",
            ]
        )
        # estimating hourly temperature mean values
        sensor_grp_temp = sensor_grp["temperature"].mean().reset_index()
        try:
            bins = TEMP_BINS[SENSOR_CATEORIES[sensor_id]]
        except KeyError:
            logging.error(
                f"Don't know how to categorise or bin sensor {sensor_id} "
                "in the dashboard."
            )
            continue
        # binning temperature values
        sensor_grp_temp["temp_bin"] = pd.cut(sensor_grp_temp["temperature"], bins)
        # converting bins to str
        sensor_grp_temp["temp_bin"] = sensor_grp_temp["temp_bin"].astype(str)

        # get bin counts for each sensor-day combination
        sensor_grp_date = sensor_grp_temp.groupby(by=["date", "temp_bin"])
        sensor_cnt = sensor_grp_date["temperature"].count().reset_index()
        sensor_cnt.rename(columns={"temperature": "temp_cnt"}, inplace=True)

        # Adding missing date/temp_bin combos
        bins_list, df_list = resample(sensor_cnt, bins, dt_from, dt_to)
        data_by_sensor_id[sensor_id] = {
            "name": sensor_name,
            "bins": bins_list,
            "data": [
                {
                    "date": df["date"].dt.strftime("%Y-%m-%d").to_list(),
                    "count": df["temp_cnt"].to_list(),
                }
                for df in df_list
            ],
        }

    return len(data_by_sensor_id.keys()), json.dumps(data_by_sensor_id)


def fetch_sensor_data(dt_from, dt_to, sensor_type, sensor_ids):
    sensor_type_name = get_sensor_type_name(sensor_type)
    if not is_valid_sensor_type(sensor_type):
        raise ValueError(f"Don't know how to fetch data for sensor type {sensor_type}")
    data_table = get_table_by_sensor_type(sensor_type)
    data_table_columns = [
        getattr(data_table, column["column_name"])
        for column in get_columns_by_sensor_type(sensor_type)
    ]
    query = db.session.query(
        data_table.timestamp,
        data_table.sensor_id,
        SensorClass.name,
        *data_table_columns,
    ).filter(
        and_(
            data_table.sensor_id == SensorClass.id,
            data_table.timestamp >= dt_from,
            data_table.timestamp <= dt_to,
            data_table.sensor_id.in_(sensor_ids),
        )
    )
    df = pd.read_sql(query.statement, query.session.bind)
    if sensor_type_name == "Aranet T&RH":
        # Rounding to two decimal places, because our precision isn't infinite, and
        # long floats look really ugly on the front end.
df.loc[:, "vpd"] = df.loc[:, "vpd"].round(2) return df @blueprint.route("/aranet_trh_dashboard") @login_required def aranet_trh_dashboard(): dt_from, dt_to = parse_date_range_argument(request.args.get("range")) num_sensors, temperature_bins_json = aranet_trh_analysis(dt_from, dt_to) return render_template( "aranet_trh_dashboard.html", num_sensors=num_sensors, temperature_bins_json=temperature_bins_json, dt_from=dt_from.strftime("%B %d, %Y"), dt_to=dt_to.strftime("%B %d, %Y"), ) @blueprint.route("/energy_dashboard") @login_required def energy_dashboard(): dt_from, dt_to = parse_date_range_argument(request.args.get("range")) energy_data = {} # lights-on analysis lights_results_df = lights_energy_use(dt_from, dt_to) # ventilation analysis ventilation_results_df = ventilation_energy_use(dt_from, dt_to) # jsonify energy_data["data"] = ( "[" + lights_results_df.to_json(orient="records") + "," + ventilation_results_df.to_json(orient="records") + "]" ) return render_template( "energy_dashboard.html", energy_data=energy_data, dt_from=dt_from.strftime("%B %d, %Y"), dt_to=dt_to.strftime("%B %d, %Y"), ) # # # TIMESERIES DASHBOARD # # # def add_mean_over_sensors(sensor_type, sensor_ids, df, roll_window_minutes=10): """Take the dataframe for timeseries, and add data for a new "sensor" that's the mean of all the ones in the data """ if len(df) == 0: return df df_mean = df.groupby("timestamp").mean() df_mean.loc[:, "sensor_id"] = "mean" df_mean.loc[:, "name"] = "mean" # The sensor data comes with a 10 minute frequency. However, the sensors may be # "phase shifted" with respect to each other, e.g. one may have data for 00 and 10, # while another may have 05 and 15. A 10 minute rolling mean smooths out these # differences. roll_window = timedelta(minutes=roll_window_minutes) for column in get_columns_by_sensor_type(sensor_type): column_name = column["column_name"] df_mean[column_name] = df_mean[column_name].rolling(roll_window).mean() df_mean = df_mean.reset_index() df = pd.concat((df_mean, df), axis=0) return df def fetch_all_sensor_types(): """Get all sensor types from the CROP database, for which we know how to render the timeseries dashboard. Arguments: None Returns: List of dictionaries with keys "id" (int) and "sensor_type" (str). """ query = db.session.query( TypeClass.id, TypeClass.sensor_type, ) sensor_types = db.session.execute(query).fetchall() sensor_types = query_result_to_array(sensor_types) sensor_types = [st for st in sensor_types if is_valid_sensor_type(st["id"])] return sensor_types def fetch_all_sensors(sensor_type): """Get all sensors of a given sensor type from the CROP database. Arguments: sensor_type: The database ID (primary key) of the sensor type. Returns: List of dictionaries with keys "id" (int) and "name" (str), sorted by "id". """ query = db.session.query( SensorClass.id, SensorClass.aranet_code, SensorClass.name, ).filter(SensorClass.type_id == sensor_type) sensors = db.session.execute(query).fetchall() sensors = query_result_to_array(sensors) sensors = {s["id"]: s for s in sorted(sensors, key=lambda x: x["id"])} return sensors @blueprint.route("/timeseries_dashboard", methods=["GET", "POST"]) @login_required def timeseries_dashboard(): # Read query string dt_from = request.args.get("startDate") dt_to = request.args.get("endDate") sensor_ids = request.args.get("sensorIds") if sensor_ids is not None: # sensor_ids is passed as a comma-separated (or space or semicolon, although # those aren't currently used) string of ints, split it into a list of ints. 
sensor_ids = tuple(map(int, re.split(r"[ ;,]+", sensor_ids.rstrip(" ,;")))) sensor_type = request.args.get("sensorType") if sensor_type is None: sensor_type = get_default_sensor_type() else: sensor_type = int(sensor_type) # Get the data from the database that will be required in all scenarios for how the # page might be rendered. sensor_types = fetch_all_sensor_types() all_sensors = fetch_all_sensors(sensor_type) # If we don't have the information necessary to plot data for sensors, just render # the selector version of the page. if ( dt_from is None or dt_to is None or sensor_ids is None or not is_valid_sensor_type(sensor_type) ): today = datetime.today() dt_from = today - timedelta(days=7) dt_to = today return render_template( "timeseries_dashboard.html", sensor_type=sensor_type, sensor_types=sensor_types, all_sensors=all_sensors, sensor_ids=sensor_ids, dt_from=dt_from, dt_to=dt_to, data=dict(), summaries=dict(), data_columns=[], ) # Convert datetime strings to objects and make dt_to run to the end of the day in # question. dt_from = datetime.strptime(dt_from, "%Y%m%d") dt_to = ( datetime.strptime(dt_to, "%Y%m%d") + timedelta(days=1) + timedelta(milliseconds=-1) ) df = fetch_sensor_data(dt_from, dt_to, sensor_type, sensor_ids) if request.method == "POST": df = df.sort_values("timestamp") return download_csv(df, "timeseries") data_keys = list(sensor_ids) if len(sensor_ids) > 1: df = add_mean_over_sensors(sensor_type, sensor_ids, df) # Insert at start, to make "mean" be the first one displayed on the page. data_keys.insert(0, "mean") data_columns = get_columns_by_sensor_type(sensor_type) data_dict = dict() summary_dict = dict() for key in data_keys: df_key = ( df[df["sensor_id"] == key] .drop(columns=["sensor_id", "name"]) .sort_values("timestamp") ) # You may wonder, why we first to_json, and then json.loads. That's just to have # the data in a nice nested dictionary that a final json.dumps can deal with. data_dict[key] = json.loads(df_key.to_json(orient="records", date_format="iso")) # Round the summary stats to two decimals, for nice front end presentation. summary_dict[key] = json.loads(df_key.describe().round(2).to_json()) return render_template( "timeseries_dashboard.html", sensor_type=sensor_type, sensor_types=sensor_types, all_sensors=all_sensors, sensor_ids=sensor_ids, dt_from=dt_from, dt_to=dt_to, data=data_dict, summaries=summary_dict, data_columns=data_columns, )
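# A minimal standalone sketch of the ACH arithmetic used in ventilation_energy_use
# above. The unit interpretation (hourly kWh readings, specific fan power in
# kW per m3/s, fan serving half the total volume) is an assumption made for this
# illustration, not something the source states.
EXAMPLE_SFP = 2.39  # same value as CONST_SFP above
EXAMPLE_VTOT = 20337.0  # same value as CONST_VTOT above, m3


def example_air_changes_per_hour(hourly_energy: float) -> float:
    flow = hourly_energy / EXAMPLE_SFP  # average fan flow over the hour
    volume_moved = flow * 3600.0  # air moved during that hour
    return volume_moved / (EXAMPLE_VTOT / 2.0)  # fan serves half the volume


# e.g. a 30 kWh hour gives roughly 4.4 air changes:
# example_air_changes_per_hour(30.0) -> 4.44...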
ture_range_analysis(temp_d
identifier_name
routes.py
""" Analysis dashboards module. """ try: from collections.abc import Iterable except ImportError: from collections import Iterable import copy from datetime import datetime, timedelta import json import logging import re import numpy as np import pandas as pd from flask_login import login_required from flask import render_template, request from sqlalchemy import and_ from app.dashboards import blueprint from cropcore import queries from cropcore.utils import ( download_csv, parse_date_range_argument, query_result_to_array, ) from cropcore.structure import SQLA as db from cropcore.structure import ( SensorClass, TypeClass, ReadingsAegisIrrigationClass, ReadingsEnergyClass, ReadingsAranetCO2Class, ReadingsAranetTRHClass, ReadingsAranetAirVelocityClass, ) from cropcore.constants import CONST_MAX_RECORDS, CONST_TIMESTAMP_FORMAT # Temperature constants TEMP_BINS = { "Propagation": [0.0, 20.0, 23.0, 25.0, 144.0], "FrontFarm": [0.0, 18.0, 21.0, 25.0, 144.0], "Fridge": [0.0, 20.0, 23.0, 25.0, 144.0], "MidFarm": [0.0, 20.0, 23.0, 25.0, 144.0], "BackFarm": [0.0, 20.0, 25.0, 28.0, 144.0], "Tunnel": [0.0, 20.0, 25.0, 28.0, 144.0], "R&D": [0.0, 20.0, 23.0, 25.0, 144.0], } # TODO Read these from the database. SENSOR_CATEORIES = { 18: "MidFarm", 19: "Tunnel", 20: "Propagation", 21: "FrontFarm", 22: "BackFarm", 23: "MidFarm", 24: "R&D", 25: "R&D", 26: "Fridge", 27: "MidFarm", 48: "Propagation", 49: "R&D", } # Ventilation constants CONST_SFP = 2.39 # specific fan power CONST_VTOT = 20337.0 # total volume – m3 DEFAULT_SENSOR_TYPE = "Aranet T&RH" # Some data that varies based on sensor type. # DATA_COLUMNS_BY_SENSOR_TYPE names the class for the readings table. DATA_TABLES_BY_SENSOR_TYPE = { "Aranet T&RH": lambda: queries.trh_with_vpd(db.session).subquery().c, "Aranet CO2": lambda: ReadingsAranetCO2Class, "Aranet Air Velocity": lambda: ReadingsAranetAirVelocityClass, "Aegis II": lambda: ReadingsAegisIrrigationClass, } # DATA_COLUMNS_BY_SENSOR_TYPE names the columns of that table that we want to plot as # data, and gives them human friendly names to display on the UI. # TODO Could the below data be read from the database? DATA_COLUMNS_BY_SENSOR_TYPE = { "Aranet T&RH": [ {"column_name": "temperature", "ui_name": "Temperature (°C)"}, {"column_name": "humidity", "ui_name": "Humidity (%)"}, {"column_name": "vpd", "ui_name": "VPD (Pa)"}, ], "Aranet CO2": [ {"column_name": "co2", "ui_name": "CO2 (ppm)"}, ], "Aranet Air Velocity": [ {"column_name": "air_velocity", "ui_name": "Air velocity (m/s)"}, ], "Aegis II": [ {"column_name": "temperature", "ui_name": "Temperature (°C)"}, {"column_name": "pH", "ui_name": "pH"}, {"column_name": "dissolved_oxygen", "ui_name": "Dissolved oxygen (%)"}, {"column_name": "conductivity", "ui_name": "Conductivity (μS)"}, {"column_name": "turbidity", "ui_name": "Turbidity"}, {"column_name": "peroxide", "ui_name": "Peroxide (ppm)"}, ], } # The above constants are defined in terms of names of the sensor_types. The code # operates in terms of ids rather than names, so we wrap the above dictionaries into # functions. 
def get_sensor_type_name(sensor_type_id): """Given a sensor type ID, get the name of the sensor type from the database.""" query = db.session.query( TypeClass.sensor_type, ).filter(TypeClass.id == sensor_type_id) sensor_name = db.session.execute(query).fetchone() if isinstance(sensor_name, Iterable): sensor_name = sensor_name[0] return sensor_name def get_sensor_type_id(sensor_type_name): """Given a sensor type name, get the ID of the sensor type from the database.""" query = db.session.query( TypeClass.id, ).filter(TypeClass.sensor_type == sensor_type_name) sensor_id = db.session.execute(query).fetchone() if isinstance(sensor_id, Iterable): sensor_id = sensor_id[0] return sensor_id def get_table_by_sensor_type(sensor_type_id): """Return the SQLAlchemy table/subquery corresponding to a given sensor type ID.""" # Because of how global constants work in Flask, DATA_COLUMNS_BY_SENSOR_TYPE has # functions that return the relevant table/subquery, rather than the # tables/subqueries themselves. Hence the calls like `value()` and setting # `value = lambda: None` global DATA_TABLES_BY_SENSOR_TYPE if sensor_type_id in DATA_TABLES_BY_SENSOR_TYPE: return DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id]() else: sensor_type_name = get_sensor_type_name(sensor_type_id) if sensor_type_name in DATA_TABLES_BY_SENSOR_TYPE: value = DATA_TABLES_BY_SENSOR_TYPE[sensor_type_name] else: value = lambda: None DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id] = value return value() def get_columns_by_sensor_type(sensor_type_id): """Return the names of the data columns in the table corresponding to a given sensor type ID. By "data columns" we mean the ones that depend on the sensor type and hold the actual data, e.g. temperature and humidity, but not timestamp. The return values are dictionaries with two keys, "column_name" for the name by which the database knows this column, and "ui_name" for nice human-readable name fit for a UI. """ global DATA_COLUMNS_BY_SENSOR_TYPE if sensor_type_id in DATA_COLUMNS_BY_SENSOR_TYPE: return DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id] else: sensor_type_name = get_sensor_type_name(sensor_type_id) if sensor_type_name in DATA_COLUMNS_BY_SENSOR_TYPE: value = DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_name] else: value = None DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id] = value return value def get_default_sensor_type(): """Get the ID of the default sensor type.""" return get_sensor_type_id(DEFAULT_SENSOR_TYPE) def is_valid_sensor_type(sensor_type_id): """Return True if we have the necessary metadata about the table and its columns needed for fetching and plotting data for the given sensor type, otherwise False. """ return ( get_table_by_sensor_type(sensor_type_id) is not None and get_columns_by_sensor_type(sensor_type_id) is not None ) # # # DONE WITH GLOBAL CONSTANTS AND SENSOR TYPE METADATA, BEGIN MAIN CONTENT # # # def resample(df, bins, dt_from, dt_to): """ Resamples (adds missing date/temperature bin combinations) to a dataframe. 
    Arguments:
        df: dataframe with temperature assigned to bins
        bins: temperature bins as a list
        dt_from: date range from
        dt_to: date range to
    Returns:
        bins_list: a list of temperature bins
        df_list: a list of dataframes corresponding to temperature bins
    """
    bins_list = []
    for i in range(len(bins) - 1):
        bins_list.append("(%.1f, %.1f]" % (bins[i], bins[i + 1]))

    date_min = min(df["date"].min(), dt_from)
    date_max = max(df["date"].max(), dt_to)

    for n in range(int((date_max - date_min).days) + 1):
        day = date_min + timedelta(n)
        for temp_range in bins_list:
            if len(df[(df["date"] == day) & (df["temp_bin"] == temp_range)].index) == 0:
                df2 = pd.DataFrame(
                    {"date": [day], "temp_bin": [temp_range], "temp_cnt": [0]}
                )
                # DataFrame.append was removed in pandas 2.0; concat is the
                # supported equivalent.
                df = pd.concat([df, df2], ignore_index=True)

    df = df.sort_values(by=["date", "temp_bin"], ascending=True)
    df.reset_index(inplace=True, drop=True)

    df_list = []
    for bin_range in bins_list:
        df_bin = df[df["temp_bin"] == bin_range]
        del df_bin["temp_bin"]
        df_bin.reset_index(inplace=True, drop=True)
        df_list.append(df_bin)

    return bins_list, df_list


def lights_energy_use(dt_from_, dt_to_):
    """
    Energy use from Carpenter's Place (the sensor with lights, called Clapham in
    the database).

    Arguments:
        dt_from_: date range from
        dt_to_: date range to
    Returns:
        lights_results_df - a pandas dataframe with mean lights on values
    """
    dt_from = pd.to_datetime(dt_from_.date()) + timedelta(hours=14)
    dt_to = pd.to_datetime(dt_to_.date()) + timedelta(days=1, hours=15)
    d_from = pd.to_datetime(dt_from_.date())
    d_to = pd.to_datetime(dt_to_.date())
    col_ec = "electricity_consumption"
    sensor_device_id = "Clapham"

    lights_on_cols = []

    # getting energy data for the analysis
    query = db.session.query(
        ReadingsEnergyClass.timestamp,
        ReadingsEnergyClass.electricity_consumption,
    ).filter(
        and_(
            SensorClass.device_id == sensor_device_id,
            ReadingsEnergyClass.sensor_id == SensorClass.id,
            ReadingsEnergyClass.timestamp >= dt_from,
            ReadingsEnergyClass.timestamp <= dt_to,
        )
    )

    df = pd.read_sql(query.statement, query.session.bind)
    if df.empty:
        return pd.DataFrame({"date": [], "mean_lights_on": []})

    # Sorting and resetting index (sort_values returns a copy, so reassign)
    df = df.sort_values(by=["timestamp"], ascending=True).reset_index(drop=True)

    # grouping data by date-hour
    energy_hour = (
        df.groupby(
            by=[
                df["timestamp"].map(
                    lambda x: pd.to_datetime(
                        "%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour),
                        format="%Y-%m-%d-%H",
                    )
                ),
            ]
        )["electricity_consumption"]
        .sum()
        .reset_index()
    )

    # Sorting and resetting index
    energy_hour = energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(
        drop=True
    )

    # energy dates: an energy date starts at 3pm each day (per the dt.hour < 15
    # split below) and lasts for 24 hours
    energy_hour.loc[
        energy_hour["timestamp"].dt.hour < 15, "energy_date"
    ] = pd.to_datetime((energy_hour["timestamp"] + timedelta(days=-1)).dt.date)
    energy_hour.loc[
        energy_hour["timestamp"].dt.hour >= 15, "energy_date"
    ] = pd.to_datetime(energy_hour["timestamp"].dt.date)

    # Classification of lights being on

    # Lights ON 1: Lights are assumed on during the scheduled lighting period,
    # here taken as 5pm to 10am per the hour thresholds below.
    energy_hour["lights_on_1"] = energy_hour["timestamp"].apply(
        lambda x: 1 if (x.hour >= 17 or x.hour < 10) else 0
    )
    lights_on_cols.append("lights_on_1")

    # Lights ON 2: Lights are calculated by estimating the lighting use as between
    # the minima of two consecutive days. The lights are considered on when the
    # energy use is above the day's first quartile of this difference.
# energy_hour['lights_on_2'] = 0 # lights_on_cols.append('lights_on_2') # Lights ON 3: Lights are assumed to be on if the energy demand is over 30 kW # (max load of the extraction fan) energy_hour["lights_on_3"] = energy_hour[col_ec].apply( lambda x: 1 if (x > 30.0) else 0 ) lights_on_cols.append("lights_on_3") # Lights ON 4: Lights are assumed to turn on at the time of largest energy use # increase in the day, and turn off at the time of largest energy decrease of # the day. # estimating energy difference energy_hour["dE"] = energy_hour[col_ec] - energy_hour[col_ec].shift(1) energy_hour["dE"] = energy_hour["dE"].fillna(0.0) # finding max increase and min decrease energy_hour["dE_min"] = energy_hour.groupby("energy_date")["dE"].transform("min") energy_hour["dE_max"] = energy_hour.groupby("energy_date")["dE"].transform("max") energy_hour.loc[ np.isclose(energy_hour["dE_max"], energy_hour["dE"]), "lights_on_4" ] = 1 energy_hour.loc[ np.isclose(energy_hour["dE_min"], energy_hour["dE"]), "lights_on_4" ] = 0 # repeat last? prev_row_value = None for df_index in energy_hour.index: if df_index > 0: if np
prev_row_value = energy_hour.loc[df_index, "lights_on_4"] lights_on_cols.append("lights_on_4") # Lights ON 5: Lights are assumed on if the energy use is over 0.9 # times the days' energy use mean, and the energy demand is over 30 kW. energy_hour["energy_date_mean"] = energy_hour.groupby("energy_date")[ col_ec ].transform("mean") energy_hour["lights_on_5"] = np.where( (energy_hour[col_ec] > 30.0) & (energy_hour[col_ec] > 0.9 * energy_hour["energy_date_mean"]), 1, 0, ) lights_on_cols.append("lights_on_5") # getting the mean value of lights on per day energy_date_df = energy_hour.loc[ (energy_hour["energy_date"] >= d_from) & (energy_hour["energy_date"] <= d_to) ] energy_date_df = ( energy_date_df.groupby(by=["energy_date"])[lights_on_cols].sum().reset_index() ) energy_date_df["mean_lights_on"] = energy_date_df[lights_on_cols].sum(axis=1) / len( lights_on_cols ) energy_date_df["date"] = energy_date_df["energy_date"].dt.strftime("%Y-%m-%d") lights_results_df = energy_date_df[["date", "mean_lights_on"]] return lights_results_df def ventilation_energy_use(dt_from, dt_to): """ In our data this is called Carpenter’s Place. This reading only counts energy use for the second extraction fan. Arguments: dt_from: date range from dt_to: date range to Returns: ventilation_results_df - a pandas dataframe with ventilation analysis results """ sensor_device_id = "1a Carpenters Place" # getting eneregy data for the analysis query = db.session.query( ReadingsEnergyClass.timestamp, ReadingsEnergyClass.electricity_consumption, ).filter( and_( SensorClass.device_id == sensor_device_id, ReadingsEnergyClass.sensor_id == SensorClass.id, ReadingsEnergyClass.timestamp >= dt_from, ReadingsEnergyClass.timestamp <= dt_to, ) ) df = pd.read_sql(query.statement, query.session.bind) if df.empty: return pd.DataFrame({"timestamp": [], "ach": []}) # Reseting index df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True) # grouping data by date-hour energy_hour = ( df.groupby( by=[ df["timestamp"].map( lambda x: "%04d-%02d-%02d %02d:00" % (x.year, x.month, x.day, x.hour) ), ] )["electricity_consumption"] .sum() .reset_index() ) # Sorting and reseting index energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True) # Calculating air exchange per hour energy_hour["ach"] = ( energy_hour["electricity_consumption"] / CONST_SFP * 3600.0 / (CONST_VTOT / 2.0) ) ventilation_results_df = energy_hour[["timestamp", "ach"]] return ventilation_results_df def aranet_trh_analysis(dt_from, dt_to): """ Performs data analysis for Aranet Temperature+Relative Humidity sensors. 
Arguments: dt_from_: date range from dt_to_: date range to Returns: sensor_names: a list of sensor names sensor_temp_ranges: json data with temperate ranges """ logging.info( "Calling aranet_trh_analysis with parameters %s %s" % ( dt_from.strftime(CONST_TIMESTAMP_FORMAT), dt_to.strftime(CONST_TIMESTAMP_FORMAT), ) ) query = db.session.query( ReadingsAranetTRHClass.timestamp, ReadingsAranetTRHClass.sensor_id, SensorClass.name, ReadingsAranetTRHClass.temperature, ReadingsAranetTRHClass.humidity, ).filter( and_( ReadingsAranetTRHClass.sensor_id == SensorClass.id, ReadingsAranetTRHClass.timestamp >= dt_from, ReadingsAranetTRHClass.timestamp <= dt_to, ) ) df = pd.read_sql(query.statement, query.session.bind) logging.info("Total number of records found: %d" % (len(df.index))) return temperature_range_analysis(df, dt_from, dt_to) def temperature_range_analysis(temp_df, dt_from, dt_to): """ Performs temperature range analysis on a given pandas dataframe. Arguments: temp_df: dt_from: date range from dt_to: date range to Returns: sensor_names: a list of sensor names sensor_temp_ranges: json data with temperate ranges """ df = copy.deepcopy(temp_df) df_unique_sensors = df[["sensor_id", "name"]].drop_duplicates(["sensor_id", "name"]) sensor_ids = df_unique_sensors["sensor_id"].tolist() sensor_names = df_unique_sensors["name"].tolist() # extracting date from datetime df["date"] = pd.to_datetime(df["timestamp"].dt.date) # Reseting index df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True) data_by_sensor_id = {} for sensor_name, sensor_id in zip(sensor_names, sensor_ids): df_sensor = df[df["sensor_id"] == sensor_id] # grouping data by date-hour and sensor id sensor_grp = df_sensor.groupby( by=[ df_sensor.timestamp.map( lambda x: "%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour) ), "date", ] ) # estimating hourly temperature mean values sensor_grp_temp = sensor_grp["temperature"].mean().reset_index() try: bins = TEMP_BINS[SENSOR_CATEORIES[sensor_id]] except KeyError: logging.error( f"Don't know how to categorise or bin sensor {sensor_id} " "in the dashboard." 
) continue # binning temperature values sensor_grp_temp["temp_bin"] = pd.cut(sensor_grp_temp["temperature"], bins) # converting bins to str sensor_grp_temp["temp_bin"] = sensor_grp_temp["temp_bin"].astype(str) # get bin counts for each sensor-day combination sensor_grp_date = sensor_grp_temp.groupby(by=["date", "temp_bin"]) sensor_cnt = sensor_grp_date["temperature"].count().reset_index() sensor_cnt.rename(columns={"temperature": "temp_cnt"}, inplace=True) # Adding missing date/temp_bin combos bins_list, df_list = resample(sensor_cnt, bins, dt_from, dt_to) data_by_sensor_id[sensor_id] = { "name": sensor_name, "bins": bins_list, "data": [ { "date": df["date"].dt.strftime("%Y-%m-%d").to_list(), "count": df["temp_cnt"].to_list(), } for df in df_list ], } return len(data_by_sensor_id.keys()), json.dumps(data_by_sensor_id) def fetch_sensor_data(dt_from, dt_to, sensor_type, sensor_ids): sensor_type_name = get_sensor_type_name(sensor_type) if not is_valid_sensor_type(sensor_type): raise ValueError(f"Don't know how to fetch data for sensor type {sensor_type}") data_table = get_table_by_sensor_type(sensor_type) data_table_columns = [ getattr(data_table, column["column_name"]) for column in get_columns_by_sensor_type(sensor_type) ] query = db.session.query( data_table.timestamp, data_table.sensor_id, SensorClass.name, *data_table_columns, ).filter( and_( data_table.sensor_id == SensorClass.id, data_table.timestamp >= dt_from, data_table.timestamp <= dt_to, data_table.sensor_id.in_(sensor_ids), ) ) df = pd.read_sql(query.statement, query.session.bind) if sensor_type_name == "Aranet T&RH": # Rounding to two decimal places, because our precision isn't infinite, and # long floats look really ugly on the front end. df.loc[:, "vpd"] = df.loc[:, "vpd"].round(2) return df @blueprint.route("/aranet_trh_dashboard") @login_required def aranet_trh_dashboard(): dt_from, dt_to = parse_date_range_argument(request.args.get("range")) num_sensors, temperature_bins_json = aranet_trh_analysis(dt_from, dt_to) return render_template( "aranet_trh_dashboard.html", num_sensors=num_sensors, temperature_bins_json=temperature_bins_json, dt_from=dt_from.strftime("%B %d, %Y"), dt_to=dt_to.strftime("%B %d, %Y"), ) @blueprint.route("/energy_dashboard") @login_required def energy_dashboard(): dt_from, dt_to = parse_date_range_argument(request.args.get("range")) energy_data = {} # lights-on analysis lights_results_df = lights_energy_use(dt_from, dt_to) # ventilation analysis ventilation_results_df = ventilation_energy_use(dt_from, dt_to) # jsonify energy_data["data"] = ( "[" + lights_results_df.to_json(orient="records") + "," + ventilation_results_df.to_json(orient="records") + "]" ) return render_template( "energy_dashboard.html", energy_data=energy_data, dt_from=dt_from.strftime("%B %d, %Y"), dt_to=dt_to.strftime("%B %d, %Y"), ) # # # TIMESERIES DASHBOARD # # # def add_mean_over_sensors(sensor_type, sensor_ids, df, roll_window_minutes=10): """Take the dataframe for timeseries, and add data for a new "sensor" that's the mean of all the ones in the data """ if len(df) == 0: return df df_mean = df.groupby("timestamp").mean() df_mean.loc[:, "sensor_id"] = "mean" df_mean.loc[:, "name"] = "mean" # The sensor data comes with a 10 minute frequency. However, the sensors may be # "phase shifted" with respect to each other, e.g. one may have data for 00 and 10, # while another may have 05 and 15. A 10 minute rolling mean smooths out these # differences. 
roll_window = timedelta(minutes=roll_window_minutes) for column in get_columns_by_sensor_type(sensor_type): column_name = column["column_name"] df_mean[column_name] = df_mean[column_name].rolling(roll_window).mean() df_mean = df_mean.reset_index() df = pd.concat((df_mean, df), axis=0) return df def fetch_all_sensor_types(): """Get all sensor types from the CROP database, for which we know how to render the timeseries dashboard. Arguments: None Returns: List of dictionaries with keys "id" (int) and "sensor_type" (str). """ query = db.session.query( TypeClass.id, TypeClass.sensor_type, ) sensor_types = db.session.execute(query).fetchall() sensor_types = query_result_to_array(sensor_types) sensor_types = [st for st in sensor_types if is_valid_sensor_type(st["id"])] return sensor_types def fetch_all_sensors(sensor_type): """Get all sensors of a given sensor type from the CROP database. Arguments: sensor_type: The database ID (primary key) of the sensor type. Returns: List of dictionaries with keys "id" (int) and "name" (str), sorted by "id". """ query = db.session.query( SensorClass.id, SensorClass.aranet_code, SensorClass.name, ).filter(SensorClass.type_id == sensor_type) sensors = db.session.execute(query).fetchall() sensors = query_result_to_array(sensors) sensors = {s["id"]: s for s in sorted(sensors, key=lambda x: x["id"])} return sensors @blueprint.route("/timeseries_dashboard", methods=["GET", "POST"]) @login_required def timeseries_dashboard(): # Read query string dt_from = request.args.get("startDate") dt_to = request.args.get("endDate") sensor_ids = request.args.get("sensorIds") if sensor_ids is not None: # sensor_ids is passed as a comma-separated (or space or semicolon, although # those aren't currently used) string of ints, split it into a list of ints. sensor_ids = tuple(map(int, re.split(r"[ ;,]+", sensor_ids.rstrip(" ,;")))) sensor_type = request.args.get("sensorType") if sensor_type is None: sensor_type = get_default_sensor_type() else: sensor_type = int(sensor_type) # Get the data from the database that will be required in all scenarios for how the # page might be rendered. sensor_types = fetch_all_sensor_types() all_sensors = fetch_all_sensors(sensor_type) # If we don't have the information necessary to plot data for sensors, just render # the selector version of the page. if ( dt_from is None or dt_to is None or sensor_ids is None or not is_valid_sensor_type(sensor_type) ): today = datetime.today() dt_from = today - timedelta(days=7) dt_to = today return render_template( "timeseries_dashboard.html", sensor_type=sensor_type, sensor_types=sensor_types, all_sensors=all_sensors, sensor_ids=sensor_ids, dt_from=dt_from, dt_to=dt_to, data=dict(), summaries=dict(), data_columns=[], ) # Convert datetime strings to objects and make dt_to run to the end of the day in # question. dt_from = datetime.strptime(dt_from, "%Y%m%d") dt_to = ( datetime.strptime(dt_to, "%Y%m%d") + timedelta(days=1) + timedelta(milliseconds=-1) ) df = fetch_sensor_data(dt_from, dt_to, sensor_type, sensor_ids) if request.method == "POST": df = df.sort_values("timestamp") return download_csv(df, "timeseries") data_keys = list(sensor_ids) if len(sensor_ids) > 1: df = add_mean_over_sensors(sensor_type, sensor_ids, df) # Insert at start, to make "mean" be the first one displayed on the page. 
data_keys.insert(0, "mean") data_columns = get_columns_by_sensor_type(sensor_type) data_dict = dict() summary_dict = dict() for key in data_keys: df_key = ( df[df["sensor_id"] == key] .drop(columns=["sensor_id", "name"]) .sort_values("timestamp") ) # You may wonder, why we first to_json, and then json.loads. That's just to have # the data in a nice nested dictionary that a final json.dumps can deal with. data_dict[key] = json.loads(df_key.to_json(orient="records", date_format="iso")) # Round the summary stats to two decimals, for nice front end presentation. summary_dict[key] = json.loads(df_key.describe().round(2).to_json()) return render_template( "timeseries_dashboard.html", sensor_type=sensor_type, sensor_types=sensor_types, all_sensors=all_sensors, sensor_ids=sensor_ids, dt_from=dt_from, dt_to=dt_to, data=data_dict, summaries=summary_dict, data_columns=data_columns, )
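# The explicit row loop that fills lights_on_4 in lights_energy_use can also be
# expressed with pandas' built-in forward fill; a minimal sketch, assuming a
# default RangeIndex. Like the loop (whose df_index > 0 guard skips the first
# row), ffill leaves any leading NaN untouched.
import numpy as np
import pandas as pd

example_flags = pd.Series([np.nan, 1.0, np.nan, np.nan, 0.0, np.nan])
example_filled = example_flags.ffill()
# example_filled.tolist() -> [nan, 1.0, 1.0, 1.0, 0.0, 0.0]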
.isnan(energy_hour.loc[df_index, "lights_on_4"]) and not np.isnan( prev_row_value ): energy_hour.loc[df_index, "lights_on_4"] = prev_row_value
conditional_block
routes.py
""" Analysis dashboards module. """ try: from collections.abc import Iterable except ImportError: from collections import Iterable import copy from datetime import datetime, timedelta import json import logging import re import numpy as np import pandas as pd from flask_login import login_required from flask import render_template, request from sqlalchemy import and_ from app.dashboards import blueprint from cropcore import queries from cropcore.utils import ( download_csv, parse_date_range_argument, query_result_to_array, ) from cropcore.structure import SQLA as db from cropcore.structure import ( SensorClass, TypeClass, ReadingsAegisIrrigationClass, ReadingsEnergyClass, ReadingsAranetCO2Class, ReadingsAranetTRHClass, ReadingsAranetAirVelocityClass, ) from cropcore.constants import CONST_MAX_RECORDS, CONST_TIMESTAMP_FORMAT # Temperature constants TEMP_BINS = { "Propagation": [0.0, 20.0, 23.0, 25.0, 144.0], "FrontFarm": [0.0, 18.0, 21.0, 25.0, 144.0], "Fridge": [0.0, 20.0, 23.0, 25.0, 144.0], "MidFarm": [0.0, 20.0, 23.0, 25.0, 144.0], "BackFarm": [0.0, 20.0, 25.0, 28.0, 144.0], "Tunnel": [0.0, 20.0, 25.0, 28.0, 144.0], "R&D": [0.0, 20.0, 23.0, 25.0, 144.0], } # TODO Read these from the database. SENSOR_CATEORIES = { 18: "MidFarm", 19: "Tunnel", 20: "Propagation", 21: "FrontFarm", 22: "BackFarm", 23: "MidFarm", 24: "R&D", 25: "R&D", 26: "Fridge", 27: "MidFarm", 48: "Propagation", 49: "R&D", } # Ventilation constants CONST_SFP = 2.39 # specific fan power CONST_VTOT = 20337.0 # total volume – m3 DEFAULT_SENSOR_TYPE = "Aranet T&RH" # Some data that varies based on sensor type. # DATA_COLUMNS_BY_SENSOR_TYPE names the class for the readings table. DATA_TABLES_BY_SENSOR_TYPE = { "Aranet T&RH": lambda: queries.trh_with_vpd(db.session).subquery().c, "Aranet CO2": lambda: ReadingsAranetCO2Class, "Aranet Air Velocity": lambda: ReadingsAranetAirVelocityClass, "Aegis II": lambda: ReadingsAegisIrrigationClass, } # DATA_COLUMNS_BY_SENSOR_TYPE names the columns of that table that we want to plot as # data, and gives them human friendly names to display on the UI. # TODO Could the below data be read from the database? DATA_COLUMNS_BY_SENSOR_TYPE = { "Aranet T&RH": [ {"column_name": "temperature", "ui_name": "Temperature (°C)"}, {"column_name": "humidity", "ui_name": "Humidity (%)"}, {"column_name": "vpd", "ui_name": "VPD (Pa)"}, ], "Aranet CO2": [ {"column_name": "co2", "ui_name": "CO2 (ppm)"}, ], "Aranet Air Velocity": [ {"column_name": "air_velocity", "ui_name": "Air velocity (m/s)"}, ], "Aegis II": [ {"column_name": "temperature", "ui_name": "Temperature (°C)"}, {"column_name": "pH", "ui_name": "pH"}, {"column_name": "dissolved_oxygen", "ui_name": "Dissolved oxygen (%)"}, {"column_name": "conductivity", "ui_name": "Conductivity (μS)"}, {"column_name": "turbidity", "ui_name": "Turbidity"}, {"column_name": "peroxide", "ui_name": "Peroxide (ppm)"}, ], } # The above constants are defined in terms of names of the sensor_types. The code # operates in terms of ids rather than names, so we wrap the above dictionaries into # functions. 
def get_sensor_type_name(sensor_type_id): """Given a sensor type ID, get the name of the sensor type from the database.""" query = db.session.query( TypeClass.sensor_type, ).filter(TypeClass.id == sensor_type_id) sensor_name = db.session.execute(query).fetchone() if isinstance(sensor_name, Iterable): sensor_name = sensor_name[0] return sensor_name def get_sensor_type_id(sensor_type_name): """Given a sensor type name, get the ID of the sensor type from the database.""" query = db.session.query( TypeClass.id, ).filter(TypeClass.sensor_type == sensor_type_name) sensor_id = db.session.execute(query).fetchone() if isinstance(sensor_id, Iterable): sensor_id = sensor_id[0] return sensor_id def get_table_by_sensor_type(sensor_type_id): """Return the SQLAlchemy table/subquery corresponding to a given sensor type ID.""" # Because of how global constants work in Flask, DATA_COLUMNS_BY_SENSOR_TYPE has # functions that return the relevant table/subquery, rather than the # tables/subqueries themselves. Hence the calls like `value()` and setting # `value = lambda: None` global DATA_TABLES_BY_SENSOR_TYPE if sensor_type_id in DATA_TABLES_BY_SENSOR_TYPE: return DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id]() else: sensor_type_name = get_sensor_type_name(sensor_type_id) if sensor_type_name in DATA_TABLES_BY_SENSOR_TYPE: value = DATA_TABLES_BY_SENSOR_TYPE[sensor_type_name] else: value = lambda: None DATA_TABLES_BY_SENSOR_TYPE[sensor_type_id] = value return value() def get_columns_by_sensor_type(sensor_type_id): """Return the names of the data columns in the table corresponding to a given sensor type ID. By "data columns" we mean the ones that depend on the sensor type and hold the actual data, e.g. temperature and humidity, but not timestamp. The return values are dictionaries with two keys, "column_name" for the name by which the database knows this column, and "ui_name" for nice human-readable name fit for a UI. """ global DATA_COLUMNS_BY_SENSOR_TYPE if sensor_type_id in DATA_COLUMNS_BY_SENSOR_TYPE: return DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id] else: sensor_type_name = get_sensor_type_name(sensor_type_id) if sensor_type_name in DATA_COLUMNS_BY_SENSOR_TYPE: value = DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_name] else: value = None DATA_COLUMNS_BY_SENSOR_TYPE[sensor_type_id] = value return value def get_default_sensor_type(): """Get the ID of the default sensor type.""" return get_sensor_type_id(DEFAULT_SENSOR_TYPE) def is_valid_sensor_type(sensor_type_id): """Return True if we have the necessary metadata about the table and its columns needed for fetching and plotting data for the given sensor type, otherwise False. """ return ( get_table_by_sensor_type(sensor_type_id) is not None and get_columns_by_sensor_type(sensor_type_id) is not None ) # # # DONE WITH GLOBAL CONSTANTS AND SENSOR TYPE METADATA, BEGIN MAIN CONTENT # # # def resample(df, bins, dt_from, dt_to): """ Resamples (adds missing date/temperature bin combinations) to a dataframe. 
    Arguments:
        df: dataframe with temperature assigned to bins
        bins: temperature bins as a list
        dt_from: date range from
        dt_to: date range to
    Returns:
        bins_list: a list of temperature bins
        df_list: a list of dataframes corresponding to temperature bins
    """
    bins_list = []
    for i in range(len(bins) - 1):
        bins_list.append("(%.1f, %.1f]" % (bins[i], bins[i + 1]))

    date_min = min(df["date"].min(), dt_from)
    date_max = max(df["date"].max(), dt_to)

    for n in range(int((date_max - date_min).days) + 1):
        day = date_min + timedelta(n)
        for temp_range in bins_list:
            if len(df[(df["date"] == day) & (df["temp_bin"] == temp_range)].index) == 0:
                df2 = pd.DataFrame(
                    {"date": [day], "temp_bin": [temp_range], "temp_cnt": [0]}
                )
                # DataFrame.append was removed in pandas 2.0; concat is the
                # supported equivalent.
                df = pd.concat([df, df2], ignore_index=True)

    df = df.sort_values(by=["date", "temp_bin"], ascending=True)
    df.reset_index(inplace=True, drop=True)

    df_list = []
    for bin_range in bins_list:
        df_bin = df[df["temp_bin"] == bin_range]
        del df_bin["temp_bin"]
        df_bin.reset_index(inplace=True, drop=True)
        df_list.append(df_bin)

    return bins_list, df_list


def lights_energy_use(dt_from_, dt_to_):
    """
    Energy use from Carpenter's Place (the sensor with lights, called Clapham in
    the database).

    Arguments:
        dt_from_: date range from
        dt_to_: date range to
    Returns:
""" dt_from = pd.to_datetime(dt_from_.date()) + timedelta(hours=14) dt_to = pd.to_datetime(dt_to_.date()) + timedelta(days=1, hours=15) d_from = pd.to_datetime(dt_from_.date()) d_to = pd.to_datetime(dt_to_.date()) col_ec = "electricity_consumption" sensor_device_id = "Clapham" lights_on_cols = [] # getting eneregy data for the analysis query = db.session.query( ReadingsEnergyClass.timestamp, ReadingsEnergyClass.electricity_consumption, ).filter( and_( SensorClass.device_id == sensor_device_id, ReadingsEnergyClass.sensor_id == SensorClass.id, ReadingsEnergyClass.timestamp >= dt_from, ReadingsEnergyClass.timestamp <= dt_to, ) ) df = pd.read_sql(query.statement, query.session.bind) if df.empty: return pd.DataFrame({"date": [], "mean_lights_on": []}) # Reseting index df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True) # grouping data by date-hour energy_hour = ( df.groupby( by=[ df["timestamp"].map( lambda x: pd.to_datetime( "%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour), format="%Y-%m-%d-%H", ) ), ] )["electricity_consumption"] .sum() .reset_index() ) # Sorting and reseting index energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True) # energy dates. Energy date starts from 4pm each day and lasts for 24 hours energy_hour.loc[ energy_hour["timestamp"].dt.hour < 15, "energy_date" ] = pd.to_datetime((energy_hour["timestamp"] + timedelta(days=-1)).dt.date) energy_hour.loc[ energy_hour["timestamp"].dt.hour >= 15, "energy_date" ] = pd.to_datetime(energy_hour["timestamp"].dt.date) # Clasification of lights being on # Lights ON 1: Lights turn on at 4pm and turn off at 9am, as scheduled. energy_hour["lights_on_1"] = energy_hour["timestamp"].apply( lambda x: 1 if (x.hour >= 17 or x.hour < 10) else 0 ) lights_on_cols.append("lights_on_1") # Lights ON 2: Lights are calculated by estimating the lighting use as between # the minima of two consecutive days. The lights are considered on when the # energy use is above the day's first quartile of lighting of this difference. # energy_hour['lights_on_2'] = 0 # lights_on_cols.append('lights_on_2') # Lights ON 3: Lights are assumed to be on if the energy demand is over 30 kW # (max load of the extraction fan) energy_hour["lights_on_3"] = energy_hour[col_ec].apply( lambda x: 1 if (x > 30.0) else 0 ) lights_on_cols.append("lights_on_3") # Lights ON 4: Lights are assumed to turn on at the time of largest energy use # increase in the day, and turn off at the time of largest energy decrease of # the day. # estimating energy difference energy_hour["dE"] = energy_hour[col_ec] - energy_hour[col_ec].shift(1) energy_hour["dE"] = energy_hour["dE"].fillna(0.0) # finding max increase and min decrease energy_hour["dE_min"] = energy_hour.groupby("energy_date")["dE"].transform("min") energy_hour["dE_max"] = energy_hour.groupby("energy_date")["dE"].transform("max") energy_hour.loc[ np.isclose(energy_hour["dE_max"], energy_hour["dE"]), "lights_on_4" ] = 1 energy_hour.loc[ np.isclose(energy_hour["dE_min"], energy_hour["dE"]), "lights_on_4" ] = 0 # repeat last? prev_row_value = None for df_index in energy_hour.index: if df_index > 0: if np.isnan(energy_hour.loc[df_index, "lights_on_4"]) and not np.isnan( prev_row_value ): energy_hour.loc[df_index, "lights_on_4"] = prev_row_value prev_row_value = energy_hour.loc[df_index, "lights_on_4"] lights_on_cols.append("lights_on_4") # Lights ON 5: Lights are assumed on if the energy use is over 0.9 # times the days' energy use mean, and the energy demand is over 30 kW. 
energy_hour["energy_date_mean"] = energy_hour.groupby("energy_date")[ col_ec ].transform("mean") energy_hour["lights_on_5"] = np.where( (energy_hour[col_ec] > 30.0) & (energy_hour[col_ec] > 0.9 * energy_hour["energy_date_mean"]), 1, 0, ) lights_on_cols.append("lights_on_5") # getting the mean value of lights on per day energy_date_df = energy_hour.loc[ (energy_hour["energy_date"] >= d_from) & (energy_hour["energy_date"] <= d_to) ] energy_date_df = ( energy_date_df.groupby(by=["energy_date"])[lights_on_cols].sum().reset_index() ) energy_date_df["mean_lights_on"] = energy_date_df[lights_on_cols].sum(axis=1) / len( lights_on_cols ) energy_date_df["date"] = energy_date_df["energy_date"].dt.strftime("%Y-%m-%d") lights_results_df = energy_date_df[["date", "mean_lights_on"]] return lights_results_df def ventilation_energy_use(dt_from, dt_to): """ In our data this is called Carpenter’s Place. This reading only counts energy use for the second extraction fan. Arguments: dt_from: date range from dt_to: date range to Returns: ventilation_results_df - a pandas dataframe with ventilation analysis results """ sensor_device_id = "1a Carpenters Place" # getting eneregy data for the analysis query = db.session.query( ReadingsEnergyClass.timestamp, ReadingsEnergyClass.electricity_consumption, ).filter( and_( SensorClass.device_id == sensor_device_id, ReadingsEnergyClass.sensor_id == SensorClass.id, ReadingsEnergyClass.timestamp >= dt_from, ReadingsEnergyClass.timestamp <= dt_to, ) ) df = pd.read_sql(query.statement, query.session.bind) if df.empty: return pd.DataFrame({"timestamp": [], "ach": []}) # Reseting index df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True) # grouping data by date-hour energy_hour = ( df.groupby( by=[ df["timestamp"].map( lambda x: "%04d-%02d-%02d %02d:00" % (x.year, x.month, x.day, x.hour) ), ] )["electricity_consumption"] .sum() .reset_index() ) # Sorting and reseting index energy_hour.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True) # Calculating air exchange per hour energy_hour["ach"] = ( energy_hour["electricity_consumption"] / CONST_SFP * 3600.0 / (CONST_VTOT / 2.0) ) ventilation_results_df = energy_hour[["timestamp", "ach"]] return ventilation_results_df def aranet_trh_analysis(dt_from, dt_to): """ Performs data analysis for Aranet Temperature+Relative Humidity sensors. Arguments: dt_from_: date range from dt_to_: date range to Returns: sensor_names: a list of sensor names sensor_temp_ranges: json data with temperate ranges """ logging.info( "Calling aranet_trh_analysis with parameters %s %s" % ( dt_from.strftime(CONST_TIMESTAMP_FORMAT), dt_to.strftime(CONST_TIMESTAMP_FORMAT), ) ) query = db.session.query( ReadingsAranetTRHClass.timestamp, ReadingsAranetTRHClass.sensor_id, SensorClass.name, ReadingsAranetTRHClass.temperature, ReadingsAranetTRHClass.humidity, ).filter( and_( ReadingsAranetTRHClass.sensor_id == SensorClass.id, ReadingsAranetTRHClass.timestamp >= dt_from, ReadingsAranetTRHClass.timestamp <= dt_to, ) ) df = pd.read_sql(query.statement, query.session.bind) logging.info("Total number of records found: %d" % (len(df.index))) return temperature_range_analysis(df, dt_from, dt_to) def temperature_range_analysis(temp_df, dt_from, dt_to): """ Performs temperature range analysis on a given pandas dataframe. 
Arguments: temp_df: dt_from: date range from dt_to: date range to Returns: sensor_names: a list of sensor names sensor_temp_ranges: json data with temperate ranges """ df = copy.deepcopy(temp_df) df_unique_sensors = df[["sensor_id", "name"]].drop_duplicates(["sensor_id", "name"]) sensor_ids = df_unique_sensors["sensor_id"].tolist() sensor_names = df_unique_sensors["name"].tolist() # extracting date from datetime df["date"] = pd.to_datetime(df["timestamp"].dt.date) # Reseting index df.sort_values(by=["timestamp"], ascending=True).reset_index(inplace=True) data_by_sensor_id = {} for sensor_name, sensor_id in zip(sensor_names, sensor_ids): df_sensor = df[df["sensor_id"] == sensor_id] # grouping data by date-hour and sensor id sensor_grp = df_sensor.groupby( by=[ df_sensor.timestamp.map( lambda x: "%04d-%02d-%02d-%02d" % (x.year, x.month, x.day, x.hour) ), "date", ] ) # estimating hourly temperature mean values sensor_grp_temp = sensor_grp["temperature"].mean().reset_index() try: bins = TEMP_BINS[SENSOR_CATEORIES[sensor_id]] except KeyError: logging.error( f"Don't know how to categorise or bin sensor {sensor_id} " "in the dashboard." ) continue # binning temperature values sensor_grp_temp["temp_bin"] = pd.cut(sensor_grp_temp["temperature"], bins) # converting bins to str sensor_grp_temp["temp_bin"] = sensor_grp_temp["temp_bin"].astype(str) # get bin counts for each sensor-day combination sensor_grp_date = sensor_grp_temp.groupby(by=["date", "temp_bin"]) sensor_cnt = sensor_grp_date["temperature"].count().reset_index() sensor_cnt.rename(columns={"temperature": "temp_cnt"}, inplace=True) # Adding missing date/temp_bin combos bins_list, df_list = resample(sensor_cnt, bins, dt_from, dt_to) data_by_sensor_id[sensor_id] = { "name": sensor_name, "bins": bins_list, "data": [ { "date": df["date"].dt.strftime("%Y-%m-%d").to_list(), "count": df["temp_cnt"].to_list(), } for df in df_list ], } return len(data_by_sensor_id.keys()), json.dumps(data_by_sensor_id) def fetch_sensor_data(dt_from, dt_to, sensor_type, sensor_ids): sensor_type_name = get_sensor_type_name(sensor_type) if not is_valid_sensor_type(sensor_type): raise ValueError(f"Don't know how to fetch data for sensor type {sensor_type}") data_table = get_table_by_sensor_type(sensor_type) data_table_columns = [ getattr(data_table, column["column_name"]) for column in get_columns_by_sensor_type(sensor_type) ] query = db.session.query( data_table.timestamp, data_table.sensor_id, SensorClass.name, *data_table_columns, ).filter( and_( data_table.sensor_id == SensorClass.id, data_table.timestamp >= dt_from, data_table.timestamp <= dt_to, data_table.sensor_id.in_(sensor_ids), ) ) df = pd.read_sql(query.statement, query.session.bind) if sensor_type_name == "Aranet T&RH": # Rounding to two decimal places, because our precision isn't infinite, and # long floats look really ugly on the front end. 
df.loc[:, "vpd"] = df.loc[:, "vpd"].round(2) return df @blueprint.route("/aranet_trh_dashboard") @login_required def aranet_trh_dashboard(): dt_from, dt_to = parse_date_range_argument(request.args.get("range")) num_sensors, temperature_bins_json = aranet_trh_analysis(dt_from, dt_to) return render_template( "aranet_trh_dashboard.html", num_sensors=num_sensors, temperature_bins_json=temperature_bins_json, dt_from=dt_from.strftime("%B %d, %Y"), dt_to=dt_to.strftime("%B %d, %Y"), ) @blueprint.route("/energy_dashboard") @login_required def energy_dashboard(): dt_from, dt_to = parse_date_range_argument(request.args.get("range")) energy_data = {} # lights-on analysis lights_results_df = lights_energy_use(dt_from, dt_to) # ventilation analysis ventilation_results_df = ventilation_energy_use(dt_from, dt_to) # jsonify energy_data["data"] = ( "[" + lights_results_df.to_json(orient="records") + "," + ventilation_results_df.to_json(orient="records") + "]" ) return render_template( "energy_dashboard.html", energy_data=energy_data, dt_from=dt_from.strftime("%B %d, %Y"), dt_to=dt_to.strftime("%B %d, %Y"), ) # # # TIMESERIES DASHBOARD # # # def add_mean_over_sensors(sensor_type, sensor_ids, df, roll_window_minutes=10): """Take the dataframe for timeseries, and add data for a new "sensor" that's the mean of all the ones in the data """ if len(df) == 0: return df df_mean = df.groupby("timestamp").mean() df_mean.loc[:, "sensor_id"] = "mean" df_mean.loc[:, "name"] = "mean" # The sensor data comes with a 10 minute frequency. However, the sensors may be # "phase shifted" with respect to each other, e.g. one may have data for 00 and 10, # while another may have 05 and 15. A 10 minute rolling mean smooths out these # differences. roll_window = timedelta(minutes=roll_window_minutes) for column in get_columns_by_sensor_type(sensor_type): column_name = column["column_name"] df_mean[column_name] = df_mean[column_name].rolling(roll_window).mean() df_mean = df_mean.reset_index() df = pd.concat((df_mean, df), axis=0) return df def fetch_all_sensor_types(): """Get all sensor types from the CROP database, for which we know how to render the timeseries dashboard. Arguments: None Returns: List of dictionaries with keys "id" (int) and "sensor_type" (str). """ query = db.session.query( TypeClass.id, TypeClass.sensor_type, ) sensor_types = db.session.execute(query).fetchall() sensor_types = query_result_to_array(sensor_types) sensor_types = [st for st in sensor_types if is_valid_sensor_type(st["id"])] return sensor_types def fetch_all_sensors(sensor_type): """Get all sensors of a given sensor type from the CROP database. Arguments: sensor_type: The database ID (primary key) of the sensor type. Returns: List of dictionaries with keys "id" (int) and "name" (str), sorted by "id". """ query = db.session.query( SensorClass.id, SensorClass.aranet_code, SensorClass.name, ).filter(SensorClass.type_id == sensor_type) sensors = db.session.execute(query).fetchall() sensors = query_result_to_array(sensors) sensors = {s["id"]: s for s in sorted(sensors, key=lambda x: x["id"])} return sensors @blueprint.route("/timeseries_dashboard", methods=["GET", "POST"]) @login_required def timeseries_dashboard(): # Read query string dt_from = request.args.get("startDate") dt_to = request.args.get("endDate") sensor_ids = request.args.get("sensorIds") if sensor_ids is not None: # sensor_ids is passed as a comma-separated (or space or semicolon, although # those aren't currently used) string of ints, split it into a list of ints. 
sensor_ids = tuple(map(int, re.split(r"[ ;,]+", sensor_ids.rstrip(" ,;")))) sensor_type = request.args.get("sensorType") if sensor_type is None: sensor_type = get_default_sensor_type() else: sensor_type = int(sensor_type) # Get the data from the database that will be required in all scenarios for how the # page might be rendered. sensor_types = fetch_all_sensor_types() all_sensors = fetch_all_sensors(sensor_type) # If we don't have the information necessary to plot data for sensors, just render # the selector version of the page. if ( dt_from is None or dt_to is None or sensor_ids is None or not is_valid_sensor_type(sensor_type) ): today = datetime.today() dt_from = today - timedelta(days=7) dt_to = today return render_template( "timeseries_dashboard.html", sensor_type=sensor_type, sensor_types=sensor_types, all_sensors=all_sensors, sensor_ids=sensor_ids, dt_from=dt_from, dt_to=dt_to, data=dict(), summaries=dict(), data_columns=[], ) # Convert datetime strings to objects and make dt_to run to the end of the day in # question. dt_from = datetime.strptime(dt_from, "%Y%m%d") dt_to = ( datetime.strptime(dt_to, "%Y%m%d") + timedelta(days=1) + timedelta(milliseconds=-1) ) df = fetch_sensor_data(dt_from, dt_to, sensor_type, sensor_ids) if request.method == "POST": df = df.sort_values("timestamp") return download_csv(df, "timeseries") data_keys = list(sensor_ids) if len(sensor_ids) > 1: df = add_mean_over_sensors(sensor_type, sensor_ids, df) # Insert at start, to make "mean" be the first one displayed on the page. data_keys.insert(0, "mean") data_columns = get_columns_by_sensor_type(sensor_type) data_dict = dict() summary_dict = dict() for key in data_keys: df_key = ( df[df["sensor_id"] == key] .drop(columns=["sensor_id", "name"]) .sort_values("timestamp") ) # You may wonder, why we first to_json, and then json.loads. That's just to have # the data in a nice nested dictionary that a final json.dumps can deal with. data_dict[key] = json.loads(df_key.to_json(orient="records", date_format="iso")) # Round the summary stats to two decimals, for nice front end presentation. summary_dict[key] = json.loads(df_key.describe().round(2).to_json()) return render_template( "timeseries_dashboard.html", sensor_type=sensor_type, sensor_types=sensor_types, all_sensors=all_sensors, sensor_ids=sensor_ids, dt_from=dt_from, dt_to=dt_to, data=data_dict, summaries=summary_dict, data_columns=data_columns, )
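# A worked example of the "energy date" convention used in lights_energy_use:
# per the dt.hour < 15 split there, hours before 3pm are attributed to the
# previous day's lighting cycle. A minimal sketch with the standard library only:
from datetime import datetime, timedelta


def example_energy_date(ts: datetime):
    return (ts - timedelta(days=1)).date() if ts.hour < 15 else ts.date()


# example_energy_date(datetime(2022, 5, 2, 14, 0)) -> date(2022, 5, 1)
# example_energy_date(datetime(2022, 5, 2, 16, 0)) -> date(2022, 5, 2)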
lights_results_df - a pandas dataframe with mean lights on values
random_line_split
main.rs
// Copyright 2020 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Example of a very simple runtime that can perform two types of transaction: //! increment and reset counter in the service instance. #![allow(clippy::unnecessary_wraps)] use exonum::{ blockchain::{config::GenesisConfigBuilder, Blockchain, ConsensusConfig, ValidatorKeys}, helpers::Height, keys::Keys, merkledb::{BinaryValue, Snapshot, TemporaryDB}, runtime::{ migrations::{InitMigrationError, MigrationScript}, oneshot::Receiver, versioning::Version, AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail, InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime, SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID, }, }; use exonum_derive::ExecutionFail; use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle}; use exonum_rust_runtime::{spec::Deploy, RustRuntime}; use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface}; use futures::TryFutureExt; use std::{cell::Cell, collections::BTreeMap, thread, time::Duration}; /// Service instance with a counter. #[derive(Debug, Default)] struct SampleService { counter: Cell<u64>, _name: String, } /// Sample runtime. #[derive(Debug, Default)] struct SampleRuntime { deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>, started_services: BTreeMap<InstanceId, SampleService>, } // Define runtime specific errors. #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[derive(ExecutionFail)] #[execution_fail(kind = "runtime")] enum SampleRuntimeError { /// Incorrect information to call transaction. IncorrectCallInfo = 1, /// Incorrect transaction payload. IncorrectPayload = 2, } impl SampleRuntime { /// Create a new service instance with the given specification. fn start_service( &self, artifact: &ArtifactId, instance: &InstanceDescriptor, ) -> Result<SampleService, ExecutionError> { // Invariants guaranteed by the core. assert!(self.deployed_artifacts.contains_key(artifact)); assert!(!self.started_services.contains_key(&instance.id)); Ok(SampleService { _name: instance.name.to_owned(), ..SampleService::default() }) } /// In the present simplest case, the artifact is added into the deployed artifacts table. fn deploy_artifact( &mut self, artifact: ArtifactId, spec: Vec<u8>, ) -> Result<(), ExecutionError> { // Invariant guaranteed by the core assert!(!self.deployed_artifacts.contains_key(&artifact)); println!("Deploying artifact: {}", &artifact); self.deployed_artifacts.insert(artifact, spec); Ok(()) } } impl Runtime for SampleRuntime { fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver
fn is_artifact_deployed(&self, id: &ArtifactId) -> bool { self.deployed_artifacts.contains_key(id) } /// Initiates adding a new service and sets the counter value for this. fn initiate_adding_service( &self, context: ExecutionContext<'_>, artifact: &ArtifactId, params: Vec<u8>, ) -> Result<(), ExecutionError> { let service_instance = self.start_service(artifact, context.instance())?; let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?; service_instance.counter.set(new_value); println!( "Initializing service {}: {} with value {}", artifact, context.instance(), new_value ); Ok(()) } fn initiate_resuming_service( &self, _context: ExecutionContext<'_>, _artifact: &ArtifactId, _parameters: Vec<u8>, ) -> Result<(), ExecutionError> { unreachable!("We don't resume services in this example.") } /// Commits status for the `SampleService` instance with the specified ID. fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) { let spec = &state.spec; match state.status { Some(InstanceStatus::Active) => { // Unwrap here is safe, since by invocation of this method // `exonum` guarantees that `initiate_adding_service` was invoked // before and it returned `Ok(..)`. let instance = self .start_service(&spec.artifact, &spec.as_descriptor()) .unwrap(); println!("Starting service {}: {:?}", spec, instance); self.started_services.insert(spec.id, instance); } Some(InstanceStatus::Stopped) => { let instance = self.started_services.remove(&spec.id); println!("Stopping service {}: {:?}", spec, instance); } _ => { // We aren't interested in other possible statuses. } } } fn migrate( &self, _new_artifact: &ArtifactId, _data_version: &Version, ) -> Result<Option<MigrationScript>, InitMigrationError> { Err(InitMigrationError::NotSupported) } fn execute( &self, context: ExecutionContext<'_>, method_id: MethodId, payload: &[u8], ) -> Result<(), ExecutionError> { let service = self .started_services .get(&context.instance().id) .ok_or(SampleRuntimeError::IncorrectCallInfo)?; println!( "Executing method {}#{} of service {}", context.interface_name(), method_id, context.instance().id ); const SERVICE_INTERFACE: &str = ""; match (context.interface_name(), method_id) { // Increment counter. (SERVICE_INTERFACE, 0) => { let value = u64::from_bytes(payload.into()) .map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?; let counter = service.counter.get(); println!("Updating counter value to {}", counter + value); service.counter.set(value + counter); Ok(()) } // Reset counter. (SERVICE_INTERFACE, 1) => { if !payload.is_empty() { Err(SampleRuntimeError::IncorrectPayload.into()) } else { println!("Resetting counter"); service.counter.set(0); Ok(()) } } // Unknown transaction. (interface, method) => { let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!( "Incorrect information to call transaction. 
{}#{}", interface, method )); Err(err) } } } fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> { Ok(()) } fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> { Ok(()) } fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {} } impl From<SampleRuntime> for (u32, Box<dyn Runtime>) { fn from(inner: SampleRuntime) -> Self { (SampleRuntime::ID, Box::new(inner)) } } impl WellKnownRuntime for SampleRuntime { const ID: u32 = 255; } fn node_config() -> (NodeConfig, Keys) { let keys = Keys::random(); let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())]; let consensus = ConsensusConfig::default().with_validator_keys(validator_keys); let api_address = "0.0.0.0:8000".parse().unwrap(); let api_cfg = NodeApiConfig { public_api_address: Some(api_address), ..Default::default() }; let peer_address = "0.0.0.0:2000"; let node_config = NodeConfig { listen_address: peer_address.parse().unwrap(), consensus, external_address: peer_address.to_owned(), network: Default::default(), connect_list: Default::default(), api: api_cfg, mempool: Default::default(), thread_pool_size: Default::default(), }; (node_config, keys) } async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) { let service_keypair = blockchain.service_keypair(); let deploy_height = Height(50); // Send an artifact `DeployRequest` to the sample runtime. let artifact = "255:sample_artifact:0.1.0".parse().unwrap(); let request = DeployRequest::new(artifact, deploy_height); let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request); blockchain.sender().broadcast_transaction(tx).await.unwrap(); // Wait until the request is finished. thread::sleep(Duration::from_secs(5)); // Send a `StartService` request to the sample runtime. let instance_name = "instance"; let proposal = ConfigPropose::immediate(0).start_service( "255:sample_artifact:0.1.0".parse().unwrap(), instance_name, 10_u64, ); let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal); blockchain .sender() .broadcast_transaction(proposal) .await .unwrap(); // Wait until instance identifier is assigned. thread::sleep(Duration::from_secs(1)); // Get an instance identifier. let snapshot = blockchain.snapshot(); let state = snapshot .for_dispatcher() .get_instance(instance_name) .unwrap(); assert_eq!(state.status.unwrap(), InstanceStatus::Active); let instance_id = state.spec.id; // Send an update counter transaction. let tx = AnyTx::new(CallInfo::new(instance_id, 0), 1_000_u64.into_bytes()); let tx = tx.sign_with_keypair(service_keypair); blockchain.sender().broadcast_transaction(tx).await.unwrap(); thread::sleep(Duration::from_secs(2)); // Send a reset counter transaction. 
let tx = AnyTx::new(CallInfo::new(instance_id, 1), vec![]); let tx = tx.sign_with_keypair(service_keypair); blockchain.sender().broadcast_transaction(tx).await.unwrap(); thread::sleep(Duration::from_secs(2)); shutdown_handle.shutdown().await.unwrap(); } #[tokio::main] async fn main() { exonum::helpers::init_logger().unwrap(); println!("Creating database in temporary dir..."); let db = TemporaryDB::new(); let (node_cfg, node_keys) = node_config(); let consensus_config = node_cfg.consensus.clone(); let mut genesis_config = GenesisConfigBuilder::with_consensus_config(consensus_config); let mut rt = RustRuntime::builder(); Supervisor::simple().deploy(&mut genesis_config, &mut rt); println!("Creating blockchain with additional runtime..."); let node = NodeBuilder::new(db, node_cfg, node_keys) .with_genesis_config(genesis_config.build()) .with_runtime(SampleRuntime::default()) .with_runtime_fn(|channel| { RustRuntime::builder() .with_factory(Supervisor) .build(channel.endpoints_sender()) }) .build(); let shutdown_handle = node.shutdown_handle(); println!("Starting a single node..."); println!("Blockchain is ready for transactions!"); let blockchain = node.blockchain().clone(); let node_task = node.run().unwrap_or_else(|e| panic!("{}", e)); let node_task = tokio::spawn(node_task); examine_runtime(blockchain, shutdown_handle).await; node_task.await.unwrap(); }
{ Receiver::with_result(self.deploy_artifact(artifact, spec)) }
identifier_body
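The identifier_body middle above fills in the body of Runtime::deploy_artifact, which simply wraps the result of the synchronous inherent method of the same name in a oneshot Receiver. A dependency-free sketch of the same shape follows; Receiver here is a stand-in for exonum's oneshot receiver, and artifact IDs are plain strings.

// Self-contained sketch of the pattern completed by the middle above:
// an async-looking API backed by a synchronous deployment step.
use std::collections::BTreeMap;

#[derive(Debug)]
struct Receiver(Result<(), String>);

impl Receiver {
    // Mirrors `Receiver::with_result`: wrap an already-computed result.
    fn with_result(result: Result<(), String>) -> Self {
        Receiver(result)
    }
}

#[derive(Default)]
struct SampleRuntime {
    deployed_artifacts: BTreeMap<String, Vec<u8>>,
}

impl SampleRuntime {
    // Synchronous inherent method, analogous to `SampleRuntime::deploy_artifact`.
    fn deploy_artifact(&mut self, artifact: String, spec: Vec<u8>) -> Result<(), String> {
        if self.deployed_artifacts.contains_key(&artifact) {
            return Err(format!("{} is already deployed", artifact));
        }
        println!("Deploying artifact: {}", artifact);
        self.deployed_artifacts.insert(artifact, spec);
        Ok(())
    }

    // Trait-style wrapper, analogous to the completed `Runtime::deploy_artifact`.
    fn deploy(&mut self, artifact: String, spec: Vec<u8>) -> Receiver {
        Receiver::with_result(self.deploy_artifact(artifact, spec))
    }
}

fn main() {
    let mut runtime = SampleRuntime::default();
    println!("{:?}", runtime.deploy("255:sample_artifact:0.1.0".into(), Vec::new()));
}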
main.rs
// Copyright 2020 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Example of a very simple runtime that can perform two types of transaction: //! increment and reset counter in the service instance. #![allow(clippy::unnecessary_wraps)] use exonum::{ blockchain::{config::GenesisConfigBuilder, Blockchain, ConsensusConfig, ValidatorKeys}, helpers::Height, keys::Keys, merkledb::{BinaryValue, Snapshot, TemporaryDB}, runtime::{ migrations::{InitMigrationError, MigrationScript}, oneshot::Receiver, versioning::Version, AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail, InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime, SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID, }, }; use exonum_derive::ExecutionFail; use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle}; use exonum_rust_runtime::{spec::Deploy, RustRuntime}; use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface}; use futures::TryFutureExt; use std::{cell::Cell, collections::BTreeMap, thread, time::Duration}; /// Service instance with a counter. #[derive(Debug, Default)] struct SampleService { counter: Cell<u64>, _name: String, } /// Sample runtime. #[derive(Debug, Default)] struct SampleRuntime { deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>, started_services: BTreeMap<InstanceId, SampleService>, } // Define runtime specific errors. #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[derive(ExecutionFail)] #[execution_fail(kind = "runtime")] enum SampleRuntimeError { /// Incorrect information to call transaction. IncorrectCallInfo = 1, /// Incorrect transaction payload. IncorrectPayload = 2, } impl SampleRuntime { /// Create a new service instance with the given specification. fn start_service( &self, artifact: &ArtifactId, instance: &InstanceDescriptor, ) -> Result<SampleService, ExecutionError> { // Invariants guaranteed by the core. assert!(self.deployed_artifacts.contains_key(artifact)); assert!(!self.started_services.contains_key(&instance.id)); Ok(SampleService { _name: instance.name.to_owned(), ..SampleService::default() }) } /// In the present simplest case, the artifact is added into the deployed artifacts table. fn deploy_artifact( &mut self, artifact: ArtifactId, spec: Vec<u8>, ) -> Result<(), ExecutionError> { // Invariant guaranteed by the core assert!(!self.deployed_artifacts.contains_key(&artifact)); println!("Deploying artifact: {}", &artifact); self.deployed_artifacts.insert(artifact, spec); Ok(()) } } impl Runtime for SampleRuntime { fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver { Receiver::with_result(self.deploy_artifact(artifact, spec)) } fn is_artifact_deployed(&self, id: &ArtifactId) -> bool { self.deployed_artifacts.contains_key(id) } /// Initiates adding a new service and sets the counter value for this. 
fn initiate_adding_service( &self, context: ExecutionContext<'_>, artifact: &ArtifactId, params: Vec<u8>, ) -> Result<(), ExecutionError> { let service_instance = self.start_service(artifact, context.instance())?; let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?; service_instance.counter.set(new_value); println!( "Initializing service {}: {} with value {}", artifact, context.instance(), new_value ); Ok(()) } fn initiate_resuming_service( &self, _context: ExecutionContext<'_>, _artifact: &ArtifactId, _parameters: Vec<u8>, ) -> Result<(), ExecutionError> { unreachable!("We don't resume services in this example.") } /// Commits status for the `SampleService` instance with the specified ID. fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) { let spec = &state.spec; match state.status { Some(InstanceStatus::Active) =>
Some(InstanceStatus::Stopped) => { let instance = self.started_services.remove(&spec.id); println!("Stopping service {}: {:?}", spec, instance); } _ => { // We aren't interested in other possible statuses. } } } fn migrate( &self, _new_artifact: &ArtifactId, _data_version: &Version, ) -> Result<Option<MigrationScript>, InitMigrationError> { Err(InitMigrationError::NotSupported) } fn execute( &self, context: ExecutionContext<'_>, method_id: MethodId, payload: &[u8], ) -> Result<(), ExecutionError> { let service = self .started_services .get(&context.instance().id) .ok_or(SampleRuntimeError::IncorrectCallInfo)?; println!( "Executing method {}#{} of service {}", context.interface_name(), method_id, context.instance().id ); const SERVICE_INTERFACE: &str = ""; match (context.interface_name(), method_id) { // Increment counter. (SERVICE_INTERFACE, 0) => { let value = u64::from_bytes(payload.into()) .map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?; let counter = service.counter.get(); println!("Updating counter value to {}", counter + value); service.counter.set(value + counter); Ok(()) } // Reset counter. (SERVICE_INTERFACE, 1) => { if !payload.is_empty() { Err(SampleRuntimeError::IncorrectPayload.into()) } else { println!("Resetting counter"); service.counter.set(0); Ok(()) } } // Unknown transaction. (interface, method) => { let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!( "Incorrect information to call transaction. {}#{}", interface, method )); Err(err) } } } fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> { Ok(()) } fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> { Ok(()) } fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {} } impl From<SampleRuntime> for (u32, Box<dyn Runtime>) { fn from(inner: SampleRuntime) -> Self { (SampleRuntime::ID, Box::new(inner)) } } impl WellKnownRuntime for SampleRuntime { const ID: u32 = 255; } fn node_config() -> (NodeConfig, Keys) { let keys = Keys::random(); let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())]; let consensus = ConsensusConfig::default().with_validator_keys(validator_keys); let api_address = "0.0.0.0:8000".parse().unwrap(); let api_cfg = NodeApiConfig { public_api_address: Some(api_address), ..Default::default() }; let peer_address = "0.0.0.0:2000"; let node_config = NodeConfig { listen_address: peer_address.parse().unwrap(), consensus, external_address: peer_address.to_owned(), network: Default::default(), connect_list: Default::default(), api: api_cfg, mempool: Default::default(), thread_pool_size: Default::default(), }; (node_config, keys) } async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) { let service_keypair = blockchain.service_keypair(); let deploy_height = Height(50); // Send an artifact `DeployRequest` to the sample runtime. let artifact = "255:sample_artifact:0.1.0".parse().unwrap(); let request = DeployRequest::new(artifact, deploy_height); let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request); blockchain.sender().broadcast_transaction(tx).await.unwrap(); // Wait until the request is finished. thread::sleep(Duration::from_secs(5)); // Send a `StartService` request to the sample runtime. 
let instance_name = "instance"; let proposal = ConfigPropose::immediate(0).start_service( "255:sample_artifact:0.1.0".parse().unwrap(), instance_name, 10_u64, ); let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal); blockchain .sender() .broadcast_transaction(proposal) .await .unwrap(); // Wait until instance identifier is assigned. thread::sleep(Duration::from_secs(1)); // Get an instance identifier. let snapshot = blockchain.snapshot(); let state = snapshot .for_dispatcher() .get_instance(instance_name) .unwrap(); assert_eq!(state.status.unwrap(), InstanceStatus::Active); let instance_id = state.spec.id; // Send an update counter transaction. let tx = AnyTx::new(CallInfo::new(instance_id, 0), 1_000_u64.into_bytes()); let tx = tx.sign_with_keypair(service_keypair); blockchain.sender().broadcast_transaction(tx).await.unwrap(); thread::sleep(Duration::from_secs(2)); // Send a reset counter transaction. let tx = AnyTx::new(CallInfo::new(instance_id, 1), vec![]); let tx = tx.sign_with_keypair(service_keypair); blockchain.sender().broadcast_transaction(tx).await.unwrap(); thread::sleep(Duration::from_secs(2)); shutdown_handle.shutdown().await.unwrap(); } #[tokio::main] async fn main() { exonum::helpers::init_logger().unwrap(); println!("Creating database in temporary dir..."); let db = TemporaryDB::new(); let (node_cfg, node_keys) = node_config(); let consensus_config = node_cfg.consensus.clone(); let mut genesis_config = GenesisConfigBuilder::with_consensus_config(consensus_config); let mut rt = RustRuntime::builder(); Supervisor::simple().deploy(&mut genesis_config, &mut rt); println!("Creating blockchain with additional runtime..."); let node = NodeBuilder::new(db, node_cfg, node_keys) .with_genesis_config(genesis_config.build()) .with_runtime(SampleRuntime::default()) .with_runtime_fn(|channel| { RustRuntime::builder() .with_factory(Supervisor) .build(channel.endpoints_sender()) }) .build(); let shutdown_handle = node.shutdown_handle(); println!("Starting a single node..."); println!("Blockchain is ready for transactions!"); let blockchain = node.blockchain().clone(); let node_task = node.run().unwrap_or_else(|e| panic!("{}", e)); let node_task = tokio::spawn(node_task); examine_runtime(blockchain, shutdown_handle).await; node_task.await.unwrap(); }
{ // Unwrap here is safe, since by invocation of this method // `exonum` guarantees that `initiate_adding_service` was invoked // before and it returned `Ok(..)`. let instance = self .start_service(&spec.artifact, &spec.as_descriptor()) .unwrap(); println!("Starting service {}: {:?}", spec, instance); self.started_services.insert(spec.id, instance); }
conditional_block
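The conditional_block middle above is the Active arm of update_service_status's match over Option<InstanceStatus>. The stand-alone sketch below keeps just that dispatch structure, with a stand-in two-variant enum and a BTreeMap of started services in place of the real Exonum types.

use std::collections::BTreeMap;

// Stand-in for `exonum::runtime::InstanceStatus`, which has more variants.
#[derive(Clone, Copy)]
enum InstanceStatus {
    Active,
    Stopped,
}

fn update_service_status(
    started_services: &mut BTreeMap<u32, String>,
    id: u32,
    name: &str,
    status: Option<InstanceStatus>,
) {
    match status {
        Some(InstanceStatus::Active) => {
            // In the real runtime this arm calls start_service(); its success
            // is guaranteed because initiate_adding_service() already returned Ok.
            started_services.insert(id, name.to_owned());
            println!("Starting service {}: {}", id, name);
        }
        Some(InstanceStatus::Stopped) => {
            let instance = started_services.remove(&id);
            println!("Stopping service {}: {:?}", id, instance);
        }
        _ => {
            // None and the other statuses of the real enum are ignored here.
        }
    }
}

fn main() {
    let mut started_services = BTreeMap::new();
    update_service_status(&mut started_services, 1024, "instance", Some(InstanceStatus::Active));
    update_service_status(&mut started_services, 1024, "instance", Some(InstanceStatus::Stopped));
}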
main.rs
// Copyright 2020 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Example of a very simple runtime that can perform two types of transaction: //! increment and reset counter in the service instance. #![allow(clippy::unnecessary_wraps)] use exonum::{ blockchain::{config::GenesisConfigBuilder, Blockchain, ConsensusConfig, ValidatorKeys}, helpers::Height, keys::Keys, merkledb::{BinaryValue, Snapshot, TemporaryDB}, runtime::{ migrations::{InitMigrationError, MigrationScript}, oneshot::Receiver, versioning::Version, AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail, InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime, SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID, }, }; use exonum_derive::ExecutionFail; use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle}; use exonum_rust_runtime::{spec::Deploy, RustRuntime}; use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface}; use futures::TryFutureExt; use std::{cell::Cell, collections::BTreeMap, thread, time::Duration}; /// Service instance with a counter. #[derive(Debug, Default)] struct SampleService { counter: Cell<u64>, _name: String, } /// Sample runtime. #[derive(Debug, Default)] struct SampleRuntime { deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>, started_services: BTreeMap<InstanceId, SampleService>, } // Define runtime specific errors. #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[derive(ExecutionFail)] #[execution_fail(kind = "runtime")] enum SampleRuntimeError { /// Incorrect information to call transaction. IncorrectCallInfo = 1, /// Incorrect transaction payload. IncorrectPayload = 2, } impl SampleRuntime { /// Create a new service instance with the given specification. fn start_service( &self, artifact: &ArtifactId, instance: &InstanceDescriptor, ) -> Result<SampleService, ExecutionError> { // Invariants guaranteed by the core. assert!(self.deployed_artifacts.contains_key(artifact)); assert!(!self.started_services.contains_key(&instance.id)); Ok(SampleService {
}) } /// In the present simplest case, the artifact is added into the deployed artifacts table. fn deploy_artifact( &mut self, artifact: ArtifactId, spec: Vec<u8>, ) -> Result<(), ExecutionError> { // Invariant guaranteed by the core assert!(!self.deployed_artifacts.contains_key(&artifact)); println!("Deploying artifact: {}", &artifact); self.deployed_artifacts.insert(artifact, spec); Ok(()) } } impl Runtime for SampleRuntime { fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver { Receiver::with_result(self.deploy_artifact(artifact, spec)) } fn is_artifact_deployed(&self, id: &ArtifactId) -> bool { self.deployed_artifacts.contains_key(id) } /// Initiates adding a new service and sets the counter value for this. fn initiate_adding_service( &self, context: ExecutionContext<'_>, artifact: &ArtifactId, params: Vec<u8>, ) -> Result<(), ExecutionError> { let service_instance = self.start_service(artifact, context.instance())?; let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?; service_instance.counter.set(new_value); println!( "Initializing service {}: {} with value {}", artifact, context.instance(), new_value ); Ok(()) } fn initiate_resuming_service( &self, _context: ExecutionContext<'_>, _artifact: &ArtifactId, _parameters: Vec<u8>, ) -> Result<(), ExecutionError> { unreachable!("We don't resume services in this example.") } /// Commits status for the `SampleService` instance with the specified ID. fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) { let spec = &state.spec; match state.status { Some(InstanceStatus::Active) => { // Unwrap here is safe, since by invocation of this method // `exonum` guarantees that `initiate_adding_service` was invoked // before and it returned `Ok(..)`. let instance = self .start_service(&spec.artifact, &spec.as_descriptor()) .unwrap(); println!("Starting service {}: {:?}", spec, instance); self.started_services.insert(spec.id, instance); } Some(InstanceStatus::Stopped) => { let instance = self.started_services.remove(&spec.id); println!("Stopping service {}: {:?}", spec, instance); } _ => { // We aren't interested in other possible statuses. } } } fn migrate( &self, _new_artifact: &ArtifactId, _data_version: &Version, ) -> Result<Option<MigrationScript>, InitMigrationError> { Err(InitMigrationError::NotSupported) } fn execute( &self, context: ExecutionContext<'_>, method_id: MethodId, payload: &[u8], ) -> Result<(), ExecutionError> { let service = self .started_services .get(&context.instance().id) .ok_or(SampleRuntimeError::IncorrectCallInfo)?; println!( "Executing method {}#{} of service {}", context.interface_name(), method_id, context.instance().id ); const SERVICE_INTERFACE: &str = ""; match (context.interface_name(), method_id) { // Increment counter. (SERVICE_INTERFACE, 0) => { let value = u64::from_bytes(payload.into()) .map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?; let counter = service.counter.get(); println!("Updating counter value to {}", counter + value); service.counter.set(value + counter); Ok(()) } // Reset counter. (SERVICE_INTERFACE, 1) => { if !payload.is_empty() { Err(SampleRuntimeError::IncorrectPayload.into()) } else { println!("Resetting counter"); service.counter.set(0); Ok(()) } } // Unknown transaction. (interface, method) => { let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!( "Incorrect information to call transaction. 
{}#{}", interface, method )); Err(err) } } } fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> { Ok(()) } fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> { Ok(()) } fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {} } impl From<SampleRuntime> for (u32, Box<dyn Runtime>) { fn from(inner: SampleRuntime) -> Self { (SampleRuntime::ID, Box::new(inner)) } } impl WellKnownRuntime for SampleRuntime { const ID: u32 = 255; } fn node_config() -> (NodeConfig, Keys) { let keys = Keys::random(); let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())]; let consensus = ConsensusConfig::default().with_validator_keys(validator_keys); let api_address = "0.0.0.0:8000".parse().unwrap(); let api_cfg = NodeApiConfig { public_api_address: Some(api_address), ..Default::default() }; let peer_address = "0.0.0.0:2000"; let node_config = NodeConfig { listen_address: peer_address.parse().unwrap(), consensus, external_address: peer_address.to_owned(), network: Default::default(), connect_list: Default::default(), api: api_cfg, mempool: Default::default(), thread_pool_size: Default::default(), }; (node_config, keys) } async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) { let service_keypair = blockchain.service_keypair(); let deploy_height = Height(50); // Send an artifact `DeployRequest` to the sample runtime. let artifact = "255:sample_artifact:0.1.0".parse().unwrap(); let request = DeployRequest::new(artifact, deploy_height); let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request); blockchain.sender().broadcast_transaction(tx).await.unwrap(); // Wait until the request is finished. thread::sleep(Duration::from_secs(5)); // Send a `StartService` request to the sample runtime. let instance_name = "instance"; let proposal = ConfigPropose::immediate(0).start_service( "255:sample_artifact:0.1.0".parse().unwrap(), instance_name, 10_u64, ); let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal); blockchain .sender() .broadcast_transaction(proposal) .await .unwrap(); // Wait until instance identifier is assigned. thread::sleep(Duration::from_secs(1)); // Get an instance identifier. let snapshot = blockchain.snapshot(); let state = snapshot .for_dispatcher() .get_instance(instance_name) .unwrap(); assert_eq!(state.status.unwrap(), InstanceStatus::Active); let instance_id = state.spec.id; // Send an update counter transaction. let tx = AnyTx::new(CallInfo::new(instance_id, 0), 1_000_u64.into_bytes()); let tx = tx.sign_with_keypair(service_keypair); blockchain.sender().broadcast_transaction(tx).await.unwrap(); thread::sleep(Duration::from_secs(2)); // Send a reset counter transaction. 
let tx = AnyTx::new(CallInfo::new(instance_id, 1), vec![]); let tx = tx.sign_with_keypair(service_keypair); blockchain.sender().broadcast_transaction(tx).await.unwrap(); thread::sleep(Duration::from_secs(2)); shutdown_handle.shutdown().await.unwrap(); } #[tokio::main] async fn main() { exonum::helpers::init_logger().unwrap(); println!("Creating database in temporary dir..."); let db = TemporaryDB::new(); let (node_cfg, node_keys) = node_config(); let consensus_config = node_cfg.consensus.clone(); let mut genesis_config = GenesisConfigBuilder::with_consensus_config(consensus_config); let mut rt = RustRuntime::builder(); Supervisor::simple().deploy(&mut genesis_config, &mut rt); println!("Creating blockchain with additional runtime..."); let node = NodeBuilder::new(db, node_cfg, node_keys) .with_genesis_config(genesis_config.build()) .with_runtime(SampleRuntime::default()) .with_runtime_fn(|channel| { RustRuntime::builder() .with_factory(Supervisor) .build(channel.endpoints_sender()) }) .build(); let shutdown_handle = node.shutdown_handle(); println!("Starting a single node..."); println!("Blockchain is ready for transactions!"); let blockchain = node.blockchain().clone(); let node_task = node.run().unwrap_or_else(|e| panic!("{}", e)); let node_task = tokio::spawn(node_task); examine_runtime(blockchain, shutdown_handle).await; node_task.await.unwrap(); }
_name: instance.name.to_owned(), ..SampleService::default()
random_line_split
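The random_line_split middle above happens to isolate Rust's struct update syntax: name the fields you set explicitly and pull everything else from another instance, here SampleService::default(). A minimal sketch of the same constructor, stripped of the Exonum context:

use std::cell::Cell;

#[derive(Debug, Default)]
struct SampleService {
    counter: Cell<u64>,
    _name: String,
}

fn main() {
    // Set the one field we care about; `..SampleService::default()` fills in
    // the rest (here just `counter`) from the Default implementation.
    let service = SampleService {
        _name: "instance".to_owned(),
        ..SampleService::default()
    };
    println!("{:?}", service);
}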
main.rs
// Copyright 2020 The Exonum Team // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! Example of a very simple runtime that can perform two types of transaction: //! increment and reset counter in the service instance. #![allow(clippy::unnecessary_wraps)] use exonum::{ blockchain::{config::GenesisConfigBuilder, Blockchain, ConsensusConfig, ValidatorKeys}, helpers::Height, keys::Keys, merkledb::{BinaryValue, Snapshot, TemporaryDB}, runtime::{ migrations::{InitMigrationError, MigrationScript}, oneshot::Receiver, versioning::Version, AnyTx, ArtifactId, CallInfo, CommonError, ExecutionContext, ExecutionError, ExecutionFail, InstanceDescriptor, InstanceId, InstanceState, InstanceStatus, Mailbox, MethodId, Runtime, SnapshotExt, WellKnownRuntime, SUPERVISOR_INSTANCE_ID, }, }; use exonum_derive::ExecutionFail; use exonum_node::{NodeApiConfig, NodeBuilder, NodeConfig, ShutdownHandle}; use exonum_rust_runtime::{spec::Deploy, RustRuntime}; use exonum_supervisor::{ConfigPropose, DeployRequest, Supervisor, SupervisorInterface}; use futures::TryFutureExt; use std::{cell::Cell, collections::BTreeMap, thread, time::Duration}; /// Service instance with a counter. #[derive(Debug, Default)] struct SampleService { counter: Cell<u64>, _name: String, } /// Sample runtime. #[derive(Debug, Default)] struct SampleRuntime { deployed_artifacts: BTreeMap<ArtifactId, Vec<u8>>, started_services: BTreeMap<InstanceId, SampleService>, } // Define runtime specific errors. #[derive(Clone, Copy, Debug, Eq, Hash, Ord, PartialEq, PartialOrd)] #[derive(ExecutionFail)] #[execution_fail(kind = "runtime")] enum SampleRuntimeError { /// Incorrect information to call transaction. IncorrectCallInfo = 1, /// Incorrect transaction payload. IncorrectPayload = 2, } impl SampleRuntime { /// Create a new service instance with the given specification. fn start_service( &self, artifact: &ArtifactId, instance: &InstanceDescriptor, ) -> Result<SampleService, ExecutionError> { // Invariants guaranteed by the core. assert!(self.deployed_artifacts.contains_key(artifact)); assert!(!self.started_services.contains_key(&instance.id)); Ok(SampleService { _name: instance.name.to_owned(), ..SampleService::default() }) } /// In the present simplest case, the artifact is added into the deployed artifacts table. fn deploy_artifact( &mut self, artifact: ArtifactId, spec: Vec<u8>, ) -> Result<(), ExecutionError> { // Invariant guaranteed by the core assert!(!self.deployed_artifacts.contains_key(&artifact)); println!("Deploying artifact: {}", &artifact); self.deployed_artifacts.insert(artifact, spec); Ok(()) } } impl Runtime for SampleRuntime { fn deploy_artifact(&mut self, artifact: ArtifactId, spec: Vec<u8>) -> Receiver { Receiver::with_result(self.deploy_artifact(artifact, spec)) } fn is_artifact_deployed(&self, id: &ArtifactId) -> bool { self.deployed_artifacts.contains_key(id) } /// Initiates adding a new service and sets the counter value for this. 
fn initiate_adding_service( &self, context: ExecutionContext<'_>, artifact: &ArtifactId, params: Vec<u8>, ) -> Result<(), ExecutionError> { let service_instance = self.start_service(artifact, context.instance())?; let new_value = u64::from_bytes(params.into()).map_err(CommonError::malformed_arguments)?; service_instance.counter.set(new_value); println!( "Initializing service {}: {} with value {}", artifact, context.instance(), new_value ); Ok(()) } fn initiate_resuming_service( &self, _context: ExecutionContext<'_>, _artifact: &ArtifactId, _parameters: Vec<u8>, ) -> Result<(), ExecutionError> { unreachable!("We don't resume services in this example.") } /// Commits status for the `SampleService` instance with the specified ID. fn update_service_status(&mut self, _snapshot: &dyn Snapshot, state: &InstanceState) { let spec = &state.spec; match state.status { Some(InstanceStatus::Active) => { // Unwrap here is safe, since by invocation of this method // `exonum` guarantees that `initiate_adding_service` was invoked // before and it returned `Ok(..)`. let instance = self .start_service(&spec.artifact, &spec.as_descriptor()) .unwrap(); println!("Starting service {}: {:?}", spec, instance); self.started_services.insert(spec.id, instance); } Some(InstanceStatus::Stopped) => { let instance = self.started_services.remove(&spec.id); println!("Stopping service {}: {:?}", spec, instance); } _ => { // We aren't interested in other possible statuses. } } } fn migrate( &self, _new_artifact: &ArtifactId, _data_version: &Version, ) -> Result<Option<MigrationScript>, InitMigrationError> { Err(InitMigrationError::NotSupported) } fn execute( &self, context: ExecutionContext<'_>, method_id: MethodId, payload: &[u8], ) -> Result<(), ExecutionError> { let service = self .started_services .get(&context.instance().id) .ok_or(SampleRuntimeError::IncorrectCallInfo)?; println!( "Executing method {}#{} of service {}", context.interface_name(), method_id, context.instance().id ); const SERVICE_INTERFACE: &str = ""; match (context.interface_name(), method_id) { // Increment counter. (SERVICE_INTERFACE, 0) => { let value = u64::from_bytes(payload.into()) .map_err(|e| SampleRuntimeError::IncorrectPayload.with_description(e))?; let counter = service.counter.get(); println!("Updating counter value to {}", counter + value); service.counter.set(value + counter); Ok(()) } // Reset counter. (SERVICE_INTERFACE, 1) => { if !payload.is_empty() { Err(SampleRuntimeError::IncorrectPayload.into()) } else { println!("Resetting counter"); service.counter.set(0); Ok(()) } } // Unknown transaction. (interface, method) => { let err = SampleRuntimeError::IncorrectCallInfo.with_description(format!( "Incorrect information to call transaction. {}#{}", interface, method )); Err(err) } } } fn before_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> { Ok(()) } fn after_transactions(&self, _context: ExecutionContext<'_>) -> Result<(), ExecutionError> { Ok(()) } fn
(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {} } impl From<SampleRuntime> for (u32, Box<dyn Runtime>) { fn from(inner: SampleRuntime) -> Self { (SampleRuntime::ID, Box::new(inner)) } } impl WellKnownRuntime for SampleRuntime { const ID: u32 = 255; } fn node_config() -> (NodeConfig, Keys) { let keys = Keys::random(); let validator_keys = vec![ValidatorKeys::new(keys.consensus_pk(), keys.service_pk())]; let consensus = ConsensusConfig::default().with_validator_keys(validator_keys); let api_address = "0.0.0.0:8000".parse().unwrap(); let api_cfg = NodeApiConfig { public_api_address: Some(api_address), ..Default::default() }; let peer_address = "0.0.0.0:2000"; let node_config = NodeConfig { listen_address: peer_address.parse().unwrap(), consensus, external_address: peer_address.to_owned(), network: Default::default(), connect_list: Default::default(), api: api_cfg, mempool: Default::default(), thread_pool_size: Default::default(), }; (node_config, keys) } async fn examine_runtime(blockchain: Blockchain, shutdown_handle: ShutdownHandle) { let service_keypair = blockchain.service_keypair(); let deploy_height = Height(50); // Send an artifact `DeployRequest` to the sample runtime. let artifact = "255:sample_artifact:0.1.0".parse().unwrap(); let request = DeployRequest::new(artifact, deploy_height); let tx = service_keypair.request_artifact_deploy(SUPERVISOR_INSTANCE_ID, request); blockchain.sender().broadcast_transaction(tx).await.unwrap(); // Wait until the request is finished. thread::sleep(Duration::from_secs(5)); // Send a `StartService` request to the sample runtime. let instance_name = "instance"; let proposal = ConfigPropose::immediate(0).start_service( "255:sample_artifact:0.1.0".parse().unwrap(), instance_name, 10_u64, ); let proposal = service_keypair.propose_config_change(SUPERVISOR_INSTANCE_ID, proposal); blockchain .sender() .broadcast_transaction(proposal) .await .unwrap(); // Wait until instance identifier is assigned. thread::sleep(Duration::from_secs(1)); // Get an instance identifier. let snapshot = blockchain.snapshot(); let state = snapshot .for_dispatcher() .get_instance(instance_name) .unwrap(); assert_eq!(state.status.unwrap(), InstanceStatus::Active); let instance_id = state.spec.id; // Send an update counter transaction. let tx = AnyTx::new(CallInfo::new(instance_id, 0), 1_000_u64.into_bytes()); let tx = tx.sign_with_keypair(service_keypair); blockchain.sender().broadcast_transaction(tx).await.unwrap(); thread::sleep(Duration::from_secs(2)); // Send a reset counter transaction. 
let tx = AnyTx::new(CallInfo::new(instance_id, 1), vec![]); let tx = tx.sign_with_keypair(service_keypair); blockchain.sender().broadcast_transaction(tx).await.unwrap(); thread::sleep(Duration::from_secs(2)); shutdown_handle.shutdown().await.unwrap(); } #[tokio::main] async fn main() { exonum::helpers::init_logger().unwrap(); println!("Creating database in temporary dir..."); let db = TemporaryDB::new(); let (node_cfg, node_keys) = node_config(); let consensus_config = node_cfg.consensus.clone(); let mut genesis_config = GenesisConfigBuilder::with_consensus_config(consensus_config); let mut rt = RustRuntime::builder(); Supervisor::simple().deploy(&mut genesis_config, &mut rt); println!("Creating blockchain with additional runtime..."); let node = NodeBuilder::new(db, node_cfg, node_keys) .with_genesis_config(genesis_config.build()) .with_runtime(SampleRuntime::default()) .with_runtime_fn(|channel| { RustRuntime::builder() .with_factory(Supervisor) .build(channel.endpoints_sender()) }) .build(); let shutdown_handle = node.shutdown_handle(); println!("Starting a single node..."); println!("Blockchain is ready for transactions!"); let blockchain = node.blockchain().clone(); let node_task = node.run().unwrap_or_else(|e| panic!("{}", e)); let node_task = tokio::spawn(node_task); examine_runtime(blockchain, shutdown_handle).await; node_task.await.unwrap(); }
after_commit
identifier_name
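For this identifier_name example the target is just the method name after_commit. The stripped-down trait below suggests why the surrounding signature is enough to recover it; Snapshot and Mailbox are stand-ins for the real Exonum types, and the neighboring hooks are simplified to zero-argument defaults.

trait Snapshot {}
struct Snap;
impl Snapshot for Snap {}

struct Mailbox;

trait Runtime {
    fn before_transactions(&self) -> Result<(), String> {
        Ok(())
    }
    fn after_transactions(&self) -> Result<(), String> {
        Ok(())
    }
    // The recovered identifier: the post-commit hook. Unlike the two hooks
    // above, it takes &mut self, the committed snapshot, and a mailbox, and
    // returns nothing, so the name is pinned down by position and signature.
    fn after_commit(&mut self, _snapshot: &dyn Snapshot, _mailbox: &mut Mailbox) {}
}

struct SampleRuntime;
impl Runtime for SampleRuntime {}

fn main() {
    let mut runtime = SampleRuntime;
    runtime.after_commit(&Snap, &mut Mailbox);
}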
index.js
"use strict"; const fs = require("fs"); const util = require("util"); const {core, utils, values} = require("@ckb-lumos/base"); const {computeScriptHash} = utils; const {secp256k1Blake160} = require("@ckb-lumos/common-scripts"); const {locateCellDep, sealTransaction} = require("@ckb-lumos/helpers"); const {CellCollector, Indexer} = require("@ckb-lumos/indexer"); const {ScriptValue} = values; const {normalizers, Reader, RPC} = require("ckb-js-toolkit"); const secp256k1 = require("secp256k1"); const {ckbytesToShannons, formattedNumber, getRandomInt, hexToInt, intToHex, shannonsToCkbytes} = require("./util.js"); const DEFAULT_LOCK_HASH = "0x9bd7e06f3ecf4be0f2fcd2188b23f1b9fcc88e5d4b65a8637b17723bbda3cce8"; const MULTISIG_LOCK_HASH = "0x5c5069eb0857efc65e1bca0c07df34c31663b3622fd3876c876320fc9634e2a8"; const SECP_SIGNATURE_PLACEHOLDER_DEFAULT = "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; function addDefaultCellDeps(transaction) { return transaction.update("cellDeps", (cellDeps)=>cellDeps.push(locateCellDep({code_hash: DEFAULT_LOCK_HASH, hash_type: "type"}))); } /** * Adds witness placeholders to the transaction for the default lock. * * This function adds zero-filled placeholders for all cells using the default * lock and empty placeholders for all other cells. If a cell is not using * the default lock, the placeholder may need to be altered after this function * is run. This function can only be used on an empty witnesses structure. * * @param {Object} transaction An instance of a Lumos `TransactionSkelton`. * * @return {Object} An instance of the transaction skeleton with the placeholders added. */ function addDefaultWitnessPlaceholders(transaction) { if(transaction.witnesses.size !== 0) throw new Error("This function can only be used on an empty witnesses structure."); // Cycle through all inputs adding placeholders for unique locks, and empty witnesses in all other places. let uniqueLocks = new Set(); for(const input of transaction.inputs) { let witness = "0x"; const lockHash = computeScriptHash(input.cell_output.lock); if(!uniqueLocks.has(lockHash)) { uniqueLocks.add(lockHash); if(input.cell_output.lock.hash_type === "type" && input.cell_output.lock.code_hash === DEFAULT_LOCK_HASH) witness = SECP_SIGNATURE_PLACEHOLDER_DEFAULT; } witness = new Reader(core.SerializeWitnessArgs(normalizers.NormalizeWitnessArgs({lock: witness}))).serializeJson(); transaction = transaction.update("witnesses", (w)=>w.push(witness)); } return transaction; } function checkTxFee(transaction) { const tx = transaction.toJS(); let capacityInputs = 0n; let capacityOutputs = 0n; for(let input of tx.inputs) capacityInputs += BigInt(input.cell_output.capacity); for(let output of tx.outputs) capacityOutputs += BigInt(output.cell_output.capacity); if(capacityInputs - capacityOutputs > ckbytesToShannons(1)) throw new Error(`Transaction fee too high: ${formattedNumber(shannonsToCkbytes(capacityInputs - capacityOutputs))} CKBytes. A normal transaction fee is < 1 CKByte.`); } /** * Collects Cells for use as capacity from the specified lock script. * * This will search for Cells with at least capacityRequired. If there is insufficient capacity available an error will be thrown. * * @example * const {inputCells, inputCapacity} = await collectCapacity(indexer, addressToScript("ckt1qyqvsv5240xeh85wvnau2eky8pwrhh4jr8ts8vyj37"), ckbytesToShannons(100n)); * * @param {Object} indexer An instance of a running Lumos Indexer. 
* @param {Object} lockScript A script used to query the CellCollector to find Cells to use as capacity. * @param {BigInt} capacityRequired The number of CKBytes needed. * * @returns {Object} An object with the inputCells[] found and the inputCapacity contained within the provided Cells. */ async function collectCapacity(indexer, lockScript, capacityRequired) { const query = {lock: lockScript, type: "empty"}; const cellCollector = new CellCollector(indexer, query); let inputCells = []; let inputCapacity = 0n; for await (const cell of cellCollector.collect()) { inputCells.push(cell); inputCapacity += hexToInt(cell.cell_output.capacity); if(inputCapacity >= capacityRequired) break; } if(inputCapacity < capacityRequired) throw new Error("Unable to collect enough cells to fulfill the capacity requirements."); return {inputCells, inputCapacity}; } /** * Collects Cells for use as capacity from the specified lock script. * * This will search for Cells with at least capacityRequired. If there is insufficient capacity available an error will be thrown. * * @example * const {inputCells, inputCapacity} = await collectCapacity(indexer, addressToScript("ckt1qyqvsv5240xeh85wvnau2eky8pwrhh4jr8ts8vyj37"), ckbytesToShannons(100n)); * * @param {Object} indexer An instance of a running Lumos Indexer. * @param {Object} lockScript A lock script used to query the CellCollector to find Cells to use as capacity. * @param {Object} typeScript A type script used to query the CellCollector to find Cells to use as capacity. * @param {BigInt} capacityRequired The number of CKBytes needed. * * @returns {Object} An object with the inputCells[] found and the inputCapacity contained within the provided Cells. */ async function collectCapacityWithType(indexer, lockScript, typeScript, capacityRequired) { const query = {lock: lockScript, type: typeScript}; const cellCollector = new CellCollector(indexer, query); let inputCells = []; let inputCapacity = 0n; for await (const cell of cellCollector.collect()) { inputCells.push(cell); inputCapacity += hexToInt(cell.cell_output.capacity); if(inputCapacity >= capacityRequired) break; } if(inputCapacity < capacityRequired) throw new Error("Unable to collect enough cells to fulfill the capacity requirements."); return {inputCells, inputCapacity}; } function describeTransaction(transaction, options) { const defaults = { showCellDeps: true, showInputs: true, showInputCapacity: true, showInputData: false, showInputLock: true, showInputType: true, showInputOutPoint: true, showOutputs: true, showOutputCapacity: true, showOutputData: false, showOutputLock: true, showOutputType: true, showWitnesses: true, showTxFee: true }; options = {...defaults, ...options}; let obj = { deps: [], inputs: [], outputs: [], witnesses: [] }; for(const dep of transaction.cellDeps) { let cell = { dep_type: dep.dep_type, out_point: dep.out_point.tx_hash + "-" + dep.out_point.index }; obj.deps.push(cell);
{ capacity: formattedNumber(hexToInt(input.cell_output.capacity)) + " Shannons", capacityCkbytes: formattedNumber((Number(hexToInt(input.cell_output.capacity)) / 100_000_000), 4) + " CKBytes", lock: new ScriptValue(input.cell_output.lock).hash(), type: (!!input.cell_output.type) ? new ScriptValue(input.cell_output.type).hash() : null, out_point: input.out_point.tx_hash + "-" + input.out_point.index, data: input.data }; obj.inputs.push(cell); } for(const output of transaction.outputs) { let cell = { capacity: formattedNumber(hexToInt(output.cell_output.capacity)) + " Shannons", capacityCkbytes: formattedNumber((Number(hexToInt(output.cell_output.capacity)) / 100_000_000), 4) + " CKBytes", lock: new ScriptValue(output.cell_output.lock).hash(), type: (!!output.cell_output.type) ? new ScriptValue(output.cell_output.type).hash() : null, data: output.data }; obj.outputs.push(cell); } obj.witnesses = transaction.witnesses; if(options.showCellDeps) { console.log("Cell Deps:"); for(const dep of obj.deps) { console.log(" - dep_type: " + dep.dep_type); console.log(" out_point: " + dep.out_point); } } if(options.showInputs) { console.log("Inputs:"); for(const input of obj.inputs) { if(options.showInputCapacity) console.log(" - capacity: " + input.capacity + ` (${input.capacityCkbytes})`); if(options.showInputLock) console.log(" lock: " + input.lock); if(options.showInputType) console.log(" type: " + input.type); if(options.showInputOutPoint) console.log(" out_point: " + input.out_point); if(options.showInputData) { const data = (input.data.length > 66) ? input.data.substr(0, 33) + "..." + input.data.substr(input.data.length - 30) : input.data; const dataBytes = (data.length > 2) ? (input.data.length-2)/2 : 0; console.log(` data: ${data} (${formattedNumber(dataBytes)} Bytes)`); } } } if(options.showOutputs) { console.log("Outputs:"); for(const output of obj.outputs) { if(options.showOutputCapacity) console.log(" - capacity: " + output.capacity + ` (${output.capacityCkbytes})`); if(options.showOutputLock) console.log(" lock: " + output.lock); if(options.showOutputType) console.log(" type: " + output.type); if(options.showOutputData) { const data = (output.data.length > 66) ? output.data.substr(0, 33) + "..." + output.data.substr(output.data.length - 30) : output.data; const dataBytes = (data.length > 2) ? (output.data.length-2)/2 : 0; console.log(` data: ${data} (${formattedNumber(dataBytes)} Bytes)`); } } } if(options.showWitnesses) { console.log("Witnesses:"); for(const witness of obj.witnesses) { console.log(" - " + witness); } } if(options.showTxFee) { const inputCapacity = transaction.inputs.reduce((a, c)=>a+hexToInt(c.cell_output.capacity), 0n); const outputCapacity = transaction.outputs.reduce((a, c)=>a+hexToInt(c.cell_output.capacity), 0n); console.log(`TX Fee: ${formattedNumber(inputCapacity - outputCapacity)} Shannons`) } console.log(); } async function getLiveCell(nodeUrl, outPoint, returnData = false) { const rpc = new RPC(nodeUrl); const res = await rpc.get_live_cell({tx_hash: outPoint.tx_hash, index: outPoint.index}, returnData); if(res.status === "dead") throw new Error(`Dead cell found at out point: ${outPoint.tx_hash}-${outPoint.index}`); if(res.status !== "live") throw new Error(`Live cell not found at out point: ${outPoint.tx_hash}-${outPoint.index}`); const cell = { cell_output: { capacity: res.cell.output.capacity, lock: {code_hash: res.cell.output.lock.code_hash, hash_type: res.cell.output.lock.hash_type, args: res.cell.output.lock.args}, type: (!res.cell.output.type) ? 
undefined : {code_hash: res.cell.output.type.code_hash, hash_type: res.cell.output.type.hash_type, args: res.cell.output.type.args} }, out_point: { tx_hash: outPoint.tx_hash, index: outPoint.index }, data: (returnData) ? res.cell.data.content : "0x" } return cell; } async function indexerReady(indexer, updateProgress=((_indexerTip, _rpcTip)=>{}), options) { const defaults = {blockDifference: 0, timeoutMs: 300_000, recheckMs: 500}; options = {...defaults, ...options}; return new Promise(async (resolve, reject) => { let timedOut = false; const timeoutTimer = (options.timeoutMs !== 0) ? setTimeout(()=>{timedOut = true;}, options.timeoutMs) : false; const rpc = new RPC(indexer.uri); let indexerFailureCount = 0; let rpcFailureCount = 0; while(true) { if(timedOut) return reject(Error("Transaction timeout.")); const indexerTipObj = await indexer.tip(); if(!indexerTipObj) { if(++indexerFailureCount >= 5) return reject(Error("Indexer gave an unexpected response.")); await new Promise((resolve)=>setTimeout(resolve, 200)); continue; } const rpcResponse = await rpc.get_tip_block_number(); if(!rpcResponse) { if(++rpcFailureCount >= 5) return reject(Error("RPC gave an unexpected response.")); await new Promise((resolve)=>setTimeout(resolve, 200)); continue; } const indexerTip = BigInt(indexerTipObj.block_number); const rpcTip = BigInt(rpcResponse); if(indexerTip >= (rpcTip - BigInt(options.blockDifference))) { if(timeoutTimer) clearTimeout(timeoutTimer); break; } updateProgress(indexerTip, rpcTip); await new Promise(resolve=>setTimeout(resolve, options.recheckMs)); } return resolve(); }); } async function initializeLumosIndexer(nodeUrl) { // Start the Lumos Indexer and wait until it is fully synchronized. const indexer = new Indexer(nodeUrl, "../indexer-data"); indexer.startForever(); console.log("Indexer is syncing. Please wait."); await indexerReady(indexer, (indexerTip, rpcTip)=>console.log(`Syncing ${Math.floor(Number(indexerTip)/Number(rpcTip)*10_000)/100}% completed.`), {timeoutMs: 0, recheckMs: 800}); console.log(); return indexer; } async function readFile(filename) { const readFile = util.promisify(fs.readFile); return await readFile(filename); } function readFileSync(filename) { return fs.readFileSync(filename); } async function readFileToHexString(filename) { const data = await readFile(filename); const dataSize = data.length; const hexString = "0x" + data.toString("hex"); return {hexString, dataSize}; } function readFileToHexStringSync(filename) { const data = readFileSync(filename); const dataSize = data.length; const hexString = "0x" + data.toString("hex"); return {hexString, dataSize}; } async function sendTransaction(nodeUrl, signedTx) { const rpc = new RPC(nodeUrl); let result; try { result = await rpc.send_transaction(signedTx); } catch(error) { const regex = /^(\w+): ([\w\s]+) (\{.*\})$/; const matches = error.message.match(regex); if(!!matches && matches.length > 0) { const category = matches[1]; const type = matches[2]; const json = JSON.parse(matches[3]); console.log(); console.error(`Error: ${category}`); console.error(`Type: ${type}`); console.error(`Code: ${json.code}`); console.error(`Message: ${json.message}`); console.error(`Data: ${json.data}`); console.log(); throw new Error("RPC Returned Error!"); } else throw error; } return result; } /** * Creates a signature for the provided message with the provided private key using the Secp256k1 algorithm. * * @param {String} privateKey A 256-bit Secp256k1 private key represented as a hex string. 
* @param {String} message A message to sign represented as a hex string. * * @return {String} A 65 byte Secp256k1 signature represented as a hex string. */ function signMessage(privateKey, message) { const messageArray = new Uint8Array(new Reader(message).toArrayBuffer()); const pkArray = new Uint8Array(new Reader(privateKey).toArrayBuffer()); const {signature, recid} = secp256k1.ecdsaSign(messageArray, pkArray); const array = new Uint8Array(65); array.set(signature, 0); array.set([recid], 64); return new Reader(array.buffer).serializeJson(); } /** * Sign a transaction that uses the default lock and requires a single signature. * * @param {Object} transaction An instance of a Lumos transaction skeleton. * @param {String} privateKey A 256-bit Secp256k1 private key represented as a hex string. * * @return {Object} An instance of a Lumos transaction that has been sealed. */ function signTransaction(transaction, privateKey) { transaction = secp256k1Blake160.prepareSigningEntries(transaction); const signingEntries = transaction.get("signingEntries").toArray(); const signature = signMessage(privateKey, signingEntries[0].message); const tx = sealTransaction(transaction, [signature]); return tx; } async function waitForConfirmation(nodeUrl, txid, updateProgress=((_status)=>{}), options) { const defaults = {timeoutMs: 300_000, recheckMs: 500, throwOnNotFound: true}; options = {...defaults, ...options}; return new Promise(async (resolve, reject) => { let timedOut = false; const timeoutTimer = (options.timeoutMs !== 0) ? setTimeout(()=>{timedOut = true;}, options.timeoutMs) : false; const rpc = new RPC(nodeUrl); while(true) { if(timedOut) return reject(Error("Transaction timeout.")); const transaction = await rpc.get_transaction(txid); if(!!transaction) { const status = transaction.tx_status.status; updateProgress(status); if(status === "committed") { if(timeoutTimer) clearTimeout(timeoutTimer); break; } } else if(transaction === null) { if(options.throwOnNotFound) return reject(Error("Transaction was not found.")); else updateProgress("not_found"); } await new Promise(resolve=>setTimeout(resolve, options.recheckMs)); } return resolve(); }); } async function waitForNextBlock(nodeUrl, blocks=1n, updateProgress=((_startTip, _tip)=>{}), options) { const defaults = {timeoutMs: 300_000, recheckMs: 500}; options = {...defaults, ...options}; return new Promise(async (resolve, reject) => { const timeoutTimer = (options.timeoutMs !== 0) ? 
setTimeout(()=>{timedOut = true;}, options.timeoutMs) : false; const rpc = new RPC(nodeUrl); blocks = BigInt(blocks); let timedOut = false; let startTip = 0n; while(true) { if(timedOut) return reject(Error("Transaction timeout.")); let tip = await rpc.get_tip_block_number(); if(!!tip) { tip = BigInt(tip); if(startTip === 0n) startTip = tip; updateProgress(startTip, tip); if(tip >= startTip + blocks) { if(timeoutTimer) clearTimeout(timeoutTimer); break; } } else return reject(Error("RPC gave an unexpected response.")); await new Promise(resolve=>setTimeout(resolve, options.recheckMs)); } return resolve(); }); } async function waitForTransactionConfirmation(nodeUrl, txid) { process.stdout.write("Waiting for transaction to confirm."); await waitForConfirmation(nodeUrl, txid, (_status)=>process.stdout.write("."), {recheckMs: 1_000}); } module.exports = { addDefaultCellDeps, addDefaultWitnessPlaceholders, checkTxFee, collectCapacity, collectCapacityWithType, DEFAULT_LOCK_HASH, describeTransaction, getLiveCell, indexerReady, initializeLumosIndexer, MULTISIG_LOCK_HASH, readFile, readFileSync, readFileToHexString, readFileToHexStringSync, SECP_SIGNATURE_PLACEHOLDER_DEFAULT, sendTransaction, signMessage, signTransaction, waitForConfirmation, waitForTransactionConfirmation, waitForNextBlock };
} for(const input of transaction.inputs) { let cell =
random_line_split
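The index.js rows above do all capacity arithmetic with BigInt over hex strings. The fee line at the end of describeTransaction reduces the inputs and outputs to capacity totals and prints the difference; the sketch below replays that with an inline stand-in for the hexToInt helper from ./util.js and made-up capacities.

"use strict";

// Stand-in for the hexToInt() helper from ./util.js.
const hexToInt = (hex) => BigInt(hex);

// Illustrative capacities: 100 CKBytes in, slightly less out
// (1 CKByte = 100,000,000 Shannons).
const inputs = [{cell_output: {capacity: "0x2540be400"}}];
const outputs = [{cell_output: {capacity: "0x2540b0000"}}];

const inputCapacity = inputs.reduce((a, c) => a + hexToInt(c.cell_output.capacity), 0n);
const outputCapacity = outputs.reduce((a, c) => a + hexToInt(c.cell_output.capacity), 0n);

// Anything left over is the transaction fee claimed by the miner.
console.log(`TX Fee: ${inputCapacity - outputCapacity} Shannons`);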
index.js
"use strict"; const fs = require("fs"); const util = require("util"); const {core, utils, values} = require("@ckb-lumos/base"); const {computeScriptHash} = utils; const {secp256k1Blake160} = require("@ckb-lumos/common-scripts"); const {locateCellDep, sealTransaction} = require("@ckb-lumos/helpers"); const {CellCollector, Indexer} = require("@ckb-lumos/indexer"); const {ScriptValue} = values; const {normalizers, Reader, RPC} = require("ckb-js-toolkit"); const secp256k1 = require("secp256k1"); const {ckbytesToShannons, formattedNumber, getRandomInt, hexToInt, intToHex, shannonsToCkbytes} = require("./util.js"); const DEFAULT_LOCK_HASH = "0x9bd7e06f3ecf4be0f2fcd2188b23f1b9fcc88e5d4b65a8637b17723bbda3cce8"; const MULTISIG_LOCK_HASH = "0x5c5069eb0857efc65e1bca0c07df34c31663b3622fd3876c876320fc9634e2a8"; const SECP_SIGNATURE_PLACEHOLDER_DEFAULT = "0x0000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000"; function addDefaultCellDeps(transaction) { return transaction.update("cellDeps", (cellDeps)=>cellDeps.push(locateCellDep({code_hash: DEFAULT_LOCK_HASH, hash_type: "type"}))); } /** * Adds witness placeholders to the transaction for the default lock. * * This function adds zero-filled placeholders for all cells using the default * lock and empty placeholders for all other cells. If a cell is not using * the default lock, the placeholder may need to be altered after this function * is run. This function can only be used on an empty witnesses structure. * * @param {Object} transaction An instance of a Lumos `TransactionSkelton`. * * @return {Object} An instance of the transaction skeleton with the placeholders added. */ function addDefaultWitnessPlaceholders(transaction) { if(transaction.witnesses.size !== 0) throw new Error("This function can only be used on an empty witnesses structure."); // Cycle through all inputs adding placeholders for unique locks, and empty witnesses in all other places. let uniqueLocks = new Set(); for(const input of transaction.inputs) { let witness = "0x"; const lockHash = computeScriptHash(input.cell_output.lock); if(!uniqueLocks.has(lockHash)) { uniqueLocks.add(lockHash); if(input.cell_output.lock.hash_type === "type" && input.cell_output.lock.code_hash === DEFAULT_LOCK_HASH) witness = SECP_SIGNATURE_PLACEHOLDER_DEFAULT; } witness = new Reader(core.SerializeWitnessArgs(normalizers.NormalizeWitnessArgs({lock: witness}))).serializeJson(); transaction = transaction.update("witnesses", (w)=>w.push(witness)); } return transaction; } function checkTxFee(transaction) { const tx = transaction.toJS(); let capacityInputs = 0n; let capacityOutputs = 0n; for(let input of tx.inputs) capacityInputs += BigInt(input.cell_output.capacity); for(let output of tx.outputs) capacityOutputs += BigInt(output.cell_output.capacity); if(capacityInputs - capacityOutputs > ckbytesToShannons(1)) throw new Error(`Transaction fee too high: ${formattedNumber(shannonsToCkbytes(capacityInputs - capacityOutputs))} CKBytes. A normal transaction fee is < 1 CKByte.`); } /** * Collects Cells for use as capacity from the specified lock script. * * This will search for Cells with at least capacityRequired. If there is insufficient capacity available an error will be thrown. * * @example * const {inputCells, inputCapacity} = await collectCapacity(indexer, addressToScript("ckt1qyqvsv5240xeh85wvnau2eky8pwrhh4jr8ts8vyj37"), ckbytesToShannons(100n)); * * @param {Object} indexer An instance of a running Lumos Indexer. 
* @param {Object} lockScript A script used to query the CellCollector to find Cells to use as capacity. * @param {BigInt} capacityRequired The number of CKBytes needed. * * @returns {Object} An object with the inputCells[] found and the inputCapacity contained within the provided Cells. */ async function collectCapacity(indexer, lockScript, capacityRequired) { const query = {lock: lockScript, type: "empty"}; const cellCollector = new CellCollector(indexer, query); let inputCells = []; let inputCapacity = 0n; for await (const cell of cellCollector.collect()) { inputCells.push(cell); inputCapacity += hexToInt(cell.cell_output.capacity); if(inputCapacity >= capacityRequired) break; } if(inputCapacity < capacityRequired) throw new Error("Unable to collect enough cells to fulfill the capacity requirements."); return {inputCells, inputCapacity}; } /** * Collects Cells for use as capacity from the specified lock script. * * This will search for Cells with at least capacityRequired. If there is insufficient capacity available an error will be thrown. * * @example * const {inputCells, inputCapacity} = await collectCapacity(indexer, addressToScript("ckt1qyqvsv5240xeh85wvnau2eky8pwrhh4jr8ts8vyj37"), ckbytesToShannons(100n)); * * @param {Object} indexer An instance of a running Lumos Indexer. * @param {Object} lockScript A lock script used to query the CellCollector to find Cells to use as capacity. * @param {Object} typeScript A type script used to query the CellCollector to find Cells to use as capacity. * @param {BigInt} capacityRequired The number of CKBytes needed. * * @returns {Object} An object with the inputCells[] found and the inputCapacity contained within the provided Cells. */ async function collectCapacityWithType(indexer, lockScript, typeScript, capacityRequired) { const query = {lock: lockScript, type: typeScript}; const cellCollector = new CellCollector(indexer, query); let inputCells = []; let inputCapacity = 0n; for await (const cell of cellCollector.collect()) { inputCells.push(cell); inputCapacity += hexToInt(cell.cell_output.capacity); if(inputCapacity >= capacityRequired) break; } if(inputCapacity < capacityRequired) throw new Error("Unable to collect enough cells to fulfill the capacity requirements."); return {inputCells, inputCapacity}; } function describeTransaction(transaction, options) { const defaults = { showCellDeps: true, showInputs: true, showInputCapacity: true, showInputData: false, showInputLock: true, showInputType: true, showInputOutPoint: true, showOutputs: true, showOutputCapacity: true, showOutputData: false, showOutputLock: true, showOutputType: true, showWitnesses: true, showTxFee: true }; options = {...defaults, ...options}; let obj = { deps: [], inputs: [], outputs: [], witnesses: [] }; for(const dep of transaction.cellDeps) { let cell = { dep_type: dep.dep_type, out_point: dep.out_point.tx_hash + "-" + dep.out_point.index }; obj.deps.push(cell); } for(const input of transaction.inputs) { let cell = { capacity: formattedNumber(hexToInt(input.cell_output.capacity)) + " Shannons", capacityCkbytes: formattedNumber((Number(hexToInt(input.cell_output.capacity)) / 100_000_000), 4) + " CKBytes", lock: new ScriptValue(input.cell_output.lock).hash(), type: (!!input.cell_output.type) ? 
new ScriptValue(input.cell_output.type).hash() : null, out_point: input.out_point.tx_hash + "-" + input.out_point.index, data: input.data }; obj.inputs.push(cell); } for(const output of transaction.outputs) { let cell = { capacity: formattedNumber(hexToInt(output.cell_output.capacity)) + " Shannons", capacityCkbytes: formattedNumber((Number(hexToInt(output.cell_output.capacity)) / 100_000_000), 4) + " CKBytes", lock: new ScriptValue(output.cell_output.lock).hash(), type: (!!output.cell_output.type) ? new ScriptValue(output.cell_output.type).hash() : null, data: output.data }; obj.outputs.push(cell); } obj.witnesses = transaction.witnesses; if(options.showCellDeps) { console.log("Cell Deps:"); for(const dep of obj.deps) { console.log(" - dep_type: " + dep.dep_type); console.log(" out_point: " + dep.out_point); } } if(options.showInputs) { console.log("Inputs:"); for(const input of obj.inputs) { if(options.showInputCapacity) console.log(" - capacity: " + input.capacity + ` (${input.capacityCkbytes})`); if(options.showInputLock) console.log(" lock: " + input.lock); if(options.showInputType) console.log(" type: " + input.type); if(options.showInputOutPoint) console.log(" out_point: " + input.out_point); if(options.showInputData) { const data = (input.data.length > 66) ? input.data.substr(0, 33) + "..." + input.data.substr(input.data.length - 30) : input.data; const dataBytes = (data.length > 2) ? (input.data.length-2)/2 : 0; console.log(` data: ${data} (${formattedNumber(dataBytes)} Bytes)`); } } } if(options.showOutputs) { console.log("Outputs:"); for(const output of obj.outputs) { if(options.showOutputCapacity) console.log(" - capacity: " + output.capacity + ` (${output.capacityCkbytes})`); if(options.showOutputLock) console.log(" lock: " + output.lock); if(options.showOutputType) console.log(" type: " + output.type); if(options.showOutputData) { const data = (output.data.length > 66) ? output.data.substr(0, 33) + "..." + output.data.substr(output.data.length - 30) : output.data; const dataBytes = (data.length > 2) ? (output.data.length-2)/2 : 0; console.log(` data: ${data} (${formattedNumber(dataBytes)} Bytes)`); } } } if(options.showWitnesses) { console.log("Witnesses:"); for(const witness of obj.witnesses) { console.log(" - " + witness); } } if(options.showTxFee) { const inputCapacity = transaction.inputs.reduce((a, c)=>a+hexToInt(c.cell_output.capacity), 0n); const outputCapacity = transaction.outputs.reduce((a, c)=>a+hexToInt(c.cell_output.capacity), 0n); console.log(`TX Fee: ${formattedNumber(inputCapacity - outputCapacity)} Shannons`) } console.log(); } async function getLiveCell(nodeUrl, outPoint, returnData = false) { const rpc = new RPC(nodeUrl); const res = await rpc.get_live_cell({tx_hash: outPoint.tx_hash, index: outPoint.index}, returnData); if(res.status === "dead") throw new Error(`Dead cell found at out point: ${outPoint.tx_hash}-${outPoint.index}`); if(res.status !== "live") throw new Error(`Live cell not found at out point: ${outPoint.tx_hash}-${outPoint.index}`); const cell = { cell_output: { capacity: res.cell.output.capacity, lock: {code_hash: res.cell.output.lock.code_hash, hash_type: res.cell.output.lock.hash_type, args: res.cell.output.lock.args}, type: (!res.cell.output.type) ? undefined : {code_hash: res.cell.output.type.code_hash, hash_type: res.cell.output.type.hash_type, args: res.cell.output.type.args} }, out_point: { tx_hash: outPoint.tx_hash, index: outPoint.index }, data: (returnData) ? 
res.cell.data.content : "0x" } return cell; } async function indexerReady(indexer, updateProgress=((_indexerTip, _rpcTip)=>{}), options) { const defaults = {blockDifference: 0, timeoutMs: 300_000, recheckMs: 500}; options = {...defaults, ...options}; return new Promise(async (resolve, reject) => { let timedOut = false; const timeoutTimer = (options.timeoutMs !== 0) ? setTimeout(()=>{timedOut = true;}, options.timeoutMs) : false; const rpc = new RPC(indexer.uri); let indexerFailureCount = 0; let rpcFailureCount = 0; while(true) { if(timedOut) return reject(Error("Transaction timeout.")); const indexerTipObj = await indexer.tip(); if(!indexerTipObj) { if(++indexerFailureCount >= 5) return reject(Error("Indexer gave an unexpected response.")); await new Promise((resolve)=>setTimeout(resolve, 200)); continue; } const rpcResponse = await rpc.get_tip_block_number(); if(!rpcResponse) { if(++rpcFailureCount >= 5) return reject(Error("RPC gave an unexpected response.")); await new Promise((resolve)=>setTimeout(resolve, 200)); continue; } const indexerTip = BigInt(indexerTipObj.block_number); const rpcTip = BigInt(rpcResponse); if(indexerTip >= (rpcTip - BigInt(options.blockDifference))) { if(timeoutTimer) clearTimeout(timeoutTimer); break; } updateProgress(indexerTip, rpcTip); await new Promise(resolve=>setTimeout(resolve, options.recheckMs)); } return resolve(); }); } async function initializeLumosIndexer(nodeUrl) { // Start the Lumos Indexer and wait until it is fully synchronized. const indexer = new Indexer(nodeUrl, "../indexer-data"); indexer.startForever(); console.log("Indexer is syncing. Please wait."); await indexerReady(indexer, (indexerTip, rpcTip)=>console.log(`Syncing ${Math.floor(Number(indexerTip)/Number(rpcTip)*10_000)/100}% completed.`), {timeoutMs: 0, recheckMs: 800}); console.log(); return indexer; } async function
(filename) { const readFile = util.promisify(fs.readFile); return await readFile(filename); } function readFileSync(filename) { return fs.readFileSync(filename); } async function readFileToHexString(filename) { const data = await readFile(filename); const dataSize = data.length; const hexString = "0x" + data.toString("hex"); return {hexString, dataSize}; } function readFileToHexStringSync(filename) { const data = readFileSync(filename); const dataSize = data.length; const hexString = "0x" + data.toString("hex"); return {hexString, dataSize}; } async function sendTransaction(nodeUrl, signedTx) { const rpc = new RPC(nodeUrl); let result; try { result = await rpc.send_transaction(signedTx); } catch(error) { const regex = /^(\w+): ([\w\s]+) (\{.*\})$/; const matches = error.message.match(regex); if(!!matches && matches.length > 0) { const category = matches[1]; const type = matches[2]; const json = JSON.parse(matches[3]); console.log(); console.error(`Error: ${category}`); console.error(`Type: ${type}`); console.error(`Code: ${json.code}`); console.error(`Message: ${json.message}`); console.error(`Data: ${json.data}`); console.log(); throw new Error("RPC Returned Error!"); } else throw error; } return result; } /** * Creates a signature for the provided message with the provided private key using the Secp256k1 algorithm. * * @param {String} privateKey A 256-bit Secp256k1 private key represented as a hex string. * @param {String} message A message to sign represented as a hex string. * * @return {String} A 65 byte Secp256k1 signature represented as a hex string. */ function signMessage(privateKey, message) { const messageArray = new Uint8Array(new Reader(message).toArrayBuffer()); const pkArray = new Uint8Array(new Reader(privateKey).toArrayBuffer()); const {signature, recid} = secp256k1.ecdsaSign(messageArray, pkArray); const array = new Uint8Array(65); array.set(signature, 0); array.set([recid], 64); return new Reader(array.buffer).serializeJson(); } /** * Sign a transaction that uses the default lock and requires a single signature. * * @param {Object} transaction An instance of a Lumos transaction skeleton. * @param {String} privateKey A 256-bit Secp256k1 private key represented as a hex string. * * @return {Object} An instance of a Lumos transaction that has been sealed. */ function signTransaction(transaction, privateKey) { transaction = secp256k1Blake160.prepareSigningEntries(transaction); const signingEntries = transaction.get("signingEntries").toArray(); const signature = signMessage(privateKey, signingEntries[0].message); const tx = sealTransaction(transaction, [signature]); return tx; } async function waitForConfirmation(nodeUrl, txid, updateProgress=((_status)=>{}), options) { const defaults = {timeoutMs: 300_000, recheckMs: 500, throwOnNotFound: true}; options = {...defaults, ...options}; return new Promise(async (resolve, reject) => { let timedOut = false; const timeoutTimer = (options.timeoutMs !== 0) ? 
setTimeout(()=>{timedOut = true;}, options.timeoutMs) : false; const rpc = new RPC(nodeUrl); while(true) { if(timedOut) return reject(Error("Transaction timeout.")); const transaction = await rpc.get_transaction(txid); if(!!transaction) { const status = transaction.tx_status.status; updateProgress(status); if(status === "committed") { if(timeoutTimer) clearTimeout(timeoutTimer); break; } } else if(transaction === null) { if(options.throwOnNotFound) return reject(Error("Transaction was not found.")); else updateProgress("not_found"); } await new Promise(resolve=>setTimeout(resolve, options.recheckMs)); } return resolve(); }); } async function waitForNextBlock(nodeUrl, blocks=1n, updateProgress=((_startTip, _tip)=>{}), options) { const defaults = {timeoutMs: 300_000, recheckMs: 500}; options = {...defaults, ...options}; return new Promise(async (resolve, reject) => { const timeoutTimer = (options.timeoutMs !== 0) ? setTimeout(()=>{timedOut = true;}, options.timeoutMs) : false; const rpc = new RPC(nodeUrl); blocks = BigInt(blocks); let timedOut = false; let startTip = 0n; while(true) { if(timedOut) return reject(Error("Transaction timeout.")); let tip = await rpc.get_tip_block_number(); if(!!tip) { tip = BigInt(tip); if(startTip === 0n) startTip = tip; updateProgress(startTip, tip); if(tip >= startTip + blocks) { if(timeoutTimer) clearTimeout(timeoutTimer); break; } } else return reject(Error("RPC gave an unexpected response.")); await new Promise(resolve=>setTimeout(resolve, options.recheckMs)); } return resolve(); }); } async function waitForTransactionConfirmation(nodeUrl, txid) { process.stdout.write("Waiting for transaction to confirm."); await waitForConfirmation(nodeUrl, txid, (_status)=>process.stdout.write("."), {recheckMs: 1_000}); } module.exports = { addDefaultCellDeps, addDefaultWitnessPlaceholders, checkTxFee, collectCapacity, collectCapacityWithType, DEFAULT_LOCK_HASH, describeTransaction, getLiveCell, indexerReady, initializeLumosIndexer, MULTISIG_LOCK_HASH, readFile, readFileSync, readFileToHexString, readFileToHexStringSync, SECP_SIGNATURE_PLACEHOLDER_DEFAULT, sendTransaction, signMessage, signTransaction, waitForConfirmation, waitForTransactionConfirmation, waitForNextBlock };
readFile
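/*
 * Hypothetical end-to-end sketch showing how the helpers above compose. All
 * specifics are placeholders: NODE_URL, PRIVATE_KEY, and the address are not
 * real values, and TransactionSkeleton/addressToScript are assumed to come
 * from @ckb-lumos/helpers. The function is illustrative only and is
 * intentionally not exported or invoked.
 */
async function exampleTransfer()
{
	const {addressToScript, TransactionSkeleton} = require("@ckb-lumos/helpers");
	const NODE_URL = "http://127.0.0.1:8114"; // Placeholder: a local CKB dev node.
	const PRIVATE_KEY = "0x<256-bit Secp256k1 private key>"; // Placeholder.
	const lockScript = addressToScript("ckt1qyqvsv5240xeh85wvnau2eky8pwrhh4jr8ts8vyj37");

	// Start the indexer and build an empty transaction skeleton with the default cell deps.
	const indexer = await initializeLumosIndexer(NODE_URL);
	let transaction = TransactionSkeleton({cellProvider: indexer});
	transaction = addDefaultCellDeps(transaction);

	// Gather 100 CKBytes of input capacity from the lock.
	const {inputCells, inputCapacity} = await collectCapacity(indexer, lockScript, ckbytesToShannons(100n));
	transaction = transaction.update("inputs", (i)=>i.concat(inputCells));

	// Send everything back to the same lock as change, minus a 0.001 CKByte TX fee.
	const changeCapacity = inputCapacity - 100_000n;
	const change = {cell_output: {capacity: intToHex(changeCapacity), lock: lockScript, type: null}, data: "0x"};
	transaction = transaction.update("outputs", (o)=>o.push(change));

	// Add witness placeholders, sanity check, sign, send, and await commitment.
	transaction = addDefaultWitnessPlaceholders(transaction);
	describeTransaction(transaction);
	checkTxFee(transaction);
	const signedTx = signTransaction(transaction, PRIVATE_KEY);
	const txid = await sendTransaction(NODE_URL, signedTx);
	await waitForTransactionConfirmation(NODE_URL, txid);
}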
utils.py
import jsonpickle
import json as serializer
from pkg_resources import Requirement, resource_filename
import os
import csv
from Crypto.Cipher import ARC4
import base64
import socket
import getpass
from solidfire.factory import ElementFactory
from filelock import FileLock
import sys

def kv_string_to_dict(kv_string):
    # Parse a "key1=value1,key2=value2" string into a dict.
    new_dict = {}
    items = kv_string.split(',')
    for item in items:
        kvs = item.split('=')
        new_dict[kvs[0]] = kvs[1]
    return new_dict

def print_result(objs, log, as_json=False, as_pickle=False, depth=None, filter_tree=None):
    # There are 3 acceptable parameter sets to provide:
    # 1. json=True, depth=None, filter_tree=None
    # 2. json=False, depth=#, filter_tree=None
    # 3. json=False, depth=#, filter_tree=acceptable string

    # Error case
    if as_json and (depth is not None or filter_tree is not None):
        log.error("If you choose to print it as json, do not provide a depth or filter. Those are for printing it as a tree.")
        exit()

    """
    SDK1.6 Note: Since print_tree is not supported in 1.6, when both available
    output formats (json and pickle) are set to False, change the default
    output format (pickle) to True.
    """
    if as_json == False and as_pickle == False:
        as_pickle = True

    # If json or pickle output was requested, print it as json and return:
    if as_json == True or as_pickle == True:
        print_result_as_json(objs, as_pickle)
        return

    """
    SDK1.6 Note: Commenting out these lines as print_tree is not supported in 1.6.
    """
    """
    # If we have a filter, apply it.
    if filter_tree is not None:
        try:
            objs_to_print = filter_objects_from_simple_keypaths(objs, filter_tree.split(','))
        except Exception as e:
            log.error(e.args[0])
            exit(1)
    else:
        objs_to_print = objs

    # Set up a default depth
    if depth is None:
        depth = 10

    # Next, print the tree to the appropriate depth
    print_result_as_tree(objs_to_print, depth)
    """
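# Usage sketch for print_result (hypothetical call; `accounts` would be a list
# of SDK result objects and `log` a standard logger):
#
#   print_result(accounts, log, as_json=True)  # plain JSON with pickling metadata stripped
#   print_result(accounts, log)                # jsonpickle form (the SDK 1.6 default)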
def print_result_as_json(objs, pickle=False): #print(jsonpickle.encode(objs)) nestedDict = serializer.loads(jsonpickle.encode(objs)) filteredDict = type(nestedDict)() if(pickle==False): remove_pickling(nestedDict, filteredDict) else: filteredDict = nestedDict print(serializer.dumps(filteredDict,indent=4)) def remove_pickling(nestedDict, filteredDict): if type(nestedDict) is dict: #foreach key, if list, recurse, if dict, recurse, if string recurse unless py/obj is key. for key in nestedDict: if key == "py/object": continue else: filteredDict[key] = type(nestedDict[key])() filteredDict[key] = remove_pickling(nestedDict[key], filteredDict[key]) return filteredDict if type(nestedDict) is list: # foreach item for i in range(len(nestedDict)): filteredDict.append(type(nestedDict[i])()) filteredDict[i] = remove_pickling(nestedDict[i], filteredDict[i]) return filteredDict return nestedDict """ SDK1.6 Note: Commenting this as print_tree is not supported in SDK 1.6. """ def get_result_as_tree(objs, depth=1, currentDepth=0, lastKey = ""): print("print_tree is not supported in SDK1.6") """stringToReturn = "" if(currentDepth > depth): return "<to see more details, increase depth>\n" if(type(objs) is str or type(objs) is bool or type(objs) is int or type(objs) is type(u'') or objs is None or type(objs) is float):# or (sys.version_info[0]<3 and type(objs) is long)): return str(objs) + "\n" if(type(objs) is list): stringToReturn += "\n" for i in range(len(objs)): obj = objs[i] stringToReturn += currentDepth*" "+get_result_as_tree(obj, depth, currentDepth+1, lastKey) return stringToReturn if(isinstance(objs, dict)): stringToReturn += "\n" for key in objs: stringToReturn += currentDepth*" "+key+": "+get_result_as_tree(objs[key], depth, currentDepth+1, key) return stringToReturn if (isinstance(objs, tuple)): return str(objs[0]) + "\n" if(objs is None): return stringToReturn mydict = objs.__dict__ stringToReturn += "\n" for key in mydict: stringToReturn += currentDepth*" " stringToReturn += key+": "+get_result_as_tree(mydict[key], depth, currentDepth+1, key) return stringToReturn """ def filter_objects_from_simple_keypaths(objs, simpleKeyPaths): # First, we assemble the key paths. # They start out like this: # [accouts.username, accounts.initiator_secret.secret, accounts.status] # and become like this: # {"accounts":{"username":True, "initiator_secret":{"secret":True}, "status":True} keyPaths = dict() for simpleKeyPath in simpleKeyPaths: currentLevel = keyPaths keyPathArray = simpleKeyPath.split('.') for i in range(len(keyPathArray)): if(i<(len(keyPathArray) - 1)): if currentLevel.get(keyPathArray[i]) is None: currentLevel[keyPathArray[i]] = dict() else: currentLevel[keyPathArray[i]] = True currentLevel = currentLevel[keyPathArray[i]] # Then we pass it in to filter objects. return filter_objects(objs, keyPaths) # Keypaths is arranged as follows: # it is a nested dict with the order of the keys. def filter_objects(objs, keyPaths): # Otherwise, we keep recursing deeper. # Because there are deeper keys, we know that we can go deeper. # This means we are dealing with either an array or a dict. # If keyPaths looks like this: # {"username": True, "volumes": {"Id": True}} # The keys in this sequence will be username and volumes. # When we recurse into volumes, the keys will be Id. finalFilteredObjects = dict() if keyPaths == True and type(objs) is not list: return objs # If we've found a list, we recurse deeper to pull out the objs. # We do not advance our keyPath recursion because this is just a list. 
if type(objs) is list: # If we have a list of objects, we will need to assemble and return a list of stuff. filteredObjsDict = [None]*len(objs) for i in range(len(objs)): # Each element could be a string, dict, or list. filteredObjsDict[i] = filter_objects(objs[i], keyPaths) return filteredObjsDict dictionaryOfInterest = None if type(objs) is dict: dictionaryOfInterest = objs else: dictionaryOfInterest = objs.__dict__ for key in keyPaths: # If we've found a dict, we recurse deeper to pull out the objs. # Because this is a dict, we must advance our keyPaths recursion. # Consider the following example: if key not in dictionaryOfInterest: raise ValueError("'"+key+"' is not a valid key for this level. Valid keys are: "+','.join(dictionaryOfInterest.keys())) finalFilteredObjects[key] = filter_objects(dictionaryOfInterest[key], keyPaths[key]) return finalFilteredObjects def print_result_as_table(objs, keyPaths): filteredDictionary = filter_objects(objs, keyPaths) def print_result_as_tree(objs, depth=1): print(get_result_as_tree(objs, depth)) def establish_connection(ctx): # Verify that the mvip does not contain the port number: if ctx.mvip and ":" in ctx.mvip: ctx.logger.error('Please provide the port using the port parameter.') exit(1) cfg = None # Arguments take precedence regardless of env settings if ctx.mvip: if ctx.username is None: ctx.username = getpass.getpass("Username:") if ctx.password is None: ctx.password = getpass.getpass("Password:") cfg = {'mvip': ctx.mvip, 'username': "b'"+encrypt(ctx.username).decode('utf-8')+"'", 'password': "b'"+encrypt(ctx.password).decode('utf-8')+"'", 'port': ctx.port, 'url': 'https://%s:%s' % (ctx.mvip, ctx.port), 'version': ctx.version, 'verifyssl': ctx.verifyssl, 'timeout': ctx.timeout} try: ctx.element = ElementFactory.create(cfg["mvip"],decrypt(cfg["username"]),decrypt(cfg["password"]),port=cfg["port"],version=cfg["version"],verify_ssl=cfg["verifyssl"],timeout=cfg["timeout"]) ctx.version = ctx.element._api_version cfg["version"] = ctx.element._api_version except Exception as e: ctx.logger.error(e.__str__()) exit(1) # If someone accidentally passed in an argument, but didn't specify everything, throw an error. elif ctx.username or ctx.password: ctx.logger.error("In order to manually connect, please provide an mvip, a username, AND a password") # If someone asked for a given connection or we need to default to using the connection at index 0 if it exists: else: if ctx.connectionindex is None and ctx.name is None: cfg = get_default_connection(ctx) elif ctx.connectionindex is not None: connections = get_connections(ctx) if int(ctx.connectionindex) > (len(connections)-1) or int(ctx.connectionindex) < (-len(connections)): ctx.logger.error("Connection "+str(ctx.connectionindex)+" Please provide an index between "+str(-len(connections))+" and "+str(len(connections)-1)) exit(1) cfg = connections[ctx.connectionindex] elif ctx.name is not None: connections = get_connections(ctx) filteredCfg = [connection for connection in connections if connection["name"] == ctx.name] if(len(filteredCfg) > 1): ctx.logger.error("Your connections.csv file has become corrupted. There are two connections of the same name.") exit() if(len(filteredCfg) < 1): ctx.logger.error("Could not find a connection named "+ctx.name) exit() cfg = filteredCfg[0] # If we managed to find the connection we were looking for, we must try to establish the connection. 
if cfg is not None: # Finally, we need to establish our connection via elementfactory: try: if int(cfg["port"]) != 443: address = cfg["mvip"] + ":" + cfg["port"] else: address = cfg["mvip"] ctx.element = ElementFactory.create(address, decrypt(cfg["username"]), decrypt(cfg["password"]), cfg["version"], verify_ssl=cfg["verifyssl"]) if int(cfg["timeout"]) != 30: ctx.element.timeout(cfg["timeout"]) except Exception as e: ctx.logger.error(e.__str__()) ctx.logger.error("The connection is corrupt. Run 'sfcli connection prune' to try and remove all broken connections or use 'sfcli connection remove -n name'") ctx.logger.error(cfg) exit(1) # If we want the json output directly from the source, we'll have to override the send request method in the sdk: # This is so that we can circumvent the python objects and get exactly what the json-rpc returns. if ctx.json and ctx.element: def new_send_request(*args, **kwargs): return ctx.element.__class__.__bases__[0].send_request(ctx.element, return_response_raw=True, *args, **kwargs) ctx.element.send_request = new_send_request # The only time it is none is when we're asking for help or we're trying to store a connection. # If that's not what we're doing, we catch it later. if cfg is not None: cfg["port"] = int(cfg["port"]) ctx.cfg = cfg cfg["name"] = cfg.get("name", "default") if not ctx.nocache: write_default_connection(ctx, cfg) if ctx.element is None: ctx.logger.error("You must establish at least one connection and specify which you intend to use.") exit() # this needs to be atomic. def get_connections(ctx): connectionsCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "connections.csv") connectionsLock = resource_filename(Requirement.parse("solidfire-cli"), "connectionsLock") if os.path.exists(connectionsCsvLocation): try: with FileLock(connectionsLock): with open(connectionsCsvLocation, 'r') as connectionFile: connections = list(csv.DictReader(connectionFile, delimiter=',')) except Exception as e: ctx.logger.error("Problem reading "+connectionsCsvLocation+" because: "+str(e.args)+" Try changing the permissions of that file.") exit(1) else: connections = [] for connection in connections: connection["version"] = float(connection["version"]) if connection.get("verifyssl") == "True": connection["verifyssl"] = True else: connection["verifyssl"] = False return connections def write_connections(ctx, connections): try: connectionsCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "connections.csv") connectionsLock = resource_filename(Requirement.parse("solidfire-cli"), "connectionsLock") with open(connectionsCsvLocation, 'w') as f: with FileLock(connectionsLock): w = csv.DictWriter(f, ["name","mvip","port","username","password","version","url","verifyssl","timeout"], lineterminator='\n') w.writeheader() for connection in connections: if connection is not None: w.writerow(connection) except Exception as e: ctx.logger.error("Problem writing "+ connectionsCsvLocation + " " + str(e.args)+" Try changing the permissions of that file.") exit(1) def get_default_connection(ctx): connectionCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "default_connection.csv") if os.path.exists(connectionCsvLocation): defaultLockLocation = resource_filename(Requirement.parse("solidfire-cli"), "defaultLock") try: with FileLock(defaultLockLocation): with open(connectionCsvLocation) as connectionFile: connection = list(csv.DictReader(connectionFile, delimiter=',')) except Exception as e: ctx.logger.error("Problem reading 
"+connectionCsvLocation+" because: "+str(e.args)+" Try changing the permissions of that file or specifying credentials.") exit(1) if len(connection)>0: connection[0]["version"] = float(connection[0]["version"]) if(connection[0]["verifyssl"] == "True"): connection[0]["verifyssl"] = True else: connection[0]["verifyssl"] = False return connection[0] else: os.remove(defaultLockLocation) ctx.logger.error("Please provide connection information. There is no connection info in cache at this time.") exit(1) else: ctx.logger.error("Please provide connection information. There is no connection info in cache at this time.") exit(1) def write_default_connection(ctx, connection): connectionCsvLocation = resource_filename(Requirement.parse("solidfire-cli"), "default_connection.csv") try: defaultLockLocation = resource_filename(Requirement.parse("solidfire-cli"), "defaultLock") with FileLock(defaultLockLocation): with open(connectionCsvLocation, 'w') as f: w = csv.DictWriter(f, ["name", "mvip", "port", "username", "password", "version", "url", "verifyssl", "timeout"], lineterminator='\n') w.writeheader() w.writerow(connection) except Exception as e: ctx.logger.warning("Problem writing "+ connectionCsvLocation + " " + str(e.args)+" Try using changing the permissions of that file or using the --nocache flag.") # WARNING! This doesn't actually give us total security. It only gives us obscurity. def encrypt(sensitive_data): cipher = ARC4.new(socket.gethostname().encode('utf-8') + "SOLIDFIRE".encode('utf-8')) encoded = base64.b64encode(cipher.encrypt(sensitive_data.encode('utf-8'))) return encoded def decrypt(encoded_sensitive_data): cipher = ARC4.new(socket.gethostname().encode('utf-8') + "SOLIDFIRE".encode('utf-8')) decoded = cipher.decrypt(base64.b64decode(encoded_sensitive_data[2:-1])) return decoded.decode('utf-8')