file_name: large_string (lengths 4 to 140)
prefix: large_string (lengths 0 to 12.1k)
suffix: large_string (lengths 0 to 12k)
middle: large_string (lengths 0 to 7.51k)
fim_type: large_string (4 classes)
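The rows below are fill-in-the-middle (FIM) samples: each source file is split into a prefix, a hidden middle, and a suffix, and fim_type records how the split was chosen (random_line_split, identifier_name, identifier_body, conditional_block). A minimal sketch of how one such record could be assembled is shown here; the helper name is hypothetical and not the pipeline that produced this data.

```python
import random

def make_random_line_split_sample(file_name: str, source: str) -> dict:
    """Build one FIM record by cutting the file at a random character offset.

    Mirrors the 'random_line_split' rows below: everything before the cut is
    the prefix, a short span after it is the middle to be predicted, and the
    rest is the suffix. Field names follow the schema above.
    """
    cut = random.randrange(len(source))
    middle_len = random.randrange(1, min(200, len(source) - cut) + 1)
    return {
        "file_name": file_name,
        "prefix": source[:cut],
        "middle": source[cut:cut + middle_len],
        "suffix": source[cut + middle_len:],
        "fim_type": "random_line_split",
    }

# Usage: sample = make_random_line_split_sample("index.js", open("index.js").read())
```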
index.js
Updated(config) { let resetConnection = false if (this.config.host != config.host) { resetConnection = true } this.config = config this.updateActions() this.updateFeedbacks() this.updatePresets() this.updateVariables() if (resetConnection === true || this.socket === undefined) { this.initTCP() } } /** * Clean up the instance before it is destroyed. * * @access public * @since 1.0.0 */ async destroy() { if (this.socket !== undefined) { this.socket.destroy() } if (this.pollTimer !== undefined) { clearInterval(this.pollTimer) } this.log('debug', 'destroy', this.id) } /** * Creates the configuration fields for web config. * * @returns {Array} the config fields * @access public * @since 1.0.0 */ getConfigFields() { return [ { type: 'text', id: 'info', width: 12, label: 'Information', value: 'This module will connect to any Vaddio PTZ Camera via telnet.', }, { type: 'textinput', id: 'host', label: 'Camera IP', width: 6, regex: Regex.IP, }, { type: 'textinput', id: 'username', label: 'Username', width: 6, default: 'admin', regex: Regex.SOMETHING, }, { type: 'textinput', id: 'password', label: 'Password', width: 6, default: 'password', regex: Regex.SOMETHING, }, { type: 'checkbox', id: 'pollingOn', label: 'Enable Status Polling?', width: 2, default: true, }, { type: 'number', id: 'pollingInterval', label: 'Polling Interval (in s)', width: 4, min: 1, max: 999, default: 5, required: true, }, { type: 'checkbox', id: 'storeWithoutSpeed', label: 'Store presets without setting any speed.', tooltip: 'Useful for older models/firmware not supporting it.', width: 4, default: false, }, ] } /** * Main initialization function called once the module * is OK to start doing things. * * @param {Object} config - the configuration * @access public * @since 1.0.0 */ async init(config) { this.config = config this.deviceName = '' this.loggedIn = false this.okToSend = false this.catchUp = false this.nextCommand = '' this.lastPoll = 0 this.pollTimer = null this.panSpeed = 12 this.tiltSpeed = 10 this.zoomSpeed = 3 this.focusSpeed = 5 this.state = { auto_focus: 'on', // auto_iris: 'on', // auto_white_balance: 'on', // backlight_compensation: 'off', // blue_gain: 128, chroma: 7, detail: 7, gain: 0, gamma: 0, iris: 6, led: 'on', mute: 'off', red_gain: 128, standby: 'off', wide_dynamic_range: 'off', } this.updateActions() this.updateVariables() this.updateFeedbacks() this.updatePresets() this.initTCP() } /** * INTERNAL: use setup data to initalize the tcp socket object. 
* * @access protected * @since 1.0.0 */ initTCP() { this.receiveBuffer = '' if (this.socket !== undefined) { this.socket.destroy() delete this.socket } if (this.pollTimer !== undefined) { clearInterval(this.pollTimer) } if (this.config.port === undefined) { this.config.port = 23 } if (this.config.host) { this.socket = new TCPHelper(this.config.host, this.config.port) this.socket.on('status_change', (status, message) => { this.updateStatus(status, message) }) this.socket.on('error', (err) => { this.log('error', 'Network error: ' + err.message) }) this.socket.on('connect', () => { this.log('debug', 'Connected') }) this.socket.on('disconnect', () => { this.log('debug', 'Disconnected') this.loggedIn = false this.okToSend = false if (this.pollTimer !== undefined) { clearInterval(this.pollTimer) } }) // separate buffered stream into lines with responses this.socket.on('data', (chunk) => { let i = 0, line = '', offset = 0 this.receiveBuffer += chunk // Process lines while ((i = this.receiveBuffer.indexOf('\n', offset)) !== -1) { line = this.receiveBuffer.substr(offset, i - offset) offset = i + 1 this.socket.emit('receiveline', line.toString()) } this.receiveBuffer = this.receiveBuffer.substr(offset) // Read current line if (this.receiveBuffer.match(/[L|l]ogin:/)) { this.receiveBuffer = '' this.socket.send(this.config.username + '\r\n') } else if (this.receiveBuffer.match(/[P|p]assword:/)) { this.receiveBuffer = '' this.socket.send(this.config.password + '\r\n') } else if (this.receiveBuffer.match(/>/)) { this.loggedIn = true if (this.deviceName == '') { this.receiveBuffer = '' this.socket.send('version\r\n') this.catchUp = true this.lastPoll = -1 } else if (this.catchUp == true) { let thisPoll = this.lastPoll + 1 if (thisPoll < PollCommands.length) { this.socket.send(PollCommands[thisPoll] + '\r\n') this.lastPoll = thisPoll } else { this.catchUp = false if (this.config.pollingOn === true) { this.pollTimer = setInterval(this.sendPollCommand.bind(this), this.config.pollingInterval * 1000) } } } else { this.okToSend = true this.sendCommand() } } }) this.socket.on('receiveline', (line) => { if (this.loggedIn == false) { this.processLogin(line) } else { this.processCameraInformation(line) } }) } } /** * INTERNAL: Routes incoming data to the appropriate function for processing. * * @param {Object} data - the collected data * @access protected * @since 1.0.0 */ processCameraInformation(data) { if (data.match(/System Version/)) { this.deviceName = data.substring(data.indexOf('Robo')) this.log('info', 'Connected to a ' + this.deviceName) this.sendCommand('camera ccu get all') } else if (data.startsWith('auto_focus')) { data = data.replace('auto_focus:', '').trim() this.state.auto_focus = data this.checkFeedbacks('auto_focus') } else if (data.startsWith('auto_iris')) { data = data.replace('auto_iris', '').trim() this.state.auto_iris = data this.checkFeedbacks('auto_iris') } else if (data.startsWith('auto_white_balance')) { data = data.replace('auto_white_balance', '').trim() this.state.auto_white_balance = data this.checkFeedbacks('auto_white_balance')
} else if (data.startsWith('backlight_compensation')) { data = data.replace('backlight_compensation', '').trim() this.state.backlight_compensation = data this.checkFeedbacks('backlight_compensation') } else if (data.startsWith('blue_gain')) { data = data.replace('blue_gain', '').trim() this.state.blue_gain = parseInt(data) this.setVariableValues({ blue_gain: this.state.blue_gain }) } else if (data.startsWith('chroma')) { data = data.replace('chroma', '').trim() this.state.chroma = parseInt(data) this.setVariableValues({ chroma: this.state.chroma }) } else if (data.startsWith('detail')) { data = data.replace('detail', '').trim() this.state.detail = parseInt(data) this.setVariableValues({ detail: this.state.detail }) } else if (data.startsWith('gain')) { data = data.replace('gain', '').
random_line_split
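The 'data' handler in the index.js sample above accumulates incoming chunks in receiveBuffer, emits complete lines on '\n', and keeps the trailing partial line so the login/password/'>' prompts can be matched before a newline ever arrives. A rough Python sketch of that buffering pattern, with an on_line callback standing in for the 'receiveline' event; this is an illustration of the idea, not the Companion module's API.

```python
class LineBuffer:
    """Accumulate stream chunks and hand off complete '\n'-terminated lines."""

    def __init__(self, on_line):
        self.buffer = ""
        self.on_line = on_line  # called once per complete line

    def feed(self, chunk: str) -> str:
        """Add a chunk, emit finished lines, return the unfinished remainder."""
        self.buffer += chunk
        while (i := self.buffer.find("\n")) != -1:
            self.on_line(self.buffer[:i])
            self.buffer = self.buffer[i + 1:]
        # Whatever is left is a partial line, e.g. a 'login:' prompt with no newline.
        return self.buffer

# Usage:
# buf = LineBuffer(print)
# remainder = buf.feed("OK\r\nlogin:")   # prints "OK\r", remainder is "login:"
```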
index.js
Updated(config) { let resetConnection = false if (this.config.host != config.host) { resetConnection = true } this.config = config this.updateActions() this.updateFeedbacks() this.updatePresets() this.updateVariables() if (resetConnection === true || this.socket === undefined) { this.initTCP() } } /** * Clean up the instance before it is destroyed. * * @access public * @since 1.0.0 */ async destroy() { if (this.socket !== undefined) { this.socket.destroy() } if (this.pollTimer !== undefined) { clearInterval(this.pollTimer) } this.log('debug', 'destroy', this.id) } /** * Creates the configuration fields for web config. * * @returns {Array} the config fields * @access public * @since 1.0.0 */ getConfigFields() { return [ { type: 'text', id: 'info', width: 12, label: 'Information', value: 'This module will connect to any Vaddio PTZ Camera via telnet.', }, { type: 'textinput', id: 'host', label: 'Camera IP', width: 6, regex: Regex.IP, }, { type: 'textinput', id: 'username', label: 'Username', width: 6, default: 'admin', regex: Regex.SOMETHING, }, { type: 'textinput', id: 'password', label: 'Password', width: 6, default: 'password', regex: Regex.SOMETHING, }, { type: 'checkbox', id: 'pollingOn', label: 'Enable Status Polling?', width: 2, default: true, }, { type: 'number', id: 'pollingInterval', label: 'Polling Interval (in s)', width: 4, min: 1, max: 999, default: 5, required: true, }, { type: 'checkbox', id: 'storeWithoutSpeed', label: 'Store presets without setting any speed.', tooltip: 'Useful for older models/firmware not supporting it.', width: 4, default: false, }, ] } /** * Main initialization function called once the module * is OK to start doing things. * * @param {Object} config - the configuration * @access public * @since 1.0.0 */ async init(config) { this.config = config this.deviceName = '' this.loggedIn = false this.okToSend = false this.catchUp = false this.nextCommand = '' this.lastPoll = 0 this.pollTimer = null this.panSpeed = 12 this.tiltSpeed = 10 this.zoomSpeed = 3 this.focusSpeed = 5 this.state = { auto_focus: 'on', // auto_iris: 'on', // auto_white_balance: 'on', // backlight_compensation: 'off', // blue_gain: 128, chroma: 7, detail: 7, gain: 0, gamma: 0, iris: 6, led: 'on', mute: 'off', red_gain: 128, standby: 'off', wide_dynamic_range: 'off', } this.updateActions() this.updateVariables() this.updateFeedbacks() this.updatePresets() this.initTCP() } /** * INTERNAL: use setup data to initalize the tcp socket object. * * @access protected * @since 1.0.0 */
() { this.receiveBuffer = '' if (this.socket !== undefined) { this.socket.destroy() delete this.socket } if (this.pollTimer !== undefined) { clearInterval(this.pollTimer) } if (this.config.port === undefined) { this.config.port = 23 } if (this.config.host) { this.socket = new TCPHelper(this.config.host, this.config.port) this.socket.on('status_change', (status, message) => { this.updateStatus(status, message) }) this.socket.on('error', (err) => { this.log('error', 'Network error: ' + err.message) }) this.socket.on('connect', () => { this.log('debug', 'Connected') }) this.socket.on('disconnect', () => { this.log('debug', 'Disconnected') this.loggedIn = false this.okToSend = false if (this.pollTimer !== undefined) { clearInterval(this.pollTimer) } }) // separate buffered stream into lines with responses this.socket.on('data', (chunk) => { let i = 0, line = '', offset = 0 this.receiveBuffer += chunk // Process lines while ((i = this.receiveBuffer.indexOf('\n', offset)) !== -1) { line = this.receiveBuffer.substr(offset, i - offset) offset = i + 1 this.socket.emit('receiveline', line.toString()) } this.receiveBuffer = this.receiveBuffer.substr(offset) // Read current line if (this.receiveBuffer.match(/[L|l]ogin:/)) { this.receiveBuffer = '' this.socket.send(this.config.username + '\r\n') } else if (this.receiveBuffer.match(/[P|p]assword:/)) { this.receiveBuffer = '' this.socket.send(this.config.password + '\r\n') } else if (this.receiveBuffer.match(/>/)) { this.loggedIn = true if (this.deviceName == '') { this.receiveBuffer = '' this.socket.send('version\r\n') this.catchUp = true this.lastPoll = -1 } else if (this.catchUp == true) { let thisPoll = this.lastPoll + 1 if (thisPoll < PollCommands.length) { this.socket.send(PollCommands[thisPoll] + '\r\n') this.lastPoll = thisPoll } else { this.catchUp = false if (this.config.pollingOn === true) { this.pollTimer = setInterval(this.sendPollCommand.bind(this), this.config.pollingInterval * 1000) } } } else { this.okToSend = true this.sendCommand() } } }) this.socket.on('receiveline', (line) => { if (this.loggedIn == false) { this.processLogin(line) } else { this.processCameraInformation(line) } }) } } /** * INTERNAL: Routes incoming data to the appropriate function for processing. 
* * @param {Object} data - the collected data * @access protected * @since 1.0.0 */ processCameraInformation(data) { if (data.match(/System Version/)) { this.deviceName = data.substring(data.indexOf('Robo')) this.log('info', 'Connected to a ' + this.deviceName) this.sendCommand('camera ccu get all') } else if (data.startsWith('auto_focus')) { data = data.replace('auto_focus:', '').trim() this.state.auto_focus = data this.checkFeedbacks('auto_focus') } else if (data.startsWith('auto_iris')) { data = data.replace('auto_iris', '').trim() this.state.auto_iris = data this.checkFeedbacks('auto_iris') } else if (data.startsWith('auto_white_balance')) { data = data.replace('auto_white_balance', '').trim() this.state.auto_white_balance = data this.checkFeedbacks('auto_white_balance') } else if (data.startsWith('backlight_compensation')) { data = data.replace('backlight_compensation', '').trim() this.state.backlight_compensation = data this.checkFeedbacks('backlight_compensation') } else if (data.startsWith('blue_gain')) { data = data.replace('blue_gain', '').trim() this.state.blue_gain = parseInt(data) this.setVariableValues({ blue_gain: this.state.blue_gain }) } else if (data.startsWith('chroma')) { data = data.replace('chroma', '').trim() this.state.chroma = parseInt(data) this.setVariableValues({ chroma: this.state.chroma }) } else if (data.startsWith('detail')) { data = data.replace('detail', '').trim() this.state.detail = parseInt(data) this.setVariableValues({ detail: this.state.detail }) } else if (data.startsWith('gain')) { data = data.replace('gain',
initTCP
identifier_name
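The partial-buffer checks in initTCP respond to the camera's telnet prompts: send the username at 'login:', the password at 'password:', and treat a '>' prompt as a ready shell. A small sketch of that prompt-driven login step, with a hypothetical send() callable standing in for the socket. (The original regex character class `[L|l]` also matches a literal '|', which is harmless but `[Ll]` is what was meant.)

```python
import re

def handle_prompt(partial_line, config, send):
    """React to an unterminated prompt and report which prompt was seen."""
    if re.search(r"[Ll]ogin:", partial_line):
        send(config["username"] + "\r\n")
        return "login"
    if re.search(r"[Pp]assword:", partial_line):
        send(config["password"] + "\r\n")
        return "password"
    if ">" in partial_line:
        return "ready"  # logged in, safe to send the next queued command
    return None

# Usage: handle_prompt("login:", {"username": "admin", "password": "pw"}, print)
```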
index.js
/** * Process an updated configuration array. * * @param {Object} config - the new configuration * @access public * @since 1.0.0 */ async configUpdated(config) { let resetConnection = false if (this.config.host != config.host) { resetConnection = true } this.config = config this.updateActions() this.updateFeedbacks() this.updatePresets() this.updateVariables() if (resetConnection === true || this.socket === undefined) { this.initTCP() } } /** * Clean up the instance before it is destroyed. * * @access public * @since 1.0.0 */ async destroy() { if (this.socket !== undefined) { this.socket.destroy() } if (this.pollTimer !== undefined) { clearInterval(this.pollTimer) } this.log('debug', 'destroy', this.id) } /** * Creates the configuration fields for web config. * * @returns {Array} the config fields * @access public * @since 1.0.0 */ getConfigFields() { return [ { type: 'text', id: 'info', width: 12, label: 'Information', value: 'This module will connect to any Vaddio PTZ Camera via telnet.', }, { type: 'textinput', id: 'host', label: 'Camera IP', width: 6, regex: Regex.IP, }, { type: 'textinput', id: 'username', label: 'Username', width: 6, default: 'admin', regex: Regex.SOMETHING, }, { type: 'textinput', id: 'password', label: 'Password', width: 6, default: 'password', regex: Regex.SOMETHING, }, { type: 'checkbox', id: 'pollingOn', label: 'Enable Status Polling?', width: 2, default: true, }, { type: 'number', id: 'pollingInterval', label: 'Polling Interval (in s)', width: 4, min: 1, max: 999, default: 5, required: true, }, { type: 'checkbox', id: 'storeWithoutSpeed', label: 'Store presets without setting any speed.', tooltip: 'Useful for older models/firmware not supporting it.', width: 4, default: false, }, ] } /** * Main initialization function called once the module * is OK to start doing things. * * @param {Object} config - the configuration * @access public * @since 1.0.0 */ async init(config) { this.config = config this.deviceName = '' this.loggedIn = false this.okToSend = false this.catchUp = false this.nextCommand = '' this.lastPoll = 0 this.pollTimer = null this.panSpeed = 12 this.tiltSpeed = 10 this.zoomSpeed = 3 this.focusSpeed = 5 this.state = { auto_focus: 'on', // auto_iris: 'on', // auto_white_balance: 'on', // backlight_compensation: 'off', // blue_gain: 128, chroma: 7, detail: 7, gain: 0, gamma: 0, iris: 6, led: 'on', mute: 'off', red_gain: 128, standby: 'off', wide_dynamic_range: 'off', } this.updateActions() this.updateVariables() this.updateFeedbacks() this.updatePresets() this.initTCP() } /** * INTERNAL: use setup data to initalize the tcp socket object. 
* * @access protected * @since 1.0.0 */ initTCP() { this.receiveBuffer = '' if (this.socket !== undefined) { this.socket.destroy() delete this.socket } if (this.pollTimer !== undefined) { clearInterval(this.pollTimer) } if (this.config.port === undefined) { this.config.port = 23 } if (this.config.host) { this.socket = new TCPHelper(this.config.host, this.config.port) this.socket.on('status_change', (status, message) => { this.updateStatus(status, message) }) this.socket.on('error', (err) => { this.log('error', 'Network error: ' + err.message) }) this.socket.on('connect', () => { this.log('debug', 'Connected') }) this.socket.on('disconnect', () => { this.log('debug', 'Disconnected') this.loggedIn = false this.okToSend = false if (this.pollTimer !== undefined) { clearInterval(this.pollTimer) } }) // separate buffered stream into lines with responses this.socket.on('data', (chunk) => { let i = 0, line = '', offset = 0 this.receiveBuffer += chunk // Process lines while ((i = this.receiveBuffer.indexOf('\n', offset)) !== -1) { line = this.receiveBuffer.substr(offset, i - offset) offset = i + 1 this.socket.emit('receiveline', line.toString()) } this.receiveBuffer = this.receiveBuffer.substr(offset) // Read current line if (this.receiveBuffer.match(/[L|l]ogin:/)) { this.receiveBuffer = '' this.socket.send(this.config.username + '\r\n') } else if (this.receiveBuffer.match(/[P|p]assword:/)) { this.receiveBuffer = '' this.socket.send(this.config.password + '\r\n') } else if (this.receiveBuffer.match(/>/)) { this.loggedIn = true if (this.deviceName == '') { this.receiveBuffer = '' this.socket.send('version\r\n') this.catchUp = true this.lastPoll = -1 } else if (this.catchUp == true) { let thisPoll = this.lastPoll + 1 if (thisPoll < PollCommands.length) { this.socket.send(PollCommands[thisPoll] + '\r\n') this.lastPoll = thisPoll } else { this.catchUp = false if (this.config.pollingOn === true) { this.pollTimer = setInterval(this.sendPollCommand.bind(this), this.config.pollingInterval * 1000) } } } else { this.okToSend = true this.sendCommand() } } }) this.socket.on('receiveline', (line) => { if (this.loggedIn == false) { this.processLogin(line) } else { this.processCameraInformation(line) } }) } } /** * INTERNAL: Routes incoming data to the appropriate function for processing. * * @param {Object} data - the collected data * @access protected * @since 1.0.0 */ processCameraInformation(data) { if (data.match(/System Version/)) { this.deviceName = data.substring(data.indexOf('Robo')) this.log('info', 'Connected to a ' + this.deviceName) this.sendCommand('camera ccu get all') } else if (data.startsWith('auto_focus')) { data = data.replace('auto_focus:', '').trim() this.state.auto_focus = data this.checkFeedbacks('auto_focus') } else if (data.startsWith('auto_iris')) { data = data.replace('auto_iris', '').trim() this.state.auto_iris = data this.checkFeedbacks('auto_iris') } else if (data.startsWith('auto_white_balance')) { data = data.replace('auto_white_balance', '').trim() this.state.auto_white_balance = data this.checkFeedbacks('auto_white_balance') } else if (data.startsWith('backlight_compensation')) { data = data.replace('backlight_compensation', '').trim() this.state.backlight_compensation = data this.checkFeedbacks('backlight_compensation') } else if (data.startsWith('blue_gain')) { data = data.replace('blue_gain', '').trim() this.state.blue_gain = parseInt(data) this.setVariableValues({ blue_gain: this.state.blue_gain }) } else if (data.startsWith('chroma')) {
{ super(internal) this.updateActions = updateActions.bind(this) this.updateFeedbacks = updateFeedbacks.bind(this) this.updatePresets = updatePresets.bind(this) this.updateVariables = updateVariables.bind(this) }
identifier_body
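processCameraInformation strips the setting name from each 'camera ccu get all' response line and stores the remainder in the state object, as a string for on/off settings and as an integer for numeric ones. A hedged Python sketch of that parsing, using a hand-picked subset of the setting names seen above rather than the module's full table.

```python
NUMERIC_SETTINGS = {"blue_gain", "red_gain", "chroma", "detail", "gain", "gamma", "iris"}
TOGGLE_SETTINGS = {"auto_focus", "auto_iris", "auto_white_balance",
                   "backlight_compensation", "wide_dynamic_range", "led", "mute", "standby"}

def parse_ccu_line(line: str, state: dict) -> None:
    """Update state in place from one 'name value' (or 'name: value') response line."""
    for name in NUMERIC_SETTINGS | TOGGLE_SETTINGS:
        if line.startswith(name):
            value = line[len(name):].lstrip(":").strip()
            state[name] = int(value) if name in NUMERIC_SETTINGS else value
            return

# Usage:
# state = {}
# parse_ccu_line("blue_gain 128", state)   # state == {"blue_gain": 128}
# parse_ccu_line("auto_focus: on", state)  # state["auto_focus"] == "on"
```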
court.py
auschen kommt von __sensor_y()) :return float, skalierter Y-Anteil vom Ortsvektor """ return self.__sensor_y() / (self.y_max / 2.0) - 1.0 def scaled_sensor_bat(self, player): """ Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1 mit Rauschen zurück (Rauschen kommt von __sensor_bat()) :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return float, skalierte Schlägerposition von Spieler[Player] """ return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0 def hitbat(self, player): """ Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug. :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player] """ return self._bathit[player] def scaled_sensor_err(self, player): """ Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück. :pre hitbat(player) or out(player) :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return float, skalierter Error von Spieler[Player] """ return (self.poi[player] - self.__sensor_bat(player) ) / self.y_max def out(self, player): """ Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht. :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return Bool, Ball hat die Linie von Spieler[Player] überschritten (True) oder nicht überschritten (False) """ return self._out[player] def getpoints(self, player): """ Liefert die Punktanzahl von Spieler[Player] :param player: Punktzahl von Spieler 0 oder 1 :type player: Int (0 oder 1) :return int, Punktzahl des Spielers """ return self.Points[player] def tick(self): """ Berechnet einen Tick/Spielzug, hierbei wird der Ball bewegt, die Überschreitung einer der Torauslinien oder die Kollision mit einem Schläger auf False initialisiert, außerdem die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und her gespielt haben ohne Tor (Endlosspiel verhindern). Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen Bewegungs-/Richtungsvektor ändern muss. Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder ob ein Schläger den Ball getroffen hat. :return void """ ######################### ### Initialisierungen ### ######################### # Setzt den Ball eine Position weiter. # Die Schrittweite wird durch den Faktor self.speed gesetzt, der den Einheitsvektor dirVec skaliert self.posVec += self.dirVec * self.speed # Hat der Schläger den Ball getroffen? # bathit[0] -> linker Schläger # bathit[1] -> rechter Schläger self._bathit = [False, False] self._out = [False, False] ################### ### Anweisungen ### ################### # Falls 10 oder mehr Treffer also jeder mindestens 5x getroffen hat, dann wird abgebrochen # und neu gestartet, damit die aktuelle Endlosschleife unterbrochen wird. Hier würde das KNN # sonst nichts Neues mehr lernen. 
if self.bouncecount > 10: self.__initvectors() # Abprallen an der Unterseite bei Y = 0 if self.posVec[1] < 0: self.posVec[1] *= -1.0 self.dirVec[1] *= -1.0 # Abprallen an der Oberseite bei Y = y_max (hier vermutlich 9) if self.posVec[1] > self.y_max: self.posVec[1] = 2 * self.y_max - self.posVec[1] self.dirVec[1] *= -1.0 # Prüfe auf Treffer auf der linken Seite (Spieler 0) self.__tickBounceLeft() # Prüfe auf Treffer auf der rechten Seite (Spieler 1) self.__tickBounceRight() def __tickBounceLeft(self): """ Checken, ob der Ball links bei Spieler 0 aus dem Spielfeld fliegt oder vom Schläger getroffen wird :return: void """ # Wenn der Ortsvektor kleiner ist als 0, dann hat er die Torauslinie von Spieler 0 überschritten if self.posVec[0] < 0: # Berechne den theoretischen, genauen Aufprallpunkt (poi: PointOfImpact) # auf der Linie von Spieler 0 (Y = 0) factor = (0 - self.posVec[0]) / self.dirVec[0] poi = self.posVec + (factor * self.dirVec) self.poi[0] = poi[1] # Speichere diesen für eine evtl. spätere Nutzung von z.B. scaled_sensor_err(player) # Prüfe ob der Ball dann den Schläger getroffen hätte, wenn ja, dann... if (poi[1] > self.bat[0] - self.batsize) and (poi[1] < self.bat[0] + self.batsize): self._bathit[0] = True # ... vermerke dies für z.B. hitbat(player) else: # wenn jedoch nicht, dann... self.Points[1] += 1 # ... Punkte von Spieler 1 (rechts) erhöhen self._out[0] = True # und merken, das der Ball außerhalb des Spielfelds # war, z.B. für out(player) # Ball abprallen lassen, falls: # -> Infinite true ist, also das Spiel endlos dauern soll ohne Zurücksetzen der Ballposition # -> Der Schläger den Ball getroffen hat if self.infinite or self._bathit[0]: self.posVec[0] *= -1.0 # Einfallswinklel = Ausfallswinkel self.dirVec[0] *= -1.0 self.bouncecount += 1 # Treffer vermerken, um bei zu vielen Treffern dieses neu zu starten else: self.__initvectors() # Kein Treffer, somit das Spiel neu Initialisieren. self.bouncecount = 0 def __tickBounceRight(self): """Checken, ob der Ball rechts bei Spieler 1 aus dem Spielfeld fliegt oder vom Schläger getroffen wird :return: void """ # Wenn der Ortsvektor größer ist als x_max (hier vermutlich 16), dann hat er die Torauslinie # von Spieler 1 überschritten if self.posVec[0] > self.x_max: # Berechne den theoretischen, genauen Aufprallpunkt (poi: PointOfImpact) auf der Linie von # Spieler (Y = self.x_max) factor = (self.x_max - self.posVec[0]) / self.dirVec[0] poi = self.posVec + (factor * self.dirVec) self.poi[1] = poi[1] # Speichere diesen für eine evtl. spätere Nutzung von z.B. scaled_sensor_err(player) # Prüfe ob der Ball dann den Schläger getroffen hätte, wenn ja, dann... if poi[1] > self.bat[1] - self.batsize and poi[1] < self.bat[1] + self.batsize: self._bathit[1] = True # ... vermerke dies für z.B. hitbat(player) else: # wenn jedoch nicht, dann... self.Points[0] += 1 # ... Punkte von Spieler 0 (links) erhöhen self._out[1] = True # und merken, das d
er Ball außerhalb des Spielfelds # war, z.B. für out(player) # Ball abprallen lassen, falls: # -> Das infinite true ist, also das Spiel endlos dauern soll ohn
conditional_block
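In the court.py rows, tick() reflects the ball off the top and bottom walls by mirroring the position around the boundary and flipping the y component of the direction vector. A compact sketch of that reflection step, assuming the same NumPy position/direction pair the court code uses.

```python
import numpy as np

def bounce_off_walls(pos: np.ndarray, direction: np.ndarray, y_max: float) -> None:
    """Mirror the ball at y = 0 and y = y_max and flip its vertical direction."""
    if pos[1] < 0:
        pos[1] *= -1.0                # reflect below the bottom edge back inside
        direction[1] *= -1.0
    elif pos[1] > y_max:
        pos[1] = 2 * y_max - pos[1]   # reflect above the top edge back inside
        direction[1] *= -1.0

# Usage:
# pos, d = np.array([3.0, -0.4]), np.array([0.7, -0.7])
# bounce_off_walls(pos, d, 9.0)   # pos[1] -> 0.4, d[1] -> 0.7
```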
court.py
0] # Der "Einschlagspunkt" des Balles auf der (Toraus-)Linie, wird erst nach einem Aufprall # mit konkreten Werten belegt und dann zur Fehlerberechnung genutzt (supervised learning). self.poi = [None, None] # Initiale Schlägerpositionen der Spieler auf ihren Linien. # [SchlängerLinks, SchlägerRechts] # Positionsänderungen sind somit, wie in Pong üblich, nur auf der Y-Achse möglich. self.bat = [self.y_max / 2.0, self.y_max / 2.0] # Zählt die Schlägertreffer (Kollisionen des Balles mit einem Schläger). # Die KNNs sollen unterschiedliche Winkel lernen (der Winkel wird immer zufallsinitialisiert), # bei ausreichender Lerndauer bzw. stark minimiertem Fehler jedoch sind die KNNs manchmal auf # einigen Winkeln derart talentiert, dass der Ball nie mehr über die Torlinie gehen würde. # Um ein solches "Endlosspiel" zu verhindern, wird der Ball nach 10 Treffern resettet, # das Spielfeld also zurückgesetzt mit einer initialen Ballposition auf der Spielfeldmitte und # neuem, zufallskalkuliertem Winkel. self.bouncecount = 0 # Startvorbereitung # Initialisiert das erste Mal den Ortsvektor und Bewegungs-/Richtungsvektor self.__initvectors() def __initvectors(self): """ Initialisiert Anfangs- und Richtungsballvektoren. Irgendwo in der Mitte auf der Y-Achse und mit einem belibigen Startwinkel. Der Startwinkel ist stets größergleich -45 Grad sowie kleinergleich +45 Grad von der Horizontalen aus gesehen. :return void """ # Richtungsvektor erzeugen # Zufallswinkel im Bogenmaß generieren # 2 Pi entsprechen dem vollen Einheitskreis, also 360° # [-Pi/4, +Pi/4] entspricht einem Interval von [-45°, +45°] # Dieses Interval hat sich bewährt, da zu spitze den Lerneffekt und vor allem die Lerndauer # negativ beeinflussen. rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4) # Aus dem Zufallswinkel eine entsprechende Rotationsmatrix generieren rotMatrix = np.array([ [np.cos(rotationAngle), -np.sin(rotationAngle)], [np.sin(rotationAngle), np.cos(rotationAngle)] ]) # Rotationsmatrix auf einen Einheitsvektor (horizontale Ausrichtung) anwenden self.dirVec = np.dot(rotMatrix, np.array([1, 0])) # Zufällig entscheiden, ob der Ball nach links (zu Player 0) oder rechts (zu Player 1) startet. 
if random.random() > 0.5: self.dirVec[0] *= -1.0 # x-Komponente des Richtungs-/Bewegungsvektors wird an der Y-Achse gespiegelt # Ortsvektor erzeugen # Start irgendowo auf der Mittellinie # (x-Koordinate ist also fixiert auf die Mittellinie, y-Koordinate zufällig) self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()]) # Rücksetzen der Anzahl der Schlägertreffer (__init__) self.bouncecount = 0 def _incrpoints(self, player): """ Erhöht den Punktestand für einen Spieler[Player] :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return void """ self.Points[player] += 1 def __sensor_x(self): """ Gibt den X-Anteil des Ortsvektors des Balles mit Rauschen zurück :return float, X-Anteil vom Ortsvektor """ return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax def __sensor_y(self): """ Gibt den Y-Anteil des Ortsvektors des Balles mit Rauschen zurück :return float, Y-Anteil vom Ortsvektor """ return self.posVec[1] + (random.random() - 0.5) * self.outputNoiseMax def __sensor_bat(self, player): """ Gibt die Position des Schlägers auf der Y-Achse von Spieler[Player] mit Rauschen zurück :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return float, Schlägerposition von Spieler[Player] """ return self.bat[player] + (random.random() - 0.5) * self.outputNoiseMax def scaled_sensor_x(self): """ Gibt den X-Anteil des Ortsvektors des Balles skaliert von -
des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück (Rauschen kommt von __sensor_y()) :return float, skalierter Y-Anteil vom Ortsvektor """ return self.__sensor_y() / (self.y_max / 2.0) - 1.0 def scaled_sensor_bat(self, player): """ Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1 mit Rauschen zurück (Rauschen kommt von __sensor_bat()) :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return float, skalierte Schlägerposition von Spieler[Player] """ return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0 def hitbat(self, player): """ Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug. :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player] """ return self._bathit[player] def scaled_sensor_err(self, player): """ Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück. :pre hitbat(player) or out(player) :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return float, skalierter Error von Spieler[Player] """ return (self.poi[player] - self.__sensor_bat(player) ) / self.y_max def out(self, player): """ Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht. :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return Bool, Ball hat die Linie von Spieler[Player] überschritten (True) oder nicht überschritten (False) """ return self._out[player] def getpoints(self, player): """ Liefert die Punktanzahl von Spieler[Player] :param player: Punktzahl von Spieler 0 oder 1 :type player: Int (0 oder 1) :return int, Punktzahl des Spielers """ return self.Points[player] def tick(self): """ Berechnet einen Tick/Spielzug, hierbei wird der Ball bewegt, die Überschreitung einer der Torauslinien oder die Kollision mit einem Schläger auf False initialisiert, außerdem die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und her gespielt haben ohne Tor (Endlosspiel verhindern). Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen Bewegungs-/Richtungsvektor ändern muss. Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder ob ein Schläger den Ball getroffen hat. :return void """ ######################### ### Initialisierungen
1 bis +1 mit Rauschen zurück (Rauschen kommt von __sensor_x()) :return float, skalierter X-Anteil vom Ortsvektor """ return self.__sensor_x() / (self.x_max / 2.0) - 1.0 def scaled_sensor_y(self): """ Gibt den Y-Anteil
identifier_body
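The scaled_sensor_* helpers map a noisy reading from [0, max] onto [-1, +1] so the networks see normalized inputs. A small sketch of that scaling, with the noise amplitude made an explicit parameter instead of the outputNoiseMax attribute.

```python
import random

def noisy_reading(value: float, noise_max: float) -> float:
    """Add uniform noise in [-noise_max/2, +noise_max/2], like the __sensor_* helpers."""
    return value + (random.random() - 0.5) * noise_max

def scale_to_unit(value: float, full_range: float) -> float:
    """Map a value from [0, full_range] onto [-1, +1]."""
    return value / (full_range / 2.0) - 1.0

# Usage: scale_to_unit(noisy_reading(4.5, 0.1), 9.0) is close to 0.0 for a centered ball
```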
court.py
0] # Der "Einschlagspunkt" des Balles auf der (Toraus-)Linie, wird erst nach einem Aufprall # mit konkreten Werten belegt und dann zur Fehlerberechnung genutzt (supervised learning). self.poi = [None, None] # Initiale Schlägerpositionen der Spieler auf ihren Linien. # [SchlängerLinks, SchlägerRechts] # Positionsänderungen sind somit, wie in Pong üblich, nur auf der Y-Achse möglich. self.bat = [self.y_max / 2.0, self.y_max / 2.0] # Zählt die Schlägertreffer (Kollisionen des Balles mit einem Schläger). # Die KNNs sollen unterschiedliche Winkel lernen (der Winkel wird immer zufallsinitialisiert), # bei ausreichender Lerndauer bzw. stark minimiertem Fehler jedoch sind die KNNs manchmal auf # einigen Winkeln derart talentiert, dass der Ball nie mehr über die Torlinie gehen würde. # Um ein solches "Endlosspiel" zu verhindern, wird der Ball nach 10 Treffern resettet, # das Spielfeld also zurückgesetzt mit einer initialen Ballposition auf der Spielfeldmitte und # neuem, zufallskalkuliertem Winkel. self.bouncecount = 0 # Startvorbereitung # Initialisiert das erste Mal den Ortsvektor und Bewegungs-/Richtungsvektor self.__initvectors() def __initvectors(self): """ Initialisiert Anfangs- und Richtungsballvektoren. Irgendwo in der Mitte auf der Y-Achse und mit einem belibigen Startwinkel. Der Startwinkel ist stets größergleich -45 Grad sowie kleinergleich +45 Grad von der Horizontalen aus gesehen. :return void """ # Richtungsvektor erzeugen # Zufallswinkel im Bogenmaß generieren # 2 Pi entsprechen dem vollen Einheitskreis, also 360° # [-Pi/4, +Pi/4] entspricht einem Interval von [-45°, +45°] # Dieses Interval hat sich bewährt, da zu spitze den Lerneffekt und vor allem die Lerndauer # negativ beeinflussen. rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4) # Aus dem Zufallswinkel eine entsprechende Rotationsmatrix generieren rotMatrix = np.array([ [np.cos(rotationAngle), -np.sin(rotationAngle)], [np.sin(rotationAngle), np.cos(rotationAngle)] ]) # Rotationsmatrix auf einen Einheitsvektor (horizontale Ausrichtung) anwenden self.dirVec = np.dot(rotMatrix, np.array([1, 0])) # Zufällig entscheiden, ob der Ball nach links (zu Player 0) oder rechts (zu Player 1) startet. 
if random.random() > 0.5: self.dirVec[0] *= -1.0 # x-Komponente des Richtungs-/Bewegungsvektors wird an der Y-Achse gespiegelt # Ortsvektor erzeugen # Start irgendowo auf der Mittellinie # (x-Koordinate ist also fixiert auf die Mittellinie, y-Koordinate zufällig) self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()]) # Rücksetzen der Anzahl der Schlägertreffer (__init__) self.bouncecount = 0 def _incrpoints(self, player): """ Erhöht den Punktestand für einen Spieler[Player] :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return void """ self.Points[player] += 1 def __sensor_x(self): """ Gibt den X-Anteil des Ortsvektors des Balles mit Rauschen zurück :return float, X-Anteil vom Ortsvektor """ return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax def __sensor_y(self): """ Gibt den Y-Anteil des Ortsvektors des Balles mit Rauschen zurück :return float, Y-Anteil vom Ortsvektor """ return self.posVec[1] + (random.random() - 0.5) * self.outputNoiseMax def __sensor_bat(self, player): """ Gibt die Position des Schlägers auf der Y-Achse von Spieler[Player] mit Rauschen zurück :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return float, Schlägerposition von Spieler[Player] """ return self.bat[player] + (random.random() - 0.5) * self.outputNoiseMax def scaled_sensor_x(self): """ Gibt den X-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück (Rauschen kommt von __sensor_x()) :return float, skalierter X-Anteil vom Ortsvektor """ return self.__sensor_x() / (self.x_max / 2.0) - 1.0 def scaled_sensor_y(self): """ Gibt den Y-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück (Rauschen kommt von __sensor_y()) :return float, skalierter Y-Anteil vom Ortsvektor """ return self.__sensor_y() / (self.y_max / 2.0) - 1.0 def scaled_sensor_bat(self, player): """ Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1 mit Rauschen zurück (Rauschen kommt von __sensor_bat()) :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return float, skalierte Schlägerposition von Spieler[Player] """ return self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0 def hitbat(self, player): """ Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug. :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player] """ return self._bathit[player] def scaled_sensor_err(self, player): """ Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück. :pre hitbat(player) or out(player) :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return float, skalierter Error von Spieler[Player] """ return (self.poi[player] - self.__sensor_bat(player) ) / self.y_max def out(self, player): """ Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht. :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return Bool, Ball hat die Linie von Spieler[Player] überschritten (True) oder nicht überschritten (False) """ return self._out[player] def getpoints(self, player): """ Liefert die Punktanzahl von Spieler[Player] :param player: Punktzahl von Spieler 0 oder 1 :type player: Int (0 oder 1) :return int, Punktzahl des Spielers """ return self.Points[player] def tick(self): """ Berechnet einen Tick/Spielzug, hierbei wir
r Ball bewegt, die Überschreitung einer der Torauslinien oder die Kollision mit einem Schläger auf False initialisiert, außerdem die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und her gespielt haben ohne Tor (Endlosspiel verhindern). Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen Bewegungs-/Richtungsvektor ändern muss. Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder ob ein Schläger den Ball getroffen hat. :return void """ ######################### ### Initialisier
d de
identifier_name
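__initvectors draws a start angle in [-45°, +45°], builds a 2x2 rotation matrix from it, rotates the unit vector (1, 0), and flips the x component with probability 0.5 so the ball can start toward either player. A sketch of that initialization under the same assumptions:

```python
import random
import numpy as np

def random_start_direction() -> np.ndarray:
    """Unit direction within 45 degrees of horizontal, toward a random side."""
    angle = np.random.uniform(-np.pi / 4, np.pi / 4)
    rot = np.array([[np.cos(angle), -np.sin(angle)],
                    [np.sin(angle),  np.cos(angle)]])
    direction = rot @ np.array([1.0, 0.0])
    if random.random() > 0.5:
        direction[0] *= -1.0  # start toward player 0 (left) instead of player 1
    return direction

# Usage: d = random_start_direction(); np.linalg.norm(d) is 1.0 up to float error
```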
court.py
0] # Der "Einschlagspunkt" des Balles auf der (Toraus-)Linie, wird erst nach einem Aufprall # mit konkreten Werten belegt und dann zur Fehlerberechnung genutzt (supervised learning). self.poi = [None, None] # Initiale Schlägerpositionen der Spieler auf ihren Linien. # [SchlängerLinks, SchlägerRechts] # Positionsänderungen sind somit, wie in Pong üblich, nur auf der Y-Achse möglich. self.bat = [self.y_max / 2.0, self.y_max / 2.0] # Zählt die Schlägertreffer (Kollisionen des Balles mit einem Schläger).
# Um ein solches "Endlosspiel" zu verhindern, wird der Ball nach 10 Treffern resettet, # das Spielfeld also zurückgesetzt mit einer initialen Ballposition auf der Spielfeldmitte und # neuem, zufallskalkuliertem Winkel. self.bouncecount = 0 # Startvorbereitung # Initialisiert das erste Mal den Ortsvektor und Bewegungs-/Richtungsvektor self.__initvectors() def __initvectors(self): """ Initialisiert Anfangs- und Richtungsballvektoren. Irgendwo in der Mitte auf der Y-Achse und mit einem belibigen Startwinkel. Der Startwinkel ist stets größergleich -45 Grad sowie kleinergleich +45 Grad von der Horizontalen aus gesehen. :return void """ # Richtungsvektor erzeugen # Zufallswinkel im Bogenmaß generieren # 2 Pi entsprechen dem vollen Einheitskreis, also 360° # [-Pi/4, +Pi/4] entspricht einem Interval von [-45°, +45°] # Dieses Interval hat sich bewährt, da zu spitze den Lerneffekt und vor allem die Lerndauer # negativ beeinflussen. rotationAngle = np.random.uniform(-np.pi / 4, np.pi / 4) # Aus dem Zufallswinkel eine entsprechende Rotationsmatrix generieren rotMatrix = np.array([ [np.cos(rotationAngle), -np.sin(rotationAngle)], [np.sin(rotationAngle), np.cos(rotationAngle)] ]) # Rotationsmatrix auf einen Einheitsvektor (horizontale Ausrichtung) anwenden self.dirVec = np.dot(rotMatrix, np.array([1, 0])) # Zufällig entscheiden, ob der Ball nach links (zu Player 0) oder rechts (zu Player 1) startet. if random.random() > 0.5: self.dirVec[0] *= -1.0 # x-Komponente des Richtungs-/Bewegungsvektors wird an der Y-Achse gespiegelt # Ortsvektor erzeugen # Start irgendowo auf der Mittellinie # (x-Koordinate ist also fixiert auf die Mittellinie, y-Koordinate zufällig) self.posVec = np.array([self.x_max / 2.0, self.y_max * random.random()]) # Rücksetzen der Anzahl der Schlägertreffer (__init__) self.bouncecount = 0 def _incrpoints(self, player): """ Erhöht den Punktestand für einen Spieler[Player] :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return void """ self.Points[player] += 1 def __sensor_x(self): """ Gibt den X-Anteil des Ortsvektors des Balles mit Rauschen zurück :return float, X-Anteil vom Ortsvektor """ return self.posVec[0] + (random.random() - 0.5) * self.outputNoiseMax def __sensor_y(self): """ Gibt den Y-Anteil des Ortsvektors des Balles mit Rauschen zurück :return float, Y-Anteil vom Ortsvektor """ return self.posVec[1] + (random.random() - 0.5) * self.outputNoiseMax def __sensor_bat(self, player): """ Gibt die Position des Schlägers auf der Y-Achse von Spieler[Player] mit Rauschen zurück :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return float, Schlägerposition von Spieler[Player] """ return self.bat[player] + (random.random() - 0.5) * self.outputNoiseMax def scaled_sensor_x(self): """ Gibt den X-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück (Rauschen kommt von __sensor_x()) :return float, skalierter X-Anteil vom Ortsvektor """ return self.__sensor_x() / (self.x_max / 2.0) - 1.0 def scaled_sensor_y(self): """ Gibt den Y-Anteil des Ortsvektors des Balles skaliert von -1 bis +1 mit Rauschen zurück (Rauschen kommt von __sensor_y()) :return float, skalierter Y-Anteil vom Ortsvektor """ return self.__sensor_y() / (self.y_max / 2.0) - 1.0 def scaled_sensor_bat(self, player): """ Gibt die Position des Schlägers von Spieler[Player] skaliert von -1 bis +1 mit Rauschen zurück (Rauschen kommt von __sensor_bat()) :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return float, skalierte Schlägerposition von Spieler[Player] """ return 
self.__sensor_bat(player) / (self.y_max / 2.0) - 1.0 def hitbat(self, player): """ Gibt an, ob der Schläger von Spieler[Player] getroffen wurde oder nicht im aktuellen Tick/Spielzug. :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return Bool, Treffer (True) oder kein Treffer (False) vom Schläger von Spieler[Player] """ return self._bathit[player] def scaled_sensor_err(self, player): """ Gibt den Fehler von Spieler[Player] skaliert von -1 bis +1 zurück. :pre hitbat(player) or out(player) :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return float, skalierter Error von Spieler[Player] """ return (self.poi[player] - self.__sensor_bat(player) ) / self.y_max def out(self, player): """ Gibt an, ob der Ball die Linie von Spieler[Player] überschritten hat oder nicht. :param player: Spieler 0 oder 1 :type player: Int (0 oder 1) :return Bool, Ball hat die Linie von Spieler[Player] überschritten (True) oder nicht überschritten (False) """ return self._out[player] def getpoints(self, player): """ Liefert die Punktanzahl von Spieler[Player] :param player: Punktzahl von Spieler 0 oder 1 :type player: Int (0 oder 1) :return int, Punktzahl des Spielers """ return self.Points[player] def tick(self): """ Berechnet einen Tick/Spielzug, hierbei wird der Ball bewegt, die Überschreitung einer der Torauslinien oder die Kollision mit einem Schläger auf False initialisiert, außerdem die Ballposition zurückgesetzt, falls die Spieler den Ball zu oft hin und her gespielt haben ohne Tor (Endlosspiel verhindern). Ebenso wird überprüft, ob der Ball auf eine Bande getroffen ist und seinen Bewegungs-/Richtungsvektor ändern muss. Zum Schluss wird evaluiert, ob der Ball über die Torauslinie geflogen oder ob ein Schläger den Ball getroffen hat. :return void """ ######################### ### Initialisierungen
# Die KNNs sollen unterschiedliche Winkel lernen (der Winkel wird immer zufallsinitialisiert), # bei ausreichender Lerndauer bzw. stark minimiertem Fehler jedoch sind die KNNs manchmal auf # einigen Winkeln derart talentiert, dass der Ball nie mehr über die Torlinie gehen würde.
random_line_split
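__tickBounceLeft and __tickBounceRight intersect the ball's path with the goal line to get the exact point of impact (poi), compare it with the paddle span to decide hit versus out, and scaled_sensor_err later turns the distance between poi and paddle into a supervised training error. A sketch of that intersection and error term, assuming the same position/direction representation.

```python
import numpy as np

def point_of_impact(pos: np.ndarray, direction: np.ndarray, line_x: float) -> float:
    """Y coordinate where the ball's path crosses the vertical line x = line_x."""
    factor = (line_x - pos[0]) / direction[0]
    return (pos + factor * direction)[1]

def paddle_error(poi_y: float, bat_y: float, y_max: float) -> float:
    """Signed, scaled error used as the supervised target (cf. scaled_sensor_err)."""
    return (poi_y - bat_y) / y_max

# Usage:
# poi = point_of_impact(np.array([-0.3, 4.0]), np.array([-0.7, 0.7]), 0.0)  # -> 3.7
# err = paddle_error(poi, bat_y=4.5, y_max=9.0)
```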
accounts.rs
use nimiq_account::{ Account, Accounts, BlockLogger, BlockState, RevertInfo, TransactionOperationReceipt, }; use nimiq_block::{Block, BlockError, SkipBlockInfo}; use nimiq_blockchain_interface::PushError; use nimiq_database::{traits::Database, TransactionProxy}; use nimiq_keys::Address; use nimiq_primitives::{ key_nibbles::KeyNibbles, trie::{trie_diff::TrieDiff, trie_proof::TrieProof}, }; use nimiq_serde::Deserialize; use nimiq_transaction::extended_transaction::ExtendedTransaction; use nimiq_trie::WriteTransactionProxy; use crate::{blockchain_state::BlockchainState, Blockchain}; /// Subset of the accounts in the accounts tree pub struct AccountsChunk { /// The end of the chunk. The end key is exclusive. /// When set to None it means that it is the last trie chunk. pub end_key: Option<KeyNibbles>, /// The set of accounts retrieved. pub accounts: Vec<(Address, Account)>, } /// Implements methods to handle the accounts. impl Blockchain { /// Updates the accounts given a block. /// Expects a full block with body. pub fn commit_accounts( &self, state: &BlockchainState, block: &Block, diff: Option<TrieDiff>, txn: &mut WriteTransactionProxy, block_logger: &mut BlockLogger, ) -> Result<u64, PushError> { // Get the accounts from the state. let accounts = &state.accounts; let block_state = BlockState::new(block.block_number(), block.timestamp()); // Check the type of the block. match block { Block::Macro(ref macro_block) => { // Initialize a vector to store the inherents. let inherents = self.create_macro_block_inherents(macro_block); // Commit block to AccountsTree. if accounts.is_complete(Some(txn)) { accounts.commit(txn, &[], &inherents, &block_state, block_logger)?; } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?; } else { return Err(PushError::MissingAccountsTrieDiff); } // Macro blocks are final and receipts for the previous batch are no longer necessary // as rebranching across this block is not possible. self.chain_store.clear_revert_infos(txn.raw()); // Store the transactions and the inherents into the History tree. let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, macro_block.header.block_number, macro_block.header.timestamp, vec![], inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), macro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } Block::Micro(ref micro_block) => { // Get the body of the block. let body = micro_block .body .as_ref() .expect("Block body must be present"); let skip_block_info = SkipBlockInfo::from_micro_block(micro_block); // Create the inherents from any forks or skip block info. let inherents = self.create_punishment_inherents( block_state.number, &body.fork_proofs, skip_block_info, Some(txn), ); // Commit block to AccountsTree and create the receipts. let revert_info: RevertInfo = if accounts.is_complete(Some(txn)) { accounts .commit( txn, &body.get_raw_transactions(), &inherents, &block_state, block_logger, )? .into() } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?.into() } else { return Err(PushError::MissingAccountsTrieDiff); }; // Check that the transaction results match the ones in the block. if let RevertInfo::Receipts(receipts) = &revert_info { assert_eq!(receipts.transactions.len(), body.transactions.len()); for (index, receipt) in receipts.transactions.iter().enumerate() { let matches = match receipt { TransactionOperationReceipt::Ok(..) 
=> { body.transactions[index].succeeded() } TransactionOperationReceipt::Err(..) => { body.transactions[index].failed() } }; if !matches { return Err(PushError::InvalidBlock( BlockError::TransactionExecutionMismatch, )); } } } // Store revert info. self.chain_store.put_revert_info( txn.raw(), micro_block.header.block_number, &revert_info, ); // Store the transactions and the inherents into the History tree. let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, micro_block.header.block_number, micro_block.header.timestamp, body.transactions.clone(), inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), micro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } } } /// Reverts the accounts given a block. This only applies to micro blocks and skip blocks, since /// macro blocks are final and can't be reverted. pub(crate) fn revert_accounts( &self, accounts: &Accounts, txn: &mut WriteTransactionProxy, block: &Block, block_logger: &mut BlockLogger, ) -> Result<u64, PushError> { if block.is_macro() { panic!("Can't revert {block} - macro blocks are final"); } let block = block.unwrap_micro_ref(); let body = block.body.as_ref().unwrap(); debug!( block = %block, is_skip = block.is_skip_block(), num_transactions = body.transactions.len(), num_fork_proofs = body.fork_proofs.len(), "Reverting block" ); // Verify accounts hash if the tree is complete or changes only happened in the complete part. if let Some(accounts_hash) = accounts.get_root_hash(Some(txn)) { assert_eq!( block.header.state_root, accounts_hash, "Cannot revert {} - inconsistent state", block, ); } // Create the inherents from any forks or skip block info. let skip_block_info = SkipBlockInfo::from_micro_block(block); let inherents = self.create_punishment_inherents(
block.block_number(), &body.fork_proofs, skip_block_info, Some(txn), ); // Get the revert info for this block. let revert_info = self .chain_store .get_revert_info(block.block_number(), Some(txn)) .expect("Failed to revert - missing revert info"); // Revert the block from AccountsTree. let block_state = BlockState::new(block.block_number(), block.header.timestamp); let result = accounts.revert( txn, &body.get_raw_transactions(), &inherents, &block_state, revert_info, block_logger, ); if let Err(e) = result { panic!("Failed to revert {block} - {e:?}"); } // Remove the transactions from the History tree. For this you only need to calculate the // number of transactions that you want to remove. let num_txs = body.transactions.len() + inherents.len(); let (_, total_size) = self .history_store .remove_partial_history(txn.raw(), block.epoch_number(), num_txs) .expect("Failed to remove partial history"); Ok(total_size) } /// Produces a Merkle proof of the inclusion of the given keys in the /// Merkle Radix Trie. pub fn get_accounts_proof(&self, keys: Vec<&KeyNibbles>) -> Option<TrieProof> { let txn = self.env.read_transaction(); self.state().accounts.get_proof(Some(&txn), keys).ok() } /// Gets an accounts chunk given a start key and a limit pub fn get_accounts_chunk( &self, txn_option: Option<&TransactionProxy>, start: KeyNibbles, limit: usize, ) -> AccountsChunk { let trie_chunk = self.state().accounts.get_chunk(start, limit, txn_option); let end_key = trie_chunk.end_key; let accounts = trie_chunk .items .into_iter() .filter(|item| item.key.to_address().is_some()) .map(|item| { ( item.key.to_address().unwrap(), Account::deserialize_from_vec(&item.value).unwrap(), ) }) .collect(); AccountsChunk { end_key, accounts } } }
random_line_split
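After committing a micro block, the accounts.rs code above cross-checks every transaction receipt against the success flag recorded in the block and rejects the block on any mismatch. A language-agnostic sketch of that consistency check in Python; the flat boolean lists are an assumption for illustration, not the nimiq types.

```python
def receipts_match_block(receipt_ok: list, block_succeeded: list) -> bool:
    """True if every executed transaction outcome matches what the block claims."""
    if len(receipt_ok) != len(block_succeeded):
        return False  # commit produces one receipt per transaction, so lengths must agree
    return all(r == b for r, b in zip(receipt_ok, block_succeeded))

# Usage: receipts_match_block([True, False], [True, False]) -> True;
# a mismatch corresponds to PushError::InvalidBlock(TransactionExecutionMismatch) above.
```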
accounts.rs
use nimiq_account::{ Account, Accounts, BlockLogger, BlockState, RevertInfo, TransactionOperationReceipt, }; use nimiq_block::{Block, BlockError, SkipBlockInfo}; use nimiq_blockchain_interface::PushError; use nimiq_database::{traits::Database, TransactionProxy}; use nimiq_keys::Address; use nimiq_primitives::{ key_nibbles::KeyNibbles, trie::{trie_diff::TrieDiff, trie_proof::TrieProof}, }; use nimiq_serde::Deserialize; use nimiq_transaction::extended_transaction::ExtendedTransaction; use nimiq_trie::WriteTransactionProxy; use crate::{blockchain_state::BlockchainState, Blockchain}; /// Subset of the accounts in the accounts tree pub struct AccountsChunk { /// The end of the chunk. The end key is exclusive. /// When set to None it means that it is the last trie chunk. pub end_key: Option<KeyNibbles>, /// The set of accounts retrieved. pub accounts: Vec<(Address, Account)>, } /// Implements methods to handle the accounts. impl Blockchain { /// Updates the accounts given a block. /// Expects a full block with body. pub fn commit_accounts( &self, state: &BlockchainState, block: &Block, diff: Option<TrieDiff>, txn: &mut WriteTransactionProxy, block_logger: &mut BlockLogger, ) -> Result<u64, PushError>
// Macro blocks are final and receipts for the previous batch are no longer necessary // as rebranching across this block is not possible. self.chain_store.clear_revert_infos(txn.raw()); // Store the transactions and the inherents into the History tree. let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, macro_block.header.block_number, macro_block.header.timestamp, vec![], inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), macro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } Block::Micro(ref micro_block) => { // Get the body of the block. let body = micro_block .body .as_ref() .expect("Block body must be present"); let skip_block_info = SkipBlockInfo::from_micro_block(micro_block); // Create the inherents from any forks or skip block info. let inherents = self.create_punishment_inherents( block_state.number, &body.fork_proofs, skip_block_info, Some(txn), ); // Commit block to AccountsTree and create the receipts. let revert_info: RevertInfo = if accounts.is_complete(Some(txn)) { accounts .commit( txn, &body.get_raw_transactions(), &inherents, &block_state, block_logger, )? .into() } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?.into() } else { return Err(PushError::MissingAccountsTrieDiff); }; // Check that the transaction results match the ones in the block. if let RevertInfo::Receipts(receipts) = &revert_info { assert_eq!(receipts.transactions.len(), body.transactions.len()); for (index, receipt) in receipts.transactions.iter().enumerate() { let matches = match receipt { TransactionOperationReceipt::Ok(..) => { body.transactions[index].succeeded() } TransactionOperationReceipt::Err(..) => { body.transactions[index].failed() } }; if !matches { return Err(PushError::InvalidBlock( BlockError::TransactionExecutionMismatch, )); } } } // Store revert info. self.chain_store.put_revert_info( txn.raw(), micro_block.header.block_number, &revert_info, ); // Store the transactions and the inherents into the History tree. let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, micro_block.header.block_number, micro_block.header.timestamp, body.transactions.clone(), inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), micro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } } } /// Reverts the accounts given a block. This only applies to micro blocks and skip blocks, since /// macro blocks are final and can't be reverted. pub(crate) fn revert_accounts( &self, accounts: &Accounts, txn: &mut WriteTransactionProxy, block: &Block, block_logger: &mut BlockLogger, ) -> Result<u64, PushError> { if block.is_macro() { panic!("Can't revert {block} - macro blocks are final"); } let block = block.unwrap_micro_ref(); let body = block.body.as_ref().unwrap(); debug!( block = %block, is_skip = block.is_skip_block(), num_transactions = body.transactions.len(), num_fork_proofs = body.fork_proofs.len(), "Reverting block" ); // Verify accounts hash if the tree is complete or changes only happened in the complete part. if let Some(accounts_hash) = accounts.get_root_hash(Some(txn)) { assert_eq!( block.header.state_root, accounts_hash, "Cannot revert {} - inconsistent state", block, ); } // Create the inherents from any forks or skip block info. 
let skip_block_info = SkipBlockInfo::from_micro_block(block); let inherents = self.create_punishment_inherents( block.block_number(), &body.fork_proofs, skip_block_info, Some(txn), ); // Get the revert info for this block. let revert_info = self .chain_store .get_revert_info(block.block_number(), Some(txn)) .expect("Failed to revert - missing revert info"); // Revert the block from AccountsTree. let block_state = BlockState::new(block.block_number(), block.header.timestamp); let result = accounts.revert( txn, &body.get_raw_transactions(), &inherents, &block_state, revert_info, block_logger, ); if let Err(e) = result { panic!("Failed to revert {block} - {e:?}"); } // Remove the transactions from the History tree. For this you only need to calculate the // number of transactions that you want to remove. let num_txs = body.transactions.len() + inherents.len(); let (_, total_size) = self .history_store .remove_partial_history(txn.raw(), block.epoch_number(), num_txs) .expect("Failed to remove partial history"); Ok(total_size) } /// Produces a Merkle proof of the inclusion of the given keys in the /// Merkle Radix Trie. pub fn get_accounts_proof(&self, keys: Vec<&KeyNibbles>) -> Option<TrieProof> { let txn = self.env.read_transaction(); self.state().accounts.get_proof(Some(&txn), keys).ok() } /// Gets an accounts chunk given a start key and a limit pub fn get_accounts_chunk( &self, txn_option: Option<&TransactionProxy>, start: KeyNibbles, limit: usize, ) -> AccountsChunk { let trie_chunk = self.state().accounts.get_chunk(start, limit, txn_option); let end_key = trie_chunk.end_key; let accounts = trie_chunk .items .into_iter() .filter(|item| item.key.to_address().is_some()) .map(|item| { ( item.key.to_address().unwrap(), Account::deserialize_from_vec(&item.value).unwrap(), ) }) .collect(); AccountsChunk { end_key, accounts } } }
{ // Get the accounts from the state. let accounts = &state.accounts; let block_state = BlockState::new(block.block_number(), block.timestamp()); // Check the type of the block. match block { Block::Macro(ref macro_block) => { // Initialize a vector to store the inherents. let inherents = self.create_macro_block_inherents(macro_block); // Commit block to AccountsTree. if accounts.is_complete(Some(txn)) { accounts.commit(txn, &[], &inherents, &block_state, block_logger)?; } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?; } else { return Err(PushError::MissingAccountsTrieDiff); }
identifier_body
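The micro-block branch in the Rust snippet above checks that every commit receipt agrees with the success flag recorded in the block body before the block is accepted, rejecting it with a TransactionExecutionMismatch otherwise. A minimal Python sketch of that consistency rule, with illustrative names (the original uses Rust enums such as TransactionOperationReceipt):

def receipts_match(receipts, tx_succeeded_flags):
    # Each receipt must be "ok" exactly when the corresponding transaction is
    # flagged as succeeded in the block body; any mismatch rejects the block.
    if len(receipts) != len(tx_succeeded_flags):
        return False
    return all(
        (receipt == "ok") == succeeded
        for receipt, succeeded in zip(receipts, tx_succeeded_flags)
    )

assert receipts_match(["ok", "err"], [True, False])
assert not receipts_match(["ok", "err"], [True, True])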
accounts.rs
use nimiq_account::{ Account, Accounts, BlockLogger, BlockState, RevertInfo, TransactionOperationReceipt, }; use nimiq_block::{Block, BlockError, SkipBlockInfo}; use nimiq_blockchain_interface::PushError; use nimiq_database::{traits::Database, TransactionProxy}; use nimiq_keys::Address; use nimiq_primitives::{ key_nibbles::KeyNibbles, trie::{trie_diff::TrieDiff, trie_proof::TrieProof}, }; use nimiq_serde::Deserialize; use nimiq_transaction::extended_transaction::ExtendedTransaction; use nimiq_trie::WriteTransactionProxy; use crate::{blockchain_state::BlockchainState, Blockchain}; /// Subset of the accounts in the accounts tree pub struct
{ /// The end of the chunk. The end key is exclusive. /// When set to None it means that it is the last trie chunk. pub end_key: Option<KeyNibbles>, /// The set of accounts retrieved. pub accounts: Vec<(Address, Account)>, } /// Implements methods to handle the accounts. impl Blockchain { /// Updates the accounts given a block. /// Expects a full block with body. pub fn commit_accounts( &self, state: &BlockchainState, block: &Block, diff: Option<TrieDiff>, txn: &mut WriteTransactionProxy, block_logger: &mut BlockLogger, ) -> Result<u64, PushError> { // Get the accounts from the state. let accounts = &state.accounts; let block_state = BlockState::new(block.block_number(), block.timestamp()); // Check the type of the block. match block { Block::Macro(ref macro_block) => { // Initialize a vector to store the inherents. let inherents = self.create_macro_block_inherents(macro_block); // Commit block to AccountsTree. if accounts.is_complete(Some(txn)) { accounts.commit(txn, &[], &inherents, &block_state, block_logger)?; } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?; } else { return Err(PushError::MissingAccountsTrieDiff); } // Macro blocks are final and receipts for the previous batch are no longer necessary // as rebranching across this block is not possible. self.chain_store.clear_revert_infos(txn.raw()); // Store the transactions and the inherents into the History tree. let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, macro_block.header.block_number, macro_block.header.timestamp, vec![], inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), macro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } Block::Micro(ref micro_block) => { // Get the body of the block. let body = micro_block .body .as_ref() .expect("Block body must be present"); let skip_block_info = SkipBlockInfo::from_micro_block(micro_block); // Create the inherents from any forks or skip block info. let inherents = self.create_punishment_inherents( block_state.number, &body.fork_proofs, skip_block_info, Some(txn), ); // Commit block to AccountsTree and create the receipts. let revert_info: RevertInfo = if accounts.is_complete(Some(txn)) { accounts .commit( txn, &body.get_raw_transactions(), &inherents, &block_state, block_logger, )? .into() } else if let Some(diff) = diff { accounts.commit_incomplete(txn, diff)?.into() } else { return Err(PushError::MissingAccountsTrieDiff); }; // Check that the transaction results match the ones in the block. if let RevertInfo::Receipts(receipts) = &revert_info { assert_eq!(receipts.transactions.len(), body.transactions.len()); for (index, receipt) in receipts.transactions.iter().enumerate() { let matches = match receipt { TransactionOperationReceipt::Ok(..) => { body.transactions[index].succeeded() } TransactionOperationReceipt::Err(..) => { body.transactions[index].failed() } }; if !matches { return Err(PushError::InvalidBlock( BlockError::TransactionExecutionMismatch, )); } } } // Store revert info. self.chain_store.put_revert_info( txn.raw(), micro_block.header.block_number, &revert_info, ); // Store the transactions and the inherents into the History tree. 
let mut total_tx_size = 0; if state.can_verify_history { let ext_txs = ExtendedTransaction::from( self.network_id, micro_block.header.block_number, micro_block.header.timestamp, body.transactions.clone(), inherents, ); total_tx_size = self .history_store .add_to_history(txn.raw(), micro_block.epoch_number(), &ext_txs) .expect("Failed to store history") .1 }; Ok(total_tx_size) } } } /// Reverts the accounts given a block. This only applies to micro blocks and skip blocks, since /// macro blocks are final and can't be reverted. pub(crate) fn revert_accounts( &self, accounts: &Accounts, txn: &mut WriteTransactionProxy, block: &Block, block_logger: &mut BlockLogger, ) -> Result<u64, PushError> { if block.is_macro() { panic!("Can't revert {block} - macro blocks are final"); } let block = block.unwrap_micro_ref(); let body = block.body.as_ref().unwrap(); debug!( block = %block, is_skip = block.is_skip_block(), num_transactions = body.transactions.len(), num_fork_proofs = body.fork_proofs.len(), "Reverting block" ); // Verify accounts hash if the tree is complete or changes only happened in the complete part. if let Some(accounts_hash) = accounts.get_root_hash(Some(txn)) { assert_eq!( block.header.state_root, accounts_hash, "Cannot revert {} - inconsistent state", block, ); } // Create the inherents from any forks or skip block info. let skip_block_info = SkipBlockInfo::from_micro_block(block); let inherents = self.create_punishment_inherents( block.block_number(), &body.fork_proofs, skip_block_info, Some(txn), ); // Get the revert info for this block. let revert_info = self .chain_store .get_revert_info(block.block_number(), Some(txn)) .expect("Failed to revert - missing revert info"); // Revert the block from AccountsTree. let block_state = BlockState::new(block.block_number(), block.header.timestamp); let result = accounts.revert( txn, &body.get_raw_transactions(), &inherents, &block_state, revert_info, block_logger, ); if let Err(e) = result { panic!("Failed to revert {block} - {e:?}"); } // Remove the transactions from the History tree. For this you only need to calculate the // number of transactions that you want to remove. let num_txs = body.transactions.len() + inherents.len(); let (_, total_size) = self .history_store .remove_partial_history(txn.raw(), block.epoch_number(), num_txs) .expect("Failed to remove partial history"); Ok(total_size) } /// Produces a Merkle proof of the inclusion of the given keys in the /// Merkle Radix Trie. pub fn get_accounts_proof(&self, keys: Vec<&KeyNibbles>) -> Option<TrieProof> { let txn = self.env.read_transaction(); self.state().accounts.get_proof(Some(&txn), keys).ok() } /// Gets an accounts chunk given a start key and a limit pub fn get_accounts_chunk( &self, txn_option: Option<&TransactionProxy>, start: KeyNibbles, limit: usize, ) -> AccountsChunk { let trie_chunk = self.state().accounts.get_chunk(start, limit, txn_option); let end_key = trie_chunk.end_key; let accounts = trie_chunk .items .into_iter() .filter(|item| item.key.to_address().is_some()) .map(|item| { ( item.key.to_address().unwrap(), Account::deserialize_from_vec(&item.value).unwrap(), ) }) .collect(); AccountsChunk { end_key, accounts } } }
AccountsChunk
identifier_name
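Each record in this dump is a fill-in-the-middle example: a source file split into a prefix, a masked middle span, and a suffix, with the final label line (here identifier_name) describing what the middle is; in the accounts.rs row above the middle is just the struct name AccountsChunk. A small sketch of how such a record reassembles into the original source, assuming those three text spans (the field names below are illustrative, not taken from the data):

def reassemble(record):
    # Concatenating the three spans reproduces the original source text.
    return record["prefix"] + record["middle"] + record["suffix"]

record = {
    "prefix": "/// Subset of the accounts in the accounts tree\npub struct ",
    "middle": "AccountsChunk",  # label: "identifier_name"
    "suffix": " {\n    pub end_key: Option<KeyNibbles>,\n}\n",
    "label": "identifier_name",
}
assert "pub struct AccountsChunk {" in reassemble(record)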
gen.go
px.svg with an explicit fill="#fff". {"av", "ic_play_circle_filled_white_48px.svg"}: true, } func genFile(fqSVGDirName, dirName, baseName, fileName string, size float32) error { fqFileName := filepath.Join(fqSVGDirName, fileName) svgData, err := os.ReadFile(fqFileName) if err != nil { return err } varName := upperCase(dirName) for _, s := range strings.Split(baseName, "_") { varName += upperCase(s) } fmt.Fprintf(out, "var %s = []byte{", varName) defer fmt.Fprintf(out, "\n}\n\n") varNames = append(varNames, varName) var enc iconvg.Encoder enc.Reset(iconvg.Metadata{ ViewBox: iconvg.Rectangle{ Min: f32.Vec2{-24, -24}, Max: f32.Vec2{+24, +24}, }, Palette: iconvg.DefaultPalette, }) g := &SVG{} if err := xml.Unmarshal(svgData, g); err != nil { return err } var vbx, vby float32 for i, v := range strings.Split(g.ViewBox, " ") { f, err := strconv.ParseFloat(v, 32) if err != nil { return err } switch i { case 0: vbx = float32(f) case 1: vby = float32(f) } } offset := f32.Vec2{ vbx * outSize / size, vby * outSize / size, } // adjs maps from opacity to a cReg adj value. adjs := map[float32]uint8{} for _, p := range g.Paths { if fill, ok := skippedPaths[p.D]; ok && fill == p.Fill { continue } if err := genPath(&enc, &p, adjs, size, offset, g.Circles); err != nil { return err } g.Circles = nil } if len(g.Circles) != 0 { if err := genPath(&enc, &Path{}, adjs, size, offset, g.Circles); err != nil { return err } g.Circles = nil } ivgData, err := enc.Bytes() if err != nil { return err } for i, x := range ivgData { if i&0x0f == 0x00 { out.WriteByte('\n') } fmt.Fprintf(out, "%#02x, ", x) } totalFiles++ totalSVGBytes += len(svgData) totalIVGBytes += len(ivgData) return nil } func genPath(enc *iconvg.Encoder, p *Path, adjs map[float32]uint8, size float32, offset f32.Vec2, circles []Circle) error { adj := uint8(0) opacity := float32(1) if p.Opacity != nil { opacity = *p.Opacity } else if p.FillOpacity != nil { opacity = *p.FillOpacity } if opacity != 1 { var ok bool if adj, ok = adjs[opacity]; !ok { adj = uint8(len(adjs) + 1) adjs[opacity] = adj // Set CREG[0-adj] to be a blend of transparent (0x7f) and the // first custom palette color (0x80). enc.SetCReg(adj, false, iconvg.BlendColor(uint8(opacity*0xff), 0x7f, 0x80)) } } needStartPath := true if p.D != "" { needStartPath = false if err := genPathData(enc, adj, p.D, size, offset); err != nil { return err } } for _, c := range circles { // Normalize. cx := c.Cx * outSize / size cx -= outSize/2 + offset[0] cy := c.Cy * outSize / size cy -= outSize/2 + offset[1] r := c.R * outSize / size if needStartPath { needStartPath = false enc.StartPath(adj, cx-r, cy) } else { enc.ClosePathAbsMoveTo(cx-r, cy) } // Convert a circle to two relative arcTo ops, each of 180 degrees. // We can't use one 360 degree arcTo as the start and end point // would be coincident and the computation is degenerate. 
enc.RelArcTo(r, r, 0, false, true, +2*r, 0) enc.RelArcTo(r, r, 0, false, true, -2*r, 0) } enc.ClosePathEndPath() return nil } func genPathData(enc *iconvg.Encoder, adj uint8, pathData string, size float32, offset f32.Vec2) error { if strings.HasSuffix(pathData, "z") { pathData = pathData[:len(pathData)-1] } r := strings.NewReader(pathData) var args [6]float32 op, relative, started := byte(0), false, false for { b, err := r.ReadByte() if err == io.EOF { break } if err != nil { return err } switch { case b == ' ': continue case 'A' <= b && b <= 'Z': op, relative = b, false case 'a' <= b && b <= 'z': op, relative = b, true default: r.UnreadByte() } n := 0 switch op { case 'L', 'l', 'T', 't': n = 2 case 'Q', 'q', 'S', 's': n = 4 case 'C', 'c': n = 6 case 'H', 'h', 'V', 'v': n = 1 case 'M', 'm': n = 2 case 'Z', 'z': default: return fmt.Errorf("unknown opcode %c\n", b) } scan(&args, r, n) normalize(&args, n, op, size, offset, relative) switch op { case 'L': enc.AbsLineTo(args[0], args[1]) case 'l': enc.RelLineTo(args[0], args[1]) case 'T': enc.AbsSmoothQuadTo(args[0], args[1]) case 't': enc.RelSmoothQuadTo(args[0], args[1]) case 'Q': enc.AbsQuadTo(args[0], args[1], args[2], args[3]) case 'q': enc.RelQuadTo(args[0], args[1], args[2], args[3]) case 'S': enc.AbsSmoothCubeTo(args[0], args[1], args[2], args[3]) case 's': enc.RelSmoothCubeTo(args[0], args[1], args[2], args[3]) case 'C': enc.AbsCubeTo(args[0], args[1], args[2], args[3], args[4], args[5]) case 'c': enc.RelCubeTo(args[0], args[1], args[2], args[3], args[4], args[5]) case 'H': enc.AbsHLineTo(args[0]) case 'h': enc.RelHLineTo(args[0]) case 'V': enc.AbsVLineTo(args[0]) case 'v': enc.RelVLineTo(args[0]) case 'M': if !started { started = true enc.StartPath(adj, args[0], args[1]) } else { enc.ClosePathAbsMoveTo(args[0], args[1]) } case 'm': enc.ClosePathRelMoveTo(args[0], args[1]) } } return nil } func scan(args *[6]float32, r *strings.Reader, n int) { for i := 0; i < n; i++ { for { if b, _ := r.ReadByte(); b != ' ' { r.UnreadByte() break } } fmt.Fscanf(r, "%f", &args[i]) } } func atof(s []byte) (float32, error)
{ f, err := strconv.ParseFloat(string(s), 32) if err != nil { return 0, fmt.Errorf("could not parse %q as a float32: %v", s, err) } return float32(f), err }
identifier_body
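The gen.go code above replaces each SVG <circle> with a moveTo to the circle's leftmost point followed by two 180-degree relative arcs, because a single 360-degree arc would have coincident start and end points and be degenerate. A small sketch of the same decomposition written as an SVG path string (sketch only; the actual generator emits IconVG RelArcTo ops rather than SVG text):

def circle_to_path(cx, cy, r):
    # Start at the leftmost point, then draw two half-circle arcs back to it.
    # SVG arc syntax: a rx ry x-rotation large-arc-flag sweep-flag dx dy
    return (
        f"M {cx - r} {cy} "
        f"a {r} {r} 0 0 1 {2 * r} 0 "
        f"a {r} {r} 0 0 1 {-2 * r} 0 Z"
    )

print(circle_to_path(0.0, 0.0, 24.0))
# -> M -24.0 0.0 a 24.0 24.0 0 0 1 48.0 0 a 24.0 24.0 0 0 1 -48.0 0 Z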
gen.go
} func main() { flag.Parse() out.WriteString("// generated by go run gen.go; DO NOT EDIT\n\npackage icons\n\n") f, err := os.Open(*mdicons) if err != nil { log.Fatalf("%v\n\nDid you override the -mdicons flag in icons.go?\n\n", err) } defer f.Close() infos, err := f.Readdir(-1) if err != nil { log.Fatal(err) } names := []string{} for _, info := range infos { if !info.IsDir() { continue } name := info.Name() if name[0] == '.' { continue } names = append(names, name) } sort.Strings(names) for _, name := range names { genDir(name) } fmt.Fprintf(out, "// In total, %d SVG bytes in %d files (%d PNG bytes at 24px * 24px,\n"+ "// %d PNG bytes at 48px * 48px) converted to %d IconVG bytes.\n", totalSVGBytes, totalFiles, totalPNG24Bytes, totalPNG48Bytes, totalIVGBytes) if len(failures) != 0 { out.WriteString("\n/*\nFAILURES:\n\n") for _, failure := range failures { out.WriteString(failure) out.WriteByte('\n') } out.WriteString("\n*/") } raw := out.Bytes() formatted, err := format.Source(raw) if err != nil { log.Fatalf("gofmt failed: %v\n\nGenerated code:\n%s", err, raw) } if err := os.WriteFile("data.go", formatted, 0644); err != nil { log.Fatalf("WriteFile failed: %s\n", err) } // Generate data_test.go. The code immediately above generates data.go. { b := new(bytes.Buffer) b.WriteString("// generated by go run gen.go; DO NOT EDIT\n\npackage icons\n\n") b.WriteString("var list = []struct{ name string; data []byte } {\n") for _, v := range varNames { fmt.Fprintf(b, "{%q, %s},\n", v, v) } b.WriteString("}\n\n") raw := b.Bytes() formatted, err := format.Source(raw) if err != nil { log.Fatalf("gofmt failed: %v\n\nGenerated code:\n%s", err, raw) } if err := os.WriteFile("data_test.go", formatted, 0644); err != nil { log.Fatalf("WriteFile failed: %s\n", err) } } } func genDir(dirName string) { fqPNGDirName := filepath.FromSlash(path.Join(*mdicons, dirName, "1x_web")) fqSVGDirName := filepath.FromSlash(path.Join(*mdicons, dirName, "svg/production")) f, err := os.Open(fqSVGDirName) if err != nil { return } defer f.Close() infos, err := f.Readdir(-1) if err != nil { log.Fatal(err) } baseNames, fileNames, sizes := []string{}, map[string]string{}, map[string]int{} for _, info := range infos { name := info.Name() if !strings.HasPrefix(name, "ic_") || skippedFiles[[2]string{dirName, name}] { continue } size := 0 switch { case strings.HasSuffix(name, "_12px.svg"): size = 12 case strings.HasSuffix(name, "_18px.svg"): size = 18 case strings.HasSuffix(name, "_24px.svg"): size = 24 case strings.HasSuffix(name, "_36px.svg"): size = 36 case strings.HasSuffix(name, "_48px.svg"): size = 48 default: continue } baseName := name[3 : len(name)-9] if prevSize, ok := sizes[baseName]; ok { if size > prevSize { fileNames[baseName] = name sizes[baseName] = size } } else { fileNames[baseName] = name sizes[baseName] = size baseNames = append(baseNames, baseName) } } sort.Strings(baseNames) for _, baseName := range baseNames { fileName := fileNames[baseName] err := genFile(fqSVGDirName, dirName, baseName, fileName, float32(sizes[baseName])) if err == errSkip { continue } if err != nil { failures = append(failures, fmt.Sprintf("%v/svg/production/%v: %v", dirName, fileName, err)) continue } totalPNG24Bytes += pngSize(fqPNGDirName, dirName, baseName, 24) totalPNG48Bytes += pngSize(fqPNGDirName, dirName, baseName, 48) } } func pngSize(fqPNGDirName, dirName, baseName string, targetSize int) int { for _, size := range [...]int{48, 24, 18} { if size > targetSize { continue }
fInfo, err := os.Stat(filepath.Join(fqPNGDirName, fmt.Sprintf("ic_%s_black_%ddp.png", baseName, size))) if err != nil { continue } return int(fInfo.Size()) } failures = append(failures, fmt.Sprintf("no PNG found for %s/1x_web/ic_%s_black_{48,24,18}dp.png", dirName, baseName)) return 0 } type SVG struct { Width float32 `xml:"where,attr"` Height float32 `xml:"height,attr"` ViewBox string `xml:"viewBox,attr"` Paths []Path `xml:"path"` // Some of the SVG files contain <circle> elements, not just <path> // elements. IconVG doesn't have circles per se. Instead, we convert such // circles to be paired arcTo commands, tacked on to the first path. // // In general, this isn't correct if the circles and the path overlap, but // that doesn't happen in the specific case of the Material Design icons. Circles []Circle `xml:"circle"` } type Path struct { D string `xml:"d,attr"` Fill string `xml:"fill,attr"` FillOpacity *float32 `xml:"fill-opacity,attr"` Opacity *float32 `xml:"opacity,attr"` } type Circle struct { Cx float32 `xml:"cx,attr"` Cy float32 `xml:"cy,attr"` R float32 `xml:"r,attr"` } var skippedPaths = map[string]string{ // hardware/svg/production/ic_scanner_48px.svg contains a filled white // rectangle that is overwritten by the subsequent path. // // See https://github.com/google/material-design-icons/issues/490 // // Matches <path fill="#fff" d="M16 34h22v4H16z"/> "M16 34h22v4H16z": "#fff", // device/svg/production/ic_airplanemode_active_48px.svg and // maps/svg/production/ic_flight_48px.svg contain a degenerate path that // contains only one moveTo op. // // See https://github.com/google/material-design-icons/issues/491 // // Matches <path d="M20.36 18"/> "M20.36 18": "", } var skippedFiles = map[[2]string]bool{ // ic_play_circle_filled_white_48px.svg is just the same as // ic_play_circle_filled_48px.svg with an explicit fill="#fff". {"av", "ic_play_circle_filled_white_48px.svg"}: true, } func genFile(fqSVGDirName, dirName, baseName, fileName string, size float32) error { fqFileName := filepath.Join(fqSVGDirName, fileName) svgData, err := os.ReadFile(fqFileName) if err != nil { return err } varName := upperCase(dirName) for _, s := range strings.Split(baseName, "_") { varName += upperCase(s) } fmt.Fprintf(out, "var %s = []byte{", varName) defer fmt.Fprintf(out, "\n}\n\n") varNames = append(varNames, varName) var enc iconvg.Encoder enc.Reset(iconvg.Metadata{ ViewBox: iconvg.Rectangle{ Min: f32.Vec2{-24, -24}, Max: f32.Vec2{+24, +24}, }, Palette: iconvg.DefaultPalette, }) g := &SVG{} if err := xml.Unmarshal(svgData, g); err != nil { return err } var vbx
random_line_split
gen.go
func
() { flag.Parse() out.WriteString("// generated by go run gen.go; DO NOT EDIT\n\npackage icons\n\n") f, err := os.Open(*mdicons) if err != nil { log.Fatalf("%v\n\nDid you override the -mdicons flag in icons.go?\n\n", err) } defer f.Close() infos, err := f.Readdir(-1) if err != nil { log.Fatal(err) } names := []string{} for _, info := range infos { if !info.IsDir() { continue } name := info.Name() if name[0] == '.' { continue } names = append(names, name) } sort.Strings(names) for _, name := range names { genDir(name) } fmt.Fprintf(out, "// In total, %d SVG bytes in %d files (%d PNG bytes at 24px * 24px,\n"+ "// %d PNG bytes at 48px * 48px) converted to %d IconVG bytes.\n", totalSVGBytes, totalFiles, totalPNG24Bytes, totalPNG48Bytes, totalIVGBytes) if len(failures) != 0 { out.WriteString("\n/*\nFAILURES:\n\n") for _, failure := range failures { out.WriteString(failure) out.WriteByte('\n') } out.WriteString("\n*/") } raw := out.Bytes() formatted, err := format.Source(raw) if err != nil { log.Fatalf("gofmt failed: %v\n\nGenerated code:\n%s", err, raw) } if err := os.WriteFile("data.go", formatted, 0644); err != nil { log.Fatalf("WriteFile failed: %s\n", err) } // Generate data_test.go. The code immediately above generates data.go. { b := new(bytes.Buffer) b.WriteString("// generated by go run gen.go; DO NOT EDIT\n\npackage icons\n\n") b.WriteString("var list = []struct{ name string; data []byte } {\n") for _, v := range varNames { fmt.Fprintf(b, "{%q, %s},\n", v, v) } b.WriteString("}\n\n") raw := b.Bytes() formatted, err := format.Source(raw) if err != nil { log.Fatalf("gofmt failed: %v\n\nGenerated code:\n%s", err, raw) } if err := os.WriteFile("data_test.go", formatted, 0644); err != nil { log.Fatalf("WriteFile failed: %s\n", err) } } } func genDir(dirName string) { fqPNGDirName := filepath.FromSlash(path.Join(*mdicons, dirName, "1x_web")) fqSVGDirName := filepath.FromSlash(path.Join(*mdicons, dirName, "svg/production")) f, err := os.Open(fqSVGDirName) if err != nil { return } defer f.Close() infos, err := f.Readdir(-1) if err != nil { log.Fatal(err) } baseNames, fileNames, sizes := []string{}, map[string]string{}, map[string]int{} for _, info := range infos { name := info.Name() if !strings.HasPrefix(name, "ic_") || skippedFiles[[2]string{dirName, name}] { continue } size := 0 switch { case strings.HasSuffix(name, "_12px.svg"): size = 12 case strings.HasSuffix(name, "_18px.svg"): size = 18 case strings.HasSuffix(name, "_24px.svg"): size = 24 case strings.HasSuffix(name, "_36px.svg"): size = 36 case strings.HasSuffix(name, "_48px.svg"): size = 48 default: continue } baseName := name[3 : len(name)-9] if prevSize, ok := sizes[baseName]; ok { if size > prevSize { fileNames[baseName] = name sizes[baseName] = size } } else { fileNames[baseName] = name sizes[baseName] = size baseNames = append(baseNames, baseName) } } sort.Strings(baseNames) for _, baseName := range baseNames { fileName := fileNames[baseName] err := genFile(fqSVGDirName, dirName, baseName, fileName, float32(sizes[baseName])) if err == errSkip { continue } if err != nil { failures = append(failures, fmt.Sprintf("%v/svg/production/%v: %v", dirName, fileName, err)) continue } totalPNG24Bytes += pngSize(fqPNGDirName, dirName, baseName, 24) totalPNG48Bytes += pngSize(fqPNGDirName, dirName, baseName, 48) } } func pngSize(fqPNGDirName, dirName, baseName string, targetSize int) int { for _, size := range [...]int{48, 24, 18} { if size > targetSize { continue } fInfo, err := os.Stat(filepath.Join(fqPNGDirName, 
fmt.Sprintf("ic_%s_black_%ddp.png", baseName, size))) if err != nil { continue } return int(fInfo.Size()) } failures = append(failures, fmt.Sprintf("no PNG found for %s/1x_web/ic_%s_black_{48,24,18}dp.png", dirName, baseName)) return 0 } type SVG struct { Width float32 `xml:"where,attr"` Height float32 `xml:"height,attr"` ViewBox string `xml:"viewBox,attr"` Paths []Path `xml:"path"` // Some of the SVG files contain <circle> elements, not just <path> // elements. IconVG doesn't have circles per se. Instead, we convert such // circles to be paired arcTo commands, tacked on to the first path. // // In general, this isn't correct if the circles and the path overlap, but // that doesn't happen in the specific case of the Material Design icons. Circles []Circle `xml:"circle"` } type Path struct { D string `xml:"d,attr"` Fill string `xml:"fill,attr"` FillOpacity *float32 `xml:"fill-opacity,attr"` Opacity *float32 `xml:"opacity,attr"` } type Circle struct { Cx float32 `xml:"cx,attr"` Cy float32 `xml:"cy,attr"` R float32 `xml:"r,attr"` } var skippedPaths = map[string]string{ // hardware/svg/production/ic_scanner_48px.svg contains a filled white // rectangle that is overwritten by the subsequent path. // // See https://github.com/google/material-design-icons/issues/490 // // Matches <path fill="#fff" d="M16 34h22v4H16z"/> "M16 34h22v4H16z": "#fff", // device/svg/production/ic_airplanemode_active_48px.svg and // maps/svg/production/ic_flight_48px.svg contain a degenerate path that // contains only one moveTo op. // // See https://github.com/google/material-design-icons/issues/491 // // Matches <path d="M20.36 18"/> "M20.36 18": "", } var skippedFiles = map[[2]string]bool{ // ic_play_circle_filled_white_48px.svg is just the same as // ic_play_circle_filled_48px.svg with an explicit fill="#fff". {"av", "ic_play_circle_filled_white_48px.svg"}: true, } func genFile(fqSVGDirName, dirName, baseName, fileName string, size float32) error { fqFileName := filepath.Join(fqSVGDirName, fileName) svgData, err := os.ReadFile(fqFileName) if err != nil { return err } varName := upperCase(dirName) for _, s := range strings.Split(baseName, "_") { varName += upperCase(s) } fmt.Fprintf(out, "var %s = []byte{", varName) defer fmt.Fprintf(out, "\n}\n\n") varNames = append(varNames, varName) var enc iconvg.Encoder enc.Reset(iconvg.Metadata{ ViewBox: iconvg.Rectangle{ Min: f32.Vec2{-24, -24}, Max: f32.Vec2{+24, +24}, }, Palette: iconvg.DefaultPalette, }) g := &SVG{} if err := xml.Unmarshal(svgData, g); err != nil { return err } var vbx
main
identifier_name
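genDir above keeps, for every icon base name, only the largest SVG size it encounters (12, 18, 24, 36, or 48 px) before generating output. A tiny Python sketch of that selection rule, using made-up input tuples purely for illustration:

def pick_largest(entries):
    # entries: (base_name, size, file_name); keep the file with the largest size per base name.
    best = {}
    for base, size, name in entries:
        if base not in best or size > best[base][0]:
            best[base] = (size, name)
    return {base: name for base, (_, name) in best.items()}

entries = [("alarm", 24, "ic_alarm_24px.svg"), ("alarm", 48, "ic_alarm_48px.svg")]
assert pick_largest(entries) == {"alarm": "ic_alarm_48px.svg"}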
gen.go
func main() { flag.Parse() out.WriteString("// generated by go run gen.go; DO NOT EDIT\n\npackage icons\n\n") f, err := os.Open(*mdicons) if err != nil
defer f.Close() infos, err := f.Readdir(-1) if err != nil { log.Fatal(err) } names := []string{} for _, info := range infos { if !info.IsDir() { continue } name := info.Name() if name[0] == '.' { continue } names = append(names, name) } sort.Strings(names) for _, name := range names { genDir(name) } fmt.Fprintf(out, "// In total, %d SVG bytes in %d files (%d PNG bytes at 24px * 24px,\n"+ "// %d PNG bytes at 48px * 48px) converted to %d IconVG bytes.\n", totalSVGBytes, totalFiles, totalPNG24Bytes, totalPNG48Bytes, totalIVGBytes) if len(failures) != 0 { out.WriteString("\n/*\nFAILURES:\n\n") for _, failure := range failures { out.WriteString(failure) out.WriteByte('\n') } out.WriteString("\n*/") } raw := out.Bytes() formatted, err := format.Source(raw) if err != nil { log.Fatalf("gofmt failed: %v\n\nGenerated code:\n%s", err, raw) } if err := os.WriteFile("data.go", formatted, 0644); err != nil { log.Fatalf("WriteFile failed: %s\n", err) } // Generate data_test.go. The code immediately above generates data.go. { b := new(bytes.Buffer) b.WriteString("// generated by go run gen.go; DO NOT EDIT\n\npackage icons\n\n") b.WriteString("var list = []struct{ name string; data []byte } {\n") for _, v := range varNames { fmt.Fprintf(b, "{%q, %s},\n", v, v) } b.WriteString("}\n\n") raw := b.Bytes() formatted, err := format.Source(raw) if err != nil { log.Fatalf("gofmt failed: %v\n\nGenerated code:\n%s", err, raw) } if err := os.WriteFile("data_test.go", formatted, 0644); err != nil { log.Fatalf("WriteFile failed: %s\n", err) } } } func genDir(dirName string) { fqPNGDirName := filepath.FromSlash(path.Join(*mdicons, dirName, "1x_web")) fqSVGDirName := filepath.FromSlash(path.Join(*mdicons, dirName, "svg/production")) f, err := os.Open(fqSVGDirName) if err != nil { return } defer f.Close() infos, err := f.Readdir(-1) if err != nil { log.Fatal(err) } baseNames, fileNames, sizes := []string{}, map[string]string{}, map[string]int{} for _, info := range infos { name := info.Name() if !strings.HasPrefix(name, "ic_") || skippedFiles[[2]string{dirName, name}] { continue } size := 0 switch { case strings.HasSuffix(name, "_12px.svg"): size = 12 case strings.HasSuffix(name, "_18px.svg"): size = 18 case strings.HasSuffix(name, "_24px.svg"): size = 24 case strings.HasSuffix(name, "_36px.svg"): size = 36 case strings.HasSuffix(name, "_48px.svg"): size = 48 default: continue } baseName := name[3 : len(name)-9] if prevSize, ok := sizes[baseName]; ok { if size > prevSize { fileNames[baseName] = name sizes[baseName] = size } } else { fileNames[baseName] = name sizes[baseName] = size baseNames = append(baseNames, baseName) } } sort.Strings(baseNames) for _, baseName := range baseNames { fileName := fileNames[baseName] err := genFile(fqSVGDirName, dirName, baseName, fileName, float32(sizes[baseName])) if err == errSkip { continue } if err != nil { failures = append(failures, fmt.Sprintf("%v/svg/production/%v: %v", dirName, fileName, err)) continue } totalPNG24Bytes += pngSize(fqPNGDirName, dirName, baseName, 24) totalPNG48Bytes += pngSize(fqPNGDirName, dirName, baseName, 48) } } func pngSize(fqPNGDirName, dirName, baseName string, targetSize int) int { for _, size := range [...]int{48, 24, 18} { if size > targetSize { continue } fInfo, err := os.Stat(filepath.Join(fqPNGDirName, fmt.Sprintf("ic_%s_black_%ddp.png", baseName, size))) if err != nil { continue } return int(fInfo.Size()) } failures = append(failures, fmt.Sprintf("no PNG found for %s/1x_web/ic_%s_black_{48,24,18}dp.png", dirName, baseName)) return 0 } type SVG 
struct { Width float32 `xml:"where,attr"` Height float32 `xml:"height,attr"` ViewBox string `xml:"viewBox,attr"` Paths []Path `xml:"path"` // Some of the SVG files contain <circle> elements, not just <path> // elements. IconVG doesn't have circles per se. Instead, we convert such // circles to be paired arcTo commands, tacked on to the first path. // // In general, this isn't correct if the circles and the path overlap, but // that doesn't happen in the specific case of the Material Design icons. Circles []Circle `xml:"circle"` } type Path struct { D string `xml:"d,attr"` Fill string `xml:"fill,attr"` FillOpacity *float32 `xml:"fill-opacity,attr"` Opacity *float32 `xml:"opacity,attr"` } type Circle struct { Cx float32 `xml:"cx,attr"` Cy float32 `xml:"cy,attr"` R float32 `xml:"r,attr"` } var skippedPaths = map[string]string{ // hardware/svg/production/ic_scanner_48px.svg contains a filled white // rectangle that is overwritten by the subsequent path. // // See https://github.com/google/material-design-icons/issues/490 // // Matches <path fill="#fff" d="M16 34h22v4H16z"/> "M16 34h22v4H16z": "#fff", // device/svg/production/ic_airplanemode_active_48px.svg and // maps/svg/production/ic_flight_48px.svg contain a degenerate path that // contains only one moveTo op. // // See https://github.com/google/material-design-icons/issues/491 // // Matches <path d="M20.36 18"/> "M20.36 18": "", } var skippedFiles = map[[2]string]bool{ // ic_play_circle_filled_white_48px.svg is just the same as // ic_play_circle_filled_48px.svg with an explicit fill="#fff". {"av", "ic_play_circle_filled_white_48px.svg"}: true, } func genFile(fqSVGDirName, dirName, baseName, fileName string, size float32) error { fqFileName := filepath.Join(fqSVGDirName, fileName) svgData, err := os.ReadFile(fqFileName) if err != nil { return err } varName := upperCase(dirName) for _, s := range strings.Split(baseName, "_") { varName += upperCase(s) } fmt.Fprintf(out, "var %s = []byte{", varName) defer fmt.Fprintf(out, "\n}\n\n") varNames = append(varNames, varName) var enc iconvg.Encoder enc.Reset(iconvg.Metadata{ ViewBox: iconvg.Rectangle{ Min: f32.Vec2{-24, -24}, Max: f32.Vec2{+24, +24}, }, Palette: iconvg.DefaultPalette, }) g := &SVG{} if err := xml.Unmarshal(svgData, g); err != nil { return err } var v
{ log.Fatalf("%v\n\nDid you override the -mdicons flag in icons.go?\n\n", err) }
conditional_block
envconfig.go
, ErrInvalidSpecification } s = s.Elem() if s.Kind() != reflect.Struct { return nil, ErrInvalidSpecification } typeOfSpec := s.Type() // over allocate an info array, we will extend if needed later infos := make([]varInfo, 0, s.NumField()) for i := 0; i < s.NumField(); i++ { f := s.Field(i) ftype := typeOfSpec.Field(i) if !f.CanSet() || isTrue(ftype.Tag.Get("ignored")) { continue } for f.Kind() == reflect.Ptr { if f.IsNil() { if f.Type().Elem().Kind() != reflect.Struct { // nil pointer to a non-struct: leave it alone break } // nil pointer to struct: create a zero instance f.Set(reflect.New(f.Type().Elem())) } f = f.Elem() } // Capture information about the config variable info := varInfo{ Name: ftype.Name, Field: f, Tags: ftype.Tag, Alt: strings.ToUpper(ftype.Tag.Get("envconfig")), } // Default to the field name as the env var name (will be upcased) info.Key = info.Name // Best effort to un-pick camel casing as separate words if isTrue(ftype.Tag.Get("split_words")) { words := gatherRegexp.FindAllStringSubmatch(ftype.Name, -1) if len(words) > 0 { var name []string for _, words := range words { if m := acronymRegexp.FindStringSubmatch(words[0]); len(m) == 3 { name = append(name, m[1], m[2]) } else { name = append(name, words[0]) } } info.Key = strings.Join(name, "_") } } if info.Alt != "" { info.Key = info.Alt if isInsideStructSlice { // we don't want this to be read, since we're inside of a struct slice, // each slice element will have same Alt and thus they would overwrite themselves info.Alt = "" } } if prefix != "" { info.Key = fmt.Sprintf("%s_%s", prefix, info.Key) } info.Key = strings.ToUpper(info.Key) if decoderFrom(f) != nil || setterFrom(f) != nil || textUnmarshaler(f) != nil || binaryUnmarshaler(f) != nil { // there's a decoder defined, no further processing needed infos = append(infos, info) } else if f.Kind() == reflect.Struct { // it's a struct without a specific decoder set innerPrefix := prefix if !ftype.Anonymous { innerPrefix = info.Key } embeddedPtr := f.Addr().Interface() embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr, env, isInsideStructSlice, forUsage) if err != nil { return nil, err } infos = append(infos, embeddedInfos...) } else if arePointers := isSliceOfStructPtrs(f); arePointers || isSliceOfStructs(f) { // it's a slice of structs var ( l int prefixFormat prefixFormatter ) if forUsage
else { var err error // let's find out how many are defined by the env vars, and gather info of each one of them if l, err = sliceLen(info.Key, env); err != nil { return nil, err } prefixFormat = processPrefix(info.Key) // if no keys, check the alternative keys, unless we're inside of a slice if l == 0 && info.Alt != "" && !isInsideStructSlice { if l, err = sliceLen(info.Alt, env); err != nil { return nil, err } prefixFormat = processPrefix(info.Alt) } } f.Set(reflect.MakeSlice(f.Type(), l, l)) for i := 0; i < l; i++ { var structPtrValue reflect.Value if arePointers { f.Index(i).Set(reflect.New(f.Type().Elem().Elem())) structPtrValue = f.Index(i) } else { structPtrValue = f.Index(i).Addr() } embeddedInfos, err := gatherInfo(prefixFormat.format(i), structPtrValue.Interface(), env, true, forUsage) if err != nil { return nil, err } infos = append(infos, embeddedInfos...) } } else { infos = append(infos, info) } } return infos, nil } // Unused returns the slice of environment vars that have the prefix provided but we don't know how or want to parse. // This is likely only meaningful with a non-empty prefix. func Unused(prefix string, spec interface{}) ([]string, error) { spec = copySpec(spec) env := environment() infos, err := gatherInfoForProcessing(prefix, spec, env) if err != nil { return nil, err } vars := make(map[string]struct{}) for _, info := range infos { vars[info.Key] = struct{}{} } if prefix != "" { prefix = strings.ToUpper(prefix) + "_" } var unused []string for key := range env { if !strings.HasPrefix(key, prefix) { continue } if _, found := vars[key]; !found { unused = append(unused, key) } } return unused, nil } // Process populates the specified struct based on environment variables func Process(prefix string, spec interface{}) error { env := environment() infos, err := gatherInfoForProcessing(prefix, spec, env) for _, info := range infos { value, ok := env[info.Key] if !ok && info.Alt != "" { value, ok = env[info.Alt] } def := info.Tags.Get("default") if def != "" && !ok { value = def } req := info.Tags.Get("required") if !ok && def == "" { if isTrue(req) { key := info.Key if info.Alt != "" { key = info.Alt } return fmt.Errorf("required key %s missing value", key) } continue } err = processField(value, info.Field) if err != nil { return &ParseError{ KeyName: info.Key, FieldName: info.Name, TypeName: info.Field.Type().String(), Value: value, Err: err, } } } return err } // MustProcess is the same as Process but panics if an error occurs func MustProcess(prefix string, spec interface{}) { if err := Process(prefix, spec); err != nil { panic(err) } } func processField(value string, field reflect.Value) error { typ := field.Type() decoder := decoderFrom(field) if decoder != nil { return decoder.Decode(value) } // look for Set method if Decode not defined setter := setterFrom(field) if setter != nil { return setter.Set(value) } if t := textUnmarshaler(field); t != nil { return t.UnmarshalText([]byte(value)) } if b := binaryUnmarshaler(field); b != nil { return b.UnmarshalBinary([]byte(value)) } if typ.Kind() == reflect.Ptr { typ = typ.Elem() if field.IsNil() { field.Set(reflect.New(typ)) } field = field.Elem() } switch typ.Kind() { case reflect.String: field.SetString(value) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: var ( val int64 err error ) if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" { var d time.Duration d, err = time.ParseDuration(value) val = int64(d) } else { val, err = strconv.ParseInt(value, 0, 
typ.Bits()) } if err != nil { return err } field.SetInt(val) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: val, err := strconv.ParseUint(value, 0, typ.Bits()) if err != nil { return err } field.SetUint(val) case reflect.Bool: val, err := strconv.ParseBool(value) if err != nil { return err } field.SetBool(val)
{ // it's just for usage so we don't know how many of them can be out there // so we'll print one info with a generic [N] index l = 1 prefixFormat = usagePrefix{info.Key, "[N]"} }
conditional_block
envconfig.go
) } } f.Set(reflect.MakeSlice(f.Type(), l, l)) for i := 0; i < l; i++ { var structPtrValue reflect.Value if arePointers { f.Index(i).Set(reflect.New(f.Type().Elem().Elem())) structPtrValue = f.Index(i) } else { structPtrValue = f.Index(i).Addr() } embeddedInfos, err := gatherInfo(prefixFormat.format(i), structPtrValue.Interface(), env, true, forUsage) if err != nil { return nil, err } infos = append(infos, embeddedInfos...) } } else { infos = append(infos, info) } } return infos, nil } // Unused returns the slice of environment vars that have the prefix provided but we don't know how or want to parse. // This is likely only meaningful with a non-empty prefix. func Unused(prefix string, spec interface{}) ([]string, error) { spec = copySpec(spec) env := environment() infos, err := gatherInfoForProcessing(prefix, spec, env) if err != nil { return nil, err } vars := make(map[string]struct{}) for _, info := range infos { vars[info.Key] = struct{}{} } if prefix != "" { prefix = strings.ToUpper(prefix) + "_" } var unused []string for key := range env { if !strings.HasPrefix(key, prefix) { continue } if _, found := vars[key]; !found { unused = append(unused, key) } } return unused, nil } // Process populates the specified struct based on environment variables func Process(prefix string, spec interface{}) error { env := environment() infos, err := gatherInfoForProcessing(prefix, spec, env) for _, info := range infos { value, ok := env[info.Key] if !ok && info.Alt != "" { value, ok = env[info.Alt] } def := info.Tags.Get("default") if def != "" && !ok { value = def } req := info.Tags.Get("required") if !ok && def == "" { if isTrue(req) { key := info.Key if info.Alt != "" { key = info.Alt } return fmt.Errorf("required key %s missing value", key) } continue } err = processField(value, info.Field) if err != nil { return &ParseError{ KeyName: info.Key, FieldName: info.Name, TypeName: info.Field.Type().String(), Value: value, Err: err, } } } return err } // MustProcess is the same as Process but panics if an error occurs func MustProcess(prefix string, spec interface{}) { if err := Process(prefix, spec); err != nil { panic(err) } } func processField(value string, field reflect.Value) error { typ := field.Type() decoder := decoderFrom(field) if decoder != nil { return decoder.Decode(value) } // look for Set method if Decode not defined setter := setterFrom(field) if setter != nil { return setter.Set(value) } if t := textUnmarshaler(field); t != nil { return t.UnmarshalText([]byte(value)) } if b := binaryUnmarshaler(field); b != nil { return b.UnmarshalBinary([]byte(value)) } if typ.Kind() == reflect.Ptr { typ = typ.Elem() if field.IsNil() { field.Set(reflect.New(typ)) } field = field.Elem() } switch typ.Kind() { case reflect.String: field.SetString(value) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: var ( val int64 err error ) if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" { var d time.Duration d, err = time.ParseDuration(value) val = int64(d) } else { val, err = strconv.ParseInt(value, 0, typ.Bits()) } if err != nil { return err } field.SetInt(val) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: val, err := strconv.ParseUint(value, 0, typ.Bits()) if err != nil { return err } field.SetUint(val) case reflect.Bool: val, err := strconv.ParseBool(value) if err != nil { return err } field.SetBool(val) case reflect.Float32, reflect.Float64: val, err := strconv.ParseFloat(value, typ.Bits()) if err != nil { 
return err } field.SetFloat(val) case reflect.Slice: sl := reflect.MakeSlice(typ, 0, 0) if typ.Elem().Kind() == reflect.Uint8 { sl = reflect.ValueOf([]byte(value)) } else if len(strings.TrimSpace(value)) != 0 { vals := strings.Split(value, ",") sl = reflect.MakeSlice(typ, len(vals), len(vals)) for i, val := range vals { err := processField(val, sl.Index(i)) if err != nil { return err } } } field.Set(sl) case reflect.Map: mp := reflect.MakeMap(typ) if len(strings.TrimSpace(value)) != 0 { pairs := strings.Split(value, ",") for _, pair := range pairs { kvpair := strings.Split(pair, ":") if len(kvpair) != 2 { return fmt.Errorf("invalid map item: %q", pair) } k := reflect.New(typ.Key()).Elem() err := processField(kvpair[0], k) if err != nil { return err } v := reflect.New(typ.Elem()).Elem() err = processField(kvpair[1], v) if err != nil { return err } mp.SetMapIndex(k, v) } } field.Set(mp) } return nil } func interfaceFrom(field reflect.Value, fn func(interface{}, *bool)) { // it may be impossible for a struct field to fail this check if !field.CanInterface() { return } var ok bool fn(field.Interface(), &ok) if !ok && field.CanAddr() { fn(field.Addr().Interface(), &ok) } } func decoderFrom(field reflect.Value) (d Decoder) { interfaceFrom(field, func(v interface{}, ok *bool) { d, *ok = v.(Decoder) }) return d } func setterFrom(field reflect.Value) (s Setter) { interfaceFrom(field, func(v interface{}, ok *bool) { s, *ok = v.(Setter) }) return s } func textUnmarshaler(field reflect.Value) (t encoding.TextUnmarshaler) { interfaceFrom(field, func(v interface{}, ok *bool) { t, *ok = v.(encoding.TextUnmarshaler) }) return t } func binaryUnmarshaler(field reflect.Value) (b encoding.BinaryUnmarshaler) { interfaceFrom(field, func(v interface{}, ok *bool) { b, *ok = v.(encoding.BinaryUnmarshaler) }) return b } func isTrue(s string) bool { b, _ := strconv.ParseBool(s) return b } // sliceLen returns the len of a slice of structs defined in the environment config func sliceLen(prefix string, env map[string]string) (int, error) { prefix = prefix + "_" indexes := map[int]bool{} for k := range env { if !strings.HasPrefix(k, prefix) { continue } var digits string for i := len(prefix); i < len(k); i++ { if k[i] >= '0' && k[i] <= '9' { digits += k[i : i+1] } else if k[i] == '_' { break } else { return 0, fmt.Errorf("key %s has prefix %s but doesn't follow an integer value followed by an underscore (unexpected char %q)", k, prefix, k[i]) } } if digits == "" { return 0, fmt.Errorf("key %s has prefix %s but doesn't follow an integer value followed by an underscore (no digits found)", k, prefix) } index, err := strconv.Atoi(digits) if err != nil { return 0, fmt.Errorf("can't parse index in %s: %s", k, err) } indexes[index] = true } for i := 0; i < len(indexes); i++ { if _, ok := indexes[i]; !ok { return 0, fmt.Errorf("prefix %s defines %d indexes, but index %d is unset: indexes must start at 0 and be consecutive", prefix, len(indexes), i) } } return len(indexes), nil } func
isSliceOfStructs
identifier_name
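With the split_words tag, the envconfig code above un-picks camel-cased field names into underscore-separated words, splitting a leading acronym run off the word that follows it, and then upper-cases the result to form the env var key. A rough Python equivalent of that splitting; the regular expressions mirror the spirit of the Go gatherRegexp/acronymRegexp, but their exact patterns are an assumption:

import re

# A word is a run of non-uppercase characters, or an uppercase run with trailing lowercase.
GATHER = re.compile(r"[^A-Z]+|[A-Z]+[^A-Z]*")
# An acronym glued to the next word, e.g. "HTTPServer" -> "HTTP" + "Server".
ACRONYM = re.compile(r"([A-Z]+)([A-Z][^A-Z]+)")

def split_words_key(field_name):
    words = []
    for word in GATHER.findall(field_name):
        m = ACRONYM.fullmatch(word)
        words.extend(m.groups() if m else [word])
    return "_".join(words).upper()

print(split_words_key("HTTPServerPort"))  # HTTP_SERVER_PORT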
envconfig.go
reflect.Ptr { if f.IsNil() { if f.Type().Elem().Kind() != reflect.Struct { // nil pointer to a non-struct: leave it alone break } // nil pointer to struct: create a zero instance f.Set(reflect.New(f.Type().Elem())) } f = f.Elem() } // Capture information about the config variable info := varInfo{ Name: ftype.Name, Field: f, Tags: ftype.Tag, Alt: strings.ToUpper(ftype.Tag.Get("envconfig")), } // Default to the field name as the env var name (will be upcased) info.Key = info.Name // Best effort to un-pick camel casing as separate words if isTrue(ftype.Tag.Get("split_words")) { words := gatherRegexp.FindAllStringSubmatch(ftype.Name, -1) if len(words) > 0 { var name []string for _, words := range words { if m := acronymRegexp.FindStringSubmatch(words[0]); len(m) == 3 { name = append(name, m[1], m[2]) } else { name = append(name, words[0]) } } info.Key = strings.Join(name, "_") } } if info.Alt != "" { info.Key = info.Alt if isInsideStructSlice { // we don't want this to be read, since we're inside of a struct slice, // each slice element will have same Alt and thus they would overwrite themselves info.Alt = "" } } if prefix != "" { info.Key = fmt.Sprintf("%s_%s", prefix, info.Key) } info.Key = strings.ToUpper(info.Key) if decoderFrom(f) != nil || setterFrom(f) != nil || textUnmarshaler(f) != nil || binaryUnmarshaler(f) != nil { // there's a decoder defined, no further processing needed infos = append(infos, info) } else if f.Kind() == reflect.Struct { // it's a struct without a specific decoder set innerPrefix := prefix if !ftype.Anonymous { innerPrefix = info.Key } embeddedPtr := f.Addr().Interface() embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr, env, isInsideStructSlice, forUsage) if err != nil { return nil, err } infos = append(infos, embeddedInfos...) } else if arePointers := isSliceOfStructPtrs(f); arePointers || isSliceOfStructs(f) { // it's a slice of structs var ( l int prefixFormat prefixFormatter ) if forUsage { // it's just for usage so we don't know how many of them can be out there // so we'll print one info with a generic [N] index l = 1 prefixFormat = usagePrefix{info.Key, "[N]"} } else { var err error // let's find out how many are defined by the env vars, and gather info of each one of them if l, err = sliceLen(info.Key, env); err != nil { return nil, err } prefixFormat = processPrefix(info.Key) // if no keys, check the alternative keys, unless we're inside of a slice if l == 0 && info.Alt != "" && !isInsideStructSlice { if l, err = sliceLen(info.Alt, env); err != nil { return nil, err } prefixFormat = processPrefix(info.Alt) } } f.Set(reflect.MakeSlice(f.Type(), l, l)) for i := 0; i < l; i++ { var structPtrValue reflect.Value if arePointers { f.Index(i).Set(reflect.New(f.Type().Elem().Elem())) structPtrValue = f.Index(i) } else { structPtrValue = f.Index(i).Addr() } embeddedInfos, err := gatherInfo(prefixFormat.format(i), structPtrValue.Interface(), env, true, forUsage) if err != nil { return nil, err } infos = append(infos, embeddedInfos...) } } else { infos = append(infos, info) } } return infos, nil } // Unused returns the slice of environment vars that have the prefix provided but we don't know how or want to parse. // This is likely only meaningful with a non-empty prefix. 
func Unused(prefix string, spec interface{}) ([]string, error) { spec = copySpec(spec) env := environment() infos, err := gatherInfoForProcessing(prefix, spec, env) if err != nil { return nil, err } vars := make(map[string]struct{}) for _, info := range infos { vars[info.Key] = struct{}{} } if prefix != "" { prefix = strings.ToUpper(prefix) + "_" } var unused []string for key := range env { if !strings.HasPrefix(key, prefix) { continue } if _, found := vars[key]; !found { unused = append(unused, key) } } return unused, nil } // Process populates the specified struct based on environment variables func Process(prefix string, spec interface{}) error { env := environment() infos, err := gatherInfoForProcessing(prefix, spec, env) for _, info := range infos { value, ok := env[info.Key] if !ok && info.Alt != "" { value, ok = env[info.Alt] } def := info.Tags.Get("default") if def != "" && !ok { value = def } req := info.Tags.Get("required") if !ok && def == "" { if isTrue(req) { key := info.Key if info.Alt != "" { key = info.Alt } return fmt.Errorf("required key %s missing value", key) } continue } err = processField(value, info.Field) if err != nil { return &ParseError{ KeyName: info.Key, FieldName: info.Name, TypeName: info.Field.Type().String(), Value: value, Err: err, } } } return err } // MustProcess is the same as Process but panics if an error occurs func MustProcess(prefix string, spec interface{}) { if err := Process(prefix, spec); err != nil { panic(err) } } func processField(value string, field reflect.Value) error { typ := field.Type() decoder := decoderFrom(field) if decoder != nil { return decoder.Decode(value) } // look for Set method if Decode not defined setter := setterFrom(field) if setter != nil { return setter.Set(value) } if t := textUnmarshaler(field); t != nil { return t.UnmarshalText([]byte(value)) } if b := binaryUnmarshaler(field); b != nil { return b.UnmarshalBinary([]byte(value)) } if typ.Kind() == reflect.Ptr { typ = typ.Elem() if field.IsNil() { field.Set(reflect.New(typ)) } field = field.Elem() } switch typ.Kind() { case reflect.String: field.SetString(value) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: var ( val int64 err error ) if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" { var d time.Duration d, err = time.ParseDuration(value) val = int64(d) } else { val, err = strconv.ParseInt(value, 0, typ.Bits()) } if err != nil { return err } field.SetInt(val) case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: val, err := strconv.ParseUint(value, 0, typ.Bits()) if err != nil { return err } field.SetUint(val) case reflect.Bool: val, err := strconv.ParseBool(value) if err != nil { return err } field.SetBool(val) case reflect.Float32, reflect.Float64: val, err := strconv.ParseFloat(value, typ.Bits()) if err != nil { return err } field.SetFloat(val) case reflect.Slice: sl := reflect.MakeSlice(typ, 0, 0) if typ.Elem().Kind() == reflect.Uint8 {
sl = reflect.ValueOf([]byte(value)) } else if len(strings.TrimSpace(value)) != 0 { vals := strings.Split(value, ",") sl = reflect.MakeSlice(typ, len(vals), len(vals)) for i, val := range vals {
random_line_split
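processField above fills slice-valued fields from a comma-separated env value and map-valued fields from comma-separated key:value pairs. A small Python sketch of that string-level convention (the per-element type conversion, which the Go code performs recursively, is omitted here):

def parse_slice(value):
    # "a,b,c" -> ["a", "b", "c"]; an empty or whitespace-only value yields an empty list.
    return value.split(",") if value.strip() else []

def parse_map(value):
    # "k1:v1,k2:v2" -> {"k1": "v1", "k2": "v2"}; malformed pairs are rejected.
    result = {}
    if not value.strip():
        return result
    for pair in value.split(","):
        kv = pair.split(":")
        if len(kv) != 2:
            raise ValueError(f"invalid map item: {pair!r}")
        result[kv[0]] = kv[1]
    return result

assert parse_slice("8080,8081") == ["8080", "8081"]
assert parse_map("host:localhost,port:5432") == {"host": "localhost", "port": "5432"}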
envconfig.go
func gatherInfoForProcessing(prefix string, spec interface{}, env map[string]string) ([]varInfo, error) { return gatherInfo(prefix, spec, env, false, false) } // gatherInfo gathers information about the specified struct, use gatherInfoForUsage or gatherInfoForProcessing for calling it func gatherInfo(prefix string, spec interface{}, env map[string]string, isInsideStructSlice, forUsage bool) ([]varInfo, error) { s := reflect.ValueOf(spec) if s.Kind() != reflect.Ptr { return nil, ErrInvalidSpecification } s = s.Elem() if s.Kind() != reflect.Struct { return nil, ErrInvalidSpecification } typeOfSpec := s.Type() // over allocate an info array, we will extend if needed later infos := make([]varInfo, 0, s.NumField()) for i := 0; i < s.NumField(); i++ { f := s.Field(i) ftype := typeOfSpec.Field(i) if !f.CanSet() || isTrue(ftype.Tag.Get("ignored")) { continue } for f.Kind() == reflect.Ptr { if f.IsNil() { if f.Type().Elem().Kind() != reflect.Struct { // nil pointer to a non-struct: leave it alone break } // nil pointer to struct: create a zero instance f.Set(reflect.New(f.Type().Elem())) } f = f.Elem() } // Capture information about the config variable info := varInfo{ Name: ftype.Name, Field: f, Tags: ftype.Tag, Alt: strings.ToUpper(ftype.Tag.Get("envconfig")), } // Default to the field name as the env var name (will be upcased) info.Key = info.Name // Best effort to un-pick camel casing as separate words if isTrue(ftype.Tag.Get("split_words")) { words := gatherRegexp.FindAllStringSubmatch(ftype.Name, -1) if len(words) > 0 { var name []string for _, words := range words { if m := acronymRegexp.FindStringSubmatch(words[0]); len(m) == 3 { name = append(name, m[1], m[2]) } else { name = append(name, words[0]) } } info.Key = strings.Join(name, "_") } } if info.Alt != "" { info.Key = info.Alt if isInsideStructSlice { // we don't want this to be read, since we're inside of a struct slice, // each slice element will have same Alt and thus they would overwrite themselves info.Alt = "" } } if prefix != "" { info.Key = fmt.Sprintf("%s_%s", prefix, info.Key) } info.Key = strings.ToUpper(info.Key) if decoderFrom(f) != nil || setterFrom(f) != nil || textUnmarshaler(f) != nil || binaryUnmarshaler(f) != nil { // there's a decoder defined, no further processing needed infos = append(infos, info) } else if f.Kind() == reflect.Struct { // it's a struct without a specific decoder set innerPrefix := prefix if !ftype.Anonymous { innerPrefix = info.Key } embeddedPtr := f.Addr().Interface() embeddedInfos, err := gatherInfo(innerPrefix, embeddedPtr, env, isInsideStructSlice, forUsage) if err != nil { return nil, err } infos = append(infos, embeddedInfos...) 
} else if arePointers := isSliceOfStructPtrs(f); arePointers || isSliceOfStructs(f) { // it's a slice of structs var ( l int prefixFormat prefixFormatter ) if forUsage { // it's just for usage so we don't know how many of them can be out there // so we'll print one info with a generic [N] index l = 1 prefixFormat = usagePrefix{info.Key, "[N]"} } else { var err error // let's find out how many are defined by the env vars, and gather info of each one of them if l, err = sliceLen(info.Key, env); err != nil { return nil, err } prefixFormat = processPrefix(info.Key) // if no keys, check the alternative keys, unless we're inside of a slice if l == 0 && info.Alt != "" && !isInsideStructSlice { if l, err = sliceLen(info.Alt, env); err != nil { return nil, err } prefixFormat = processPrefix(info.Alt) } } f.Set(reflect.MakeSlice(f.Type(), l, l)) for i := 0; i < l; i++ { var structPtrValue reflect.Value if arePointers { f.Index(i).Set(reflect.New(f.Type().Elem().Elem())) structPtrValue = f.Index(i) } else { structPtrValue = f.Index(i).Addr() } embeddedInfos, err := gatherInfo(prefixFormat.format(i), structPtrValue.Interface(), env, true, forUsage) if err != nil { return nil, err } infos = append(infos, embeddedInfos...) } } else { infos = append(infos, info) } } return infos, nil } // Unused returns the slice of environment vars that have the prefix provided but we don't know how or want to parse. // This is likely only meaningful with a non-empty prefix. func Unused(prefix string, spec interface{}) ([]string, error) { spec = copySpec(spec) env := environment() infos, err := gatherInfoForProcessing(prefix, spec, env) if err != nil { return nil, err } vars := make(map[string]struct{}) for _, info := range infos { vars[info.Key] = struct{}{} } if prefix != "" { prefix = strings.ToUpper(prefix) + "_" } var unused []string for key := range env { if !strings.HasPrefix(key, prefix) { continue } if _, found := vars[key]; !found { unused = append(unused, key) } } return unused, nil } // Process populates the specified struct based on environment variables func Process(prefix string, spec interface{}) error { env := environment() infos, err := gatherInfoForProcessing(prefix, spec, env) for _, info := range infos { value, ok := env[info.Key] if !ok && info.Alt != "" { value, ok = env[info.Alt] } def := info.Tags.Get("default") if def != "" && !ok { value = def } req := info.Tags.Get("required") if !ok && def == "" { if isTrue(req) { key := info.Key if info.Alt != "" { key = info.Alt } return fmt.Errorf("required key %s missing value", key) } continue } err = processField(value, info.Field) if err != nil { return &ParseError{ KeyName: info.Key, FieldName: info.Name, TypeName: info.Field.Type().String(), Value: value, Err: err, } } } return err } // MustProcess is the same as Process but panics if an error occurs func MustProcess(prefix string, spec interface{}) { if err := Process(prefix, spec); err != nil { panic(err) } } func processField(value string, field reflect.Value) error { typ := field.Type() decoder := decoderFrom(field) if decoder != nil { return decoder.Decode(value) } // look for Set method if Decode not defined setter := setterFrom(field) if setter != nil { return setter.Set(value) } if t := textUnmarshaler(field); t != nil { return t.UnmarshalText([]byte(value)) } if b := binaryUnmarshaler(field); b != nil { return b.UnmarshalBinary([]byte(value)) } if typ.Kind() == reflect.Ptr { typ = typ.Elem() if field.IsNil() { field.Set(reflect.New(typ)) } field = field.Elem() } switch typ.Kind() { case 
reflect.String: field.SetString(value) case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: var ( val int64 err error ) if field.Kind() == reflect.Int64 && typ.PkgPath() == "time" && typ.Name() == "Duration" { var d time.Duration d, err = time.ParseDuration(value) val = int64(d) } else { val,
{ return gatherInfo(prefix, spec, map[string]string{}, false, true) }
identifier_body
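The Go sample above walks the spec struct with reflection, derives an upper-cased, prefix-qualified key for each field, and fills the field from the environment, keeping a default and erroring only when a required key is missing. As a rough, hedged illustration of the same idea in Python (the Config fields, the MYAPP prefix, and the process helper below are invented for this sketch and are not part of the envconfig package):

import os
from dataclasses import dataclass, fields

@dataclass
class Config:
    host: str = "localhost"   # used when MYAPP_HOST is unset
    port: int = 8080
    debug: bool = False

def process(prefix: str, cfg: Config) -> Config:
    # Mirror the key construction above: PREFIX_FIELDNAME, upper-cased.
    for f in fields(cfg):
        key = f"{prefix}_{f.name}".upper() if prefix else f.name.upper()
        raw = os.environ.get(key)
        if raw is None:
            continue                                  # keep the default value
        current = getattr(cfg, f.name)
        if isinstance(current, bool):
            setattr(cfg, f.name, raw.lower() in ("1", "true", "yes", "on"))
        else:
            setattr(cfg, f.name, type(current)(raw))  # int("9000"), str(...)
    return cfg

# e.g. MYAPP_PORT=9000 MYAPP_DEBUG=true -> Config(host='localhost', port=9000, debug=True)
print(process("myapp", Config()))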
scan_nosmooth.py
if self.Asimov: norm_file = work_dir + '/DataFiles/Misc/P8UCVA_norm.npy' f_global.use_template_normalization_file(norm_file,key_suffix='-0') Asimov_data = np.zeros((40,hp.nside2npix(self.nside))) for key in f_global.template_dict.keys(): Asimov_data += np.array(f_global.template_dict[key]) ################### # Get DM halo map # ################### print("Getting halo map...") if not self.randlocs: # If doing random locations l = self.catalog.l.values[self.iobj] b = self.catalog.b.values[self.iobj] else: badval = True while (badval): test_ell = np.random.uniform(0.,2*np.pi) test_b = np.arccos(np.random.uniform(-1.,1.))-np.pi/2. test_pixval = hp.ang2pix(self.nside, test_b+np.pi/2, test_ell) ps0p5_mask = np.load(work_dir + '/DataFiles/Misc/mask0p5_3FGL.npy') > 0 # Check if not masked with plan or PS mask if ( (np.abs(test_b)*180./np.pi > 20. ) & (ps0p5_mask[test_pixval] == 0)): badval = False l = test_ell*180./np.pi b = test_b*180./np.pi np.savetxt(self.save_dir + "/lb_obj"+str(self.iobj) + ".dat", np.array([l, b])) rs = self.catalog.rs.values[self.iobj]*1e-3 if self.boost: J0 = 10**self.catalog.mulog10J_inf.values[self.iobj] else: J0 = 10**self.catalog.mulog10Jnb_inf.values[self.iobj] mk = mkDMMaps.mkDMMaps(z = self.catalog.z[self.iobj], r_s = rs , J_0 = J0, ell = l*np.pi/180, b = b*np.pi/180, nside=self.nside, use_boost=self.use_boost, Burkert=self.Burkert) DM_template_base = mk.map print("...done") ######################################### # Loop over energy bins to get xsec LLs # ######################################### A_ary = 10**np.linspace(-6,6,200) LL_inten_ary = np.zeros((len(self.ebins)-1,len(A_ary))) inten_ary = np.zeros((len(self.ebins)-1,len(A_ary))) # 10 deg mask for the analysis analysis_mask = cm.make_mask_total(mask_ring = True, inner = 0, outer = 10, ring_b = b, ring_l = l) for iebin, ebin in tqdm(enumerate(np.arange(self.emin,self.emax+1)), disable = 1 - self.verbose): ###################### # Templates and maps # ###################### if self.verbose: print "At bin", ebin if self.imc != -1: data = np.load(mc_dir + 'MC_allhalos_p7_' + self.dm_string + '_v' + str(self.imc)+'.npy')[ebin].astype(np.float64) else: data = f_global.CTB_count_maps[ebin].astype(np.float64) fermi_exposure = f_global.CTB_exposure_maps[ebin] DM_template = DM_template_base*fermi_exposure/np.sum(DM_template_base*fermi_exposure) print("Loading smoothing class...") print("...done!") print("Beginning to smooth...") DM_template_smoothed = DM_template print("...done!") DM_intensity_base = np.sum(DM_template_smoothed/fermi_exposure) dif = f_global.template_dict[self.diff][ebin] iso = f_global.template_dict['iso'][ebin] psc = f_global.template_dict['ps_model'][ebin] bub = f_global.template_dict['bubs'][ebin] # If doing Asimov this first scan is irrelevant, but takes no time so run #################### # NPTFit norm scan # #################### n = nptfit.NPTF(tag='norm_o'+str(self.iobj)+'_E'+str(ebin)+self.mc_tag) n.load_data(data, fermi_exposure) n.load_mask(analysis_mask) n.add_template(dif, self.diff) n.add_template(iso, 'iso') n.add_template(psc, 'psc') n.add_template(bub, 'bub')
n.add_poiss_model('bub', '$A_\mathrm{bub}$', [0,10], False) # # Add PS at halo location # ps_halo_map = np.zeros(hp.nside2npix(self.nside)) # ps_halo_idx = hp.ang2pix(self.nside, np.pi/2. - b*np.pi/180., l*np.pi/180.) # ell and b are in rad # ps_halo_map[ps_halo_idx] = 1. # ps_halo_map_smoothed = ksi.smooth_the_map(ps_halo_map) # smooth it # n.add_template(ps_halo_map_smoothed,'ps_halo') # n.add_poiss_model('ps_halo', 'ps_halo', [0,100], False) if self.floatDM: if ebin >= 7: # Don't float DM in initial scan for < 1 GeV. Below here # Fermi PSF is so large that we find the DM often picks up # spurious excesses in MC. n.add_template(DM_template_smoothed, 'DM') n.add_poiss_model('DM', '$A_\mathrm{DM}$', [0,1000], False) if self.float_ps_together: n.add_poiss_model('psc', '$A_\mathrm{psc}$', [0,10], False) else: # Astropy-formatted coordinates of cluster c2 = SkyCoord("galactic", l=[l]*u.deg, b=[b]*u.deg) idx3fgl_10, _, _, _ = c2.search_around_sky(self.c3, 10*u.deg) idx3fgl_18, _, _, _ = c2.search_around_sky(self.c3, 18*u.deg) ps_map_outer = np.zeros(hp.nside2npix(self.nside)) for i3fgl in idx3fgl_18: ps_file = np.load(ps_indiv_dir + '/ps_temp_128_5_'+str(self.eventtype)+'_'+str(i3fgl)+'.npy') ps_map = np.zeros(hp.nside2npix(self.nside)) ps_map[np.vectorize(int)(ps_file[::,ebin,0])] = ps_file[::,ebin,1] if i3fgl in idx3fgl_10: # If within 10 degrees, float individually n.add_template(ps_map, 'ps_'+str(i3fgl)) n.add_poiss_model('ps_'+str(i3fgl), '$A_\mathrm{ps'+str(i3fgl)+'}$', [0,10], False) else: # Otherwise, add to be floated together ps_map_outer += ps_map if np.sum(ps_map_outer) != 0: n.add_template(ps_map_outer, 'ps_outer') n.add_poiss_model('ps_outer', '$A_\mathrm{ps_outer}$', [0,10], False) n.configure_for_scan() ########## # Minuit # ########## # Skip this step if there is 0 data (higher energy bins) if np.sum(data*np.logical_not(analysis_mask)) != 0: keys = n.poiss_model_keys limit_dict = {} init_val_dict = {} step_size_dict = {} for key in keys: if key == 'DM': limit_dict['limit_'+key] = (0,1000) else: limit_dict['limit_'+key] = (0,50) init_val_dict[key] = 0.0 step_size_dict['error_'+key] = 1.0 other_kwargs = {'print_level': self.verbose, 'errordef': 1} z = limit_dict.copy() z.update(other_kwargs) z.update(limit_dict) z.update(init_val_dict) z.update(step_size_dict) f = call_ll(len(keys),n.ll,keys) m = Minuit(f,**z) m.migrad(ncall=30000, precision=1e-14) ################################### # NPTFit fixed DM and bkg profile #
n.add_poiss_model(self.diff, '$A_\mathrm{dif}$', [0,10], False) n.add_poiss_model('iso', '$A_\mathrm{iso}$', [0,20], False) if (np.sum(bub*np.logical_not(analysis_mask)) != 0):
random_line_split
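One step worth isolating from the scan above is how DM_template is formed: the spatial profile is weighted by the exposure map and rescaled so it sums to a single count, which makes the fitted amplitude read directly as a total expected count, and DM_intensity_base then converts that unit template back into an intensity. A minimal numpy sketch with stand-in numbers (the nside, the flat exposure, and the random profile are illustrative, not the Fermi maps):

import numpy as np

npix = 12 * 128**2                        # healpix pixel count for nside = 128
rng = np.random.default_rng(0)
dm_template_base = rng.random(npix)       # stand-in spatial profile
fermi_exposure = np.full(npix, 3e10)      # stand-in exposure map (cm^2 s)

dm_counts = dm_template_base * fermi_exposure
dm_template = dm_counts / dm_counts.sum()                 # sums to one count
dm_intensity_base = np.sum(dm_template / fermi_exposure)  # intensity per unit amplitude

print(dm_template.sum(), dm_intensity_base)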
scan_nosmooth.py
self.floatDM = floatDM # Whether to float the DM in the initial scan self.verbose = verbose # Whether to print tqdm and Minuit output self.noJprof = noJprof # Whether to not do a profile over the J uncertainty self.save_dir = save_dir # Directory to save output files self.load_dir = load_dir # Directory to load intensity LLs from self.randlocs = randlocs # Whether to pick random location print("Starting!") if mc_dm == -1: self.dm_string = "nodm" else: self.dm_string = "10000dm" + str(mc_dm) if self.save_dir != "": if not os.path.exists(self.save_dir): try: os.mkdir(self.save_dir) except OSError as e: if e.errno != 17: raise self.save_dir += "/" if self.load_dir is None: self.load_dir = self.save_dir print("Loading 3FGL...") # If floating sources individually, find nearby 3FGL PSs if not self.float_ps_together: source_3fg_df = pd.read_csv(work_dir + '/DataFiles/Catalogs/3fgl.dat', sep='|', comment='#') source_3fg_df.rename(columns=lambda x: x.strip(), inplace=True) # Strip whitespace for col in source_3fg_df.columns.values: try: source_3fg_df[col] = source_3fg_df[col].map(str.strip) except TypeError: continue source_3fg_df = source_3fg_df.convert_objects(convert_numeric=True) # Coordinates of nearby 3FGL self.c3 = SkyCoord("galactic", l=source_3fg_df['_Lii']*u.deg, b=source_3fg_df['_Bii']*u.deg) print("...done") self.ebins = 2*np.logspace(-1,3,41)[self.emin:self.emax+2] if self.Asimov: self.mc_tag = '_Asimov' else: if self.imc != -1: self.mc_tag = '_mc' + str(self.imc) else: self.mc_tag = '_data' if perform_scan: self.scan() if perform_postprocessing: self.postprocess() def scan(self): print("Getting into scan") ################ # Fermi plugin # ################ print("Loading Fermi plugin...") # Load the Fermi plugin - always load all energy bins, extract what is needed f_global = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=0,CTB_en_max=40,nside=self.nside,eventclass=self.eventclass,eventtype=self.eventtype,newstyle=1,data_July16=True) print("... done") # Load necessary templates f_global.add_diffuse_newstyle(comp = self.diff,eventclass = self.eventclass, eventtype = self.eventtype) f_global.add_iso() ps_temp = np.load(work_dir + '/DataFiles/PS-Maps/ps_map.npy') f_global.add_template_by_hand(comp='ps_model',template=ps_temp) f_global.add_bubbles() # If Asimov normalize the templates and create a summed map if self.Asimov: norm_file = work_dir + '/DataFiles/Misc/P8UCVA_norm.npy' f_global.use_template_normalization_file(norm_file,key_suffix='-0') Asimov_data = np.zeros((40,hp.nside2npix(self.nside))) for key in f_global.template_dict.keys(): Asimov_data += np.array(f_global.template_dict[key]) ################### # Get DM halo map # ################### print("Getting halo map...") if not self.randlocs: # If doing random locations l = self.catalog.l.values[self.iobj] b = self.catalog.b.values[self.iobj] else: badval = True while (badval): test_ell = np.random.uniform(0.,2*np.pi) test_b = np.arccos(np.random.uniform(-1.,1.))-np.pi/2. test_pixval = hp.ang2pix(self.nside, test_b+np.pi/2, test_ell) ps0p5_mask = np.load(work_dir + '/DataFiles/Misc/mask0p5_3FGL.npy') > 0 # Check if not masked with plan or PS mask if ( (np.abs(test_b)*180./np.pi > 20. 
) & (ps0p5_mask[test_pixval] == 0)): badval = False l = test_ell*180./np.pi b = test_b*180./np.pi np.savetxt(self.save_dir + "/lb_obj"+str(self.iobj) + ".dat", np.array([l, b])) rs = self.catalog.rs.values[self.iobj]*1e-3 if self.boost: J0 = 10**self.catalog.mulog10J_inf.values[self.iobj] else: J0 = 10**self.catalog.mulog10Jnb_inf.values[self.iobj] mk = mkDMMaps.mkDMMaps(z = self.catalog.z[self.iobj], r_s = rs , J_0 = J0, ell = l*np.pi/180, b = b*np.pi/180, nside=self.nside, use_boost=self.use_boost, Burkert=self.Burkert) DM_template_base = mk.map print("...done") ######################################### # Loop over energy bins to get xsec LLs # ######################################### A_ary = 10**np.linspace(-6,6,200) LL_inten_ary = np.zeros((len(self.ebins)-1,len(A_ary))) inten_ary = np.zeros((len(self.ebins)-1,len(A_ary))) # 10 deg mask for the analysis analysis_mask = cm.make_mask_total(mask_ring = True, inner = 0, outer = 10, ring_b = b, ring_l = l) for iebin, ebin in tqdm(enumerate(np.arange(self.emin,self.emax+1)), disable = 1 - self.verbose): ###################### # Templates and maps # ###################### if self.verbose: print "At bin", ebin if self.imc != -1: data = np.load(mc_dir + 'MC_allhalos_p7_' + self.dm_string + '_v' + str(self.imc)+'.npy')[ebin].astype(np.float64) else: data = f_global.CTB_count_maps[ebin].astype(np.float64) fermi_exposure = f_global.CTB_exposure_maps[ebin] DM_template = DM_template_base*fermi_exposure/np.sum(DM_template_base*fermi_exposure) print("Loading smoothing class...") print("...done!") print("Beginning to smooth...") DM_template_smoothed = DM_template print("...done!") DM_intensity_base = np.sum(DM_template_smoothed/fermi_exposure) dif = f_global.template_dict[self.diff][ebin] iso = f_global.template_dict['iso'][ebin] psc
def __init__(self, perform_scan=0, perform_postprocessing=0, save_dir="", load_dir=None,imc=0, iobj=0, emin=0, emax=39, channel='b', nside=128, eventclass=5, eventtype=0, diff='p7', catalog_file='DarkSky_ALL_200,200,200_v3.csv', Burkert=0, use_boost=0, boost=1, float_ps_together=1, Asimov=0, floatDM=1, verbose=0, noJprof=0, mc_dm=-1, randlocs=False): print("Loading catalog...") self.catalog = pd.read_csv(work_dir + '/DataFiles/Catalogs/' + catalog_file) # Halo catalog print("...done") self.iobj = iobj # Objects index to scan self.imc = imc # MC index self.emin = emin # Minimum energy bin self.emax = emax # Maximum energy bin self.channel = channel # Annihilation channel (see PPPC4DMID) self.nside = nside # Healpix nside self.eventclass = eventclass # Fermi eventclass -- 5 for UCV self.eventtype = eventtype # Fermi eventtype -- 0 (All) or 3 (Q4) self.diff = diff # Diffuse model -- p6v11, p7, p8 self.Burkert = Burkert # Whether to use a Burkert (True) or NFW (False) self.boost = boost # Whether to use boosted or unboosted J self.use_boost = use_boost # Whether to put down a boosted profile self.float_ps_together = float_ps_together # Whether to float the whole PS map self.Asimov = Asimov # Whether to use the Asimov expectation
identifier_body
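When randlocs is set, the scan draws an isotropic sky position and redraws until the point lies more than 20 degrees off the Galactic plane; the original code also rejects points falling on the 3FGL point-source mask, which is omitted here. A small self-contained sketch of that rejection loop (the 20 degree cut comes from the code above, while the helper name and seed are invented):

import numpy as np

def draw_unmasked_lb(rng, b_cut_deg=20.0):
    # Uniform in longitude and in cos(colatitude) gives an isotropic point.
    while True:
        ell = rng.uniform(0.0, 2.0 * np.pi)                   # longitude [rad]
        b = np.arccos(rng.uniform(-1.0, 1.0)) - np.pi / 2.0   # latitude [rad]
        if abs(b) * 180.0 / np.pi > b_cut_deg:
            return ell * 180.0 / np.pi, b * 180.0 / np.pi     # degrees

rng = np.random.default_rng(42)
l_deg, b_deg = draw_unmasked_lb(rng)
print(l_deg, b_deg)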
scan_nosmooth.py
(self, perform_scan=0, perform_postprocessing=0, save_dir="", load_dir=None,imc=0, iobj=0, emin=0, emax=39, channel='b', nside=128, eventclass=5, eventtype=0, diff='p7', catalog_file='DarkSky_ALL_200,200,200_v3.csv', Burkert=0, use_boost=0, boost=1, float_ps_together=1, Asimov=0, floatDM=1, verbose=0, noJprof=0, mc_dm=-1, randlocs=False): print("Loading catalog...") self.catalog = pd.read_csv(work_dir + '/DataFiles/Catalogs/' + catalog_file) # Halo catalog print("...done") self.iobj = iobj # Objects index to scan self.imc = imc # MC index self.emin = emin # Minimum energy bin self.emax = emax # Maximum energy bin self.channel = channel # Annihilation channel (see PPPC4DMID) self.nside = nside # Healpix nside self.eventclass = eventclass # Fermi eventclass -- 5 for UCV self.eventtype = eventtype # Fermi eventtype -- 0 (All) or 3 (Q4) self.diff = diff # Diffuse model -- p6v11, p7, p8 self.Burkert = Burkert # Whether to use a Burkert (True) or NFW (False) self.boost = boost # Whether to use boosted or unboosted J self.use_boost = use_boost # Whether to put down a boosted profile self.float_ps_together = float_ps_together # Whether to float the whole PS map self.Asimov = Asimov # Whether to use the Asimov expectation self.floatDM = floatDM # Whether to float the DM in the initial scan self.verbose = verbose # Whether to print tqdm and Minuit output self.noJprof = noJprof # Whether to not do a profile over the J uncertainty self.save_dir = save_dir # Directory to save output files self.load_dir = load_dir # Directory to load intensity LLs from self.randlocs = randlocs # Whether to pick random location print("Starting!") if mc_dm == -1: self.dm_string = "nodm" else: self.dm_string = "10000dm" + str(mc_dm) if self.save_dir != "": if not os.path.exists(self.save_dir): try: os.mkdir(self.save_dir) except OSError as e: if e.errno != 17: raise self.save_dir += "/" if self.load_dir is None: self.load_dir = self.save_dir print("Loading 3FGL...") # If floating sources individually, find nearby 3FGL PSs if not self.float_ps_together: source_3fg_df = pd.read_csv(work_dir + '/DataFiles/Catalogs/3fgl.dat', sep='|', comment='#') source_3fg_df.rename(columns=lambda x: x.strip(), inplace=True) # Strip whitespace for col in source_3fg_df.columns.values: try: source_3fg_df[col] = source_3fg_df[col].map(str.strip) except TypeError: continue source_3fg_df = source_3fg_df.convert_objects(convert_numeric=True) # Coordinates of nearby 3FGL self.c3 = SkyCoord("galactic", l=source_3fg_df['_Lii']*u.deg, b=source_3fg_df['_Bii']*u.deg) print("...done") self.ebins = 2*np.logspace(-1,3,41)[self.emin:self.emax+2] if self.Asimov: self.mc_tag = '_Asimov' else: if self.imc != -1: self.mc_tag = '_mc' + str(self.imc) else: self.mc_tag = '_data' if perform_scan: self.scan() if perform_postprocessing: self.postprocess() def scan(self): print("Getting into scan") ################ # Fermi plugin # ################ print("Loading Fermi plugin...") # Load the Fermi plugin - always load all energy bins, extract what is needed f_global = fp.fermi_plugin(maps_dir,fermi_data_dir=fermi_data_dir,work_dir=work_dir,CTB_en_min=0,CTB_en_max=40,nside=self.nside,eventclass=self.eventclass,eventtype=self.eventtype,newstyle=1,data_July16=True) print("... 
done") # Load necessary templates f_global.add_diffuse_newstyle(comp = self.diff,eventclass = self.eventclass, eventtype = self.eventtype) f_global.add_iso() ps_temp = np.load(work_dir + '/DataFiles/PS-Maps/ps_map.npy') f_global.add_template_by_hand(comp='ps_model',template=ps_temp) f_global.add_bubbles() # If Asimov normalize the templates and create a summed map if self.Asimov: norm_file = work_dir + '/DataFiles/Misc/P8UCVA_norm.npy' f_global.use_template_normalization_file(norm_file,key_suffix='-0') Asimov_data = np.zeros((40,hp.nside2npix(self.nside))) for key in f_global.template_dict.keys(): Asimov_data += np.array(f_global.template_dict[key]) ################### # Get DM halo map # ################### print("Getting halo map...") if not self.randlocs: # If doing random locations l = self.catalog.l.values[self.iobj] b = self.catalog.b.values[self.iobj] else: badval = True while (badval): test_ell = np.random.uniform(0.,2*np.pi) test_b = np.arccos(np.random.uniform(-1.,1.))-np.pi/2. test_pixval = hp.ang2pix(self.nside, test_b+np.pi/2, test_ell) ps0p5_mask = np.load(work_dir + '/DataFiles/Misc/mask0p5_3FGL.npy') > 0 # Check if not masked with plan or PS mask if ( (np.abs(test_b)*180./np.pi > 20. ) & (ps0p5_mask[test_pixval] == 0)): badval = False l = test_ell*180./np.pi b = test_b*180./np.pi np.savetxt(self.save_dir + "/lb_obj"+str(self.iobj) + ".dat", np.array([l, b])) rs = self.catalog.rs.values[self.iobj]*1e-3 if self.boost: J0 = 10**self.catalog.mulog10J_inf.values[self.iobj] else: J0 = 10**self.catalog.mulog10Jnb_inf.values[self.iobj] mk = mkDMMaps.mkDMMaps(z = self.catalog.z[self.iobj], r_s = rs , J_0 = J0, ell = l*np.pi/180, b = b*np.pi/180, nside=self.nside, use_boost=self.use_boost, Burkert=self.Burkert) DM_template_base = mk.map print("...done") ######################################### # Loop over energy bins to get xsec LLs # ######################################### A_ary = 10**np.linspace(-6,6,200) LL_inten_ary = np.zeros((len(self.ebins)-1,len(A_ary))) inten_ary = np.zeros((len(self.ebins)-1,len(A_ary))) # 10 deg mask for the analysis analysis_mask = cm.make_mask_total(mask_ring = True, inner = 0, outer = 10, ring_b = b, ring_l = l) for iebin, ebin in tqdm(enumerate(np.arange(self.emin,self.emax+1)), disable = 1 - self.verbose): ###################### # Templates and maps # ###################### if self.verbose: print "At bin", ebin if self.imc != -1: data = np.load(mc_dir + 'MC_allhalos_p7_' + self.dm_string + '_v' + str(self.imc)+'.npy')[ebin].astype(np.float64) else: data = f_global.CTB_count_maps[ebin].astype(np.float64) fermi_exposure = f_global.CTB_exposure_maps[ebin] DM_template = DM_template_base*fermi_exposure/np.sum(DM_template_base*fermi_exposure) print("Loading smoothing class...") print("...done!") print("Beginning to smooth...") DM_template_smoothed = DM_template print("...done!") DM_intensity_base = np.sum(DM_template_smoothed/fermi_exposure) dif = f_global.template_dict[self.diff][ebin] iso = f_global.template_dict['iso'][ebin] psc
__init__
identifier_name
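The energy binning used throughout the class above is 2*np.logspace(-1,3,41): 41 edges bounding 40 logarithmic bins between 0.2 and 2000, where a contiguous bin range [emin, emax] keeps the edge slice [emin:emax+2]. A quick worked example with arbitrarily chosen bin indices:

import numpy as np

edges = 2 * np.logspace(-1, 3, 41)    # 41 edges spanning 0.2 ... 2000
emin, emax = 7, 10                    # keep bins 7..10 inclusive
sub_edges = edges[emin:emax + 2]      # 5 edges bounding those 4 bins
print(np.round(sub_edges, 3))         # ~ [1.002 1.262 1.589 2.    2.518]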
scan_nosmooth.py
_poiss_model('DM','DM',False,fixed=True,fixed_norm=1.0) new_n2.configure_for_scan() max_LL = new_n2.ll([]) LL_inten_ary[iebin, iA] = max_LL inten_ary[iebin, iA] = DM_intensity_base*A np.savez(self.save_dir + 'LL_inten_o'+str(self.iobj)+self.mc_tag, LL=LL_inten_ary, intens=inten_ary) def postprocess(self): ############################## # Get intensity without xsec # ############################## m_ary = np.array([1.00000000e+01,1.50000000e+01,2.00000000e+01,2.50000000e+01,3.00000000e+01,4.00000000e+01,5.00000000e+01,6.00000000e+01,7.00000000e+01,8.00000000e+01,9.00000000e+01,1.00000000e+02,1.10000000e+02,1.20000000e+02,1.30000000e+02,1.40000000e+02,1.50000000e+02,1.60000000e+02,1.80000000e+02,2.00000000e+02,2.20000000e+02,2.40000000e+02,2.60000000e+02,2.80000000e+02,3.00000000e+02,3.30000000e+02,3.60000000e+02,4.00000000e+02,4.50000000e+02,5.00000000e+02,5.50000000e+02,6.00000000e+02,6.50000000e+02,7.00000000e+02,7.50000000e+02,8.00000000e+02,9.00000000e+02,1.00000000e+03,1.10000000e+03,1.20000000e+03,1.30000000e+03,1.50000000e+03,1.70000000e+03,2.00000000e+03,2.50000000e+03,3.00000000e+03,4.00000000e+03,5.00000000e+03,6.00000000e+03,7.00000000e+03,8.00000000e+03,9.00000000e+03,1.00000000e+04]) if self.channel == 'mu': self.channel = '\\[Mu]' if self.channel == 'tau': self.channel = '\\[Tau]' # If b use the precomputed value if self.channel == 'b': PPnoxsec_ary = np.load(work_dir + '/DataFiles//PP-Factor/PPnoxsec_b_ary.npy')[:,self.emin:self.emax+2] else: dNdLogx_df = pd.read_csv(work_dir + '/DataFiles//PP-Factor/AtProduction_gammas.dat', delim_whitespace=True) PPnoxsec_ary = np.zeros(shape=(len(m_ary),len(self.ebins)-1)) for mi in range(len(m_ary)): dNdLogx_ann_df = dNdLogx_df.query('mDM == ' + (str(np.int(float(m_ary[mi])))))[['Log[10,x]',self.channel]] Egamma = np.array(m_ary[mi]*(10**dNdLogx_ann_df['Log[10,x]'])) dNdEgamma = np.array(dNdLogx_ann_df[self.channel]/(Egamma*np.log(10))) dNdE_interp = interpolate.interp1d(Egamma, dNdEgamma) for ei in range(len(self.ebins)-1): # -1 because self.ebins-1 bins, self.ebins edges if self.ebins[ei] < m_ary[mi]: # Only have flux if m > Ebin if self.ebins[ei+1] < m_ary[mi]: # Whole bin is inside PPnoxsec_ary[mi,ei] = 1.0/(8*np.pi*m_ary[mi]**2)*integrate.quad(lambda x: dNdE_interp(x), self.ebins[ei], self.ebins[ei+1])[0] else: # Bin only partially contained PPnoxsec_ary[mi,ei] = 1.0/(8*np.pi*m_ary[mi]**2)*integrate.quad(lambda x: dNdE_interp(x), self.ebins[ei], m_ary[mi])[0] ######################################## # Load appropriate J-factor and errors # ######################################## if self.Burkert: if self.boost: mulog10J = self.catalog[u'mulog10JB_inf'].values[self.iobj] siglog10J = self.catalog[u'siglog10JB_inf'].values[self.iobj] else: mulog10J = self.catalog[u'mulog10JBnb_inf'].values[self.iobj] siglog10J = self.catalog[u'siglog10JBnb_inf'].values[self.iobj] else: if self.boost: mulog10J = self.catalog[u'mulog10J_inf'].values[self.iobj] siglog10J = self.catalog[u'siglog10J_inf'].values[self.iobj] else: mulog10J = self.catalog[u'mulog10Jnb_inf'].values[self.iobj] siglog10J = self.catalog[u'siglog10Jnb_inf'].values[self.iobj] ############################################# # Interpolate intensity LLs to get xsec LLs # ############################################# # If randloc load a representative halo in size at if self.randlocs: # Representative values
rep_angext = np.array([0.02785567,0.12069876,0.21354185,0.30638494,0.39922802,0.49207111,0.5849142,0.67775728,0.77060037,0.86344346,0.95628654,1.04912963,1.14197272,1.2348158,1.32765889,1.42050198,1.51334507,1.60618815,1.69903124,1.79187433]) obj_angext = 2*self.catalog[u'rs'].values[self.iobj] / \ (Planck15.angular_diameter_distance(self.catalog[u'z'].values[self.iobj]).value*1000) \ * 180./np.pi rep_index = (np.abs(rep_angext-obj_angext)).argmin() # Choose a random sky location skyloc = np.random.randint(200) np.savetxt(self.save_dir + "/skyloc_obj"+str(self.iobj)+".txt",[skyloc]) LL_inten_file = np.load(self.load_dir[:-1] + str(skyloc) + '/LL_inten_o'+str(rep_index)+'_data.npz')
conditional_block
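The postprocess step above turns a tabulated annihilation spectrum into per-bin factors by interpolating dN/dE and integrating it over each energy bin, truncating the bin at the DM mass when the bin straddles it, with the 1/(8*pi*m^2) prefactor applied to the result. A stripped-down sketch of that integral using a made-up power-law spectrum rather than the PPPC4DMID table:

import numpy as np
from scipy import integrate, interpolate

m_dm = 100.0                                    # example DM mass
e_tab = np.logspace(-1, np.log10(m_dm), 200)    # tabulated photon energies
dnde_tab = e_tab ** -1.5                        # stand-in spectrum dN/dE
dnde = interpolate.interp1d(e_tab, dnde_tab)

def bin_factor(e_lo, e_hi, m, dnde):
    if e_lo >= m:
        return 0.0                              # no flux above the DM mass
    e_hi = min(e_hi, m)                         # bin only partially contained
    integral, _ = integrate.quad(lambda e: float(dnde(e)), e_lo, e_hi)
    return integral / (8.0 * np.pi * m ** 2)

print(bin_factor(10.0, 20.0, m_dm, dnde))
print(bin_factor(80.0, 120.0, m_dm, dnde))      # bin straddles the mass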
exploratory_analysis.py
def plotHist(column, title, x_label, y_label): # plots a histogram. Note: update bin width as appropriate binwidth = [x for x in range(0,800000, 2000)] ex = plt.hist(column, bins=binwidth) plt.title(title) plt.xlabel(x_label) plt.ylabel(y_label) return plt.show() def plotHistTwo(colA, colB, title="", x_label="", y_label="Frequency"): # plots a histogram with two variables side-by-side # Note: update binwidth binwidth = [x for x in range(0,6, 1)] plt.hist([colA, colB], bins=binwidth, alpha=0.5, label=["Males", "Females"]) plt.legend(loc='upper right', prop={'size': 13}) plt.title(title) plt.xlabel(x_label) plt.ylabel(y_label) # plt.savefig("retweet_count.png") return plt.show() def plotBar(colA, colB, title="", x_label="", y_label="Frequency"): return 0 def scatter(col1, col2): # to do: plot a scatter plot for variables. E.g. hue vs brightness with # male and female colored differently return 0 def main(): #################### SETUP CODE ######################################## # start time startTime = time.time() # # load the dataset # dataset = '/home/markg/Documents/TCD/ML/ML1819--task-107--team-11/dataset/overall_dataset.csv' # data = pd.read_csv(dataset, encoding='latin-1') # # # reformat date column # data['created'] = pd.to_datetime(data['created']) # # # create new columns for year and month % remove original column # data['year'] = pd.DatetimeIndex(data['created']).year # data['month'] = pd.DatetimeIndex(data['created']).month # data = data.drop(['created'], axis=1) # # data.drop(columns=['Unnamed: 0'], inplace = True) # # data.drop(columns = ['user_timezone', 'tweet_count', 'month', 'text_sent'], inplace=True) # # # # # reformat date column # # data['created'] = pd.to_datetime(data['created']) # # # # # create new columns for year and month # # data['year'] = pd.DatetimeIndex(data['created']).year # # data['month'] = pd.DatetimeIndex(data['created']).month # # # # # remove original date column # # data = data.drop(['created'], axis=1) # # # # # standardize numeric variables (could also consider using robust scaler here) # # numericVariables = ['fav_number', 'tweet_count','retweet_count', 'link_hue', # # 'link_sat', 'link_vue', 'sidebar_hue', 'sidebar_sat', 'sidebar_vue', 'year', 'month'] # # scaler = preprocessing.StandardScaler() # # data[numericVariables] = scaler.fit_transform(data[numericVariables]) # # ##################### END SETUP CODE ###################################### # # #################### SVM MODEL ############################################ # # # create dependent & independent variables # # X = data.drop(['gender', 'fav_number', 'user_timezone', 'tweet_count','retweet_count', 'link_hue', # # # 'link_sat', 'link_vue', 'sidebar_sat', 'sidebar_vue', 'month'], axis=1) # # # # X = data.drop('gender', axis=1) # # # y = data['gender'] # # # # print (X.keys()) # # # # # # # # # # split into 90% training, 10% testing # # # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.10) # # # # # # # # train model (could change kernel here) # # # svm = SVC(C=1, gamma=0.3, kernel='rbf') # # # svm.fit(X_train, y_train) # # # # # # # # # # recursive feature selection using cross validation # # # # # rfecv = RFECV(estimator=svm, step=1, cv=StratifiedKFold(2), # # # # # scoring='accuracy') # # # # # rfecv.fit(X, y) # # # # # print("Optimal number of features : %d" % rfecv.n_features_) # # # # # print("Feature ranking: ", rfecv.ranking_) # # # # # # recursive feature selection without cross validation # # rfe = RFE(svm, 3) # # fit = rfe.fit(X, y) # # print('Num 
Features:',fit.n_features_to_select) # # print("Selected Features:",fit.support_) # # # # # # # plot bar chart of feature ranking # # features = list(X) # # ranking = rfecv.ranking_ # # plt.bar(features, ranking, align='center', alpha=0.5) # # plt.show() # # # # # Plot number of features VS. cross-validation scores # # plt.figure() # # plt.xlabel("Number of features selected") # # plt.ylabel("Cross validation score (nb of correct classifications)") # # plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) # # plt.show() # # # # # # make predictions and print metrics # # y_pred = svm.predict(X_test) # # print(classification_report(y_test,y_pred)) # # print(confusion_matrix(y_test,y_pred)) # # # # # # # # # # cross validation to choose c and gamma # # C_s, gamma_s = np.meshgrid(np.logspace(-2, 1, 20), np.logspace(-2, 1, 20)) # # scores = list() # # i=0; j=0 # # for C, gamma in zip(C_s.ravel(),gamma_s.ravel()): # # svm.C = C # # svm.gamma = gamma # # this_scores = cross_val_score(svm, X, y, cv=5) # # scores.append(np.mean(this_scores)) # # scores=np.array(scores) # # scores=scores.reshape(C_s.shape) # # fig2, ax2 = plt.subplots(figsize=(12,8)) # # c=ax2.contourf(C_s,gamma_s,scores) # # ax2.set_xlabel('C') # # ax2.set_ylabel('gamma') # # fig2.colorbar(c) # # fig2.savefig('crossvalOverall.png') # # ################## END SVM MODEL ########################################## # # # # create a subset of males and females # males = data[data['gender']==0] # females = data[data['gender']==1] # retweetCountMales = data.loc[(data['gender'] == 0) & data['retweet_count']] # print (retweetCountMales.head(10)) # to access specific columns # favNumberMales = males.loc[:,'fav_number'] # favNumberFemales = females.loc[:,'fav_number'] # plotHistTwo(favNumberMales, favNumberFemales, # x_label="Total Number of Tweets Favourited", title="Total Number of Tweets Favourited") # tweetCountMales = males.loc[:,'tweet_count'] # tweetCountFemales = females.loc[:,'tweet_count'] # plotHistTwo(tweetCountMales, tweetCountFemales, # x_label="Total Number of Tweets Posted", title="Total Number of Tweets Posted") # retweetCountMales = males.loc[:,'retweet_count'].value_counts() # retweetCountFemales = females.loc[:,'retweet_count'].value_counts() # print (retweetCountFemales) # plotHistTwo(retweetCountMales, retweetCountFemales, # x_label="Total Number of retweets Posted", title="Total Number of retweets Posted") # # plot bar char of retweet count # x = np.array([0, 1, 2]) # malesRetweet = [4451, 154, 16] # femalesRetweet = [5220, 117, 12] # width = 0.2 # ax = plt.subplot(111) # rect1 = ax.bar(x, malesRetweet, width, align='center') # rect2 = ax.bar(x + width, femalesRetweet, width, align='center') # ax.set_title('Number of retweets') # ax.set_ylabel('Frequency') # ax.set_xlabel('Number of retweets') # ax.set_xticks(x + width / 2) # ax.set_xticklabels(('0', '1', '2'))
return 0
identifier_body
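plotHistTwo in the sample above passes both groups to a single plt.hist call so the bars for males and females sit side by side within each bin. A self-contained sketch with synthetic counts in place of the Twitter dataset (the distributions, labels, and output file name are invented):

import numpy as np
import matplotlib
matplotlib.use("Agg")              # render off-screen so the sketch runs headless
import matplotlib.pyplot as plt

rng = np.random.default_rng(1)
males = rng.poisson(1.2, 500)      # stand-in per-user counts
females = rng.poisson(0.9, 500)

plt.hist([males, females], bins=list(range(0, 6)), alpha=0.5,
         label=["Males", "Females"])
plt.legend(loc="upper right")
plt.xlabel("Count")
plt.ylabel("Frequency")
plt.savefig("hist_two_groups.png")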
exploratory_analysis.py
= train_test_split(X, y, test_size = 0.10) # # # # # # # # train model (could change kernel here) # # # svm = SVC(C=1, gamma=0.3, kernel='rbf') # # # svm.fit(X_train, y_train) # # # # # # # # # # recursive feature selection using cross validation # # # # # rfecv = RFECV(estimator=svm, step=1, cv=StratifiedKFold(2), # # # # # scoring='accuracy') # # # # # rfecv.fit(X, y) # # # # # print("Optimal number of features : %d" % rfecv.n_features_) # # # # # print("Feature ranking: ", rfecv.ranking_) # # # # # # recursive feature selection without cross validation # # rfe = RFE(svm, 3) # # fit = rfe.fit(X, y) # # print('Num Features:',fit.n_features_to_select) # # print("Selected Features:",fit.support_) # # # # # # # plot bar chart of feature ranking # # features = list(X) # # ranking = rfecv.ranking_ # # plt.bar(features, ranking, align='center', alpha=0.5) # # plt.show() # # # # # Plot number of features VS. cross-validation scores # # plt.figure() # # plt.xlabel("Number of features selected") # # plt.ylabel("Cross validation score (nb of correct classifications)") # # plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) # # plt.show() # # # # # # make predictions and print metrics # # y_pred = svm.predict(X_test) # # print(classification_report(y_test,y_pred)) # # print(confusion_matrix(y_test,y_pred)) # # # # # # # # # # cross validation to choose c and gamma # # C_s, gamma_s = np.meshgrid(np.logspace(-2, 1, 20), np.logspace(-2, 1, 20)) # # scores = list() # # i=0; j=0 # # for C, gamma in zip(C_s.ravel(),gamma_s.ravel()): # # svm.C = C # # svm.gamma = gamma # # this_scores = cross_val_score(svm, X, y, cv=5) # # scores.append(np.mean(this_scores)) # # scores=np.array(scores) # # scores=scores.reshape(C_s.shape) # # fig2, ax2 = plt.subplots(figsize=(12,8)) # # c=ax2.contourf(C_s,gamma_s,scores) # # ax2.set_xlabel('C') # # ax2.set_ylabel('gamma') # # fig2.colorbar(c) # # fig2.savefig('crossvalOverall.png') # # ################## END SVM MODEL ########################################## # # # # create a subset of males and females # males = data[data['gender']==0] # females = data[data['gender']==1] # retweetCountMales = data.loc[(data['gender'] == 0) & data['retweet_count']] # print (retweetCountMales.head(10)) # to access specific columns # favNumberMales = males.loc[:,'fav_number'] # favNumberFemales = females.loc[:,'fav_number'] # plotHistTwo(favNumberMales, favNumberFemales, # x_label="Total Number of Tweets Favourited", title="Total Number of Tweets Favourited") # tweetCountMales = males.loc[:,'tweet_count'] # tweetCountFemales = females.loc[:,'tweet_count'] # plotHistTwo(tweetCountMales, tweetCountFemales, # x_label="Total Number of Tweets Posted", title="Total Number of Tweets Posted") # retweetCountMales = males.loc[:,'retweet_count'].value_counts() # retweetCountFemales = females.loc[:,'retweet_count'].value_counts() # print (retweetCountFemales) # plotHistTwo(retweetCountMales, retweetCountFemales, # x_label="Total Number of retweets Posted", title="Total Number of retweets Posted") # # plot bar char of retweet count # x = np.array([0, 1, 2]) # malesRetweet = [4451, 154, 16] # femalesRetweet = [5220, 117, 12] # width = 0.2 # ax = plt.subplot(111) # rect1 = ax.bar(x, malesRetweet, width, align='center') # rect2 = ax.bar(x + width, femalesRetweet, width, align='center') # ax.set_title('Number of retweets') # ax.set_ylabel('Frequency') # ax.set_xlabel('Number of retweets') # ax.set_xticks(x + width / 2) # ax.set_xticklabels(('0', '1', '2')) # ax.legend( (rect1[0], rect2[0]), 
('Male', 'Female') ) # plt.savefig('retweet.png') # plt.show() # data.info() # to do: DATE # dateMales = females.loc[:,'year'].value_counts() x = np.array([2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007, 2006]) malesDate = [506, 513, 535, 659, 784, 528, 845, 205, 59, 3] femalesDate = [830, 728, 715, 872, 899, 494, 705, 100, 13, 0] width = 0.2 ax = plt.subplot(111) rect1 = ax.bar(x, malesDate, width, align='center') rect2 = ax.bar(x + width, femalesDate, width, align='center') ax.set_title('Date of Registration') ax.set_ylabel('Frequency') ax.set_xlabel('Date of Registration') ax.set_xticks(x + width / 2) ax.set_xticklabels((2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007, 2006)) ax.legend( (rect1[0], rect2[0]), ('Male', 'Female') ) plt.savefig('dateRegistration.png') plt.show() #################### LOGISTIC MODEL ####################################### # create dependent & independent variables # X = data.drop('gender_catg', axis=1) # Y = data['gender_catg'] # # model = LogisticRegression() # rfe = RFE(model, 3) # fit = rfe.fit(X, Y) # print('Num Features:',fit.n_features_to_select) # print("Selected Features:",fit.support_) # # # build model # logit_model=sm.Logit(Y,X) # result=logit_model.fit() # # X_train, X_test, y_train, y_test = train_test_split(X, Y, test_size=0.1, random_state=0) # logreg = LogisticRegression() # logreg.fit(X_train, y_train) # # y_pred = logreg.predict(X_test) # # print('Accuracy of logistic regression classifier on test set: {:.2f}'.format(logreg.score(X_test, y_test))) # print(classification_report(y_test,y_pred)) # logit_roc_auc = roc_auc_score(y_test, logreg.predict(X_test)) # fpr, tpr, thresholds = roc_curve(y_test, logreg.predict_proba(X_test)[:,1]) # plt.figure() # plt.plot(fpr, tpr, label='Logistic Regression (area = %0.2f)' % logit_roc_auc) # plt.plot([0, 1], [0, 1],'r--') # plt.xlim([0.0, 1.0]) # plt.ylim([0.0, 1.05]) # plt.xlabel('False Positive Rate') # plt.ylabel('True Positive Rate') # plt.title('Receiver operating characteristic') # # plt.savefig('Log_ROC') # plt.show() # to keep track of time taken endTIme = time.time() totalTime = endTIme - startTime print("Time taken:", totalTime) if __name__ == '__main__':
main()
conditional_block
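The commented-out modelling code in this row wraps recursive feature elimination around a classifier and reports the surviving features. A minimal, hedged version of that experiment on synthetic data (the dataset shape, estimator settings, and number of features to keep are assumptions, not the project's tuned values):

import numpy as np
from sklearn.datasets import make_classification
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression

X, y = make_classification(n_samples=500, n_features=8, n_informative=3,
                           random_state=0)
rfe = RFE(LogisticRegression(max_iter=1000), n_features_to_select=3)
rfe.fit(X, y)
print("Selected feature indices:", np.where(rfe.support_)[0])
print("Ranking:", rfe.ranking_)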
exploratory_analysis.py
(column, title, x_label, y_label): # plots a histogram. Note: update bin width as appropriate binwidth = [x for x in range(0,800000, 2000)] ex = plt.hist(column, bins=binwidth) plt.title(title) plt.xlabel(x_label) plt.ylabel(y_label) return plt.show() def plotHistTwo(colA, colB, title="", x_label="", y_label="Frequency"): # plots a histogram with two variables side-by-side # Note: update binwidth binwidth = [x for x in range(0,6, 1)] plt.hist([colA, colB], bins=binwidth, alpha=0.5, label=["Males", "Females"]) plt.legend(loc='upper right', prop={'size': 13}) plt.title(title) plt.xlabel(x_label) plt.ylabel(y_label) # plt.savefig("retweet_count.png") return plt.show() def plotBar(colA, colB, title="", x_label="", y_label="Frequency"): return 0 def scatter(col1, col2): # to do: plot a scatter plot for variables. E.g. hue vs brightness with # male and female colored differently return 0 def main(): #################### SETUP CODE ######################################## # start time startTime = time.time() # # load the dataset # dataset = '/home/markg/Documents/TCD/ML/ML1819--task-107--team-11/dataset/overall_dataset.csv' # data = pd.read_csv(dataset, encoding='latin-1') # # # reformat date column # data['created'] = pd.to_datetime(data['created']) # # # create new columns for year and month % remove original column # data['year'] = pd.DatetimeIndex(data['created']).year # data['month'] = pd.DatetimeIndex(data['created']).month # data = data.drop(['created'], axis=1) # # data.drop(columns=['Unnamed: 0'], inplace = True) # # data.drop(columns = ['user_timezone', 'tweet_count', 'month', 'text_sent'], inplace=True) # # # # # reformat date column # # data['created'] = pd.to_datetime(data['created']) # # # # # create new columns for year and month # # data['year'] = pd.DatetimeIndex(data['created']).year # # data['month'] = pd.DatetimeIndex(data['created']).month # # # # # remove original date column # # data = data.drop(['created'], axis=1) # # # # # standardize numeric variables (could also consider using robust scaler here) # # numericVariables = ['fav_number', 'tweet_count','retweet_count', 'link_hue', # # 'link_sat', 'link_vue', 'sidebar_hue', 'sidebar_sat', 'sidebar_vue', 'year', 'month'] # # scaler = preprocessing.StandardScaler() # # data[numericVariables] = scaler.fit_transform(data[numericVariables]) # # ##################### END SETUP CODE ###################################### # # #################### SVM MODEL ############################################ # # # create dependent & independent variables # # X = data.drop(['gender', 'fav_number', 'user_timezone', 'tweet_count','retweet_count', 'link_hue', # # # 'link_sat', 'link_vue', 'sidebar_sat', 'sidebar_vue', 'month'], axis=1) # # # # X = data.drop('gender', axis=1) # # # y = data['gender'] # # # # print (X.keys()) # # # # # # # # # # split into 90% training, 10% testing # # # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.10) # # # # # # # # train model (could change kernel here) # # # svm = SVC(C=1, gamma=0.3, kernel='rbf') # # # svm.fit(X_train, y_train) # # # # # # # # # # recursive feature selection using cross validation # # # # # rfecv = RFECV(estimator=svm, step=1, cv=StratifiedKFold(2), # # # # # scoring='accuracy') # # # # # rfecv.fit(X, y) # # # # # print("Optimal number of features : %d" % rfecv.n_features_) # # # # # print("Feature ranking: ", rfecv.ranking_) # # # # # # recursive feature selection without cross validation # # rfe = RFE(svm, 3) # # fit = rfe.fit(X, y) # # print('Num 
Features:',fit.n_features_to_select) # # print("Selected Features:",fit.support_) # # # # # # # plot bar chart of feature ranking # # features = list(X) # # ranking = rfecv.ranking_ # # plt.bar(features, ranking, align='center', alpha=0.5) # # plt.show() # # # # # Plot number of features VS. cross-validation scores # # plt.figure() # # plt.xlabel("Number of features selected") # # plt.ylabel("Cross validation score (nb of correct classifications)") # # plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) # # plt.show() # # # # # # make predictions and print metrics # # y_pred = svm.predict(X_test) # # print(classification_report(y_test,y_pred)) # # print(confusion_matrix(y_test,y_pred)) # # # # # # # # # # cross validation to choose c and gamma # # C_s, gamma_s = np.meshgrid(np.logspace(-2, 1, 20), np.logspace(-2, 1, 20)) # # scores = list() # # i=0; j=0 # # for C, gamma in zip(C_s.ravel(),gamma_s.ravel()): # # svm.C = C # # svm.gamma = gamma # # this_scores = cross_val_score(svm, X, y, cv=5) # # scores.append(np.mean(this_scores)) # # scores=np.array(scores) # # scores=scores.reshape(C_s.shape) # # fig2, ax2 = plt.subplots(figsize=(12,8)) # # c=ax2.contourf(C_s,gamma_s,scores) # # ax2.set_xlabel('C') # # ax2.set_ylabel('gamma') # # fig2.colorbar(c) # # fig2.savefig('crossvalOverall.png') # # ################## END SVM MODEL ########################################## # # # # create a subset of males and females # males = data[data['gender']==0] # females = data[data['gender']==1] # retweetCountMales = data.loc[(data['gender'] == 0) & data['retweet_count']] # print (retweetCountMales.head(10)) # to access specific columns # favNumberMales = males.loc[:,'fav_number'] # favNumberFemales = females.loc[:,'fav_number'] # plotHistTwo(favNumberMales, favNumberFemales, # x_label="Total Number of Tweets Favourited", title="Total Number of Tweets Favourited") # tweetCountMales = males.loc[:,'tweet_count'] # tweetCountFemales = females.loc[:,'tweet_count'] # plotHistTwo(tweetCountMales, tweetCountFemales, # x_label="Total Number of Tweets Posted", title="Total Number of Tweets Posted") # retweetCountMales = males.loc[:,'retweet_count'].value_counts() # retweetCountFemales = females.loc[:,'retweet_count'].value_counts() # print (retweetCountFemales) # plotHistTwo(retweetCountMales, retweetCountFemales, # x_label="Total Number of retweets Posted", title="Total Number of retweets Posted") # # plot bar char of retweet count # x = np.array([0, 1, 2]) # malesRetweet = [4451, 154, 16] # femalesRetweet = [5220, 117, 12] # width = 0.2 # ax = plt.subplot(111) # rect1 = ax.bar(x, malesRetweet, width, align='center') # rect2 = ax.bar(x + width, femalesRetweet, width, align='center') # ax.set_title('Number of retweets') # ax.set_ylabel('Frequency') # ax.set_xlabel('Number of retweets') # ax.set_xticks(x + width / 2) # ax.set_xticklabels(('0', '1', '2')) # ax.legend( (
plotHist
identifier_name
exploratory_analysis.py
B], bins=binwidth, alpha=0.5, label=["Males", "Females"]) plt.legend(loc='upper right', prop={'size': 13}) plt.title(title) plt.xlabel(x_label) plt.ylabel(y_label) # plt.savefig("retweet_count.png") return plt.show() def plotBar(colA, colB, title="", x_label="", y_label="Frequency"): return 0 def scatter(col1, col2): # to do: plot a scatter plot for variables. E.g. hue vs brightness with # male and female colored differently return 0 def main(): #################### SETUP CODE ######################################## # start time startTime = time.time() # # load the dataset # dataset = '/home/markg/Documents/TCD/ML/ML1819--task-107--team-11/dataset/overall_dataset.csv' # data = pd.read_csv(dataset, encoding='latin-1') # # # reformat date column # data['created'] = pd.to_datetime(data['created']) # # # create new columns for year and month % remove original column # data['year'] = pd.DatetimeIndex(data['created']).year # data['month'] = pd.DatetimeIndex(data['created']).month # data = data.drop(['created'], axis=1) # # data.drop(columns=['Unnamed: 0'], inplace = True) # # data.drop(columns = ['user_timezone', 'tweet_count', 'month', 'text_sent'], inplace=True) # # # # # reformat date column # # data['created'] = pd.to_datetime(data['created']) # # # # # create new columns for year and month # # data['year'] = pd.DatetimeIndex(data['created']).year # # data['month'] = pd.DatetimeIndex(data['created']).month # # # # # remove original date column # # data = data.drop(['created'], axis=1) # # # # # standardize numeric variables (could also consider using robust scaler here) # # numericVariables = ['fav_number', 'tweet_count','retweet_count', 'link_hue', # # 'link_sat', 'link_vue', 'sidebar_hue', 'sidebar_sat', 'sidebar_vue', 'year', 'month'] # # scaler = preprocessing.StandardScaler() # # data[numericVariables] = scaler.fit_transform(data[numericVariables]) # # ##################### END SETUP CODE ###################################### # # #################### SVM MODEL ############################################ # # # create dependent & independent variables # # X = data.drop(['gender', 'fav_number', 'user_timezone', 'tweet_count','retweet_count', 'link_hue', # # # 'link_sat', 'link_vue', 'sidebar_sat', 'sidebar_vue', 'month'], axis=1) # # # # X = data.drop('gender', axis=1) # # # y = data['gender'] # # # # print (X.keys()) # # # # # # # # # # split into 90% training, 10% testing # # # X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.10) # # # # # # # # train model (could change kernel here) # # # svm = SVC(C=1, gamma=0.3, kernel='rbf') # # # svm.fit(X_train, y_train) # # # # # # # # # # recursive feature selection using cross validation # # # # # rfecv = RFECV(estimator=svm, step=1, cv=StratifiedKFold(2), # # # # # scoring='accuracy') # # # # # rfecv.fit(X, y) # # # # # print("Optimal number of features : %d" % rfecv.n_features_) # # # # # print("Feature ranking: ", rfecv.ranking_) # # # # # # recursive feature selection without cross validation # # rfe = RFE(svm, 3) # # fit = rfe.fit(X, y) # # print('Num Features:',fit.n_features_to_select) # # print("Selected Features:",fit.support_) # # # # # # # plot bar chart of feature ranking # # features = list(X) # # ranking = rfecv.ranking_ # # plt.bar(features, ranking, align='center', alpha=0.5) # # plt.show() # # # # # Plot number of features VS. 
cross-validation scores # # plt.figure() # # plt.xlabel("Number of features selected") # # plt.ylabel("Cross validation score (nb of correct classifications)") # # plt.plot(range(1, len(rfecv.grid_scores_) + 1), rfecv.grid_scores_) # # plt.show() # # # # # # make predictions and print metrics # # y_pred = svm.predict(X_test) # # print(classification_report(y_test,y_pred)) # # print(confusion_matrix(y_test,y_pred)) # # # # # # # # # # cross validation to choose c and gamma # # C_s, gamma_s = np.meshgrid(np.logspace(-2, 1, 20), np.logspace(-2, 1, 20)) # # scores = list() # # i=0; j=0 # # for C, gamma in zip(C_s.ravel(),gamma_s.ravel()): # # svm.C = C
# # svm.gamma = gamma # # this_scores = cross_val_score(svm, X, y, cv=5) # # scores.append(np.mean(this_scores)) # # scores=np.array(scores) # # scores=scores.reshape(C_s.shape) # # fig2, ax2 = plt.subplots(figsize=(12,8)) # # c=ax2.contourf(C_s,gamma_s,scores) # # ax2.set_xlabel('C') # # ax2.set_ylabel('gamma') # # fig2.colorbar(c) # # fig2.savefig('crossvalOverall.png') # # ################## END SVM MODEL ########################################## # # # # create a subset of males and females # males = data[data['gender']==0] # females = data[data['gender']==1] # retweetCountMales = data.loc[(data['gender'] == 0) & data['retweet_count']] # print (retweetCountMales.head(10)) # to access specific columns # favNumberMales = males.loc[:,'fav_number'] # favNumberFemales = females.loc[:,'fav_number'] # plotHistTwo(favNumberMales, favNumberFemales, # x_label="Total Number of Tweets Favourited", title="Total Number of Tweets Favourited") # tweetCountMales = males.loc[:,'tweet_count'] # tweetCountFemales = females.loc[:,'tweet_count'] # plotHistTwo(tweetCountMales, tweetCountFemales, # x_label="Total Number of Tweets Posted", title="Total Number of Tweets Posted") # retweetCountMales = males.loc[:,'retweet_count'].value_counts() # retweetCountFemales = females.loc[:,'retweet_count'].value_counts() # print (retweetCountFemales) # plotHistTwo(retweetCountMales, retweetCountFemales, # x_label="Total Number of retweets Posted", title="Total Number of retweets Posted") # # plot bar char of retweet count # x = np.array([0, 1, 2]) # malesRetweet = [4451, 154, 16] # femalesRetweet = [5220, 117, 12] # width = 0.2 # ax = plt.subplot(111) # rect1 = ax.bar(x, malesRetweet, width, align='center') # rect2 = ax.bar(x + width, femalesRetweet, width, align='center') # ax.set_title('Number of retweets') # ax.set_ylabel('Frequency') # ax.set_xlabel('Number of retweets') # ax.set_xticks(x + width / 2) # ax.set_xticklabels(('0', '1', '2')) # ax.legend( (rect1[0], rect2[0]), ('Male', 'Female') ) # plt.savefig('retweet.png') # plt.show() # data.info() # to do: DATE # dateMales = females.loc[:,'year'].value_counts() x = np.array([2015, 2014, 2013, 2012, 2011, 2010, 2009, 2008, 2007, 2006]) malesDate = [506, 513, 535, 659, 784,
random_line_split
Program.ts
sFile; } else if (fileExtension === '.xml') { let xmlFile = new XmlFile(pathAbsolute, pkgPath, this); //add the file to the program this.files[pathAbsolute] = xmlFile; await xmlFile.parse(await getFileContents()); file = xmlFile; //create a new scope for this xml file let scope = new XmlScope(xmlFile); //attach this program to the new scope scope.attachProgram(this); //if the scope doesn't have a parent scope, give it the platform scope if (!scope.parentScope) { scope.parentScope = this.platformScope; } this.scopes[scope.name] = scope; } else { //TODO do we actually need to implement this? Figure out how to handle img paths // let genericFile = this.files[pathAbsolute] = <any>{ // pathAbsolute: pathAbsolute, // pkgPath: pkgPath, // wasProcessed: true // } as File; // file = <any>genericFile; } //notify listeners about this file change if (file) { this.emit('file-added', file); file.setFinishedLoading(); } else { //skip event when file is undefined } return file; } private readonly emitter = new EventEmitter(); public on(name: 'file-added', callback: (file: BrsFile | XmlFile) => void); public on(name: 'file-removed', callback: (file: BrsFile | XmlFile) => void); public on(name: string, callback: (data: any) => void) { this.emitter.on(name, callback); return () => { this.emitter.removeListener(name, callback); }; } protected emit(name: 'file-added', file: BrsFile | XmlFile); protected emit(name: 'file-removed', file: BrsFile | XmlFile); protected emit(name: string, data?: any) { this.emitter.emit(name, data); } /** * Find the file by its absolute path. This is case INSENSITIVE, since * Roku is a case insensitive file system. It is an error to have multiple files * with the same path with only case being different. * @param pathAbsolute */ public getFileByPathAbsolute(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); for (let filePath in this.files) { if (filePath.toLowerCase() === pathAbsolute.toLowerCase()) { return this.files[filePath]; } } } /** * Get a file with the specified pkg path. * If not found, return undefined */ public getFileByPkgPath(pkgPath: string) { pkgPath = util.pathSepNormalize(pkgPath); for (let filePath in this.files) { let file = this.files[filePath]; if (util.standardizePath(file.pkgPath) === util.standardizePath(pkgPath)) { return file; } } } /** * Remove a set of files from the program * @param absolutePaths */ public removeFiles(absolutePaths: string[]) { for (let pathAbsolute of absolutePaths) { this.removeFile(pathAbsolute); } } /** * Remove a file from the program * @param pathAbsolute */ public removeFile(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); let file = this.getFile(pathAbsolute); //notify every scope of this file removal for (let scopeName in this.scopes) { let scope = this.scopes[scopeName]; scope.removeFile(file); } //if there is a scope named the same as this file's path, remove it (i.e. 
xml scopes) let scope = this.scopes[file.pkgPath]; if (scope) { scope.dispose(); delete this.scopes[file.pkgPath]; } //remove the file from the program delete this.files[pathAbsolute]; this.emit('file-removed', file); } /** * Traverse the entire project, and validate all scopes */ public async validate() { this.diagnostics = []; for (let scopeName in this.scopes) { let scope = this.scopes[scopeName]; scope.validate(); } //find any files NOT loaded into a scope for (let filePath in this.files) { let file = this.files[filePath]; if (!this.fileIsIncludedInAnyScope(file)) { //the file is not loaded in any scope this.diagnostics.push({ file: file, location: Range.create(0, 0, 0, Number.MAX_VALUE), severity: 'warning', ...diagnosticMessages.File_not_referenced_by_any_file_1013() }); } } await Promise.resolve(); } /** * Determine if the given file is included in at least one scope in this program */ private fileIsIncludedInAnyScope(file: BrsFile | XmlFile) { for (let scopeName in this.scopes) { if (this.scopes[scopeName].hasFile(file)) { return true; } } return false; } /** * Get the file at the given path * @param pathAbsolute */ private getFile(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); return this.files[pathAbsolute]; } /** * Get a list of all scopes the file is loaded into * @param file */ public getScopesForFile(file: XmlFile | BrsFile) { let result = [] as Scope[]; for (let key in this.scopes) { let scope = this.scopes[key]; if (scope.hasFile(file)) { result.push(scope); } } return result; } /** * Find all available completion items at the given position * @param pathAbsolute * @param lineIndex * @param columnIndex */ public async getCompletions(pathAbsolute: string, position: Position) { let file = this.getFile(pathAbsolute); if (!file) { return []; } //wait for the file to finish loading await file.isReady(); //find the scopes for this file let scopes = this.getScopesForFile(file); //if there are no scopes, include the platform scope so we at least get the built-in functions scopes = scopes.length > 0 ? scopes : [this.platformScope]; //get the completions for this file for every scope //get the completions from all scopes for this file let allCompletions = util.flatMap( await Promise.all( scopes.map(async ctx => file.getCompletions(position, ctx)) ), c => c ); let result = [] as CompletionItem[]; //only keep completions common to every completion let keyCounts = {} as { [key: string]: number }; for (let completion of allCompletions) { let key = `${completion.label}-${completion.kind}`; keyCounts[key] = keyCounts[key] ? 
keyCounts[key] + 1 : 1; if (keyCounts[key] === scopes.length) { result.push(completion); } } return result; } /** * Given a position in a file, if the position is sitting on some type of identifier, * go to the definition of that identifier (where this thing was first defined) */ public getDefinition(pathAbsolute: string, position: Position): Location[] { let file = this.getFile(pathAbsolute); if (!file) { return []; } let results = [] as Location[]; let scopes = this.getScopesForFile(file); for (let scope of scopes) { results = results.concat(...scope.getDefinition(file, position)); } return results; } public async getHover(pathAbsolute: string, position: Position) { //find the file let file = this.getFile(pathAbsolute); if (!file) { return null; } return file.getHover(position); } public async transpile(fileEntries: StandardizedFileEntry[], stagingFolderPath: string) { let promises = Object.keys(this.files).map(async (filePath) => { let file = this.files[filePath]; if (file.needsTranspiled)
{ let result = file.transpile(); let filePathObj = fileEntries.find(x => util.standardizePath(x.src) === util.standardizePath(file.pathAbsolute)); if (!filePathObj) { throw new Error(`Cannot find fileMap record in fileMaps for '${file.pathAbsolute}'`); } //replace the file extension let outputCodePath = filePathObj.dest.replace(/\.bs$/gi, '.brs'); //prepend the staging folder path outputCodePath = util.standardizePath(`${stagingFolderPath}/${outputCodePath}`); let outputCodeMapPath = outputCodePath + '.map'; //make sure the full dir path exists await fsExtra.ensureDir(path.dirname(outputCodePath)); if (await fsExtra.pathExists(outputCodePath)) { throw new Error(`Error while transpiling "${filePath}". A file already exists at "${outputCodePath}" and will not be overwritten.`); } await Promise.all([
conditional_block
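getCompletions above merges the completions gathered from every scope by counting a label-plus-kind key and keeping only entries seen in all scopes. The same counting trick re-expressed as a small Python sketch (the completion dictionaries are made up; the real code operates on LSP CompletionItem objects):

from collections import Counter

scope_completions = [
    [{"label": "print", "kind": 3}, {"label": "m.top", "kind": 6}],
    [{"label": "print", "kind": 3}, {"label": "localOnly", "kind": 6}],
]

counts = Counter()
common = []
for completions in scope_completions:
    for item in completions:
        key = (item["label"], item["kind"])
        counts[key] += 1
        if counts[key] == len(scope_completions):
            common.append(item)

print(common)    # only the completion present in every scope survives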
Program.ts
Object.keys(this.scopes).find(x => x.toLowerCase() === scopeName.toLowerCase()); return this.scopes[key]; } /** * Load a file into the program. If that file already exists, it is replaced. * If file contents are provided, those are used, Otherwise, the file is loaded from the file system * @param pathAbsolute * @param fileContents */ public async addOrReplaceFile(fileEntry: StandardizedFileEntry, fileContents?: string) { assert.ok(fileEntry, 'fileEntry is required'); assert.ok(fileEntry.src, 'fileEntry.src is required'); assert.ok(fileEntry.dest, 'fileEntry.dest is required'); let pathAbsolute = util.standardizePath(fileEntry.src); let pkgPath = util.standardizePkgPath(fileEntry.dest); //if the file is already loaded, remove it if (this.hasFile(pathAbsolute)) { this.removeFile(pathAbsolute); } let fileExtension = path.extname(pathAbsolute).toLowerCase(); let file: BrsFile | XmlFile | undefined; //load the file contents by file path if not provided let getFileContents = async () => { if (fileContents === undefined) { return this.getFileContents(pathAbsolute); } else { return fileContents; } }; //get the extension of the file if (fileExtension === '.brs' || fileExtension === '.bs') { let brsFile = new BrsFile(pathAbsolute, pkgPath, this); //add the file to the program this.files[pathAbsolute] = brsFile; await brsFile.parse(await getFileContents()); file = brsFile; } else if (fileExtension === '.xml') { let xmlFile = new XmlFile(pathAbsolute, pkgPath, this); //add the file to the program this.files[pathAbsolute] = xmlFile; await xmlFile.parse(await getFileContents()); file = xmlFile; //create a new scope for this xml file let scope = new XmlScope(xmlFile); //attach this program to the new scope scope.attachProgram(this); //if the scope doesn't have a parent scope, give it the platform scope if (!scope.parentScope) { scope.parentScope = this.platformScope; } this.scopes[scope.name] = scope; } else { //TODO do we actually need to implement this? Figure out how to handle img paths // let genericFile = this.files[pathAbsolute] = <any>{ // pathAbsolute: pathAbsolute, // pkgPath: pkgPath, // wasProcessed: true // } as File; // file = <any>genericFile; } //notify listeners about this file change if (file) { this.emit('file-added', file); file.setFinishedLoading(); } else { //skip event when file is undefined } return file; } private readonly emitter = new EventEmitter(); public on(name: 'file-added', callback: (file: BrsFile | XmlFile) => void); public on(name: 'file-removed', callback: (file: BrsFile | XmlFile) => void); public on(name: string, callback: (data: any) => void) { this.emitter.on(name, callback); return () => { this.emitter.removeListener(name, callback); }; } protected emit(name: 'file-added', file: BrsFile | XmlFile); protected emit(name: 'file-removed', file: BrsFile | XmlFile); protected emit(name: string, data?: any) { this.emitter.emit(name, data); } /** * Find the file by its absolute path. This is case INSENSITIVE, since * Roku is a case insensitive file system. It is an error to have multiple files * with the same path with only case being different. * @param pathAbsolute */ public getFileByPathAbsolute(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); for (let filePath in this.files) { if (filePath.toLowerCase() === pathAbsolute.toLowerCase()) { return this.files[filePath]; } } } /** * Get a file with the specified pkg path. 
* If not found, return undefined */ public getFileByPkgPath(pkgPath: string) { pkgPath = util.pathSepNormalize(pkgPath); for (let filePath in this.files) { let file = this.files[filePath]; if (util.standardizePath(file.pkgPath) === util.standardizePath(pkgPath)) { return file; } } } /** * Remove a set of files from the program * @param absolutePaths */ public removeFiles(absolutePaths: string[]) { for (let pathAbsolute of absolutePaths) { this.removeFile(pathAbsolute); } } /** * Remove a file from the program * @param pathAbsolute */ public removeFile(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); let file = this.getFile(pathAbsolute); //notify every scope of this file removal for (let scopeName in this.scopes) { let scope = this.scopes[scopeName]; scope.removeFile(file); } //if there is a scope named the same as this file's path, remove it (i.e. xml scopes) let scope = this.scopes[file.pkgPath]; if (scope) { scope.dispose(); delete this.scopes[file.pkgPath]; } //remove the file from the program delete this.files[pathAbsolute]; this.emit('file-removed', file); } /** * Traverse the entire project, and validate all scopes */ public async validate() { this.diagnostics = []; for (let scopeName in this.scopes) { let scope = this.scopes[scopeName]; scope.validate(); } //find any files NOT loaded into a scope for (let filePath in this.files) { let file = this.files[filePath]; if (!this.fileIsIncludedInAnyScope(file)) { //the file is not loaded in any scope this.diagnostics.push({ file: file, location: Range.create(0, 0, 0, Number.MAX_VALUE), severity: 'warning', ...diagnosticMessages.File_not_referenced_by_any_file_1013() }); } } await Promise.resolve(); } /** * Determine if the given file is included in at least one scope in this program */ private fileIsIncludedInAnyScope(file: BrsFile | XmlFile) { for (let scopeName in this.scopes) { if (this.scopes[scopeName].hasFile(file)) { return true; } } return false; } /** * Get the file at the given path * @param pathAbsolute */ private getFile(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); return this.files[pathAbsolute]; } /** * Get a list of all scopes the file is loaded into * @param file */ public getScopesForFile(file: XmlFile | BrsFile) { let result = [] as Scope[]; for (let key in this.scopes) { let scope = this.scopes[key]; if (scope.hasFile(file)) { result.push(scope); } } return result; } /** * Find all available completion items at the given position * @param pathAbsolute * @param lineIndex * @param columnIndex */ public async getCompletions(pathAbsolute: string, position: Position) { let file = this.getFile(pathAbsolute); if (!file) { return []; } //wait for the file to finish loading await file.isReady(); //find the scopes for this file let scopes = this.getScopesForFile(file); //if there are no scopes, include the platform scope so we at least get the built-in functions scopes = scopes.length > 0 ? scopes : [this.platformScope]; //get the completions for this file for every scope //get the completions from all scopes for this file let allCompletions = util.flatMap( await Promise.all( scopes.map(async ctx => file.getCompletions(position, ctx)) ), c => c ); let result = [] as CompletionItem[]; //only keep completions common to every completion let keyCounts = {} as { [key: string]: number }; for (let completion of allCompletions) { let key = `${completion.label}-${completion.kind}`; keyCounts[key] = keyCounts[key] ? 
keyCounts[key] + 1 : 1; if (keyCounts[key] === scopes.length) { result.push(completion); } } return result; } /** * Given a position in a file, if the position is sitting on some type of identifier, * go to the definition of that identifier (where this thing was first defined) */ public getDefinition(pathAbsolute: string, position: Position): Location[] { let file = this.getFile(pathAbsolute); if (!file) {
random_line_split
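The getCompletions logic in the record above keeps only the completion items that occur in every scope the file belongs to, by counting a label+kind key until the count reaches the number of scopes. A small standalone Python sketch of that intersection-by-count idea (function name and item shape are hypothetical):

def common_completions(per_scope_completions):
    """per_scope_completions: one list of (label, kind) tuples per scope.
    Keep an item once its key has been seen in as many lists as there are scopes."""
    scope_count = len(per_scope_completions)
    key_counts = {}
    result = []
    for completions in per_scope_completions:
        for label, kind in completions:
            key = f"{label}-{kind}"
            key_counts[key] = key_counts.get(key, 0) + 1
            if key_counts[key] == scope_count:
                result.append((label, kind))
    return result

# usage: 'printMsg' exists in both scopes, 'secret' only in one
print(common_completions([
    [("printMsg", "Function"), ("secret", "Function")],
    [("printMsg", "Function")],
]))
# -> [('printMsg', 'Function')]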
Program.ts
*/ public async getFileContents(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); let reversedResolvers = [...this.fileResolvers].reverse(); for (let fileResolver of reversedResolvers) { let result = await fileResolver(pathAbsolute); if (typeof result === 'string') { return result; } } throw new Error(`Could not load file "${pathAbsolute}"`); } /** * A scope that contains all platform-provided functions. * All scopes should directly or indirectly inherit from this scope */ public platformScope: Scope; /** * The full path to the root of the project (where the manifest file lives) */ public rootDir: string; /** * A set of diagnostics. This does not include any of the scope diagnostics. * Should only be set from `this.validate()` */ private diagnostics = [] as Diagnostic[]; /** * Get a list of all files that are inlcuded in the project but are not referenced * by any scope in the program. */ public getUnreferencedFiles() { let result = [] as File[]; for (let filePath in this.files) { let file = this.files[filePath]; if (!this.fileIsIncludedInAnyScope(file)) { //no scopes reference this file. add it to the list result.push(file); } } return result; } /** * Get the list of errors for the entire program. It's calculated on the fly * by walking through every file, so call this sparingly. */ public getDiagnostics() { let diagnostics = [...this.diagnostics]; //get the diagnostics from all scopes for (let scopeName in this.scopes) { let scope = this.scopes[scopeName]; diagnostics = [ ...diagnostics, ...scope.getDiagnostics() ]; } //get the diagnostics from all unreferenced files let unreferencedFiles = this.getUnreferencedFiles(); for (let file of unreferencedFiles) { diagnostics = [ ...diagnostics, ...file.getDiagnostics() ]; } let finalDiagnostics = [] as Diagnostic[]; for (let diagnostic of diagnostics) { if ( //skip duplicate diagnostics (by reference). //This skips file parse diagnostics when multiple scopes include same file !finalDiagnostics.includes(diagnostic) && //skip any specified error codes !this.options.ignoreErrorCodes?.includes(diagnostic.code) ) { //add the diagnostic to the final list finalDiagnostics.push(diagnostic); } } return finalDiagnostics; } /** * A map of every file loaded into this program */ public files = {} as { [filePath: string]: BrsFile | XmlFile }; private scopes = {} as { [name: string]: Scope }; /** * Determine if the specified file is loaded in this program right now. * @param filePath */ public hasFile(filePath: string) { filePath = util.standardizePath(filePath); return this.files[filePath] !== undefined; } /** * Add and parse all of the provided files. * Files that are already loaded will be replaced by the latest * contents from the file system. * @param filePaths */ public async addOrReplaceFiles(fileObjects: Array<StandardizedFileEntry>) { return Promise.all( fileObjects.map(async (fileObject) => this.addOrReplaceFile(fileObject)) ); } public getPkgPath(...args: any[]): any { //eslint-disable-line throw new Error('Not implemented'); } /** * roku filesystem is case INsensitive, so find the scope by key case insensitive * @param scopeName */ public getScopeByName(scopeName: string) { //most scopes are xml file pkg paths. 
however, the ones that are not are single names like "platform" and "global", //so it's safe to run the standardizePkgPath method scopeName = util.standardizePkgPath(scopeName); let key = Object.keys(this.scopes).find(x => x.toLowerCase() === scopeName.toLowerCase()); return this.scopes[key]; } /** * Load a file into the program. If that file already exists, it is replaced. * If file contents are provided, those are used, Otherwise, the file is loaded from the file system * @param pathAbsolute * @param fileContents */ public async addOrReplaceFile(fileEntry: StandardizedFileEntry, fileContents?: string) { assert.ok(fileEntry, 'fileEntry is required'); assert.ok(fileEntry.src, 'fileEntry.src is required'); assert.ok(fileEntry.dest, 'fileEntry.dest is required'); let pathAbsolute = util.standardizePath(fileEntry.src); let pkgPath = util.standardizePkgPath(fileEntry.dest); //if the file is already loaded, remove it if (this.hasFile(pathAbsolute)) { this.removeFile(pathAbsolute); } let fileExtension = path.extname(pathAbsolute).toLowerCase(); let file: BrsFile | XmlFile | undefined; //load the file contents by file path if not provided let getFileContents = async () => { if (fileContents === undefined) { return this.getFileContents(pathAbsolute); } else { return fileContents; } }; //get the extension of the file if (fileExtension === '.brs' || fileExtension === '.bs') { let brsFile = new BrsFile(pathAbsolute, pkgPath, this); //add the file to the program this.files[pathAbsolute] = brsFile; await brsFile.parse(await getFileContents()); file = brsFile; } else if (fileExtension === '.xml') { let xmlFile = new XmlFile(pathAbsolute, pkgPath, this); //add the file to the program this.files[pathAbsolute] = xmlFile; await xmlFile.parse(await getFileContents()); file = xmlFile; //create a new scope for this xml file let scope = new XmlScope(xmlFile); //attach this program to the new scope scope.attachProgram(this); //if the scope doesn't have a parent scope, give it the platform scope if (!scope.parentScope) { scope.parentScope = this.platformScope; } this.scopes[scope.name] = scope; } else { //TODO do we actually need to implement this? Figure out how to handle img paths // let genericFile = this.files[pathAbsolute] = <any>{ // pathAbsolute: pathAbsolute, // pkgPath: pkgPath, // wasProcessed: true // } as File; // file = <any>genericFile; } //notify listeners about this file change if (file) { this.emit('file-added', file); file.setFinishedLoading(); } else { //skip event when file is undefined } return file; } private readonly emitter = new EventEmitter(); public on(name: 'file-added', callback: (file: BrsFile | XmlFile) => void); public on(name: 'file-removed', callback: (file: BrsFile | XmlFile) => void); public on(name: string, callback: (data: any) => void) { this.emitter.on(name, callback); return () => { this.emitter.removeListener(name, callback); }; } protected emit(name: 'file-added', file: BrsFile | XmlFile); protected emit(name: 'file-removed', file: BrsFile | XmlFile); protected emit(name: string, data?: any) { this.emitter.emit(name, data); } /** * Find the file by its absolute path. This is case INSENSITIVE, since * Roku is a case insensitive file system. It is an error to have multiple files * with the same path with only case being different. * @param pathAbsolute */ public
(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); for (let filePath in this.files) { if (filePath.toLowerCase() === pathAbsolute.toLowerCase()) { return this.files[filePath]; } } } /** * Get a file with the specified pkg path. * If not found, return undefined */ public getFileByPkgPath(pkgPath: string) { pkgPath = util.pathSepNormalize(pkgPath); for (let filePath in this.files) { let file = this.files[filePath]; if (util.standardizePath(file.pkgPath) === util.standardizePath(pkgPath)) { return file; } } } /** * Remove a set of files from the program * @param absolutePaths */ public removeFiles(absolutePaths: string[]) { for (let pathAbsolute of absolutePaths) { this.removeFile(pathAbsolute); } } /** * Remove a file from the program * @param pathAbsolute */ public removeFile(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); let file = this.getFile(pathAbsolute); //notify every scope of this file removal for (let
getFileByPathAbsolute
identifier_name
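getFileByPathAbsolute, the identifier filled in by the record above, scans the file map with a case-insensitive comparison because the Roku filesystem is case insensitive. A standalone Python equivalent of that lookup (the dictionary shape is an assumption):

def get_file_by_path_case_insensitive(files, path_absolute):
    """files maps absolute paths to file objects; match ignoring case and
    return None when nothing matches (mirrors the undefined return)."""
    wanted = path_absolute.lower()
    for file_path, file_obj in files.items():
        if file_path.lower() == wanted:
            return file_obj
    return None

# usage
files = {'/proj/Source/Main.brs': object()}
assert get_file_by_path_case_insensitive(files, '/proj/source/MAIN.BRS') is not None
assert get_file_by_path_case_insensitive(files, '/proj/missing.brs') is None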
Program.ts
that are already loaded will be replaced by the latest * contents from the file system. * @param filePaths */ public async addOrReplaceFiles(fileObjects: Array<StandardizedFileEntry>) { return Promise.all( fileObjects.map(async (fileObject) => this.addOrReplaceFile(fileObject)) ); } public getPkgPath(...args: any[]): any { //eslint-disable-line throw new Error('Not implemented'); } /** * roku filesystem is case INsensitive, so find the scope by key case insensitive * @param scopeName */ public getScopeByName(scopeName: string) { //most scopes are xml file pkg paths. however, the ones that are not are single names like "platform" and "global", //so it's safe to run the standardizePkgPath method scopeName = util.standardizePkgPath(scopeName); let key = Object.keys(this.scopes).find(x => x.toLowerCase() === scopeName.toLowerCase()); return this.scopes[key]; } /** * Load a file into the program. If that file already exists, it is replaced. * If file contents are provided, those are used, Otherwise, the file is loaded from the file system * @param pathAbsolute * @param fileContents */ public async addOrReplaceFile(fileEntry: StandardizedFileEntry, fileContents?: string) { assert.ok(fileEntry, 'fileEntry is required'); assert.ok(fileEntry.src, 'fileEntry.src is required'); assert.ok(fileEntry.dest, 'fileEntry.dest is required'); let pathAbsolute = util.standardizePath(fileEntry.src); let pkgPath = util.standardizePkgPath(fileEntry.dest); //if the file is already loaded, remove it if (this.hasFile(pathAbsolute)) { this.removeFile(pathAbsolute); } let fileExtension = path.extname(pathAbsolute).toLowerCase(); let file: BrsFile | XmlFile | undefined; //load the file contents by file path if not provided let getFileContents = async () => { if (fileContents === undefined) { return this.getFileContents(pathAbsolute); } else { return fileContents; } }; //get the extension of the file if (fileExtension === '.brs' || fileExtension === '.bs') { let brsFile = new BrsFile(pathAbsolute, pkgPath, this); //add the file to the program this.files[pathAbsolute] = brsFile; await brsFile.parse(await getFileContents()); file = brsFile; } else if (fileExtension === '.xml') { let xmlFile = new XmlFile(pathAbsolute, pkgPath, this); //add the file to the program this.files[pathAbsolute] = xmlFile; await xmlFile.parse(await getFileContents()); file = xmlFile; //create a new scope for this xml file let scope = new XmlScope(xmlFile); //attach this program to the new scope scope.attachProgram(this); //if the scope doesn't have a parent scope, give it the platform scope if (!scope.parentScope) { scope.parentScope = this.platformScope; } this.scopes[scope.name] = scope; } else { //TODO do we actually need to implement this? 
Figure out how to handle img paths // let genericFile = this.files[pathAbsolute] = <any>{ // pathAbsolute: pathAbsolute, // pkgPath: pkgPath, // wasProcessed: true // } as File; // file = <any>genericFile; } //notify listeners about this file change if (file) { this.emit('file-added', file); file.setFinishedLoading(); } else { //skip event when file is undefined } return file; } private readonly emitter = new EventEmitter(); public on(name: 'file-added', callback: (file: BrsFile | XmlFile) => void); public on(name: 'file-removed', callback: (file: BrsFile | XmlFile) => void); public on(name: string, callback: (data: any) => void) { this.emitter.on(name, callback); return () => { this.emitter.removeListener(name, callback); }; } protected emit(name: 'file-added', file: BrsFile | XmlFile); protected emit(name: 'file-removed', file: BrsFile | XmlFile); protected emit(name: string, data?: any) { this.emitter.emit(name, data); } /** * Find the file by its absolute path. This is case INSENSITIVE, since * Roku is a case insensitive file system. It is an error to have multiple files * with the same path with only case being different. * @param pathAbsolute */ public getFileByPathAbsolute(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); for (let filePath in this.files) { if (filePath.toLowerCase() === pathAbsolute.toLowerCase()) { return this.files[filePath]; } } } /** * Get a file with the specified pkg path. * If not found, return undefined */ public getFileByPkgPath(pkgPath: string) { pkgPath = util.pathSepNormalize(pkgPath); for (let filePath in this.files) { let file = this.files[filePath]; if (util.standardizePath(file.pkgPath) === util.standardizePath(pkgPath)) { return file; } } } /** * Remove a set of files from the program * @param absolutePaths */ public removeFiles(absolutePaths: string[]) { for (let pathAbsolute of absolutePaths) { this.removeFile(pathAbsolute); } } /** * Remove a file from the program * @param pathAbsolute */ public removeFile(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); let file = this.getFile(pathAbsolute); //notify every scope of this file removal for (let scopeName in this.scopes) { let scope = this.scopes[scopeName]; scope.removeFile(file); } //if there is a scope named the same as this file's path, remove it (i.e. 
xml scopes) let scope = this.scopes[file.pkgPath]; if (scope) { scope.dispose(); delete this.scopes[file.pkgPath]; } //remove the file from the program delete this.files[pathAbsolute]; this.emit('file-removed', file); } /** * Traverse the entire project, and validate all scopes */ public async validate() { this.diagnostics = []; for (let scopeName in this.scopes) { let scope = this.scopes[scopeName]; scope.validate(); } //find any files NOT loaded into a scope for (let filePath in this.files) { let file = this.files[filePath]; if (!this.fileIsIncludedInAnyScope(file)) { //the file is not loaded in any scope this.diagnostics.push({ file: file, location: Range.create(0, 0, 0, Number.MAX_VALUE), severity: 'warning', ...diagnosticMessages.File_not_referenced_by_any_file_1013() }); } } await Promise.resolve(); } /** * Determine if the given file is included in at least one scope in this program */ private fileIsIncludedInAnyScope(file: BrsFile | XmlFile) { for (let scopeName in this.scopes) { if (this.scopes[scopeName].hasFile(file)) { return true; } } return false; } /** * Get the file at the given path * @param pathAbsolute */ private getFile(pathAbsolute: string) { pathAbsolute = util.standardizePath(pathAbsolute); return this.files[pathAbsolute]; } /** * Get a list of all scopes the file is loaded into * @param file */ public getScopesForFile(file: XmlFile | BrsFile) { let result = [] as Scope[]; for (let key in this.scopes) { let scope = this.scopes[key]; if (scope.hasFile(file)) { result.push(scope); } } return result; } /** * Find all available completion items at the given position * @param pathAbsolute * @param lineIndex * @param columnIndex */ public async getCompletions(pathAbsolute: string, position: Position)
{ let file = this.getFile(pathAbsolute); if (!file) { return []; } //wait for the file to finish loading await file.isReady(); //find the scopes for this file let scopes = this.getScopesForFile(file); //if there are no scopes, include the platform scope so we at least get the built-in functions scopes = scopes.length > 0 ? scopes : [this.platformScope]; //get the completions for this file for every scope //get the completions from all scopes for this file let allCompletions = util.flatMap( await Promise.all(
identifier_body
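addOrReplaceFile, which recurs throughout the record above, dispatches on the file extension: '.brs'/'.bs' files are parsed as BrsFile, '.xml' files as XmlFile plus a dedicated scope that falls back to the platform scope as its parent, and anything else is currently skipped. A rough Python sketch of that dispatch (the return strings are placeholders, not real types):

def classify_source_file(path):
    """Mirror the extension dispatch in addOrReplaceFile: BrightScript sources
    are parsed as BrsFile, XML components as XmlFile (which also gets its own
    scope, parented to the platform scope when no other parent exists);
    other files are ignored for now."""
    ext = path.lower().rsplit('.', 1)[-1] if '.' in path else ''
    if ext in ('brs', 'bs'):
        return 'BrsFile'
    if ext == 'xml':
        return 'XmlFile + new scope'
    return 'ignored'

for p in ('source/main.brs', 'source/lib.bs', 'components/Scene.xml', 'images/logo.png'):
    print(p, '->', classify_source_file(p))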
si5324.py
(self, register, startbit, length): self.register = register self.startbit = startbit self.length = length def get_endbit(self): return (self.startbit - (self.length - 1)) class Alarms: """ Alarms Class: Holds a collection of alarms states for the class, including both the INT (interrupt) current states and FLG flags that need manual resetting. The console output when this class is returned is a table of states. Otherwise, individual alarms can be accessed directly. """ Loss_Of_Lock_INT = False Loss_Of_Lock_FLG = False Loss_Of_Signal_1_INT = False Loss_Of_Signal_1_FLG = False Loss_Of_Signal_2_INT = False Loss_Of_Signal_2_FLG = False Loss_Of_Signal_X_INT = False Loss_Of_Signal_X_FLG = False Freq_Offset_1_INT = False Freq_Offset_1_FLG = False Freq_Offset_2_INT = False Freq_Offset_2_FLG = False def __repr__(self): return ("\nAlm:\t\tInt:\t\tFlg:\n" + "-------------------------------------\n" + "{}\t\t{}\t\t{}\n".format("LOL", self.Loss_Of_Lock_INT, self.Loss_Of_Lock_FLG) + "{}\t\t{}\t\t{}\n".format("LOS1", self.Loss_Of_Signal_1_INT, self.Loss_Of_Signal_1_FLG) + "{}\t\t{}\t\t{}\n".format("LOS2", self.Loss_Of_Signal_2_INT, self.Loss_Of_Signal_2_FLG) + "{}\t\t{}\t\t{}\n".format("LOSX", self.Loss_Of_Signal_X_INT, self.Loss_Of_Signal_X_FLG) + "{}\t\t{}\t\t{}\n".format("FO1", self.Freq_Offset_1_INT, self.Freq_Offset_1_FLG) + "{}\t\t{}\t\t{}\n".format("FO2", self.Freq_Offset_2_INT, self.Freq_Offset_2_FLG) ) class SI5324(I2CDevice): """ SI4324 Clock Multiplier Class: """ # Registers that will require an iCAL calibration after modification _ICAL_sensitive_registers = [0,1,2,4,5,7,7,9,10,11,19,25,31,34,40,43,46,55] # Registers that should be included in the extracted register mapfile _regmap_registers = [ 0,1,2,3,4,5,6,7,8,9, 10,11,19, 20,21,22,23,24,25, 31,32,33,34,35,36, 40,41,42,43,44,45,46,47,48, 55, 131,132,137,138,139, 142,143, 136] # Register 136 is here by convention (iCAL trigger) # Clock IDs CLOCK_NONE = 0 CLOCK_1 = 1 CLOCK_2 = 2 CLOCK_X = 3 # Autoselection Options: AUTOMODE_Manual = 0b00 AUTOMODE_Auto_Non_Revertive = 0b01 AUTOMODE_Auto_Revertive = 0b10 # Define control fields within I2C registers _FIELD_Free_Run_Mode = _Field(0,6,1) # FREE_RUN Free Run Mode Enable _FIELD_Clock_1_Priority = _Field(1,1,2) # CK_PRIOR2 Clock with 2nd priority _FIELD_Clock_2_Priority = _Field(1,3,2) # CK_PRIOR1 Clock with 1st priority _FIELD_Clock_Select = _Field(3,7,2) # CLKSEL_REG Manual clock selection _FIELD_Autoselection = _Field(4,7,2) # AUTOSEL_REG Autoselection mode _FIELD_Clock_Active = _Field(128,1,2) # CKx_ACTV_REG for clocks 1 and 2 _FIELD_LOS1_INT = _Field(129,1,1) # LOS1_INT Loss of Signal alarm for CLKIN_1 _FIELD_LOS2_INT = _Field(129,2,1) # LOS2_INT Loss of Signal alarm for CLKIN_2 _FIELD_LOSX_INT = _Field(129,0,1) # LOSX_INT Loss of Signal alarm for XA/XB _FIELD_FOSC1_INT = _Field(130,1,1) # FOSC1_INT Frequency Offset alarm for CLKIN_1 _FIELD_FOSC2_INT = _Field(130,2,1) # FOSC2_INT Frequency Offset alarm for CLKIN_2 _FIELD_LOL_INT = _Field(130,0,1) # LOL_INT Loss of Lock alarm _FIELD_ICAL_TRG = _Field(136,6,1) # ICAL Internal Calibration Trigger _FIELD_RST_TRG = _Field(136,7,1) # RST_REG Internal Reset Trigger # NOTE: FLGs need manual clearing, for live alarm status, use corresponding INT signals... 
_FIELD_FOSC1_FLG = _Field(132,2,1) # FOSC1_FLG Frequency Offset Flag for CLKIN_1 _FIELD_FOSC2_FLG = _Field(132,3,1) # FOSC2_FLG Frequency Offset Flag for CLKIN_2 _FIELD_LOL_FLG = _Field(132,1,1) # LOL_FLG Loss of Lock Flag # NOTE: Any further register fields should be defined here def __init__(self, address=0x68, **kwargs): """ Initialise the SI5324 device. :param address: The address of the SI5324 is determined by pins A[2:0] as follows: 0b1101[A2][A1][A0]. """ I2CDevice.__init__(self, address, **kwargs) logger.info("Created new si5324 instance with address 0x{:02X}.".format(address)) self.iCAL_required = True # An iCAL is required at least once before run """ Utility Functions: """ @staticmethod def pins_to_address(A2,A1,A0): """ Return value of address that self.will be used by the device based on the address pin states A[2:0]. Arguments should be supplied as 1/0. """ if not all(pin in [0,1] for pin in [A2,A1,A0]): # Check pins are 1 or 0 raise I2CException("Pins should be specified as 1 or 0") return (0b1101000 | (A2 << 2) | (A1 << 1) | A0) """ Direct Control Field Functions """ def _set_register_field(self, field, value, verify=False): """ Write a field of <=8 bits into an 8-bit register. Field bits are masked to preserve other settings held within the same register. Some registers for this device are 'ICAL sensitive', meaning that a calibration procedure must be run if they are changed. This is handled automatically unless otherwise specified. :param field: _Field instance holding relevant register and location of field bits :param value: Unsigned byte holding unshifted value to be written to the field :param verify: Boolean. If true, read values back to verify correct writing. """ logger.debug("Writing value {} to field {}-{} in register {}".format( value,field.startbit,field.get_endbit(),field.register)) # check input fits in specified field if (1 << (field.length)) <= value: raise I2CException( "Value {} does not fit in specified field of length {}.".format( value, field.length)) old_value = self.readU8(field.register) new_msk = (0xff >> (8-field.length)) << field.get_endbit() logger.debug("Register {}: field start: {}, field end: {} -> mask {:b}".format( field.register,field.startbit,field.get_endbit(), new_msk)) new_value = (old_value & ~new_msk) | (value << field
__init__
identifier_name
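pins_to_address in the record above assembles the 7-bit I2C address from the fixed 0b1101 prefix and the A2..A0 pin states. A standalone Python check of that arithmetic (the asserted values follow directly from the formula; 0x68 matches the driver's default address):

def pins_to_address(a2, a1, a0):
    """Same arithmetic as SI5324.pins_to_address: 0b1101000 OR'd with the pin bits."""
    if not all(pin in (0, 1) for pin in (a2, a1, a0)):
        raise ValueError("Pins should be specified as 1 or 0")
    return 0b1101000 | (a2 << 2) | (a1 << 1) | a0

# all pins low -> 0x68 (the default address); all pins high -> 0x6F
assert pins_to_address(0, 0, 0) == 0x68
assert pins_to_address(1, 1, 1) == 0x6F
print([hex(pins_to_address(a2, a1, a0)) for a2 in (0, 1) for a1 in (0, 1) for a0 in (0, 1)])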
si5324.py
_Clock_Active = _Field(128,1,2) # CKx_ACTV_REG for clocks 1 and 2 _FIELD_LOS1_INT = _Field(129,1,1) # LOS1_INT Loss of Signal alarm for CLKIN_1 _FIELD_LOS2_INT = _Field(129,2,1) # LOS2_INT Loss of Signal alarm for CLKIN_2 _FIELD_LOSX_INT = _Field(129,0,1) # LOSX_INT Loss of Signal alarm for XA/XB _FIELD_FOSC1_INT = _Field(130,1,1) # FOSC1_INT Frequency Offset alarm for CLKIN_1 _FIELD_FOSC2_INT = _Field(130,2,1) # FOSC2_INT Frequency Offset alarm for CLKIN_2 _FIELD_LOL_INT = _Field(130,0,1) # LOL_INT Loss of Lock alarm _FIELD_ICAL_TRG = _Field(136,6,1) # ICAL Internal Calibration Trigger _FIELD_RST_TRG = _Field(136,7,1) # RST_REG Internal Reset Trigger # NOTE: FLGs need manual clearing, for live alarm status, use corresponding INT signals... _FIELD_FOSC1_FLG = _Field(132,2,1) # FOSC1_FLG Frequency Offset Flag for CLKIN_1 _FIELD_FOSC2_FLG = _Field(132,3,1) # FOSC2_FLG Frequency Offset Flag for CLKIN_2 _FIELD_LOL_FLG = _Field(132,1,1) # LOL_FLG Loss of Lock Flag # NOTE: Any further register fields should be defined here def __init__(self, address=0x68, **kwargs): """ Initialise the SI5324 device. :param address: The address of the SI5324 is determined by pins A[2:0] as follows: 0b1101[A2][A1][A0]. """ I2CDevice.__init__(self, address, **kwargs) logger.info("Created new si5324 instance with address 0x{:02X}.".format(address)) self.iCAL_required = True # An iCAL is required at least once before run """ Utility Functions: """ @staticmethod def pins_to_address(A2,A1,A0): """ Return value of address that self.will be used by the device based on the address pin states A[2:0]. Arguments should be supplied as 1/0. """ if not all(pin in [0,1] for pin in [A2,A1,A0]): # Check pins are 1 or 0 raise I2CException("Pins should be specified as 1 or 0") return (0b1101000 | (A2 << 2) | (A1 << 1) | A0) """ Direct Control Field Functions """ def _set_register_field(self, field, value, verify=False): """ Write a field of <=8 bits into an 8-bit register. Field bits are masked to preserve other settings held within the same register. Some registers for this device are 'ICAL sensitive', meaning that a calibration procedure must be run if they are changed. This is handled automatically unless otherwise specified. :param field: _Field instance holding relevant register and location of field bits :param value: Unsigned byte holding unshifted value to be written to the field :param verify: Boolean. If true, read values back to verify correct writing. 
""" logger.debug("Writing value {} to field {}-{} in register {}".format( value,field.startbit,field.get_endbit(),field.register)) # check input fits in specified field if (1 << (field.length)) <= value: raise I2CException( "Value {} does not fit in specified field of length {}.".format( value, field.length)) old_value = self.readU8(field.register) new_msk = (0xff >> (8-field.length)) << field.get_endbit() logger.debug("Register {}: field start: {}, field end: {} -> mask {:b}".format( field.register,field.startbit,field.get_endbit(), new_msk)) new_value = (old_value & ~new_msk) | (value << field.get_endbit()) logger.debug("Register {}: {:b} -> {:b}".format(field.register, old_value, new_value)) if new_value != old_value: self.write8(field.register, new_value) if verify: verify_value = self._get_register_field(field) logger.debug("Verifying value written ({:b}) against re-read: {:b}".format( value,verify_value)) if verify_value != value: raise I2CException( "Value {} was not successfully written to Field {}".format( value, field)) if (field.register in SI5324._ICAL_sensitive_registers): logger.info("Register {} requires iCAL run".format(field.register)) self.iCAL_required = True def _get_register_field(self, field): """ Read only the field-specific bits from the relevant register :param field: _Field instance holding relevant register and location of field bits """ logger.debug("Getting field starting at bit {}, length {} from register {}".format( field.startbit,field.length,field.register)) raw_register_value = self.readU8(field.register) logger.debug("Raw value: {0:b}".format(raw_register_value)) # remove high bits value = raw_register_value & (0xFF >> (7-field.startbit)) logger.debug("high bits removed: {0:b}".format(value)) # shift value to position 0 value = value >> field.get_endbit() logger.debug("Low bits removed: {0:b}".format(value)) return value """ Register Map File Functions """ def apply_register_map(self, mapfile_location, verify=True): """ Write configuration from a register map generated with DSPLLsim. Since the map is register rather than value-based, there is no need to make use of the _Field access functions. :param mapfile_location: location of register map file to be read :param verify: Boolean. If true, read registers back to verify they are written correctly. """ with open(mapfile_location, 'r') as f: for line in f.readlines(): # The register map starts after general information is printed preceded by '#' if line[0] != '#': # Extract register-value pairing from register map register, value = line.split(',') register = int(register) value = int(value[1:3],16) # Value is in hex if register == 136 and (value & 0x40): logger.info("Ignoring write to iCAL, will be applied next") continue # Write register value logger.info("Writing register {} with value {:02X}".format(register,value)) self.write8(register, value) if verify: verify_value = self.readU8(register)
raise I2CException( "Write of byte to register {} failed.".format(register)) # ICAL-sensitive registers will have been modified during this process self.iCAL_required = True self.calibrate() def export_register_map(self, mapfile_location): """ Generate a register map file using the current settings in device control registers. This file can then be loaded using apply_register_map(filename). :param mapfile_location: location of register map file that will be written to. """ with open(mapfile_location, 'w') as f: f.write("# This register map has been generated for the odin-devices SI5324 driver.\n") # The registers that will be read are the ones found in output register # maps from DSPLLsim. for register in SI5324._regmap_registers: if register == 136: # This register will read 00, but should be written as 0x40 to match # the versions generated by DSPLLsim. This would trigger an iCAL if # written, but is ignored in apply_register_map(). f.write("136, 40h\n") continue value = self.readU8(register) logger.info("Read register {}: {:02X}".format(register, value)) f.write("{}, {:02X}h\n".format(register, value)) logger.info("Register map extraction complete, to file: {}".format(mapfile_location)) """ Device Action Commands """ def _run_ical(self, timeout_ms=20000): """ Runs the ICAL calibration. This should be performed before any usage, since accuracy is not guaranteed until it is complete. By default, output will be
logger.debug("Verifying value written ({:b}) against re-read: {:b}".format( value,verify_value)) if verify_value != value:
random_line_split
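_set_register_field, documented in the record above, overwrites only the field's bits inside an 8-bit register: the end bit is startbit - (length - 1), the mask is (0xff >> (8 - length)) << endbit, and the merged byte is (old & ~mask) | (value << endbit). The same arithmetic as standalone Python, with no I2C access (function names are illustrative):

def field_end_bit(startbit, length):
    # same as _Field.get_endbit(): lowest bit position occupied by the field
    return startbit - (length - 1)

def merge_field(old_byte, startbit, length, value):
    """Overwrite only the field bits of old_byte, preserving the rest
    (mirrors the mask arithmetic in _set_register_field)."""
    if value >= (1 << length):
        raise ValueError("value does not fit in field")
    end = field_end_bit(startbit, length)
    mask = (0xFF >> (8 - length)) << end
    return (old_byte & ~mask) | (value << end)

# usage: write 0b10 into a 2-bit field at bits 3..2 of 0b11111111
assert merge_field(0b11111111, 3, 2, 0b10) == 0b11111011
# bits outside the field are untouched
assert merge_field(0b00000000, 7, 1, 1) == 0b10000000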
si5324.py
__init__(self, address=0x68, **kwargs): """ Initialise the SI5324 device. :param address: The address of the SI5324 is determined by pins A[2:0] as follows: 0b1101[A2][A1][A0]. """ I2CDevice.__init__(self, address, **kwargs) logger.info("Created new si5324 instance with address 0x{:02X}.".format(address)) self.iCAL_required = True # An iCAL is required at least once before run """ Utility Functions: """ @staticmethod def pins_to_address(A2,A1,A0): """ Return value of address that self.will be used by the device based on the address pin states A[2:0]. Arguments should be supplied as 1/0. """ if not all(pin in [0,1] for pin in [A2,A1,A0]): # Check pins are 1 or 0 raise I2CException("Pins should be specified as 1 or 0") return (0b1101000 | (A2 << 2) | (A1 << 1) | A0) """ Direct Control Field Functions """ def _set_register_field(self, field, value, verify=False): """ Write a field of <=8 bits into an 8-bit register. Field bits are masked to preserve other settings held within the same register. Some registers for this device are 'ICAL sensitive', meaning that a calibration procedure must be run if they are changed. This is handled automatically unless otherwise specified. :param field: _Field instance holding relevant register and location of field bits :param value: Unsigned byte holding unshifted value to be written to the field :param verify: Boolean. If true, read values back to verify correct writing. """ logger.debug("Writing value {} to field {}-{} in register {}".format( value,field.startbit,field.get_endbit(),field.register)) # check input fits in specified field if (1 << (field.length)) <= value: raise I2CException( "Value {} does not fit in specified field of length {}.".format( value, field.length)) old_value = self.readU8(field.register) new_msk = (0xff >> (8-field.length)) << field.get_endbit() logger.debug("Register {}: field start: {}, field end: {} -> mask {:b}".format( field.register,field.startbit,field.get_endbit(), new_msk)) new_value = (old_value & ~new_msk) | (value << field.get_endbit()) logger.debug("Register {}: {:b} -> {:b}".format(field.register, old_value, new_value)) if new_value != old_value: self.write8(field.register, new_value) if verify: verify_value = self._get_register_field(field) logger.debug("Verifying value written ({:b}) against re-read: {:b}".format( value,verify_value)) if verify_value != value: raise I2CException( "Value {} was not successfully written to Field {}".format( value, field)) if (field.register in SI5324._ICAL_sensitive_registers): logger.info("Register {} requires iCAL run".format(field.register)) self.iCAL_required = True def _get_register_field(self, field): """ Read only the field-specific bits from the relevant register :param field: _Field instance holding relevant register and location of field bits """ logger.debug("Getting field starting at bit {}, length {} from register {}".format( field.startbit,field.length,field.register)) raw_register_value = self.readU8(field.register) logger.debug("Raw value: {0:b}".format(raw_register_value)) # remove high bits value = raw_register_value & (0xFF >> (7-field.startbit)) logger.debug("high bits removed: {0:b}".format(value)) # shift value to position 0 value = value >> field.get_endbit() logger.debug("Low bits removed: {0:b}".format(value)) return value """ Register Map File Functions """ def apply_register_map(self, mapfile_location, verify=True): """ Write configuration from a register map generated with DSPLLsim. 
Since the map is register rather than value-based, there is no need to make use of the _Field access functions. :param mapfile_location: location of register map file to be read :param verify: Boolean. If true, read registers back to verify they are written correctly. """ with open(mapfile_location, 'r') as f: for line in f.readlines(): # The register map starts after general information is printed preceded by '#' if line[0] != '#': # Extract register-value pairing from register map register, value = line.split(',') register = int(register) value = int(value[1:3],16) # Value is in hex if register == 136 and (value & 0x40): logger.info("Ignoring write to iCAL, will be applied next") continue # Write register value logger.info("Writing register {} with value {:02X}".format(register,value)) self.write8(register, value) if verify: verify_value = self.readU8(register) logger.debug("Verifying value written ({:b}) against re-read: {:b}".format( value,verify_value)) if verify_value != value: raise I2CException( "Write of byte to register {} failed.".format(register)) # ICAL-sensitive registers will have been modified during this process self.iCAL_required = True self.calibrate() def export_register_map(self, mapfile_location): """ Generate a register map file using the current settings in device control registers. This file can then be loaded using apply_register_map(filename). :param mapfile_location: location of register map file that will be written to. """ with open(mapfile_location, 'w') as f: f.write("# This register map has been generated for the odin-devices SI5324 driver.\n") # The registers that will be read are the ones found in output register # maps from DSPLLsim. for register in SI5324._regmap_registers: if register == 136: # This register will read 00, but should be written as 0x40 to match # the versions generated by DSPLLsim. This would trigger an iCAL if # written, but is ignored in apply_register_map(). f.write("136, 40h\n") continue value = self.readU8(register) logger.info("Read register {}: {:02X}".format(register, value)) f.write("{}, {:02X}h\n".format(register, value)) logger.info("Register map extraction complete, to file: {}".format(mapfile_location)) """ Device Action Commands """ def _run_ical(self, timeout_ms=20000): """ Runs the ICAL calibration. This should be performed before any usage, since accuracy is not guaranteed until it is complete. By default, output will be disabled before calibration has been completed, but enabled during the calibration. The output can be squelched during these periods, with CKOUT_ALWAYS_ON controlling for former, and SQ_ICAL the latter. The ICAL will typically take around 1s, and will hold LOL_INT high during. :param timeout_ms: Time to wait for LOL flag to go low in ms. :return: 0 for success, 1 for failure """ # Write register 136 bit 6 high (self-resetting) self._set_register_field(SI5324._FIELD_ICAL_TRG, 1) logger.info("iCAL initiated") # Wait for LOL low signal before proceeding (signals end of calibration) # Lock time (tLOCKMP) is: # SI5324E* Typ:1.0s Max:1.5s # SI5324A/B/C/D* Typ:0.8s Max:1.0s start_time = time.time() latest_time = time.time() while self._get_register_field(SI5324._FIELD_LOL_INT):
time.sleep(0.100) logger.debug("iCAL waiting...") # Check for LOL timeout (not necessarily fatal, since the input # could just be inactive when selected. However, iCAL should be # performed after the input is provided, or the output will be # unstable). latest_time = time.time() if ((latest_time - start_time)*1000) > timeout_ms: logger.warning(( "iCAL timed out after {}s.".format(latest_time-start_time) + " Check if selected clock has Loss Of Signal:" + "\n{}".format(self.get_alarm_states()) + "\nNOTE: iCAL should be performed on desired source before use." )) return 1
conditional_block
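apply_register_map in the record above consumes DSPLLsim-style text of 'register, value' pairs, ignoring '#' comment lines and deferring any write of the iCAL bit (0x40) to register 136. A minimal standalone parser in the same spirit (no I2C writes; the sample lines are invented for illustration):

def parse_register_map(lines):
    """Yield (register, value) pairs from DSPLLsim-style text such as '40, 01h'.
    Comment lines start with '#'; a write of 0x40 to register 136 (the iCAL
    trigger) is skipped, matching apply_register_map's behaviour."""
    for line in lines:
        if not line.strip() or line[0] == '#':
            continue
        register_text, value_text = line.split(',')
        register = int(register_text)
        value = int(value_text.strip().rstrip('h'), 16)
        if register == 136 and (value & 0x40):
            continue  # calibration is triggered separately, after the map is applied
        yield register, value

sample = [
    "# Example DSPLLsim output",
    "0, 54h",
    "136, 40h",   # skipped: iCAL trigger
    "137, 01h",
]
print(list(parse_register_map(sample)))
# -> [(0, 84), (137, 1)]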
si5324.py
_Clock_Active = _Field(128,1,2) # CKx_ACTV_REG for clocks 1 and 2 _FIELD_LOS1_INT = _Field(129,1,1) # LOS1_INT Loss of Signal alarm for CLKIN_1 _FIELD_LOS2_INT = _Field(129,2,1) # LOS2_INT Loss of Signal alarm for CLKIN_2 _FIELD_LOSX_INT = _Field(129,0,1) # LOSX_INT Loss of Signal alarm for XA/XB _FIELD_FOSC1_INT = _Field(130,1,1) # FOSC1_INT Frequency Offset alarm for CLKIN_1 _FIELD_FOSC2_INT = _Field(130,2,1) # FOSC2_INT Frequency Offset alarm for CLKIN_2 _FIELD_LOL_INT = _Field(130,0,1) # LOL_INT Loss of Lock alarm _FIELD_ICAL_TRG = _Field(136,6,1) # ICAL Internal Calibration Trigger _FIELD_RST_TRG = _Field(136,7,1) # RST_REG Internal Reset Trigger # NOTE: FLGs need manual clearing, for live alarm status, use corresponding INT signals... _FIELD_FOSC1_FLG = _Field(132,2,1) # FOSC1_FLG Frequency Offset Flag for CLKIN_1 _FIELD_FOSC2_FLG = _Field(132,3,1) # FOSC2_FLG Frequency Offset Flag for CLKIN_2 _FIELD_LOL_FLG = _Field(132,1,1) # LOL_FLG Loss of Lock Flag # NOTE: Any further register fields should be defined here def __init__(self, address=0x68, **kwargs): """ Initialise the SI5324 device. :param address: The address of the SI5324 is determined by pins A[2:0] as follows: 0b1101[A2][A1][A0]. """ I2CDevice.__init__(self, address, **kwargs) logger.info("Created new si5324 instance with address 0x{:02X}.".format(address)) self.iCAL_required = True # An iCAL is required at least once before run """ Utility Functions: """ @staticmethod def pins_to_address(A2,A1,A0): """ Return value of address that self.will be used by the device based on the address pin states A[2:0]. Arguments should be supplied as 1/0. """ if not all(pin in [0,1] for pin in [A2,A1,A0]): # Check pins are 1 or 0 raise I2CException("Pins should be specified as 1 or 0") return (0b1101000 | (A2 << 2) | (A1 << 1) | A0) """ Direct Control Field Functions """ def _set_register_field(self, field, value, verify=False): """ Write a field of <=8 bits into an 8-bit register. Field bits are masked to preserve other settings held within the same register. Some registers for this device are 'ICAL sensitive', meaning that a calibration procedure must be run if they are changed. This is handled automatically unless otherwise specified. :param field: _Field instance holding relevant register and location of field bits :param value: Unsigned byte holding unshifted value to be written to the field :param verify: Boolean. If true, read values back to verify correct writing. 
""" logger.debug("Writing value {} to field {}-{} in register {}".format( value,field.startbit,field.get_endbit(),field.register)) # check input fits in specified field if (1 << (field.length)) <= value: raise I2CException( "Value {} does not fit in specified field of length {}.".format( value, field.length)) old_value = self.readU8(field.register) new_msk = (0xff >> (8-field.length)) << field.get_endbit() logger.debug("Register {}: field start: {}, field end: {} -> mask {:b}".format( field.register,field.startbit,field.get_endbit(), new_msk)) new_value = (old_value & ~new_msk) | (value << field.get_endbit()) logger.debug("Register {}: {:b} -> {:b}".format(field.register, old_value, new_value)) if new_value != old_value: self.write8(field.register, new_value) if verify: verify_value = self._get_register_field(field) logger.debug("Verifying value written ({:b}) against re-read: {:b}".format( value,verify_value)) if verify_value != value: raise I2CException( "Value {} was not successfully written to Field {}".format( value, field)) if (field.register in SI5324._ICAL_sensitive_registers): logger.info("Register {} requires iCAL run".format(field.register)) self.iCAL_required = True def _get_register_field(self, field): """ Read only the field-specific bits from the relevant register :param field: _Field instance holding relevant register and location of field bits """ logger.debug("Getting field starting at bit {}, length {} from register {}".format( field.startbit,field.length,field.register)) raw_register_value = self.readU8(field.register) logger.debug("Raw value: {0:b}".format(raw_register_value)) # remove high bits value = raw_register_value & (0xFF >> (7-field.startbit)) logger.debug("high bits removed: {0:b}".format(value)) # shift value to position 0 value = value >> field.get_endbit() logger.debug("Low bits removed: {0:b}".format(value)) return value """ Register Map File Functions """ def apply_register_map(self, mapfile_location, verify=True):
# Write register value logger.info("Writing register {} with value {:02X}".format(register,value)) self.write8(register, value) if verify: verify_value = self.readU8(register) logger.debug("Verifying value written ({:b}) against re-read: {:b}".format( value,verify_value)) if verify_value != value: raise I2CException( "Write of byte to register {} failed.".format(register)) # ICAL-sensitive registers will have been modified during this process self.iCAL_required = True self.calibrate() def export_register_map(self, mapfile_location): """ Generate a register map file using the current settings in device control registers. This file can then be loaded using apply_register_map(filename). :param mapfile_location: location of register map file that will be written to. """ with open(mapfile_location, 'w') as f: f.write("# This register map has been generated for the odin-devices SI5324 driver.\n") # The registers that will be read are the ones found in output register # maps from DSPLLsim. for register in SI5324._regmap_registers: if register == 136: # This register will read 00, but should be written as 0x40 to match # the versions generated by DSPLLsim. This would trigger an iCAL if # written, but is ignored in apply_register_map(). f.write("136, 40h\n") continue value = self.readU8(register) logger.info("Read register {}: {:02X}".format(register, value)) f.write("{}, {:02X}h\n".format(register, value)) logger.info("Register map extraction complete, to file: {}".format(mapfile_location)) """ Device Action Commands """ def _run_ical(self, timeout_ms=20000): """ Runs the ICAL calibration. This should be performed before any usage, since accuracy is not guaranteed until it is complete. By default, output will be
""" Write configuration from a register map generated with DSPLLsim. Since the map is register rather than value-based, there is no need to make use of the _Field access functions. :param mapfile_location: location of register map file to be read :param verify: Boolean. If true, read registers back to verify they are written correctly. """ with open(mapfile_location, 'r') as f: for line in f.readlines(): # The register map starts after general information is printed preceded by '#' if line[0] != '#': # Extract register-value pairing from register map register, value = line.split(',') register = int(register) value = int(value[1:3],16) # Value is in hex if register == 136 and (value & 0x40): logger.info("Ignoring write to iCAL, will be applied next") continue
identifier_body
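_get_register_field, shown again in the record above, recovers a field from a raw register byte by clearing the bits above startbit with (0xFF >> (7 - startbit)) and shifting the remainder down to bit 0. The same extraction as a standalone Python function (no device access; LOL_INT's position is taken from the field table above):

def extract_field(raw_byte, startbit, length):
    """Return only the field bits of raw_byte, shifted to position 0
    (mirrors the masking and shifting in _get_register_field)."""
    end_bit = startbit - (length - 1)
    value = raw_byte & (0xFF >> (7 - startbit))   # drop bits above the field
    return value >> end_bit                        # drop bits below the field

# usage: a 2-bit field at bits 3..2 of 0b10110100 holds 0b01
assert extract_field(0b10110100, 3, 2) == 0b01
# a 1-bit flag at bit 0 (e.g. LOL_INT in register 130)
assert extract_field(0b00000001, 0, 1) == 1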
mysql_insert_query_test.go
ord) // true, 检测记录的主键是否零值,不插数据,也不会与db交互. assert.True(t, ok) db.Create(&record) // insert, db生成的id将会写入record assert.NotEqual(t, record.ID, 0) ok = db.NewRecord(record) // false, because record.id already exists in db. assert.False(t, ok) record.ID = 0 record.Age = sql.NullInt64{Valid: true, Int64: 0} record.Email = "e2" record.MemberNumber = "1" db.Create(&record) record.ID = 0 record.Age = sql.NullInt64{Valid: true, Int64: 17} record.Email = "e3" record.MemberNumber = "2" db.Create(&record) } func CommonQueryTest(t *testing.T, db *gorm.DB) { var user User // 不要将条件放在结构体内,不会读取的,只有主键会被作为条件, 后面的Take方法也是 db.Debug().First(&user, "u_name=?", "x") // SELECT * FROM users WHERE u_name=x ORDER BY id LIMIT 1; assert.NotEqual(t, user.ID, 0) u := new(User) // Get one record, no specified order (只使用主键查询,其他字段不会使用) // SELECT * FROM `admin_users` WHERE `admin_users`.`deleted_at` IS NULL AND `admin_users`.`id` = 1 LIMIT 1 db.Take(u, "u_name=?", "x") log.Printf("111 %+v", u) // Get last record, order by primary key db.Last(&user) // 获取不存在的记录 takeErr := db.Take(&User{}, "u_name=?", "NOT_EXIST").Error FindErr := db.Find(&[]User{}, "u_name=?", "NOT_EXIST").Error // !!! 注意这个err,当接收者是一个结构体时且数据未找到时返回 assert.Equal(t, takeErr, gorm.ErrRecordNotFound) // slice接收,则是nil assert.Nil(t, FindErr) var users []User // Get all records db.Find(&users) assert.True(t, len(users) > 1) // Get record with primary key (后面参数只会传递给整型主键) db.First(&user, 10) // where可以自定义字符串形式的条件,使用问号占位参数 // 还可以传入带值的struct,其中的值作为条件查询 // 还可以传入map类型,其中的k-v作为条件查询 // 还可以传入slice类型,不过元素只能是整型,作为主键字段参数,执行IN查询(如果主键不是整型,应该会报错) var user1 User // where db.Where("u_name = ?", "x").First(&user1) // SELECT * FROM users WHERE name = 'x' ORDER BY id LIMIT 1; assert.True(t, user1.Name == "x") var users1 []User db.Where("u_name = ?", "x").Find(&users1) // SELECT * FROM users WHERE name = 'x'; assert.True(t, len(users) > 1) // IN db.Where("u_name IN (?)", []string{"x", "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE name in ('x','jinzhu 2'); // LIKE db.Where("u_name LIKE ?", "%x%").Find(&users) // SELECT * FROM users WHERE name LIKE '%jin%'; // AND db.Where("u_name = ? AND age >= ?", "x", "18").Find(&users) // SELECT * FROM users WHERE name = 'x' AND age >= 22; var users2 []User // Time db.Where("updated_at > ?", time.Now().Add(-time.Hour)).Find(&users2) // SELECT * FROM users WHERE updated_at > 'an hour ago'; assert.True(t, len(users2) > 1) var users3 []User // BETWEEN db.Where("created_at BETWEEN ? AND ?", time.Now().Add(-time.Hour), time.Now()).Find(&users3) // SELECT * FROM users WHERE created_at BETWEEN 'an hour ago' AND 'now time'; assert.True(t, len(users3) > 1) var user2 User // struct作为条件查询 // 注意:struct作为条件时,其中字段的零值将会被gorm忽略,比如0,'',false // 如果不想被忽略,只能在定义结构体时,将字段类型定义为指针或scanner/valuer, 如sql.NullInt64/NullString... 
db.Where(&User{Name: "x", Age: sql.NullInt64{Int64: 18, Valid: true}}).First(&user2) // SELECT * FROM users WHERE name = "x" AND age = 18 ORDER BY id LIMIT 1; assert.True(t, user2.Age.Int64 == 18) var user3 User db.Where(&User{Name: "x", Age: sql.NullInt64{Int64: 0, Valid: true}}).First(&user3) // SELECT * FROM users WHERE name = "x" AND age = 0 ORDER BY id LIMIT 1; assert.True(t, user3.ID > 0 && user3.Age.Int64 == 0) // Map作为条件查询 db.Where(map[string]interface{}{"u_name": "x", "age": 18}).Find(&users) // SELECT * FROM users WHERE name = "x" AND age = 18; // Slice作为条件查询 db.Where([]int64{1, 21, 22}).Find(&users) // SELECT * FROM users WHERE id IN (1, 21, 22); } func QueryNotTest(t *testing.T, db *gorm.DB) { var user User db.Not("u_name", "jinzhu").First(&user) // SELECT * FROM users WHERE name <> "jinzhu" ORDER BY id LIMIT 1; assert.True(t, user.ID > 0) var users []User // Not In db.Not("u_name", []string{"jinzhu", "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE name NOT IN ("jinzhu", "jinzhu 2"); assert.True(t, len(users) > 1) // Not In slice of primary keys db.Not([]int64{1, 2, 3}).First(&user) // SELECT * FROM users WHERE id NOT IN (1,2,3) ORDER BY id LIMIT 1; // Special case db.Not([]int64{}).First(&user) // SELECT * FROM users ORDER BY id LIMIT 1; // Plain SQL db.Not("u_name = ?", "jinzhu").First(&user) // SELECT * FROM users WHERE NOT(name = "jinzhu") ORDER BY id LIMIT 1; // Struct db.Not(User{Name: "jinzhu"}).First(&user) // SELECT * FROM users WHERE name <> "jinzhu" ORDER BY id LIMIT 1; } func QueryOrTest(t *testing.T, db *gorm.DB) { var users []User db.Where("role = ?", "").Or("role = ?", "super_admin").Find(&users) // SELECT * FROM users WHERE role = 'admin' OR role = 'super_admin'; assert.True(t, len(users) > 1) var users1 []User // Struct db.Where("u_name = 'x'").Or(User{Name: "jinzhu 2"}).Find(&users1) // SELECT * FROM users WHERE u_name = 'x' OR name = 'jinzhu 2'; assert.True(t, len(users) > 1) // Map db.Where("u_name = 'jinzhu'").Or(map[string]interface{}{"u_name": "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE u_name = 'jinzhu' OR u_name = 'jinzhu 2'; assert.True(t, len(users) == 0) } // gorm称之为inline condition,内联查询,我看来就是更简单的一种查询写法 func MoreSimpleQueryTest(t *testing.T, db *gorm.DB) { var users []User var user User // Get by primary key (only works for integer primary key) db.First(&user, 2) // SELECT * FROM users WHERE id = 2; assert.True(t, user.ID == 2) // Get by primary key if it were a non-integer type db.First(&user, "id = ?", "string_primary_key") // SELECT * FROM users WHERE id = 'string_primary_key'; // Plain SQL db.Find(&user, "u_name = ?", "jinzhu") // SELECT * FROM users WHERE u_name = "jinzhu"; db.Find(&users, "u_name <> ? AND age = ?", "jinzhu", 18) // SELECT * FROM users WHERE u_name <> "jinzhu" AND age = 18; assert.True(t, len(users) > 0) var users1 []User // Struct db.Find(&users1, User{Age:
db.NewRecord(rec
conditional_block
mysql_insert_query_test.go
u_name=x ORDER BY id LIMIT 1; assert.NotEqual(t, user.ID, 0) u := new(User) // Get one record, no specified order (只使用主键查询,其他字段不会使用) // SELECT * FROM `admin_users` WHERE `admin_users`.`deleted_at` IS NULL AND `admin_users`.`id` = 1 LIMIT 1 db.Take(u, "u_name=?", "x") log.Printf("111 %+v", u) // Get last record, order by primary key db.Last(&user) // 获取不存在的记录 takeErr := db.Take(&User{}, "u_name=?", "NOT_EXIST").Error FindErr := db.Find(&[]User{}, "u_name=?", "NOT_EXIST").Error // !!! 注意这个err,当接收者是一个结构体时且数据未找到时返回 assert.Equal(t, takeErr, gorm.ErrRecordNotFound) // slice接收,则是nil assert.Nil(t, FindErr) var users []User // Ge
// SELECT * FROM users WHERE name = 'x'; assert.True(t, len(users) > 1) // IN db.Where("u_name IN (?)", []string{"x", "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE name in ('x','jinzhu 2'); // LIKE db.Where("u_name LIKE ?", "%x%").Find(&users) // SELECT * FROM users WHERE name LIKE '%jin%'; // AND db.Where("u_name = ? AND age >= ?", "x", "18").Find(&users) // SELECT * FROM users WHERE name = 'x' AND age >= 22; var users2 []User // Time db.Where("updated_at > ?", time.Now().Add(-time.Hour)).Find(&users2) // SELECT * FROM users WHERE updated_at > 'an hour ago'; assert.True(t, len(users2) > 1) var users3 []User // BETWEEN db.Where("created_at BETWEEN ? AND ?", time.Now().Add(-time.Hour), time.Now()).Find(&users3) // SELECT * FROM users WHERE created_at BETWEEN 'an hour ago' AND 'now time'; assert.True(t, len(users3) > 1) var user2 User // struct作为条件查询 // 注意:struct作为条件时,其中字段的零值将会被gorm忽略,比如0,'',false // 如果不想被忽略,只能在定义结构体时,将字段类型定义为指针或scanner/valuer, 如sql.NullInt64/NullString... db.Where(&User{Name: "x", Age: sql.NullInt64{Int64: 18, Valid: true}}).First(&user2) // SELECT * FROM users WHERE name = "x" AND age = 18 ORDER BY id LIMIT 1; assert.True(t, user2.Age.Int64 == 18) var user3 User db.Where(&User{Name: "x", Age: sql.NullInt64{Int64: 0, Valid: true}}).First(&user3) // SELECT * FROM users WHERE name = "x" AND age = 0 ORDER BY id LIMIT 1; assert.True(t, user3.ID > 0 && user3.Age.Int64 == 0) // Map作为条件查询 db.Where(map[string]interface{}{"u_name": "x", "age": 18}).Find(&users) // SELECT * FROM users WHERE name = "x" AND age = 18; // Slice作为条件查询 db.Where([]int64{1, 21, 22}).Find(&users) // SELECT * FROM users WHERE id IN (1, 21, 22); } func QueryNotTest(t *testing.T, db *gorm.DB) { var user User db.Not("u_name", "jinzhu").First(&user) // SELECT * FROM users WHERE name <> "jinzhu" ORDER BY id LIMIT 1; assert.True(t, user.ID > 0) var users []User // Not In db.Not("u_name", []string{"jinzhu", "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE name NOT IN ("jinzhu", "jinzhu 2"); assert.True(t, len(users) > 1) // Not In slice of primary keys db.Not([]int64{1, 2, 3}).First(&user) // SELECT * FROM users WHERE id NOT IN (1,2,3) ORDER BY id LIMIT 1; // Special case db.Not([]int64{}).First(&user) // SELECT * FROM users ORDER BY id LIMIT 1; // Plain SQL db.Not("u_name = ?", "jinzhu").First(&user) // SELECT * FROM users WHERE NOT(name = "jinzhu") ORDER BY id LIMIT 1; // Struct db.Not(User{Name: "jinzhu"}).First(&user) // SELECT * FROM users WHERE name <> "jinzhu" ORDER BY id LIMIT 1; } func QueryOrTest(t *testing.T, db *gorm.DB) { var users []User db.Where("role = ?", "").Or("role = ?", "super_admin").Find(&users) // SELECT * FROM users WHERE role = 'admin' OR role = 'super_admin'; assert.True(t, len(users) > 1) var users1 []User // Struct db.Where("u_name = 'x'").Or(User{Name: "jinzhu 2"}).Find(&users1) // SELECT * FROM users WHERE u_name = 'x' OR name = 'jinzhu 2'; assert.True(t, len(users) > 1) // Map db.Where("u_na me = 'jinzhu'").Or(map[string]interface{}{"u_name": "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE u_name = 'jinzhu' OR u_name = 'jinzhu 2'; assert.True(t, len(users) == 0) } // gorm称之为inline condition,内联查询,我看来就是更简单的一种查询写法 func MoreSimpleQueryTest(t *testing.T, db *gorm.DB) { var users []User var user User // Get by primary key (only works for integer primary key) db.First(&user, 2) // SELECT * FROM users WHERE id = 2; assert.True(t, user.ID == 2) // Get by primary key if it were a non-integer type db.First(&user, "id = ?", "string_primary_key") // SELECT * FROM users WHERE 
id = 'string_primary_key'; // Plain SQL db.Find(&user, "u_name = ?", "jinzhu") // SELECT * FROM users WHERE u_name = "jinzhu"; db.Find(&users, "u_name <> ? AND age = ?", "jinzhu", 18) // SELECT * FROM users WHERE u_name <> "jinzhu" AND age = 18; assert.True(t, len(users) > 0) var users1 []User // Struct db.Find(&users1, User{Age: sql.NullInt64{Int64: 18, Valid: true}}) // SELECT * FROM users WHERE age = 18; assert.True(t, len(users1) > 0) var users2 []User // Map db.Find(&users2, map[string]interface{}{"age": 18}) // SELECT * FROM users WHERE age = 18; assert.True(t, len(users2) > 0) } func FirstOrInitQueryTest(t *testing.T, db *gorm.DB) { var user User // 先介绍for update db.Set("gorm:query_option", "FOR UPDATE").First(&user, 10) // SELECT * FROM users WHERE id = 10 FOR UPDATE; // FirstOrInit, 获取匹配的第一条,如果没有就用给定的条件初始化传入的user(没有往db插入),仅支持struct和map // 针对不存在的数据 db.FirstOrInit(&user, User{Name: "non_existing"}) // user -> User{ ID: N!=0, Name: "non_existing"} assert.True(t, user.ID == 0 &&
t all records db.Find(&users) assert.True(t, len(users) > 1) // Get record with primary key (后面参数只会传递给整型主键) db.First(&user, 10) // where可以自定义字符串形式的条件,使用问号占位参数 // 还可以传入带值的struct,其中的值作为条件查询 // 还可以传入map类型,其中的k-v作为条件查询 // 还可以传入slice类型,不过元素只能是整型,作为主键字段参数,执行IN查询(如果主键不是整型,应该会报错) var user1 User // where db.Where("u_name = ?", "x").First(&user1) // SELECT * FROM users WHERE name = 'x' ORDER BY id LIMIT 1; assert.True(t, user1.Name == "x") var users1 []User db.Where("u_name = ?", "x").Find(&users1)
identifier_body
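The zero-value behaviour noted in the struct-condition comments above is easy to trip over, so here is a small, self-contained sketch of the difference. It is illustrative only, not part of the test file: the DSN, the simplified User model (with a gorm column tag standing in for the u_name column used above), and the printed count are all placeholders.

// Sketch: struct conditions drop zero values; sql.Null* fields and map conditions keep them.
package main

import (
	"database/sql"
	"fmt"

	"github.com/jinzhu/gorm"
	_ "github.com/jinzhu/gorm/dialects/mysql"
)

type User struct {
	gorm.Model
	Name string `gorm:"column:u_name"`
	Age  sql.NullInt64
}

func main() {
	db, err := gorm.Open("mysql", "user:pass@/testdb?charset=utf8&parseTime=True")
	if err != nil {
		panic(err)
	}
	defer db.Close()

	var users []User
	// With a plain int field, Age == 0 would be silently dropped from the WHERE clause;
	// NullInt64 with Valid=true keeps the age = 0 condition.
	db.Where(&User{Name: "x", Age: sql.NullInt64{Int64: 0, Valid: true}}).Find(&users)
	// A map condition always keeps zero values.
	db.Where(map[string]interface{}{"u_name": "x", "age": 0}).Find(&users)
	fmt.Println(len(users))
}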
mysql_insert_query_test.go
CommonQueryTest(t, db) //QueryNotTest(t, db) //QueryOrTest(t, db) //MoreSimpleQueryTest(t, db) //FirstOrInitQueryTest(t, db) //FirstOrCreateQueryTest(t, db) //SubQueryTest(t, db) //SelectTest(t, db) //LimitTest(t, db) //OffsetTest(t, db) //CountTest(t, db) //JoinTest(t, db) //ScanTest(t, db) UpdateAllFields(t, db) UpdateWantedFields(t, db) } func InsertTest(t *testing.T, db *gorm.DB) { var record = User{ Name: "x", Num: 0, Email: "e1", MemberNumber: "0", } ok := db.NewRecord(record) // true, 检测记录的主键是否零值,不插数据,也不会与db交互. assert.True(t, ok) db.Create(&record) // insert, db生成的id将会写入record assert.NotEqual(t, record.ID, 0) ok = db.NewRecord(record) // false, because record.id already exists in db. assert.False(t, ok) record.ID = 0 record.Age = sql.NullInt64{Valid: true, Int64: 0} record.Email = "e2" record.MemberNumber = "1" db.Create(&record) record.ID = 0 record.Age = sql.NullInt64{Valid: true, Int64: 17} record.Email = "e3" record.MemberNumber = "2" db.Create(&record) } func CommonQueryTest(t *testing.T, db *gorm.DB) { var user User // 不要将条件放在结构体内,不会读取的,只有主键会被作为条件, 后面的Take方法也是 db.Debug().First(&user, "u_name=?", "x") // SELECT * FROM users WHERE u_name=x ORDER BY id LIMIT 1; assert.NotEqual(t, user.ID, 0) u := new(User) // Get one record, no specified order (只使用主键查询,其他字段不会使用) // SELECT * FROM `admin_users` WHERE `admin_users`.`deleted_at` IS NULL AND `admin_users`.`id` = 1 LIMIT 1 db.Take(u, "u_name=?", "x") log.Printf("111 %+v", u) // Get last record, order by primary key db.Last(&user) // 获取不存在的记录 takeErr := db.Take(&User{}, "u_name=?", "NOT_EXIST").Error FindErr := db.Find(&[]User{}, "u_name=?", "NOT_EXIST").Error // !!! 注意这个err,当接收者是一个结构体时且数据未找到时返回 assert.Equal(t, takeErr, gorm.ErrRecordNotFound) // slice接收,则是nil assert.Nil(t, FindErr) var users []User // Get all records db.Find(&users) assert.True(t, len(users) > 1) // Get record with primary key (后面参数只会传递给整型主键) db.First(&user, 10) // where可以自定义字符串形式的条件,使用问号占位参数 // 还可以传入带值的struct,其中的值作为条件查询 // 还可以传入map类型,其中的k-v作为条件查询 // 还可以传入slice类型,不过元素只能是整型,作为主键字段参数,执行IN查询(如果主键不是整型,应该会报错) var user1 User // where db.Where("u_name = ?", "x").First(&user1) // SELECT * FROM users WHERE name = 'x' ORDER BY id LIMIT 1; assert.True(t, user1.Name == "x") var users1 []User db.Where("u_name = ?", "x").Find(&users1) // SELECT * FROM users WHERE name = 'x'; assert.True(t, len(users) > 1) // IN db.Where("u_name IN (?)", []string{"x", "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE name in ('x','jinzhu 2'); // LIKE db.Where("u_name LIKE ?", "%x%").Find(&users) // SELECT * FROM users WHERE name LIKE '%jin%'; // AND db.Where("u_name = ? AND age >= ?", "x", "18").Find(&users) // SELECT * FROM users WHERE name = 'x' AND age >= 22; var users2 []User // Time db.Where("updated_at > ?", time.Now().Add(-time.Hour)).Find(&users2) // SELECT * FROM users WHERE updated_at > 'an hour ago'; assert.True(t, len(users2) > 1) var users3 []User // BETWEEN db.Where("created_at BETWEEN ? AND ?", time.Now().Add(-time.Hour), time.Now()).Find(&users3) // SELECT * FROM users WHERE created_at BETWEEN 'an hour ago' AND 'now time'; assert.True(t, len(users3) > 1) var user2 User // struct作为条件查询 // 注意:struct作为条件时,其中字段的零值将会被gorm忽略,比如0,'',false // 如果不想被忽略,只能在定义结构体时,将字段类型定义为指针或scanner/valuer, 如sql.NullInt64/NullString... 
db.Where(&User{Name: "x", Age: sql.NullInt64{Int64: 18, Valid: true}}).First(&user2) // SELECT * FROM users WHERE name = "x" AND age = 18 ORDER BY id LIMIT 1; assert.True(t, user2.Age.Int64 == 18) var user3 User db.Where(&User{Name: "x", Age: sql.NullInt64{Int64: 0, Valid: true}}).First(&user3) // SELECT * FROM users WHERE name = "x" AND age = 0 ORDER BY id LIMIT 1; assert.True(t, user3.ID > 0 && user3.Age.Int64 == 0) // Map作为条件查询 db.Where(map[string]interface{}{"u_name": "x", "age": 18}).Find(&users) // SELECT * FROM users WHERE name = "x" AND age = 18; // Slice作为条件查询 db.Where([]int64{1, 21, 22}).Find(&users) // SELECT * FROM users WHERE id IN (1, 21, 22); } func QueryNotTest(t *testing.T, db *gorm.DB) { var user User db.Not("u_name", "jinzhu").First(&user) // SELECT * FROM users WHERE name <> "jinzhu" ORDER BY id LIMIT 1; assert.True(t, user.ID > 0) var users []User // Not In db.Not("u_name", []string{"jinzhu", "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE name NOT IN ("jinzhu", "jinzhu 2"); assert.True(t, len(users) > 1) // Not In slice of primary keys db.Not([]int64{1, 2, 3}).First(&user) // SELECT * FROM users WHERE id NOT IN (1,2,3) ORDER BY id LIMIT 1; // Special case db.Not([]int64{}).First(&user) // SELECT * FROM users ORDER BY id LIMIT 1; // Plain SQL db.Not("u_name = ?", "jinzhu").First(&user) // SELECT * FROM users WHERE NOT(name = "jinzhu") ORDER BY id LIMIT 1; // Struct db.Not(User{Name: "jinzhu"}).First(&user) // SELECT * FROM users WHERE name <> "jinzhu" ORDER BY id LIMIT 1; } func QueryOrTest(t *testing.T, db *gorm.DB) { var users []User db.Where("role = ?", "").Or("role = ?", "super_admin").Find(&users) // SELECT * FROM users WHERE role = 'admin' OR role = 'super_admin'; assert.True(t, len(users) > 1) var users1 []User // Struct db.Where("u_name = 'x'").Or(User{Name: "jinzhu 2"}).Find(&users1) // SELECT * FROM users WHERE u_name = 'x' OR name = 'jinzhu 2'; assert.True(t, len(users) > 1) // Map db.Where("u_name = 'jinzhu'").Or(map[string]interface{}{"u_name": "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE u_name = 'jinzhu' OR u_name = 'jinzhu 2'; assert.True(t, len(users) == 0) } // gorm称之为inline condition,内联查询,我看来就是更简单的一种查询写法 func MoreSimpleQueryTest(t *testing.T, db *gorm.DB) { var users []User var user User // Get by primary key (only
InsertTest(t, db)
random_line_split
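The CommonQueryTest above leans on the difference between Take/First and Find when nothing matches. A short sketch of that behaviour follows; the function name lookupByName is hypothetical and it assumes the same *gorm.DB and User model defined in this test file.

// Sketch: First/Take return gorm.ErrRecordNotFound when no row matches,
// while Find into a slice returns a nil error and an empty slice.
func lookupByName(db *gorm.DB, name string) ([]User, error) {
	var one User
	err := db.Take(&one, "u_name = ?", name).Error
	if err != nil && err != gorm.ErrRecordNotFound {
		return nil, err
	}
	// Find never reports ErrRecordNotFound for a slice receiver; an empty slice means no rows.
	var all []User
	if err := db.Find(&all, "u_name = ?", name).Error; err != nil {
		return nil, err
	}
	return all, nil
}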
mysql_insert_query_test.go
=x ORDER BY id LIMIT 1; assert.NotEqual(t, user.ID, 0) u := new(User) // Get one record, no specified order (只使用主键查询,其他字段不会使用) // SELECT * FROM `admin_users` WHERE `admin_users`.`deleted_at` IS NULL AND `admin_users`.`id` = 1 LIMIT 1 db.Take(u, "u_name=?", "x") log.Printf("111 %+v", u) // Get last record, order by primary key db.Last(&user) // 获取不存在的记录 takeErr := db.Take(&User{}, "u_name=?", "NOT_EXIST").Error FindErr := db.Find(&[]User{}, "u_name=?", "NOT_EXIST").Error // !!! 注意这个err,当接收者是一个结构体时且数据未找到时返回 assert.Equal(t, takeErr, gorm.ErrRecordNotFound) // slice接收,则是nil assert.Nil(t, FindErr) var users []User // Get all records db.Find(&users) assert.True(t, len(users) > 1) // Get record with primary key (后面参数只会传递给整型主键) db.First(&user, 10) // where可以自定义字符串形式的条件,使用问号占位参数 // 还可以传入带值的struct,其中的值作为条件查询 // 还可以传入map类型,其中的k-v作为条件查询 // 还可以传入slice类型,不过元素只能是整型,作为主键字段参数,执行IN查询(如果主键不是整型,应该会报错) var user1 User // where db.Where("u_name = ?", "x").First(&user1) // SELECT * FROM users WHERE name = 'x' ORDER BY id LIMIT 1; assert.True(t, user1.Name == "x") var users1 []User db.Where("u_name = ?", "x").Find(&users1) // SELECT * FROM users WHERE name = 'x'; assert.True(t, len(users) > 1) // IN db.Where("u_name IN (?)", []string{"x", "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE name in ('x','jinzhu 2'); // LIKE db.Where("u_name LIKE ?", "%x%").Find(&users) // SELECT * FROM users WHERE name LIKE '%jin%'; // AND db.Where("u_name = ? AND age >= ?", "x", "18").Find(&users) // SELECT * FROM users WHERE name = 'x' AND age >= 22; var users2 []User // Time db.Where("updated_at > ?", time.Now().Add(-time.Hour)).Find(&users2) // SELECT * FROM users WHERE updated_at > 'an hour ago'; assert.True(t, len(users2) > 1) var users3 []User // BETWEEN db.Where("created_at BETWEEN ? AND ?", time.Now().Add(-time.Hour), time.Now()).Find(&users3) // SELECT * FROM users WHERE created_at BETWEEN 'an hour ago' AND 'now time'; assert.True(t, len(users3) > 1) var user2 User // struct作为条件查询 // 注意:struct作为条件时,其中字段的零值将会被gorm忽略,比如0,'',false // 如果不想被忽略,只能在定义结构体时,将字段类型定义为指针或scanner/valuer, 如sql.NullInt64/NullString... 
db.Where(&User{Name: "x", Age: sql.NullInt64{Int64: 18, Valid: true}}).First(&user2) // SELECT * FROM users WHERE name = "x" AND age = 18 ORDER BY id LIMIT 1; assert.True(t, user2.Age.Int64 == 18) var user3 User db.Where(&User{Name: "x", Age: sql.NullInt64{Int64: 0, Valid: true}}).First(&user3) // SELECT * FROM users WHERE name = "x" AND age = 0 ORDER BY id LIMIT 1; assert.True(t, user3.ID > 0 && user3.Age.Int64 == 0) // Map作为条件查询 db.Where(map[string]interface{}{"u_name": "x", "age": 18}).Find(&users) // SELECT * FROM users WHERE name = "x" AND age = 18; // Slice作为条件查询 db.Where([]int64{1, 21, 22}).Find(&users) // SELECT * FROM users WHERE id IN (1, 21, 22); } func QueryNotTest(t *testing.T, db *gorm.DB) { var user User db.Not("u_name", "jinzhu").First(&user) // SELECT * FROM users WHERE name <> "jinzhu" ORDER BY id LIMIT 1; assert.True(t, user.ID > 0) var users []User // Not In db.Not("u_name", []string{"jinzhu", "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE name NOT IN ("jinzhu", "jinzhu 2"); assert.True(t, len(users) > 1) // Not In slice of primary keys db.Not([]int64{1, 2, 3}).First(&user) // SELECT * FROM users WHERE id NOT IN (1,2,3) ORDER BY id LIMIT 1; // Special case db.Not([]int64{}).First(&user) // SELECT * FROM users ORDER BY id LIMIT 1; // Plain SQL db.Not("u_name = ?", "jinzhu").First(&user) // SELECT * FROM users WHERE NOT(name = "jinzhu") ORDER BY id LIMIT 1; // Struct db.Not(User{Name: "jinzhu"}).First(&user) // SELECT * FROM users WHERE name <> "jinzhu" ORDER BY id LIMIT 1; } func QueryOrTest(t *testing.T, db *gorm.DB) { var users []User db.Where("role = ?", "").Or("role = ?", "super_admin").Find(&users) // SELECT * FROM users WHERE role = 'admin' OR role = 'super_admin'; assert.True(t, len(users) > 1) var users1 []User // Struct db.Where("u_name = 'x'").Or(User{Name: "jinzhu 2"}).Find(&users1) // SELECT * FROM users WHERE u_name = 'x' OR name = 'jinzhu 2'; assert.True(t, len(users) > 1) // Map db.Where("u_name = 'jinzhu'").Or(map[string]interface{}{"u_name": "jinzhu 2"}).Find(&users) // SELECT * FROM users WHERE u_name = 'jinzhu' OR u_name = 'jinzhu 2'; assert.True(t, len(users) == 0) } // gorm称之为inline condition,内联查询,我看来就是更简单的一种查询写法 func MoreSimpleQueryTest(t *testing.T, db *gorm.DB) { var users []User var user User // Get by primary key (only works for integer primary key) db.First(&user, 2) // SELECT * FROM users WHERE id = 2; assert.True(t, user.ID == 2) // Get by primary key if it were a non-integer type db.First(&user, "id = ?", "string_primary_key") // SELECT * FROM users WHERE id = 'string_primary_key'; // Plain SQL db.Find(&user, "u_name = ?", "jinzhu") // SELECT * FROM users WHERE u_name = "jinzhu"; db.Find(&users, "u_name <> ? AND age = ?", "jinzhu", 18) // SELECT * FROM users WHERE u_name <> "jinzhu" AND age = 18; assert.True(t, len(users) > 0) var users1 []User // Struct db.Find(&users1, User{Age: sql.NullInt64{Int64: 18, Valid: true}}) // SELECT * FROM users WHERE age = 18; assert.True(t, len(users1) > 0) var users2 []User // Map db.Find(&users2, map[string]interface{}{"age": 18}) // SELECT * FROM users WHERE age = 18; assert.True(t, len(users2) > 0) } func FirstOrInitQueryTest(t *testing.T, db *gorm.DB) { var user User // 先介绍for update db.Set("gorm:query_option", "FOR UPDATE").First(&user, 10) // SELECT * FROM users WHERE id = 10 FOR UPDATE; // FirstOrInit, 获取匹配的第一条,如果没有就用给定的条件初始化传入的user(没有往db插入),仅支持struct和map // 针对不存在的数据 db.FirstOrInit(&user, User{Name: "non_existing"}) // user -> User{ ID: N!=0, Name: "non_existing"} assert.True(t, user.ID == 0
&& user.Name == "n
identifier_name
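The FirstOrInit case above only initializes the struct in memory, which is why the assertion expects a zero ID. A minimal sketch of FirstOrInit versus FirstOrCreate follows; it assumes the same db and User as above, and the Age value is an arbitrary example.

// Sketch: FirstOrInit fills the struct without writing; FirstOrCreate also inserts.
func firstOrExample(db *gorm.DB) {
	var u1, u2 User
	// Nothing is written, so u1.ID stays 0.
	db.FirstOrInit(&u1, User{Name: "non_existing"})
	// The row is inserted when missing, so u2.ID is set by the database.
	db.FirstOrCreate(&u2, User{Name: "non_existing"})
	// Attrs are applied only when no matching record exists.
	db.Where(User{Name: "non_existing"}).
		Attrs(User{Age: sql.NullInt64{Int64: 30, Valid: true}}).
		FirstOrInit(&u1)
}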
ioWkr.go
// just use fixed cluster number 1, round robin packets if err = ring.SetCluster(1, pfring.ClusterType(pfring.ClusterRoundRobin)); err != nil { log.Errorf("pfring SetCluster error:", err) doneChan <- err return } if err = ring.SetDirection(pfring.ReceiveOnly); err != nil { log.Errorf("pfring failed to set direction") doneChan <- err return } if err = ring.SetPollWatermark(1); err != nil { log.Errorf("pfring failed to set poll watermark") doneChan <- err return } if err = ring.SetPollDuration(1); err != nil { log.Errorf("pfring failed to set poll watermark") doneChan <- err return } if err = ring.SetSamplingRate(1); err != nil { log.Errorf("pfring failed to set sample rate") doneChan <- err return } // only using read for now if err = ring.SetSocketMode(pfring.ReadOnly); err != nil { log.Errorf("pfring SetSocketMode error: %v", err) doneChan <- err return } else if err = ring.Enable(); err != nil { log.Errorf("pfring Enable error: %v", err) doneChan <- err return } if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("RX wkr %d pfring done!", i) } var data []byte var ci gopacket.CaptureInfo rxStartDone <- true for { // try to read from handle data, ci, err = ring.ReadPacketData() if err != nil || data == nil || len(data) == 0 { continue } profiler.Tick() if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("PFring listener %d got data ts %v", i, ci.Timestamp) } rawIn = cfg.RunData.inPacketPool.Get().(*inPacket) rawIn.data = data rawIn.ts = ci.Timestamp rawIn.fromTX = false cfg.RunData.rawInput[getRxChanNumToUse(cfg)] <- rawIn atomic.AddUint64(&cfg.Counters.TotalPacketsRcvd, 1) atomic.AddUint64(&cfg.perIORX[i], 1) profiler.Tock() } }() select { case <-(*cfg.Ctx).Done(): log.Errorf("RX %d done due to context", i) return (*cfg.Ctx).Err() case err := <-doneChan: return err } }) }(rxwkr) select { case <-rxStartDone: if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("RX worker %d running", rxwkr) } continue case <-(*cfg.Ctx).Done(): log.Errorf("Rx worker startup error") return } } txStartDone := make(chan bool) for txwkr := 0; txwkr < cfg.NumTXWorkers; txwkr++ { func(i int) { cfg.Eg.Go(func() error { doneChan := make(chan error, 1) go func() { // PFring doesn't implement TX timestamps actually // API documentation lists it, but at a low level, its not actually used // create a raw socket and send packets via it , read TS similar to Oleg's method var profiler Profiler profiler.Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("TX worker %d", i)) cfg.PerfProfilers = append(cfg.PerfProfilers, &profiler) txTSworker := make([]Profiler, cfg.NumTXTSWorkerPerTx) for j := 0; j < cfg.NumTXTSWorkerPerTx; j++ { txTSworker[j].Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("TX worker %d TSRead worker %d", i, j)) cfg.PerfProfilers = append(cfg.PerfProfilers, &txTSworker[j]) } ifInfo, err := net.InterfaceByName(cfg.Iface) if err != nil { log.Errorf("Interface by name failed in start tx worker") doneChan <- err return } var haddr [8]byte copy(haddr[0:7], ifInfo.HardwareAddr[0:7]) addr := syscall.SockaddrLinklayer{ Protocol: syscall.ETH_P_ALL, Ifindex: ifInfo.Index, Halen: uint8(len(ifInfo.HardwareAddr)), Addr: haddr, } fdTS, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, syscall.ETH_P_ALL) if err != nil { log.Errorf("Failed to make raw socket for TS worker %d err %v", i, err) } defer syscall.Close(fdTS) err = syscall.Bind(fdTS, &addr) if err != nil { log.Errorf("Failed to bind TS socket %v", err) } if err := ptp.IoctlTimestamp(fdTS, cfg.Iface); err != nil { log.Errorf("Failed to ioctl timestamp tx worker %v", i) return } 
// Enable hardware timestamp capabilities on socket flags := unix.SOF_TIMESTAMPING_TX_HARDWARE | unix.SOF_TIMESTAMPING_RX_HARDWARE | unix.SOF_TIMESTAMPING_RAW_HARDWARE if err := unix.SetsockoptInt(fdTS, unix.SOL_SOCKET, ptp.Timestamping(), flags); err != nil { log.Errorf("Failed to set flags tx worker %v err %v", i, err) return } if err := unix.SetsockoptInt(fdTS, unix.SOL_SOCKET, unix.SO_SELECT_ERR_QUEUE, 1); err != nil { log.Errorf("Failed to select err queue tx worker %v", i) return } /* simple socket for non-timestamping */ fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, syscall.ETH_P_ALL) if err != nil { log.Errorf("Creating simple socket for tx worker %d failed err %v", i, err) } defer syscall.Close(fd) err = syscall.Bind(fd, &addr) if err != nil { log.Errorf("Simple socket bind failed tx worker %d err %v", i, err) } var txTSBytesReceived uint64 // start go-routines to handle TX TS txTSStartDone := make(chan bool) for j := 0; j < cfg.NumTXTSWorkerPerTx; j++ { go func(workerNum int) { var pktSent []byte var inPkt *inPacket var pktSentLen int var err error var msgs []byte var txTS time.Time msgs = make([]byte, 1000) pktSent = cfg.RunData.bytePool.Get().([]byte) // check if there are control messages on timestamp socket txTSStartDone <- true for { // ideally should use ptp.PeekRecvMsgs , but maybe similar overhead, just leave this txTSworker[workerNum].Tick() pktSentLen, _, _, _, err = unix.Recvmsg(fdTS, pktSent, msgs, unix.MSG_ERRQUEUE) if err != nil || pktSentLen == 0 { continue } txTS, err = ptp.SocketControlMessageTimestamp(msgs) if err != nil { log.Errorf("SocketControlMessageTimestamp err %v", err) } inPkt = cfg.RunData.inPacketPool.Get().(*inPacket) inPkt.data = pktSent inPkt.ts = txTS inPkt.fromTX = true cfg.RunData
{ rxStartDone := make(chan bool) for rxwkr := 0; rxwkr < cfg.NumRXWorkers; rxwkr++ { func(i int) { cfg.Eg.Go(func() error { doneChan := make(chan error, 1) go func() { var profiler Profiler profiler.Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("RX Worker %d", i)) cfg.PerfProfilers = append(cfg.PerfProfilers, &profiler) var ring *pfring.Ring var rawIn *inPacket var err error // 1<<24 is PF_RING_DISCARD_INJECTED_PKTS; if you transmit a packet via the ring, it is not read back if ring, err = pfring.NewRing(cfg.Iface, 4096, (1<<24)|pfring.FlagPromisc|pfring.FlagHWTimestamp); err != nil { log.Errorf("pfring ring creation error: %v", err) doneChan <- err return } defer ring.Close()
identifier_body
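The RX worker above does ring setup, enable, and a blocking read loop with a lot of per-option error handling. A condensed sketch of that path, using only the pfring calls that already appear in this file, is shown below; the function name readLoop and the single-flag setup are simplifications, not the worker's actual configuration.

// Condensed sketch of the RX path: open a PF_RING, enable it, read packets.
func readLoop(iface string) error {
	ring, err := pfring.NewRing(iface, 4096, pfring.FlagPromisc)
	if err != nil {
		return err
	}
	defer ring.Close()
	if err := ring.SetSocketMode(pfring.ReadOnly); err != nil {
		return err
	}
	if err := ring.Enable(); err != nil {
		return err
	}
	for {
		data, ci, err := ring.ReadPacketData()
		if err != nil || len(data) == 0 {
			continue
		}
		log.Debugf("got %d bytes at %v", len(data), ci.Timestamp)
	}
}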
ioWkr.go
filers = append(cfg.PerfProfilers, &profiler) var ring *pfring.Ring var rawIn *inPacket var err error // 1<<24 is PF_RING_DISCARD_INJECTED_PKTS , if you transmit a packet via the ring, doesn't read it back if ring, err = pfring.NewRing(cfg.Iface, 4096, (1<<24)|pfring.FlagPromisc|pfring.FlagHWTimestamp); err != nil { log.Errorf("pfring ring creation error:", err) doneChan <- err return } defer ring.Close() // just use fixed cluster number 1, round robin packets if err = ring.SetCluster(1, pfring.ClusterType(pfring.ClusterRoundRobin)); err != nil { log.Errorf("pfring SetCluster error:", err) doneChan <- err return } if err = ring.SetDirection(pfring.ReceiveOnly); err != nil { log.Errorf("pfring failed to set direction") doneChan <- err return } if err = ring.SetPollWatermark(1); err != nil { log.Errorf("pfring failed to set poll watermark") doneChan <- err return } if err = ring.SetPollDuration(1); err != nil { log.Errorf("pfring failed to set poll watermark") doneChan <- err return } if err = ring.SetSamplingRate(1); err != nil { log.Errorf("pfring failed to set sample rate") doneChan <- err return } // only using read for now if err = ring.SetSocketMode(pfring.ReadOnly); err != nil { log.Errorf("pfring SetSocketMode error: %v", err) doneChan <- err return } else if err = ring.Enable(); err != nil { log.Errorf("pfring Enable error: %v", err) doneChan <- err return } if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("RX wkr %d pfring done!", i) } var data []byte var ci gopacket.CaptureInfo rxStartDone <- true for { // try to read from handle data, ci, err = ring.ReadPacketData() if err != nil || data == nil || len(data) == 0 { continue } profiler.Tick() if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("PFring listener %d got data ts %v", i, ci.Timestamp) } rawIn = cfg.RunData.inPacketPool.Get().(*inPacket) rawIn.data = data rawIn.ts = ci.Timestamp rawIn.fromTX = false cfg.RunData.rawInput[getRxChanNumToUse(cfg)] <- rawIn atomic.AddUint64(&cfg.Counters.TotalPacketsRcvd, 1) atomic.AddUint64(&cfg.perIORX[i], 1) profiler.Tock() } }() select { case <-(*cfg.Ctx).Done(): log.Errorf("RX %d done due to context", i) return (*cfg.Ctx).Err() case err := <-doneChan: return err } }) }(rxwkr) select { case <-rxStartDone: if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("RX worker %d running", rxwkr) } continue case <-(*cfg.Ctx).Done(): log.Errorf("Rx worker startup error") return } } txStartDone := make(chan bool) for txwkr := 0; txwkr < cfg.NumTXWorkers; txwkr++ { func(i int) { cfg.Eg.Go(func() error { doneChan := make(chan error, 1) go func() { // PFring doesn't implement TX timestamps actually // API documentation lists it, but at a low level, its not actually used // create a raw socket and send packets via it , read TS similar to Oleg's method var profiler Profiler profiler.Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("TX worker %d", i)) cfg.PerfProfilers = append(cfg.PerfProfilers, &profiler) txTSworker := make([]Profiler, cfg.NumTXTSWorkerPerTx) for j := 0; j < cfg.NumTXTSWorkerPerTx; j++ { txTSworker[j].Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("TX worker %d TSRead worker %d", i, j)) cfg.PerfProfilers = append(cfg.PerfProfilers, &txTSworker[j]) } ifInfo, err := net.InterfaceByName(cfg.Iface) if err != nil { log.Errorf("Interface by name failed in start tx worker") doneChan <- err return } var haddr [8]byte copy(haddr[0:7], ifInfo.HardwareAddr[0:7]) addr := syscall.SockaddrLinklayer{ Protocol: syscall.ETH_P_ALL, Ifindex: ifInfo.Index, Halen: uint8(len(ifInfo.HardwareAddr)), Addr: haddr, } fdTS, err := 
syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, syscall.ETH_P_ALL) if err != nil { log.Errorf("Failed to make raw socket for TS worker %d err %v", i, err) } defer syscall.Close(fdTS) err = syscall.Bind(fdTS, &addr) if err != nil { log.Errorf("Failed to bind TS socket %v", err) } if err := ptp.IoctlTimestamp(fdTS, cfg.Iface); err != nil { log.Errorf("Failed to ioctl timestamp tx worker %v", i) return } // Enable hardware timestamp capabilities on socket flags := unix.SOF_TIMESTAMPING_TX_HARDWARE | unix.SOF_TIMESTAMPING_RX_HARDWARE | unix.SOF_TIMESTAMPING_RAW_HARDWARE if err := unix.SetsockoptInt(fdTS, unix.SOL_SOCKET, ptp.Timestamping(), flags); err != nil { log.Errorf("Failed to set flags tx worker %v err %v", i, err) return } if err := unix.SetsockoptInt(fdTS, unix.SOL_SOCKET, unix.SO_SELECT_ERR_QUEUE, 1); err != nil { log.Errorf("Failed to select err queue tx worker %v", i) return } /* simple socket for non-timestamping */ fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, syscall.ETH_P_ALL) if err != nil { log.Errorf("Creating simple socket for tx worker %d failed err %v", i, err) } defer syscall.Close(fd) err = syscall.Bind(fd, &addr) if err != nil { log.Errorf("Simple socket bind failed tx worker %d err %v", i, err) } var txTSBytesReceived uint64 // start go-routines to handle TX TS txTSStartDone := make(chan bool) for j := 0; j < cfg.NumTXTSWorkerPerTx; j++ { go func(workerNum int) { var pktSent []byte var inPkt *inPacket var pktSentLen int var err error var msgs []byte var txTS time.Time msgs = make([]byte, 1000) pktSent = cfg.RunData.bytePool.Get().([]byte) // check if there are control messages on timestamp socket txTSStartDone <- true for { // ideally should use ptp.PeekRecvMsgs , but maybe similar overhead, just leave this txTSworker[workerNum].Tick() pktSentLen, _, _, _, err = unix.Recvmsg(fdTS, pktSent, msgs, unix.MSG_ERRQUEUE) if err != nil || pktSentLen == 0 { continue } txTS, err = ptp.SocketControlMessageTimestamp(msgs) if err != nil { log.Errorf("SocketControlMessageTimestamp err %v", err) } inPkt = cfg.RunData.inPacketPool.Get().(*inPacket) inPkt.data = pktSent inPkt.ts = txTS inPkt.fromTX = true cfg.RunData.rawInput[getRxChanNumToUse(cfg)] <- inPkt pktSent = cfg.RunData.bytePool.Get().([]byte) atomic.AddUint64(&cfg.Counters.TotalTXTSRead, 1)
txTSworker[workerNum].Tock() } }(j) select { case <-txTSStartDone:
atomic.AddUint64(&txTSBytesReceived, uint64(pktSentLen))
random_line_split
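The TX timestamp reader above polls the socket's error queue rather than the ring, since PF_RING does not deliver TX timestamps. A stripped-down sketch of that loop follows; it assumes fdTS was created and given the SO_TIMESTAMPING flags exactly as in the worker above, and it reuses the ptp helper the file already calls. Buffer sizes and the function name are placeholders.

// Sketch: drain hardware TX timestamps from an AF_PACKET socket's error queue.
func drainTxTimestamps(fdTS int) {
	buf := make([]byte, 2048)
	oob := make([]byte, 1024)
	for {
		n, _, _, _, err := unix.Recvmsg(fdTS, buf, oob, unix.MSG_ERRQUEUE)
		if err != nil || n == 0 {
			continue
		}
		txTS, err := ptp.SocketControlMessageTimestamp(oob)
		if err != nil {
			log.Errorf("SocketControlMessageTimestamp err %v", err)
			continue
		}
		log.Debugf("packet of %d bytes left the NIC at %v", n, txTS)
	}
}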
ioWkr.go
ers = append(cfg.PerfProfilers, &profiler) var ring *pfring.Ring var rawIn *inPacket var err error // 1<<24 is PF_RING_DISCARD_INJECTED_PKTS , if you transmit a packet via the ring, doesn't read it back if ring, err = pfring.NewRing(cfg.Iface, 4096, (1<<24)|pfring.FlagPromisc|pfring.FlagHWTimestamp); err != nil { log.Errorf("pfring ring creation error:", err) doneChan <- err return } defer ring.Close() // just use fixed cluster number 1, round robin packets if err = ring.SetCluster(1, pfring.ClusterType(pfring.ClusterRoundRobin)); err != nil { log.Errorf("pfring SetCluster error:", err) doneChan <- err return } if err = ring.SetDirection(pfring.ReceiveOnly); err != nil { log.Errorf("pfring failed to set direction") doneChan <- err return } if err = ring.SetPollWatermark(1); err != nil { log.Errorf("pfring failed to set poll watermark") doneChan <- err return } if err = ring.SetPollDuration(1); err != nil { log.Errorf("pfring failed to set poll watermark") doneChan <- err return } if err = ring.SetSamplingRate(1); err != nil { log.Errorf("pfring failed to set sample rate") doneChan <- err return } // only using read for now if err = ring.SetSocketMode(pfring.ReadOnly); err != nil { log.Errorf("pfring SetSocketMode error: %v", err) doneChan <- err return } else if err = ring.Enable(); err != nil { log.Errorf("pfring Enable error: %v", err) doneChan <- err return } if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("RX wkr %d pfring done!", i) } var data []byte var ci gopacket.CaptureInfo rxStartDone <- true for { // try to read from handle data, ci, err = ring.ReadPacketData() if err != nil || data == nil || len(data) == 0 { continue } profiler.Tick() if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("PFring listener %d got data ts %v", i, ci.Timestamp) } rawIn = cfg.RunData.inPacketPool.Get().(*inPacket) rawIn.data = data rawIn.ts = ci.Timestamp rawIn.fromTX = false cfg.RunData.rawInput[getRxChanNumToUse(cfg)] <- rawIn atomic.AddUint64(&cfg.Counters.TotalPacketsRcvd, 1) atomic.AddUint64(&cfg.perIORX[i], 1) profiler.Tock() } }() select { case <-(*cfg.Ctx).Done(): log.Errorf("RX %d done due to context", i) return (*cfg.Ctx).Err() case err := <-doneChan: return err } }) }(rxwkr) select { case <-rxStartDone: if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("RX worker %d running", rxwkr) } continue case <-(*cfg.Ctx).Done(): log.Errorf("Rx worker startup error") return } } txStartDone := make(chan bool) for txwkr := 0; txwkr < cfg.NumTXWorkers; txwkr++ { func(i int) { cfg.Eg.Go(func() error { doneChan := make(chan error, 1) go func() { // PFring doesn't implement TX timestamps actually // API documentation lists it, but at a low level, its not actually used // create a raw socket and send packets via it , read TS similar to Oleg's method var profiler Profiler profiler.Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("TX worker %d", i)) cfg.PerfProfilers = append(cfg.PerfProfilers, &profiler) txTSworker := make([]Profiler, cfg.NumTXTSWorkerPerTx) for j := 0; j < cfg.NumTXTSWorkerPerTx; j++
ifInfo, err := net.InterfaceByName(cfg.Iface) if err != nil { log.Errorf("Interface by name failed in start tx worker") doneChan <- err return } var haddr [8]byte copy(haddr[0:7], ifInfo.HardwareAddr[0:7]) addr := syscall.SockaddrLinklayer{ Protocol: syscall.ETH_P_ALL, Ifindex: ifInfo.Index, Halen: uint8(len(ifInfo.HardwareAddr)), Addr: haddr, } fdTS, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, syscall.ETH_P_ALL) if err != nil { log.Errorf("Failed to make raw socket for TS worker %d err %v", i, err) } defer syscall.Close(fdTS) err = syscall.Bind(fdTS, &addr) if err != nil { log.Errorf("Failed to bind TS socket %v", err) } if err := ptp.IoctlTimestamp(fdTS, cfg.Iface); err != nil { log.Errorf("Failed to ioctl timestamp tx worker %v", i) return } // Enable hardware timestamp capabilities on socket flags := unix.SOF_TIMESTAMPING_TX_HARDWARE | unix.SOF_TIMESTAMPING_RX_HARDWARE | unix.SOF_TIMESTAMPING_RAW_HARDWARE if err := unix.SetsockoptInt(fdTS, unix.SOL_SOCKET, ptp.Timestamping(), flags); err != nil { log.Errorf("Failed to set flags tx worker %v err %v", i, err) return } if err := unix.SetsockoptInt(fdTS, unix.SOL_SOCKET, unix.SO_SELECT_ERR_QUEUE, 1); err != nil { log.Errorf("Failed to select err queue tx worker %v", i) return } /* simple socket for non-timestamping */ fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, syscall.ETH_P_ALL) if err != nil { log.Errorf("Creating simple socket for tx worker %d failed err %v", i, err) } defer syscall.Close(fd) err = syscall.Bind(fd, &addr) if err != nil { log.Errorf("Simple socket bind failed tx worker %d err %v", i, err) } var txTSBytesReceived uint64 // start go-routines to handle TX TS txTSStartDone := make(chan bool) for j := 0; j < cfg.NumTXTSWorkerPerTx; j++ { go func(workerNum int) { var pktSent []byte var inPkt *inPacket var pktSentLen int var err error var msgs []byte var txTS time.Time msgs = make([]byte, 1000) pktSent = cfg.RunData.bytePool.Get().([]byte) // check if there are control messages on timestamp socket txTSStartDone <- true for { // ideally should use ptp.PeekRecvMsgs , but maybe similar overhead, just leave this txTSworker[workerNum].Tick() pktSentLen, _, _, _, err = unix.Recvmsg(fdTS, pktSent, msgs, unix.MSG_ERRQUEUE) if err != nil || pktSentLen == 0 { continue } txTS, err = ptp.SocketControlMessageTimestamp(msgs) if err != nil { log.Errorf("SocketControlMessageTimestamp err %v", err) } inPkt = cfg.RunData.inPacketPool.Get().(*inPacket) inPkt.data = pktSent inPkt.ts = txTS inPkt.fromTX = true cfg.RunData.rawInput[getRxChanNumToUse(cfg)] <- inPkt pktSent = cfg.RunData.bytePool.Get().([]byte) atomic.AddUint64(&cfg.Counters.TotalTXTSRead, 1) atomic.AddUint64(&txTSBytesReceived, uint64(pktSentLen)) txTSworker[workerNum].Tock() } }(j) select { case <-txTSStartDone:
{ txTSworker[j].Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("TX worker %d TSRead worker %d", i, j)) cfg.PerfProfilers = append(cfg.PerfProfilers, &txTSworker[j]) }
conditional_block
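Both the RX and TX loops above follow the same launch pattern: an errgroup goroutine wraps an inner worker goroutine, a start channel confirms the worker is up before the next one is launched, and a done channel or the context ends it. The skeleton below shows just that shape; launchWorkers and numWorkers are illustrative names, not code from this file.

// Skeleton of the start-and-confirm worker launch pattern used above.
func launchWorkers(numWorkers int) error {
	eg, ctx := errgroup.WithContext(context.Background())
	started := make(chan bool)
	for w := 0; w < numWorkers; w++ {
		func(i int) {
			eg.Go(func() error {
				done := make(chan error, 1)
				go func() {
					started <- true // signal the launcher that this worker is up
					// ... worker loop; writes to done on a fatal error ...
				}()
				select {
				case <-ctx.Done():
					return ctx.Err()
				case err := <-done:
					return err
				}
			})
		}(w)
		<-started // wait for worker w before launching the next one
	}
	return eg.Wait()
}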
ioWkr.go
(cfg *ClientGenConfig) { rxStartDone := make(chan bool) for rxwkr := 0; rxwkr < cfg.NumRXWorkers; rxwkr++ { func(i int) { cfg.Eg.Go(func() error { doneChan := make(chan error, 1) go func() { var profiler Profiler profiler.Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("RX Worker %d", i)) cfg.PerfProfilers = append(cfg.PerfProfilers, &profiler) var ring *pfring.Ring var rawIn *inPacket var err error // 1<<24 is PF_RING_DISCARD_INJECTED_PKTS , if you transmit a packet via the ring, doesn't read it back if ring, err = pfring.NewRing(cfg.Iface, 4096, (1<<24)|pfring.FlagPromisc|pfring.FlagHWTimestamp); err != nil { log.Errorf("pfring ring creation error:", err) doneChan <- err return } defer ring.Close() // just use fixed cluster number 1, round robin packets if err = ring.SetCluster(1, pfring.ClusterType(pfring.ClusterRoundRobin)); err != nil { log.Errorf("pfring SetCluster error:", err) doneChan <- err return } if err = ring.SetDirection(pfring.ReceiveOnly); err != nil { log.Errorf("pfring failed to set direction") doneChan <- err return } if err = ring.SetPollWatermark(1); err != nil { log.Errorf("pfring failed to set poll watermark") doneChan <- err return } if err = ring.SetPollDuration(1); err != nil { log.Errorf("pfring failed to set poll watermark") doneChan <- err return } if err = ring.SetSamplingRate(1); err != nil { log.Errorf("pfring failed to set sample rate") doneChan <- err return } // only using read for now if err = ring.SetSocketMode(pfring.ReadOnly); err != nil { log.Errorf("pfring SetSocketMode error: %v", err) doneChan <- err return } else if err = ring.Enable(); err != nil { log.Errorf("pfring Enable error: %v", err) doneChan <- err return } if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("RX wkr %d pfring done!", i) } var data []byte var ci gopacket.CaptureInfo rxStartDone <- true for { // try to read from handle data, ci, err = ring.ReadPacketData() if err != nil || data == nil || len(data) == 0 { continue } profiler.Tick() if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("PFring listener %d got data ts %v", i, ci.Timestamp) } rawIn = cfg.RunData.inPacketPool.Get().(*inPacket) rawIn.data = data rawIn.ts = ci.Timestamp rawIn.fromTX = false cfg.RunData.rawInput[getRxChanNumToUse(cfg)] <- rawIn atomic.AddUint64(&cfg.Counters.TotalPacketsRcvd, 1) atomic.AddUint64(&cfg.perIORX[i], 1) profiler.Tock() } }() select { case <-(*cfg.Ctx).Done(): log.Errorf("RX %d done due to context", i) return (*cfg.Ctx).Err() case err := <-doneChan: return err } }) }(rxwkr) select { case <-rxStartDone: if cfg.DebugPrint || cfg.DebugIoWkrRX { log.Debugf("RX worker %d running", rxwkr) } continue case <-(*cfg.Ctx).Done(): log.Errorf("Rx worker startup error") return } } txStartDone := make(chan bool) for txwkr := 0; txwkr < cfg.NumTXWorkers; txwkr++ { func(i int) { cfg.Eg.Go(func() error { doneChan := make(chan error, 1) go func() { // PFring doesn't implement TX timestamps actually // API documentation lists it, but at a low level, its not actually used // create a raw socket and send packets via it , read TS similar to Oleg's method var profiler Profiler profiler.Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("TX worker %d", i)) cfg.PerfProfilers = append(cfg.PerfProfilers, &profiler) txTSworker := make([]Profiler, cfg.NumTXTSWorkerPerTx) for j := 0; j < cfg.NumTXTSWorkerPerTx; j++ { txTSworker[j].Init(cfg.Eg, cfg.Ctx, true, fmt.Sprintf("TX worker %d TSRead worker %d", i, j)) cfg.PerfProfilers = append(cfg.PerfProfilers, &txTSworker[j]) } ifInfo, err := net.InterfaceByName(cfg.Iface) if err != nil { 
log.Errorf("Interface by name failed in start tx worker") doneChan <- err return } var haddr [8]byte copy(haddr[0:7], ifInfo.HardwareAddr[0:7]) addr := syscall.SockaddrLinklayer{ Protocol: syscall.ETH_P_ALL, Ifindex: ifInfo.Index, Halen: uint8(len(ifInfo.HardwareAddr)), Addr: haddr, } fdTS, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, syscall.ETH_P_ALL) if err != nil { log.Errorf("Failed to make raw socket for TS worker %d err %v", i, err) } defer syscall.Close(fdTS) err = syscall.Bind(fdTS, &addr) if err != nil { log.Errorf("Failed to bind TS socket %v", err) } if err := ptp.IoctlTimestamp(fdTS, cfg.Iface); err != nil { log.Errorf("Failed to ioctl timestamp tx worker %v", i) return } // Enable hardware timestamp capabilities on socket flags := unix.SOF_TIMESTAMPING_TX_HARDWARE | unix.SOF_TIMESTAMPING_RX_HARDWARE | unix.SOF_TIMESTAMPING_RAW_HARDWARE if err := unix.SetsockoptInt(fdTS, unix.SOL_SOCKET, ptp.Timestamping(), flags); err != nil { log.Errorf("Failed to set flags tx worker %v err %v", i, err) return } if err := unix.SetsockoptInt(fdTS, unix.SOL_SOCKET, unix.SO_SELECT_ERR_QUEUE, 1); err != nil { log.Errorf("Failed to select err queue tx worker %v", i) return } /* simple socket for non-timestamping */ fd, err := syscall.Socket(syscall.AF_PACKET, syscall.SOCK_RAW, syscall.ETH_P_ALL) if err != nil { log.Errorf("Creating simple socket for tx worker %d failed err %v", i, err) } defer syscall.Close(fd) err = syscall.Bind(fd, &addr) if err != nil { log.Errorf("Simple socket bind failed tx worker %d err %v", i, err) } var txTSBytesReceived uint64 // start go-routines to handle TX TS txTSStartDone := make(chan bool) for j := 0; j < cfg.NumTXTSWorkerPerTx; j++ { go func(workerNum int) { var pktSent []byte var inPkt *inPacket var pktSentLen int var err error var msgs []byte var txTS time.Time msgs = make([]byte, 1000) pktSent = cfg.RunData.bytePool.Get().([]byte) // check if there are control messages on timestamp socket txTSStartDone <- true for { // ideally should use ptp.PeekRecvMsgs , but maybe similar overhead, just leave this txTSworker[workerNum].Tick() pktSentLen, _, _, _, err = unix.Recvmsg(fdTS, pktSent, msgs, unix.MSG_ERRQUEUE) if err != nil || pktSentLen == 0 { continue } txTS, err = ptp.SocketControlMessageTimestamp(msgs) if err != nil { log.Errorf("SocketControlMessageTimestamp err %v", err) } inPkt = cfg.RunData.inPacketPool.Get().(*inPacket) inPkt.data = pktSent inPkt.ts = txTS in
startIOWorker
identifier_name
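Packets are handed off via cfg.RunData.rawInput[getRxChanNumToUse(cfg)], but that helper's body is not shown in this excerpt. Purely as an assumption, a round-robin selector over the input channels might look like the following; the real implementation may differ.

// Hypothetical round-robin channel selector; not the project's actual getRxChanNumToUse.
var rxChanCounter uint64

func getRxChanNumToUse(cfg *ClientGenConfig) int {
	n := atomic.AddUint64(&rxChanCounter, 1)
	return int(n % uint64(len(cfg.RunData.rawInput)))
}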
dcrd_test.go
hint cache: %v", err) } return hintCache } // setUpNotifier is a helper function to start a new notifier backed by a dcrd // driver. func
(t *testing.T, h *rpctest.Harness) *DcrdNotifier { hintCache := initHintCache(t) rpcConfig := h.RPCConfig() notifier, err := New(&rpcConfig, netParams, hintCache, hintCache) if err != nil { t.Fatalf("unable to create notifier: %v", err) } if err := notifier.Start(); err != nil { t.Fatalf("unable to start notifier: %v", err) } return notifier } // TestHistoricalConfDetailsTxIndex ensures that we correctly retrieve // historical confirmation details using the backend node's txindex. func TestHistoricalConfDetailsTxIndex(t *testing.T) { t.Parallel() harness, err := testutils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // A transaction unknown to the node should not be found within the // txindex even if it is enabled, so we should not proceed with any // fallback methods. var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } switch txStatus { case chainntnfs.TxNotFoundIndex: case chainntnfs.TxNotFoundManually: t.Fatal("should not have proceeded with fallback method, but did") default: t.Fatal("should not have found non-existent transaction, but did") } // Now, we'll create a test transaction and attempt to retrieve its // confirmation details. txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) if err != nil { t.Fatalf("unable to create tx: %v", err) } if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { t.Fatalf("unable to find tx in the mempool: %v", err) } confReq, err := chainntnfs.NewConfRequest(txid, pkScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } // The transaction should be found in the mempool at this point. _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since it has yet to be included in a block, it should have been found // within the mempool. switch txStatus { case chainntnfs.TxFoundMempool: default: t.Fatalf("should have found the transaction within the "+ "mempool, but did not: %v", txStatus) } // We'll now confirm this transaction and re-attempt to retrieve its // confirmation details. if _, err := rpctest.AdjustedSimnetMiner(context.Background(), harness.Node, 1); err != nil { t.Fatalf("unable to generate block: %v", err) } _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since the backend node's txindex is enabled and the transaction has // confirmed, we should be able to retrieve it using the txindex. switch txStatus { case chainntnfs.TxFoundIndex: default: t.Fatal("should have found the transaction within the " + "txindex, but did not") } } // TestHistoricalConfDetailsNoTxIndex ensures that we correctly retrieve // historical confirmation details using the set of fallback methods when the // backend node's txindex is disabled. // // TODO(decred) rpctest currently always creates nodes with --txindex and // --addrindex, so this test can't be executed at this time. 
It can manually // verified by locally modifying a copy of rpctest and adding a replace // directive in the top level go.mod file. Commenting this test for the moment. /* func TestHistoricalConfDetailsNoTxIndex(t *testing.T) { t.Parallel() harness, err := testutils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // Since the node has its txindex disabled, we fall back to scanning the // chain manually. A transaction unknown to the network should not be // found. var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } switch txStatus { case chainntnfs.TxNotFoundManually: case chainntnfs.TxNotFoundIndex: t.Fatal("should have proceeded with fallback method, but did not") default: t.Fatal("should not have found non-existent transaction, but did") } // Now, we'll create a test transaction and attempt to retrieve its // confirmation details. We'll note its broadcast height to use as the // height hint when manually scanning the chain. _, currentHeight, err := harness.Node.GetBestBlock() if err != nil { t.Fatalf("unable to retrieve current height: %v", err) } txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) if err != nil { t.Fatalf("unable to create tx: %v", err) } if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { t.Fatalf("unable to find tx in the mempool: %v", err) } confReq, err := chainntnfs.NewConfRequest(txid, pkScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since it has yet to be included in a block, it should have been found // within the mempool. if txStatus != chainntnfs.TxFoundMempool { t.Fatal("should have found the transaction within the " + "mempool, but did not") } // We'll now confirm this transaction and re-attempt to retrieve its // confirmation details. if _, err := harness.Node.Generate(1); err != nil { t.Fatalf("unable to generate block: %v", err) } _, txStatus, err = notifier.historicalConfDetails( confReq, uint32(currentHeight), uint32(currentHeight)+1, ) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since the backend node's txindex is disabled and the transaction has // confirmed, we should be able to find it by falling back to scanning // the chain manually. if txStatus != chainntnfs.TxFoundManually { t.Fatal("should have found the transaction by manually " + "scanning the chain, but did not") } } */ // TestInneficientRescan tests whether the inneficient per block rescan works // as required to detect spent outpoints and scripts. func TestInneficientRescan(t *testing.T) { t.Parallel() harness, err := testutils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // Create an output and subsequently spend it. 
outpoint, txout, privKey := chainntnfs.CreateSpendableOutput( t, harness, nil, ) spenderTx := chainntnfs.CreateSpendTx( t, outpoint, txout, privKey, ) spenderTxHash := spenderTx.TxHash() _, err = harness.Node.SendRawTransaction(context.TODO(), spenderTx, true) if err != nil { t.Fatalf("unable to publish tx: %v", err)
setUpNotifier
identifier_name
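The txindex test above repeats the same switch over the returned transaction status several times. A small helper can tighten those checks; this is only a sketch, assertTxStatus is a made-up name, and it assumes the status values are directly comparable, as their use in the switches suggests.

// Sketch of a helper to collapse the repeated status assertions.
func assertTxStatus(t *testing.T, got, want interface{}) {
	t.Helper()
	if got != want {
		t.Fatalf("unexpected tx status: got %v, want %v", got, want)
	}
}

// usage, mirroring the calls above:
// _, txStatus, err := notifier.historicalConfDetails(confReq, 0, 0)
// require.NoError(t, err)
// assertTxStatus(t, txStatus, chainntnfs.TxFoundIndex)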
dcrd_test.go
hint cache: %v", err) } return hintCache } // setUpNotifier is a helper function to start a new notifier backed by a dcrd // driver. func setUpNotifier(t *testing.T, h *rpctest.Harness) *DcrdNotifier { hintCache := initHintCache(t) rpcConfig := h.RPCConfig() notifier, err := New(&rpcConfig, netParams, hintCache, hintCache) if err != nil { t.Fatalf("unable to create notifier: %v", err) } if err := notifier.Start(); err != nil { t.Fatalf("unable to start notifier: %v", err) } return notifier } // TestHistoricalConfDetailsTxIndex ensures that we correctly retrieve // historical confirmation details using the backend node's txindex. func TestHistoricalConfDetailsTxIndex(t *testing.T) { t.Parallel() harness, err := testutils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // A transaction unknown to the node should not be found within the // txindex even if it is enabled, so we should not proceed with any // fallback methods. var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } switch txStatus { case chainntnfs.TxNotFoundIndex: case chainntnfs.TxNotFoundManually: t.Fatal("should not have proceeded with fallback method, but did") default: t.Fatal("should not have found non-existent transaction, but did") } // Now, we'll create a test transaction and attempt to retrieve its // confirmation details. txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) if err != nil { t.Fatalf("unable to create tx: %v", err) } if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { t.Fatalf("unable to find tx in the mempool: %v", err) } confReq, err := chainntnfs.NewConfRequest(txid, pkScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } // The transaction should be found in the mempool at this point. _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since it has yet to be included in a block, it should have been found // within the mempool. switch txStatus { case chainntnfs.TxFoundMempool: default: t.Fatalf("should have found the transaction within the "+ "mempool, but did not: %v", txStatus) } // We'll now confirm this transaction and re-attempt to retrieve its // confirmation details. if _, err := rpctest.AdjustedSimnetMiner(context.Background(), harness.Node, 1); err != nil { t.Fatalf("unable to generate block: %v", err) } _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since the backend node's txindex is enabled and the transaction has // confirmed, we should be able to retrieve it using the txindex. switch txStatus { case chainntnfs.TxFoundIndex: default: t.Fatal("should have found the transaction within the " + "txindex, but did not") } } // TestHistoricalConfDetailsNoTxIndex ensures that we correctly retrieve // historical confirmation details using the set of fallback methods when the // backend node's txindex is disabled. 
// // TODO(decred) rpctest currently always creates nodes with --txindex and // --addrindex, so this test can't be executed at this time. It can manually // verified by locally modifying a copy of rpctest and adding a replace // directive in the top level go.mod file. Commenting this test for the moment. /* func TestHistoricalConfDetailsNoTxIndex(t *testing.T) { t.Parallel() harness, err := testutils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // Since the node has its txindex disabled, we fall back to scanning the // chain manually. A transaction unknown to the network should not be // found. var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } switch txStatus { case chainntnfs.TxNotFoundManually: case chainntnfs.TxNotFoundIndex: t.Fatal("should have proceeded with fallback method, but did not") default: t.Fatal("should not have found non-existent transaction, but did") } // Now, we'll create a test transaction and attempt to retrieve its // confirmation details. We'll note its broadcast height to use as the // height hint when manually scanning the chain. _, currentHeight, err := harness.Node.GetBestBlock() if err != nil { t.Fatalf("unable to retrieve current height: %v", err) } txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) if err != nil { t.Fatalf("unable to create tx: %v", err) } if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { t.Fatalf("unable to find tx in the mempool: %v", err) } confReq, err := chainntnfs.NewConfRequest(txid, pkScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since it has yet to be included in a block, it should have been found // within the mempool. if txStatus != chainntnfs.TxFoundMempool { t.Fatal("should have found the transaction within the " + "mempool, but did not") } // We'll now confirm this transaction and re-attempt to retrieve its // confirmation details. if _, err := harness.Node.Generate(1); err != nil { t.Fatalf("unable to generate block: %v", err) } _, txStatus, err = notifier.historicalConfDetails( confReq, uint32(currentHeight), uint32(currentHeight)+1, ) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since the backend node's txindex is disabled and the transaction has // confirmed, we should be able to find it by falling back to scanning // the chain manually. if txStatus != chainntnfs.TxFoundManually { t.Fatal("should have found the transaction by manually " + "scanning the chain, but did not") } } */ // TestInneficientRescan tests whether the inneficient per block rescan works // as required to detect spent outpoints and scripts. func TestInneficientRescan(t *testing.T)
_, err = harness.Node.SendRawTransaction(context.TODO(), spenderTx, true) if err != nil { t.Fatalf("unable to publish tx: %v", err)
{ t.Parallel() harness, err := testutils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // Create an output and subsequently spend it. outpoint, txout, privKey := chainntnfs.CreateSpendableOutput( t, harness, nil, ) spenderTx := chainntnfs.CreateSpendTx( t, outpoint, txout, privKey, ) spenderTxHash := spenderTx.TxHash()
identifier_body
dcrd_test.go
utils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // A transaction unknown to the node should not be found within the // txindex even if it is enabled, so we should not proceed with any // fallback methods. var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } switch txStatus { case chainntnfs.TxNotFoundIndex: case chainntnfs.TxNotFoundManually: t.Fatal("should not have proceeded with fallback method, but did") default: t.Fatal("should not have found non-existent transaction, but did") } // Now, we'll create a test transaction and attempt to retrieve its // confirmation details. txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) if err != nil { t.Fatalf("unable to create tx: %v", err) } if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { t.Fatalf("unable to find tx in the mempool: %v", err) } confReq, err := chainntnfs.NewConfRequest(txid, pkScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } // The transaction should be found in the mempool at this point. _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since it has yet to be included in a block, it should have been found // within the mempool. switch txStatus { case chainntnfs.TxFoundMempool: default: t.Fatalf("should have found the transaction within the "+ "mempool, but did not: %v", txStatus) } // We'll now confirm this transaction and re-attempt to retrieve its // confirmation details. if _, err := rpctest.AdjustedSimnetMiner(context.Background(), harness.Node, 1); err != nil { t.Fatalf("unable to generate block: %v", err) } _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since the backend node's txindex is enabled and the transaction has // confirmed, we should be able to retrieve it using the txindex. switch txStatus { case chainntnfs.TxFoundIndex: default: t.Fatal("should have found the transaction within the " + "txindex, but did not") } } // TestHistoricalConfDetailsNoTxIndex ensures that we correctly retrieve // historical confirmation details using the set of fallback methods when the // backend node's txindex is disabled. // // TODO(decred) rpctest currently always creates nodes with --txindex and // --addrindex, so this test can't be executed at this time. It can manually // verified by locally modifying a copy of rpctest and adding a replace // directive in the top level go.mod file. Commenting this test for the moment. /* func TestHistoricalConfDetailsNoTxIndex(t *testing.T) { t.Parallel() harness, err := testutils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // Since the node has its txindex disabled, we fall back to scanning the // chain manually. A transaction unknown to the network should not be // found. 
var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } switch txStatus { case chainntnfs.TxNotFoundManually: case chainntnfs.TxNotFoundIndex: t.Fatal("should have proceeded with fallback method, but did not") default: t.Fatal("should not have found non-existent transaction, but did") } // Now, we'll create a test transaction and attempt to retrieve its // confirmation details. We'll note its broadcast height to use as the // height hint when manually scanning the chain. _, currentHeight, err := harness.Node.GetBestBlock() if err != nil { t.Fatalf("unable to retrieve current height: %v", err) } txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) if err != nil { t.Fatalf("unable to create tx: %v", err) } if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { t.Fatalf("unable to find tx in the mempool: %v", err) } confReq, err := chainntnfs.NewConfRequest(txid, pkScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since it has yet to be included in a block, it should have been found // within the mempool. if txStatus != chainntnfs.TxFoundMempool { t.Fatal("should have found the transaction within the " + "mempool, but did not") } // We'll now confirm this transaction and re-attempt to retrieve its // confirmation details. if _, err := harness.Node.Generate(1); err != nil { t.Fatalf("unable to generate block: %v", err) } _, txStatus, err = notifier.historicalConfDetails( confReq, uint32(currentHeight), uint32(currentHeight)+1, ) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since the backend node's txindex is disabled and the transaction has // confirmed, we should be able to find it by falling back to scanning // the chain manually. if txStatus != chainntnfs.TxFoundManually { t.Fatal("should have found the transaction by manually " + "scanning the chain, but did not") } } */ // TestInneficientRescan tests whether the inneficient per block rescan works // as required to detect spent outpoints and scripts. func TestInneficientRescan(t *testing.T) { t.Parallel() harness, err := testutils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // Create an output and subsequently spend it. outpoint, txout, privKey := chainntnfs.CreateSpendableOutput( t, harness, nil, ) spenderTx := chainntnfs.CreateSpendTx( t, outpoint, txout, privKey, ) spenderTxHash := spenderTx.TxHash() _, err = harness.Node.SendRawTransaction(context.TODO(), spenderTx, true) if err != nil { t.Fatalf("unable to publish tx: %v", err) } if err := chainntnfs.WaitForMempoolTx(harness, &spenderTxHash); err != nil { t.Fatalf("unable to find tx in the mempool: %v", err) } // We'll now confirm this transaction and attempt to retrieve its // confirmation details. 
bhs, err := rpctest.AdjustedSimnetMiner(context.Background(), harness.Node, 1) if err != nil { t.Fatalf("unable to generate block: %v", err) } block, err := harness.Node.GetBlock(context.TODO(), bhs[0]) if err != nil { t.Fatalf("unable to get block: %v", err) } var testTx *wire.MsgTx for _, tx := range block.Transactions { otherHash := tx.TxHash() if spenderTxHash.IsEqual(&otherHash)
{ testTx = tx break }
conditional_block
dcrd_test.go
create hint cache: %v", err) } return hintCache } // setUpNotifier is a helper function to start a new notifier backed by a dcrd // driver. func setUpNotifier(t *testing.T, h *rpctest.Harness) *DcrdNotifier { hintCache := initHintCache(t) rpcConfig := h.RPCConfig() notifier, err := New(&rpcConfig, netParams, hintCache, hintCache) if err != nil { t.Fatalf("unable to create notifier: %v", err) } if err := notifier.Start(); err != nil { t.Fatalf("unable to start notifier: %v", err) } return notifier } // TestHistoricalConfDetailsTxIndex ensures that we correctly retrieve // historical confirmation details using the backend node's txindex. func TestHistoricalConfDetailsTxIndex(t *testing.T) { t.Parallel() harness, err := testutils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // A transaction unknown to the node should not be found within the // txindex even if it is enabled, so we should not proceed with any // fallback methods. var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } switch txStatus { case chainntnfs.TxNotFoundIndex: case chainntnfs.TxNotFoundManually: t.Fatal("should not have proceeded with fallback method, but did") default: t.Fatal("should not have found non-existent transaction, but did") } // Now, we'll create a test transaction and attempt to retrieve its // confirmation details. txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) if err != nil { t.Fatalf("unable to create tx: %v", err) } if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { t.Fatalf("unable to find tx in the mempool: %v", err) } confReq, err := chainntnfs.NewConfRequest(txid, pkScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } // The transaction should be found in the mempool at this point. _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since it has yet to be included in a block, it should have been found // within the mempool. switch txStatus { case chainntnfs.TxFoundMempool: default: t.Fatalf("should have found the transaction within the "+ "mempool, but did not: %v", txStatus) } // We'll now confirm this transaction and re-attempt to retrieve its // confirmation details. if _, err := rpctest.AdjustedSimnetMiner(context.Background(), harness.Node, 1); err != nil { t.Fatalf("unable to generate block: %v", err) } _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0)
if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since the backend node's txindex is enabled and the transaction has // confirmed, we should be able to retrieve it using the txindex. switch txStatus { case chainntnfs.TxFoundIndex: default: t.Fatal("should have found the transaction within the " + "txindex, but did not") } } // TestHistoricalConfDetailsNoTxIndex ensures that we correctly retrieve // historical confirmation details using the set of fallback methods when the // backend node's txindex is disabled. // // TODO(decred) rpctest currently always creates nodes with --txindex and // --addrindex, so this test can't be executed at this time. It can manually // verified by locally modifying a copy of rpctest and adding a replace // directive in the top level go.mod file. Commenting this test for the moment. /* func TestHistoricalConfDetailsNoTxIndex(t *testing.T) { t.Parallel() harness, err := testutils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // Since the node has its txindex disabled, we fall back to scanning the // chain manually. A transaction unknown to the network should not be // found. var unknownHash chainhash.Hash copy(unknownHash[:], bytes.Repeat([]byte{0x10}, 32)) unknownConfReq, err := chainntnfs.NewConfRequest(&unknownHash, testScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err := notifier.historicalConfDetails(unknownConfReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } switch txStatus { case chainntnfs.TxNotFoundManually: case chainntnfs.TxNotFoundIndex: t.Fatal("should have proceeded with fallback method, but did not") default: t.Fatal("should not have found non-existent transaction, but did") } // Now, we'll create a test transaction and attempt to retrieve its // confirmation details. We'll note its broadcast height to use as the // height hint when manually scanning the chain. _, currentHeight, err := harness.Node.GetBestBlock() if err != nil { t.Fatalf("unable to retrieve current height: %v", err) } txid, pkScript, err := chainntnfs.GetTestTxidAndScript(harness) if err != nil { t.Fatalf("unable to create tx: %v", err) } if err := chainntnfs.WaitForMempoolTx(harness, txid); err != nil { t.Fatalf("unable to find tx in the mempool: %v", err) } confReq, err := chainntnfs.NewConfRequest(txid, pkScript) if err != nil { t.Fatalf("unable to create conf request: %v", err) } _, txStatus, err = notifier.historicalConfDetails(confReq, 0, 0) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since it has yet to be included in a block, it should have been found // within the mempool. if txStatus != chainntnfs.TxFoundMempool { t.Fatal("should have found the transaction within the " + "mempool, but did not") } // We'll now confirm this transaction and re-attempt to retrieve its // confirmation details. if _, err := harness.Node.Generate(1); err != nil { t.Fatalf("unable to generate block: %v", err) } _, txStatus, err = notifier.historicalConfDetails( confReq, uint32(currentHeight), uint32(currentHeight)+1, ) if err != nil { t.Fatalf("unable to retrieve historical conf details: %v", err) } // Since the backend node's txindex is disabled and the transaction has // confirmed, we should be able to find it by falling back to scanning // the chain manually. 
if txStatus != chainntnfs.TxFoundManually { t.Fatal("should have found the transaction by manually " + "scanning the chain, but did not") } } */ // TestInneficientRescan tests whether the inneficient per block rescan works // as required to detect spent outpoints and scripts. func TestInneficientRescan(t *testing.T) { t.Parallel() harness, err := testutils.NewSetupRPCTest( t, 5, netParams, nil, []string{"--txindex"}, true, 25, ) require.NoError(t, err) defer harness.TearDown() notifier := setUpNotifier(t, harness) defer notifier.Stop() // Create an output and subsequently spend it. outpoint, txout, privKey := chainntnfs.CreateSpendableOutput( t, harness, nil, ) spenderTx := chainntnfs.CreateSpendTx( t, outpoint, txout, privKey, ) spenderTxHash := spenderTx.TxHash() _, err = harness.Node.SendRawTransaction(context.TODO(), spenderTx, true) if err != nil { t.Fatalf("unable to publish tx: %v", err)
random_line_split
refcounteddb.rs
/// /// journal format: /// ```text /// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] /// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] /// [era, n] => [ ... ] /// ``` /// /// when we make a new commit, we journal the inserts and removes. /// for each `end_era` that we journaled that we are no passing by, /// we remove all of its removes assuming it is canonical and all /// of its inserts otherwise. // TODO: store last_era, reclaim_period. pub struct RefCountedDB { forward: OverlayDB, backing: Arc<dyn KeyValueDB>, latest_era: Option<u64>, inserts: Vec<H256>, removes: Vec<H256>, column: Option<u32>, } impl RefCountedDB { /// Create a new instance given a `backing` database. pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> RefCountedDB { let latest_era = backing.get(column, &LATEST_ERA_KEY) .expect("Low-level database error.") .map(|v| decode::<u64>(&v).expect("decoding db value failed")); RefCountedDB { forward: OverlayDB::new(backing.clone(), column), backing, inserts: vec![], removes: vec![], latest_era, column, } } } impl HashDB<KeccakHasher, DBValue> for RefCountedDB { fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> { self.forward.get(key, prefix) } fn contains(&self, key: &H256, prefix: Prefix) -> bool { self.forward.contains(key, prefix) } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H256 { let r = self.forward.insert(prefix, value); self.inserts.push(r.clone()); r } fn emplace(&mut self, key: H256, prefix: Prefix, value: DBValue) { self.inserts.push(key.clone()); self.forward.emplace(key, prefix, value); } fn remove(&mut self, key: &H256, _prefix: Prefix) { self.removes.push(key.clone()); } } impl JournalDB for RefCountedDB { fn boxed_clone(&self) -> Box<dyn JournalDB> { Box::new(RefCountedDB { forward: self.forward.clone(), backing: self.backing.clone(), latest_era: self.latest_era, inserts: self.inserts.clone(), removes: self.removes.clone(), column: self.column.clone(), }) } fn mem_used(&self) -> usize { let mut ops = new_malloc_size_ops(); self.inserts.size_of(&mut ops) + self.removes.size_of(&mut ops) } fn is_empty(&self) -> bool { self.latest_era.is_none() } fn backing(&self) -> &Arc<dyn KeyValueDB> { &self.backing } fn latest_era(&self) -> Option<u64> { self.latest_era } fn state(&self, id: &H256) -> Option<Bytes> { self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec()) } fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> { // record new commit's details. 
let mut db_key = DatabaseKey { era: now, index: 0usize, }; let mut last; while self.backing.get(self.column, { last = encode(&db_key); &last })?.is_some() { db_key.index += 1; } { let value_ref = DatabaseValueRef { id, inserts: &self.inserts, deletes: &self.removes, }; batch.put(self.column, &last, &encode(&value_ref)); } let ops = self.inserts.len() + self.removes.len(); trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, db_key.index, id, self.inserts, self.removes); self.inserts.clear(); self.removes.clear(); if self.latest_era.map_or(true, |e| now > e) { batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); self.latest_era = Some(now); } Ok(ops as u32) } fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> { // apply old commits' details let mut db_key = DatabaseKey { era: end_era, index: 0usize, }; let mut last; while let Some(rlp_data) = { self.backing.get(self.column, { last = encode(&db_key); &last })? } { let view = DatabaseValueView::from_rlp(&rlp_data); let our_id = view.id().expect("rlp read from db; qed"); let to_remove = if canon_id == &our_id { view.deletes() } else { view.inserts() }.expect("rlp read from db; qed"); trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, db_key.index, our_id, canon_id, to_remove); for i in &to_remove { self.forward.remove(i, EMPTY_PREFIX); } batch.delete(self.column, &last); db_key.index += 1; } let r = self.forward.commit_to_batch(batch)?; Ok(r) } fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> { self.inserts.clear(); for remove in self.removes.drain(..) { self.forward.remove(&remove, EMPTY_PREFIX); } self.forward.commit_to_batch(batch) } fn consolidate(&mut self, mut with: super::MemoryDB) { for (key, (value, rc)) in with.drain() { for _ in 0..rc { self.emplace(key, EMPTY_PREFIX, value.clone()); } for _ in rc..0 { self.remove(&key, EMPTY_PREFIX); } } } fn
(&self) -> HashMap<H256, i32> { self.forward.keys() } } #[cfg(test)] mod tests { use keccak_hash::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; use kvdb_memorydb; use crate::{JournalDB, inject_batch, commit_batch}; fn new_db() -> RefCountedDB { let backing = Arc::new(kvdb_memorydb::create(0)); RefCountedDB::new(backing, None) } #[test] fn long_history() { // history is 3 let mut jdb = new_db(); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(!jdb.contains(&h, EMPTY_PREFIX)); } #[test] fn latest_era_should_work() { // history is 3 let mut jdb = new_db(); assert_eq!(jdb.latest_era(), None); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(0)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(1)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert_eq!(j
keys
identifier_name
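The refcounteddb.rs record above documents a journal keyed by `[era, index]`: `journal_under` probes `(era, 0)`, `(era, 1)`, ... until it finds a free slot, then stores the commit id together with its inserts and removes. Below is a minimal sketch of just that probing idea, assuming a `HashMap` as a stand-in for the backing `KeyValueDB`; the `JournalEntry` type and the `journal_under` signature here are illustrative, not the crate's API.

```rust
use std::collections::HashMap;

// Illustrative stand-ins: a 32-byte hash and a journal entry holding the
// commit id plus the hashes it inserted and removed.
type Hash = [u8; 32];

#[derive(Debug, Clone)]
struct JournalEntry {
    id: Hash,
    inserts: Vec<Hash>,
    deletes: Vec<Hash>,
}

// Probe (era, 0), (era, 1), ... until a free slot is found, then store the
// entry there, mirroring the loop journal_under runs against the backing DB.
fn journal_under(
    backing: &mut HashMap<(u64, usize), JournalEntry>,
    era: u64,
    entry: JournalEntry,
) -> usize {
    let mut index = 0usize;
    while backing.contains_key(&(era, index)) {
        index += 1;
    }
    backing.insert((era, index), entry);
    index
}

fn main() {
    let mut backing = HashMap::new();
    let entry = JournalEntry { id: [0; 32], inserts: vec![[1; 32]], deletes: vec![] };
    assert_eq!(journal_under(&mut backing, 7, entry.clone()), 0);
    assert_eq!(journal_under(&mut backing, 7, entry), 1); // second commit in the same era
}
```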
refcounteddb.rs
/// /// journal format: /// ```text /// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] /// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] /// [era, n] => [ ... ] /// ``` /// /// when we make a new commit, we journal the inserts and removes. /// for each `end_era` that we journaled that we are no passing by, /// we remove all of its removes assuming it is canonical and all /// of its inserts otherwise. // TODO: store last_era, reclaim_period. pub struct RefCountedDB { forward: OverlayDB, backing: Arc<dyn KeyValueDB>, latest_era: Option<u64>, inserts: Vec<H256>, removes: Vec<H256>, column: Option<u32>, } impl RefCountedDB { /// Create a new instance given a `backing` database. pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> RefCountedDB { let latest_era = backing.get(column, &LATEST_ERA_KEY) .expect("Low-level database error.") .map(|v| decode::<u64>(&v).expect("decoding db value failed")); RefCountedDB { forward: OverlayDB::new(backing.clone(), column), backing, inserts: vec![], removes: vec![], latest_era, column, } } } impl HashDB<KeccakHasher, DBValue> for RefCountedDB { fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> { self.forward.get(key, prefix) } fn contains(&self, key: &H256, prefix: Prefix) -> bool { self.forward.contains(key, prefix) } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H256 { let r = self.forward.insert(prefix, value); self.inserts.push(r.clone()); r } fn emplace(&mut self, key: H256, prefix: Prefix, value: DBValue) { self.inserts.push(key.clone()); self.forward.emplace(key, prefix, value); } fn remove(&mut self, key: &H256, _prefix: Prefix) { self.removes.push(key.clone()); } } impl JournalDB for RefCountedDB { fn boxed_clone(&self) -> Box<dyn JournalDB> { Box::new(RefCountedDB { forward: self.forward.clone(), backing: self.backing.clone(), latest_era: self.latest_era, inserts: self.inserts.clone(), removes: self.removes.clone(), column: self.column.clone(), }) } fn mem_used(&self) -> usize { let mut ops = new_malloc_size_ops(); self.inserts.size_of(&mut ops) + self.removes.size_of(&mut ops) } fn is_empty(&self) -> bool { self.latest_era.is_none() } fn backing(&self) -> &Arc<dyn KeyValueDB> { &self.backing } fn latest_era(&self) -> Option<u64> { self.latest_era } fn state(&self, id: &H256) -> Option<Bytes> { self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec()) } fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> { // record new commit's details. 
let mut db_key = DatabaseKey { era: now, index: 0usize, }; let mut last; while self.backing.get(self.column, { last = encode(&db_key); &last })?.is_some() { db_key.index += 1; } { let value_ref = DatabaseValueRef { id, inserts: &self.inserts, deletes: &self.removes, }; batch.put(self.column, &last, &encode(&value_ref)); } let ops = self.inserts.len() + self.removes.len(); trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, db_key.index, id, self.inserts, self.removes); self.inserts.clear(); self.removes.clear(); if self.latest_era.map_or(true, |e| now > e) { batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); self.latest_era = Some(now); } Ok(ops as u32) } fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> { // apply old commits' details let mut db_key = DatabaseKey { era: end_era, index: 0usize, }; let mut last; while let Some(rlp_data) = { self.backing.get(self.column, { last = encode(&db_key); &last })? } { let view = DatabaseValueView::from_rlp(&rlp_data); let our_id = view.id().expect("rlp read from db; qed"); let to_remove = if canon_id == &our_id { view.deletes() } else
.expect("rlp read from db; qed"); trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, db_key.index, our_id, canon_id, to_remove); for i in &to_remove { self.forward.remove(i, EMPTY_PREFIX); } batch.delete(self.column, &last); db_key.index += 1; } let r = self.forward.commit_to_batch(batch)?; Ok(r) } fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> { self.inserts.clear(); for remove in self.removes.drain(..) { self.forward.remove(&remove, EMPTY_PREFIX); } self.forward.commit_to_batch(batch) } fn consolidate(&mut self, mut with: super::MemoryDB) { for (key, (value, rc)) in with.drain() { for _ in 0..rc { self.emplace(key, EMPTY_PREFIX, value.clone()); } for _ in rc..0 { self.remove(&key, EMPTY_PREFIX); } } } fn keys(&self) -> HashMap<H256, i32> { self.forward.keys() } } #[cfg(test)] mod tests { use keccak_hash::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; use kvdb_memorydb; use crate::{JournalDB, inject_batch, commit_batch}; fn new_db() -> RefCountedDB { let backing = Arc::new(kvdb_memorydb::create(0)); RefCountedDB::new(backing, None) } #[test] fn long_history() { // history is 3 let mut jdb = new_db(); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(!jdb.contains(&h, EMPTY_PREFIX)); } #[test] fn latest_era_should_work() { // history is 3 let mut jdb = new_db(); assert_eq!(jdb.latest_era(), None); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(0)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(1)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert_eq!(j
{ view.inserts() }
conditional_block
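The `mark_canonical` branch captured by this record decides what to drop once an era ages out: the canonical commit's removes are applied for good, while every non-canonical sibling has its inserts rolled back instead. A std-only sketch of that decision follows, with `u64` ids standing in for `H256` hashes; names and types are illustrative only.

```rust
// Illustrative stand-ins: u64 ids in place of H256 hashes.
#[derive(Debug)]
struct JournalEntry {
    id: u64,
    inserts: Vec<u64>,
    deletes: Vec<u64>,
}

// For the canonical commit, its removes now really go away; for any other
// commit in the same era, its inserts are rolled back instead.
fn keys_to_remove(era_entries: &[JournalEntry], canon_id: u64) -> Vec<u64> {
    let mut out = Vec::new();
    for entry in era_entries {
        if entry.id == canon_id {
            out.extend(entry.deletes.iter().copied());
        } else {
            out.extend(entry.inserts.iter().copied());
        }
    }
    out
}

fn main() {
    let era = vec![
        JournalEntry { id: 1, inserts: vec![10, 11], deletes: vec![3] },
        JournalEntry { id: 2, inserts: vec![20], deletes: vec![4] },
    ];
    // Commit 1 is canonical: its delete (3) is applied, commit 2's insert (20) is undone.
    assert_eq!(keys_to_remove(&era, 1), vec![3, 20]);
}
```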
refcounteddb.rs
/// /// journal format: /// ```text /// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] /// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] /// [era, n] => [ ... ] /// ``` /// /// when we make a new commit, we journal the inserts and removes. /// for each `end_era` that we journaled that we are no passing by, /// we remove all of its removes assuming it is canonical and all /// of its inserts otherwise. // TODO: store last_era, reclaim_period. pub struct RefCountedDB { forward: OverlayDB, backing: Arc<dyn KeyValueDB>, latest_era: Option<u64>, inserts: Vec<H256>, removes: Vec<H256>, column: Option<u32>, } impl RefCountedDB { /// Create a new instance given a `backing` database. pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> RefCountedDB { let latest_era = backing.get(column, &LATEST_ERA_KEY) .expect("Low-level database error.") .map(|v| decode::<u64>(&v).expect("decoding db value failed")); RefCountedDB { forward: OverlayDB::new(backing.clone(), column), backing, inserts: vec![], removes: vec![], latest_era, column, } } } impl HashDB<KeccakHasher, DBValue> for RefCountedDB { fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> { self.forward.get(key, prefix) } fn contains(&self, key: &H256, prefix: Prefix) -> bool { self.forward.contains(key, prefix) } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H256 { let r = self.forward.insert(prefix, value); self.inserts.push(r.clone()); r } fn emplace(&mut self, key: H256, prefix: Prefix, value: DBValue) { self.inserts.push(key.clone()); self.forward.emplace(key, prefix, value); } fn remove(&mut self, key: &H256, _prefix: Prefix) { self.removes.push(key.clone()); } } impl JournalDB for RefCountedDB { fn boxed_clone(&self) -> Box<dyn JournalDB> { Box::new(RefCountedDB { forward: self.forward.clone(), backing: self.backing.clone(), latest_era: self.latest_era, inserts: self.inserts.clone(), removes: self.removes.clone(), column: self.column.clone(),
fn mem_used(&self) -> usize { let mut ops = new_malloc_size_ops(); self.inserts.size_of(&mut ops) + self.removes.size_of(&mut ops) } fn is_empty(&self) -> bool { self.latest_era.is_none() } fn backing(&self) -> &Arc<dyn KeyValueDB> { &self.backing } fn latest_era(&self) -> Option<u64> { self.latest_era } fn state(&self, id: &H256) -> Option<Bytes> { self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec()) } fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> { // record new commit's details. let mut db_key = DatabaseKey { era: now, index: 0usize, }; let mut last; while self.backing.get(self.column, { last = encode(&db_key); &last })?.is_some() { db_key.index += 1; } { let value_ref = DatabaseValueRef { id, inserts: &self.inserts, deletes: &self.removes, }; batch.put(self.column, &last, &encode(&value_ref)); } let ops = self.inserts.len() + self.removes.len(); trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, db_key.index, id, self.inserts, self.removes); self.inserts.clear(); self.removes.clear(); if self.latest_era.map_or(true, |e| now > e) { batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); self.latest_era = Some(now); } Ok(ops as u32) } fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> { // apply old commits' details let mut db_key = DatabaseKey { era: end_era, index: 0usize, }; let mut last; while let Some(rlp_data) = { self.backing.get(self.column, { last = encode(&db_key); &last })? } { let view = DatabaseValueView::from_rlp(&rlp_data); let our_id = view.id().expect("rlp read from db; qed"); let to_remove = if canon_id == &our_id { view.deletes() } else { view.inserts() }.expect("rlp read from db; qed"); trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, db_key.index, our_id, canon_id, to_remove); for i in &to_remove { self.forward.remove(i, EMPTY_PREFIX); } batch.delete(self.column, &last); db_key.index += 1; } let r = self.forward.commit_to_batch(batch)?; Ok(r) } fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> { self.inserts.clear(); for remove in self.removes.drain(..) 
{ self.forward.remove(&remove, EMPTY_PREFIX); } self.forward.commit_to_batch(batch) } fn consolidate(&mut self, mut with: super::MemoryDB) { for (key, (value, rc)) in with.drain() { for _ in 0..rc { self.emplace(key, EMPTY_PREFIX, value.clone()); } for _ in rc..0 { self.remove(&key, EMPTY_PREFIX); } } } fn keys(&self) -> HashMap<H256, i32> { self.forward.keys() } } #[cfg(test)] mod tests { use keccak_hash::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; use kvdb_memorydb; use crate::{JournalDB, inject_batch, commit_batch}; fn new_db() -> RefCountedDB { let backing = Arc::new(kvdb_memorydb::create(0)); RefCountedDB::new(backing, None) } #[test] fn long_history() { // history is 3 let mut jdb = new_db(); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(!jdb.contains(&h, EMPTY_PREFIX)); } #[test] fn latest_era_should_work() { // history is 3 let mut jdb = new_db(); assert_eq!(jdb.latest_era(), None); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(0)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(1)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert_eq!(jdb
}) }
random_line_split
refcounteddb.rs
/// /// journal format: /// ```text /// [era, 0] => [ id, [insert_0, ...], [remove_0, ...] ] /// [era, 1] => [ id, [insert_0, ...], [remove_0, ...] ] /// [era, n] => [ ... ] /// ``` /// /// when we make a new commit, we journal the inserts and removes. /// for each `end_era` that we journaled that we are no passing by, /// we remove all of its removes assuming it is canonical and all /// of its inserts otherwise. // TODO: store last_era, reclaim_period. pub struct RefCountedDB { forward: OverlayDB, backing: Arc<dyn KeyValueDB>, latest_era: Option<u64>, inserts: Vec<H256>, removes: Vec<H256>, column: Option<u32>, } impl RefCountedDB { /// Create a new instance given a `backing` database. pub fn new(backing: Arc<dyn KeyValueDB>, column: Option<u32>) -> RefCountedDB { let latest_era = backing.get(column, &LATEST_ERA_KEY) .expect("Low-level database error.") .map(|v| decode::<u64>(&v).expect("decoding db value failed")); RefCountedDB { forward: OverlayDB::new(backing.clone(), column), backing, inserts: vec![], removes: vec![], latest_era, column, } } } impl HashDB<KeccakHasher, DBValue> for RefCountedDB { fn get(&self, key: &H256, prefix: Prefix) -> Option<DBValue> { self.forward.get(key, prefix) } fn contains(&self, key: &H256, prefix: Prefix) -> bool { self.forward.contains(key, prefix) } fn insert(&mut self, prefix: Prefix, value: &[u8]) -> H256 { let r = self.forward.insert(prefix, value); self.inserts.push(r.clone()); r } fn emplace(&mut self, key: H256, prefix: Prefix, value: DBValue) { self.inserts.push(key.clone()); self.forward.emplace(key, prefix, value); } fn remove(&mut self, key: &H256, _prefix: Prefix) { self.removes.push(key.clone()); } } impl JournalDB for RefCountedDB { fn boxed_clone(&self) -> Box<dyn JournalDB> { Box::new(RefCountedDB { forward: self.forward.clone(), backing: self.backing.clone(), latest_era: self.latest_era, inserts: self.inserts.clone(), removes: self.removes.clone(), column: self.column.clone(), }) } fn mem_used(&self) -> usize { let mut ops = new_malloc_size_ops(); self.inserts.size_of(&mut ops) + self.removes.size_of(&mut ops) } fn is_empty(&self) -> bool { self.latest_era.is_none() } fn backing(&self) -> &Arc<dyn KeyValueDB> { &self.backing } fn latest_era(&self) -> Option<u64> { self.latest_era } fn state(&self, id: &H256) -> Option<Bytes> { self.backing.get_by_prefix(self.column, &id[0..DB_PREFIX_LEN]).map(|b| b.into_vec()) } fn journal_under(&mut self, batch: &mut DBTransaction, now: u64, id: &H256) -> io::Result<u32> { // record new commit's details. 
let mut db_key = DatabaseKey { era: now, index: 0usize, }; let mut last; while self.backing.get(self.column, { last = encode(&db_key); &last })?.is_some() { db_key.index += 1; } { let value_ref = DatabaseValueRef { id, inserts: &self.inserts, deletes: &self.removes, }; batch.put(self.column, &last, &encode(&value_ref)); } let ops = self.inserts.len() + self.removes.len(); trace!(target: "rcdb", "new journal for time #{}.{} => {}: inserts={:?}, removes={:?}", now, db_key.index, id, self.inserts, self.removes); self.inserts.clear(); self.removes.clear(); if self.latest_era.map_or(true, |e| now > e) { batch.put(self.column, &LATEST_ERA_KEY, &encode(&now)); self.latest_era = Some(now); } Ok(ops as u32) } fn mark_canonical(&mut self, batch: &mut DBTransaction, end_era: u64, canon_id: &H256) -> io::Result<u32> { // apply old commits' details let mut db_key = DatabaseKey { era: end_era, index: 0usize, }; let mut last; while let Some(rlp_data) = { self.backing.get(self.column, { last = encode(&db_key); &last })? } { let view = DatabaseValueView::from_rlp(&rlp_data); let our_id = view.id().expect("rlp read from db; qed"); let to_remove = if canon_id == &our_id { view.deletes() } else { view.inserts() }.expect("rlp read from db; qed"); trace!(target: "rcdb", "delete journal for time #{}.{}=>{}, (canon was {}): deleting {:?}", end_era, db_key.index, our_id, canon_id, to_remove); for i in &to_remove { self.forward.remove(i, EMPTY_PREFIX); } batch.delete(self.column, &last); db_key.index += 1; } let r = self.forward.commit_to_batch(batch)?; Ok(r) } fn inject(&mut self, batch: &mut DBTransaction) -> io::Result<u32> { self.inserts.clear(); for remove in self.removes.drain(..) { self.forward.remove(&remove, EMPTY_PREFIX); } self.forward.commit_to_batch(batch) } fn consolidate(&mut self, mut with: super::MemoryDB) { for (key, (value, rc)) in with.drain() { for _ in 0..rc { self.emplace(key, EMPTY_PREFIX, value.clone()); } for _ in rc..0 { self.remove(&key, EMPTY_PREFIX); } } } fn keys(&self) -> HashMap<H256, i32> { self.forward.keys() } } #[cfg(test)] mod tests { use keccak_hash::keccak; use hash_db::{HashDB, EMPTY_PREFIX}; use super::*; use kvdb_memorydb; use crate::{JournalDB, inject_batch, commit_batch}; fn new_db() -> RefCountedDB
#[test] fn long_history() { // history is 3 let mut jdb = new_db(); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 3, &keccak(b"3"), Some((0, keccak(b"0")))).unwrap(); assert!(jdb.contains(&h, EMPTY_PREFIX)); commit_batch(&mut jdb, 4, &keccak(b"4"), Some((1, keccak(b"1")))).unwrap(); assert!(!jdb.contains(&h, EMPTY_PREFIX)); } #[test] fn latest_era_should_work() { // history is 3 let mut jdb = new_db(); assert_eq!(jdb.latest_era(), None); let h = jdb.insert(EMPTY_PREFIX, b"foo"); commit_batch(&mut jdb, 0, &keccak(b"0"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(0)); jdb.remove(&h, EMPTY_PREFIX); commit_batch(&mut jdb, 1, &keccak(b"1"), None).unwrap(); assert_eq!(jdb.latest_era(), Some(1)); commit_batch(&mut jdb, 2, &keccak(b"2"), None).unwrap(); assert_eq!(
{ let backing = Arc::new(kvdb_memorydb::create(0)); RefCountedDB::new(backing, None) }
identifier_body
main.rs
_fourteen_output = fourteen(test_fourteen_input); println!("14) string '{}' is the isize {}", test_fourteen_input, test_fourteen_output); let test_fifteen_input = "MDCCLXXVI"; let test_fifteen_output = fifteen(test_fifteen_input); println!("15) roman number '{}' equals arabic number {}", test_fifteen_input, test_fifteen_output); let test_eighteen_input = "\tthe rain in spain falls\n\tmainly in the plain \n "; let test_eighteen_output = eighteen(test_eighteen_input); println!("18) '{}' has {} words", test_eighteen_input, test_eighteen_output); let test_nineteen_input = "([{}][]([]()){})"; let test_nineteen_output = nineteen(test_nineteen_input); maybe = "has"; if !test_nineteen_output { maybe = "does not have"; } println!("19) '{}' {} balanced parens", test_nineteen_input, maybe); let test_twenty_input = "the rain in spain red rum sir is murder falls sometimes on the racecar but mainly in the plains"; let test_twenty_output = twenty(test_twenty_input); println!("20) '{}' has '{}' as the longest internal palindrome", test_twenty_input, test_twenty_output); let test_twentyone_input = 1776; //;"MDCCLXXVI"; let test_twentyone_output = twentyone(test_twentyone_input); println!("21) arabic number '{}' equals roman number {}", test_twentyone_input, test_twentyone_output); } fn one(input: &str) -> (char, i32) { // return the char that appears most and it's count // the first occurence of let mut counts = HashMap::new(); let mut max_char = 'z'; //input.chars().next().unwrap(); let mut max_value = 0; input.chars().rev().for_each( | c | { if counts.contains_key(&c) { let next_total = counts.get(&c).unwrap() + 1; if next_total >= max_value { max_char = c; max_value = next_total; } counts.insert( c, next_total); } else { counts.insert( c, 1); } }); (max_char, max_value) } fn two(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if !s.contains(&c) { s.insert(c); r.push(c); } }); r } fn three(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if !s.contains(&c) { s.insert(c); } else { if !r.contains(c) { r.push(c); } } }); r } fn four(input1: &str, input2: &str) -> String { let mut r = String::new(); let mut p = 'z'; input2.chars().for_each( |c | { if !input1.contains(c) { if !(c == ' ' && p == ' ') { r.push(c); p = c; } } }); r } fn five(input1: &str, input2: &str) -> bool { let doubleup = format!("{}{}", input1, input1); doubleup.contains(input2) } fn six(input: &str) -> String { let mut r = String::new(); input.chars().for_each( | c | { r = format!("{}{}", c, r); }); r } fn seven(i1: &str, i2: &str) -> String { let mut r2 = String::from(i2); if i1.len() == 0 { return r2; } r2.push(i1.chars().last().unwrap()); let size_minus_one = i1.len() - 1; let r1 = &i1[..size_minus_one]; return seven(&r1, &r2); } fn eight(i: &str) -> Vec<String> { let mut r = vec!(); if i.len() == 1 { r.push(String::from(i)); return r; } for idx in 0..i.len() { let front = &i[0..idx]; let char = &i[idx..idx+1]; let end = &i[idx+1..]; let without = format!("{}{}", front, end); let subperms = eight(&without); for sp in subperms { r.push(format!("{}{}", char, sp)); } } r } fn nine(i: &str) -> char { for e in i.chars() { let mut count = 0; for se in i.chars() { if se == e { count = count + 1; } } if count == 1 { return e; } } '\0' } fn ten(i: &str) -> String { let mut r = String::new(); let mut is_first = true; for each in i.split(" ") { if is_first { is_first = false; r = 
String::from(each); } else { r = format!("{} {}", each, r); } } r } fn eleven(i1: &str, i2: &str) -> String { let mut solutions: Vec<String> = vec!(); // tuples of (char, original index of char in the input string) let mut pairs: Vec<(char, usize)> = i1.chars().enumerate().map( |e | -> (char, usize) { (e.1, e.0) }).filter(|p| i2.contains(p.0)).collect(); // println!("{:?}", pairs); //iterate the input string from left to right for _i in 0..pairs.len() { // p will be the match that we remove characters from // if p becomes empty we know we've matched let mut p = String::from(i2.clone()); // remember the first match from p as the head let mut head: Option<(char, usize)> = None; // remember the final match/char from p as the tail let mut tail: Option<(char, usize)> = None; // lets iterate over our pairs of (char, index) for e in &pairs { // if the pair is in p, // remove the character from p and // try and set the head and tail if p.contains(e.0) { p = p.replacen(e.0, "", 1); match head { None => { head = Some(*e) }, Some(_) if p.is_empty() => { tail = Some(*e); break }, Some(_) => {} } } } // if we found all the characters in i2 // we have a match, so head and tail will be populated // chop the string out of i1 and submit it as a solution if head != None && tail != None { let h = head.unwrap(); let t = tail.unwrap(); let solution = String::from(&i1[h.1..=t.1]); solutions.push(solution); } // remove the front character, and iterate again pairs.remove(0); } // println!("{:?}", solutions); // find the shortest solution let shortest = solutions.iter().fold(solutions[0].clone(), |acc, item| { if item.len() < acc.len() { item.clone() } else { acc } }); shortest } fn twelve(i1: &str, i2: &str) -> bool{ let i1 = i1.replace(" ", "").to_lowercase(); let mut i2 = i2.replace(" ", "").to_lowercase(); if i1.len() != i2.len() { return false; } for c in i1.chars() { i2 = i2.replacen(c, "", 1); } let r = i2.len() == 0; r } fn thirteen(i1: &str) -> bool { let i1 = i1.replace(" ", "").to_lowercase(); let i2 = i1.replace(" ", "").to_lowercase().chars().rev().collect::<String>(); let r = i1 == i2; r } fn fourteen(i: &str) -> isize
{ let mut i = String::from(i); let is_negative = i.contains('-'); if is_negative { i = i.replace("-", ""); } let mut r = 0; for c in i.chars() { let d = c.to_digit(10).unwrap(); r = d + (r * 10); } let mut r = r as isize; if is_negative { r = r * -1; } r }
identifier_body
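The middle of this record is `fourteen`, a hand-rolled string-to-integer parser. The technique is plain digit accumulation, `r = r * 10 + d`, with the sign stripped first. A small self-contained sketch, using `str::parse` as the idiomatic cross-check; the panic on non-digit input is a simplification of the original's `unwrap`.

```rust
// Digit accumulation as in `fourteen`: strip the sign, then fold each digit
// into the running value with r = r * 10 + d.
fn parse_isize(s: &str) -> isize {
    let negative = s.starts_with('-');
    let digits = s.trim_start_matches('-');
    let mut r: isize = 0;
    for c in digits.chars() {
        let d = c.to_digit(10).expect("decimal digits only") as isize;
        r = r * 10 + d;
    }
    if negative { -r } else { r }
}

fn main() {
    assert_eq!(parse_isize("-123000"), -123000);
    // The standard library does the same job with error handling built in.
    assert_eq!(parse_isize("-123000"), "-123000".parse::<isize>().unwrap());
}
```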
main.rs
println!("9) the first unrepeated char in '{}' is '{}'", test_nine_input, test_nine_output); let test_ten_input = "best is Rust"; let test_ten_output = ten(test_ten_input); println!("10) reversed sentence '{}' is '{}'", test_ten_input, test_ten_output); let test_eleven_input1 = "this is a test string"; let test_eleven_input2 = "tist"; let test_eleven_output = eleven(test_eleven_input1, test_eleven_input2); println!("11) smallest substring '{}' inside of '{}' is '{}'", test_eleven_input2, test_eleven_input1, test_eleven_output); let test_twelve_input1 = "Army"; let test_twelve_input2 = "Mary"; let test_twelve_output = twelve(test_twelve_input1, test_twelve_input2); maybe = ""; if !test_twelve_output { maybe = "not "; } println!("12) '{}' is {}an anagram of '{}'", test_twelve_input1, maybe, test_twelve_input2); let test_thirteen_input1 = "Racecar"; let test_thirteen_output = thirteen(test_thirteen_input1); maybe = ""; if !test_thirteen_output { maybe = "not "; } println!("13) '{}' is {}a palindrome of '{}'", test_thirteen_input1, maybe, test_thirteen_output); let test_fourteen_input = "-123000"; let test_fourteen_output = fourteen(test_fourteen_input); println!("14) string '{}' is the isize {}", test_fourteen_input, test_fourteen_output); let test_fifteen_input = "MDCCLXXVI"; let test_fifteen_output = fifteen(test_fifteen_input); println!("15) roman number '{}' equals arabic number {}", test_fifteen_input, test_fifteen_output); let test_eighteen_input = "\tthe rain in spain falls\n\tmainly in the plain \n "; let test_eighteen_output = eighteen(test_eighteen_input); println!("18) '{}' has {} words", test_eighteen_input, test_eighteen_output); let test_nineteen_input = "([{}][]([]()){})"; let test_nineteen_output = nineteen(test_nineteen_input); maybe = "has"; if !test_nineteen_output { maybe = "does not have"; } println!("19) '{}' {} balanced parens", test_nineteen_input, maybe); let test_twenty_input = "the rain in spain red rum sir is murder falls sometimes on the racecar but mainly in the plains"; let test_twenty_output = twenty(test_twenty_input); println!("20) '{}' has '{}' as the longest internal palindrome", test_twenty_input, test_twenty_output); let test_twentyone_input = 1776; //;"MDCCLXXVI"; let test_twentyone_output = twentyone(test_twentyone_input); println!("21) arabic number '{}' equals roman number {}", test_twentyone_input, test_twentyone_output); } fn one(input: &str) -> (char, i32) { // return the char that appears most and it's count // the first occurence of let mut counts = HashMap::new(); let mut max_char = 'z'; //input.chars().next().unwrap(); let mut max_value = 0; input.chars().rev().for_each( | c | { if counts.contains_key(&c) { let next_total = counts.get(&c).unwrap() + 1; if next_total >= max_value { max_char = c; max_value = next_total; } counts.insert( c, next_total); } else { counts.insert( c, 1); } }); (max_char, max_value) } fn two(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if !s.contains(&c) { s.insert(c); r.push(c); } }); r } fn three(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if !s.contains(&c) { s.insert(c); } else { if !r.contains(c) { r.push(c); } } }); r } fn four(input1: &str, input2: &str) -> String { let mut r = String::new(); let mut p = 'z'; input2.chars().for_each( |c | { if !input1.contains(c) { if !(c == ' ' && p == ' ') { r.push(c); p = c; } } }); r } fn five(input1: &str, 
input2: &str) -> bool { let doubleup = format!("{}{}", input1, input1); doubleup.contains(input2) } fn six(input: &str) -> String { let mut r = String::new(); input.chars().for_each( | c | { r = format!("{}{}", c, r); }); r } fn seven(i1: &str, i2: &str) -> String { let mut r2 = String::from(i2); if i1.len() == 0 { return r2; } r2.push(i1.chars().last().unwrap()); let size_minus_one = i1.len() - 1; let r1 = &i1[..size_minus_one]; return seven(&r1, &r2); } fn eight(i: &str) -> Vec<String> { let mut r = vec!(); if i.len() == 1 { r.push(String::from(i)); return r; } for idx in 0..i.len() { let front = &i[0..idx]; let char = &i[idx..idx+1]; let end = &i[idx+1..]; let without = format!("{}{}", front, end); let subperms = eight(&without); for sp in subperms { r.push(format!("{}{}", char, sp)); } } r } fn nine(i: &str) -> char { for e in i.chars() { let mut count = 0; for se in i.chars() { if se == e { count = count + 1; } } if count == 1 { return e; } } '\0' } fn ten(i: &str) -> String { let mut r = String::new(); let mut is_first = true; for each in i.split(" ") { if is_first { is_first = false; r = String::from(each); } else { r = format!("{} {}", each, r); } } r } fn eleven(i1: &str, i2: &str) -> String { let mut solutions: Vec<String> = vec!(); // tuples of (char, original index of char in the input string) let mut pairs: Vec<(char, usize)> = i1.chars().enumerate().map( |e | -> (char, usize) { (e.1, e.0) }).filter(|p| i2.contains(p.0)).collect(); // println!("{:?}", pairs); //iterate the input string from left to right for _i in 0..pairs.len() { // p will be the match that we remove characters from // if p becomes empty we know we've matched let mut p = String::from(i2.clone()); // remember the first match from p as the head let mut head: Option<(char, usize)> = None; // remember the final match/char from p as the tail let mut tail: Option<(char, usize)> = None; // lets iterate over our pairs of (char, index) for e in &pairs { // if the pair is in p, // remove the character from p and // try and set the head and tail if p.contains(e.0) { p = p.replacen(e.0, "", 1); match head { None => { head = Some(*e) }, Some(_) if p.is_empty() => { tail = Some(*e); break }, Some(_) => {} } } } // if we found all the characters in i2 // we have a match, so head and tail will be populated // chop the string out of i1 and submit it as a solution if head != None && tail != None { let h = head.unwrap(); let t = tail.unwrap(); let solution = String::from(&i1
println!("8) '{}' has {} permutations {:?}", test_eight_input, test_eight_output.len(), test_eight_output); let test_nine_input = "uprasupradupra"; let test_nine_output = nine(test_nine_input);
random_line_split
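Among the helpers in this record, `five` uses the doubling trick: a string `t` is a rotation of `s` exactly when `t` occurs inside `s + s`. A minimal sketch; note the explicit length check, which the record's version leaves out and which is needed to reject mere substrings.

```rust
// Rotation check by doubling, as in `five`: t is a rotation of s exactly when
// t occurs inside s + s. The length check rejects plain substrings.
fn is_rotation(s: &str, t: &str) -> bool {
    s.len() == t.len() && format!("{}{}", s, s).contains(t)
}

fn main() {
    assert!(is_rotation("erbottlewat", "waterbottle"));
    assert!(!is_rotation("abc", "ab")); // a substring, but not a rotation
}
```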
main.rs
!("{}{}", c, r); }); r } fn seven(i1: &str, i2: &str) -> String { let mut r2 = String::from(i2); if i1.len() == 0 { return r2; } r2.push(i1.chars().last().unwrap()); let size_minus_one = i1.len() - 1; let r1 = &i1[..size_minus_one]; return seven(&r1, &r2); } fn eight(i: &str) -> Vec<String> { let mut r = vec!(); if i.len() == 1 { r.push(String::from(i)); return r; } for idx in 0..i.len() { let front = &i[0..idx]; let char = &i[idx..idx+1]; let end = &i[idx+1..]; let without = format!("{}{}", front, end); let subperms = eight(&without); for sp in subperms { r.push(format!("{}{}", char, sp)); } } r } fn nine(i: &str) -> char { for e in i.chars() { let mut count = 0; for se in i.chars() { if se == e { count = count + 1; } } if count == 1 { return e; } } '\0' } fn ten(i: &str) -> String { let mut r = String::new(); let mut is_first = true; for each in i.split(" ") { if is_first { is_first = false; r = String::from(each); } else { r = format!("{} {}", each, r); } } r } fn eleven(i1: &str, i2: &str) -> String { let mut solutions: Vec<String> = vec!(); // tuples of (char, original index of char in the input string) let mut pairs: Vec<(char, usize)> = i1.chars().enumerate().map( |e | -> (char, usize) { (e.1, e.0) }).filter(|p| i2.contains(p.0)).collect(); // println!("{:?}", pairs); //iterate the input string from left to right for _i in 0..pairs.len() { // p will be the match that we remove characters from // if p becomes empty we know we've matched let mut p = String::from(i2.clone()); // remember the first match from p as the head let mut head: Option<(char, usize)> = None; // remember the final match/char from p as the tail let mut tail: Option<(char, usize)> = None; // lets iterate over our pairs of (char, index) for e in &pairs { // if the pair is in p, // remove the character from p and // try and set the head and tail if p.contains(e.0) { p = p.replacen(e.0, "", 1); match head { None => { head = Some(*e) }, Some(_) if p.is_empty() => { tail = Some(*e); break }, Some(_) => {} } } } // if we found all the characters in i2 // we have a match, so head and tail will be populated // chop the string out of i1 and submit it as a solution if head != None && tail != None { let h = head.unwrap(); let t = tail.unwrap(); let solution = String::from(&i1[h.1..=t.1]); solutions.push(solution); } // remove the front character, and iterate again pairs.remove(0); } // println!("{:?}", solutions); // find the shortest solution let shortest = solutions.iter().fold(solutions[0].clone(), |acc, item| { if item.len() < acc.len() { item.clone() } else { acc } }); shortest } fn twelve(i1: &str, i2: &str) -> bool{ let i1 = i1.replace(" ", "").to_lowercase(); let mut i2 = i2.replace(" ", "").to_lowercase(); if i1.len() != i2.len() { return false; } for c in i1.chars() { i2 = i2.replacen(c, "", 1); } let r = i2.len() == 0; r } fn thirteen(i1: &str) -> bool { let i1 = i1.replace(" ", "").to_lowercase(); let i2 = i1.replace(" ", "").to_lowercase().chars().rev().collect::<String>(); let r = i1 == i2; r } fn fourteen(i: &str) -> isize { let mut i = String::from(i); let is_negative = i.contains('-'); if is_negative { i = i.replace("-", ""); } let mut r = 0; for c in i.chars() { let d = c.to_digit(10).unwrap(); r = d + (r * 10); } let mut r = r as isize; if is_negative { r = r * -1; } r } fn fifteen(i: &str) -> isize { let mut i = i.to_uppercase(); let mut r = 0; let mut to_long = 0; while i.len() > 0 { for (rn, an) in ROMANS.iter().rev() { if i.starts_with(rn) { r = r + an; i = i.replacen(rn,"",1); break; } } to_long = 
to_long + 1; if to_long > 20 { return -1 } } r } const ROMANS: [(&str, isize); 30] = [ ("C", 100), ("X", 10), ("I", 1), ("CC", 200), ("XX", 20), ("II", 2), ("CCC", 300), ("XXX", 30), ("III", 3), ("CD", 400), ("XL", 40), ("IV", 4), ("D", 500), ("L", 50), ("V", 5), ("DC", 600), ("LX", 60), ("VI", 6), ("DCC", 700), ("LXX", 70), ("VII", 7), ("DCCC", 800), ("LXXX", 80), ("VIII", 8), ("CM", 900), ("XC", 90), ("IX", 9), ("M", 1000), ("MM", 2000), ("MMM", 3000)]; fn eighteen(i: &str) -> isize { let mut i = String::from(i.trim()); i = i.replace("\n", " "); i = i.replace("\t", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); i = i.replace(" ", " "); let count = i.split(' ').count() as isize; count } fn nineteen(i: &str) -> bool { let mut s:Vec<char> = vec!(); for c in i.chars() { match c { '(' => s.push('('), '[' => s.push('['), '{' => s.push('{'), ')' => if '(' != s.pop().unwrap() { return false }, ']' => if '[' != s.pop().unwrap() { return false }, '}' => if '{' != s.pop().unwrap() { return false }, _ => {} } } true } fn twenty(i: &str) -> String { let mut solutions = vec!(); for n in 2..(i.len()-1) { let e = palindrome_at(i, n); solutions.push(e); } // find the longest solution let longest = solutions.iter().fold( solutions[0].clone(), |acc, item| { if item.len() > acc.len() { item.clone() } else { acc } } ); longest } fn palindrome_at(input: &str, s: usize) -> String { let i:Vec<(char, usize)> = input.chars().enumerate() .map(|(i,c)| (c, i)) .filter(|p| p.0 != ' ') .collect(); let m = i.len(); let fs = std::cmp::min( i.len() - 2, std::cmp::max(1,s)); let mut l = fs; let mut r = fs; if i[l].0 != i[r].0
{ // we are not the same assume a center "pivot" character center r = r + 1; }
conditional_block
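The conditional block completed here is the pivot adjustment inside `palindrome_at`, which `twenty` calls at every position to find the longest internal palindrome. The same expand-around-center idea in a compact, self-contained form; the original's space filtering is omitted for brevity.

```rust
// Expand-around-center: every index is tried as the middle of an odd-length
// palindrome and as the left half of an even-length one; the widest hit wins.
fn longest_palindrome(s: &str) -> String {
    let chars: Vec<char> = s.chars().collect();
    // Returns the half-open range [start, end) of the palindrome grown from (l, r).
    let expand = |mut l: isize, mut r: isize| -> (usize, usize) {
        while l >= 0 && (r as usize) < chars.len() && chars[l as usize] == chars[r as usize] {
            l -= 1;
            r += 1;
        }
        ((l + 1) as usize, r as usize)
    };
    let mut best = (0usize, 0usize);
    for i in 0..chars.len() as isize {
        let candidates = [expand(i, i), expand(i, i + 1)];
        for &(l, r) in candidates.iter() {
            if r - l > best.1 - best.0 {
                best = (l, r);
            }
        }
    }
    chars[best.0..best.1].iter().collect()
}

fn main() {
    assert_eq!(longest_palindrome("xracecary"), "racecar");
}
```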
main.rs
test_ten_input, test_ten_output); let test_eleven_input1 = "this is a test string"; let test_eleven_input2 = "tist"; let test_eleven_output = eleven(test_eleven_input1, test_eleven_input2); println!("11) smallest substring '{}' inside of '{}' is '{}'", test_eleven_input2, test_eleven_input1, test_eleven_output); let test_twelve_input1 = "Army"; let test_twelve_input2 = "Mary"; let test_twelve_output = twelve(test_twelve_input1, test_twelve_input2); maybe = ""; if !test_twelve_output { maybe = "not "; } println!("12) '{}' is {}an anagram of '{}'", test_twelve_input1, maybe, test_twelve_input2); let test_thirteen_input1 = "Racecar"; let test_thirteen_output = thirteen(test_thirteen_input1); maybe = ""; if !test_thirteen_output { maybe = "not "; } println!("13) '{}' is {}a palindrome of '{}'", test_thirteen_input1, maybe, test_thirteen_output); let test_fourteen_input = "-123000"; let test_fourteen_output = fourteen(test_fourteen_input); println!("14) string '{}' is the isize {}", test_fourteen_input, test_fourteen_output); let test_fifteen_input = "MDCCLXXVI"; let test_fifteen_output = fifteen(test_fifteen_input); println!("15) roman number '{}' equals arabic number {}", test_fifteen_input, test_fifteen_output); let test_eighteen_input = "\tthe rain in spain falls\n\tmainly in the plain \n "; let test_eighteen_output = eighteen(test_eighteen_input); println!("18) '{}' has {} words", test_eighteen_input, test_eighteen_output); let test_nineteen_input = "([{}][]([]()){})"; let test_nineteen_output = nineteen(test_nineteen_input); maybe = "has"; if !test_nineteen_output { maybe = "does not have"; } println!("19) '{}' {} balanced parens", test_nineteen_input, maybe); let test_twenty_input = "the rain in spain red rum sir is murder falls sometimes on the racecar but mainly in the plains"; let test_twenty_output = twenty(test_twenty_input); println!("20) '{}' has '{}' as the longest internal palindrome", test_twenty_input, test_twenty_output); let test_twentyone_input = 1776; //;"MDCCLXXVI"; let test_twentyone_output = twentyone(test_twentyone_input); println!("21) arabic number '{}' equals roman number {}", test_twentyone_input, test_twentyone_output); } fn one(input: &str) -> (char, i32) { // return the char that appears most and it's count // the first occurence of let mut counts = HashMap::new(); let mut max_char = 'z'; //input.chars().next().unwrap(); let mut max_value = 0; input.chars().rev().for_each( | c | { if counts.contains_key(&c) { let next_total = counts.get(&c).unwrap() + 1; if next_total >= max_value { max_char = c; max_value = next_total; } counts.insert( c, next_total); } else { counts.insert( c, 1); } }); (max_char, max_value) } fn two(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if !s.contains(&c) { s.insert(c); r.push(c); } }); r } fn three(input: &str) -> String { let mut r = String::new(); let mut s = std::collections::HashSet::new(); input.chars().for_each( |c | { if !s.contains(&c) { s.insert(c); } else { if !r.contains(c) { r.push(c); } } }); r } fn four(input1: &str, input2: &str) -> String { let mut r = String::new(); let mut p = 'z'; input2.chars().for_each( |c | { if !input1.contains(c) { if !(c == ' ' && p == ' ') { r.push(c); p = c; } } }); r } fn five(input1: &str, input2: &str) -> bool { let doubleup = format!("{}{}", input1, input1); doubleup.contains(input2) } fn
(input: &str) -> String { let mut r = String::new(); input.chars().for_each( | c | { r = format!("{}{}", c, r); }); r } fn seven(i1: &str, i2: &str) -> String { let mut r2 = String::from(i2); if i1.len() == 0 { return r2; } r2.push(i1.chars().last().unwrap()); let size_minus_one = i1.len() - 1; let r1 = &i1[..size_minus_one]; return seven(&r1, &r2); } fn eight(i: &str) -> Vec<String> { let mut r = vec!(); if i.len() == 1 { r.push(String::from(i)); return r; } for idx in 0..i.len() { let front = &i[0..idx]; let char = &i[idx..idx+1]; let end = &i[idx+1..]; let without = format!("{}{}", front, end); let subperms = eight(&without); for sp in subperms { r.push(format!("{}{}", char, sp)); } } r } fn nine(i: &str) -> char { for e in i.chars() { let mut count = 0; for se in i.chars() { if se == e { count = count + 1; } } if count == 1 { return e; } } '\0' } fn ten(i: &str) -> String { let mut r = String::new(); let mut is_first = true; for each in i.split(" ") { if is_first { is_first = false; r = String::from(each); } else { r = format!("{} {}", each, r); } } r } fn eleven(i1: &str, i2: &str) -> String { let mut solutions: Vec<String> = vec!(); // tuples of (char, original index of char in the input string) let mut pairs: Vec<(char, usize)> = i1.chars().enumerate().map( |e | -> (char, usize) { (e.1, e.0) }).filter(|p| i2.contains(p.0)).collect(); // println!("{:?}", pairs); //iterate the input string from left to right for _i in 0..pairs.len() { // p will be the match that we remove characters from // if p becomes empty we know we've matched let mut p = String::from(i2.clone()); // remember the first match from p as the head let mut head: Option<(char, usize)> = None; // remember the final match/char from p as the tail let mut tail: Option<(char, usize)> = None; // lets iterate over our pairs of (char, index) for e in &pairs { // if the pair is in p, // remove the character from p and // try and set the head and tail if p.contains(e.0) { p = p.replacen(e.0, "", 1); match head { None => { head = Some(*e) }, Some(_) if p.is_empty() => { tail = Some(*e); break }, Some(_) => {} } } } // if we found all the characters in i2 // we have a match, so head and tail will be populated // chop the string out of i1 and submit it as a solution if head != None && tail != None { let h = head.unwrap(); let t = tail.unwrap(); let solution = String::from(&i1[h.1..=t.1]); solutions.push(solution); } // remove the front character, and iterate again pairs.remove(0); } // println!("{:?}", solutions); // find the shortest solution let shortest = solutions.iter().fold(solutions[0].clone(), |acc, item| { if item.len() < acc.len() { item.clone() } else { acc } }); shortest } fn twelve(i1: &str, i2: &
six
identifier_name
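The identifier completed in this record is `six`, which reverses a string by repeatedly re-formatting an accumulator. For comparison, an equivalent one-pass version; the per-`char` caveat applies to both.

```rust
// One-pass reverse; `six` in the record builds the same result by repeated
// `format!` calls, which reallocates the accumulator on every character.
fn six(input: &str) -> String {
    input.chars().rev().collect()
}

fn main() {
    assert_eq!(six("Rust"), "tsuR");
    // Caveat shared with the original: this reverses `char`s (Unicode scalar
    // values), so graphemes composed of several scalars can come out reordered.
}
```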
sqlite.go
1.starts_at, a1.ends_at, a1.updated_at, a1.timeout FROM alerts AS a1 LEFT OUTER JOIN alerts AS a2 ON a1.fingerprint = a2.fingerprint AND a1.updated_at < a2.updated_at WHERE a2.fingerprint IS NULL; `) if err != nil { return nil, err } var alerts []*types.Alert for rows.Next() { var ( labels []byte annotations []byte al types.Alert ) if err := rows.Scan( &labels, &annotations, &al.StartsAt, &al.EndsAt, &al.UpdatedAt, &al.Timeout, ); err != nil { return nil, err } if err := json.Unmarshal(labels, &al.Labels); err != nil { return nil, err } if err := json.Unmarshal(annotations, &al.Annotations); err != nil { return nil, err } alerts = append(alerts, &al) } if err := rows.Err(); err != nil { return nil, err } return alerts, nil } // Get implements the Alerts interface. func (a *Alerts) Get(model.Fingerprint) (*types.Alert, error) { return nil, nil } // Put implements the Alerts interface. func (a *Alerts) Put(alerts ...*types.Alert) error { dbmtx.Lock() defer dbmtx.Unlock() tx, err := a.db.Begin() if err != nil { return err } // The insert invariant requires that there are no two alerts with the same // fingerprint that have overlapping activity range ([StartsAt:EndsAt]). // Such alerts are merged into a single one with the union of both intervals // as its new activity interval. // The exact merge procedure is defined on the Alert structure. Here, we just // care about finding intersecting alerts for each new inserts, deleting them // if existant, and insert the new alert we retrieved by merging. overlap, err := tx.Prepare(` SELECT id, annotations, starts_at, ends_at, updated_at, timeout FROM alerts WHERE fingerprint == $1 AND ( (starts_at <= $2 AND ends_at >= $2) OR (starts_at <= $3 AND ends_at >= $3) ) `) if err != nil { tx.Rollback() return err } defer overlap.Close() delOverlap, err := tx.Prepare(` DELETE FROM alerts WHERE id IN ( SELECT id FROM alerts WHERE fingerprint == $1 AND ( (starts_at <= $2 AND ends_at >= $2) OR (starts_at <= $3 AND ends_at >= $3) ) ) `) if err != nil { tx.Rollback() return err } defer delOverlap.Close() insert, err := tx.Prepare(` INSERT INTO alerts(fingerprint, labels, annotations, starts_at, ends_at, updated_at, timeout) VALUES ($1, $2, $3, $4, $5, $6, $7) `) if err != nil { tx.Rollback() return err } defer insert.Close() for _, alert := range alerts { fp := alert.Fingerprint() // Retrieve all intersecting alerts and delete them. olaps, err := overlap.Query(int64(fp), alert.StartsAt, alert.EndsAt) if err != nil { tx.Rollback() return err } var ( overlapIDs []int64 merges []*types.Alert ) for olaps.Next() { var ( id int64 na types.Alert ann []byte ) if err := olaps.Scan( &id, &ann, &na.StartsAt, &na.EndsAt, &na.UpdatedAt, &na.Timeout, ); err != nil { tx.Rollback() return err } if err := json.Unmarshal(ann, &na.Annotations); err != nil { tx.Rollback() return err } na.Labels = alert.Labels merges = append(merges, &na) overlapIDs = append(overlapIDs, id) } if err := olaps.Err(); err != nil { tx.Rollback() return err } // Merge them. for _, ma := range merges { alert = alert.Merge(ma) } // Delete the old ones. if _, err := delOverlap.Exec(int64(fp), alert.StartsAt, alert.EndsAt); err != nil { tx.Rollback() return err } // Insert the final alert. 
labels, err := json.Marshal(alert.Labels) if err != nil { tx.Rollback() return err } annotations, err := json.Marshal(alert.Annotations) if err != nil { tx.Rollback() return err } _, err = insert.Exec( int64(fp), labels, annotations, alert.StartsAt, alert.EndsAt, alert.UpdatedAt, alert.Timeout, ) if err != nil { tx.Rollback() return err } a.mtx.RLock() for _, ch := range a.listeners { ch <- alert } a.mtx.RUnlock() } tx.Commit() return nil } const createNotifyInfoTable = ` CREATE TABLE IF NOT EXISTS notify_info ( alert bigint, receiver text, resolved integer, timestamp timestamp ); CREATE INDEX IF NOT EXISTS notify_done ON notify_info (resolved); CREATE UNIQUE INDEX IF NOT EXISTS alert_receiver ON notify_info (alert,receiver); ` type Notifies struct { db *sql.DB } func NewNotifies(db *sql.DB) (*Notifies, error) { dbmtx.Lock() defer dbmtx.Unlock() tx, err := db.Begin() if err != nil { return nil, err } if _, err := tx.Exec(createNotifyInfoTable); err != nil { tx.Rollback() return nil, err } tx.Commit() return &Notifies{db: db}, nil } // Get implements the Notifies interface. func (n *Notifies) Get(dest string, fps ...model.Fingerprint) ([]*types.NotifyInfo, error) { dbmtx.Lock() defer dbmtx.Unlock() var result []*types.NotifyInfo for _, fp := range fps { row := n.db.QueryRow(` SELECT alert, receiver, resolved, timestamp FROM notify_info WHERE receiver == $1 AND alert == $2 `, dest, int64(fp)) var alertFP int64 var ni types.NotifyInfo err := row.Scan( &alertFP, &ni.Receiver, &ni.Resolved, &ni.Timestamp, ) if err == sql.ErrNoRows { result = append(result, nil) continue } if err != nil { return nil, err } ni.Alert = model.Fingerprint(alertFP) result = append(result, &ni) } return result, nil } // Set implements the Notifies interface. func (n *Notifies) Set(ns ...*types.NotifyInfo) error { dbmtx.Lock() defer dbmtx.Unlock() tx, err := n.db.Begin() if err != nil { return err } insert, err := tx.Prepare(` INSERT INTO notify_info(alert, receiver, resolved, timestamp) VALUES ($1, $2, $3, $4); `) if err != nil { tx.Rollback() return err } defer insert.Close() del, err := tx.Prepare(` DELETE FROM notify_info WHERE alert == $1 AND receiver == $2 `) if err != nil { tx.Rollback() return err } defer del.Close() for _, ni := range ns { if _, err := del.Exec(int64(ni.Alert), ni.Receiver); err != nil { tx.Rollback() return fmt.Errorf("deleting old notify failed: %s", err) } if _, err := insert.Exec( int64(ni.Alert), ni.Receiver, ni.Resolved, ni.Timestamp, ); err != nil { tx.Rollback() return fmt.Errorf("inserting new notify failed: %s", err) } } tx.Commit() return nil } const createSilencesTable = ` CREATE TABLE IF NOT EXISTS silences ( id integer PRIMARY KEY AUTOINCREMENT, matchers blob, starts_at timestamp, ends_at timestamp, created_at timestamp, created_by text, comment text ); CREATE INDEX IF NOT EXISTS silences_start ON silences (starts_at); CREATE INDEX IF NOT EXISTS silences_end ON silences (ends_at); ` type Silences struct { db *sql.DB marker types.Marker } // NewSilences returns a new Silences based on the provided SQL DB. func NewSilences(db *sql.DB, mk types.Marker) (*Silences, error) { dbmtx.Lock()
defer dbmtx.Unlock()
random_line_split
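The Put method in the sqlite.go record above enforces the invariant spelled out in its comment: alerts with the same fingerprint whose activity ranges intersect are folded into a single alert covering the union of both intervals. The following is a minimal Python sketch of that overlap-and-merge step, mirroring the SQL condition used by the overlap query; it is an illustration, not the Alertmanager code.

# Minimal sketch of the overlap invariant: intervals are (starts_at, ends_at) pairs.

def overlaps(new, existing):
    # Mirrors: (starts_at <= $2 AND ends_at >= $2) OR (starts_at <= $3 AND ends_at >= $3),
    # i.e. the new alert's start or end falls inside the existing interval.
    return existing[0] <= new[0] <= existing[1] or existing[0] <= new[1] <= existing[1]

def merge(a, b):
    # Union of the two activity intervals, as described in the comment above.
    return (min(a[0], b[0]), max(a[1], b[1]))

stored = [(0, 5), (10, 12)]
incoming = (4, 11)
for iv in stored:
    if overlaps(incoming, iv):
        incoming = merge(incoming, iv)
print(incoming)  # (0, 12): both stored intervals intersect the new one and are merged in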
sqlite.go
done, err) } // GetPending implements the Alerts interface. func (a *Alerts)
() provider.AlertIterator { var ( ch = make(chan *types.Alert, 200) done = make(chan struct{}) ) alerts, err := a.getPending() go func() { defer close(ch) for _, a := range alerts { select { case ch <- a: case <-done: return } } }() return provider.NewAlertIterator(ch, done, err) } func (a *Alerts) getPending() ([]*types.Alert, error) { dbmtx.Lock() defer dbmtx.Unlock() // Get the last instance for each alert. rows, err := a.db.Query(` SELECT a1.labels, a1.annotations, a1.starts_at, a1.ends_at, a1.updated_at, a1.timeout FROM alerts AS a1 LEFT OUTER JOIN alerts AS a2 ON a1.fingerprint = a2.fingerprint AND a1.updated_at < a2.updated_at WHERE a2.fingerprint IS NULL; `) if err != nil { return nil, err } var alerts []*types.Alert for rows.Next() { var ( labels []byte annotations []byte al types.Alert ) if err := rows.Scan( &labels, &annotations, &al.StartsAt, &al.EndsAt, &al.UpdatedAt, &al.Timeout, ); err != nil { return nil, err } if err := json.Unmarshal(labels, &al.Labels); err != nil { return nil, err } if err := json.Unmarshal(annotations, &al.Annotations); err != nil { return nil, err } alerts = append(alerts, &al) } if err := rows.Err(); err != nil { return nil, err } return alerts, nil } // Get implements the Alerts interface. func (a *Alerts) Get(model.Fingerprint) (*types.Alert, error) { return nil, nil } // Put implements the Alerts interface. func (a *Alerts) Put(alerts ...*types.Alert) error { dbmtx.Lock() defer dbmtx.Unlock() tx, err := a.db.Begin() if err != nil { return err } // The insert invariant requires that there are no two alerts with the same // fingerprint that have overlapping activity range ([StartsAt:EndsAt]). // Such alerts are merged into a single one with the union of both intervals // as its new activity interval. // The exact merge procedure is defined on the Alert structure. Here, we just // care about finding intersecting alerts for each new inserts, deleting them // if existant, and insert the new alert we retrieved by merging. overlap, err := tx.Prepare(` SELECT id, annotations, starts_at, ends_at, updated_at, timeout FROM alerts WHERE fingerprint == $1 AND ( (starts_at <= $2 AND ends_at >= $2) OR (starts_at <= $3 AND ends_at >= $3) ) `) if err != nil { tx.Rollback() return err } defer overlap.Close() delOverlap, err := tx.Prepare(` DELETE FROM alerts WHERE id IN ( SELECT id FROM alerts WHERE fingerprint == $1 AND ( (starts_at <= $2 AND ends_at >= $2) OR (starts_at <= $3 AND ends_at >= $3) ) ) `) if err != nil { tx.Rollback() return err } defer delOverlap.Close() insert, err := tx.Prepare(` INSERT INTO alerts(fingerprint, labels, annotations, starts_at, ends_at, updated_at, timeout) VALUES ($1, $2, $3, $4, $5, $6, $7) `) if err != nil { tx.Rollback() return err } defer insert.Close() for _, alert := range alerts { fp := alert.Fingerprint() // Retrieve all intersecting alerts and delete them. olaps, err := overlap.Query(int64(fp), alert.StartsAt, alert.EndsAt) if err != nil { tx.Rollback() return err } var ( overlapIDs []int64 merges []*types.Alert ) for olaps.Next() { var ( id int64 na types.Alert ann []byte ) if err := olaps.Scan( &id, &ann, &na.StartsAt, &na.EndsAt, &na.UpdatedAt, &na.Timeout, ); err != nil { tx.Rollback() return err } if err := json.Unmarshal(ann, &na.Annotations); err != nil { tx.Rollback() return err } na.Labels = alert.Labels merges = append(merges, &na) overlapIDs = append(overlapIDs, id) } if err := olaps.Err(); err != nil { tx.Rollback() return err } // Merge them. 
for _, ma := range merges { alert = alert.Merge(ma) } // Delete the old ones. if _, err := delOverlap.Exec(int64(fp), alert.StartsAt, alert.EndsAt); err != nil { tx.Rollback() return err } // Insert the final alert. labels, err := json.Marshal(alert.Labels) if err != nil { tx.Rollback() return err } annotations, err := json.Marshal(alert.Annotations) if err != nil { tx.Rollback() return err } _, err = insert.Exec( int64(fp), labels, annotations, alert.StartsAt, alert.EndsAt, alert.UpdatedAt, alert.Timeout, ) if err != nil { tx.Rollback() return err } a.mtx.RLock() for _, ch := range a.listeners { ch <- alert } a.mtx.RUnlock() } tx.Commit() return nil } const createNotifyInfoTable = ` CREATE TABLE IF NOT EXISTS notify_info ( alert bigint, receiver text, resolved integer, timestamp timestamp ); CREATE INDEX IF NOT EXISTS notify_done ON notify_info (resolved); CREATE UNIQUE INDEX IF NOT EXISTS alert_receiver ON notify_info (alert,receiver); ` type Notifies struct { db *sql.DB } func NewNotifies(db *sql.DB) (*Notifies, error) { dbmtx.Lock() defer dbmtx.Unlock() tx, err := db.Begin() if err != nil { return nil, err } if _, err := tx.Exec(createNotifyInfoTable); err != nil { tx.Rollback() return nil, err } tx.Commit() return &Notifies{db: db}, nil } // Get implements the Notifies interface. func (n *Notifies) Get(dest string, fps ...model.Fingerprint) ([]*types.NotifyInfo, error) { dbmtx.Lock() defer dbmtx.Unlock() var result []*types.NotifyInfo for _, fp := range fps { row := n.db.QueryRow(` SELECT alert, receiver, resolved, timestamp FROM notify_info WHERE receiver == $1 AND alert == $2 `, dest, int64(fp)) var alertFP int64 var ni types.NotifyInfo err := row.Scan( &alertFP, &ni.Receiver, &ni.Resolved, &ni.Timestamp, ) if err == sql.ErrNoRows { result = append(result, nil) continue } if err != nil { return nil, err } ni.Alert = model.Fingerprint(alertFP) result = append(result, &ni) } return result, nil } // Set implements the Notifies interface. func (n *Notifies) Set(ns ...*types.NotifyInfo) error { dbmtx.Lock() defer dbmtx.Unlock() tx, err := n.db.Begin() if err != nil { return err } insert, err := tx.Prepare(` INSERT INTO notify_info(alert, receiver, resolved, timestamp) VALUES ($1, $2, $3, $4); `) if err != nil { tx.Rollback() return err } defer insert.Close() del, err := tx.Prepare(` DELETE FROM notify_info WHERE alert == $1 AND receiver == $2 `) if err != nil { tx.Rollback() return err } defer del.Close() for _, ni := range ns { if _, err := del.Exec(int64(ni.Alert), ni.Receiver); err != nil { tx.Rollback() return fmt.Errorf("deleting old notify failed: %s", err) } if _, err := insert.Exec( int64(ni.Alert), ni.Receiver, ni.Resolved, ni.Timestamp, ); err != nil { tx.Rollback() return fmt.Errorf
GetPending
identifier_name
sqlite.go
<-done }() return provider.NewAlertIterator(ch, done, err) } // GetPending implements the Alerts interface. func (a *Alerts) GetPending() provider.AlertIterator { var ( ch = make(chan *types.Alert, 200) done = make(chan struct{}) ) alerts, err := a.getPending() go func() { defer close(ch) for _, a := range alerts { select { case ch <- a: case <-done: return } } }() return provider.NewAlertIterator(ch, done, err) } func (a *Alerts) getPending() ([]*types.Alert, error) { dbmtx.Lock() defer dbmtx.Unlock() // Get the last instance for each alert. rows, err := a.db.Query(` SELECT a1.labels, a1.annotations, a1.starts_at, a1.ends_at, a1.updated_at, a1.timeout FROM alerts AS a1 LEFT OUTER JOIN alerts AS a2 ON a1.fingerprint = a2.fingerprint AND a1.updated_at < a2.updated_at WHERE a2.fingerprint IS NULL; `) if err != nil { return nil, err } var alerts []*types.Alert for rows.Next() { var ( labels []byte annotations []byte al types.Alert ) if err := rows.Scan( &labels, &annotations, &al.StartsAt, &al.EndsAt, &al.UpdatedAt, &al.Timeout, ); err != nil { return nil, err } if err := json.Unmarshal(labels, &al.Labels); err != nil { return nil, err } if err := json.Unmarshal(annotations, &al.Annotations); err != nil { return nil, err } alerts = append(alerts, &al) } if err := rows.Err(); err != nil { return nil, err } return alerts, nil } // Get implements the Alerts interface. func (a *Alerts) Get(model.Fingerprint) (*types.Alert, error) { return nil, nil } // Put implements the Alerts interface. func (a *Alerts) Put(alerts ...*types.Alert) error { dbmtx.Lock() defer dbmtx.Unlock() tx, err := a.db.Begin() if err != nil { return err } // The insert invariant requires that there are no two alerts with the same // fingerprint that have overlapping activity range ([StartsAt:EndsAt]). // Such alerts are merged into a single one with the union of both intervals // as its new activity interval. // The exact merge procedure is defined on the Alert structure. Here, we just // care about finding intersecting alerts for each new inserts, deleting them // if existant, and insert the new alert we retrieved by merging. overlap, err := tx.Prepare(` SELECT id, annotations, starts_at, ends_at, updated_at, timeout FROM alerts WHERE fingerprint == $1 AND ( (starts_at <= $2 AND ends_at >= $2) OR (starts_at <= $3 AND ends_at >= $3) ) `) if err != nil { tx.Rollback() return err } defer overlap.Close() delOverlap, err := tx.Prepare(` DELETE FROM alerts WHERE id IN ( SELECT id FROM alerts WHERE fingerprint == $1 AND ( (starts_at <= $2 AND ends_at >= $2) OR (starts_at <= $3 AND ends_at >= $3) ) ) `) if err != nil { tx.Rollback() return err } defer delOverlap.Close() insert, err := tx.Prepare(` INSERT INTO alerts(fingerprint, labels, annotations, starts_at, ends_at, updated_at, timeout) VALUES ($1, $2, $3, $4, $5, $6, $7) `) if err != nil { tx.Rollback() return err } defer insert.Close() for _, alert := range alerts { fp := alert.Fingerprint() // Retrieve all intersecting alerts and delete them. 
olaps, err := overlap.Query(int64(fp), alert.StartsAt, alert.EndsAt) if err != nil { tx.Rollback() return err } var ( overlapIDs []int64 merges []*types.Alert ) for olaps.Next() { var ( id int64 na types.Alert ann []byte ) if err := olaps.Scan( &id, &ann, &na.StartsAt, &na.EndsAt, &na.UpdatedAt, &na.Timeout, ); err != nil { tx.Rollback() return err } if err := json.Unmarshal(ann, &na.Annotations); err != nil { tx.Rollback() return err } na.Labels = alert.Labels merges = append(merges, &na) overlapIDs = append(overlapIDs, id) } if err := olaps.Err(); err != nil { tx.Rollback() return err } // Merge them. for _, ma := range merges { alert = alert.Merge(ma) } // Delete the old ones. if _, err := delOverlap.Exec(int64(fp), alert.StartsAt, alert.EndsAt); err != nil { tx.Rollback() return err } // Insert the final alert. labels, err := json.Marshal(alert.Labels) if err != nil { tx.Rollback() return err } annotations, err := json.Marshal(alert.Annotations) if err != nil { tx.Rollback() return err } _, err = insert.Exec( int64(fp), labels, annotations, alert.StartsAt, alert.EndsAt, alert.UpdatedAt, alert.Timeout, ) if err != nil { tx.Rollback() return err } a.mtx.RLock() for _, ch := range a.listeners { ch <- alert } a.mtx.RUnlock() } tx.Commit() return nil } const createNotifyInfoTable = ` CREATE TABLE IF NOT EXISTS notify_info ( alert bigint, receiver text, resolved integer, timestamp timestamp ); CREATE INDEX IF NOT EXISTS notify_done ON notify_info (resolved); CREATE UNIQUE INDEX IF NOT EXISTS alert_receiver ON notify_info (alert,receiver); ` type Notifies struct { db *sql.DB } func NewNotifies(db *sql.DB) (*Notifies, error) { dbmtx.Lock() defer dbmtx.Unlock() tx, err := db.Begin() if err != nil { return nil, err } if _, err := tx.Exec(createNotifyInfoTable); err != nil { tx.Rollback() return nil, err } tx.Commit() return &Notifies{db: db}, nil } // Get implements the Notifies interface. func (n *Notifies) Get(dest string, fps ...model.Fingerprint) ([]*types.NotifyInfo, error) { dbmtx.Lock() defer dbmtx.Unlock() var result []*types.NotifyInfo for _, fp := range fps { row := n.db.QueryRow(` SELECT alert, receiver, resolved, timestamp FROM notify_info WHERE receiver == $1 AND alert == $2 `, dest, int64(fp)) var alertFP int64 var ni types.NotifyInfo err := row.Scan( &alertFP, &ni.Receiver, &ni.Resolved, &ni.Timestamp, ) if err == sql.ErrNoRows { result = append(result, nil) continue } if err != nil { return nil, err } ni.Alert = model.Fingerprint(alertFP) result = append(result, &ni) } return result, nil } // Set implements the Notifies interface. func (n *Notifies) Set(ns ...*types.NotifyInfo) error { dbmtx.Lock() defer dbmtx.Unlock() tx, err := n.db.Begin() if err != nil { return err } insert, err := tx.Prepare(` INSERT INTO notify_info(alert, receiver, resolved, timestamp) VALUES ($1, $2, $3, $4); `) if err != nil { tx.Rollback() return err } defer insert.Close() del, err := tx.Prepare(` DELETE FROM notify_info WHERE alert == $1 AND receiver == $2 `) if err != nil { tx.Rollback() return err } defer del.Close() for _, ni := range ns { if _, err := del.Exec(int64(ni.Alert), ni.Receiver); err != nil { tx.Rollback() return fmt.Errorf("deleting old notify failed: %s", err) } if _, err := insert.Exec( int
{ select { case ch <- a: case <-done: return } }
conditional_block
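The masked block in this record is the select inside GetPending's producer goroutine: it pushes each pending alert onto a bounded channel but bails out as soon as the consumer signals it is done. A loose Python analogue of that producer/early-termination pattern, using a queue and an event in place of channels:

# Loose analogue only; the real code uses Go channels and provider.NewAlertIterator.
import queue
import threading

def get_pending(alerts):
    ch = queue.Queue(maxsize=200)   # bounded, like make(chan *types.Alert, 200)
    done = threading.Event()        # stands in for the done channel

    def produce():
        for a in alerts:
            if done.is_set():       # consumer stopped iterating: give up early
                return
            ch.put(a)
        ch.put(None)                # sentinel marking the end of the stream

    threading.Thread(target=produce, daemon=True).start()
    return ch, done

ch, done = get_pending(["alert-1", "alert-2", "alert-3"])
print(ch.get())  # alert-1
done.set()       # roughly what closing the done channel does in the Go version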
sqlite.go
) `) if err != nil { tx.Rollback() return err } defer delOverlap.Close() insert, err := tx.Prepare(` INSERT INTO alerts(fingerprint, labels, annotations, starts_at, ends_at, updated_at, timeout) VALUES ($1, $2, $3, $4, $5, $6, $7) `) if err != nil { tx.Rollback() return err } defer insert.Close() for _, alert := range alerts { fp := alert.Fingerprint() // Retrieve all intersecting alerts and delete them. olaps, err := overlap.Query(int64(fp), alert.StartsAt, alert.EndsAt) if err != nil { tx.Rollback() return err } var ( overlapIDs []int64 merges []*types.Alert ) for olaps.Next() { var ( id int64 na types.Alert ann []byte ) if err := olaps.Scan( &id, &ann, &na.StartsAt, &na.EndsAt, &na.UpdatedAt, &na.Timeout, ); err != nil { tx.Rollback() return err } if err := json.Unmarshal(ann, &na.Annotations); err != nil { tx.Rollback() return err } na.Labels = alert.Labels merges = append(merges, &na) overlapIDs = append(overlapIDs, id) } if err := olaps.Err(); err != nil { tx.Rollback() return err } // Merge them. for _, ma := range merges { alert = alert.Merge(ma) } // Delete the old ones. if _, err := delOverlap.Exec(int64(fp), alert.StartsAt, alert.EndsAt); err != nil { tx.Rollback() return err } // Insert the final alert. labels, err := json.Marshal(alert.Labels) if err != nil { tx.Rollback() return err } annotations, err := json.Marshal(alert.Annotations) if err != nil { tx.Rollback() return err } _, err = insert.Exec( int64(fp), labels, annotations, alert.StartsAt, alert.EndsAt, alert.UpdatedAt, alert.Timeout, ) if err != nil { tx.Rollback() return err } a.mtx.RLock() for _, ch := range a.listeners { ch <- alert } a.mtx.RUnlock() } tx.Commit() return nil } const createNotifyInfoTable = ` CREATE TABLE IF NOT EXISTS notify_info ( alert bigint, receiver text, resolved integer, timestamp timestamp ); CREATE INDEX IF NOT EXISTS notify_done ON notify_info (resolved); CREATE UNIQUE INDEX IF NOT EXISTS alert_receiver ON notify_info (alert,receiver); ` type Notifies struct { db *sql.DB } func NewNotifies(db *sql.DB) (*Notifies, error) { dbmtx.Lock() defer dbmtx.Unlock() tx, err := db.Begin() if err != nil { return nil, err } if _, err := tx.Exec(createNotifyInfoTable); err != nil { tx.Rollback() return nil, err } tx.Commit() return &Notifies{db: db}, nil } // Get implements the Notifies interface. func (n *Notifies) Get(dest string, fps ...model.Fingerprint) ([]*types.NotifyInfo, error) { dbmtx.Lock() defer dbmtx.Unlock() var result []*types.NotifyInfo for _, fp := range fps { row := n.db.QueryRow(` SELECT alert, receiver, resolved, timestamp FROM notify_info WHERE receiver == $1 AND alert == $2 `, dest, int64(fp)) var alertFP int64 var ni types.NotifyInfo err := row.Scan( &alertFP, &ni.Receiver, &ni.Resolved, &ni.Timestamp, ) if err == sql.ErrNoRows { result = append(result, nil) continue } if err != nil { return nil, err } ni.Alert = model.Fingerprint(alertFP) result = append(result, &ni) } return result, nil } // Set implements the Notifies interface. 
func (n *Notifies) Set(ns ...*types.NotifyInfo) error { dbmtx.Lock() defer dbmtx.Unlock() tx, err := n.db.Begin() if err != nil { return err } insert, err := tx.Prepare(` INSERT INTO notify_info(alert, receiver, resolved, timestamp) VALUES ($1, $2, $3, $4); `) if err != nil { tx.Rollback() return err } defer insert.Close() del, err := tx.Prepare(` DELETE FROM notify_info WHERE alert == $1 AND receiver == $2 `) if err != nil { tx.Rollback() return err } defer del.Close() for _, ni := range ns { if _, err := del.Exec(int64(ni.Alert), ni.Receiver); err != nil { tx.Rollback() return fmt.Errorf("deleting old notify failed: %s", err) } if _, err := insert.Exec( int64(ni.Alert), ni.Receiver, ni.Resolved, ni.Timestamp, ); err != nil { tx.Rollback() return fmt.Errorf("inserting new notify failed: %s", err) } } tx.Commit() return nil } const createSilencesTable = ` CREATE TABLE IF NOT EXISTS silences ( id integer PRIMARY KEY AUTOINCREMENT, matchers blob, starts_at timestamp, ends_at timestamp, created_at timestamp, created_by text, comment text ); CREATE INDEX IF NOT EXISTS silences_start ON silences (starts_at); CREATE INDEX IF NOT EXISTS silences_end ON silences (ends_at); ` type Silences struct { db *sql.DB marker types.Marker } // NewSilences returns a new Silences based on the provided SQL DB. func NewSilences(db *sql.DB, mk types.Marker) (*Silences, error) { dbmtx.Lock() defer dbmtx.Unlock() tx, err := db.Begin() if err != nil { return nil, err } if _, err := tx.Exec(createSilencesTable); err != nil { tx.Rollback() return nil, err } tx.Commit() return &Silences{db: db, marker: mk}, nil } // Mutes implements the Muter interface. func (s *Silences) Mutes(lset model.LabelSet) bool { sils, err := s.All() if err != nil { log.Errorf("retrieving silences failed: %s", err) // In doubt, do not silence anything. return false } for _, sil := range sils { if sil.Mutes(lset) { s.marker.SetSilenced(lset.Fingerprint(), sil.ID) return true } } s.marker.SetSilenced(lset.Fingerprint()) return false } // All implements the Silences interface. func (s *Silences) All() ([]*types.Silence, error) { dbmtx.Lock() defer dbmtx.Unlock() rows, err := s.db.Query(` SELECT id, matchers, starts_at, ends_at, created_at, created_by, comment FROM silences ORDER BY starts_at DESC `) if err != nil { return nil, err } defer rows.Close() var silences []*types.Silence for rows.Next() { var ( sil model.Silence matchers []byte ) if err := rows.Scan( &sil.ID, &matchers, &sil.StartsAt, &sil.EndsAt, &sil.CreatedAt, &sil.CreatedBy, &sil.Comment, ); err != nil { return nil, err } if err := json.Unmarshal(matchers, &sil.Matchers); err != nil { return nil, err } silences = append(silences, types.NewSilence(&sil)) } if err := rows.Err(); err != nil { return nil, err } return silences, nil } // Set impelements the Silences interface. func (s *Silences) Set(sil *types.Silence) (uint64, error)
{ dbmtx.Lock() defer dbmtx.Unlock() mb, err := json.Marshal(sil.Silence.Matchers) if err != nil { return 0, err } tx, err := s.db.Begin() if err != nil { return 0, err } res, err := tx.Exec(` INSERT INTO silences(matchers, starts_at, ends_at, created_at, created_by, comment) VALUES ($1, $2, $3, $4, $5, $6) `, mb, sil.StartsAt,
identifier_body
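The same record also contains the Silences provider, whose Mutes method walks every stored silence, marks the label set's fingerprint with the first matching silence ID, and otherwise clears the mark. A hedged Python sketch of that logic; the silence representation here (an id plus a predicate) is an assumption made only for illustration.

# Hedged sketch of the Mutes logic; the real types come from the Alertmanager codebase.

def mutes(silences, marker, labels):
    key = frozenset(labels.items())
    for sil in silences:
        if sil["matches"](labels):
            marker[key] = sil["id"]     # like SetSilenced(fingerprint, silence id)
            return True
    marker.pop(key, None)               # SetSilenced(fingerprint) with no id clears the mark
    return False

marker = {}
silences = [{"id": 7, "matches": lambda ls: ls.get("severity") == "page"}]
print(mutes(silences, marker, {"severity": "page"}), marker)  # True, marker now records id 7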
main.py
_path)) scope = ['https://spreadsheets.google.com/feeds'] credentials = SignedJwtAssertionCredentials(json_key['client_email'], bytes(json_key['private_key'], "utf-8"), scope) self.gc = gspread.authorize(credentials) logging.info("Authorization with Google successful!") self.zapi = pyzenobase.ZenobaseAPI(zenobase_username, zenobase_password) self.ll = self.gc.open("Lifelogger") self.streaks_bucket = self.zapi.create_or_get_bucket(streaks_bucket_name) self.supplements_bucket = self.zapi.create_or_get_bucket(supplements_bucket_name) def _create_events(self, bucket_id, events, debugging=False): logging.info("Uploading {} events...".format(len(events))) if not debugging:
else: for event in events: self.zapi.create_event(bucket_id, event) logging.debug("Done!".format(len(events))) def get_raw_table(self, sheetname): start = time.time() sheet = self.ll.worksheet(sheetname) raw_table = sheet.get_all_values() logging.debug("Took {}s to fetch worksheet '{}'".format(round(time.time()-start, 3), sheetname)) return raw_table @staticmethod def get_dates(raw_table) -> "list of dates": """ Goes through the first column of input table and returns the first sequence of dates it finds. """ dates = [] found_first = False for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]): if dstr: if len(dstr.split("/")) == 3: d = datetime.datetime.strptime(dstr, '%m/%d/%Y') elif len(dstr.split("-")) == 3: d = datetime.datetime.strptime(dstr, '%Y-%m-%d') else: # Not necessarily an error, could just be a non-date cell logging.debug("unknown date-format: {}".format(dstr)) continue dates.append(d) if not found_first: found_first = True logging.debug("Found first date: '{}' at i: {}".format(d.isoformat(), i)) elif found_first: logging.debug("Last date: {}".format(d)) break return dates def get_main(self) -> 'table[category: str][label: str][date: date]': """ Returns a table with the above typesignature """ raw_table = self.get_raw_table("M") categories = raw_table[0] labels = raw_table[1] dates = self.get_dates(raw_table) def next_cat_col(i): n = 1 while True: if i+n > len(categories)-1: return i if categories[i+n]: return i+n n += 1 def get_category_labels(i): end_col = next_cat_col(i) return zip(range(i, end_col), labels[i:end_col]) def get_label_cells(category, label): ci = categories.index(category) i = labels.index(label, ci) cells = {} for j, d in enumerate(dates): cell = raw_table[j+2][i] if cell and cell != "#VALUE!": cells[d] = cell return cells table = {} for i, cat in enumerate(categories): if not cat: continue table[cat] = {} for i, label in get_category_labels(i): table[cat][label] = get_label_cells(cat, label) return table """------------------------------------------------------------------- Here begins the extraction from the table and the export into Zenobase -------------------------------------------------------------------""" def create_streaks(self): table = self.get_main() bucket_id = self.streaks_bucket["@id"] events = [] for label in table["Streaks"]: for d in table["Streaks"][label]: val = table["Streaks"][label][d] mapping = {"TRUE": 1, "FALSE": -1} try: state = mapping[val] except KeyError: logging.warning("could not detect state of '{}'".format(val)) continue ts = pyzenobase.fmt_datetime(d, timezone="Europe/Stockholm") events.append(pyzenobase.ZenobaseEvent( {"timestamp": ts, "count": state, "tag": [label, val]})) self._create_events(bucket_id, events) def create_daily_supps(self): raw_table = self.get_raw_table("D - Daily") labels = raw_table[0] dates = self.get_dates(raw_table) bucket_id = self.supplements_bucket["@id"] events = [] for i, label in enumerate(labels): if not label: continue for j, d in enumerate(dates): if not raw_table[j+2][i]: continue try: weight = float(raw_table[j+2][i]) except ValueError: logging.warning("Invalid data '{}' (not a number) in cell: {}. Skipping..." 
.format(raw_table[j+2][i], (j+2, i))) continue events.append(pyzenobase.ZenobaseEvent( {"timestamp": pyzenobase.fmt_datetime(d, timezone="Europe/Stockholm"), "tag": [label, "daily"], "weight": { "@value": weight, "unit": "mg" }})) self._create_events(bucket_id, events) def create_timestamped_supps(self): # TODO: Support extra data in parens or clean up spreadsheet data with clearer syntax for parenthesis-data # TODO: Support substances with spaces (or change all such instances to no-space names) # TODO: Build tags sequentially instead of having a bunch of if-statements at the end # TODO: Extract attempts at data extraction since the try-except clauses are practically identical raw_table = self.get_raw_table("D - Timestamped") dates = self.get_dates(raw_table) parse_errors = 0 r_time = re.compile("[0-9]{1,2}:[0-9]{2}") r_weight = re.compile("^[0-9]+\.?[0-9]*") r_unit = re.compile("mcg|ug|mg|g|ml|cl|dl|l") r_roa = re.compile("oral|insuff|subl|intranasal|subcut|buccal") r_alc_perc = re.compile("[0-9]+\.?[0-9]*%") events = [] for i in range(1, len(raw_table[0]), 2): for j, d in enumerate(dates): time_cell = raw_table[j+1][i] data_cell = raw_table[j+1][i+1] time_is_approximate = False time_is_unknown = False if time_cell: try: if time_cell[0] == "~": time_is_approximate = True times = list(map(lambda x: datetime.datetime.strptime(x, "%H:%M").time(), r_time.findall(time_cell))) if len(times) < 1: raise Exception("No valid times found") except Exception as e: # Did not contain time logging.warning(("Could not parse time '{}' for '{}' at '{}' (exception: {}), " + "tagging with unknown_time") .format(time_cell, data_cell, d, e)) parse_errors += 1 times = [datetime.time(hour=0, minute=0)] time_is_unknown = True else: # Cell empty continue # Get the route of administration, if not specified assume oral try: last_token = data_cell.split(" ")[-1] roa = r_roa.findall(last_token)[0] except IndexError: roa = "oral" for dose_and_substance in map(str.strip, data_cell.split("+")): dose_is_approximate = False dose_is_unknown = False if dose_and_substance[0] == "~": dose_and_substance = dose_and_substance[1:] dose_is_approximate = True elif dose_and_substance[0] == "?": dose_and_substance = dose_and_substance.replace("?", "0") dose_is_unknown = True def parse_failed_msg(parse_property, exception): logging.warning(("Could not parse {} for '{}' at '{} {}' (exception: {}))" .format(parse_property, dose_and_substance, d, times, exception))) try: weight_or_volume = float(r_weight.findall(dose_and_substance)[0]) except (ValueError, IndexError) as e: parse_failed_msg("weight_or_volume", e) parse_errors += 1 continue try: unit = r_unit.findall(dose_and_substance)[0] except IndexError as e: parse_failed_msg("unit", e) parse_errors += 1 continue alc_perc = None if "%" in dose_and_substance: try: alc_perc = r_alc_perc.findall(dose_and_substance)[0] alc_perc = alc_perc.replace("%", "") except IndexError as e: parse_failed_msg("percentage", e) parse_errors += 1
self.zapi.create_events(bucket_id, events)
conditional_block
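get_dates in the main.py record above accepts two spreadsheet date formats and skips every other cell. A small stand-alone sketch of that per-cell detection step, with made-up cell values:

# Sketch of the date detection used by get_dates (the cell values are invented here).
import datetime

def parse_cell_date(dstr):
    if len(dstr.split("/")) == 3:
        return datetime.datetime.strptime(dstr, "%m/%d/%Y")
    if len(dstr.split("-")) == 3:
        return datetime.datetime.strptime(dstr, "%Y-%m-%d")
    return None  # not necessarily an error, could just be a non-date cell

print(parse_cell_date("03/14/2015"))  # 2015-03-14 00:00:00
print(parse_cell_date("2015-03-14"))  # 2015-03-14 00:00:00
print(parse_cell_date("totals"))      # None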
main.py
_path)) scope = ['https://spreadsheets.google.com/feeds'] credentials = SignedJwtAssertionCredentials(json_key['client_email'], bytes(json_key['private_key'], "utf-8"), scope) self.gc = gspread.authorize(credentials) logging.info("Authorization with Google successful!") self.zapi = pyzenobase.ZenobaseAPI(zenobase_username, zenobase_password) self.ll = self.gc.open("Lifelogger") self.streaks_bucket = self.zapi.create_or_get_bucket(streaks_bucket_name) self.supplements_bucket = self.zapi.create_or_get_bucket(supplements_bucket_name) def _create_events(self, bucket_id, events, debugging=False): logging.info("Uploading {} events...".format(len(events))) if not debugging: self.zapi.create_events(bucket_id, events) else: for event in events: self.zapi.create_event(bucket_id, event) logging.debug("Done!".format(len(events))) def get_raw_table(self, sheetname): start = time.time() sheet = self.ll.worksheet(sheetname) raw_table = sheet.get_all_values() logging.debug("Took {}s to fetch worksheet '{}'".format(round(time.time()-start, 3), sheetname)) return raw_table @staticmethod def get_dates(raw_table) -> "list of dates": """ Goes through the first column of input table and returns the first sequence of dates it finds. """ dates = [] found_first = False for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]): if dstr: if len(dstr.split("/")) == 3: d = datetime.datetime.strptime(dstr, '%m/%d/%Y') elif len(dstr.split("-")) == 3: d = datetime.datetime.strptime(dstr, '%Y-%m-%d') else: # Not necessarily an error, could just be a non-date cell logging.debug("unknown date-format: {}".format(dstr)) continue dates.append(d) if not found_first: found_first = True logging.debug("Found first date: '{}' at i: {}".format(d.isoformat(), i)) elif found_first: logging.debug("Last date: {}".format(d)) break return dates def get_main(self) -> 'table[category: str][label: str][date: date]': """ Returns a table with the above typesignature """ raw_table = self.get_raw_table("M") categories = raw_table[0] labels = raw_table[1] dates = self.get_dates(raw_table) def next_cat_col(i): n = 1 while True: if i+n > len(categories)-1: return i if categories[i+n]: return i+n n += 1 def get_category_labels(i): end_col = next_cat_col(i) return zip(range(i, end_col), labels[i:end_col]) def
(category, label): ci = categories.index(category) i = labels.index(label, ci) cells = {} for j, d in enumerate(dates): cell = raw_table[j+2][i] if cell and cell != "#VALUE!": cells[d] = cell return cells table = {} for i, cat in enumerate(categories): if not cat: continue table[cat] = {} for i, label in get_category_labels(i): table[cat][label] = get_label_cells(cat, label) return table """------------------------------------------------------------------- Here begins the extraction from the table and the export into Zenobase -------------------------------------------------------------------""" def create_streaks(self): table = self.get_main() bucket_id = self.streaks_bucket["@id"] events = [] for label in table["Streaks"]: for d in table["Streaks"][label]: val = table["Streaks"][label][d] mapping = {"TRUE": 1, "FALSE": -1} try: state = mapping[val] except KeyError: logging.warning("could not detect state of '{}'".format(val)) continue ts = pyzenobase.fmt_datetime(d, timezone="Europe/Stockholm") events.append(pyzenobase.ZenobaseEvent( {"timestamp": ts, "count": state, "tag": [label, val]})) self._create_events(bucket_id, events) def create_daily_supps(self): raw_table = self.get_raw_table("D - Daily") labels = raw_table[0] dates = self.get_dates(raw_table) bucket_id = self.supplements_bucket["@id"] events = [] for i, label in enumerate(labels): if not label: continue for j, d in enumerate(dates): if not raw_table[j+2][i]: continue try: weight = float(raw_table[j+2][i]) except ValueError: logging.warning("Invalid data '{}' (not a number) in cell: {}. Skipping..." .format(raw_table[j+2][i], (j+2, i))) continue events.append(pyzenobase.ZenobaseEvent( {"timestamp": pyzenobase.fmt_datetime(d, timezone="Europe/Stockholm"), "tag": [label, "daily"], "weight": { "@value": weight, "unit": "mg" }})) self._create_events(bucket_id, events) def create_timestamped_supps(self): # TODO: Support extra data in parens or clean up spreadsheet data with clearer syntax for parenthesis-data # TODO: Support substances with spaces (or change all such instances to no-space names) # TODO: Build tags sequentially instead of having a bunch of if-statements at the end # TODO: Extract attempts at data extraction since the try-except clauses are practically identical raw_table = self.get_raw_table("D - Timestamped") dates = self.get_dates(raw_table) parse_errors = 0 r_time = re.compile("[0-9]{1,2}:[0-9]{2}") r_weight = re.compile("^[0-9]+\.?[0-9]*") r_unit = re.compile("mcg|ug|mg|g|ml|cl|dl|l") r_roa = re.compile("oral|insuff|subl|intranasal|subcut|buccal") r_alc_perc = re.compile("[0-9]+\.?[0-9]*%") events = [] for i in range(1, len(raw_table[0]), 2): for j, d in enumerate(dates): time_cell = raw_table[j+1][i] data_cell = raw_table[j+1][i+1] time_is_approximate = False time_is_unknown = False if time_cell: try: if time_cell[0] == "~": time_is_approximate = True times = list(map(lambda x: datetime.datetime.strptime(x, "%H:%M").time(), r_time.findall(time_cell))) if len(times) < 1: raise Exception("No valid times found") except Exception as e: # Did not contain time logging.warning(("Could not parse time '{}' for '{}' at '{}' (exception: {}), " + "tagging with unknown_time") .format(time_cell, data_cell, d, e)) parse_errors += 1 times = [datetime.time(hour=0, minute=0)] time_is_unknown = True else: # Cell empty continue # Get the route of administration, if not specified assume oral try: last_token = data_cell.split(" ")[-1] roa = r_roa.findall(last_token)[0] except IndexError: roa = "oral" for dose_and_substance in 
map(str.strip, data_cell.split("+")): dose_is_approximate = False dose_is_unknown = False if dose_and_substance[0] == "~": dose_and_substance = dose_and_substance[1:] dose_is_approximate = True elif dose_and_substance[0] == "?": dose_and_substance = dose_and_substance.replace("?", "0") dose_is_unknown = True def parse_failed_msg(parse_property, exception): logging.warning(("Could not parse {} for '{}' at '{} {}' (exception: {}))" .format(parse_property, dose_and_substance, d, times, exception))) try: weight_or_volume = float(r_weight.findall(dose_and_substance)[0]) except (ValueError, IndexError) as e: parse_failed_msg("weight_or_volume", e) parse_errors += 1 continue try: unit = r_unit.findall(dose_and_substance)[0] except IndexError as e: parse_failed_msg("unit", e) parse_errors += 1 continue alc_perc = None if "%" in dose_and_substance: try: alc_perc = r_alc_perc.findall(dose_and_substance)[0] alc_perc = alc_perc.replace("%", "") except IndexError as e: parse_failed_msg("percentage", e) parse_errors += 1
get_label_cells
identifier_name
main.py
class Lifelogger_to_Zenobase(): def __init__(self, google_oauth_json_path, zenobase_username, zenobase_password, streaks_bucket_name="Streaks", supplements_bucket_name="Supplements - New"): json_key = json.load(open(google_oauth_json_path)) scope = ['https://spreadsheets.google.com/feeds'] credentials = SignedJwtAssertionCredentials(json_key['client_email'], bytes(json_key['private_key'], "utf-8"), scope) self.gc = gspread.authorize(credentials) logging.info("Authorization with Google successful!") self.zapi = pyzenobase.ZenobaseAPI(zenobase_username, zenobase_password) self.ll = self.gc.open("Lifelogger") self.streaks_bucket = self.zapi.create_or_get_bucket(streaks_bucket_name) self.supplements_bucket = self.zapi.create_or_get_bucket(supplements_bucket_name) def _create_events(self, bucket_id, events, debugging=False): logging.info("Uploading {} events...".format(len(events))) if not debugging: self.zapi.create_events(bucket_id, events) else: for event in events: self.zapi.create_event(bucket_id, event) logging.debug("Done!".format(len(events))) def get_raw_table(self, sheetname): start = time.time() sheet = self.ll.worksheet(sheetname) raw_table = sheet.get_all_values() logging.debug("Took {}s to fetch worksheet '{}'".format(round(time.time()-start, 3), sheetname)) return raw_table @staticmethod def get_dates(raw_table) -> "list of dates": """ Goes through the first column of input table and returns the first sequence of dates it finds. """ dates = [] found_first = False for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]): if dstr: if len(dstr.split("/")) == 3: d = datetime.datetime.strptime(dstr, '%m/%d/%Y') elif len(dstr.split("-")) == 3: d = datetime.datetime.strptime(dstr, '%Y-%m-%d') else: # Not necessarily an error, could just be a non-date cell logging.debug("unknown date-format: {}".format(dstr)) continue dates.append(d) if not found_first: found_first = True logging.debug("Found first date: '{}' at i: {}".format(d.isoformat(), i)) elif found_first: logging.debug("Last date: {}".format(d)) break return dates def get_main(self) -> 'table[category: str][label: str][date: date]': """ Returns a table with the above typesignature """ raw_table = self.get_raw_table("M") categories = raw_table[0] labels = raw_table[1] dates = self.get_dates(raw_table) def next_cat_col(i): n = 1 while True: if i+n > len(categories)-1: return i if categories[i+n]: return i+n n += 1 def get_category_labels(i): end_col = next_cat_col(i) return zip(range(i, end_col), labels[i:end_col]) def get_label_cells(category, label): ci = categories.index(category) i = labels.index(label, ci) cells = {} for j, d in enumerate(dates): cell = raw_table[j+2][i] if cell and cell != "#VALUE!": cells[d] = cell return cells table = {} for i, cat in enumerate(categories): if not cat: continue table[cat] = {} for i, label in get_category_labels(i): table[cat][label] = get_label_cells(cat, label) return table """------------------------------------------------------------------- Here begins the extraction from the table and the export into Zenobase -------------------------------------------------------------------""" def create_streaks(self): table = self.get_main() bucket_id = self.streaks_bucket["@id"] events = [] for label in table["Streaks"]: for d in table["Streaks"][label]: val = table["Streaks"][label][d] mapping = {"TRUE": 1, "FALSE": -1} try: state = mapping[val] except KeyError: logging.warning("could not detect state of '{}'".format(val)) continue ts = pyzenobase.fmt_datetime(d, 
timezone="Europe/Stockholm") events.append(pyzenobase.ZenobaseEvent( {"timestamp": ts, "count": state, "tag": [label, val]})) self._create_events(bucket_id, events) def create_daily_supps(self): raw_table = self.get_raw_table("D - Daily") labels = raw_table[0] dates = self.get_dates(raw_table) bucket_id = self.supplements_bucket["@id"] events = [] for i, label in enumerate(labels): if not label: continue for j, d in enumerate(dates): if not raw_table[j+2][i]: continue try: weight = float(raw_table[j+2][i]) except ValueError: logging.warning("Invalid data '{}' (not a number) in cell: {}. Skipping..." .format(raw_table[j+2][i], (j+2, i))) continue events.append(pyzenobase.ZenobaseEvent( {"timestamp": pyzenobase.fmt_datetime(d, timezone="Europe/Stockholm"), "tag": [label, "daily"], "weight": { "@value": weight, "unit": "mg" }})) self._create_events(bucket_id, events) def create_timestamped_supps(self): # TODO: Support extra data in parens or clean up spreadsheet data with clearer syntax for parenthesis-data # TODO: Support substances with spaces (or change all such instances to no-space names) # TODO: Build tags sequentially instead of having a bunch of if-statements at the end # TODO: Extract attempts at data extraction since the try-except clauses are practically identical raw_table = self.get_raw_table("D - Timestamped") dates = self.get_dates(raw_table) parse_errors = 0 r_time = re.compile("[0-9]{1,2}:[0-9]{2}") r_weight = re.compile("^[0-9]+\.?[0-9]*") r_unit = re.compile("mcg|ug|mg|g|ml|cl|dl|l") r_roa = re.compile("oral|insuff|subl|intranasal|subcut|buccal") r_alc_perc = re.compile("[0-9]+\.?[0-9]*%") events = [] for i in range(1, len(raw_table[0]), 2): for j, d in enumerate(dates): time_cell = raw_table[j+1][i] data_cell = raw_table[j+1][i+1] time_is_approximate = False time_is_unknown = False if time_cell: try: if time_cell[0] == "~": time_is_approximate = True times = list(map(lambda x: datetime.datetime.strptime(x, "%H:%M").time(), r_time.findall(time_cell))) if len(times) < 1: raise Exception("No valid times found") except Exception as e: # Did not contain time logging.warning(("Could not parse time '{}' for '{}' at '{}' (exception: {}), " + "tagging with unknown_time") .format(time_cell, data_cell, d, e)) parse_errors += 1 times = [datetime.time(hour=0, minute=0)] time_is_unknown = True else: # Cell empty continue # Get the route of administration, if not specified assume oral try: last_token = data_cell.split(" ")[-1] roa = r_roa.findall(last_token)[0] except IndexError: roa = "oral" for dose_and_substance in map(str.strip, data_cell.split("+")): dose_is_approximate = False dose_is_unknown = False if dose_and_substance[0] == "~": dose_and_substance = dose_and_substance[1:] dose_is_approximate = True elif dose_and_substance[0] == "?": dose_and_substance = dose_and_substance.replace("?", "0") dose_is_unknown = True def parse_failed_msg(parse_property, exception): logging.warning(("Could not parse {} for '{}' at '{} {}' (exception: {}))" .format(parse_property, dose_and_substance, d, times, exception))) try: weight_or_volume = float(r_weight.findall(dose_and_substance)[0]) except (ValueError, IndexError) as e: parse_failed_msg("weight_or_volume", e) parse_errors += 1 continue try: unit = r_unit.findall(dose_and_substance)[0] except IndexError as e:
return list(map(lambda t: pyzenobase.fmt_datetime(datetime.datetime.combine(d, t)), times))
identifier_body
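The masked body in this record combines the row's date with each parsed time-of-day into full timestamps via pyzenobase.fmt_datetime. A stand-alone sketch of the same combination, with isoformat standing in for the pyzenobase formatter (which also handles the timezone):

# Illustration only: isoformat replaces pyzenobase.fmt_datetime.
import datetime

def combine_timestamps(d, times):
    return [datetime.datetime.combine(d, t).isoformat() for t in times]

d = datetime.date(2015, 3, 14)
times = [datetime.time(9, 30), datetime.time(21, 15)]
print(combine_timestamps(d, times))  # ['2015-03-14T09:30:00', '2015-03-14T21:15:00']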
main.py
_path)) scope = ['https://spreadsheets.google.com/feeds'] credentials = SignedJwtAssertionCredentials(json_key['client_email'], bytes(json_key['private_key'], "utf-8"), scope) self.gc = gspread.authorize(credentials) logging.info("Authorization with Google successful!") self.zapi = pyzenobase.ZenobaseAPI(zenobase_username, zenobase_password) self.ll = self.gc.open("Lifelogger") self.streaks_bucket = self.zapi.create_or_get_bucket(streaks_bucket_name) self.supplements_bucket = self.zapi.create_or_get_bucket(supplements_bucket_name) def _create_events(self, bucket_id, events, debugging=False): logging.info("Uploading {} events...".format(len(events))) if not debugging: self.zapi.create_events(bucket_id, events) else: for event in events: self.zapi.create_event(bucket_id, event) logging.debug("Done!".format(len(events))) def get_raw_table(self, sheetname): start = time.time() sheet = self.ll.worksheet(sheetname) raw_table = sheet.get_all_values() logging.debug("Took {}s to fetch worksheet '{}'".format(round(time.time()-start, 3), sheetname)) return raw_table @staticmethod def get_dates(raw_table) -> "list of dates": """ Goes through the first column of input table and returns the first sequence of dates it finds. """ dates = [] found_first = False for i, dstr in enumerate([raw_table[i][0] for i in range(0, len(raw_table))]): if dstr: if len(dstr.split("/")) == 3: d = datetime.datetime.strptime(dstr, '%m/%d/%Y') elif len(dstr.split("-")) == 3: d = datetime.datetime.strptime(dstr, '%Y-%m-%d') else: # Not necessarily an error, could just be a non-date cell logging.debug("unknown date-format: {}".format(dstr)) continue dates.append(d) if not found_first: found_first = True logging.debug("Found first date: '{}' at i: {}".format(d.isoformat(), i)) elif found_first: logging.debug("Last date: {}".format(d)) break return dates def get_main(self) -> 'table[category: str][label: str][date: date]': """ Returns a table with the above typesignature """ raw_table = self.get_raw_table("M") categories = raw_table[0] labels = raw_table[1] dates = self.get_dates(raw_table) def next_cat_col(i): n = 1 while True: if i+n > len(categories)-1: return i if categories[i+n]: return i+n n += 1 def get_category_labels(i): end_col = next_cat_col(i) return zip(range(i, end_col), labels[i:end_col]) def get_label_cells(category, label): ci = categories.index(category) i = labels.index(label, ci) cells = {} for j, d in enumerate(dates): cell = raw_table[j+2][i] if cell and cell != "#VALUE!": cells[d] = cell return cells table = {} for i, cat in enumerate(categories): if not cat: continue table[cat] = {} for i, label in get_category_labels(i): table[cat][label] = get_label_cells(cat, label) return table """------------------------------------------------------------------- Here begins the extraction from the table and the export into Zenobase -------------------------------------------------------------------""" def create_streaks(self): table = self.get_main() bucket_id = self.streaks_bucket["@id"] events = [] for label in table["Streaks"]: for d in table["Streaks"][label]: val = table["Streaks"][label][d] mapping = {"TRUE": 1, "FALSE": -1} try: state = mapping[val] except KeyError: logging.warning("could not detect state of '{}'".format(val)) continue ts = pyzenobase.fmt_datetime(d, timezone="Europe/Stockholm") events.append(pyzenobase.ZenobaseEvent( {"timestamp": ts, "count": state, "tag": [label, val]})) self._create_events(bucket_id, events) def create_daily_supps(self): raw_table = self.get_raw_table("D - Daily") labels 
= raw_table[0] dates = self.get_dates(raw_table) bucket_id = self.supplements_bucket["@id"] events = [] for i, label in enumerate(labels): if not label: continue for j, d in enumerate(dates):
if not raw_table[j+2][i]: continue try: weight = float(raw_table[j+2][i]) except ValueError: logging.warning("Invalid data '{}' (not a number) in cell: {}. Skipping..." .format(raw_table[j+2][i], (j+2, i))) continue events.append(pyzenobase.ZenobaseEvent( {"timestamp": pyzenobase.fmt_datetime(d, timezone="Europe/Stockholm"), "tag": [label, "daily"], "weight": { "@value": weight, "unit": "mg" }})) self._create_events(bucket_id, events) def create_timestamped_supps(self): # TODO: Support extra data in parens or clean up spreadsheet data with clearer syntax for parenthesis-data # TODO: Support substances with spaces (or change all such instances to no-space names) # TODO: Build tags sequentially instead of having a bunch of if-statements at the end # TODO: Extract attempts at data extraction since the try-except clauses are practically identical raw_table = self.get_raw_table("D - Timestamped") dates = self.get_dates(raw_table) parse_errors = 0 r_time = re.compile("[0-9]{1,2}:[0-9]{2}") r_weight = re.compile("^[0-9]+\.?[0-9]*") r_unit = re.compile("mcg|ug|mg|g|ml|cl|dl|l") r_roa = re.compile("oral|insuff|subl|intranasal|subcut|buccal") r_alc_perc = re.compile("[0-9]+\.?[0-9]*%") events = [] for i in range(1, len(raw_table[0]), 2): for j, d in enumerate(dates): time_cell = raw_table[j+1][i] data_cell = raw_table[j+1][i+1] time_is_approximate = False time_is_unknown = False if time_cell: try: if time_cell[0] == "~": time_is_approximate = True times = list(map(lambda x: datetime.datetime.strptime(x, "%H:%M").time(), r_time.findall(time_cell))) if len(times) < 1: raise Exception("No valid times found") except Exception as e: # Did not contain time logging.warning(("Could not parse time '{}' for '{}' at '{}' (exception: {}), " + "tagging with unknown_time") .format(time_cell, data_cell, d, e)) parse_errors += 1 times = [datetime.time(hour=0, minute=0)] time_is_unknown = True else: # Cell empty continue # Get the route of administration, if not specified assume oral try: last_token = data_cell.split(" ")[-1] roa = r_roa.findall(last_token)[0] except IndexError: roa = "oral" for dose_and_substance in map(str.strip, data_cell.split("+")): dose_is_approximate = False dose_is_unknown = False if dose_and_substance[0] == "~": dose_and_substance = dose_and_substance[1:] dose_is_approximate = True elif dose_and_substance[0] == "?": dose_and_substance = dose_and_substance.replace("?", "0") dose_is_unknown = True def parse_failed_msg(parse_property, exception): logging.warning(("Could not parse {} for '{}' at '{} {}' (exception: {}))" .format(parse_property, dose_and_substance, d, times, exception))) try: weight_or_volume = float(r_weight.findall(dose_and_substance)[0]) except (ValueError, IndexError) as e: parse_failed_msg("weight_or_volume", e) parse_errors += 1 continue try: unit = r_unit.findall(dose_and_substance)[0] except IndexError as e: parse_failed_msg("unit", e) parse_errors += 1 continue alc_perc = None if "%" in dose_and_substance: try: alc_perc = r_alc_perc.findall(dose_and_substance)[0] alc_perc = alc_perc.replace("%", "") except IndexError as e: parse_failed_msg("percentage", e) parse_errors += 1
random_line_split
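create_timestamped_supps in the record above extracts the dose, unit, and optional alcohol percentage from each cell with a handful of regular expressions. A hedged sketch reusing those same expressions on a made-up cell value; the real code additionally tracks approximate/unknown doses and counts parse errors.

# Sketch of the dose parsing step; the cell value is invented for illustration.
import re

r_weight = re.compile(r"^[0-9]+\.?[0-9]*")
r_unit = re.compile(r"mcg|ug|mg|g|ml|cl|dl|l")
r_alc_perc = re.compile(r"[0-9]+\.?[0-9]*%")

cell = "33cl 5.2% beer"
weight = float(r_weight.findall(cell)[0])           # 33.0
unit = r_unit.findall(cell)[0]                      # 'cl'
alc_perc = r_alc_perc.findall(cell)[0].rstrip("%")  # '5.2'
print(weight, unit, alc_perc)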
PointCode.js
55] ] ); Point.scoreModeListExt[modeIndex].colors[i][1] = Point.scoreModeListExt[modeIndex].colors[i][0]; } break; case 'mellow': for (i = 0; i < randInt(25,35); i++) { // mellow colors Point.scoreModeListExt[modeIndex].colors.push([ fillArray(3,3,85,153), [255,255,255] ]); for (j = 0; j < 3; j++) { Point.scoreModeListExt[modeIndex].colors[i][1][j] = Point.scoreModeListExt[modeIndex].colors[i][0][j] + 25; } } break; case 'squares': for (i = 0; i < randInt(25,35); i++) { // mellow colors Point.scoreModeListExt[modeIndex].colors.push([ fillArray(3,3,85,153), [255,255,255] ]); for (j = 0; j < 3; j++) { Point.scoreModeListExt[modeIndex].colors[i][1][j] = Point.scoreModeListExt[modeIndex].colors[i][0][j] + 25; } } break; case 'lines': index = 0; // to keep track of position in array for (i = 0; i < randInt(1,4); i++) { // black or white if (Math.random()<0.5) { Point.scoreModeListExt[modeIndex].colors.push([ [255,255,255], [255,255,255] ]) } else { Point.scoreModeListExt[modeIndex].colors.push([ [0,0,0], [0,0,0] ]) } index += 1; } for (i = 0; i < randInt(3,6); i++) { // pure colors Point.scoreModeListExt[modeIndex].colors.push([ [randInt(0,1) * 255,randInt(0,1) * 255,randInt(0,1)*255 ], [0,0,0] ]); for (j = 0; j < 3; j++) { if( Point.scoreModeListExt[modeIndex].colors[index][0][j] === 255 ) { Point.scoreModeListExt[modeIndex].colors[index][1][j] = randInt(200,255); } } index += 1; } break; default: index = 0; // to keep track of position in array for (i = 0; i < randInt(15,25); i++) { // all colors Point.scoreModeListExt[modeIndex].colors.push([ [0,0,0], [255,255,255] ]); index += 1; } for (i = 0; i < randInt(5,15); i++) { // mellow colors Point.scoreModeListExt[modeIndex].colors.push([ fillArray(3,3,85,153), [255,255,255] ]); for (j = 0; j < 3; j++) { Point.scoreModeListExt[modeIndex].colors[index][1][j] = Point.scoreModeListExt[modeIndex].colors[index][0][j] + randInt(10,25); } index += 1; } for (i = 0; i < randInt(5,8); i++) { // hard colors high Point.scoreModeListExt[modeIndex].colors.push([ fillArray(3,3,200,230), [255,255,255] ]); for (j = 0; j < 3; j++) { Point.scoreModeListExt[modeIndex].colors[index][1][j] = Point.scoreModeListExt[modeIndex].colors[index][0][j] + randInt(10,25); } index += 1; } for (i = 0; i < randInt(5,8); i++) { // hard colors low Point.scoreModeListExt[modeIndex].colors.push([ fillArray(3,3,10,30), [255,255,255] ]); for (j = 0; j < 3; j++) { Point.scoreModeListExt[modeIndex].colors[index][1][j] = Point.scoreModeListExt[modeIndex].colors[index][0][j] + randInt(10,25); } index += 1; } } // create shorter alias Point.colors = Point.scoreModeListExt[modeIndex].colors; // console.log(Point.scoreModeListExt[modeIndex].name + " nbrBoxes: " + Point.scoreModeListExt[modeIndex].nbrBoxes); }; // function to fill whole picture with one color Point.clearTo = function(rgbClear, box) { var clearToRandom = 0; // console.log("Clear to rgb(" + rgbClear + ")"); if (!rgbClear) { clearToRandom = 1; } if (!box) { // no specific box given Point.tuples.forEach(function(tuple,index){ if (clearToRandom === 1) { rgbClear = fillArray(3,3,0,255); } Point.boxes.fillStyle = "rgb(" + rgbClear[0] + "," + rgbClear[1] + "," + rgbClear[2] + ")"; Point.boxes.fillRect(Point.boxPix * tuple[0], Point.boxPix * tuple[1], Point.boxPix, Point.boxPix); }); } else { // just fill one specific box Point.boxes.fillStyle = "rgb(" + rgbClear[0] + "," + rgbClear[1] + "," + rgbClear[2] + ")"; Point.boxes.fillRect(Point.boxPix * box[0], Point.boxPix * box[1], Point.boxPix, Point.boxPix); } }; // function to draw boxes 
and kick off Point.play Point.drawBoxes = function(fadeTime) { // fadeTime in seconds var rgb = new Array(3), rgbText = new Array(3), red, green, blue, tuple = [], tmp, x, y; var nbr = Point.nbrBoxes[Point.count % Point.nbrBoxes.length]; // console.log("nbr = " + nbr); // random shuffle all tuples if(Point.scoreMode !== 'single') { Point.tuplesTemp = Point.tuples; shuffleArray(Point.tuplesTemp); } // special treatment for certain modes switch(Point.scoreMode) { case 'single': nbr = 1; if(Point.scoreModeChanged || Point.scoreShuffled) { Point.tuplesTemp = Point.tuples; shuffleArray(Point.tuplesTemp); Point.tuplesTemp = Point.tuplesTemp.filter( function(value, index) { if(index < randInt(2,4)) {return value} }); } else { tmp = Point.tuplesTemp.shift(); Point.tuplesTemp.push(tmp); } break; case 'squares': x = randInt(0,Point.nbrX - 2); y = randInt(0,Point.nbrY - 2); nbr = 4; Point.tuplesTemp = Point.tuplesTemp.filter(function(value) { return value.equals([x,y]) || value.equals([x+1,y]) || value.equals([x,y+1]) || value.equals([x+1,y+1]) } ); if(Math.random() < 0.1) { Point.clearTo([0,0,0]); } break; case 'lines': x = 0; y = 0; if(Math.random()<0.5) { // vertical line x = randInt(0,Point.nbrX-1); nbr = Point.nbrY; Point.tuplesTemp = Point.tuplesTemp.filter(function(value) { return value[0] === x; }); } else { // horizontal line y = randInt(0,Point.nbrY-1); nbr = Point.nbrX; Point.tuplesTemp = Point.tuplesTemp.filter(function(value) { return value[1] === y; }); } break; } // change nbr boxes for (var index = 0; index < nbr; index += 1) { // get next tuple to change tuple = Point.tuplesTemp[index]; // get color for (var i = 0; i < 3; i++) { rgb[i] = randInt(
random_line_split
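Several of the color tables in the PointCode.js record above are built with a fillArray(repMin, repMax, valMin, valMax) helper that is used but never defined in the excerpt. Judging from its call sites (e.g. fillArray(3,3,85,153) producing one RGB triplet), it appears to return a random-length array of random integers; the Python sketch below is a guess at that behaviour, not the original definition.

# Guessed behaviour of fillArray based on its call sites; treat this as an assumption.
import random

def fill_array(rep_min, rep_max, val_min, val_max):
    length = random.randint(rep_min, rep_max)
    return [random.randint(val_min, val_max) for _ in range(length)]

print(fill_array(3, 3, 85, 153))  # e.g. [101, 140, 92] - one "mellow" RGB triplet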
PointCode.js
iPad') { width = '60px' } else { width = '40px' } for (var i = 0; i < btns.length; i++)
Point.switch(); } else if(!Point.mainTask.running) { Point.clearTo([0,0,0]); Point.init(); Point.flicker(); } }; Point.stop = function() { Point.mainTask.stop(); Point.metaTask.stop(); Point.clearTo([0,0,0]); Point.flicker('stop'); }; Point.flicker = function(mode) { var interval = 100, tuple = [0,0]; if(!mode) { mode = 'start' } switch(mode) { case 'stop': interval = 200; tuple = [Point.nbrX - 1,Point.nbrY - 1]; break; default: interval = 100; } Point.clearTo([255,255,255],tuple); setTimeout(Point.clearTo, 1 * interval, [0,0,0],tuple); setTimeout(Point.clearTo, 2 * interval, [255,255,255],tuple); setTimeout(Point.clearTo, 3 * interval, [0,0,0],tuple); setTimeout(Point.clearTo, 4 * interval, [255,255,255],tuple); setTimeout(Point.clearTo, 5 * interval, [0,0,0],tuple); setTimeout(Point.clearTo, 6 * interval, [255,255,255],tuple); setTimeout(Point.clearTo, 7 * interval, [0,0,0],tuple); setTimeout(Point.clearTo, 8 * interval, [255,255,255],tuple); setTimeout(Point.clearTo, 9 * interval, [0,0,0],tuple); if(mode === 'stop') { setTimeout(function(){ Point.boxes.font = '20px monaco'; Point.boxes.fillStyle = "rgb(255,255,255)"; Point.boxes.fillText(">goodbye", Point.boxPix * tuple[0], Point.boxPix * tuple[1] + 20); setTimeout(Point.clearTo, 40 * interval, [0,0,0], tuple); }, 10 * interval) } }; // constructor for tasks Point.taskConstructor = function(funcArg, waitArg) { var task = { wait: waitArg, // wait time every loop func: funcArg, // function to execute every loop running: true, timeout: false, // variable that holds the setTimeout start: function() { this.running = true; return this.loop(); }, loop: function() { this.timeout = setTimeout(this.runLoop, this.wait); return this; }, runLoop: function() { var result; if (!task.running) return; result = task.func.call(task); if (typeof result == 'number') { if (result === 0) return; task.wait = result; } task.loop(); }, stop: function() { this.running = false; clearTimeout(this.timeout); } }; return task.start(); }; // function that starts it all Point.init = function() { // clear to random colors if(Point.initClear) { Point.clearTo(); } // prepare tuples Point.tuplesTemp = Point.tuples; shuffleArray(Point.tuplesTemp); // initialize score Point.selectScoreMode('init'); // create main task Point.mainTask = Point.taskConstructor(function() { var wait = this.wait; // in milliseconds, initially taken from waitDefault (set below) // count number of loops Point.count += 1; // potentially clear to black if(Math.random() < 0.5) { if(Point.scoreModeChanged || Math.random() < 0.05) { Point.clearTo([0,0,0]); } } // randomly shuffle the score every now and then if(Point.count % 80 === 79) { Point.shuffleScore() } // start drawing and playing processes, pass fadeTime in seconds Point.drawBoxes( (wait/1000)/2 ); // reset wait if changed if(Point.waitChanged) { wait = Point.waitOrig; Point.waitChanged = false; } // reset if scoreModeChanged if(Point.scoreModeChanged) { Point.scoreModeChanged = false; } if(Point.scoreShuffled) { Point.scoreShuffled = false; } // set new wait if(wait < 0.5 * Point.mainWaitDefault) { Point.waitSign = 1; } if(wait > 2 * Point.mainWaitDefault) { Point.waitSign = -1; } wait = wait + Point.waitSign * Point.mainWaitChange * Math.random(); if(Math.random() < 0.1 || Point.fastForward) { Point.waitOrig = wait; if (wait > Point.mainWaitDefault) { wait = wait / 2; } else { wait = wait * 2; } Point.waitChanged = true; // console.log('double/half speed'); } // console.log("WaitTime: "+ (wait/1000).toFixed(2)); return wait; }, 
Point.mainWaitDefault); // create meta task to switch scoreModes Point.metaTask = Point.taskConstructor(function() { var wait; Point.fastForward = false; Point.selectScoreMode(); wait = Point.metaWaitDefault[Point.scoreModeList.indexOf(Point.scoreMode)]; // wait set by scoreMode wait = wait + (-5 + 10 * Math.random()); // add random value between -5 and 5 sec if(Math.random() < 0.2) { // randomly make wait short wait = 5 + 10 * Math.random(); Point.fastForward = true; // console.log('fast forward'); } // console.log("metaWait: "+wait.toFixed(2)+"sec"); return wait * 1000; }, 1000 * Point.metaWaitDefault[Point.scoreModeList.indexOf(Point.scoreMode)]); // immediately stop so we can start it manually below Point.metaTask.stop(); Point.mainTask.stop(); // control mainTask setTimeout(function() {Point.mainTask.start(); Point.metaTask.start()}, 0 * 1000); // if length of the set is fixed, schedule stop if(Point.setLength > 0) { setTimeout(function() { Point.mainTask.stop() }, Point.setLength * 1000); } }; // function to select new scoreMode Point.selectScoreMode = function(mode) { if(mode !== 'init') { while (Point.scoreMode === Point.scoreModeCurrent) { Point.scoreMode = Point.scoreModeList[randInt(0, Point.scoreModeList.length - 1)]; } } // console.log("ScoreMode: " + Point.scoreMode) Point.scoreModeChanged = true; Point.shuffleScore(); Point.scoreModeCurrent = Point.scoreMode; }; // function to shuffle number of boxes to change, triggered when scoreMode is changed and extra randomly Point.shuffleScore = function() { var modeIndex = Point.scoreModeList.indexOf(Point.scoreMode); var i, j, index; Point.scoreShuffled = true; // set array of max number of boxes, fillArray(repMin,repMax,valMin,valMax) Point.scoreModeListExt[modeIndex].nbrBoxes = fillArray(4, 7, 1, 1) .concat(fillArray(7, 12, 3, 6) .concat(fillArray(1, 2, 1, 1) .concat(fillArray(2, 4, Point.nbrX * Point.nbrY / 2, Point.nbrX * Point.nbrY) .concat(fillArray(6,10,1,3) ) ) ) ); // create shorter alias Point.nbrBoxes = Point.scoreModeListExt[modeIndex].nbrBoxes; // set array of colors, which are pairs of triplets with min and max values for rgb colors // [ [rmin,gmin,bmin], [rmax,gmax,bmax] ], [ [rmin,gmin,bmin], [rmax,gmax,bmax] ], [ [rmin,gmin,bmin], [rmax,gmax,bmax] ],.... switch (Point.scoreMode) { case 'red': Point.scoreModeListExt[modeIndex].colors = [ [ [100,0,0], [255,0,0] ] ]; break; case 'green': Point.scoreModeListExt[modeIndex].colors = [ [ [0,100,0], [0,255,0]
{ btns[i].style.width = width; }
conditional_block
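
The JavaScript row above (whose `conditional_block` fill sets the button widths) also contains `Point.taskConstructor`, a loop built on `setTimeout` in which the body returns the wait time for the next iteration and a return value of 0 stops the loop. A minimal sketch of that self-rescheduling pattern in Go; the `task` type and its field names are illustrative, not from the source:

```go
package main

import (
	"fmt"
	"sync"
	"time"
)

// task re-schedules itself after every run; the body returns the next wait,
// or 0 to stop, mirroring the setTimeout-based loop in the row above.
type task struct {
	mu      sync.Mutex
	timer   *time.Timer
	running bool
	body    func() time.Duration
}

func startTask(wait time.Duration, body func() time.Duration) *task {
	t := &task{running: true, body: body}
	t.schedule(wait)
	return t
}

func (t *task) schedule(wait time.Duration) {
	t.timer = time.AfterFunc(wait, func() {
		t.mu.Lock()
		running := t.running
		t.mu.Unlock()
		if !running {
			return
		}
		next := t.body()
		if next <= 0 { // body asked to stop
			return
		}
		t.mu.Lock()
		if t.running {
			t.schedule(next)
		}
		t.mu.Unlock()
	})
}

func (t *task) stop() {
	t.mu.Lock()
	defer t.mu.Unlock()
	t.running = false
	if t.timer != nil {
		t.timer.Stop()
	}
}

func main() {
	count := 0
	tk := startTask(100*time.Millisecond, func() time.Duration {
		count++
		fmt.Println("tick", count)
		if count == 5 {
			return 0 // stop after five ticks
		}
		return 100 * time.Millisecond
	})
	time.Sleep(time.Second)
	tk.stop()
}
```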
policymap.go
upper limit of entries in the per endpoint policy // table ie the maximum number of peer identities that the endpoint could // send/receive traffic to/from.. It is set by InitMapInfo(), but unit // tests use the initial value below. // The default value of this upper limit is 16384. MaxEntries = 16384 ) type PolicyMap struct { *bpf.Map } func (pe PolicyEntry) IsDeny() bool { return pe.Flags.is(policyFlagDeny) } func (pe *PolicyEntry) String() string { return fmt.Sprintf("%d %d %d", pe.GetProxyPort(), pe.Packets, pe.Bytes) } func (pe *PolicyEntry) New() bpf.MapValue { return &PolicyEntry{} } // PolicyKey represents a key in the BPF policy map for an endpoint. It must // match the layout of policy_key in bpf/lib/common.h. type PolicyKey struct { Prefixlen uint32 `align:"lpm_key"` Identity uint32 `align:"sec_label"` TrafficDirection uint8 `align:"egress"` Nexthdr uint8 `align:"protocol"` DestPortNetwork uint16 `align:"dport"` // In network byte-order } // GetDestPort returns the DestPortNetwork in host byte order func (k *PolicyKey) GetDestPort() uint16 { return byteorder.NetworkToHost16(k.DestPortNetwork) } const ( sizeofPolicyKey = int(unsafe.Sizeof(PolicyKey{})) sizeofPrefixlen = int(unsafe.Sizeof(PolicyKey{}.Prefixlen)) sizeofNexthdr = int(unsafe.Sizeof(PolicyKey{}.Nexthdr)) sizeofDestPort = int(unsafe.Sizeof(PolicyKey{}.DestPortNetwork)) NexthdrBits = uint32(sizeofNexthdr) * 8 DestPortBits = uint32(sizeofDestPort) * 8 FullPrefixBits = NexthdrBits + DestPortBits StaticPrefixBits = uint32(sizeofPolicyKey-sizeofPrefixlen)*8 - FullPrefixBits ) // PolicyEntry represents an entry in the BPF policy map for an endpoint. It must // match the layout of policy_entry in bpf/lib/common.h. type PolicyEntry struct { ProxyPortNetwork uint16 `align:"proxy_port"` // In network byte-order Flags policyEntryFlags `align:"deny"` AuthType uint8 `align:"auth_type"` Pad1 uint16 `align:"pad1"` Pad2 uint16 `align:"pad2"` Packets uint64 `align:"packets"` Bytes uint64 `align:"bytes"` } // GetProxyPort returns the ProxyPortNetwork in host byte order func (pe *PolicyEntry) GetProxyPort() uint16 { return byteorder.NetworkToHost16(pe.ProxyPortNetwork) } type policyEntryFlagParams struct { IsDeny bool IsWildcardNexthdr bool IsWildcardDestPort bool } // getPolicyEntryFlags returns a policyEntryFlags from the policyEntryFlagParams. func getPolicyEntryFlags(p policyEntryFlagParams) policyEntryFlags { var flags policyEntryFlags if p.IsDeny { flags |= policyFlagDeny } if p.IsWildcardNexthdr { flags |= policyFlagWildcardNexthdr } if p.IsWildcardDestPort { flags |= policyFlagWildcardDestPort } return flags } // CallKey is the index into the prog array map. type CallKey struct { index uint32 } // CallValue is the program ID in the prog array map. type CallValue struct { progID uint32 } // String converts the key into a human readable string format. func (k *CallKey) String() string { return strconv.FormatUint(uint64(k.index), 10) } func (k *CallKey) New() bpf.MapKey { return &CallKey{} } // String converts the value into a human readable string format. 
func (v *CallValue) String() string { return strconv.FormatUint(uint64(v.progID), 10) } func (v *CallValue) New() bpf.MapValue { return &CallValue{} } func (pe *PolicyEntry) Add(oPe PolicyEntry) { pe.Packets += oPe.Packets pe.Bytes += oPe.Bytes } type PolicyEntryDump struct { PolicyEntry Key PolicyKey } // PolicyEntriesDump is a wrapper for a slice of PolicyEntryDump type PolicyEntriesDump []PolicyEntryDump // String returns a string representation of PolicyEntriesDump func (p PolicyEntriesDump) String() string { var sb strings.Builder for _, entry := range p {
sb.WriteString(fmt.Sprintf("%20s: %s\n", entry.Key.String(), entry.PolicyEntry.String())) } return sb.String() } // Less is a function used to sort PolicyEntriesDump by Policy Type // (Deny / Allow), TrafficDirection (Ingress / Egress) and Identity // (ascending order). func (p PolicyEntriesDump) Less(i, j int) bool { iDeny := p[i].PolicyEntry.IsDeny() jDeny := p[j].PolicyEntry.IsDeny() switch { case iDeny && !jDeny: return true case !iDeny && jDeny: return false } if p[i].Key.TrafficDirection < p[j].Key.TrafficDirection { return true } return p[i].Key.TrafficDirection <= p[j].Key.TrafficDirection && p[i].Key.Identity < p[j].Key.Identity } func (key *PolicyKey) PortProtoString() string { dport := key.GetDestPort() protoStr := u8proto.U8proto(key.Nexthdr).String() prefixLen := key.Prefixlen - StaticPrefixBits switch { case prefixLen == 0, prefixLen == NexthdrBits: // Protocol wildcarded or specified, wildcarded port return protoStr case prefixLen > NexthdrBits && prefixLen < FullPrefixBits: // Protocol specified, partially wildcarded port return fmt.Sprintf("0x%x/%d/%s", dport, prefixLen-NexthdrBits, protoStr) case prefixLen == FullPrefixBits: // Both protocol and port specified, nothing wildcarded return fmt.Sprintf("%d/%s", dport, protoStr) default: // Invalid prefix length return fmt.Sprintf("<INVALID PREFIX LENGTH: %d>", prefixLen) } } func (key *PolicyKey) String() string { trafficDirectionString := trafficdirection.TrafficDirection(key.TrafficDirection).String() portProtoStr := key.PortProtoString() return fmt.Sprintf("%s: %d %s", trafficDirectionString, key.Identity, portProtoStr) } func (key *PolicyKey) New() bpf.MapKey { return &PolicyKey{} } // NewKey returns a PolicyKey representing the specified parameters in network // byte-order. func NewKey(id uint32, dport uint16, proto uint8, trafficDirection uint8) PolicyKey { // For now prefix length is derived from the proto and dport values // This will have to be exposed to the caller when port ranges are supported. prefixLen := StaticPrefixBits if proto != 0 { prefixLen += NexthdrBits if dport != 0 { prefixLen += DestPortBits } } return PolicyKey{ Prefixlen: prefixLen, Identity: id, TrafficDirection: trafficDirection, Nexthdr: proto, DestPortNetwork: byteorder.HostToNetwork16(dport), } } // newKey returns a PolicyKey representing the specified parameters in network // byte-order. func newKey(id uint32, dport uint16, proto u8proto.U8proto, trafficDirection trafficdirection.TrafficDirection) PolicyKey { return NewKey(id, dport, uint8(proto), trafficDirection.Uint8()) } // newEntry returns a PolicyEntry representing the specified parameters in // network byte-order. func newEntry(authType uint8, proxyPort uint16, flags policyEntryFlags) PolicyEntry { return PolicyEntry{ ProxyPortNetwork: byteorder.HostToNetwork16(proxyPort), Flags: flags, AuthType: authType, } } // newAllowEntry returns an allow PolicyEntry for the specified parameters in // network byte-order. // This is separated out to be used in unit testing. func newAllowEntry(key PolicyKey, authType uint8, proxyPort uint16) PolicyEntry { pef := getPolicyEntryFlags(policyEntryFlagParams{ IsWildcardNexthdr: key.Nexthdr == 0, IsWildcardDestPort: key.DestPortNetwork == 0, }) return newEntry(authType, proxyPort, pef) } // newDenyEntry returns a deny PolicyEntry for the specified parameters in // network byte-order. // This is separated out to be used in unit testing. func newDenyEntry(key PolicyKey) PolicyEntry { pef := getPolicyEntryFlags(policyEntryFlagParams{
random_line_split
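
The `NewKey` comment in the policymap.go row above says the LPM prefix length is derived from the protocol and destination-port values. With the field sizes shown there (a 12-byte key whose `Prefixlen` is 4 bytes, `Nexthdr` 1 byte and the port 2 bytes, assuming no struct padding), `StaticPrefixBits` works out to 40, so a fully wildcarded key gets prefix 40, a protocol-only key 48, and a protocol-plus-port key 64. A self-contained sketch of that arithmetic, with the sizes inlined instead of taken from `unsafe.Sizeof`:

```go
package main

import "fmt"

// Bit widths reproduced from the policy key layout in the row above,
// assuming the 12-byte struct has no padding (uint32+uint32+uint8+uint8+uint16).
const (
	sizeofPolicyKey  = 12 // bytes
	sizeofPrefixlen  = 4
	nexthdrBits      = 8  // one-byte protocol field
	destPortBits     = 16 // two-byte port field
	fullPrefixBits   = nexthdrBits + destPortBits
	staticPrefixBits = (sizeofPolicyKey-sizeofPrefixlen)*8 - fullPrefixBits // = 40
)

// prefixLen mirrors the derivation described in NewKey: the static part is
// always matched, the protocol byte only when proto != 0, and the port only
// when both proto and dport are non-zero.
func prefixLen(proto uint8, dport uint16) uint32 {
	l := uint32(staticPrefixBits)
	if proto != 0 {
		l += nexthdrBits
		if dport != 0 {
			l += destPortBits
		}
	}
	return l
}

func main() {
	fmt.Println(prefixLen(0, 0))  // 40: protocol and port wildcarded
	fmt.Println(prefixLen(6, 0))  // 48: TCP, any port
	fmt.Println(prefixLen(6, 80)) // 64: TCP port 80, nothing wildcarded
}
```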
policymap.go
limit of entries in the per endpoint policy // table ie the maximum number of peer identities that the endpoint could // send/receive traffic to/from.. It is set by InitMapInfo(), but unit // tests use the initial value below. // The default value of this upper limit is 16384. MaxEntries = 16384 ) type PolicyMap struct { *bpf.Map } func (pe PolicyEntry)
() bool { return pe.Flags.is(policyFlagDeny) } func (pe *PolicyEntry) String() string { return fmt.Sprintf("%d %d %d", pe.GetProxyPort(), pe.Packets, pe.Bytes) } func (pe *PolicyEntry) New() bpf.MapValue { return &PolicyEntry{} } // PolicyKey represents a key in the BPF policy map for an endpoint. It must // match the layout of policy_key in bpf/lib/common.h. type PolicyKey struct { Prefixlen uint32 `align:"lpm_key"` Identity uint32 `align:"sec_label"` TrafficDirection uint8 `align:"egress"` Nexthdr uint8 `align:"protocol"` DestPortNetwork uint16 `align:"dport"` // In network byte-order } // GetDestPort returns the DestPortNetwork in host byte order func (k *PolicyKey) GetDestPort() uint16 { return byteorder.NetworkToHost16(k.DestPortNetwork) } const ( sizeofPolicyKey = int(unsafe.Sizeof(PolicyKey{})) sizeofPrefixlen = int(unsafe.Sizeof(PolicyKey{}.Prefixlen)) sizeofNexthdr = int(unsafe.Sizeof(PolicyKey{}.Nexthdr)) sizeofDestPort = int(unsafe.Sizeof(PolicyKey{}.DestPortNetwork)) NexthdrBits = uint32(sizeofNexthdr) * 8 DestPortBits = uint32(sizeofDestPort) * 8 FullPrefixBits = NexthdrBits + DestPortBits StaticPrefixBits = uint32(sizeofPolicyKey-sizeofPrefixlen)*8 - FullPrefixBits ) // PolicyEntry represents an entry in the BPF policy map for an endpoint. It must // match the layout of policy_entry in bpf/lib/common.h. type PolicyEntry struct { ProxyPortNetwork uint16 `align:"proxy_port"` // In network byte-order Flags policyEntryFlags `align:"deny"` AuthType uint8 `align:"auth_type"` Pad1 uint16 `align:"pad1"` Pad2 uint16 `align:"pad2"` Packets uint64 `align:"packets"` Bytes uint64 `align:"bytes"` } // GetProxyPort returns the ProxyPortNetwork in host byte order func (pe *PolicyEntry) GetProxyPort() uint16 { return byteorder.NetworkToHost16(pe.ProxyPortNetwork) } type policyEntryFlagParams struct { IsDeny bool IsWildcardNexthdr bool IsWildcardDestPort bool } // getPolicyEntryFlags returns a policyEntryFlags from the policyEntryFlagParams. func getPolicyEntryFlags(p policyEntryFlagParams) policyEntryFlags { var flags policyEntryFlags if p.IsDeny { flags |= policyFlagDeny } if p.IsWildcardNexthdr { flags |= policyFlagWildcardNexthdr } if p.IsWildcardDestPort { flags |= policyFlagWildcardDestPort } return flags } // CallKey is the index into the prog array map. type CallKey struct { index uint32 } // CallValue is the program ID in the prog array map. type CallValue struct { progID uint32 } // String converts the key into a human readable string format. func (k *CallKey) String() string { return strconv.FormatUint(uint64(k.index), 10) } func (k *CallKey) New() bpf.MapKey { return &CallKey{} } // String converts the value into a human readable string format. func (v *CallValue) String() string { return strconv.FormatUint(uint64(v.progID), 10) } func (v *CallValue) New() bpf.MapValue { return &CallValue{} } func (pe *PolicyEntry) Add(oPe PolicyEntry) { pe.Packets += oPe.Packets pe.Bytes += oPe.Bytes } type PolicyEntryDump struct { PolicyEntry Key PolicyKey } // PolicyEntriesDump is a wrapper for a slice of PolicyEntryDump type PolicyEntriesDump []PolicyEntryDump // String returns a string representation of PolicyEntriesDump func (p PolicyEntriesDump) String() string { var sb strings.Builder for _, entry := range p { sb.WriteString(fmt.Sprintf("%20s: %s\n", entry.Key.String(), entry.PolicyEntry.String())) } return sb.String() } // Less is a function used to sort PolicyEntriesDump by Policy Type // (Deny / Allow), TrafficDirection (Ingress / Egress) and Identity // (ascending order). 
func (p PolicyEntriesDump) Less(i, j int) bool { iDeny := p[i].PolicyEntry.IsDeny() jDeny := p[j].PolicyEntry.IsDeny() switch { case iDeny && !jDeny: return true case !iDeny && jDeny: return false } if p[i].Key.TrafficDirection < p[j].Key.TrafficDirection { return true } return p[i].Key.TrafficDirection <= p[j].Key.TrafficDirection && p[i].Key.Identity < p[j].Key.Identity } func (key *PolicyKey) PortProtoString() string { dport := key.GetDestPort() protoStr := u8proto.U8proto(key.Nexthdr).String() prefixLen := key.Prefixlen - StaticPrefixBits switch { case prefixLen == 0, prefixLen == NexthdrBits: // Protocol wildcarded or specified, wildcarded port return protoStr case prefixLen > NexthdrBits && prefixLen < FullPrefixBits: // Protocol specified, partially wildcarded port return fmt.Sprintf("0x%x/%d/%s", dport, prefixLen-NexthdrBits, protoStr) case prefixLen == FullPrefixBits: // Both protocol and port specified, nothing wildcarded return fmt.Sprintf("%d/%s", dport, protoStr) default: // Invalid prefix length return fmt.Sprintf("<INVALID PREFIX LENGTH: %d>", prefixLen) } } func (key *PolicyKey) String() string { trafficDirectionString := trafficdirection.TrafficDirection(key.TrafficDirection).String() portProtoStr := key.PortProtoString() return fmt.Sprintf("%s: %d %s", trafficDirectionString, key.Identity, portProtoStr) } func (key *PolicyKey) New() bpf.MapKey { return &PolicyKey{} } // NewKey returns a PolicyKey representing the specified parameters in network // byte-order. func NewKey(id uint32, dport uint16, proto uint8, trafficDirection uint8) PolicyKey { // For now prefix length is derived from the proto and dport values // This will have to be exposed to the caller when port ranges are supported. prefixLen := StaticPrefixBits if proto != 0 { prefixLen += NexthdrBits if dport != 0 { prefixLen += DestPortBits } } return PolicyKey{ Prefixlen: prefixLen, Identity: id, TrafficDirection: trafficDirection, Nexthdr: proto, DestPortNetwork: byteorder.HostToNetwork16(dport), } } // newKey returns a PolicyKey representing the specified parameters in network // byte-order. func newKey(id uint32, dport uint16, proto u8proto.U8proto, trafficDirection trafficdirection.TrafficDirection) PolicyKey { return NewKey(id, dport, uint8(proto), trafficDirection.Uint8()) } // newEntry returns a PolicyEntry representing the specified parameters in // network byte-order. func newEntry(authType uint8, proxyPort uint16, flags policyEntryFlags) PolicyEntry { return PolicyEntry{ ProxyPortNetwork: byteorder.HostToNetwork16(proxyPort), Flags: flags, AuthType: authType, } } // newAllowEntry returns an allow PolicyEntry for the specified parameters in // network byte-order. // This is separated out to be used in unit testing. func newAllowEntry(key PolicyKey, authType uint8, proxyPort uint16) PolicyEntry { pef := getPolicyEntryFlags(policyEntryFlagParams{ IsWildcardNexthdr: key.Nexthdr == 0, IsWildcardDestPort: key.DestPortNetwork == 0, }) return newEntry(authType, proxyPort, pef) } // newDenyEntry returns a deny PolicyEntry for the specified parameters in // network byte-order. // This is separated out to be used in unit testing. func newDenyEntry(key PolicyKey) PolicyEntry { pef := getPolicyEntryFlags(policyEntryFlagParams{
IsDeny
identifier_name
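
The identifier filled in for this row is `IsDeny`, which simply tests one bit of the entry's flags; `getPolicyEntryFlags` in the same row composes those bits from booleans. A minimal sketch of that flag handling; the concrete bit values below are assumptions for illustration, since the real `policyFlag*` constants are defined outside the excerpt:

```go
package main

import "fmt"

type policyEntryFlags uint8

// Assumed bit assignments; the actual values live elsewhere in the package.
const (
	policyFlagDeny policyEntryFlags = 1 << iota
	policyFlagWildcardNexthdr
	policyFlagWildcardDestPort
)

// is reports whether all bits of pf are set, matching the Flags.is(...)
// call used by IsDeny in the row above.
func (f policyEntryFlags) is(pf policyEntryFlags) bool {
	return f&pf == pf
}

// getPolicyEntryFlags composes the flag bits from boolean parameters,
// as the excerpt does with its params struct.
func getPolicyEntryFlags(isDeny, wildProto, wildPort bool) policyEntryFlags {
	var flags policyEntryFlags
	if isDeny {
		flags |= policyFlagDeny
	}
	if wildProto {
		flags |= policyFlagWildcardNexthdr
	}
	if wildPort {
		flags |= policyFlagWildcardDestPort
	}
	return flags
}

func main() {
	f := getPolicyEntryFlags(true, false, true)
	fmt.Println(f.is(policyFlagDeny))            // true
	fmt.Println(f.is(policyFlagWildcardNexthdr)) // false
}
```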
policymap.go
limit of entries in the per endpoint policy // table ie the maximum number of peer identities that the endpoint could // send/receive traffic to/from.. It is set by InitMapInfo(), but unit // tests use the initial value below. // The default value of this upper limit is 16384. MaxEntries = 16384 ) type PolicyMap struct { *bpf.Map } func (pe PolicyEntry) IsDeny() bool { return pe.Flags.is(policyFlagDeny) } func (pe *PolicyEntry) String() string { return fmt.Sprintf("%d %d %d", pe.GetProxyPort(), pe.Packets, pe.Bytes) } func (pe *PolicyEntry) New() bpf.MapValue { return &PolicyEntry{} } // PolicyKey represents a key in the BPF policy map for an endpoint. It must // match the layout of policy_key in bpf/lib/common.h. type PolicyKey struct { Prefixlen uint32 `align:"lpm_key"` Identity uint32 `align:"sec_label"` TrafficDirection uint8 `align:"egress"` Nexthdr uint8 `align:"protocol"` DestPortNetwork uint16 `align:"dport"` // In network byte-order } // GetDestPort returns the DestPortNetwork in host byte order func (k *PolicyKey) GetDestPort() uint16 { return byteorder.NetworkToHost16(k.DestPortNetwork) } const ( sizeofPolicyKey = int(unsafe.Sizeof(PolicyKey{})) sizeofPrefixlen = int(unsafe.Sizeof(PolicyKey{}.Prefixlen)) sizeofNexthdr = int(unsafe.Sizeof(PolicyKey{}.Nexthdr)) sizeofDestPort = int(unsafe.Sizeof(PolicyKey{}.DestPortNetwork)) NexthdrBits = uint32(sizeofNexthdr) * 8 DestPortBits = uint32(sizeofDestPort) * 8 FullPrefixBits = NexthdrBits + DestPortBits StaticPrefixBits = uint32(sizeofPolicyKey-sizeofPrefixlen)*8 - FullPrefixBits ) // PolicyEntry represents an entry in the BPF policy map for an endpoint. It must // match the layout of policy_entry in bpf/lib/common.h. type PolicyEntry struct { ProxyPortNetwork uint16 `align:"proxy_port"` // In network byte-order Flags policyEntryFlags `align:"deny"` AuthType uint8 `align:"auth_type"` Pad1 uint16 `align:"pad1"` Pad2 uint16 `align:"pad2"` Packets uint64 `align:"packets"` Bytes uint64 `align:"bytes"` } // GetProxyPort returns the ProxyPortNetwork in host byte order func (pe *PolicyEntry) GetProxyPort() uint16 { return byteorder.NetworkToHost16(pe.ProxyPortNetwork) } type policyEntryFlagParams struct { IsDeny bool IsWildcardNexthdr bool IsWildcardDestPort bool } // getPolicyEntryFlags returns a policyEntryFlags from the policyEntryFlagParams. func getPolicyEntryFlags(p policyEntryFlagParams) policyEntryFlags { var flags policyEntryFlags if p.IsDeny { flags |= policyFlagDeny } if p.IsWildcardNexthdr { flags |= policyFlagWildcardNexthdr } if p.IsWildcardDestPort { flags |= policyFlagWildcardDestPort } return flags } // CallKey is the index into the prog array map. type CallKey struct { index uint32 } // CallValue is the program ID in the prog array map. type CallValue struct { progID uint32 } // String converts the key into a human readable string format. func (k *CallKey) String() string
func (k *CallKey) New() bpf.MapKey { return &CallKey{} } // String converts the value into a human readable string format. func (v *CallValue) String() string { return strconv.FormatUint(uint64(v.progID), 10) } func (v *CallValue) New() bpf.MapValue { return &CallValue{} } func (pe *PolicyEntry) Add(oPe PolicyEntry) { pe.Packets += oPe.Packets pe.Bytes += oPe.Bytes } type PolicyEntryDump struct { PolicyEntry Key PolicyKey } // PolicyEntriesDump is a wrapper for a slice of PolicyEntryDump type PolicyEntriesDump []PolicyEntryDump // String returns a string representation of PolicyEntriesDump func (p PolicyEntriesDump) String() string { var sb strings.Builder for _, entry := range p { sb.WriteString(fmt.Sprintf("%20s: %s\n", entry.Key.String(), entry.PolicyEntry.String())) } return sb.String() } // Less is a function used to sort PolicyEntriesDump by Policy Type // (Deny / Allow), TrafficDirection (Ingress / Egress) and Identity // (ascending order). func (p PolicyEntriesDump) Less(i, j int) bool { iDeny := p[i].PolicyEntry.IsDeny() jDeny := p[j].PolicyEntry.IsDeny() switch { case iDeny && !jDeny: return true case !iDeny && jDeny: return false } if p[i].Key.TrafficDirection < p[j].Key.TrafficDirection { return true } return p[i].Key.TrafficDirection <= p[j].Key.TrafficDirection && p[i].Key.Identity < p[j].Key.Identity } func (key *PolicyKey) PortProtoString() string { dport := key.GetDestPort() protoStr := u8proto.U8proto(key.Nexthdr).String() prefixLen := key.Prefixlen - StaticPrefixBits switch { case prefixLen == 0, prefixLen == NexthdrBits: // Protocol wildcarded or specified, wildcarded port return protoStr case prefixLen > NexthdrBits && prefixLen < FullPrefixBits: // Protocol specified, partially wildcarded port return fmt.Sprintf("0x%x/%d/%s", dport, prefixLen-NexthdrBits, protoStr) case prefixLen == FullPrefixBits: // Both protocol and port specified, nothing wildcarded return fmt.Sprintf("%d/%s", dport, protoStr) default: // Invalid prefix length return fmt.Sprintf("<INVALID PREFIX LENGTH: %d>", prefixLen) } } func (key *PolicyKey) String() string { trafficDirectionString := trafficdirection.TrafficDirection(key.TrafficDirection).String() portProtoStr := key.PortProtoString() return fmt.Sprintf("%s: %d %s", trafficDirectionString, key.Identity, portProtoStr) } func (key *PolicyKey) New() bpf.MapKey { return &PolicyKey{} } // NewKey returns a PolicyKey representing the specified parameters in network // byte-order. func NewKey(id uint32, dport uint16, proto uint8, trafficDirection uint8) PolicyKey { // For now prefix length is derived from the proto and dport values // This will have to be exposed to the caller when port ranges are supported. prefixLen := StaticPrefixBits if proto != 0 { prefixLen += NexthdrBits if dport != 0 { prefixLen += DestPortBits } } return PolicyKey{ Prefixlen: prefixLen, Identity: id, TrafficDirection: trafficDirection, Nexthdr: proto, DestPortNetwork: byteorder.HostToNetwork16(dport), } } // newKey returns a PolicyKey representing the specified parameters in network // byte-order. func newKey(id uint32, dport uint16, proto u8proto.U8proto, trafficDirection trafficdirection.TrafficDirection) PolicyKey { return NewKey(id, dport, uint8(proto), trafficDirection.Uint8()) } // newEntry returns a PolicyEntry representing the specified parameters in // network byte-order. 
func newEntry(authType uint8, proxyPort uint16, flags policyEntryFlags) PolicyEntry { return PolicyEntry{ ProxyPortNetwork: byteorder.HostToNetwork16(proxyPort), Flags: flags, AuthType: authType, } } // newAllowEntry returns an allow PolicyEntry for the specified parameters in // network byte-order. // This is separated out to be used in unit testing. func newAllowEntry(key PolicyKey, authType uint8, proxyPort uint16) PolicyEntry { pef := getPolicyEntryFlags(policyEntryFlagParams{ IsWildcardNexthdr: key.Nexthdr == 0, IsWildcardDestPort: key.DestPortNetwork == 0, }) return newEntry(authType, proxyPort, pef) } // newDenyEntry returns a deny PolicyEntry for the specified parameters in // network byte-order. // This is separated out to be used in unit testing. func newDenyEntry(key PolicyKey) PolicyEntry { pef := getPolicyEntryFlags(policyEntryFlagParams{
{ return strconv.FormatUint(uint64(k.index), 10) }
identifier_body
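
The fill for this row is just the body of `CallKey.String`. More instructive is the byte-order handling around it: `DestPortNetwork` and `ProxyPortNetwork` are stored in network byte order and converted with `NetworkToHost16`/`HostToNetwork16`. A sketch of what such a 16-bit conversion amounts to, using `encoding/binary` (Go 1.21's `binary.NativeEndian`) as a stand-in for the package's own `byteorder` helpers, whose implementation is not shown in the excerpt:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// hostToNetwork16 writes v as big-endian (network order) bytes and
// reinterprets them in native order; networkToHost16 is the inverse.
// On a little-endian machine this swaps the two bytes; on a big-endian
// machine it is the identity.
func hostToNetwork16(v uint16) uint16 {
	var b [2]byte
	binary.BigEndian.PutUint16(b[:], v)
	return binary.NativeEndian.Uint16(b[:])
}

func networkToHost16(v uint16) uint16 {
	var b [2]byte
	binary.NativeEndian.PutUint16(b[:], v)
	return binary.BigEndian.Uint16(b[:])
}

func main() {
	port := uint16(80)
	n := hostToNetwork16(port)
	fmt.Printf("host %d -> network 0x%04x -> host %d\n", port, n, networkToHost16(n))
}
```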
policymap.go
else { str = append(str, "Allow") } if pef.is(policyFlagWildcardNexthdr) { str = append(str, "WildcardProtocol") } if pef.is(policyFlagWildcardDestPort) { str = append(str, "WildcardPort") } return strings.Join(str, ", ") } var ( // MaxEntries is the upper limit of entries in the per endpoint policy // table ie the maximum number of peer identities that the endpoint could // send/receive traffic to/from.. It is set by InitMapInfo(), but unit // tests use the initial value below. // The default value of this upper limit is 16384. MaxEntries = 16384 ) type PolicyMap struct { *bpf.Map } func (pe PolicyEntry) IsDeny() bool { return pe.Flags.is(policyFlagDeny) } func (pe *PolicyEntry) String() string { return fmt.Sprintf("%d %d %d", pe.GetProxyPort(), pe.Packets, pe.Bytes) } func (pe *PolicyEntry) New() bpf.MapValue { return &PolicyEntry{} } // PolicyKey represents a key in the BPF policy map for an endpoint. It must // match the layout of policy_key in bpf/lib/common.h. type PolicyKey struct { Prefixlen uint32 `align:"lpm_key"` Identity uint32 `align:"sec_label"` TrafficDirection uint8 `align:"egress"` Nexthdr uint8 `align:"protocol"` DestPortNetwork uint16 `align:"dport"` // In network byte-order } // GetDestPort returns the DestPortNetwork in host byte order func (k *PolicyKey) GetDestPort() uint16 { return byteorder.NetworkToHost16(k.DestPortNetwork) } const ( sizeofPolicyKey = int(unsafe.Sizeof(PolicyKey{})) sizeofPrefixlen = int(unsafe.Sizeof(PolicyKey{}.Prefixlen)) sizeofNexthdr = int(unsafe.Sizeof(PolicyKey{}.Nexthdr)) sizeofDestPort = int(unsafe.Sizeof(PolicyKey{}.DestPortNetwork)) NexthdrBits = uint32(sizeofNexthdr) * 8 DestPortBits = uint32(sizeofDestPort) * 8 FullPrefixBits = NexthdrBits + DestPortBits StaticPrefixBits = uint32(sizeofPolicyKey-sizeofPrefixlen)*8 - FullPrefixBits ) // PolicyEntry represents an entry in the BPF policy map for an endpoint. It must // match the layout of policy_entry in bpf/lib/common.h. type PolicyEntry struct { ProxyPortNetwork uint16 `align:"proxy_port"` // In network byte-order Flags policyEntryFlags `align:"deny"` AuthType uint8 `align:"auth_type"` Pad1 uint16 `align:"pad1"` Pad2 uint16 `align:"pad2"` Packets uint64 `align:"packets"` Bytes uint64 `align:"bytes"` } // GetProxyPort returns the ProxyPortNetwork in host byte order func (pe *PolicyEntry) GetProxyPort() uint16 { return byteorder.NetworkToHost16(pe.ProxyPortNetwork) } type policyEntryFlagParams struct { IsDeny bool IsWildcardNexthdr bool IsWildcardDestPort bool } // getPolicyEntryFlags returns a policyEntryFlags from the policyEntryFlagParams. func getPolicyEntryFlags(p policyEntryFlagParams) policyEntryFlags { var flags policyEntryFlags if p.IsDeny { flags |= policyFlagDeny } if p.IsWildcardNexthdr { flags |= policyFlagWildcardNexthdr } if p.IsWildcardDestPort { flags |= policyFlagWildcardDestPort } return flags } // CallKey is the index into the prog array map. type CallKey struct { index uint32 } // CallValue is the program ID in the prog array map. type CallValue struct { progID uint32 } // String converts the key into a human readable string format. func (k *CallKey) String() string { return strconv.FormatUint(uint64(k.index), 10) } func (k *CallKey) New() bpf.MapKey { return &CallKey{} } // String converts the value into a human readable string format. 
func (v *CallValue) String() string { return strconv.FormatUint(uint64(v.progID), 10) } func (v *CallValue) New() bpf.MapValue { return &CallValue{} } func (pe *PolicyEntry) Add(oPe PolicyEntry) { pe.Packets += oPe.Packets pe.Bytes += oPe.Bytes } type PolicyEntryDump struct { PolicyEntry Key PolicyKey } // PolicyEntriesDump is a wrapper for a slice of PolicyEntryDump type PolicyEntriesDump []PolicyEntryDump // String returns a string representation of PolicyEntriesDump func (p PolicyEntriesDump) String() string { var sb strings.Builder for _, entry := range p { sb.WriteString(fmt.Sprintf("%20s: %s\n", entry.Key.String(), entry.PolicyEntry.String())) } return sb.String() } // Less is a function used to sort PolicyEntriesDump by Policy Type // (Deny / Allow), TrafficDirection (Ingress / Egress) and Identity // (ascending order). func (p PolicyEntriesDump) Less(i, j int) bool { iDeny := p[i].PolicyEntry.IsDeny() jDeny := p[j].PolicyEntry.IsDeny() switch { case iDeny && !jDeny: return true case !iDeny && jDeny: return false } if p[i].Key.TrafficDirection < p[j].Key.TrafficDirection { return true } return p[i].Key.TrafficDirection <= p[j].Key.TrafficDirection && p[i].Key.Identity < p[j].Key.Identity } func (key *PolicyKey) PortProtoString() string { dport := key.GetDestPort() protoStr := u8proto.U8proto(key.Nexthdr).String() prefixLen := key.Prefixlen - StaticPrefixBits switch { case prefixLen == 0, prefixLen == NexthdrBits: // Protocol wildcarded or specified, wildcarded port return protoStr case prefixLen > NexthdrBits && prefixLen < FullPrefixBits: // Protocol specified, partially wildcarded port return fmt.Sprintf("0x%x/%d/%s", dport, prefixLen-NexthdrBits, protoStr) case prefixLen == FullPrefixBits: // Both protocol and port specified, nothing wildcarded return fmt.Sprintf("%d/%s", dport, protoStr) default: // Invalid prefix length return fmt.Sprintf("<INVALID PREFIX LENGTH: %d>", prefixLen) } } func (key *PolicyKey) String() string { trafficDirectionString := trafficdirection.TrafficDirection(key.TrafficDirection).String() portProtoStr := key.PortProtoString() return fmt.Sprintf("%s: %d %s", trafficDirectionString, key.Identity, portProtoStr) } func (key *PolicyKey) New() bpf.MapKey { return &PolicyKey{} } // NewKey returns a PolicyKey representing the specified parameters in network // byte-order. func NewKey(id uint32, dport uint16, proto uint8, trafficDirection uint8) PolicyKey { // For now prefix length is derived from the proto and dport values // This will have to be exposed to the caller when port ranges are supported. prefixLen := StaticPrefixBits if proto != 0 { prefixLen += NexthdrBits if dport != 0 { prefixLen += DestPortBits } } return PolicyKey{ Prefixlen: prefixLen, Identity: id, TrafficDirection: trafficDirection, Nexthdr: proto, DestPortNetwork: byteorder.HostToNetwork16(dport), } } // newKey returns a PolicyKey representing the specified parameters in network // byte-order. func newKey(id uint32, dport uint16, proto u8proto.U8proto, trafficDirection trafficdirection.TrafficDirection) PolicyKey { return NewKey(id, dport, uint8(proto), trafficDirection.Uint8()) } // newEntry returns a PolicyEntry representing the specified parameters in // network byte-order. func newEntry(authType uint8, proxyPort uint16, flags policyEntryFlags) PolicyEntry { return PolicyEntry{ ProxyPortNetwork: byteorder.HostToNetwork16(proxyPort), Flags: flags, AuthType: authType, } } // newAllowEntry returns an allow PolicyEntry for the specified parameters in // network byte-order. 
// This is separated out to be used in unit testing. func newAllowEntry(key PolicyKey, authType uint8, proxyPort uint16) PolicyEntry { pef := getPolicyEntryFlags(policyEntryFlagParams{ IsWildcardNexthdr: key.Nexthdr == 0,
{ str = append(str, "Deny") }
conditional_block
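
The conditional filled in above belongs to the flags' String method; the same row also documents `PolicyEntriesDump.Less`, which orders dump entries deny-first, then by traffic direction, then by identity. A small sketch of that documented ordering with `sort.Slice` over a simplified entry type (the struct below is a stand-in, not the real `PolicyEntryDump`):

```go
package main

import (
	"fmt"
	"sort"
)

// entry is a simplified stand-in for PolicyEntryDump: only the fields the
// comparator looks at.
type entry struct {
	Deny             bool
	TrafficDirection uint8 // e.g. 0 = ingress, 1 = egress
	Identity         uint32
}

// less reproduces the documented order: deny entries first, then ascending
// traffic direction, then ascending identity.
func less(a, b entry) bool {
	if a.Deny != b.Deny {
		return a.Deny
	}
	if a.TrafficDirection != b.TrafficDirection {
		return a.TrafficDirection < b.TrafficDirection
	}
	return a.Identity < b.Identity
}

func main() {
	entries := []entry{
		{Deny: false, TrafficDirection: 1, Identity: 7},
		{Deny: true, TrafficDirection: 0, Identity: 42},
		{Deny: false, TrafficDirection: 0, Identity: 3},
	}
	sort.Slice(entries, func(i, j int) bool { return less(entries[i], entries[j]) })
	fmt.Println(entries) // deny entry first, then ingress allow, then egress allow
}
```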
output.rs
} #[derive(Debug, Clone, PartialEq, Eq)] pub enum AsyncClass { Stopped, CmdParamChanged, LibraryLoaded, Thread(ThreadEvent), BreakPoint(BreakPointEvent), Other(String), //? } #[derive(Debug)] pub enum AsyncKind { Exec, Status, Notify, } #[derive(Debug)] pub enum StreamKind { Console, Target, Log, } #[derive(Debug)] pub struct ResultRecord { pub(crate) token: Option<Token>, pub class: ResultClass, pub results: Object, } #[derive(Debug)] pub enum OutOfBandRecord { AsyncRecord { token: Option<Token>, kind: AsyncKind, class: AsyncClass, results: Object, }, StreamRecord { kind: StreamKind, data: String, }, } #[derive(Debug)] enum Output { Result(ResultRecord), OutOfBand(OutOfBandRecord), GDBLine, SomethingElse(String), /* Debug */ } use crate::OutOfBandRecordSink; use nom::IResult; use std::io::{BufRead, BufReader, Read}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; use std::sync::Arc; pub fn process_output<T: Read, S: OutOfBandRecordSink>( output: T, result_pipe: Sender<ResultRecord>, out_of_band_pipe: S, is_running: Arc<AtomicBool>, ) { let mut reader = BufReader::new(output); loop { let mut buffer = String::new(); match reader.read_line(&mut buffer) { Ok(0) => { return; } Ok(_) => { info!("{}", buffer.trim_end()); let parse_result = match Output::parse(&buffer) { Ok(r) => r, Err(e) => { error!("PARSING ERROR: {}", e); continue; } }; match parse_result { Output::Result(record) => { match record.class { ResultClass::Running => is_running.store(true, Ordering::SeqCst), //Apparently sometimes gdb first claims to be running, only to then stop again (without notifying the user)... ResultClass::Error => is_running.store(false, Ordering::SeqCst), _ => {} } result_pipe.send(record).expect("send result to pipe"); } Output::OutOfBand(record) => { if let OutOfBandRecord::AsyncRecord { class: AsyncClass::Stopped, .. } = record { is_running.store(false, Ordering::SeqCst); } out_of_band_pipe.send(record); } Output::GDBLine => {} //Output::SomethingElse(_) => { /*println!("SOMETHING ELSE: {}", str);*/ } Output::SomethingElse(text) => { out_of_band_pipe.send(OutOfBandRecord::StreamRecord { kind: StreamKind::Target, data: text, }); } } } Err(e) => { panic!("{}", e); } } } } impl Output { fn parse(line: &str) -> Result<Self, String> { match output(line.as_bytes()) { IResult::Done(_, c) => Ok(c), IResult::Incomplete(e) => Err(format!("parsing line: incomplete {:?}", e)), //Is it okay to read the next bytes then? IResult::Error(e) => Err(format!("parse error: {}", e)), } } } named!( result_class<ResultClass>, alt!( value!(ResultClass::Done, tag!("done")) | value!(ResultClass::Running, tag!("running")) | value!(ResultClass::Connected, tag!("connected")) | value!(ResultClass::Error, tag!("error"))
let byte = input[0]; if byte == b'\"' { IResult::Error(::nom::ErrorKind::Custom(1)) //what are we supposed to return here?? } else { IResult::Done(&input[1..], byte) } } named!( escaped_character<u8>, alt!( value!(b'\n', tag!("\\n")) | value!(b'\r', tag!("\\r")) | value!(b'\t', tag!("\\t")) | value!(b'\"', tag!("\\\"")) | value!(b'\\', tag!("\\\\")) | non_quote_byte ) ); named!( string<String>, do_parse!( tag!("\"") >> s: many0!(escaped_character) >> tag!("\"") >> (String::from_utf8_lossy(s.as_slice()).into_owned()) ) ); fn to_map(v: Vec<(String, JsonValue)>) -> Object { //TODO: fix this and parse the map directly let mut obj = Object::new(); for (name, value) in v { debug_assert!(obj.get(&name).is_none(), "Duplicate object member!"); obj.insert(&name, value); } obj } fn to_list(v: Vec<(String, JsonValue)>) -> Vec<JsonValue> { //The gdbmi-grammar is really weird... //TODO: fix this and parse the map directly v.into_iter().map(|(_, value)| value).collect() } named!( value<JsonValue>, alt!( map!(string, JsonValue::String) | do_parse!( tag!("{") >> results: separated_list!(tag!(","), result) >> tag!("}") >> (JsonValue::Object(to_map(results))) ) | do_parse!( tag!("[") >> values: separated_list!(tag!(","), value) >> tag!("]") >> (JsonValue::Array(values)) ) | do_parse!( tag!("[") >> results: separated_list!(tag!(","), result) >> tag!("]") >> (JsonValue::Array(to_list(results))) ) ) ); // Don't even ask... Against its spec, gdb(mi) sometimes emits multiple values for a single tuple // in a comma separated list. named!( buggy_gdb_list_in_result<JsonValue>, map!(separated_list!(tag!(","), value), |values: Vec< JsonValue, >| { if values.len() == 1 { values .into_iter() .next() .expect("len == 1 => first element is guaranteed") } else { JsonValue::Array(values) } }) ); named!( result<(String, JsonValue)>, do_parse!( var: is_not!("={}" /* Do not allow =, {, nor } */) >> tag!("=") >> val: buggy_gdb_list_in_result >> (String::from_utf8_lossy(var).into_owned(), val) ) ); named!( token<Token>, map!(::nom::digit, |values: &[u8]| values .iter() .fold(0, |acc, &ascii_digit| 10 * acc + (ascii_digit - b'0') as u64)) ); named!( result_record<Output>, do_parse!( t: opt!(token) >> tag!("^") >> c: result_class >> res: many0!(do_parse!(tag!(",") >> r: result >> (r))) >> (Output::Result(ResultRecord { token: t, class: c, results: to_map(res), })) ) ); named!( async_kind<AsyncKind>, alt!( value!(AsyncKind::Exec, tag!("*")) | value!(AsyncKind::Status, tag!("+")) | value!(AsyncKind::Notify, tag!("=")) ) ); named!( async_class<AsyncClass>, alt!( value!(AsyncClass::Stopped, tag!("stopped")) | value!( AsyncClass::Thread(ThreadEvent::Created), tag!("thread-created") ) | value!( AsyncClass::Thread(ThreadEvent::GroupStarted), tag!("thread-group-started") ) | value!( AsyncClass::Thread(ThreadEvent::Exited), tag!("thread-exited") ) | value!( AsyncClass::Thread(ThreadEvent::GroupExited), tag!("thread-group-exited") ) | value!( AsyncClass::Thread(ThreadEvent::Selected), tag!("thread-selected") ) | value!(AsyncClass::CmdParamChanged, tag!("cmd-param-changed")) | value!(AsyncClass::LibraryLoaded, tag!("library-loaded")) | value!( AsyncClass::BreakPoint(BreakPointEvent::Created), tag!("breakpoint-created") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Deleted), tag!("breakpoint-deleted") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Modified), tag!("breakpoint-modified") ) | map!(is_not!(","), |msg| AsyncClass::Other( String::from_utf8_lossy(msg).into_owned() )) ) ); named!( async_record<OutOfBandRecord>, do_parse!( 
t: opt!(token) >> kind: async_kind >> class: async_class >> results: many0
| value!(ResultClass::Exit, tag!("exit")) ) ); fn non_quote_byte(input: &[u8]) -> IResult<&[u8], u8> {
random_line_split
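
The Rust row above parses GDB/MI output with nom; its `token` parser folds a run of ASCII digits into a `u64` with `10*acc + (digit - b'0')`. The same accumulation, sketched in Go purely for illustration (the source is Rust, so this is an equivalent, not the original code):

```go
package main

import "fmt"

// parseToken consumes a leading run of ASCII digits from input and folds it
// into a uint64, the same 10*acc + (c - '0') accumulation used by the nom
// parser in the row above. It returns the value, the remaining bytes, and
// whether any digit was consumed.
func parseToken(input []byte) (uint64, []byte, bool) {
	var acc uint64
	i := 0
	for i < len(input) && input[i] >= '0' && input[i] <= '9' {
		acc = 10*acc + uint64(input[i]-'0')
		i++
	}
	return acc, input[i:], i > 0
}

func main() {
	tok, rest, ok := parseToken([]byte(`123^done,value="x"`))
	fmt.Println(tok, string(rest), ok) // 123 ^done,value="x" true
}
```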
output.rs
#[derive(Debug, Clone, PartialEq, Eq)] pub enum AsyncClass { Stopped, CmdParamChanged, LibraryLoaded, Thread(ThreadEvent), BreakPoint(BreakPointEvent), Other(String), //? } #[derive(Debug)] pub enum AsyncKind { Exec, Status, Notify, } #[derive(Debug)] pub enum StreamKind { Console, Target, Log, } #[derive(Debug)] pub struct ResultRecord { pub(crate) token: Option<Token>, pub class: ResultClass, pub results: Object, } #[derive(Debug)] pub enum OutOfBandRecord { AsyncRecord { token: Option<Token>, kind: AsyncKind, class: AsyncClass, results: Object, }, StreamRecord { kind: StreamKind, data: String, }, } #[derive(Debug)] enum Output { Result(ResultRecord), OutOfBand(OutOfBandRecord), GDBLine, SomethingElse(String), /* Debug */ } use crate::OutOfBandRecordSink; use nom::IResult; use std::io::{BufRead, BufReader, Read}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; use std::sync::Arc; pub fn process_output<T: Read, S: OutOfBandRecordSink>( output: T, result_pipe: Sender<ResultRecord>, out_of_band_pipe: S, is_running: Arc<AtomicBool>, ) { let mut reader = BufReader::new(output); loop { let mut buffer = String::new(); match reader.read_line(&mut buffer) { Ok(0) => { return; } Ok(_) => { info!("{}", buffer.trim_end()); let parse_result = match Output::parse(&buffer) { Ok(r) => r, Err(e) => { error!("PARSING ERROR: {}", e); continue; } }; match parse_result { Output::Result(record) => { match record.class { ResultClass::Running => is_running.store(true, Ordering::SeqCst), //Apparently sometimes gdb first claims to be running, only to then stop again (without notifying the user)... ResultClass::Error => is_running.store(false, Ordering::SeqCst), _ =>
} result_pipe.send(record).expect("send result to pipe"); } Output::OutOfBand(record) => { if let OutOfBandRecord::AsyncRecord { class: AsyncClass::Stopped, .. } = record { is_running.store(false, Ordering::SeqCst); } out_of_band_pipe.send(record); } Output::GDBLine => {} //Output::SomethingElse(_) => { /*println!("SOMETHING ELSE: {}", str);*/ } Output::SomethingElse(text) => { out_of_band_pipe.send(OutOfBandRecord::StreamRecord { kind: StreamKind::Target, data: text, }); } } } Err(e) => { panic!("{}", e); } } } } impl Output { fn parse(line: &str) -> Result<Self, String> { match output(line.as_bytes()) { IResult::Done(_, c) => Ok(c), IResult::Incomplete(e) => Err(format!("parsing line: incomplete {:?}", e)), //Is it okay to read the next bytes then? IResult::Error(e) => Err(format!("parse error: {}", e)), } } } named!( result_class<ResultClass>, alt!( value!(ResultClass::Done, tag!("done")) | value!(ResultClass::Running, tag!("running")) | value!(ResultClass::Connected, tag!("connected")) | value!(ResultClass::Error, tag!("error")) | value!(ResultClass::Exit, tag!("exit")) ) ); fn non_quote_byte(input: &[u8]) -> IResult<&[u8], u8> { let byte = input[0]; if byte == b'\"' { IResult::Error(::nom::ErrorKind::Custom(1)) //what are we supposed to return here?? } else { IResult::Done(&input[1..], byte) } } named!( escaped_character<u8>, alt!( value!(b'\n', tag!("\\n")) | value!(b'\r', tag!("\\r")) | value!(b'\t', tag!("\\t")) | value!(b'\"', tag!("\\\"")) | value!(b'\\', tag!("\\\\")) | non_quote_byte ) ); named!( string<String>, do_parse!( tag!("\"") >> s: many0!(escaped_character) >> tag!("\"") >> (String::from_utf8_lossy(s.as_slice()).into_owned()) ) ); fn to_map(v: Vec<(String, JsonValue)>) -> Object { //TODO: fix this and parse the map directly let mut obj = Object::new(); for (name, value) in v { debug_assert!(obj.get(&name).is_none(), "Duplicate object member!"); obj.insert(&name, value); } obj } fn to_list(v: Vec<(String, JsonValue)>) -> Vec<JsonValue> { //The gdbmi-grammar is really weird... //TODO: fix this and parse the map directly v.into_iter().map(|(_, value)| value).collect() } named!( value<JsonValue>, alt!( map!(string, JsonValue::String) | do_parse!( tag!("{") >> results: separated_list!(tag!(","), result) >> tag!("}") >> (JsonValue::Object(to_map(results))) ) | do_parse!( tag!("[") >> values: separated_list!(tag!(","), value) >> tag!("]") >> (JsonValue::Array(values)) ) | do_parse!( tag!("[") >> results: separated_list!(tag!(","), result) >> tag!("]") >> (JsonValue::Array(to_list(results))) ) ) ); // Don't even ask... Against its spec, gdb(mi) sometimes emits multiple values for a single tuple // in a comma separated list. 
named!( buggy_gdb_list_in_result<JsonValue>, map!(separated_list!(tag!(","), value), |values: Vec< JsonValue, >| { if values.len() == 1 { values .into_iter() .next() .expect("len == 1 => first element is guaranteed") } else { JsonValue::Array(values) } }) ); named!( result<(String, JsonValue)>, do_parse!( var: is_not!("={}" /* Do not allow =, {, nor } */) >> tag!("=") >> val: buggy_gdb_list_in_result >> (String::from_utf8_lossy(var).into_owned(), val) ) ); named!( token<Token>, map!(::nom::digit, |values: &[u8]| values .iter() .fold(0, |acc, &ascii_digit| 10 * acc + (ascii_digit - b'0') as u64)) ); named!( result_record<Output>, do_parse!( t: opt!(token) >> tag!("^") >> c: result_class >> res: many0!(do_parse!(tag!(",") >> r: result >> (r))) >> (Output::Result(ResultRecord { token: t, class: c, results: to_map(res), })) ) ); named!( async_kind<AsyncKind>, alt!( value!(AsyncKind::Exec, tag!("*")) | value!(AsyncKind::Status, tag!("+")) | value!(AsyncKind::Notify, tag!("=")) ) ); named!( async_class<AsyncClass>, alt!( value!(AsyncClass::Stopped, tag!("stopped")) | value!( AsyncClass::Thread(ThreadEvent::Created), tag!("thread-created") ) | value!( AsyncClass::Thread(ThreadEvent::GroupStarted), tag!("thread-group-started") ) | value!( AsyncClass::Thread(ThreadEvent::Exited), tag!("thread-exited") ) | value!( AsyncClass::Thread(ThreadEvent::GroupExited), tag!("thread-group-exited") ) | value!( AsyncClass::Thread(ThreadEvent::Selected), tag!("thread-selected") ) | value!(AsyncClass::CmdParamChanged, tag!("cmd-param-changed")) | value!(AsyncClass::LibraryLoaded, tag!("library-loaded")) | value!( AsyncClass::BreakPoint(BreakPointEvent::Created), tag!("breakpoint-created") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Deleted), tag!("breakpoint-deleted") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Modified), tag!("breakpoint-modified") ) | map!(is_not!(","), |msg| AsyncClass::Other( String::from_utf8_lossy(msg).into_owned() )) ) ); named!( async_record<OutOfBandRecord>, do_parse!( t: opt!(token) >> kind: async_kind >> class: async_class >> results: many
{}
conditional_block
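
The fill here is only the empty `_ => {}` match arm, but the surrounding `process_output` loop is the interesting part: it reads lines, parses each one into a result record, an out-of-band record, a GDB prompt line (the likely meaning of `Output::GDBLine`), or plain text, and flips an atomic running flag on `running`, `error` and `stopped`. A rough Go sketch of that classification by leading sigil, simplified to string prefixes; the real parser is the nom grammar in the excerpt:

```go
package main

import (
	"fmt"
	"strings"
	"sync/atomic"
)

type kind int

const (
	resultRecord  kind = iota // optional numeric token, then '^'
	asyncRecord               // '*' exec, '+' status, '=' notify
	gdbPrompt                 // "(gdb)"
	somethingElse             // anything else, forwarded as target output
)

// classify inspects a single GDB/MI output line roughly the way
// process_output does, after stripping an optional numeric token prefix,
// and updates running for the cases handled in the excerpt:
// ^running, ^error and *stopped.
func classify(line string, running *atomic.Bool) kind {
	rest := strings.TrimLeft(line, "0123456789")
	switch {
	case strings.HasPrefix(rest, "^"):
		if strings.HasPrefix(rest, "^running") {
			running.Store(true)
		} else if strings.HasPrefix(rest, "^error") {
			running.Store(false)
		}
		return resultRecord
	case strings.HasPrefix(rest, "*") || strings.HasPrefix(rest, "+") || strings.HasPrefix(rest, "="):
		if strings.HasPrefix(rest, "*stopped") {
			running.Store(false)
		}
		return asyncRecord
	case strings.HasPrefix(rest, "(gdb)"):
		return gdbPrompt
	default:
		return somethingElse
	}
}

func main() {
	var running atomic.Bool
	lines := []string{"123^running", `*stopped,reason="exited"`, "(gdb) ", "hello from the target"}
	for _, l := range lines {
		fmt.Println(l, "->", classify(l, &running), "running:", running.Load())
	}
}
```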
output.rs
} #[derive(Debug, Clone, PartialEq, Eq)] pub enum AsyncClass { Stopped, CmdParamChanged, LibraryLoaded, Thread(ThreadEvent), BreakPoint(BreakPointEvent), Other(String), //? } #[derive(Debug)] pub enum AsyncKind { Exec, Status, Notify, } #[derive(Debug)] pub enum StreamKind { Console, Target, Log, } #[derive(Debug)] pub struct ResultRecord { pub(crate) token: Option<Token>, pub class: ResultClass, pub results: Object, } #[derive(Debug)] pub enum OutOfBandRecord { AsyncRecord { token: Option<Token>, kind: AsyncKind, class: AsyncClass, results: Object, }, StreamRecord { kind: StreamKind, data: String, }, } #[derive(Debug)] enum Output { Result(ResultRecord), OutOfBand(OutOfBandRecord), GDBLine, SomethingElse(String), /* Debug */ } use crate::OutOfBandRecordSink; use nom::IResult; use std::io::{BufRead, BufReader, Read}; use std::sync::atomic::{AtomicBool, Ordering}; use std::sync::mpsc::Sender; use std::sync::Arc; pub fn process_output<T: Read, S: OutOfBandRecordSink>( output: T, result_pipe: Sender<ResultRecord>, out_of_band_pipe: S, is_running: Arc<AtomicBool>, ) { let mut reader = BufReader::new(output); loop { let mut buffer = String::new(); match reader.read_line(&mut buffer) { Ok(0) => { return; } Ok(_) => { info!("{}", buffer.trim_end()); let parse_result = match Output::parse(&buffer) { Ok(r) => r, Err(e) => { error!("PARSING ERROR: {}", e); continue; } }; match parse_result { Output::Result(record) => { match record.class { ResultClass::Running => is_running.store(true, Ordering::SeqCst), //Apparently sometimes gdb first claims to be running, only to then stop again (without notifying the user)... ResultClass::Error => is_running.store(false, Ordering::SeqCst), _ => {} } result_pipe.send(record).expect("send result to pipe"); } Output::OutOfBand(record) => { if let OutOfBandRecord::AsyncRecord { class: AsyncClass::Stopped, .. } = record { is_running.store(false, Ordering::SeqCst); } out_of_band_pipe.send(record); } Output::GDBLine => {} //Output::SomethingElse(_) => { /*println!("SOMETHING ELSE: {}", str);*/ } Output::SomethingElse(text) => { out_of_band_pipe.send(OutOfBandRecord::StreamRecord { kind: StreamKind::Target, data: text, }); } } } Err(e) => { panic!("{}", e); } } } } impl Output { fn
(line: &str) -> Result<Self, String> { match output(line.as_bytes()) { IResult::Done(_, c) => Ok(c), IResult::Incomplete(e) => Err(format!("parsing line: incomplete {:?}", e)), //Is it okay to read the next bytes then? IResult::Error(e) => Err(format!("parse error: {}", e)), } } } named!( result_class<ResultClass>, alt!( value!(ResultClass::Done, tag!("done")) | value!(ResultClass::Running, tag!("running")) | value!(ResultClass::Connected, tag!("connected")) | value!(ResultClass::Error, tag!("error")) | value!(ResultClass::Exit, tag!("exit")) ) ); fn non_quote_byte(input: &[u8]) -> IResult<&[u8], u8> { let byte = input[0]; if byte == b'\"' { IResult::Error(::nom::ErrorKind::Custom(1)) //what are we supposed to return here?? } else { IResult::Done(&input[1..], byte) } } named!( escaped_character<u8>, alt!( value!(b'\n', tag!("\\n")) | value!(b'\r', tag!("\\r")) | value!(b'\t', tag!("\\t")) | value!(b'\"', tag!("\\\"")) | value!(b'\\', tag!("\\\\")) | non_quote_byte ) ); named!( string<String>, do_parse!( tag!("\"") >> s: many0!(escaped_character) >> tag!("\"") >> (String::from_utf8_lossy(s.as_slice()).into_owned()) ) ); fn to_map(v: Vec<(String, JsonValue)>) -> Object { //TODO: fix this and parse the map directly let mut obj = Object::new(); for (name, value) in v { debug_assert!(obj.get(&name).is_none(), "Duplicate object member!"); obj.insert(&name, value); } obj } fn to_list(v: Vec<(String, JsonValue)>) -> Vec<JsonValue> { //The gdbmi-grammar is really weird... //TODO: fix this and parse the map directly v.into_iter().map(|(_, value)| value).collect() } named!( value<JsonValue>, alt!( map!(string, JsonValue::String) | do_parse!( tag!("{") >> results: separated_list!(tag!(","), result) >> tag!("}") >> (JsonValue::Object(to_map(results))) ) | do_parse!( tag!("[") >> values: separated_list!(tag!(","), value) >> tag!("]") >> (JsonValue::Array(values)) ) | do_parse!( tag!("[") >> results: separated_list!(tag!(","), result) >> tag!("]") >> (JsonValue::Array(to_list(results))) ) ) ); // Don't even ask... Against its spec, gdb(mi) sometimes emits multiple values for a single tuple // in a comma separated list. 
named!( buggy_gdb_list_in_result<JsonValue>, map!(separated_list!(tag!(","), value), |values: Vec< JsonValue, >| { if values.len() == 1 { values .into_iter() .next() .expect("len == 1 => first element is guaranteed") } else { JsonValue::Array(values) } }) ); named!( result<(String, JsonValue)>, do_parse!( var: is_not!("={}" /* Do not allow =, {, nor } */) >> tag!("=") >> val: buggy_gdb_list_in_result >> (String::from_utf8_lossy(var).into_owned(), val) ) ); named!( token<Token>, map!(::nom::digit, |values: &[u8]| values .iter() .fold(0, |acc, &ascii_digit| 10 * acc + (ascii_digit - b'0') as u64)) ); named!( result_record<Output>, do_parse!( t: opt!(token) >> tag!("^") >> c: result_class >> res: many0!(do_parse!(tag!(",") >> r: result >> (r))) >> (Output::Result(ResultRecord { token: t, class: c, results: to_map(res), })) ) ); named!( async_kind<AsyncKind>, alt!( value!(AsyncKind::Exec, tag!("*")) | value!(AsyncKind::Status, tag!("+")) | value!(AsyncKind::Notify, tag!("=")) ) ); named!( async_class<AsyncClass>, alt!( value!(AsyncClass::Stopped, tag!("stopped")) | value!( AsyncClass::Thread(ThreadEvent::Created), tag!("thread-created") ) | value!( AsyncClass::Thread(ThreadEvent::GroupStarted), tag!("thread-group-started") ) | value!( AsyncClass::Thread(ThreadEvent::Exited), tag!("thread-exited") ) | value!( AsyncClass::Thread(ThreadEvent::GroupExited), tag!("thread-group-exited") ) | value!( AsyncClass::Thread(ThreadEvent::Selected), tag!("thread-selected") ) | value!(AsyncClass::CmdParamChanged, tag!("cmd-param-changed")) | value!(AsyncClass::LibraryLoaded, tag!("library-loaded")) | value!( AsyncClass::BreakPoint(BreakPointEvent::Created), tag!("breakpoint-created") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Deleted), tag!("breakpoint-deleted") ) | value!( AsyncClass::BreakPoint(BreakPointEvent::Modified), tag!("breakpoint-modified") ) | map!(is_not!(","), |msg| AsyncClass::Other( String::from_utf8_lossy(msg).into_owned() )) ) ); named!( async_record<OutOfBandRecord>, do_parse!( t: opt!(token) >> kind: async_kind >> class: async_class >> results: many
parse
identifier_name
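
The identifier filled in above is `parse`; the grammar it drives includes the `string` parser, which reads a double-quoted value and decodes the escapes `\n`, `\r`, `\t`, `\"` and `\\`. A small Go equivalent of that unescaping, again only an illustrative port of the nom combinators rather than the original code:

```go
package main

import (
	"errors"
	"fmt"
)

// unquote decodes a GDB/MI double-quoted string with the escapes handled by
// the nom escaped_character parser in the row above. It returns the decoded
// value and the input remaining after the closing quote.
func unquote(input string) (string, string, error) {
	if len(input) == 0 || input[0] != '"' {
		return "", input, errors.New("expected opening quote")
	}
	var out []byte
	i := 1
	for i < len(input) {
		c := input[i]
		switch {
		case c == '"': // closing quote
			return string(out), input[i+1:], nil
		case c == '\\' && i+1 < len(input):
			switch input[i+1] {
			case 'n':
				out = append(out, '\n')
			case 'r':
				out = append(out, '\r')
			case 't':
				out = append(out, '\t')
			case '"':
				out = append(out, '"')
			case '\\':
				out = append(out, '\\')
			default: // unknown escape: keep it verbatim
				out = append(out, c, input[i+1])
			}
			i += 2
			continue
		default:
			out = append(out, c)
		}
		i++
	}
	return "", input, errors.New("unterminated string")
}

func main() {
	s, rest, err := unquote(`"a\tb\"c",next=1`)
	fmt.Printf("%q %q %v\n", s, rest, err) // "a\tb\"c" ",next=1" <nil>
}
```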
runAffEffModSweap.py
plitude in eesAmplitudes: for eesFrequency in eesFrequencies: filName = name+"_amp_"+str(eesAmplitude)+"_freq_"+str(eesFrequency) resultFile = gt.find("*"+filName+".p",pathToResults) if not resultFile: returnCode = None while not returnCode==0: program = ['python','scripts/computeAfferentsEfferentsModulation.py', str(eesFrequency),str(eesAmplitude),species,nrnStructureFile,name,"--simTime",str(simTime)] print " ".join(program) forwardSimulation = subprocess.Popen(program, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) returnCode = None while returnCode is None: message = forwardSimulation.stdout.readline().rstrip("\n").split() if message != None:print "\t\t"+" ".join(message)+"\t\t" returnCode = forwardSimulation.poll() if returnCode != 0: print "\t\t\t\t Error n: ",forwardSimulation.poll()," resetting simulation..." count+=1 if count/nSim-percLastPrint>=printPeriod: percLastPrint=count/nSim print str(round(count/nSim*100))+"% of simulations performed..." plot_stats(eesAmplitudes,eesFrequencies,simTime,name) def plot_stats(eesAmplitudes,eesFrequencies,simTime,name): populationFr = {} populationFr["MnS"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)])
nActiveCells = {} nActiveCells["MnS"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) # nActiveCells["MnFf"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) # nActiveCells["MnFr"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) nActiveCells["Iaf"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) # maxActiveCells= {"MnS":20,"MnFf":20,"MnFr":20,"MnS_MnFf_MnFr":60,"Iaf":60} maxActiveCells= {"MnS":169,"Iaf":60} for i,eesAmplitude in enumerate(eesAmplitudes): for j,eesFrequency in enumerate(eesFrequencies): filName = name+"_amp_"+str(eesAmplitude)+"_freq_"+str(eesFrequency) resultFile = gt.find("*"+filName+".p",pathToResults) if len(resultFile)>1: print "Warning: multiple result files found!!!" with open(resultFile[0], 'r') as pickle_file: _temp_populationFr = pickle.load(pickle_file) _temp_nActiveCells = pickle.load(pickle_file) for muscle in _temp_populationFr: for cellName in _temp_populationFr[muscle]: populationFr[cellName][i,j] = np.array(_temp_populationFr[muscle][cellName])/(float(simTime)/1000)/maxActiveCells[cellName] nActiveCells[cellName][i,j] = _temp_nActiveCells[muscle][cellName] # populationFr["MnS_MnFf_MnFr"] = (populationFr["MnS"]+populationFr["MnFf"]+populationFr["MnFr"])/3 # nActiveCells["MnS_MnFf_MnFr"] = nActiveCells["MnS"]+nActiveCells["MnFf"]+nActiveCells["MnFr"] maxFr = {} maxFr["Iaf"] = np.max(populationFr["Iaf"]) maxFr["MnS"] = np.max(populationFr["MnS"]) # maxFr["MnS"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnFf"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnFr"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnS_MnFf_MnFr"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) ax = [] sizeFactor = 3 fig=plt.figure(figsize=(3*sizeFactor,4*sizeFactor)) gs = gridspec.GridSpec(len(populationFr.keys()),2) gs.update(left=0.05, right=0.95, hspace=0.6, wspace=0.1) colorMap2 = plt.cm.YlGnBu colorMap = plt.cm.YlOrRd colorMap.set_bad(color="#20201f") colorMap2.set_bad(color="#20201f") # colorMap = cmaps.magma maxSpikes = np.max([np.max(populationFr[cellName]) for cellName in populationFr]) cellNames = ["Iaf","MnS"] for i,cellName in enumerate(cellNames): # Plot on number of spikes ax.append(plt.subplot(gs[i,0])) data = np.ma.masked_where(populationFr[cellName]==0,populationFr[cellName]) im = ax[-1].imshow(data, cmap=colorMap, interpolation='nearest',origin="lower",vmin = 0,vmax=maxFr[cellName],aspect='auto') ax[-1].set_title("Number of spikes - "+cellName) # Move left and bottom spines outward by 10 points ax[-1].spines['left'].set_position(('outward', 10)) ax[-1].spines['bottom'].set_position(('outward', 10)) # Hide the right and top spines ax[-1].spines['right'].set_visible(False) ax[-1].spines['top'].set_visible(False) # Only show ticks on the left and bottom spines ax[-1].yaxis.set_ticks_position('left') ax[-1].xaxis.set_ticks_position('bottom') ax[-1].set_xticks(range(len(eesFrequencies))) ax[-1].set_xticklabels(eesFrequencies) ax[-1].set_yticks(range(len(eesAmplitudes))) ax[-1].set_yticklabels(eesAmplitudes) ax[-1].set_ylabel("Stimulation amplitude \n(% of recruited fibers)") fig.colorbar(im, orientation='vertical',label="N spikes") # Plot on number of active cells ax.append(plt.subplot(gs[i,1])) # mask some 'bad' data, in your case you would have: data == 0 data = np.ma.masked_where(nActiveCells[cellName]==0,nActiveCells[cellName]) im = ax[-1].imshow(data, cmap=colorMap2, interpolation='nearest',origin="lower",vmin 
= 0, vmax = maxActiveCells[cellName],aspect='auto') ax[-1].set_title("Number of active cells - "+cellName) # Move left and bottom spines outward by 10 points ax[-1].spines['left'].set_position(('outward', 10)) ax[-1].spines['bottom'].set_position(('outward', 10)) # Hide the right and top spines ax[-1].spines['right'].set_visible(False) ax[-1].spines['top'].set_visible(False) # Only show ticks on the left and bottom spines ax[-1].yaxis.set_ticks_position('left') ax[-1].xaxis.set_ticks_position('bottom') ax[-1].set_xticks(range(len(eesFrequencies))) ax[-1].set_xticklabels(eesFrequencies) ax[-1].set_yticks(range(len(eesAmplitudes))) ax[-1].set_yticklabels(eesAmplitudes) fig.colorbar(im, orientation='vertical',label="N active cells") ax[-2].set_xlabel("Stimulation frequency (Hz)") ax[-1].set_xlabel("Stimulation frequency (Hz)") fileName = time.strftime("%Y_%m_%d_freqAmpDependancy.pdf") plt.savefig("../../results/"+fileName, format="pdf",transparent=True) fig2, ax2 = plt.subplots(2, 1) intervalHalfWidth = 5 targetFiringrates = range(10,41,10) cmap = plt.get_cmap('winter') colors = cmap(np.linspace(0.1,0.9,len(targetFiringrates))) isomod
# populationFr["MnFf"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) # populationFr["MnFr"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) populationFr["Iaf"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)])
random_line_split
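A minimal sketch of the masked-heatmap idiom this plot_stats code relies on: cells with no data are masked with np.ma.masked_where so that the colormap's set_bad color (the dark grey used above) marks them instead of the lowest color bin. The 3x4 array and its values below are invented placeholders, not data from the sweep.

import numpy as np
import matplotlib.pyplot as plt

# Invented 3x4 grid of firing rates; zeros stand for "no data".
rates = np.array([[0.0, 5.0, 12.0, 20.0],
                  [3.0, 0.0, 15.0, 28.0],
                  [6.0, 9.0, 0.0, 35.0]])

data = np.ma.masked_where(rates == 0, rates)   # hide the empty cells
cmap = plt.cm.YlOrRd.copy()                    # copy so the registered colormap stays untouched
cmap.set_bad(color="#20201f")                  # masked cells render in this color

fig, ax = plt.subplots()
im = ax.imshow(data, cmap=cmap, interpolation="nearest",
               origin="lower", vmin=0, vmax=data.max(), aspect="auto")
fig.colorbar(im, orientation="vertical", label="N spikes")
plt.show()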
runAffEffModSweap.py
["MnS_MnFf_MnFr"] = nActiveCells["MnS"]+nActiveCells["MnFf"]+nActiveCells["MnFr"] maxFr = {} maxFr["Iaf"] = np.max(populationFr["Iaf"]) maxFr["MnS"] = np.max(populationFr["MnS"]) # maxFr["MnS"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnFf"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnFr"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnS_MnFf_MnFr"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) ax = [] sizeFactor = 3 fig=plt.figure(figsize=(3*sizeFactor,4*sizeFactor)) gs = gridspec.GridSpec(len(populationFr.keys()),2) gs.update(left=0.05, right=0.95, hspace=0.6, wspace=0.1) colorMap2 = plt.cm.YlGnBu colorMap = plt.cm.YlOrRd colorMap.set_bad(color="#20201f") colorMap2.set_bad(color="#20201f") # colorMap = cmaps.magma maxSpikes = np.max([np.max(populationFr[cellName]) for cellName in populationFr]) cellNames = ["Iaf","MnS"] for i,cellName in enumerate(cellNames): # Plot on number of spikes ax.append(plt.subplot(gs[i,0])) data = np.ma.masked_where(populationFr[cellName]==0,populationFr[cellName]) im = ax[-1].imshow(data, cmap=colorMap, interpolation='nearest',origin="lower",vmin = 0,vmax=maxFr[cellName],aspect='auto') ax[-1].set_title("Number of spikes - "+cellName) # Move left and bottom spines outward by 10 points ax[-1].spines['left'].set_position(('outward', 10)) ax[-1].spines['bottom'].set_position(('outward', 10)) # Hide the right and top spines ax[-1].spines['right'].set_visible(False) ax[-1].spines['top'].set_visible(False) # Only show ticks on the left and bottom spines ax[-1].yaxis.set_ticks_position('left') ax[-1].xaxis.set_ticks_position('bottom') ax[-1].set_xticks(range(len(eesFrequencies))) ax[-1].set_xticklabels(eesFrequencies) ax[-1].set_yticks(range(len(eesAmplitudes))) ax[-1].set_yticklabels(eesAmplitudes) ax[-1].set_ylabel("Stimulation amplitude \n(% of recruited fibers)") fig.colorbar(im, orientation='vertical',label="N spikes") # Plot on number of active cells ax.append(plt.subplot(gs[i,1])) # mask some 'bad' data, in your case you would have: data == 0 data = np.ma.masked_where(nActiveCells[cellName]==0,nActiveCells[cellName]) im = ax[-1].imshow(data, cmap=colorMap2, interpolation='nearest',origin="lower",vmin = 0, vmax = maxActiveCells[cellName],aspect='auto') ax[-1].set_title("Number of active cells - "+cellName) # Move left and bottom spines outward by 10 points ax[-1].spines['left'].set_position(('outward', 10)) ax[-1].spines['bottom'].set_position(('outward', 10)) # Hide the right and top spines ax[-1].spines['right'].set_visible(False) ax[-1].spines['top'].set_visible(False) # Only show ticks on the left and bottom spines ax[-1].yaxis.set_ticks_position('left') ax[-1].xaxis.set_ticks_position('bottom') ax[-1].set_xticks(range(len(eesFrequencies))) ax[-1].set_xticklabels(eesFrequencies) ax[-1].set_yticks(range(len(eesAmplitudes))) ax[-1].set_yticklabels(eesAmplitudes) fig.colorbar(im, orientation='vertical',label="N active cells") ax[-2].set_xlabel("Stimulation frequency (Hz)") ax[-1].set_xlabel("Stimulation frequency (Hz)") fileName = time.strftime("%Y_%m_%d_freqAmpDependancy.pdf") plt.savefig("../../results/"+fileName, format="pdf",transparent=True) fig2, ax2 = plt.subplots(2, 1) intervalHalfWidth = 5 targetFiringrates = range(10,41,10) cmap = plt.get_cmap('winter') colors = cmap(np.linspace(0.1,0.9,len(targetFiringrates))) isomodulationCurves = [] for n,target in enumerate(targetFiringrates): 
isomodulationCurves.append({}) temp = np.zeros([len(eesAmplitudes),len(eesFrequencies)])*np.nan for i,eesAmplitude in enumerate(eesAmplitudes): for j,eesFrequency in enumerate(eesFrequencies): if populationFr["MnS_MnFf_MnFr"][i,j]>target-intervalHalfWidth and populationFr["MnS_MnFf_MnFr"][i,j]<target+intervalHalfWidth: if type(eesAmplitude) is str: temp[i,j] = eesAmplitude[1:4] else: temp[i,j] = eesAmplitude isomodulationCurves[-1]['max'] = fill_nan(np.nanmax(temp,axis=0)) isomodulationCurves[-1]['mean'] = fill_nan(np.nanmean(temp,axis=0)) isomodulationCurves[-1]['min'] = fill_nan(np.nanmin(temp,axis=0)) ax2[0].plot(eesFrequencies,isomodulationCurves[-1]['max'],color=colors[n]) ax2[0].plot(eesFrequencies,isomodulationCurves[-1]['mean'],color=colors[n]) ax2[0].plot(eesFrequencies,isomodulationCurves[-1]['min'],color=colors[n]) ax2[0].fill_between(eesFrequencies,isomodulationCurves[-1]['min'],isomodulationCurves[-1]['max'],color=colors[n],alpha=0.3) ax2[0].set_xscale("log") ax2[1].plot(eesFrequencies,isomodulationCurves[-1]['mean'],color=colors[n]) fileName = time.strftime("/%Y_%m_%d_freqAmpDependancyIsoModCurves.pdf") plt.savefig(pathToResults+fileName, format="pdf",transparent=True) plt.show() def fill_nan(A): """ interpolate to fill nan values """ inds = np.arange(A.shape[0]) good = np.where(np.isfinite(A)) A[np.isnan(A)] = np.interp(inds[np.isnan(A)], inds[good], A[good]) return A # ---Unused--- def load_rec_data(): """ Load recruitment data from a previosly validated FEM model (Capogrosso et al 2013). """ recI_MG=np.loadtxt('../recruitmentData/GM_full_S1_wire1') recI_TA=np.loadtxt('../recruitmentData/TA_full_S1_wire1') allPercIf_GM= recI_MG/max(recI_MG) allPercIf_TA= recI_TA/max(recI_TA) minCur = 0 #uA maxCur = 600 #uA nVal = recI_MG.size allPercIf= (allPercIf_GM+allPercIf_TA)/2 currents = np.linspace(minCur,maxCur,nVal) f = interpolate.interp1d(currents, allPercIf) return f def compute_error(amplitude,target,f): actualPerc = f(amplitude) error = np.array(target-actualPerc) return error def minimize(target,f,x0,errTol=0.01,dx=5,maxIters = 100000): error=9999 x0 -= dx for i in xrange(maxIters): x0 += dx error = compute_error(x0,target,f) if error<errTol:break if error>errTol:raise Exception("minimization failed") print "out:",x0," target:",target," error:",error return x0,error def find_corrisponding_amplitude(target,f):
current,error = minimize(target, f,x0=150) return current
identifier_body
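The fill_nan helper defined in this record patches NaN gaps by linear interpolation over the array index (np.interp against the finite entries). A cleaned-up sketch of the same idea with an invented input vector; unlike the original it copies its argument instead of mutating it in place.

import numpy as np

def fill_nan(a):
    """Return a copy of a with NaN entries filled by linear interpolation over the index."""
    a = np.array(a, dtype=float)            # copy, so the caller's array is untouched
    inds = np.arange(a.shape[0])
    good = np.isfinite(a)
    a[~good] = np.interp(inds[~good], inds[good], a[good])
    return a

print(fill_nan([1.0, np.nan, np.nan, 4.0, 5.0]))   # -> [1. 2. 3. 4. 5.]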
runAffEffModSweap.py
nActiveCells[cellName][i,j] = _temp_nActiveCells[muscle][cellName] # populationFr["MnS_MnFf_MnFr"] = (populationFr["MnS"]+populationFr["MnFf"]+populationFr["MnFr"])/3 # nActiveCells["MnS_MnFf_MnFr"] = nActiveCells["MnS"]+nActiveCells["MnFf"]+nActiveCells["MnFr"] maxFr = {} maxFr["Iaf"] = np.max(populationFr["Iaf"]) maxFr["MnS"] = np.max(populationFr["MnS"]) # maxFr["MnS"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnFf"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnFr"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnS_MnFf_MnFr"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) ax = [] sizeFactor = 3 fig=plt.figure(figsize=(3*sizeFactor,4*sizeFactor)) gs = gridspec.GridSpec(len(populationFr.keys()),2) gs.update(left=0.05, right=0.95, hspace=0.6, wspace=0.1) colorMap2 = plt.cm.YlGnBu colorMap = plt.cm.YlOrRd colorMap.set_bad(color="#20201f") colorMap2.set_bad(color="#20201f") # colorMap = cmaps.magma maxSpikes = np.max([np.max(populationFr[cellName]) for cellName in populationFr]) cellNames = ["Iaf","MnS"] for i,cellName in enumerate(cellNames): # Plot on number of spikes ax.append(plt.subplot(gs[i,0])) data = np.ma.masked_where(populationFr[cellName]==0,populationFr[cellName]) im = ax[-1].imshow(data, cmap=colorMap, interpolation='nearest',origin="lower",vmin = 0,vmax=maxFr[cellName],aspect='auto') ax[-1].set_title("Number of spikes - "+cellName) # Move left and bottom spines outward by 10 points ax[-1].spines['left'].set_position(('outward', 10)) ax[-1].spines['bottom'].set_position(('outward', 10)) # Hide the right and top spines ax[-1].spines['right'].set_visible(False) ax[-1].spines['top'].set_visible(False) # Only show ticks on the left and bottom spines ax[-1].yaxis.set_ticks_position('left') ax[-1].xaxis.set_ticks_position('bottom') ax[-1].set_xticks(range(len(eesFrequencies))) ax[-1].set_xticklabels(eesFrequencies) ax[-1].set_yticks(range(len(eesAmplitudes))) ax[-1].set_yticklabels(eesAmplitudes) ax[-1].set_ylabel("Stimulation amplitude \n(% of recruited fibers)") fig.colorbar(im, orientation='vertical',label="N spikes") # Plot on number of active cells ax.append(plt.subplot(gs[i,1])) # mask some 'bad' data, in your case you would have: data == 0 data = np.ma.masked_where(nActiveCells[cellName]==0,nActiveCells[cellName]) im = ax[-1].imshow(data, cmap=colorMap2, interpolation='nearest',origin="lower",vmin = 0, vmax = maxActiveCells[cellName],aspect='auto') ax[-1].set_title("Number of active cells - "+cellName) # Move left and bottom spines outward by 10 points ax[-1].spines['left'].set_position(('outward', 10)) ax[-1].spines['bottom'].set_position(('outward', 10)) # Hide the right and top spines ax[-1].spines['right'].set_visible(False) ax[-1].spines['top'].set_visible(False) # Only show ticks on the left and bottom spines ax[-1].yaxis.set_ticks_position('left') ax[-1].xaxis.set_ticks_position('bottom') ax[-1].set_xticks(range(len(eesFrequencies))) ax[-1].set_xticklabels(eesFrequencies) ax[-1].set_yticks(range(len(eesAmplitudes))) ax[-1].set_yticklabels(eesAmplitudes) fig.colorbar(im, orientation='vertical',label="N active cells") ax[-2].set_xlabel("Stimulation frequency (Hz)") ax[-1].set_xlabel("Stimulation frequency (Hz)") fileName = time.strftime("%Y_%m_%d_freqAmpDependancy.pdf") plt.savefig("../../results/"+fileName, format="pdf",transparent=True) fig2, ax2 = plt.subplots(2, 1) intervalHalfWidth = 5 targetFiringrates = range(10,41,10) 
cmap = plt.get_cmap('winter') colors = cmap(np.linspace(0.1,0.9,len(targetFiringrates))) isomodulationCurves = [] for n,target in enumerate(targetFiringrates): isomodulationCurves.append({}) temp = np.zeros([len(eesAmplitudes),len(eesFrequencies)])*np.nan for i,eesAmplitude in enumerate(eesAmplitudes): for j,eesFrequency in enumerate(eesFrequencies): if populationFr["MnS_MnFf_MnFr"][i,j]>target-intervalHalfWidth and populationFr["MnS_MnFf_MnFr"][i,j]<target+intervalHalfWidth: if type(eesAmplitude) is str: temp[i,j] = eesAmplitude[1:4] else: temp[i,j] = eesAmplitude isomodulationCurves[-1]['max'] = fill_nan(np.nanmax(temp,axis=0)) isomodulationCurves[-1]['mean'] = fill_nan(np.nanmean(temp,axis=0)) isomodulationCurves[-1]['min'] = fill_nan(np.nanmin(temp,axis=0)) ax2[0].plot(eesFrequencies,isomodulationCurves[-1]['max'],color=colors[n]) ax2[0].plot(eesFrequencies,isomodulationCurves[-1]['mean'],color=colors[n]) ax2[0].plot(eesFrequencies,isomodulationCurves[-1]['min'],color=colors[n]) ax2[0].fill_between(eesFrequencies,isomodulationCurves[-1]['min'],isomodulationCurves[-1]['max'],color=colors[n],alpha=0.3) ax2[0].set_xscale("log") ax2[1].plot(eesFrequencies,isomodulationCurves[-1]['mean'],color=colors[n]) fileName = time.strftime("/%Y_%m_%d_freqAmpDependancyIsoModCurves.pdf") plt.savefig(pathToResults+fileName, format="pdf",transparent=True) plt.show() def fill_nan(A): """ interpolate to fill nan values """ inds = np.arange(A.shape[0]) good = np.where(np.isfinite(A)) A[np.isnan(A)] = np.interp(inds[np.isnan(A)], inds[good], A[good]) return A # ---Unused--- def load_rec_data(): """ Load recruitment data from a previosly validated FEM model (Capogrosso et al 2013). """ recI_MG=np.loadtxt('../recruitmentData/GM_full_S1_wire1') recI_TA=np.loadtxt('../recruitmentData/TA_full_S1_wire1') allPercIf_GM= recI_MG/max(recI_MG) allPercIf_TA= recI_TA/max(recI_TA) minCur = 0 #uA maxCur = 600 #uA nVal = recI_MG.size allPercIf= (allPercIf_GM+allPercIf_TA)/2 currents = np.linspace(minCur,maxCur,nVal) f = interpolate.interp1d(currents, allPercIf) return f def compute_error(amplitude,target,f): actualPerc = f(amplitude) error = np.array(target-actualPerc) return error def minimize(target,f,x0,errTol=0.01,dx=5,maxIters = 100000): error=9999 x0 -= dx for i in xrange(maxIters): x0 += dx error = compute_error(x0,target,f) if error<errTol:
break
conditional_block
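minimize in this record is a brute-force search: starting from x0 it steps the amplitude by dx until the recruitment error falls below errTol, which is the loop the masked `break` above belongs to. A hedged re-sketch of that pattern against an invented recruitment curve; the interp1d curve stands in for the FEM recruitment data that load_rec_data reads from disk, and find_amplitude is a renamed illustration, not the script's function.

import numpy as np
from scipy import interpolate

# Invented recruitment curve: fraction of fibers recruited vs. current (uA).
currents = np.linspace(0, 600, 50)
fractions = np.clip(currents / 400.0, 0, 1)
f = interpolate.interp1d(currents, fractions)

def find_amplitude(target, f, x0=150, err_tol=0.01, dx=5, max_iters=100000):
    x = x0 - dx
    for _ in range(max_iters):
        x += dx
        error = target - float(f(x))       # how far below the target we still are
        if error < err_tol:
            return x, error
    raise RuntimeError("search failed")

print(find_amplitude(0.6, f))              # amplitude giving roughly 60% recruitment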
runAffEffModSweap.py
in eesAmplitudes: for eesFrequency in eesFrequencies: filName = name+"_amp_"+str(eesAmplitude)+"_freq_"+str(eesFrequency) resultFile = gt.find("*"+filName+".p",pathToResults) if not resultFile: returnCode = None while not returnCode==0: program = ['python','scripts/computeAfferentsEfferentsModulation.py', str(eesFrequency),str(eesAmplitude),species,nrnStructureFile,name,"--simTime",str(simTime)] print " ".join(program) forwardSimulation = subprocess.Popen(program, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE) returnCode = None while returnCode is None: message = forwardSimulation.stdout.readline().rstrip("\n").split() if message != None:print "\t\t"+" ".join(message)+"\t\t" returnCode = forwardSimulation.poll() if returnCode != 0: print "\t\t\t\t Error n: ",forwardSimulation.poll()," resetting simulation..." count+=1 if count/nSim-percLastPrint>=printPeriod: percLastPrint=count/nSim print str(round(count/nSim*100))+"% of simulations performed..." plot_stats(eesAmplitudes,eesFrequencies,simTime,name) def
(eesAmplitudes,eesFrequencies,simTime,name): populationFr = {} populationFr["MnS"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) # populationFr["MnFf"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) # populationFr["MnFr"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) populationFr["Iaf"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) nActiveCells = {} nActiveCells["MnS"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) # nActiveCells["MnFf"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) # nActiveCells["MnFr"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) nActiveCells["Iaf"] = np.zeros([len(eesAmplitudes),len(eesFrequencies)]) # maxActiveCells= {"MnS":20,"MnFf":20,"MnFr":20,"MnS_MnFf_MnFr":60,"Iaf":60} maxActiveCells= {"MnS":169,"Iaf":60} for i,eesAmplitude in enumerate(eesAmplitudes): for j,eesFrequency in enumerate(eesFrequencies): filName = name+"_amp_"+str(eesAmplitude)+"_freq_"+str(eesFrequency) resultFile = gt.find("*"+filName+".p",pathToResults) if len(resultFile)>1: print "Warning: multiple result files found!!!" with open(resultFile[0], 'r') as pickle_file: _temp_populationFr = pickle.load(pickle_file) _temp_nActiveCells = pickle.load(pickle_file) for muscle in _temp_populationFr: for cellName in _temp_populationFr[muscle]: populationFr[cellName][i,j] = np.array(_temp_populationFr[muscle][cellName])/(float(simTime)/1000)/maxActiveCells[cellName] nActiveCells[cellName][i,j] = _temp_nActiveCells[muscle][cellName] # populationFr["MnS_MnFf_MnFr"] = (populationFr["MnS"]+populationFr["MnFf"]+populationFr["MnFr"])/3 # nActiveCells["MnS_MnFf_MnFr"] = nActiveCells["MnS"]+nActiveCells["MnFf"]+nActiveCells["MnFr"] maxFr = {} maxFr["Iaf"] = np.max(populationFr["Iaf"]) maxFr["MnS"] = np.max(populationFr["MnS"]) # maxFr["MnS"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnFf"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnFr"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) # maxFr["MnS_MnFf_MnFr"] = np.max([populationFr["MnS"],populationFr["MnFf"],populationFr["MnFr"]]) ax = [] sizeFactor = 3 fig=plt.figure(figsize=(3*sizeFactor,4*sizeFactor)) gs = gridspec.GridSpec(len(populationFr.keys()),2) gs.update(left=0.05, right=0.95, hspace=0.6, wspace=0.1) colorMap2 = plt.cm.YlGnBu colorMap = plt.cm.YlOrRd colorMap.set_bad(color="#20201f") colorMap2.set_bad(color="#20201f") # colorMap = cmaps.magma maxSpikes = np.max([np.max(populationFr[cellName]) for cellName in populationFr]) cellNames = ["Iaf","MnS"] for i,cellName in enumerate(cellNames): # Plot on number of spikes ax.append(plt.subplot(gs[i,0])) data = np.ma.masked_where(populationFr[cellName]==0,populationFr[cellName]) im = ax[-1].imshow(data, cmap=colorMap, interpolation='nearest',origin="lower",vmin = 0,vmax=maxFr[cellName],aspect='auto') ax[-1].set_title("Number of spikes - "+cellName) # Move left and bottom spines outward by 10 points ax[-1].spines['left'].set_position(('outward', 10)) ax[-1].spines['bottom'].set_position(('outward', 10)) # Hide the right and top spines ax[-1].spines['right'].set_visible(False) ax[-1].spines['top'].set_visible(False) # Only show ticks on the left and bottom spines ax[-1].yaxis.set_ticks_position('left') ax[-1].xaxis.set_ticks_position('bottom') ax[-1].set_xticks(range(len(eesFrequencies))) ax[-1].set_xticklabels(eesFrequencies) ax[-1].set_yticks(range(len(eesAmplitudes))) ax[-1].set_yticklabels(eesAmplitudes) ax[-1].set_ylabel("Stimulation amplitude \n(% of 
recruited fibers)") fig.colorbar(im, orientation='vertical',label="N spikes") # Plot on number of active cells ax.append(plt.subplot(gs[i,1])) # mask some 'bad' data, in your case you would have: data == 0 data = np.ma.masked_where(nActiveCells[cellName]==0,nActiveCells[cellName]) im = ax[-1].imshow(data, cmap=colorMap2, interpolation='nearest',origin="lower",vmin = 0, vmax = maxActiveCells[cellName],aspect='auto') ax[-1].set_title("Number of active cells - "+cellName) # Move left and bottom spines outward by 10 points ax[-1].spines['left'].set_position(('outward', 10)) ax[-1].spines['bottom'].set_position(('outward', 10)) # Hide the right and top spines ax[-1].spines['right'].set_visible(False) ax[-1].spines['top'].set_visible(False) # Only show ticks on the left and bottom spines ax[-1].yaxis.set_ticks_position('left') ax[-1].xaxis.set_ticks_position('bottom') ax[-1].set_xticks(range(len(eesFrequencies))) ax[-1].set_xticklabels(eesFrequencies) ax[-1].set_yticks(range(len(eesAmplitudes))) ax[-1].set_yticklabels(eesAmplitudes) fig.colorbar(im, orientation='vertical',label="N active cells") ax[-2].set_xlabel("Stimulation frequency (Hz)") ax[-1].set_xlabel("Stimulation frequency (Hz)") fileName = time.strftime("%Y_%m_%d_freqAmpDependancy.pdf") plt.savefig("../../results/"+fileName, format="pdf",transparent=True) fig2, ax2 = plt.subplots(2, 1) intervalHalfWidth = 5 targetFiringrates = range(10,41,10) cmap = plt.get_cmap('winter') colors = cmap(np.linspace(0.1,0.9,len(targetFiringrates))) isomod
plot_stats
identifier_name
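The sweep driver in this record launches one simulation per (amplitude, frequency) pair through subprocess and restarts any run that exits with a non-zero return code. A stripped-down sketch of that relaunch loop; worker.py and its flags are placeholders, not the real computeAfferentsEfferentsModulation.py command line.

import subprocess

def run_until_success(cmd):
    """Re-launch cmd until it exits with return code 0, echoing its output."""
    while True:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                stderr=subprocess.STDOUT, text=True)
        for line in proc.stdout:
            print("\t\t" + line.rstrip())
        if proc.wait() == 0:
            return
        print("\t\t\t\t Error n:", proc.returncode, " resetting simulation...")

# Placeholder command; the real sweep builds it from eesFrequency and eesAmplitude.
run_until_success(["python", "worker.py", "--freq", "40", "--amp", "230"])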
miopoll.rs
(&mut self) { let mut ctrl = self.ctrl.borrow_mut(); if let Err(e) = ctrl.del(self.token, &mut self.source) { // TODO: Report the errors some other way, e.g. logged? ctrl.errors.push(e); } } } impl<S: Source> Deref for MioSource<S> { type Target = S; fn deref(&self) -> &Self::Target { &self.source } } impl<S: Source> DerefMut for MioSource<S> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.source } } /// Handle EINTR failures by retrying #[inline] fn retry<R>(mut f: impl FnMut() -> Result<R>) -> Result<R> { loop { let rv = f(); match rv { Err(ref e) if e.kind() == ErrorKind::Interrupted => (), _ => return rv, } } } /// Ref-counting wrapper around a mio `Poll` instance /// /// After creation, pass cloned copies of this to all interested /// parties. A `MioPoll` reference is also available from the /// associated **Stakker** instance using /// `cx.anymap_get::<MioPoll>()`. pub struct MioPoll { rc: Rc<RefCell<Control>>, } impl MioPoll { /// Create a new MioPoll instance wrapping the given mio `Poll` /// instance and mio `Events` queue (which the caller should size /// according to their requirements). The waker priority should /// also be provided, in the range `0..=10`. Sets up the /// **Stakker** instance to use `MioPoll` as the poll-waker, and /// puts a `MioPoll` clone into the **Stakker** anymap. pub fn new(stakker: &mut Stakker, poll: Poll, events: Events, waker_pri: u32) -> Result<Self> { let mut token_map = Slab::with_capacity(256); let waker_pri = waker_pri.min(MAX_PRI); let waker_token = Token(token_map.insert(Entry { pri: waker_pri, fwd: fwd_nop!(), })); assert_eq!(waker_token, WAKER_TOKEN); let waker = Arc::new(retry(|| Waker::new(poll.registry(), WAKER_TOKEN))?); let waker2 = waker.clone(); let mut ctrl = Control { poll, token_map, queues: Default::default(), max_pri: waker_pri, events, errors: Vec::new(), waker, }; let deferrer = stakker.deferrer(); ctrl.set_wake_fwd(Fwd::new(move |_| deferrer.defer(|s| s.poll_wake()))); let miopoll = Self { rc: Rc::new(RefCell::new(ctrl)), }; stakker.anymap_set(miopoll.clone()); stakker.set_poll_waker(move || { if let Err(e) = retry(|| waker2.wake()) { panic!("Inter-thread poll waker failed: {}", e); } }); Ok(miopoll) } /// Register a mio `Source` object with the poll instance. /// Returns a [`MioSource`] which takes care of cleaning up the /// token and handler when it is dropped. /// /// This uses edge-triggering: whenever one of the Interest flags /// included in `ready` changes state, the given `Fwd` instance /// will be invoked with the new `Ready` value. The contract with /// the handler is that there may be spurious calls to it, so it /// must be ready for that. /// /// `pri` gives a priority level: `0..=10`. If handlers are /// registered at different priority levels, then higher priority /// events get handled before lower priority events. Under /// constant very heavy load, lower priority events might be /// delayed indefinitely. /// /// [`MioSource`]: struct.MioSource.html pub fn add<S: Source>( &self, mut source: S, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<MioSource<S>> { let token = self.rc.borrow_mut().add(&mut source, ready, pri, fwd)?; Ok(MioSource { token, ctrl: self.rc.clone(), source, }) } /// Poll for new events and queue all the events of the highest /// available priority level. Events of lower priority levels are /// queued internally to be used on a future call to this method. 
/// /// So the expected pattern is that highest-priority handlers get /// run, and when all the resulting processing has completed in /// **Stakker**, then the main loop polls again, and if more /// high-priority events have occurred, then those too will get /// processed. Lower-priority handlers will only get a chance to /// run when nothing higher-priority needs handling. /// /// On success returns `Ok(true)` if an event was processed, or /// `Ok(false)` if there were no new events. pub fn poll(&self, max_delay: Duration) -> Result<bool> { self.rc.borrow_mut().poll(max_delay) } /// Set the handler for "wake" events. There can only be one /// handler for "wake" events, so setting it here drops the /// previous handler. Don't call this unless you wish to override /// the default wake handling which calls /// [`stakker::Stakker::poll_wake`]. /// /// [`stakker::Stakker::poll_wake`]: ../stakker/struct.Stakker.html#method.poll_wake pub fn set_wake_fwd(&mut self, fwd: Fwd<Ready>) { self.rc.borrow_mut().set_wake_fwd(fwd); } /// Get a cloned reference to the waker for this `MioPoll` /// instance. This can be passed to other threads, which can call /// `wake()` on it to cause the wake handler to be run in the main /// polling thread. pub fn waker(&mut self) -> Arc<Waker> { self.rc.borrow_mut().waker.clone() } } impl Clone for MioPoll { fn clone(&self) -> Self { Self { rc: self.rc.clone(), } } } struct QueueEvent { token: usize, ready: Ready, } struct Entry { pri: u32, fwd: Fwd<Ready>, } struct Control { token_map: Slab<Entry>, poll: Poll, // Highest priority in use goes on a fast path so we need queues
waker: Arc<Waker>, } impl Control { #[inline] fn del(&mut self, token: Token, handle: &mut impl Source) -> Result<()> { let rv = retry(|| self.poll.registry().deregister(handle)); if self.token_map.contains(token.into()) { self.token_map.remove(token.into()); return rv; } rv.and(Err(Error::from(ErrorKind::NotFound))) } #[inline] fn add( &mut self, handle: &mut impl Source, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<Token> { let pri = pri.min(MAX_PRI); self.max_pri = self.max_pri.max(pri); let token = Token(self.token_map.insert(Entry { pri, fwd })); retry(|| self.poll.registry().register(handle, token, ready))?; Ok(token) } fn poll(&mut self, max_delay: Duration) -> Result<bool> { retry(|| self.poll.poll(&mut self.events, Some(max_delay)))?; let mut done = false; for ev in &self.events { let token = ev.token().into(); if let Some(ref mut entry) = self.token_map.get_mut(token) { // Fast-path for highest priority level present in // registrations, so if user uses only one priority level, // there is no queuing necessary here. let ready = Ready::new(ev); if entry.pri == self.max_pri { done = true; entry.fwd.fwd(ready); } else { self.queues[entry.pri as usize].push(QueueEvent { token, ready }); } } } self.events.clear(); if !done { for qu in self.queues.iter_mut().rev() { if !qu.is_empty() { for qev in qu.drain(..) { if let Some(ref mut entry) = self.token_map.get_mut(qev.token) { done = true; entry.fwd.fwd(qev.ready); } } if done { break; } } } } Ok(done)
// only for 0..=9 queues: [Vec<QueueEvent>; MAX_PRI as usize], max_pri: u32, events: Events, errors: Vec<Error>,
random_line_split
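The MioPoll::poll doc comments in this record describe the dispatch policy: each poll forwards events at the highest registered priority straight away and parks lower-priority events in per-priority queues, which are drained on later polls only when nothing more urgent arrived. A rough Python illustration of that policy, offered as an algorithm sketch rather than the crate's API.

from collections import defaultdict

class PriorityDispatcher:
    """Illustration of the dispatch policy described above (not the mio/stakker API)."""

    def __init__(self, max_pri):
        self.max_pri = max_pri            # highest priority level registered
        self.queues = defaultdict(list)   # lower-priority events parked here

    def dispatch(self, new_events, handlers):
        # new_events: list of (priority, token); handlers: token -> callback
        done = False
        for pri, token in new_events:
            if pri == self.max_pri:       # fast path: run top-priority handlers now
                handlers[token]()
                done = True
            else:
                self.queues[pri].append(token)
        if not done:                      # no top-priority work: drain one lower level
            for pri in sorted(self.queues, reverse=True):
                if self.queues[pri]:
                    for token in self.queues[pri]:
                        handlers[token]()
                    self.queues[pri].clear()
                    done = True
                    break
        return done

# toy usage
d = PriorityDispatcher(max_pri=10)
handlers = {1: lambda: print("socket ready"), 2: lambda: print("timer fired")}
d.dispatch([(10, 1), (3, 2)], handlers)   # token 1 runs now, token 2 is parked
d.dispatch([], handlers)                  # quiet poll: parked token 2 runs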
miopoll.rs
mut self) { let mut ctrl = self.ctrl.borrow_mut(); if let Err(e) = ctrl.del(self.token, &mut self.source) { // TODO: Report the errors some other way, e.g. logged? ctrl.errors.push(e); } } } impl<S: Source> Deref for MioSource<S> { type Target = S; fn deref(&self) -> &Self::Target { &self.source } } impl<S: Source> DerefMut for MioSource<S> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.source } } /// Handle EINTR failures by retrying #[inline] fn retry<R>(mut f: impl FnMut() -> Result<R>) -> Result<R> { loop { let rv = f(); match rv { Err(ref e) if e.kind() == ErrorKind::Interrupted => (), _ => return rv, } } } /// Ref-counting wrapper around a mio `Poll` instance /// /// After creation, pass cloned copies of this to all interested /// parties. A `MioPoll` reference is also available from the /// associated **Stakker** instance using /// `cx.anymap_get::<MioPoll>()`. pub struct MioPoll { rc: Rc<RefCell<Control>>, } impl MioPoll { /// Create a new MioPoll instance wrapping the given mio `Poll` /// instance and mio `Events` queue (which the caller should size /// according to their requirements). The waker priority should /// also be provided, in the range `0..=10`. Sets up the /// **Stakker** instance to use `MioPoll` as the poll-waker, and /// puts a `MioPoll` clone into the **Stakker** anymap. pub fn new(stakker: &mut Stakker, poll: Poll, events: Events, waker_pri: u32) -> Result<Self> { let mut token_map = Slab::with_capacity(256); let waker_pri = waker_pri.min(MAX_PRI); let waker_token = Token(token_map.insert(Entry { pri: waker_pri, fwd: fwd_nop!(), })); assert_eq!(waker_token, WAKER_TOKEN); let waker = Arc::new(retry(|| Waker::new(poll.registry(), WAKER_TOKEN))?); let waker2 = waker.clone(); let mut ctrl = Control { poll, token_map, queues: Default::default(), max_pri: waker_pri, events, errors: Vec::new(), waker, }; let deferrer = stakker.deferrer(); ctrl.set_wake_fwd(Fwd::new(move |_| deferrer.defer(|s| s.poll_wake()))); let miopoll = Self { rc: Rc::new(RefCell::new(ctrl)), }; stakker.anymap_set(miopoll.clone()); stakker.set_poll_waker(move || { if let Err(e) = retry(|| waker2.wake()) { panic!("Inter-thread poll waker failed: {}", e); } }); Ok(miopoll) } /// Register a mio `Source` object with the poll instance. /// Returns a [`MioSource`] which takes care of cleaning up the /// token and handler when it is dropped. /// /// This uses edge-triggering: whenever one of the Interest flags /// included in `ready` changes state, the given `Fwd` instance /// will be invoked with the new `Ready` value. The contract with /// the handler is that there may be spurious calls to it, so it /// must be ready for that. /// /// `pri` gives a priority level: `0..=10`. If handlers are /// registered at different priority levels, then higher priority /// events get handled before lower priority events. Under /// constant very heavy load, lower priority events might be /// delayed indefinitely. /// /// [`MioSource`]: struct.MioSource.html pub fn add<S: Source>( &self, mut source: S, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<MioSource<S>> { let token = self.rc.borrow_mut().add(&mut source, ready, pri, fwd)?; Ok(MioSource { token, ctrl: self.rc.clone(), source, }) } /// Poll for new events and queue all the events of the highest /// available priority level. Events of lower priority levels are /// queued internally to be used on a future call to this method. 
/// /// So the expected pattern is that highest-priority handlers get /// run, and when all the resulting processing has completed in /// **Stakker**, then the main loop polls again, and if more /// high-priority events have occurred, then those too will get /// processed. Lower-priority handlers will only get a chance to /// run when nothing higher-priority needs handling. /// /// On success returns `Ok(true)` if an event was processed, or /// `Ok(false)` if there were no new events. pub fn poll(&self, max_delay: Duration) -> Result<bool> { self.rc.borrow_mut().poll(max_delay) } /// Set the handler for "wake" events. There can only be one /// handler for "wake" events, so setting it here drops the /// previous handler. Don't call this unless you wish to override /// the default wake handling which calls /// [`stakker::Stakker::poll_wake`]. /// /// [`stakker::Stakker::poll_wake`]: ../stakker/struct.Stakker.html#method.poll_wake pub fn set_wake_fwd(&mut self, fwd: Fwd<Ready>) { self.rc.borrow_mut().set_wake_fwd(fwd); } /// Get a cloned reference to the waker for this `MioPoll` /// instance. This can be passed to other threads, which can call /// `wake()` on it to cause the wake handler to be run in the main /// polling thread. pub fn waker(&mut self) -> Arc<Waker> { self.rc.borrow_mut().waker.clone() } } impl Clone for MioPoll { fn clone(&self) -> Self { Self { rc: self.rc.clone(), } } } struct QueueEvent { token: usize, ready: Ready, } struct Entry { pri: u32, fwd: Fwd<Ready>, } struct Control { token_map: Slab<Entry>, poll: Poll, // Highest priority in use goes on a fast path so we need queues // only for 0..=9 queues: [Vec<QueueEvent>; MAX_PRI as usize], max_pri: u32, events: Events, errors: Vec<Error>, waker: Arc<Waker>, } impl Control { #[inline] fn del(&mut self, token: Token, handle: &mut impl Source) -> Result<()> { let rv = retry(|| self.poll.registry().deregister(handle)); if self.token_map.contains(token.into()) { self.token_map.remove(token.into()); return rv; } rv.and(Err(Error::from(ErrorKind::NotFound))) } #[inline] fn add( &mut self, handle: &mut impl Source, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<Token> { let pri = pri.min(MAX_PRI); self.max_pri = self.max_pri.max(pri); let token = Token(self.token_map.insert(Entry { pri, fwd })); retry(|| self.poll.registry().register(handle, token, ready))?; Ok(token) } fn poll(&mut self, max_delay: Duration) -> Result<bool> { retry(|| self.poll.poll(&mut self.events, Some(max_delay)))?; let mut done = false; for ev in &self.events { let token = ev.token().into(); if let Some(ref mut entry) = self.token_map.get_mut(token)
} self.events.clear(); if !done { for qu in self.queues.iter_mut().rev() { if !qu.is_empty() { for qev in qu.drain(..) { if let Some(ref mut entry) = self.token_map.get_mut(qev.token) { done = true; entry.fwd.fwd(qev.ready); } } if done { break; } } } } Ok(done
{ // Fast-path for highest priority level present in // registrations, so if user uses only one priority level, // there is no queuing necessary here. let ready = Ready::new(ev); if entry.pri == self.max_pri { done = true; entry.fwd.fwd(ready); } else { self.queues[entry.pri as usize].push(QueueEvent { token, ready }); } }
conditional_block
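The retry helper in this record wraps mio calls so that EINTR (interrupted system call) failures are simply retried. The same shape in Python, where the interruption surfaces as InterruptedError, is a few lines; the wrapped call in the comment is only an example.

def retry(func):
    """Call func, retrying for as long as it fails with EINTR (InterruptedError)."""
    while True:
        try:
            return func()
        except InterruptedError:
            continue

# usage sketch: wrap any interruptible call, e.g. retry(lambda: sock.accept())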
miopoll.rs
mut self) { let mut ctrl = self.ctrl.borrow_mut(); if let Err(e) = ctrl.del(self.token, &mut self.source) { // TODO: Report the errors some other way, e.g. logged? ctrl.errors.push(e); } } } impl<S: Source> Deref for MioSource<S> { type Target = S; fn deref(&self) -> &Self::Target { &self.source } } impl<S: Source> DerefMut for MioSource<S> { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.source } } /// Handle EINTR failures by retrying #[inline] fn retry<R>(mut f: impl FnMut() -> Result<R>) -> Result<R> { loop { let rv = f(); match rv { Err(ref e) if e.kind() == ErrorKind::Interrupted => (), _ => return rv, } } } /// Ref-counting wrapper around a mio `Poll` instance /// /// After creation, pass cloned copies of this to all interested /// parties. A `MioPoll` reference is also available from the /// associated **Stakker** instance using /// `cx.anymap_get::<MioPoll>()`. pub struct MioPoll { rc: Rc<RefCell<Control>>, } impl MioPoll { /// Create a new MioPoll instance wrapping the given mio `Poll` /// instance and mio `Events` queue (which the caller should size /// according to their requirements). The waker priority should /// also be provided, in the range `0..=10`. Sets up the /// **Stakker** instance to use `MioPoll` as the poll-waker, and /// puts a `MioPoll` clone into the **Stakker** anymap. pub fn new(stakker: &mut Stakker, poll: Poll, events: Events, waker_pri: u32) -> Result<Self> { let mut token_map = Slab::with_capacity(256); let waker_pri = waker_pri.min(MAX_PRI); let waker_token = Token(token_map.insert(Entry { pri: waker_pri, fwd: fwd_nop!(), })); assert_eq!(waker_token, WAKER_TOKEN); let waker = Arc::new(retry(|| Waker::new(poll.registry(), WAKER_TOKEN))?); let waker2 = waker.clone(); let mut ctrl = Control { poll, token_map, queues: Default::default(), max_pri: waker_pri, events, errors: Vec::new(), waker, }; let deferrer = stakker.deferrer(); ctrl.set_wake_fwd(Fwd::new(move |_| deferrer.defer(|s| s.poll_wake()))); let miopoll = Self { rc: Rc::new(RefCell::new(ctrl)), }; stakker.anymap_set(miopoll.clone()); stakker.set_poll_waker(move || { if let Err(e) = retry(|| waker2.wake()) { panic!("Inter-thread poll waker failed: {}", e); } }); Ok(miopoll) } /// Register a mio `Source` object with the poll instance. /// Returns a [`MioSource`] which takes care of cleaning up the /// token and handler when it is dropped. /// /// This uses edge-triggering: whenever one of the Interest flags /// included in `ready` changes state, the given `Fwd` instance /// will be invoked with the new `Ready` value. The contract with /// the handler is that there may be spurious calls to it, so it /// must be ready for that. /// /// `pri` gives a priority level: `0..=10`. If handlers are /// registered at different priority levels, then higher priority /// events get handled before lower priority events. Under /// constant very heavy load, lower priority events might be /// delayed indefinitely. /// /// [`MioSource`]: struct.MioSource.html pub fn add<S: Source>( &self, mut source: S, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<MioSource<S>> { let token = self.rc.borrow_mut().add(&mut source, ready, pri, fwd)?; Ok(MioSource { token, ctrl: self.rc.clone(), source, }) } /// Poll for new events and queue all the events of the highest /// available priority level. Events of lower priority levels are /// queued internally to be used on a future call to this method. 
/// /// So the expected pattern is that highest-priority handlers get /// run, and when all the resulting processing has completed in /// **Stakker**, then the main loop polls again, and if more /// high-priority events have occurred, then those too will get /// processed. Lower-priority handlers will only get a chance to /// run when nothing higher-priority needs handling. /// /// On success returns `Ok(true)` if an event was processed, or /// `Ok(false)` if there were no new events. pub fn poll(&self, max_delay: Duration) -> Result<bool> { self.rc.borrow_mut().poll(max_delay) } /// Set the handler for "wake" events. There can only be one /// handler for "wake" events, so setting it here drops the /// previous handler. Don't call this unless you wish to override /// the default wake handling which calls /// [`stakker::Stakker::poll_wake`]. /// /// [`stakker::Stakker::poll_wake`]: ../stakker/struct.Stakker.html#method.poll_wake pub fn set_wake_fwd(&mut self, fwd: Fwd<Ready>) { self.rc.borrow_mut().set_wake_fwd(fwd); } /// Get a cloned reference to the waker for this `MioPoll` /// instance. This can be passed to other threads, which can call /// `wake()` on it to cause the wake handler to be run in the main /// polling thread. pub fn waker(&mut self) -> Arc<Waker> { self.rc.borrow_mut().waker.clone() } } impl Clone for MioPoll { fn clone(&self) -> Self { Self { rc: self.rc.clone(), } } } struct QueueEvent { token: usize, ready: Ready, } struct Entry { pri: u32, fwd: Fwd<Ready>, } struct
{ token_map: Slab<Entry>, poll: Poll, // Highest priority in use goes on a fast path so we need queues // only for 0..=9 queues: [Vec<QueueEvent>; MAX_PRI as usize], max_pri: u32, events: Events, errors: Vec<Error>, waker: Arc<Waker>, } impl Control { #[inline] fn del(&mut self, token: Token, handle: &mut impl Source) -> Result<()> { let rv = retry(|| self.poll.registry().deregister(handle)); if self.token_map.contains(token.into()) { self.token_map.remove(token.into()); return rv; } rv.and(Err(Error::from(ErrorKind::NotFound))) } #[inline] fn add( &mut self, handle: &mut impl Source, ready: Interest, pri: u32, fwd: Fwd<Ready>, ) -> Result<Token> { let pri = pri.min(MAX_PRI); self.max_pri = self.max_pri.max(pri); let token = Token(self.token_map.insert(Entry { pri, fwd })); retry(|| self.poll.registry().register(handle, token, ready))?; Ok(token) } fn poll(&mut self, max_delay: Duration) -> Result<bool> { retry(|| self.poll.poll(&mut self.events, Some(max_delay)))?; let mut done = false; for ev in &self.events { let token = ev.token().into(); if let Some(ref mut entry) = self.token_map.get_mut(token) { // Fast-path for highest priority level present in // registrations, so if user uses only one priority level, // there is no queuing necessary here. let ready = Ready::new(ev); if entry.pri == self.max_pri { done = true; entry.fwd.fwd(ready); } else { self.queues[entry.pri as usize].push(QueueEvent { token, ready }); } } } self.events.clear(); if !done { for qu in self.queues.iter_mut().rev() { if !qu.is_empty() { for qev in qu.drain(..) { if let Some(ref mut entry) = self.token_map.get_mut(qev.token) { done = true; entry.fwd.fwd(qev.ready); } } if done { break; } } } } Ok(done)
Control
identifier_name
xgboost.go
pred(datasource='''{{.DataSource}}''', select='''{{.Select}}''', result_table='''{{.ResultTable}}''', pred_label_name='''{{.PredLabelName}}''', load='''{{.Load}}''') ` type xgbEvaluateFiller struct { StepIndex int DataSource string Select string ResultTable string PredLabelName string Load string ValidationMetrics string Submitter string } // XGBoostGenerateEvaluation generates the XGBoost evaluation code func XGBoostGenerateEvaluation(evalStmt *ir.EvaluateStmt, stepIndex int, session *pb.Session) (string, error) { ds, err := GeneratePyDbConnStr(session) if err != nil { return "", err } labelName := "" if nc, ok := evalStmt.Label.(*ir.NumericColumn); ok { labelName = nc.FieldDesc.Name } else { return "", fmt.Errorf("unsupported label type %T", evalStmt.Label) } metricList := []string{"accuracy_score"} if m, ok := evalStmt.Attributes["validation.metrics"]; ok { if metricStr, ok := m.(string); ok { metricList = []string{} for _, s := range strings.Split(metricStr, ",") { metricList = append(metricList, strings.TrimSpace(s)) } } else { return "", fmt.Errorf("validation.metrics must be of type string") } } metricPyStr := ir.AttrToPythonValue(metricList) filler := &xgbEvaluateFiller{ StepIndex: stepIndex, DataSource: ds, Select: replaceNewLineRuneAndTrimSpace(evalStmt.Select), ResultTable: evalStmt.Into, PredLabelName: labelName, Load: evalStmt.ModelName, ValidationMetrics: metricPyStr, Submitter: getSubmitter(session), } var program bytes.Buffer tpl := template.Must(template.New("Evaluate").Parse(xgbEvaluateTemplate)) if err := tpl.Execute(&program, filler); err != nil { return "", err } return program.String(), nil } const xgbEvaluateTemplate = ` def step_entry_{{.StepIndex}}(): import runtime.temp_file as temp_file from runtime.{{.Submitter}} import evaluate with temp_file.TemporaryDirectory(as_cwd=True): evaluate(datasource='''{{.DataSource}}''', select='''{{.Select}}''', result_table='''{{.ResultTable}}''', pred_label_name='''{{.PredLabelName}}''', load='''{{.Load}}''', validation_metrics={{.ValidationMetrics}}) ` func getSubmitter(session *pb.Session) string { if session.Submitter != "" { return session.Submitter } submitter := os.Getenv("SQLFLOW_submitter") if submitter != "" { return submitter } return "local" } func generateFeatureColumnCode(fcMap map[string][]ir.FeatureColumn) string { allFCCodes := make([]string, 0) for target, fcList := range fcMap { if len(fcList) == 0 { continue } codeList := make([]string, 0) for _, fc := range fcList { codeList = append(codeList, fc.GenPythonCode()) } code := fmt.Sprintf(`"%s":[%s]`, target, strings.Join(codeList, ",")) allFCCodes = append(allFCCodes, code) } return fmt.Sprintf("{%s}", strings.Join(allFCCodes, ",")) } // TODO(typhoonzero): below functions are copied from codegen/xgboost/codegen.go // remove the original functions when this experimental packages are ready. // ----------------------------------------------------------------------------- func getXGBoostObjectives() (ret []string) { for k := range attribute.XGBoostObjectiveDocs { ret = append(ret, k) } return } // TODO(tony): complete model parameter and training parameter list // model parameter list: https://xgboost.readthedocs.io/en/latest/parameter.html#general-parameters // training parameter list: https://github.com/dmlc/xgboost/blob/b61d53447203ca7a321d72f6bdd3f553a3aa06c4/python-package/xgboost/training.py#L115-L117 var attributeDictionary = attribute.Dictionary{}. 
Float("eta", float32(0.3), `[default=0.3, alias: learning_rate] Step size shrinkage used in update to prevents overfitting. After each boosting step, we can directly get the weights of new features, and eta shrinks the feature weights to make the boosting process more conservative. range: [0,1]`, attribute.Float32RangeChecker(0, 1, true, true)). Int("num_class", nil, `Number of classes. range: [2, Infinity]`, attribute.IntLowerBoundChecker(2, true)). String("objective", nil, `Learning objective`, attribute.StringChoicesChecker(getXGBoostObjectives()...)). String("eval_metric", nil, `eval metric`, nil). Bool("train.disk_cache", false, `whether use external memory to cache train data`, nil). Int("train.num_boost_round", 10, `[default=10] The number of rounds for boosting. range: [1, Infinity]`, attribute.IntLowerBoundChecker(1, true)). Int("train.batch_size", -1, `[default=-1] Batch size for each iteration, -1 means use all data at once. range: [-1, Infinity]`, attribute.IntLowerBoundChecker(-1, true)). Int("train.epoch", 1, `[default=1] Number of rounds to run the training. range: [1, Infinity]`, attribute.IntLowerBoundChecker(1, true)). String("validation.select", "", `[default=""] Specify the dataset for validation. example: "SELECT * FROM boston.train LIMIT 8"`, nil). Int("train.num_workers", 1, `[default=1] Number of workers for distributed train, 1 means stand-alone mode. range: [1, 128]`, attribute.IntRangeChecker(1, 128, true, true)) var fullAttrValidator = attribute.Dictionary{} func updateIfKeyDoesNotExist(current, add map[string]interface{}) { for k, v := range add { if _, ok := current[k]; !ok { current[k] = v } } } func resolveModelParams(ir *ir.TrainStmt) error { switch strings.ToUpper(ir.Estimator) { case "XGBOOST.XGBREGRESSOR", "XGBREGRESSOR": defaultAttributes := map[string]interface{}{"objective": "reg:squarederror"} updateIfKeyDoesNotExist(ir.Attributes, defaultAttributes) case "XGBOOST.XGBRFREGRESSOR", "XGBRFREGRESSOR": defaultAttributes := map[string]interface{}{"objective": "reg:squarederror", "learning_rate": 1, "subsample": 0.8, "colsample_bynode": 0.8, "reg_lambda": 1e-05} updateIfKeyDoesNotExist(ir.Attributes, defaultAttributes) case "XGBOOST.XGBCLASSIFIER", "XGBCLASSIFIER": defaultAttributes := map[string]interface{}{"objective": "binary:logistic"} updateIfKeyDoesNotExist(ir.Attributes, defaultAttributes) case "XGBOOST.XGBRFCLASSIFIER", "XGBRFCLASSIFIER": defaultAttributes := map[string]interface{}{"objective": "multi:softprob", "learning_rate": 1, "subsample": 0.8, "colsample_bynode": 0.8, "reg_lambda": 1e-05} updateIfKeyDoesNotExist(ir.Attributes, defaultAttributes) case "XGBOOST.XGBRANKER", "XGBRANKER": defaultAttributes := map[string]interface{}{"objective": "rank:pairwise"} updateIfKeyDoesNotExist(ir.Attributes, defaultAttributes) case "XGBOOST.GBTREE": defaultAttributes := map[string]interface{}{"booster": "gbtree"} updateIfKeyDoesNotExist(ir.Attributes, defaultAttributes) case "XGBOOST.GBLINEAR": defaultAttributes := map[string]interface{}{"booster": "gblinear"} updateIfKeyDoesNotExist(ir.Attributes, defaultAttributes) case "XGBOOST.DART": defaultAttributes := map[string]interface{}{"booster": "dart"} updateIfKeyDoesNotExist(ir.Attributes, defaultAttributes) default: return fmt.Errorf("unsupported model name %v, currently supports xgboost.gbtree, xgboost.gblinear, xgboost.dart", ir.Estimator) } return nil } func parseAttribute(attrs map[string]interface{}) map[string]map[string]interface{}
{ params := map[string]map[string]interface{}{"": {}, "train.": {}} paramPrefix := []string{"train.", ""} // use a slice to preserve traversal order; this is necessary because every key matches the empty prefix "" for key, attr := range attrs { for _, pp := range paramPrefix { if strings.HasPrefix(key, pp) { params[pp][key[len(pp):]] = attr break } } } return params }
identifier_body
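parseAttribute, the masked identifier_body here, splits a flat attribute map into model parameters (no prefix) and training parameters (the "train." prefix), checking "train." first because every key trivially matches the empty prefix. A Python sketch of the same splitting rule; the example keys are invented.

def parse_attributes(attrs):
    """Split a flat dict into {"": model_params, "train.": train_params}."""
    params = {"": {}, "train.": {}}
    # Check "train." before "" because every key matches the empty prefix.
    for key, value in attrs.items():
        for prefix in ("train.", ""):
            if key.startswith(prefix):
                params[prefix][key[len(prefix):]] = value
                break
    return params

print(parse_attributes({"eta": 0.3, "objective": "binary:logistic",
                        "train.num_boost_round": 30, "train.epoch": 2}))
# {'': {'eta': 0.3, 'objective': 'binary:logistic'},
#  'train.': {'num_boost_round': 30, 'epoch': 2}}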
xgboost.go
batch_size") } epochAttr, ok := params["train."]["epoch"] if ok { epoch = epochAttr.(int) delete(params["train."], "epoch") } if _, ok := params["train."]["num_workers"]; ok { delete(params["train."], "num_workers") } if len(trainStmt.Features) > 1 { return "", fmt.Errorf("xgboost only support 0 or 1 feature column set, received %d", len(trainStmt.Features)) } // featureColumnCode is a python map definition code like fc_map = {"feature_columns": [...]} featureColumnCode := generateFeatureColumnCode(trainStmt.Features) labelColumnCode := trainStmt.Label.GenPythonCode() mp, err := json.Marshal(params[""]) if err != nil { return "", err } tp, err := json.Marshal(params["train."]) if err != nil { return "", err } dbConnStr, err := GeneratePyDbConnStr(session) if err != nil { return "", err } filler := xgbTrainFiller{ StepIndex: stepIndex, OriginalSQL: replaceNewLineRuneAndTrimSpace(trainStmt.OriginalSQL), ModelImage: trainStmt.ModelImage, Estimator: trainStmt.Estimator, DataSource: dbConnStr, Select: replaceNewLineRuneAndTrimSpace(trainStmt.Select), ValidationSelect: replaceNewLineRuneAndTrimSpace(trainStmt.ValidationSelect), ModelParamsJSON: string(mp), TrainParamsJSON: string(tp), FeatureColumnCode: featureColumnCode, LabelColumnCode: labelColumnCode, Save: trainStmt.Into, Load: trainStmt.PreTrainedModel, DiskCache: diskCache, BatchSize: batchSize, Epoch: epoch, Submitter: getSubmitter(session), } var program bytes.Buffer var trainTemplate = template.Must(template.New("Train").Parse(xgbTrainTemplate)) err = trainTemplate.Execute(&program, filler) if err != nil { return "", err } return program.String(), nil } const xgbTrainTemplate = ` def step_entry_{{.StepIndex}}(): import json import runtime.temp_file as temp_file import runtime.feature.column import runtime.feature.field_desc from runtime.{{.Submitter}} import train feature_column_map = {{.FeatureColumnCode}} label_column = {{.LabelColumnCode}} model_params = json.loads('''{{.ModelParamsJSON}}''') train_params = json.loads('''{{.TrainParamsJSON}}''') with temp_file.TemporaryDirectory(as_cwd=True) as temp_dir: train_params["original_sql"] = '''{{.OriginalSQL}}''' train_params["model_image"] = '''{{.ModelImage}}''' train_params["feature_column_map"] = feature_column_map train_params["label_column"] = label_column train_params["disk_cache"] = "{{.DiskCache}}"=="true" train_params["batch_size"] = {{.BatchSize}} train_params["epoch"] = {{.Epoch}} train(datasource='''{{.DataSource}}''', estimator_string='''{{.Estimator}}''', select='''{{.Select}}''', validation_select='''{{.ValidationSelect}}''', model_params=model_params, save='''{{.Save}}''', load='''{{.Load}}''', train_params=train_params) ` type xgbPredFiller struct { StepIndex int DataSource string Select string PredLabelName string ResultTable string Load string Submitter string } // XGBoostGeneratePredict generates the XGBoost prediction code func XGBoostGeneratePredict(predStmt *ir.PredictStmt, stepIndex int, session *pb.Session) (string, error) { dbConnStr, err := GeneratePyDbConnStr(session) if err != nil { return "", err } filler := &xgbPredFiller{ StepIndex: stepIndex, DataSource: dbConnStr, Select: replaceNewLineRuneAndTrimSpace(predStmt.Select), PredLabelName: predStmt.ResultColumn, ResultTable: predStmt.ResultTable, Load: predStmt.Using, Submitter: getSubmitter(session), } var program bytes.Buffer predTmpl := template.Must(template.New("Train").Parse(xgbPredTemplate)) err = predTmpl.Execute(&program, filler) if err != nil { return "", err } return program.String(), nil } const 
xgbPredTemplate = ` def step_entry_{{.StepIndex}}(): import runtime.temp_file as temp_file from runtime.{{.Submitter}} import pred with temp_file.TemporaryDirectory(as_cwd=True): pred(datasource='''{{.DataSource}}''', select='''{{.Select}}''', result_table='''{{.ResultTable}}''', pred_label_name='''{{.PredLabelName}}''', load='''{{.Load}}''') ` type xgbEvaluateFiller struct { StepIndex int DataSource string Select string ResultTable string PredLabelName string Load string ValidationMetrics string Submitter string } // XGBoostGenerateEvaluation generates the XGBoost evaluation code func XGBoostGenerateEvaluation(evalStmt *ir.EvaluateStmt, stepIndex int, session *pb.Session) (string, error) { ds, err := GeneratePyDbConnStr(session) if err != nil { return "", err } labelName := "" if nc, ok := evalStmt.Label.(*ir.NumericColumn); ok { labelName = nc.FieldDesc.Name } else { return "", fmt.Errorf("unsupported label type %T", evalStmt.Label) } metricList := []string{"accuracy_score"} if m, ok := evalStmt.Attributes["validation.metrics"]; ok { if metricStr, ok := m.(string); ok { metricList = []string{} for _, s := range strings.Split(metricStr, ",") { metricList = append(metricList, strings.TrimSpace(s)) } } else { return "", fmt.Errorf("validation.metrics must be of type string") } } metricPyStr := ir.AttrToPythonValue(metricList) filler := &xgbEvaluateFiller{ StepIndex: stepIndex, DataSource: ds, Select: replaceNewLineRuneAndTrimSpace(evalStmt.Select), ResultTable: evalStmt.Into, PredLabelName: labelName, Load: evalStmt.ModelName, ValidationMetrics: metricPyStr, Submitter: getSubmitter(session), } var program bytes.Buffer tpl := template.Must(template.New("Evaluate").Parse(xgbEvaluateTemplate)) if err := tpl.Execute(&program, filler); err != nil { return "", err } return program.String(), nil } const xgbEvaluateTemplate = ` def step_entry_{{.StepIndex}}(): import runtime.temp_file as temp_file from runtime.{{.Submitter}} import evaluate with temp_file.TemporaryDirectory(as_cwd=True): evaluate(datasource='''{{.DataSource}}''', select='''{{.Select}}''', result_table='''{{.ResultTable}}''', pred_label_name='''{{.PredLabelName}}''', load='''{{.Load}}''', validation_metrics={{.ValidationMetrics}}) ` func
(session *pb.Session) string { if session.Submitter != "" { return session.Submitter } submitter := os.Getenv("SQLFLOW_submitter") if submitter != "" { return submitter } return "local" } func generateFeatureColumnCode(fcMap map[string][]ir.FeatureColumn) string { allFCCodes := make([]string, 0) for target, fcList := range fcMap { if len(fcList) == 0 { continue } codeList := make([]string, 0) for _, fc := range fcList { codeList = append(codeList, fc.GenPythonCode()) } code := fmt.Sprintf(`"%s":[%s]`, target, strings.Join(codeList, ",")) allFCCodes = append(allFCCodes, code) } return fmt.Sprintf("{%s}", strings.Join(allFCCodes, ",")) } // TODO(typhoonzero): below functions are copied from codegen/xgboost/codegen.go // remove the original functions when this experimental packages are ready. // ----------------------------------------------------------------------------- func getXGBoostObjectives() (ret []string) { for k := range attribute.XGBoostObjectiveDocs { ret = append(ret, k) } return } // TODO(tony): complete model parameter and training parameter list // model parameter list: https://xgboost.readthedocs.io/en/latest/parameter.html#general-parameters // training parameter list: https://github.com/dmlc/xgboost/blob/b61d53447203ca7a321d72f6bdd3f553a3
getSubmitter
identifier_name
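For orientation, executing xgbPredTemplate from this record with a filled-in xgbPredFiller yields a step function shaped like the sketch below. The step index, connection string, SQL, table names, and model name are placeholders rather than captured generator output, and running it assumes the SQLFlow runtime package is importable.

def step_entry_1():
    import runtime.temp_file as temp_file
    from runtime.local import pred
    with temp_file.TemporaryDirectory(as_cwd=True):
        pred(datasource='''mysql://user:pass@tcp(127.0.0.1:3306)/''',
             select='''SELECT * FROM iris.test''',
             result_table='''iris.predict''',
             pred_label_name='''class''',
             load='''my_xgb_model''')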
xgboost.go
: trainStmt.ModelImage, Estimator: trainStmt.Estimator, DataSource: dbConnStr, Select: replaceNewLineRuneAndTrimSpace(trainStmt.Select), ValidationSelect: replaceNewLineRuneAndTrimSpace(trainStmt.ValidationSelect), ModelParamsJSON: string(mp), TrainParamsJSON: string(tp), FeatureColumnCode: featureColumnCode, LabelColumnCode: labelColumnCode, Save: trainStmt.Into, Load: trainStmt.PreTrainedModel, DiskCache: diskCache, BatchSize: batchSize, Epoch: epoch, Submitter: getSubmitter(session), } var program bytes.Buffer var trainTemplate = template.Must(template.New("Train").Parse(xgbTrainTemplate)) err = trainTemplate.Execute(&program, filler) if err != nil { return "", err } return program.String(), nil } const xgbTrainTemplate = ` def step_entry_{{.StepIndex}}(): import json import runtime.temp_file as temp_file import runtime.feature.column import runtime.feature.field_desc from runtime.{{.Submitter}} import train feature_column_map = {{.FeatureColumnCode}} label_column = {{.LabelColumnCode}} model_params = json.loads('''{{.ModelParamsJSON}}''') train_params = json.loads('''{{.TrainParamsJSON}}''') with temp_file.TemporaryDirectory(as_cwd=True) as temp_dir: train_params["original_sql"] = '''{{.OriginalSQL}}''' train_params["model_image"] = '''{{.ModelImage}}''' train_params["feature_column_map"] = feature_column_map train_params["label_column"] = label_column train_params["disk_cache"] = "{{.DiskCache}}"=="true" train_params["batch_size"] = {{.BatchSize}} train_params["epoch"] = {{.Epoch}} train(datasource='''{{.DataSource}}''', estimator_string='''{{.Estimator}}''', select='''{{.Select}}''', validation_select='''{{.ValidationSelect}}''', model_params=model_params, save='''{{.Save}}''', load='''{{.Load}}''', train_params=train_params) ` type xgbPredFiller struct { StepIndex int DataSource string Select string PredLabelName string ResultTable string Load string Submitter string } // XGBoostGeneratePredict generates the XGBoost prediction code func XGBoostGeneratePredict(predStmt *ir.PredictStmt, stepIndex int, session *pb.Session) (string, error) { dbConnStr, err := GeneratePyDbConnStr(session) if err != nil { return "", err } filler := &xgbPredFiller{ StepIndex: stepIndex, DataSource: dbConnStr, Select: replaceNewLineRuneAndTrimSpace(predStmt.Select), PredLabelName: predStmt.ResultColumn, ResultTable: predStmt.ResultTable, Load: predStmt.Using, Submitter: getSubmitter(session), } var program bytes.Buffer predTmpl := template.Must(template.New("Train").Parse(xgbPredTemplate)) err = predTmpl.Execute(&program, filler) if err != nil { return "", err } return program.String(), nil } const xgbPredTemplate = ` def step_entry_{{.StepIndex}}(): import runtime.temp_file as temp_file from runtime.{{.Submitter}} import pred with temp_file.TemporaryDirectory(as_cwd=True): pred(datasource='''{{.DataSource}}''', select='''{{.Select}}''', result_table='''{{.ResultTable}}''', pred_label_name='''{{.PredLabelName}}''', load='''{{.Load}}''') ` type xgbEvaluateFiller struct { StepIndex int DataSource string Select string ResultTable string PredLabelName string Load string ValidationMetrics string Submitter string } // XGBoostGenerateEvaluation generates the XGBoost evaluation code func XGBoostGenerateEvaluation(evalStmt *ir.EvaluateStmt, stepIndex int, session *pb.Session) (string, error) { ds, err := GeneratePyDbConnStr(session) if err != nil { return "", err } labelName := "" if nc, ok := evalStmt.Label.(*ir.NumericColumn); ok { labelName = nc.FieldDesc.Name } else { return "", fmt.Errorf("unsupported label 
type %T", evalStmt.Label) } metricList := []string{"accuracy_score"} if m, ok := evalStmt.Attributes["validation.metrics"]; ok { if metricStr, ok := m.(string); ok { metricList = []string{} for _, s := range strings.Split(metricStr, ",") { metricList = append(metricList, strings.TrimSpace(s)) } } else { return "", fmt.Errorf("validation.metrics must be of type string") } } metricPyStr := ir.AttrToPythonValue(metricList) filler := &xgbEvaluateFiller{ StepIndex: stepIndex, DataSource: ds, Select: replaceNewLineRuneAndTrimSpace(evalStmt.Select), ResultTable: evalStmt.Into, PredLabelName: labelName, Load: evalStmt.ModelName, ValidationMetrics: metricPyStr, Submitter: getSubmitter(session), } var program bytes.Buffer tpl := template.Must(template.New("Evaluate").Parse(xgbEvaluateTemplate)) if err := tpl.Execute(&program, filler); err != nil { return "", err } return program.String(), nil } const xgbEvaluateTemplate = ` def step_entry_{{.StepIndex}}(): import runtime.temp_file as temp_file from runtime.{{.Submitter}} import evaluate with temp_file.TemporaryDirectory(as_cwd=True): evaluate(datasource='''{{.DataSource}}''', select='''{{.Select}}''', result_table='''{{.ResultTable}}''', pred_label_name='''{{.PredLabelName}}''', load='''{{.Load}}''', validation_metrics={{.ValidationMetrics}}) ` func getSubmitter(session *pb.Session) string { if session.Submitter != "" { return session.Submitter } submitter := os.Getenv("SQLFLOW_submitter") if submitter != "" { return submitter } return "local" } func generateFeatureColumnCode(fcMap map[string][]ir.FeatureColumn) string { allFCCodes := make([]string, 0) for target, fcList := range fcMap { if len(fcList) == 0 { continue } codeList := make([]string, 0) for _, fc := range fcList { codeList = append(codeList, fc.GenPythonCode()) } code := fmt.Sprintf(`"%s":[%s]`, target, strings.Join(codeList, ",")) allFCCodes = append(allFCCodes, code) } return fmt.Sprintf("{%s}", strings.Join(allFCCodes, ",")) } // TODO(typhoonzero): below functions are copied from codegen/xgboost/codegen.go // remove the original functions when this experimental packages are ready. // ----------------------------------------------------------------------------- func getXGBoostObjectives() (ret []string) { for k := range attribute.XGBoostObjectiveDocs { ret = append(ret, k) } return } // TODO(tony): complete model parameter and training parameter list // model parameter list: https://xgboost.readthedocs.io/en/latest/parameter.html#general-parameters // training parameter list: https://github.com/dmlc/xgboost/blob/b61d53447203ca7a321d72f6bdd3f553a3aa06c4/python-package/xgboost/training.py#L115-L117 var attributeDictionary = attribute.Dictionary{}. Float("eta", float32(0.3), `[default=0.3, alias: learning_rate] Step size shrinkage used in update to prevents overfitting. After each boosting step, we can directly get the weights of new features, and eta shrinks the feature weights to make the boosting process more conservative. range: [0,1]`, attribute.Float32RangeChecker(0, 1, true, true)). Int("num_class", nil, `Number of classes. range: [2, Infinity]`, attribute.IntLowerBoundChecker(2, true)). String("objective", nil, `Learning objective`, attribute.StringChoicesChecker(getXGBoostObjectives()...)). String("eval_metric", nil, `eval metric`, nil). Bool("train.disk_cache", false, `whether use external memory to cache train data`, nil). Int("train.num_boost_round", 10, `[default=10]
The number of rounds for boosting. range: [1, Infinity]`, attribute.IntLowerBoundChecker(1, true)). Int("train.batch_size", -1, `[default=-1]
random_line_split
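The attribute dictionary in the xgboost.go snippet above attaches a checker to each SQL attribute (for example `eta` is constrained to [0, 1] via Float32RangeChecker and `num_class` to >= 2 via IntLowerBoundChecker). The following is a minimal Python sketch of that style of range-checked validation; the helper names and parameter values are hypothetical and this is not SQLFlow's actual `attribute` package.

```python
# Hypothetical range-checked attribute validation, mirroring the checkers declared
# for eta, num_class and train.num_boost_round in the Go dictionary above.
def float_range_checker(lower, upper, include_lower=True, include_upper=True):
    def check(name, value):
        ok_low = value >= lower if include_lower else value > lower
        ok_high = value <= upper if include_upper else value < upper
        if not (ok_low and ok_high):
            raise ValueError(f"{name}={value} is out of range [{lower}, {upper}]")
    return check

def int_lower_bound_checker(lower, inclusive=True):
    def check(name, value):
        if not (value >= lower if inclusive else value > lower):
            raise ValueError(f"{name}={value} must be >= {lower}")
    return check

checkers = {
    "eta": float_range_checker(0.0, 1.0),
    "num_class": int_lower_bound_checker(2),
    "train.num_boost_round": int_lower_bound_checker(1),
}

params = {"eta": 0.3, "num_class": 3, "train.num_boost_round": 10}
for name, value in params.items():
    checkers[name](name, value)   # raises ValueError on the first violation
print("all attributes valid")
```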
xgboost.go
batch_size") } epochAttr, ok := params["train."]["epoch"] if ok { epoch = epochAttr.(int) delete(params["train."], "epoch") } if _, ok := params["train."]["num_workers"]; ok { delete(params["train."], "num_workers") } if len(trainStmt.Features) > 1 { return "", fmt.Errorf("xgboost only support 0 or 1 feature column set, received %d", len(trainStmt.Features)) } // featureColumnCode is a python map definition code like fc_map = {"feature_columns": [...]} featureColumnCode := generateFeatureColumnCode(trainStmt.Features) labelColumnCode := trainStmt.Label.GenPythonCode() mp, err := json.Marshal(params[""]) if err != nil { return "", err } tp, err := json.Marshal(params["train."]) if err != nil { return "", err } dbConnStr, err := GeneratePyDbConnStr(session) if err != nil { return "", err } filler := xgbTrainFiller{ StepIndex: stepIndex, OriginalSQL: replaceNewLineRuneAndTrimSpace(trainStmt.OriginalSQL), ModelImage: trainStmt.ModelImage, Estimator: trainStmt.Estimator, DataSource: dbConnStr, Select: replaceNewLineRuneAndTrimSpace(trainStmt.Select), ValidationSelect: replaceNewLineRuneAndTrimSpace(trainStmt.ValidationSelect), ModelParamsJSON: string(mp), TrainParamsJSON: string(tp), FeatureColumnCode: featureColumnCode, LabelColumnCode: labelColumnCode, Save: trainStmt.Into, Load: trainStmt.PreTrainedModel, DiskCache: diskCache, BatchSize: batchSize, Epoch: epoch, Submitter: getSubmitter(session), } var program bytes.Buffer var trainTemplate = template.Must(template.New("Train").Parse(xgbTrainTemplate)) err = trainTemplate.Execute(&program, filler) if err != nil { return "", err } return program.String(), nil } const xgbTrainTemplate = ` def step_entry_{{.StepIndex}}(): import json import runtime.temp_file as temp_file import runtime.feature.column import runtime.feature.field_desc from runtime.{{.Submitter}} import train feature_column_map = {{.FeatureColumnCode}} label_column = {{.LabelColumnCode}} model_params = json.loads('''{{.ModelParamsJSON}}''') train_params = json.loads('''{{.TrainParamsJSON}}''') with temp_file.TemporaryDirectory(as_cwd=True) as temp_dir: train_params["original_sql"] = '''{{.OriginalSQL}}''' train_params["model_image"] = '''{{.ModelImage}}''' train_params["feature_column_map"] = feature_column_map train_params["label_column"] = label_column train_params["disk_cache"] = "{{.DiskCache}}"=="true" train_params["batch_size"] = {{.BatchSize}} train_params["epoch"] = {{.Epoch}} train(datasource='''{{.DataSource}}''', estimator_string='''{{.Estimator}}''', select='''{{.Select}}''', validation_select='''{{.ValidationSelect}}''', model_params=model_params, save='''{{.Save}}''', load='''{{.Load}}''', train_params=train_params) ` type xgbPredFiller struct { StepIndex int DataSource string Select string PredLabelName string ResultTable string Load string Submitter string } // XGBoostGeneratePredict generates the XGBoost prediction code func XGBoostGeneratePredict(predStmt *ir.PredictStmt, stepIndex int, session *pb.Session) (string, error) { dbConnStr, err := GeneratePyDbConnStr(session) if err != nil { return "", err } filler := &xgbPredFiller{ StepIndex: stepIndex, DataSource: dbConnStr, Select: replaceNewLineRuneAndTrimSpace(predStmt.Select), PredLabelName: predStmt.ResultColumn, ResultTable: predStmt.ResultTable, Load: predStmt.Using, Submitter: getSubmitter(session), } var program bytes.Buffer predTmpl := template.Must(template.New("Train").Parse(xgbPredTemplate)) err = predTmpl.Execute(&program, filler) if err != nil { return "", err } return program.String(), nil } const 
xgbPredTemplate = ` def step_entry_{{.StepIndex}}(): import runtime.temp_file as temp_file from runtime.{{.Submitter}} import pred with temp_file.TemporaryDirectory(as_cwd=True): pred(datasource='''{{.DataSource}}''', select='''{{.Select}}''', result_table='''{{.ResultTable}}''', pred_label_name='''{{.PredLabelName}}''', load='''{{.Load}}''') ` type xgbEvaluateFiller struct { StepIndex int DataSource string Select string ResultTable string PredLabelName string Load string ValidationMetrics string Submitter string } // XGBoostGenerateEvaluation generates the XGBoost evaluation code func XGBoostGenerateEvaluation(evalStmt *ir.EvaluateStmt, stepIndex int, session *pb.Session) (string, error) { ds, err := GeneratePyDbConnStr(session) if err != nil { return "", err } labelName := "" if nc, ok := evalStmt.Label.(*ir.NumericColumn); ok { labelName = nc.FieldDesc.Name } else { return "", fmt.Errorf("unsupported label type %T", evalStmt.Label) } metricList := []string{"accuracy_score"} if m, ok := evalStmt.Attributes["validation.metrics"]; ok { if metricStr, ok := m.(string); ok { metricList = []string{} for _, s := range strings.Split(metricStr, ",") { metricList = append(metricList, strings.TrimSpace(s)) } } else { return "", fmt.Errorf("validation.metrics must be of type string") } } metricPyStr := ir.AttrToPythonValue(metricList) filler := &xgbEvaluateFiller{ StepIndex: stepIndex, DataSource: ds, Select: replaceNewLineRuneAndTrimSpace(evalStmt.Select), ResultTable: evalStmt.Into, PredLabelName: labelName, Load: evalStmt.ModelName, ValidationMetrics: metricPyStr, Submitter: getSubmitter(session), } var program bytes.Buffer tpl := template.Must(template.New("Evaluate").Parse(xgbEvaluateTemplate)) if err := tpl.Execute(&program, filler); err != nil { return "", err } return program.String(), nil } const xgbEvaluateTemplate = ` def step_entry_{{.StepIndex}}(): import runtime.temp_file as temp_file from runtime.{{.Submitter}} import evaluate with temp_file.TemporaryDirectory(as_cwd=True): evaluate(datasource='''{{.DataSource}}''', select='''{{.Select}}''', result_table='''{{.ResultTable}}''', pred_label_name='''{{.PredLabelName}}''', load='''{{.Load}}''', validation_metrics={{.ValidationMetrics}}) ` func getSubmitter(session *pb.Session) string { if session.Submitter != "" { return session.Submitter } submitter := os.Getenv("SQLFLOW_submitter") if submitter != ""
return "local" } func generateFeatureColumnCode(fcMap map[string][]ir.FeatureColumn) string { allFCCodes := make([]string, 0) for target, fcList := range fcMap { if len(fcList) == 0 { continue } codeList := make([]string, 0) for _, fc := range fcList { codeList = append(codeList, fc.GenPythonCode()) } code := fmt.Sprintf(`"%s":[%s]`, target, strings.Join(codeList, ",")) allFCCodes = append(allFCCodes, code) } return fmt.Sprintf("{%s}", strings.Join(allFCCodes, ",")) } // TODO(typhoonzero): below functions are copied from codegen/xgboost/codegen.go // remove the original functions when this experimental packages are ready. // ----------------------------------------------------------------------------- func getXGBoostObjectives() (ret []string) { for k := range attribute.XGBoostObjectiveDocs { ret = append(ret, k) } return } // TODO(tony): complete model parameter and training parameter list // model parameter list: https://xgboost.readthedocs.io/en/latest/parameter.html#general-parameters // training parameter list: https://github.com/dmlc/xgboost/blob/b61d53447203ca7a321d72f6bdd3f553a
{ return submitter }
conditional_block
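xgbTrainTemplate, xgbPredTemplate and xgbEvaluateTemplate in the row above are Go text/template bodies whose rendered output is a Python step function; the XGBoostGenerate* functions only fill in placeholders from a filler struct and return the resulting source. Below is a tiny Python sketch of the same fill-then-execute idea, using string.Template in place of Go's text/template; the placeholder names and values are invented for illustration.

```python
# Illustration of template-based step generation: substitute filler values into a
# Python function body, then execute the generated source.
from string import Template

step_template = Template('''
def step_entry_${step_index}():
    select = """${select}"""
    result_table = """${result_table}"""
    print("would predict", select, "into", result_table)
''')

filler = {"step_index": 0,
          "select": "SELECT * FROM iris.test",
          "result_table": "iris.predict"}

program = step_template.substitute(filler)
print(program)   # the generated step source, analogous to program.String() in Go
exec(program)    # defines step_entry_0 in this module
step_entry_0()
```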
internals.rs
<T> where T: Float, { pub frequency: T, pub clarity: T, } /// Data structure to hold any buffers needed for pitch computation. /// For WASM it's best to allocate buffers once rather than allocate and /// free buffers repeatedly, so we use a `BufferPool` object to manage the buffers. pub struct DetectorInternals<T> where T: Float, { pub size: usize, pub padding: usize, pub buffers: BufferPool<T>, } impl<T> DetectorInternals<T> where T: Float, { pub fn new(size: usize, padding: usize) -> Self { let buffers = BufferPool::new(size + padding); DetectorInternals { size, padding, buffers, } } } /// Compute the autocorrelation of `signal` to `result`. All buffers but `signal` /// may be used as scratch. pub fn autocorrelation<T>(signal: &[T], buffers: &mut BufferPool<T>, result: &mut [T]) where T: Float, { let (ref1, ref2) = (buffers.get_complex_buffer(), buffers.get_complex_buffer()); let signal_complex = &mut ref1.borrow_mut()[..]; let scratch = &mut ref2.borrow_mut()[..]; let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal_complex.len()); let inv_fft = planner.plan_fft_inverse(signal_complex.len()); // Compute the autocorrelation copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); fft.process_with_scratch(signal_complex, scratch); modulus_squared(signal_complex); inv_fft.process_with_scratch(signal_complex, scratch); copy_complex_to_real(signal_complex, result, ComplexComponent::Re); } pub fn pitch_from_peaks<T>( input: &[T], sample_rate: usize, clarity_threshold: T, correction: PeakCorrection, ) -> Option<Pitch<T>> where T: Float, { let sample_rate = T::from_usize(sample_rate).unwrap(); let peaks = detect_peaks(input); choose_peak(peaks, clarity_threshold) .map(|peak| correct_peak(peak, input, correction)) .map(|peak| Pitch { frequency: sample_rate / peak.0, clarity: peak.1 / input[0], }) } fn m_of_tau<T>(signal: &[T], signal_square_sum: Option<T>, result: &mut [T]) where T: Float + std::iter::Sum, { assert!(result.len() >= signal.len()); let signal_square_sum = signal_square_sum.unwrap_or_else(|| square_sum(signal)); let start = T::from_usize(2).unwrap() * signal_square_sum; result[0] = start; let last = result[1..] .iter_mut() .zip(signal) .fold(start, |old, (r, &s)| { *r = old - s * s; *r }); // Pad the end of `result` with the last value result[signal.len()..].iter_mut().for_each(|r| *r = last); } pub fn normalized_square_difference<T>(signal: &[T], buffers: &mut BufferPool<T>, result: &mut [T]) where T: Float + std::iter::Sum, { let two = T::from_usize(2).unwrap(); let scratch_ref = buffers.get_real_buffer(); let scratch = &mut scratch_ref.borrow_mut()[..]; autocorrelation(signal, buffers, result); m_of_tau(signal, Some(result[0]), scratch); result .iter_mut() .zip(scratch) .for_each(|(r, s)| *r = two * *r / *s) } /// Compute the windowed autocorrelation of `signal` and put the result in `result`. /// For a signal _x=(x_0,x_1,...)_, the windowed autocorrelation with window size _w_ is /// the function /// /// > r(t) = sum_{i=0}^{w-1} x_i*x_{i+t} /// /// This function assumes `window_size` is at most half of the length of `signal`. pub fn windowed_autocorrelation<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( buffers.buffer_size >= signal.len(), "Buffers must have a length at least equal to `signal`." 
); let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal.len()); let inv_fft = planner.plan_fft_inverse(signal.len()); let (scratch_ref1, scratch_ref2, scratch_ref3) = ( buffers.get_complex_buffer(), buffers.get_complex_buffer(), buffers.get_complex_buffer(), ); let signal_complex = &mut scratch_ref1.borrow_mut()[..signal.len()]; let truncated_signal_complex = &mut scratch_ref2.borrow_mut()[..signal.len()]; let scratch = &mut scratch_ref3.borrow_mut()[..signal.len()]; // To achieve the windowed autocorrelation, we compute the cross correlation between // the original signal and the signal truncated to lie in `0..window_size` copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); copy_real_to_complex( &signal[..window_size], truncated_signal_complex, ComplexComponent::Re, ); fft.process_with_scratch(signal_complex, scratch); fft.process_with_scratch(truncated_signal_complex, scratch); // rustfft doesn't normalize when it computes the fft, so we need to normalize ourselves by // dividing by `sqrt(signal.len())` each time we take an fft or inverse fft. // Since the fft is linear and we are doing fft -> inverse fft, we can just divide by // `signal.len()` once. let normalization_const = T::one() / T::from_usize(signal.len()).unwrap(); signal_complex .iter_mut() .zip(truncated_signal_complex.iter()) .for_each(|(a, b)| { *a = *a * normalization_const * b.conj(); }); inv_fft.process_with_scratch(signal_complex, scratch); // The result is valid only for `0..window_size` copy_complex_to_real(&signal_complex[..window_size], result, ComplexComponent::Re); } /// Compute the windowed square error, _d(t)_, of `signal`. For a window size of _w_ and a signal /// _x=(x_0,x_1,...)_, this is defined by /// /// > d(t) = sum_{i=0}^{w-1} (x_i - x_{i+t})^2 /// /// This function is computed efficiently using an FFT. It is assumed that `window_size` is at most half /// the length of `signal`. pub fn windowed_square_error<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( 2 * window_size <= signal.len(), "The window size cannot be more than half the signal length" ); let two = T::from_f64(2.).unwrap(); // The windowed square error function, d(t), can be computed // as d(t) = pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) // where pow_a^b is the sum of the square of `signal` on the window `a..b` // We proceed accordingly. windowed_autocorrelation(signal, window_size, buffers, result); let mut windowed_power = square_sum(&signal[..window_size]); let power = windowed_power; result.iter_mut().enumerate().for_each(|(i, a)| { // use the formula pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) *a = power + windowed_power - two * *a; // Since we're processing everything in order, we can computed pow_{t+1}^{t+1+w} // directly from pow_t^{t+w} by adding and subtracting the boundary terms. windowed_power = windowed_power - signal[i] * signal[i] + signal[i + window_size] * signal[i + window_size]; }) } /// Calculate the "cumulative mean normalized difference function" as /// specified in the YIN paper. If _d(t)_ is the square error function, /// compute _d'(0) = 1_ and for _t > 0_ /// /// > d'(t) = d(t) / [ (1/t) * sum_{i=0}^t d(i) ] pub fn yin_normalize_square_error<T: Float>(square_error: &mut [T]) { let mut sum = T::zero(); square_error[0] = T::one(); // square_error[0] should always be zero, so we don't need to worry about // adding this to our sum. 
square_error .iter_mut() .enumerate() .skip(1) .for
Pitch
identifier_name
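The `autocorrelation` routine in internals.rs computes the correlation by taking an FFT, squaring the modulus, and inverse-transforming, with explicit normalization because rustfft does not normalize its transforms. A short numpy sketch of the same trick (numpy assumed available), checked against the direct circular-autocorrelation sum:

```python
# FFT autocorrelation: r = IFFT(|FFT(x)|^2). numpy's ifft already divides by N,
# which plays the role of the explicit normalization in the Rust code.
import numpy as np

x = np.array([0.0, 1.0, 2.0, 0.0, -1.0, -2.0, 0.0, 0.0])  # signal plus zero padding

spectrum = np.fft.fft(x)
r_fft = np.fft.ifft(spectrum * np.conj(spectrum)).real

# direct circular autocorrelation for comparison
n = len(x)
r_direct = np.array([sum(x[i] * x[(i + t) % n] for i in range(n)) for t in range(n)])

print(np.allclose(r_fft, r_direct))  # True
```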
internals.rs
: usize, pub buffers: BufferPool<T>, } impl<T> DetectorInternals<T> where T: Float, { pub fn new(size: usize, padding: usize) -> Self { let buffers = BufferPool::new(size + padding); DetectorInternals { size, padding, buffers, } } } /// Compute the autocorrelation of `signal` to `result`. All buffers but `signal` /// may be used as scratch. pub fn autocorrelation<T>(signal: &[T], buffers: &mut BufferPool<T>, result: &mut [T]) where T: Float, { let (ref1, ref2) = (buffers.get_complex_buffer(), buffers.get_complex_buffer()); let signal_complex = &mut ref1.borrow_mut()[..]; let scratch = &mut ref2.borrow_mut()[..]; let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal_complex.len()); let inv_fft = planner.plan_fft_inverse(signal_complex.len()); // Compute the autocorrelation copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); fft.process_with_scratch(signal_complex, scratch); modulus_squared(signal_complex); inv_fft.process_with_scratch(signal_complex, scratch); copy_complex_to_real(signal_complex, result, ComplexComponent::Re); } pub fn pitch_from_peaks<T>( input: &[T], sample_rate: usize, clarity_threshold: T, correction: PeakCorrection, ) -> Option<Pitch<T>> where T: Float, { let sample_rate = T::from_usize(sample_rate).unwrap(); let peaks = detect_peaks(input); choose_peak(peaks, clarity_threshold) .map(|peak| correct_peak(peak, input, correction)) .map(|peak| Pitch { frequency: sample_rate / peak.0, clarity: peak.1 / input[0], }) } fn m_of_tau<T>(signal: &[T], signal_square_sum: Option<T>, result: &mut [T]) where T: Float + std::iter::Sum, { assert!(result.len() >= signal.len()); let signal_square_sum = signal_square_sum.unwrap_or_else(|| square_sum(signal)); let start = T::from_usize(2).unwrap() * signal_square_sum; result[0] = start; let last = result[1..] .iter_mut() .zip(signal) .fold(start, |old, (r, &s)| { *r = old - s * s; *r }); // Pad the end of `result` with the last value result[signal.len()..].iter_mut().for_each(|r| *r = last); } pub fn normalized_square_difference<T>(signal: &[T], buffers: &mut BufferPool<T>, result: &mut [T]) where T: Float + std::iter::Sum, { let two = T::from_usize(2).unwrap(); let scratch_ref = buffers.get_real_buffer(); let scratch = &mut scratch_ref.borrow_mut()[..]; autocorrelation(signal, buffers, result); m_of_tau(signal, Some(result[0]), scratch); result .iter_mut() .zip(scratch) .for_each(|(r, s)| *r = two * *r / *s) } /// Compute the windowed autocorrelation of `signal` and put the result in `result`. /// For a signal _x=(x_0,x_1,...)_, the windowed autocorrelation with window size _w_ is /// the function /// /// > r(t) = sum_{i=0}^{w-1} x_i*x_{i+t} /// /// This function assumes `window_size` is at most half of the length of `signal`. pub fn windowed_autocorrelation<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( buffers.buffer_size >= signal.len(), "Buffers must have a length at least equal to `signal`." 
); let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal.len()); let inv_fft = planner.plan_fft_inverse(signal.len()); let (scratch_ref1, scratch_ref2, scratch_ref3) = ( buffers.get_complex_buffer(), buffers.get_complex_buffer(), buffers.get_complex_buffer(), ); let signal_complex = &mut scratch_ref1.borrow_mut()[..signal.len()]; let truncated_signal_complex = &mut scratch_ref2.borrow_mut()[..signal.len()]; let scratch = &mut scratch_ref3.borrow_mut()[..signal.len()]; // To achieve the windowed autocorrelation, we compute the cross correlation between // the original signal and the signal truncated to lie in `0..window_size` copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); copy_real_to_complex( &signal[..window_size], truncated_signal_complex, ComplexComponent::Re, ); fft.process_with_scratch(signal_complex, scratch); fft.process_with_scratch(truncated_signal_complex, scratch); // rustfft doesn't normalize when it computes the fft, so we need to normalize ourselves by // dividing by `sqrt(signal.len())` each time we take an fft or inverse fft. // Since the fft is linear and we are doing fft -> inverse fft, we can just divide by // `signal.len()` once. let normalization_const = T::one() / T::from_usize(signal.len()).unwrap(); signal_complex .iter_mut() .zip(truncated_signal_complex.iter()) .for_each(|(a, b)| { *a = *a * normalization_const * b.conj(); }); inv_fft.process_with_scratch(signal_complex, scratch); // The result is valid only for `0..window_size` copy_complex_to_real(&signal_complex[..window_size], result, ComplexComponent::Re); } /// Compute the windowed square error, _d(t)_, of `signal`. For a window size of _w_ and a signal /// _x=(x_0,x_1,...)_, this is defined by /// /// > d(t) = sum_{i=0}^{w-1} (x_i - x_{i+t})^2 /// /// This function is computed efficiently using an FFT. It is assumed that `window_size` is at most half /// the length of `signal`. pub fn windowed_square_error<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( 2 * window_size <= signal.len(), "The window size cannot be more than half the signal length" ); let two = T::from_f64(2.).unwrap(); // The windowed square error function, d(t), can be computed // as d(t) = pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) // where pow_a^b is the sum of the square of `signal` on the window `a..b` // We proceed accordingly. windowed_autocorrelation(signal, window_size, buffers, result); let mut windowed_power = square_sum(&signal[..window_size]); let power = windowed_power; result.iter_mut().enumerate().for_each(|(i, a)| { // use the formula pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) *a = power + windowed_power - two * *a; // Since we're processing everything in order, we can computed pow_{t+1}^{t+1+w} // directly from pow_t^{t+w} by adding and subtracting the boundary terms. windowed_power = windowed_power - signal[i] * signal[i] + signal[i + window_size] * signal[i + window_size]; }) } /// Calculate the "cumulative mean normalized difference function" as /// specified in the YIN paper. If _d(t)_ is the square error function, /// compute _d'(0) = 1_ and for _t > 0_ /// /// > d'(t) = d(t) / [ (1/t) * sum_{i=0}^t d(i) ] pub fn yin_normalize_square_error<T: Float>(square_error: &mut [T]) { let mut sum = T::zero();
// adding this to our sum. square_error .iter_mut() .enumerate() .skip(1) .for_each(|(i, a)| { sum = sum + *a; *a = *a * T::from_usize(i + 1).unwrap() / sum; }); } #[cfg(test)] mod tests { use super::*; #[test] fn windowed_autocorrelation_test() { let signal: Vec<f64> = vec![0., 1., 2., 0., -1., -2.
square_error[0] = T::one(); // square_error[0] should always be zero, so we don't need to worry about
random_line_split
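windowed_square_error above relies on the identity d(t) = pow_0^w + pow_t^{t+w} - 2*r_w(t), updating the sliding power sum one sample at a time. A pure-Python check of that identity against the direct definition (illustration only, with arbitrarily chosen values):

```python
# d(t) = sum_{i<w} (x[i] - x[i+t])^2 equals pow_0^w + pow_t^{t+w} - 2 * r_w(t),
# where r_w is the windowed autocorrelation and pow_a^b is the sum of squares on a..b.
signal = [0.0, 1.0, 2.0, 0.0, -1.0, -2.0]
w = 3  # window size, at most half the signal length

def windowed_autocorr(x, w, t):
    return sum(x[i] * x[i + t] for i in range(w))

def direct_d(x, w, t):
    return sum((x[i] - x[i + t]) ** 2 for i in range(w))

power0 = sum(v * v for v in signal[:w])
windowed_power = power0
via_identity = []
for t in range(w):
    via_identity.append(power0 + windowed_power - 2.0 * windowed_autocorr(signal, w, t))
    # slide the power window: drop signal[t]^2, add signal[t+w]^2
    windowed_power += signal[t + w] ** 2 - signal[t] ** 2

print(via_identity)                                   # [0.0, 6.0, 14.0]
print([direct_d(signal, w, t) for t in range(w)])     # same values
```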
internals.rs
.get_complex_buffer()); let signal_complex = &mut ref1.borrow_mut()[..]; let scratch = &mut ref2.borrow_mut()[..]; let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal_complex.len()); let inv_fft = planner.plan_fft_inverse(signal_complex.len()); // Compute the autocorrelation copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); fft.process_with_scratch(signal_complex, scratch); modulus_squared(signal_complex); inv_fft.process_with_scratch(signal_complex, scratch); copy_complex_to_real(signal_complex, result, ComplexComponent::Re); } pub fn pitch_from_peaks<T>( input: &[T], sample_rate: usize, clarity_threshold: T, correction: PeakCorrection, ) -> Option<Pitch<T>> where T: Float, { let sample_rate = T::from_usize(sample_rate).unwrap(); let peaks = detect_peaks(input); choose_peak(peaks, clarity_threshold) .map(|peak| correct_peak(peak, input, correction)) .map(|peak| Pitch { frequency: sample_rate / peak.0, clarity: peak.1 / input[0], }) } fn m_of_tau<T>(signal: &[T], signal_square_sum: Option<T>, result: &mut [T]) where T: Float + std::iter::Sum, { assert!(result.len() >= signal.len()); let signal_square_sum = signal_square_sum.unwrap_or_else(|| square_sum(signal)); let start = T::from_usize(2).unwrap() * signal_square_sum; result[0] = start; let last = result[1..] .iter_mut() .zip(signal) .fold(start, |old, (r, &s)| { *r = old - s * s; *r }); // Pad the end of `result` with the last value result[signal.len()..].iter_mut().for_each(|r| *r = last); } pub fn normalized_square_difference<T>(signal: &[T], buffers: &mut BufferPool<T>, result: &mut [T]) where T: Float + std::iter::Sum, { let two = T::from_usize(2).unwrap(); let scratch_ref = buffers.get_real_buffer(); let scratch = &mut scratch_ref.borrow_mut()[..]; autocorrelation(signal, buffers, result); m_of_tau(signal, Some(result[0]), scratch); result .iter_mut() .zip(scratch) .for_each(|(r, s)| *r = two * *r / *s) } /// Compute the windowed autocorrelation of `signal` and put the result in `result`. /// For a signal _x=(x_0,x_1,...)_, the windowed autocorrelation with window size _w_ is /// the function /// /// > r(t) = sum_{i=0}^{w-1} x_i*x_{i+t} /// /// This function assumes `window_size` is at most half of the length of `signal`. pub fn windowed_autocorrelation<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( buffers.buffer_size >= signal.len(), "Buffers must have a length at least equal to `signal`." 
); let mut planner = FftPlanner::new(); let fft = planner.plan_fft_forward(signal.len()); let inv_fft = planner.plan_fft_inverse(signal.len()); let (scratch_ref1, scratch_ref2, scratch_ref3) = ( buffers.get_complex_buffer(), buffers.get_complex_buffer(), buffers.get_complex_buffer(), ); let signal_complex = &mut scratch_ref1.borrow_mut()[..signal.len()]; let truncated_signal_complex = &mut scratch_ref2.borrow_mut()[..signal.len()]; let scratch = &mut scratch_ref3.borrow_mut()[..signal.len()]; // To achieve the windowed autocorrelation, we compute the cross correlation between // the original signal and the signal truncated to lie in `0..window_size` copy_real_to_complex(signal, signal_complex, ComplexComponent::Re); copy_real_to_complex( &signal[..window_size], truncated_signal_complex, ComplexComponent::Re, ); fft.process_with_scratch(signal_complex, scratch); fft.process_with_scratch(truncated_signal_complex, scratch); // rustfft doesn't normalize when it computes the fft, so we need to normalize ourselves by // dividing by `sqrt(signal.len())` each time we take an fft or inverse fft. // Since the fft is linear and we are doing fft -> inverse fft, we can just divide by // `signal.len()` once. let normalization_const = T::one() / T::from_usize(signal.len()).unwrap(); signal_complex .iter_mut() .zip(truncated_signal_complex.iter()) .for_each(|(a, b)| { *a = *a * normalization_const * b.conj(); }); inv_fft.process_with_scratch(signal_complex, scratch); // The result is valid only for `0..window_size` copy_complex_to_real(&signal_complex[..window_size], result, ComplexComponent::Re); } /// Compute the windowed square error, _d(t)_, of `signal`. For a window size of _w_ and a signal /// _x=(x_0,x_1,...)_, this is defined by /// /// > d(t) = sum_{i=0}^{w-1} (x_i - x_{i+t})^2 /// /// This function is computed efficiently using an FFT. It is assumed that `window_size` is at most half /// the length of `signal`. pub fn windowed_square_error<T>( signal: &[T], window_size: usize, buffers: &mut BufferPool<T>, result: &mut [T], ) where T: Float + std::iter::Sum, { assert!( 2 * window_size <= signal.len(), "The window size cannot be more than half the signal length" ); let two = T::from_f64(2.).unwrap(); // The windowed square error function, d(t), can be computed // as d(t) = pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) // where pow_a^b is the sum of the square of `signal` on the window `a..b` // We proceed accordingly. windowed_autocorrelation(signal, window_size, buffers, result); let mut windowed_power = square_sum(&signal[..window_size]); let power = windowed_power; result.iter_mut().enumerate().for_each(|(i, a)| { // use the formula pow_0^w + pow_t^{t+w} - 2*windowed_autocorrelation(t) *a = power + windowed_power - two * *a; // Since we're processing everything in order, we can computed pow_{t+1}^{t+1+w} // directly from pow_t^{t+w} by adding and subtracting the boundary terms. windowed_power = windowed_power - signal[i] * signal[i] + signal[i + window_size] * signal[i + window_size]; }) } /// Calculate the "cumulative mean normalized difference function" as /// specified in the YIN paper. If _d(t)_ is the square error function, /// compute _d'(0) = 1_ and for _t > 0_ /// /// > d'(t) = d(t) / [ (1/t) * sum_{i=0}^t d(i) ] pub fn yin_normalize_square_error<T: Float>(square_error: &mut [T]) { let mut sum = T::zero(); square_error[0] = T::one(); // square_error[0] should always be zero, so we don't need to worry about // adding this to our sum. 
square_error .iter_mut() .enumerate() .skip(1) .for_each(|(i, a)| { sum = sum + *a; *a = *a * T::from_usize(i + 1).unwrap() / sum; }); } #[cfg(test)] mod tests { use super::*; #[test] fn windowed_autocorrelation_test()
{ let signal: Vec<f64> = vec![0., 1., 2., 0., -1., -2.]; let window_size: usize = 3; let buffers = &mut BufferPool::new(signal.len()); let result: Vec<f64> = (0..window_size) .map(|i| { signal[..window_size] .iter() .zip(signal[i..(i + window_size)].iter()) .map(|(a, b)| *a * *b) .sum() }) .collect(); let mut computed_result = vec![0.; window_size]; windowed_autocorrelation(&signal, window_size, buffers, &mut computed_result); // Using an FFT loses precision; we don't care that much, so round generously. computed_result
identifier_body
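The doc comment on yin_normalize_square_error gives the YIN "cumulative mean normalized difference": d'(0) = 1 and d'(t) = d(t) / ((1/t) * sum_{i=0}^{t} d(i)). Below is a small standalone sketch of that formula (the Rust loop carries out the normalization in place with a running sum); the sample values are arbitrary.

```python
# Cumulative mean normalized difference as described in the doc comment above.
def yin_normalize(d):
    out = [1.0]  # d'(0) is defined to be 1
    running_sum = 0.0
    for t in range(1, len(d)):
        running_sum += d[t]          # d[0] is 0, so it never contributes
        out.append(d[t] * t / running_sum)
    return out

square_error = [0.0, 6.0, 14.0, 10.0, 2.0]
print(yin_normalize(square_error))   # [1.0, 1.0, 1.4, 1.0, 0.25]
```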
main.rs
sync::{mpsc::Receiver, Arc, Mutex, RwLock}; use chain_impl_mockchain::block::{message::MessageId, Message}; use futures::Future; use bech32::{u5, Bech32, FromBase32, ToBase32}; use blockcfg::{ genesis_data::ConfigGenesisData, genesis_data::GenesisData, mock::Mockchain as Cardano, }; use blockchain::{Blockchain, BlockchainR}; use chain_crypto::{ AsymmetricKey, Curve25519_2HashDH, Ed25519, Ed25519Bip32, Ed25519Extended, FakeMMM, }; use intercom::BlockMsg; use leadership::leadership_task; use rand::rngs::EntropyRng; use rand::SeedableRng; use rand_chacha::ChaChaRng; use rest::v0::node::stats::StatsCounter; use settings::{Command, GenPrivKeyType}; use transaction::{transaction_task, TPool}; use utils::task::{TaskBroadcastBox, Tasks}; #[macro_use] pub mod log_wrapper; pub mod blockcfg; pub mod blockchain; pub mod client; pub mod clock; // pub mod consensus; pub mod intercom; pub mod leadership; pub mod network; pub mod rest; pub mod secure; pub mod settings; pub mod state; pub mod transaction; pub mod utils; // TODO: consider an appropriate size for the broadcast buffer. // For the block task, there should hardly be a need to buffer more // than one block as the network task should be able to broadcast the // block notifications in time. const BLOCK_BUS_CAPACITY: usize = 2; pub type TODO = u32; fn block_task( blockchain: BlockchainR<Cardano>, _clock: clock::Clock, // FIXME: use it or lose it r: Receiver<BlockMsg<Cardano>>, stats_counter: StatsCounter, ) { let mut network_broadcast = TaskBroadcastBox::new(BLOCK_BUS_CAPACITY); loop { let bquery = r.recv().unwrap(); blockchain::process(&blockchain, bquery, &mut network_broadcast, &stats_counter); } } fn startup_info( gd: &GenesisData, blockchain: &Blockchain<Cardano>, _settings: &settings::start::Settings, ) { println!( "k={} tip={}", gd.epoch_stability_depth, blockchain.get_tip() ); } // Expand the type with more variants // when it becomes necessary to represent different error cases. type Error = settings::Error; fn start(settings: settings::start::Settings) -> Result<(), Error> { settings.log_settings.apply(); let genesis_data = settings.read_genesis_data().unwrap(); let clock = { let initial_epoch = clock::ClockEpochConfiguration { slot_duration: genesis_data.slot_duration, slots_per_epoch: genesis_data.epoch_stability_depth * 10, }; clock::Clock::new(genesis_data.start_time, initial_epoch) }; let leader_secret = if let Some(secret_path) = &settings.leadership { Some(secure::NodeSecret::load_from_file(secret_path.as_path())) } else { None }; //let mut state = State::new(); let blockchain_data = Blockchain::new(genesis_data.clone(), &settings.storage); startup_info(&genesis_data, &blockchain_data, &settings); let blockchain = Arc::new(RwLock::new(blockchain_data)); let mut tasks = Tasks::new(); // # Bootstrap phase // // done at every startup: we need to bootstrap from whatever local state (including nothing) // to the latest network state (or close to latest). until this happen, we don't participate in the network // (no block creation) and our network connection(s) is only use to download data. // // Various aspects to do, similar to hermes: // * download all the existing blocks // * verify all the downloaded blocks // * network / peer discoveries (?) // * gclock sync ? 
// Read block state // init storage // create blockchain storage network::bootstrap(&settings.network, blockchain.clone()); // # Active phase // // now that we have caught up (or almost caught up) we download blocks from neighbor nodes, // listen to announcements and actively listen to synchronous queries // // There's two simultaenous roles to this: // * Leader: decided after global or local evaluation. Need to create and propagate a block // * Non-Leader: always. receive (pushed-) blocks from other peers, investigate the correct blockchain updates // // Also receive synchronous connection queries: // * new nodes subscribing to updates (blocks, transactions) // * client GetBlocks/Headers ... let tpool_data: TPool<MessageId, Message> = TPool::new(); let tpool = Arc::new(RwLock::new(tpool_data)); // Validation of consensus settings should make sure that we always have // non-empty selection data. let stats_counter = StatsCounter::default(); let transaction_task = { let tpool = tpool.clone(); let blockchain = blockchain.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("transaction", move |r| { transaction_task(blockchain, tpool, r, stats_counter) }) }; let block_task = { let blockchain = blockchain.clone(); let clock = clock.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("block", move |r| { block_task(blockchain, clock, r, stats_counter) }) }; let client_task = { let blockchain = blockchain.clone(); tasks.task_create_with_inputs("client-query", move |r| client::client_task(blockchain, r)) }; // ** TODO ** // setup_network // connection-events: // poll: // recv_transaction: // check_transaction_valid // add transaction to pool // recv_block: // check block valid // try to extend blockchain with block // update utxo state // flush transaction pool if any txid made it // get block(s): // try to answer // { let client_msgbox = client_task.clone(); let transaction_msgbox = transaction_task.clone(); let block_msgbox = block_task.clone(); let config = settings.network.clone(); let channels = network::Channels { client_box: client_msgbox, transaction_box: transaction_msgbox, block_box: block_msgbox, }; tasks.task_create("network", move || { network::run(config, channels); }); }; if let Some(secret) = leader_secret // == settings::start::Leadership::Yes // && leadership::selection::can_lead(&selection) == leadership::IsLeading::Yes { let tpool = tpool.clone(); let clock = clock.clone(); let block_task = block_task.clone(); let blockchain = blockchain.clone(); let leader_id = chain_impl_mockchain::leadership::LeaderId::Bft(secret.public().block_publickey.into()); let pk = chain_impl_mockchain::leadership::Leader::BftLeader(secret.block_privatekey); tasks.task_create("leadership", move || { leadership_task(leader_id, pk, tpool, blockchain, clock, block_task) }); }; let rest_server = match settings.rest { Some(ref rest) => { let context = rest::Context { stats_counter, blockchain, transaction_task: Arc::new(Mutex::new(transaction_task)), }; Some(rest::start_rest_server(rest, context)?) } None => None, }; // periodically cleanup (custom): // storage cleanup/packing // tpool.gc() // FIXME some sort of join so that the main thread does something ... tasks.join(); if let Some(server) = rest_server { server.stop().wait().unwrap() } Ok(()) } fn main()
GenPrivKeyType::Ed25519Extended => gen_priv_key_bech32::<Ed25519Extended>(), GenPrivKeyType::FakeMMM => gen_priv_key_bech32::<FakeMMM>(), GenPrivKeyType::Curve25519_2HashDH => gen_priv_key_bech32::<Curve25519_2HashDH>(), }; println!("{}", priv_key_bech32); } Command::GeneratePubKey(args) => { let stdin = io::stdin();
{ let command = match Command::load() { Err(err) => { eprintln!("{}", err); std::process::exit(1); } Ok(v) => v, }; match command { Command::Start(start_settings) => { if let Err(error) = start(start_settings) { eprintln!("jormungandr error: {}", error); std::process::exit(1); } } Command::GeneratePrivKey(args) => { let priv_key_bech32 = match args.key_type { GenPrivKeyType::Ed25519 => gen_priv_key_bech32::<Ed25519>(), GenPrivKeyType::Ed25519Bip32 => gen_priv_key_bech32::<Ed25519Bip32>(),
identifier_body
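The start-up code above builds the clock from slot_duration and slots_per_epoch = epoch_stability_depth * 10, counted from the genesis start_time. The sketch below shows the epoch/slot arithmetic that configuration implies; it is a simplification for illustration, not jormungandr's actual clock::Clock.

```python
# Map elapsed wall-clock time to (epoch, slot) given a fixed slot duration and a
# fixed number of slots per epoch derived from k = epoch_stability_depth.
def epoch_and_slot(now, start_time, slot_duration, epoch_stability_depth):
    slots_per_epoch = epoch_stability_depth * 10
    elapsed_slots = int((now - start_time) // slot_duration)
    return elapsed_slots // slots_per_epoch, elapsed_slots % slots_per_epoch

# e.g. 2 s slots, k = 10 -> 100 slots per epoch; 450 s after genesis we are in
# epoch 2, slot 25.
print(epoch_and_slot(now=1000 + 450, start_time=1000, slot_duration=2, epoch_stability_depth=10))
```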
main.rs
::sync::{mpsc::Receiver, Arc, Mutex, RwLock}; use chain_impl_mockchain::block::{message::MessageId, Message}; use futures::Future; use bech32::{u5, Bech32, FromBase32, ToBase32}; use blockcfg::{ genesis_data::ConfigGenesisData, genesis_data::GenesisData, mock::Mockchain as Cardano, }; use blockchain::{Blockchain, BlockchainR}; use chain_crypto::{ AsymmetricKey, Curve25519_2HashDH, Ed25519, Ed25519Bip32, Ed25519Extended, FakeMMM, }; use intercom::BlockMsg; use leadership::leadership_task; use rand::rngs::EntropyRng; use rand::SeedableRng; use rand_chacha::ChaChaRng; use rest::v0::node::stats::StatsCounter; use settings::{Command, GenPrivKeyType}; use transaction::{transaction_task, TPool}; use utils::task::{TaskBroadcastBox, Tasks}; #[macro_use] pub mod log_wrapper; pub mod blockcfg; pub mod blockchain; pub mod client; pub mod clock; // pub mod consensus; pub mod intercom; pub mod leadership; pub mod network; pub mod rest; pub mod secure; pub mod settings; pub mod state; pub mod transaction; pub mod utils; // TODO: consider an appropriate size for the broadcast buffer. // For the block task, there should hardly be a need to buffer more // than one block as the network task should be able to broadcast the // block notifications in time. const BLOCK_BUS_CAPACITY: usize = 2; pub type TODO = u32; fn block_task( blockchain: BlockchainR<Cardano>, _clock: clock::Clock, // FIXME: use it or lose it r: Receiver<BlockMsg<Cardano>>, stats_counter: StatsCounter, ) { let mut network_broadcast = TaskBroadcastBox::new(BLOCK_BUS_CAPACITY); loop { let bquery = r.recv().unwrap(); blockchain::process(&blockchain, bquery, &mut network_broadcast, &stats_counter); } } fn startup_info( gd: &GenesisData, blockchain: &Blockchain<Cardano>, _settings: &settings::start::Settings, ) { println!( "k={} tip={}", gd.epoch_stability_depth, blockchain.get_tip() ); } // Expand the type with more variants // when it becomes necessary to represent different error cases. type Error = settings::Error; fn start(settings: settings::start::Settings) -> Result<(), Error> { settings.log_settings.apply(); let genesis_data = settings.read_genesis_data().unwrap(); let clock = { let initial_epoch = clock::ClockEpochConfiguration { slot_duration: genesis_data.slot_duration, slots_per_epoch: genesis_data.epoch_stability_depth * 10, }; clock::Clock::new(genesis_data.start_time, initial_epoch) }; let leader_secret = if let Some(secret_path) = &settings.leadership { Some(secure::NodeSecret::load_from_file(secret_path.as_path())) } else { None }; //let mut state = State::new(); let blockchain_data = Blockchain::new(genesis_data.clone(), &settings.storage); startup_info(&genesis_data, &blockchain_data, &settings); let blockchain = Arc::new(RwLock::new(blockchain_data)); let mut tasks = Tasks::new(); // # Bootstrap phase // // done at every startup: we need to bootstrap from whatever local state (including nothing) // to the latest network state (or close to latest). until this happen, we don't participate in the network // (no block creation) and our network connection(s) is only use to download data. // // Various aspects to do, similar to hermes: // * download all the existing blocks // * verify all the downloaded blocks // * network / peer discoveries (?) // * gclock sync ? 
// Read block state // init storage // create blockchain storage network::bootstrap(&settings.network, blockchain.clone()); // # Active phase // // now that we have caught up (or almost caught up) we download blocks from neighbor nodes, // listen to announcements and actively listen to synchronous queries // // There's two simultaenous roles to this: // * Leader: decided after global or local evaluation. Need to create and propagate a block // * Non-Leader: always. receive (pushed-) blocks from other peers, investigate the correct blockchain updates // // Also receive synchronous connection queries: // * new nodes subscribing to updates (blocks, transactions) // * client GetBlocks/Headers ... let tpool_data: TPool<MessageId, Message> = TPool::new(); let tpool = Arc::new(RwLock::new(tpool_data)); // Validation of consensus settings should make sure that we always have // non-empty selection data. let stats_counter = StatsCounter::default(); let transaction_task = { let tpool = tpool.clone(); let blockchain = blockchain.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("transaction", move |r| { transaction_task(blockchain, tpool, r, stats_counter) }) }; let block_task = { let blockchain = blockchain.clone(); let clock = clock.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("block", move |r| { block_task(blockchain, clock, r, stats_counter) }) }; let client_task = { let blockchain = blockchain.clone(); tasks.task_create_with_inputs("client-query", move |r| client::client_task(blockchain, r)) }; // ** TODO ** // setup_network // connection-events: // poll: // recv_transaction: // check_transaction_valid // add transaction to pool // recv_block: // check block valid // try to extend blockchain with block // update utxo state // flush transaction pool if any txid made it // get block(s): // try to answer // { let client_msgbox = client_task.clone(); let transaction_msgbox = transaction_task.clone(); let block_msgbox = block_task.clone(); let config = settings.network.clone(); let channels = network::Channels { client_box: client_msgbox, transaction_box: transaction_msgbox, block_box: block_msgbox, }; tasks.task_create("network", move || { network::run(config, channels); }); }; if let Some(secret) = leader_secret // == settings::start::Leadership::Yes // && leadership::selection::can_lead(&selection) == leadership::IsLeading::Yes { let tpool = tpool.clone(); let clock = clock.clone(); let block_task = block_task.clone(); let blockchain = blockchain.clone(); let leader_id = chain_impl_mockchain::leadership::LeaderId::Bft(secret.public().block_publickey.into()); let pk = chain_impl_mockchain::leadership::Leader::BftLeader(secret.block_privatekey); tasks.task_create("leadership", move || { leadership_task(leader_id, pk, tpool, blockchain, clock, block_task) }); }; let rest_server = match settings.rest { Some(ref rest) => { let context = rest::Context { stats_counter, blockchain, transaction_task: Arc::new(Mutex::new(transaction_task)), }; Some(rest::start_rest_server(rest, context)?) } None => None, }; // periodically cleanup (custom): // storage cleanup/packing // tpool.gc() // FIXME some sort of join so that the main thread does something ... tasks.join(); if let Some(server) = rest_server { server.stop().wait().unwrap() } Ok(()) } fn main() { let command = match Command::load() { Err(err) => { eprintln!("{}", err); std::process::exit(1); } Ok(v) => v, };
match command { Command::Start(start_settings) => { if let Err(error) = start(start_settings) { eprintln!("jormungandr error: {}", error); std::process::exit(1); } } Command::GeneratePrivKey(args) => { let priv_key_bech32 = match args.key_type { GenPrivKeyType::Ed25519 => gen_priv_key_bech32::<Ed25519>(), GenPrivKeyType::Ed25519Bip32 => gen_priv_key_bech32::<Ed25519Bip32>(), GenPrivKeyType::Ed25519Extended => gen_priv_key_bech32::<Ed25519Extended>(), GenPrivKeyType::FakeMMM => gen_priv_key_bech32::<FakeMMM>(), GenPrivKeyType::Curve25519_2HashDH => gen_priv_key_bech32::<Curve25519_2HashDH>(), }; println!("{}", priv_key_bech32); } Command::GeneratePubKey(args) => { let stdin = io::stdin();
random_line_split
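block_task above is typical of the task pattern in main.rs: a dedicated thread owns the receiving end of a channel and loops on r.recv(), while other tasks hold senders and the TaskBroadcastBox fans results back out. A minimal Python analogue of that consumer loop using queue and threading; the message strings are placeholders, and the shutdown sentinel is an addition the Rust loop does not have.

```python
# One worker thread draining a channel, in the spirit of block_task's recv loop.
import queue
import threading

def block_task(rx):
    while True:
        msg = rx.get()          # r.recv() in the Rust code
        if msg is None:         # shutdown sentinel (the Rust loop just unwraps)
            break
        print("processing block message:", msg)

rx = queue.Queue()
worker = threading.Thread(target=block_task, args=(rx,))
worker.start()

rx.put("LeadershipBlock(...)")
rx.put("NetworkBlock(...)")
rx.put(None)
worker.join()
```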
main.rs
sync::{mpsc::Receiver, Arc, Mutex, RwLock}; use chain_impl_mockchain::block::{message::MessageId, Message}; use futures::Future; use bech32::{u5, Bech32, FromBase32, ToBase32}; use blockcfg::{ genesis_data::ConfigGenesisData, genesis_data::GenesisData, mock::Mockchain as Cardano, }; use blockchain::{Blockchain, BlockchainR}; use chain_crypto::{ AsymmetricKey, Curve25519_2HashDH, Ed25519, Ed25519Bip32, Ed25519Extended, FakeMMM, }; use intercom::BlockMsg; use leadership::leadership_task; use rand::rngs::EntropyRng; use rand::SeedableRng; use rand_chacha::ChaChaRng; use rest::v0::node::stats::StatsCounter; use settings::{Command, GenPrivKeyType}; use transaction::{transaction_task, TPool}; use utils::task::{TaskBroadcastBox, Tasks}; #[macro_use] pub mod log_wrapper; pub mod blockcfg; pub mod blockchain; pub mod client; pub mod clock; // pub mod consensus; pub mod intercom; pub mod leadership; pub mod network; pub mod rest; pub mod secure; pub mod settings; pub mod state; pub mod transaction; pub mod utils; // TODO: consider an appropriate size for the broadcast buffer. // For the block task, there should hardly be a need to buffer more // than one block as the network task should be able to broadcast the // block notifications in time. const BLOCK_BUS_CAPACITY: usize = 2; pub type TODO = u32; fn block_task( blockchain: BlockchainR<Cardano>, _clock: clock::Clock, // FIXME: use it or lose it r: Receiver<BlockMsg<Cardano>>, stats_counter: StatsCounter, ) { let mut network_broadcast = TaskBroadcastBox::new(BLOCK_BUS_CAPACITY); loop { let bquery = r.recv().unwrap(); blockchain::process(&blockchain, bquery, &mut network_broadcast, &stats_counter); } } fn startup_info( gd: &GenesisData, blockchain: &Blockchain<Cardano>, _settings: &settings::start::Settings, ) { println!( "k={} tip={}", gd.epoch_stability_depth, blockchain.get_tip() ); } // Expand the type with more variants // when it becomes necessary to represent different error cases. type Error = settings::Error; fn start(settings: settings::start::Settings) -> Result<(), Error> { settings.log_settings.apply(); let genesis_data = settings.read_genesis_data().unwrap(); let clock = { let initial_epoch = clock::ClockEpochConfiguration { slot_duration: genesis_data.slot_duration, slots_per_epoch: genesis_data.epoch_stability_depth * 10, }; clock::Clock::new(genesis_data.start_time, initial_epoch) }; let leader_secret = if let Some(secret_path) = &settings.leadership { Some(secure::NodeSecret::load_from_file(secret_path.as_path())) } else { None }; //let mut state = State::new(); let blockchain_data = Blockchain::new(genesis_data.clone(), &settings.storage); startup_info(&genesis_data, &blockchain_data, &settings); let blockchain = Arc::new(RwLock::new(blockchain_data)); let mut tasks = Tasks::new(); // # Bootstrap phase // // done at every startup: we need to bootstrap from whatever local state (including nothing) // to the latest network state (or close to latest). until this happen, we don't participate in the network // (no block creation) and our network connection(s) is only use to download data. // // Various aspects to do, similar to hermes: // * download all the existing blocks // * verify all the downloaded blocks // * network / peer discoveries (?) // * gclock sync ? 
// Read block state // init storage // create blockchain storage network::bootstrap(&settings.network, blockchain.clone()); // # Active phase // // now that we have caught up (or almost caught up) we download blocks from neighbor nodes, // listen to announcements and actively listen to synchronous queries // // There's two simultaenous roles to this: // * Leader: decided after global or local evaluation. Need to create and propagate a block // * Non-Leader: always. receive (pushed-) blocks from other peers, investigate the correct blockchain updates // // Also receive synchronous connection queries: // * new nodes subscribing to updates (blocks, transactions) // * client GetBlocks/Headers ... let tpool_data: TPool<MessageId, Message> = TPool::new(); let tpool = Arc::new(RwLock::new(tpool_data)); // Validation of consensus settings should make sure that we always have // non-empty selection data. let stats_counter = StatsCounter::default(); let transaction_task = { let tpool = tpool.clone(); let blockchain = blockchain.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("transaction", move |r| { transaction_task(blockchain, tpool, r, stats_counter) }) }; let block_task = { let blockchain = blockchain.clone(); let clock = clock.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("block", move |r| { block_task(blockchain, clock, r, stats_counter) }) }; let client_task = { let blockchain = blockchain.clone(); tasks.task_create_with_inputs("client-query", move |r| client::client_task(blockchain, r)) }; // ** TODO ** // setup_network // connection-events: // poll: // recv_transaction: // check_transaction_valid // add transaction to pool // recv_block: // check block valid // try to extend blockchain with block // update utxo state // flush transaction pool if any txid made it // get block(s): // try to answer // { let client_msgbox = client_task.clone(); let transaction_msgbox = transaction_task.clone(); let block_msgbox = block_task.clone(); let config = settings.network.clone(); let channels = network::Channels { client_box: client_msgbox, transaction_box: transaction_msgbox, block_box: block_msgbox, }; tasks.task_create("network", move || { network::run(config, channels); }); }; if let Some(secret) = leader_secret // == settings::start::Leadership::Yes // && leadership::selection::can_lead(&selection) == leadership::IsLeading::Yes { let tpool = tpool.clone(); let clock = clock.clone(); let block_task = block_task.clone(); let blockchain = blockchain.clone(); let leader_id = chain_impl_mockchain::leadership::LeaderId::Bft(secret.public().block_publickey.into()); let pk = chain_impl_mockchain::leadership::Leader::BftLeader(secret.block_privatekey); tasks.task_create("leadership", move || { leadership_task(leader_id, pk, tpool, blockchain, clock, block_task) }); }; let rest_server = match settings.rest { Some(ref rest) => { let context = rest::Context { stats_counter, blockchain, transaction_task: Arc::new(Mutex::new(transaction_task)), }; Some(rest::start_rest_server(rest, context)?) } None => None, }; // periodically cleanup (custom): // storage cleanup/packing // tpool.gc() // FIXME some sort of join so that the main thread does something ... tasks.join(); if let Some(server) = rest_server { server.stop().wait().unwrap() } Ok(()) } fn main() { let command = match Command::load() { Err(err) =>
Ok(v) => v, }; match command { Command::Start(start_settings) => { if let Err(error) = start(start_settings) { eprintln!("jormungandr error: {}", error); std::process::exit(1); } } Command::GeneratePrivKey(args) => { let priv_key_bech32 = match args.key_type { GenPrivKeyType::Ed25519 => gen_priv_key_bech32::<Ed25519>(), GenPrivKeyType::Ed25519Bip32 => gen_priv_key_bech32::<Ed25519Bip32>(), GenPrivKeyType::Ed25519Extended => gen_priv_key_bech32::<Ed25519Extended>(), GenPrivKeyType::FakeMMM => gen_priv_key_bech32::<FakeMMM>(), GenPrivKeyType::Curve25519_2HashDH => gen_priv_key_bech32::<Curve25519_2HashDH>(), }; println!("{}", priv_key_bech32); } Command::GeneratePubKey(args) => { let stdin = io::stdin
{ eprintln!("{}", err); std::process::exit(1); }
conditional_block
main.rs
sync::{mpsc::Receiver, Arc, Mutex, RwLock}; use chain_impl_mockchain::block::{message::MessageId, Message}; use futures::Future; use bech32::{u5, Bech32, FromBase32, ToBase32}; use blockcfg::{ genesis_data::ConfigGenesisData, genesis_data::GenesisData, mock::Mockchain as Cardano, }; use blockchain::{Blockchain, BlockchainR}; use chain_crypto::{ AsymmetricKey, Curve25519_2HashDH, Ed25519, Ed25519Bip32, Ed25519Extended, FakeMMM, }; use intercom::BlockMsg; use leadership::leadership_task; use rand::rngs::EntropyRng; use rand::SeedableRng; use rand_chacha::ChaChaRng; use rest::v0::node::stats::StatsCounter; use settings::{Command, GenPrivKeyType}; use transaction::{transaction_task, TPool}; use utils::task::{TaskBroadcastBox, Tasks}; #[macro_use] pub mod log_wrapper; pub mod blockcfg; pub mod blockchain; pub mod client; pub mod clock; // pub mod consensus; pub mod intercom; pub mod leadership; pub mod network; pub mod rest; pub mod secure; pub mod settings; pub mod state; pub mod transaction; pub mod utils; // TODO: consider an appropriate size for the broadcast buffer. // For the block task, there should hardly be a need to buffer more // than one block as the network task should be able to broadcast the // block notifications in time. const BLOCK_BUS_CAPACITY: usize = 2; pub type TODO = u32; fn block_task( blockchain: BlockchainR<Cardano>, _clock: clock::Clock, // FIXME: use it or lose it r: Receiver<BlockMsg<Cardano>>, stats_counter: StatsCounter, ) { let mut network_broadcast = TaskBroadcastBox::new(BLOCK_BUS_CAPACITY); loop { let bquery = r.recv().unwrap(); blockchain::process(&blockchain, bquery, &mut network_broadcast, &stats_counter); } } fn
( gd: &GenesisData, blockchain: &Blockchain<Cardano>, _settings: &settings::start::Settings, ) { println!( "k={} tip={}", gd.epoch_stability_depth, blockchain.get_tip() ); } // Expand the type with more variants // when it becomes necessary to represent different error cases. type Error = settings::Error; fn start(settings: settings::start::Settings) -> Result<(), Error> { settings.log_settings.apply(); let genesis_data = settings.read_genesis_data().unwrap(); let clock = { let initial_epoch = clock::ClockEpochConfiguration { slot_duration: genesis_data.slot_duration, slots_per_epoch: genesis_data.epoch_stability_depth * 10, }; clock::Clock::new(genesis_data.start_time, initial_epoch) }; let leader_secret = if let Some(secret_path) = &settings.leadership { Some(secure::NodeSecret::load_from_file(secret_path.as_path())) } else { None }; //let mut state = State::new(); let blockchain_data = Blockchain::new(genesis_data.clone(), &settings.storage); startup_info(&genesis_data, &blockchain_data, &settings); let blockchain = Arc::new(RwLock::new(blockchain_data)); let mut tasks = Tasks::new(); // # Bootstrap phase // // done at every startup: we need to bootstrap from whatever local state (including nothing) // to the latest network state (or close to latest). until this happen, we don't participate in the network // (no block creation) and our network connection(s) is only use to download data. // // Various aspects to do, similar to hermes: // * download all the existing blocks // * verify all the downloaded blocks // * network / peer discoveries (?) // * gclock sync ? // Read block state // init storage // create blockchain storage network::bootstrap(&settings.network, blockchain.clone()); // # Active phase // // now that we have caught up (or almost caught up) we download blocks from neighbor nodes, // listen to announcements and actively listen to synchronous queries // // There's two simultaenous roles to this: // * Leader: decided after global or local evaluation. Need to create and propagate a block // * Non-Leader: always. receive (pushed-) blocks from other peers, investigate the correct blockchain updates // // Also receive synchronous connection queries: // * new nodes subscribing to updates (blocks, transactions) // * client GetBlocks/Headers ... let tpool_data: TPool<MessageId, Message> = TPool::new(); let tpool = Arc::new(RwLock::new(tpool_data)); // Validation of consensus settings should make sure that we always have // non-empty selection data. 
let stats_counter = StatsCounter::default(); let transaction_task = { let tpool = tpool.clone(); let blockchain = blockchain.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("transaction", move |r| { transaction_task(blockchain, tpool, r, stats_counter) }) }; let block_task = { let blockchain = blockchain.clone(); let clock = clock.clone(); let stats_counter = stats_counter.clone(); tasks.task_create_with_inputs("block", move |r| { block_task(blockchain, clock, r, stats_counter) }) }; let client_task = { let blockchain = blockchain.clone(); tasks.task_create_with_inputs("client-query", move |r| client::client_task(blockchain, r)) }; // ** TODO ** // setup_network // connection-events: // poll: // recv_transaction: // check_transaction_valid // add transaction to pool // recv_block: // check block valid // try to extend blockchain with block // update utxo state // flush transaction pool if any txid made it // get block(s): // try to answer // { let client_msgbox = client_task.clone(); let transaction_msgbox = transaction_task.clone(); let block_msgbox = block_task.clone(); let config = settings.network.clone(); let channels = network::Channels { client_box: client_msgbox, transaction_box: transaction_msgbox, block_box: block_msgbox, }; tasks.task_create("network", move || { network::run(config, channels); }); }; if let Some(secret) = leader_secret // == settings::start::Leadership::Yes // && leadership::selection::can_lead(&selection) == leadership::IsLeading::Yes { let tpool = tpool.clone(); let clock = clock.clone(); let block_task = block_task.clone(); let blockchain = blockchain.clone(); let leader_id = chain_impl_mockchain::leadership::LeaderId::Bft(secret.public().block_publickey.into()); let pk = chain_impl_mockchain::leadership::Leader::BftLeader(secret.block_privatekey); tasks.task_create("leadership", move || { leadership_task(leader_id, pk, tpool, blockchain, clock, block_task) }); }; let rest_server = match settings.rest { Some(ref rest) => { let context = rest::Context { stats_counter, blockchain, transaction_task: Arc::new(Mutex::new(transaction_task)), }; Some(rest::start_rest_server(rest, context)?) } None => None, }; // periodically cleanup (custom): // storage cleanup/packing // tpool.gc() // FIXME some sort of join so that the main thread does something ... tasks.join(); if let Some(server) = rest_server { server.stop().wait().unwrap() } Ok(()) } fn main() { let command = match Command::load() { Err(err) => { eprintln!("{}", err); std::process::exit(1); } Ok(v) => v, }; match command { Command::Start(start_settings) => { if let Err(error) = start(start_settings) { eprintln!("jormungandr error: {}", error); std::process::exit(1); } } Command::GeneratePrivKey(args) => { let priv_key_bech32 = match args.key_type { GenPrivKeyType::Ed25519 => gen_priv_key_bech32::<Ed25519>(), GenPrivKeyType::Ed25519Bip32 => gen_priv_key_bech32::<Ed25519Bip32>(), GenPrivKeyType::Ed25519Extended => gen_priv_key_bech32::<Ed25519Extended>(), GenPrivKeyType::FakeMMM => gen_priv_key_bech32::<FakeMMM>(), GenPrivKeyType::Curve25519_2HashDH => gen_priv_key_bech32::<Curve25519_2HashDH>(), }; println!("{}", priv_key_bech32); } Command::GeneratePubKey(args) => { let stdin = io::stdin();
startup_info
identifier_name
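The node startup in the record above wires its subsystems (transaction, block, client-query, network, leadership) together through per-task message boxes. A minimal, illustrative sketch of that pattern in Python follows — the task names and handler are invented, and this is not the jormungandr API, just the general "one queue per task" wiring:

# Minimal sketch of the task-and-message-box wiring shown above.
# All names here are illustrative; this is not the jormungandr API.
import queue
import threading

def make_task(name, handler):
    """Create a worker thread that drains its own message box."""
    box = queue.Queue()
    def run():
        while True:
            msg = box.get()
            if msg is None:        # shutdown sentinel
                break
            handler(name, msg)
    thread = threading.Thread(target=run, name=name)
    thread.start()
    return box, thread

def handle(name, msg):
    print(f"{name} received: {msg}")

if __name__ == "__main__":
    tx_box, tx_thread = make_task("transaction", handle)
    block_box, block_thread = make_task("block", handle)

    # The network side only needs the boxes, mirroring network::Channels.
    channels = {"transaction_box": tx_box, "block_box": block_box}
    channels["transaction_box"].put("new transaction")
    channels["block_box"].put("announced block")

    for box, thread in ((tx_box, tx_thread), (block_box, block_thread)):
        box.put(None)
        thread.join()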
qualify_textgrid.py
text' TEXT_CATEGORY_PARSER = re.compile('^(?P<category>[1-4])\D.*', flags=re.UNICODE) MARKS_MEANING = { '1': '1-', '2': '2-', '3': '3-', '4': '4-' } logger = None time_logger = None def setup(target): global logger global time_logger if os.path.isdir(target): if target.endswith('\\'): target = target[:-1] logfile = os.path.join(target, os.path.basename(target)+'.log') timelog = os.path.join(target, 'duration.log') elif os.path.isfile(target): logfile = target + '.log' timelog = target + '_duration.log' logger = open(logfile, 'w') time_logger = open(timelog, 'w') def teardown(): logger.close() time_logger.close() def loginfo(msg, stdout=False, timelog=False): if stdout: print(msg) logger.write((msg+os.linesep).encode('utf-8')) if timelog: logtime(msg) #syntax sugar def logtime(msg, stdout=False): if stdout: print(msg) time_logger.write((msg+os.linesep).encode('utf-8')) class CycleIterator(object): """ a wrapper for the itertools.cycle """ def __init__(self, iterable): super(CycleIterator, self).__init__() self.iterable = iterable self.iterator = cycle(iterable) self.value = None def head(self): return self.iterable[0] def tail(self): return self.iterable[-1] def next(self): self.value = self.iterator.next() return self.value def end(self): return self.value == self.tail() # to loop from the begining def reset(self): self.iterator = cycle(self.iterable) def index(self, i): return self.iterable[i] class TextgridParser(object): """translate the textgrid into a dict""" CODINGS = ( ('utf-8-sig', (codecs.BOM_UTF8,)), ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)), ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)), ) # for textgrid header HEADER_PATTERN = ( re.compile('xmin = (?P<start>[\d\.]+)\s*xmax = (?P<end>[\d\.]+)\s*tiers\? <exists>'), lambda x: float(x.group('end')) - float(x.group('start')), ) BLOCK_PATTERNS = ( (re.compile('^\s*intervals \[(?P<slice>\d+)\]:'), 'slice', int), (re.compile('^\s*xmin = (?P<xmin>[\d\.]+)'), 'xmin', float), (re.compile('^\s*xmax = (?P<xmax>[\d\.]+)'), 'xmax', float), (re.compile('^\s*text = "(?P<text>.*)"'), 'text', str), ) # for a special case that a text has multiple lines MULTILINES_PATTERN = ( (re.compile('^\s*text = "(?P<text>.*)'), 'text', str),
PATTERN_KEYS = ('pattern', 'key', 'type') def __init__(self, coding='utf-8'): super(TextgridParser, self).__init__() self.default_coding = coding self.intervals = [] self.original_duration_sum = 0 def reset(self): self.intervals = [] def read(self, filename): self.filename = filename with open(filename, 'rb') as f: raw_data = f.read() # self.coding = self.code_det(content[0:10]) self.coding = chardet.detect(raw_data)['encoding'] try: self.content = raw_data.decode(self.coding).encode(self.default_coding) self.lines = self.content.splitlines() except UnicodeError, e: loginfo(u'>>文件:{filename}'.format(filename=self.filename), stdout=True) loginfo(u'解码时发生错误,请选择合适的文本编辑器,并以utf-8编码格式保存后,再运行此程序', stdout=True) loginfo('') raise IOError def code_det(self, headline, default='utf-8'): for enc,boms in TextgridParser.CODINGS: if any(headline.startswith(bom) for bom in boms): return enc return default def pack(self, keys, tuples): package = [] for vals in tuples: package.append({ keys[i]:vals[i] for i in range(len(keys)) }) return package def update(self, interval, item_pattern, line, append_mode=False): ip = item_pattern if append_mode: # only for text interval[ip['key']] += ip['type'](ip['pattern'].match(line).group(ip['key'])) else: interval.update({ ip['key']: ip['type'](ip['pattern'].match(line).group(ip['key'])) }) return interval def match(self, item_pattern, line): return item_pattern['pattern'].match(line) def search(self, parser, fn): return fn(parser.search(self.content)) def parse(self): print(u'正在解析{filename}...'.format(filename=self.filename)) loginfo(u'>>文件:%s' % self.filename) original_duration = self.search(*TextgridParser.HEADER_PATTERN) self.original_duration_sum += original_duration logtime(u'>>文件:%s\t 原始语音时长为%f秒' % (self.filename, original_duration)) lineno = 0 interval = {} APPEND_MODE = False self.reset() bp_iter = CycleIterator(self.pack(TextgridParser.PATTERN_KEYS, TextgridParser.BLOCK_PATTERNS)) mp_iter = CycleIterator(self.pack(TextgridParser.PATTERN_KEYS, TextgridParser.MULTILINES_PATTERN)) block_begining = bp_iter.head() item_pattern = bp_iter.next() for line in self.lines: lineno += 1 # reset the block parsing once the line matched the begining pattern if self.match(block_begining, line): # self.update(interval, block_begining, line) # not the start actually, exception occured in parsing last block if item_pattern != block_begining: loginfo(u'错误:无法解析第%d行,不是textgrid标准格式,已跳过' % (lineno-1), stdout=True) # last line instead of the current interval = {} APPEND_MODE = False bp_iter.reset() item_pattern = bp_iter.next() # when a text existed in multiple lines elif APPEND_MODE: if self.match(mp_iter.tail(), line): # match the pattern of end line self.update(interval, mp_iter.tail(), line, APPEND_MODE) interval['lineno'] = lineno self.intervals.append(interval) # block ends interval = {} item_pattern = bp_iter.next() # loop to the begining APPEND_MODE = False # 2. block ending else: # append the middle part of the text self.update(interval, mp_iter.index(1), line, APPEND_MODE) # match the item in sequence if self.match(item_pattern, line): self.update(interval, item_pattern, line) # if the end of the block was matched if bp_iter.end(): interval['lineno'] = lineno self.intervals.append(interval) interval = {} # loop to the begining item_pattern = bp_iter.next() # 1. 
block ending # match the begining of multi-lines text instead of a single line elif self.match(mp_iter.head(), line): self.update(interval, mp_iter.head(), line) APPEND_MODE = True def validate(intervals, quiet=False): validated = [] error_no = 0 if not quiet: print(u'正在验证...') for interval in intervals: legal = True # to append legal textgrid to the list text = interval[TEXT_KEY].decode('utf-8') if text: for rp,fn,msg in RULES_PATTERNS: result = rp.match(text) if result: text = fn(result) else: if not quiet: loginfo(msg.format(lineno=interval['lineno'], text=interval['text'].decode('utf-8'))) legal = False error_no += 1 break else: legal = False if legal: validated.append(interval) if not quiet: print(u'验证完成,检测到%d个错误' % error_no) if error_no == 0: loginfo(u'Succeed') else: loginfo(u'共%d个错误被
(re.compile('^(?P<text>.*)$'), 'text', str), # to adapt the new line (re.compile('^(?P<text>.*)"\s*$'), 'text', str), )
random_line_split
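The parser in the record above walks a TextGrid interval block line by line with one regex per field (intervals [n]:, xmin, xmax, text). A self-contained sketch of that idea, with a made-up sample block and the text line treated as the end of each interval:

# Sketch of parsing TextGrid interval blocks with the same kind of patterns.
# The sample input is invented for illustration.
import re

BLOCK_PATTERNS = (
    (re.compile(r'^\s*intervals \[(?P<slice>\d+)\]:'), 'slice', int),
    (re.compile(r'^\s*xmin = (?P<xmin>[\d\.]+)'), 'xmin', float),
    (re.compile(r'^\s*xmax = (?P<xmax>[\d\.]+)'), 'xmax', float),
    (re.compile(r'^\s*text = "(?P<text>.*)"'), 'text', str),
)

def parse_intervals(lines):
    intervals, current = [], {}
    for line in lines:
        for pattern, key, cast in BLOCK_PATTERNS:
            match = pattern.match(line)
            if match:
                current[key] = cast(match.group(key))
                if key == 'text':          # the text field closes a block
                    intervals.append(current)
                    current = {}
                break
    return intervals

sample = [
    'intervals [1]:', 'xmin = 0.0', 'xmax = 1.25', 'text = "1 hello"',
    'intervals [2]:', 'xmin = 1.25', 'xmax = 2.0', 'text = ""',
]
print(parse_intervals(sample))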
qualify_textgrid.py
= None time_logger = None def setup(target): global logger global time_logger if os.path.isdir(target): if target.endswith('\\'): target = target[:-1] logfile = os.path.join(target, os.path.basename(target)+'.log') timelog = os.path.join(target, 'duration.log') elif os.path.isfile(target): logfile = target + '.log' timelog = target + '_duration.log' logger = open(logfile, 'w') time_logger = open(timelog, 'w') def teardown(): logger.close() time_logger.close() def loginfo(msg, stdout=False, timelog=False): if stdout: print(msg) logger.write((msg+os.linesep).encode('utf-8')) if timelog: logtime(msg) #syntax sugar def logtime(msg, stdout=False): if stdout: print(msg) time_logger.write((msg+os.linesep).encode('utf-8')) class CycleIterator(object): """ a wrapper for the itertools.cycle """ def __init__(self, iterable): super(CycleIterator, self).__init__() self.iterable = iterable self.iterator = cycle(iterable) self.value = None def head(self): return self.iterable[0] def tail(self): return self.iterable[-1] def next(self): self.value = self.iterator.next() return self.value def end(self): return self.value == self.tail() # to loop from the begining def reset(self): self.iterator = cycle(self.iterable) def index(self, i): return self.iterable[i] class TextgridParser(object): """translate the textgrid into a dict""" CODINGS = ( ('utf-8-sig', (codecs.BOM_UTF8,)), ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)), ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)), ) # for textgrid header HEADER_PATTERN = ( re.compile('xmin = (?P<start>[\d\.]+)\s*xmax = (?P<end>[\d\.]+)\s*tiers\? <exists>'), lambda x: float(x.group('end')) - float(x.group('start')), ) BLOCK_PATTERNS = ( (re.compile('^\s*intervals \[(?P<slice>\d+)\]:'), 'slice', int), (re.compile('^\s*xmin = (?P<xmin>[\d\.]+)'), 'xmin', float), (re.compile('^\s*xmax = (?P<xmax>[\d\.]+)'), 'xmax', float), (re.compile('^\s*text = "(?P<text>.*)"'), 'text', str), ) # for a special case that a text has multiple lines MULTILINES_PATTERN = ( (re.compile('^\s*text = "(?P<text>.*)'), 'text', str), (re.compile('^(?P<text>.*)$'), 'text', str), # to adapt the new line (re.compile('^(?P<text>.*)"\s*$'), 'text', str), ) PATTERN_KEYS = ('pattern', 'key', 'type') def __init__(self, coding='utf-8'): super(TextgridParser, self).__init__() self.default_coding = coding self.intervals = [] self.original_duration_sum = 0 def reset(self): self.intervals = [] def read(self, filename): self.filename = filename with open(filename, 'rb') as f: raw_data = f.read() # self.coding = self.code_det(content[0:10]) self.coding = chardet.detect(raw_data)['encoding'] try: self.content = raw_data.decode(self.coding).encode(self.default_coding) self.lines = self.content.splitlines() except UnicodeError, e: loginfo(u'>>文件:{filename}'.format(filename=self.filename), stdout=True) loginfo(u'解码时发生错误,请选择合适的文本编辑器,并以utf-8编码格式保存后,再运行此程序', stdout=True) loginfo('') raise IOError def code_det(self, headline, default='utf-8'): for enc,boms in TextgridParser.CODINGS: if any(headline.startswith(bom) for bom in boms): return enc return default def pack(self, keys, tuples): package = [] for vals in tuples: package.append({ keys[i]:vals[i] for i in range(len(keys)) }) return package def update(self, interval, item_pattern, line, append_mode=False): ip = item_pattern if append_mode: # only for text interval[ip['key']] += ip['type'](ip['pattern'].match(line).group(ip['key'])) else: interval.update({ ip['key']: ip['type'](ip['pattern'].match(line).group(ip['key'])) }) return interval def 
match(self, item_pattern, line): return item_pattern['pattern'].match(line) def search(self, parser, fn): return fn(parser.search(self.content)) def parse(self): print(u'正在解析{filename}...'.format(filename=self.filename)) loginfo(u'>>文件:%s' % self.filename) original_duration = self.search(*TextgridParser.HEADER_PATTERN) self.original_duration_sum += original_duration logtime(u'>>文件:%s\t 原始语音时长为%f秒' % (self.filename, original_duration)) lineno = 0 interval = {} APPEND_MODE = False self.reset() bp_iter = CycleIterator(self.pack(TextgridParser.PATTERN_KEYS, TextgridParser.BLOCK_PATTERNS)) mp_iter = CycleIterator(self.pack(TextgridParser.PATTERN_KEYS, TextgridParser.MULTILINES_PATTERN)) block_begining = bp_iter.head() item_pattern = bp_iter.next() for line in self.lines: lineno += 1 # reset the block parsing once the line matched the begining pattern if self.match(block_begining, line): # self.update(interval, block_begining, line) # not the start actually, exception occured in parsing last block if item_pattern != block_begining: loginfo(u'错误:无法解析第%d行,不是textgrid标准格式,已跳过' % (lineno-1), stdout=True) # last line instead of the current interval = {} APPEND_MODE = False bp_iter.reset() item_pattern = bp_iter.next() # when a text existed in multiple lines elif APPEND_MODE: if self.match(mp_iter.tail(), line): # match the pattern of end line self.update(interval, mp_iter.tail(), line, APPEND_MODE) interval['lineno'] = lineno self.intervals.append(interval) # block ends interval = {} item_pattern = bp_iter.next() # loop to the begining APPEND_MODE = False # 2. block ending else: # append the middle part of the text self.update(interval, mp_iter.index(1), line, APPEND_MODE) # match the item in sequence if self.match(item_pattern, line): self.update(interval, item_pattern, line) # if the end of the block was matched if bp_iter.end(): interval['lineno'] = lineno self.intervals.append(interval) interval = {} # loop to the begining item_pattern = bp_iter.next() # 1. block ending # match the begining of multi-lines text instead of a single line elif self.match(mp_iter.head(), line): self.update(interval, mp_iter.head(), line) APPEND_MODE = True def validate(intervals, quiet=False): validated = [] error_no = 0 if not quiet: print(u'正在验证...') for interval in intervals: legal = True # to append legal textgrid to the list text = interval[TEXT_KEY].decode('utf-8') if text: for rp,fn,msg in RULES_PATTERNS: result = rp.match(text) if result: text = fn(result) else: if not quiet: loginfo(msg.format(lineno=interval['lineno'], text=interval['text'].decode('utf-8'))) legal = False error_no += 1 break else: legal =
False if legal: validated.append(interval) if not quiet: print(u'验证完成,检测到%d个错误' % error_no) if error_no == 0: loginfo(u'Succeed') else: loginfo(u'共%d个错误被检测到' % error_no) loginfo('') # extra space line return validated def timeit(intervals, title=None): assoeted_intervals = {} for interval in intervals: try: # assume it was validated before category = TEXT_CATEGORY_PARSER.match(interval[TEXT_KEY].decode('utf-8'
conditional_block
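CycleIterator in the record above wraps itertools.cycle so the parser can step through its fixed pattern sequence and ask whether it just consumed the last pattern. A Python 3 sketch of the same wrapper (the original is Python 2, which is why it calls self.iterator.next()):

# Python 3 sketch of the CycleIterator used above: it remembers the last
# value yielded so callers can ask whether the cycle just completed.
from itertools import cycle

class CycleIterator:
    def __init__(self, iterable):
        self.iterable = list(iterable)
        self.iterator = cycle(self.iterable)
        self.value = None

    def head(self):
        return self.iterable[0]

    def tail(self):
        return self.iterable[-1]

    def next(self):
        self.value = next(self.iterator)   # Python 3: next(it), not it.next()
        return self.value

    def end(self):
        return self.value == self.tail()

    def reset(self):
        self.iterator = cycle(self.iterable)

patterns = CycleIterator(['slice', 'xmin', 'xmax', 'text'])
for _ in range(4):
    patterns.next()
print(patterns.end())   # True: we just consumed the last pattern in the cycle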
qualify_textgrid.py
text' TEXT_CATEGORY_PARSER = re.compile('^(?P<category>[1-4])\D.*', flags=re.UNICODE) MARKS_MEANING = { '1': '1-', '2': '2-', '3': '3-', '4': '4-' } logger = None time_logger = None def setup(target): global logger global time_logger if os.path.isdir(target): if target.endswith('\\'): target = target[:-1] logfile = os.path.join(target, os.path.basename(target)+'.log') timelog = os.path.join(target, 'duration.log') elif os.path.isfile(target): logfile = target + '.log' timelog = target + '_duration.log' logger = open(logfile, 'w') time_logger = open(timelog, 'w') def teardown(): logger.close() time_logger.close() def loginfo(msg, stdout=False, timelog=False): if stdout: print(msg) logger.write((msg+os.linesep).encode('utf-8')) if timelog: logtime(msg) #syntax sugar def logtime(msg, stdout=False): if stdout: print(msg) time_logger.write((msg+os.linesep).encode('utf-8')) class CycleIterator(object): """ a wrapper for the itertools.cycle """ def __init__(self, iterable): super(CycleIterator, self).__init__() self.iterable = iterable self.iterator = cycle(iterable) self.value = None def head(self): return self.iterable[0] def tail(self): return self.iterable[-1] def next(self): self.value = self.iterator.next() return self.value def end(self): return self.value == self.tail() # to loop from the begining def reset(self): self.iterator = cycle(self.iterable) def index(self, i): return self.iterable[i] class TextgridParser(object): """translate the textgrid into a dict""" CODINGS = ( ('utf-8-sig', (codecs.BOM_UTF8,)), ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)), ('utf-32
odecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)), ) # for textgrid header HEADER_PATTERN = ( re.compile('xmin = (?P<start>[\d\.]+)\s*xmax = (?P<end>[\d\.]+)\s*tiers\? <exists>'), lambda x: float(x.group('end')) - float(x.group('start')), ) BLOCK_PATTERNS = ( (re.compile('^\s*intervals \[(?P<slice>\d+)\]:'), 'slice', int), (re.compile('^\s*xmin = (?P<xmin>[\d\.]+)'), 'xmin', float), (re.compile('^\s*xmax = (?P<xmax>[\d\.]+)'), 'xmax', float), (re.compile('^\s*text = "(?P<text>.*)"'), 'text', str), ) # for a special case that a text has multiple lines MULTILINES_PATTERN = ( (re.compile('^\s*text = "(?P<text>.*)'), 'text', str), (re.compile('^(?P<text>.*)$'), 'text', str), # to adapt the new line (re.compile('^(?P<text>.*)"\s*$'), 'text', str), ) PATTERN_KEYS = ('pattern', 'key', 'type') def __init__(self, coding='utf-8'): super(TextgridParser, self).__init__() self.default_coding = coding self.intervals = [] self.original_duration_sum = 0 def reset(self): self.intervals = [] def read(self, filename): self.filename = filename with open(filename, 'rb') as f: raw_data = f.read() # self.coding = self.code_det(content[0:10]) self.coding = chardet.detect(raw_data)['encoding'] try: self.content = raw_data.decode(self.coding).encode(self.default_coding) self.lines = self.content.splitlines() except UnicodeError, e: loginfo(u'>>文件:{filename}'.format(filename=self.filename), stdout=True) loginfo(u'解码时发生错误,请选择合适的文本编辑器,并以utf-8编码格式保存后,再运行此程序', stdout=True) loginfo('') raise IOError def code_det(self, headline, default='utf-8'): for enc,boms in TextgridParser.CODINGS: if any(headline.startswith(bom) for bom in boms): return enc return default def pack(self, keys, tuples): package = [] for vals in tuples: package.append({ keys[i]:vals[i] for i in range(len(keys)) }) return package def update(self, interval, item_pattern, line, append_mode=False): ip = item_pattern if append_mode: # only for text interval[ip['key']] += ip['type'](ip['pattern'].match(line).group(ip['key'])) else: interval.update({ ip['key']: ip['type'](ip['pattern'].match(line).group(ip['key'])) }) return interval def match(self, item_pattern, line): return item_pattern['pattern'].match(line) def search(self, parser, fn): return fn(parser.search(self.content)) def parse(self): print(u'正在解析{filename}...'.format(filename=self.filename)) loginfo(u'>>文件:%s' % self.filename) original_duration = self.search(*TextgridParser.HEADER_PATTERN) self.original_duration_sum += original_duration logtime(u'>>文件:%s\t 原始语音时长为%f秒' % (self.filename, original_duration)) lineno = 0 interval = {} APPEND_MODE = False self.reset() bp_iter = CycleIterator(self.pack(TextgridParser.PATTERN_KEYS, TextgridParser.BLOCK_PATTERNS)) mp_iter = CycleIterator(self.pack(TextgridParser.PATTERN_KEYS, TextgridParser.MULTILINES_PATTERN)) block_begining = bp_iter.head() item_pattern = bp_iter.next() for line in self.lines: lineno += 1 # reset the block parsing once the line matched the begining pattern if self.match(block_begining, line): # self.update(interval, block_begining, line) # not the start actually, exception occured in parsing last block if item_pattern != block_begining: loginfo(u'错误:无法解析第%d行,不是textgrid标准格式,已跳过' % (lineno-1), stdout=True) # last line instead of the current interval = {} APPEND_MODE = False bp_iter.reset() item_pattern = bp_iter.next() # when a text existed in multiple lines elif APPEND_MODE: if self.match(mp_iter.tail(), line): # match the pattern of end line self.update(interval, mp_iter.tail(), line, APPEND_MODE) interval['lineno'] = lineno 
self.intervals.append(interval) # block ends interval = {} item_pattern = bp_iter.next() # loop to the begining APPEND_MODE = False # 2. block ending else: # append the middle part of the text self.update(interval, mp_iter.index(1), line, APPEND_MODE) # match the item in sequence if self.match(item_pattern, line): self.update(interval, item_pattern, line) # if the end of the block was matched if bp_iter.end(): interval['lineno'] = lineno self.intervals.append(interval) interval = {} # loop to the begining item_pattern = bp_iter.next() # 1. block ending # match the begining of multi-lines text instead of a single line elif self.match(mp_iter.head(), line): self.update(interval, mp_iter.head(), line) APPEND_MODE = True def validate(intervals, quiet=False): validated = [] error_no = 0 if not quiet: print(u'正在验证...') for interval in intervals: legal = True # to append legal textgrid to the list text = interval[TEXT_KEY].decode('utf-8') if text: for rp,fn,msg in RULES_PATTERNS: result = rp.match(text) if result: text = fn(result) else: if not quiet: loginfo(msg.format(lineno=interval['lineno'], text=interval['text'].decode('utf-8'))) legal = False error_no += 1 break else: legal = False if legal: validated.append(interval) if not quiet: print(u'验证完成,检测到%d个错误' % error_no) if error_no == 0: loginfo(u'Succeed') else: loginfo(u'共%d个错误被
', (c
identifier_name
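read/code_det in the record above sniff the byte order mark (falling back to chardet) before decoding a file. A small standalone sketch of the BOM check, using a made-up UTF-8 input:

# Sketch of BOM-based encoding detection as in code_det above.
import codecs

CODINGS = (
    ('utf-8-sig', (codecs.BOM_UTF8,)),
    ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)),
    ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)),
)

def detect_encoding(head, default='utf-8'):
    """Return the first encoding whose BOM prefixes the raw bytes."""
    for encoding, boms in CODINGS:
        if any(head.startswith(bom) for bom in boms):
            return encoding
    return default

raw = codecs.BOM_UTF8 + 'xmin = 0.0'.encode('utf-8')
encoding = detect_encoding(raw[:10])
print(encoding, raw.decode(encoding))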
qualify_textgrid.py
' TEXT_CATEGORY_PARSER = re.compile('^(?P<category>[1-4])\D.*', flags=re.UNICODE) MARKS_MEANING = { '1': '1-', '2': '2-', '3': '3-', '4': '4-' } logger = None time_logger = None def setup(target): global logger global time_logger if os.path.isdir(target): if target.endswith('\\'): target = target[:-1] logfile = os.path.join(target, os.path.basename(target)+'.log') timelog = os.path.join(target, 'duration.log') elif os.path.isfile(target): logfile = target + '.log' timelog = target + '_duration.log' logger = open(logfile, 'w') time_logger = open(timelog, 'w') def teardown(): logger.close() time_logger.close() def loginfo(msg, stdout=False, timelog=False): if stdout: print(msg) logger.write((msg+os.linesep).encode('utf-8')) if timelog: logtime(msg) #syntax sugar def logtime(msg, stdout=False): if stdout: print(msg) time_logger.write((msg+os.linesep).encode('utf-8')) class CycleIterator(object): """ a wrapper for the itertools.cycle """ def __init__(self, iterable): super(CycleIterator, self).__init__() self.iterable = iterable self.iterator = cycle(iterable) self.value = None def head(self): return self.iterable[0] def tail(self): return self.iterable[-1] def next(self): self.value = self.iterator.next() return self.value def end(self): return self.value == self.tail() # to loop from the begining def reset(self): self.iterator = cycle(self.iterable) def index(self, i): return self.iterable[i] class TextgridParser(object): """translate the textgrid into a dict""" CODINGS = ( ('utf-8-sig', (codecs.BOM_UTF8,)), ('utf-16', (codecs.BOM_UTF16_LE, codecs.BOM_UTF16_BE)), ('utf-32', (codecs.BOM_UTF32_LE, codecs.BOM_UTF32_BE)), ) # for textgrid header HEADER_PATTERN = ( re.compile('xmin = (?P<start>[\d\.]+)\s*xmax = (?P<end>[\d\.]+)\s*tiers\? <exists>'), lambda x: float(x.group('end')) - float(x.group('start')), ) BLOCK_PATTERNS = ( (re.compile('^\s*intervals \[(?P<slice>\d+)\]:'), 'slice', int), (re.compile('^\s*xmin = (?P<xmin>[\d\.]+)'), 'xmin', float), (re.compile('^\s*xmax = (?P<xmax>[\d\.]+)'), 'xmax', float), (re.compile('^\s*text = "(?P<text>.*)"'), 'text', str), ) # for a special case that a text has multiple lines MULTILINES_PATTERN = ( (re.compile('^\s*text = "(?P<text>.*)'), 'text', str), (re.compile('^(?P<text>.*)$'), 'text', str), # to adapt the new line (re.compile('^(?P<text>.*)"\s*$'), 'text', str), ) PATTERN_KEYS = ('pattern', 'key', 'type') def __init__(self, coding='utf-8'): super(TextgridParser, self).__init__() self.default_coding = coding self.intervals = [] self.original_duration_sum = 0 def reset(self): self.intervals = [] def read(self, filename): self.filename = filename with open(filename, 'rb') as f: raw_data = f.read() # self.coding = self.code_det(content[0:10]) self.coding = chardet.detect(raw_data)['encoding'] try: self.content = raw_data.decode(self.coding).encode(self.default_coding) self.lines = self.content.splitlines() except UnicodeError, e: loginfo(u'>>文件:{filename}'.format(filename=self.filename), stdout=True) loginfo(u'解码时发生错误,请选择合适的文本编辑器,并以utf-8编码格式保存后,再运行此程序', stdout=True) loginfo('') raise IOError def code_det(self, headline, default='utf-8'): for enc,boms in TextgridParser.CODINGS: if any(headline.startswith(bom) for bom in boms): return enc return default def pack(self, keys, tuples): package = [] for vals in tuples: package.append({ keys[i]:vals[i] for i in range(len(keys)) }) return package def update(self, interval, item_pattern, line, append_mode=False): ip = item_pattern if append_mode: # only for text interval[ip['key']] += 
ip['type'](ip['pattern'].match(line).group(
urn interval def match(self, item_pattern, line): return item_pattern['pattern'].match(line) def search(self, parser, fn): return fn(parser.search(self.content)) def parse(self): print(u'正在解析{filename}...'.format(filename=self.filename)) loginfo(u'>>文件:%s' % self.filename) original_duration = self.search(*TextgridParser.HEADER_PATTERN) self.original_duration_sum += original_duration logtime(u'>>文件:%s\t 原始语音时长为%f秒' % (self.filename, original_duration)) lineno = 0 interval = {} APPEND_MODE = False self.reset() bp_iter = CycleIterator(self.pack(TextgridParser.PATTERN_KEYS, TextgridParser.BLOCK_PATTERNS)) mp_iter = CycleIterator(self.pack(TextgridParser.PATTERN_KEYS, TextgridParser.MULTILINES_PATTERN)) block_begining = bp_iter.head() item_pattern = bp_iter.next() for line in self.lines: lineno += 1 # reset the block parsing once the line matched the begining pattern if self.match(block_begining, line): # self.update(interval, block_begining, line) # not the start actually, exception occured in parsing last block if item_pattern != block_begining: loginfo(u'错误:无法解析第%d行,不是textgrid标准格式,已跳过' % (lineno-1), stdout=True) # last line instead of the current interval = {} APPEND_MODE = False bp_iter.reset() item_pattern = bp_iter.next() # when a text existed in multiple lines elif APPEND_MODE: if self.match(mp_iter.tail(), line): # match the pattern of end line self.update(interval, mp_iter.tail(), line, APPEND_MODE) interval['lineno'] = lineno self.intervals.append(interval) # block ends interval = {} item_pattern = bp_iter.next() # loop to the begining APPEND_MODE = False # 2. block ending else: # append the middle part of the text self.update(interval, mp_iter.index(1), line, APPEND_MODE) # match the item in sequence if self.match(item_pattern, line): self.update(interval, item_pattern, line) # if the end of the block was matched if bp_iter.end(): interval['lineno'] = lineno self.intervals.append(interval) interval = {} # loop to the begining item_pattern = bp_iter.next() # 1. block ending # match the begining of multi-lines text instead of a single line elif self.match(mp_iter.head(), line): self.update(interval, mp_iter.head(), line) APPEND_MODE = True def validate(intervals, quiet=False): validated = [] error_no = 0 if not quiet: print(u'正在验证...') for interval in intervals: legal = True # to append legal textgrid to the list text = interval[TEXT_KEY].decode('utf-8') if text: for rp,fn,msg in RULES_PATTERNS: result = rp.match(text) if result: text = fn(result) else: if not quiet: loginfo(msg.format(lineno=interval['lineno'], text=interval['text'].decode('utf-8'))) legal = False error_no += 1 break else: legal = False if legal: validated.append(interval) if not quiet: print(u'验证完成,检测到%d个错误' % error_no) if error_no == 0: loginfo(u'Succeed') else: loginfo(u'共%d个错误被
ip['key'])) else: interval.update({ ip['key']: ip['type'](ip['pattern'].match(line).group(ip['key'])) }) ret
identifier_body
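timeit in the record above appears to bucket each validated interval by the leading quality mark (1-4) in its text; summing duration per bucket is an assumption here, suggested by the duration log but not fully visible in this excerpt. A rough sketch with invented sample intervals:

# Sketch of grouping intervals by their leading category mark and summing
# duration per category; the exact aggregation in timeit is assumed.
import re
from collections import defaultdict

TEXT_CATEGORY_PARSER = re.compile(r'^(?P<category>[1-4])\D.*', flags=re.UNICODE)

def durations_by_category(intervals):
    totals = defaultdict(float)
    for interval in intervals:
        match = TEXT_CATEGORY_PARSER.match(interval['text'])
        if match:   # skip intervals whose text carries no category mark
            totals[match.group('category')] += interval['xmax'] - interval['xmin']
    return dict(totals)

sample = [
    {'xmin': 0.0, 'xmax': 1.5, 'text': '1 clean speech'},
    {'xmin': 1.5, 'xmax': 2.0, 'text': '3 noisy segment'},
    {'xmin': 2.0, 'xmax': 2.5, 'text': '1 clean again'},
]
print(durations_by_category(sample))   # {'1': 2.0, '3': 0.5}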
table.rs
table.len()).expect("write error"); } // Reduce the table to prime, essential implicants pub fn reduce_to_prime_implicants (table: Table) -> Vec<Row> { // imps contains a vector of the found implicants; primed with the last row, last column let mut imps: Vec<u32> = Vec::new(); // Get the last column let mut end_column: usize = table.entries.len() -1; // Get the last column, minus the already primed imps. let mut end_row: usize = table.entries.last().unwrap().len() -1; // Vector of the Rows that are prime implicants, primed with the first one let mut prime_imps: Vec<Row> = Vec::new(); // Loop until all of the imps have been found. loop { // Check each implicant entry to see if it is already included for i in 0..table.entries[end_column][end_row].implicants.len() { // If not, then add all of the implicants in the entry and push the Row if ! imps.contains(& table.entries[end_column][end_row].implicants[i]) { imps.extend(table.entries[end_column][end_row].implicants.clone()); prime_imps.push(table.entries[end_column][end_row].clone()); } } // Check to see if we are done if vec_in( & imps, & table.all_implicants) { break; } // Decriment the counters if end_row == 0 { end_column -= 1; end_row = table.entries[end_column].len() -1; } else { end_row -= 1; } } // Return prime implicants prime_imps } // Check to see if vec_b is contained within vec_a fn vec_in (vec_a: & Vec<u32>, vec_b: & Vec<u32>) -> bool { for i in 0..vec_b.len() { if ! vec_a.contains(& vec_b[i]) { return false } } true } // If there is a dublicate, return true. Else, return false fn implicant_duplicate (imps_a: & Vec<Row>, imps_b: & Vec<u32>) -> bool { // Test to see if the implicant has already been found for b in 0..imps_a.len() { if vec_in(& imps_a[b].implicants, & imps_b) { return true; } } return false; } // Compare the implicants pub fn comparison (mut table: Table) -> Option<Table> { let mut bin: Vec<u32> = Vec::new(); let mut diffs: u32 = 0; let mut imps: Vec<Row> = Vec::new(); let mut temp_implicants: Vec<u32>; let mut dashes1: Vec<usize> = Vec::new(); let mut dashes2: Vec<usize> = Vec::new(); // For lazyness clone the set of data needed to increase readability...maybe should // be switched to refernces and dereferences let work_set: Vec<Row> = table.entries.last().unwrap().clone(); // For each Row in the last vector in table.entries for i in 0..(work_set.len()) { // Find the indexes of the dashes for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == 2 { dashes1.push(n); } } // For each Row that has one more "one" than the above Row for a in i..(work_set.len()) { dashes2.clear(); // This could be put in a function if work_set[a].ones == work_set[i].ones + 1 { // Get the indexes of the dashes for n in 0..(work_set[a].bin.len()) { if work_set[a].bin[n] == 2 { dashes2.push(n); } } // Compare the indexes of the dashes. If they are not the same, pass along if dashes1 != dashes2 { continue; } // Compare the differences for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == work_set[a].bin[n] { bin.push(work_set[i].bin[n]); } else { bin.push(2); diffs += 1; } // Check to see if the difference is greater than one if diffs > 1 { break; } } // Check to see if the differences is greater than one if diffs > 1
// Put together the base implicants of the candidate new implicant temp_implicants = [work_set[i].implicants.clone(), work_set[a].implicants.clone()].concat(); // LOgic not right!!!!!! // Test to see if the implicant has already been found // if Yes, Move on! if implicant_duplicate(& imps, & temp_implicants) { temp_implicants.clear(); bin.clear(); diffs = 0; continue; } // Push the row to the imps imps.push(Row { bin: bin.clone(), ones: work_set[i].ones, implicants: temp_implicants.clone() }); // clear out the variables temp_implicants.clear(); bin.clear(); diffs = 0; } // If the number of ones is greater than one differnt, break the loop else if work_set[a].ones >= work_set[i].ones + 1 { break; } } // Reset bin, diffs, dashes dashes1.clear(); } // return the result wrapped in an option. if imps.len() == 0 { None } else { table.entries.push(imps); Some(table) } } // Do the inital comparison throwing in the first set of dashes (2's...because u32 doesn't // include dashes) pub fn initial_comparison (mut table: Table) -> Table { // imps is a vector of rows that houses the new column of implicants let mut imps: Vec<Row> = Vec::new(); // num_dashes is a u32 that contains the number of dashes (don't cares) in a row. If // there is more or less than one then the rows cannot be combined. let mut num_dashes: u32 = 0; // temp is a vector of binary implicants. let mut temp: Vec<u32> = Vec::new(); // iterate over each entry in the array for i in 0..(table.entries[0].len()) { // For each array entry, compare it to all the entries following it. for n in i..table.entries[0].len() { // Only compare the entries that have one more "one" in it. if table.entries[0][n].ones == table.entries[0][i].ones + 1 { // Compare each entry for x in 0..(table.entries[0][i].bin.len()) { // if the entries match, push the entry to the temp vector if table.entries[0][i].bin[x] == table.entries[0][n].bin[x] { temp.push(table.entries[0][i].bin[x]); // if they don't match, increment the number of dashes and push 2 } else { num_dashes += 1; temp.push(2); } // Check to see if there is more than one dash and break if so if num_dashes > 1 { break; } } // if all of the bits have been compared, and there is only one dash, push // the new implicant into imps if num_dashes == 1 { imps.push(Row { bin: temp.clone(), ones: table.entries[0][n].ones, implicants: [table.entries[0][n].implicants.clone(), table.entries[0][i].implicants.clone()].concat() }) } // Rest for the next iteration num_dashes = 0; temp.clear(); } // check to see if the loop ieterations have passed the one different "one" else if table.entries[0][n].ones > table.entries[0][i].ones + 1 { break; } } } // Push the new implications into another column of the entries table. if ! imps.is_empty() { table.entries.push(imps); } // return it! table } // Quickly sort the truth table entries by the number of ones they have in them pub fn quick_sort (table: Vec<Row>) -> Vec<Row> { // If the array has a length less than or equal to one then it is already sorted if & table.len() <= & 1 { return table } // delare the three vectors let mut smaller: Vec<Row> = Vec::new(); let mut
{ continue; }
conditional_block
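The comparison step in the record above merges two implicants only when their dash positions line up and their remaining bits differ in exactly one place — the core combine rule of the Quine-McCluskey method. A compact sketch of that single rule (2 stands for a dash, as in the Rust code):

# Sketch of the Quine-McCluskey combine step shown above: two rows merge
# only if their dash positions agree and exactly one bit differs.
def combine(row_a, row_b):
    if [i for i, b in enumerate(row_a) if b == 2] != \
       [i for i, b in enumerate(row_b) if b == 2]:
        return None                       # dashes must line up
    merged, diffs = [], 0
    for a, b in zip(row_a, row_b):
        if a == b:
            merged.append(a)
        else:
            merged.append(2)
            diffs += 1
    return merged if diffs == 1 else None

print(combine([1, 0, 0, 1], [1, 0, 1, 1]))   # [1, 0, 2, 1]
print(combine([1, 0, 0, 1], [0, 1, 0, 1]))   # None, two bits differ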
table.rs
the last column, minus the already primed imps. let mut end_row: usize = table.entries.last().unwrap().len() -1; // Vector of the Rows that are prime implicants, primed with the first one let mut prime_imps: Vec<Row> = Vec::new(); // Loop until all of the imps have been found. loop { // Check each implicant entry to see if it is already included for i in 0..table.entries[end_column][end_row].implicants.len() { // If not, then add all of the implicants in the entry and push the Row if ! imps.contains(& table.entries[end_column][end_row].implicants[i]) { imps.extend(table.entries[end_column][end_row].implicants.clone()); prime_imps.push(table.entries[end_column][end_row].clone()); } } // Check to see if we are done if vec_in( & imps, & table.all_implicants) { break; } // Decriment the counters if end_row == 0 { end_column -= 1; end_row = table.entries[end_column].len() -1; } else { end_row -= 1; } } // Return prime implicants prime_imps } // Check to see if vec_b is contained within vec_a fn vec_in (vec_a: & Vec<u32>, vec_b: & Vec<u32>) -> bool { for i in 0..vec_b.len() { if ! vec_a.contains(& vec_b[i]) { return false } } true } // If there is a dublicate, return true. Else, return false fn implicant_duplicate (imps_a: & Vec<Row>, imps_b: & Vec<u32>) -> bool { // Test to see if the implicant has already been found for b in 0..imps_a.len() { if vec_in(& imps_a[b].implicants, & imps_b) { return true; } } return false; } // Compare the implicants pub fn comparison (mut table: Table) -> Option<Table> { let mut bin: Vec<u32> = Vec::new(); let mut diffs: u32 = 0; let mut imps: Vec<Row> = Vec::new(); let mut temp_implicants: Vec<u32>; let mut dashes1: Vec<usize> = Vec::new(); let mut dashes2: Vec<usize> = Vec::new(); // For lazyness clone the set of data needed to increase readability...maybe should // be switched to refernces and dereferences let work_set: Vec<Row> = table.entries.last().unwrap().clone(); // For each Row in the last vector in table.entries for i in 0..(work_set.len()) { // Find the indexes of the dashes for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == 2 { dashes1.push(n); } } // For each Row that has one more "one" than the above Row for a in i..(work_set.len()) { dashes2.clear(); // This could be put in a function if work_set[a].ones == work_set[i].ones + 1 { // Get the indexes of the dashes for n in 0..(work_set[a].bin.len()) { if work_set[a].bin[n] == 2 { dashes2.push(n); } } // Compare the indexes of the dashes. If they are not the same, pass along if dashes1 != dashes2 { continue; } // Compare the differences for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == work_set[a].bin[n] { bin.push(work_set[i].bin[n]); } else { bin.push(2); diffs += 1; } // Check to see if the difference is greater than one if diffs > 1 { break; } } // Check to see if the differences is greater than one if diffs > 1 { continue; } // Put together the base implicants of the candidate new implicant temp_implicants = [work_set[i].implicants.clone(), work_set[a].implicants.clone()].concat(); // LOgic not right!!!!!! // Test to see if the implicant has already been found // if Yes, Move on! 
if implicant_duplicate(& imps, & temp_implicants) { temp_implicants.clear(); bin.clear(); diffs = 0; continue; } // Push the row to the imps imps.push(Row { bin: bin.clone(), ones: work_set[i].ones, implicants: temp_implicants.clone() }); // clear out the variables temp_implicants.clear(); bin.clear(); diffs = 0; } // If the number of ones is greater than one differnt, break the loop else if work_set[a].ones >= work_set[i].ones + 1 { break; } } // Reset bin, diffs, dashes dashes1.clear(); } // return the result wrapped in an option. if imps.len() == 0 { None } else { table.entries.push(imps); Some(table) } } // Do the inital comparison throwing in the first set of dashes (2's...because u32 doesn't // include dashes) pub fn initial_comparison (mut table: Table) -> Table { // imps is a vector of rows that houses the new column of implicants let mut imps: Vec<Row> = Vec::new(); // num_dashes is a u32 that contains the number of dashes (don't cares) in a row. If // there is more or less than one then the rows cannot be combined. let mut num_dashes: u32 = 0; // temp is a vector of binary implicants. let mut temp: Vec<u32> = Vec::new(); // iterate over each entry in the array for i in 0..(table.entries[0].len()) { // For each array entry, compare it to all the entries following it. for n in i..table.entries[0].len() { // Only compare the entries that have one more "one" in it. if table.entries[0][n].ones == table.entries[0][i].ones + 1 { // Compare each entry for x in 0..(table.entries[0][i].bin.len()) { // if the entries match, push the entry to the temp vector if table.entries[0][i].bin[x] == table.entries[0][n].bin[x] { temp.push(table.entries[0][i].bin[x]); // if they don't match, increment the number of dashes and push 2 } else { num_dashes += 1; temp.push(2); } // Check to see if there is more than one dash and break if so if num_dashes > 1 { break; } } // if all of the bits have been compared, and there is only one dash, push // the new implicant into imps if num_dashes == 1 { imps.push(Row { bin: temp.clone(), ones: table.entries[0][n].ones, implicants: [table.entries[0][n].implicants.clone(), table.entries[0][i].implicants.clone()].concat() }) } // Rest for the next iteration num_dashes = 0; temp.clear(); } // check to see if the loop ieterations have passed the one different "one" else if table.entries[0][n].ones > table.entries[0][i].ones + 1 { break; } } } // Push the new implications into another column of the entries table. if ! imps.is_empty() { table.entries.push(imps); } // return it! table } // Quickly sort the truth table entries by the number of ones they have in them pub fn quick_sort (table: Vec<Row>) -> Vec<Row>
{ // If the array has a length less than or equal to one then it is already sorted if & table.len() <= & 1 { return table } // delare the three vectors let mut smaller: Vec<Row> = Vec::new(); let mut equal: Vec<Row> = Vec::new(); let mut larger: Vec<Row> = Vec::new(); // Get the pivot in the middle of the array // The ends are bad choices because often the list is already almost sorted let pivot = & table[(& table.len()/2)].ones; // Iterate and devide the values into the respective vectors for x in & table { if x.ones < * pivot { smaller.push(x.clone());
identifier_body
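reduce_to_prime_implicants in the record above walks the merged columns backwards and keeps a row whenever it covers a minterm not yet accounted for, stopping once every original minterm is covered. A simplified sketch of that selection — rows here are plain (bits, minterms) pairs and the example data is invented:

# Sketch of prime-implicant collection in the spirit of the Rust code above:
# most-combined rows are preferred, and a row is kept if it covers any
# minterm that earlier picks did not.
def prime_implicants(columns, all_minterms):
    covered, primes = set(), []
    for column in reversed(columns):
        for bits, minterms in reversed(column):
            if not set(minterms) <= covered:
                primes.append((bits, minterms))
                covered |= set(minterms)
            if covered >= set(all_minterms):
                return primes
    return primes

columns = [
    [([0, 1, 1], [3]), ([1, 0, 1], [5]), ([1, 1, 1], [7])],   # original rows
    [([2, 1, 1], [3, 7]), ([1, 2, 1], [5, 7])],               # after one merge pass
]
print(prime_implicants(columns, [3, 5, 7]))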
table.rs
ps.contains(& table.entries[end_column][end_row].implicants[i]) { imps.extend(table.entries[end_column][end_row].implicants.clone()); prime_imps.push(table.entries[end_column][end_row].clone()); } } // Check to see if we are done if vec_in( & imps, & table.all_implicants) { break; } // Decriment the counters if end_row == 0 { end_column -= 1; end_row = table.entries[end_column].len() -1; } else { end_row -= 1; } } // Return prime implicants prime_imps } // Check to see if vec_b is contained within vec_a fn vec_in (vec_a: & Vec<u32>, vec_b: & Vec<u32>) -> bool { for i in 0..vec_b.len() { if ! vec_a.contains(& vec_b[i]) { return false } } true } // If there is a dublicate, return true. Else, return false fn implicant_duplicate (imps_a: & Vec<Row>, imps_b: & Vec<u32>) -> bool { // Test to see if the implicant has already been found for b in 0..imps_a.len() { if vec_in(& imps_a[b].implicants, & imps_b) { return true; } } return false; } // Compare the implicants pub fn comparison (mut table: Table) -> Option<Table> { let mut bin: Vec<u32> = Vec::new(); let mut diffs: u32 = 0; let mut imps: Vec<Row> = Vec::new(); let mut temp_implicants: Vec<u32>; let mut dashes1: Vec<usize> = Vec::new(); let mut dashes2: Vec<usize> = Vec::new(); // For lazyness clone the set of data needed to increase readability...maybe should // be switched to refernces and dereferences let work_set: Vec<Row> = table.entries.last().unwrap().clone(); // For each Row in the last vector in table.entries for i in 0..(work_set.len()) { // Find the indexes of the dashes for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == 2 { dashes1.push(n); } } // For each Row that has one more "one" than the above Row for a in i..(work_set.len()) { dashes2.clear(); // This could be put in a function if work_set[a].ones == work_set[i].ones + 1 { // Get the indexes of the dashes for n in 0..(work_set[a].bin.len()) { if work_set[a].bin[n] == 2 { dashes2.push(n); } } // Compare the indexes of the dashes. If they are not the same, pass along if dashes1 != dashes2 { continue; } // Compare the differences for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == work_set[a].bin[n] { bin.push(work_set[i].bin[n]); } else { bin.push(2); diffs += 1; } // Check to see if the difference is greater than one if diffs > 1 { break; } } // Check to see if the differences is greater than one if diffs > 1 { continue; } // Put together the base implicants of the candidate new implicant temp_implicants = [work_set[i].implicants.clone(), work_set[a].implicants.clone()].concat(); // LOgic not right!!!!!! // Test to see if the implicant has already been found // if Yes, Move on! if implicant_duplicate(& imps, & temp_implicants) { temp_implicants.clear(); bin.clear(); diffs = 0; continue; } // Push the row to the imps imps.push(Row { bin: bin.clone(), ones: work_set[i].ones, implicants: temp_implicants.clone() }); // clear out the variables temp_implicants.clear(); bin.clear(); diffs = 0; } // If the number of ones is greater than one differnt, break the loop else if work_set[a].ones >= work_set[i].ones + 1 { break; } } // Reset bin, diffs, dashes dashes1.clear(); } // return the result wrapped in an option. 
if imps.len() == 0 { None } else { table.entries.push(imps); Some(table) } } // Do the inital comparison throwing in the first set of dashes (2's...because u32 doesn't // include dashes) pub fn initial_comparison (mut table: Table) -> Table { // imps is a vector of rows that houses the new column of implicants let mut imps: Vec<Row> = Vec::new(); // num_dashes is a u32 that contains the number of dashes (don't cares) in a row. If // there is more or less than one then the rows cannot be combined. let mut num_dashes: u32 = 0; // temp is a vector of binary implicants. let mut temp: Vec<u32> = Vec::new(); // iterate over each entry in the array for i in 0..(table.entries[0].len()) { // For each array entry, compare it to all the entries following it. for n in i..table.entries[0].len() { // Only compare the entries that have one more "one" in it. if table.entries[0][n].ones == table.entries[0][i].ones + 1 { // Compare each entry for x in 0..(table.entries[0][i].bin.len()) { // if the entries match, push the entry to the temp vector if table.entries[0][i].bin[x] == table.entries[0][n].bin[x] { temp.push(table.entries[0][i].bin[x]); // if they don't match, increment the number of dashes and push 2 } else { num_dashes += 1; temp.push(2); } // Check to see if there is more than one dash and break if so if num_dashes > 1 { break; } } // if all of the bits have been compared, and there is only one dash, push // the new implicant into imps if num_dashes == 1 { imps.push(Row { bin: temp.clone(), ones: table.entries[0][n].ones, implicants: [table.entries[0][n].implicants.clone(), table.entries[0][i].implicants.clone()].concat() }) } // Rest for the next iteration num_dashes = 0; temp.clear(); } // check to see if the loop ieterations have passed the one different "one" else if table.entries[0][n].ones > table.entries[0][i].ones + 1 { break; } } } // Push the new implications into another column of the entries table. if ! imps.is_empty() { table.entries.push(imps); } // return it! table } // Quickly sort the truth table entries by the number of ones they have in them pub fn quick_sort (table: Vec<Row>) -> Vec<Row> { // If the array has a length less than or equal to one then it is already sorted if & table.len() <= & 1 { return table } // delare the three vectors let mut smaller: Vec<Row> = Vec::new(); let mut equal: Vec<Row> = Vec::new(); let mut larger: Vec<Row> = Vec::new(); // Get the pivot in the middle of the array // The ends are bad choices because often the list is already almost sorted let pivot = & table[(& table.len()/2)].ones; // Iterate and devide the values into the respective vectors for x in & table { if x.ones < * pivot { smaller.push(x.clone()); } else if x.ones == * pivot { equal.push(x.clone()); } else { larger.push(x.clone()); } } // return recursivly. [quick_sort(smaller), equal, quick_sort(larger)].concat() } pub fn initialize_table (sop: & Vec<u32>) -> Table { // Get the bit size needed to hold all of the SOP implicants let bit_size = max_n(&sop); // initialze a temporary row let mut the_row = Row {
bin: vec![0,0,0,0], ones: 0, implicants: vec![0],
random_line_split
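quick_sort in the record above orders rows by how many ones they contain, recursing on smaller/equal/larger partitions around a middle pivot. The same three-way partition in a few lines of Python:

# Sketch of the three-way quicksort on the ones count used above.
def quick_sort(rows):
    if len(rows) <= 1:
        return rows
    pivot = rows[len(rows) // 2]['ones']          # middle pivot, as above
    smaller = [r for r in rows if r['ones'] < pivot]
    equal   = [r for r in rows if r['ones'] == pivot]
    larger  = [r for r in rows if r['ones'] > pivot]
    return quick_sort(smaller) + equal + quick_sort(larger)

rows = [{'ones': 3}, {'ones': 0}, {'ones': 2}, {'ones': 1}]
print(quick_sort(rows))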
table.rs
table.len()).expect("write error"); } // Reduce the table to prime, essential implicants pub fn reduce_to_prime_implicants (table: Table) -> Vec<Row> { // imps contains a vector of the found implicants; primed with the last row, last column let mut imps: Vec<u32> = Vec::new(); // Get the last column let mut end_column: usize = table.entries.len() -1; // Get the last column, minus the already primed imps. let mut end_row: usize = table.entries.last().unwrap().len() -1; // Vector of the Rows that are prime implicants, primed with the first one let mut prime_imps: Vec<Row> = Vec::new(); // Loop until all of the imps have been found. loop { // Check each implicant entry to see if it is already included for i in 0..table.entries[end_column][end_row].implicants.len() { // If not, then add all of the implicants in the entry and push the Row if ! imps.contains(& table.entries[end_column][end_row].implicants[i]) { imps.extend(table.entries[end_column][end_row].implicants.clone()); prime_imps.push(table.entries[end_column][end_row].clone()); } } // Check to see if we are done if vec_in( & imps, & table.all_implicants) { break; } // Decriment the counters if end_row == 0 { end_column -= 1; end_row = table.entries[end_column].len() -1; } else { end_row -= 1; } } // Return prime implicants prime_imps } // Check to see if vec_b is contained within vec_a fn vec_in (vec_a: & Vec<u32>, vec_b: & Vec<u32>) -> bool { for i in 0..vec_b.len() { if ! vec_a.contains(& vec_b[i]) { return false } } true } // If there is a dublicate, return true. Else, return false fn implicant_duplicate (imps_a: & Vec<Row>, imps_b: & Vec<u32>) -> bool { // Test to see if the implicant has already been found for b in 0..imps_a.len() { if vec_in(& imps_a[b].implicants, & imps_b) { return true; } } return false; } // Compare the implicants pub fn comparison (mut table: Table) -> Option<Table> { let mut bin: Vec<u32> = Vec::new(); let mut diffs: u32 = 0; let mut imps: Vec<Row> = Vec::new(); let mut temp_implicants: Vec<u32>; let mut dashes1: Vec<usize> = Vec::new(); let mut dashes2: Vec<usize> = Vec::new(); // For lazyness clone the set of data needed to increase readability...maybe should // be switched to refernces and dereferences let work_set: Vec<Row> = table.entries.last().unwrap().clone(); // For each Row in the last vector in table.entries for i in 0..(work_set.len()) { // Find the indexes of the dashes for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == 2 { dashes1.push(n); } } // For each Row that has one more "one" than the above Row for a in i..(work_set.len()) { dashes2.clear(); // This could be put in a function if work_set[a].ones == work_set[i].ones + 1 { // Get the indexes of the dashes for n in 0..(work_set[a].bin.len()) { if work_set[a].bin[n] == 2 { dashes2.push(n); } } // Compare the indexes of the dashes. If they are not the same, pass along if dashes1 != dashes2 { continue; } // Compare the differences for n in 0..(work_set[i].bin.len()) { if work_set[i].bin[n] == work_set[a].bin[n] { bin.push(work_set[i].bin[n]); } else { bin.push(2); diffs += 1; } // Check to see if the difference is greater than one if diffs > 1 { break; } } // Check to see if the differences is greater than one if diffs > 1 { continue; } // Put together the base implicants of the candidate new implicant temp_implicants = [work_set[i].implicants.clone(), work_set[a].implicants.clone()].concat(); // LOgic not right!!!!!! // Test to see if the implicant has already been found // if Yes, Move on! 
if implicant_duplicate(& imps, & temp_implicants) { temp_implicants.clear(); bin.clear(); diffs = 0; continue; } // Push the row to the imps imps.push(Row { bin: bin.clone(), ones: work_set[i].ones, implicants: temp_implicants.clone() }); // clear out the variables temp_implicants.clear(); bin.clear(); diffs = 0; } // If the number of ones is greater than one differnt, break the loop else if work_set[a].ones >= work_set[i].ones + 1 { break; } } // Reset bin, diffs, dashes dashes1.clear(); } // return the result wrapped in an option. if imps.len() == 0 { None } else { table.entries.push(imps); Some(table) } } // Do the inital comparison throwing in the first set of dashes (2's...because u32 doesn't // include dashes) pub fn
(mut table: Table) -> Table { // imps is a vector of rows that houses the new column of implicants let mut imps: Vec<Row> = Vec::new(); // num_dashes is a u32 that contains the number of dashes (don't cares) in a row. If // there is more or less than one then the rows cannot be combined. let mut num_dashes: u32 = 0; // temp is a vector of binary implicants. let mut temp: Vec<u32> = Vec::new(); // iterate over each entry in the array for i in 0..(table.entries[0].len()) { // For each array entry, compare it to all the entries following it. for n in i..table.entries[0].len() { // Only compare the entries that have one more "one" in it. if table.entries[0][n].ones == table.entries[0][i].ones + 1 { // Compare each entry for x in 0..(table.entries[0][i].bin.len()) { // if the entries match, push the entry to the temp vector if table.entries[0][i].bin[x] == table.entries[0][n].bin[x] { temp.push(table.entries[0][i].bin[x]); // if they don't match, increment the number of dashes and push 2 } else { num_dashes += 1; temp.push(2); } // Check to see if there is more than one dash and break if so if num_dashes > 1 { break; } } // if all of the bits have been compared, and there is only one dash, push // the new implicant into imps if num_dashes == 1 { imps.push(Row { bin: temp.clone(), ones: table.entries[0][n].ones, implicants: [table.entries[0][n].implicants.clone(), table.entries[0][i].implicants.clone()].concat() }) } // Rest for the next iteration num_dashes = 0; temp.clear(); } // check to see if the loop ieterations have passed the one different "one" else if table.entries[0][n].ones > table.entries[0][i].ones + 1 { break; } } } // Push the new implications into another column of the entries table. if ! imps.is_empty() { table.entries.push(imps); } // return it! table } // Quickly sort the truth table entries by the number of ones they have in them pub fn quick_sort (table: Vec<Row>) -> Vec<Row> { // If the array has a length less than or equal to one then it is already sorted if & table.len() <= & 1 { return table } // delare the three vectors let mut smaller: Vec<Row> = Vec::new(); let mut
initial_comparison
identifier_name
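initialize_table in the record above turns every minterm of the sum-of-products input into a fixed-width bit row plus its ones count before the first comparison pass. A sketch of that setup step; using sorted by ones count here stands in for the quick_sort call:

# Sketch of building the initial Quine-McCluskey table: each minterm
# becomes a fixed-width bit vector together with its ones count.
def initialize_table(minterms):
    width = max(minterms).bit_length() or 1
    rows = []
    for m in minterms:
        bits = [(m >> i) & 1 for i in reversed(range(width))]
        rows.append({'bin': bits, 'ones': sum(bits), 'implicants': [m]})
    return sorted(rows, key=lambda r: r['ones'])   # same ordering quick_sort gives

for row in initialize_table([0, 1, 2, 5, 7]):
    print(row)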
gobuild.go
Dir + pack.OutputFile + objExt argvFilled++ logger.Info("Linking %s...\n", argv[2]) logger.Info(" %s\n\n", getCommandline(argv)) cmd, err := exec.Run(linkerBin, argv[0:argvFilled], os.Environ(), rootPath, exec.DevNull, exec.PassThrough, exec.PassThrough) if err != nil { logger.Error("%s\n", err) os.Exit(1) } waitmsg, err := cmd.Wait(0) if err != nil { logger.Error("Linker execution error (%s), aborting compilation.\n", err) os.Exit(1) } if waitmsg.ExitStatus() != 0 { logger.Error("Linker returned with errors, aborting.\n") return false } return true } /* Executes goyacc for a single .y file. The new .go files is prefixed with an underscore and returned as a string for further use. */ func goyacc(filepath string) string { // construct output file path var outFilepath string l_idx := strings.LastIndex(filepath, "/") if l_idx >= 0 { outFilepath = filepath[0:l_idx+1] + "_" + filepath[l_idx+1:len(filepath)-1] + "go" } else { outFilepath = "_" + filepath[0:len(filepath)-1] + "go" } goyaccPath, err := exec.LookPath("goyacc") if err != nil { logger.Error("%s\n", err) os.Exit(1) } logger.Info("Parsing goyacc file %s.\n", filepath) argv := []string{goyaccPath, "-o", outFilepath, filepath} logger.Debug("%s\n", argv) cmd, err := exec.Run(argv[0], argv, os.Environ(), rootPath, exec.PassThrough, exec.PassThrough, exec.PassThrough) if err != nil { logger.Error("%s\n", err) os.Exit(1) } waitmsg, err := cmd.Wait(0) if err != nil { logger.Error("Executing goyacc failed: %s.\n", err) os.Exit(1) } if waitmsg.ExitStatus() != 0 { os.Exit(waitmsg.ExitStatus()) } return outFilepath } /* Executes something. Used for the -run command line option. */ func runExec(argv []string) { logger.Info("Executing %s:\n", argv[0]) logger.Debug("%s\n", getCommandline(argv)) cmd, err := exec.Run(argv[0], argv, os.Environ(), rootPath, exec.PassThrough, exec.PassThrough, exec.PassThrough) if err != nil { logger.Error("%s\n", err) os.Exit(1) } waitmsg, err := cmd.Wait(0) if err != nil { logger.Error("Executing %s failed: %s.\n", argv[0], err) os.Exit(1) } if waitmsg.ExitStatus() != 0 { os.Exit(waitmsg.ExitStatus()) } } /* Creates a .a file for a single GoPackage */ func packLib(pack *godata.GoPackage) { var objDir string = "" //outputDirPrefix + getObjDir(); // ignore packages that need to be build manually (like cgo packages) if pack.HasCGOFiles() { logger.Debug("Skipped %s.a because it can't be build with gobuild.\n", pack.Name) return } logger.Info("Creating %s.a...\n", pack.Name) argv := []string{ gopackBin, "crg", // create new go archive outputDirPrefix + pack.Name + ".a", objDir + pack.Name + objExt, } logger.Debug("%s\n", getCommandline(argv)) cmd, err := exec.Run(gopackBin, argv, os.Environ(), rootPath, exec.DevNull, exec.PassThrough, exec.PassThrough) if err != nil { logger.Error("%s\n", err) os.Exit(1) } waitmsg, err := cmd.Wait(0) if err != nil { logger.Error("gopack execution error (%s), aborting.\n", err) os.Exit(1) } if waitmsg.ExitStatus() != 0 { logger.Error("gopack returned with errors, aborting.\n") os.Exit(waitmsg.ExitStatus()) } os.Remove(objDir + pack.Name + objExt) } /* Build an executable from the given sources. 
*/ func buildExecutable() { var executables []string var execFilled int // check if there's a main package: if goPackages.GetMainCount() == 0 { logger.Error("No main package found.\n") os.Exit(1) } // multiple main, no command file from command line and no -a -> error if (goPackages.GetMainCount() > 1) && (flag.NArg() == 0) && !*flagBuildAll { logger.Error("Multiple files found with main function.\n") logger.ErrorContinue("Please specify one or more as command line parameter or\n") logger.ErrorContinue("run gobuild with -a. Available main files are:\n") for _, fn := range goPackages.GetMainFilenames() { logger.ErrorContinue("\t %s\n", fn) } os.Exit(1) } // compile all needed packages if flag.NArg() > 0 { if *flagRunExec { executables = make([]string, flag.NArg()) } for _, fn := range flag.Args() { mainPack, exists := goPackages.GetMain(fn, !*flagSingleMainFile) if !exists { logger.Error("File %s not found.\n", fn) return // or os.Exit? } if compile(mainPack) { // link everything together if link(mainPack) { if *flagRunExec { executables[execFilled] = outputDirPrefix + mainPack.OutputFile execFilled++ } } else { linkErrors = true } } else { logger.Error("Can't link executable because of compile errors.\n") compileErrors = true } } } else { if *flagRunExec { executables = make([]string, goPackages.GetMainCount()) } for _, mainPack := range goPackages.GetMainPackages(!*flagSingleMainFile) { if compile(mainPack) { if link(mainPack) { if *flagRunExec { executables[execFilled] = outputDirPrefix + mainPack.OutputFile execFilled++ } } else { linkErrors = true } } else { logger.Error("Can't link executable because of compile errors.\n") compileErrors = true } } } if *flagRunExec && !linkErrors && !compileErrors { for i := 0; i < execFilled; i++ { runExec([]string{executables[i]}) } } } /* Build library files (.a) for all packages or the ones given though command line parameters. */ func buildLibrary() { var packNames []string var pack *godata.GoPackage var exists bool if goPackages.GetPackageCount() == 0 { logger.Warn("No packages found to build.\n") return } // check for there is at least one package that can be compiled var hasNoCompilablePacks bool = true for _, packName := range goPackages.GetPackageNames() { pack, _ := goPackages.Get(packName) if pack.Name == "main" { continue } if pack.Files.Len() > 0 && !pack.HasCGOFiles() { hasNoCompilablePacks = false break } } if hasNoCompilablePacks { logger.Warn("No packages found that could be compiled by gobuild.\n") compileErrors = true return } // check for command line parameters if flag.NArg() > 0 { packNames = flag.Args() } else { packNames = goPackages.GetPackageNames() } // loop over all packages, compile them and build a .a file for _, name := range packNames { if name == "main" { continue // don't make this into a library } pack, exists = goPackages.Get(name) if !exists { logger.Error("Package %s doesn't exist.\n", name) continue // or exit? } // don't compile remote packages or packages without files if pack.Type == godata.REMOTE_PACKAGE || pack.Files.Len() == 0 { continue } // these packages come from invalid/unhandled imports if pack.Files.Len() == 0 { logger.Debug("Skipping package %s, no files to compile.\n", pack.Name) continue } if !pack.Compiled && !pack.HasErrors { compileErrors = !compile(pack) || compileErrors } if pack.HasErrors
{ logger.Error("Can't create library because of compile errors.\n") compileErrors = true }
} else { localPackName = pack.Name } testFileSource += "import \"" + pack.Name + "\"\n" tmpStr = "var test_" + localPackVarName + " = []testing.InternalTest {\n" for _, igf := range *pack.Files { logger.Debug("Test* from %s: \n", (igf.(*godata.GoFile)).Filename) if (igf.(*godata.GoFile)).IsTestFile { for _, istr := range *(igf.(*godata.GoFile)).TestFunctions { tmpStr += "\ttesting.InternalTest{ \"" + pack.Name + "." + istr.(string) + "\", " + localPackName + "." + istr.(string) + " },\n" fnCount++ } } } tmpStr += "}\n\n" if fnCount > 0 { testCalls += "\tfmt.Println(\"Testing " + pack.Name + ":\");\n" + "\ttesting.Main(__regexp__.MatchString, test_" + localPackVarName + ");\n" testArrays += tmpStr } fnCount = 0 tmpStr = "var bench_" + localPackVarName + " = []testing.Benchmark {\n" for _, igf := range *pack.Files { if (igf.(*godata.GoFile)).IsTestFile { for _, istr := range *(igf.(*godata.GoFile)).BenchmarkFunctions { tmpStr += "\ttesting.Benchmark{ \"" + pack.Name + "." + istr.(string) + "\", " + localPackName + "." + istr.(string) + " },\n" fnCount++ } } } tmpStr += "}\n\n" if fnCount > 0 { benchCalls += "\tfmt.Println(\"Benchmarking " + pack.Name + ":\");\n" + "\ttesting.RunBenchmarks(bench_" + localPackVarName + ");\n" testArrays += tmpStr } } testFileSource += "\n" + testArrays // func main() testFileSource += "\nfunc main() {\n" + testCalls + benchCalls + "}\n" testFile, err = os.Create(testGoFile.Filename) if err != nil { logger.Error("Could not create %s: %s\n", testGoFile.Filename, err) os.Exit(1) } testFile.WriteString(testFileSource) testFile.Close() return testPack } /* The compile method will run the compiler for every package it has found, starting with the main package. Returns true if compiled successfully. */ func compile(pack *godata.GoPackage) bool { var argc int var argv []string var argvFilled int var objDir = "" //outputDirPrefix + getObjDir(); // check for recursive dependencies if pack.InProgress { logger.Error("Found a recurisve dependency in %s. This is not supported in Go.\n", pack.Name) pack.HasErrors = true pack.InProgress = false return false } pack.InProgress = true // first compile all dependencies for _, idep := range *pack.Depends { dep := idep.(*godata.GoPackage) if dep.HasErrors { pack.HasErrors = true pack.InProgress = false return false } if !dep.Compiled && (dep.Type == godata.LOCAL_PACKAGE || dep.Type == godata.UNKNOWN_PACKAGE && dep.Files.Len() > 0) { if !compile(dep) { pack.HasErrors = true pack.InProgress = false return false } } } // cgo files (the ones which import "C") can't be compiled // at the moment. They need to be compiled by hand into .a files. if pack.HasCGOFiles() { if pack.HasExistingAFile() { pack.Compiled = true pack.InProgress = false return true } else { logger.Error("Can't compile cgo files. 
Please manually compile them.\n") os.Exit(1) } } // check if this package has any files (if not -> error) if pack.Files.Len() == 0 && pack.Type == godata.LOCAL_PACKAGE { logger.Error("No files found for package %s.\n", pack.Name) os.Exit(1) } // if the outputDirPrefix points to something, subdirectories // need to be created if they don't already exist outputFile := objDir + pack.OutputFile if strings.Index(outputFile, "/") != -1 { path := outputFile[0:strings.LastIndex(outputFile, "/")] dir, err := os.Stat(path) if err != nil { err = os.MkdirAll(path, rootPathPerm) if err != nil { logger.Error("Could not create output path %s: %s\n", path, err) os.Exit(1) } } else if !dir.IsDirectory() { logger.Error("File found in %s instead of a directory.\n", path) os.Exit(1) } } // before compiling, remove any .a file // this is done because the compiler/linker looks for .a files // before it looks for .[568] files if !*flagKeepAFiles { if err := os.Remove(outputFile + ".a"); err == nil { logger.Debug("Removed file %s.a.\n", outputFile) } } // construct compiler command line arguments if pack.Name != "main" { logger.Info("Compiling %s...\n", pack.Name) } else { logger.Info("Compiling %s (%s)...\n", pack.Name, pack.OutputFile) } argc = pack.Files.Len() + 3 if *flagIncludePaths != "" { argc += 2 * (strings.Count(*flagIncludePaths, ",") + 1) } if pack.NeedsLocalSearchPath() || objDir != "" { argc += 2 } if pack.Name == "main" { argc += 2 } argv = make([]string, argc*2) argv[argvFilled] = compilerBin argvFilled++ argv[argvFilled] = "-o" argvFilled++ argv[argvFilled] = outputFile + objExt argvFilled++ if *flagIncludePaths != "" { for _, includePath := range strings.Split(*flagIncludePaths, ",", -1) { argv[argvFilled] = "-I" argvFilled++ argv[argvFilled] = includePath argvFilled++ } } // for _, arg := range argv { // logger.Info(arg) // logger.Info(" ") // } // logger.Info("\n") if pack.NeedsLocalSearchPath() || objDir != "" { argv[argvFilled] = "-I" argvFilled++ if objDir != "" { argv[argvFilled] = objDir } else { argv[argvFilled] = "." } argvFilled++ } if pack.Name == "main" { argv[argvFilled] = "-I" argvFilled++ argv[argvFilled] = "." argvFilled++ } for i := 0; i < pack.Files.Len(); i++ { gf := pack.Files.At(i).(*godata.GoFile) argv[argvFilled] = gf.Filename argvFilled++ } logger.Info(" %s\n", getCommandline(argv[0:argvFilled])) cmd, err := exec.Run(compilerBin, argv[0:argvFilled], os.Environ(), rootPath, exec.DevNull, exec.PassThrough, exec.PassThrough) if err != nil { logger.Error("%s\n", err) os.Exit(1) } waitmsg, err := cmd.Wait(0) if err != nil { logger.Error("Compiler execution error (%s), aborting compilation.\n", err) os.Exit(1) } if waitmsg.ExitStatus() != 0 { pack.HasErrors = true pack.InProgress = false return false } // it should now be compiled pack.Compiled = true pack.InProgress = false return true } /* Calls the linker for the main file, which should be called "main.(5|6|8)". */ func link(pack *godata.GoPackage) bool
{
	var argc int
	var argv []string
	var argvFilled int
	var objDir string = "" //outputDirPrefix + getObjDir();

	// build the command line for the linker
	argc = 4
	if *flagIncludePaths != "" {
		argc += 2
	}
	if pack.NeedsLocalSearchPath() {
		argc += 2
	}
	if pack.Name == "main" {
		argc += 2
	}
	argv = make([]string, argc*3)
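/*
   Aside: compile() above walks each package's Depends list before invoking
   the compiler and uses the InProgress flag to reject recursive imports.
   The sketch below shows just that dependency walk in isolation; the pkg
   type and all names are invented for illustration and are not gobuild's
   own types.
*/
package main

import "fmt"

type pkg struct {
	name       string
	deps       []*pkg
	compiled   bool
	inProgress bool
}

// build compiles dependencies depth-first and reports a cycle,
// mirroring the InProgress/Compiled bookkeeping in compile().
func build(p *pkg) error {
	if p.compiled {
		return nil
	}
	if p.inProgress {
		return fmt.Errorf("recursive dependency in %s", p.name)
	}
	p.inProgress = true
	defer func() { p.inProgress = false }()
	for _, d := range p.deps {
		if err := build(d); err != nil {
			return err
		}
	}
	fmt.Printf("compiling %s\n", p.name) // the real code builds argv and runs the compiler here
	p.compiled = true
	return nil
}

func main() {
	util := &pkg{name: "util"}
	app := &pkg{name: "main", deps: []*pkg{util}}
	if err := build(app); err != nil {
		fmt.Println(err)
	}
}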
/* Creates a new file called _testmain.go and compiles/links it to _testmain. If the -run command line option is given it will also run the tests. In this
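/*
   Aside: based on the template logic above, the generated _testmain.go is a
   small main package that imports each tested package, lists its Test* and
   Benchmark* functions, and hands them to the testing package. The sketch
   below shows that shape using the current testing API; the import path
   example.com/mypkg and the function TestAdd are made-up placeholders.
*/
package main

import (
	"regexp"
	"testing"

	"example.com/mypkg"
)

var tests = []testing.InternalTest{
	{Name: "mypkg.TestAdd", F: mypkg.TestAdd},
}

// matchString lets testing.Main filter tests by regular expression,
// playing the role of __regexp__.MatchString in the generated source.
func matchString(pat, str string) (bool, error) {
	re, err := regexp.Compile(pat)
	if err != nil {
		return false, err
	}
	return re.MatchString(str), nil
}

func main() {
	testing.Main(matchString, tests, nil, nil)
}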