| file_name (large_string, lengths 4–140) | prefix (large_string, lengths 0–39k) | suffix (large_string, lengths 0–36.1k) | middle (large_string, lengths 0–29.4k) | fim_type (large_string, 4 classes) |
|---|---|---|---|---|
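Each row is one fill-in-the-middle example: `prefix` and `suffix` hold the source text around a masked span, `middle` holds the masked span itself, and `fim_type` labels what kind of span was masked (e.g. `identifier_name`). Below is a minimal sketch of putting a row back together, assuming the rows load as dicts with these columns; the dataset identifier passed to `load_dataset` is a placeholder, not the real path.

```python
from datasets import load_dataset  # Hugging Face `datasets` library

# Placeholder dataset identifier -- substitute the actual repo id for this dataset.
ds = load_dataset("user/fim-code-dataset", split="train")

row = ds[0]
# Reassemble the original source file from the three text columns.
original_source = row["prefix"] + row["middle"] + row["suffix"]
print(row["file_name"], row["fim_type"])
print(original_source[:200])
```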
normalizations.py | # Lint as: python3
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Normalization layers."""
from typing import List, Optional, Tuple
import jax
from jax import numpy as jnp
from lingvo.jax import base_layer
from lingvo.jax import py_utils
from lingvo.jax import pytypes
NestedMap = py_utils.NestedMap
WeightInit = py_utils.WeightInit
weight_params = py_utils.weight_params
InstantiableParams = py_utils.InstantiableParams
JTensor = pytypes.JTensor
def compute_moments(
inputs: JTensor,
padding: JTensor,
reduce_over_dims: List[int],
enable_cross_replica_sum_on_tpu: bool = False,
keepdims: bool = False,
) -> Tuple[JTensor, JTensor]:
"""Computes mean and variance over the valid data points in inputs."""
assert inputs.ndim == padding.ndim
rank = inputs.ndim
assert all([0 <= dim < rank for dim in reduce_over_dims])
mask = 1.0 - padding
sum_v = jnp.sum(inputs * mask, axis=reduce_over_dims, keepdims=keepdims)
count_v = jnp.sum(
jnp.ones_like(inputs) * mask, axis=reduce_over_dims, keepdims=keepdims)
if enable_cross_replica_sum_on_tpu:
# TODO(shafey, yonghui): Fetch axis_name from globals.
sum_v = jax.lax.psum(sum_v, axis_name='batch')
count_v = jax.lax.psum(count_v, axis_name='batch')
count_v = jnp.maximum(count_v, 1.0)
mean = sum_v / count_v
sum_vv = jnp.sum(
(inputs - mean) * (inputs - mean) * mask,
axis=reduce_over_dims,
keepdims=keepdims)
if enable_cross_replica_sum_on_tpu:
# TODO(shafey, yonghui): Fetch axis_name from globals.
sum_vv = jax.lax.psum(sum_vv, axis_name='batch')
variance = sum_vv / count_v
return mean, variance
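# Example: for inputs of shape [batch, time, dim] and paddings of shape [batch, time, 1],
# reduce_over_dims=[0, 1] with keepdims=True gives mean and variance of shape [1, 1, dim],
# i.e. per-channel statistics computed only over the non-padded positions.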
class BatchNorm(base_layer.BaseLayer):
"""Batch normalization layer."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('dim', 0, 'Depth of the input/output.')
p.Define(
'decay', 0.999,
'Decay in updating the mean and variance moving average used in'
' batch normalization.')
p.Define(
'enable_cross_replica_sum_on_tpu', False,
'If true, computes global mean and variance across all replicas.'
'Only effective for tpu.')
p.Define(
'use_moving_avg_in_training', False,
'If True, use global moving avg (mean, variance) during training'
' to avoid mismatch between train and eval, which then'
' essentially acts as an adaptive normalization step. When this is'
' set to True, it also disables the use of beta and gamma variables.')
p.Define('set_padded_output_to_zero', True,
'If True, sets the padded outputs to zero.')
return p
def __init__(self, params: InstantiableParams) -> None:
super().__init__(params)
p = self.params
self._epsilon = 0.001
self._decay = p.decay
def _get_weight_shape(self) -> JTensor:
return [self.params.dim]
def create_layer_variables(self) -> None:
p = self.params
beta_pc = weight_params(
shape=self._get_weight_shape(),
init=WeightInit.Constant(0.0),
dtype=p.dtype)
self.create_variable('beta', beta_pc)
# gamma is stored as an offset from 1.0 (applied below as theta.gamma + 1.0), so the zero init here corresponds to an identity scale.
gamma_pc = weight_params(
shape=self._get_weight_shape(),
init=WeightInit.Constant(0.0),
dtype=p.dtype)
self.create_variable('gamma', gamma_pc)
mva = weight_params(
shape=[p.dim],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
collections=[base_layer.REQUIRES_MEAN_SYNC])
self.create_variable('moving_mean', mva, trainable=False)
mvv = weight_params(
shape=[p.dim],
init=WeightInit.Constant(1.0),
dtype=p.dtype,
collections=[base_layer.REQUIRES_MEAN_SYNC])
self.create_variable('moving_variance', mvv, trainable=False)
def _get_default_paddings(self, inputs: JTensor) -> JTensor:
"""Gets the default paddings for an input."""
in_shape = list(inputs.shape)
assert len(in_shape) > 1
in_shape[-1] = 1
return jnp.zeros(in_shape, dtype=inputs.dtype)
def | (self, theta: NestedMap) -> Tuple[JTensor, JTensor]:
p = self.params
if p.use_moving_avg_in_training:
beta = 0.0
gamma = 1.0
else:
beta = theta.beta
gamma = theta.gamma + 1.0
return beta, gamma
def compute_and_update_moments(
self, theta: NestedMap, inputs: JTensor,
paddings: JTensor) -> Tuple[JTensor, JTensor, JTensor, JTensor]:
"""Computes moments and updates state.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs JTensor. Shaped [..., dim].
paddings: The paddings JTensor. Shaped [..., 1], with the same rank as
the input JTensor.
Returns:
Tuple of (mean, variance, beta, gamma).
"""
p = self.params
if self.do_eval:
# The mean and variance used for normalization.
norm_mean, norm_variance = theta.moving_mean, theta.moving_variance
base_layer.add_summary('moving_mean', theta.moving_mean)
base_layer.add_summary('moving_variance', theta.moving_variance)
else:
rank = inputs.ndim
reduce_over_dims = list(range(0, rank - 1))
mean, variance = compute_moments(
inputs,
paddings,
reduce_over_dims,
enable_cross_replica_sum_on_tpu=p.enable_cross_replica_sum_on_tpu,
keepdims=True)
new_moving_mean = theta.moving_mean * p.decay + mean * (1.0 - p.decay)
self.forward_update_var('moving_mean', new_moving_mean)
new_moving_variance = (
theta.moving_variance * p.decay + variance * (1.0 - p.decay))
self.forward_update_var('moving_variance', new_moving_variance)
# Add some summaries for visualization.
base_layer.add_summary('mean', mean)
base_layer.add_summary('variance', variance)
base_layer.add_summary('moving_mean', theta.moving_mean)
base_layer.add_summary('moving_variance', theta.moving_variance)
if p.use_moving_avg_in_training:
# Use the global statistics for normalization.
norm_mean = theta.moving_mean
norm_variance = theta.moving_variance
else:
# Use the batch statistics for normalization.
norm_mean = mean
norm_variance = variance
beta, gamma = self._get_beta_gamma(theta)
return norm_mean, norm_variance, beta, gamma
def fprop(self,
theta: NestedMap,
inputs: JTensor,
paddings: Optional[JTensor] = None) -> JTensor:
"""Apply batch normalization.
Args:
theta: A `.NestedMap` object containing weights' values of this layer and
its children layers.
inputs: The inputs JTensor. Shaped [..., dim].
paddings: The paddings JTensor. Shaped [..., 1].
Returns:
Output after applying batch normalization, with the same shape as
'inputs'.
"""
p = self.params
inputs, paddings = self._cast_to_fprop_dtype((inputs, paddings))
if paddings is None:
paddings = self._get_default_paddings(inputs)
assert inputs.ndim == paddings.ndim
assert paddings.shape[-1] == 1
norm_mean, norm_variance, beta, gamma = self.compute_and_update_moments(
theta, inputs, paddings)
inv = gamma / jnp.sqrt(norm_variance + self._epsilon)
bn_output = (inputs - norm_mean) * inv + beta
if p.set_padded_output_to_zero:
bn_output *= 1.0 - paddings
return bn_output
class LayerNorm(base_layer.BaseLayer):
"""Layer normalization."""
@classmethod
def Params(cls) -> InstantiableParams:
p = super().Params()
p.Define('input_dims', 0, 'Depth of the input to the network.')
p.Define('epsilon', 1e-6, 'Tiny value to guard rsqrt.')
p.Define('scale', True, 'Whether to use a learned scaling.')
p.Define('bias', True, 'Whether to use bias.')
return p
def create_layer_variables(self) -> None:
super().create_layer_variables()
p = self.params
wp = p.weight_split_dims_mapping
wp_scale = wp.wt
if p.device_mesh is not None and wp.wt is None:
# Simply replicate the weights.
wp_scale = [-1]
if p.scale:
self.create_variable(
'scale',
weight_params(
shape=[p.input_dims],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
device_mesh=p.device_mesh,
tensor_split_dims_mapping=wp_scale))
if p.bias:
wp_bias = wp_scale # bias should use the same sharding as scale.
self.create_variable(
'bias',
weight_params(
shape=[p.input_dims],
init=WeightInit.Constant(0.0),
dtype=p.dtype,
device_mesh=p.device_mesh,
tensor_split_dims_mapping=wp_bias))
def fprop(self, theta: NestedMap, inputs: JTensor) -> JTensor:
"""Apply layer norm to inputs.
Args:
theta: A NestedMap object containing weights' values of this layer and its
children layers.
inputs: The inputs JTensor. Shaped [..., input_dims].
Returns:
Layer normalized input.
"""
p = self.params
mean = jnp.mean(inputs, axis=[-1], keepdims=True)
var = jnp.mean(jnp.square(inputs - mean), axis=[-1], keepdims=True)
normed_inputs = (inputs - mean) * jax.lax.rsqrt(var + self.params.epsilon)
if p.scale:
normed_inputs *= (1 + theta.scale)
if p.bias:
normed_inputs += theta.bias
return normed_inputs
| _get_beta_gamma | identifier_name |
xmp_dashboard.py | # -*- coding: UTF-8 -*-
# py_md5_xmp
# The script takes a given xmp file and computes the md5 sum of its data file.
#
# md5 sum in an Adobe LR xmp file:
# PelleTags:PelleTag1_md5sum="935c0eb6242e80c95001368b9d53b421"
#
# Exiftool xmp file:
# <PelleTags:PelleTag1_md5sum>572737b08d11666255afb41b2c0443cb</PelleTags:PelleTag1_md5sum>
# cur.execute('''CREATE TABLE dashboard (id INTEGER PRIMARY KEY, main_path TEXT, last_run TEXT, tot_xmp INTEGER, ok_xmp INTEGER,nok_xmp INTEGER, missing_raw INTEGER)''')
# cur.execute("INSERT INTO dashboard (main_path, last_run, tot_xmp, ok_xmp, nok_xmp, missing_raw) VALUES(?,?,?,?,?,?)", ['H:\Pelle Temp', 0, 0, 0, 0, 0])
# cur.execute("UPDATE dashboard SET last_run = ? WHERE id = 1", ['2018-11-07'])
#
# cur.execute('''CREATE TABLE md5_results (id INTEGER PRIMARY KEY, file_path TEXT NOT NULL, file_name TEXT NOT NULL, md5_file TEXT, md5_calc TEXT, ok_nok TEXT, date TEXT)''')
# cur.execute("INSERT INTO md5_results (file_path, file_name, md5_file, md5_calc, ok_nok, date) VALUES(?,?,?,?,?,?)", ['C:\Pelle\Dropbox\Hack - xmp dashboard', 'testfil.xmp', '0', '0', '0', '2018-11-07'])
#
# ToDo's:
# - What a mess it became when I tried to OOP-ify everything. It is now completely unintuitive, stuck halfway between procedural and OOP...
# - Multithreading? Start several threads when the folders are on different disks? CPU load sits around 10%
#   while disk load is at 40-90%. Make the threading a bit intelligent so it does not start with folders that are on the same disk.
# - Add the def _main == etc. structure. Seems OOP-ish and good.
# - Add a few more try-excepts, definitely around every sql execute.
# - Some kind of printout so intermediate results can be followed. In a new window?
#   Maybe something similar to what I have for the verify-xmp hack.
#   Everything shows up in IDLE anyway?
# Skipped:
# - A Cancel Run button. - Could not get it to work, need to read up more.
# v0.1 191106
# - Reading xmp files and comparing against the computed md5 of the corresponding file now works.
# v0.2 191107
# - sqlite works ok
# - Counts the number of xmp files present in each main folder.
# - There is now a table to use.
# v0.3 191107
# - The dashboard itself is now displayed
# v0.4 191107
# - Make events that populate the database
# v0.5 1911115
# - Now works with dynamic generation of the drop-down boxes. The values are stored in a dict. Turned out really neat.
# v0.6 191111
# - Now works for New Run (and Do Nothing). It deletes the existing values in the table and fills in new ones.
# v0.7 191111
# - New run with deletion of everything old in the db seems to work now.
# v0.8 191114
# - Cancel Run window. That means two windows. Probably need to restructure everything...
# - Implemented a Cancel Run button, but could not get it to work. Stepping to 0.9 and removing it.
# v0.9 191114
# - Added a printout for every 1000 files processed. That should speed things up a bit.
# - Started looking at PyQt, seems much more stable than Tkinter. We'll see if I get it working.
# - Fixed a bug where lower-case extensions such as mp4 were missed because the extension list is upper case. Added
#   upper() to the comparison.
# v0.10 191118
# - Fixed: a counter was not incremented in the right place. There is now a bug when no raw file exists, giving an index out of bounds. This happens when it is
#   a DNG file, for example; DNG is not among the extensions. Fixed that, but now there is an annoying printout for every
#   file it finds that is not in the RAW list. An xmp can sit next to a jpg, for example, which should not produce any printout.
# - Fixed: added a separate counter. I added a printout every xxx files showing how many have been processed. Annoyingly it printed for each of the first
#   100 files. My div/mod did not work the way I expected.
# - Added a break so that once the md5 has been computed for a file it exits the loop. Previously it kept stepping through the whole folder even after finding the right file.
# - Take time() at the start of each folder and at the end and save it. Then print a small summary.
# - At each start the folder name is printed. Could the number of files be printed as well? They are counted beforehand. Something like "approx x files of y"?
# - Added a new parameter, vital_stats, which when set prints the most important information.
# - Various other small fixes.
# v0.11 191118
# - Optimized the file-comparison loop a bit for readability.
# - Added a bit more text at start-up and the elapsed time in seconds for each limited_printouts interval.
# - Works fairly well now, so stepping the version.
# v0.12 191118
# - What a mess. Removed tkinter and some of the classes I had made; probably better to go back to more procedural code.
# - I count the cases where there is an xmp file without a corresponding RAW, but it is not shown anywhere.
# - A small counter showing how many days ago the folder was last run?
# - PyQt
# - Printing intermediate times in seconds works well; a delta time could be given as well.
# - Threading: needs some intelligence so the threads are spread over different hard drives, and the number of threads is limited to the number of drives.
# v0.12 branch THREAD 191118
# - Threading actually seems to work. But gosh, the code is messy now!
# v0.13 branch THREAD 191119
# - Initial tests show it cannot keep the disks apart, so it starts two folders on the same disk.
#   The threads therefore have to be started manually, waiting for them to finish.
# v0.14 191120
# - It works now! It looks at the list of folders to be run and then distributes the threads across
#   the hard drives so that two threads never run on the same disk at the same time.
# - This is now the main branch; removing THREAD.
# - Could use a clean-up, it is really messy right now.
# - That Cancel button is probably worth tackling together with PyQt.
# - Delta seconds for the printout after every x files. But it has to be thread specific, i.e. each thread has its own counter.
# - The missing raw column in the dashboard is not being filled in.
# - Test of GitHub; this one is for W12.
# v0.15 191127
# - Reworked so that instead of one version per computer there is a config file, connect_sqlite_db.py, which is
#   called. It contains the right string for connecting to the right database.
# v1.0 191127
# Second attempt in git; dropping the version handling from the file name and stepping to 1.0 while at it.
# 191127
# - In the git mess the files with the following fixes were lost:
# -- Added display of the number of files with missing RAW.
# --- Updated the db on LM and W12; it is also needed on ACTUAL and W10,
#     i.e. dig out the sql and test it.
# --- The GUI needs fixing; the dashboard itself lacks the missing RAW column.
# - All of the above is now fixed; still need to add the SQL to update the db on W10 & ACTUAL.
# 191129
# - Added the number of days since the last run.
# - Added delta seconds since each limited_printouts.
import os
from os.path import join
from operator import itemgetter, attrgetter
from tkinter import *
import fnmatch
import sys
import math
import threading
import subprocess
import fileinput
import datetime
import time
import hashlib
import sqlite3
from tkinter import ttk # This one contains the combobox (drop-down).
# md5Checksum is a function that lives in a separate file, generate_md5_Checksum_def.
from generate_md5_Checksum_def import md5Checksum
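# md5Checksum is assumed here to return the hex md5 digest of the given file (it is compared
# against the 32-character sums stored in the xmp files); conceptually something like:
#   m = hashlib.md5()
#   with open(file_path, 'rb') as fh:
#       for chunk in iter(lambda: fh.read(8192), b''):
#           m.update(chunk)
#   return m.hexdigest()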
from connect_sqlite_db import connect_sqlite_db
def index_containing_substring(the_list, substring): # Returns the md5 string found on the matching line, or 0 if no md5 is present.
for i, s in enumerate(the_list):
if substring in s:
if substring[0] == '<':
md5 = s[s.find(substring)+28:s.find(substring)+60]
else:
md5 = s[s.find(substring)+28:s.find(substring)+60]
return md5
return 0
def folderThread(main_folder):
global dashboard
global main_folders
global combo
global combo_var
global verbose
global cur
print("Thread started at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
print('Printout levels: ' + str(vital_stats) + ' ' + str(verbose) + ' ', flush=True)
print('Thread identity: ' + str(threading.get_ident()))
if vital_stats: print('Thread identity: ' + str(threading.get_ident()) + " Starting with " + main_folder + " and " + combo_var[main_folder] + " containing " + str(xmp_tracker[main_folders.index(main_folder)][1]) + " xmp files.")
md5_OK = 0
md5_NOK = 0
md5_not_found = 0
md5_missing_raw = 0
xmp_file_counter = 0
results = []
time1 = time.time()
try: # Clear db here from all rows with path
sql = "DELETE FROM md5_results WHERE file_path LIKE '" + main_folder + "%'"
cur.execute(sql)
if verbose: print("Thread identity: " + str(threading.get_ident()) + " Executed sql: " + sql)
if verbose: print("Thread identity: " + str(threading.get_ident()) + " First sql segment: Rows returned from execute = " + str(cur.rowcount), flush=True)
conn.commit()
except sqlite3.Error as error:
if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to delete record from sqlite table", error, flush=True)
try: # Update dashboard since I've removed all files for folder main_folder.
cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ? WHERE main_path = '" + main_folder + "'",
(todays_date, 0, 0, 0, 0))
if verbose: print(" Thread identity: " + str(threading.get_ident()) + "First sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount), flush=True)
conn.commit()
except sqlite3.Error as error:
if print_errors: print(" Thread identity: " + str(threading.get_ident()) + "Failed to UPDATE dashboard from sqlite table", error, flush=True)
time2 = time.time()
for subdir, dirs, files in os.walk(main_folder):
for file in files:
found_raw = 0
try:
if file.endswith('xmp'):
xmp_file_counter +=1
if verbose: print ('Found file: ' + file, flush=True)
f = open(subdir+'\\'+ file,"r")
list_file = list(f)
md5_index = [index_containing_substring(list_file, '<PelleTags:PelleTag1_md5sum>'),
index_containing_substring(list_file, 'PelleTags:PelleTag1_md5sum=')]
if verbose: print(md5_index)
if any(md5_index): # The xmp file contains an md5 sum.
res = [idx for idx, val in enumerate(md5_index) if val != 0] # Tells which type of xmp encoding it is.
md5_xmp = md5_index[res[0]]
for raw_file in os.listdir(subdir): # Find the corresponding RAW-file to generate md5 sum.
if file[:-3] in raw_file[:-3]: # Slice off the extensions to see whether they share the same base name.
if raw_file[-3:].upper() in raw_extensions:
found_raw = 1
md5_calculated = md5Checksum(subdir + '\\' + raw_file)
if verbose: print ('Calculated md5 for file ' + raw_file)
if verbose: print (md5_calculated)
if md5_calculated == md5_xmp:
results.append((subdir, file, md5_xmp, md5_calculated, 'OK', todays_date))
md5_OK +=1
if verbose: print("md5 stämmer " + " subdir " + subdir + " file " + file)
else:
results.append((subdir, file, md5_xmp, md5_calculated, 'NOK', todays_date))
md5_NOK +=1
if print_errors: print("md5 fail: " + subdir + "\\" + str(raw_file))
break
if not found_raw: # This is executed after the break. I think...
if print_errors: print("xmp without matching raw: " + subdir + "\\" + str(file))
results.append((subdir, file, 'No valid raw file found', '-', 'NOK', todays_date))
md5_missing_raw +=1
else: # index_containing_substring returns zero, PelleTags not present in xmp-file
if print_errors: print("Error, no md5 sum in file " + subdir + "\\" + file)
md5_not_found += 1
results.append((subdir, file, 'No md5 in xmp', '-', 'NOK', todays_date)) # '-' placeholder: md5_calculated is never computed in this branch.
f.close()
if xmp_file_counter % limited_printouts == 0:
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + ", " + str(xmp_file_counter) + " xmp-files processed in "
+ str(round(time.time()-time1)) + " seconds, delta time " + str(round(time.time()-time2)) + " seconds, local time " +
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
time2 = time.time()
except:
if print_errors:
print ('Unexpected fail for file: ' + file)
print (sys.exc_info())
f.close()
try:
cur.executemany("INSERT INTO md5_results ('file_path', 'file_name', 'md5_file', 'md5_calc', 'ok_nok', 'date') VALUES (?,?,?,?,?,?)", results)
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute INSERT INTO md5_resutls = "
+ str(cur.rowcount))
except sqlite3.Error as error:
if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to INSERT INTO md5_results sqlite table", error)
try:
cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ?, missing_xmp = ? WHERE main_path = '" + main_folder + "'",
(todays_date, md5_OK+md5_NOK, md5_OK, md5_NOK,md5_missing_raw,md5_not_found))
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount))
except sqlite3.Error as error:
if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to UPDATE dashboard from sqlite table", error)
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Run results for path: " + main_folder + " md5_OK=" + str(md5_OK) + " md5_NOK=" +
str(md5_NOK) + " md5__not_found=" + str(md5_not_found) + " md5_missing_raw=" + str(md5_missing_raw) + ", in " + str(round(time.time()-time1)) +
" seconds, finished at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ".")
def runrunrun():
global combo
global combo_var
global master
for key, value in combo.items():
combo_var[key] = combo[key].get() #combo values gets destroyed when closing GUI, therefore copy values.
master.destroy() # Close GUI and continue after mainloop
def quitquit():
conn.close()
exit()
def xmp_count():
# Check how many xmp files there are in each main folder.
global main_folders
global xmp_tracker
global verbose
xmp_file_count = 0
for main_folder in main_folders:
for subdir, dirs, files in os.walk(main_folder):
for file in files:
if file[-3:] == 'xmp':
xmp_file_count += 1
xmp_tracker.append([main_folder,xmp_file_count])
if vital_stats: print(main_folder + " has " + str(xmp_file_count) + " xmp files")
xmp_file_count = 0
def build_dashboard():
# Build dashboard
# Creating main tkinter window/toplevel
global dashboard
global main_folders
global combo
global combo_var
global xmp_tracker
global verbose
global master
date_format = "%Y-%m-%d"
todays_date = datetime.date.today().strftime(date_format)
max_width = max(len(x) for x in main_folders) # needed to size the cell with path
# this will create a label widget
col_1 = Label(master, relief=RIDGE, text = "Folder path", width = max_width)
col_2 = Label(master, relief=RIDGE, text = "Last run", width = 12)
col_3 = Label(master, relief=RIDGE, text = "Days since last run", width = 18)
col_4 = Label(master, relief=RIDGE, text = "Tot last count xmp")
col_5 = Label(master, relief=RIDGE, text = "Tot db xmp")
col_6 = Label(master, relief=RIDGE, text = "OK xmp")
col_7 = Label(master, relief=RIDGE, text = "NOK xmp")
col_8 = Label(master, relief=RIDGE, text = "Missing RAW")
col_9 = Label(master, relief=RIDGE, text = "Missing xmp")
col_10 = Label(master, relief=RIDGE, text = "Start/Restart")
# grid method to arrange labels in respective
# rows and columns as specified
col_1.grid(row = 0, column = 0, sticky = W, pady = 2)
col_2.grid(row = 0, column = 1, sticky = W, pady = 2)
col_3.grid(row = 0, column = 2, sticky = W, pady = 2)
col_4.grid(row = 0, column = 3, sticky = W, pady = 2)
col_5.grid(row = 0, column = 4, sticky = W, pady = 2)
col_6.grid(row = 0, column = 5, sticky = W, pady = 2)
col_7.grid(row = 0, column = 6, sticky = W, pady = 2)
col_8.grid(row = 0, column = 7, sticky = W, pady = 2)
col_9.grid(row = 0, column = 8, sticky = W, pady = 2)
col_10.grid(row = 0, column = 9, sticky = W, pady = 2)
for ii, each_row in enumerate(dashboard):
# print(each_row)
# print(ii)
col_1 = Label(master, text = each_row[1])
col_2 = Label(master, text = each_row[2])
col_3 = Label(master, text = (datetime.datetime.strptime(todays_date, date_format)- datetime.datetime.strptime(each_row[2], date_format)).days)
col_4 = Label(master, text = xmp_tracker[ii][1])
col_5 = Label(master, text = each_row[3])
col_6 = Label(master, text = each_row[4])
col_7 = Label(master, text = each_row[5])
col_8 = Label(master, text = each_row[6])
col_9 = Label(master, text = each_row[7])
col_1.grid(row = ii+1, column = 0, sticky = W, pady = 5, padx = 5)
col_2.grid(row = ii+1, column = 1, sticky = W, pady = 5, padx = 5)
col_3.grid(row = ii+1, column = 2, sticky = W, pady = 5, padx = 5)
col_4.grid(row = ii+1, column = 3, sticky = W, pady = 5, padx = 5)
col_5.grid(row = ii+1, column = 4, sticky = W, pady = 5, padx = 5)
col_6.grid(row = ii+1, column = 5, sticky = W, pady = 5, padx = 5)
col_7.grid(row = ii+1, column = 6, sticky = W, pady = 5, padx = 5)
col_8.grid(row = ii+1, column = 7, sticky = W, pady = 5, padx = 5)
col_9.grid(row = ii+1, column = 8, sticky = W, pady = 5, padx = 5)
valores=("Do nothing", "Restart", "New run")
# key_name[1] contains the path, which becomes the key in the dict; the value is whatever was chosen in the drop-down.
for index, key_name in enumerate(dashboard):
combo[key_name[1]] = ttk.Combobox(master, values=valores)
combo[key_name[1]].set("Do nothing ")
combo[key_name[1]].grid(row = 1+index, column = 9, sticky = W, pady = 6, padx = 5)
# button widget
b1 = Button(master, text = "Cancel", width = 9, command=quitquit)
b2 = Button(master, text = "Go", width = 9, command=runrunrun)
# arranging button widgets
b1.grid(row = ii+2, column = 9, sticky = W)
b2.grid(row = ii+2, column = 9, sticky = E)
# infinite loop which can be terminated
# by keyboard or mouse interrupt
mainloop()
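# get_list_of_folders builds one batch of folders per pass: a folder is added only if its drive
# letter (the first character of the path) is not already represented in the batch, so no two
# threads in the same batch hit the same physical disk.
# E.g. ['C:\a', 'C:\b', 'G:\c'] -> first batch ['C:\a', 'G:\c'], leaving ['C:\b'] for the next pass.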
def get_list_of_folders(main_folders_2):
tmp = []
tmp.append(main_folders_2[0])
for ii in range(len(main_folders_2)):
if main_folders_2[ii] not in tmp and main_folders_2[ii][0] not in [item[0] for item in tmp]:
tmp.append(main_folders_2[ii])
return tmp
def run_thru_folders():
global dashboard
global main_folders
global combo
global combo_var
global verbose
global vital_stats
global cur
| _folders = []
current_run_list = []
l3 = []
for main_folder in main_folders:
if combo_var[main_folder] == "New run":
new_run_folders.append(main_folder)
else:
if vital_stats: print("For folder " + main_folder + ": " + combo_var[main_folder])
if vital_stats: print('new_run_folders: ')
if vital_stats: print(new_run_folders)
while new_run_folders:
current_run_list = get_list_of_folders(new_run_folders)
if vital_stats: print('current_run_list: ')
if vital_stats: print(current_run_list)
if l3 is not None:
l3 = [x for x in new_run_folders if x not in current_run_list]
new_run_folders = l3
iii = 0
thread_list = []
for current_folder in current_run_list:
thread_list.append(threading.Thread(target=folderThread, args=(current_folder,)))
# Starting threads
thread_list[iii].start()
print('Started thread number ' + str(iii))
print("Base identity: " + str(thread_list[iii]) + " ")
time.sleep(3) # Think this is needed to give time to db transactions at the start of a run.
iii += 1
for xxx in range(iii):
thread_list[xxx].join()
verbose = 1
vital_stats = 1
limited_printouts = 1000
print_errors = 1
md5_index = []
xmp_tracker = []
combo = {}
combo_var = {}
todays_date = str(datetime.date.today())
raw_extensions = ['CR2','NEF','3FR','ARW','SRF','SR2','CRW','IIQ','EIP','DCR','K25','KDC','ERF','MEF','MOS','MRW','NRW',
'ORF','PEF','RAF','RAW','RW2','RWL','RWZ','X3F','MOV','MP4','AVI','WMV','M4V','MPG','3GP','3G2']
#conn = sqlite3.connect('C:\Pelle\Dashboard_Python\dashboard_md5.sql3', check_same_thread=False) # LeanMean
#conn = sqlite3.connect('G:\PelleHack\Python_Dashboard\dashboard_md5.sql3', check_same_thread=False) # W12
conn = connect_sqlite_db()
cur = conn.cursor()
cur.execute("SELECT * FROM dashboard")
dashboard = cur.fetchall()
#print(dashboard)
main_folders = [dashboard[i][1] for i in range(len(dashboard))] # Extract the paths to its own list.
print(main_folders)
xmp_count()
master = Tk()
build_dashboard()
run_thru_folders()
conn.commit()
conn.close()
|
new_run | identifier_name |
xmp_dashboard.py | # -*- coding: UTF-8 -*-
# py_md5_xmp
# Scriptet tar en given xmp-fil och beräknar md5-summan för dess datafil.
#
# md5 summa i Adobe LR xmp-fil:
# PelleTags:PelleTag1_md5sum="935c0eb6242e80c95001368b9d53b421"
#
# Exiftool xmp-fil:
# <PelleTags:PelleTag1_md5sum>572737b08d11666255afb41b2c0443cb</PelleTags:PelleTag1_md5sum>
# cur.execute('''CREATE TABLE dashboard (id INTEGER PRIMARY KEY, main_path TEXT, last_run TEXT, tot_xmp INTEGER, ok_xmp INTEGER,nok_xmp INTEGER, missing_raw INTEGER)''')
# cur.execute("INSERT INTO dashboard (main_path, last_run, tot_xmp, ok_xmp, nok_xmp, missing_raw) VALUES(?,?,?,?,?,?)", ['H:\Pelle Temp', 0, 0, 0, 0, 0])
# cur.execute("UPDATE dashboard SET last_run = ? WHERE id = 1", ['2018-11-07'])
#
# cur.execute('''CREATE TABLE md5_results (id INTEGER PRIMARY KEY, file_path TEXT NOT NULL, file_name TEXT NOT NULL, md5_file TEXT, md5_calc TEXT, ok_nok TEXT, date TEXT)''')
# cur.execute("INSERT INTO md5_results (file_path, file_name, md5_file, md5_calc, ok_nok, date) VALUES(?,?,?,?,?,?)", ['C:\Pelle\Dropbox\Hack - xmp dashboard', 'testfil.xmp', '0', '0', '0', '2018-11-07'])
#
# ToDo's:
# - Fanken vilken röra det blev när jkjag försökte OOPsa allt. Nu är helt icke-intuitivt med ett mellan tinng mellan procedur och OOPs...
# - Multithreading? Starta flera threads om min folder ligger på olika diskar? Man ser ju att PCU lasten är runt 10%
# medans disklasten liger på 40-90%. Gör threadandet lite intelligent så den inte startar med mapar som ligger på samma disk.
# - Gör den def _main == osv. Verkar OOPsigt och bra.
# - Gör lite fler try-except, definitivt alla sql execute.
# - Någon form av utskrift så man kan följa mellanresultat. I ett nytt fönster?
# Kanske ngt liknande det jag har till verify xmp-hacket.
# Allt syns ju IDLE?
# Skippade:
# - En Cancel Run-knapp. - Fick det inte att lira, måste läsa på mer.
# v0.1 191106
# - Nu funkar läsa in xmp-filer och jämföra med beräknad md5 för motsvarande fil.
# v0.2 191107
# - sqlite fungerar ok
# - Räknar antalet närvarande xmp-filer i varje main folder.
# - Finns nu en tabell man använder.
# v0.3 191107
# - Själva dashboarden visas nu
# v0.4 191107
# - Göra event som fyller på databasen
# v0.5 1911115
# - Funkar nu med dynamisk generering av drop down boxarna. Man lagrar värdena i ett dict. Blev riktigt snyggt
# v0.6 191111
# - Funkar nu för New Run (och Do Nothing), Den raderar existerande värden i tabellen och fyller på med nya
# v0.7 191111
# - New run med delete allt gammalt i db verkar funka nu
# v0.8 191114
# - Cancel Run-fönster. Måste alltså ha två fönster. Nog strukturera om allt...
# - Implementerade en Cancel Run-knapp, men fick det inte att lira. Stegar till 0.9 och tar bort den.
# v0.9 191114
# - Lade till så att man får en utskrift var 1000 fil man hanterar. Det borde snabb upp lite.
# - Började titta på PyQt, verkar mycket stabilare än Tkinter. Får se om jag får ordning på det.
# - Fixade ett fel där lower case av t.ex. mp4 inte togs med eftersom listan med filändelser är versaler. Lade
# till upper() i jämförelsen.
# v0.10 191118
# - Fixat, var en räknare som inte stegade på rätt ställe. Nu finns det ett fel om det inte finns en raw-fil, då blir det ett index out of bounds. Detta händer när det är
# en DNG-fil, tex. DNG finns inte med bland filändelserna. Fixade det men nu blir det en irriterande utskrift för varje
# fil den hittar som inte finns i RAW-listan. En xmp kan ju ha en jpg bredvid sig t.ex. som inte skall generera ngn utskrift.
# - Fixat, lade till en separat räknare. Jag lade ju till att den skriver ut var xxx fil att hur många den processat. Irriterande skriver den det för de första
# 100 filerna. Min div/mod verkar inte funka som jag tänkte mig.
# - Lade till en break så när den räknat md5 för en fil så hoppar den ut loopen. Tidigare fortsatte den att stega igenom hela mappen även om den hiottat rätt fil.
# - Ta time() vid start av vartje mapp, och vi slkutet och spara. Skriv sedan ut en liten summering.
# - Vid varje start skriver jag ut mappnamnet. Kan man skriva ut antalet filer också? JAg räknar ju dem innan. Samma via Approxxx fuiler av yy?
# - Lade till en ny parameter vital_stats som om satt skriver ut det viktigaste.
# - Lite andra småfix.
# v0.11 191118
# - Optimerade lite för läsbarhet i loopen med filjämförelser.
# - Lade till lite mer text i starten och förlupen tid i sekunder för varje limited_printouts intervall.
# - Nu funkar det ganska bra, så stegar.
# v0.12 191118
# - Vilken röra. Tog bort tkinter och några av klasserna jag gjort, nog bättre att gå tillbaka till mer procedural kod.
# - Jag räknar antalet fall där jag har en xmp-fil utan tillhörande RAW,men jag visar det inte ngnstans.
# - En liten räknare som visar hur många dagar sen det var man körde mappen?
# - PyQt
# - Ange sekunder mellantider funkar bra, man kunde ge delta tid också.
# - Threading, man måste ha någon intelligens så man lägger ut dem på olika hårddiskar, och begränsar antalet trådar till antalet hårddiskar.
# v0.12 branch THREAD 191118
# - Threading verkar faktiskt funka. Men jisses vad rörig koden är nu!
# v0.13 branch THREAD 191119
# - Initiala tester visar att den inte klarar av att separera diskarna, så den startar två mappar på samma disk.
# Måste alltså starta threadsen manuellt, och vänta på att de blir klara.
# v0.14 191120
# - Det funkar nu! Den tittar på listan över mappar som skall köras, och fördelar sedan threadsen över
# hårddiskarna så att det aldrig körs två threads samtidigt på samma disk.
# - Detta blir nu huvudbranchen, tar bort THREAD.
# - Skulle behöva snygga till det, nu är det riktigt grötigt.
# - Den där Cancel-knappen är nog bra att ge sig på tillsammans med PtQt.
# - Delta sekunder för utskriften efter x antal filer. Men den måste vara thread-specifik, dvs varje thread har sin egen räknare.
# - Jag fyller inte i missing raw kolumnen i dashboard.
# - Test av GitHub, denna skall vara för W12.
# v0.15 191127
# - Gjorde om så att istället för att ha en version för varje dator så gjorde jag en config-fil, connect_sqlite_db.py, som jag
# anropar. I den finns rätt sträng för att connecta till rätt databas.
# v1.0 191127
# Andra försöket i git, tar bort ver-hantering i namnet, och passar på att stega till 1.0
# 191127
# - I Gityran så slarvade jag bort filerna med följande fix:
# -- Lade till att den visar antalet filer med missing RAW.
# --- Uppdaterade db på LM och W12, behövs ju på ACTUAL och W10 också,
# dvs ta fram sql:en och testa.
# --- GUIt behöver fixas, själva Dashboarden saknar klumnen missing RAW.
# - Allt ovan nu fixat, behöver lägga till SQL för att uppdatera db på W10&ACTUAL
# 191129
# - Lade till antal dagar sedan senaste körningen.
# - Lade till delta seconds sedan varje limited_printouts.
import os
from os.path import join
from operator import itemgetter, attrgetter
from tkinter import *
import fnmatch
import sys
import math
import threading
import subprocess
import fileinput
import datetime
import time
import hashlib
import sqlite3
from tkinter import ttk # Denna innehåller comboboxen - drop down.
# generate_md5_Checksum_def är en funktion som ligger i en separat fil,
from generate_md5_Checksum_def import md5Checksum
from connect_sqlite_db import connect_sqlite_db
def index_containing_substring(the_list, substring): # returns the line number of the md5 sum, zero if no md5.
for i, s in enumerate(the_list):
if substring in s:
if substring[0] == '<':
md5 = s[s.find(substring)+28:s.find(substring)+60]
else:
md5 = s[s.find(substring)+28:s.find(substring)+60]
return md5
return 0
def folderThread(main_folder):
global dashboard
global main_folders
global combo
global combo_var
global verbose
global cur
print("Thread started at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
print('Printout levels: ' + str(vital_stats) + ' ' + str(verbose) + ' ', flush=True)
print('Thread identity: ' + str(threading.get_ident()))
if vital_stats: print('Thread identity: ' + str(threading.get_ident()) + " Starting with " + main_folder + " and " + combo_var[main_folder] + " containing " + str(xmp_tracker[main_folders.index(main_folder)][1]) + " xmp files.")
md5_OK = 0
md5_NOK = 0
md5_not_found = 0
md5_missing_raw = 0
xmp_file_counter = 0
results = []
time1 = time.time()
try: # Clear db here from all rows with path
sql = "DELETE FROM md5_results WHERE file_path LIKE '" + main_folder + "%'"
cur.execute(sql)
if verbose: print("Thread identity: " + str(threading.get_ident()) + " Executed sql: " + sql)
if verbose: print("Thread identity: " + str(threading.get_ident()) + " First sql segment: Rows returned from execute = " + str(cur.rowcount), flush=True)
conn.commit()
except sqlite3.Error as error:
if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to delete record from sqlite table", error, flush=True)
try: # Update dashboard since I've removed all files for folder main_folder.
cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ? WHERE main_path = '" + main_folder + "'",
(todays_date, 0, 0, 0, 0))
if verbose: print(" Thread identity: " + str(threading.get_ident()) + "First sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount), flush=True)
conn.commit()
except sqlite3.Error as error:
if print_errors: print(" Thread identity: " + str(threading.get_ident()) + "Failed to UPDATE dashboard from sqlite table", error, flush=True)
time2 = time.time()
for subdir, dirs, files in os.walk(main_folder):
for file in files:
found_raw = 0
try:
if file.endswith('xmp'):
xmp_file_counter +=1
if verbose: print ('Found file: ' + file, flush=True)
f = open(subdir+'\\'+ file,"r")
list_file = list(f)
md5_index = [index_containing_substring(list_file, '<PelleTags:PelleTag1_md5sum>'),
index_containing_substring(list_file, 'PelleTags:PelleTag1_md5sum=')]
if verbose: print(md5_index)
if any(md5_index): # The xmp file contains an md5 sum.
res = [idx for idx, val in enumerate(md5_index) if val != 0] # Tells which type of xmp encoding it is.
md5_xmp = md5_index[res[0]]
for raw_file in os.listdir(subdir): # Find the corresponding RAW-file to generate md5 sum.
if file[:-3] in raw_file[:-3]: # Slice off the extensions to check whether the names match.
if raw_file[-3:].upper() in raw_extensions:
found_raw = 1
md5_calculated = md5Checksum(subdir + '\\' + raw_file)
if verbose: print ('Calculated md5 for file ' + raw_file)
if verbose: print (md5_calculated)
if md5_calculated == md5_xmp:
results.append((subdir, file, md5_xmp, md5_calculated, 'OK', todays_date))
md5_OK +=1
if verbose: print("md5 stämmer " + " subdir " + subdir + " file " + file)
else:
results.append((subdir, file, md5_xmp, md5_calculated, 'NOK', todays_date))
md5_NOK +=1
if print_errors: print("md5 fail: " + subdir + "\\" + str(raw_file))
break
if not found_raw: # Reached after the raw-file loop; found_raw is still 0 if no matching raw file was seen.
if print_errors: print("xmp without matching raw: " + subdir + "\\" + str(file))
results.append((subdir, file, 'No valid raw file found', '-', 'NOK', todays_date))
md5_missing_raw +=1
else: # index_containing_substring returns zero, PelleTags not present in xmp-file
if print_errors: print("Error, no md5 sum in file " + subdir + "\\" + file)
md5_not_found += 1
results.append((subdir, file, 'No md5 in xmp', '-', 'NOK', todays_date)) # md5_calculated is undefined here, so store '-' like the missing-raw case.
f.close()
if xmp_file_counter % limited_printouts == 0:
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + ", " + str(xmp_file_counter) + " xmp-files processed in "
+ str(round(time.time()-time1)) + " seconds, delta time " + str(round(time.time()-time2)) + " seconds, local time " +
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
time2 = time.time()
except:
if print_errors:
print ('Unexpected fail for file: ' + file)
print (sys.exc_info())
f.close()
try:
cur.executemany("INSERT INTO md5_results ('file_path', 'file_name', 'md5_file', 'md5_calc', 'ok_nok', 'date') VALUES (?,?,?,?,?,?)", results)
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute INSERT INTO md5_resutls = "
+ str(cur.rowcount))
except sqlite3.Error as error:
if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to INSERT INTO md5_results sqlite table", error)
try:
cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ?, missing_xmp = ? WHERE main_path = '" + main_folder + "'",
(todays_date, md5_OK+md5_NOK, md5_OK, md5_NOK,md5_missing_raw,md5_not_found))
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount))
except sqlite3.Error as error:
if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to UPDATE dashboard from sqlite table", error)
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Run results for path: " + main_folder + " md5_OK=" + str(md5_OK) + " md5_NOK=" +
str(md5_NOK) + " md5_not_found=" + str(md5_not_found) + " md5_missing_raw=" + str(md5_missing_raw) + ", in " + str(round(time.time()-time1)) +
" seconds, finished at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ".")
def runrunrun():
global combo
global combo_var
global master
for key, value in combo.items():
combo_var[key] = combo[key].get() #combo values gets destroyed when closing GUI, therefore copy values.
master.destroy() # Close GUI and continue after mainloop
def quitquit():
conn.close()
exit()
def xmp_count():
# Check how many xmp files there are in each main folder.
global main_folders
global xmp_tracker
global verbose
global xmp_tracker
xmp_file_count = 0
for main_folder in main_folders:
for subdir, dirs, files in os.walk(main_folder):
for file in files:
if file[-3:] == 'xmp':
xmp_file_count += 1
xmp_tracker.append([main_folder,xmp_file_count])
if vital_stats: print(main_folder + " has " + str(xmp_file_count) + " xmp files")
xmp_file_count = 0
def build_dashboard():
# Build dashboard
# Creating main tkinter window/toplevel
global dashboard
global main_folders
global combo
global combo_var
global xmp_tracker
global verbose
global master
date_format = "%Y-%m-%d"
todays_date = datetime.date.today().strftime(date_format)
max_width = max(len(x) for x in main_folders) # needed to size the cell with path
# this will create a label widget
col_1 = Label(master, relief=RIDGE, text = "Folder path", width = max_width)
col_2 = Label(master, relief=RIDGE, text = "Last run", width = 12)
col_3 = Label(master, relief=RIDGE, text = "Days since last run", width = 18)
col_4 = Label(master, relief=RIDGE, text = "Tot last count xmp")
col_5 = Label(master, relief=RIDGE, text = "Tot db xmp")
col_6 = Label(master, relief=RIDGE, text = "OK xmp")
col_7 = Label(master, relief=RIDGE, text = "NOK xmp")
| col_8 = Label(master, relief=RIDGE, text = "Missing RAW")
col_9 = Label(master, relief=RIDGE, text = "Missing xmp")
col_10 = Label(master, relief=RIDGE, text = "Start/Restart")
# grid method to arrange labels in respective
# rows and columns as specified
col_1.grid(row = 0, column = 0, sticky = W, pady = 2)
col_2.grid(row = 0, column = 1, sticky = W, pady = 2)
col_3.grid(row = 0, column = 2, sticky = W, pady = 2)
col_4.grid(row = 0, column = 3, sticky = W, pady = 2)
col_5.grid(row = 0, column = 4, sticky = W, pady = 2)
col_6.grid(row = 0, column = 5, sticky = W, pady = 2)
col_7.grid(row = 0, column = 6, sticky = W, pady = 2)
col_8.grid(row = 0, column = 7, sticky = W, pady = 2)
col_9.grid(row = 0, column = 8, sticky = W, pady = 2)
col_10.grid(row = 0, column = 9, sticky = W, pady = 2)
for ii, each_row in enumerate(dashboard):
# print(each_row)
# print(ii)
col_1 = Label(master, text = each_row[1])
col_2 = Label(master, text = each_row[2])
col_3 = Label(master, text = (datetime.datetime.strptime(todays_date, date_format)- datetime.datetime.strptime(each_row[2], date_format)).days)
col_4 = Label(master, text = xmp_tracker[ii][1])
col_5 = Label(master, text = each_row[3])
col_6 = Label(master, text = each_row[4])
col_7 = Label(master, text = each_row[5])
col_8 = Label(master, text = each_row[6])
col_9 = Label(master, text = each_row[7])
col_1.grid(row = ii+1, column = 0, sticky = W, pady = 5, padx = 5)
col_2.grid(row = ii+1, column = 1, sticky = W, pady = 5, padx = 5)
col_3.grid(row = ii+1, column = 2, sticky = W, pady = 5, padx = 5)
col_4.grid(row = ii+1, column = 3, sticky = W, pady = 5, padx = 5)
col_5.grid(row = ii+1, column = 4, sticky = W, pady = 5, padx = 5)
col_6.grid(row = ii+1, column = 5, sticky = W, pady = 5, padx = 5)
col_7.grid(row = ii+1, column = 6, sticky = W, pady = 5, padx = 5)
col_8.grid(row = ii+1, column = 7, sticky = W, pady = 5, padx = 5)
col_9.grid(row = ii+1, column = 8, sticky = W, pady = 5, padx = 5)
valores=("Do nothing", "Restart", "New run")
# key_name[1] holds the path, which becomes the key in the dict; the value is whatever was chosen in the drop-down.
for index, key_name in enumerate(dashboard):
combo[key_name[1]] = ttk.Combobox(master, values=valores)
combo[key_name[1]].set("Do nothing ")
combo[key_name[1]].grid(row = 1+index, column = 9, sticky = W, pady = 6, padx = 5)
# button widget
b1 = Button(master, text = "Cancel", width = 9, command=quitquit)
b2 = Button(master, text = "Go", width = 9, command=runrunrun)
# arranging button widgets
b1.grid(row = ii+2, column = 9, sticky = W)
b2.grid(row = ii+2, column = 9, sticky = E)
# infinite loop which can be terminated
# by keyboard or mouse interrupt
mainloop()
def get_list_of_folders(main_folders_2):
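# Keep at most one folder per drive: item[0] is the first character of each path (the drive letter),
# so a batch returned from here never contains two folders that live on the same disk.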
tmp = []
tmp.append(main_folders_2[0])
for ii in range(len(main_folders_2)):
if main_folders_2[ii] not in tmp and main_folders_2[ii][0] not in [item[0] for item in tmp]:
tmp.append(main_folders_2[ii])
return tmp
def run_thru_folders():
global dashboard
global main_folders
global combo
global combo_var
global verbose
global vital_stats
global cur
new_run_folders = []
current_run_list = []
l3 = []
for main_folder in main_folders:
if combo_var[main_folder] == "New run":
new_run_folders.append(main_folder)
else:
if vital_stats: print("For folder " + main_folder + ": " + combo_var[main_folder])
if vital_stats: print('new_run_folders: ')
if vital_stats: print(new_run_folders)
while new_run_folders:
current_run_list = get_list_of_folders(new_run_folders)
if vital_stats: print('current_run_list: ')
if vital_stats: print(current_run_list)
if l3 is not None:
l3 = [x for x in new_run_folders if x not in current_run_list]
new_run_folders = l3
iii = 0
thread_list = []
for current_folder in current_run_list:
thread_list.append(threading.Thread(target=folderThread, args=(current_folder,)))
# Starting threads
thread_list[iii].start()
print('Started thread number ' + str(iii))
print("Base identity: " + str(thread_list[iii]) + " ")
time.sleep(3) # Think this is needed to give time to db transactions at the start of a run.
iii += 1
for xxx in range(iii):
thread_list[xxx].join()
verbose = 1
vital_stats = 1
limited_printouts = 1000
print_errors = 1
md5_index = []
xmp_tracker = []
combo = {}
combo_var = {}
todays_date = str(datetime.date.today())
raw_extensions = ['CR2','NEF','3FR','ARW','SRF','SR2','CRW','IIQ','EIP','DCR','K25','KDC','ERF','MEF','MOS','MRW','NRW',
'ORF','PEF','RAF','RAW','RW2','RWL','RWZ','X3F','MOV','MP4','AVI','WMV','M4V','MPG','3GP','3G2']
#conn = sqlite3.connect('C:\Pelle\Dashboard_Python\dashboard_md5.sql3', check_same_thread=False) # LeanMean
#conn = sqlite3.connect('G:\PelleHack\Python_Dashboard\dashboard_md5.sql3', check_same_thread=False) # W12
conn = connect_sqlite_db()
cur = conn.cursor()
cur.execute("SELECT * FROM dashboard")
dashboard = cur.fetchall()
#print(dashboard)
main_folders = [dashboard[i][1] for i in range(len(dashboard))] # Extract the paths to its own list.
print(main_folders)
xmp_count()
master = Tk()
build_dashboard()
run_thru_folders()
conn.commit()
conn.close() | random_line_split | |
xmp_dashboard.py | # -*- coding: UTF-8 -*-
# py_md5_xmp
# The script takes a given xmp file and computes the md5 sum of its data file.
#
# md5 sum in an Adobe LR xmp file:
# PelleTags:PelleTag1_md5sum="935c0eb6242e80c95001368b9d53b421"
#
# Exiftool xmp file:
# <PelleTags:PelleTag1_md5sum>572737b08d11666255afb41b2c0443cb</PelleTags:PelleTag1_md5sum>
# cur.execute('''CREATE TABLE dashboard (id INTEGER PRIMARY KEY, main_path TEXT, last_run TEXT, tot_xmp INTEGER, ok_xmp INTEGER,nok_xmp INTEGER, missing_raw INTEGER)''')
# cur.execute("INSERT INTO dashboard (main_path, last_run, tot_xmp, ok_xmp, nok_xmp, missing_raw) VALUES(?,?,?,?,?,?)", ['H:\Pelle Temp', 0, 0, 0, 0, 0])
# cur.execute("UPDATE dashboard SET last_run = ? WHERE id = 1", ['2018-11-07'])
#
# cur.execute('''CREATE TABLE md5_results (id INTEGER PRIMARY KEY, file_path TEXT NOT NULL, file_name TEXT NOT NULL, md5_file TEXT, md5_calc TEXT, ok_nok TEXT, date TEXT)''')
# cur.execute("INSERT INTO md5_results (file_path, file_name, md5_file, md5_calc, ok_nok, date) VALUES(?,?,?,?,?,?)", ['C:\Pelle\Dropbox\Hack - xmp dashboard', 'testfil.xmp', '0', '0', '0', '2018-11-07'])
#
# ToDo's:
# - Darn, what a mess it became when I tried to OOP everything. Now it is completely non-intuitive, some in-between thing between procedural and OOP...
# - Multithreading? Start several threads if the main folders are on different disks? CPU load is around 10%
# while disk load sits at 40-90%. Make the threading a bit intelligent so it does not start with folders that are on the same disk.
# - Make it use the def main / if __name__ == ... pattern etc. Seems OOP-like and good.
# - Add a few more try-excepts, definitely around every sql execute.
# - Some kind of printout so intermediate results can be followed. In a new window?
# Maybe something similar to what I have for the verify xmp hack.
# Everything shows up in IDLE anyway?
# Skipped:
# - A Cancel Run button. - Could not get it to work, need to read up more.
# v0.1 191106
# - Reading xmp files and comparing against the computed md5 of the corresponding file now works.
# v0.2 191107
# - sqlite works ok
# - Counts the number of xmp files present in each main folder.
# - There is now a table being used.
# v0.3 191107
# - The dashboard itself is now displayed
# v0.4 191107
# - Make events that populate the database
# v0.5 1911115
# - Now works with dynamic generation of the drop-down boxes. The values are stored in a dict. Turned out really neat
# v0.6 191111
# - Now works for New Run (and Do Nothing). It deletes the existing values in the table and fills in new ones
# v0.7 191111
# - New run with deletion of everything old in the db seems to work now
# v0.8 191114
# - Cancel Run window. That means two windows are needed. Probably time to restructure everything...
# - Implemented a Cancel Run button, but could not get it to work. Stepping to 0.9 and removing it.
# v0.9 191114
# - Added a printout for every 1000 files processed. That should speed things up a bit.
# - Started looking at PyQt, seems much more stable than Tkinter. We'll see if I get it working.
# - Fixed a bug where lower case extensions, e.g. mp4, were not included because the extension list is upper case. Added
# upper() to the comparison.
# v0.10 191118
# - Fixed, a counter was stepped in the wrong place. There is now a bug when no raw file exists: an index out of bounds. This happens when it is
# a DNG file, for instance. DNG is not among the extensions. Fixed that, but now there is an annoying printout for every
# file it finds that is not in the RAW list. An xmp may well have e.g. a jpg next to it that should not generate any printout.
# - Fixed, added a separate counter. I added a printout every xxx files of how many have been processed. Annoyingly it printed for each of the first
# 100 files. My div/mod did not seem to work the way I intended.
# - Added a break so that once the md5 has been computed for a file it exits the loop. Previously it kept stepping through the whole folder even after finding the right file.
# - Take time() at the start of each folder, and at the end, and save. Then print a small summary.
# - At every start the folder name is printed. Could the number of files be printed too? They are counted beforehand anyway. Something like approx. xx files of yy?
# - Added a new parameter vital_stats which, when set, prints the most important information.
# - A few other small fixes.
# v0.11 191118
# - Optimized the file comparison loop a bit for readability.
# - Added a bit more text at the start and the elapsed time in seconds for every limited_printouts interval.
# - It works fairly well now, so stepping the version.
# v0.12 191118
# - What a mess. Removed tkinter and some of the classes I had made, probably better to go back to more procedural code.
# - I count the number of cases where there is an xmp file without a matching RAW, but I don't show it anywhere.
# - A small counter showing how many days ago the folder was last run?
# - PyQt
# - Printing elapsed seconds at intervals works well; a delta time could be given as well.
# - Threading: needs some intelligence so the threads are spread over different hard drives, and the number of threads is limited to the number of drives.
# v0.12 branch THREAD 191118
# - Threading actually seems to work. But jeez, how messy the code is now!
# v0.13 branch THREAD 191119
# - Initial tests show that it cannot keep the disks apart, so it starts two folders on the same disk.
# So the threads have to be started manually, waiting for them to finish.
# v0.14 191120
# - It works now! It looks at the list of folders to be run, and then distributes the threads over
# the hard drives so that two threads never run on the same disk at the same time.
# - This is now the main branch; removing THREAD.
# - Should be tidied up, it is really messy right now.
# - That Cancel button is probably worth tackling together with PyQt.
# - Delta seconds for the printout after every x files. But it must be thread-specific, i.e. each thread has its own counter.
# - The missing raw column in the dashboard is not being filled in.
# - GitHub test, this copy is meant for W12.
# v0.15 191127
# - Reworked so that instead of having one version per computer there is a config file, connect_sqlite_db.py, which is
# called. It contains the right string for connecting to the right database.
# v1.0 191127
# Second attempt in git, dropping the version handling from the file name and stepping to 1.0 while at it.
# 191127
# - In the git mess the files with the following fixes were lost:
# -- Added display of the number of files with missing RAW.
# --- Updated the db on LM and W12; also needed on ACTUAL and W10,
# i.e. dig out the SQL and test it.
# --- The GUI needs fixing; the Dashboard itself lacks the missing RAW column.
# - All of the above is now fixed; still need to add SQL to update the db on W10 & ACTUAL.
# 191129
# - Added the number of days since the last run.
# - Added delta seconds since each limited_printouts interval.
import os
from os.path import join
from operator import itemgetter, attrgetter
from tkinter import *
import fnmatch
import sys
import math
import threading
import subprocess
import fileinput
import datetime
import time
import hashlib
import sqlite3
from tkinter import ttk # This provides the combobox (drop-down).
# md5Checksum is a function that lives in the separate file generate_md5_Checksum_def,
from generate_md5_Checksum_def import md5Checksum
from connect_sqlite_db import connect_sqlite_db
def index_containing_substring(the_list, substring): # returns the md5 sum from the first matching line, 0 if no md5 was found.
for i, s in enumerate(the_list):
if substring in s:
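# Both tag variants ('<PelleTags:PelleTag1_md5sum>' and 'PelleTags:PelleTag1_md5sum="') place the
# 32-character md5 digest 28 characters after the start of the match, hence the +28..+60 slice below.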
if substring[0] == '<':
md5 = s[s.find(substring)+28:s.find(substring)+60]
else:
md5 = s[s.find(substring)+28:s.find(substring)+60]
return md5
return 0
def folderThread(main_folder):
global dashboard
global main_folders
global combo
global combo_var
global verbose
global cur
print("Thread started at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
print('Printout levels: ' + str(vital_stats) + ' ' + str(verbose) + ' ', flush=True)
print('Thread identity: ' + str(threading.get_ident()))
if vital_stats: print('Thread identity: ' + str(threading.get_ident()) + " Starting with " + main_folder + " and " + combo_var[main_folder] + " containing " + str(xmp_tracker[main_folders.index(main_folder)][1]) + " xmp files.")
md5_OK = 0
md5_NOK = 0
md5_not_found = 0
md5_missing_raw = 0
xmp_file_counter = 0
results = []
time1 = time.time()
try: # Clear db here from all rows with path
sql = "DELETE FROM md5_results WHERE file_path LIKE '" + main_folder + "%'"
cur.execute(sql)
if verbose: print("Thread identity: " + str(threading.get_ident()) + " Executed sql: " + sql)
if verbose: print("Thread identity: " + str(threading.get_ident()) + " First sql segment: Rows returned from execute = " + str(cur.rowcount), flush=True)
conn.commit()
except sqlite3.Error as error:
if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to delete record from sqlite table", error, flush=True)
try: # Update dashboard since I've removed all files for folder main_folder.
cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ? WHERE main_path = '" + main_folder + "'",
(todays_date, 0, 0, 0, 0))
if verbose: print(" Thread identity: " + str(threading.get_ident()) + "First sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount), flush=True)
conn.commit()
except sqlite3.Error as error:
if print_errors: print(" Thread identity: " + str(threading.get_ident()) + "Failed to UPDATE dashboard from sqlite table", error, flush=True)
time2 = time.time()
for subdir, dirs, files in os.walk(main_folder):
for file in files:
found_raw = 0
try:
if file.endswith('xmp'):
xmp_file_counter +=1
if verbose: print ('Found file: ' + file, flush=True)
f = open(subdir+'\\'+ file,"r")
list_file = list(f)
md5_index = [index_containing_substring(list_file, '<PelleTags:PelleTag1_md5sum>'),
index_containing_substring(list_file, 'PelleTags:PelleTag1_md5sum=')]
if verbose: print(md5_index)
if any(md5_index): # The xmp file contains an md5 sum.
res = [idx for idx, val in enumerate(md5_index) if val != 0] # Tells which type of xmp encoding it is.
md5_xmp = md5_index[res[0]]
for raw_file in os.listdir(subdir): # Find the corresponding RAW-file to generate md5 sum.
if file[:-3] in raw_file[:-3]: # Slice off the extensions to check whether the names match.
if raw_file[-3:].upper() in raw_extensions:
found_raw = 1
md5_calculated = md5Checksum(subdir + '\\' + raw_file)
if verbose: print ('Calculated md5 for file ' + raw_file)
if verbose: print (md5_calculated)
if md5_calculated == md5_xmp:
results.append((subdir, file, md5_xmp, md5_calculated, 'OK', todays_date))
md5_OK +=1
if verbose: print("md5 stämmer " + " subdir " + subdir + " file " + file)
else:
results.append((subdir, file, md5_xmp, md5_calculated, 'NOK', todays_date))
md5_NOK +=1
if print_errors: print("md5 fail: " + subdir + "\\" + str(raw_file))
break
if not found_raw: # Reached after the raw-file loop; found_raw is still 0 if no matching raw file was seen.
if print_errors: print("xmp without matching raw: " + subdir + "\\" + str(file))
results.append((subdir, file, 'No valid raw file found', '-', 'NOK', todays_date))
md5_missing_raw +=1
else: # index_containing_substring returns zero, PelleTags not present in xmp-file
if print_errors: print("Error, no md5 sum in file " + subdir + "\\" + file)
md5_not_found += 1
results.append((subdir, file, 'No md5 in xmp', '-', 'NOK', todays_date)) # md5_calculated is undefined here, so store '-' like the missing-raw case.
f.close()
if xmp_file_counter % limited_printouts == 0:
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + ", " + str(xmp_file_counter) + " xmp-files processed in "
+ str(round(time.time()-time1)) + " seconds, delta time " + str(round(time.time()-time2)) + " seconds, local time " +
time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()))
time2 = time.time()
except:
if print_errors:
print ('Unexpected fail for file: ' + file)
print (sys.exc_info())
f.close()
try:
cur.executemany("INSERT INTO md5_results ('file_path', 'file_name', 'md5_file', 'md5_calc', 'ok_nok', 'date') VALUES (?,?,?,?,?,?)", results)
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute INSERT INTO md5_resutls = "
+ str(cur.rowcount))
except sqlite3.Error as error:
if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to INSERT INTO md5_results sqlite table", error)
try:
cur.execute("UPDATE dashboard SET last_run = ?, tot_xmp = ?, ok_xmp = ?, nok_xmp = ?, missing_raw = ?, missing_xmp = ? WHERE main_path = '" + main_folder + "'",
(todays_date, md5_OK+md5_NOK, md5_OK, md5_NOK,md5_missing_raw,md5_not_found))
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Second sql segment: Rows returned from execute UPDATE dashboard = " + str(cur.rowcount))
except sqlite3.Error as error:
if print_errors: print("Thread identity: " + str(threading.get_ident()) + " Failed to UPDATE dashboard from sqlite table", error)
if vital_stats: print("Thread identity: " + str(threading.get_ident()) + " Run results for path: " + main_folder + " md5_OK=" + str(md5_OK) + " md5_NOK=" +
str(md5_NOK) + " md5_not_found=" + str(md5_not_found) + " md5_missing_raw=" + str(md5_missing_raw) + ", in " + str(round(time.time()-time1)) +
" seconds, finished at " + time.strftime("%Y-%m-%d %H:%M:%S", time.localtime()) + ".")
def runrunrun():
global combo
global combo_var
global master
for key, value in combo.items():
combo_var[key] = combo[key].get() #combo values gets destroyed when closing GUI, therefore copy values.
master.destroy() # Close GUI and continue after mainloop
def quitquit():
conn.close()
exit()
def xmp_count():
# Check how many xmp files there are in each main folder.
global main_folders
global xmp_tracker
global | _tracker
xmp_file_count = 0
for main_folder in main_folders:
for subdir, dirs, files in os.walk(main_folder):
for file in files:
if file[-3:] == 'xmp':
xmp_file_count += 1
xmp_tracker.append([main_folder,xmp_file_count])
if vital_stats: print(main_folder + " has " + str(xmp_file_count) + " xmp files")
xmp_file_count = 0
def build_dashboard():
# Build dashboard
# Creating main tkinter window/toplevel
global dashboard
global main_folders
global combo
global combo_var
global xmp_tracker
global verbose
global master
date_format = "%Y-%m-%d"
todays_date = datetime.date.today().strftime(date_format)
max_width = max(len(x) for x in main_folders) # needed to size the cell with path
# this will create a label widget
col_1 = Label(master, relief=RIDGE, text = "Folder path", width = max_width)
col_2 = Label(master, relief=RIDGE, text = "Last run", width = 12)
col_3 = Label(master, relief=RIDGE, text = "Days since last run", width = 18)
col_4 = Label(master, relief=RIDGE, text = "Tot last count xmp")
col_5 = Label(master, relief=RIDGE, text = "Tot db xmp")
col_6 = Label(master, relief=RIDGE, text = "OK xmp")
col_7 = Label(master, relief=RIDGE, text = "NOK xmp")
col_8 = Label(master, relief=RIDGE, text = "Missing RAW")
col_9 = Label(master, relief=RIDGE, text = "Missing xmp")
col_10 = Label(master, relief=RIDGE, text = "Start/Restart")
# grid method to arrange labels in respective
# rows and columns as specified
col_1.grid(row = 0, column = 0, sticky = W, pady = 2)
col_2.grid(row = 0, column = 1, sticky = W, pady = 2)
col_3.grid(row = 0, column = 2, sticky = W, pady = 2)
col_4.grid(row = 0, column = 3, sticky = W, pady = 2)
col_5.grid(row = 0, column = 4, sticky = W, pady = 2)
col_6.grid(row = 0, column = 5, sticky = W, pady = 2)
col_7.grid(row = 0, column = 6, sticky = W, pady = 2)
col_8.grid(row = 0, column = 7, sticky = W, pady = 2)
col_9.grid(row = 0, column = 8, sticky = W, pady = 2)
col_10.grid(row = 0, column = 9, sticky = W, pady = 2)
for ii, each_row in enumerate(dashboard):
# print(each_row)
# print(ii)
col_1 = Label(master, text = each_row[1])
col_2 = Label(master, text = each_row[2])
col_3 = Label(master, text = (datetime.datetime.strptime(todays_date, date_format)- datetime.datetime.strptime(each_row[2], date_format)).days)
col_4 = Label(master, text = xmp_tracker[ii][1])
col_5 = Label(master, text = each_row[3])
col_6 = Label(master, text = each_row[4])
col_7 = Label(master, text = each_row[5])
col_8 = Label(master, text = each_row[6])
col_9 = Label(master, text = each_row[7])
col_1.grid(row = ii+1, column = 0, sticky = W, pady = 5, padx = 5)
col_2.grid(row = ii+1, column = 1, sticky = W, pady = 5, padx = 5)
col_3.grid(row = ii+1, column = 2, sticky = W, pady = 5, padx = 5)
col_4.grid(row = ii+1, column = 3, sticky = W, pady = 5, padx = 5)
col_5.grid(row = ii+1, column = 4, sticky = W, pady = 5, padx = 5)
col_6.grid(row = ii+1, column = 5, sticky = W, pady = 5, padx = 5)
col_7.grid(row = ii+1, column = 6, sticky = W, pady = 5, padx = 5)
col_8.grid(row = ii+1, column = 7, sticky = W, pady = 5, padx = 5)
col_9.grid(row = ii+1, column = 8, sticky = W, pady = 5, padx = 5)
valores=("Do nothing", "Restart", "New run")
# key_name[1] holds the path, which becomes the key in the dict; the value is whatever was chosen in the drop-down.
for index, key_name in enumerate(dashboard):
combo[key_name[1]] = ttk.Combobox(master, values=valores)
combo[key_name[1]].set("Do nothing ")
combo[key_name[1]].grid(row = 1+index, column = 9, sticky = W, pady = 6, padx = 5)
# button widget
b1 = Button(master, text = "Cancel", width = 9, command=quitquit)
b2 = Button(master, text = "Go", width = 9, command=runrunrun)
# arranging button widgets
b1.grid(row = ii+2, column = 9, sticky = W)
b2.grid(row = ii+2, column = 9, sticky = E)
# infinite loop which can be terminated
# by keyboard or mouse interrupt
mainloop()
def get_list_of_folders(main_folders_2):
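# Keep at most one folder per drive: item[0] is the first character of each path (the drive letter),
# so a batch returned from here never contains two folders that live on the same disk.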
tmp = []
tmp.append(main_folders_2[0])
for ii in range(len(main_folders_2)):
if main_folders_2[ii] not in tmp and main_folders_2[ii][0] not in [item[0] for item in tmp]:
tmp.append(main_folders_2[ii])
return tmp
def run_thru_folders():
global dashboard
global main_folders
global combo
global combo_var
global verbose
global vital_stats
global cur
new_run_folders = []
current_run_list = []
l3 = []
for main_folder in main_folders:
if combo_var[main_folder] == "New run":
new_run_folders.append(main_folder)
else:
if vital_stats: print("For folder " + main_folder + ": " + combo_var[main_folder])
if vital_stats: print('new_run_folders: ')
if vital_stats: print(new_run_folders)
while new_run_folders:
current_run_list = get_list_of_folders(new_run_folders)
if vital_stats: print('current_run_list: ')
if vital_stats: print(current_run_list)
if l3 is not None:
l3 = [x for x in new_run_folders if x not in current_run_list]
new_run_folders = l3
iii = 0
thread_list = []
for current_folder in current_run_list:
thread_list.append(threading.Thread(target=folderThread, args=(current_folder,)))
# Starting threads
thread_list[iii].start()
print('Started thread number ' + str(iii))
print("Base identity: " + str(thread_list[iii]) + " ")
time.sleep(3) # Think this is needed to give time to db transactions at the start of a run.
iii += 1
for xxx in range(iii):
thread_list[xxx].join()
verbose = 1
vital_stats = 1
limited_printouts = 1000
print_errors = 1
md5_index = []
xmp_tracker = []
combo = {}
combo_var = {}
todays_date = str(datetime.date.today())
raw_extensions = ['CR2','NEF','3FR','ARW','SRF','SR2','CRW','IIQ','EIP','DCR','K25','KDC','ERF','MEF','MOS','MRW','NRW',
'ORF','PEF','RAF','RAW','RW2','RWL','RWZ','X3F','MOV','MP4','AVI','WMV','M4V','MPG','3GP','3G2']
#conn = sqlite3.connect('C:\Pelle\Dashboard_Python\dashboard_md5.sql3', check_same_thread=False) # LeanMean
#conn = sqlite3.connect('G:\PelleHack\Python_Dashboard\dashboard_md5.sql3', check_same_thread=False) # W12
conn = connect_sqlite_db()
cur = conn.cursor()
cur.execute("SELECT * FROM dashboard")
dashboard = cur.fetchall()
#print(dashboard)
main_folders = [dashboard[i][1] for i in range(len(dashboard))] # Extract the paths to its own list.
print(main_folders)
xmp_count()
master = Tk()
build_dashboard()
run_thru_folders()
conn.commit()
conn.close()
| verbose
global xmp | identifier_body |
sensor_update.py | """
===============
=== Purpose ===
===============
Produces a signal for each flu digital surveillance source, which is then used
as a 'sensor' in the context of nowcasting through sensor fusion.
Each signal is updated over the following inclusive range of epiweeks:
- epiweek of most recently computed signal of this type
- last epiweek
The idea is to recompute the last stored value (just in case there were
changes to the underlying data source), and to compute all weeks up to, but
not including, the current week (because the current week is, by definition,
still ongoing).
The following signals are available:
- gft: Google Flu Trends
- ght: Google Health Trends
- twtr: HealthTweets
- wiki: Wikipedia access
- cdc: CDC Page Hits
- epic: Epicast 1-week-ahead point prediction
- quid: Flu lab test data
- sar3: Seasonal Autoregression (order 3) with holidays
- arch: Best-fit Archetype at 1-week-ahead
- ar3: Autoregression (order 3) with holidays
See also:
- signal_update.py
- sar3.py
- arch.py
- ar3.py
"""
# standard library
import argparse
import re
import subprocess
import sys
# third party
import numpy as np
# first party
from delphi.epidata.client.delphi_epidata import Epidata
from delphi.nowcast.sensors.arch import ARCH
from delphi.nowcast.sensors.sar3 import SAR3
from delphi.nowcast.sensors.ar3 import AR3
from delphi.nowcast.util.sensors_table import SensorsTable
import delphi.operations.secrets as secrets
from delphi.utils.epidate import EpiDate
import delphi.utils.epiweek as flu
from delphi.utils.geo.locations import Locations
def get_most_recent_issue(epidata):
# search for FluView issues within the last 10 weeks
ew2 = EpiDate.today().get_ew()
ew1 = flu.add_epiweeks(ew2, -9)
rows = epidata.check(epidata.fluview('nat', epidata.range(ew1, ew2)))
return max([row['issue'] for row in rows])
def get_location_list(loc):
"""Return the list of locations described by the given string."""
if loc == 'all':
return Locations.region_list
elif loc == 'hhs':
return Locations.hhs_list
elif loc == 'cen':
return Locations.cen_list
elif loc in Locations.region_list:
return [loc]
else:
raise UnknownLocationException('unknown location: %s' % str(loc))
class UnknownLocationException(Exception):
"""An Exception indicating that the given location is not known."""
class SignalGetter:
"""Class with static methods that implement the fetching of
different data signals. Each function returns a function that
only takes a single argument:
- weeks: an Epiweek range of weeks to fetch data for.
"""
def __init__(self):
pass
@staticmethod
def get_gft(location, epiweek, valid):
def fetch(weeks):
# The GFT model update of 2013 significantly improved the GFT signal, so
# much so that training on the old data will severely hurt the predictive
# power of the new data. To overcome this, I basically pretend that GFT
# versions before and after mid-2013 are different signals.
if weeks['to'] >= 201340:
# this is the new GFT model, so throw out data from the old model
weeks = Epidata.range(max(weeks['from'], 201331), weeks['to'])
return Epidata.gft(location, weeks)
return fetch
@staticmethod
def get_ght(location, epiweek, valid):
loc = 'US' if location == 'nat' else location
fetch = lambda weeks: Epidata.ght(secrets.api.ght, loc, weeks, '/m/0cycc')
return fetch
@staticmethod
def get_twtr(location, epiweek, valid):
def fetch(weeks):
# Impute missing weeks with 0%
# This is actually correct because twitter does not store rows with `num` =
# 0. So weeks with 0 `num` (and `percent`) are missing from the response.
res = Epidata.twitter(secrets.api.twitter, location, epiweeks=weeks)
if 'epidata' in res:
epiweeks = set([r['epiweek'] for r in res['epidata']])
first, last = 201149, weeks['to']
for ew in flu.range_epiweeks(first, last, inclusive=True):
if ew not in epiweeks:
res['epidata'].append({'epiweek': ew, 'percent': 0.})
return res
return fetch
@staticmethod
def get_wiki(location, epiweek, valid):
if location != 'nat':
raise Exception('wiki is only available for nat')
articles = [
'human_flu',
'influenza',
'influenza_a_virus',
'influenzavirus_a',
'influenzavirus_c',
'oseltamivir',
'zanamivir',
]
hours = [17, 18, 21]
# There are 21 time series (7 articles, 3 hours) of N epiweeks. Each time
# series needs to be fetched, and then the whole dataset needs to be pivoted
# so that there are N rows, each with 21 values.
fields = ['f%d' % i for i in range(len(articles) * len(hours))]
def fetch(weeks):
# a map from epiweeks to a map of field-value pairs (for each article/hour)
data = {}
# field name index
idx = 0
# download each time series individually
for article in articles:
for hour in hours:
# fetch the data from the API
res = Epidata.wiki(article, epiweeks=weeks, hours=hour)
epidata = Epidata.check(res)
field_name = fields[idx]
idx += 1
# loop over rows of the response, ordered by epiweek
for row in epidata:
ew = row['epiweek']
if ew not in data:
# make a new entry for this epiweek
data[ew] = {'epiweek': ew}
# save the value of this field
data[ew][field_name] = row['value']
# convert the map to a list matching the API epidata list
rows = []
for ew in sorted(list(data.keys())):
rows.append(data[ew])
# spoof the API response
return {
'result': 1,
'message': None,
'epidata': rows,
}
return fetch, fields
@staticmethod
def get_cdc(location, epiweek, valid):
fields = ['num2', 'num4', 'num5', 'num6', 'num7', 'num8']
def fetch(weeks):
# It appears that log-transformed counts provide a much better fit.
res = Epidata.cdc(secrets.api.cdc, weeks, location)
if 'epidata' in res:
for row in res['epidata']:
for col in fields:
row[col] = np.log(1. + row[col])
return res
return fetch, fields
@staticmethod
def get_quid(location, epiweek, valid):
fields = ['value']
def fetch(weeks):
res = Epidata.quidel(secrets.api.quidel, weeks, location)
return res
return fetch, fields
class SensorFitting:
def __init__(self):
pass
@staticmethod
def fit_loch_ness(location, epiweek, name, fields, fetch, valid):
# Helper functions
def get_weeks(epiweek):
ew1 = 200330
ew2 = epiweek
ew3 = flu.add_epiweeks(epiweek, 1)
weeks0 = Epidata.range(ew1, ew2)
weeks1 = Epidata.range(ew1, ew3)
return (ew1, ew2, ew3, weeks0, weeks1)
def extract(rows, fields):
data = {}
for row in rows:
data[row['epiweek']] = [float(row[f]) for f in fields]
return data
def get_training_set_data(data):
epiweeks = sorted(list(data.keys()))
X = [data[ew]['x'] for ew in epiweeks]
Y = [data[ew]['y'] for ew in epiweeks]
return (epiweeks, X, Y)
def get_training_set(location, epiweek, signal, valid):
ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek)
auth = secrets.api.fluview
try:
result = Epidata.fluview(location, weeks0, issues=ew2, auth=auth)
rows = Epidata.check(result)
unstable = extract(rows, ['wili'])
except Exception:
unstable = {}
rows = Epidata.check(Epidata.fluview(location, weeks0, auth=auth))
stable = extract(rows, ['wili'])
data = {}
num_dropped = 0
for ew in signal.keys():
if ew == ew3:
continue
sig = signal[ew]
if ew not in unstable:
if valid and flu.delta_epiweeks(ew, ew3) <= 5:
raise Exception('unstable wILI is not available on %d' % ew)
if ew not in stable:
num_dropped += 1
continue
wili = stable[ew]
else:
wili = unstable[ew]
data[ew] = {'x': sig, 'y': wili}
if num_dropped:
msg = 'warning: dropped %d/%d signal weeks because (w)ILI was unavailable'
print(msg % (num_dropped, len(signal)))
return get_training_set_data(data)
def dot(*Ms):
""" Simple function to compute the dot product
for any number of arguments.
"""
N = Ms[0]
for M in Ms[1:]:
N = np.dot(N, M)
return N
def get_weight(ew1, ew2):
""" This function gives the weight between two given
epiweeks based on a function that:
- drops sharply over the most recent ~3 weeks
- falls off exponentially with time
- puts extra emphasis on the past weeks at the
same time of year (seasonality)
- gives no week a weight of zero
"""
dw = flu.delta_epiweeks(ew1, ew2)
yr = 52.2
hl1, hl2, bw = yr, 1, 4
a = 0.05
#b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2
b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))
c = 2 ** -(dw / hl1)
d = 1 - 2 ** -(dw / hl2)
return (a + (1 - a) * b) * c * d
def get_periodic_bias(epiweek):
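# Encode the position within the year as a point on the unit circle, so that week 52 and week 1
# are treated as neighbours rather than as opposite ends of the year.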
weeks_per_year = 52.2
offset = flu.delta_epiweeks(200001, epiweek) % weeks_per_year
angle = np.pi * 2 * offset / weeks_per_year
return [np.sin(angle), np.cos(angle)]
def apply_model(epiweek, beta, values):
bias0 = [1.]
if beta.shape[0] > len(values) + 1:
# constant and periodic bias
bias1 = get_periodic_bias(epiweek)
obs = np.array([values + bias0 + bias1])
else:
# constant bias only
obs = np.array([values + bias0])
return float(dot(obs, beta))
def get_model(ew2, epiweeks, X, Y):
ne, nx1, nx2, ny = len(epiweeks), len(X), len(X[0]), len(Y)
if ne != nx1 or nx1 != ny:
raise Exception('length mismatch e=%d X=%d Y=%d' % (ne, nx1, ny))
weights = np.diag([get_weight(ew1, ew2) for ew1 in epiweeks])
X = np.array(X).reshape((nx1, nx2))
Y = np.array(Y).reshape((ny, 1))
bias0 = np.ones(Y.shape)
if ne >= 26 and flu.delta_epiweeks(epiweeks[0], epiweeks[-1]) >= 52:
# constant and periodic bias
bias1 = np.array([get_periodic_bias(ew) for ew in epiweeks])
X = np.hstack((X, bias0, bias1))
else:
# constant bias only
X = np.hstack((X, bias0))
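# Weighted least squares: beta = (X^T W X)^{-1} X^T W Y, with W the diagonal matrix of per-week weights.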
XtXi = np.linalg.inv(dot(X.T, weights, X))
XtY = dot(X.T, weights, Y)
return np.dot(XtXi, XtY)
if type(fields) == str:
fields = [fields]
ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek)
rows = Epidata.check(fetch(weeks1))
signal = extract(rows, fields)
min_rows = 3 + len(fields)
if ew3 not in signal:
raise Exception('%s unavailable on %d' % (name, ew3))
if len(signal) < min_rows:
raise Exception('%s available less than %d weeks' % (name, min_rows))
epiweeks, X, Y = get_training_set(location, epiweek, signal, valid)
min_rows = min_rows - 1
if len(Y) < min_rows:
raise Exception('(w)ILI available less than %d weeks' % (min_rows))
model = get_model(ew3, epiweeks, X, Y)
value = apply_model(ew3, model, signal[ew3])
return value
class SensorGetter:
"""Class that implements different sensors. Some sensors
may take in a signal to do the fitting on, others do not.
"""
def __init__(self):
pass
@staticmethod
def get_sensor_implementations():
"""Return a map from sensor names to sensor implementations."""
return {
'cdc': SensorGetter.get_cdc,
'gft': SensorGetter.get_gft,
'ght': SensorGetter.get_ght,
'ghtj': SensorGetter.get_ghtj,
'twtr': SensorGetter.get_twtr,
'wiki': SensorGetter.get_wiki,
'epic': SensorGetter.get_epic,
'sar3': SensorGetter.get_sar3,
'arch': SensorGetter.get_arch,
'ar3': SensorGetter.get_ar3,
'quid': SensorGetter.get_quid,
}
@staticmethod
def get_epic(location, epiweek, valid):
fc = Epidata.check(Epidata.delphi('ec', epiweek))[0]
return fc['forecast']['data'][location]['x1']['point']
@staticmethod
def get_sar3(location, epiweek, valid):
return SAR3(location).predict(epiweek, valid=valid)
@staticmethod
def get_arch(location, epiweek, valid): | return AR3(location).predict(epiweek, valid=valid)
@staticmethod
def get_ghtj(location, epiweek, valid):
loc = 'US' if location == 'nat' else location
def justinfun(location, epiweek):
# Need to set an absolute path
main_driver = '/home/automation/ghtj/ghtj.R'
args = ['Rscript', main_driver, location, str(epiweek)]
subprocess.check_call(args, shell=False)
# Need to set an absolute path
outputdir = '/home/automation/ghtj/output'
prefix = 'ghtpred'
predfilename = '%s/%s-%s-%d.txt' % (outputdir, prefix, loc, epiweek)
with open(predfilename, 'r') as f:
mypred = float(f.read())
print(mypred)
return mypred
# Making the single prediction now:
mypred = justinfun(location, epiweek)
return mypred
# sensors using the loch ness fitting
@staticmethod
def get_gft(location, epiweek, valid):
fetch = SignalGetter.get_gft(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'gft', 'num', fetch, valid)
@staticmethod
def get_ght(location, epiweek, valid):
fetch = SignalGetter.get_ght(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'ght', 'value', fetch, valid)
@staticmethod
def get_twtr(location, epiweek, valid):
fetch = SignalGetter.get_twtr(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'twtr', 'percent', fetch, valid)
@staticmethod
def get_wiki(location, epiweek, valid):
fetch, fields = SignalGetter.get_wiki(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'wiki', fields, fetch, valid)
@staticmethod
def get_cdc(location, epiweek, valid):
fetch, fields = SignalGetter.get_cdc(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'cdc', fields, fetch, valid)
@staticmethod
def get_quid(location, epiweek, valid):
fetch, fields = SignalGetter.get_quid(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'quid', fields, fetch, valid)
class SensorUpdate:
"""
Produces both real-time and retrospective sensor readings for ILI in the US.
Readings (predictions of ILI made using raw inputs) are stored in the Delphi
database and are accessible via the Epidata API.
"""
@staticmethod
def new_instance(valid, test_mode):
"""
Return a new instance under the default configuration.
If `test_mode` is True, database changes will not be committed.
If `valid` is True, be punctilious about hiding values that were not known
at the time (e.g. run the model with preliminary ILI only). Otherwise, be
more lenient (e.g. fall back to final ILI when preliminary ILI isn't
available).
"""
database = SensorsTable(test_mode=test_mode)
implementations = SensorGetter.get_sensor_implementations()
return SensorUpdate(valid, database, implementations, Epidata)
def __init__(self, valid, database, implementations, epidata):
self.valid = valid
self.database = database
self.implementations = implementations
self.epidata = epidata
def update(self, sensors, first_week, last_week):
"""
Compute sensor readings and store them in the database.
"""
# most recent issue
if last_week is None:
last_issue = get_most_recent_issue(self.epidata)
last_week = flu.add_epiweeks(last_issue, +1)
# connect
with self.database as database:
# update each sensor
for (name, loc) in sensors:
# update each location
for location in get_location_list(loc):
# timing
ew1 = first_week
if ew1 is None:
ew1 = database.get_most_recent_epiweek(name, location)
if ew1 is None:
# If an existing sensor reading wasn't found in the database and
# no start week was given, just assume that readings should start
# at 2010w40.
ew1 = 201040
print('%s-%s not found, starting at %d' % (name, location, ew1))
args = (name, location, ew1, last_week)
print('Updating %s-%s from %d to %d.' % args)
for test_week in flu.range_epiweeks(ew1, last_week, inclusive=True):
self.update_single(database, test_week, name, location)
def update_single(self, database, test_week, name, location):
train_week = flu.add_epiweeks(test_week, -1)
impl = self.implementations[name]
try:
value = impl(location, train_week, self.valid)
print(' %4s %5s %d -> %.3f' % (name, location, test_week, value))
except Exception as ex:
value = None
print(' failed: %4s %5s %d' % (name, location, test_week), ex)
if value is not None:
database.insert(name, location, test_week, value)
sys.stdout.flush()
def get_argument_parser():
"""Define command line arguments and usage."""
parser = argparse.ArgumentParser()
parser.add_argument(
'names',
help=(
'list of name-location pairs '
'(location can be nat/hhs/cen/state or specific location labels)'))
parser.add_argument(
'--first',
'-f',
type=int,
help='first epiweek override')
parser.add_argument(
'--last',
'-l',
type=int,
help='last epiweek override')
parser.add_argument(
'--epiweek',
'-w',
type=int,
help='epiweek override')
parser.add_argument(
'--test',
'-t',
default=False,
action='store_true',
help='dry run only')
parser.add_argument(
'--valid',
'-v',
default=False,
action='store_true',
help='do not fall back to stable wILI; require unstable wILI')
return parser
def validate_args(args):
"""Validate and return command line arguments."""
# check epiweek specification
first, last, week = args.first, args.last, args.epiweek
for ew in [first, last, week]:
if ew is not None:
flu.check_epiweek(ew)
if week is not None:
if first is not None or last is not None:
raise ValueError('`week` overrides `first` and `last`')
first = last = week
if first is not None and last is not None and first > last:
raise ValueError('`first` must not be greater than `last`')
# validate and extract name-location pairs
pair_regex = '[^-,]+-[^-,]+'
names_regex = '%s(,%s)*' % (pair_regex, pair_regex)
if not re.match(names_regex, args.names):
raise ValueError('invalid sensor specification')
return args.names, first, last, args.valid, args.test
def parse_sensor_location_pairs(names):
return [pair.split('-') for pair in names.split(',')]
def main(names, first, last, valid, test):
"""Run this script from the command line."""
sensors = parse_sensor_location_pairs(names)
SensorUpdate.new_instance(valid, test).update(sensors, first, last)
if __name__ == '__main__':
main(*validate_args(get_argument_parser().parse_args())) | return ARCH(location).predict(epiweek, valid=valid)
@staticmethod
def get_ar3(location, epiweek, valid): | random_line_split |
sensor_update.py | """
===============
=== Purpose ===
===============
Produces a signal for each flu digital surveillance source, which is then used
as a 'sensor' in the context of nowcasting through sensor fusion.
Each signal is updated over the following inclusive range of epiweeks:
- epiweek of most recently computed signal of this type
- last epiweek
The idea is to recompute the last stored value (just in case there were
changes to the underlying data source), and to compute all weeks up to, but
not including, the current week (because the current week is, by definition,
still ongoing).
The following signals are available:
- gft: Google Flu Trends
- ght: Google Health Trends
- twtr: HealthTweets
- wiki: Wikipedia access
- cdc: CDC Page Hits
- epic: Epicast 1-week-ahead point prediction
- quid: Flu lab test data
- sar3: Seasonal Autoregression (order 3) with holidays
- arch: Best-fit Archetype at 1-week-ahead
- ar3: Autoregression (order 3) with holidays
See also:
- signal_update.py
- sar3.py
- arch.py
- ar3.py
"""
# standard library
import argparse
import re
import subprocess
import sys
# third party
import numpy as np
# first party
from delphi.epidata.client.delphi_epidata import Epidata
from delphi.nowcast.sensors.arch import ARCH
from delphi.nowcast.sensors.sar3 import SAR3
from delphi.nowcast.sensors.ar3 import AR3
from delphi.nowcast.util.sensors_table import SensorsTable
import delphi.operations.secrets as secrets
from delphi.utils.epidate import EpiDate
import delphi.utils.epiweek as flu
from delphi.utils.geo.locations import Locations
def get_most_recent_issue(epidata):
# search for FluView issues within the last 10 weeks
ew2 = EpiDate.today().get_ew()
ew1 = flu.add_epiweeks(ew2, -9)
rows = epidata.check(epidata.fluview('nat', epidata.range(ew1, ew2)))
return max([row['issue'] for row in rows])
def get_location_list(loc):
"""Return the list of locations described by the given string."""
if loc == 'all':
return Locations.region_list
elif loc == 'hhs':
return Locations.hhs_list
elif loc == 'cen':
return Locations.cen_list
elif loc in Locations.region_list:
return [loc]
else:
raise UnknownLocationException('unknown location: %s' % str(loc))
class UnknownLocationException(Exception):
"""An Exception indicating that the given location is not known."""
class SignalGetter:
"""Class with static methods that implement the fetching of
different data signals. Each function returns a function that
only takes a single argument:
- weeks: an Epiweek range of weeks to fetch data for.
"""
def __init__(self):
pass
@staticmethod
def get_gft(location, epiweek, valid):
def fetch(weeks):
# The GFT model update of 2013 significantly improved the GFT signal, so
# much so that training on the old data will severely hurt the predictive
# power of the new data. To overcome this, I basically pretend that GFT
# versions before and after mid-2013 are different signals.
if weeks['to'] >= 201340:
# this is the new GFT model, so throw out data from the old model
weeks = Epidata.range(max(weeks['from'], 201331), weeks['to'])
return Epidata.gft(location, weeks)
return fetch
@staticmethod
def get_ght(location, epiweek, valid):
loc = 'US' if location == 'nat' else location
fetch = lambda weeks: Epidata.ght(secrets.api.ght, loc, weeks, '/m/0cycc')
return fetch
@staticmethod
def get_twtr(location, epiweek, valid):
def fetch(weeks):
# Impute missing weeks with 0%
# This is actually correct because twitter does not store rows with `num` =
# 0. So weeks with 0 `num` (and `percent`) are missing from the response.
res = Epidata.twitter(secrets.api.twitter, location, epiweeks=weeks)
if 'epidata' in res:
epiweeks = set([r['epiweek'] for r in res['epidata']])
first, last = 201149, weeks['to']
for ew in flu.range_epiweeks(first, last, inclusive=True):
if ew not in epiweeks:
res['epidata'].append({'epiweek': ew, 'percent': 0.})
return res
return fetch
@staticmethod
def get_wiki(location, epiweek, valid):
if location != 'nat':
raise Exception('wiki is only available for nat')
articles = [
'human_flu',
'influenza',
'influenza_a_virus',
'influenzavirus_a',
'influenzavirus_c',
'oseltamivir',
'zanamivir',
]
hours = [17, 18, 21]
# There are 21 time series (7 articles, 3 hours) of N epiweeks. Each time
# series needs to be fetched, and then the whole dataset needs to be pivoted
# so that there are N rows, each with 21 values.
fields = ['f%d' % i for i in range(len(articles) * len(hours))]
def fetch(weeks):
# a map from epiweeks to a map of field-value pairs (for each article/hour)
data = {}
# field name index
idx = 0
# download each time series individually
for article in articles:
for hour in hours:
# fetch the data from the API
res = Epidata.wiki(article, epiweeks=weeks, hours=hour)
epidata = Epidata.check(res)
field_name = fields[idx]
idx += 1
# loop over rows of the response, ordered by epiweek
for row in epidata:
ew = row['epiweek']
if ew not in data:
# make a new entry for this epiweek
data[ew] = {'epiweek': ew}
# save the value of this field
data[ew][field_name] = row['value']
# convert the map to a list matching the API epidata list
rows = []
for ew in sorted(list(data.keys())):
rows.append(data[ew])
# spoof the API response
return {
'result': 1,
'message': None,
'epidata': rows,
}
return fetch, fields
@staticmethod
def get_cdc(location, epiweek, valid):
fields = ['num2', 'num4', 'num5', 'num6', 'num7', 'num8']
def fetch(weeks):
# It appears that log-transformed counts provide a much better fit.
res = Epidata.cdc(secrets.api.cdc, weeks, location)
if 'epidata' in res:
for row in res['epidata']:
for col in fields:
row[col] = np.log(1. + row[col])
return res
return fetch, fields
@staticmethod
def get_quid(location, epiweek, valid):
fields = ['value']
def fetch(weeks):
res = Epidata.quidel(secrets.api.quidel, weeks, location)
return res
return fetch, fields
class SensorFitting:
def __init__(self):
pass
@staticmethod
def fit_loch_ness(location, epiweek, name, fields, fetch, valid):
# Helper functions
def get_weeks(epiweek):
ew1 = 200330
ew2 = epiweek
ew3 = flu.add_epiweeks(epiweek, 1)
weeks0 = Epidata.range(ew1, ew2)
weeks1 = Epidata.range(ew1, ew3)
return (ew1, ew2, ew3, weeks0, weeks1)
def extract(rows, fields):
data = {}
for row in rows:
data[row['epiweek']] = [float(row[f]) for f in fields]
return data
def get_training_set_data(data):
epiweeks = sorted(list(data.keys()))
X = [data[ew]['x'] for ew in epiweeks]
Y = [data[ew]['y'] for ew in epiweeks]
return (epiweeks, X, Y)
def get_training_set(location, epiweek, signal, valid):
ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek)
auth = secrets.api.fluview
try:
result = Epidata.fluview(location, weeks0, issues=ew2, auth=auth)
rows = Epidata.check(result)
unstable = extract(rows, ['wili'])
except Exception:
unstable = {}
rows = Epidata.check(Epidata.fluview(location, weeks0, auth=auth))
stable = extract(rows, ['wili'])
data = {}
num_dropped = 0
for ew in signal.keys():
if ew == ew3:
continue
sig = signal[ew]
if ew not in unstable:
if valid and flu.delta_epiweeks(ew, ew3) <= 5:
raise Exception('unstable wILI is not available on %d' % ew)
if ew not in stable:
num_dropped += 1
continue
wili = stable[ew]
else:
wili = unstable[ew]
data[ew] = {'x': sig, 'y': wili}
if num_dropped:
msg = 'warning: dropped %d/%d signal weeks because (w)ILI was unavailable'
print(msg % (num_dropped, len(signal)))
return get_training_set_data(data)
def dot(*Ms):
""" Simple function to compute the dot product
for any number of arguments.
"""
N = Ms[0]
for M in Ms[1:]:
N = np.dot(N, M)
return N
def get_weight(ew1, ew2):
""" This function gives the weight between two given
epiweeks based on a function that:
- drops sharply over the most recent ~3 weeks
- falls off exponentially with time
- puts extra emphasis on the past weeks at the
same time of year (seasonality)
- gives no week a weight of zero
"""
dw = flu.delta_epiweeks(ew1, ew2)
yr = 52.2
hl1, hl2, bw = yr, 1, 4
a = 0.05
#b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2
b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))
c = 2 ** -(dw / hl1)
d = 1 - 2 ** -(dw / hl2)
return (a + (1 - a) * b) * c * d
def get_periodic_bias(epiweek):
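# Encode the position within the year as a point on the unit circle, so that week 52 and week 1
# are treated as neighbours rather than as opposite ends of the year.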
weeks_per_year = 52.2
offset = flu.delta_epiweeks(200001, epiweek) % weeks_per_year
angle = np.pi * 2 * offset / weeks_per_year
return [np.sin(angle), np.cos(angle)]
def apply_model(epiweek, beta, values):
bias0 = [1.]
if beta.shape[0] > len(values) + 1:
# constant and periodic bias
bias1 = get_periodic_bias(epiweek)
obs = np.array([values + bias0 + bias1])
else:
# constant bias only
obs = np.array([values + bias0])
return float(dot(obs, beta))
def get_model(ew2, epiweeks, X, Y):
ne, nx1, nx2, ny = len(epiweeks), len(X), len(X[0]), len(Y)
if ne != nx1 or nx1 != ny:
raise Exception('length mismatch e=%d X=%d Y=%d' % (ne, nx1, ny))
weights = np.diag([get_weight(ew1, ew2) for ew1 in epiweeks])
X = np.array(X).reshape((nx1, nx2))
Y = np.array(Y).reshape((ny, 1))
bias0 = np.ones(Y.shape)
if ne >= 26 and flu.delta_epiweeks(epiweeks[0], epiweeks[-1]) >= 52:
# constant and periodic bias
bias1 = np.array([get_periodic_bias(ew) for ew in epiweeks])
X = np.hstack((X, bias0, bias1))
else:
# constant bias only
X = np.hstack((X, bias0))
XtXi = np.linalg.inv(dot(X.T, weights, X))
XtY = dot(X.T, weights, Y)
return np.dot(XtXi, XtY)
if type(fields) == str:
fields = [fields]
ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek)
rows = Epidata.check(fetch(weeks1))
signal = extract(rows, fields)
min_rows = 3 + len(fields)
if ew3 not in signal:
raise Exception('%s unavailable on %d' % (name, ew3))
if len(signal) < min_rows:
raise Exception('%s available less than %d weeks' % (name, min_rows))
epiweeks, X, Y = get_training_set(location, epiweek, signal, valid)
min_rows = min_rows - 1
if len(Y) < min_rows:
raise Exception('(w)ILI available less than %d weeks' % (min_rows))
model = get_model(ew3, epiweeks, X, Y)
value = apply_model(ew3, model, signal[ew3])
return value
class SensorGetter:
"""Class that implements different sensors. Some sensors
may take in a signal to do the fitting on, others do not.
"""
def __init__(self):
pass
@staticmethod
def get_sensor_implementations():
"""Return a map from sensor names to sensor implementations."""
return {
'cdc': SensorGetter.get_cdc,
'gft': SensorGetter.get_gft,
'ght': SensorGetter.get_ght,
'ghtj': SensorGetter.get_ghtj,
'twtr': SensorGetter.get_twtr,
'wiki': SensorGetter.get_wiki,
'epic': SensorGetter.get_epic,
'sar3': SensorGetter.get_sar3,
'arch': SensorGetter.get_arch,
'ar3': SensorGetter.get_ar3,
'quid': SensorGetter.get_quid,
}
@staticmethod
def | (location, epiweek, valid):
fc = Epidata.check(Epidata.delphi('ec', epiweek))[0]
return fc['forecast']['data'][location]['x1']['point']
@staticmethod
def get_sar3(location, epiweek, valid):
return SAR3(location).predict(epiweek, valid=valid)
@staticmethod
def get_arch(location, epiweek, valid):
return ARCH(location).predict(epiweek, valid=valid)
@staticmethod
def get_ar3(location, epiweek, valid):
return AR3(location).predict(epiweek, valid=valid)
@staticmethod
def get_ghtj(location, epiweek, valid):
loc = 'US' if location == 'nat' else location
def justinfun(location, epiweek):
# Need to set an absolute path
main_driver = '/home/automation/ghtj/ghtj.R'
args = ['Rscript', main_driver, location, str(epiweek)]
subprocess.check_call(args, shell=False)
# Need to set an absolute path
outputdir = '/home/automation/ghtj/output'
prefix = 'ghtpred'
predfilename = '%s/%s-%s-%d.txt' % (outputdir, prefix, loc, epiweek)
with open(predfilename, 'r') as f:
mypred = float(f.read())
print(mypred)
return mypred
# Making the single prediction now:
mypred = justinfun(location, epiweek)
return mypred
# sensors using the loch ness fitting
@staticmethod
def get_gft(location, epiweek, valid):
fetch = SignalGetter.get_gft(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'gft', 'num', fetch, valid)
@staticmethod
def get_ght(location, epiweek, valid):
fetch = SignalGetter.get_ght(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'ght', 'value', fetch, valid)
@staticmethod
def get_twtr(location, epiweek, valid):
fetch = SignalGetter.get_twtr(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'twtr', 'percent', fetch, valid)
@staticmethod
def get_wiki(location, epiweek, valid):
fetch, fields = SignalGetter.get_wiki(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'wiki', fields, fetch, valid)
@staticmethod
def get_cdc(location, epiweek, valid):
fetch, fields = SignalGetter.get_cdc(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'cdc', fields, fetch, valid)
@staticmethod
def get_quid(location, epiweek, valid):
fetch, fields = SignalGetter.get_quid(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'quid', fields, fetch, valid)
class SensorUpdate:
"""
Produces both real-time and retrospective sensor readings for ILI in the US.
Readings (predictions of ILI made using raw inputs) are stored in the Delphi
database and are accessible via the Epidata API.
"""
@staticmethod
def new_instance(valid, test_mode):
"""
Return a new instance under the default configuration.
If `test_mode` is True, database changes will not be committed.
If `valid` is True, be punctilious about hiding values that were not known
at the time (e.g. run the model with preliminary ILI only). Otherwise, be
more lenient (e.g. fall back to final ILI when preliminary ILI isn't
available).
"""
database = SensorsTable(test_mode=test_mode)
implementations = SensorGetter.get_sensor_implementations()
return SensorUpdate(valid, database, implementations, Epidata)
def __init__(self, valid, database, implementations, epidata):
self.valid = valid
self.database = database
self.implementations = implementations
self.epidata = epidata
def update(self, sensors, first_week, last_week):
"""
Compute sensor readings and store them in the database.
"""
# most recent issue
if last_week is None:
last_issue = get_most_recent_issue(self.epidata)
last_week = flu.add_epiweeks(last_issue, +1)
# connect
with self.database as database:
# update each sensor
for (name, loc) in sensors:
# update each location
for location in get_location_list(loc):
# timing
ew1 = first_week
if ew1 is None:
ew1 = database.get_most_recent_epiweek(name, location)
if ew1 is None:
# If an existing sensor reading wasn't found in the database and
# no start week was given, just assume that readings should start
# at 2010w40.
ew1 = 201040
print('%s-%s not found, starting at %d' % (name, location, ew1))
args = (name, location, ew1, last_week)
print('Updating %s-%s from %d to %d.' % args)
for test_week in flu.range_epiweeks(ew1, last_week, inclusive=True):
self.update_single(database, test_week, name, location)
def update_single(self, database, test_week, name, location):
train_week = flu.add_epiweeks(test_week, -1)
impl = self.implementations[name]
try:
value = impl(location, train_week, self.valid)
print(' %4s %5s %d -> %.3f' % (name, location, test_week, value))
except Exception as ex:
value = None
print(' failed: %4s %5s %d' % (name, location, test_week), ex)
if value is not None:
database.insert(name, location, test_week, value)
sys.stdout.flush()
def get_argument_parser():
"""Define command line arguments and usage."""
parser = argparse.ArgumentParser()
parser.add_argument(
'names',
help=(
'list of name-location pairs '
'(location can be nat/hhs/cen/state or specific location labels)'))
parser.add_argument(
'--first',
'-f',
type=int,
help='first epiweek override')
parser.add_argument(
'--last',
'-l',
type=int,
help='last epiweek override')
parser.add_argument(
'--epiweek',
'-w',
type=int,
help='epiweek override')
parser.add_argument(
'--test',
'-t',
default=False,
action='store_true',
help='dry run only')
parser.add_argument(
'--valid',
'-v',
default=False,
action='store_true',
help='do not fall back to stable wILI; require unstable wILI')
return parser
def validate_args(args):
"""Validate and return command line arguments."""
# check epiweek specification
first, last, week = args.first, args.last, args.epiweek
for ew in [first, last, week]:
if ew is not None:
flu.check_epiweek(ew)
if week is not None:
if first is not None or last is not None:
raise ValueError('`week` overrides `first` and `last`')
first = last = week
if first is not None and last is not None and first > last:
raise ValueError('`first` must not be greater than `last`')
# validate and extract name-location pairs
pair_regex = '[^-,]+-[^-,]+'
names_regex = '%s(,%s)*' % (pair_regex, pair_regex)
if not re.match(names_regex, args.names):
raise ValueError('invalid sensor specification')
return args.names, first, last, args.valid, args.test
def parse_sensor_location_pairs(names):
return [pair.split('-') for pair in names.split(',')]
def main(names, first, last, valid, test):
"""Run this script from the command line."""
sensors = parse_sensor_location_pairs(names)
SensorUpdate.new_instance(valid, test).update(sensors, first, last)
if __name__ == '__main__':
main(*validate_args(get_argument_parser().parse_args()))
| get_epic | identifier_name |
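The weighting scheme described in the get_weight docstring inside fit_loch_ness can be checked in isolation. Below is a minimal sketch that re-implements the same formula for a plain week-delta argument; the standalone helper name and the delta values are illustrative only and are not part of the pipeline above.
import numpy as np
def loch_ness_weight(dw, yr=52.2, hl1=52.2, hl2=1, bw=4, a=0.05):
    # seasonal bump: close to 1 for weeks at the same time of year
    b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))
    # exponential decay with a one-year half-life
    c = 2 ** -(dw / hl1)
    # sharp drop over the most recent ~3 weeks
    d = 1 - 2 ** -(dw / hl2)
    return (a + (1 - a) * b) * c * d
for dw in [1, 3, 10, 26, 52, 104]:
    print('%4d weeks back -> weight %.4f' % (dw, loch_ness_weight(dw)))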
sensor_update.py | """
===============
=== Purpose ===
===============
Produces a signal for each flu digital surveillance source, which is then used
as a 'sensor' in the context of nowcasting through sensor fusion.
Each signal is updated over the following inclusive range of epiweeks:
- epiweek of most recently computed signal of this type
- last epiweek
The idea is to recompute the last stored value (just in case there were
changes to the underlying data source), and to compute all weeks up to, but
not including, the current week (because the current week is, by definition,
still ongoing).
The following signals are available:
- gft: Google Flu Trends
- ght: Google Health Trends
- twtr: HealthTweets
- wiki: Wikipedia access
- cdc: CDC Page Hits
- epic: Epicast 1-week-ahead point prediction
- quid: Flu lab test data
- sar3: Seasonal Autoregression (order 3) with holidays
- arch: Best-fit Archetype at 1-week-ahead
- ar3: Autoregression (order 3) with holidays
See also:
- signal_update.py
- sar3.py
- arch.py
- ar3.py
"""
# standard library
import argparse
import re
import subprocess
import sys
# third party
import numpy as np
# first party
from delphi.epidata.client.delphi_epidata import Epidata
from delphi.nowcast.sensors.arch import ARCH
from delphi.nowcast.sensors.sar3 import SAR3
from delphi.nowcast.sensors.ar3 import AR3
from delphi.nowcast.util.sensors_table import SensorsTable
import delphi.operations.secrets as secrets
from delphi.utils.epidate import EpiDate
import delphi.utils.epiweek as flu
from delphi.utils.geo.locations import Locations
def get_most_recent_issue(epidata):
# search for FluView issues within the last 10 weeks
ew2 = EpiDate.today().get_ew()
ew1 = flu.add_epiweeks(ew2, -9)
rows = epidata.check(epidata.fluview('nat', epidata.range(ew1, ew2)))
return max([row['issue'] for row in rows])
def get_location_list(loc):
"""Return the list of locations described by the given string."""
if loc == 'all':
return Locations.region_list
elif loc == 'hhs':
return Locations.hhs_list
elif loc == 'cen':
return Locations.cen_list
elif loc in Locations.region_list:
return [loc]
else:
raise UnknownLocationException('unknown location: %s' % str(loc))
class UnknownLocationException(Exception):
"""An Exception indicating that the given location is not known."""
class SignalGetter:
"""Class with static methods that implement the fetching of
different data signals. Each function returns a function that
only takes a single argument:
- weeks: an Epiweek range of weeks to fetch data for.
"""
def __init__(self):
pass
@staticmethod
def get_gft(location, epiweek, valid):
def fetch(weeks):
# The GFT model update of 2013 significantly improved the GFT signal, so
# much so that training on the old data will severely hurt the predictive
# power of the new data. To overcome this, I basically pretend that GFT
# versions before and after mid-2013 are different signals.
if weeks['to'] >= 201340:
# this is the new GFT model, so throw out data from the old model
weeks = Epidata.range(max(weeks['from'], 201331), weeks['to'])
return Epidata.gft(location, weeks)
return fetch
@staticmethod
def get_ght(location, epiweek, valid):
loc = 'US' if location == 'nat' else location
fetch = lambda weeks: Epidata.ght(secrets.api.ght, loc, weeks, '/m/0cycc')
return fetch
@staticmethod
def get_twtr(location, epiweek, valid):
def fetch(weeks):
# Impute missing weeks with 0%
# This is actually correct because twitter does not store rows with `num` =
# 0. So weeks with 0 `num` (and `percent`) are missing from the response.
res = Epidata.twitter(secrets.api.twitter, location, epiweeks=weeks)
if 'epidata' in res:
epiweeks = set([r['epiweek'] for r in res['epidata']])
first, last = 201149, weeks['to']
for ew in flu.range_epiweeks(first, last, inclusive=True):
if ew not in epiweeks:
res['epidata'].append({'epiweek': ew, 'percent': 0.})
return res
return fetch
@staticmethod
def get_wiki(location, epiweek, valid):
if location != 'nat':
raise Exception('wiki is only available for nat')
articles = [
'human_flu',
'influenza',
'influenza_a_virus',
'influenzavirus_a',
'influenzavirus_c',
'oseltamivir',
'zanamivir',
]
hours = [17, 18, 21]
# There are 21 time series (7 articles, 3 hours) of N epiweeks. Each time
# series needs to be fetched, and then the whole dataset needs to be pivoted
# so that there are N rows, each with 21 values.
fields = ['f%d' % i for i in range(len(articles) * len(hours))]
def fetch(weeks):
# a map from epiweeks to a map of field-value pairs (for each article/hour)
data = {}
# field name index
idx = 0
# download each time series individually
for article in articles:
for hour in hours:
# fetch the data from the API
res = Epidata.wiki(article, epiweeks=weeks, hours=hour)
epidata = Epidata.check(res)
field_name = fields[idx]
idx += 1
# loop over rows of the response, ordered by epiweek
for row in epidata:
ew = row['epiweek']
if ew not in data:
# make a new entry for this epiweek
data[ew] = {'epiweek': ew}
# save the value of this field
data[ew][field_name] = row['value']
# convert the map to a list matching the API epidata list
rows = []
for ew in sorted(list(data.keys())):
rows.append(data[ew])
# spoof the API response
return {
'result': 1,
'message': None,
'epidata': rows,
}
return fetch, fields
@staticmethod
def get_cdc(location, epiweek, valid):
fields = ['num2', 'num4', 'num5', 'num6', 'num7', 'num8']
def fetch(weeks):
# It appears that log-transformed counts provide a much better fit.
res = Epidata.cdc(secrets.api.cdc, weeks, location)
if 'epidata' in res:
for row in res['epidata']:
for col in fields:
row[col] = np.log(1. + row[col])
return res
return fetch, fields
@staticmethod
def get_quid(location, epiweek, valid):
fields = ['value']
def fetch(weeks):
res = Epidata.quidel(secrets.api.quidel, weeks, location)
return res
return fetch, fields
class SensorFitting:
def __init__(self):
pass
@staticmethod
def fit_loch_ness(location, epiweek, name, fields, fetch, valid):
# Helper functions
def get_weeks(epiweek):
ew1 = 200330
ew2 = epiweek
ew3 = flu.add_epiweeks(epiweek, 1)
weeks0 = Epidata.range(ew1, ew2)
weeks1 = Epidata.range(ew1, ew3)
return (ew1, ew2, ew3, weeks0, weeks1)
def extract(rows, fields):
data = {}
for row in rows:
data[row['epiweek']] = [float(row[f]) for f in fields]
return data
def get_training_set_data(data):
epiweeks = sorted(list(data.keys()))
X = [data[ew]['x'] for ew in epiweeks]
Y = [data[ew]['y'] for ew in epiweeks]
return (epiweeks, X, Y)
def get_training_set(location, epiweek, signal, valid):
ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek)
auth = secrets.api.fluview
try:
result = Epidata.fluview(location, weeks0, issues=ew2, auth=auth)
rows = Epidata.check(result)
unstable = extract(rows, ['wili'])
except Exception:
unstable = {}
rows = Epidata.check(Epidata.fluview(location, weeks0, auth=auth))
stable = extract(rows, ['wili'])
data = {}
num_dropped = 0
for ew in signal.keys():
if ew == ew3:
continue
sig = signal[ew]
if ew not in unstable:
if valid and flu.delta_epiweeks(ew, ew3) <= 5:
raise Exception('unstable wILI is not available on %d' % ew)
if ew not in stable:
num_dropped += 1
continue
wili = stable[ew]
else:
wili = unstable[ew]
data[ew] = {'x': sig, 'y': wili}
if num_dropped:
msg = 'warning: dropped %d/%d signal weeks because (w)ILI was unavailable'
print(msg % (num_dropped, len(signal)))
return get_training_set_data(data)
def dot(*Ms):
""" Simple function to compute the dot product
for any number of arguments.
"""
N = Ms[0]
for M in Ms[1:]:
N = np.dot(N, M)
return N
def get_weight(ew1, ew2):
""" This function gives the weight between two given
epiweeks based on a function that:
- drops sharply over the most recent ~3 weeks
- falls off exponentially with time
- puts extra emphasis on the past weeks at the
same time of year (seasonality)
- gives no week a weight of zero
"""
dw = flu.delta_epiweeks(ew1, ew2)
yr = 52.2
hl1, hl2, bw = yr, 1, 4
a = 0.05
#b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2
b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))
c = 2 ** -(dw / hl1)
d = 1 - 2 ** -(dw / hl2)
return (a + (1 - a) * b) * c * d
def get_periodic_bias(epiweek):
weeks_per_year = 52.2
offset = flu.delta_epiweeks(200001, epiweek) % weeks_per_year
angle = np.pi * 2 * offset / weeks_per_year
return [np.sin(angle), np.cos(angle)]
def apply_model(epiweek, beta, values):
bias0 = [1.]
if beta.shape[0] > len(values) + 1:
# constant and periodic bias
bias1 = get_periodic_bias(epiweek)
obs = np.array([values + bias0 + bias1])
else:
# constant bias only
obs = np.array([values + bias0])
return float(dot(obs, beta))
def get_model(ew2, epiweeks, X, Y):
ne, nx1, nx2, ny = len(epiweeks), len(X), len(X[0]), len(Y)
if ne != nx1 or nx1 != ny:
raise Exception('length mismatch e=%d X=%d Y=%d' % (ne, nx1, ny))
weights = np.diag([get_weight(ew1, ew2) for ew1 in epiweeks])
X = np.array(X).reshape((nx1, nx2))
Y = np.array(Y).reshape((ny, 1))
bias0 = np.ones(Y.shape)
if ne >= 26 and flu.delta_epiweeks(epiweeks[0], epiweeks[-1]) >= 52:
# constant and periodic bias
bias1 = np.array([get_periodic_bias(ew) for ew in epiweeks])
X = np.hstack((X, bias0, bias1))
else:
# constant bias only
X = np.hstack((X, bias0))
XtXi = np.linalg.inv(dot(X.T, weights, X))
XtY = dot(X.T, weights, Y)
return np.dot(XtXi, XtY)
if type(fields) == str:
fields = [fields]
ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek)
rows = Epidata.check(fetch(weeks1))
signal = extract(rows, fields)
min_rows = 3 + len(fields)
if ew3 not in signal:
raise Exception('%s unavailable on %d' % (name, ew3))
if len(signal) < min_rows:
raise Exception('%s available less than %d weeks' % (name, min_rows))
epiweeks, X, Y = get_training_set(location, epiweek, signal, valid)
min_rows = min_rows - 1
if len(Y) < min_rows:
raise Exception('(w)ILI available less than %d weeks' % (min_rows))
model = get_model(ew3, epiweeks, X, Y)
value = apply_model(ew3, model, signal[ew3])
return value
class SensorGetter:
"""Class that implements different sensors. Some sensors
may take in a signal to do the fitting on, others do not.
"""
def __init__(self):
pass
@staticmethod
def get_sensor_implementations():
"""Return a map from sensor names to sensor implementations."""
return {
'cdc': SensorGetter.get_cdc,
'gft': SensorGetter.get_gft,
'ght': SensorGetter.get_ght,
'ghtj': SensorGetter.get_ghtj,
'twtr': SensorGetter.get_twtr,
'wiki': SensorGetter.get_wiki,
'epic': SensorGetter.get_epic,
'sar3': SensorGetter.get_sar3,
'arch': SensorGetter.get_arch,
'ar3': SensorGetter.get_ar3,
'quid': SensorGetter.get_quid,
}
@staticmethod
def get_epic(location, epiweek, valid):
fc = Epidata.check(Epidata.delphi('ec', epiweek))[0]
return fc['forecast']['data'][location]['x1']['point']
@staticmethod
def get_sar3(location, epiweek, valid):
return SAR3(location).predict(epiweek, valid=valid)
@staticmethod
def get_arch(location, epiweek, valid):
return ARCH(location).predict(epiweek, valid=valid)
@staticmethod
def get_ar3(location, epiweek, valid):
return AR3(location).predict(epiweek, valid=valid)
@staticmethod
def get_ghtj(location, epiweek, valid):
loc = 'US' if location == 'nat' else location
def justinfun(location, epiweek):
# Need to set an absolute path
main_driver = '/home/automation/ghtj/ghtj.R'
args = ['Rscript', main_driver, location, str(epiweek)]
subprocess.check_call(args, shell=False)
# Need to set an absolute path
outputdir = '/home/automation/ghtj/output'
prefix = 'ghtpred'
predfilename = '%s/%s-%s-%d.txt' % (outputdir, prefix, loc, epiweek)
with open(predfilename, 'r') as f:
mypred = float(f.read())
print(mypred)
return mypred
# Making the single prediction now:
mypred = justinfun(location, epiweek)
return mypred
# sensors using the loch ness fitting
@staticmethod
def get_gft(location, epiweek, valid):
fetch = SignalGetter.get_gft(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'gft', 'num', fetch, valid)
@staticmethod
def get_ght(location, epiweek, valid):
fetch = SignalGetter.get_ght(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'ght', 'value', fetch, valid)
@staticmethod
def get_twtr(location, epiweek, valid):
fetch = SignalGetter.get_twtr(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'twtr', 'percent', fetch, valid)
@staticmethod
def get_wiki(location, epiweek, valid):
fetch, fields = SignalGetter.get_wiki(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'wiki', fields, fetch, valid)
@staticmethod
def get_cdc(location, epiweek, valid):
fetch, fields = SignalGetter.get_cdc(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'cdc', fields, fetch, valid)
@staticmethod
def get_quid(location, epiweek, valid):
fetch, fields = SignalGetter.get_quid(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'quid', fields, fetch, valid)
class SensorUpdate:
"""
Produces both real-time and retrospective sensor readings for ILI in the US.
Readings (predictions of ILI made using raw inputs) are stored in the Delphi
database and are accessible via the Epidata API.
"""
@staticmethod
def new_instance(valid, test_mode):
"""
Return a new instance under the default configuration.
If `test_mode` is True, database changes will not be committed.
If `valid` is True, be punctilious about hiding values that were not known
at the time (e.g. run the model with preliminary ILI only). Otherwise, be
more lenient (e.g. fall back to final ILI when preliminary ILI isn't
available).
"""
database = SensorsTable(test_mode=test_mode)
implementations = SensorGetter.get_sensor_implementations()
return SensorUpdate(valid, database, implementations, Epidata)
def __init__(self, valid, database, implementations, epidata):
self.valid = valid
self.database = database
self.implementations = implementations
self.epidata = epidata
def update(self, sensors, first_week, last_week):
"""
Compute sensor readings and store them in the database.
"""
# most recent issue
if last_week is None:
last_issue = get_most_recent_issue(self.epidata)
last_week = flu.add_epiweeks(last_issue, +1)
# connect
with self.database as database:
# update each sensor
for (name, loc) in sensors:
# update each location
for location in get_location_list(loc):
# timing
ew1 = first_week
if ew1 is None:
ew1 = database.get_most_recent_epiweek(name, location)
if ew1 is None:
# If an existing sensor reading wasn't found in the database and
# no start week was given, just assume that readings should start
# at 2010w40.
ew1 = 201040
print('%s-%s not found, starting at %d' % (name, location, ew1))
args = (name, location, ew1, last_week)
print('Updating %s-%s from %d to %d.' % args)
for test_week in flu.range_epiweeks(ew1, last_week, inclusive=True):
|
def update_single(self, database, test_week, name, location):
train_week = flu.add_epiweeks(test_week, -1)
impl = self.implementations[name]
try:
value = impl(location, train_week, self.valid)
print(' %4s %5s %d -> %.3f' % (name, location, test_week, value))
except Exception as ex:
value = None
print(' failed: %4s %5s %d' % (name, location, test_week), ex)
if value is not None:
database.insert(name, location, test_week, value)
sys.stdout.flush()
def get_argument_parser():
"""Define command line arguments and usage."""
parser = argparse.ArgumentParser()
parser.add_argument(
'names',
help=(
'list of name-location pairs '
'(location can be nat/hhs/cen/state or specific location labels)'))
parser.add_argument(
'--first',
'-f',
type=int,
help='first epiweek override')
parser.add_argument(
'--last',
'-l',
type=int,
help='last epiweek override')
parser.add_argument(
'--epiweek',
'-w',
type=int,
help='epiweek override')
parser.add_argument(
'--test',
'-t',
default=False,
action='store_true',
help='dry run only')
parser.add_argument(
'--valid',
'-v',
default=False,
action='store_true',
help='do not fall back to stable wILI; require unstable wILI')
return parser
def validate_args(args):
"""Validate and return command line arguments."""
# check epiweek specification
first, last, week = args.first, args.last, args.epiweek
for ew in [first, last, week]:
if ew is not None:
flu.check_epiweek(ew)
if week is not None:
if first is not None or last is not None:
raise ValueError('`week` overrides `first` and `last`')
first = last = week
if first is not None and last is not None and first > last:
raise ValueError('`first` must not be greater than `last`')
# validate and extract name-location pairs
pair_regex = '[^-,]+-[^-,]+'
names_regex = '%s(,%s)*' % (pair_regex, pair_regex)
if not re.match(names_regex, args.names):
raise ValueError('invalid sensor specification')
return args.names, first, last, args.valid, args.test
def parse_sensor_location_pairs(names):
return [pair.split('-') for pair in names.split(',')]
def main(names, first, last, valid, test):
"""Run this script from the command line."""
sensors = parse_sensor_location_pairs(names)
SensorUpdate.new_instance(valid, test).update(sensors, first, last)
if __name__ == '__main__':
main(*validate_args(get_argument_parser().parse_args()))
| self.update_single(database, test_week, name, location) | conditional_block |
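The model fit inside fit_loch_ness is ordinary weighted least squares over the signal columns plus a constant (and, for long histories, periodic) bias column. The sketch below reproduces that computation on synthetic data; the synthetic signal, the weights, and the use of a pseudo-inverse are assumptions made for the example rather than the pipeline's exact code.
import numpy as np
rng = np.random.RandomState(0)
n = 30
X = rng.rand(n, 2)                      # two synthetic signal columns
beta_true = np.array([[1.5], [-0.7], [0.3]])
Xb = np.hstack((X, np.ones((n, 1))))    # append the constant bias column
Y = Xb @ beta_true + 0.01 * rng.randn(n, 1)
W = np.diag(np.linspace(0.1, 1.0, n))   # synthetic per-week weights
# weighted least squares: beta = (X' W X)^-1 X' W Y
beta_hat = np.linalg.pinv(Xb.T @ W @ Xb) @ (Xb.T @ W @ Y)
print(beta_hat.ravel())                 # should be close to beta_true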
sensor_update.py | """
===============
=== Purpose ===
===============
Produces a signal for each flu digital surveillance source, which is then used
as a 'sensor' in the context of nowcasting through sensor fusion.
Each signal is updated over the following inclusive range of epiweeks:
- epiweek of most recently computed signal of this type
- last epiweek
The idea is to recompute the last stored value (just in case there were
changes to the underlying data source), and to compute all weeks up to, but
not including, the current week (because the current week is, by definition,
still ongoing).
The following signals are available:
- gft: Google Flu Trends
- ght: Google Health Trends
- twtr: HealthTweets
- wiki: Wikipedia access
- cdc: CDC Page Hits
- epic: Epicast 1-week-ahead point prediction
- quid: Flu lab test data
- sar3: Seasonal Autoregression (order 3) with holidays
- arch: Best-fit Archetype at 1-week-ahead
- ar3: Autoregression (order 3) with holidays
See also:
- signal_update.py
- sar3.py
- arch.py
- ar3.py
"""
# standard library
import argparse
import re
import subprocess
import sys
# third party
import numpy as np
# first party
from delphi.epidata.client.delphi_epidata import Epidata
from delphi.nowcast.sensors.arch import ARCH
from delphi.nowcast.sensors.sar3 import SAR3
from delphi.nowcast.sensors.ar3 import AR3
from delphi.nowcast.util.sensors_table import SensorsTable
import delphi.operations.secrets as secrets
from delphi.utils.epidate import EpiDate
import delphi.utils.epiweek as flu
from delphi.utils.geo.locations import Locations
def get_most_recent_issue(epidata):
# search for FluView issues within the last 10 weeks
ew2 = EpiDate.today().get_ew()
ew1 = flu.add_epiweeks(ew2, -9)
rows = epidata.check(epidata.fluview('nat', epidata.range(ew1, ew2)))
return max([row['issue'] for row in rows])
def get_location_list(loc):
"""Return the list of locations described by the given string."""
if loc == 'all':
return Locations.region_list
elif loc == 'hhs':
return Locations.hhs_list
elif loc == 'cen':
return Locations.cen_list
elif loc in Locations.region_list:
return [loc]
else:
raise UnknownLocationException('unknown location: %s' % str(loc))
class UnknownLocationException(Exception):
"""An Exception indicating that the given location is not known."""
class SignalGetter:
"""Class with static methods that implement the fetching of
different data signals. Each function returns a function that
only takes a single argument:
- weeks: an Epiweek range of weeks to fetch data for.
"""
def __init__(self):
pass
@staticmethod
def get_gft(location, epiweek, valid):
def fetch(weeks):
# The GFT model update of 2013 significantly improved the GFT signal, so
# much so that training on the old data will severely hurt the predictive
# power of the new data. To overcome this, I basically pretend that GFT
# versions before and after mid-2013 are different signals.
if weeks['to'] >= 201340:
# this is the new GFT model, so throw out data from the old model
weeks = Epidata.range(max(weeks['from'], 201331), weeks['to'])
return Epidata.gft(location, weeks)
return fetch
@staticmethod
def get_ght(location, epiweek, valid):
loc = 'US' if location == 'nat' else location
fetch = lambda weeks: Epidata.ght(secrets.api.ght, loc, weeks, '/m/0cycc')
return fetch
@staticmethod
def get_twtr(location, epiweek, valid):
def fetch(weeks):
# Impute missing weeks with 0%
# This is actually correct because twitter does not store rows with `num` =
# 0. So weeks with 0 `num` (and `percent`) are missing from the response.
res = Epidata.twitter(secrets.api.twitter, location, epiweeks=weeks)
if 'epidata' in res:
epiweeks = set([r['epiweek'] for r in res['epidata']])
first, last = 201149, weeks['to']
for ew in flu.range_epiweeks(first, last, inclusive=True):
if ew not in epiweeks:
res['epidata'].append({'epiweek': ew, 'percent': 0.})
return res
return fetch
@staticmethod
def get_wiki(location, epiweek, valid):
if location != 'nat':
raise Exception('wiki is only available for nat')
articles = [
'human_flu',
'influenza',
'influenza_a_virus',
'influenzavirus_a',
'influenzavirus_c',
'oseltamivir',
'zanamivir',
]
hours = [17, 18, 21]
# There are 21 time series (7 articles, 3 hours) of N epiweeks. Each time
# series needs to be fetched, and then the whole dataset needs to be pivoted
# so that there are N rows, each with 21 values.
fields = ['f%d' % i for i in range(len(articles) * len(hours))]
def fetch(weeks):
# a map from epiweeks to a map of field-value pairs (for each article/hour)
data = {}
# field name index
idx = 0
# download each time series individually
for article in articles:
for hour in hours:
# fetch the data from the API
res = Epidata.wiki(article, epiweeks=weeks, hours=hour)
epidata = Epidata.check(res)
field_name = fields[idx]
idx += 1
# loop over rows of the response, ordered by epiweek
for row in epidata:
ew = row['epiweek']
if ew not in data:
# make a new entry for this epiweek
data[ew] = {'epiweek': ew}
# save the value of this field
data[ew][field_name] = row['value']
# convert the map to a list matching the API epidata list
rows = []
for ew in sorted(list(data.keys())):
rows.append(data[ew])
# spoof the API response
return {
'result': 1,
'message': None,
'epidata': rows,
}
return fetch, fields
@staticmethod
def get_cdc(location, epiweek, valid):
fields = ['num2', 'num4', 'num5', 'num6', 'num7', 'num8']
def fetch(weeks):
# It appears that log-transformed counts provide a much better fit.
res = Epidata.cdc(secrets.api.cdc, weeks, location)
if 'epidata' in res:
for row in res['epidata']:
for col in fields:
row[col] = np.log(1. + row[col])
return res
return fetch, fields
@staticmethod
def get_quid(location, epiweek, valid):
fields = ['value']
def fetch(weeks):
res = Epidata.quidel(secrets.api.quidel, weeks, location)
return res
return fetch, fields
class SensorFitting:
def __init__(self):
pass
@staticmethod
def fit_loch_ness(location, epiweek, name, fields, fetch, valid):
# Helper functions
def get_weeks(epiweek):
ew1 = 200330
ew2 = epiweek
ew3 = flu.add_epiweeks(epiweek, 1)
weeks0 = Epidata.range(ew1, ew2)
weeks1 = Epidata.range(ew1, ew3)
return (ew1, ew2, ew3, weeks0, weeks1)
def extract(rows, fields):
data = {}
for row in rows:
data[row['epiweek']] = [float(row[f]) for f in fields]
return data
def get_training_set_data(data):
epiweeks = sorted(list(data.keys()))
X = [data[ew]['x'] for ew in epiweeks]
Y = [data[ew]['y'] for ew in epiweeks]
return (epiweeks, X, Y)
def get_training_set(location, epiweek, signal, valid):
ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek)
auth = secrets.api.fluview
try:
result = Epidata.fluview(location, weeks0, issues=ew2, auth=auth)
rows = Epidata.check(result)
unstable = extract(rows, ['wili'])
except Exception:
unstable = {}
rows = Epidata.check(Epidata.fluview(location, weeks0, auth=auth))
stable = extract(rows, ['wili'])
data = {}
num_dropped = 0
for ew in signal.keys():
if ew == ew3:
continue
sig = signal[ew]
if ew not in unstable:
if valid and flu.delta_epiweeks(ew, ew3) <= 5:
raise Exception('unstable wILI is not available on %d' % ew)
if ew not in stable:
num_dropped += 1
continue
wili = stable[ew]
else:
wili = unstable[ew]
data[ew] = {'x': sig, 'y': wili}
if num_dropped:
msg = 'warning: dropped %d/%d signal weeks because (w)ILI was unavailable'
print(msg % (num_dropped, len(signal)))
return get_training_set_data(data)
def dot(*Ms):
""" Simple function to compute the dot product
for any number of arguments.
"""
N = Ms[0]
for M in Ms[1:]:
N = np.dot(N, M)
return N
def get_weight(ew1, ew2):
""" This function gives the weight between two given
epiweeks based on a function that:
- drops sharply over the most recent ~3 weeks
- falls off exponentially with time
- puts extra emphasis on the past weeks at the
same time of year (seasonality)
- gives no week a weight of zero
"""
dw = flu.delta_epiweeks(ew1, ew2)
yr = 52.2
hl1, hl2, bw = yr, 1, 4
a = 0.05
#b = (np.cos(2 * np.pi * (dw / yr)) + 1) / 2
b = np.exp(-((min(dw % yr, yr - dw % yr) / bw) ** 2))
c = 2 ** -(dw / hl1)
d = 1 - 2 ** -(dw / hl2)
return (a + (1 - a) * b) * c * d
def get_periodic_bias(epiweek):
weeks_per_year = 52.2
offset = flu.delta_epiweeks(200001, epiweek) % weeks_per_year
angle = np.pi * 2 * offset / weeks_per_year
return [np.sin(angle), np.cos(angle)]
def apply_model(epiweek, beta, values):
bias0 = [1.]
if beta.shape[0] > len(values) + 1:
# constant and periodic bias
bias1 = get_periodic_bias(epiweek)
obs = np.array([values + bias0 + bias1])
else:
# constant bias only
obs = np.array([values + bias0])
return float(dot(obs, beta))
def get_model(ew2, epiweeks, X, Y):
ne, nx1, nx2, ny = len(epiweeks), len(X), len(X[0]), len(Y)
if ne != nx1 or nx1 != ny:
raise Exception('length mismatch e=%d X=%d Y=%d' % (ne, nx1, ny))
weights = np.diag([get_weight(ew1, ew2) for ew1 in epiweeks])
X = np.array(X).reshape((nx1, nx2))
Y = np.array(Y).reshape((ny, 1))
bias0 = np.ones(Y.shape)
if ne >= 26 and flu.delta_epiweeks(epiweeks[0], epiweeks[-1]) >= 52:
# constant and periodic bias
bias1 = np.array([get_periodic_bias(ew) for ew in epiweeks])
X = np.hstack((X, bias0, bias1))
else:
# constant bias only
X = np.hstack((X, bias0))
XtXi = np.linalg.inv(dot(X.T, weights, X))
XtY = dot(X.T, weights, Y)
return np.dot(XtXi, XtY)
if type(fields) == str:
fields = [fields]
ew1, ew2, ew3, weeks0, weeks1 = get_weeks(epiweek)
rows = Epidata.check(fetch(weeks1))
signal = extract(rows, fields)
min_rows = 3 + len(fields)
if ew3 not in signal:
raise Exception('%s unavailable on %d' % (name, ew3))
if len(signal) < min_rows:
raise Exception('%s available less than %d weeks' % (name, min_rows))
epiweeks, X, Y = get_training_set(location, epiweek, signal, valid)
min_rows = min_rows - 1
if len(Y) < min_rows:
raise Exception('(w)ILI available less than %d weeks' % (min_rows))
model = get_model(ew3, epiweeks, X, Y)
value = apply_model(ew3, model, signal[ew3])
return value
class SensorGetter:
"""Class that implements different sensors. Some sensors
may take in a signal to do the fitting on, others do not.
"""
def __init__(self):
pass
@staticmethod
def get_sensor_implementations():
"""Return a map from sensor names to sensor implementations."""
return {
'cdc': SensorGetter.get_cdc,
'gft': SensorGetter.get_gft,
'ght': SensorGetter.get_ght,
'ghtj': SensorGetter.get_ghtj,
'twtr': SensorGetter.get_twtr,
'wiki': SensorGetter.get_wiki,
'epic': SensorGetter.get_epic,
'sar3': SensorGetter.get_sar3,
'arch': SensorGetter.get_arch,
'ar3': SensorGetter.get_ar3,
'quid': SensorGetter.get_quid,
}
@staticmethod
def get_epic(location, epiweek, valid):
fc = Epidata.check(Epidata.delphi('ec', epiweek))[0]
return fc['forecast']['data'][location]['x1']['point']
@staticmethod
def get_sar3(location, epiweek, valid):
return SAR3(location).predict(epiweek, valid=valid)
@staticmethod
def get_arch(location, epiweek, valid):
return ARCH(location).predict(epiweek, valid=valid)
@staticmethod
def get_ar3(location, epiweek, valid):
return AR3(location).predict(epiweek, valid=valid)
@staticmethod
def get_ghtj(location, epiweek, valid):
loc = 'US' if location == 'nat' else location
def justinfun(location, epiweek):
# Need to set an absolute path
main_driver = '/home/automation/ghtj/ghtj.R'
args = ['Rscript', main_driver, location, str(epiweek)]
subprocess.check_call(args, shell=False)
# Need to set an absolute path
outputdir = '/home/automation/ghtj/output'
prefix = 'ghtpred'
predfilename = '%s/%s-%s-%d.txt' % (outputdir, prefix, loc, epiweek)
with open(predfilename, 'r') as f:
mypred = float(f.read())
print(mypred)
return mypred
# Making the single prediction now:
mypred = justinfun(location, epiweek)
return mypred
# sensors using the loch ness fitting
@staticmethod
def get_gft(location, epiweek, valid):
fetch = SignalGetter.get_gft(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'gft', 'num', fetch, valid)
@staticmethod
def get_ght(location, epiweek, valid):
fetch = SignalGetter.get_ght(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'ght', 'value', fetch, valid)
@staticmethod
def get_twtr(location, epiweek, valid):
fetch = SignalGetter.get_twtr(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'twtr', 'percent', fetch, valid)
@staticmethod
def get_wiki(location, epiweek, valid):
fetch, fields = SignalGetter.get_wiki(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'wiki', fields, fetch, valid)
@staticmethod
def get_cdc(location, epiweek, valid):
fetch, fields = SignalGetter.get_cdc(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'cdc', fields, fetch, valid)
@staticmethod
def get_quid(location, epiweek, valid):
fetch, fields = SignalGetter.get_quid(location, epiweek, valid)
return SensorFitting.fit_loch_ness(location, epiweek, 'quid', fields, fetch, valid)
class SensorUpdate:
"""
Produces both real-time and retrospective sensor readings for ILI in the US.
Readings (predictions of ILI made using raw inputs) are stored in the Delphi
database and are accessible via the Epidata API.
"""
@staticmethod
def new_instance(valid, test_mode):
|
def __init__(self, valid, database, implementations, epidata):
self.valid = valid
self.database = database
self.implementations = implementations
self.epidata = epidata
def update(self, sensors, first_week, last_week):
"""
Compute sensor readings and store them in the database.
"""
# most recent issue
if last_week is None:
last_issue = get_most_recent_issue(self.epidata)
last_week = flu.add_epiweeks(last_issue, +1)
# connect
with self.database as database:
# update each sensor
for (name, loc) in sensors:
# update each location
for location in get_location_list(loc):
# timing
ew1 = first_week
if ew1 is None:
ew1 = database.get_most_recent_epiweek(name, location)
if ew1 is None:
# If an existing sensor reading wasn't found in the database and
# no start week was given, just assume that readings should start
# at 2010w40.
ew1 = 201040
print('%s-%s not found, starting at %d' % (name, location, ew1))
args = (name, location, ew1, last_week)
print('Updating %s-%s from %d to %d.' % args)
for test_week in flu.range_epiweeks(ew1, last_week, inclusive=True):
self.update_single(database, test_week, name, location)
def update_single(self, database, test_week, name, location):
train_week = flu.add_epiweeks(test_week, -1)
impl = self.implementations[name]
try:
value = impl(location, train_week, self.valid)
print(' %4s %5s %d -> %.3f' % (name, location, test_week, value))
except Exception as ex:
value = None
print(' failed: %4s %5s %d' % (name, location, test_week), ex)
if value is not None:
database.insert(name, location, test_week, value)
sys.stdout.flush()
def get_argument_parser():
"""Define command line arguments and usage."""
parser = argparse.ArgumentParser()
parser.add_argument(
'names',
help=(
'list of name-location pairs '
'(location can be nat/hhs/cen/state or specific location labels)'))
parser.add_argument(
'--first',
'-f',
type=int,
help='first epiweek override')
parser.add_argument(
'--last',
'-l',
type=int,
help='last epiweek override')
parser.add_argument(
'--epiweek',
'-w',
type=int,
help='epiweek override')
parser.add_argument(
'--test',
'-t',
default=False,
action='store_true',
help='dry run only')
parser.add_argument(
'--valid',
'-v',
default=False,
action='store_true',
help='do not fall back to stable wILI; require unstable wILI')
return parser
def validate_args(args):
"""Validate and return command line arguments."""
# check epiweek specification
first, last, week = args.first, args.last, args.epiweek
for ew in [first, last, week]:
if ew is not None:
flu.check_epiweek(ew)
if week is not None:
if first is not None or last is not None:
raise ValueError('`week` overrides `first` and `last`')
first = last = week
if first is not None and last is not None and first > last:
raise ValueError('`first` must not be greater than `last`')
# validate and extract name-location pairs
pair_regex = '[^-,]+-[^-,]+'
names_regex = '%s(,%s)*' % (pair_regex, pair_regex)
if not re.match(names_regex, args.names):
raise ValueError('invalid sensor specification')
return args.names, first, last, args.valid, args.test
def parse_sensor_location_pairs(names):
return [pair.split('-') for pair in names.split(',')]
def main(names, first, last, valid, test):
"""Run this script from the command line."""
sensors = parse_sensor_location_pairs(names)
SensorUpdate.new_instance(valid, test).update(sensors, first, last)
if __name__ == '__main__':
main(*validate_args(get_argument_parser().parse_args()))
| """
Return a new instance under the default configuration.
If `test_mode` is True, database changes will not be committed.
If `valid` is True, be punctilious about hiding values that were not known
at the time (e.g. run the model with preliminary ILI only). Otherwise, be
more lenient (e.g. fall back to final ILI when preliminary ILI isn't
available).
"""
database = SensorsTable(test_mode=test_mode)
implementations = SensorGetter.get_sensor_implementations()
return SensorUpdate(valid, database, implementations, Epidata) | identifier_body |
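The names argument accepted on the command line is a comma-separated list of sensor-location pairs, validated by the regex in validate_args and then split into pairs. A short sketch of that parsing step is shown below; the sensor names and locations in the example input are made up.
import re
pair_regex = '[^-,]+-[^-,]+'
names_regex = '%s(,%s)*' % (pair_regex, pair_regex)
names = 'wiki-nat,cdc-hhs1,twtr-ca'     # hypothetical input
if not re.match(names_regex, names):
    raise ValueError('invalid sensor specification')
print([pair.split('-') for pair in names.split(',')])
# [['wiki', 'nat'], ['cdc', 'hhs1'], ['twtr', 'ca']]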
图像数据处理.py | Image data processing
Store the input data in a unified way using the TFRecord format
message Example {
Features features = 1;
};
message Features {
map<string, Feature> feature = 1;
};
message Feature{
oneof kind {
ByteList bytes_list = 1;
FloatList float_list = 2;
Int64List int64_list = 3;
}
};
Writing data into a TFRecord file
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
# helper to create an int64 Feature
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# helper to create a bytes Feature
def _bytes_features(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# load the MNIST data
mnist = input_data.read_data_sets('/path', dtype=tf.uint8, one_hot=True)
images = mnist.train.images
labels = mnist.train.labels
pixels = images.shape[1]
num_examples = mnist.train.num_examples
filename = '/path/to/output.tfrecords'
# create a writer for the TFRecord file
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
images_raw = images[index].tostring()# serialize each image to a byte string
# convert one example into an Example protocol buffer and write it out
example | ducer(['/path/to/output.tfrecords'])
# read one example from the queue
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(# parse a single example
serialized_example,
features={
'images_raw': tf.FixedLenFeature([], tf.string),# parsed as a tensor
'pixels': tf.FixedLenFeature([], tf.int64),
'label': tf.FixedLenFeature([], tf.int64),
})
image = tf.decode_raw(features['images_raw'], tf.uint8)# decode the byte string into a pixel array
label = tf.cast(features['label'], tf.int32)
pixels = tf.cast(features['pixels'], tf.int32)
sess = tf.Session()
# start multiple threads to handle the input data
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# each run reads one example from the TFRecord file; after all examples have been
# read, the program starts again from the beginning
for i in range(10):
print(sess.run([image, label, pixels]))
Image encoding and decoding
import matplotlib.pyplot as plt
import tensorflow as tf
image_raw_data = tf.gfile.FastGFile('/path', 'rb').read()# read the raw image as a byte string
with tf.Session() as sess:# decode the image; JPEG here, PNG is also supported
img_data = tf.image.decode_jpeg(image_raw_data)# the result is a tensor
print(img_data.eval())
plt.imshow(img_data.eval())
plt.show()
encode_image = tf.image.encode_jpeg(img_data)# encode the tensor back to JPEG and write it to a file
with tf.gfile.FastGFile('/path/to/output', 'wb') as f:
f.write(encode_image.eval())
Resizing images
image_raw_data = tf.gfile.FastGFile('/path', 'rb').read()
image_data = tf.image.decode_jpeg(image_raw_data)# decode the image
image_data = tf.image.convert_image_dtype(image_data, dtype=tf.float32)# convert pixels to float
resized = tf.image.resize_images(image_data, [300, 300], method=0)
croped = tf.image.resize_image_with_crop_or_pad(image_data, 400, 400)# crop to the target size if the image is larger, otherwise pad with zeros
central = tf.image.central_crop(image_data, 0.5)# crop the central fraction of the image
# various flips
fliped = tf.image.flip_up_down(image_data)
fliped = tf.image.random_flip_up_down(image_data)
fliped = tf.image.flip_left_right(image_data)
fliped = tf.image.random_flip_left_right(image_data)
transposed = tf.image.transpose_image(image_data)
adjusted = tf.image.adjust_brightness(image_data, -0.5)# adjust brightness
adjusted = tf.clip_by_value(adjusted, 0.0, 1.0)# clamp brightness into the valid range
adjusted = tf.image.random_brightness(image_data, random_range)# random_range: user-chosen maximum delta
adjusted = tf.image.adjust_contrast(image_data, 5)# adjust contrast
adjusted = tf.image.adjust_hue(image_data, 0.3)# adjust hue
adjusted = tf.image.adjust_saturation(image_data, 5)# adjust saturation
adjusted = tf.image.per_image_standardization(image_data)# scale to zero mean and unit variance
Adding bounding boxes to an image
batched = tf.expand_dims(
tf.image.convert_image_dtype(image_data, tf.float32), 0)
boxes = tf.constant([[[0.05, 0.05, 0.9, 0.7], [0.35, 0.47, 0.5, 0.56]]])# add two bounding boxes at once
# box coordinates are relative: [y_min, x_min, y_max, x_max]
boxed = tf.image.draw_bounding_boxes(batched, boxes)
Complete image preprocessing example
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
# randomly adjust the colors of an image; several orderings of brightness, contrast,
# saturation and hue adjustments are defined, since the order used can affect training
def distort_color(image, color_ordering=0):
if color_ordering == 0:
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 1:
image = tf.image.random_brightness(image, max_delta=32. / 255.)
image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
image = tf.image.random_hue(image, max_delta=0.2)
elif color_ordering == 2:
pass  # other orderings omitted
return tf.clip_by_value(image, 0.0, 1.0)# clamp every pixel value into [0.0, 1.0]
# preprocess one image for training
def preprocess_for_train(image, height, width, bbox):
if bbox is None:# if no bounding box is given, treat the whole image as the area of interest
bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
if image.dtype != tf.float32:# convert the pixel value type
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(tf.shape(image), bounding_boxes=bbox)# randomly sample a crop of the image
distorted_image = tf.slice(image, bbox_begin, bbox_size)
distorted_image = tf.image.resize_images(distorted_image, [height, width], method=np.random.randint(4))# resize to the network input size
distorted_image = tf.image.random_flip_left_right(distorted_image)# random horizontal flip
distorted_image = distort_color(distorted_image, np.random.randint(2))# random color adjustment
return distorted_image
image_raw_data = tf.gfile.FastGFile('/path', 'rb').read()
with tf.Session() as sess:
image_data = tf.image.decode_jpeg(image_raw_data)
boxes = tf.constant([[[0.05, 0.05, 0.9, 0.7], [0.35, 0.47, 0.5, 0.56]]])
for i in range(6):
result = preprocess_for_train(image_data, 299, 299, boxes)
plt.imshow(result.eval())
plt.show()
Multi-threaded input data processing
Queues: the framework for handling input data
import tensorflow as tf
q = tf.FIFOQueue(2, 'int32')# a first-in-first-out queue that can hold two elements
# RandomShuffleQueue dequeues elements in random order instead
init = q.enqueue_many(([0, 10],))# initialize the queue with the elements 0 and 10
x = q.dequeue()# dequeue
y = x + 1
q_inc = q.enqueue([y])# enqueue
with tf.Session() as sess:
init.run()# initialize the queue
for i in range(5):
v, _ = sess.run([x, q_inc])
print(v)
Coordinating multiple threads
import threading
import time
import numpy as np
def MyLoop(coord, worker_id):
# use the tf.Coordinator helper to check whether the current thread should stop
while not coord.should_stop():
# randomly ask all threads to stop
if np.random.rand() < 0.1:
print('Stopping from id: %d' % worker_id)
# coord.request_stop() notifies the other threads to stop
coord.request_stop()
else:
# print the id of the current thread
print('Working on id: %d' % worker_id)
# pause for one second
time.sleep(1)
coord = tf.train.Coordinator()# create an instance to coordinate the threads
threads = [
threading.Thread(target=MyLoop, args=(coord, i, )) for i in range(5)]
for t in threads: t.start()
coord.join(threads)
Queue management with QueueRunner
queue = tf.FIFOQueue(100, "float")
enqueue_op = queue.enqueue([tf.random_normal([1])])# the enqueue operation
qr = tf.train.QueueRunner(queue, [enqueue_op] * 5)# start five threads, each running enqueue_op
tf.train.add_queue_runner(qr)# add the runner to the default collection in the graph
out_tensor = queue.dequeue()
with tf.Session() as sess:
coord = tf.train.Coordinator()# coordinator for the queue threads
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# this call must be made explicitly to start all queue threads, so that enqueuing happens before dequeuing
for _ in range(3): print(sess.run(out_tensor)[0])
coord.request_stop()
coord.join(threads)
Input file queues
num_shards = 2# total number of files
instances_per_shard = 2# number of examples per file
# write the input data into several TFRecord files
for i in range(num_shards):
filename = ('/path/data.tfrecords-%.5d-of-%.5d' % (i, num_shards))
writer = tf.python_io.TFRecordWriter(filename)
for j in range(instances_per_shard):# wrap the data in an Example proto and write it to the TFRecord file
example = tf.train.Example(features=tf.train.Features(feature={
'i': _int64_feature(i),
'j': _int64_feature(j)
}
))
writer.write(example.SerializeToString())
writer.close()
Reading from the file queue
files = tf.train.match_filenames_once('/path/data.tfrecords-*')
filename_queue = tf.train.string_input_producer(files, shuffle=False)# create the input file queue
reader = tf.TFRecordReader()
_, serialized_example= reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features = {
'i': tf.FixedLenFeature([], tf.int64),
'j': tf.FixedLenFeature([], tf.int64),
})
with tf.Session() as sess:
tf.local_variables_initializer().run()
print(sess.run(files))
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(6):
print(sess.run([features['i'], features['j']]))
coord.request_stop()
coord.join(threads)
Batching training data
batch_size = 3
example, label = features['i'], features['j']
capacity = 1000 + 3 * batch_size
example_batch, label_batch = tf.train.batch([example, label], batch_size=batch_size,
capacity=capacity)
# tf.train.batch assembles [example, label] pairs into batches and manages the queue automatically
with tf.Session() as sess:
tf.global_variables_initializer().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(2):
cur_example_batch, cur_label_batch = sess.run(
[example_batch, label_batch])
print(cur_example_batch, cur_label_batch)
coord.request_stop()
coord.join(threads)
A complete input pipeline with queues
import tensorflow as tf
files = tf.train.match_filenames_once('path/file_pattern-*')
filename_queue = tf.train.string_input_producer(files, shuffle=False)
# string_input_producer creates the input file queue
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)# read one serialized example
features = tf.parse_single_example(serialized_example,
features = {
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64),
'weight': tf.FixedLenFeature([], tf.int64),
'channels': tf.FixedLenFeature([], tf.int64),
}
)
image = features['image']
label = features['label']
height = features['height']
weight = features['weight']
channels = features['channels']
decode_image = tf.decode_raw(image, tf.uint8)# decode the raw bytes
decode_image.set_shape([height, weight, channels])
# preprocess_for_train is the preprocessing function defined earlier
image_size = 299
distorted_image = preprocess_for_train(decode_image, image_size, image_size, None)
# assemble the preprocessed images into shuffled input batches
min_after_dequeue = 10000
batch_size = 100
capacity = min_after_dequeue + 3 * batch_size
image_batch, label_batch = tf.train.shuffle_batch(
[distorted_image, label], batch_size=batch_size,
capacity=capacity, min_after_dequeue=min_after_dequeue
)
logit = inference(image_batch)
loss = calc_loss(logit, label_batch)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
with tf.Session() as sess:
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(training_step):
sess.run(train_step)
coord.request_stop()
coord.join(threads)
Dataset operations
input_data = [1, 2, 3, 5, 8]
dataset = tf.data.Dataset.from_tensor_slices(input_data)
iterator = dataset.make_one_shot_iterator()
x = iterator.get_next()
dataset = tf.data.TextLineDataset(input_files)# build a dataset from text files
# using TFRecord files requires a parser function
def parser(record):
features = tf.parse_single_example(
record,
features={
'f1': tf.FixedLenFeature([], tf.int64),
'f2': tf.FixedLenFeature([], tf.int64)
})
return features['f1'], features['f2']
input_files = '/path/to/TFRecordfile'
dataset = tf.data.TFRecordDataset(input_files)
dataset = dataset.map(parser)# apply parser to every record
iterator = dataset.make_one_shot_iterator()
f1, f2 = iterator.get_next()
with tf.Session() as sess:
for i in range(10):
print(sess.run([f1, f2]))
# when the file paths are fed through a placeholder, the iterator must be initialized explicitly
input_files = tf.placeholder(tf.string)
dataset = tf.data.TFRecordDataset(input_files)
dataset = dataset.map(parser)
# the dataset is built from TFRecord files whose concrete paths are fed via the placeholder
iterator = dataset.make_initializable_iterator()
f1, f2 = iterator.get_next()
with tf.Session() as sess:# initialization is required before use
sess.run(iterator.initializer,
feed_dict={input_files: ['/path/to/TFRecordfile']})
while True:
try:
sess.run([f1, f2])
except tf.errors.OutOfRangeError:
break
Processing an input dataset end to end
import tensorflow as tf
train_files = tf.train.match_filenames_once('/path')# TFRecord files with the training data
test_files = tf.train.match_filenames_once('/path')
def parser(record):
features = tf.parse_single_example(
record,
features={
'image': tf.FixedLenFeature([], tf.string),
'label': tf.FixedLenFeature([], tf.int64),
'height': tf.FixedLenFeature([], tf.int64),
'weight': tf.FixedLenFeature([], tf.int64),
'channels': tf.FixedLenFeature([], tf.int64),
}
)
decode_image = tf.decode_raw(features['image'], tf.uint8)
decode_image.set_shape([features['height'], features['weight'], features['channels']])
label = features['label']
return decode_image, label
image_size = 299
batch_size = 100
shuffle_buffer = 10000
dataset = tf.data.TFRecordDataset(train_files)
dataset = dataset.map(parser)# parse every record with parser
# shuffle and batch the data
dataset = dataset.map(
lambda image, label: (
preprocess_for_train(image, image_size, image_size, None), label)
)
dataset = dataset.shuffle(shuffle_buffer).batch(batch_size)
NUM_EPOCH = 10
dataset = dataset.repeat(NUM_EPOCH)# number of epochs to repeat the dataset
iterator = dataset.make_initializable_iterator()
image_batch, label_batch = iterator.get_next()# get the next batch
logit = inference(image_batch)
loss =calc_loss(logit, label_batch)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
test_dataset = tf.data.TFRecordDataset(test_files)
test_dataset = test_dataset.map(parser).map(
lambda image, label: (
tf.image.resize_images(image, [image_size, image_size]), label)
)
test_dataset = test_dataset.batch(batch_size)
test_iterator = test_dataset.make_initializable_iterator()# iterator over the test data
test_image_batch, test_label_batch = test_iterator.get_next()
test_logit = inference(test_image_batch)
predictions = tf.argmax(test_logit, axis=-1, output_type=tf.int32)
with tf.Session() as sess:
sess.run([tf.global_variables_initializer(),
tf.local_variables_initializer()])
sess.run(iterator.initializer)
while True:
try:
sess.run(train_step)
except tf.errors.OutOfRangeError:
break
sess.run(test_iterator.initializer)
test_results =[]
test_labels = []
while True:
try:
pred, label = sess.run([predictions, test_label_batch])
test_results.extend(pred)
test_labels.extend(label)
except tf.errors.OutOfRangeError:
break
correct = [float(y == y_) for (y, y_) in zip(test_results, test_labels)]
accuracy = sum(correct) / len(correct)
print("Test accuracy is:", accuracy)
| = tf.train.Example(features=tf.train.Features(feature={
'pixels': _int64_feature(pixels),
'label': _int64_feature(np.argmax(labels[index])),
'images_raw': _bytes_features(images_raw)}
))
writer.write(example.SerializeToString())# write the record to the TFRecord file
writer.close()
Reading a TFRecord file
import tensorflow as tf
# create a reader for the TFRecord file
reader = tf.TFRecordReader()
# create a queue to maintain the list of input files
filename_queue = tf.train.string_input_pro | conditional_block |
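As a compact illustration of the write-and-parse cycle shown above, the sketch below writes two toy records and reads them back without an input queue, using the TF 1.x record iterator; the output path and the feature values are hypothetical.
import tensorflow as tf
def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
path = '/tmp/toy.tfrecords'  # hypothetical output path
writer = tf.python_io.TFRecordWriter(path)
for i in range(2):
    example = tf.train.Example(features=tf.train.Features(feature={
        'label': _int64_feature(i),
        'pixels': _int64_feature(784),
    }))
    writer.write(example.SerializeToString())
writer.close()
# read the records back eagerly, without an input queue
for record in tf.python_io.tf_record_iterator(path):
    example = tf.train.Example.FromString(record)
    print(example.features.feature['label'].int64_list.value[0],
          example.features.feature['pixels'].int64_list.value[0])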
图像数据处理.py | Image data processing
Store the input data in a unified way using the TFRecord format
message Example {
Features features = 1;
};
message Features {
map<string, Feature> feature = 1;
};
message Feature{
oneof kind {
ByteList bytes_list = 1;
FloatList float_list = 2;
Int64List int64_list = 3;
}
};
Writing data into a TFRecord file
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
import numpy as np
# helper that wraps an integer value as a feature
def _int64_feature(value):
    return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
# helper that wraps a byte string as a feature
def _bytes_features(value):
    return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
# load the MNIST data
mnist = input_data.read_data_sets('/path', dtype=tf.uint8, one_hot=True)
images = mnist.train.images
labels = mnist.train.labels
pixels = images.shape[1]
num_examples = mnist.train.num_examples
filename = '/path/to/output.tfrecords'
# create a writer for the TFRecord file
writer = tf.python_io.TFRecordWriter(filename)
for index in range(num_examples):
    images_raw = images[index].tostring()  # convert each image into a byte string
    # wrap one sample as an Example protocol buffer and write it out
    example = tf.train.Example(features=tf.train.Features(feature={
        'pixels': _int64_feature(pixels),
        'label': _int64_feature(np.argmax(labels[index])),
        'images_raw': _bytes_features(images_raw)}
    ))
    writer.write(example.SerializeToString())  # write to the TFRecord file
writer.close()
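A quick way to sanity-check the file that was just written is to parse one raw record back into the Example proto shown above (a minimal sketch, assuming the same output path and feature keys as the writer):
record_iter = tf.python_io.tf_record_iterator('/path/to/output.tfrecords')
serialized = next(record_iter)
example = tf.train.Example.FromString(serialized)  # decode one record into the Example proto
print(example.features.feature['label'].int64_list.value)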
Reading a TFRecord file
import tensorflow as tf
# create a reader for TFRecord files
reader = tf.TFRecordReader()
# create a queue that maintains the list of input files
filename_queue = tf.train.string_input_producer(['/path/to/output.tfrecords'])
# read one example from the queue
_, serialized_example = reader.read(filename_queue)
features = tf.parse_single_example(  # parses a single example
    serialized_example,
    features={
        'images_raw': tf.FixedLenFeature([], tf.string),  # parsed into a tensor
        'pixels': tf.FixedLenFeature([], tf.int64),
        'label': tf.FixedLenFeature([], tf.int64),
}) |
sess = tf.Session()
# start the threads that feed the input pipeline
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
# every run reads one example from the TFRecord file; after all examples have
# been consumed, the program starts reading from the beginning again
for i in range(10):
    print(sess.run([image, label, pixels]))
Image encoding and decoding
import matplotlib.pyplot as plt
import tensorflow as tf
image_raw_data = tf.gfile.FastGFile('/path', 'rb').read()  # read the raw image as a string
with tf.Session() as sess:  # decode the image; decode_jpeg is used here, decode_png also exists
    img_data = tf.image.decode_jpeg(image_raw_data)  # the result is a tensor
    print(img_data.eval())
    plt.imshow(img_data.eval())
    plt.show()
    encode_image = tf.image.encode_jpeg(img_data)  # re-encode the tensor as JPEG and save it
    with tf.gfile.FastGFile('/path/to/output', 'wb') as f:
        f.write(encode_image.eval())
Resizing and transforming images
image_raw_data = tf.gfile.FastGFile('/path', 'rb').read()
image_data = tf.image.decode_jpeg(image_raw_data)  # decode the image
image_data = tf.image.convert_image_dtype(image_data, dtype=tf.float32)  # convert to float32
resized = tf.image.resize_images(image_data, [300, 300], method=0)
croped = tf.image.resize_image_with_crop_or_pad(image_data, 400, 400)  # crop to the target size if the image is large enough, otherwise pad with zeros
central = tf.image.central_crop(image_data, 0.5)  # crop the central fraction of the image
# various flips of the image
fliped = tf.image.flip_up_down(image_data)
fliped = tf.image.random_flip_up_down(image_data)
fliped = tf.image.flip_left_right(image_data)
fliped = tf.image.random_flip_left_right(image_data)
transposed = tf.image.transpose_image(image_data)
adjusted = tf.image.adjust_brightness(image_data, -0.5)  # adjust brightness
adjusted = tf.clip_by_value(adjusted, 0.0, 1.0)  # clip pixel values back into the valid range
adjusted = tf.image.random_brightness(image_data, max_delta=0.5)
adjusted = tf.image.adjust_contrast(image_data, 5)  # adjust contrast
adjusted = tf.image.adjust_hue(image_data, 0.3)  # adjust hue
adjusted = tf.image.adjust_saturation(image_data, 5)  # adjust saturation
adjusted = tf.image.per_image_standardization(image_data)  # normalize to zero mean and unit variance
Drawing bounding boxes on images
batched = tf.expand_dims(
    tf.image.convert_image_dtype(image_data, tf.float32), 0)
boxes = tf.constant([[[0.05, 0.05, 0.9, 0.7], [0.35, 0.47, 0.5, 0.56]]])  # two boxes at once
# the coordinates are relative positions: [y_min, x_min, y_max, x_max]
boxed = tf.image.draw_bounding_boxes(batched, boxes)
A complete image-preprocessing pipeline
import tensorflow as tf
import matplotlib.pyplot as plt
import numpy as np
# randomly adjust the colors of one image; brightness, contrast, saturation and hue
# are adjusted in different orders, and the order used can affect training
def distort_color(image, color_ordering=0):
    if color_ordering == 0:
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_brightness(image, max_delta=32. / 255.)
        image = tf.image.random_hue(image, max_delta=0.2)
    elif color_ordering == 1:
        image = tf.image.random_brightness(image, max_delta=32. / 255.)
        image = tf.image.random_saturation(image, lower=0.5, upper=1.5)
        image = tf.image.random_hue(image, max_delta=0.2)
    elif color_ordering == 2:
        pass  # other orderings omitted
    return tf.clip_by_value(image, 0.0, 1.0)  # keep every pixel value inside the valid range
# preprocess one image for training
def preprocess_for_train(image, height, width, bbox):
    if bbox is None:  # if no bounding box is given, use the whole image
        bbox = tf.constant([0.0, 0.0, 1.0, 1.0], dtype=tf.float32, shape=[1, 1, 4])
    if image.dtype != tf.float32:  # convert the pixel value type
        image = tf.image.convert_image_dtype(image, dtype=tf.float32)
    bbox_begin, bbox_size, _ = tf.image.sample_distorted_bounding_box(
        tf.shape(image), bounding_boxes=bbox)  # randomly crop the image
    distort_image = tf.slice(image, bbox_begin, bbox_size)
    distort_image = tf.image.resize_images(
        distort_image, [height, width], method=np.random.randint(4))  # resize to the network's input size
    distort_image = tf.image.random_flip_left_right(distort_image)  # random horizontal flip
    distort_image = distort_color(distort_image, np.random.randint(2))  # randomly adjust colors
    return distort_image
image_raw_data = tf.gfile.FastGFile('/path', 'rb').read()
with tf.Session() as sess:
    image_data = tf.image.decode_jpeg(image_raw_data)
    boxes = tf.constant([[[0.05, 0.05, 0.9, 0.7], [0.35, 0.47, 0.5, 0.56]]])
    for i in range(6):
        result = preprocess_for_train(image_data, 299, 299, boxes)
        plt.imshow(result.eval())
        plt.show()
Multi-threaded input pipelines
Queues: the framework for handling input data
import tensorflow as tf
q = tf.FIFOQueue(2, 'int32')  # a first-in-first-out queue that can hold two elements
# tf.RandomShuffleQueue dequeues elements in random order
init = q.enqueue_many(([0, 10],))  # initialize the queue with the elements 0 and 10
x = q.dequeue()  # dequeue one element
y = x + 1
q_inc = q.enqueue([y])  # enqueue the incremented value again
with tf.Session() as sess:
    init.run()  # initialize the queue
    for i in range(5):
        v, _ = sess.run([x, q_inc])
        print(v)
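The comment above mentions tf.RandomShuffleQueue without constructing one; a minimal sketch (the capacities here are chosen arbitrarily) looks like this:
shuffle_q = tf.RandomShuffleQueue(capacity=10, min_after_dequeue=2, dtypes=[tf.int32])
init = shuffle_q.enqueue_many(([1, 2, 3, 4, 5],))
out = shuffle_q.dequeue()
with tf.Session() as sess:
    init.run()
    print(sess.run(out))  # one of the enqueued values, chosen at random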
Coordinating multiple threads
import threading
import time
import numpy as np
def MyLoop(coord, worker_id):
    # use the coordination tools of tf.train.Coordinator to decide whether this thread should stop
    while not coord.should_stop():
        # stop all threads at random, with small probability
        if np.random.rand() < 0.1:
            print("Stopping from id: %d" % worker_id)
            # coord.request_stop() notifies the other threads to stop
            coord.request_stop()
        else:
            # print the id of the current thread
            print("Working on id: %d" % worker_id)
            # sleep for one second
            time.sleep(1)
coord = tf.train.Coordinator()  # create an instance to coordinate the threads
threads = [
    threading.Thread(target=MyLoop, args=(coord, i, )) for i in range(5)]
for t in threads: t.start()
coord.join(threads)
Managing queues with QueueRunner
queue = tf.FIFOQueue(100, "float")
enqueue_op = queue.enqueue([tf.random_normal([1])])  # the enqueue operation
qr = tf.train.QueueRunner(queue, [enqueue_op] * 5)  # start several threads, each running enqueue_op
tf.train.add_queue_runner(qr)  # add the runner to the default collection in the graph
out_tensor = queue.dequeue()
with tf.Session() as sess:
    coord = tf.train.Coordinator()  # coordinate the started threads
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # this call must be made explicitly to start all threads so that the queue gets filled
    for _ in range(3): print(sess.run(out_tensor)[0])
    coord.request_stop()
    coord.join(threads)
Input file queues
num_shards = 2  # total number of files
instances_per_shard = 2  # number of examples per file
# convert the input into TFRecord files
for i in range(num_shards):
    filename = ('/path/data.tfrecords-%.5d-of-%.5d' % (i, num_shards))
    writer = tf.python_io.TFRecordWriter(filename)
    for j in range(instances_per_shard):  # wrap the data as Example structures and write them out
        example = tf.train.Example(features=tf.train.Features(feature={
            'i': _int64_feature(i),
            'j': _int64_feature(j)
        }
        ))
        writer.write(example.SerializeToString())
    writer.close()
Reading the sharded files
files = tf.train.match_filenames_once('/path/data.tfrecords-*')
filename_queue = tf.train.string_input_producer(files, shuffle=False)  # create the input queue
reader = tf.TFRecordReader()
_, serialized_example= reader.read(filename_queue)
features = tf.parse_single_example(
serialized_example,
features = {
'i': tf.FixedLenFeature([], tf.int64),
'j': tf.FixedLenFeature([], tf.int64),
})
with tf.Session() as sess:
tf.local_variables_initializer().run()
print(sess.run(files))
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(6):
print(sess.run([features['i'], features['j']]))
coord.request_stop()
coord.join(threads)
Batching training data
batch_size = 3
example, label = features['i'], features['j']
capacity = 1000 + 3 * batch_size
example_batch, label_batch = tf.train.batch([example, label], batch_size=batch_size,
capacity=capacity)
# tf.train.batch assembles [example, label] into batches and manages the queue automatically
with tf.Session() as sess:
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
for i in range(2):
cur_exapmle_batch, cur_label_batch = sess.run(
[example_batch, label_batch])
print(cur_exapmle_batch, cur_label_batch)
coord.request_stop()
coord.join(threads)
A complete queue-based data-processing pipeline
import tensorflow as tf
files = tf.train.match_filenames_once('path/file_pattern-*')
filename_queue = tf.train.string_input_producer(files, shuffle=False)
# the function above produces the input file queue
reader = tf.TFRecordReader()
_, serialized_example = reader.read(filename_queue)  # read one serialized example
features = tf.parse_single_example(serialized_example,
    features={
        'image': tf.FixedLenFeature([], tf.string),
        'label': tf.FixedLenFeature([], tf.int64),
        'height': tf.FixedLenFeature([], tf.int64),
        'width': tf.FixedLenFeature([], tf.int64),
        'channels': tf.FixedLenFeature([], tf.int64),
    }
)
image = features['image']
label = features['label']
height = features['height']
width = features['width']
channels = features['channels']
decoded_image = tf.decode_raw(image, tf.uint8)  # decode the byte string into a tensor
decoded_image = tf.reshape(decoded_image, tf.stack([height, width, channels]))
# preprocess_for_train is the preprocessing function defined earlier
image_size = 299
distorted_image = preprocess_for_train(decoded_image, image_size, image_size, None)
# assemble the examples into an input batch queue
min_after_dequeue = 10000
batch_size = 100
capacity = min_after_dequeue + 3 * batch_size
image_batch, label_batch = tf.train.shuffle_batch(
[distorted_image, label], batch_size=batch_size,
capacity=capacity, min_after_dequeue=min_after_dequeue
)
logit = inference(image_batch)
loss = calc_loss(logit, label_batch)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
with tf.Session() as sess:
    tf.global_variables_initializer().run()
    tf.local_variables_initializer().run()
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    for i in range(training_steps):
        sess.run(train_step)
    coord.request_stop()
    coord.join(threads)
Basic tf.data dataset operations
input_data = [1, 2, 3, 5, 8]
dataset = tf.data.Dataset.from_tensor_slices(input_data)
iterator = dataset.make_one_shot_iterator()
x = iterator.get_next()
dataset = tf.data.TextLineDataset(input_files)  # build a dataset from text files
# reading TFRecord files requires a parser
def parser(record):
    features = tf.parse_single_example(
        record,
        features={
            'f1': tf.FixedLenFeature([], tf.int64),
            'f2': tf.FixedLenFeature([], tf.int64),
        })
    return features['f1'], features['f2']
input_files = '/path/to/TFRecordfile'
dataset = tf.data.TFRecordDataset(input_files)
dataset = dataset.map(parser)  # apply the parser to every record
iterator = dataset.make_one_shot_iterator()
f1, f2 = iterator.get_next()
with tf.Session() as sess:
    for i in range(10):
        print(sess.run([f1, f2]))
# when the file list comes from a placeholder, the iterator must be initialized explicitly
input_files = tf.placeholder(tf.string)
dataset = tf.data.TFRecordDataset(input_files)
dataset = dataset.map(parser)
iterator = dataset.make_initializable_iterator()
f1, f2 = iterator.get_next()
with tf.Session() as sess:  # initialization is required here
    sess.run(iterator.initializer,
             feed_dict={input_files: ['/path/to/TFRecordfile']})
    while True:
        try:
            sess.run([f1, f2])
        except tf.errors.OutOfRangeError:
            break
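Besides the one-shot and initializable iterators used above, tf.data also provides reinitializable iterators; a minimal self-contained sketch of switching one iterator between two datasets:
dataset_a = tf.data.Dataset.from_tensor_slices([1, 2, 3])
dataset_b = tf.data.Dataset.from_tensor_slices([10, 20, 30])
iterator = tf.data.Iterator.from_structure(dataset_a.output_types, dataset_a.output_shapes)
switch_to_a = iterator.make_initializer(dataset_a)
switch_to_b = iterator.make_initializer(dataset_b)
x = iterator.get_next()
with tf.Session() as sess:
    sess.run(switch_to_a)
    print(sess.run(x))  # 1
    sess.run(switch_to_b)
    print(sess.run(x))  # 10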
Processing the input dataset
import tensorflow as tf
train_files = tf.train.match_filenames_once('/path')  # pattern of the training TFRecord files
test_files = tf.train.match_filenames_once('/path')
def parser(record):
    features = tf.parse_single_example(
        record,
        features={
            'image': tf.FixedLenFeature([], tf.string),
            'label': tf.FixedLenFeature([], tf.int64),
            'height': tf.FixedLenFeature([], tf.int64),
            'width': tf.FixedLenFeature([], tf.int64),
            'channels': tf.FixedLenFeature([], tf.int64),
        }
    )
    decoded_image = tf.decode_raw(features['image'], tf.uint8)
    decoded_image = tf.reshape(
        decoded_image,
        tf.stack([features['height'], features['width'], features['channels']]))
    label = features['label']
    return decoded_image, label
image_size = 299
batch_size = 100
shuffle_buffer = 10000
dataset = tf.data.TFRecordDataset(train_files)
dataset = dataset.map(parser)  # parse every record with the parser defined above
# shuffle and batch the parsed records
dataset = dataset.map(
    lambda image, label: (
        preprocess_for_train(image, image_size, image_size, None), label)
)
dataset = dataset.shuffle(shuffle_buffer).batch(batch_size)
NUM_EPOCH = 10
dataset = dataset.repeat(NUM_EPOCH)  # number of passes over the data
iterator = dataset.make_initializable_iterator()
image_batch, label_batch = iterator.get_next()  # fetch one batch
logit = inference(image_batch)
loss = calc_loss(logit, label_batch)
train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(loss)
test_dataset = tf.data.TFRecordDataset(test_files)
test_dataset = test_dataset.map(parser).map(
    lambda image, label: (
        tf.image.resize_images(image, [image_size, image_size]), label)
)
test_dataset = test_dataset.batch(batch_size)
test_iterator = test_dataset.make_initializable_iterator()  # iterator over the test data
test_image_batch, test_label_batch = test_iterator.get_next()
test_logit = inference(test_image_batch)
predictions = tf.argmax(test_logit, axis=-1, output_type=tf.int32)
with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(),
              tf.local_variables_initializer()])
    sess.run(iterator.initializer)
    while True:
        try:
            sess.run(train_step)
        except tf.errors.OutOfRangeError:
            break
    sess.run(test_iterator.initializer)
    test_results = []
    test_labels = []
    while True:
        try:
            pred, label = sess.run([predictions, test_label_batch])
            test_results.extend(pred)
            test_labels.extend(label)
        except tf.errors.OutOfRangeError:
            break
correct = [float(y == y_) for (y, y_) in zip(test_results, test_labels)]
accuracy = sum(correct) / len(correct)
print("Test accuracy is:", accuracy) |
image = tf.decode_raw(features['images_raw'], tf.uint8)  # decode the string tensor into an array
label = tf.cast(features['label'], tf.int32)
pixels = tf.cast(features['pixels'], tf.int32) | random_line_split
functions_and_their_processes.rs | use rand::Rng;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn factorial(n: i128) -> i128 {
if n == 1 {
1
} else {
n * factorial(n - 1)
}
}
pub fn fact_iter(n: i128) -> i128 {
fn helper(p: i128, c: i128, max_count: i128) -> i128 {
if c > max_count {
p
} else {
helper(p * c, c + 1, max_count)
}
}
helper(1, 1, n)
}
pub fn inc(a: i128) -> i128 {
a + 1
}
pub fn dec(a: i128) -> i128 {
a - 1
}
pub fn plus(a: i128, b: i128) -> i128 {
if a == 0 {
b
} else {
plus(dec(a), inc(b))
}
}
pub fn ackermann(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
if a == 0 {
2 * b
} else {
if b == 1 {
2
} else {
ackermann(a - 1, ackermann(a, b - 1))
}
}
}
}
fn f(n: i128) -> i128 {
ackermann(0, n)
}
fn g(n: i128) -> i128 {
ackermann(1, n)
}
fn h(n: i128) -> i128 {
ackermann(2, n)
}
pub fn fac(n: i128) -> i128 {
if n == 1 {
1
} else {
n * fac(n - 1)
}
}
pub fn fib(n: i128) -> i128 {
if n < 2 {
n
} else {
fib(n - 2) + fib(n - 1)
}
}
// nested functions in Rust cannot capture locals from the enclosing scope, so state is threaded through parameters
pub fn fib_iter(n: i128) -> i128 {
fn helper(a: i128, b: i128, i: i128, n: i128) -> i128 {
if i == n {
b
} else {
helper(b, a + b, i + 1, n)
}
}
helper(0, 1, 1, n)
}
/*
The number of ways to change amount a using n kinds of coins equals
- the number of ways to change amount a using all but the first kind of coin, plus
- the number of ways to change amount (a - d) using all n kinds of coins where d is the value of the first kind of coin
*/
fn count_change(amount: i128) -> i128 {
cc(amount, 6)
}
fn cc(amount: i128, coin_kind: i8) -> i128 {
if amount == 0 {
1
} else {
if amount < 0 || coin_kind == 0 {
0
} else {
cc(amount, coin_kind - 1) + cc(amount - get_value(coin_kind), coin_kind)
}
}
}
fn get_value(coin_kind: i8) -> i128 {
match coin_kind {
6 => 100,
5 => 50,
4 => 25,
3 => 10,
2 => 5,
1 => 1,
_ => 0,
}
}
/*
Exercise 1.11 A function f is defined by the rule that f(n)=n if n<3 and f(n)=f(n−1)+2f(n−2)+3f(n−3) if n≥3.
Write a JavaScript function that computes f by means of a recursive process. Write a function that computes f
by means of an iterative process.
*/
fn fn3(n: i128) -> i128 {
if n < 3 {
n
} else {
fn3(n - 1) + 2 * fn3(n - 2) + 3 * fn3(n - 3)
}
}
fn fn3_iter(n: i128) -> i128 {
fn helper(p3: i128, p2: i128, p1: i128, k: i128, n: i128) -> i128 {
if k == n {
p1
} else {
helper(p2, p1, 3 * p3 + 2 * p2 + p1, k + 1, n)
}
}
return helper(0, 1, 2, 2, n);
}
// m >= n
pub fn pascal(m: i128, n: i128) -> i128 {
if n == 0 || m == n {
1
} else {
pascal(m - 1, n - 1) + pascal(m - 1, n)
}
}
// Pascal's triangle computed with an iterative process
pub fn pascal_iter(m: usize, n: usize) -> i128 {
fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 {
if m == 0 || m == n {
1
} else {
if l == m {
pre_vec[n - 1] + pre_vec[n]
} else {
let mut new_vec = vec![];
for (i, _) in pre_vec.iter().enumerate() {
if i == 0 {
new_vec.push(1);
} else {
new_vec.push(pre_vec[i - 1] + pre_vec[i])
}
}
new_vec.push(1);
helper(m, n, l + 1, new_vec.to_vec())
}
}
}
helper(m, n, 2, vec![1, 1])
}
pub fn cube(x: f32) -> f32 {
x * x * x
}
fn p(x: f32) -> f32 {
3.0 * x - 4.0 * cube(x)
}
pub fn sine(angle: f32) -> f32 {
if f32::abs(angle) <= 0.1 {
angle
} else {
p(sine(angle / 3.0))
}
}
pub fn expt(b: i128, n: i128) -> i128 {
if n == 0 {
1
} else {
b * expt(b, n - 1)
}
}
pub fn expt_iter(b: i128, n: i128) -> i128 {
fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 {
if c == n {
p
} else {
helper(c + 1, b * p, b, n)
}
}
helper(0, 1, b, n)
}
pub fn is_even(n: i128) -> bool {
n % 2 == 0
}
pub fn square(i: i128) -> i128 {
i * i
}
pub fn half(i: i128) -> i128 {
i / 2
}
pub fn fast_expt(b: i128, n: i128) -> i128 {
    if n == 0 {
        1
} else {
if is_even(n) {
square(fast_expt(b, half(n)))
} else {
b * fast_expt(b, n - 1)
}
}
}
pub fn fast_expt_iter(b: i128, n: i128) -> i128 {
fn helper(p: i128, b: i128, n: i128) -> i128 {
if n == 0 {
p
} else {
if is_even(n) {
helper(p, square(b), half(n))
} else {
helper(b * p, b, n - 1)
}
}
}
helper(1, b, n)
}
pub fn double(x: i128) -> i128 {
x * 2
}
pub fn times(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
a + times(a, b - 1)
}
}
pub fn times_iter(a: i128, b: i128) -> i128 {
fn helper(s: i128, a: i128, b: i128) -> i128 {
if b == 0 {
s
} else {
if is_even(b) {
helper(s, double(a), half(b))
} else {
helper(s + a, a, b - 1)
}
}
}
helper(0, a, b)
}
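// Why fast_fib below can halve the step count: it uses the general transformation
// T_pq : (a, b) -> (b*q + a*q + a*p, b*p + a*q), of which ordinary Fibonacci stepping is T_01.
// Applying T_pq twice is again a transformation of the same family with
// p' = p*p + q*q and q' = 2*p*q + q*q, so the (p, q) pair can be "squared" whenever
// the remaining count is even, giving a logarithmic number of steps (SICP exercise 1.19).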
pub fn fast_fib(n: i128) -> i128 {
fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 {
if count == 0 {
b
} else {
if is_even(count) {
helper(
a,
b,
square(p) + square(q),
2 * p * q + square(q),
half(count),
)
} else {
helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1)
}
}
}
helper(1, 0, 0, 1, n)
}
pub fn gcd(a: i128, b: i128) -> i128 {
if b == 0 {
a
} else {
gcd(b, a % b)
}
}
pub fn devides(test_divisor: i128, n: i128) -> bool {
n % | d_divisor(n: i128, test_divisor: i128) -> i128 {
if square(test_divisor) > n {
n
} else {
if devides(test_divisor, n) {
test_divisor
} else {
find_divisor(n, test_divisor + 1)
}
}
}
pub fn smallest_divisor(n: i128) -> i128 {
find_divisor(n, 2)
}
pub fn is_prime(n: i128) -> bool {
smallest_divisor(n) == n
}
pub fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// square after expmod, otherwise it will overflow easily
square(expmod(base, half(exp), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
// Fermat test
pub fn fermat_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n, n) == a
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n);
println!("fermat_test testing {}", a);
try_it(a, n)
}
pub fn fast_is_prime(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if fermat_test(n) {
fast_is_prime(n, times - 1)
} else {
false
}
}
}
// Exercise 1.22
fn timed_prime_test(n: i128) -> bool {
println!(" start testing: {}", n);
let now = SystemTime::now();
start_prime_test(n, now)
}
fn start_prime_test(n: i128, now: SystemTime) -> bool {
if is_prime(n) {
report_prime(now, n)
} else {
true
}
}
fn report_prime(now: SystemTime, n: i128) -> bool {
println!(" *** ");
println!(" prime number is: {}", n);
println!("Time used: {}", get_lapsed_time_millis(now));
/*
match now.elapsed() {
Ok(elapsed) => println!("Time used: {}", elapsed.as_millis()),
Err(e) => println!("Error: {:?}", e),
}
*/
false
}
fn get_lapsed_time_millis(then: SystemTime) -> u128 {
let new_now = SystemTime::now();
new_now
.duration_since(UNIX_EPOCH)
.expect("Time")
.as_millis()
- then.duration_since(UNIX_EPOCH).expect("Time").as_millis()
}
// start is odd number
fn search_for_prime(start: i128, count: i128) {
fn helper(start: i128, count: i128) {
if count == 0 {
return;
} else {
if timed_prime_test(start) {
helper(start + 2, count)
} else {
helper(start + 2, count - 1)
}
}
}
helper(start, count)
}
// Exercise 1.27
fn test_carmichael_number(n: i128) {
for i in 2..n {
if expmod(i, n, n) == i {
println!(" testing {}", i);
}
}
}
// Exercise 1.28 Miller-Rabin test
fn miller_rabin_test(n: i128, times: i128) -> bool {
fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
                // square via trivial_test so that a nontrivial square root of 1
                // modulo m is reported as 0 (the Miller-Rabin check)
                trivial_test(expmod(base, half(exp), m), m)
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
    // squares r modulo m, but returns 0 when r is a nontrivial square root of 1 modulo m
    fn trivial_test(r: i128, m: i128) -> i128 {
        if r != 1 && r != m - 1 && square(r) % m == 1 {
            0
        } else {
            square(r) % m
}
}
fn helper_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n - 1, n) == 1
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n - 1);
println!("miller_rabin testing {}", a);
try_it(a, n)
}
fn test_times(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if helper_test(n) {
test_times(n, times - 1)
} else {
false
}
}
}
test_times(n, times)
}
#[test]
fn functions_and_their_processes_tests() {
println!("{}", factorial(5));
println!("{}", fact_iter(5));
println!("{}", plus(5, 13));
println!("{}", ackermann(1, 10));
println!("{}", ackermann(2, 4));
println!("{}", ackermann(3, 3));
println!("{}", f(3));
println!("{}", g(3));
println!("{}", h(4));
println!("{}", fac(5));
println!("{}", fib(5));
println!("{}", fib_iter(5));
println!("{}", count_change(100));
println!("{}", fn3(3));
println!("{}", fn3(4));
println!("{}", fn3_iter(3));
println!("{}", fn3_iter(4));
println!("{}", pascal(3, 2));
println!("{}", pascal_iter(3, 2));
println!("{}", pascal(4, 2));
println!("{}", pascal_iter(4, 2));
println!("{}", expt(4, 2));
println!("{}", expt_iter(4, 2));
println!("{}", fast_expt(4, 2));
println!("{}", fast_expt_iter(4, 2));
println!("{}", times(4, 2));
println!("{}", times_iter(4, 2));
println!("{}", fast_fib(5));
println!("{}", gcd(2, 5));
println!("{}", smallest_divisor(45));
println!("{}", is_prime(5));
println!("{}", fermat_test(5));
println!("{}", timed_prime_test(16769023));
search_for_prime(1111, 3);
search_for_prime(11111, 3);
search_for_prime(111111, 3);
search_for_prime(1111111, 3);
search_for_prime(11111111, 3);
search_for_prime(111111111, 3);
// carmichael number
test_carmichael_number(2821);
println!("is 2821 prime by fermat_test? {}", fast_is_prime(2821, 100));
println!(
"is 2821 prime by miller_rabin test? {}",
miller_rabin_test(2821, 100)
);
}
| test_divisor == 0
}
fn fin | identifier_body |
functions_and_their_processes.rs | use rand::Rng;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn factorial(n: i128) -> i128 {
if n == 1 {
1
} else {
n * factorial(n - 1)
}
}
pub fn fact_iter(n: i128) -> i128 {
fn helper(p: i128, c: i128, max_count: i128) -> i128 {
if c > max_count {
p
} else {
helper(p * c, c + 1, max_count)
}
}
helper(1, 1, n)
}
pub fn inc(a: i128) -> i128 {
a + 1
}
pub fn dec(a: i128) -> i128 {
a - 1
}
pub fn plus(a: i128, b: i128) -> i128 {
if a == 0 {
b
} else {
plus(dec(a), inc(b))
}
}
pub fn ackermann(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
if a == 0 {
2 * b
} else {
if b == 1 {
2
} else {
ackermann(a - 1, ackermann(a, b - 1))
}
}
}
}
fn f(n: i128) -> i128 {
ackermann(0, n)
}
fn g(n: i128) -> i128 {
ackermann(1, n)
}
fn h(n: i128) -> i128 {
ackermann(2, n)
}
pub fn fac(n: i128) -> i128 {
if n == 1 {
1
} else {
n * fac(n - 1)
}
}
pub fn fib(n: i128) -> i128 {
if n < 2 {
n
} else {
fib(n - 2) + fib(n - 1)
}
}
// rust function can not access local variable
pub fn fib_iter(n: i128) -> i128 {
fn helper(a: i128, b: i128, i: i128, n: i128) -> i128 {
if i == n {
b
} else {
helper(b, a + b, i + 1, n)
}
}
helper(0, 1, 1, n)
}
/*
The number of ways to change amount a using n kinds of coins equals
- the number of ways to change amount a using all but the the first kind of coin, plus
- the number of ways to change amount (a - d) using all n kinds of coins where d is the value of the first kind of coin
*/
fn count_change(amount: i128) -> i128 {
cc(amount, 6)
}
fn cc(amount: i128, coin_kind: i8) -> i128 {
if amount == 0 {
1
} else {
if amount < 0 || coin_kind == 0 {
0
} else {
cc(amount, coin_kind - 1) + cc(amount - get_value(coin_kind), coin_kind)
}
}
}
fn get_value(coin_kind: i8) -> i128 {
match coin_kind {
6 => 100,
5 => 50,
4 => 25,
3 => 10,
2 => 5,
1 => 1,
_ => 0,
}
}
/*
Exercise 1.11 A function f is defined by the rule that f(n)=n if n<3 and f(n)=f(n−1)+2f(n−2)+3f(n−3) if n≥3.
Write a JavaScript function that computes f by means of a recursive process. Write a function that computes f
by means of an iterative process.
*/
fn fn3(n: i128) -> i128 {
if n < 3 {
n
} else {
fn3(n - 1) + 2 * fn3(n - 2) + 3 * fn3(n - 3)
}
}
fn fn3_iter(n: i128) -> i128 {
fn helper(p3: i128, p2: i128, p1: i128, k: i128, n: i128) -> i128 {
if k == n {
p1
} else {
helper(p2, p1, 3 * p3 + 2 * p2 + p1, k + 1, n)
}
}
return helper(0, 1, 2, 2, n);
}
// m >= n
pub fn pascal(m: i128, n: i128) -> i128 {
if n == 0 || m == n {
1
} else {
pascal(m - 1, n - 1) + pascal(m - 1, n)
}
}
// pascal triangle with interative process
pub fn pascal_iter(m: usize, n: usize) -> i128 {
fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 {
if m == 0 || m == n {
1
} else {
if l == m {
pre_vec[n - 1] + pre_vec[n]
} else {
let mut new_vec = vec![];
for (i, _) in pre_vec.iter().enumerate() {
if i == 0 {
new_vec.push(1);
} else {
new_vec.push(pre_vec[i - 1] + pre_vec[i])
}
}
new_vec.push(1);
helper(m, n, l + 1, new_vec.to_vec())
}
}
}
helper(m, n, 2, vec![1, 1])
}
pub fn cube(x: f32) -> f32 {
x * x * x
}
fn p(x: f32) -> f32 {
3.0 * x - 4.0 * cube(x)
}
pub fn sine(angle: f32) -> f32 {
if f32::abs(angle) <= 0.1 {
angle
} else {
p(sine(angle / 3.0))
}
}
pub fn expt(b: i128, n: i128) -> i128 {
if n == 0 {
1
} else {
b * expt(b, n - 1)
}
}
pub fn expt_iter(b: i128, n: i128) -> i128 {
fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 {
if c == n {
p
} else {
helper(c + 1, b * p, b, n)
}
}
helper(0, 1, b, n)
}
pub fn is_even(n: i128) -> bool {
n % 2 == 0
}
pub fn square(i: i128) -> i128 {
i * i
}
pub fn half(i: i128) -> i128 {
i / 2
}
pub fn fast_expt(b: i128, n: i128) -> i128 {
if n == 1 {
b
} else {
if is_even(n) {
square(fast_expt(b, half(n)))
} else {
b * fast_expt(b, n - 1)
}
}
}
pub fn fast_expt_iter(b: i128, n: i128) -> i128 {
fn helper(p: i128, b: i128, n: i128) -> i128 {
if n == 0 {
p
} else {
if is_even(n) {
helper(p, square(b), half(n))
} else {
helper(b * p, b, n - 1)
}
}
}
helper(1, b, n)
}
pub fn double(x: i128) -> i128 {
x * 2
}
pub fn times(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
a + times(a, b - 1)
}
}
pub fn times_iter(a: i128, b: i128) -> i128 {
fn helper(s: i128, a: i128, b: i128) -> i128 {
if b == 0 {
s
} else {
if is_even(b) {
helper(s, double(a), half(b))
} else {
helper(s + a, a, b - 1)
}
}
}
helper(0, a, b)
}
pub fn fast_fib(n: i128) -> i128 {
fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 {
if count == 0 {
b
} else {
if is_even(count) {
helper(
a,
b,
square(p) + square(q),
2 * p * q + square(q),
half(count),
)
} else {
helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1)
}
}
}
helper(1, 0, 0, 1, n)
}
pub fn gcd(a: i128, b: i128) -> i128 {
if b == 0 {
a
} else {
gcd(b, a % b)
}
}
pub fn devides(test_divisor: i128, n: i128) -> bool {
n % test_divisor == 0
}
fn find_divisor(n: i128, test_divisor: i128) -> i128 {
if square(test_divisor) > n {
n
} else {
if devides(test_divisor, n) {
test_divisor
} else {
find_divisor(n, test_divisor + 1)
}
}
}
pub fn smallest_divisor(n: i128) -> i128 {
find_divisor(n, 2)
}
pub fn is_prime(n: i128) -> bool {
smallest_divisor(n) == n
}
pub fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// square after expmod, otherwise it will overflow easily
square(expmod(base, half(exp), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
// Fermat test
pub fn fermat_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n, n) == a
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n);
println!("fermat_test testing {}", a);
try_it(a, n)
}
pub fn fast_is_prime(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if fermat_test(n) {
fast_is_prime(n, times - 1)
} else {
false
}
}
}
// Exercise 1.22
fn timed_prime_test(n: i128) -> bool {
println!(" start testing: {}", n);
let now = SystemTime::now();
start_prime_test(n, now)
}
fn start_prime_test(n: i128, now: SystemTime) -> bool {
if is_prime(n) {
report_prime(now, n)
} else {
true
}
}
fn report_prime(now: SystemTime, n: i128) -> bool {
println!(" *** ");
println!(" prime number is: {}", n);
println!("Time used: {}", get_lapsed_time_millis(now));
/*
match now.elapsed() {
Ok(elapsed) => println!("Time used: {}", elapsed.as_millis()),
Err(e) => println!("Error: {:?}", e),
}
*/
false
}
fn get_lapsed_time_millis(then: SystemTime) -> u128 {
let new_now = SystemTime::now();
new_now
.duration_since(UNIX_EPOCH)
.expect("Time")
.as_millis()
- then.duration_since(UNIX_EPOCH).expect("Time").as_millis()
}
// start is odd number
fn search_for_prime(start: i128, count: i128) {
fn helper(start: i128, count: i128) {
if count == 0 {
return;
} else {
if timed_prime_test(start) {
helper(start + 2, count)
} else {
helper(start + 2, count - 1)
}
}
}
helper(start, count)
}
// Exercise 1.27
fn test_carmichael_number(n: i128) { | }
// Exercise 1.28 Miller-Rabin test
fn miller_rabin_test(n: i128, times: i128) -> bool {
fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// Exercise 1.28: trivial_test returns 0 when it sees a nontrivial square root
// of 1 modulo m, so the whole result collapses to 0 and the test fails;
// squaring the already-reduced value also keeps intermediates from overflowing
square(trivial_test(expmod(base, half(exp), m), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
fn trivial_test(r: i128, m: i128) -> i128 {
if r == 1 || r == m - 1 {
r
} else if square(r) % m == 1 {
0
} else {
r
}
}
fn helper_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n - 1, n) == 1
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n - 1);
println!("miller_rabin testing {}", a);
try_it(a, n)
}
fn test_times(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if helper_test(n) {
test_times(n, times - 1)
} else {
false
}
}
}
test_times(n, times)
}
#[test]
fn functions_and_their_processes_tests() {
println!("{}", factorial(5));
println!("{}", fact_iter(5));
println!("{}", plus(5, 13));
println!("{}", ackermann(1, 10));
println!("{}", ackermann(2, 4));
println!("{}", ackermann(3, 3));
println!("{}", f(3));
println!("{}", g(3));
println!("{}", h(4));
println!("{}", fac(5));
println!("{}", fib(5));
println!("{}", fib_iter(5));
println!("{}", count_change(100));
println!("{}", fn3(3));
println!("{}", fn3(4));
println!("{}", fn3_iter(3));
println!("{}", fn3_iter(4));
println!("{}", pascal(3, 2));
println!("{}", pascal_iter(3, 2));
println!("{}", pascal(4, 2));
println!("{}", pascal_iter(4, 2));
println!("{}", expt(4, 2));
println!("{}", expt_iter(4, 2));
println!("{}", fast_expt(4, 2));
println!("{}", fast_expt_iter(4, 2));
println!("{}", times(4, 2));
println!("{}", times_iter(4, 2));
println!("{}", fast_fib(5));
println!("{}", gcd(2, 5));
println!("{}", smallest_divisor(45));
println!("{}", is_prime(5));
println!("{}", fermat_test(5));
println!("{}", timed_prime_test(16769023));
search_for_prime(1111, 3);
search_for_prime(11111, 3);
search_for_prime(111111, 3);
search_for_prime(1111111, 3);
search_for_prime(11111111, 3);
search_for_prime(111111111, 3);
// carmichael number
test_carmichael_number(2821);
println!("is 2821 prime by fermat_test? {}", fast_is_prime(2821, 100));
println!(
"is 2821 prime by miller_rabin test? {}",
miller_rabin_test(2821, 100)
);
} | for i in 2..n {
if expmod(i, n, n) == i {
println!(" testing {}", i);
}
} | random_line_split |
functions_and_their_processes.rs | use rand::Rng;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn factorial(n: i128) -> i128 {
if n == 1 {
1
} else {
n * factorial(n - 1)
}
}
pub fn fact_iter(n: i128) -> i128 {
fn helper(p: i128, c: i128, max_count: i128) -> i128 {
if c > max_count {
p
} else {
helper(p * c, c + 1, max_count)
}
}
helper(1, 1, n)
}
pub fn inc(a: i128) -> i128 {
a + 1
}
pub fn dec(a: i128) -> i128 {
a - 1
}
pub fn plus(a: i128, b: i128) -> i128 {
if a == 0 {
b
} else {
plus(dec(a), inc(b))
}
}
pub fn ackermann(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
if a == 0 {
2 * b
} else {
if b == 1 | else {
ackermann(a - 1, ackermann(a, b - 1))
}
}
}
}
fn f(n: i128) -> i128 {
ackermann(0, n)
}
fn g(n: i128) -> i128 {
ackermann(1, n)
}
fn h(n: i128) -> i128 {
ackermann(2, n)
}
pub fn fac(n: i128) -> i128 {
if n == 1 {
1
} else {
n * fac(n - 1)
}
}
pub fn fib(n: i128) -> i128 {
if n < 2 {
n
} else {
fib(n - 2) + fib(n - 1)
}
}
// nested Rust `fn` items cannot capture variables from the enclosing function,
// so all state is threaded explicitly through the helper's parameters
pub fn fib_iter(n: i128) -> i128 {
fn helper(a: i128, b: i128, i: i128, n: i128) -> i128 {
if i == n {
b
} else {
helper(b, a + b, i + 1, n)
}
}
helper(0, 1, 1, n)
}
/*
The number of ways to change amount a using n kinds of coins equals
- the number of ways to change amount a using all but the first kind of coin, plus
- the number of ways to change amount (a - d) using all n kinds of coins where d is the value of the first kind of coin
*/
fn count_change(amount: i128) -> i128 {
cc(amount, 6)
}
fn cc(amount: i128, coin_kind: i8) -> i128 {
if amount == 0 {
1
} else {
if amount < 0 || coin_kind == 0 {
0
} else {
cc(amount, coin_kind - 1) + cc(amount - get_value(coin_kind), coin_kind)
}
}
}
fn get_value(coin_kind: i8) -> i128 {
match coin_kind {
6 => 100,
5 => 50,
4 => 25,
3 => 10,
2 => 5,
1 => 1,
_ => 0,
}
}
/*
Exercise 1.11 A function f is defined by the rule that f(n)=n if n<3 and f(n)=f(n−1)+2f(n−2)+3f(n−3) if n≥3.
Write a JavaScript function that computes f by means of a recursive process. Write a function that computes f
by means of an iterative process.
*/
fn fn3(n: i128) -> i128 {
if n < 3 {
n
} else {
fn3(n - 1) + 2 * fn3(n - 2) + 3 * fn3(n - 3)
}
}
fn fn3_iter(n: i128) -> i128 {
fn helper(p3: i128, p2: i128, p1: i128, k: i128, n: i128) -> i128 {
if k == n {
p1
} else {
helper(p2, p1, 3 * p3 + 2 * p2 + p1, k + 1, n)
}
}
return helper(0, 1, 2, 2, n);
}
// m >= n
pub fn pascal(m: i128, n: i128) -> i128 {
if n == 0 || m == n {
1
} else {
pascal(m - 1, n - 1) + pascal(m - 1, n)
}
}
// Pascal's triangle computed with an iterative process
pub fn pascal_iter(m: usize, n: usize) -> i128 {
fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 {
if m == 0 || m == n {
1
} else {
if l == m {
pre_vec[n - 1] + pre_vec[n]
} else {
let mut new_vec = vec![];
for (i, _) in pre_vec.iter().enumerate() {
if i == 0 {
new_vec.push(1);
} else {
new_vec.push(pre_vec[i - 1] + pre_vec[i])
}
}
new_vec.push(1);
helper(m, n, l + 1, new_vec.to_vec())
}
}
}
helper(m, n, 2, vec![1, 1])
}
pub fn cube(x: f32) -> f32 {
x * x * x
}
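// sine uses the triple-angle identity sin x = 3 sin(x/3) - 4 sin^3(x/3) (the
// function p below); the angle is divided by 3 until it is small enough that
// sin x can be approximated by x itself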
fn p(x: f32) -> f32 {
3.0 * x - 4.0 * cube(x)
}
pub fn sine(angle: f32) -> f32 {
if f32::abs(angle) <= 0.1 {
angle
} else {
p(sine(angle / 3.0))
}
}
pub fn expt(b: i128, n: i128) -> i128 {
if n == 0 {
1
} else {
b * expt(b, n - 1)
}
}
pub fn expt_iter(b: i128, n: i128) -> i128 {
fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 {
if c == n {
p
} else {
helper(c + 1, b * p, b, n)
}
}
helper(0, 1, b, n)
}
pub fn is_even(n: i128) -> bool {
n % 2 == 0
}
pub fn square(i: i128) -> i128 {
i * i
}
pub fn half(i: i128) -> i128 {
i / 2
}
pub fn fast_expt(b: i128, n: i128) -> i128 {
// use n == 0 as the base case so that fast_expt(b, 0) terminates with 1
if n == 0 {
1
} else {
if is_even(n) {
square(fast_expt(b, half(n)))
} else {
b * fast_expt(b, n - 1)
}
}
}
pub fn fast_expt_iter(b: i128, n: i128) -> i128 {
fn helper(p: i128, b: i128, n: i128) -> i128 {
if n == 0 {
p
} else {
if is_even(n) {
helper(p, square(b), half(n))
} else {
helper(b * p, b, n - 1)
}
}
}
helper(1, b, n)
}
pub fn double(x: i128) -> i128 {
x * 2
}
pub fn times(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
a + times(a, b - 1)
}
}
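// iterative multiplication in O(log b) doublings/halvings; the invariant
// s + a * b is preserved by every helper call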
pub fn times_iter(a: i128, b: i128) -> i128 {
fn helper(s: i128, a: i128, b: i128) -> i128 {
if b == 0 {
s
} else {
if is_even(b) {
helper(s, double(a), half(b))
} else {
helper(s + a, a, b - 1)
}
}
}
helper(0, a, b)
}
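// fib(n) in O(log n) steps: on even counts the transformation T(p, q) is
// squared into T(p^2 + q^2, 2pq + q^2); T(0, 1) is the ordinary Fibonacci step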
pub fn fast_fib(n: i128) -> i128 {
fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 {
if count == 0 {
b
} else {
if is_even(count) {
helper(
a,
b,
square(p) + square(q),
2 * p * q + square(q),
half(count),
)
} else {
helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1)
}
}
}
helper(1, 0, 0, 1, n)
}
pub fn gcd(a: i128, b: i128) -> i128 {
if b == 0 {
a
} else {
gcd(b, a % b)
}
}
pub fn devides(test_divisor: i128, n: i128) -> bool {
n % test_divisor == 0
}
fn find_divisor(n: i128, test_divisor: i128) -> i128 {
if square(test_divisor) > n {
n
} else {
if devides(test_divisor, n) {
test_divisor
} else {
find_divisor(n, test_divisor + 1)
}
}
}
pub fn smallest_divisor(n: i128) -> i128 {
find_divisor(n, 2)
}
pub fn is_prime(n: i128) -> bool {
smallest_divisor(n) == n
}
pub fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// square after expmod, otherwise it will overflow easily
square(expmod(base, half(exp), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
// Fermat test
pub fn fermat_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n, n) == a
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n);
println!("fermat_test testing {}", a);
try_it(a, n)
}
pub fn fast_is_prime(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if fermat_test(n) {
fast_is_prime(n, times - 1)
} else {
false
}
}
}
// Exercise 1.22
fn timed_prime_test(n: i128) -> bool {
println!(" start testing: {}", n);
let now = SystemTime::now();
start_prime_test(n, now)
}
fn start_prime_test(n: i128, now: SystemTime) -> bool {
if is_prime(n) {
report_prime(now, n)
} else {
true
}
}
fn report_prime(now: SystemTime, n: i128) -> bool {
println!(" *** ");
println!(" prime number is: {}", n);
println!("Time used: {}", get_lapsed_time_millis(now));
/*
match now.elapsed() {
Ok(elapsed) => println!("Time used: {}", elapsed.as_millis()),
Err(e) => println!("Error: {:?}", e),
}
*/
false
}
fn get_lapsed_time_millis(then: SystemTime) -> u128 {
let new_now = SystemTime::now();
new_now
.duration_since(UNIX_EPOCH)
.expect("Time")
.as_millis()
- then.duration_since(UNIX_EPOCH).expect("Time").as_millis()
}
// `start` must be odd; `count` primes are reported in total -- timed_prime_test
// returns false when it finds (and reports) a prime, which is when count is decremented
fn search_for_prime(start: i128, count: i128) {
fn helper(start: i128, count: i128) {
if count == 0 {
return;
} else {
if timed_prime_test(start) {
helper(start + 2, count)
} else {
helper(start + 2, count - 1)
}
}
}
helper(start, count)
}
// Exercise 1.27
fn test_carmichael_number(n: i128) {
for i in 2..n {
if expmod(i, n, n) == i {
println!(" testing {}", i);
}
}
}
// Exercise 1.28 Miller-Rabin test
fn miller_rabin_test(n: i128, times: i128) -> bool {
fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// Exercise 1.28: trivial_test returns 0 when it sees a nontrivial square root
// of 1 modulo m, so the whole result collapses to 0 and the test fails;
// squaring the already-reduced value also keeps intermediates from overflowing
square(trivial_test(expmod(base, half(exp), m), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
fn trivial_test(r: i128, m: i128) -> i128 {
if r == 1 || r == m - 1 {
r
} else if square(r) % m == 1 {
0
} else {
r
}
}
fn helper_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n - 1, n) == 1
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n - 1);
println!("miller_rabin testing {}", a);
try_it(a, n)
}
fn test_times(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if helper_test(n) {
test_times(n, times - 1)
} else {
false
}
}
}
test_times(n, times)
}
#[test]
fn functions_and_their_processes_tests() {
println!("{}", factorial(5));
println!("{}", fact_iter(5));
println!("{}", plus(5, 13));
println!("{}", ackermann(1, 10));
println!("{}", ackermann(2, 4));
println!("{}", ackermann(3, 3));
println!("{}", f(3));
println!("{}", g(3));
println!("{}", h(4));
println!("{}", fac(5));
println!("{}", fib(5));
println!("{}", fib_iter(5));
println!("{}", count_change(100));
println!("{}", fn3(3));
println!("{}", fn3(4));
println!("{}", fn3_iter(3));
println!("{}", fn3_iter(4));
println!("{}", pascal(3, 2));
println!("{}", pascal_iter(3, 2));
println!("{}", pascal(4, 2));
println!("{}", pascal_iter(4, 2));
println!("{}", expt(4, 2));
println!("{}", expt_iter(4, 2));
println!("{}", fast_expt(4, 2));
println!("{}", fast_expt_iter(4, 2));
println!("{}", times(4, 2));
println!("{}", times_iter(4, 2));
println!("{}", fast_fib(5));
println!("{}", gcd(2, 5));
println!("{}", smallest_divisor(45));
println!("{}", is_prime(5));
println!("{}", fermat_test(5));
println!("{}", timed_prime_test(16769023));
search_for_prime(1111, 3);
search_for_prime(11111, 3);
search_for_prime(111111, 3);
search_for_prime(1111111, 3);
search_for_prime(11111111, 3);
search_for_prime(111111111, 3);
// carmichael number
test_carmichael_number(2821);
println!("is 2821 prime by fermat_test? {}", fast_is_prime(2821, 100));
println!(
"is 2821 prime by miller_rabin test? {}",
miller_rabin_test(2821, 100)
);
}
| {
2
} | conditional_block |
functions_and_their_processes.rs | use rand::Rng;
use std::time::{SystemTime, UNIX_EPOCH};
pub fn factorial(n: i128) -> i128 {
if n == 1 {
1
} else {
n * factorial(n - 1)
}
}
pub fn fact_iter(n: i128) -> i128 {
fn helper(p: i128, c: i128, max_count: i128) -> i128 {
if c > max_count {
p
} else {
helper(p * c, c + 1, max_count)
}
}
helper(1, 1, n)
}
pub fn inc(a: i128) -> i128 {
a + 1
}
pub fn dec(a: i128) -> i128 {
a - 1
}
pub fn plus(a: i128, b: i128) -> i128 {
if a == 0 {
b
} else {
plus(dec(a), inc(b))
}
}
pub fn ackermann(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
if a == 0 {
2 * b
} else {
if b == 1 {
2
} else {
ackermann(a - 1, ackermann(a, b - 1))
}
}
}
}
fn f(n: i128) -> i128 {
ackermann(0, n)
}
fn g(n: i128) -> i128 {
ackermann(1, n)
}
fn h(n: i128) -> i128 {
ackermann(2, n)
}
pub fn fac(n: i128) -> i128 {
if n == 1 {
1
} else {
n * fac(n - 1)
}
}
pub fn fib(n: i128) -> i128 {
if n < 2 {
n
} else {
fib(n - 2) + fib(n - 1)
}
}
// nested Rust `fn` items cannot capture variables from the enclosing function,
// so all state is threaded explicitly through the helper's parameters
pub fn fib_iter(n: i128) -> i128 {
fn helper(a: i128, b: i128, i: i128, n: i128) -> i128 {
if i == n {
b
} else {
helper(b, a + b, i + 1, n)
}
}
helper(0, 1, 1, n)
}
/*
The number of ways to change amount a using n kinds of coins equals
- the number of ways to change amount a using all but the first kind of coin, plus
- the number of ways to change amount (a - d) using all n kinds of coins where d is the value of the first kind of coin
*/
fn count_change(amount: i128) -> i128 {
cc(amount, 6)
}
fn | (amount: i128, coin_kind: i8) -> i128 {
if amount == 0 {
1
} else {
if amount < 0 || coin_kind == 0 {
0
} else {
cc(amount, coin_kind - 1) + cc(amount - get_value(coin_kind), coin_kind)
}
}
}
fn get_value(coin_kind: i8) -> i128 {
match coin_kind {
6 => 100,
5 => 50,
4 => 25,
3 => 10,
2 => 5,
1 => 1,
_ => 0,
}
}
/*
Exercise 1.11 A function f is defined by the rule that f(n)=n if n<3 and f(n)=f(n−1)+2f(n−2)+3f(n−3) if n≥3.
Write a JavaScript function that computes f by means of a recursive process. Write a function that computes f
by means of an iterative process.
*/
fn fn3(n: i128) -> i128 {
if n < 3 {
n
} else {
fn3(n - 1) + 2 * fn3(n - 2) + 3 * fn3(n - 3)
}
}
fn fn3_iter(n: i128) -> i128 {
fn helper(p3: i128, p2: i128, p1: i128, k: i128, n: i128) -> i128 {
if k == n {
p1
} else {
helper(p2, p1, 3 * p3 + 2 * p2 + p1, k + 1, n)
}
}
return helper(0, 1, 2, 2, n);
}
// m >= n
pub fn pascal(m: i128, n: i128) -> i128 {
if n == 0 || m == n {
1
} else {
pascal(m - 1, n - 1) + pascal(m - 1, n)
}
}
// Pascal's triangle computed with an iterative process
pub fn pascal_iter(m: usize, n: usize) -> i128 {
fn helper(m: usize, n: usize, l: usize, pre_vec: Vec<i128>) -> i128 {
if m == 0 || m == n {
1
} else {
if l == m {
pre_vec[n - 1] + pre_vec[n]
} else {
let mut new_vec = vec![];
for (i, _) in pre_vec.iter().enumerate() {
if i == 0 {
new_vec.push(1);
} else {
new_vec.push(pre_vec[i - 1] + pre_vec[i])
}
}
new_vec.push(1);
helper(m, n, l + 1, new_vec.to_vec())
}
}
}
helper(m, n, 2, vec![1, 1])
}
pub fn cube(x: f32) -> f32 {
x * x * x
}
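// sine uses the triple-angle identity sin x = 3 sin(x/3) - 4 sin^3(x/3) (the
// function p below); the angle is divided by 3 until it is small enough that
// sin x can be approximated by x itself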
fn p(x: f32) -> f32 {
3.0 * x - 4.0 * cube(x)
}
pub fn sine(angle: f32) -> f32 {
if f32::abs(angle) <= 0.1 {
angle
} else {
p(sine(angle / 3.0))
}
}
pub fn expt(b: i128, n: i128) -> i128 {
if n == 0 {
1
} else {
b * expt(b, n - 1)
}
}
pub fn expt_iter(b: i128, n: i128) -> i128 {
fn helper(c: i128, p: i128, b: i128, n: i128) -> i128 {
if c == n {
p
} else {
helper(c + 1, b * p, b, n)
}
}
helper(0, 1, b, n)
}
pub fn is_even(n: i128) -> bool {
n % 2 == 0
}
pub fn square(i: i128) -> i128 {
i * i
}
pub fn half(i: i128) -> i128 {
i / 2
}
pub fn fast_expt(b: i128, n: i128) -> i128 {
// use n == 0 as the base case so that fast_expt(b, 0) terminates with 1
if n == 0 {
1
} else {
if is_even(n) {
square(fast_expt(b, half(n)))
} else {
b * fast_expt(b, n - 1)
}
}
}
pub fn fast_expt_iter(b: i128, n: i128) -> i128 {
fn helper(p: i128, b: i128, n: i128) -> i128 {
if n == 0 {
p
} else {
if is_even(n) {
helper(p, square(b), half(n))
} else {
helper(b * p, b, n - 1)
}
}
}
helper(1, b, n)
}
pub fn double(x: i128) -> i128 {
x * 2
}
pub fn times(a: i128, b: i128) -> i128 {
if b == 0 {
0
} else {
a + times(a, b - 1)
}
}
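// iterative multiplication in O(log b) doublings/halvings; the invariant
// s + a * b is preserved by every helper call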
pub fn times_iter(a: i128, b: i128) -> i128 {
fn helper(s: i128, a: i128, b: i128) -> i128 {
if b == 0 {
s
} else {
if is_even(b) {
helper(s, double(a), half(b))
} else {
helper(s + a, a, b - 1)
}
}
}
helper(0, a, b)
}
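// fib(n) in O(log n) steps: on even counts the transformation T(p, q) is
// squared into T(p^2 + q^2, 2pq + q^2); T(0, 1) is the ordinary Fibonacci step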
pub fn fast_fib(n: i128) -> i128 {
fn helper(a: i128, b: i128, p: i128, q: i128, count: i128) -> i128 {
if count == 0 {
b
} else {
if is_even(count) {
helper(
a,
b,
square(p) + square(q),
2 * p * q + square(q),
half(count),
)
} else {
helper(b * q + a * q + a * p, b * p + a * q, p, q, count - 1)
}
}
}
helper(1, 0, 0, 1, n)
}
pub fn gcd(a: i128, b: i128) -> i128 {
if b == 0 {
a
} else {
gcd(b, a % b)
}
}
pub fn devides(test_divisor: i128, n: i128) -> bool {
n % test_divisor == 0
}
fn find_divisor(n: i128, test_divisor: i128) -> i128 {
if square(test_divisor) > n {
n
} else {
if devides(test_divisor, n) {
test_divisor
} else {
find_divisor(n, test_divisor + 1)
}
}
}
pub fn smallest_divisor(n: i128) -> i128 {
find_divisor(n, 2)
}
pub fn is_prime(n: i128) -> bool {
smallest_divisor(n) == n
}
pub fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// square after expmod, otherwise it will overflow easily
square(expmod(base, half(exp), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
// Fermat test
pub fn fermat_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n, n) == a
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n);
println!("fermat_test testing {}", a);
try_it(a, n)
}
pub fn fast_is_prime(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if fermat_test(n) {
fast_is_prime(n, times - 1)
} else {
false
}
}
}
// Exercise 1.22
fn timed_prime_test(n: i128) -> bool {
println!(" start testing: {}", n);
let now = SystemTime::now();
start_prime_test(n, now)
}
fn start_prime_test(n: i128, now: SystemTime) -> bool {
if is_prime(n) {
report_prime(now, n)
} else {
true
}
}
fn report_prime(now: SystemTime, n: i128) -> bool {
println!(" *** ");
println!(" prime number is: {}", n);
println!("Time used: {}", get_lapsed_time_millis(now));
/*
match now.elapsed() {
Ok(elapsed) => println!("Time used: {}", elapsed.as_millis()),
Err(e) => println!("Error: {:?}", e),
}
*/
false
}
fn get_lapsed_time_millis(then: SystemTime) -> u128 {
let new_now = SystemTime::now();
new_now
.duration_since(UNIX_EPOCH)
.expect("Time")
.as_millis()
- then.duration_since(UNIX_EPOCH).expect("Time").as_millis()
}
// `start` must be odd; `count` primes are reported in total -- timed_prime_test
// returns false when it finds (and reports) a prime, which is when count is decremented
fn search_for_prime(start: i128, count: i128) {
fn helper(start: i128, count: i128) {
if count == 0 {
return;
} else {
if timed_prime_test(start) {
helper(start + 2, count)
} else {
helper(start + 2, count - 1)
}
}
}
helper(start, count)
}
// Exercise 1.27
fn test_carmichael_number(n: i128) {
for i in 2..n {
if expmod(i, n, n) == i {
println!(" testing {}", i);
}
}
}
// Exercise 1.28 Miller-Rabin test
fn miller_rabin_test(n: i128, times: i128) -> bool {
fn expmod(base: i128, exp: i128, m: i128) -> i128 {
if exp == 0 {
1
} else {
if is_even(exp) {
// Exercise 1.28: trivial_test returns 0 when it sees a nontrivial square root
// of 1 modulo m, so the whole result collapses to 0 and the test fails;
// squaring the already-reduced value also keeps intermediates from overflowing
square(trivial_test(expmod(base, half(exp), m), m)) % m
} else {
base * expmod(base, exp - 1, m) % m
}
}
}
fn trivial_test(r: i128, m: i128) -> i128 {
if r == 1 || r == m - 1 {
r
} else if square(r) % m == 1 {
0
} else {
r
}
}
fn helper_test(n: i128) -> bool {
fn try_it(a: i128, n: i128) -> bool {
expmod(a, n - 1, n) == 1
}
let mut rng = rand::thread_rng();
let a = rng.gen_range(1, n - 1);
println!("miller_rabin testing {}", a);
try_it(a, n)
}
fn test_times(n: i128, times: i128) -> bool {
if times == 0 {
true
} else {
if helper_test(n) {
test_times(n, times - 1)
} else {
false
}
}
}
test_times(n, times)
}
#[test]
fn functions_and_their_processes_tests() {
println!("{}", factorial(5));
println!("{}", fact_iter(5));
println!("{}", plus(5, 13));
println!("{}", ackermann(1, 10));
println!("{}", ackermann(2, 4));
println!("{}", ackermann(3, 3));
println!("{}", f(3));
println!("{}", g(3));
println!("{}", h(4));
println!("{}", fac(5));
println!("{}", fib(5));
println!("{}", fib_iter(5));
println!("{}", count_change(100));
println!("{}", fn3(3));
println!("{}", fn3(4));
println!("{}", fn3_iter(3));
println!("{}", fn3_iter(4));
println!("{}", pascal(3, 2));
println!("{}", pascal_iter(3, 2));
println!("{}", pascal(4, 2));
println!("{}", pascal_iter(4, 2));
println!("{}", expt(4, 2));
println!("{}", expt_iter(4, 2));
println!("{}", fast_expt(4, 2));
println!("{}", fast_expt_iter(4, 2));
println!("{}", times(4, 2));
println!("{}", times_iter(4, 2));
println!("{}", fast_fib(5));
println!("{}", gcd(2, 5));
println!("{}", smallest_divisor(45));
println!("{}", is_prime(5));
println!("{}", fermat_test(5));
println!("{}", timed_prime_test(16769023));
search_for_prime(1111, 3);
search_for_prime(11111, 3);
search_for_prime(111111, 3);
search_for_prime(1111111, 3);
search_for_prime(11111111, 3);
search_for_prime(111111111, 3);
// carmichael number
test_carmichael_number(2821);
println!("is 2821 prime by fermat_test? {}", fast_is_prime(2821, 100));
println!(
"is 2821 prime by miller_rabin test? {}",
miller_rabin_test(2821, 100)
);
}
| cc | identifier_name |
lib.rs | #![allow(clippy::type_complexity)]
#![allow(clippy::question_mark)]
#![warn(rust_2018_idioms)]
#![warn(missing_docs)]
//! The salsa crate is a crate for incremental recomputation. It
//! permits you to define a "database" of queries with both inputs and
//! values derived from those inputs; as you set the inputs, you can
//! re-execute the derived queries and it will try to re-use results
//! from previous invocations as appropriate.
mod derived;
mod doctest;
mod durability;
mod hash;
mod input;
mod intern_id;
mod interned;
mod revision;
mod runtime;
mod storage;
pub mod debug;
/// Items in this module are public for implementation reasons,
/// and are exempt from the SemVer guarantees.
#[doc(hidden)]
pub mod plumbing;
use crate::plumbing::CycleRecoveryStrategy;
use crate::plumbing::DerivedQueryStorageOps;
use crate::plumbing::InputQueryStorageOps;
use crate::plumbing::LruQueryStorageOps;
use crate::plumbing::QueryStorageMassOps;
use crate::plumbing::QueryStorageOps;
pub use crate::revision::Revision;
use std::fmt::{self, Debug};
use std::hash::Hash;
use std::panic::AssertUnwindSafe;
use std::panic::{self, UnwindSafe};
use std::sync::Arc;
pub use crate::durability::Durability;
pub use crate::intern_id::InternId;
pub use crate::interned::InternKey;
pub use crate::runtime::Runtime;
pub use crate::runtime::RuntimeId;
pub use crate::storage::Storage;
/// The base trait which your "query context" must implement. Gives
/// access to the salsa runtime, which you must embed into your query
/// context (along with whatever other state you may require).
pub trait Database: plumbing::DatabaseOps {
/// This function is invoked at key points in the salsa
/// runtime. It permits the database to be customized and to
/// inject logging or other custom behavior.
fn salsa_event(&self, event_fn: Event) {
#![allow(unused_variables)]
}
/// Starts unwinding the stack if the current revision is cancelled.
///
/// This method can be called by query implementations that perform
/// potentially expensive computations, in order to speed up propagation of
/// cancellation.
///
/// Cancellation will automatically be triggered by salsa on any query
/// invocation.
///
/// This method should not be overridden by `Database` implementors. A
/// `salsa_event` is emitted when this method is called, so that should be
/// used instead.
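///
/// A rough usage sketch (the query body and helper names below are
/// hypothetical placeholders, not part of salsa):
///
/// ```rust,ignore
/// fn expensive_query(db: &dyn MyDatabase, key: Key) -> Value {
///     let mut acc = Value::default();
///     for item in load_items(db, key) {
///         // bail out early if a newer revision is already pending
///         db.unwind_if_cancelled();
///         acc.merge(process(item));
///     }
///     acc
/// }
/// ```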
#[inline]
fn unwind_if_cancelled(&self) {
let runtime = self.salsa_runtime();
self.salsa_event(Event {
runtime_id: runtime.id(),
kind: EventKind::WillCheckCancellation,
});
let current_revision = runtime.current_revision();
let pending_revision = runtime.pending_revision();
log::debug!(
"unwind_if_cancelled: current_revision={:?}, pending_revision={:?}",
current_revision,
pending_revision
);
if pending_revision > current_revision {
runtime.unwind_cancelled();
}
}
/// Gives access to the underlying salsa runtime.
///
/// This method should not be overridden by `Database` implementors.
fn salsa_runtime(&self) -> &Runtime {
self.ops_salsa_runtime()
}
/// Gives access to the underlying salsa runtime.
///
/// This method should not be overridden by `Database` implementors.
fn salsa_runtime_mut(&mut self) -> &mut Runtime {
self.ops_salsa_runtime_mut()
}
}
/// The `Event` struct identifies various notable things that can
/// occur during salsa execution. Instances of this struct are given
/// to `salsa_event`.
pub struct Event {
/// The id of the snapshot that triggered the event. Usually
/// 1-to-1 with a thread, as well.
pub runtime_id: RuntimeId,
/// What sort of event was it.
pub kind: EventKind,
}
impl Event {
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<'me, D: ?Sized>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me
where
D: plumbing::DatabaseOps,
{
EventDebug { event: self, db }
}
}
impl fmt::Debug for Event {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Event")
.field("runtime_id", &self.runtime_id)
.field("kind", &self.kind)
.finish()
}
}
struct EventDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
event: &'me Event,
db: &'me D,
}
impl<'me, D: ?Sized> fmt::Debug for EventDebug<'me, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Event")
.field("runtime_id", &self.event.runtime_id)
.field("kind", &self.event.kind.debug(self.db))
.finish()
}
}
/// An enum identifying the various kinds of events that can occur.
pub enum EventKind {
/// Occurs when we found that all inputs to a memoized value are
/// up-to-date and hence the value can be re-used without
/// executing the closure.
///
/// Executes before the "re-used" value is returned.
DidValidateMemoizedValue {
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that another thread (with id `other_runtime_id`) is processing the
/// given query (`database_key`), so we will block until they
/// finish.
///
/// Executes after we have registered with the other thread but
/// before they have answered us.
///
/// (NB: you can find the `id` of the current thread via the
/// `salsa_runtime`)
WillBlockOn {
/// The id of the runtime we will block on.
other_runtime_id: RuntimeId,
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that the function for this query will be executed.
/// This is either because it has never executed before or because
/// its inputs may be out of date.
WillExecute {
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that `unwind_if_cancelled` was called and salsa will check if
/// the current revision has been cancelled.
WillCheckCancellation,
}
impl EventKind {
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<'me, D: ?Sized>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me
where
D: plumbing::DatabaseOps,
{
EventKindDebug { kind: self, db }
}
}
impl fmt::Debug for EventKind {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
EventKind::DidValidateMemoizedValue { database_key } => fmt
.debug_struct("DidValidateMemoizedValue")
.field("database_key", database_key)
.finish(),
EventKind::WillBlockOn {
other_runtime_id,
database_key,
} => fmt
.debug_struct("WillBlockOn")
.field("other_runtime_id", other_runtime_id)
.field("database_key", database_key)
.finish(),
EventKind::WillExecute { database_key } => fmt
.debug_struct("WillExecute")
.field("database_key", database_key)
.finish(),
EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(),
}
}
}
struct EventKindDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
kind: &'me EventKind,
db: &'me D,
}
impl<'me, D: ?Sized> fmt::Debug for EventKindDebug<'me, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
EventKind::DidValidateMemoizedValue { database_key } => fmt
.debug_struct("DidValidateMemoizedValue")
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillBlockOn {
other_runtime_id,
database_key,
} => fmt
.debug_struct("WillBlockOn")
.field("other_runtime_id", &other_runtime_id)
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillExecute { database_key } => fmt
.debug_struct("WillExecute")
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(),
}
}
}
/// Indicates a database that also supports parallel query
/// evaluation. All of Salsa's base query support is capable of
/// parallel execution, but for it to work, your query key/value types
/// must also be `Send`, as must any additional data in your database.
pub trait ParallelDatabase: Database + Send {
/// Creates a second handle to the database that holds the
/// database fixed at a particular revision. So long as this
/// "frozen" handle exists, any attempt to [`set`] an input will
/// block.
///
/// [`set`]: struct.QueryTable.html#method.set
///
/// This is the method you are meant to use most of the time in a
/// parallel setting where modifications may arise asynchronously
/// (e.g., a language server). In this context, it is common to
/// wish to "fork off" a snapshot of the database performing some
/// series of queries in parallel and arranging the results. Using
/// this method for that purpose ensures that those queries will
/// see a consistent view of the database (it is also advisable
/// for those queries to use the [`Runtime::unwind_if_cancelled`]
/// method to check for cancellation).
///
/// # Panics
///
/// It is not permitted to create a snapshot from inside of a
/// query. Attempting to do so will panic.
///
/// # Deadlock warning
///
/// The intended pattern for snapshots is that, once created, they
/// are sent to another thread and used from there. As such, the
/// `snapshot` acquires a "read lock" on the database --
/// therefore, so long as the `snapshot` is not dropped, any
/// attempt to `set` a value in the database will block. If the
/// `snapshot` is owned by the same thread that is attempting to
/// `set`, this will cause a problem.
///
/// # How to implement this
///
/// Typically, this method will create a second copy of your
/// database type (`MyDatabaseType`, in the example below),
/// cloning over each of the fields from `self` into this new
/// copy. For the field that stores the salsa runtime, you should
/// use [the `Runtime::snapshot` method][rfm] to create a snapshot of the
/// runtime. Finally, package up the result using `Snapshot::new`,
/// which is a simple wrapper type that only gives `&self` access
/// to the database within (thus preventing the use of methods
/// that may mutate the inputs):
///
/// [rfm]: struct.Runtime.html#method.snapshot
///
/// ```rust,ignore
/// impl ParallelDatabase for MyDatabaseType {
/// fn snapshot(&self) -> Snapshot<Self> {
/// Snapshot::new(
/// MyDatabaseType {
/// runtime: self.runtime.snapshot(self),
/// other_field: self.other_field.clone(),
/// }
/// )
/// }
/// }
/// ```
fn snapshot(&self) -> Snapshot<Self>;
}
/// Simple wrapper struct that takes ownership of a database `DB` and
/// only gives `&self` access to it. See [the `snapshot` method][fm]
/// for more details.
///
/// [fm]: trait.ParallelDatabase.html#method.snapshot
#[derive(Debug)]
pub struct Snapshot<DB: ?Sized>
where
DB: ParallelDatabase,
{
db: DB,
}
impl<DB> Snapshot<DB>
where
DB: ParallelDatabase,
{
/// Creates a `Snapshot` that wraps the given database handle
/// `db`. From this point forward, only shared references to `db`
/// will be possible.
pub fn new(db: DB) -> Self {
Snapshot { db }
}
}
impl<DB> std::ops::Deref for Snapshot<DB>
where
DB: ParallelDatabase,
{
type Target = DB;
fn deref(&self) -> &DB {
&self.db
}
}
/// An integer that uniquely identifies a particular query instance within the
/// database. Used to track dependencies between queries. Fully ordered and
/// equatable but those orderings are arbitrary, and meant to be used only for
/// inserting into maps and the like.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct DatabaseKeyIndex {
group_index: u16,
query_index: u16,
key_index: u32,
}
impl DatabaseKeyIndex {
/// Returns the index of the query group containing this key.
#[inline]
pub fn group_index(self) -> u16 {
self.group_index
}
/// Returns the index of the query within its query group.
#[inline]
pub fn query_index(self) -> u16 {
self.query_index
}
/// Returns the index of this particular query key within the query.
#[inline]
pub fn key_index(self) -> u32 {
self.key_index
}
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<D: ?Sized>(self, db: &D) -> impl std::fmt::Debug + '_
where
D: plumbing::DatabaseOps,
{
DatabaseKeyIndexDebug { index: self, db }
}
}
/// Helper type for `DatabaseKeyIndex::debug`
struct DatabaseKeyIndexDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
index: DatabaseKeyIndex,
db: &'me D,
}
impl<D: ?Sized> std::fmt::Debug for DatabaseKeyIndexDebug<'_, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.db.fmt_index(self.index, fmt)
}
}
/// Trait implemented by all of the "special types" associated with
/// each of your queries.
///
/// Base trait of `Query` that has a lifetime parameter to allow the `DynDb` to be non-'static.
pub trait QueryDb<'d>: Sized {
/// Dyn version of the associated trait for this query group.
type DynDb: ?Sized + Database + HasQueryGroup<Self::Group> + 'd;
/// Associated query group struct.
type Group: plumbing::QueryGroup<GroupStorage = Self::GroupStorage>;
/// Generated struct that contains storage for all queries in a group.
type GroupStorage;
}
/// Trait implemented by all of the "special types" associated with
/// each of your queries.
pub trait Query: Debug + Default + Sized + for<'d> QueryDb<'d> {
/// Type that you give as a parameter -- for queries with zero
/// or more than one input, this will be a tuple.
type Key: Clone + Debug + Hash + Eq;
/// What value does the query return?
type Value: Clone + Debug;
/// Internal struct storing the values for the query.
// type Storage: plumbing::QueryStorageOps<Self>;
type Storage;
/// A unique index identifying this query within the group.
const QUERY_INDEX: u16;
/// Name of the query method (e.g., `foo`)
const QUERY_NAME: &'static str;
/// Exact storage for this query from the storage for its group.
fn query_storage<'a>(
group_storage: &'a <Self as QueryDb<'_>>::GroupStorage,
) -> &'a Arc<Self::Storage>;
/// Exact storage for this query from the storage for its group.
fn query_storage_mut<'a>(
group_storage: &'a <Self as QueryDb<'_>>::GroupStorage,
) -> &'a Arc<Self::Storage>;
}
/// Return value from [the `query` method] on `Database`.
/// Gives access to various less common operations on queries.
///
/// [the `query` method]: trait.Database.html#method.query
pub struct QueryTable<'me, Q>
where
Q: Query,
{
db: &'me <Q as QueryDb<'me>>::DynDb,
storage: &'me Q::Storage,
}
impl<'me, Q> QueryTable<'me, Q>
where
Q: Query,
Q::Storage: QueryStorageOps<Q>,
{
/// Constructs a new `QueryTable`.
pub fn new(db: &'me <Q as QueryDb<'me>>::DynDb, storage: &'me Q::Storage) -> Self {
Self { db, storage }
}
/// Execute the query on a given input. Usually it's easier to
/// invoke the trait method directly. Note that for variadic
/// queries (those with no inputs, or those with more than one
/// input) the key will be a tuple.
pub fn get(&self, key: Q::Key) -> Q::Value {
self.storage.fetch(self.db, &key)
}
/// Completely clears the storage for this query.
///
/// This method breaks internal invariants of salsa, so any further queries
/// might return nonsense results. It is useful only in very specific
/// circumstances -- for example, when one wants to observe which values are
/// dropped together with the table.
pub fn purge(&self)
where
Q::Storage: plumbing::QueryStorageMassOps,
{
self.storage.purge();
}
}
/// Return value from [the `query_mut` method] on `Database`.
/// Gives access to the `set` method, notably, that is used to
/// set the value of an input query.
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub struct QueryTableMut<'me, Q>
where
Q: Query + 'me,
{
runtime: &'me mut Runtime,
storage: &'me Q::Storage,
}
impl<'me, Q> QueryTableMut<'me, Q>
where
Q: Query,
{
/// Constructs a new `QueryTableMut`.
pub fn new(runtime: &'me mut Runtime, storage: &'me Q::Storage) -> Self {
Self { runtime, storage }
}
/// Assign a value to an "input query". Must be used outside of
/// an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn | (&mut self, key: Q::Key, value: Q::Value)
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.set_with_durability(key, value, Durability::LOW);
}
/// Assign a value to an "input query", with the additional
/// promise that this value will **never change**. Must be used
/// outside of an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn set_with_durability(&mut self, key: Q::Key, value: Q::Value, durability: Durability)
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.storage.set(self.runtime, &key, value, durability);
}
/// Removes a value from an "input query". Must be used outside of
/// an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// # Panics
/// Panics if the value was not previously set by `set` or
/// `set_with_durability`.
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn remove(&mut self, key: Q::Key) -> Q::Value
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.storage.remove(self.runtime, &key)
}
/// Sets the size of LRU cache of values for this query table.
///
/// That is, at most `cap` values will be present in the table at the same
/// time. This helps with keeping maximum memory usage under control, at the
/// cost of potential extra recalculations of evicted values.
///
/// If `cap` is zero, all values are preserved; this is the default.
pub fn set_lru_capacity(&self, cap: usize)
where
Q::Storage: plumbing::LruQueryStorageOps,
{
self.storage.set_lru_capacity(cap);
}
/// Marks the computed value as outdated.
///
/// This causes salsa to re-execute the query function on the next access to
/// the query, even if all dependencies are up to date.
///
/// This is most commonly used as part of the [on-demand input
/// pattern](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html).
pub fn invalidate(&mut self, key: &Q::Key)
where
Q::Storage: plumbing::DerivedQueryStorageOps<Q>,
{
self.storage.invalidate(self.runtime, key)
}
}
/// A panic payload indicating that execution of a salsa query was cancelled.
///
/// This can occur for a few reasons:
/// * the query was operating on a revision for which there is a pending write
///   that will move the database to the next revision, or
/// * the query was blocked on another thread, and that thread panicked.
#[derive(Debug)]
#[non_exhaustive]
pub enum Cancelled {
/// The query was operating on revision R, but there is a pending write to move to revision R+1.
#[non_exhaustive]
PendingWrite,
/// The query was blocked on another thread, and that thread panicked.
#[non_exhaustive]
PropagatedPanic,
}
impl Cancelled {
fn throw(self) -> ! {
// We use resume and not panic here to avoid running the panic
// hook (that is, to avoid collecting and printing backtrace).
std::panic::resume_unwind(Box::new(self));
}
/// Runs `f`, and catches any salsa cancellation.
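///
/// A usage sketch (the snapshot handle and `compute_everything` query are
/// hypothetical placeholders):
///
/// ```rust,ignore
/// match Cancelled::catch(|| snapshot.compute_everything()) {
///     Ok(value) => println!("finished: {:?}", value),
///     Err(cancelled) => eprintln!("query was {}", cancelled),
/// }
/// ```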
pub fn catch<F, T>(f: F) -> Result<T, Cancelled>
where
F: FnOnce() -> T + UnwindSafe,
{
match panic::catch_unwind(f) {
Ok(t) => Ok(t),
Err(payload) => match payload.downcast() {
Ok(cancelled) => Err(*cancelled),
Err(payload) => panic::resume_unwind(payload),
},
}
}
}
impl std::fmt::Display for Cancelled {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let why = match self {
Cancelled::PendingWrite => "pending write",
Cancelled::PropagatedPanic => "propagated panic",
};
f.write_str("cancelled because of ")?;
f.write_str(why)
}
}
impl std::error::Error for Cancelled {}
/// Captures the participants of a cycle that occurred when executing a query.
///
/// This type is meant to be used to help give meaningful error messages to the
/// user or to help salsa developers figure out why their program is resulting
/// in a computation cycle.
///
/// It is used in a few ways:
///
/// * During [cycle recovery](https://salsa-rs.github.io/salsa/cycles/fallback.html),
/// where it is given to the fallback function.
/// * As the panic value when an unexpected cycle (i.e., a cycle where one or more participants
/// lacks cycle recovery information) occurs.
///
/// You can read more about cycle handling in
/// the [salsa book](https://salsa-rs.github.io/salsa/cycles.html).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Cycle {
participants: plumbing::CycleParticipants,
}
impl Cycle {
pub(crate) fn new(participants: plumbing::CycleParticipants) -> Self {
Self { participants }
}
/// True if two `Cycle` values represent the same cycle.
pub(crate) fn is(&self, cycle: &Cycle) -> bool {
Arc::ptr_eq(&self.participants, &cycle.participants)
}
pub(crate) fn throw(self) -> ! {
log::debug!("throwing cycle {:?}", self);
std::panic::resume_unwind(Box::new(self))
}
pub(crate) fn catch<T>(execute: impl FnOnce() -> T) -> Result<T, Cycle> {
match std::panic::catch_unwind(AssertUnwindSafe(execute)) {
Ok(v) => Ok(v),
Err(err) => match err.downcast::<Cycle>() {
Ok(cycle) => Err(*cycle),
Err(other) => std::panic::resume_unwind(other),
},
}
}
/// Iterate over the [`DatabaseKeyIndex`] for each query participating
/// in the cycle. The start point of this iteration within the cycle
/// is arbitrary but deterministic, and the ordering is otherwise determined
/// by the execution.
pub fn participant_keys(&self) -> impl Iterator<Item = DatabaseKeyIndex> + '_ {
self.participants.iter().copied()
}
/// Returns a vector with the debug information for
/// all the participants in the cycle.
pub fn all_participants<DB: ?Sized + Database>(&self, db: &DB) -> Vec<String> {
self.participant_keys()
.map(|d| format!("{:?}", d.debug(db)))
.collect()
}
/// Returns a vector with the debug information for
/// those participants in the cycle that lacked recovery
/// information.
pub fn unexpected_participants<DB: ?Sized + Database>(&self, db: &DB) -> Vec<String> {
self.participant_keys()
.filter(|&d| db.cycle_recovery_strategy(d) == CycleRecoveryStrategy::Panic)
.map(|d| format!("{:?}", d.debug(db)))
.collect()
}
/// Returns a "debug" view onto this strict that can be used to print out information.
pub fn debug<'me, DB: ?Sized + Database>(&'me self, db: &'me DB) -> impl std::fmt::Debug + 'me {
struct UnexpectedCycleDebug<'me> {
c: &'me Cycle,
db: &'me dyn Database,
}
impl<'me> std::fmt::Debug for UnexpectedCycleDebug<'me> {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fmt.debug_struct("UnexpectedCycle")
.field("all_participants", &self.c.all_participants(self.db))
.field(
"unexpected_participants",
&self.c.unexpected_participants(self.db),
)
.finish()
}
}
UnexpectedCycleDebug {
c: self,
db: db.ops_database(),
}
}
}
// Re-export the procedural macros.
#[allow(unused_imports)]
#[macro_use]
extern crate salsa_macros;
use plumbing::HasQueryGroup;
pub use salsa_macros::*;
| set | identifier_name |
lib.rs | #![allow(clippy::type_complexity)]
#![allow(clippy::question_mark)]
#![warn(rust_2018_idioms)]
#![warn(missing_docs)]
//! The salsa crate is a crate for incremental recomputation. It
//! permits you to define a "database" of queries with both inputs and
//! values derived from those inputs; as you set the inputs, you can
//! re-execute the derived queries and it will try to re-use results
//! from previous invocations as appropriate.
mod derived;
mod doctest;
mod durability;
mod hash;
mod input;
mod intern_id;
mod interned;
mod revision;
mod runtime;
mod storage;
pub mod debug;
/// Items in this module are public for implementation reasons,
/// and are exempt from the SemVer guarantees.
#[doc(hidden)]
pub mod plumbing;
use crate::plumbing::CycleRecoveryStrategy;
use crate::plumbing::DerivedQueryStorageOps;
use crate::plumbing::InputQueryStorageOps;
use crate::plumbing::LruQueryStorageOps;
use crate::plumbing::QueryStorageMassOps;
use crate::plumbing::QueryStorageOps;
pub use crate::revision::Revision;
use std::fmt::{self, Debug};
use std::hash::Hash;
use std::panic::AssertUnwindSafe;
use std::panic::{self, UnwindSafe};
use std::sync::Arc;
pub use crate::durability::Durability;
pub use crate::intern_id::InternId;
pub use crate::interned::InternKey;
pub use crate::runtime::Runtime;
pub use crate::runtime::RuntimeId;
pub use crate::storage::Storage;
/// The base trait which your "query context" must implement. Gives
/// access to the salsa runtime, which you must embed into your query
/// context (along with whatever other state you may require).
pub trait Database: plumbing::DatabaseOps {
/// This function is invoked at key points in the salsa
/// runtime. It permits the database to be customized and to
/// inject logging or other custom behavior.
fn salsa_event(&self, event_fn: Event) {
#![allow(unused_variables)]
}
/// Starts unwinding the stack if the current revision is cancelled.
///
/// This method can be called by query implementations that perform
/// potentially expensive computations, in order to speed up propagation of
/// cancellation.
///
/// Cancellation will automatically be triggered by salsa on any query
/// invocation.
///
/// This method should not be overridden by `Database` implementors. A
/// `salsa_event` is emitted when this method is called, so that should be
/// used instead.
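///
/// A rough usage sketch (the query body and helper names below are
/// hypothetical placeholders, not part of salsa):
///
/// ```rust,ignore
/// fn expensive_query(db: &dyn MyDatabase, key: Key) -> Value {
///     let mut acc = Value::default();
///     for item in load_items(db, key) {
///         // bail out early if a newer revision is already pending
///         db.unwind_if_cancelled();
///         acc.merge(process(item));
///     }
///     acc
/// }
/// ```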
#[inline]
fn unwind_if_cancelled(&self) {
let runtime = self.salsa_runtime();
self.salsa_event(Event {
runtime_id: runtime.id(),
kind: EventKind::WillCheckCancellation,
});
let current_revision = runtime.current_revision();
let pending_revision = runtime.pending_revision();
log::debug!(
"unwind_if_cancelled: current_revision={:?}, pending_revision={:?}",
current_revision,
pending_revision
); |
/// Gives access to the underlying salsa runtime.
///
/// This method should not be overridden by `Database` implementors.
fn salsa_runtime(&self) -> &Runtime {
self.ops_salsa_runtime()
}
/// Gives access to the underlying salsa runtime.
///
/// This method should not be overridden by `Database` implementors.
fn salsa_runtime_mut(&mut self) -> &mut Runtime {
self.ops_salsa_runtime_mut()
}
}
/// The `Event` struct identifies various notable things that can
/// occur during salsa execution. Instances of this struct are given
/// to `salsa_event`.
pub struct Event {
/// The id of the snapshot that triggered the event. Usually
/// 1-to-1 with a thread, as well.
pub runtime_id: RuntimeId,
/// What sort of event was it.
pub kind: EventKind,
}
impl Event {
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<'me, D: ?Sized>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me
where
D: plumbing::DatabaseOps,
{
EventDebug { event: self, db }
}
}
impl fmt::Debug for Event {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Event")
.field("runtime_id", &self.runtime_id)
.field("kind", &self.kind)
.finish()
}
}
struct EventDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
event: &'me Event,
db: &'me D,
}
impl<'me, D: ?Sized> fmt::Debug for EventDebug<'me, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
fmt.debug_struct("Event")
.field("runtime_id", &self.event.runtime_id)
.field("kind", &self.event.kind.debug(self.db))
.finish()
}
}
/// An enum identifying the various kinds of events that can occur.
pub enum EventKind {
/// Occurs when we found that all inputs to a memoized value are
/// up-to-date and hence the value can be re-used without
/// executing the closure.
///
/// Executes before the "re-used" value is returned.
DidValidateMemoizedValue {
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that another thread (with id `other_runtime_id`) is processing the
/// given query (`database_key`), so we will block until they
/// finish.
///
/// Executes after we have registered with the other thread but
/// before they have answered us.
///
/// (NB: you can find the `id` of the current thread via the
/// `salsa_runtime`)
WillBlockOn {
/// The id of the runtime we will block on.
other_runtime_id: RuntimeId,
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that the function for this query will be executed.
/// This is either because it has never executed before or because
/// its inputs may be out of date.
WillExecute {
/// The database-key for the affected value. Implements `Debug`.
database_key: DatabaseKeyIndex,
},
/// Indicates that `unwind_if_cancelled` was called and salsa will check if
/// the current revision has been cancelled.
WillCheckCancellation,
}
impl EventKind {
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<'me, D: ?Sized>(&'me self, db: &'me D) -> impl std::fmt::Debug + 'me
where
D: plumbing::DatabaseOps,
{
EventKindDebug { kind: self, db }
}
}
impl fmt::Debug for EventKind {
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self {
EventKind::DidValidateMemoizedValue { database_key } => fmt
.debug_struct("DidValidateMemoizedValue")
.field("database_key", database_key)
.finish(),
EventKind::WillBlockOn {
other_runtime_id,
database_key,
} => fmt
.debug_struct("WillBlockOn")
.field("other_runtime_id", other_runtime_id)
.field("database_key", database_key)
.finish(),
EventKind::WillExecute { database_key } => fmt
.debug_struct("WillExecute")
.field("database_key", database_key)
.finish(),
EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(),
}
}
}
struct EventKindDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
kind: &'me EventKind,
db: &'me D,
}
impl<'me, D: ?Sized> fmt::Debug for EventKindDebug<'me, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result {
match self.kind {
EventKind::DidValidateMemoizedValue { database_key } => fmt
.debug_struct("DidValidateMemoizedValue")
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillBlockOn {
other_runtime_id,
database_key,
} => fmt
.debug_struct("WillBlockOn")
.field("other_runtime_id", &other_runtime_id)
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillExecute { database_key } => fmt
.debug_struct("WillExecute")
.field("database_key", &database_key.debug(self.db))
.finish(),
EventKind::WillCheckCancellation => fmt.debug_struct("WillCheckCancellation").finish(),
}
}
}
/// Indicates a database that also supports parallel query
/// evaluation. All of Salsa's base query support is capable of
/// parallel execution, but for it to work, your query key/value types
/// must also be `Send`, as must any additional data in your database.
pub trait ParallelDatabase: Database + Send {
/// Creates a second handle to the database that holds the
/// database fixed at a particular revision. So long as this
/// "frozen" handle exists, any attempt to [`set`] an input will
/// block.
///
/// [`set`]: struct.QueryTable.html#method.set
///
/// This is the method you are meant to use most of the time in a
/// parallel setting where modifications may arise asynchronously
/// (e.g., a language server). In this context, it is common to
/// wish to "fork off" a snapshot of the database performing some
/// series of queries in parallel and arranging the results. Using
/// this method for that purpose ensures that those queries will
/// see a consistent view of the database (it is also advisable
/// for those queries to use the [`Runtime::unwind_if_cancelled`]
/// method to check for cancellation).
///
/// # Panics
///
/// It is not permitted to create a snapshot from inside of a
/// query. Attempting to do so will panic.
///
/// # Deadlock warning
///
/// The intended pattern for snapshots is that, once created, they
/// are sent to another thread and used from there. As such, the
/// `snapshot` acquires a "read lock" on the database --
/// therefore, so long as the `snapshot` is not dropped, any
/// attempt to `set` a value in the database will block. If the
/// `snapshot` is owned by the same thread that is attempting to
/// `set`, this will cause a problem.
///
/// # How to implement this
///
/// Typically, this method will create a second copy of your
/// database type (`MyDatabaseType`, in the example below),
/// cloning over each of the fields from `self` into this new
/// copy. For the field that stores the salsa runtime, you should
/// use [the `Runtime::snapshot` method][rfm] to create a snapshot of the
/// runtime. Finally, package up the result using `Snapshot::new`,
/// which is a simple wrapper type that only gives `&self` access
/// to the database within (thus preventing the use of methods
/// that may mutate the inputs):
///
/// [rfm]: struct.Runtime.html#method.snapshot
///
/// ```rust,ignore
/// impl ParallelDatabase for MyDatabaseType {
/// fn snapshot(&self) -> Snapshot<Self> {
/// Snapshot::new(
/// MyDatabaseType {
/// runtime: self.runtime.snapshot(self),
/// other_field: self.other_field.clone(),
/// }
/// )
/// }
/// }
/// ```
fn snapshot(&self) -> Snapshot<Self>;
}
/// Simple wrapper struct that takes ownership of a database `DB` and
/// only gives `&self` access to it. See [the `snapshot` method][fm]
/// for more details.
///
/// [fm]: trait.ParallelDatabase.html#method.snapshot
#[derive(Debug)]
pub struct Snapshot<DB: ?Sized>
where
DB: ParallelDatabase,
{
db: DB,
}
impl<DB> Snapshot<DB>
where
DB: ParallelDatabase,
{
/// Creates a `Snapshot` that wraps the given database handle
/// `db`. From this point forward, only shared references to `db`
/// will be possible.
pub fn new(db: DB) -> Self {
Snapshot { db }
}
}
impl<DB> std::ops::Deref for Snapshot<DB>
where
DB: ParallelDatabase,
{
type Target = DB;
fn deref(&self) -> &DB {
&self.db
}
}
/// An integer that uniquely identifies a particular query instance within the
/// database. Used to track dependencies between queries. Fully ordered and
/// equatable but those orderings are arbitrary, and meant to be used only for
/// inserting into maps and the like.
#[derive(Copy, Clone, PartialEq, Eq, PartialOrd, Ord, Hash, Debug)]
pub struct DatabaseKeyIndex {
group_index: u16,
query_index: u16,
key_index: u32,
}
impl DatabaseKeyIndex {
/// Returns the index of the query group containing this key.
#[inline]
pub fn group_index(self) -> u16 {
self.group_index
}
/// Returns the index of the query within its query group.
#[inline]
pub fn query_index(self) -> u16 {
self.query_index
}
/// Returns the index of this particular query key within the query.
#[inline]
pub fn key_index(self) -> u32 {
self.key_index
}
/// Returns a type that gives a user-readable debug output.
/// Use like `println!("{:?}", index.debug(db))`.
pub fn debug<D: ?Sized>(self, db: &D) -> impl std::fmt::Debug + '_
where
D: plumbing::DatabaseOps,
{
DatabaseKeyIndexDebug { index: self, db }
}
}
/// Helper type for `DatabaseKeyIndex::debug`
struct DatabaseKeyIndexDebug<'me, D: ?Sized>
where
D: plumbing::DatabaseOps,
{
index: DatabaseKeyIndex,
db: &'me D,
}
impl<D: ?Sized> std::fmt::Debug for DatabaseKeyIndexDebug<'_, D>
where
D: plumbing::DatabaseOps,
{
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.db.fmt_index(self.index, fmt)
}
}
/// Trait implemented by all of the "special types" associated with
/// each of your queries.
///
/// Base trait of `Query` that has a lifetime parameter to allow the `DynDb` to be non-'static.
pub trait QueryDb<'d>: Sized {
/// Dyn version of the associated trait for this query group.
type DynDb: ?Sized + Database + HasQueryGroup<Self::Group> + 'd;
/// Associated query group struct.
type Group: plumbing::QueryGroup<GroupStorage = Self::GroupStorage>;
/// Generated struct that contains storage for all queries in a group.
type GroupStorage;
}
/// Trait implemented by all of the "special types" associated with
/// each of your queries.
pub trait Query: Debug + Default + Sized + for<'d> QueryDb<'d> {
/// Type that you give as a parameter -- for queries with zero
/// or more than one input, this will be a tuple.
type Key: Clone + Debug + Hash + Eq;
/// What value does the query return?
type Value: Clone + Debug;
/// Internal struct storing the values for the query.
// type Storage: plumbing::QueryStorageOps<Self>;
type Storage;
/// A unique index identifying this query within the group.
const QUERY_INDEX: u16;
/// Name of the query method (e.g., `foo`)
const QUERY_NAME: &'static str;
/// Extracts the storage for this query from the storage for its group.
fn query_storage<'a>(
group_storage: &'a <Self as QueryDb<'_>>::GroupStorage,
) -> &'a Arc<Self::Storage>;
/// Extracts the mutable storage for this query from the storage for its group.
fn query_storage_mut<'a>(
group_storage: &'a <Self as QueryDb<'_>>::GroupStorage,
) -> &'a Arc<Self::Storage>;
}
/// Return value from [the `query` method] on `Database`.
/// Gives access to various less common operations on queries.
///
/// [the `query` method]: trait.Database.html#method.query
pub struct QueryTable<'me, Q>
where
Q: Query,
{
db: &'me <Q as QueryDb<'me>>::DynDb,
storage: &'me Q::Storage,
}
impl<'me, Q> QueryTable<'me, Q>
where
Q: Query,
Q::Storage: QueryStorageOps<Q>,
{
/// Constructs a new `QueryTable`.
pub fn new(db: &'me <Q as QueryDb<'me>>::DynDb, storage: &'me Q::Storage) -> Self {
Self { db, storage }
}
/// Execute the query on a given input. Usually it's easier to
/// invoke the trait method directly. Note that for variadic
/// queries (those with no inputs, or those with more than one
/// input) the key will be a tuple.
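///
/// A sketch of a call site (the table is obtained via the database's `query`
/// method, as described on [`QueryTable`]; `MyQuery` and `key` are hypothetical
/// names):
///
/// ```rust,ignore
/// let value = db.query(MyQuery).get(key);
/// ```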
pub fn get(&self, key: Q::Key) -> Q::Value {
self.storage.fetch(self.db, &key)
}
/// Completely clears the storage for this query.
///
/// This method breaks internal invariants of salsa, so any further queries
/// might return nonsense results. It is useful only in very specific
/// circumstances -- for example, when one wants to observe which values
/// are dropped together with the table.
pub fn purge(&self)
where
Q::Storage: plumbing::QueryStorageMassOps,
{
self.storage.purge();
}
}
/// Return value from [the `query_mut` method] on `Database`.
/// Gives access to the `set` method, notably, that is used to
/// set the value of an input query.
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub struct QueryTableMut<'me, Q>
where
Q: Query + 'me,
{
runtime: &'me mut Runtime,
storage: &'me Q::Storage,
}
impl<'me, Q> QueryTableMut<'me, Q>
where
Q: Query,
{
/// Constructs a new `QueryTableMut`.
pub fn new(runtime: &'me mut Runtime, storage: &'me Q::Storage) -> Self {
Self { runtime, storage }
}
/// Assign a value to an "input query". Must be used outside of
/// an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn set(&mut self, key: Q::Key, value: Q::Value)
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.set_with_durability(key, value, Durability::LOW);
}
/// Assign a value to an "input query", with the additional
/// promise that this value will **never change**. Must be used
/// outside of an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn set_with_durability(&mut self, key: Q::Key, value: Q::Value, durability: Durability)
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.storage.set(self.runtime, &key, value, durability);
}
/// Removes a value from an "input query". Must be used outside of
/// an active query computation.
///
/// If you are using `snapshot`, see the notes on blocking
/// and cancellation on [the `query_mut` method].
///
/// # Panics
/// Panics if the value was not previously set by `set` or
/// `set_with_durability`.
///
/// [the `query_mut` method]: trait.Database.html#method.query_mut
pub fn remove(&mut self, key: Q::Key) -> Q::Value
where
Q::Storage: plumbing::InputQueryStorageOps<Q>,
{
self.storage.remove(self.runtime, &key)
}
/// Sets the size of the LRU cache of values for this query table.
///
/// That is, at most `cap` values will be present in the table at the same
/// time. This helps with keeping maximum memory usage under control, at the
/// cost of potential extra recalculations of evicted values.
///
/// If `cap` is zero, all values are preserved; this is the default.
pub fn set_lru_capacity(&self, cap: usize)
where
Q::Storage: plumbing::LruQueryStorageOps,
{
self.storage.set_lru_capacity(cap);
}
/// Marks the computed value as outdated.
///
/// This causes salsa to re-execute the query function on the next access to
/// the query, even if all dependencies are up to date.
///
/// This is most commonly used as part of the [on-demand input
/// pattern](https://salsa-rs.github.io/salsa/common_patterns/on_demand_inputs.html).
pub fn invalidate(&mut self, key: &Q::Key)
where
Q::Storage: plumbing::DerivedQueryStorageOps<Q>,
{
self.storage.invalidate(self.runtime, key)
}
}
/// A panic payload indicating that execution of a salsa query was cancelled.
///
/// This can occur for a couple of reasons:
/// * the query was operating on revision R while a pending write needs to move the database to revision R+1, or
/// * the query was blocked on another thread, and that thread panicked.
#[derive(Debug)]
#[non_exhaustive]
pub enum Cancelled {
/// The query was operating on revision R, but there is a pending write to move to revision R+1.
#[non_exhaustive]
PendingWrite,
/// The query was blocked on another thread, and that thread panicked.
#[non_exhaustive]
PropagatedPanic,
}
impl Cancelled {
fn throw(self) -> ! {
// We use resume and not panic here to avoid running the panic
// hook (that is, to avoid collecting and printing backtrace).
std::panic::resume_unwind(Box::new(self));
}
/// Runs `f`, and catches any salsa cancellation.
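///
/// A usage sketch (`snapshot` and `my_query` are hypothetical; any closure
/// that runs salsa queries can be wrapped this way):
///
/// ```rust,ignore
/// match Cancelled::catch(|| snapshot.my_query(key)) {
///     Ok(value) => println!("result: {:?}", value),
///     Err(cancelled) => eprintln!("query was {}", cancelled),
/// }
/// ```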
pub fn catch<F, T>(f: F) -> Result<T, Cancelled>
where
F: FnOnce() -> T + UnwindSafe,
{
match panic::catch_unwind(f) {
Ok(t) => Ok(t),
Err(payload) => match payload.downcast() {
Ok(cancelled) => Err(*cancelled),
Err(payload) => panic::resume_unwind(payload),
},
}
}
}
impl std::fmt::Display for Cancelled {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
let why = match self {
Cancelled::PendingWrite => "pending write",
Cancelled::PropagatedPanic => "propagated panic",
};
f.write_str("cancelled because of ")?;
f.write_str(why)
}
}
impl std::error::Error for Cancelled {}
/// Captures the participants of a cycle that occurred when executing a query.
///
/// This type is meant to be used to help give meaningful error messages to the
/// user or to help salsa developers figure out why their program is resulting
/// in a computation cycle.
///
/// It is used in a few ways:
///
/// * During [cycle recovery](https://salsa-rs.github.io/salsa/cycles/fallback.html),
/// where it is given to the fallback function.
/// * As the panic value when an unexpected cycle (i.e., a cycle where one or more participants
/// lacks cycle recovery information) occurs.
///
/// You can read more about cycle handling in
/// the [salsa book](https://salsa-rs.github.io/salsa/cycles.html).
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct Cycle {
participants: plumbing::CycleParticipants,
}
impl Cycle {
pub(crate) fn new(participants: plumbing::CycleParticipants) -> Self {
Self { participants }
}
/// True if two `Cycle` values represent the same cycle.
pub(crate) fn is(&self, cycle: &Cycle) -> bool {
Arc::ptr_eq(&self.participants, &cycle.participants)
}
pub(crate) fn throw(self) -> ! {
log::debug!("throwing cycle {:?}", self);
std::panic::resume_unwind(Box::new(self))
}
pub(crate) fn catch<T>(execute: impl FnOnce() -> T) -> Result<T, Cycle> {
match std::panic::catch_unwind(AssertUnwindSafe(execute)) {
Ok(v) => Ok(v),
Err(err) => match err.downcast::<Cycle>() {
Ok(cycle) => Err(*cycle),
Err(other) => std::panic::resume_unwind(other),
},
}
}
/// Iterate over the [`DatabaseKeyIndex`] for each query participating
/// in the cycle. The start point of this iteration within the cycle
/// is arbitrary but deterministic; the ordering is otherwise determined
/// by the execution.
pub fn participant_keys(&self) -> impl Iterator<Item = DatabaseKeyIndex> + '_ {
self.participants.iter().copied()
}
/// Returns a vector with the debug information for
/// all the participants in the cycle.
pub fn all_participants<DB: ?Sized + Database>(&self, db: &DB) -> Vec<String> {
self.participant_keys()
.map(|d| format!("{:?}", d.debug(db)))
.collect()
}
/// Returns a vector with the debug information for
/// those participants in the cycle that lacked recovery
/// information.
pub fn unexpected_participants<DB: ?Sized + Database>(&self, db: &DB) -> Vec<String> {
self.participant_keys()
.filter(|&d| db.cycle_recovery_strategy(d) == CycleRecoveryStrategy::Panic)
.map(|d| format!("{:?}", d.debug(db)))
.collect()
}
/// Returns a "debug" view onto this strict that can be used to print out information.
pub fn debug<'me, DB: ?Sized + Database>(&'me self, db: &'me DB) -> impl std::fmt::Debug + 'me {
struct UnexpectedCycleDebug<'me> {
c: &'me Cycle,
db: &'me dyn Database,
}
impl<'me> std::fmt::Debug for UnexpectedCycleDebug<'me> {
fn fmt(&self, fmt: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
fmt.debug_struct("UnexpectedCycle")
.field("all_participants", &self.c.all_participants(self.db))
.field(
"unexpected_participants",
&self.c.unexpected_participants(self.db),
)
.finish()
}
}
UnexpectedCycleDebug {
c: self,
db: db.ops_database(),
}
}
}
// Re-export the procedural macros.
#[allow(unused_imports)]
#[macro_use]
extern crate salsa_macros;
use plumbing::HasQueryGroup;
pub use salsa_macros::*; | if pending_revision > current_revision {
runtime.unwind_cancelled();
}
} | random_line_split |
pwm.rs | use std::{
fmt::Display,
fs,
path::{Path, PathBuf},
str::FromStr,
time::Duration,
};
use thiserror::Error;
use tracing::{debug, instrument};
/// Everything that can go wrong.
#[derive(Error, Debug)]
pub enum PwmError {
#[error("{0:?} not found")]
ControllerNotFound(Controller),
#[error("{0:?}/{1:?} not found")]
ChannelNotFound(Controller, Channel),
#[error("{0:?} not exported")]
NotExported(Controller),
#[error("failed to {0:?}: {1}")]
Sysfs(Access, #[source] std::io::Error),
#[error("duty cycle value must not be greater than the period value")]
DutyCycleGreaterThanPeriod,
#[error("legal polarity values: 'normal', 'inversed'")]
InvalidPolarity,
#[error("{0} cannot be changed while channel is enabled")]
IllegalChangeWhileEnabled(&'static str),
#[error("expected boolean value, got {0:?}")]
NotBoolean(String),
#[error("expected a duration in nanoseconds, got {0:?}: {1}")]
NotADuration(String, #[source] std::num::ParseIntError),
}
/// Used in PwmError to format sysfs related errors.
#[derive(Debug)]
pub enum Access {
Read(PathBuf),
Write(PathBuf),
}
/// Exposes PWM functionality.
///
/// Since the Linux kernel exposes PWM controllers and their settings through
/// sysfs, PWM operations are just file reads and writes. To allow testing with
/// a real file system but outside of sysfs, the `sysfs_root` property may be
/// used to "offset" those operations to an alternative directory.
///
/// Documentation on Linux PWM sysfs:
/// <https://www.kernel.org/doc/html/latest/driver-api/pwm.html>
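///
/// A minimal usage sketch (not a doctest; the controller/channel numbers and the
/// chosen period are hypothetical and depend on the board), assuming a caller
/// that returns `Result<()>`:
///
/// ```ignore
/// let mut pwm = Pwm::new();
/// pwm.export(Controller(0))?;
/// pwm.set_period(Controller(0), Channel(0), Duration::from_millis(20))?;
/// pwm.set_duty_cycle(Controller(0), Channel(0), Duration::from_millis(1))?;
/// pwm.enable(Controller(0), Channel(0))?;
/// ```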
#[derive(Debug)]
pub struct Pwm {
sysfs_root: PathBuf,
}
/// A PWM controller (a.k.a. PWM chip) is identified by a non-negative number.
#[derive(Debug, Clone)]
pub struct Controller(pub u32);
/// PWM controllers expose channels, which are also identified by non-negative numbers.
#[derive(Debug, Clone)]
pub struct Channel(pub u32);
type Result<T> = std::result::Result<T, PwmError>;
impl Pwm {
/// Initialize PWM.
pub fn new() -> Self {
Self::with_sysfs_root(PathBuf::from("/sys/class/pwm"))
}
/// Initialize PWM with an alternative sysfs directory, for testing.
pub fn with_sysfs_root(sysfs_root: PathBuf) -> Self {
if !sysfs_root.exists() {
panic!("sysfs root does not exist: {:?}", sysfs_root);
}
Self { sysfs_root }
}
/// Returns the number of channels for the given controller.
#[instrument]
pub fn npwm(&self, controller: &Controller) -> Result<u32> {
self.controller_file(controller, "npwm")
.and_then(|path| read(&path))
.map(|s| {
s.trim()
.parse::<u32>()
.expect("npwm expected to contain the number of channels")
})
}
/// Returns whether a controller's channels are ready to be used.
#[instrument]
pub fn is_exported(&self, controller: &Controller) -> Result<bool> {
// A controller is exported if the channel subdirectories are there.
// Since a controller without any channel doesn't make sense, it's
// enough to check for the existence of the first channel's directory.
match self.channel_dir(controller, &Channel(0)) {
Ok(_) => Ok(true),
Err(PwmError::NotExported(_)) => Ok(false),
Err(e) => Err(e),
}
}
/// Export a PWM controller, which enables access to its channels.
#[instrument]
pub fn export(&mut self, controller: Controller) -> Result<()> {
self.controller_file(&controller, "export")
.and_then(|path| write(&path, "1"))
}
/// Unexport a PWM controller, which disables access to its channels.
#[instrument]
pub fn unexport(&mut self, controller: Controller) -> Result<()> {
self.controller_file(&controller, "unexport")
.and_then(|path| write(&path, "1"))
}
/// Returns whether a controller's channel is enabled.
#[instrument]
pub fn is_enabled(&self, controller: &Controller, channel: &Channel) -> Result<bool> {
self.channel_file(controller, channel, "enable")
.and_then(|path| read(&path))
.and_then(parse_bool)
}
/// Enable a channel.
#[instrument]
pub fn enable(&mut self, controller: Controller, channel: Channel) -> Result<()> {
self.channel_file(&controller, &channel, "enable")
.and_then(|path| write(&path, "1"))
}
/// Disable a channel.
#[instrument]
pub fn disable(&mut self, controller: Controller, channel: Channel) -> Result<()> {
self.channel_file(&controller, &channel, "enable")
.and_then(|path| write(&path, "0"))
}
/// The total period of the PWM signal (read/write). Value is in nanoseconds
/// and is the sum of the active and inactive time of the PWM.
#[instrument]
pub fn set_period(
&mut self,
controller: Controller,
channel: Channel,
period: Duration,
) -> Result<()> {
let duty_cycle = self
.channel_file(&controller, &channel, "duty_cycle")
.and_then(|path| read(&path))
.and_then(parse_duration)?;
if duty_cycle > period {
return Err(PwmError::DutyCycleGreaterThanPeriod);
}
self.channel_file(&controller, &channel, "period")
.and_then(|path| write(&path, &period.as_nanos().to_string()))
}
/// The active time of the PWM signal (read/write). Value is in nanoseconds
/// and must be less than the period.
#[instrument]
pub fn set_duty_cycle(
&mut self,
controller: Controller,
channel: Channel,
duty_cycle: Duration,
) -> Result<()> {
let period = self
.channel_file(&controller, &channel, "period")
.and_then(|path| read(&path))
.and_then(parse_duration)?;
if duty_cycle > period {
return Err(PwmError::DutyCycleGreaterThanPeriod);
}
self.channel_file(&controller, &channel, "duty_cycle")
.and_then(|path| write(&path, &duty_cycle.as_nanos().to_string()))
}
/// Changes the polarity of the PWM signal (read/write). Writes to this
/// property only work if the PWM chip supports changing the polarity. The
/// polarity can only be changed if the PWM is not enabled. Value is the
/// string “normal” or “inversed”.
#[instrument]
pub fn set_polarity(
&mut self,
controller: Controller,
channel: Channel,
polarity: Polarity,
) -> Result<()> {
// setting polarity is only allowed if channel is disabled:
if self.is_enabled(&controller, &channel)? {
return Err(PwmError::IllegalChangeWhileEnabled("polarity"));
}
self.channel_file(&controller, &channel, "polarity")
.and_then(|path| write(&path, &polarity.to_string()))
}
fn controller_dir(&self, controller: &Controller) -> Result<PathBuf> {
let path = self.sysfs_root.join(format!("pwmchip{}", controller.0));
if path.is_dir() {
Ok(path)
} else {
Err(PwmError::ControllerNotFound(controller.clone()))
}
}
fn controller_file(&self, controller: &Controller, fname: &str) -> Result<PathBuf> { | if path.is_file() {
Ok(path)
} else {
Err(PwmError::ControllerNotFound(controller.clone()))
}
}
fn channel_dir(&self, controller: &Controller, channel: &Channel) -> Result<PathBuf> {
let n_pwm = self.npwm(controller)?;
if channel.0 >= n_pwm {
return Err(PwmError::ChannelNotFound(
controller.clone(),
channel.clone(),
));
}
let path = self
.controller_dir(controller)
.map(|controller| controller.join(format!("pwm{}", channel.0)))?;
if path.is_dir() {
Ok(path)
} else {
Err(PwmError::NotExported(controller.clone()))
}
}
fn channel_file(
&self,
controller: &Controller,
channel: &Channel,
fname: &str,
) -> Result<PathBuf> {
let path = self
.channel_dir(controller, channel)
.map(|channel| channel.join(fname))?;
if path.is_file() {
Ok(path)
} else {
Err(PwmError::NotExported(controller.clone()))
}
}
}
fn read(path: &Path) -> Result<String> {
fs::read_to_string(path).map_err(|e| PwmError::Sysfs(Access::Read(path.to_owned()), e))
}
fn write(path: &Path, contents: &str) -> Result<()> {
debug!("writing to {:?}", path);
fs::write(path, contents).map_err(|e| PwmError::Sysfs(Access::Write(path.to_owned()), e))
}
fn parse_bool(s: String) -> Result<bool> {
// sysfs compatible according to http://lkml.iu.edu/hypermail/linux/kernel/1103.2/02488.html
match s.trim_end().to_lowercase().as_ref() {
"1" | "y" | "yes" | "true" => Ok(true),
"0" | "n" | "no" | "false" | "" => Ok(false),
_ => Err(PwmError::NotBoolean(s)),
}
}
fn parse_duration(s: String) -> Result<Duration> {
s.trim_end()
.parse::<u64>()
.map_err(|e| PwmError::NotADuration(s, e))
.map(Duration::from_nanos)
}
#[derive(Debug)]
pub enum Polarity {
Normal,
Inversed,
}
impl Display for Polarity {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use Polarity::*;
match *self {
Normal => write!(f, "normal"),
Inversed => write!(f, "inversed"),
}
}
}
impl FromStr for Polarity {
type Err = PwmError;
fn from_str(s: &str) -> Result<Self> {
use Polarity::*;
match s {
"normal" => Ok(Normal),
"inversed" => Ok(Inversed),
_ => Err(PwmError::InvalidPolarity),
}
}
}
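// Illustration of the FromStr impl (hypothetical call site; the code above never
// reads polarity back from sysfs): "inversed".parse::<Polarity>() yields
// Ok(Polarity::Inversed), while an unknown string yields Err(PwmError::InvalidPolarity).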
#[cfg(test)]
mod should {
use super::*;
use temp_dir::TempDir;
#[test]
fn fail_if_controller_not_found() {
let tmp = TempDir::new().unwrap();
let mut pwm = Pwm::with_sysfs_root(tmp.path().to_owned());
assert!(matches!(
pwm.export(Controller(4)),
Err(PwmError::ControllerNotFound(Controller(4)))
));
assert!(matches!(
pwm.unexport(Controller(4)),
Err(PwmError::ControllerNotFound(Controller(4)))
));
}
#[test]
fn export_and_unexport_a_controller() {
let tmp = TempDir::new().unwrap();
let chip = tmp.child("pwmchip0");
fs::create_dir(&chip).unwrap();
let export = touch(chip.join("export"));
let unexport = touch(chip.join("unexport"));
let mut pwm = Pwm::with_sysfs_root(tmp.path().to_owned());
pwm.export(Controller(0)).unwrap();
assert_eq!(fs::read_to_string(&export).unwrap(), "1");
pwm.unexport(Controller(0)).unwrap();
assert_eq!(fs::read_to_string(&unexport).unwrap(), "1");
}
fn touch(path: PathBuf) -> PathBuf {
fs::write(&path, b"").unwrap();
path
}
} | let path = self
.sysfs_root
.join(format!("pwmchip{}/{}", controller.0, fname)); | random_line_split |
pwm.rs | use std::{
fmt::Display,
fs,
path::{Path, PathBuf},
str::FromStr,
time::Duration,
};
use thiserror::Error;
use tracing::{debug, instrument};
/// Everything that can go wrong.
#[derive(Error, Debug)]
pub enum PwmError {
#[error("{0:?} not found")]
ControllerNotFound(Controller),
#[error("{0:?}/{1:?} not found")]
ChannelNotFound(Controller, Channel),
#[error("{0:?} not exported")]
NotExported(Controller),
#[error("failed to {0:?}: {1}")]
Sysfs(Access, #[source] std::io::Error),
#[error("duty cycle value must not be greater than the period value")]
DutyCycleGreaterThanPeriod,
#[error("legal polarity values: 'normal', 'inversed'")]
InvalidPolarity,
#[error("{0} cannot be changed while channel is enabled")]
IllegalChangeWhileEnabled(&'static str),
#[error("expected boolean value, got {0:?}")]
NotBoolean(String),
#[error("expected a duration in nanoseconds, got {0:?}: {1}")]
NotADuration(String, #[source] std::num::ParseIntError),
}
/// Used in PwmError to format sysfs related errors.
#[derive(Debug)]
pub enum Access {
Read(PathBuf),
Write(PathBuf),
}
/// Exposes PWM functionality.
///
/// Since the Linux kernel exposes PWM controllers and their settings through
/// sysfs, PWM operations are just file reads and writes. To allow testing with
/// a real file system but outside of sysfs, the `sysfs_root` property may be
/// used to "offset" those operations to an alternative directory.
///
/// Documentation on Linux PWM sysfs:
/// <https://www.kernel.org/doc/html/latest/driver-api/pwm.html>
#[derive(Debug)]
pub struct Pwm {
sysfs_root: PathBuf,
}
/// A PWM controller (a.k.a. PWM chip) is identified by a non-negative number.
#[derive(Debug, Clone)]
pub struct Controller(pub u32);
/// PWM controllers expose channels, which are also identified by non-negative numbers.
#[derive(Debug, Clone)]
pub struct Channel(pub u32);
type Result<T> = std::result::Result<T, PwmError>;
impl Pwm {
/// Initialize PWM.
pub fn new() -> Self {
Self::with_sysfs_root(PathBuf::from("/sys/class/pwm"))
}
/// Initialize PWM with an alternative sysfs directory, for testing.
pub fn with_sysfs_root(sysfs_root: PathBuf) -> Self {
if !sysfs_root.exists() {
panic!("sysfs root does not exist: {:?}", sysfs_root);
}
Self { sysfs_root }
}
/// Returns the number of channels for the given controller.
#[instrument]
pub fn npwm(&self, controller: &Controller) -> Result<u32> {
self.controller_file(controller, "npwm")
.and_then(|path| read(&path))
.map(|s| {
s.trim()
.parse::<u32>()
.expect("npwm expected to contain the number of channels")
})
}
/// Returns whether a controller's channels are ready to be used.
#[instrument]
pub fn is_exported(&self, controller: &Controller) -> Result<bool> {
// A controller is exported if the channel subdirectories are there.
// Since a controller without any channel doesn't make sense, it's
// enough to check for the existence of the first channel's directory.
match self.channel_dir(controller, &Channel(0)) {
Ok(_) => Ok(true),
Err(PwmError::NotExported(_)) => Ok(false),
Err(e) => Err(e),
}
}
/// Export a PWM controller, which enables access to its channels.
#[instrument]
pub fn export(&mut self, controller: Controller) -> Result<()> {
self.controller_file(&controller, "export")
.and_then(|path| write(&path, "1"))
}
/// Unexport a PWM controller, which disables access to its channels.
#[instrument]
pub fn unexport(&mut self, controller: Controller) -> Result<()> {
self.controller_file(&controller, "unexport")
.and_then(|path| write(&path, "1"))
}
/// Returns whether a controller's channel is enabled.
#[instrument]
pub fn is_enabled(&self, controller: &Controller, channel: &Channel) -> Result<bool> {
self.channel_file(controller, channel, "enable")
.and_then(|path| read(&path))
.and_then(parse_bool)
}
/// Enable a channel.
#[instrument]
pub fn enable(&mut self, controller: Controller, channel: Channel) -> Result<()> {
self.channel_file(&controller, &channel, "enable")
.and_then(|path| write(&path, "1"))
}
/// Disable a channel.
#[instrument]
pub fn disable(&mut self, controller: Controller, channel: Channel) -> Result<()> {
self.channel_file(&controller, &channel, "enable")
.and_then(|path| write(&path, "0"))
}
/// The total period of the PWM signal (read/write). Value is in nanoseconds
/// and is the sum of the active and inactive time of the PWM.
#[instrument]
pub fn set_period(
&mut self,
controller: Controller,
channel: Channel,
period: Duration,
) -> Result<()> {
let duty_cycle = self
.channel_file(&controller, &channel, "duty_cycle")
.and_then(|path| read(&path))
.and_then(parse_duration)?;
if duty_cycle > period {
return Err(PwmError::DutyCycleGreaterThanPeriod);
}
self.channel_file(&controller, &channel, "period")
.and_then(|path| write(&path, &period.as_nanos().to_string()))
}
/// The active time of the PWM signal (read/write). Value is in nanoseconds
/// and must be less than the period.
#[instrument]
pub fn set_duty_cycle(
&mut self,
controller: Controller,
channel: Channel,
duty_cycle: Duration,
) -> Result<()> {
let period = self
.channel_file(&controller, &channel, "period")
.and_then(|path| read(&path))
.and_then(parse_duration)?;
if duty_cycle > period {
return Err(PwmError::DutyCycleGreaterThanPeriod);
}
self.channel_file(&controller, &channel, "duty_cycle")
.and_then(|path| write(&path, &duty_cycle.as_nanos().to_string()))
}
/// Changes the polarity of the PWM signal (read/write). Writes to this
/// property only work if the PWM chip supports changing the polarity. The
/// polarity can only be changed if the PWM is not enabled. Value is the
/// string “normal” or “inversed”.
#[instrument]
pub fn set_polarity(
&mut self,
controller: Controller,
channel: Channel,
polarity: Polarity,
) -> Result<()> {
// setting polarity is only allowed if channel is disabled:
if self.is_enabled(&controller, &channel)? {
return Err(PwmError::IllegalChangeWhileEnabled("polarity"));
}
self.channel_file(&controller, &channel, "polarity")
.and_then(|path| write(&path, &polarity.to_string()))
}
fn controller_dir(&self, controller: &Controller) -> Result<PathBuf> {
let path = self.sysfs_root.join(format!("pwmchip{}", controller.0));
if path.is_dir() {
Ok(path)
} else {
Err(PwmError::ControllerNotFound(controller.clone()))
}
}
fn controller_file(&self, controller: &Controller, fname: &str) -> Result<PathBuf> {
let path = self
.sysfs_root
.join(format!("pwmchip{}/{}", controller.0, fname));
if path.is_file() {
Ok(path)
} else {
Err(PwmError::ControllerNotFound(controller.clone()))
}
}
fn channel_dir(&self, controller: &Controller, channel: &Channel) -> Result<PathBuf> {
let n_pwm = self.npwm(controller)?;
if channel.0 >= n_pwm {
return Err(PwmError::ChannelNotFound(
controller.clone(),
channel.clone(),
));
}
let path = self
.controller_dir(controller)
.map(|controller| controller.join(format!("pwm{}", channel.0)))?;
if path.is_dir() {
Ok(path)
} else {
Err(PwmError::NotExported(controller.clone()))
}
}
fn channel_file(
&self,
controller: &Controller,
channel: &Channel,
fname: &str,
) -> Result<PathBuf> {
let path = self
.channel_dir(controller, channel)
.map(|channel| channel.join(fname))?;
if path.is_file() {
Ok(path)
} else {
Err(PwmError::NotExported(controller.clone()))
}
}
}
fn read(path: &Path) -> Result<String> {
fs::read_to_string(path).map_err(|e| PwmError::Sysfs(Access::Read(path.to_owned()), e))
}
fn write(path: &Path, contents: &str) -> Result<()> {
debug!("writing to {:?}", path);
fs::write(path, contents).map_err(|e| PwmError::Sysfs(Access::Write(path.to_owned()), e))
}
fn parse_bool(s: String) -> Result<bool> {
// sysfs compatible according to http://lkml.iu.edu/hypermail/linux/kernel/1103.2/02488.html
match s.trim_end().to_lowercase().as_ref() {
"1" | "y" | "yes" | "true" => Ok(true),
"0" | "n" | "no" | "false" | "" => Ok(false),
_ => Err(PwmError::NotBoolean(s)),
}
}
fn parse_duration(s: String) -> Result<Duration> {
s.trim_end()
.parse::<u64>()
.map_err(|e| PwmError::NotADuration(s, e))
.map(Duration::from_nanos)
}
#[derive(Debug)]
pub enum Polarity | ormal,
Inversed,
}
impl Display for Polarity {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use Polarity::*;
match *self {
Normal => write!(f, "normal"),
Inversed => write!(f, "inversed"),
}
}
}
impl FromStr for Polarity {
type Err = PwmError;
fn from_str(s: &str) -> Result<Self> {
use Polarity::*;
match s {
"normal" => Ok(Normal),
"inversed" => Ok(Inversed),
_ => Err(PwmError::InvalidPolarity),
}
}
}
#[cfg(test)]
mod should {
use super::*;
use temp_dir::TempDir;
#[test]
fn fail_if_controller_not_found() {
let tmp = TempDir::new().unwrap();
let mut pwm = Pwm::with_sysfs_root(tmp.path().to_owned());
assert!(matches!(
pwm.export(Controller(4)),
Err(PwmError::ControllerNotFound(Controller(4)))
));
assert!(matches!(
pwm.unexport(Controller(4)),
Err(PwmError::ControllerNotFound(Controller(4)))
));
}
#[test]
fn export_and_unexport_a_controller() {
let tmp = TempDir::new().unwrap();
let chip = tmp.child("pwmchip0");
fs::create_dir(&chip).unwrap();
let export = touch(chip.join("export"));
let unexport = touch(chip.join("unexport"));
let mut pwm = Pwm::with_sysfs_root(tmp.path().to_owned());
pwm.export(Controller(0)).unwrap();
assert_eq!(fs::read_to_string(&export).unwrap(), "1");
pwm.unexport(Controller(0)).unwrap();
assert_eq!(fs::read_to_string(&unexport).unwrap(), "1");
}
fn touch(path: PathBuf) -> PathBuf {
fs::write(&path, b"").unwrap();
path
}
}
| {
N | identifier_name |
pwm.rs | use std::{
fmt::Display,
fs,
path::{Path, PathBuf},
str::FromStr,
time::Duration,
};
use thiserror::Error;
use tracing::{debug, instrument};
/// Everything that can go wrong.
#[derive(Error, Debug)]
pub enum PwmError {
#[error("{0:?} not found")]
ControllerNotFound(Controller),
#[error("{0:?}/{1:?} not found")]
ChannelNotFound(Controller, Channel),
#[error("{0:?} not exported")]
NotExported(Controller),
#[error("failed to {0:?}: {1}")]
Sysfs(Access, #[source] std::io::Error),
#[error("duty cycle value must not be greater than the period value")]
DutyCycleGreaterThanPeriod,
#[error("legal polarity values: 'normal', 'inversed'")]
InvalidPolarity,
#[error("{0} cannot be changed while channel is enabled")]
IllegalChangeWhileEnabled(&'static str),
#[error("expected boolean value, got {0:?}")]
NotBoolean(String),
#[error("expected a duration in nanoseconds, got {0:?}: {1}")]
NotADuration(String, #[source] std::num::ParseIntError),
}
/// Used in PwmError to format sysfs related errors.
#[derive(Debug)]
pub enum Access {
Read(PathBuf),
Write(PathBuf),
}
/// Exposes PWM functionality.
///
/// Since the Linux kernel exposes PWM controllers and their settings through
/// sysfs, PWM operations are just file reads and writes. To allow testing with
/// a real file system but outside of sysfs, the `sysfs_root` property may be
/// used to "offset" those operations to an alternative directory.
///
/// Documentation on Linux PWM sysfs:
/// <https://www.kernel.org/doc/html/latest/driver-api/pwm.html>
#[derive(Debug)]
pub struct Pwm {
sysfs_root: PathBuf,
}
/// A PWM controller (a.k.a. PWM chip) is identified by a non-negative number.
#[derive(Debug, Clone)]
pub struct Controller(pub u32);
/// PWM controllers expose channels, which are also identified by non-negative numbers.
#[derive(Debug, Clone)]
pub struct Channel(pub u32);
type Result<T> = std::result::Result<T, PwmError>;
impl Pwm {
/// Initialize PWM.
pub fn new() -> Self {
Self::with_sysfs_root(PathBuf::from("/sys/class/pwm"))
}
/// Initialize PWM with an alternative sysfs directory, for testing.
pub fn with_sysfs_root(sysfs_root: PathBuf) -> Self {
if !sysfs_root.exists() {
panic!("sysfs root does not exist: {:?}", sysfs_root);
}
Self { sysfs_root }
}
/// Returns the number of channels for the given controller.
#[instrument]
pub fn npwm(&self, controller: &Controller) -> Result<u32> {
self.controller_file(controller, "npwm")
.and_then(|path| read(&path))
.map(|s| {
s.trim()
.parse::<u32>()
.expect("npwm expected to contain the number of channels")
})
}
/// Returns whether a controller's channels are ready to be used.
#[instrument]
pub fn is_exported(&self, controller: &Controller) -> Result<bool> {
// A controller is exported if the channel subdirectories are there.
// Since a controller without any channel doesn't make sense, it's
// enough to check for the existence of the first channel's directory.
match self.channel_dir(controller, &Channel(0)) {
Ok(_) => Ok(true),
Err(PwmError::NotExported(_)) => Ok(false),
Err(e) => Err(e),
}
}
/// Export a PWM controller, which enables access to its channels.
#[instrument]
pub fn export(&mut self, controller: Controller) -> Result<()> {
self.controller_file(&controller, "export")
.and_then(|path| write(&path, "1"))
}
/// Unexport a PWM controller, which disables access to its channels.
#[instrument]
pub fn unexport(&mut self, controller: Controller) -> Result<()> {
self.controller_file(&controller, "unexport")
.and_then(|path| write(&path, "1"))
}
/// Returns whether a controller's channel is enabled.
#[instrument]
pub fn is_enabled(&self, controller: &Controller, channel: &Channel) -> Result<bool> {
self.channel_file(controller, channel, "enable")
.and_then(|path| read(&path))
.and_then(parse_bool)
}
/// Enable a channel.
#[instrument]
pub fn enable(&mut self, controller: Controller, channel: Channel) -> Result<()> {
self.channel_file(&controller, &channel, "enable")
.and_then(|path| write(&path, "1"))
}
/// Disable a channel.
#[instrument]
pub fn disable(&mut self, controller: Controller, channel: Channel) -> Result<()> {
self.channel_file(&controller, &channel, "enable")
.and_then(|path| write(&path, "0"))
}
/// The total period of the PWM signal (read/write). Value is in nanoseconds
/// and is the sum of the active and inactive time of the PWM.
#[instrument]
pub fn set_period(
&mut self,
controller: Controller,
channel: Channel,
period: Duration,
) -> Result<()> {
let duty_cycle = self
.channel_file(&controller, &channel, "duty_cycle")
.and_then(|path| read(&path))
.and_then(parse_duration)?;
if duty_cycle > period {
return Err(PwmError::DutyCycleGreaterThanPeriod);
}
self.channel_file(&controller, &channel, "period")
.and_then(|path| write(&path, &period.as_nanos().to_string()))
}
/// The active time of the PWM signal (read/write). Value is in nanoseconds
/// and must be less than the period.
#[instrument]
pub fn set_duty_cycle(
&mut self,
controller: Controller,
channel: Channel,
duty_cycle: Duration,
) -> Result<()> {
let period = self
.channel_file(&controller, &channel, "period")
.and_then(|path| read(&path))
.and_then(parse_duration)?;
if duty_cycle > period {
return Err(PwmError::DutyCycleGreaterThanPeriod);
}
self.channel_file(&controller, &channel, "duty_cycle")
.and_then(|path| write(&path, &duty_cycle.as_nanos().to_string()))
}
/// Changes the polarity of the PWM signal (read/write). Writes to this
/// property only work if the PWM chip supports changing the polarity. The
/// polarity can only be changed if the PWM is not enabled. Value is the
/// string “normal” or “inversed”.
#[instrument]
pub fn set_polarity(
&mut self,
controller: Controller,
channel: Channel,
polarity: Polarity,
) -> Result<()> {
// setting polarity is only allowed if channel is disabled:
if self.is_enabled(&controller, &channel)? {
return Err(PwmError::IllegalChangeWhileEnabled("polarity"));
}
self.channel_file(&controller, &channel, "polarity")
.and_then(|path| write(&path, &polarity.to_string()))
}
fn controller_dir(&self, controller: &Controller) -> Result<PathBuf> {
let path = self.sysfs_root.join(format!("pwmchip{}", controller.0));
if path.is_dir() {
Ok(path)
} else {
Err(PwmError::ControllerNotFound(controller.clone()))
}
}
fn controller_file(&self, controller: &Controller, fname: &str) -> Result<PathBuf> {
let path = self
.sysfs_root
.join(format!("pwmchip{}/{}", controller.0, fname));
if path.is_file() {
Ok(path)
} else {
Err(PwmError::ControllerNotFound(controller.clone()))
}
}
fn channel_dir(&self, controller: &Controller, channel: &Channel) -> Result<PathBuf> {
let n_pwm = self.npwm(controller)?;
if channel.0 >= n_pwm {
return Err(PwmError::ChannelNotFound(
controller.clone(),
channel.clone(),
));
}
let path = self
.controller_dir(controller)
.map(|controller| controller.join(format!("pwm{}", channel.0)))?;
if path.is_dir() {
Ok(path)
} else {
Err(PwmError::NotExported(controller.clone()))
}
}
fn channel_file(
&self,
controller: &Controller,
channel: &Channel,
fname: &str,
) -> Result<PathBuf> {
let path = self
.channel_dir(controller, channel)
.map(|channel| channel.join(fname))?;
if path.is_file() {
Ok(path)
} else {
Err(PwmError::NotExported(controller.clone()))
}
}
}
fn read(path: &Path) -> Result<String> {
fs::read_to_string(path).map_err(|e| PwmError::Sysfs(Access::Read(path.to_owned()), e))
}
fn write(path: &Path, contents: &str) -> Result<()> {
debug!("writing to {:?}", path);
fs::write(path, contents).map_err(|e| PwmError::Sysfs(Access::Write(path.to_owned()), e))
}
fn parse_bool(s: String) -> Result<bool> {
// sysfs compatible according to http://lkml.iu.edu/hypermail/linux/kernel/1103.2/02488.html
match s.trim_end().to_lowercase().as_ref() {
"1" | "y" | "yes" | "true" => Ok(true),
"0" | "n" | "no" | "false" | "" => Ok(false),
_ => Err(PwmError::NotBoolean(s)),
}
}
fn parse_duration(s: String) -> Result<Duration> {
s. | ve(Debug)]
pub enum Polarity {
Normal,
Inversed,
}
impl Display for Polarity {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
use Polarity::*;
match *self {
Normal => write!(f, "normal"),
Inversed => write!(f, "inversed"),
}
}
}
impl FromStr for Polarity {
type Err = PwmError;
fn from_str(s: &str) -> Result<Self> {
use Polarity::*;
match s {
"normal" => Ok(Normal),
"inversed" => Ok(Inversed),
_ => Err(PwmError::InvalidPolarity),
}
}
}
#[cfg(test)]
mod should {
use super::*;
use temp_dir::TempDir;
#[test]
fn fail_if_controller_not_found() {
let tmp = TempDir::new().unwrap();
let mut pwm = Pwm::with_sysfs_root(tmp.path().to_owned());
assert!(matches!(
pwm.export(Controller(4)),
Err(PwmError::ControllerNotFound(Controller(4)))
));
assert!(matches!(
pwm.unexport(Controller(4)),
Err(PwmError::ControllerNotFound(Controller(4)))
));
}
#[test]
fn export_and_unexport_a_controller() {
let tmp = TempDir::new().unwrap();
let chip = tmp.child("pwmchip0");
fs::create_dir(&chip).unwrap();
let export = touch(chip.join("export"));
let unexport = touch(chip.join("unexport"));
let mut pwm = Pwm::with_sysfs_root(tmp.path().to_owned());
pwm.export(Controller(0)).unwrap();
assert_eq!(fs::read_to_string(&export).unwrap(), "1");
pwm.unexport(Controller(0)).unwrap();
assert_eq!(fs::read_to_string(&unexport).unwrap(), "1");
}
fn touch(path: PathBuf) -> PathBuf {
fs::write(&path, b"").unwrap();
path
}
}
| trim_end()
.parse::<u64>()
.map_err(|e| PwmError::NotADuration(s, e))
.map(Duration::from_nanos)
}
#[deri | identifier_body |
deckbuilder.py | import argparse
import parms
import pandas as pd
import textwrap
import os
import subprocess
import sys
from PIL import Image, ImageDraw, ImageFont
from fpdf import FPDF
from pathlib import Path
from Card import Card
from cust import cust_title
from cust import cust_description
FILE_EXT = parms.EXT_XLSX()
SHEETS = []
EXCEL = None
MASK_DICT = {}
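# A hypothetical invocation (file names and the "pdf" format value are placeholders;
# actual defaults live in parms):
#   python deckbuilder.py --source cards.xlsx --output out --format pdf --print True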
def build():
global FILE_EXT
global SHEETS
global EXCEL
# parse args
parser = argparse.ArgumentParser(description='Building decks')
parser.add_argument('-s', "--source", type=str, action='store', dest='source', help='Excel source')
parser.add_argument('-o', "--output", type=str, action='store', dest='output', help='Output folder')
parser.add_argument('-f', "--format", type=str, action='store', dest='format', help='Only PDF for now')
parser.add_argument('-t', "--tabletop", type=bool, action='store', dest='tabletop',
help='Export for Tabletop Simulator')
parser.add_argument('-p', "--print", type=bool, action='store', dest='print',
help='Print generated files')
args = parser.parse_args()
# redefine global parameters
parms.FILE_SOURCE = args.source
parms.DIR_OUTPUT = nvl(args.output, parms.DIR_OUTPUT)
parms.FORMAT = nvl(args.format, parms.FORMAT)
parms.FLAG_TABLETOP = nvl(args.tabletop, parms.FLAG_TABLETOP)
parms.PRINT = nvl(args.print, parms.PRINT)
print("[Validating parameters]")
if not valid_parameters():
return
print("[Validating masks]")
if not valid_masks():
return
print("[Processing sheets]")
process_sheets()
def valid_parameters():
if parms.FILE_SOURCE is None:
print("ERROR: Source file path is invalid")
return False
if not Path(parms.FILE_SOURCE).is_file():
print("ERROR: Source file path is invalid")
return False
filename, ext = parms.FILE_SOURCE.split(".")
if ext.lower() not in (parms.EXT_XLS(), parms.EXT_XLSX(), parms.EXT_CSV()):
print("ERROR: Source file type is not supported")
return False
else:
global FILE_EXT
FILE_EXT = ext
if parms.FORMAT not in [parms.FORMAT_PDF()]:
print(parms.FORMAT, parms.FORMAT_PDF())
print("ERROR: Export format not supported")
return False
return True
def valid_masks():
global MASK_DICT
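    # A mask string selects which cards to render, one "<sheet>.<card-title-or-*>"
    # entry per mask, e.g. (assuming ";" as MASK_SEPARATOR and "*" as MASK_ALL):
    # "Events.*;Items.Sword"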
for m in parms.MASKS().split(parms.MASK_SEPARATOR()):
if m.count(".", 1, len(m) - 1) != 1:
print(m.count(".", 1, len(m) - 1))
print("ERROR: Mask", '"' + m + '"', "is invalid")
return False
else:
sheet_title, value = m.split(parms.MASK_DOT())
if sheet_title not in MASK_DICT.keys():
MASK_DICT[sheet_title] = []
MASK_DICT[sheet_title].append(value)
print("Masks:", MASK_DICT)
return True
def process_sheets():
global SHEETS
global EXCEL
# excel
if FILE_EXT in (parms.EXT_XLS(), parms.EXT_XLSX()):
EXCEL = pd.ExcelFile(parms.FILE_SOURCE)
for sn in EXCEL.sheet_names:
sheet = EXCEL.parse(sn)
SHEETS.append(sheet)
process_sheet(sheet, sn)
def process_sheet(sheet, sheet_title):
print("Processing", '"' + sheet_title + '"', "...")
deck = []
if parms.COLUMN_TITLE() not in sheet.keys() or parms.COLUMN_DESCRIPTION() not in sheet.keys():
print("WARNING:", parms.COLUMN_TITLE(), "and", parms.COLUMN_DESCRIPTION(),
"columns must be defined on the sheet. Skipping.")
return
if parms.COLUMN_COUNT() not in sheet.keys():
print("WARNING:", parms.COLUMN_COUNT(), "column not defined on sheet", sheet_title + ".",
"Generating one copy for each card")
sheet[parms.COLUMN_COUNT()] = pd.Series(1, index=sheet.index)
if parms.COLUMN_IDT() not in sheet.keys():
print("WARNING:", parms.COLUMN_IDT(), "column not defined on sheet", sheet_title + ".",
"Ensure that you don't have cards with the same names or define unique identifier")
sheet[parms.COLUMN_IDT()] = pd.Series(None, index=sheet.index)
for index, row in sheet.iterrows():
card_title = cust_title.do(row, sheet_title, row[parms.COLUMN_TITLE()])
if card_included(sheet_title, card_title):
if sheet_title == "Находки":
print(row["Next Location"])
card_description = cust_description.do(row, sheet_title, row[parms.COLUMN_DESCRIPTION()])
card_image = generate_card_image(card_title, card_description)
card_count = row[parms.COLUMN_COUNT()]
card_idt = row[parms.COLUMN_IDT()]
if sheet_title == "Находки":
print(card_description)
card = Card(card_title, card_description, card_image, card_count, card_idt)
deck.append(card)
print(card_count, '"' + card_title + '" cards have been generated.')
save_sheet(sheet_title, deck)
def generate_card_image(title, description):
# scheme, size, background color
img = Image.new('RGB', (parms.DIM_CARD_WIDTH(), parms.DIM_CARD_HEIGHT()), (255, 255, 255)) | unicode_font = ImageFont.truetype("Arial.ttf")
y_text = draw_lines(draw, unicode_font, title, parms.DIM_TEXT_TOP_MARGIN())
# space between title and description
y_text += parms.DIM_TEXT_TOP_MARGIN()
# draw description
    for p in str.split(description, "\\p"):
for n in str.split(p, "\n"):
y_text = draw_lines(draw, unicode_font, n, y_text)
y_text += parms.DIM_TEXT_TOP_MARGIN()
# border
img = apply_card_border(img)
return img
def draw_lines(draw, font, text, y_text):
lines = textwrap.wrap(text, width=(parms.DIM_CARD_WIDTH() // parms.DIM_CHAR_WIDTH()))
for line in lines:
draw.text((parms.DIM_TEXT_LEFT_MARGIN(), y_text), line, fill=(0, 0, 0), font=font)
y_text += parms.DIM_TEXT_HEIGHT()
return y_text
def apply_card_border(img):
new_size = (img.size[0] + parms.DIM_CARD_BORDER() * 2, img.size[1] + parms.DIM_CARD_BORDER() * 2)
bordered_img = Image.new("RGB", new_size)
bordered_img.paste(img, (parms.DIM_CARD_BORDER(), parms.DIM_CARD_BORDER()))
return bordered_img
def save_sheet(sheet_title, deck):
main_directory = generate_sheet_directories(sheet_title)
pdf = None
if parms.FORMAT == parms.FORMAT_PDF():
pdf = FPDF()
card_paths = []
card_total_count = 0
for c in deck:
card_total_count += c.count
card_counter = 0
for i, card in enumerate(deck):
for j in range(card.count):
# separate images
if card.idt is None:
idt_suffix = ""
else:
idt_suffix = "_" + str(card.idt)
card_path = main_directory + "/" + card.title.replace(" ", "_") + idt_suffix + "_" + str(j) + "." + parms.EXT_PNG()
card_paths.append(card_path)
card.image.save(card_path, parms.EXT_PNG())
card_counter += 1
# combine in one page
if (card_total_count - card_counter) % (parms.CARDS_IN_ROW() * parms.CARDS_IN_COLUMN()) == 0:
print("Page added", card_total_count - card_counter)
sheet_page_image = Image.new('RGB',
(parms.CARDS_IN_ROW() * (parms.DIM_CARD_WIDTH() + parms.DIM_CARD_BORDER() * 2),
parms.CARDS_IN_COLUMN() * (parms.DIM_CARD_HEIGHT() + parms.DIM_CARD_BORDER() * 2 )),
(255,255,255,0))
x_offset = 0
for k, img in enumerate(map(Image.open, card_paths)):
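                # Grid placement: k % CARDS_IN_ROW gives the column; the division below
                # uses CARDS_IN_COLUMN for the row index, which assumes a square grid
                # (CARDS_IN_ROW == CARDS_IN_COLUMN) -- otherwise k // CARDS_IN_ROW would be expected.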
sheet_page_image.paste(img, ((k % parms.CARDS_IN_ROW()) * img.size[0],
(k // parms.CARDS_IN_COLUMN()) * img.size[1]))
x_offset += img.size[0]
sheet_page_image_path = main_directory + "/" + parms.DIR_PAGES() + "/"\
+ parms.FILE_PAGE() + str(card_total_count - card_counter)\
+ "." + parms.EXT_PNG()
sheet_page_image.save(sheet_page_image_path)
# pdf
if parms.FORMAT == parms.FORMAT_PDF():
pdf.add_page()
pdf.image(sheet_page_image_path, x=parms.DIM_PDF_LEFT_MARGIN(), y=parms.DIM_PDF_TOP_MARGIN())
card_paths = []
printing_file = None
if parms.FORMAT == parms.FORMAT_PDF():
printing_file = main_directory + "/" + parms.DIR_PRINT() + "/" + sheet_title.replace(" ", "_")\
+ "." + parms.FORMAT_PDF()
pdf.output(printing_file, "F")
if parms.PRINT is True:
print_sheet(printing_file)
print('"' + sheet_title + '"', "finished.")
def generate_sheet_directories(sheet_title):
main_directory = parms.DIR_OUTPUT + "/" + sheet_title
if not os.path.exists(main_directory):
os.makedirs(main_directory)
for d in [parms.DIR_PAGES(), parms.DIR_PRINT(), parms.DIR_TABLETOP()]:
directory = main_directory + "/" + d
if not os.path.exists(directory):
os.makedirs(directory)
return main_directory
def card_included(sheet_title, card_title):
global MASK_DICT
if sheet_title not in MASK_DICT.keys():
return False
elif parms.MASK_ALL() in MASK_DICT[sheet_title] or card_title in MASK_DICT[sheet_title]:
return True
else:
return False
def print_sheet(sheet_path):
if sheet_path is not None:
print("Printing ...")
if sys.platform == "win32":
os.startfile(sheet_path, "print")
else:
lpr = subprocess.Popen("/usr/bin/lpr", stdin=subprocess.PIPE)
            lpr.communicate(open(sheet_path, "rb").read())  # communicate() closes stdin so lpr sees EOF
def nvl(var, val):
if var is None:
return val
return var
if __name__ == "__main__":
build() | draw = ImageDraw.Draw(img)
# draw title | random_line_split |
deckbuilder.py | import argparse
import parms
import pandas as pd
import textwrap
import os
import subprocess
import sys
from PIL import Image, ImageDraw, ImageFont
from fpdf import FPDF
from pathlib import Path
from Card import Card
from cust import cust_title
from cust import cust_description
FILE_EXT = parms.EXT_XLSX()
SHEETS = []
EXCEL = None
MASK_DICT = {}
def build():
global FILE_EXT
global SHEETS
global EXCEL
# parse args
parser = argparse.ArgumentParser(description='Building decks')
parser.add_argument('-s', "--source", type=str, action='store', dest='source', help='Excel source')
parser.add_argument('-o', "--output", type=str, action='store', dest='output', help='Output folder')
parser.add_argument('-f', "--format", type=str, action='store', dest='format', help='Only PDF for now')
parser.add_argument('-t', "--tabletop", type=bool, action='store', dest='tabletop',
help='Export for Tabletop Simulator')
parser.add_argument('-p', "--print", type=bool, action='store', dest='print',
help='Print generated files')
args = parser.parse_args()
# redefine global parameters
parms.FILE_SOURCE = args.source
parms.DIR_OUTPUT = nvl(args.output, parms.DIR_OUTPUT)
parms.FORMAT = nvl(args.format, parms.FORMAT)
parms.FLAG_TABLETOP = nvl(args.tabletop, parms.FLAG_TABLETOP)
parms.PRINT = nvl(args.print, parms.PRINT)
print("[Validating parameters]")
if not valid_parameters():
return
print("[Validating masks]")
if not valid_masks():
return
print("[Processing sheets]")
process_sheets()
def valid_parameters():
if parms.FILE_SOURCE is None:
print("ERROR: Source file path is invalid")
return False
if not Path(parms.FILE_SOURCE).is_file():
|
filename, ext = parms.FILE_SOURCE.split(".")
if ext.lower() not in (parms.EXT_XLS(), parms.EXT_XLSX(), parms.EXT_CSV()):
print("ERROR: Source file type is not supported")
return False
else:
global FILE_EXT
FILE_EXT = ext
if parms.FORMAT not in [parms.FORMAT_PDF()]:
print(parms.FORMAT, parms.FORMAT_PDF())
print("ERROR: Export format not supported")
return False
return True
def valid_masks():
global MASK_DICT
for m in parms.MASKS().split(parms.MASK_SEPARATOR()):
if m.count(".", 1, len(m) - 1) != 1:
print(m.count(".", 1, len(m) - 1))
print("ERROR: Mask", '"' + m + '"', "is invalid")
return False
else:
sheet_title, value = m.split(parms.MASK_DOT())
if sheet_title not in MASK_DICT.keys():
MASK_DICT[sheet_title] = []
MASK_DICT[sheet_title].append(value)
print("Masks:", MASK_DICT)
return True
def process_sheets():
global SHEETS
global EXCEL
# excel
if FILE_EXT in (parms.EXT_XLS(), parms.EXT_XLSX()):
EXCEL = pd.ExcelFile(parms.FILE_SOURCE)
for sn in EXCEL.sheet_names:
sheet = EXCEL.parse(sn)
SHEETS.append(sheet)
process_sheet(sheet, sn)
def process_sheet(sheet, sheet_title):
print("Processing", '"' + sheet_title + '"', "...")
deck = []
if parms.COLUMN_TITLE() not in sheet.keys() or parms.COLUMN_DESCRIPTION() not in sheet.keys():
print("WARNING:", parms.COLUMN_TITLE(), "and", parms.COLUMN_DESCRIPTION(),
"columns must be defined on the sheet. Skipping.")
return
if parms.COLUMN_COUNT() not in sheet.keys():
print("WARNING:", parms.COLUMN_COUNT(), "column not defined on sheet", sheet_title + ".",
"Generating one copy for each card")
sheet[parms.COLUMN_COUNT()] = pd.Series(1, index=sheet.index)
if parms.COLUMN_IDT() not in sheet.keys():
print("WARNING:", parms.COLUMN_IDT(), "column not defined on sheet", sheet_title + ".",
"Ensure that you don't have cards with the same names or define unique identifier")
sheet[parms.COLUMN_IDT()] = pd.Series(None, index=sheet.index)
for index, row in sheet.iterrows():
card_title = cust_title.do(row, sheet_title, row[parms.COLUMN_TITLE()])
if card_included(sheet_title, card_title):
if sheet_title == "Находки":
print(row["Next Location"])
card_description = cust_description.do(row, sheet_title, row[parms.COLUMN_DESCRIPTION()])
card_image = generate_card_image(card_title, card_description)
card_count = row[parms.COLUMN_COUNT()]
card_idt = row[parms.COLUMN_IDT()]
if sheet_title == "Находки":
print(card_description)
card = Card(card_title, card_description, card_image, card_count, card_idt)
deck.append(card)
print(card_count, '"' + card_title + '" cards have been generated.')
save_sheet(sheet_title, deck)
def generate_card_image(title, description):
# scheme, size, background color
img = Image.new('RGB', (parms.DIM_CARD_WIDTH(), parms.DIM_CARD_HEIGHT()), (255, 255, 255))
draw = ImageDraw.Draw(img)
# draw title
unicode_font = ImageFont.truetype("Arial.ttf")
y_text = draw_lines(draw, unicode_font, title, parms.DIM_TEXT_TOP_MARGIN())
# space between title and description
y_text += parms.DIM_TEXT_TOP_MARGIN()
# draw description
for p in str.split(description, "\p"):
for n in str.split(p, "\n"):
y_text = draw_lines(draw, unicode_font, n, y_text)
y_text += parms.DIM_TEXT_TOP_MARGIN()
# border
img = apply_card_border(img)
return img
def draw_lines(draw, font, text, y_text):
lines = textwrap.wrap(text, width=(parms.DIM_CARD_WIDTH() // parms.DIM_CHAR_WIDTH()))
for line in lines:
draw.text((parms.DIM_TEXT_LEFT_MARGIN(), y_text), line, fill=(0, 0, 0), font=font)
y_text += parms.DIM_TEXT_HEIGHT()
return y_text
def apply_card_border(img):
new_size = (img.size[0] + parms.DIM_CARD_BORDER() * 2, img.size[1] + parms.DIM_CARD_BORDER() * 2)
bordered_img = Image.new("RGB", new_size)
bordered_img.paste(img, (parms.DIM_CARD_BORDER(), parms.DIM_CARD_BORDER()))
return bordered_img
def save_sheet(sheet_title, deck):
main_directory = generate_sheet_directories(sheet_title)
pdf = None
if parms.FORMAT == parms.FORMAT_PDF():
pdf = FPDF()
card_paths = []
card_total_count = 0
for c in deck:
card_total_count += c.count
card_counter = 0
for i, card in enumerate(deck):
for j in range(card.count):
# separate images
if card.idt is None:
idt_suffix = ""
else:
idt_suffix = "_" + str(card.idt)
card_path = main_directory + "/" + card.title.replace(" ", "_") + idt_suffix + "_" + str(j) + "." + parms.EXT_PNG()
card_paths.append(card_path)
card.image.save(card_path, parms.EXT_PNG())
card_counter += 1
# combine in one page
if (card_total_count - card_counter) % (parms.CARDS_IN_ROW() * parms.CARDS_IN_COLUMN()) == 0:
print("Page added", card_total_count - card_counter)
sheet_page_image = Image.new('RGB',
(parms.CARDS_IN_ROW() * (parms.DIM_CARD_WIDTH() + parms.DIM_CARD_BORDER() * 2),
parms.CARDS_IN_COLUMN() * (parms.DIM_CARD_HEIGHT() + parms.DIM_CARD_BORDER() * 2 )),
(255,255,255,0))
x_offset = 0
for k, img in enumerate(map(Image.open, card_paths)):
sheet_page_image.paste(img, ((k % parms.CARDS_IN_ROW()) * img.size[0],
(k // parms.CARDS_IN_COLUMN()) * img.size[1]))
x_offset += img.size[0]
sheet_page_image_path = main_directory + "/" + parms.DIR_PAGES() + "/"\
+ parms.FILE_PAGE() + str(card_total_count - card_counter)\
+ "." + parms.EXT_PNG()
sheet_page_image.save(sheet_page_image_path)
# pdf
if parms.FORMAT == parms.FORMAT_PDF():
pdf.add_page()
pdf.image(sheet_page_image_path, x=parms.DIM_PDF_LEFT_MARGIN(), y=parms.DIM_PDF_TOP_MARGIN())
card_paths = []
printing_file = None
if parms.FORMAT == parms.FORMAT_PDF():
printing_file = main_directory + "/" + parms.DIR_PRINT() + "/" + sheet_title.replace(" ", "_")\
+ "." + parms.FORMAT_PDF()
pdf.output(printing_file, "F")
if parms.PRINT is True:
print_sheet(printing_file)
print('"' + sheet_title + '"', "finished.")
def generate_sheet_directories(sheet_title):
main_directory = parms.DIR_OUTPUT + "/" + sheet_title
if not os.path.exists(main_directory):
os.makedirs(main_directory)
for d in [parms.DIR_PAGES(), parms.DIR_PRINT(), parms.DIR_TABLETOP()]:
directory = main_directory + "/" + d
if not os.path.exists(directory):
os.makedirs(directory)
return main_directory
def card_included(sheet_title, card_title):
global MASK_DICT
if sheet_title not in MASK_DICT.keys():
return False
elif parms.MASK_ALL() in MASK_DICT[sheet_title] or card_title in MASK_DICT[sheet_title]:
return True
else:
return False
def print_sheet(sheet_path):
if sheet_path is not None:
print("Printing ...")
if sys.platform == "win32":
os.startfile(sheet_path, "print")
else:
lpr = subprocess.Popen("/usr/bin/lpr", stdin=subprocess.PIPE)
lpr.stdin.write(open(sheet_path, "rb").read())
def nvl(var, val):
if var is None:
return val
return var
if __name__ == "__main__":
build()
| print("ERROR: Source file path is invalid")
return False | conditional_block |
deckbuilder.py | import argparse
import parms
import pandas as pd
import textwrap
import os
import subprocess
import sys
from PIL import Image, ImageDraw, ImageFont
from fpdf import FPDF
from pathlib import Path
from Card import Card
from cust import cust_title
from cust import cust_description
FILE_EXT = parms.EXT_XLSX()
SHEETS = []
EXCEL = None
MASK_DICT = {}
def build():
global FILE_EXT
global SHEETS
global EXCEL
# parse args
parser = argparse.ArgumentParser(description='Building decks')
parser.add_argument('-s', "--source", type=str, action='store', dest='source', help='Excel source')
parser.add_argument('-o', "--output", type=str, action='store', dest='output', help='Output folder')
parser.add_argument('-f', "--format", type=str, action='store', dest='format', help='Only PDF for now')
parser.add_argument('-t', "--tabletop", type=bool, action='store', dest='tabletop',
help='Export for Tabletop Simulator')
parser.add_argument('-p', "--print", type=bool, action='store', dest='print',
help='Print generated files')
args = parser.parse_args()
# redefine global parameters
parms.FILE_SOURCE = args.source
parms.DIR_OUTPUT = nvl(args.output, parms.DIR_OUTPUT)
parms.FORMAT = nvl(args.format, parms.FORMAT)
parms.FLAG_TABLETOP = nvl(args.tabletop, parms.FLAG_TABLETOP)
parms.PRINT = nvl(args.print, parms.PRINT)
print("[Validating parameters]")
if not valid_parameters():
return
print("[Validating masks]")
if not valid_masks():
return
print("[Processing sheets]")
process_sheets()
def valid_parameters():
if parms.FILE_SOURCE is None:
print("ERROR: Source file path is invalid")
return False
if not Path(parms.FILE_SOURCE).is_file():
print("ERROR: Source file path is invalid")
return False
filename, ext = parms.FILE_SOURCE.split(".")
if ext.lower() not in (parms.EXT_XLS(), parms.EXT_XLSX(), parms.EXT_CSV()):
print("ERROR: Source file type is not supported")
return False
else:
global FILE_EXT
FILE_EXT = ext
if parms.FORMAT not in [parms.FORMAT_PDF()]:
print(parms.FORMAT, parms.FORMAT_PDF())
print("ERROR: Export format not supported")
return False
return True
def valid_masks():
global MASK_DICT
for m in parms.MASKS().split(parms.MASK_SEPARATOR()):
if m.count(".", 1, len(m) - 1) != 1:
print(m.count(".", 1, len(m) - 1))
print("ERROR: Mask", '"' + m + '"', "is invalid")
return False
else:
sheet_title, value = m.split(parms.MASK_DOT())
if sheet_title not in MASK_DICT.keys():
MASK_DICT[sheet_title] = []
MASK_DICT[sheet_title].append(value)
print("Masks:", MASK_DICT)
return True
def process_sheets():
global SHEETS
global EXCEL
# excel
if FILE_EXT in (parms.EXT_XLS(), parms.EXT_XLSX()):
EXCEL = pd.ExcelFile(parms.FILE_SOURCE)
for sn in EXCEL.sheet_names:
sheet = EXCEL.parse(sn)
SHEETS.append(sheet)
process_sheet(sheet, sn)
def process_sheet(sheet, sheet_title):
print("Processing", '"' + sheet_title + '"', "...")
deck = []
if parms.COLUMN_TITLE() not in sheet.keys() or parms.COLUMN_DESCRIPTION() not in sheet.keys():
print("WARNING:", parms.COLUMN_TITLE(), "and", parms.COLUMN_DESCRIPTION(),
"columns must be defined on the sheet. Skipping.")
return
if parms.COLUMN_COUNT() not in sheet.keys():
print("WARNING:", parms.COLUMN_COUNT(), "column not defined on sheet", sheet_title + ".",
"Generating one copy for each card")
sheet[parms.COLUMN_COUNT()] = pd.Series(1, index=sheet.index)
if parms.COLUMN_IDT() not in sheet.keys():
print("WARNING:", parms.COLUMN_IDT(), "column not defined on sheet", sheet_title + ".",
"Ensure that you don't have cards with the same names or define unique identifier")
sheet[parms.COLUMN_IDT()] = pd.Series(None, index=sheet.index)
for index, row in sheet.iterrows():
card_title = cust_title.do(row, sheet_title, row[parms.COLUMN_TITLE()])
if card_included(sheet_title, card_title):
if sheet_title == "Находки":
print(row["Next Location"])
card_description = cust_description.do(row, sheet_title, row[parms.COLUMN_DESCRIPTION()])
card_image = generate_card_image(card_title, card_description)
card_count = row[parms.COLUMN_COUNT()]
card_idt = row[parms.COLUMN_IDT()]
if sheet_title == "Находки":
print(card_description)
card = Card(card_title, card_description, card_image, card_count, card_idt)
deck.append(card)
print(card_count, '"' + card_title + '" cards have been generated.')
save_sheet(sheet_title, deck)
def generate_card_image(title, description):
# scheme, size, background color
img = Image.new('RGB', (parms.DIM_CARD_WIDTH(), parms.DIM_CARD_HEIGHT()), (255, 255, 255))
draw = ImageDraw.Draw(img)
# draw title
unicode_font = ImageFont.truetype("Arial.ttf")
y_text = draw_lines(draw, unicode_font, title, parms.DIM_TEXT_TOP_MARGIN())
# space between title and description
y_text += parms.DIM_TEXT_TOP_MARGIN()
# draw description
for p in str.split(description, "\p"):
for n in str.split(p, "\n"):
y_text = draw_lines(draw, unicode_font, n, y_text)
y_text += parms.DIM_TEXT_TOP_MARGIN()
# border
img = apply_card_border(img)
return img
def draw_lines(draw, font, text, y_text):
lines = textwrap.wrap(text, width=(parms.DIM_CARD_WIDTH() // parms.DIM_CHAR_WIDTH()))
for line in lines:
draw.text((parms.DIM_TEXT_LEFT_MARGIN(), y_text), line, fill=(0, 0, 0), font=font)
y_text += parms.DIM_TEXT_HEIGHT()
return y_text
def apply_card_border(img):
new_size = (img.size[0] + parms.DIM_CARD_BORDER() * 2, img.size[1] + parms.DIM_CARD_BORDER() * 2)
bordered_img = Image.new("RGB", new_size)
bordered_img.paste(img, (parms.DIM_CARD_BORDER(), parms.DIM_CARD_BORDER()))
return bordered_img
def save_sheet(sheet_title, deck):
main_directory = generate_sheet_directories(sheet_title)
pdf = None
if parms.FORMAT == parms.FORMAT_PDF():
pdf = FPDF()
card_paths = []
card_total_count = 0
for c in deck:
card_total_count += c.count
card_counter = 0
for i, card in enumerate(deck):
for j in range(card.count):
# separate images
if card.idt is None:
idt_suffix = ""
else:
idt_suffix = "_" + str(card.idt)
card_path = main_directory + "/" + card.title.replace(" ", "_") + idt_suffix + "_" + str(j) + "." + parms.EXT_PNG()
card_paths.append(card_path)
card.image.save(card_path, parms.EXT_PNG())
card_counter += 1
# combine in one page
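# Clarifying note (derived from the condition below): whenever the number of cards still to be
# written is a multiple of CARDS_IN_ROW * CARDS_IN_COLUMN, the card images collected since the
# last page are pasted onto one page image and, if PDF output is selected, added as a new PDF page.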
if (card_total_count - card_counter) % (parms.CARDS_IN_ROW() * parms.CARDS_IN_COLUMN()) == 0:
print("Page added", card_total_count - card_counter)
sheet_page_image = Image.new('RGB',
(parms.CARDS_IN_ROW() * (parms.DIM_CARD_WIDTH() + parms.DIM_CARD_BORDER() * 2),
parms.CARDS_IN_COLUMN() * (parms.DIM_CARD_HEIGHT() + parms.DIM_CARD_BORDER() * 2 )),
(255, 255, 255))
x_offset = 0
for k, img in enumerate(map(Image.open, card_paths)):
sheet_page_image.paste(img, ((k % parms.CARDS_IN_ROW()) * img.size[0],
(k // parms.CARDS_IN_COLUMN()) * img.size[1]))
x_offset += img.size[0]
sheet_page_image_path = main_directory + "/" + parms.DIR_PAGES() + "/"\
+ parms.FILE_PAGE() + str(card_total_count - card_counter)\
+ "." + parms.EXT_PNG()
sheet_page_image.save(sheet_page_image_path)
# pdf
if parms.FORMAT == parms.FORMAT_PDF():
pdf.add_page()
pdf.image(sheet_page_image_path, x=parms.DIM_PDF_LEFT_MARGIN(), y=parms.DIM_PDF_TOP_MARGIN())
card_paths = []
printing_file = None
if parms.FORMAT == parms.FORMAT_PDF():
printing_file = main_directory + "/" + parms.DIR_PRINT() + "/" + sheet_title.replace(" ", "_")\
+ "." + parms.FORMAT_PDF()
pdf.output(printing_file, "F")
if parms.PRINT is True:
print_sheet(printing_file)
print('"' + sheet_title + '"', "finished.")
def generate_sheet_directories(sheet_title):
main_directory = parms.DIR_OUTPUT + "/" + sheet_title
if not os.path.exists(main_directory):
os.makedirs(main_directory)
for d in [parms.DIR_PAGES(), parms.DIR_PRINT(), parms.DIR_TABLETOP()]:
directory = main_directory + "/" + d
if not os.path.exists(directory):
os.makedirs(directory)
return main_directory
def card_included(sheet_title, card_title):
global MASK_DICT
if sheet_title not in MASK_DICT.keys():
return False
elif parms.MASK_ALL() in MASK_DICT[sheet_title] or card_title in MASK_DICT[sheet_title]:
return True
else:
return False
def print_sheet(sh | if sheet_path is not None:
print("Printing ...")
if sys.platform == "win32":
os.startfile(sheet_path, "print")
else:
lpr = subprocess.Popen("/usr/bin/lpr", stdin=subprocess.PIPE)
lpr.stdin.write(open(sheet_path, "rb").read())
def nvl(var, val):
if var is None:
return val
return var
if __name__ == "__main__":
build()
| eet_path):
| identifier_name |
deckbuilder.py | import argparse
import parms
import pandas as pd
import textwrap
import os
import subprocess
import sys
from PIL import Image, ImageDraw, ImageFont
from fpdf import FPDF
from pathlib import Path
from Card import Card
from cust import cust_title
from cust import cust_description
FILE_EXT = parms.EXT_XLSX()
SHEETS = []
EXCEL = None
MASK_DICT = {}
def build():
global FILE_EXT
global SHEETS
global EXCEL
# parse args
parser = argparse.ArgumentParser(description='Building decks')
parser.add_argument('-s', "--source", type=str, action='store', dest='source', help='Excel source')
parser.add_argument('-o', "--output", type=str, action='store', dest='output', help='Output folder')
parser.add_argument('-f', "--format", type=str, action='store', dest='format', help='Only PDF for now')
parser.add_argument('-t', "--tabletop", type=bool, action='store', dest='tabletop',
help='Export for Tabletop Simulator')
parser.add_argument('-p', "--print", type=bool, action='store', dest='print',
help='Print generated files')
args = parser.parse_args()
# redefine global parameters
parms.FILE_SOURCE = args.source
parms.DIR_OUTPUT = nvl(args.output, parms.DIR_OUTPUT)
parms.FORMAT = nvl(args.format, parms.FORMAT)
parms.FLAG_TABLETOP = nvl(args.tabletop, parms.FLAG_TABLETOP)
parms.PRINT = nvl(args.print, parms.PRINT)
print("[Validating parameters]")
if not valid_parameters():
return
print("[Validating masks]")
if not valid_masks():
return
print("[Processing sheets]")
process_sheets()
def valid_parameters():
if parms.FILE_SOURCE is None:
print("ERROR: Source file path is invalid")
return False
if not Path(parms.FILE_SOURCE).is_file():
print("ERROR: Source file path is invalid")
return False
filename, ext = parms.FILE_SOURCE.rsplit(".", 1)  # rsplit so paths containing extra dots still split off the extension cleanly
if ext.lower() not in (parms.EXT_XLS(), parms.EXT_XLSX(), parms.EXT_CSV()):
print("ERROR: Source file type is not supported")
return False
else:
global FILE_EXT
FILE_EXT = ext
if parms.FORMAT not in [parms.FORMAT_PDF()]:
print(parms.FORMAT, parms.FORMAT_PDF())
print("ERROR: Export format not supported")
return False
return True
def valid_masks():
global MASK_DICT
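# Assumption based on the parsing below: each mask looks like "<SheetTitle>.<CardTitle>"
# (or "<SheetTitle>.<MASK_ALL marker>"), and multiple masks are joined with the MASK_SEPARATOR character.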
for m in parms.MASKS().split(parms.MASK_SEPARATOR()):
if m.count(".", 1, len(m) - 1) != 1:
print(m.count(".", 1, len(m) - 1))
print("ERROR: Mask", '"' + m + '"', "is invalid")
return False
else:
sheet_title, value = m.split(parms.MASK_DOT())
if sheet_title not in MASK_DICT.keys():
MASK_DICT[sheet_title] = []
MASK_DICT[sheet_title].append(value)
print("Masks:", MASK_DICT)
return True
def process_sheets():
global SHEETS
global EXCEL
# excel
if FILE_EXT in (parms.EXT_XLS(), parms.EXT_XLSX()):
EXCEL = pd.ExcelFile(parms.FILE_SOURCE)
for sn in EXCEL.sheet_names:
sheet = EXCEL.parse(sn)
SHEETS.append(sheet)
process_sheet(sheet, sn)
def process_sheet(sheet, sheet_title):
print("Processing", '"' + sheet_title + '"', "...")
deck = []
if parms.COLUMN_TITLE() not in sheet.keys() or parms.COLUMN_DESCRIPTION() not in sheet.keys():
print("WARNING:", parms.COLUMN_TITLE(), "and", parms.COLUMN_DESCRIPTION(),
"columns must be defined on the sheet. Skipping.")
return
if parms.COLUMN_COUNT() not in sheet.keys():
print("WARNING:", parms.COLUMN_COUNT(), "column not defined on sheet", sheet_title + ".",
"Generating one copy for each card")
sheet[parms.COLUMN_COUNT()] = pd.Series(1, index=sheet.index)
if parms.COLUMN_IDT() not in sheet.keys():
print("WARNING:", parms.COLUMN_IDT(), "column not defined on sheet", sheet_title + ".",
"Ensure that you don't have cards with the same names or define unique identifier")
sheet[parms.COLUMN_IDT()] = pd.Series(None, index=sheet.index)
for index, row in sheet.iterrows():
card_title = cust_title.do(row, sheet_title, row[parms.COLUMN_TITLE()])
if card_included(sheet_title, card_title):
if sheet_title == "Находки":
print(row["Next Location"])
card_description = cust_description.do(row, sheet_title, row[parms.COLUMN_DESCRIPTION()])
card_image = generate_card_image(card_title, card_description)
card_count = row[parms.COLUMN_COUNT()]
card_idt = row[parms.COLUMN_IDT()]
if sheet_title == "Находки":
print(card_description)
card = Card(card_title, card_description, card_image, card_count, card_idt)
deck.append(card)
print(card_count, '"' + card_title + '" cards have been generated.')
save_sheet(sheet_title, deck)
def generate_card_image(title, description):
# scheme, size, background color
img = Image.new('RGB', (parms.DIM_CARD_WIDTH(), parms.DIM_CARD_HEIGHT()), (255, 255, 255))
draw = ImageDraw.Draw(img)
# draw title
unicode_font = ImageFont.truetype("Arial.ttf")
y_text = draw_lines(draw, unicode_font, title, parms.DIM_TEXT_TOP_MARGIN())
# space between title and description
y_text += parms.DIM_TEXT_TOP_MARGIN()
# draw description
for p in str.split(description, "\p"):
for n in str.split(p, "\n"):
y_text = draw_lines(draw, unicode_font, n, y_text)
y_text += parms.DIM_TEXT_TOP_MARGIN()
# border
img = apply_card_border(img)
return img
def draw_lines(draw, font, text, y_text):
lines = textwrap.wrap(text, width=(parms.DIM_CARD_WIDTH() // parms.DIM_CHAR_WIDTH()))
for line in lines:
draw.text((parms.DIM_TEXT_LEFT_MARGIN(), y_text), line, fill=(0, 0, 0), font=font)
y_text += parms.DIM_TEXT_HEIGHT()
return y_text
def apply_card_border(img):
new_size = (im | eet(sheet_title, deck):
main_directory = generate_sheet_directories(sheet_title)
pdf = None
if parms.FORMAT == parms.FORMAT_PDF():
pdf = FPDF()
card_paths = []
card_total_count = 0
for c in deck:
card_total_count += c.count
card_counter = 0
for i, card in enumerate(deck):
for j in range(card.count):
# separate images
if card.idt is None:
idt_suffix = ""
else:
idt_suffix = "_" + str(card.idt)
card_path = main_directory + "/" + card.title.replace(" ", "_") + idt_suffix + "_" + str(j) + "." + parms.EXT_PNG()
card_paths.append(card_path)
card.image.save(card_path, parms.EXT_PNG())
card_counter += 1
# combine in one page
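# Clarifying note (derived from the condition below): whenever the number of cards still to be
# written is a multiple of CARDS_IN_ROW * CARDS_IN_COLUMN, the card images collected since the
# last page are pasted onto one page image and, if PDF output is selected, added as a new PDF page.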
if (card_total_count - card_counter) % (parms.CARDS_IN_ROW() * parms.CARDS_IN_COLUMN()) == 0:
print("Page added", card_total_count - card_counter)
sheet_page_image = Image.new('RGB',
(parms.CARDS_IN_ROW() * (parms.DIM_CARD_WIDTH() + parms.DIM_CARD_BORDER() * 2),
parms.CARDS_IN_COLUMN() * (parms.DIM_CARD_HEIGHT() + parms.DIM_CARD_BORDER() * 2 )),
(255, 255, 255))
x_offset = 0
for k, img in enumerate(map(Image.open, card_paths)):
sheet_page_image.paste(img, ((k % parms.CARDS_IN_ROW()) * img.size[0],
(k // parms.CARDS_IN_COLUMN()) * img.size[1]))
x_offset += img.size[0]
sheet_page_image_path = main_directory + "/" + parms.DIR_PAGES() + "/"\
+ parms.FILE_PAGE() + str(card_total_count - card_counter)\
+ "." + parms.EXT_PNG()
sheet_page_image.save(sheet_page_image_path)
# pdf
if parms.FORMAT == parms.FORMAT_PDF():
pdf.add_page()
pdf.image(sheet_page_image_path, x=parms.DIM_PDF_LEFT_MARGIN(), y=parms.DIM_PDF_TOP_MARGIN())
card_paths = []
printing_file = None
if parms.FORMAT == parms.FORMAT_PDF():
printing_file = main_directory + "/" + parms.DIR_PRINT() + "/" + sheet_title.replace(" ", "_")\
+ "." + parms.FORMAT_PDF()
pdf.output(printing_file, "F")
if parms.PRINT is True:
print_sheet(printing_file)
print('"' + sheet_title + '"', "finished.")
def generate_sheet_directories(sheet_title):
main_directory = parms.DIR_OUTPUT + "/" + sheet_title
if not os.path.exists(main_directory):
os.makedirs(main_directory)
for d in [parms.DIR_PAGES(), parms.DIR_PRINT(), parms.DIR_TABLETOP()]:
directory = main_directory + "/" + d
if not os.path.exists(directory):
os.makedirs(directory)
return main_directory
def card_included(sheet_title, card_title):
global MASK_DICT
if sheet_title not in MASK_DICT.keys():
return False
elif parms.MASK_ALL() in MASK_DICT[sheet_title] or card_title in MASK_DICT[sheet_title]:
return True
else:
return False
def print_sheet(sheet_path):
if sheet_path is not None:
print("Printing ...")
if sys.platform == "win32":
os.startfile(sheet_path, "print")
else:
lpr = subprocess.Popen("/usr/bin/lpr", stdin=subprocess.PIPE)
lpr.stdin.write(open(sheet_path, "rb").read())
def nvl(var, val):
if var is None:
return val
return var
if __name__ == "__main__":
build()
| g.size[0] + parms.DIM_CARD_BORDER() * 2, img.size[1] + parms.DIM_CARD_BORDER() * 2)
bordered_img = Image.new("RGB", new_size)
bordered_img.paste(img, (parms.DIM_CARD_BORDER(), parms.DIM_CARD_BORDER()))
return bordered_img
def save_sh | identifier_body |
bot.py | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
from telegram import InlineQueryResultPhoto, InlineQueryResultArticle, InputTextMessageContent, ParseMode
from telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters
from telegram.utils.helpers import escape_markdown
import logging
import re  # used by get_cat_image
import requests
from functools import wraps
from uuid import uuid4  # used by inlinequery
import paho.mqtt.publish as publish
from tinydb import TinyDB, Query
from random import randint
from bot_config import API_KEY
from time import localtime, strftime
import datetime as dt
DB = TinyDB('db.json')
# A simple database to store information persistently
def setup_db():
""" initialize a new database """
db = TinyDB('db.json')
chats = db.table('chats')
members = db.table('members')
chats.insert({'id': -231128423}) # Kolab chat group
members.insert({'id': 235493361})
def get_member_ids(db):
table = db.table('members')
return [e['id'] for e in table.all()]
def get_chat_ids(db):
table = db.table('chats')
return [e['id'] for e in table.all()]
def add_member_id(db, id):
members = db.table('members')
Member = Query()
if members.get(Member.id == id) is None:
members.insert({'id': id})
return True
else:
return False
def restricted(func):
"""
This decorator restricts access to a handler so that only
KOLAB users and chat groups can use it
"""
@wraps(func)
def wrapped(update, context, *args, **kwargs):
user_id = update.effective_user.id
chat_id = update.effective_chat.id
members = get_member_ids(DB)
chats = get_chat_ids(DB)
first_name = update.effective_user.first_name
last_name = update.effective_user.last_name
print("Request from {} {} ({}) in chat {}."
.format(first_name, last_name, user_id, chat_id))
if user_id not in members and chat_id not in chats:
# Log unauthorized attempt to console and return
first_name = update.effective_user.first_name
last_name = update.effective_user.last_name
print("Unauthorized request from {} {} ({}) in chat {}."
.format(first_name, last_name, user_id, chat_id))
return
return func(update, context, *args, **kwargs)
return wrapped
@restricted
def inlinequery(update: 'Update', context: 'Context'):
"""Handle inline queries."""
query = update.inline_query.query
results = [
InlineQueryResultArticle(
id=uuid4(),
title="Caps",
input_message_content=InputTextMessageContent(
query.upper())),
InlineQueryResultArticle(
id=uuid4(),
title="Bold",
input_message_content=InputTextMessageContent(
"*{}*".format(escape_markdown(query)),
parse_mode=ParseMode.MARKDOWN)),
InlineQueryResultArticle(
id=uuid4(),
title="Italic",
input_message_content=InputTextMessageContent(
"_{}_".format(escape_markdown(query)),
parse_mode=ParseMode.MARKDOWN))]
# Added to complete the handler: send the results back to Telegram (python-telegram-bot InlineQuery.answer)
update.inline_query.answer(results)
def get_cat_url():
|
def get_cat_image():
allowed_extension = ['jpg','jpeg','png']
file_extension = ''
while file_extension not in allowed_extension:
url = get_cat_url()
file_extension = re.search("([^.]*)$",url).group(1).lower()
return url
@restricted
def meow(update: 'Update', context: 'CallbackContext'):
bot = context.bot
chat_id = update.message.chat_id
url = get_cat_url()
bot.send_photo(chat_id=chat_id, photo=url)
@restricted
def energy_use(update: 'Update', context: 'CallbackContext'):
""" Send picture of current energy use """
bot = context.bot
chat_id = update.message.chat_id
url = "https://vloer.ko-lab.space/verbruikdag.png?random=" + str(randint(1,9999))
try:
bot.send_photo(chat_id=chat_id, photo=url)
except Exception as err:
msg = "Oops...something went wrong: {}".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def pixelpaint(update: 'Update', context: 'CallbackContext'):
""" start pixelpaint app """
args = context.args
message = " ".join(args)
# send "/paint start" to start the mqtt client on the floor-pi
# do this if another program is running on the led floor.
if message == "start":
print("Trying to start LED floor...")
try:
publish.single("vloer/startscript", "paint", hostname="10.94.176.100",
auth={'username': 'vloer', 'password': 'ko-lab'},
port=1883, client_id="kolabbot")
print("LED floor...")
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not start Pixel Paint: {}".format(err)
print(msg)
update.message.reply_text(msg)
# send a link to the pixel paint app
try:
# TODO: try to open pixel paint url
url = "http://10.90.154.80/"
#response = requests.get(url)
update.message.reply_text("To paint the floor, go to {}".format(url))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not start Pixel Paint: ".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def change_led_floor_color(update: 'Update', context: 'CallbackContext'):
"""
Check if sender is member of Ko-Lab group chat. If yes,
change the color of the LED floor. If not, tell them to go away
"""
args = context.args
message = " ".join(args)
try:
publish.single("ledfloorupdates", message, hostname="10.90.154.80", port=1883, client_id="kolabbot")
update.message.reply_text('Changing LED floor color to "{}".'.format(message))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-floor: {}".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def write_to_led_krant(update: 'Update', context: 'CallbackContext'):
"""
show message on LED-krant
"""
args = context.args
message = " ".join(args)
try:
publish.single("ledkrant/write", message, hostname="10.94.176.100", port=1883, client_id="kolabbot",
auth={'username': 'vloer', 'password': 'ko-lab'})
update.message.reply_text('Writing "{}" to LED-krant.'.format(message))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-krant: {}".format(err)
print(msg)
update.message.reply_text(msg)
def show_time_on_krant(context: 'CallbackContext'):
""" show time on LED-krant """
print("Showing time on LED-Krant")
message = strftime("%H:%M", localtime())
try:
publish.single("ledkrant/time", message, hostname="10.94.176.100", port=1883,
client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'})
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-krant: {}".format(err)
print(msg)
def addme(update: 'Update', context: 'CallbackContext'):
""" Add user to the whitelist. """
user_id = update.effective_user.id
chat_id = update.effective_chat.id
chats = get_chat_ids(DB)
if chat_id not in chats:
update.message.reply_text('Did not work. Run this command inside the Ko-Lab group.')
else:
if add_member_id(DB, user_id):
update.message.reply_text('I have added you to the whitelist. You can now send commands from outside the Ko-Lab chat.')
else:
update.message.reply_text('You are already on the whitelist.')
def start(update: 'Update', context: 'CallbackContext'):
""" Send a message when the command /start is issued. """
update.message.reply_text('I am Kolabbot. I pass butter.')
def help(update: 'Update', context: 'CallbackContext'):
""" Send a message when the command /help is issued. """
update.message.reply_text('Beep. Boop.')
def no_command(update: 'Update', context: 'CallbackContext'):
""" What happens when you send a message to the bot with no command. """
update.message.reply_text('Sorry, I am not very chatty. Type / to see a list of commands I understand.')
def error(update: 'Update', context: 'CallbackContext'):
""" Log Errors caused by Updates. """
logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
# Updater checks for new events, then passes them on to the dispatcher.
# Dispatcher sorts them and calls the handling functions.
updater = Updater(API_KEY, use_context=True)
dispatcher = updater.dispatcher
jobs = updater.job_queue
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help))
dispatcher.add_handler(CommandHandler("krant", write_to_led_krant))
dispatcher.add_handler(CommandHandler("floor", change_led_floor_color))
dispatcher.add_handler(CommandHandler("paint", pixelpaint))
dispatcher.add_handler(CommandHandler("addme", addme))
dispatcher.add_handler(CommandHandler("verbruik", energy_use))
dispatcher.add_handler(CommandHandler("meow", meow))
dispatcher.add_handler(MessageHandler(Filters.text, no_command))
dispatcher.add_handler(InlineQueryHandler(inlinequery))
dispatcher.add_error_handler(error)
current = dt.datetime.now()
current_td = dt.timedelta(hours=current.hour, minutes=current.minute, seconds=current.second, microseconds=current.microsecond)
# to_hour = dt.timedelta(hours=round(current_td.total_seconds()/3600))
to_quarter = dt.timedelta(minutes=15 * round(current_td.total_seconds() / 900))
# to_min = dt.timedelta(minutes=round(current_td.total_seconds()/60))
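# Clarifying note: startdelta is today at midnight plus the current time rounded to the nearest
# quarter hour, so the first LED-krant time update aligns with a quarter-hour boundary; the job
# then repeats every 900 seconds.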
startdelta = dt.datetime.combine(current,dt.time(0))+to_quarter
print(startdelta)
jobs.run_repeating(show_time_on_krant, interval=900, first=startdelta)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
logger = logging.getLogger(__name__)
main() | contents = requests.get('https://aws.random.cat/meow').json()
url = contents['file']
return url | identifier_body |
bot.py | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
from telegram import InlineQueryResultPhoto, InlineQueryResultArticle, InputTextMessageContent, ParseMode
from telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters
from telegram.utils.helpers import escape_markdown
import logging
import re  # used by get_cat_image
import requests
from functools import wraps
from uuid import uuid4  # used by inlinequery
import paho.mqtt.publish as publish
from tinydb import TinyDB, Query
from random import randint
from bot_config import API_KEY
from time import localtime, strftime
import datetime as dt
DB = TinyDB('db.json')
# A simple database to store information persistently
def setup_db():
""" initialize a new database """
db = TinyDB('db.json')
chats = db.table('chats')
members = db.table('members')
chats.insert({'id': -231128423}) # Kolab chat group
members.insert({'id': 235493361})
def get_member_ids(db):
table = db.table('members')
return [e['id'] for e in table.all()]
def get_chat_ids(db):
table = db.table('chats')
return [e['id'] for e in table.all()]
def add_member_id(db, id):
members = db.table('members')
Member = Query()
if members.get(Member.id == id) is None:
members.insert({'id': id})
return True
else:
return False
def restricted(func):
"""
This decorator restricts access to a handler so that only
KOLAB users and chat groups can use it
"""
@wraps(func)
def wrapped(update, context, *args, **kwargs):
user_id = update.effective_user.id
chat_id = update.effective_chat.id
members = get_member_ids(DB)
chats = get_chat_ids(DB)
first_name = update.effective_user.first_name
last_name = update.effective_user.last_name
print("Request from {} {} ({}) in chat {}."
.format(first_name, last_name, user_id, chat_id))
if user_id not in members and chat_id not in chats:
# Log unauthorized attempt to console and return
first_name = update.effective_user.first_name
last_name = update.effective_user.last_name
print("Unauthorized request from {} {} ({}) in chat {}."
.format(first_name, last_name, user_id, chat_id))
return
return func(update, context, *args, **kwargs)
return wrapped
@restricted
def inlinequery(update: 'Update', context: 'Context'):
"""Handle inline queries."""
query = update.inline_query.query
results = [
InlineQueryResultArticle(
id=uuid4(),
title="Caps",
input_message_content=InputTextMessageContent(
query.upper())),
InlineQueryResultArticle(
id=uuid4(),
title="Bold",
input_message_content=InputTextMessageContent(
"*{}*".format(escape_markdown(query)),
parse_mode=ParseMode.MARKDOWN)),
InlineQueryResultArticle(
id=uuid4(),
title="Italic",
input_message_content=InputTextMessageContent(
"_{}_".format(escape_markdown(query)),
parse_mode=ParseMode.MARKDOWN))]
# Added to complete the handler: send the results back to Telegram (python-telegram-bot InlineQuery.answer)
update.inline_query.answer(results)
def get_cat_url():
contents = requests.get('https://aws.random.cat/meow').json()
url = contents['file']
return url
def get_cat_image():
allowed_extension = ['jpg','jpeg','png']
file_extension = ''
while file_extension not in allowed_extension:
url = get_cat_url()
file_extension = re.search("([^.]*)$",url).group(1).lower()
return url
@restricted
def meow(update: 'Update', context: 'CallbackContext'):
bot = context.bot
chat_id = update.message.chat_id
url = get_cat_url()
bot.send_photo(chat_id=chat_id, photo=url)
@restricted
def energy_use(update: 'Update', context: 'CallbackContext'):
""" Send picture of current energy use """
bot = context.bot
chat_id = update.message.chat_id
url = "https://vloer.ko-lab.space/verbruikdag.png?random=" + str(randint(1,9999))
try:
bot.send_photo(chat_id=chat_id, photo=url)
except Exception as err:
msg = "Oops...something went wrong: {}".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def pixelpaint(update: 'Update', context: 'CallbackContext'):
""" start pixelpaint app """
args = context.args
message = " ".join(args)
# send "/paint start" to start the mqtt client on the floor-pi
# do this if another program is running on the led floor.
if message == "start":
print("Trying to start LED floor...")
try:
publish.single("vloer/startscript", "paint", hostname="10.94.176.100",
auth={'username': 'vloer', 'password': 'ko-lab'},
port=1883, client_id="kolabbot")
print("LED floor...")
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not start Pixel Paint: {}".format(err)
print(msg)
update.message.reply_text(msg)
# send a link to the pixel paint app
try:
# TODO: try to open pixel paint url
url = "http://10.90.154.80/"
#response = requests.get(url)
update.message.reply_text("To paint the floor, go to {}".format(url))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not start Pixel Paint: ".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def change_led_floor_color(update: 'Update', context: 'CallbackContext'):
"""
Check if sender is member of Ko-Lab group chat. If yes,
change the color of the LED floor. If not, tell them to go away
"""
args = context.args
message = " ".join(args)
try:
publish.single("ledfloorupdates", message, hostname="10.90.154.80", port=1883, client_id="kolabbot")
update.message.reply_text('Changing LED floor color to "{}".'.format(message))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-floor: {}".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def write_to_led_krant(update: 'Update', context: 'CallbackContext'):
"""
show message on LED-krant
"""
args = context.args
message = " ".join(args)
try:
publish.single("ledkrant/write", message, hostname="10.94.176.100", port=1883, client_id="kolabbot",
auth={'username': 'vloer', 'password': 'ko-lab'})
update.message.reply_text('Writing "{}" to LED-krant.'.format(message))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-krant: {}".format(err)
print(msg)
update.message.reply_text(msg)
def show_time_on_krant(context: 'CallbackContext'):
""" show time on LED-krant """
print("Showing time on LED-Krant")
message = strftime("%H:%M", localtime())
try:
publish.single("ledkrant/time", message, hostname="10.94.176.100", port=1883,
client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'})
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-krant: {}".format(err)
print(msg)
def | (update: 'Update', context: 'CallbackContext'):
""" Add user to the whitelist. """
user_id = update.effective_user.id
chat_id = update.effective_chat.id
chats = get_chat_ids(DB)
if chat_id not in chats:
update.message.reply_text('Did not work. Run this command inside the Ko-Lab group.')
else:
if add_member_id(DB, user_id):
update.message.reply_text('I have added you to the whitelist. You can now send commands from outside the Ko-Lab chat.')
else:
update.message.reply_text('You are already on the whitelist.')
def start(update: 'Update', context: 'CallbackContext'):
""" Send a message when the command /start is issued. """
update.message.reply_text('I am Kolabbot. I pass butter.')
def help(update: 'Update', context: 'CallbackContext'):
""" Send a message when the command /help is issued. """
update.message.reply_text('Beep. Boop.')
def no_command(update: 'Update', context: 'CallbackContext'):
""" What happens when you send a message to the bot with no command. """
update.message.reply_text('Sorry, I am not very chatty. Type / to see a list of commands I understand.')
def error(update: 'Update', context: 'CallbackContext'):
""" Log Errors caused by Updates. """
logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
# Updater checks for new events, then passes them on to the dispatcher.
# Dispatcher sorts them and calls the handling functions.
updater = Updater(API_KEY, use_context=True)
dispatcher = updater.dispatcher
jobs = updater.job_queue
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help))
dispatcher.add_handler(CommandHandler("krant", write_to_led_krant))
dispatcher.add_handler(CommandHandler("floor", change_led_floor_color))
dispatcher.add_handler(CommandHandler("paint", pixelpaint))
dispatcher.add_handler(CommandHandler("addme", addme))
dispatcher.add_handler(CommandHandler("verbruik", energy_use))
dispatcher.add_handler(CommandHandler("meow", meow))
dispatcher.add_handler(MessageHandler(Filters.text, no_command))
dispatcher.add_handler(InlineQueryHandler(inlinequery))
dispatcher.add_error_handler(error)
current = dt.datetime.now()
current_td = dt.timedelta(hours=current.hour, minutes=current.minute, seconds=current.second, microseconds=current.microsecond)
# to_hour = dt.timedelta(hours=round(current_td.total_seconds()/3600))
to_quarter = dt.timedelta(minutes=15 * round(current_td.total_seconds() / 900))
# to_min = dt.timedelta(minutes=round(current_td.total_seconds()/60))
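# Clarifying note: startdelta is today at midnight plus the current time rounded to the nearest
# quarter hour, so the first LED-krant time update aligns with a quarter-hour boundary; the job
# then repeats every 900 seconds.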
startdelta = dt.datetime.combine(current,dt.time(0))+to_quarter
print(startdelta)
jobs.run_repeating(show_time_on_krant, interval=900, first=startdelta)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
logger = logging.getLogger(__name__)
main() | addme | identifier_name |
bot.py | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
from telegram import InlineQueryResultPhoto, InlineQueryResultArticle, InputTextMessageContent, ParseMode
from telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters
from telegram.utils.helpers import escape_markdown
import logging
import re  # used by get_cat_image
import requests
from functools import wraps
from uuid import uuid4  # used by inlinequery
import paho.mqtt.publish as publish
from tinydb import TinyDB, Query
from random import randint
from bot_config import API_KEY
from time import localtime, strftime
import datetime as dt
DB = TinyDB('db.json')
# A simple database to store information persistently
def setup_db():
""" initialize a new database """
db = TinyDB('db.json')
chats = db.table('chats')
members = db.table('members')
chats.insert({'id': -231128423}) # Kolab chat group
members.insert({'id': 235493361})
def get_member_ids(db):
table = db.table('members')
return [e['id'] for e in table.all()]
def get_chat_ids(db):
table = db.table('chats')
return [e['id'] for e in table.all()]
def add_member_id(db, id):
members = db.table('members')
Member = Query()
if members.get(Member.id == id) is None:
members.insert({'id': id})
return True
else:
return False
def restricted(func):
"""
This decorator restricts access to a handler so that only
KOLAB users and chat groups can use it
"""
@wraps(func)
def wrapped(update, context, *args, **kwargs):
user_id = update.effective_user.id
chat_id = update.effective_chat.id
members = get_member_ids(DB)
chats = get_chat_ids(DB)
first_name = update.effective_user.first_name
last_name = update.effective_user.last_name
print("Request from {} {} ({}) in chat {}."
.format(first_name, last_name, user_id, chat_id))
if user_id not in members and chat_id not in chats:
# Log unauthorized attempt to console and return
first_name = update.effective_user.first_name
last_name = update.effective_user.last_name
print("Unauthorized request from {} {} ({}) in chat {}."
.format(first_name, last_name, user_id, chat_id))
return
return func(update, context, *args, **kwargs) |
@restricted
def inlinequery(update: 'Update', context: 'Context'):
"""Handle inline queries."""
query = update.inline_query.query
results = [
InlineQueryResultArticle(
id=uuid4(),
title="Caps",
input_message_content=InputTextMessageContent(
query.upper())),
InlineQueryResultArticle(
id=uuid4(),
title="Bold",
input_message_content=InputTextMessageContent(
"*{}*".format(escape_markdown(query)),
parse_mode=ParseMode.MARKDOWN)),
InlineQueryResultArticle(
id=uuid4(),
title="Italic",
input_message_content=InputTextMessageContent(
"_{}_".format(escape_markdown(query)),
parse_mode=ParseMode.MARKDOWN))]
# Added to complete the handler: send the results back to Telegram (python-telegram-bot InlineQuery.answer)
update.inline_query.answer(results)
def get_cat_url():
contents = requests.get('https://aws.random.cat/meow').json()
url = contents['file']
return url
def get_cat_image():
allowed_extension = ['jpg','jpeg','png']
file_extension = ''
while file_extension not in allowed_extension:
url = get_cat_url()
file_extension = re.search("([^.]*)$",url).group(1).lower()
return url
@restricted
def meow(update: 'Update', context: 'CallbackContext'):
bot = context.bot
chat_id = update.message.chat_id
url = get_cat_url()
bot.send_photo(chat_id=chat_id, photo=url)
@restricted
def energy_use(update: 'Update', context: 'CallbackContext'):
""" Send picture of current energy use """
bot = context.bot
chat_id = update.message.chat_id
url = "https://vloer.ko-lab.space/verbruikdag.png?random=" + str(randint(1,9999))
try:
bot.send_photo(chat_id=chat_id, photo=url)
except Exception as err:
msg = "Oops...something went wrong: {}".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def pixelpaint(update: 'Update', context: 'CallbackContext'):
""" start pixelpaint app """
args = context.args
message = " ".join(args)
# send "/paint start" to start the mqtt client on the floor-pi
# do this if another program is running on the led floor.
if message == "start":
print("Trying to start LED floor...")
try:
publish.single("vloer/startscript", "paint", hostname="10.94.176.100",
auth={'username': 'vloer', 'password': 'ko-lab'},
port=1883, client_id="kolabbot")
print("LED floor...")
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not start Pixel Paint: {}".format(err)
print(msg)
update.message.reply_text(msg)
# send a link to the pixel paint app
try:
# TODO: try to open pixel paint url
url = "http://10.90.154.80/"
#response = requests.get(url)
update.message.reply_text("To paint the floor, go to {}".format(url))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not start Pixel Paint: ".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def change_led_floor_color(update: 'Update', context: 'CallbackContext'):
"""
Check if sender is member of Ko-Lab group chat. If yes,
change the color of the LED floor. If not, tell them to go away
"""
args = context.args
message = " ".join(args)
try:
publish.single("ledfloorupdates", message, hostname="10.90.154.80", port=1883, client_id="kolabbot")
update.message.reply_text('Changing LED floor color to "{}".'.format(message))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-floor: {}".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def write_to_led_krant(update: 'Update', context: 'CallbackContext'):
"""
show message on LED-krant
"""
args = context.args
message = " ".join(args)
try:
publish.single("ledkrant/write", message, hostname="10.94.176.100", port=1883, client_id="kolabbot",
auth={'username': 'vloer', 'password': 'ko-lab'})
update.message.reply_text('Writing "{}" to LED-krant.'.format(message))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-krant: {}".format(err)
print(msg)
update.message.reply_text(msg)
def show_time_on_krant(context: 'CallbackContext'):
""" show time on LED-krant """
print("Showing time on LED-Krant")
message = strftime("%H:%M", localtime())
try:
publish.single("ledkrant/time", message, hostname="10.94.176.100", port=1883,
client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'})
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-krant: {}".format(err)
print(msg)
def addme(update: 'Update', context: 'CallbackContext'):
""" Add user to the whitelist. """
user_id = update.effective_user.id
chat_id = update.effective_chat.id
chats = get_chat_ids(DB)
if chat_id not in chats:
update.message.reply_text('Did not work. Run this command inside the Ko-Lab group.')
else:
if add_member_id(DB, user_id):
update.message.reply_text('I have added you to the whitelist. You can now send commands from outside the Ko-Lab chat.')
else:
update.message.reply_text('You are already on the whitelist.')
def start(update: 'Update', context: 'CallbackContext'):
""" Send a message when the command /start is issued. """
update.message.reply_text('I am Kolabbot. I pass butter.')
def help(update: 'Update', context: 'CallbackContext'):
""" Send a message when the command /help is issued. """
update.message.reply_text('Beep. Boop.')
def no_command(update: 'Update', context: 'CallbackContext'):
""" What happens when you send a message to the bot with no command. """
update.message.reply_text('Sorry, I am not very chatty. Type / to see a list of commands I understand.')
def error(update: 'Update', context: 'CallbackContext'):
""" Log Errors caused by Updates. """
logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
# Updater checks for new events, then passes them on to the dispatcher.
# Dispatcher sorts them and calls the handling functions.
updater = Updater(API_KEY, use_context=True)
dispatcher = updater.dispatcher
jobs = updater.job_queue
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help))
dispatcher.add_handler(CommandHandler("krant", write_to_led_krant))
dispatcher.add_handler(CommandHandler("floor", change_led_floor_color))
dispatcher.add_handler(CommandHandler("paint", pixelpaint))
dispatcher.add_handler(CommandHandler("addme", addme))
dispatcher.add_handler(CommandHandler("verbruik", energy_use))
dispatcher.add_handler(CommandHandler("meow", meow))
dispatcher.add_handler(MessageHandler(Filters.text, no_command))
dispatcher.add_handler(InlineQueryHandler(inlinequery))
dispatcher.add_error_handler(error)
current = dt.datetime.now()
current_td = dt.timedelta(hours=current.hour, minutes=current.minute, seconds=current.second, microseconds=current.microsecond)
# to_hour = dt.timedelta(hours=round(current_td.total_seconds()/3600))
to_quarter = dt.timedelta(minutes=15 * round(current_td.total_seconds() / 900))
# to_min = dt.timedelta(minutes=round(current_td.total_seconds()/60))
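# Clarifying note: startdelta is today at midnight plus the current time rounded to the nearest
# quarter hour, so the first LED-krant time update aligns with a quarter-hour boundary; the job
# then repeats every 900 seconds.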
startdelta = dt.datetime.combine(current,dt.time(0))+to_quarter
print(startdelta)
jobs.run_repeating(show_time_on_krant, interval=900, first=startdelta)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
logger = logging.getLogger(__name__)
main() | return wrapped | random_line_split |
bot.py | #!/usr/bin/env python3.6
# -*- coding: utf-8 -*-
from telegram import InlineQueryResultPhoto, InlineQueryResultArticle, InputTextMessageContent, ParseMode
from telegram.ext import Updater, CommandHandler, InlineQueryHandler, MessageHandler, Filters
from telegram.utils.helpers import escape_markdown
import logging
import re  # used by get_cat_image
import requests
from functools import wraps
from uuid import uuid4  # used by inlinequery
import paho.mqtt.publish as publish
from tinydb import TinyDB, Query
from random import randint
from bot_config import API_KEY
from time import localtime, strftime
import datetime as dt
DB = TinyDB('db.json')
# A simple database to store information persistently
def setup_db():
""" initialize a new database """
db = TinyDB('db.json')
chats = db.table('chats')
members = db.table('members')
chats.insert({'id': -231128423}) # Kolab chat group
members.insert({'id': 235493361})
def get_member_ids(db):
table = db.table('members')
return [e['id'] for e in table.all()]
def get_chat_ids(db):
table = db.table('chats')
return [e['id'] for e in table.all()]
def add_member_id(db, id):
members = db.table('members')
Member = Query()
if members.get(Member.id == id) is None:
members.insert({'id': id})
return True
else:
return False
def restricted(func):
"""
This decorator restricts access to a handler so that only
KOLAB users and chat groups can use it
"""
@wraps(func)
def wrapped(update, context, *args, **kwargs):
user_id = update.effective_user.id
chat_id = update.effective_chat.id
members = get_member_ids(DB)
chats = get_chat_ids(DB)
first_name = update.effective_user.first_name
last_name = update.effective_user.last_name
print("Request from {} {} ({}) in chat {}."
.format(first_name, last_name, user_id, chat_id))
if user_id not in members and chat_id not in chats:
# Log unauthorized attempt to console and return
first_name = update.effective_user.first_name
last_name = update.effective_user.last_name
print("Unauthorized request from {} {} ({}) in chat {}."
.format(first_name, last_name, user_id, chat_id))
return
return func(update, context, *args, **kwargs)
return wrapped
@restricted
def inlinequery(update: 'Update', context: 'Context'):
"""Handle inline queries."""
query = update.inline_query.query
results = [
InlineQueryResultArticle(
id=uuid4(),
title="Caps",
input_message_content=InputTextMessageContent(
query.upper())),
InlineQueryResultArticle(
id=uuid4(),
title="Bold",
input_message_content=InputTextMessageContent(
"*{}*".format(escape_markdown(query)),
parse_mode=ParseMode.MARKDOWN)),
InlineQueryResultArticle(
id=uuid4(),
title="Italic",
input_message_content=InputTextMessageContent(
"_{}_".format(escape_markdown(query)),
parse_mode=ParseMode.MARKDOWN))]
# Added to complete the handler: send the results back to Telegram (python-telegram-bot InlineQuery.answer)
update.inline_query.answer(results)
def get_cat_url():
contents = requests.get('https://aws.random.cat/meow').json()
url = contents['file']
return url
def get_cat_image():
allowed_extension = ['jpg','jpeg','png']
file_extension = ''
while file_extension not in allowed_extension:
url = get_cat_url()
file_extension = re.search("([^.]*)$",url).group(1).lower()
return url
@restricted
def meow(update: 'Update', context: 'CallbackContext'):
bot = context.bot
chat_id = update.message.chat_id
url = get_cat_url()
bot.send_photo(chat_id=chat_id, photo=url)
@restricted
def energy_use(update: 'Update', context: 'CallbackContext'):
""" Send picture of current energy use """
bot = context.bot
chat_id = update.message.chat_id
url = "https://vloer.ko-lab.space/verbruikdag.png?random=" + str(randint(1,9999))
try:
bot.send_photo(chat_id=chat_id, photo=url)
except Exception as err:
msg = "Oops...something went wrong: {}".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def pixelpaint(update: 'Update', context: 'CallbackContext'):
""" start pixelpaint app """
args = context.args
message = " ".join(args)
# send "/paint start" to start the mqtt client on the floor-pi
# do this if another program is running on the led floor.
if message == "start":
|
# send a link to the pixel paint app
try:
# TODO: try to open pixel paint url
url = "http://10.90.154.80/"
#response = requests.get(url)
update.message.reply_text("To paint the floor, go to {}".format(url))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not start Pixel Paint: ".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def change_led_floor_color(update: 'Update', context: 'CallbackContext'):
"""
Check if sender is member of Ko-Lab group chat. If yes,
change the color of the LED floor. If not, tell them to go away
"""
args = context.args
message = " ".join(args)
try:
publish.single("ledfloorupdates", message, hostname="10.90.154.80", port=1883, client_id="kolabbot")
update.message.reply_text('Changing LED floor color to "{}".'.format(message))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-floor: {}".format(err)
print(msg)
update.message.reply_text(msg)
@restricted
def write_to_led_krant(update: 'Update', context: 'CallbackContext'):
"""
show message on LED-krant
"""
args = context.args
message = " ".join(args)
try:
publish.single("ledkrant/write", message, hostname="10.94.176.100", port=1883, client_id="kolabbot",
auth={'username': 'vloer', 'password': 'ko-lab'})
update.message.reply_text('Writing "{}" to LED-krant.'.format(message))
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-krant: {}".format(err)
print(msg)
update.message.reply_text(msg)
def show_time_on_krant(context: 'CallbackContext'):
""" show time on LED-krant """
print("Showing time on LED-Krant")
message = strftime("%H:%M", localtime())
try:
publish.single("ledkrant/time", message, hostname="10.94.176.100", port=1883,
client_id="kolabbot", auth={'username': 'vloer', 'password': 'ko-lab'})
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not connect to LED-krant: {}".format(err)
print(msg)
def addme(update: 'Update', context: 'CallbackContext'):
""" Add user to the whitelist. """
user_id = update.effective_user.id
chat_id = update.effective_chat.id
chats = get_chat_ids(DB)
if chat_id not in chats:
update.message.reply_text('Did not work. Run this command inside the Ko-Lab group.')
else:
if add_member_id(DB, user_id):
update.message.reply_text('I have added you to the whitelist. You can now send commands from outside the Ko-Lab chat.')
else:
update.message.reply_text('You are already on the whitelist.')
def start(update: 'Update', context: 'CallbackContext'):
""" Send a message when the command /start is issued. """
update.message.reply_text('I am Kolabbot. I pass butter.')
def help(update: 'Update', context: 'CallbackContext'):
""" Send a message when the command /help is issued. """
update.message.reply_text('Beep. Boop.')
def no_command(update: 'Update', context: 'CallbackContext'):
""" What happens when you send a message to the bot with no command. """
update.message.reply_text('Sorry, I am not very chatty. Type / to see a list of commands I understand.')
def error(update: 'Update', context: 'CallbackContext'):
""" Log Errors caused by Updates. """
logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
# Updater checks for new events, then passes them on to the dispatcher.
# Dispatcher sorts them and calls the handling functions.
updater = Updater(API_KEY, use_context=True)
dispatcher = updater.dispatcher
jobs = updater.job_queue
logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
level=logging.INFO)
dispatcher.add_handler(CommandHandler("start", start))
dispatcher.add_handler(CommandHandler("help", help))
dispatcher.add_handler(CommandHandler("krant", write_to_led_krant))
dispatcher.add_handler(CommandHandler("floor", change_led_floor_color))
dispatcher.add_handler(CommandHandler("paint", pixelpaint))
dispatcher.add_handler(CommandHandler("addme", addme))
dispatcher.add_handler(CommandHandler("verbruik", energy_use))
dispatcher.add_handler(CommandHandler("meow", meow))
dispatcher.add_handler(MessageHandler(Filters.text, no_command))
dispatcher.add_handler(InlineQueryHandler(inlinequery))
dispatcher.add_error_handler(error)
current = dt.datetime.now()
current_td = dt.timedelta(hours=current.hour, minutes=current.minute, seconds=current.second, microseconds=current.microsecond)
# to_hour = dt.timedelta(hours=round(current_td.total_seconds()/3600))
to_quarter = dt.timedelta(minutes=15 * round(current_td.total_seconds() / 900))
# to_min = dt.timedelta(minutes=round(current_td.total_seconds()/60))
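# Clarifying note: startdelta is today at midnight plus the current time rounded to the nearest
# quarter hour, so the first LED-krant time update aligns with a quarter-hour boundary; the job
# then repeats every 900 seconds.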
startdelta = dt.datetime.combine(current,dt.time(0))+to_quarter
print(startdelta)
jobs.run_repeating(show_time_on_krant, interval=900, first=startdelta)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
logger = logging.getLogger(__name__)
main() | print("Trying to start LED floor...")
try:
publish.single("vloer/startscript", "paint", hostname="10.94.176.100",
auth={'username': 'vloer', 'password': 'ko-lab'},
port=1883, client_id="kolabbot")
print("LED floor...")
except (ConnectionRefusedError, TimeoutError) as err:
msg = "Could not start Pixel Paint: {}".format(err)
print(msg)
update.message.reply_text(msg) | conditional_block |
scrapedin.py | #!/usr/bin/env python
import sys
import argparse
import re
import csv
import os
import getpass
import platform
import logging
import time
try:
from tabulate import tabulate
except ImportError:
print('Missing required package: Tabulate')
sys.exit(os.EX_SOFTWARE)
try:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.common import exceptions
except ImportError:
print('Missing required package: selenium\n')
print('Did you forget to run setup.py?\npython3 setup.py install')
sys.exit(os.EX_SOFTWARE)
if not sys.version_info[0:2] >= (3, 3):
print(
"This script depends on python version 3.3 or higher. We recommend the latest version available from 'https://www.python.org/downloads/'")
sys.exit(os.EX_SOFTWARE)
ARCH = platform.architecture()[0]
if '64' in ARCH:
ARCH = '64'
elif '32' in ARCH:
ARCH = '32'
else:
print('[-] Your architecture isn\'t supported. What are you using, DOS!? Doofus >_<')
sys.exit(os.EX_SOFTWARE)
HELP_EPILOG = """
You may specify multiple formats by using a comma. Using {domain} will
dynamically pick the domain based off of the company name. Here are some common
formats:
Format Schema
--------------------------- -----------------------------
[First Initial] [Last Name] {first:.1}{last}@{domain}.com
[First Name] [Last Initial] {first}{last:.1}@{domain}.com
[First Name].[Last Name] {first}.{last}@{domain}.com
[Last Name].[First Name] {last}.{first}@{domain}.com
"""
USER_ENTITY_CLASS = 'entity-result__item' # Historically search-result__wrapper
class Webpage:
def __init__(self, loglvl='INFO', geckodriver=None):
capabilities = webdriver.DesiredCapabilities().FIREFOX
capabilities["marionette"] = True
self.page = webdriver.Firefox(
executable_path=os.path.join(os.path.dirname(__file__), 'webdriver', ARCH, 'geckodriver'),
log_path='/dev/null', capabilities=capabilities)
self.employee_data = {}
self.log = logging.getLogger(
logging.basicConfig(level=getattr(logging, loglvl),
format="%(name)-15s %(levelname)-10s %(asctime)-10s %(message)s")
)
def enter_data(self, field, text):
'''
Enter data by providing the ID of the HTML field and the text you would like to enter.
web = Webpage()
web.enter_data('your_field', 'the data you want entered')
:param str field: HTML field to enter data
:param str text: The text you want entered into "field"
:rtype: None
'''
self.page.execute_script("document.getElementById(\"" + field + "\").setAttribute('value', \"" + text + "\")")
@staticmethod
def sanitize_name(name):
'''
Takes a given name and sanitizes it for use as an email address. If a name contains more than 4 words,
or characters other than letters, spaces and apostrophes (commas and parentheses are handled specially), it will NOT be used.
If a name contains a comma, we can safely split on that character and reliably detect the user's name.
Names like "John (Allan) Doe" are normally used by people who go by a nickname. The script will break
these names into two separate names:
- John Doe
- Allan Doe
:param str name: Person's name
:rtype: str or None
'''
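# Illustrative examples (based on the docstring above):
#   sanitize_name("John Doe")         -> ["John Doe"]
#   sanitize_name("John (Allan) Doe") -> ["John Doe", "Allan Doe"]
#   sanitize_name("John Doe, CPA")    -> ["John Doe"]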
if ',' in name:
name = name.split(',')[0]
if len(name.split()) > 4:
# Too many words in the name..Won't be able to reliably create an email address.
return None
if re.search("[(].*[)]", name):
name_list = []
# Sometimes nicknames are put in parenthesis. This will find those and create two email guesses.
# First Last
# text_in_parenthesis Last
nickname = re.search("[(].*[)]", name).group()
true_name = ' '.join([i.strip() for i in name.split(nickname)])
if re.match("^[' a-zA-Z']*$", true_name):
name_list.append(true_name)
fixed_nickname = nickname.replace('(', '').replace(')', '').strip()
nick = [i.strip() for i in name.replace(nickname, '').split()]
nick[0] = fixed_nickname
nick = ' '.join(nick)
if re.match("^[' a-zA-Z']*$", nick):
name_list.append(nick)
return name_list
elif re.match("^[' a-zA-Z']*$", name):
# Only matches if the name contains uppercase, lowercase or spaces.
return [name]
else:
return None
def login(self, username, password):
'''
Log in to the LinkedIn web page.
The only args required to use this function are the username and password to log into linkedin.
Example usage:
parser = argparse.ArgumentParser()
required = parser.add_argument_group('required arguments')
required.add_argument('-u', dest='username', required=True, help='LinkedIn Login Email')
args = parser.parse_args()
password = getpass.getpass(prompt='LinkedIn Password: ')
web = Webpage()
web.login(args.username, password)
:param str username: LinkedIn login email
:param str password: LinkedIn login password
:rtype: None
'''
self.page.get("https://www.linkedin.com/")
WebDriverWait(self.page, 10).until(EC.presence_of_element_located((By.NAME, 'session_key')))
self.enter_data('session_key', username)
self.enter_data('session_password', password)
# Find and click submit button by type
submit_btn = self.page.find_elements_by_xpath("//button[@type='submit']")[0]
submit_btn.click()
def apply_filters(self, company, url=None, georegion=None, industry=None, job_title=None):
'''
Utilize the method within the cycle_users function to build different search
parameters such as location, geotag, company, job-title, etc.
This function will return the full URL.
:param str company: target company name
:param str url: default (or custom) linkedin url for faceted linkedin search
:param str georegion: geographic region (-g) to filter
:param str industry: industry (-i_ to filter
:param str job_title: job title (-j) to filter
:rtype: string (url if successful) or int (Unix-style error integer if error is encountered)
'''
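# Illustrative example (assumed values, derived from the string building below):
#   apply_filters("acme", job_title="engineer") would return something like
#   https://www.linkedin.com/search/results/people/?company=acme&title=engineer&origin=FACETED_SEARCH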
filters = []
if not url:
url = 'https://www.linkedin.com/search/results/people/?'
# Filter by Company
# Allows the user to scrape linkedin without specifying a target company, but must do so with intent
if company != "NONE":
filters.append('company={0}'.format(company))
# Filter by Geographic Region
if georegion:
# region object is created for future-proofing in case new filters become available or formatting changes
region = {}
try:
region['full_line'] = list_search('georegion', term=georegion, return_results=True)
region['name'] = region['full_line'].split('\t')[-1]
region['code'] = region['full_line'].split('\t')[0].split(
'.') # Should be continent.country.province/state.city_id
region.update({key: value.replace(' ', '') for (key, value) in
zip(('continent', 'country', 'state', 'id'), region['code'])})
filters.append('facetGeoRegion=%5B"{0}%3A{1}"%5D'.format(region['country'], region['id']))
except (IndexError, KeyError, ValueError):
self.log.error("[-] The region you chose is too broad to search. Search by City only")
return os.EX_NOINPUT
# Filter by Industry
if industry:
ind = list_search('industry', term=industry, return_results=True)
if ind:
i_code = ind.split('\t')[0].replace(' ', '')
filters.append('facetIndustry=%5B"{0}"%5D'.format(i_code))
filters.append("origin=FACETED_SEARCH")
if job_title:
filters.append('title={0}'.format(job_title))
else:
filters.append('title=')
# Join additional parameters to the URL by ampersand (&). Order doesn't matter.
filters.append('origin=FACETED_SEARCH')
url += "&".join(filters).lstrip("&") if len(filters) > 1 else filters[0]
self.log.debug("Filtered URL: " + url)
return url
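# Illustrative result (hypothetical company and geo code): apply_filters('acme', georegion='berlin', job_title='engineer')
# would return a URL along the lines of
#   https://www.linkedin.com/search/results/people/?company=acme&facetGeoRegion=%5B"de%3A456"%5D&title=engineer&origin=FACETED_SEARCH
# The code de/456 is made up here; real values come from refs/georegions.txt via list_search.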
def cycle_users(self, company, url, max_users=None):
'''
You must run the login method before cycle_users will run. Once the login method has run, cycle_users can
collect the names and titles of employees at the company you specify. This method requires the company name
and optional value max_users from argparse. See the login method for a code example.
:param str company: target company name
:param int max_users: maximum number of employees to scrape (defaults to no limit)
:rtype: None (self.employee_data will be populated with names, titles and profile URLs)
'''
# Wait for home screen after login
WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.XPATH, "//*[@id='global-nav']/div")))
self.log.debug("URL: " + str(url))
try:
self.page.get(url)
except exceptions.WebDriverException as err:
self.log.error("An error occurred while getting the company page: \n{0}".format(err))
self.log.critical("[!] Check the company name or URL used")
return os.EX_USAGE
count = 1 # WebElements cannot be used for iteration..
current_page = 1
if not max_users:
|
while max_users > len(self.employee_data) and current_page < 100:
self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);")
try:
WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.CLASS_NAME, 'active')))
# Check if the page contains the "no search results" class. This means we are out of users
# This will raise a NoSuchElementException if the element is not found
self.page.find_element_by_class_name("search-no-results__container")
break
except exceptions.NoSuchElementException:
pass
except exceptions.TimeoutException:
# Page didn't load correctly after 20 seconds, cannot reliably recover. Bailing.
return
try:
WebDriverWait(self.page, 5).until(EC.visibility_of_element_located((By.CLASS_NAME, 'name')))
except exceptions.TimeoutException:
try:
if self.page.find_elements_by_class_name('actor-name'):
# If this is true, the page is filled with "LinkedIn Member". It doesn't mean there's no users
# available on the page. If this is the case, click next.
current_page += 1
if 'disabled=""' in self.page.find_element_by_class_name(
"artdeco-pagination__button--next").parent.page_source:
# If this is true then the Next button is "disabled". This happens when there's no more pages
break
self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name(
"artdeco-pagination__button--next"))
continue
except exceptions.NoSuchElementException:
# Reached when there's no more users available on the page.
break
try:
# Get the current page number (at the bottom of a company search)
# The value returned from the HTML looks like this: '1\nCurrent page'
new_page = int(self.page.find_elements_by_class_name('active')[-1].text.split()[0])
except ValueError:
# If there's only one page, linkedin doesn't show page numbers at the bottom. The only result
# will be the text string "people", therefore when we try to convert the value to int we raise
# an exception
new_page = 1
except IndexError:
# Page likely came back with "No more users" even though there appeared to be pages left
return
except exceptions.StaleElementReferenceException:
# Handles a race condition where elements are found but are not populated yet.
continue
for pagnation in self.page.find_elements_by_class_name("artdeco-pagination__button"):
if pagnation.text != "Next":
continue
if not pagnation.is_enabled():
# Next button is disabled.. This is linkedins way of saying "We are done here"
return
if current_page != new_page:
# The script is too fast. This verifies a new page has loaded before proceeding.
continue
# Scrolling to the bottom of the page loads all elements (employee names)
self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Give the elements a second to populate fully
time.sleep(1)
# finds each employee element by a globally set class name
employee_elements = self.page.find_elements_by_xpath("//div[@class='entity-result__item']")
self.log.debug(employee_elements)
for employee in employee_elements:
if count > len(employee_elements):
count = 1
current_page += 1
# click next page
try:
self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name(
"artdeco-pagination__button--next"))
break
except exceptions.NoSuchElementException:
# No more pages
return os.EX_OK
try:
# The elements of LinkedIn change frequently, but the text data of the elements is more-or-less reliable.
# It's better to split via newline than parse xpaths.
data = employee.text.split('\n')
if "LinkedIn Member" not in data[0] and len(data) >= 5:
name = Webpage.sanitize_name(data[0])
title_text = data[3]
region = data[4]
else:
count += 1
continue
try:
# This element does not always exist, so a NoSuchElementException may be raised here and must be handled.
alt_text = employee.find_element_by_class_name('search-result__snippets').text
except exceptions.NoSuchElementException:
alt_text = False
title, _, company = title_text.partition(' at ')
if alt_text and not company:
alt_text = alt_text.lstrip('Current: ')
t, _, company = alt_text.partition(' at ')
if not title:
title = t
dept = self.dept_wizard(title)
# If company is still empty at this point, bail out to unemployment
company = company or 'UNEMPLOYED'
url = employee.find_element_by_xpath("//a[@class='app-aware-link']").get_attribute('href')
except (IndexError, exceptions.NoSuchElementException):
count += 1
continue
if not name:
continue
for person in name:
self.log.info(person)
self.employee_data.update({person: [dept, title, company, region, url]})
count += 1
@staticmethod
def dept_wizard(linkedin_title):
'''
Attempt to determine which department a given user belongs to based on their title. If a department cannot
be reliably determined then None is returned. It is advised to compare the raw, untouched
titles to the output of dept_wizard(); blindly trusting dept_wizard() could lead to some awkward situations.
If a title matches any of the values in the tuples below, the first value in the tuple will populate the
department column in the CSV.
:param str linkedin_title: a string reflecting an employee's title
:rtype: str or None
'''
sales = ('Sales', 'Account Manager', 'Account Executive', 'New Business', 'Relationship Manager')
hr = ('HR', 'Human Resources', 'Benefits Admin', 'Payroll', 'Talent', 'Recruiter')
accounting = ('Accounting', 'Accountant', 'Financial', 'Finance', 'Billing')
marketing = ('Marketing', 'Content', 'Brand', 'seo', 'Social Media')
it = (
'IT', 'Information Technology', 'Network Engineer', 'Network Admin', 'System Admin', 'sysadmin',
'sys admin',
'Help Desk', 'ITHD', 'Developer', 'Dev')
infosec = (
'Infosec', 'Red Team', 'Blue Team', 'Offensive', 'Defensive', 'Pentest', 'Penetration',
'Information Security')
executive = ('Executive', 'Exec', 'cfo', 'ceo', 'coo', 'cio', 'cmo', 'cbo', 'cto', 'cso', 'Chief')
audit = ('Audit', 'Compliance')
all_depts = [sales, hr, accounting, marketing, it, infosec, executive, audit]
for dept in all_depts:
for common_title in dept:
if re.match('(^|\s)' + re.escape(common_title.lower()) + '($|\s)', linkedin_title.lower()):
return dept[0]
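# Illustrative behaviour of dept_wizard given the tuples above (titles are hypothetical):
#   dept_wizard('Account Executive')     -> 'Sales'
#   dept_wizard('Help Desk Technician')  -> 'IT'
# Because re.match anchors at the start of the string, a keyword that appears later in the
# title (e.g. 'Senior Account Executive') falls through and the method returns None.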
def out_csv(self, filename, company, schema):
'''
Write data from self.employee_data to a CSV. This data is populated from the cycle_users method.
:param str filename: output CSV filename
:rtype: None
'''
if not self.employee_data:
return None
csv_file = csv.writer(open(filename, "w"))
for name, emp_data in self.employee_data.items():
emails = self.email_formatter(name, company, schema)
for email in emails:
first_name = name.split()[0]
last_name = name.split()[-1]
data = [first_name, last_name, email] + emp_data
csv_file.writerow(data)
def email_formatter(self, name, company, schema):
'''
This method is called by out_csv to determine what format the generated emails should be output in. Each entry
in schema is a format string applied to the parsed name and the company-derived domain.
:param str name: employee name
:param str company: company name used to build the domain
:param list schema: list of email format strings (see verify_schema)
:rtype: list emails
'''
emails = []
for selected in schema:
names = name.split()
email = selected.format(first=names[0], last=names[-1], domain=company.replace(' ', ''))
emails.append(email)
return emails
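# Illustrative example (hypothetical person and company): with schema ['{first:.1}{last}@{domain}.com'],
#   email_formatter('Jane Doe', 'Acme Corp', schema)  ->  ['JDoe@AcmeCorp.com']
# Note that the case of the name and company is preserved; no lowercasing happens here.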
@staticmethod
def verify_schema(schema):
'''
Verify the chosen email schema is valid.
:param str schema: A comma separated string containing one or more email schema formats
:rtype: list of all valid schemas
'''
schema = schema.split(',')
for email_format in schema:
try:
email_format.format(first='test', last='test', domain='test')
except KeyError:
raise SyntaxWarning('Invalid schema: ' + email_format)
return schema
def list_search(target, term, return_results=False):
'''
Prints list of possible geographic regions & industries per LinkedIn Documentation
Specify -l by itself to print all files, or specify -g <term> / -i <term> to search
for matching geographic regions and industries simultaneously.
Exact matches are required for faceted searches of georegions or industries.
:param str target: Search for a matching georegion or industry by specifying -g or -i parameters
:param str term: Search for specific matching term in -g or -i by adding a term argument to search for
:return: List of matches
'''
print("========================= {0} =========================".format(target.capitalize()))
try:
refs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'refs/')
if target == 'georegion':
search_file = open(os.path.join(refs, 'georegions.txt'), 'r')
elif target == 'industry':
search_file = open(os.path.join(refs, 'industries.txt'), 'r')
else:
return os.EX_NOINPUT
except IOError as unfound_file:
print(
"[-] You are missing the {0}.txt file from your ./refs installation directory. Please re-install scrapedin.".format(
target))
print(unfound_file)
return os.EX_IOERR
results = []
for i, line in enumerate(search_file.readlines()):
if term.lower() in line.lower():
results.append([str(line.split()[0]), str(' '.join(line.split()[1:])).strip('\n')])
# print('[{0}] {1}'.format(i, line.strip('\n')))
search_file.close()
if return_results:
print("Matches found: ", results)
return results[0][0]
print(tabulate(results, headers=['CODE', 'NAME'], tablefmt="orgtbl"))
return os.EX_OK
def main():
parser = argparse.ArgumentParser(epilog=HELP_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-m', dest='max_users', type=int, default=float('inf'),
help='The maximum amount of employees to scrape (default: all)')
parser.add_argument('-l', dest='list_search', action='store_true', default=False,
help='List search for geographic regions and industries. (optionally filtered with -g or -i)')
parser.add_argument('-L', dest='loglvl', action='store', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO', help='set the logging level')
parser.add_argument('-U', dest='url', action='store', default=None,
help='Explicitly set the company URL to scrape from')
parser.add_argument('-g', dest='georegion', action='store', default=None,
help='Filter results by geographic region')
parser.add_argument('-i', dest='industry', action='store', default=None, help='Filter results by industry')
parser.add_argument('-j', dest='job_title', action='store', default=None, help='Filter results by job title')
required = parser.add_argument_group('required arguments')
required.add_argument('-c', dest='company', action='store', default=None,
help='The company name to scrape users from LinkedIn. Enter "NONE" to scrape users without company')
required.add_argument('-o', dest='filename', help='The output filename')
required.add_argument('-u', dest='username', help='LinkedIn Login Email')
required.add_argument('-s', dest='schema', default='{first:.1}{last}@{domain}.com', help='The email format to use')
args = parser.parse_args()
# If -l is true, search by term or print entire file. -o and -u are not required
if args.list_search:
if not args.georegion and not args.industry:
list_search(target='georegion', term=' ')
list_search(target='industry', term=' ')
if args.georegion:
list_search(target='georegion', term=args.georegion)
if args.industry:
list_search(target='industry', term=args.industry)
return os.EX_OK
# If -l is False (default), -o and -u are required
if not args.filename or not args.username:
parser.error('the following arguments are required to begin scraping -o, -u')
return os.EX_NOINPUT
if not args.company:
parser.error(
'the following arguments are required to begin scraping -c\n\tNOTE: If you are trying to scrape users without a company target, use "-c NONE"')
try:
args.schema = Webpage.verify_schema(args.schema)
except SyntaxWarning as invalid_schema:
print(invalid_schema)
sys.exit(os.EX_SOFTWARE)
if not args.filename.endswith('.csv'):
args.filename += '.csv'
if args.company:
if '"' in args.company:
args.company = args.company.replace('"', '')
web = None
try:
password = getpass.getpass(prompt='LinkedIn Password: ')
web = Webpage(loglvl=args.loglvl)
web.login(username=args.username, password=password)
del password
if not args.url:
url = 'https://www.linkedin.com/search/results/people?'
if args.georegion or args.industry or args.job_title:
filtered_url = web.apply_filters(args.company, url, args.georegion, args.industry, args.job_title)
# Error handler for when apply_filters returns an os.EX_ code
if isinstance(filtered_url, (int)):
return filtered_url
# Filtered URL
web.cycle_users(args.company, filtered_url, args.max_users)
else: # Default URL
url += "company={0}".format(args.company)
web.cycle_users(args.company, url, args.max_users)
else: # User-defined URL
web.cycle_users(args.company, args.url, args.max_users)
web.out_csv(filename=args.filename, company=args.company, schema=args.schema)
web.page.quit()
except KeyboardInterrupt:
if web:
web.out_csv(args.filename, args.company, args.schema)
return os.EX_OK
if __name__ == '__main__':
if sys.version_info < (3, 3):
print('[-] this script requires Python 3.3+')
sys.exit(os.EX_SOFTWARE)
sys.exit(main())
| max_users = float('inf') | conditional_block |
scrapedin.py | #!/usr/bin/env python
import sys
import argparse
import re
import csv
import os
import getpass
import platform
import logging
import time
try:
from tabulate import tabulate
except ImportError:
print('Missing required package: Tabulate')
sys.exit(os.EX_SOFTWARE)
try:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.common import exceptions
except ImportError:
print('Missing required package: selenium\n')
print('Did you forget to run setup.py?\npython3 setup.py install')
sys.exit(os.EX_SOFTWARE)
if not sys.version_info[0:2] >= (3, 3):
print(
"This script depends on python version 3.3 or higher. We recommend the latest version available from 'https://www.python.org/downloads/'")
sys.exit(os.EX_SOFTWARE)
ARCH = platform.architecture()[0]
if '64' in ARCH:
ARCH = '64'
elif '32' in ARCH:
ARCH = '32'
else:
print('[-] Your architecture isn\'t supported. What are you using, DOS!? Doofus >_<')
sys.exit(os.EX_SOFTWARE)
HELP_EPILOG = """
You may specify multiple formats by using a comma. Using {domain} will
dynamically pick the domain based off of the company name. Here are some common
formats:
Format Schema
--------------------------- -----------------------------
[First Initial] [Last Name] {first:.1}{last}@{domain}.com
[First Name] [Last Initial] {first}{last:.1}@{domain}.com
[First Name].[Last Name] {first}.{last}@{domain}.com
[Last Name].[First Name] {last}.{first}@{domain}.com
"""
USER_ENTITY_CLASS = 'entity-result__item' # Historically search-result__wrapper
class Webpage:
def __init__(self, loglvl='INFO', geckodriver=None):
capabilities = webdriver.DesiredCapabilities().FIREFOX
capabilities["marionette"] = True
self.page = webdriver.Firefox(
executable_path=os.path.join(os.path.dirname(__file__), 'webdriver', ARCH, 'geckodriver'),
log_path='/dev/null', capabilities=capabilities)
self.employee_data = {}
self.log = logging.getLogger(
logging.basicConfig(level=getattr(logging, loglvl),
format="%(name)-15s %(levelname)-10s %(asctime)-10s %(message)s")
)
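# Note (added observation): logging.basicConfig() returns None, so the getLogger(...) call above
# effectively configures logging and then fetches the root logger.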
def enter_data(self, field, text):
'''
Enter data by providing the Id of the HTML field and the text you would like to enter.
web = Webpage()
web.enter_data('your_field', 'the data you want entered')
:param str field: HTML field to enter data
:param str text: The text you want entered into "field"
:rtype: None
'''
self.page.execute_script("document.getElementById(\"" + field + "\").setAttribute('value', \"" + text + "\")")
@staticmethod
def sanitize_name(name):
'''
Takes a given name and sanitizes it for use as an email address. If a name contains more than 4 words
or non-alphabetic characters other than ',', '(' and ')' then it will NOT be used.
If a name contains a ',' then we can safely split on that character and reliably detect the user's name.
Names like: John (Allan) Doe are normally put in place for people who go by a nickname. The script will break
these names into two separate names:
- John Doe
- Allan Doe
:param str name: person's name
:rtype: list or None
'''
if ',' in name:
name = name.split(',')[0]
if len(name.split()) > 4:
# Too many words in the name..Won't be able to reliably create an email address.
return None
if re.search("[(].*[)]", name):
name_list = []
# Sometimes nicknames are put in parentheses. This will find those and create two email guesses.
# First Last
# text_in_parenthesis Last
nickname = re.search("[(].*[)]", name).group()
true_name = ' '.join([i.strip() for i in name.split(nickname)])
if re.match("^[' a-zA-Z']*$", true_name):
name_list.append(true_name)
fixed_nickname = nickname.replace('(', '').replace(')', '').strip()
nick = [i.strip() for i in name.replace(nickname, '').split()]
nick[0] = fixed_nickname
nick = ' '.join(nick)
if re.match("^[' a-zA-Z']*$", nick):
name_list.append(nick)
return name_list
elif re.match("^[' a-zA-Z']*$", name):
# Only matches if the name contains uppercase, lowercase or spaces.
return [name]
else:
return None
def login(self, username, password):
'''
Log in to the LinkedIn web page.
The only args required to use this function are the username and password to log into linkedin.
Example usage:
parser = argparse.ArgumentParser()
required = parser.add_argument_group('required arguments')
required.add_argument('-u', dest='username', required=True, help='LinkedIn Login Email')
args = parser.parse_args()
password = getpass.getpass(prompt='LinkedIn Password: ')
web = WebPage()
web.login(username, password)
:param str username: LinkedIn login email
:param str password: LinkedIn password
:rtype: None
'''
self.page.get("https://www.linkedin.com/")
WebDriverWait(self.page, 10).until(EC.presence_of_element_located((By.NAME, 'session_key')))
self.enter_data('session_key', username)
self.enter_data('session_password', password)
# Find and click submit button by type
submit_btn = self.page.find_elements_by_xpath("//button[@type='submit']")[0]
submit_btn.click()
def apply_filters(self, company, url=None, georegion=None, industry=None, job_title=None):
|
def cycle_users(self, company, url, max_users=None):
'''
You must run the login method before cycle_users will run. Once the login method has run, cycle_users can
collect the names and titles of employees at the company you specify. This method requires the company name
and optional value max_users from argparse. See the login method for a code example.
:param str company: target company name
:param int max_users: maximum number of employees to scrape (defaults to no limit)
:rtype: None (self.employee_data will be populated with names, titles and profile URLs)
'''
# Wait for home screen after login
WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.XPATH, "//*[@id='global-nav']/div")))
self.log.debug("URL: " + str(url))
try:
self.page.get(url)
except exceptions.WebDriverException as err:
self.log.error("An error occurred while getting the company page: \n{0}".format(err))
self.log.critical("[!] Check the company name or URL used")
return os.EX_USAGE
count = 1 # WebElements cannot be used for iteration..
current_page = 1
if not max_users:
max_users = float('inf')
while max_users > len(self.employee_data) and current_page < 100:
self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);")
try:
WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.CLASS_NAME, 'active')))
# Check if the page contains the "no search results" class. This means we are out of users
# This will raise a NoSuchElementException if the element is not found
self.page.find_element_by_class_name("search-no-results__container")
break
except exceptions.NoSuchElementException:
pass
except exceptions.TimeoutException:
# Page didn't load correctly after 20 seconds, cannot reliably recover. Bailing.
return
try:
WebDriverWait(self.page, 5).until(EC.visibility_of_element_located((By.CLASS_NAME, 'name')))
except exceptions.TimeoutException:
try:
if self.page.find_elements_by_class_name('actor-name'):
# If this is true, the page is filled with "LinkedIn Member". It doesn't mean there's no users
# available on the page. If this is the case, click next.
current_page += 1
if 'disabled=""' in self.page.find_element_by_class_name(
"artdeco-pagination__button--next").parent.page_source:
# If this is true then the Next button is "disabled". This happens when there's no more pages
break
self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name(
"artdeco-pagination__button--next"))
continue
except exceptions.NoSuchElementException:
# Reached when there's no more users available on the page.
break
try:
# Get the current page number (at the bottom of a company search)
# The value returned from the HTML looks like this: '1\nCurrent page'
new_page = int(self.page.find_elements_by_class_name('active')[-1].text.split()[0])
except ValueError:
# If there's only one page, linkedin doesn't show page numbers at the bottom. The only result
# will be the text string "people", therefore when we try to convert the value to int we raise
# an exception
new_page = 1
except IndexError:
# Page likely came back with "No more users" even though there appeared to be pages left
return
except exceptions.StaleElementReferenceException:
# Handles a race condition where elements are found but are not populated yet.
continue
for pagnation in self.page.find_elements_by_class_name("artdeco-pagination__button"):
if pagnation.text != "Next":
continue
if not pagnation.is_enabled():
# Next button is disabled.. This is linkedins way of saying "We are done here"
return
if current_page != new_page:
# The script is too fast. This verifies a new page has loaded before proceeding.
continue
# Scrolling to the bottom of the page loads all elements (employee names)
self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Give the elements a second to populate fully
time.sleep(1)
# finds each employee element by a globally set class name
employee_elements = self.page.find_elements_by_xpath("//div[@class='entity-result__item']")
self.log.debug(employee_elements)
for employee in employee_elements:
if count > len(employee_elements):
count = 1
current_page += 1
# click next page
try:
self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name(
"artdeco-pagination__button--next"))
break
except exceptions.NoSuchElementException:
# No more pages
return os.EX_OK
try:
# The elements of LinkedIn change frequently, but the text data of the elements is more-or-less reliable.
# It's better to split via newline than parse xpaths.
data = employee.text.split('\n')
if "LinkedIn Member" not in data[0] and len(data) >= 5:
name = Webpage.sanitize_name(data[0])
title_text = data[3]
region = data[4]
else:
count += 1
continue
try:
# This element does not always exist, so a NoSuchElementException may be raised here and must be handled.
alt_text = employee.find_element_by_class_name('search-result__snippets').text
except exceptions.NoSuchElementException:
alt_text = False
title, _, company = title_text.partition(' at ')
if alt_text and not company:
alt_text = alt_text.lstrip('Current: ')
t, _, company = alt_text.partition(' at ')
if not title:
title = t
dept = self.dept_wizard(title)
# If company is still empty at this point, bail out to unemployment
company = company or 'UNEMPLOYED'
url = employee.find_element_by_xpath("//a[@class='app-aware-link']").get_attribute('href')
except (IndexError, exceptions.NoSuchElementException):
count += 1
continue
if not name:
continue
for person in name:
self.log.info(person)
self.employee_data.update({person: [dept, title, company, region, url]})
count += 1
@staticmethod
def dept_wizard(linkedin_title):
'''
Attempt to determine which department a given user belongs to based on their title. If a department cannot
be reliably determined then None is returned. It is advised to compare the raw, untouched
titles to the output of dept_wizard(); blindly trusting dept_wizard() could lead to some awkward situations.
If a title matches any of the values in the tuples below, the first value in the tuple will populate the
department column in the CSV.
:param str linkedin_title: a string reflecting an employee's title
:rtype: str or None
'''
sales = ('Sales', 'Account Manager', 'Account Executive', 'New Business', 'Relationship Manager')
hr = ('HR', 'Human Resources', 'Benefits Admin', 'Payroll', 'Talent', 'Recruiter')
accounting = ('Accounting', 'Accountant', 'Financial', 'Finance', 'Billing')
marketing = ('Marketing', 'Content', 'Brand', 'seo', 'Social Media')
it = (
'IT', 'Information Technology', 'Network Engineer', 'Network Admin', 'System Admin', 'sysadmin',
'sys admin',
'Help Desk', 'ITHD', 'Developer', 'Dev')
infosec = (
'Infosec', 'Red Team', 'Blue Team', 'Offensive', 'Defensive', 'Pentest', 'Penetration',
'Information Security')
executive = ('Executive', 'Exec', 'cfo', 'ceo', 'coo', 'cio', 'cmo', 'cbo', 'cto', 'cso', 'Chief')
audit = ('Audit', 'Compliance')
all_depts = [sales, hr, accounting, marketing, it, infosec, executive, audit]
for dept in all_depts:
for common_title in dept:
if re.match('(^|\s)' + re.escape(common_title.lower()) + '($|\s)', linkedin_title.lower()):
return dept[0]
def out_csv(self, filename, company, schema):
'''
Write data from self.employee_data to a CSV. This data is populated from the cycle_users method.
:param str filename: output CSV filename
:rtype: None
'''
if not self.employee_data:
return None
csv_file = csv.writer(open(filename, "w"))
for name, emp_data in self.employee_data.items():
emails = self.email_formatter(name, company, schema)
for email in emails:
first_name = name.split()[0]
last_name = name.split()[-1]
data = [first_name, last_name, email] + emp_data
csv_file.writerow(data)
def email_formatter(self, name, company, schema):
'''
This method is called by out_csv to determine what format the generated emails should be output in. Each entry
in schema is a format string applied to the parsed name and the company-derived domain.
:param str name: employee name
:param str company: company name used to build the domain
:param list schema: list of email format strings (see verify_schema)
:rtype: list emails
'''
emails = []
for selected in schema:
names = name.split()
email = selected.format(first=names[0], last=names[-1], domain=company.replace(' ', ''))
emails.append(email)
return emails
@staticmethod
def verify_schema(schema):
'''
Verify the chosen email schema is valid.
:param str schema: A comma separated string containing one or more email schema formats
:rtype: list of all valid schemas
'''
schema = schema.split(',')
for email_format in schema:
try:
email_format.format(first='test', last='test', domain='test')
except KeyError:
raise SyntaxWarning('Invalid schema: ' + email_format)
return schema
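# Illustrative check (schemas are examples): verify_schema('{first}.{last}@{domain}.com,{middle}@{domain}.com')
# raises SyntaxWarning for the second entry because '{middle}' is not a supported placeholder;
# a string with only valid placeholders is returned as a list of format strings.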
def list_search(target, term, return_results=False):
'''
Prints list of possible geographic regions & industries per LinkedIn Documentation
Specify -l by itself to print all files, or specify -g <term> / -i <term> to search
for matching geographic regions and industries simultaneously.
Exact matches are required for faceted searches of georegions or industries.
:param str target: Search for a matching georegion or industry by specifying -g or -i parameters
:param str term: Search for specific matching term in -g or -i by adding a term argument to search for
:return: List of matches
'''
print("========================= {0} =========================".format(target.capitalize()))
try:
refs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'refs/')
if target == 'georegion':
search_file = open(os.path.join(refs, 'georegions.txt'), 'r')
elif target == 'industry':
search_file = open(os.path.join(refs, 'industries.txt'), 'r')
else:
return os.EX_NOINPUT
except IOError as unfound_file:
print(
"[-] You are missing the {0}.txt file from your ./refs installation directory. Please re-install scrapedin.".format(
target))
print(unfound_file)
return os.EX_IOERR
results = []
for i, line in enumerate(search_file.readlines()):
if term.lower() in line.lower():
results.append([str(line.split()[0]), str(' '.join(line.split()[1:])).strip('\n')])
# print('[{0}] {1}'.format(i, line.strip('\n')))
search_file.close()
if return_results:
print("Matches found: ", results)
return results[0][0]
print(tabulate(results, headers=['CODE', 'NAME'], tablefmt="orgtbl"))
return os.EX_OK
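# Illustrative call (file contents are an assumption): list_search('industry', term='bank', return_results=True)
# prints the matching rows and returns the CODE column of the first match, e.g. '41';
# without return_results it prints a CODE/NAME table and returns os.EX_OK.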
def main():
parser = argparse.ArgumentParser(epilog=HELP_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-m', dest='max_users', type=int, default=float('inf'),
help='The maximum amount of employees to scrape (default: all)')
parser.add_argument('-l', dest='list_search', action='store_true', default=False,
help='List search for geographic regions and industries. (optionally filtered with -g or -i)')
parser.add_argument('-L', dest='loglvl', action='store', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO', help='set the logging level')
parser.add_argument('-U', dest='url', action='store', default=None,
help='Explicitly set the company URL to scrape from')
parser.add_argument('-g', dest='georegion', action='store', default=None,
help='Filter results by geographic region')
parser.add_argument('-i', dest='industry', action='store', default=None, help='Filter results by industry')
parser.add_argument('-j', dest='job_title', action='store', default=None, help='Filter results by job title')
required = parser.add_argument_group('required arguments')
required.add_argument('-c', dest='company', action='store', default=None,
help='The company name to scrape users from LinkedIn. Enter "NONE" to scrape users without company')
required.add_argument('-o', dest='filename', help='The output filename')
required.add_argument('-u', dest='username', help='LinkedIn Login Email')
required.add_argument('-s', dest='schema', default='{first:.1}{last}@{domain}.com', help='The email format to use')
args = parser.parse_args()
# If -l is true, search by term or print entire file. -o and -u are not required
if args.list_search:
if not args.georegion and not args.industry:
list_search(target='georegion', term=' ')
list_search(target='industry', term=' ')
if args.georegion:
list_search(target='georegion', term=args.georegion)
if args.industry:
list_search(target='industry', term=args.industry)
return os.EX_OK
# If -l is False (default), -o and -u are required
if not args.filename or not args.username:
parser.error('the following arguments are required to begin scraping -o, -u')
return os.EX_NOINPUT
if not args.company:
parser.error(
'the following arguments are required to begin scraping -c\n\tNOTE: If you are trying to scrape users without a company target, use "-c NONE"')
try:
args.schema = Webpage.verify_schema(args.schema)
except SyntaxWarning as invalid_schema:
print(invalid_schema)
sys.exit(os.EX_SOFTWARE)
if not args.filename.endswith('.csv'):
args.filename += '.csv'
if args.company:
if '"' in args.company:
args.company = args.company.replace('"', '')
web = None
try:
password = getpass.getpass(prompt='LinkedIn Password: ')
web = Webpage(loglvl=args.loglvl)
web.login(username=args.username, password=password)
del password
if not args.url:
url = 'https://www.linkedin.com/search/results/people?'
if args.georegion or args.industry or args.job_title:
filtered_url = web.apply_filters(args.company, url, args.georegion, args.industry, args.job_title)
# Error handler for when apply_filters returns an os.EX_ code
if isinstance(filtered_url, (int)):
return filtered_url
# Filtered URL
web.cycle_users(args.company, filtered_url, args.max_users)
else: # Default URL
url += "company={0}".format(args.company)
web.cycle_users(args.company, url, args.max_users)
else: # User-defined URL
web.cycle_users(args.company, args.url, args.max_users)
web.out_csv(filename=args.filename, company=args.company, schema=args.schema)
web.page.quit()
except KeyboardInterrupt:
if web:
web.out_csv(args.filename, args.company, args.schema)
return os.EX_OK
if __name__ == '__main__':
if sys.version_info < (3, 3):
print('[-] this script requires Python 3.3+')
sys.exit(os.EX_SOFTWARE)
sys.exit(main())
| '''
Build the faceted LinkedIn search URL that is later passed to cycle_users, applying search
parameters such as location, company, industry and job title.
This function will return the full URL.
:param str company: target company name
:param str url: default (or custom) linkedin url for faceted linkedin search
:param str georegion: geographic region (-g) to filter
:param str industry: industry (-i) to filter
:param str job_title: job title (-j) to filter
:rtype: string (url if successful) or int (Unix-style error integer if error is encountered)
'''
filters = []
if not url:
url = 'https://www.linkedin.com/search/results/people/?'
# Filter by Company
# Allows the user to scrape linkedin without specifying a target company, but must do so with intent
if company != "NONE":
filters.append('company={0}'.format(company))
# Filter by Geographic Region
if georegion:
# region object is created for future-proofing in the event new filters become available or formatting changes
region = {}
try:
region['full_line'] = list_search('georegion', term=georegion, return_results=True)
region['name'] = region['full_line'].split('\t')[-1]
region['code'] = region['full_line'].split('\t')[0].split(
'.') # Should be continent.country.province/state.city_id
region.update({key: value.replace(' ', '') for (key, value) in
zip(('continent', 'country', 'state', 'id'), region['code'])})
filters.append('facetGeoRegion=%5B"{0}%3A{1}"%5D'.format(region['country'], region['id']))
except (IndexError, KeyError, ValueError):
self.log.error("[-] The region you chose is too broad to search. Search by City only")
return os.EX_NOINPUT
# Filter by Industry
if industry:
ind = list_search('industry', term=industry, return_results=True)
if ind:
i_code = ind.split('\t')[0].replace(' ', '')
filters.append('facetIndustry=%5B"{0}"%5D'.format(i_code))
filters.append("origin=FACETED_SEARCH")
if job_title:
filters.append('title={0}'.format(job_title))
else:
filters.append('title=')
# Join additional parameters to the URL by ampersand (&). Order doesn't matter.
filters.append('origin=FACETED_SEARCH')
url += "&".join(filters).lstrip("&") if len(filters) > 1 else filters[0]
self.log.debug("Filtered URL: " + url)
return url | identifier_body |
scrapedin.py | #!/usr/bin/env python
import sys
import argparse
import re
import csv
import os
import getpass
import platform
import logging
import time
try:
from tabulate import tabulate
except ImportError:
print('Missing required package: Tabulate')
sys.exit(os.EX_SOFTWARE)
try:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.common import exceptions
except ImportError:
print('Missing required package: selenium\n')
print('Did you forget to run setup.py?\npython3 setup.py install')
sys.exit(os.EX_SOFTWARE)
if not sys.version_info[0:2] >= (3, 3):
print(
"This script depends on python version 3.3 or higher. We recommend the latest version available from 'https://www.python.org/downloads/'")
sys.exit(os.EX_SOFTWARE)
ARCH = platform.architecture()[0]
if '64' in ARCH:
ARCH = '64'
elif '32' in ARCH:
ARCH = '32'
else:
print('[-] Your architecture isn\'t supported. What are you using, DOS!? Doofus >_<')
sys.exit(os.EX_SOFTWARE)
HELP_EPILOG = """
You may specify multiple formats by using a comma. Using {domain} will
dynamically pick the domain based off of the company name. Here are some common
formats:
Format Schema
--------------------------- -----------------------------
[First Initial] [Last Name] {first:.1}{last}@{domain}.com
[First Name] [Last Initial] {first}{last:.1}@{domain}.com
[First Name].[Last Name] {first}.{last}@{domain}.com
[Last Name].[First Name] {last}.{first}@{domain}.com
"""
USER_ENTITY_CLASS = 'entity-result__item' # Historically search-result__wrapper
class Webpage:
def __init__(self, loglvl='INFO', geckodriver=None):
capabilities = webdriver.DesiredCapabilities().FIREFOX
capabilities["marionette"] = True
self.page = webdriver.Firefox(
executable_path=os.path.join(os.path.dirname(__file__), 'webdriver', ARCH, 'geckodriver'),
log_path='/dev/null', capabilities=capabilities)
self.employee_data = {}
self.log = logging.getLogger(
logging.basicConfig(level=getattr(logging, loglvl),
format="%(name)-15s %(levelname)-10s %(asctime)-10s %(message)s")
)
def enter_data(self, field, text):
'''
Enter data by providing the Id of the HTML field and the text you would like to enter.
web = Webpage()
web.enter_data('your_field', 'the data you want entered')
:param str field: HTML field to enter data
:param str text: The text you want entered into "field"
:rtype: None
'''
self.page.execute_script("document.getElementById(\"" + field + "\").setAttribute('value', \"" + text + "\")")
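# Example of the JavaScript this builds (field and value are illustrative): enter_data('session_key', 'you@example.com')
# executes:  document.getElementById("session_key").setAttribute('value', "you@example.com")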
@staticmethod
def sanitize_name(name):
'''
Takes a given name and sanitizes it for use as an email address. If a name contains more than 4 words
or non-alphabetic characters other than ',', '(' and ')' then it will NOT be used.
If a name contains a ',' then we can safely split on that character and reliably detect the user's name.
Names like: John (Allan) Doe are normally put in place for people who go by a nickname. The script will break
these names into two separate names:
- John Doe
- Allan Doe
:param str name: person's name
:rtype: list or None
'''
if ',' in name:
name = name.split(',')[0]
if len(name.split()) > 4:
# Too many words in the name..Won't be able to reliably create an email address.
return None
if re.search("[(].*[)]", name):
name_list = []
# Sometimes nicknames are put in parentheses. This will find those and create two email guesses.
# First Last
# text_in_parenthesis Last
nickname = re.search("[(].*[)]", name).group()
true_name = ' '.join([i.strip() for i in name.split(nickname)])
if re.match("^[' a-zA-Z']*$", true_name):
name_list.append(true_name)
fixed_nickname = nickname.replace('(', '').replace(')', '').strip()
nick = [i.strip() for i in name.replace(nickname, '').split()]
nick[0] = fixed_nickname
nick = ' '.join(nick)
if re.match("^[' a-zA-Z']*$", nick):
name_list.append(nick)
return name_list
elif re.match("^[' a-zA-Z']*$", name):
# Only matches if the name contains uppercase, lowercase or spaces.
return [name]
else:
return None
def login(self, username, password):
'''
Log in to the LinkedIn web page.
The only args required to use this function are the username and password to log into linkedin.
Example usage:
parser = argparse.ArgumentParser()
required = parser.add_argument_group('required arguments')
required.add_argument('-u', dest='username', required=True, help='LinkedIn Login Email')
args = parser.parse_args()
password = getpass.getpass(prompt='LinkedIn Password: ')
web = WebPage()
web.login(username, password)
:param str username: LinkedIn login email
:param str password: LinkedIn password
:rtype: None
'''
self.page.get("https://www.linkedin.com/")
WebDriverWait(self.page, 10).until(EC.presence_of_element_located((By.NAME, 'session_key')))
self.enter_data('session_key', username)
self.enter_data('session_password', password)
# Find and click submit button by type
submit_btn = self.page.find_elements_by_xpath("//button[@type='submit']")[0]
submit_btn.click()
def apply_filters(self, company, url=None, georegion=None, industry=None, job_title=None):
'''
Build the faceted LinkedIn search URL that is later passed to cycle_users, applying search
parameters such as location, company, industry and job title.
This function will return the full URL.
:param str company: target company name
:param str url: default (or custom) linkedin url for faceted linkedin search
:param str georegion: geographic region (-g) to filter
:param str industry: industry (-i) to filter
:param str job_title: job title (-j) to filter
:rtype: string (url if successful) or int (Unix-style error integer if error is encountered)
'''
filters = []
if not url:
url = 'https://www.linkedin.com/search/results/people/?'
# Filter by Company
# Allows the user to scrape linkedin without specifying a target company, but must do so with intent
if company != "NONE":
filters.append('company={0}'.format(company))
# Filter by Geographic Region
if georegion:
# region object is created for future-proofing in the event new filters become available or formatting changes
region = {}
try:
region['full_line'] = list_search('georegion', term=georegion, return_results=True)
region['name'] = region['full_line'].split('\t')[-1]
region['code'] = region['full_line'].split('\t')[0].split(
'.') # Should be continent.country.province/state.city_id
region.update({key: value.replace(' ', '') for (key, value) in
zip(('continent', 'country', 'state', 'id'), region['code'])})
filters.append('facetGeoRegion=%5B"{0}%3A{1}"%5D'.format(region['country'], region['id']))
except (IndexError, KeyError, ValueError):
self.log.error("[-] The region you chose is too broad to search. Search by City only")
return os.EX_NOINPUT
# Filter by Industry
if industry:
ind = list_search('industry', term=industry, return_results=True)
if ind:
i_code = ind.split('\t')[0].replace(' ', '')
filters.append('facetIndustry=%5B"{0}"%5D'.format(i_code))
filters.append("origin=FACETED_SEARCH")
if job_title:
filters.append('title={0}'.format(job_title))
else:
filters.append('title=')
# Join additional parameters to the URL by ampersand (&). Order doesn't matter.
filters.append('origin=FACETED_SEARCH')
url += "&".join(filters).lstrip("&") if len(filters) > 1 else filters[0]
self.log.debug("Filtered URL: " + url)
return url
def cycle_users(self, company, url, max_users=None):
'''
You must run the login method before cycle_users will run. Once the login method has run, cycle_users can
collect the names and titles of employees at the company you specify. This method requires the company name
and optional value max_users from argparse. See the login method for a code example.
:param str company: target company name
:param int max_users: maximum number of employees to scrape (defaults to no limit)
:rtype: None (self.employee_data will be populated with names, titles and profile URLs)
'''
# Wait for home screen after login
WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.XPATH, "//*[@id='global-nav']/div")))
self.log.debug("URL: " + str(url))
try:
self.page.get(url)
except exceptions.WebDriverException as err:
self.log.error("An error occurred while getting the company page: \n{0}".format(err))
self.log.critical("[!] Check the company name or URL used")
return os.EX_USAGE
count = 1 # WebElements cannot be used for iteration..
current_page = 1
if not max_users:
max_users = float('inf')
while max_users > len(self.employee_data) and current_page < 100:
self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);")
try:
WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.CLASS_NAME, 'active')))
# Check if the page contains the "no search results" class. This means we are out of users
# This will raise a NoSuchElementException if the element is not found
self.page.find_element_by_class_name("search-no-results__container")
break
except exceptions.NoSuchElementException:
pass
except exceptions.TimeoutException:
# Page didn't load correctly after 20 seconds, cannot reliably recover. Bailing.
return
try:
WebDriverWait(self.page, 5).until(EC.visibility_of_element_located((By.CLASS_NAME, 'name')))
except exceptions.TimeoutException:
try:
if self.page.find_elements_by_class_name('actor-name'):
# If this is true, the page is filled with "LinkedIn Member". It doesn't mean there's no users
# available on the page. If this is the case, click next.
current_page += 1
if 'disabled=""' in self.page.find_element_by_class_name(
"artdeco-pagination__button--next").parent.page_source:
# If this is true then the Next button is "disabled". This happens when there's no more pages
break
self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name(
"artdeco-pagination__button--next"))
continue
except exceptions.NoSuchElementException:
# Reached when there's no more users available on the page.
break
try:
# Get the current page number (at the bottom of a company search)
# The value returned from the HTML looks like this: '1\nCurrent page'
new_page = int(self.page.find_elements_by_class_name('active')[-1].text.split()[0])
except ValueError:
# If there's only one page, linkedin doesn't show page numbers at the bottom. The only result
# will be the text string "people", therefore when we try to convert the value to int we raise
# an exception
new_page = 1
except IndexError:
# Page likely came back with "No more users" even though there appeared to be pages left
return
except exceptions.StaleElementReferenceException:
# Handles a race condition where elements are found but are not populated yet.
continue
for pagnation in self.page.find_elements_by_class_name("artdeco-pagination__button"):
if pagnation.text != "Next":
continue
if not pagnation.is_enabled():
# Next button is disabled.. This is linkedins way of saying "We are done here"
return
if current_page != new_page:
# The script is too fast. This verifies a new page has loaded before proceeding.
continue
# Scrolling to the bottom of the page loads all elements (employee names)
self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Give the elements a second to populate fully
time.sleep(1)
# finds each employee element by a globally set class name
employee_elements = self.page.find_elements_by_xpath("//div[@class='entity-result__item']")
self.log.debug(employee_elements)
for employee in employee_elements:
if count > len(employee_elements):
count = 1
current_page += 1
# click next page
try:
self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name(
"artdeco-pagination__button--next"))
break
except exceptions.NoSuchElementException:
# No more pages
return os.EX_OK
try:
# The elements of LinkedIn change frequently, but the text data of the elements is more-or-less reliable.
# It's better to split via newline than parse xpaths.
data = employee.text.split('\n')
if "LinkedIn Member" not in data[0] and len(data) >= 5:
name = Webpage.sanitize_name(data[0])
title_text = data[3]
region = data[4]
else:
count += 1
continue
try:
# This element does not always exist, so a NoSuchElementException may be raised here and must be handled.
alt_text = employee.find_element_by_class_name('search-result__snippets').text
except exceptions.NoSuchElementException:
alt_text = False
title, _, company = title_text.partition(' at ')
if alt_text and not company:
alt_text = alt_text.lstrip('Current: ')
t, _, company = alt_text.partition(' at ')
if not title:
title = t
dept = self.dept_wizard(title)
# If company is still empty at this point, bail out to unemployment
company = company or 'UNEMPLOYED'
url = employee.find_element_by_xpath("//a[@class='app-aware-link']").get_attribute('href')
except (IndexError, exceptions.NoSuchElementException):
count += 1
continue
if not name:
continue
for person in name:
self.log.info(person)
self.employee_data.update({person: [dept, title, company, region, url]})
count += 1
@staticmethod
def dept_wizard(linkedin_title):
'''
Attempt to determine which department a given user belongs to based on their title. If a department cannot
be reliably determined then None is returned. It is advised to compare the raw, untouched
titles to the output of dept_wizard(); blindly trusting dept_wizard() could lead to some awkward situations.
If a title matches any of the values in the tuples below, the first value in the tuple will populate the
department column in the CSV.
:param str linkedin_title: a string reflecting an employee's title
:rtype: str or None
'''
sales = ('Sales', 'Account Manager', 'Account Executive', 'New Business', 'Relationship Manager')
hr = ('HR', 'Human Resources', 'Benefits Admin', 'Payroll', 'Talent', 'Recruiter')
accounting = ('Accounting', 'Accountant', 'Financial', 'Finance', 'Billing')
marketing = ('Marketing', 'Content', 'Brand', 'seo', 'Social Media')
it = (
'IT', 'Information Technology', 'Network Engineer', 'Network Admin', 'System Admin', 'sysadmin',
'sys admin',
'Help Desk', 'ITHD', 'Developer', 'Dev')
infosec = (
'Infosec', 'Red Team', 'Blue Team', 'Offensive', 'Defensive', 'Pentest', 'Penetration',
'Information Security')
executive = ('Executive', 'Exec', 'cfo', 'ceo', 'coo', 'cio', 'cmo', 'cbo', 'cto', 'cso', 'Chief')
audit = ('Audit', 'Compliance')
all_depts = [sales, hr, accounting, marketing, it, infosec, executive, audit]
for dept in all_depts:
for common_title in dept:
if re.match('(^|\s)' + re.escape(common_title.lower()) + '($|\s)', linkedin_title.lower()):
return dept[0]
def out_csv(self, filename, company, schema):
'''
Write data from self.employee_data to a CSV. This data is populated from the cycle_users method.
:param str filename: output CSV filename
:rtype: None
'''
if not self.employee_data:
return None
csv_file = csv.writer(open(filename, "w"))
for name, emp_data in self.employee_data.items():
emails = self.email_formatter(name, company, schema)
for email in emails:
first_name = name.split()[0]
last_name = name.split()[-1]
data = [first_name, last_name, email] + emp_data
csv_file.writerow(data)
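# Resulting CSV layout, based on how cycle_users populates employee_data (one row per generated email):
#   first_name, last_name, email, department, title, company, region, profile_url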
def email_formatter(self, name, company, schema):
'''
This method is called by out_csv to determine what format the generated emails should be output in. Each entry
in schema is a format string applied to the parsed name and the company-derived domain.
:param str name: employee name
:param str company: company name used to build the domain
:param list schema: list of email format strings (see verify_schema)
:rtype: list emails
'''
emails = []
for selected in schema:
names = name.split()
email = selected.format(first=names[0], last=names[-1], domain=company.replace(' ', ''))
emails.append(email)
return emails
@staticmethod
def verify_schema(schema):
'''
Verify the chosen email schema is valid.
:param str schema: A comma separated string containing one or more email schema formats
:rtype: list of all valid schemas
'''
schema = schema.split(',')
for email_format in schema:
try:
email_format.format(first='test', last='test', domain='test')
except KeyError:
raise SyntaxWarning('Invalid schema: ' + email_format)
return schema
def list_search(target, term, return_results=False):
'''
Prints list of possible geographic regions & industries per LinkedIn Documentation
Specify -l by itself to print all files, or specify -g <term> / -i <term> to search
for matching geographic regions and industries simultaneously.
Exact matches are required for faceted searches of georegions or industries.
:param str target: Search for a matching georegion or industry by specifying -g or -i parameters
:param str term: Search for specific matching term in -g or -i by adding a term argument to search for
:return: List of matches
'''
print("========================= {0} =========================".format(target.capitalize()))
try:
refs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'refs/')
if target == 'georegion':
search_file = open(os.path.join(refs, 'georegions.txt'), 'r')
elif target == 'industry':
search_file = open(os.path.join(refs, 'industries.txt'), 'r')
else:
return os.EX_NOINPUT
except IOError as unfound_file:
print(
"[-] You are missing the {0}.txt file from your ./refs installation directory. Please re-install scrapedin.".format(
target))
print(unfound_file)
return os.EX_IOERR
results = []
for i, line in enumerate(search_file.readlines()):
if term.lower() in line.lower():
results.append([str(line.split()[0]), str(' '.join(line.split()[1:])).strip('\n')])
# print('[{0}] {1}'.format(i, line.strip('\n')))
search_file.close()
if return_results:
print("Matches found: ", results)
return results[0][0]
print(tabulate(results, headers=['CODE', 'NAME'], tablefmt="orgtbl"))
return os.EX_OK
def main():
parser = argparse.ArgumentParser(epilog=HELP_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-m', dest='max_users', type=int, default=float('inf'),
help='The maximum amount of employees to scrape (default: all)')
parser.add_argument('-l', dest='list_search', action='store_true', default=False,
help='List search for geographic regions and industries. (optionally filtered with -g or -i)')
parser.add_argument('-L', dest='loglvl', action='store', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO', help='set the logging level') | parser.add_argument('-U', dest='url', action='store', default=None,
help='Explicitly set the company URL to scrape from')
parser.add_argument('-g', dest='georegion', action='store', default=None,
help='Filter results by geographic region')
parser.add_argument('-i', dest='industry', action='store', default=None, help='Filter results by industry')
parser.add_argument('-j', dest='job_title', action='store', default=None, help='Filter results by job title')
required = parser.add_argument_group('required arguments')
required.add_argument('-c', dest='company', action='store', default=None,
help='The company name to scrape users from LinkedIn. Enter "NONE" to scrape users without company')
required.add_argument('-o', dest='filename', help='The output filename')
required.add_argument('-u', dest='username', help='LinkedIn Login Email')
required.add_argument('-s', dest='schema', default='{first:.1}{last}@{domain}.com', help='The email format to use')
args = parser.parse_args()
# If -l is true, search by term or print entire file. -o and -u are not required
if args.list_search:
if not args.georegion and not args.industry:
list_search(target='georegion', term=' ')
list_search(target='industry', term=' ')
if args.georegion:
list_search(target='georegion', term=args.georegion)
if args.industry:
list_search(target='industry', term=args.industry)
return os.EX_OK
# If -l is False (default), -o and -u are required
if not args.filename or not args.username:
parser.error('the following arguments are required to begin scraping -o, -u')
return os.EX_NOINPUT
if not args.company:
parser.error(
'the following arguments are required to begin scraping -c\n\tNOTE: If you are trying to scrape users without a company target, use "-c NONE"')
try:
args.schema = Webpage.verify_schema(args.schema)
except SyntaxWarning as invalid_schema:
print(invalid_schema)
sys.exit(os.EX_SOFTWARE)
if not args.filename.endswith('.csv'):
args.filename += '.csv'
if args.company:
if '"' in args.company:
args.company = args.company.replace('"', '')
web = None
try:
password = getpass.getpass(prompt='LinkedIn Password: ')
web = Webpage(loglvl=args.loglvl)
web.login(username=args.username, password=password)
del password
if not args.url:
url = 'https://www.linkedin.com/search/results/people?'
if args.georegion or args.industry or args.job_title:
filtered_url = web.apply_filters(args.company, url, args.georegion, args.industry, args.job_title)
# Error handler for when apply_filters returns an os.EX_ code
if isinstance(filtered_url, (int)):
return filtered_url
# Filtered URL
web.cycle_users(args.company, filtered_url, args.max_users)
else: # Default URL
url += "company={0}".format(args.company)
web.cycle_users(args.company, url, args.max_users)
else: # User-defined URL
web.cycle_users(args.company, args.url, args.max_users)
web.out_csv(filename=args.filename, company=args.company, schema=args.schema)
web.page.quit()
except KeyboardInterrupt:
if web:
web.out_csv(args.filename, args.company, args.schema)
return os.EX_OK
if __name__ == '__main__':
if sys.version_info < (3, 3):
print('[-] this script requires Python 3.3+')
sys.exit(os.EX_SOFTWARE)
sys.exit(main()) | random_line_split | |
scrapedin.py | #!/usr/bin/env python
import sys
import argparse
import re
import csv
import os
import getpass
import platform
import logging
import time
try:
from tabulate import tabulate
except ImportError:
print('Missing required package: Tabulate')
sys.exit(os.EX_SOFTWARE)
try:
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium import webdriver
from selenium.common import exceptions
except ImportError:
print('Missing required package: selenium\n')
print('Did you forget to run setup.py?\npython3 setup.py install')
sys.exit(os.EX_SOFTWARE)
if not sys.version_info[0:2] >= (3, 3):
print(
"This script depends on python version 3.3 or higher. We recommend the latest version available from 'https://www.python.org/downloads/'")
sys.exit(os.EX_SOFTWARE)
ARCH = platform.architecture()[0]
if '64' in ARCH:
ARCH = '64'
elif '32' in ARCH:
ARCH = '32'
else:
print('[-] Your architecture isn\'t supported. What are you using, DOS!? Doofus >_<')
sys.exit(os.EX_SOFTWARE)
HELP_EPILOG = """
You may specify multiple formats by using a comma. Using {domain} will
dynamically pick the domain based off of the company name. Here are some common
formats:
Format Schema
--------------------------- -----------------------------
[First Initial] [Last Name] {first:.1}{last}@{domain}.com
[First Name] [Last Initial] {first}{last:.1}@{domain}.com
[First Name].[Last Name] {first}.{last}@{domain}.com
[Last Name].[First Name] {last}.{first}@{domain}.com
"""
USER_ENTITY_CLASS = 'entity-result__item' # Historically search-result__wrapper
class Webpage:
def __init__(self, loglvl='INFO', geckodriver=None):
capabilities = webdriver.DesiredCapabilities().FIREFOX
capabilities["marionette"] = True
self.page = webdriver.Firefox(
executable_path=os.path.join(os.path.dirname(__file__), 'webdriver', ARCH, 'geckodriver'),
log_path='/dev/null', capabilities=capabilities)
self.employee_data = {}
logging.basicConfig(level=getattr(logging, loglvl),
format="%(name)-15s %(levelname)-10s %(asctime)-10s %(message)s")
# basicConfig() returns None, so configure logging first and then fetch a named logger.
self.log = logging.getLogger(__name__)
def enter_data(self, field, text):
'''
Enter data by providing the Id of the HTML field and the text you would like to enter.
web = Webpage()
web.enter_data('your_field', 'the data you want entered')
:param str field: HTML field to enter data
:param str text: The text you want entered into "field"
:rtype: None
'''
self.page.execute_script("document.getElementById(\"" + field + "\").setAttribute('value', \"" + text + "\")")
@staticmethod
def sanitize_name(name):
'''
Takes a given name and sanitizes it for use as an email address. If a name contains more than 4 words,
or characters other than letters, spaces, apostrophes, commas and parentheses, then it will NOT be used.
If a name contains a , then we can safely split on that character and reliably detect the users name.
Names like: John (Allan) Doe are normally put in place for people who go by a nickname. The script will break
these names into two separate names:
- John Doe
- Allan Doe
:param str name: Persons name
:rtype: str or None
'''
if ',' in name:
name = name.split(',')[0]
if len(name.split()) > 4:
# Too many words in the name; we won't be able to reliably create an email address.
return None
if re.search("[(].*[)]", name):
name_list = []
# Sometimes nicknames are put in parenthesis. This will find those and create two email guesses.
# First Last
# text_in_parenthesis Last
nickname = re.search("[(].*[)]", name).group()
true_name = ' '.join([i.strip() for i in name.split(nickname)])
if re.match("^[' a-zA-Z']*$", true_name):
name_list.append(true_name)
fixed_nickname = nickname.replace('(', '').replace(')', '').strip()
nick = [i.strip() for i in name.replace(nickname, '').split()]
nick[0] = fixed_nickname
nick = ' '.join(nick)
if re.match("^[' a-zA-Z']*$", nick):
name_list.append(nick)
return name_list
elif re.match("^[' a-zA-Z']*$", name):
# Only matches if the name contains letters, spaces or apostrophes.
return [name]
else:
return None
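# Illustrative examples with hypothetical names (comment only, not executed):
#   sanitize_name('Jane Doe')         -> ['Jane Doe']
#   sanitize_name('John (Allan) Doe') -> ['John Doe', 'Allan Doe']
#   sanitize_name('A B C D E')        -> None (more than 4 words)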
def login(self, username, password):
'''
Login to the linked in web page.
The only args required to use this function are the username and password to log into linkedin.
Example usage:
parser = argparse.ArgumentParser()
required = parser.add_argument_group('required arguments')
required.add_argument('-u', dest='username', required=True, help='LinkedIn Login Email')
args = parser.parse_args()
password = getpass.getpass(prompt='LinkedIn Password: ')
web = WebPage()
web.login(username, password)
:param str username: LinkedIn login email
:param str password: LinkedIn login password
:rtype: None
'''
self.page.get("https://www.linkedin.com/")
WebDriverWait(self.page, 10).until(EC.presence_of_element_located((By.NAME, 'session_key')))
self.enter_data('session_key', username)
self.enter_data('session_password', password)
# Find and click submit button by type
submit_btn = self.page.find_elements_by_xpath("//button[@type='submit']")[0]
submit_btn.click()
def apply_filters(self, company, url=None, georegion=None, industry=None, job_title=None):
'''
Utilize the method within the cycle_users function to build different search
parameters such as location, geotag, company, job-title, etc.
This function will return the full URL.
:param str company: target company name
:param str url: default (or custom) linkedin url for faceted linkedin search
:param str georegion: geographic region (-g) to filter
:param str industry: industry (-i) to filter
:param str job_title: job title (-j) to filter
:rtype: string (url if successful) or int (Unix-style error integer if error is encountered)
'''
filters = []
if not url:
url = 'https://www.linkedin.com/search/results/people/?'
# Filter by Company
# Allows the user to scrape linkedin without specifying a target company, but must do so with intent
if company != "NONE":
filters.append('company={0}'.format(company))
# Filter by Geographic Region
if georegion:
# region dict is created for future-proofing, in case new filters become available or the formatting changes
region = {}
try:
region['full_line'] = list_search('georegion', term=georegion, return_results=True)
region['name'] = region['full_line'].split('\t')[-1]
region['code'] = region['full_line'].split('\t')[0].split(
'.') # Should be continent.country.province/state.city_id
region.update({key: value.replace(' ', '') for (key, value) in
zip(('continent', 'country', 'state', 'id'), region['code'])})
filters.append('facetGeoRegion=%5B"{0}%3A{1}"%5D'.format(region['country'], region['id']))
except (IndexError, KeyError, ValueError):
self.log.error("[-] The region you chose is too broad to search. Search by City only")
return os.EX_NOINPUT
# Filter by Industry
if industry:
ind = list_search('industry', term=industry, return_results=True)
if ind:
i_code = ind.split('\t')[0].replace(' ', '')
filters.append('facetIndustry=%5B"{0}"%5D'.format(i_code))
filters.append("origin=FACETED_SEARCH")
if job_title:
filters.append('title={0}'.format(job_title))
else:
filters.append('title=')
# Join additional parameters to the URL by ampersand (&). Order doesn't matter.
filters.append('origin=FACETED_SEARCH')
url += "&".join(filters).lstrip("&") if len(filters) > 1 else filters[0]
self.log.debug("Filtered URL: " + url)
return url
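# Illustrative shape of a successful result (placeholder values, not real facet codes):
#   https://www.linkedin.com/search/results/people/?company=<company>&facetGeoRegion=%5B"<country>%3A<id>"%5D&facetIndustry=%5B"<code>"%5D&origin=FACETED_SEARCH&title=&origin=FACETED_SEARCH
# A Unix-style os.EX_ integer is returned instead when the georegion lookup fails.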
def cycle_users(self, company, url, max_users=None):
'''
You must run the login method before cycle_users will run. Once the login method has run, cycle_users can
collect the names and titles of employees at the company you specify. This method requires the company name
and optional value max_users from argparse. See the login method for a code example.
:param str company: target company name
:param str url: search URL to scrape
:param int max_users: maximum number of employees to collect (unlimited if None)
:rtype: None (self.employee_data will be populated with names, titles and profile URLs)
'''
# Wait for home screen after login
WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.XPATH, "//*[@id='global-nav']/div")))
self.log.debug("URL: " + str(url))
try:
self.page.get(url)
except exceptions.WebDriverException as err:
self.log.error("An error occurred while getting the company page: \n{0}".format(err))
self.log.critical("[!] Check the company name or URL used")
return os.EX_USAGE
count = 1 # WebElements cannot be used for iteration, so keep a manual counter.
current_page = 1
if not max_users:
max_users = float('inf')
while max_users > len(self.employee_data) and current_page < 100:
self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);")
try:
WebDriverWait(self.page, 20).until(EC.visibility_of_element_located((By.CLASS_NAME, 'active')))
# Check if the page contains the "no search results" class. This means we are out of users
# This will raise a NoSuchElementException if the element is not found
self.page.find_element_by_class_name("search-no-results__container")
break
except exceptions.NoSuchElementException:
pass
except exceptions.TimeoutException:
# Page didn't load correctly after 20 seconds, cannot reliably recover. Bailing.
return
try:
WebDriverWait(self.page, 5).until(EC.visibility_of_element_located((By.CLASS_NAME, 'name')))
except exceptions.TimeoutException:
try:
if self.page.find_elements_by_class_name('actor-name'):
# If this is true, the page is filled with "LinkedIn Member". It doesn't mean there's no users
# available on the page. If this is the case, click next.
current_page += 1
if 'disabled=""' in self.page.find_element_by_class_name(
"artdeco-pagination__button--next").parent.page_source:
# If this is true then the Next button is "disabled". This happens when there's no more pages
break
self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name(
"artdeco-pagination__button--next"))
continue
except exceptions.NoSuchElementException:
# Reached when there's no more users available on the page.
break
try:
# Get the current page number (at the bottom of a company search)
# The value returned from the HTML looks like this: '1\nCurrent page'
new_page = int(self.page.find_elements_by_class_name('active')[-1].text.split()[0])
except ValueError:
# If there's only one page, linkedin doesn't show page numbers at the bottom. The only result
# will be the text string "people", therefore when we try to convert the value to int we raise
# an exception
new_page = 1
except IndexError:
# Page likely came back with "No more users" even though there appeared to be pages left
return
except exceptions.StaleElementReferenceException:
# Handles a race condition where elements are found but are not populated yet.
continue
for pagination in self.page.find_elements_by_class_name("artdeco-pagination__button"):
if pagination.text != "Next":
continue
if not pagination.is_enabled():
# Next button is disabled. This is LinkedIn's way of saying "We are done here"
return
if current_page != new_page:
# The script is too fast. This verifies a new page has loaded before proceeding.
continue
# Scrolling to the bottom of the page loads all of the elements (employee names)
self.page.execute_script("window.scrollTo(0, document.body.scrollHeight);")
# Give the elements a second to populate fully
time.sleep(1)
# finds each employee element by a globally set class name
employee_elements = self.page.find_elements_by_xpath("//div[@class='entity-result__item']")
self.log.debug(employee_elements)
for employee in employee_elements:
if count > len(employee_elements):
count = 1
current_page += 1
# click next page
try:
self.page.execute_script("arguments[0].click();", self.page.find_element_by_class_name(
"artdeco-pagination__button--next"))
break
except exceptions.NoSuchElementException:
# No more pages
return os.EX_OK
try:
# The elements of LinkedIn change frequently, but the text data of the elements is more-or-less reliable.
# It's better to split via newline than parse xpaths.
data = employee.text.split('\n')
if "LinkedIn Member" not in data[0] and len(data) >= 5:
name = Webpage.sanitize_name(data[0])
title_text = data[3]
region = data[4]
else:
count += 1
continue
try:
# This element does not always exist, so the NoSuchElementException it raises must be handled.
alt_text = employee.find_element_by_class_name('search-result__snippets').text
except exceptions.NoSuchElementException:
alt_text = False
title, _, company = title_text.partition(' at ')
if alt_text and not company:
alt_text = alt_text[len('Current: '):] if alt_text.startswith('Current: ') else alt_text # lstrip() strips characters, not a prefix
t, _, company = alt_text.partition(' at ')
if not title:
title = t
dept = self.dept_wizard(title)
# If company is still empty at this point, bail out to unemployment
company = company or 'UNEMPLOYED'
url = employee.find_element_by_xpath("//a[@class='app-aware-link']").get_attribute('href')
except (IndexError, exceptions.NoSuchElementException):
count += 1
continue
if not name:
continue
for person in name:
self.log.info(person)
self.employee_data.update({person: [dept, title, company, region, url]})
count += 1
@staticmethod
def | (linkedin_title):
'''
Attempt to determine which department a given user belongs to based off of their title. If a department cannot
be reliably determined then it will return None. It is advised to compare their raw untouched
titles to the output of dept_wizard(). Blindly trusting the dept_wizard() could lead to some awkward situations.
If a title matches any of the values in the tuples below, the first value in the tuple will populate the title
column in the CSV.
:param str linkedin_title: a string reflecting an employees title
:rtype: str
'''
sales = ('Sales', 'Account Manager', 'Account Executive', 'New Business', 'Relationship Manager')
hr = ('HR', 'Human Resources', 'Benefits Admin', 'Payroll', 'Talent', 'Recruiter')
accounting = ('Accounting', 'Accountant', 'Financial', 'Finance', 'Billing')
marketing = ('Marketing', 'Content', 'Brand', 'seo', 'Social Media')
it = (
'IT', 'Information Technology', 'Network Engineer', 'Network Admin', 'System Admin', 'sysadmin',
'sys admin',
'Help Desk', 'ITHD', 'Developer', 'Dev')
infosec = (
'Infosec', 'Red Team', 'Blue Team', 'Offensive', 'Defensive', 'Pentest', 'Penetration',
'Information Security')
executive = ('Executive', 'Exec', 'cfo', 'ceo', 'coo', 'cio', 'cmo', 'cbo', 'cto', 'cso', 'Chief')
audit = ('Audit', 'Compliance')
all_depts = [sales, hr, accounting, marketing, it, infosec, executive, audit]
for dept in all_depts:
for common_title in dept:
if re.match(r'(^|\s)' + re.escape(common_title.lower()) + r'($|\s)', linkedin_title.lower()):
return dept[0]
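# Illustrative mappings with hypothetical titles (comment only, not executed):
#   dept_wizard('Sales Engineer') -> 'Sales'
#   dept_wizard('Recruiter')      -> 'HR'
#   dept_wizard('Basket Weaver')  -> None (no keyword match)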
def out_csv(self, filename, company, schema):
'''
Write data from self.employee_data to a CSV. This data is populated from the cycle_users method.
:param str filename: output CSV path
:param str company: company name used to build the email domain
:param list schema: verified list of email format strings
:rtype: None
'''
if not self.employee_data:
return None
csv_file = csv.writer(open(filename, "w"))
for name, emp_data in self.employee_data.items():
emails = self.email_formatter(name, company, schema)
for email in emails:
first_name = name.split()[0]
last_name = name.split()[-1]
data = [first_name, last_name, email] + emp_data
csv_file.writerow(data)
def email_formatter(self, name, company, schema):
'''
This method is called by out_csv to determine which format(s) the generated email addresses should use.
:param str name: employee full name
:param str company: company name used to build the email domain
:param list schema: verified list of email format strings
:rtype: list emails
'''
emails = []
for selected in schema:
names = name.split()
email = selected.format(first=names[0], last=names[-1], domain=company.replace(' ', ''))
emails.append(email)
return emails
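# Illustrative example with a hypothetical person (comment only, not executed):
#   email_formatter('Jane Doe', 'Acme Corp', ['{first:.1}{last}@{domain}.com']) -> ['JDoe@AcmeCorp.com']
#   (spaces are stripped from the company name to form the domain)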
@staticmethod
def verify_schema(schema):
'''
Verify the chosen email schema is valid.
:param str schema: A comma separated string containing one or more email schema formats
:rtype: list of all valid schemas
'''
schema = schema.split(',')
for email_format in schema:
try:
email_format.format(first='test', last='test', domain='test')
except KeyError:
raise SyntaxWarning('Invalid schema: ' + email_format)
return schema
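# Illustrative example (comment only, not executed): verify_schema('{first}.{last}@{domain}.com')
# returns ['{first}.{last}@{domain}.com'], while an unknown placeholder such as
# '{frist}@{domain}.com' raises SyntaxWarning('Invalid schema: ...').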
def list_search(target, term, return_results=False):
'''
Prints list of possible geographic regions & industries per LinkedIn Documentation
Specify -l by itself to print all files, or specify -g <term> / -i <term> to search
for matching geographic regions and industries simultaneously.
Exact matches are required for faceted searches of georegions or industries.
:param str target: Search for a matching georegion or industry by specifying -g or -i parameters
:param str term: Search for specific matching term in -g or -i by adding a term argument to search for
:return: os.EX_OK after printing the table, or the code of the first match when return_results is True
'''
print("========================= {0} =========================".format(target.capitalize()))
try:
refs = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'refs/')
if target == 'georegion':
search_file = open(os.path.join(refs, 'georegions.txt'), 'r')
elif target == 'industry':
search_file = open(os.path.join(refs, 'industries.txt'), 'r')
else:
return os.EX_NOINPUT
except IOError as unfound_file:
print(
"[-] You are missing the {0}.txt file from your ./refs installation directory. Please re-install scrapedin.".format(
target))
print(unfound_file)
return os.EX_IOERR
results = []
for i, line in enumerate(search_file.readlines()):
if term.lower() in line.lower():
results.append([str(line.split()[0]), str(' '.join(line.split()[1:])).strip('\n')])
# print('[{0}] {1}'.format(i, line.strip('\n')))
search_file.close()
if return_results:
print("Matches found: ", results)
return results[0][0]
print(tabulate(results, headers=['CODE', 'NAME'], tablefmt="orgtbl"))
return os.EX_OK
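# Illustrative usage (codes are placeholders; the real values live in refs/*.txt):
#   list_search('industry', term='Banking')                      # prints a CODE/NAME table
#   list_search('industry', term='Banking', return_results=True) # prints the matches and returns the first match's code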
def main():
parser = argparse.ArgumentParser(epilog=HELP_EPILOG, formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-m', dest='max_users', type=int, default=float('inf'),
help='The maximum number of employees to scrape (default: all)')
parser.add_argument('-l', dest='list_search', action='store_true', default=False,
help='List search for geographic regions and industries. (requires -g or -i)')
parser.add_argument('-L', dest='loglvl', action='store', choices=['DEBUG', 'INFO', 'WARNING', 'ERROR', 'CRITICAL'],
default='INFO', help='set the logging level')
parser.add_argument('-U', dest='url', action='store', default=None,
help='Explicitly set the company URL to scrape from')
parser.add_argument('-g', dest='georegion', action='store', default=None,
help='Filter results by geographic region')
parser.add_argument('-i', dest='industry', action='store', default=None, help='Filter results by industry')
parser.add_argument('-j', dest='job_title', action='store', default=None, help='Filter results by job title')
required = parser.add_argument_group('required arguments')
required.add_argument('-c', dest='company', action='store', default=None,
help='The company name to scrape users from LinkedIn. Enter "NONE" to scrape users without company')
required.add_argument('-o', dest='filename', help='The output filename')
required.add_argument('-u', dest='username', help='LinkedIn Login Email')
required.add_argument('-s', dest='schema', default='{first:.1}{last}@{domain}.com', help='The email format to use')
args = parser.parse_args()
# If -l is true, search by term or print entire file. -o and -u are not required
if args.list_search:
if not args.georegion and not args.industry:
list_search(target='georegion', term=' ')
list_search(target='industry', term=' ')
if args.georegion:
list_search(target='georegion', term=args.georegion)
if args.industry:
list_search(target='industry', term=args.industry)
return os.EX_OK
# If -l is False (default), -o and -u are required
if not args.filename or not args.username:
parser.error('the following arguments are required to begin scraping -o, -u')
return os.EX_NOINPUT
if not args.company:
parser.error(
'the following arguments are required to begin scraping -c\n\tNOTE: If you are trying to scrape users without a company target, use "-c NONE"')
try:
args.schema = Webpage.verify_schema(args.schema)
except SyntaxWarning as invalid_schema:
print(invalid_schema)
sys.exit(os.EX_SOFTWARE)
if not args.filename.endswith('.csv'):
args.filename += '.csv'
if args.company:
if '"' in args.company:
args.company = args.company.replace('"', '')
web = None
try:
password = getpass.getpass(prompt='LinkedIn Password: ')
web = Webpage(loglvl=args.loglvl)
web.login(username=args.username, password=password)
del password
if not args.url:
url = 'https://www.linkedin.com/search/results/people?'
if args.georegion or args.industry or args.job_title:
filtered_url = web.apply_filters(args.company, url, args.georegion, args.industry, args.job_title)
# Error handler for when apply_filters returns an os.EX_ code
if isinstance(filtered_url, int):
return filtered_url
# Filtered URL
web.cycle_users(args.company, filtered_url, args.max_users)
else: # Default URL
url += "company={0}".format(args.company)
web.cycle_users(args.company, url, args.max_users)
else: # User-defined URL
web.cycle_users(args.company, args.url, args.max_users)
web.out_csv(filename=args.filename, company=args.company, schema=args.schema)
web.page.quit()
except KeyboardInterrupt:
if web:
web.out_csv(args.filename, args.company, args.schema)
return os.EX_OK
if __name__ == '__main__':
if sys.version_info < (3, 3):
print('[-] this script requires Python 3.3+')
sys.exit(os.EX_SOFTWARE)
sys.exit(main())
| dept_wizard | identifier_name |
snapshot.go | // Copyright 2012 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
package pebble
import (
"context"
"io"
"math"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/pebble/internal/invariants"
"github.com/cockroachdb/pebble/rangekey"
)
// ErrSnapshotExcised is returned from WaitForFileOnlySnapshot if an excise
// overlapping with one of the EventuallyFileOnlySnapshot's KeyRanges gets
// applied before the transition of that EFOS to a file-only snapshot.
var ErrSnapshotExcised = errors.New("pebble: snapshot excised before conversion to file-only snapshot")
// Snapshot provides a read-only point-in-time view of the DB state.
type Snapshot struct {
// The db the snapshot was created from.
db *DB
seqNum uint64
// Set if part of an EventuallyFileOnlySnapshot.
efos *EventuallyFileOnlySnapshot
// The list the snapshot is linked into.
list *snapshotList
// The next/prev link for the snapshotList doubly-linked list of snapshots.
prev, next *Snapshot
}
var _ Reader = (*Snapshot)(nil)
// Get gets the value for the given key. It returns ErrNotFound if the Snapshot
// does not contain the key.
//
// The caller should not modify the contents of the returned slice, but it is
// safe to modify the contents of the argument after Get returns. The returned
// slice will remain valid until the returned Closer is closed. On success, the
// caller MUST call closer.Close() or a memory leak will occur.
func (s *Snapshot) Get(key []byte) ([]byte, io.Closer, error) {
if s.db == nil {
panic(ErrClosed)
}
return s.db.getInternal(key, nil /* batch */, s)
}
// NewIter returns an iterator that is unpositioned (Iterator.Valid() will
// return false). The iterator can be positioned via a call to SeekGE,
// SeekLT, First or Last.
func (s *Snapshot) NewIter(o *IterOptions) (*Iterator, error) {
return s.NewIterWithContext(context.Background(), o)
}
// NewIterWithContext is like NewIter, and additionally accepts a context for
// tracing.
func (s *Snapshot) NewIterWithContext(ctx context.Context, o *IterOptions) (*Iterator, error) {
if s.db == nil {
panic(ErrClosed)
}
return s.db.newIter(ctx, nil /* batch */, snapshotIterOpts{seqNum: s.seqNum}, o), nil
}
// ScanInternal scans all internal keys within the specified bounds, truncating
// any rangedels and rangekeys to those bounds. For use when an external user
// needs to be aware of all internal keys that make up a key range.
//
// See comment on db.ScanInternal for the behaviour that can be expected of
// point keys deleted by range dels and keys masked by range keys.
func (s *Snapshot) ScanInternal(
ctx context.Context,
lower, upper []byte,
visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error,
visitRangeDel func(start, end []byte, seqNum uint64) error,
visitRangeKey func(start, end []byte, keys []rangekey.Key) error,
visitSharedFile func(sst *SharedSSTMeta) error,
) error {
if s.db == nil {
panic(ErrClosed)
}
scanInternalOpts := &scanInternalOptions{
visitPointKey: visitPointKey,
visitRangeDel: visitRangeDel,
visitRangeKey: visitRangeKey,
visitSharedFile: visitSharedFile,
skipSharedLevels: visitSharedFile != nil,
IterOptions: IterOptions{
KeyTypes: IterKeyTypePointsAndRanges,
LowerBound: lower,
UpperBound: upper,
},
}
iter := s.db.newInternalIter(snapshotIterOpts{seqNum: s.seqNum}, scanInternalOpts)
defer iter.close()
return scanInternalImpl(ctx, lower, upper, iter, scanInternalOpts)
}
// closeLocked is similar to Close(), except it requires that db.mu be held
// by the caller.
func (s *Snapshot) closeLocked() error {
s.db.mu.snapshots.remove(s)
// If s was the previous earliest snapshot, we might be able to reclaim
// disk space by dropping obsolete records that were pinned by s.
if e := s.db.mu.snapshots.earliest(); e > s.seqNum {
s.db.maybeScheduleCompactionPicker(pickElisionOnly)
}
s.db = nil
return nil
}
// Close closes the snapshot, releasing its resources. Close must be called.
// Failure to do so will result in a tiny memory leak and a large leak of
// resources on disk due to the entries the snapshot is preventing from being
// deleted.
//
// d.mu must NOT be held by the caller.
func (s *Snapshot) Close() error {
db := s.db
if db == nil {
panic(ErrClosed)
}
db.mu.Lock()
defer db.mu.Unlock()
return s.closeLocked()
}
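// Typical lifecycle, shown as an illustrative sketch only (assumes an open *DB
// named d and a key []byte; NewSnapshot is defined elsewhere in the package):
//
//	s := d.NewSnapshot()
//	defer s.Close()
//	value, closer, err := s.Get(key)
//	if err == nil {
//		_ = value
//		closer.Close()
//	}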
type snapshotList struct {
root Snapshot
}
func (l *snapshotList) init() {
l.root.next = &l.root
l.root.prev = &l.root
}
func (l *snapshotList) empty() bool {
return l.root.next == &l.root
}
func (l *snapshotList) count() int {
if l.empty() {
return 0
}
var count int
for i := l.root.next; i != &l.root; i = i.next {
count++
}
return count
}
func (l *snapshotList) earliest() uint64 |
func (l *snapshotList) toSlice() []uint64 {
if l.empty() {
return nil
}
var results []uint64
for i := l.root.next; i != &l.root; i = i.next {
results = append(results, i.seqNum)
}
return results
}
func (l *snapshotList) pushBack(s *Snapshot) {
if s.list != nil || s.prev != nil || s.next != nil {
panic("pebble: snapshot list is inconsistent")
}
s.prev = l.root.prev
s.prev.next = s
s.next = &l.root
s.next.prev = s
s.list = l
}
func (l *snapshotList) remove(s *Snapshot) {
if s == &l.root {
panic("pebble: cannot remove snapshot list root node")
}
if s.list != l {
panic("pebble: snapshot list is inconsistent")
}
s.prev.next = s.next
s.next.prev = s.prev
s.next = nil // avoid memory leaks
s.prev = nil // avoid memory leaks
s.list = nil // avoid memory leaks
}
// EventuallyFileOnlySnapshot (aka EFOS) provides a read-only point-in-time view
// of the database state, similar to Snapshot. An EventuallyFileOnlySnapshot
// induces less write amplification than Snapshot, at the cost of increased space
// amplification. While a Snapshot may increase write amplification across all
// flushes and compactions for the duration of its lifetime, an
// EventuallyFileOnlySnapshot only incurs that cost for flushes/compactions if
// memtables at the time of EFOS instantiation contained keys that the EFOS is
// interested in (i.e. its protectedRanges). In that case, the EFOS prevents
// elision of keys visible to it, similar to a Snapshot, until those memtables
// are flushed, and once that happens, the "EventuallyFileOnlySnapshot"
// transitions to a file-only snapshot state in which it pins zombie sstables
// like an open Iterator would, without pinning any memtables. Callers that can
// tolerate the increased space amplification of pinning zombie sstables until
// the snapshot is closed may prefer EventuallyFileOnlySnapshots for their
// reduced write amplification. Callers that desire the benefits of the file-only
// state that requires no pinning of memtables should call
// `WaitForFileOnlySnapshot()` (and possibly re-mint an EFOS if it returns
// ErrSnapshotExcised) before relying on the EFOS to keep producing iterators
// with zero write-amp and zero pinning of memtables in memory.
//
// EventuallyFileOnlySnapshots interact with the IngestAndExcise operation in
// subtle ways. Unlike Snapshots, EFOS guarantees that their read-only
// point-in-time view is unaltered by the excision. However, if a concurrent
// excise were to happen on one of the protectedRanges, WaitForFileOnlySnapshot()
// would return ErrSnapshotExcised and the EFOS would maintain a reference to the
// underlying readState (and by extension, zombie memtables) for its lifetime.
// This could lead to increased memory utilization, which is why callers should
// call WaitForFileOnlySnapshot() if they expect an EFOS to be long-lived.
type EventuallyFileOnlySnapshot struct {
mu struct {
// NB: If both this mutex and db.mu are being grabbed, db.mu should be
// grabbed _before_ grabbing this one.
sync.Mutex
// Either the {snap,readState} fields are set below, or the version is set at
// any given point of time. If a snapshot is referenced, this is not a
// file-only snapshot yet, and if a version is set (and ref'd) this is a
// file-only snapshot.
// The wrapped regular snapshot, if not a file-only snapshot yet. The
// readState has already been ref()d once if it's set.
snap *Snapshot
readState *readState
// The wrapped version reference, if a file-only snapshot.
vers *version
}
// Key ranges to watch for an excise on.
protectedRanges []KeyRange
// excised, if true, signals that the above ranges were excised during the
// lifetime of this snapshot.
excised atomic.Bool
// The db the snapshot was created from.
db *DB
seqNum uint64
closed chan struct{}
}
func (d *DB) makeEventuallyFileOnlySnapshot(
keyRanges []KeyRange, internalKeyRanges []internalKeyRange,
) *EventuallyFileOnlySnapshot {
isFileOnly := true
d.mu.Lock()
defer d.mu.Unlock()
seqNum := d.mu.versions.visibleSeqNum.Load()
// Check if any of the keyRanges overlap with a memtable.
for i := range d.mu.mem.queue {
mem := d.mu.mem.queue[i]
if ingestMemtableOverlaps(d.cmp, mem, internalKeyRanges) {
isFileOnly = false
break
}
}
es := &EventuallyFileOnlySnapshot{
db: d,
seqNum: seqNum,
protectedRanges: keyRanges,
closed: make(chan struct{}),
}
if isFileOnly {
es.mu.vers = d.mu.versions.currentVersion()
es.mu.vers.Ref()
} else {
s := &Snapshot{
db: d,
seqNum: seqNum,
}
s.efos = es
es.mu.snap = s
es.mu.readState = d.loadReadState()
d.mu.snapshots.pushBack(s)
}
return es
}
// Transitions this EventuallyFileOnlySnapshot to a file-only snapshot. Requires
// earliestUnflushedSeqNum and vers to correspond to the same Version from the
// current or a past acquisition of db.mu. vers must have been Ref()'d before
// that mutex was released, if it was released.
//
// NB: The caller is expected to check for es.excised before making this
// call.
//
// d.mu must be held when calling this method.
func (es *EventuallyFileOnlySnapshot) transitionToFileOnlySnapshot(vers *version) error {
es.mu.Lock()
select {
case <-es.closed:
vers.UnrefLocked()
es.mu.Unlock()
return ErrClosed
default:
}
if es.mu.snap == nil {
es.mu.Unlock()
panic("pebble: tried to transition an eventually-file-only-snapshot twice")
}
// The caller has already called Ref() on vers.
es.mu.vers = vers
// NB: The callers should have already done a check of es.excised.
oldSnap := es.mu.snap
oldReadState := es.mu.readState
es.mu.snap = nil
es.mu.readState = nil
es.mu.Unlock()
// It's okay to close a snapshot even if iterators are already open on it.
oldReadState.unrefLocked()
return oldSnap.closeLocked()
}
// releaseReadState is called to release reference to a readState when
// es.excised == true. This is to free up memory as quickly as possible; all
// other snapshot resources are kept around until Close() is called. Safe for
// idempotent calls.
//
// d.mu must be held when calling this method.
func (es *EventuallyFileOnlySnapshot) releaseReadState() {
if !es.excised.Load() {
panic("pebble: releasing read state of eventually-file-only-snapshot but was not excised")
}
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.readState != nil {
es.mu.readState.unrefLocked()
es.db.maybeScheduleObsoleteTableDeletionLocked()
}
}
// hasTransitioned returns true if this EFOS has transitioned to a file-only
// snapshot.
func (es *EventuallyFileOnlySnapshot) hasTransitioned() bool {
es.mu.Lock()
defer es.mu.Unlock()
return es.mu.vers != nil
}
// waitForFlush waits for a flush on any memtables that need to be flushed
// before this EFOS can transition to a file-only snapshot. If this EFOS is
// waiting on a flush of the mutable memtable, it forces a rotation within
// `dur` duration. For immutable memtables, it schedules a flush and waits for
// it to finish.
func (es *EventuallyFileOnlySnapshot) waitForFlush(ctx context.Context, dur time.Duration) error {
es.db.mu.Lock()
defer es.db.mu.Unlock()
earliestUnflushedSeqNum := es.db.getEarliestUnflushedSeqNumLocked()
for earliestUnflushedSeqNum < es.seqNum {
select {
case <-es.closed:
return ErrClosed
case <-ctx.Done():
return ctx.Err()
default:
}
// Check if the current mutable memtable contains keys less than seqNum.
// If so, rotate it.
if es.db.mu.mem.mutable.logSeqNum < es.seqNum && dur.Nanoseconds() > 0 {
es.db.maybeScheduleDelayedFlush(es.db.mu.mem.mutable, dur)
} else {
// Find the last memtable that contains seqNums less than es.seqNum,
// and force a flush on it.
var mem *flushableEntry
for i := range es.db.mu.mem.queue {
if es.db.mu.mem.queue[i].logSeqNum < es.seqNum {
mem = es.db.mu.mem.queue[i]
}
}
mem.flushForced = true
es.db.maybeScheduleFlush()
}
es.db.mu.compact.cond.Wait()
earliestUnflushedSeqNum = es.db.getEarliestUnflushedSeqNumLocked()
}
if es.excised.Load() {
return ErrSnapshotExcised
}
return nil
}
// WaitForFileOnlySnapshot blocks the calling goroutine until this snapshot
// has been converted into a file-only snapshot (i.e. all memtables containing
// keys < seqNum are flushed). A duration can be passed in, and if nonzero,
// a delayed flush will be scheduled at that duration if necessary.
//
// Idempotent; can be called multiple times with no side effects.
func (es *EventuallyFileOnlySnapshot) WaitForFileOnlySnapshot(
ctx context.Context, dur time.Duration,
) error {
if es.hasTransitioned() {
return nil
}
if err := es.waitForFlush(ctx, dur); err != nil {
return err
}
if invariants.Enabled {
// Since we aren't returning an error, we _must_ have transitioned to a
// file-only snapshot by now.
if !es.hasTransitioned() {
panic("expected EFOS to have transitioned to file-only snapshot after flush")
}
}
return nil
}
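// Illustrative wait flow (sketch only; assumes a *DB named d, a ctx, and a
// constructor along the lines of d.NewEventuallyFileOnlySnapshot, which is
// defined outside this file):
//
//	efos := d.NewEventuallyFileOnlySnapshot(keyRanges)
//	defer efos.Close()
//	if err := efos.WaitForFileOnlySnapshot(ctx, time.Second); err == ErrSnapshotExcised {
//		// An overlapping excise landed first; re-mint the EFOS if needed.
//	}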
// Close closes the file-only snapshot and releases all referenced resources.
// Not idempotent.
func (es *EventuallyFileOnlySnapshot) Close() error {
close(es.closed)
es.db.mu.Lock()
defer es.db.mu.Unlock()
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.snap != nil {
if err := es.mu.snap.closeLocked(); err != nil {
return err
}
}
if es.mu.readState != nil {
es.mu.readState.unrefLocked()
es.db.maybeScheduleObsoleteTableDeletionLocked()
}
if es.mu.vers != nil {
es.mu.vers.UnrefLocked()
}
return nil
}
// Get implements the Reader interface.
func (es *EventuallyFileOnlySnapshot) Get(key []byte) (value []byte, closer io.Closer, err error) {
// TODO(jackson): Use getInternal.
iter, err := es.NewIter(nil)
if err != nil {
return nil, nil, err
}
var valid bool
if es.db.opts.Comparer.Split != nil {
valid = iter.SeekPrefixGE(key)
} else {
valid = iter.SeekGE(key)
}
if !valid {
if err = firstError(iter.Error(), iter.Close()); err != nil {
return nil, nil, err
}
return nil, nil, ErrNotFound
}
if !es.db.equal(iter.Key(), key) {
return nil, nil, firstError(iter.Close(), ErrNotFound)
}
return iter.Value(), iter, nil
}
// NewIter returns an iterator that is unpositioned (Iterator.Valid() will
// return false). The iterator can be positioned via a call to SeekGE,
// SeekLT, First or Last.
func (es *EventuallyFileOnlySnapshot) NewIter(o *IterOptions) (*Iterator, error) {
return es.NewIterWithContext(context.Background(), o)
}
// NewIterWithContext is like NewIter, and additionally accepts a context for
// tracing.
func (es *EventuallyFileOnlySnapshot) NewIterWithContext(
ctx context.Context, o *IterOptions,
) (*Iterator, error) {
select {
case <-es.closed:
panic(ErrClosed)
default:
}
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.vers != nil {
sOpts := snapshotIterOpts{seqNum: es.seqNum, vers: es.mu.vers}
return es.db.newIter(ctx, nil /* batch */, sOpts, o), nil
}
if es.excised.Load() {
return nil, ErrSnapshotExcised
}
sOpts := snapshotIterOpts{seqNum: es.seqNum, readState: es.mu.readState}
return es.db.newIter(ctx, nil /* batch */, sOpts, o), nil
}
// ScanInternal scans all internal keys within the specified bounds, truncating
// any rangedels and rangekeys to those bounds. For use when an external user
// needs to be aware of all internal keys that make up a key range.
//
// See comment on db.ScanInternal for the behaviour that can be expected of
// point keys deleted by range dels and keys masked by range keys.
func (es *EventuallyFileOnlySnapshot) ScanInternal(
ctx context.Context,
lower, upper []byte,
visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error,
visitRangeDel func(start, end []byte, seqNum uint64) error,
visitRangeKey func(start, end []byte, keys []rangekey.Key) error,
visitSharedFile func(sst *SharedSSTMeta) error,
) error {
if es.db == nil {
panic(ErrClosed)
}
if es.excised.Load() {
return ErrSnapshotExcised
}
var sOpts snapshotIterOpts
es.mu.Lock()
if es.mu.vers != nil {
sOpts = snapshotIterOpts{
seqNum: es.seqNum,
vers: es.mu.vers,
}
} else {
sOpts = snapshotIterOpts{
seqNum: es.seqNum,
readState: es.mu.readState,
}
}
es.mu.Unlock()
opts := &scanInternalOptions{
IterOptions: IterOptions{
KeyTypes: IterKeyTypePointsAndRanges,
LowerBound: lower,
UpperBound: upper,
},
visitPointKey: visitPointKey,
visitRangeDel: visitRangeDel,
visitRangeKey: visitRangeKey,
visitSharedFile: visitSharedFile,
skipSharedLevels: visitSharedFile != nil,
}
iter := es.db.newInternalIter(sOpts, opts)
defer iter.close()
return scanInternalImpl(ctx, lower, upper, iter, opts)
}
| {
v := uint64(math.MaxUint64)
if !l.empty() {
v = l.root.next.seqNum
}
return v
} | identifier_body |
snapshot.go | // Copyright 2012 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
package pebble
import (
"context"
"io"
"math"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/pebble/internal/invariants"
"github.com/cockroachdb/pebble/rangekey"
)
// ErrSnapshotExcised is returned from WaitForFileOnlySnapshot if an excise
// overlapping with one of the EventuallyFileOnlySnapshot's KeyRanges gets
// applied before the transition of that EFOS to a file-only snapshot.
var ErrSnapshotExcised = errors.New("pebble: snapshot excised before conversion to file-only snapshot")
// Snapshot provides a read-only point-in-time view of the DB state.
type Snapshot struct {
// The db the snapshot was created from.
db *DB
seqNum uint64
// Set if part of an EventuallyFileOnlySnapshot.
efos *EventuallyFileOnlySnapshot
// The list the snapshot is linked into.
list *snapshotList
// The next/prev link for the snapshotList doubly-linked list of snapshots.
prev, next *Snapshot
}
var _ Reader = (*Snapshot)(nil)
// Get gets the value for the given key. It returns ErrNotFound if the Snapshot
// does not contain the key.
//
// The caller should not modify the contents of the returned slice, but it is
// safe to modify the contents of the argument after Get returns. The returned
// slice will remain valid until the returned Closer is closed. On success, the
// caller MUST call closer.Close() or a memory leak will occur.
func (s *Snapshot) Get(key []byte) ([]byte, io.Closer, error) {
if s.db == nil {
panic(ErrClosed)
}
return s.db.getInternal(key, nil /* batch */, s)
}
// NewIter returns an iterator that is unpositioned (Iterator.Valid() will
// return false). The iterator can be positioned via a call to SeekGE,
// SeekLT, First or Last.
func (s *Snapshot) NewIter(o *IterOptions) (*Iterator, error) {
return s.NewIterWithContext(context.Background(), o)
}
// NewIterWithContext is like NewIter, and additionally accepts a context for
// tracing.
func (s *Snapshot) NewIterWithContext(ctx context.Context, o *IterOptions) (*Iterator, error) {
if s.db == nil {
panic(ErrClosed)
}
return s.db.newIter(ctx, nil /* batch */, snapshotIterOpts{seqNum: s.seqNum}, o), nil
}
// ScanInternal scans all internal keys within the specified bounds, truncating
// any rangedels and rangekeys to those bounds. For use when an external user
// needs to be aware of all internal keys that make up a key range.
//
// See comment on db.ScanInternal for the behaviour that can be expected of
// point keys deleted by range dels and keys masked by range keys.
func (s *Snapshot) ScanInternal(
ctx context.Context,
lower, upper []byte,
visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error,
visitRangeDel func(start, end []byte, seqNum uint64) error,
visitRangeKey func(start, end []byte, keys []rangekey.Key) error,
visitSharedFile func(sst *SharedSSTMeta) error,
) error {
if s.db == nil {
panic(ErrClosed)
}
scanInternalOpts := &scanInternalOptions{
visitPointKey: visitPointKey,
visitRangeDel: visitRangeDel,
visitRangeKey: visitRangeKey,
visitSharedFile: visitSharedFile,
skipSharedLevels: visitSharedFile != nil,
IterOptions: IterOptions{
KeyTypes: IterKeyTypePointsAndRanges,
LowerBound: lower,
UpperBound: upper,
},
}
iter := s.db.newInternalIter(snapshotIterOpts{seqNum: s.seqNum}, scanInternalOpts)
defer iter.close()
return scanInternalImpl(ctx, lower, upper, iter, scanInternalOpts)
}
// closeLocked is similar to Close(), except it requires that db.mu be held
// by the caller.
func (s *Snapshot) closeLocked() error {
s.db.mu.snapshots.remove(s)
// If s was the previous earliest snapshot, we might be able to reclaim
// disk space by dropping obsolete records that were pinned by s.
if e := s.db.mu.snapshots.earliest(); e > s.seqNum {
s.db.maybeScheduleCompactionPicker(pickElisionOnly)
}
s.db = nil
return nil
}
// Close closes the snapshot, releasing its resources. Close must be called.
// Failure to do so will result in a tiny memory leak and a large leak of
// resources on disk due to the entries the snapshot is preventing from being
// deleted.
//
// d.mu must NOT be held by the caller.
func (s *Snapshot) Close() error {
db := s.db
if db == nil {
panic(ErrClosed)
}
db.mu.Lock()
defer db.mu.Unlock()
return s.closeLocked()
}
type snapshotList struct {
root Snapshot
}
func (l *snapshotList) init() {
l.root.next = &l.root
l.root.prev = &l.root
}
func (l *snapshotList) empty() bool {
return l.root.next == &l.root
}
func (l *snapshotList) count() int {
if l.empty() {
return 0
}
var count int
for i := l.root.next; i != &l.root; i = i.next {
count++
}
return count
}
func (l *snapshotList) earliest() uint64 {
v := uint64(math.MaxUint64)
if !l.empty() {
v = l.root.next.seqNum
}
return v
}
func (l *snapshotList) toSlice() []uint64 {
if l.empty() {
return nil
}
var results []uint64
for i := l.root.next; i != &l.root; i = i.next {
results = append(results, i.seqNum)
}
return results
}
func (l *snapshotList) pushBack(s *Snapshot) {
if s.list != nil || s.prev != nil || s.next != nil {
panic("pebble: snapshot list is inconsistent")
}
s.prev = l.root.prev
s.prev.next = s
s.next = &l.root
s.next.prev = s
s.list = l
}
func (l *snapshotList) remove(s *Snapshot) {
if s == &l.root {
panic("pebble: cannot remove snapshot list root node")
}
if s.list != l {
panic("pebble: snapshot list is inconsistent")
}
s.prev.next = s.next
s.next.prev = s.prev
s.next = nil // avoid memory leaks
s.prev = nil // avoid memory leaks
s.list = nil // avoid memory leaks
}
// EventuallyFileOnlySnapshot (aka EFOS) provides a read-only point-in-time view
// of the database state, similar to Snapshot. An EventuallyFileOnlySnapshot
// induces less write amplification than Snapshot, at the cost of increased space
// amplification. While a Snapshot may increase write amplification across all
// flushes and compactions for the duration of its lifetime, an
// EventuallyFileOnlySnapshot only incurs that cost for flushes/compactions if
// memtables at the time of EFOS instantiation contained keys that the EFOS is
// interested in (i.e. its protectedRanges). In that case, the EFOS prevents
// elision of keys visible to it, similar to a Snapshot, until those memtables
// are flushed, and once that happens, the "EventuallyFileOnlySnapshot"
// transitions to a file-only snapshot state in which it pins zombie sstables
// like an open Iterator would, without pinning any memtables. Callers that can
// tolerate the increased space amplification of pinning zombie sstables until
// the snapshot is closed may prefer EventuallyFileOnlySnapshots for their
// reduced write amplification. Callers that desire the benefits of the file-only
// state that requires no pinning of memtables should call
// `WaitForFileOnlySnapshot()` (and possibly re-mint an EFOS if it returns
// ErrSnapshotExcised) before relying on the EFOS to keep producing iterators
// with zero write-amp and zero pinning of memtables in memory.
//
// EventuallyFileOnlySnapshots interact with the IngestAndExcise operation in
// subtle ways. Unlike Snapshots, EFOS guarantees that their read-only
// point-in-time view is unaltered by the excision. However, if a concurrent
// excise were to happen on one of the protectedRanges, WaitForFileOnlySnapshot()
// would return ErrSnapshotExcised and the EFOS would maintain a reference to the
// underlying readState (and by extension, zombie memtables) for its lifetime.
// This could lead to increased memory utilization, which is why callers should
// call WaitForFileOnlySnapshot() if they expect an EFOS to be long-lived.
type EventuallyFileOnlySnapshot struct {
mu struct {
// NB: If both this mutex and db.mu are being grabbed, db.mu should be
// grabbed _before_ grabbing this one.
sync.Mutex
// Either the {snap,readState} fields are set below, or the version is set at
// any given point of time. If a snapshot is referenced, this is not a
// file-only snapshot yet, and if a version is set (and ref'd) this is a
// file-only snapshot.
// The wrapped regular snapshot, if not a file-only snapshot yet. The
// readState has already been ref()d once if it's set.
snap *Snapshot
readState *readState
// The wrapped version reference, if a file-only snapshot.
vers *version
}
// Key ranges to watch for an excise on.
protectedRanges []KeyRange
// excised, if true, signals that the above ranges were excised during the
// lifetime of this snapshot.
excised atomic.Bool
// The db the snapshot was created from.
db *DB
seqNum uint64
closed chan struct{}
}
func (d *DB) makeEventuallyFileOnlySnapshot(
keyRanges []KeyRange, internalKeyRanges []internalKeyRange,
) *EventuallyFileOnlySnapshot {
isFileOnly := true
d.mu.Lock()
defer d.mu.Unlock()
seqNum := d.mu.versions.visibleSeqNum.Load()
// Check if any of the keyRanges overlap with a memtable.
for i := range d.mu.mem.queue {
mem := d.mu.mem.queue[i]
if ingestMemtableOverlaps(d.cmp, mem, internalKeyRanges) {
isFileOnly = false
break
}
}
es := &EventuallyFileOnlySnapshot{
db: d,
seqNum: seqNum,
protectedRanges: keyRanges,
closed: make(chan struct{}),
}
if isFileOnly {
es.mu.vers = d.mu.versions.currentVersion()
es.mu.vers.Ref()
} else {
s := &Snapshot{
db: d,
seqNum: seqNum,
}
s.efos = es
es.mu.snap = s
es.mu.readState = d.loadReadState()
d.mu.snapshots.pushBack(s)
}
return es
}
// Transitions this EventuallyFileOnlySnapshot to a file-only snapshot. Requires
// earliestUnflushedSeqNum and vers to correspond to the same Version from the
// current or a past acquisition of db.mu. vers must have been Ref()'d before
// that mutex was released, if it was released.
//
// NB: The caller is expected to check for es.excised before making this
// call.
//
// d.mu must be held when calling this method.
func (es *EventuallyFileOnlySnapshot) transitionToFileOnlySnapshot(vers *version) error {
es.mu.Lock()
select {
case <-es.closed:
vers.UnrefLocked()
es.mu.Unlock()
return ErrClosed
default:
}
if es.mu.snap == nil {
es.mu.Unlock()
panic("pebble: tried to transition an eventually-file-only-snapshot twice")
}
// The caller has already called Ref() on vers.
es.mu.vers = vers
// NB: The callers should have already done a check of es.excised.
oldSnap := es.mu.snap
oldReadState := es.mu.readState
es.mu.snap = nil
es.mu.readState = nil
es.mu.Unlock()
// It's okay to close a snapshot even if iterators are already open on it.
oldReadState.unrefLocked()
return oldSnap.closeLocked()
}
// releaseReadState is called to release reference to a readState when
// es.excised == true. This is to free up memory as quickly as possible; all
// other snapshot resources are kept around until Close() is called. Safe for
// idempotent calls.
//
// d.mu must be held when calling this method.
func (es *EventuallyFileOnlySnapshot) releaseReadState() {
if !es.excised.Load() {
panic("pebble: releasing read state of eventually-file-only-snapshot but was not excised")
}
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.readState != nil {
es.mu.readState.unrefLocked()
es.db.maybeScheduleObsoleteTableDeletionLocked()
}
}
// hasTransitioned returns true if this EFOS has transitioned to a file-only | return es.mu.vers != nil
}
// waitForFlush waits for a flush on any memtables that need to be flushed
// before this EFOS can transition to a file-only snapshot. If this EFOS is
// waiting on a flush of the mutable memtable, it forces a rotation within
// `dur` duration. For immutable memtables, it schedules a flush and waits for
// it to finish.
func (es *EventuallyFileOnlySnapshot) waitForFlush(ctx context.Context, dur time.Duration) error {
es.db.mu.Lock()
defer es.db.mu.Unlock()
earliestUnflushedSeqNum := es.db.getEarliestUnflushedSeqNumLocked()
for earliestUnflushedSeqNum < es.seqNum {
select {
case <-es.closed:
return ErrClosed
case <-ctx.Done():
return ctx.Err()
default:
}
// Check if the current mutable memtable contains keys less than seqNum.
// If so, rotate it.
if es.db.mu.mem.mutable.logSeqNum < es.seqNum && dur.Nanoseconds() > 0 {
es.db.maybeScheduleDelayedFlush(es.db.mu.mem.mutable, dur)
} else {
// Find the last memtable that contains seqNums less than es.seqNum,
// and force a flush on it.
var mem *flushableEntry
for i := range es.db.mu.mem.queue {
if es.db.mu.mem.queue[i].logSeqNum < es.seqNum {
mem = es.db.mu.mem.queue[i]
}
}
mem.flushForced = true
es.db.maybeScheduleFlush()
}
es.db.mu.compact.cond.Wait()
earliestUnflushedSeqNum = es.db.getEarliestUnflushedSeqNumLocked()
}
if es.excised.Load() {
return ErrSnapshotExcised
}
return nil
}
// WaitForFileOnlySnapshot blocks the calling goroutine until this snapshot
// has been converted into a file-only snapshot (i.e. all memtables containing
// keys < seqNum are flushed). A duration can be passed in, and if nonzero,
// a delayed flush will be scheduled at that duration if necessary.
//
// Idempotent; can be called multiple times with no side effects.
func (es *EventuallyFileOnlySnapshot) WaitForFileOnlySnapshot(
ctx context.Context, dur time.Duration,
) error {
if es.hasTransitioned() {
return nil
}
if err := es.waitForFlush(ctx, dur); err != nil {
return err
}
if invariants.Enabled {
// Since we aren't returning an error, we _must_ have transitioned to a
// file-only snapshot by now.
if !es.hasTransitioned() {
panic("expected EFOS to have transitioned to file-only snapshot after flush")
}
}
return nil
}
// Close closes the file-only snapshot and releases all referenced resources.
// Not idempotent.
func (es *EventuallyFileOnlySnapshot) Close() error {
close(es.closed)
es.db.mu.Lock()
defer es.db.mu.Unlock()
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.snap != nil {
if err := es.mu.snap.closeLocked(); err != nil {
return err
}
}
if es.mu.readState != nil {
es.mu.readState.unrefLocked()
es.db.maybeScheduleObsoleteTableDeletionLocked()
}
if es.mu.vers != nil {
es.mu.vers.UnrefLocked()
}
return nil
}
// Get implements the Reader interface.
func (es *EventuallyFileOnlySnapshot) Get(key []byte) (value []byte, closer io.Closer, err error) {
// TODO(jackson): Use getInternal.
iter, err := es.NewIter(nil)
if err != nil {
return nil, nil, err
}
var valid bool
if es.db.opts.Comparer.Split != nil {
valid = iter.SeekPrefixGE(key)
} else {
valid = iter.SeekGE(key)
}
if !valid {
if err = firstError(iter.Error(), iter.Close()); err != nil {
return nil, nil, err
}
return nil, nil, ErrNotFound
}
if !es.db.equal(iter.Key(), key) {
return nil, nil, firstError(iter.Close(), ErrNotFound)
}
return iter.Value(), iter, nil
}
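// Note on the lookup above (descriptive comment only): when the Comparer defines
// Split, SeekPrefixGE keeps the read within the key's prefix; otherwise a plain
// SeekGE plus the equality check emulates a point lookup. The returned iterator
// doubles as the io.Closer that keeps the value slice alive until the caller
// closes it.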
// NewIter returns an iterator that is unpositioned (Iterator.Valid() will
// return false). The iterator can be positioned via a call to SeekGE,
// SeekLT, First or Last.
func (es *EventuallyFileOnlySnapshot) NewIter(o *IterOptions) (*Iterator, error) {
return es.NewIterWithContext(context.Background(), o)
}
// NewIterWithContext is like NewIter, and additionally accepts a context for
// tracing.
func (es *EventuallyFileOnlySnapshot) NewIterWithContext(
ctx context.Context, o *IterOptions,
) (*Iterator, error) {
select {
case <-es.closed:
panic(ErrClosed)
default:
}
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.vers != nil {
sOpts := snapshotIterOpts{seqNum: es.seqNum, vers: es.mu.vers}
return es.db.newIter(ctx, nil /* batch */, sOpts, o), nil
}
if es.excised.Load() {
return nil, ErrSnapshotExcised
}
sOpts := snapshotIterOpts{seqNum: es.seqNum, readState: es.mu.readState}
return es.db.newIter(ctx, nil /* batch */, sOpts, o), nil
}
// ScanInternal scans all internal keys within the specified bounds, truncating
// any rangedels and rangekeys to those bounds. For use when an external user
// needs to be aware of all internal keys that make up a key range.
//
// See comment on db.ScanInternal for the behaviour that can be expected of
// point keys deleted by range dels and keys masked by range keys.
func (es *EventuallyFileOnlySnapshot) ScanInternal(
ctx context.Context,
lower, upper []byte,
visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error,
visitRangeDel func(start, end []byte, seqNum uint64) error,
visitRangeKey func(start, end []byte, keys []rangekey.Key) error,
visitSharedFile func(sst *SharedSSTMeta) error,
) error {
if es.db == nil {
panic(ErrClosed)
}
if es.excised.Load() {
return ErrSnapshotExcised
}
var sOpts snapshotIterOpts
es.mu.Lock()
if es.mu.vers != nil {
sOpts = snapshotIterOpts{
seqNum: es.seqNum,
vers: es.mu.vers,
}
} else {
sOpts = snapshotIterOpts{
seqNum: es.seqNum,
readState: es.mu.readState,
}
}
es.mu.Unlock()
opts := &scanInternalOptions{
IterOptions: IterOptions{
KeyTypes: IterKeyTypePointsAndRanges,
LowerBound: lower,
UpperBound: upper,
},
visitPointKey: visitPointKey,
visitRangeDel: visitRangeDel,
visitRangeKey: visitRangeKey,
visitSharedFile: visitSharedFile,
skipSharedLevels: visitSharedFile != nil,
}
iter := es.db.newInternalIter(sOpts, opts)
defer iter.close()
return scanInternalImpl(ctx, lower, upper, iter, opts)
} | // snapshot.
func (es *EventuallyFileOnlySnapshot) hasTransitioned() bool {
es.mu.Lock()
defer es.mu.Unlock() | random_line_split |
snapshot.go | // Copyright 2012 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
package pebble
import (
"context"
"io"
"math"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/pebble/internal/invariants"
"github.com/cockroachdb/pebble/rangekey"
)
// ErrSnapshotExcised is returned from WaitForFileOnlySnapshot if an excise
// overlapping with one of the EventuallyFileOnlySnapshot's KeyRanges gets
// applied before the transition of that EFOS to a file-only snapshot.
var ErrSnapshotExcised = errors.New("pebble: snapshot excised before conversion to file-only snapshot")
// Snapshot provides a read-only point-in-time view of the DB state.
type Snapshot struct {
// The db the snapshot was created from.
db *DB
seqNum uint64
// Set if part of an EventuallyFileOnlySnapshot.
efos *EventuallyFileOnlySnapshot
// The list the snapshot is linked into.
list *snapshotList
// The next/prev link for the snapshotList doubly-linked list of snapshots.
prev, next *Snapshot
}
var _ Reader = (*Snapshot)(nil)
// Get gets the value for the given key. It returns ErrNotFound if the Snapshot
// does not contain the key.
//
// The caller should not modify the contents of the returned slice, but it is
// safe to modify the contents of the argument after Get returns. The returned
// slice will remain valid until the returned Closer is closed. On success, the
// caller MUST call closer.Close() or a memory leak will occur.
func (s *Snapshot) Get(key []byte) ([]byte, io.Closer, error) {
if s.db == nil {
panic(ErrClosed)
}
return s.db.getInternal(key, nil /* batch */, s)
}
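// Hedged usage sketch (illustrative; not part of the original file). It shows the Get
// contract above: on success the returned Closer must be closed, and the value is only
// valid until then, so it is copied out first. db.NewSnapshot() is assumed to be the
// public constructor for *Snapshot.
func exampleSnapshotGet(db *DB, key []byte) ([]byte, error) {
	snap := db.NewSnapshot()
	defer snap.Close()
	val, closer, err := snap.Get(key)
	if err != nil {
		return nil, err
	}
	// Copy before releasing: val is backed by resources pinned by closer.
	out := append([]byte(nil), val...)
	if err := closer.Close(); err != nil {
		return nil, err
	}
	return out, nil
}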
// NewIter returns an iterator that is unpositioned (Iterator.Valid() will
// return false). The iterator can be positioned via a call to SeekGE,
// SeekLT, First or Last.
func (s *Snapshot) NewIter(o *IterOptions) (*Iterator, error) {
return s.NewIterWithContext(context.Background(), o)
}
// NewIterWithContext is like NewIter, and additionally accepts a context for
// tracing.
func (s *Snapshot) NewIterWithContext(ctx context.Context, o *IterOptions) (*Iterator, error) {
if s.db == nil {
panic(ErrClosed)
}
return s.db.newIter(ctx, nil /* batch */, snapshotIterOpts{seqNum: s.seqNum}, o), nil
}
// ScanInternal scans all internal keys within the specified bounds, truncating
// any rangedels and rangekeys to those bounds. For use when an external user
// needs to be aware of all internal keys that make up a key range.
//
// See comment on db.ScanInternal for the behaviour that can be expected of
// point keys deleted by range dels and keys masked by range keys.
func (s *Snapshot) ScanInternal(
ctx context.Context,
lower, upper []byte,
visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error,
visitRangeDel func(start, end []byte, seqNum uint64) error,
visitRangeKey func(start, end []byte, keys []rangekey.Key) error,
visitSharedFile func(sst *SharedSSTMeta) error,
) error {
if s.db == nil {
panic(ErrClosed)
}
scanInternalOpts := &scanInternalOptions{
visitPointKey: visitPointKey,
visitRangeDel: visitRangeDel,
visitRangeKey: visitRangeKey,
visitSharedFile: visitSharedFile,
skipSharedLevels: visitSharedFile != nil,
IterOptions: IterOptions{
KeyTypes: IterKeyTypePointsAndRanges,
LowerBound: lower,
UpperBound: upper,
},
}
iter := s.db.newInternalIter(snapshotIterOpts{seqNum: s.seqNum}, scanInternalOpts)
defer iter.close()
return scanInternalImpl(ctx, lower, upper, iter, scanInternalOpts)
}
// closeLocked is similar to Close(), except it requires that db.mu be held
// by the caller.
func (s *Snapshot) closeLocked() error {
s.db.mu.snapshots.remove(s)
// If s was the previous earliest snapshot, we might be able to reclaim
// disk space by dropping obsolete records that were pinned by s.
if e := s.db.mu.snapshots.earliest(); e > s.seqNum {
s.db.maybeScheduleCompactionPicker(pickElisionOnly)
}
s.db = nil
return nil
}
// Close closes the snapshot, releasing its resources. Close must be called.
// Failure to do so will result in a tiny memory leak and a large leak of
// resources on disk due to the entries the snapshot is preventing from being
// deleted.
//
// d.mu must NOT be held by the caller.
func (s *Snapshot) Close() error {
db := s.db
if db == nil {
panic(ErrClosed)
}
db.mu.Lock()
defer db.mu.Unlock()
return s.closeLocked()
}
type snapshotList struct {
root Snapshot
}
func (l *snapshotList) init() {
l.root.next = &l.root
l.root.prev = &l.root
}
func (l *snapshotList) empty() bool {
return l.root.next == &l.root
}
func (l *snapshotList) count() int {
if l.empty() {
return 0
}
var count int
for i := l.root.next; i != &l.root; i = i.next {
count++
}
return count
}
func (l *snapshotList) earliest() uint64 {
v := uint64(math.MaxUint64)
if !l.empty() {
v = l.root.next.seqNum
}
return v
}
func (l *snapshotList) toSlice() []uint64 {
if l.empty() {
return nil
}
var results []uint64
for i := l.root.next; i != &l.root; i = i.next {
results = append(results, i.seqNum)
}
return results
}
func (l *snapshotList) pushBack(s *Snapshot) {
if s.list != nil || s.prev != nil || s.next != nil {
panic("pebble: snapshot list is inconsistent")
}
s.prev = l.root.prev
s.prev.next = s
s.next = &l.root
s.next.prev = s
s.list = l
}
func (l *snapshotList) remove(s *Snapshot) {
if s == &l.root {
panic("pebble: cannot remove snapshot list root node")
}
if s.list != l {
panic("pebble: snapshot list is inconsistent")
}
s.prev.next = s.next
s.next.prev = s.prev
s.next = nil // avoid memory leaks
s.prev = nil // avoid memory leaks
s.list = nil // avoid memory leaks
}
// EventuallyFileOnlySnapshot (aka EFOS) provides a read-only point-in-time view
// of the database state, similar to Snapshot. An EventuallyFileOnlySnapshot
// induces less write amplification than Snapshot, at the cost of increased space
// amplification. While a Snapshot may increase write amplification across all
// flushes and compactions for the duration of its lifetime, an
// EventuallyFileOnlySnapshot only incurs that cost for flushes/compactions if
// memtables at the time of EFOS instantiation contained keys that the EFOS is
// interested in (i.e. its protectedRanges). In that case, the EFOS prevents
// elision of keys visible to it, similar to a Snapshot, until those memtables
// are flushed, and once that happens, the "EventuallyFileOnlySnapshot"
// transitions to a file-only snapshot state in which it pins zombie sstables
// like an open Iterator would, without pinning any memtables. Callers that can
// tolerate the increased space amplification of pinning zombie sstables until
// the snapshot is closed may prefer EventuallyFileOnlySnapshots for their
// reduced write amplification. Callers that desire the benefits of the file-only
// state that requires no pinning of memtables should call
// `WaitForFileOnlySnapshot()` (and possibly re-mint an EFOS if it returns
// ErrSnapshotExcised) before relying on the EFOS to keep producing iterators
// with zero write-amp and zero pinning of memtables in memory.
//
// EventuallyFileOnlySnapshots interact with the IngestAndExcise operation in
// subtle ways. Unlike Snapshots, an EFOS guarantees that its read-only
// point-in-time view is unaltered by the excision. However, if a concurrent
// excise were to happen on one of the protectedRanges, WaitForFileOnlySnapshot()
// would return ErrSnapshotExcised and the EFOS would maintain a reference to the
// underlying readState (and by extension, zombie memtables) for its lifetime.
// This could lead to increased memory utilization, which is why callers should
// call WaitForFileOnlySnapshot() if they expect an EFOS to be long-lived.
type EventuallyFileOnlySnapshot struct {
mu struct {
// NB: If both this mutex and db.mu are being grabbed, db.mu should be
// grabbed _before_ grabbing this one.
sync.Mutex
// Either the {snap,readState} fields are set below, or the version is set at
// any given point in time. If a snapshot is referenced, this is not a
// file-only snapshot yet, and if a version is set (and ref'd) this is a
// file-only snapshot.
// The wrapped regular snapshot, if not a file-only snapshot yet. The
// readState has already been ref()d once if it's set.
snap *Snapshot
readState *readState
// The wrapped version reference, if a file-only snapshot.
vers *version
}
// Key ranges to watch for an excise on.
protectedRanges []KeyRange
// excised, if true, signals that the above ranges were excised during the
// lifetime of this snapshot.
excised atomic.Bool
// The db the snapshot was created from.
db *DB
seqNum uint64
closed chan struct{}
}
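// Hedged usage sketch (illustrative; not part of the original file). The constructor
// name NewEventuallyFileOnlySnapshot and the KeyRange argument are assumptions; the rest
// follows the EFOS doc comment above: wait for the file-only transition, and re-mint
// the snapshot if a protected range was excised in the meantime.
func exampleEFOSUsage(ctx context.Context, d *DB, kr KeyRange) error {
	efos := d.NewEventuallyFileOnlySnapshot([]KeyRange{kr})
	defer efos.Close()
	if err := efos.WaitForFileOnlySnapshot(ctx, 10*time.Second); err != nil {
		// ErrSnapshotExcised means a concurrent excise hit a protected range;
		// callers would typically close this EFOS and mint a new one.
		return err
	}
	iter, err := efos.NewIter(nil)
	if err != nil {
		return err
	}
	defer iter.Close()
	for iter.First(); iter.Valid(); iter.Next() {
		_ = iter.Key()
	}
	return iter.Error()
}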
func (d *DB) makeEventuallyFileOnlySnapshot(
keyRanges []KeyRange, internalKeyRanges []internalKeyRange,
) *EventuallyFileOnlySnapshot {
isFileOnly := true
d.mu.Lock()
defer d.mu.Unlock()
seqNum := d.mu.versions.visibleSeqNum.Load()
// Check if any of the keyRanges overlap with a memtable.
for i := range d.mu.mem.queue {
mem := d.mu.mem.queue[i]
if ingestMemtableOverlaps(d.cmp, mem, internalKeyRanges) {
isFileOnly = false
break
}
}
es := &EventuallyFileOnlySnapshot{
db: d,
seqNum: seqNum,
protectedRanges: keyRanges,
closed: make(chan struct{}),
}
if isFileOnly {
es.mu.vers = d.mu.versions.currentVersion()
es.mu.vers.Ref()
} else {
s := &Snapshot{
db: d,
seqNum: seqNum,
}
s.efos = es
es.mu.snap = s
es.mu.readState = d.loadReadState()
d.mu.snapshots.pushBack(s)
}
return es
}
// Transitions this EventuallyFileOnlySnapshot to a file-only snapshot. Requires
// earliestUnflushedSeqNum and vers to correspond to the same Version from the
// current or a past acquisition of db.mu. vers must have been Ref()'d before
// that mutex was released, if it was released.
//
// NB: The caller is expected to check for es.excised before making this
// call.
//
// d.mu must be held when calling this method.
func (es *EventuallyFileOnlySnapshot) transitionToFileOnlySnapshot(vers *version) error {
es.mu.Lock()
select {
case <-es.closed:
vers.UnrefLocked()
es.mu.Unlock()
return ErrClosed
default:
}
if es.mu.snap == nil {
es.mu.Unlock()
panic("pebble: tried to transition an eventually-file-only-snapshot twice")
}
// The caller has already called Ref() on vers.
es.mu.vers = vers
// NB: The callers should have already done a check of es.excised.
oldSnap := es.mu.snap
oldReadState := es.mu.readState
es.mu.snap = nil
es.mu.readState = nil
es.mu.Unlock()
// It's okay to close a snapshot even if iterators are already open on it.
oldReadState.unrefLocked()
return oldSnap.closeLocked()
}
// releaseReadState is called to release reference to a readState when
// es.excised == true. This is to free up memory as quickly as possible; all
// other snapshot resources are kept around until Close() is called. Safe for
// idempotent calls.
//
// d.mu must be held when calling this method.
func (es *EventuallyFileOnlySnapshot) releaseReadState() {
if !es.excised.Load() {
panic("pebble: releasing read state of eventually-file-only-snapshot but was not excised")
}
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.readState != nil {
es.mu.readState.unrefLocked()
es.db.maybeScheduleObsoleteTableDeletionLocked()
}
}
// hasTransitioned returns true if this EFOS has transitioned to a file-only
// snapshot.
func (es *EventuallyFileOnlySnapshot) | () bool {
es.mu.Lock()
defer es.mu.Unlock()
return es.mu.vers != nil
}
// waitForFlush waits for a flush on any memtables that need to be flushed
// before this EFOS can transition to a file-only snapshot. If this EFOS is
// waiting on a flush of the mutable memtable, it forces a rotation within
// `dur` duration. For immutable memtables, it schedules a flush and waits for
// it to finish.
func (es *EventuallyFileOnlySnapshot) waitForFlush(ctx context.Context, dur time.Duration) error {
es.db.mu.Lock()
defer es.db.mu.Unlock()
earliestUnflushedSeqNum := es.db.getEarliestUnflushedSeqNumLocked()
for earliestUnflushedSeqNum < es.seqNum {
select {
case <-es.closed:
return ErrClosed
case <-ctx.Done():
return ctx.Err()
default:
}
// Check if the current mutable memtable contains keys less than seqNum.
// If so, rotate it.
if es.db.mu.mem.mutable.logSeqNum < es.seqNum && dur.Nanoseconds() > 0 {
es.db.maybeScheduleDelayedFlush(es.db.mu.mem.mutable, dur)
} else {
// Find the last memtable that contains seqNums less than es.seqNum,
// and force a flush on it.
var mem *flushableEntry
for i := range es.db.mu.mem.queue {
if es.db.mu.mem.queue[i].logSeqNum < es.seqNum {
mem = es.db.mu.mem.queue[i]
}
}
mem.flushForced = true
es.db.maybeScheduleFlush()
}
es.db.mu.compact.cond.Wait()
earliestUnflushedSeqNum = es.db.getEarliestUnflushedSeqNumLocked()
}
if es.excised.Load() {
return ErrSnapshotExcised
}
return nil
}
// WaitForFileOnlySnapshot blocks the calling goroutine until this snapshot
// has been converted into a file-only snapshot (i.e. all memtables containing
// keys < seqNum are flushed). A duration can be passed in, and if nonzero,
// a delayed flush will be scheduled at that duration if necessary.
//
// Idempotent; can be called multiple times with no side effects.
func (es *EventuallyFileOnlySnapshot) WaitForFileOnlySnapshot(
ctx context.Context, dur time.Duration,
) error {
if es.hasTransitioned() {
return nil
}
if err := es.waitForFlush(ctx, dur); err != nil {
return err
}
if invariants.Enabled {
// Since we aren't returning an error, we _must_ have transitioned to a
// file-only snapshot by now.
if !es.hasTransitioned() {
panic("expected EFOS to have transitioned to file-only snapshot after flush")
}
}
return nil
}
// Close closes the file-only snapshot and releases all referenced resources.
// Not idempotent.
func (es *EventuallyFileOnlySnapshot) Close() error {
close(es.closed)
es.db.mu.Lock()
defer es.db.mu.Unlock()
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.snap != nil {
if err := es.mu.snap.closeLocked(); err != nil {
return err
}
}
if es.mu.readState != nil {
es.mu.readState.unrefLocked()
es.db.maybeScheduleObsoleteTableDeletionLocked()
}
if es.mu.vers != nil {
es.mu.vers.UnrefLocked()
}
return nil
}
// Get implements the Reader interface.
func (es *EventuallyFileOnlySnapshot) Get(key []byte) (value []byte, closer io.Closer, err error) {
// TODO(jackson): Use getInternal.
iter, err := es.NewIter(nil)
if err != nil {
return nil, nil, err
}
var valid bool
if es.db.opts.Comparer.Split != nil {
valid = iter.SeekPrefixGE(key)
} else {
valid = iter.SeekGE(key)
}
if !valid {
if err = firstError(iter.Error(), iter.Close()); err != nil {
return nil, nil, err
}
return nil, nil, ErrNotFound
}
if !es.db.equal(iter.Key(), key) {
return nil, nil, firstError(iter.Close(), ErrNotFound)
}
return iter.Value(), iter, nil
}
// NewIter returns an iterator that is unpositioned (Iterator.Valid() will
// return false). The iterator can be positioned via a call to SeekGE,
// SeekLT, First or Last.
func (es *EventuallyFileOnlySnapshot) NewIter(o *IterOptions) (*Iterator, error) {
return es.NewIterWithContext(context.Background(), o)
}
// NewIterWithContext is like NewIter, and additionally accepts a context for
// tracing.
func (es *EventuallyFileOnlySnapshot) NewIterWithContext(
ctx context.Context, o *IterOptions,
) (*Iterator, error) {
select {
case <-es.closed:
panic(ErrClosed)
default:
}
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.vers != nil {
sOpts := snapshotIterOpts{seqNum: es.seqNum, vers: es.mu.vers}
return es.db.newIter(ctx, nil /* batch */, sOpts, o), nil
}
if es.excised.Load() {
return nil, ErrSnapshotExcised
}
sOpts := snapshotIterOpts{seqNum: es.seqNum, readState: es.mu.readState}
return es.db.newIter(ctx, nil /* batch */, sOpts, o), nil
}
// ScanInternal scans all internal keys within the specified bounds, truncating
// any rangedels and rangekeys to those bounds. For use when an external user
// needs to be aware of all internal keys that make up a key range.
//
// See comment on db.ScanInternal for the behaviour that can be expected of
// point keys deleted by range dels and keys masked by range keys.
func (es *EventuallyFileOnlySnapshot) ScanInternal(
ctx context.Context,
lower, upper []byte,
visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error,
visitRangeDel func(start, end []byte, seqNum uint64) error,
visitRangeKey func(start, end []byte, keys []rangekey.Key) error,
visitSharedFile func(sst *SharedSSTMeta) error,
) error {
if es.db == nil {
panic(ErrClosed)
}
if es.excised.Load() {
return ErrSnapshotExcised
}
var sOpts snapshotIterOpts
es.mu.Lock()
if es.mu.vers != nil {
sOpts = snapshotIterOpts{
seqNum: es.seqNum,
vers: es.mu.vers,
}
} else {
sOpts = snapshotIterOpts{
seqNum: es.seqNum,
readState: es.mu.readState,
}
}
es.mu.Unlock()
opts := &scanInternalOptions{
IterOptions: IterOptions{
KeyTypes: IterKeyTypePointsAndRanges,
LowerBound: lower,
UpperBound: upper,
},
visitPointKey: visitPointKey,
visitRangeDel: visitRangeDel,
visitRangeKey: visitRangeKey,
visitSharedFile: visitSharedFile,
skipSharedLevels: visitSharedFile != nil,
}
iter := es.db.newInternalIter(sOpts, opts)
defer iter.close()
return scanInternalImpl(ctx, lower, upper, iter, opts)
}
| hasTransitioned | identifier_name |
snapshot.go | // Copyright 2012 The LevelDB-Go and Pebble Authors. All rights reserved. Use
// of this source code is governed by a BSD-style license that can be found in
// the LICENSE file.
package pebble
import (
"context"
"io"
"math"
"sync"
"sync/atomic"
"time"
"github.com/cockroachdb/errors"
"github.com/cockroachdb/pebble/internal/invariants"
"github.com/cockroachdb/pebble/rangekey"
)
// ErrSnapshotExcised is returned from WaitForFileOnlySnapshot if an excise
// overlapping with one of the EventuallyFileOnlySnapshot's KeyRanges gets
// applied before the transition of that EFOS to a file-only snapshot.
var ErrSnapshotExcised = errors.New("pebble: snapshot excised before conversion to file-only snapshot")
// Snapshot provides a read-only point-in-time view of the DB state.
type Snapshot struct {
// The db the snapshot was created from.
db *DB
seqNum uint64
// Set if part of an EventuallyFileOnlySnapshot.
efos *EventuallyFileOnlySnapshot
// The list the snapshot is linked into.
list *snapshotList
// The next/prev link for the snapshotList doubly-linked list of snapshots.
prev, next *Snapshot
}
var _ Reader = (*Snapshot)(nil)
// Get gets the value for the given key. It returns ErrNotFound if the Snapshot
// does not contain the key.
//
// The caller should not modify the contents of the returned slice, but it is
// safe to modify the contents of the argument after Get returns. The returned
// slice will remain valid until the returned Closer is closed. On success, the
// caller MUST call closer.Close() or a memory leak will occur.
func (s *Snapshot) Get(key []byte) ([]byte, io.Closer, error) {
if s.db == nil {
panic(ErrClosed)
}
return s.db.getInternal(key, nil /* batch */, s)
}
// NewIter returns an iterator that is unpositioned (Iterator.Valid() will
// return false). The iterator can be positioned via a call to SeekGE,
// SeekLT, First or Last.
func (s *Snapshot) NewIter(o *IterOptions) (*Iterator, error) {
return s.NewIterWithContext(context.Background(), o)
}
// NewIterWithContext is like NewIter, and additionally accepts a context for
// tracing.
func (s *Snapshot) NewIterWithContext(ctx context.Context, o *IterOptions) (*Iterator, error) {
if s.db == nil {
panic(ErrClosed)
}
return s.db.newIter(ctx, nil /* batch */, snapshotIterOpts{seqNum: s.seqNum}, o), nil
}
// ScanInternal scans all internal keys within the specified bounds, truncating
// any rangedels and rangekeys to those bounds. For use when an external user
// needs to be aware of all internal keys that make up a key range.
//
// See comment on db.ScanInternal for the behaviour that can be expected of
// point keys deleted by range dels and keys masked by range keys.
func (s *Snapshot) ScanInternal(
ctx context.Context,
lower, upper []byte,
visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error,
visitRangeDel func(start, end []byte, seqNum uint64) error,
visitRangeKey func(start, end []byte, keys []rangekey.Key) error,
visitSharedFile func(sst *SharedSSTMeta) error,
) error {
if s.db == nil {
panic(ErrClosed)
}
scanInternalOpts := &scanInternalOptions{
visitPointKey: visitPointKey,
visitRangeDel: visitRangeDel,
visitRangeKey: visitRangeKey,
visitSharedFile: visitSharedFile,
skipSharedLevels: visitSharedFile != nil,
IterOptions: IterOptions{
KeyTypes: IterKeyTypePointsAndRanges,
LowerBound: lower,
UpperBound: upper,
},
}
iter := s.db.newInternalIter(snapshotIterOpts{seqNum: s.seqNum}, scanInternalOpts)
defer iter.close()
return scanInternalImpl(ctx, lower, upper, iter, scanInternalOpts)
}
// closeLocked is similar to Close(), except it requires that db.mu be held
// by the caller.
func (s *Snapshot) closeLocked() error {
s.db.mu.snapshots.remove(s)
// If s was the previous earliest snapshot, we might be able to reclaim
// disk space by dropping obsolete records that were pinned by s.
if e := s.db.mu.snapshots.earliest(); e > s.seqNum {
s.db.maybeScheduleCompactionPicker(pickElisionOnly)
}
s.db = nil
return nil
}
// Close closes the snapshot, releasing its resources. Close must be called.
// Failure to do so will result in a tiny memory leak and a large leak of
// resources on disk due to the entries the snapshot is preventing from being
// deleted.
//
// d.mu must NOT be held by the caller.
func (s *Snapshot) Close() error {
db := s.db
if db == nil {
panic(ErrClosed)
}
db.mu.Lock()
defer db.mu.Unlock()
return s.closeLocked()
}
type snapshotList struct {
root Snapshot
}
func (l *snapshotList) init() {
l.root.next = &l.root
l.root.prev = &l.root
}
func (l *snapshotList) empty() bool {
return l.root.next == &l.root
}
func (l *snapshotList) count() int {
if l.empty() {
return 0
}
var count int
for i := l.root.next; i != &l.root; i = i.next {
count++
}
return count
}
func (l *snapshotList) earliest() uint64 {
v := uint64(math.MaxUint64)
if !l.empty() {
v = l.root.next.seqNum
}
return v
}
func (l *snapshotList) toSlice() []uint64 {
if l.empty() {
return nil
}
var results []uint64
for i := l.root.next; i != &l.root; i = i.next {
results = append(results, i.seqNum)
}
return results
}
func (l *snapshotList) pushBack(s *Snapshot) {
if s.list != nil || s.prev != nil || s.next != nil {
panic("pebble: snapshot list is inconsistent")
}
s.prev = l.root.prev
s.prev.next = s
s.next = &l.root
s.next.prev = s
s.list = l
}
func (l *snapshotList) remove(s *Snapshot) {
if s == &l.root {
panic("pebble: cannot remove snapshot list root node")
}
if s.list != l {
panic("pebble: snapshot list is inconsistent")
}
s.prev.next = s.next
s.next.prev = s.prev
s.next = nil // avoid memory leaks
s.prev = nil // avoid memory leaks
s.list = nil // avoid memory leaks
}
// EventuallyFileOnlySnapshot (aka EFOS) provides a read-only point-in-time view
// of the database state, similar to Snapshot. An EventuallyFileOnlySnapshot
// induces less write amplification than Snapshot, at the cost of increased space
// amplification. While a Snapshot may increase write amplification across all
// flushes and compactions for the duration of its lifetime, an
// EventuallyFileOnlySnapshot only incurs that cost for flushes/compactions if
// memtables at the time of EFOS instantiation contained keys that the EFOS is
// interested in (i.e. its protectedRanges). In that case, the EFOS prevents
// elision of keys visible to it, similar to a Snapshot, until those memtables
// are flushed, and once that happens, the "EventuallyFileOnlySnapshot"
// transitions to a file-only snapshot state in which it pins zombie sstables
// like an open Iterator would, without pinning any memtables. Callers that can
// tolerate the increased space amplification of pinning zombie sstables until
// the snapshot is closed may prefer EventuallyFileOnlySnapshots for their
// reduced write amplification. Callers that desire the benefits of the file-only
// state that requires no pinning of memtables should call
// `WaitForFileOnlySnapshot()` (and possibly re-mint an EFOS if it returns
// ErrSnapshotExcised) before relying on the EFOS to keep producing iterators
// with zero write-amp and zero pinning of memtables in memory.
//
// EventuallyFileOnlySnapshots interact with the IngestAndExcise operation in
// subtle ways. Unlike Snapshots, an EFOS guarantees that its read-only
// point-in-time view is unaltered by the excision. However, if a concurrent
// excise were to happen on one of the protectedRanges, WaitForFileOnlySnapshot()
// would return ErrSnapshotExcised and the EFOS would maintain a reference to the
// underlying readState (and by extension, zombie memtables) for its lifetime.
// This could lead to increased memory utilization, which is why callers should
// call WaitForFileOnlySnapshot() if they expect an EFOS to be long-lived.
type EventuallyFileOnlySnapshot struct {
mu struct {
// NB: If both this mutex and db.mu are being grabbed, db.mu should be
// grabbed _before_ grabbing this one.
sync.Mutex
// Either the {snap,readState} fields are set below, or the version is set at
// any given point in time. If a snapshot is referenced, this is not a
// file-only snapshot yet, and if a version is set (and ref'd) this is a
// file-only snapshot.
// The wrapped regular snapshot, if not a file-only snapshot yet. The
// readState has already been ref()d once if it's set.
snap *Snapshot
readState *readState
// The wrapped version reference, if a file-only snapshot.
vers *version
}
// Key ranges to watch for an excise on.
protectedRanges []KeyRange
// excised, if true, signals that the above ranges were excised during the
// lifetime of this snapshot.
excised atomic.Bool
// The db the snapshot was created from.
db *DB
seqNum uint64
closed chan struct{}
}
func (d *DB) makeEventuallyFileOnlySnapshot(
keyRanges []KeyRange, internalKeyRanges []internalKeyRange,
) *EventuallyFileOnlySnapshot {
isFileOnly := true
d.mu.Lock()
defer d.mu.Unlock()
seqNum := d.mu.versions.visibleSeqNum.Load()
// Check if any of the keyRanges overlap with a memtable.
for i := range d.mu.mem.queue {
mem := d.mu.mem.queue[i]
if ingestMemtableOverlaps(d.cmp, mem, internalKeyRanges) {
isFileOnly = false
break
}
}
es := &EventuallyFileOnlySnapshot{
db: d,
seqNum: seqNum,
protectedRanges: keyRanges,
closed: make(chan struct{}),
}
if isFileOnly {
es.mu.vers = d.mu.versions.currentVersion()
es.mu.vers.Ref()
} else {
s := &Snapshot{
db: d,
seqNum: seqNum,
}
s.efos = es
es.mu.snap = s
es.mu.readState = d.loadReadState()
d.mu.snapshots.pushBack(s)
}
return es
}
// Transitions this EventuallyFileOnlySnapshot to a file-only snapshot. Requires
// earliestUnflushedSeqNum and vers to correspond to the same Version from the
// current or a past acquisition of db.mu. vers must have been Ref()'d before
// that mutex was released, if it was released.
//
// NB: The caller is expected to check for es.excised before making this
// call.
//
// d.mu must be held when calling this method.
func (es *EventuallyFileOnlySnapshot) transitionToFileOnlySnapshot(vers *version) error {
es.mu.Lock()
select {
case <-es.closed:
vers.UnrefLocked()
es.mu.Unlock()
return ErrClosed
default:
}
if es.mu.snap == nil {
es.mu.Unlock()
panic("pebble: tried to transition an eventually-file-only-snapshot twice")
}
// The caller has already called Ref() on vers.
es.mu.vers = vers
// NB: The callers should have already done a check of es.excised.
oldSnap := es.mu.snap
oldReadState := es.mu.readState
es.mu.snap = nil
es.mu.readState = nil
es.mu.Unlock()
// It's okay to close a snapshot even if iterators are already open on it.
oldReadState.unrefLocked()
return oldSnap.closeLocked()
}
// releaseReadState is called to release reference to a readState when
// es.excised == true. This is to free up memory as quickly as possible; all
// other snapshot resources are kept around until Close() is called. Safe for
// idempotent calls.
//
// d.mu must be held when calling this method.
func (es *EventuallyFileOnlySnapshot) releaseReadState() {
if !es.excised.Load() {
panic("pebble: releasing read state of eventually-file-only-snapshot but was not excised")
}
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.readState != nil {
es.mu.readState.unrefLocked()
es.db.maybeScheduleObsoleteTableDeletionLocked()
}
}
// hasTransitioned returns true if this EFOS has transitioned to a file-only
// snapshot.
func (es *EventuallyFileOnlySnapshot) hasTransitioned() bool {
es.mu.Lock()
defer es.mu.Unlock()
return es.mu.vers != nil
}
// waitForFlush waits for a flush on any memtables that need to be flushed
// before this EFOS can transition to a file-only snapshot. If this EFOS is
// waiting on a flush of the mutable memtable, it forces a rotation within
// `dur` duration. For immutable memtables, it schedules a flush and waits for
// it to finish.
func (es *EventuallyFileOnlySnapshot) waitForFlush(ctx context.Context, dur time.Duration) error {
es.db.mu.Lock()
defer es.db.mu.Unlock()
earliestUnflushedSeqNum := es.db.getEarliestUnflushedSeqNumLocked()
for earliestUnflushedSeqNum < es.seqNum {
select {
case <-es.closed:
return ErrClosed
case <-ctx.Done():
return ctx.Err()
default:
}
// Check if the current mutable memtable contains keys less than seqNum.
// If so, rotate it.
if es.db.mu.mem.mutable.logSeqNum < es.seqNum && dur.Nanoseconds() > 0 {
es.db.maybeScheduleDelayedFlush(es.db.mu.mem.mutable, dur)
} else {
// Find the last memtable that contains seqNums less than es.seqNum,
// and force a flush on it.
var mem *flushableEntry
for i := range es.db.mu.mem.queue {
if es.db.mu.mem.queue[i].logSeqNum < es.seqNum {
mem = es.db.mu.mem.queue[i]
}
}
mem.flushForced = true
es.db.maybeScheduleFlush()
}
es.db.mu.compact.cond.Wait()
earliestUnflushedSeqNum = es.db.getEarliestUnflushedSeqNumLocked()
}
if es.excised.Load() {
return ErrSnapshotExcised
}
return nil
}
// WaitForFileOnlySnapshot blocks the calling goroutine until this snapshot
// has been converted into a file-only snapshot (i.e. all memtables containing
// keys < seqNum are flushed). A duration can be passed in, and if nonzero,
// a delayed flush will be scheduled at that duration if necessary.
//
// Idempotent; can be called multiple times with no side effects.
func (es *EventuallyFileOnlySnapshot) WaitForFileOnlySnapshot(
ctx context.Context, dur time.Duration,
) error {
if es.hasTransitioned() {
return nil
}
if err := es.waitForFlush(ctx, dur); err != nil {
return err
}
if invariants.Enabled {
// Since we aren't returning an error, we _must_ have transitioned to a
// file-only snapshot by now.
if !es.hasTransitioned() {
panic("expected EFOS to have transitioned to file-only snapshot after flush")
}
}
return nil
}
// Close closes the file-only snapshot and releases all referenced resources.
// Not idempotent.
func (es *EventuallyFileOnlySnapshot) Close() error {
close(es.closed)
es.db.mu.Lock()
defer es.db.mu.Unlock()
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.snap != nil {
if err := es.mu.snap.closeLocked(); err != nil {
return err
}
}
if es.mu.readState != nil {
es.mu.readState.unrefLocked()
es.db.maybeScheduleObsoleteTableDeletionLocked()
}
if es.mu.vers != nil {
es.mu.vers.UnrefLocked()
}
return nil
}
// Get implements the Reader interface.
func (es *EventuallyFileOnlySnapshot) Get(key []byte) (value []byte, closer io.Closer, err error) {
// TODO(jackson): Use getInternal.
iter, err := es.NewIter(nil)
if err != nil {
return nil, nil, err
}
var valid bool
if es.db.opts.Comparer.Split != nil {
valid = iter.SeekPrefixGE(key)
} else {
valid = iter.SeekGE(key)
}
if !valid {
if err = firstError(iter.Error(), iter.Close()); err != nil {
return nil, nil, err
}
return nil, nil, ErrNotFound
}
if !es.db.equal(iter.Key(), key) |
return iter.Value(), iter, nil
}
// NewIter returns an iterator that is unpositioned (Iterator.Valid() will
// return false). The iterator can be positioned via a call to SeekGE,
// SeekLT, First or Last.
func (es *EventuallyFileOnlySnapshot) NewIter(o *IterOptions) (*Iterator, error) {
return es.NewIterWithContext(context.Background(), o)
}
// NewIterWithContext is like NewIter, and additionally accepts a context for
// tracing.
func (es *EventuallyFileOnlySnapshot) NewIterWithContext(
ctx context.Context, o *IterOptions,
) (*Iterator, error) {
select {
case <-es.closed:
panic(ErrClosed)
default:
}
es.mu.Lock()
defer es.mu.Unlock()
if es.mu.vers != nil {
sOpts := snapshotIterOpts{seqNum: es.seqNum, vers: es.mu.vers}
return es.db.newIter(ctx, nil /* batch */, sOpts, o), nil
}
if es.excised.Load() {
return nil, ErrSnapshotExcised
}
sOpts := snapshotIterOpts{seqNum: es.seqNum, readState: es.mu.readState}
return es.db.newIter(ctx, nil /* batch */, sOpts, o), nil
}
// ScanInternal scans all internal keys within the specified bounds, truncating
// any rangedels and rangekeys to those bounds. For use when an external user
// needs to be aware of all internal keys that make up a key range.
//
// See comment on db.ScanInternal for the behaviour that can be expected of
// point keys deleted by range dels and keys masked by range keys.
func (es *EventuallyFileOnlySnapshot) ScanInternal(
ctx context.Context,
lower, upper []byte,
visitPointKey func(key *InternalKey, value LazyValue, iterInfo IteratorLevel) error,
visitRangeDel func(start, end []byte, seqNum uint64) error,
visitRangeKey func(start, end []byte, keys []rangekey.Key) error,
visitSharedFile func(sst *SharedSSTMeta) error,
) error {
if es.db == nil {
panic(ErrClosed)
}
if es.excised.Load() {
return ErrSnapshotExcised
}
var sOpts snapshotIterOpts
es.mu.Lock()
if es.mu.vers != nil {
sOpts = snapshotIterOpts{
seqNum: es.seqNum,
vers: es.mu.vers,
}
} else {
sOpts = snapshotIterOpts{
seqNum: es.seqNum,
readState: es.mu.readState,
}
}
es.mu.Unlock()
opts := &scanInternalOptions{
IterOptions: IterOptions{
KeyTypes: IterKeyTypePointsAndRanges,
LowerBound: lower,
UpperBound: upper,
},
visitPointKey: visitPointKey,
visitRangeDel: visitRangeDel,
visitRangeKey: visitRangeKey,
visitSharedFile: visitSharedFile,
skipSharedLevels: visitSharedFile != nil,
}
iter := es.db.newInternalIter(sOpts, opts)
defer iter.close()
return scanInternalImpl(ctx, lower, upper, iter, opts)
}
| {
return nil, nil, firstError(iter.Close(), ErrNotFound)
} | conditional_block |
conteng_docker.go | /*
MIT License
Copyright (c) 2018 Max Kuznetsov <syhpoon@syhpoon.ca>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package conteng
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
"strings"
"sync"
"github.com/docker/distribution/reference"
hclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/docker/registry"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
"github.com/syhpoon/xenvman/pkg/lib"
"github.com/syhpoon/xenvman/pkg/logger"
)
var dockerLog = logger.GetLogger("xenvman.pkg.conteng.conteng_docker")
type DockerEngineParams struct {
}
type DockerEngine struct {
cl *client.Client
params DockerEngineParams
subNetOct1 int
subNetOct2 int
subNetMu sync.Mutex
}
func NewDockerEngine(params DockerEngineParams) (*DockerEngine, error) {
var opts []func(client2 *client.Client) error
cli, err := client.NewClientWithOpts(opts...)
if err != nil {
return nil, errors.Wrapf(err, "Error creating docker client")
}
cli.NegotiateAPIVersion(context.Background())
dockerLog.Debugf("Docker engine client created")
return &DockerEngine{
cl: cli,
params: params,
subNetOct1: 0,
subNetOct2: 0,
}, nil
}
func (de *DockerEngine) CreateNetwork(ctx context.Context,
name string) (NetworkId, string, error) {
sub, err := de.getSubNet()
if err != nil {
return "", "", err
}
netParams := types.NetworkCreate{
CheckDuplicate: true,
Driver: "bridge",
IPAM: &network.IPAM{
Config: []network.IPAMConfig{
{
Subnet: sub,
IPRange: sub,
},
},
},
}
r, err := de.cl.NetworkCreate(ctx, name, netParams)
if err != nil {
return "", "", errors.Wrapf(err, "Error creating docker network: %s", sub)
}
dockerLog.Debugf("Network created: %s - %s :: %s", name, r.ID, sub)
return r.ID, sub, nil
}
// Run Docker container
func (de *DockerEngine) RunContainer(ctx context.Context, name, tag string,
params RunContainerParams) (string, error) {
// Hosts
var hosts []string
for host, ip := range params.Hosts {
hosts = append(hosts, fmt.Sprintf("%s:%s", host, ip))
} | for contPort, hostPort := range params.Ports {
rawPorts = append(rawPorts, fmt.Sprintf("%d:%d", hostPort, contPort))
}
ports, bindings, err := nat.ParsePortSpecs(rawPorts)
if err != nil {
return "", errors.Wrapf(err, "Error parsing ports for %s", name)
}
// Environ
var environ []string
for k, v := range params.Environ {
environ = append(environ, fmt.Sprintf("%s=%s", k, v))
}
// Mounts
var mounts []mount.Mount
for _, fileMount := range params.FileMounts {
mounts = append(mounts, mount.Mount{
Type: "bind",
Source: fileMount.HostFile,
Target: fileMount.ContainerFile,
ReadOnly: fileMount.Readonly,
})
}
var dns []string
if params.DiscoverDNS != "" {
dns = append(dns, params.DiscoverDNS)
}
hostCont := &container.HostConfig{
NetworkMode: container.NetworkMode(params.NetworkId),
ExtraHosts: hosts,
AutoRemove: false,
DNS: dns,
DNSSearch: []string{"xenv"},
RestartPolicy: container.RestartPolicy{Name: "on-failure"},
PortBindings: bindings,
Mounts: mounts,
}
netConf := &network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
params.NetworkId: {
IPAMConfig: &network.EndpointIPAMConfig{
IPv4Address: params.IP,
},
},
},
}
r, err := de.cl.ContainerCreate(ctx, &container.Config{
Hostname: name,
AttachStdout: true,
AttachStderr: true,
Image: tag,
ExposedPorts: ports,
Env: environ,
Cmd: params.Cmd,
Entrypoint: params.Entrypoint,
}, hostCont, netConf, lib.NewIdShort())
if err != nil {
return "", errors.Wrapf(err, "Error creating container %s", tag)
}
err = de.cl.ContainerStart(ctx, r.ID, types.ContainerStartOptions{})
if err != nil {
return "", errors.Wrapf(err, "Error starting container: %s", tag)
}
dockerLog.Debugf("Container started: %s, network=%s", tag, params.NetworkId)
return r.ID, nil
}
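// Hedged caller sketch (illustrative only; the field types in RunContainerParams are
// assumptions inferred from how they are used above, e.g. Ports maps container port to
// host port and Hosts maps hostname to IP):
//
//	params := RunContainerParams{
//		NetworkId:   netId,
//		IP:          "10.0.0.10",
//		Hosts:       map[string]string{"db.xenv": "10.0.0.11"},
//		Ports:       map[uint16]uint16{8080: 18080}, // container port -> host port
//		Environ:     map[string]string{"MODE": "test"},
//		DiscoverDNS: "10.0.0.2",
//	}
//	id, err := engine.RunContainer(ctx, "api", "myimage:latest", params)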
func (de *DockerEngine) RemoveContainer(ctx context.Context, id string) error {
return de.cl.ContainerRemove(ctx, id,
types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
})
}
func (de *DockerEngine) StopContainer(ctx context.Context, id string) error {
return de.cl.ContainerKill(ctx, id, "INT")
}
func (de *DockerEngine) RestartContainer(ctx context.Context, id string) error {
return de.cl.ContainerStart(ctx, id, types.ContainerStartOptions{})
}
func (de *DockerEngine) RemoveNetwork(ctx context.Context, id string) error {
return de.cl.NetworkRemove(ctx, id)
}
func (de *DockerEngine) BuildImage(ctx context.Context, imgName string,
buildContext io.Reader) error {
opts := types.ImageBuildOptions{
NetworkMode: "bridge",
Tags: []string{imgName},
Remove: true,
ForceRemove: true,
SuppressOutput: true,
NoCache: true,
PullParent: true,
}
r, err := de.cl.ImageBuild(ctx, buildContext, opts)
if r.Body != nil {
defer r.Body.Close()
// Check server response
if rerr := de.isErrorResponse(r.Body); rerr != nil {
return errors.Errorf("Error from Docker server: %s", rerr)
}
}
if err == nil {
dockerLog.Debugf("Image built: %s", imgName)
}
return err
}
func (de *DockerEngine) RemoveImage(ctx context.Context, imgName string) error {
opts := types.ImageRemoveOptions{
Force: true,
PruneChildren: true,
}
_, err := de.cl.ImageRemove(ctx, imgName, opts)
if err == nil {
dockerLog.Debugf("Image removed: %s", imgName)
}
return err
}
func (de *DockerEngine) FetchImage(ctx context.Context, imgName string) error {
out, err := de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{})
var auth string
if err != nil {
// Retry with auth
auth, err = de.getAuthForImage(imgName)
if err != nil {
return err
}
out, err = de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{
RegistryAuth: auth,
})
}
if err == nil {
dockerLog.Debugf("Image fetched: %s", imgName)
}
if out != nil {
_, _ = io.Copy(ioutil.Discard, out)
}
return err
}
func (de *DockerEngine) GetImagePorts(ctx context.Context,
tag string) ([]uint16, error) {
r, _, err := de.cl.ImageInspectWithRaw(ctx, tag)
if err != nil {
return nil, errors.Wrapf(err, "Error inspecting image %s", tag)
}
var ports []uint16
for p := range r.Config.ExposedPorts {
ports = append(ports, uint16(p.Int()))
}
return ports, nil
}
func (de *DockerEngine) Terminate() {
de.cl.Close()
}
func (de *DockerEngine) isErrorResponse(r io.Reader) error {
data, err := ioutil.ReadAll(r)
if err != nil {
return err
}
split := bytes.Split(data, []byte("\n"))
type errResp struct {
Error string
}
for i := range split {
e := errResp{}
if err := json.Unmarshal(split[i], &e); err == nil && e.Error != "" {
return errors.New(e.Error)
}
}
return nil
}
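// Illustrative note (not from the original file): the Docker daemon streams
// newline-delimited JSON, and an error line typically looks like the sample below,
// which the loop above surfaces as a Go error:
//
//	{"errorDetail":{"message":"pull access denied"},"error":"pull access denied"}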
// TODO: This should probably be made more robust at some point
func (de *DockerEngine) getSubNet() (string, error) {
de.subNetMu.Lock()
defer de.subNetMu.Unlock()
addrs, err := net.InterfaceAddrs()
if err != nil {
return "", errors.Wrap(err, "Error getting network addresses")
}
var nets []*net.IPNet
for _, addr := range addrs {
dockerLog.Debugf("Inspecting interface %s", addr.String())
_, n, err := net.ParseCIDR(addr.String())
if err != nil {
dockerLog.Warningf("Error parsing address: %s", addr.String())
continue
}
nets = append(nets, n)
}
netaddr := func() string {
tpl := "10.%d.%d.0/24"
return fmt.Sprintf(tpl, de.subNetOct1, de.subNetOct2)
}
_, pnet, _ := net.ParseCIDR(netaddr())
for {
// Find non-overlapping network
overlap := false
for _, n := range nets {
if lib.NetsOverlap(pnet, n) {
overlap = true
break
}
}
if overlap {
de.subNetOct2 += 1
if de.subNetOct2 > 255 {
de.subNetOct1 += 1
de.subNetOct2 = 0
}
_, pnet, _ = net.ParseCIDR(netaddr())
} else {
break
}
}
return netaddr(), nil
}
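// Hedged sketch (not the actual xenvman lib implementation): CIDR blocks are either
// disjoint or nested, so an overlap check equivalent to lib.NetsOverlap can be written
// with the standard library by testing whether either network contains the other's
// base address.
func netsOverlapSketch(a, b *net.IPNet) bool {
	return a.Contains(b.IP) || b.Contains(a.IP)
}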
// TODO: Pretty naive implementation and will likely not work in all cases
func (de *DockerEngine) getAuthForImage(imageName string) (string, error) {
type ConfigFile struct {
AuthConfigs map[string]types.AuthConfig `json:"auths"`
CredHelpers map[string]string `json:"credHelpers"`
}
ref, err := reference.ParseNormalizedNamed(imageName)
if err != nil {
return "", errors.Wrapf(err, "Error parsing image name %s", imageName)
}
repoInfo, err := registry.ParseRepositoryInfo(ref)
if err != nil {
return "", errors.Wrapf(err, "Error parsing repository %s", imageName)
}
file := filepath.Join(os.Getenv("HOME"), ".docker", "config.json")
b, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "Error reading docker config %s", file)
}
conf := &ConfigFile{}
err = json.Unmarshal(b, conf)
if err != nil {
return "", errors.Wrapf(err, "Error parsing docker config %s", file)
}
srv := repoInfo.Index.Name
ac := &types.AuthConfig{}
var variants []string
if srv == "docker.io" || srv == "index.docker.io" {
variants = []string{"https://docker.io", "https://index.docker.io/v1/"}
} else {
variants = []string{srv}
}
var finalErr error
for _, host := range variants {
if credHelper, ok := conf.CredHelpers[host]; ok {
dockerLog.Infof("Using '%s' credential helper for %s", credHelper, host)
prog := fmt.Sprintf("docker-credential-%s", credHelper)
p := hclient.NewShellProgramFunc(prog)
creds, err := hclient.Get(p, host)
if err != nil {
finalErr = errors.Wrapf(err, "Error running %s", prog)
continue
}
ac.Username = creds.Username
ac.Password = creds.Secret
} else if authConf, ok := conf.AuthConfigs[host]; ok {
dockerLog.Infof("Using auth section for %s", host)
if authConf.Username != "" {
ac.Username = authConf.Username
}
if authConf.Password != "" {
ac.Password = authConf.Password
}
if ac.Username == "" {
auth, err := base64.StdEncoding.DecodeString(authConf.Auth)
if err != nil {
finalErr = errors.Wrap(err, "Error decoding auth entry")
continue
}
split := strings.SplitN(string(auth), ":", 2)
if len(split) < 2 {
finalErr = errors.Errorf("Invalid auth entry format: %s", auth)
continue
}
ac.Username = split[0]
ac.Password = split[1]
}
} else {
continue
}
ac.ServerAddress = host
b, _ = json.Marshal(ac)
return base64.StdEncoding.EncodeToString(b), nil
}
return "", finalErr
} |
// Ports
var rawPorts []string
| random_line_split |
conteng_docker.go | /*
MIT License
Copyright (c) 2018 Max Kuznetsov <syhpoon@syhpoon.ca>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package conteng
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
"strings"
"sync"
"github.com/docker/distribution/reference"
hclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/docker/registry"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
"github.com/syhpoon/xenvman/pkg/lib"
"github.com/syhpoon/xenvman/pkg/logger"
)
var dockerLog = logger.GetLogger("xenvman.pkg.conteng.conteng_docker")
type DockerEngineParams struct {
}
type DockerEngine struct {
cl *client.Client
params DockerEngineParams
subNetOct1 int
subNetOct2 int
subNetMu sync.Mutex
}
func NewDockerEngine(params DockerEngineParams) (*DockerEngine, error) {
var opts []func(client2 *client.Client) error
cli, err := client.NewClientWithOpts(opts...)
if err != nil {
return nil, errors.Wrapf(err, "Error creating docker client")
}
cli.NegotiateAPIVersion(context.Background())
dockerLog.Debugf("Docker engine client created")
return &DockerEngine{
cl: cli,
params: params,
subNetOct1: 0,
subNetOct2: 0,
}, nil
}
func (de *DockerEngine) CreateNetwork(ctx context.Context,
name string) (NetworkId, string, error) {
sub, err := de.getSubNet()
if err != nil {
return "", "", err
}
netParams := types.NetworkCreate{
CheckDuplicate: true,
Driver: "bridge",
IPAM: &network.IPAM{
Config: []network.IPAMConfig{
{
Subnet: sub,
IPRange: sub,
},
},
},
}
r, err := de.cl.NetworkCreate(ctx, name, netParams)
if err != nil {
return "", "", errors.Wrapf(err, "Error creating docker network: %s", sub)
}
dockerLog.Debugf("Network created: %s - %s :: %s", name, r.ID, sub)
return r.ID, sub, nil
}
// Run Docker container
func (de *DockerEngine) RunContainer(ctx context.Context, name, tag string,
params RunContainerParams) (string, error) {
// Hosts
var hosts []string
for host, ip := range params.Hosts {
hosts = append(hosts, fmt.Sprintf("%s:%s", host, ip))
}
// Ports
var rawPorts []string
for contPort, hostPort := range params.Ports {
rawPorts = append(rawPorts, fmt.Sprintf("%d:%d", hostPort, contPort))
}
ports, bindings, err := nat.ParsePortSpecs(rawPorts)
if err != nil {
return "", errors.Wrapf(err, "Error parsing ports for %s", name)
}
// Environ
var environ []string
for k, v := range params.Environ {
environ = append(environ, fmt.Sprintf("%s=%s", k, v))
}
// Mounts
var mounts []mount.Mount
for _, fileMount := range params.FileMounts {
mounts = append(mounts, mount.Mount{
Type: "bind",
Source: fileMount.HostFile,
Target: fileMount.ContainerFile,
ReadOnly: fileMount.Readonly,
})
}
var dns []string
if params.DiscoverDNS != "" {
dns = append(dns, params.DiscoverDNS)
}
hostCont := &container.HostConfig{
NetworkMode: container.NetworkMode(params.NetworkId),
ExtraHosts: hosts,
AutoRemove: false,
DNS: dns,
DNSSearch: []string{"xenv"},
RestartPolicy: container.RestartPolicy{Name: "on-failure"},
PortBindings: bindings,
Mounts: mounts,
}
netConf := &network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
params.NetworkId: {
IPAMConfig: &network.EndpointIPAMConfig{
IPv4Address: params.IP,
},
},
},
}
r, err := de.cl.ContainerCreate(ctx, &container.Config{
Hostname: name,
AttachStdout: true,
AttachStderr: true,
Image: tag,
ExposedPorts: ports,
Env: environ,
Cmd: params.Cmd,
Entrypoint: params.Entrypoint,
}, hostCont, netConf, lib.NewIdShort())
if err != nil {
return "", errors.Wrapf(err, "Error creating container %s", tag)
}
err = de.cl.ContainerStart(ctx, r.ID, types.ContainerStartOptions{})
if err != nil {
return "", errors.Wrapf(err, "Error starting container: %s", tag)
}
dockerLog.Debugf("Container started: %s, network=%s", tag, params.NetworkId)
return r.ID, nil
}
func (de *DockerEngine) RemoveContainer(ctx context.Context, id string) error {
return de.cl.ContainerRemove(ctx, id,
types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
})
}
func (de *DockerEngine) StopContainer(ctx context.Context, id string) error {
return de.cl.ContainerKill(ctx, id, "INT")
}
func (de *DockerEngine) RestartContainer(ctx context.Context, id string) error {
return de.cl.ContainerStart(ctx, id, types.ContainerStartOptions{})
}
func (de *DockerEngine) | (ctx context.Context, id string) error {
return de.cl.NetworkRemove(ctx, id)
}
func (de *DockerEngine) BuildImage(ctx context.Context, imgName string,
buildContext io.Reader) error {
opts := types.ImageBuildOptions{
NetworkMode: "bridge",
Tags: []string{imgName},
Remove: true,
ForceRemove: true,
SuppressOutput: true,
NoCache: true,
PullParent: true,
}
r, err := de.cl.ImageBuild(ctx, buildContext, opts)
if r.Body != nil {
defer r.Body.Close()
// Check server response
if rerr := de.isErrorResponse(r.Body); rerr != nil {
return errors.Errorf("Error from Docker server: %s", rerr)
}
}
if err == nil {
dockerLog.Debugf("Image built: %s", imgName)
}
return err
}
func (de *DockerEngine) RemoveImage(ctx context.Context, imgName string) error {
opts := types.ImageRemoveOptions{
Force: true,
PruneChildren: true,
}
_, err := de.cl.ImageRemove(ctx, imgName, opts)
if err == nil {
dockerLog.Debugf("Image removed: %s", imgName)
}
return err
}
func (de *DockerEngine) FetchImage(ctx context.Context, imgName string) error {
out, err := de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{})
var auth string
if err != nil {
// Retry with auth
auth, err = de.getAuthForImage(imgName)
if err != nil {
return err
}
out, err = de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{
RegistryAuth: auth,
})
}
if err == nil {
dockerLog.Debugf("Image fetched: %s", imgName)
}
if out != nil {
_, _ = io.Copy(ioutil.Discard, out)
}
return err
}
func (de *DockerEngine) GetImagePorts(ctx context.Context,
tag string) ([]uint16, error) {
r, _, err := de.cl.ImageInspectWithRaw(ctx, tag)
if err != nil {
return nil, errors.Wrapf(err, "Error inspecting image %s", tag)
}
var ports []uint16
for p := range r.Config.ExposedPorts {
ports = append(ports, uint16(p.Int()))
}
return ports, nil
}
func (de *DockerEngine) Terminate() {
de.cl.Close()
}
func (de *DockerEngine) isErrorResponse(r io.Reader) error {
data, err := ioutil.ReadAll(r)
if err != nil {
return err
}
split := bytes.Split(data, []byte("\n"))
type errResp struct {
Error string
}
for i := range split {
e := errResp{}
if err := json.Unmarshal(split[i], &e); err == nil && e.Error != "" {
return errors.New(e.Error)
}
}
return nil
}
// TODO: This should probably be made more robust at some point
func (de *DockerEngine) getSubNet() (string, error) {
de.subNetMu.Lock()
defer de.subNetMu.Unlock()
addrs, err := net.InterfaceAddrs()
if err != nil {
return "", errors.Wrap(err, "Error getting network addresses")
}
var nets []*net.IPNet
for _, addr := range addrs {
dockerLog.Debugf("Inspecting interface %s", addr.String())
_, n, err := net.ParseCIDR(addr.String())
if err != nil {
dockerLog.Warningf("Error parsing address: %s", addr.String())
continue
}
nets = append(nets, n)
}
netaddr := func() string {
tpl := "10.%d.%d.0/24"
return fmt.Sprintf(tpl, de.subNetOct1, de.subNetOct2)
}
_, pnet, _ := net.ParseCIDR(netaddr())
for {
// Find non-overlapping network
overlap := false
for _, n := range nets {
if lib.NetsOverlap(pnet, n) {
overlap = true
break
}
}
if overlap {
de.subNetOct2 += 1
if de.subNetOct2 > 255 {
de.subNetOct1 += 1
de.subNetOct2 = 0
}
_, pnet, _ = net.ParseCIDR(netaddr())
} else {
break
}
}
return netaddr(), nil
}
// TODO: Pretty naive implementation and will likely not work in all cases
func (de *DockerEngine) getAuthForImage(imageName string) (string, error) {
type ConfigFile struct {
AuthConfigs map[string]types.AuthConfig `json:"auths"`
CredHelpers map[string]string `json:"credHelpers"`
}
ref, err := reference.ParseNormalizedNamed(imageName)
if err != nil {
return "", errors.Wrapf(err, "Error parsing image name %s", imageName)
}
repoInfo, err := registry.ParseRepositoryInfo(ref)
if err != nil {
return "", errors.Wrapf(err, "Error parsing repository %s", imageName)
}
file := filepath.Join(os.Getenv("HOME"), ".docker", "config.json")
b, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "Error reading docker config %s", file)
}
conf := &ConfigFile{}
err = json.Unmarshal(b, conf)
if err != nil {
return "", errors.Wrapf(err, "Error parsing docker config %s", file)
}
srv := repoInfo.Index.Name
ac := &types.AuthConfig{}
var variants []string
if srv == "docker.io" || srv == "index.docker.io" {
variants = []string{"https://docker.io", "https://index.docker.io/v1/"}
} else {
variants = []string{srv}
}
var finalErr error
for _, host := range variants {
if credHelper, ok := conf.CredHelpers[host]; ok {
dockerLog.Infof("Using '%s' credential helper for %s", credHelper, host)
prog := fmt.Sprintf("docker-credential-%s", credHelper)
p := hclient.NewShellProgramFunc(prog)
creds, err := hclient.Get(p, host)
if err != nil {
finalErr = errors.Wrapf(err, "Error running %s", prog)
continue
}
ac.Username = creds.Username
ac.Password = creds.Secret
} else if authConf, ok := conf.AuthConfigs[host]; ok {
dockerLog.Infof("Using auth section for %s", host)
if authConf.Username != "" {
ac.Username = authConf.Username
}
if authConf.Password != "" {
ac.Password = authConf.Password
}
if ac.Username == "" {
auth, err := base64.StdEncoding.DecodeString(authConf.Auth)
if err != nil {
finalErr = errors.Wrap(err, "Error decoding auth entry")
continue
}
split := strings.SplitN(string(auth), ":", 2)
if len(split) < 2 {
finalErr = errors.Errorf("Invalid auth entry format: %s", auth)
continue
}
ac.Username = split[0]
ac.Password = split[1]
}
} else {
continue
}
ac.ServerAddress = host
b, _ = json.Marshal(ac)
return base64.StdEncoding.EncodeToString(b), nil
}
return "", finalErr
}
| RemoveNetwork | identifier_name |
conteng_docker.go | /*
MIT License
Copyright (c) 2018 Max Kuznetsov <syhpoon@syhpoon.ca>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package conteng
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
"strings"
"sync"
"github.com/docker/distribution/reference"
hclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/docker/registry"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
"github.com/syhpoon/xenvman/pkg/lib"
"github.com/syhpoon/xenvman/pkg/logger"
)
var dockerLog = logger.GetLogger("xenvman.pkg.conteng.conteng_docker")
type DockerEngineParams struct {
}
type DockerEngine struct {
cl *client.Client
params DockerEngineParams
subNetOct1 int
subNetOct2 int
subNetMu sync.Mutex
}
func NewDockerEngine(params DockerEngineParams) (*DockerEngine, error) {
var opts []func(client2 *client.Client) error
cli, err := client.NewClientWithOpts(opts...)
if err != nil {
return nil, errors.Wrapf(err, "Error creating docker client")
}
cli.NegotiateAPIVersion(context.Background())
dockerLog.Debugf("Docker engine client created")
return &DockerEngine{
cl: cli,
params: params,
subNetOct1: 0,
subNetOct2: 0,
}, nil
}
func (de *DockerEngine) CreateNetwork(ctx context.Context,
name string) (NetworkId, string, error) {
sub, err := de.getSubNet()
if err != nil {
return "", "", err
}
netParams := types.NetworkCreate{
CheckDuplicate: true,
Driver: "bridge",
IPAM: &network.IPAM{
Config: []network.IPAMConfig{
{
Subnet: sub,
IPRange: sub,
},
},
},
}
r, err := de.cl.NetworkCreate(ctx, name, netParams)
if err != nil {
return "", "", errors.Wrapf(err, "Error creating docker network: %s", sub)
}
dockerLog.Debugf("Network created: %s - %s :: %s", name, r.ID, sub)
return r.ID, sub, nil
}
// Run Docker container
func (de *DockerEngine) RunContainer(ctx context.Context, name, tag string,
params RunContainerParams) (string, error) {
// Hosts
var hosts []string
for host, ip := range params.Hosts {
hosts = append(hosts, fmt.Sprintf("%s:%s", host, ip))
}
// Ports
var rawPorts []string
for contPort, hostPort := range params.Ports {
rawPorts = append(rawPorts, fmt.Sprintf("%d:%d", hostPort, contPort))
}
ports, bindings, err := nat.ParsePortSpecs(rawPorts)
if err != nil {
return "", errors.Wrapf(err, "Error parsing ports for %s", name)
}
// Environ
var environ []string
for k, v := range params.Environ {
environ = append(environ, fmt.Sprintf("%s=%s", k, v))
}
// Mounts
var mounts []mount.Mount
for _, fileMount := range params.FileMounts {
mounts = append(mounts, mount.Mount{
Type: "bind",
Source: fileMount.HostFile,
Target: fileMount.ContainerFile,
ReadOnly: fileMount.Readonly,
})
}
var dns []string
if params.DiscoverDNS != "" {
dns = append(dns, params.DiscoverDNS)
}
hostCont := &container.HostConfig{
NetworkMode: container.NetworkMode(params.NetworkId),
ExtraHosts: hosts,
AutoRemove: false,
DNS: dns,
DNSSearch: []string{"xenv"},
RestartPolicy: container.RestartPolicy{Name: "on-failure"},
PortBindings: bindings,
Mounts: mounts,
}
netConf := &network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
params.NetworkId: {
IPAMConfig: &network.EndpointIPAMConfig{
IPv4Address: params.IP,
},
},
},
}
r, err := de.cl.ContainerCreate(ctx, &container.Config{
Hostname: name,
AttachStdout: true,
AttachStderr: true,
Image: tag,
ExposedPorts: ports,
Env: environ,
Cmd: params.Cmd,
Entrypoint: params.Entrypoint,
}, hostCont, netConf, lib.NewIdShort())
if err != nil {
return "", errors.Wrapf(err, "Error creating container %s", tag)
}
err = de.cl.ContainerStart(ctx, r.ID, types.ContainerStartOptions{})
if err != nil {
return "", errors.Wrapf(err, "Error starting container: %s", tag)
}
dockerLog.Debugf("Container started: %s, network=%s", tag, params.NetworkId)
return r.ID, nil
}
func (de *DockerEngine) RemoveContainer(ctx context.Context, id string) error {
return de.cl.ContainerRemove(ctx, id,
types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
})
}
func (de *DockerEngine) StopContainer(ctx context.Context, id string) error {
return de.cl.ContainerKill(ctx, id, "INT")
}
func (de *DockerEngine) RestartContainer(ctx context.Context, id string) error {
return de.cl.ContainerStart(ctx, id, types.ContainerStartOptions{})
}
func (de *DockerEngine) RemoveNetwork(ctx context.Context, id string) error {
return de.cl.NetworkRemove(ctx, id)
}
func (de *DockerEngine) BuildImage(ctx context.Context, imgName string,
buildContext io.Reader) error {
opts := types.ImageBuildOptions{
NetworkMode: "bridge",
Tags: []string{imgName},
Remove: true,
ForceRemove: true,
SuppressOutput: true,
NoCache: true,
PullParent: true,
}
r, err := de.cl.ImageBuild(ctx, buildContext, opts)
if r.Body != nil {
defer r.Body.Close()
// Check server response
if rerr := de.isErrorResponse(r.Body); rerr != nil {
return errors.Errorf("Error from Docker server: %s", rerr)
}
}
if err == nil {
dockerLog.Debugf("Image built: %s", imgName)
}
return err
}
func (de *DockerEngine) RemoveImage(ctx context.Context, imgName string) error {
opts := types.ImageRemoveOptions{
Force: true,
PruneChildren: true,
}
_, err := de.cl.ImageRemove(ctx, imgName, opts)
if err == nil {
dockerLog.Debugf("Image removed: %s", imgName)
}
return err
}
func (de *DockerEngine) FetchImage(ctx context.Context, imgName string) error {
out, err := de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{})
var auth string
if err != nil {
// Retry with auth
auth, err = de.getAuthForImage(imgName)
if err != nil {
return err
}
out, err = de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{
RegistryAuth: auth,
})
}
if err == nil {
dockerLog.Debugf("Image fetched: %s", imgName)
}
if out != nil {
_, _ = io.Copy(ioutil.Discard, out)
}
return err
}
func (de *DockerEngine) GetImagePorts(ctx context.Context,
tag string) ([]uint16, error) {
r, _, err := de.cl.ImageInspectWithRaw(ctx, tag)
if err != nil {
return nil, errors.Wrapf(err, "Error inspecting image %s", tag)
}
var ports []uint16
for p := range r.Config.ExposedPorts {
ports = append(ports, uint16(p.Int()))
}
return ports, nil
}
func (de *DockerEngine) Terminate() |
func (de *DockerEngine) isErrorResponse(r io.Reader) error {
data, err := ioutil.ReadAll(r)
if err != nil {
return err
}
split := bytes.Split(data, []byte("\n"))
type errResp struct {
Error string
}
for i := range split {
e := errResp{}
if err := json.Unmarshal(split[i], &e); err == nil && e.Error != "" {
return errors.New(e.Error)
}
}
return nil
}
// TODO: This should probably be made more robust at some point
func (de *DockerEngine) getSubNet() (string, error) {
de.subNetMu.Lock()
defer de.subNetMu.Unlock()
addrs, err := net.InterfaceAddrs()
if err != nil {
return "", errors.Wrap(err, "Error getting network addresses")
}
var nets []*net.IPNet
for _, addr := range addrs {
dockerLog.Debugf("Inspecting interface %s", addr.String())
_, n, err := net.ParseCIDR(addr.String())
if err != nil {
dockerLog.Warningf("Error parsing address: %s", addr.String())
continue
}
nets = append(nets, n)
}
netaddr := func() string {
tpl := "10.%d.%d.0/24"
return fmt.Sprintf(tpl, de.subNetOct1, de.subNetOct2)
}
_, pnet, _ := net.ParseCIDR(netaddr())
for {
// Find non-overlapping network
overlap := false
for _, n := range nets {
if lib.NetsOverlap(pnet, n) {
overlap = true
break
}
}
if overlap {
de.subNetOct2 += 1
if de.subNetOct2 > 255 {
de.subNetOct1 += 1
de.subNetOct2 = 0
}
_, pnet, _ = net.ParseCIDR(netaddr())
} else {
break
}
}
return netaddr(), nil
}
// TODO: Pretty naive implementation and will likely not work in all cases
func (de *DockerEngine) getAuthForImage(imageName string) (string, error) {
type ConfigFile struct {
AuthConfigs map[string]types.AuthConfig `json:"auths"`
CredHelpers map[string]string `json:"credHelpers"`
}
ref, err := reference.ParseNormalizedNamed(imageName)
if err != nil {
return "", errors.Wrapf(err, "Error parsing image name %s", imageName)
}
repoInfo, err := registry.ParseRepositoryInfo(ref)
if err != nil {
return "", errors.Wrapf(err, "Error parsing repository %s", imageName)
}
file := filepath.Join(os.Getenv("HOME"), ".docker", "config.json")
b, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "Error reading docker config %s", file)
}
conf := &ConfigFile{}
err = json.Unmarshal(b, conf)
if err != nil {
return "", errors.Wrapf(err, "Error parsing docker config %s", file)
}
srv := repoInfo.Index.Name
ac := &types.AuthConfig{}
var variants []string
if srv == "docker.io" || srv == "index.docker.io" {
variants = []string{"https://docker.io", "https://index.docker.io/v1/"}
} else {
variants = []string{srv}
}
var finalErr error
for _, host := range variants {
if credHelper, ok := conf.CredHelpers[host]; ok {
dockerLog.Infof("Using '%s' credential helper for %s", credHelper, host)
prog := fmt.Sprintf("docker-credential-%s", credHelper)
p := hclient.NewShellProgramFunc(prog)
creds, err := hclient.Get(p, host)
if err != nil {
finalErr = errors.Wrapf(err, "Error running %s", prog)
continue
}
ac.Username = creds.Username
ac.Password = creds.Secret
} else if authConf, ok := conf.AuthConfigs[host]; ok {
dockerLog.Infof("Using auth section for %s", host)
if authConf.Username != "" {
ac.Username = authConf.Username
}
if authConf.Password != "" {
ac.Password = authConf.Password
}
if ac.Username == "" {
auth, err := base64.StdEncoding.DecodeString(authConf.Auth)
if err != nil {
finalErr = errors.Wrap(err, "Error decoding auth entry")
continue
}
split := strings.SplitN(string(auth), ":", 2)
if len(split) < 2 {
finalErr = errors.Errorf("Invalid auth entry format: %s", auth)
continue
}
ac.Username = split[0]
ac.Password = split[1]
}
} else {
continue
}
ac.ServerAddress = host
b, _ = json.Marshal(ac)
return base64.StdEncoding.EncodeToString(b), nil
}
return "", finalErr
}
| {
de.cl.Close()
} | identifier_body |
conteng_docker.go | /*
MIT License
Copyright (c) 2018 Max Kuznetsov <syhpoon@syhpoon.ca>
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
*/
package conteng
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net"
"os"
"path/filepath"
"strings"
"sync"
"github.com/docker/distribution/reference"
hclient "github.com/docker/docker-credential-helpers/client"
"github.com/docker/docker/api/types"
"github.com/docker/docker/api/types/container"
"github.com/docker/docker/api/types/mount"
"github.com/docker/docker/api/types/network"
"github.com/docker/docker/client"
"github.com/docker/docker/registry"
"github.com/docker/go-connections/nat"
"github.com/pkg/errors"
"github.com/syhpoon/xenvman/pkg/lib"
"github.com/syhpoon/xenvman/pkg/logger"
)
var dockerLog = logger.GetLogger("xenvman.pkg.conteng.conteng_docker")
type DockerEngineParams struct {
}
type DockerEngine struct {
cl *client.Client
params DockerEngineParams
subNetOct1 int
subNetOct2 int
subNetMu sync.Mutex
}
func NewDockerEngine(params DockerEngineParams) (*DockerEngine, error) {
var opts []func(client2 *client.Client) error
cli, err := client.NewClientWithOpts(opts...)
if err != nil {
return nil, errors.Wrapf(err, "Error creating docker client")
}
cli.NegotiateAPIVersion(context.Background())
dockerLog.Debugf("Docker engine client created")
return &DockerEngine{
cl: cli,
params: params,
subNetOct1: 0,
subNetOct2: 0,
}, nil
}
func (de *DockerEngine) CreateNetwork(ctx context.Context,
name string) (NetworkId, string, error) {
sub, err := de.getSubNet()
if err != nil |
netParams := types.NetworkCreate{
CheckDuplicate: true,
Driver: "bridge",
IPAM: &network.IPAM{
Config: []network.IPAMConfig{
{
Subnet: sub,
IPRange: sub,
},
},
},
}
r, err := de.cl.NetworkCreate(ctx, name, netParams)
if err != nil {
return "", "", errors.Wrapf(err, "Error creating docker network: %s", sub)
}
dockerLog.Debugf("Network created: %s - %s :: %s", name, r.ID, sub)
return r.ID, sub, nil
}
// Run Docker container
func (de *DockerEngine) RunContainer(ctx context.Context, name, tag string,
params RunContainerParams) (string, error) {
// Hosts
var hosts []string
for host, ip := range params.Hosts {
hosts = append(hosts, fmt.Sprintf("%s:%s", host, ip))
}
// Ports
var rawPorts []string
for contPort, hostPort := range params.Ports {
rawPorts = append(rawPorts, fmt.Sprintf("%d:%d", hostPort, contPort))
}
ports, bindings, err := nat.ParsePortSpecs(rawPorts)
if err != nil {
return "", errors.Wrapf(err, "Error parsing ports for %s", name)
}
// Environ
var environ []string
for k, v := range params.Environ {
environ = append(environ, fmt.Sprintf("%s=%s", k, v))
}
// Mounts
var mounts []mount.Mount
for _, fileMount := range params.FileMounts {
mounts = append(mounts, mount.Mount{
Type: "bind",
Source: fileMount.HostFile,
Target: fileMount.ContainerFile,
ReadOnly: fileMount.Readonly,
})
}
var dns []string
if params.DiscoverDNS != "" {
dns = append(dns, params.DiscoverDNS)
}
hostCont := &container.HostConfig{
NetworkMode: container.NetworkMode(params.NetworkId),
ExtraHosts: hosts,
AutoRemove: false,
DNS: dns,
DNSSearch: []string{"xenv"},
RestartPolicy: container.RestartPolicy{Name: "on-failure"},
PortBindings: bindings,
Mounts: mounts,
}
netConf := &network.NetworkingConfig{
EndpointsConfig: map[string]*network.EndpointSettings{
params.NetworkId: {
IPAMConfig: &network.EndpointIPAMConfig{
IPv4Address: params.IP,
},
},
},
}
r, err := de.cl.ContainerCreate(ctx, &container.Config{
Hostname: name,
AttachStdout: true,
AttachStderr: true,
Image: tag,
ExposedPorts: ports,
Env: environ,
Cmd: params.Cmd,
Entrypoint: params.Entrypoint,
}, hostCont, netConf, lib.NewIdShort())
if err != nil {
return "", errors.Wrapf(err, "Error creating container %s", tag)
}
err = de.cl.ContainerStart(ctx, r.ID, types.ContainerStartOptions{})
if err != nil {
return "", errors.Wrapf(err, "Error starting container: %s", tag)
}
dockerLog.Debugf("Container started: %s, network=%s", tag, params.NetworkId)
return r.ID, nil
}
func (de *DockerEngine) RemoveContainer(ctx context.Context, id string) error {
return de.cl.ContainerRemove(ctx, id,
types.ContainerRemoveOptions{
RemoveVolumes: true,
Force: true,
})
}
func (de *DockerEngine) StopContainer(ctx context.Context, id string) error {
return de.cl.ContainerKill(ctx, id, "INT")
}
func (de *DockerEngine) RestartContainer(ctx context.Context, id string) error {
return de.cl.ContainerStart(ctx, id, types.ContainerStartOptions{})
}
func (de *DockerEngine) RemoveNetwork(ctx context.Context, id string) error {
return de.cl.NetworkRemove(ctx, id)
}
func (de *DockerEngine) BuildImage(ctx context.Context, imgName string,
buildContext io.Reader) error {
opts := types.ImageBuildOptions{
NetworkMode: "bridge",
Tags: []string{imgName},
Remove: true,
ForceRemove: true,
SuppressOutput: true,
NoCache: true,
PullParent: true,
}
r, err := de.cl.ImageBuild(ctx, buildContext, opts)
if r.Body != nil {
defer r.Body.Close()
// Check server response
if rerr := de.isErrorResponse(r.Body); rerr != nil {
return errors.Errorf("Error from Docker server: %s", rerr)
}
}
if err == nil {
dockerLog.Debugf("Image built: %s", imgName)
}
return err
}
func (de *DockerEngine) RemoveImage(ctx context.Context, imgName string) error {
opts := types.ImageRemoveOptions{
Force: true,
PruneChildren: true,
}
_, err := de.cl.ImageRemove(ctx, imgName, opts)
if err == nil {
dockerLog.Debugf("Image removed: %s", imgName)
}
return err
}
func (de *DockerEngine) FetchImage(ctx context.Context, imgName string) error {
out, err := de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{})
var auth string
if err != nil {
// Retry with auth
auth, err = de.getAuthForImage(imgName)
if err != nil {
return err
}
out, err = de.cl.ImagePull(ctx, imgName, types.ImagePullOptions{
RegistryAuth: auth,
})
}
if err == nil {
dockerLog.Debugf("Image fetched: %s", imgName)
}
if out != nil {
_, _ = io.Copy(ioutil.Discard, out)
}
return err
}
func (de *DockerEngine) GetImagePorts(ctx context.Context,
tag string) ([]uint16, error) {
r, _, err := de.cl.ImageInspectWithRaw(ctx, tag)
if err != nil {
return nil, errors.Wrapf(err, "Error inspecting image %s", tag)
}
var ports []uint16
for p := range r.Config.ExposedPorts {
ports = append(ports, uint16(p.Int()))
}
return ports, nil
}
func (de *DockerEngine) Terminate() {
de.cl.Close()
}
func (de *DockerEngine) isErrorResponse(r io.Reader) error {
data, err := ioutil.ReadAll(r)
if err != nil {
return err
}
split := bytes.Split(data, []byte("\n"))
type errResp struct {
Error string
}
for i := range split {
e := errResp{}
if err := json.Unmarshal(split[i], &e); err == nil && e.Error != "" {
return errors.New(e.Error)
}
}
return nil
}
// TODO: This should probably be made more robust at some point
func (de *DockerEngine) getSubNet() (string, error) {
de.subNetMu.Lock()
defer de.subNetMu.Unlock()
addrs, err := net.InterfaceAddrs()
if err != nil {
return "", errors.Wrap(err, "Error getting network addresses")
}
var nets []*net.IPNet
for _, addr := range addrs {
dockerLog.Debugf("Inspecting interface %s", addr.String())
_, n, err := net.ParseCIDR(addr.String())
if err != nil {
dockerLog.Warningf("Error parsing address: %s", addr.String())
continue
}
nets = append(nets, n)
}
netaddr := func() string {
tpl := "10.%d.%d.0/24"
return fmt.Sprintf(tpl, de.subNetOct1, de.subNetOct2)
}
_, pnet, _ := net.ParseCIDR(netaddr())
for {
// Find non-overlapping network
overlap := false
for _, n := range nets {
if lib.NetsOverlap(pnet, n) {
overlap = true
break
}
}
if overlap {
de.subNetOct2 += 1
if de.subNetOct2 > 255 {
de.subNetOct1 += 1
de.subNetOct2 = 0
}
_, pnet, _ = net.ParseCIDR(netaddr())
} else {
break
}
}
return netaddr(), nil
}
// TODO: Pretty naive implementation and will likely not work in all cases
func (de *DockerEngine) getAuthForImage(imageName string) (string, error) {
type ConfigFile struct {
AuthConfigs map[string]types.AuthConfig `json:"auths"`
CredHelpers map[string]string `json:"credHelpers"`
}
ref, err := reference.ParseNormalizedNamed(imageName)
if err != nil {
return "", errors.Wrapf(err, "Error parsing image name %s", imageName)
}
repoInfo, err := registry.ParseRepositoryInfo(ref)
if err != nil {
return "", errors.Wrapf(err, "Error parsing repository %s", imageName)
}
file := filepath.Join(os.Getenv("HOME"), ".docker", "config.json")
b, err := ioutil.ReadFile(file)
if err != nil {
return "", errors.Wrapf(err, "Error reading docker config %s", file)
}
conf := &ConfigFile{}
err = json.Unmarshal(b, conf)
if err != nil {
return "", errors.Wrapf(err, "Error parsing docker config %s", file)
}
srv := repoInfo.Index.Name
ac := &types.AuthConfig{}
var variants []string
if srv == "docker.io" || srv == "index.docker.io" {
variants = []string{"https://docker.io", "https://index.docker.io/v1/"}
} else {
variants = []string{srv}
}
var finalErr error
for _, host := range variants {
if credHelper, ok := conf.CredHelpers[host]; ok {
dockerLog.Infof("Using '%s' credential helper for %s", credHelper, host)
prog := fmt.Sprintf("docker-credential-%s", credHelper)
p := hclient.NewShellProgramFunc(prog)
creds, err := hclient.Get(p, host)
if err != nil {
finalErr = errors.Wrapf(err, "Error running %s", prog)
continue
}
ac.Username = creds.Username
ac.Password = creds.Secret
} else if authConf, ok := conf.AuthConfigs[host]; ok {
dockerLog.Infof("Using auth section for %s", host)
if authConf.Username != "" {
ac.Username = authConf.Username
}
if authConf.Password != "" {
ac.Password = authConf.Password
}
if ac.Username == "" {
auth, err := base64.StdEncoding.DecodeString(authConf.Auth)
if err != nil {
finalErr = errors.Wrap(err, "Error decoding auth entry")
continue
}
split := strings.SplitN(string(auth), ":", 2)
if len(split) < 2 {
finalErr = errors.Errorf("Invalid auth entry format: %s", auth)
continue
}
ac.Username = split[0]
ac.Password = split[1]
}
} else {
continue
}
ac.ServerAddress = host
b, _ = json.Marshal(ac)
return base64.StdEncoding.EncodeToString(b), nil
}
return "", finalErr
}
| {
return "", "", err
} | conditional_block |
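A minimal usage sketch of the DockerEngine wrapper shown in the conteng_docker.go rows above. It is not part of the dataset rows: the import path (github.com/syhpoon/xenvman/pkg/conteng, inferred from the sibling imports), the network name and the "alpine:latest" image are assumptions. It only calls methods whose signatures appear above (NewDockerEngine, CreateNetwork, FetchImage, RemoveNetwork, Terminate).
package main

import (
	"context"
	"log"

	"github.com/syhpoon/xenvman/pkg/conteng" // assumed import path for the package above
)

func main() {
	// NewDockerEngine connects to the local daemon and negotiates the API version.
	eng, err := conteng.NewDockerEngine(conteng.DockerEngineParams{})
	if err != nil {
		log.Fatalf("engine: %v", err)
	}
	defer eng.Terminate()

	ctx := context.Background()

	// CreateNetwork allocates a bridge network in a free 10.x.y.0/24 range (see getSubNet).
	netID, subnet, err := eng.CreateNetwork(ctx, "xenv-example")
	if err != nil {
		log.Fatalf("network: %v", err)
	}
	log.Printf("created network %v (%s)", netID, subnet)

	// FetchImage pulls the image and, on failure, retries with credentials
	// taken from ~/.docker/config.json (see getAuthForImage).
	if err := eng.FetchImage(ctx, "alpine:latest"); err != nil {
		log.Printf("pull failed: %v", err)
	}

	if err := eng.RemoveNetwork(ctx, string(netID)); err != nil {
		log.Printf("cleanup failed: %v", err)
	}
}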
marktree.js | /* MarkTree JavaScript code
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* Miika Nurminen, 12.7.2004.
*/
/* cross-browser (tested with ie5, mozilla 1 and opera 5) keypress detection */
function get_keycode(evt) {
// IE
code = document.layers ? evt.which
: document.all ? event.keyCode // event.keyCode!=evt.keyCode!
: evt.keyCode;
if (code==0)
code=evt.which; // for NS
return code;
}
var lastnode=null;
var listnodes = null;
var list_index=1;
var lastnodetype=''; // determines if node is a link, input or text;
// up, left, down, right, keypress codes
//ijkl
//var keys = new Array(105,106,107,108);
//num arrows
//var keys = new Array(56,52,50,54);
//wasd
// var press2 = new Array(119,97,115,100);
var press = new Array(47,45,42,43);
// keydown codes
// var keys2=new Array(87,65,83,68);
var keys= new Array(38,37,40,39);
// keyset 1 = keydown, otherwise press
function checkup(keyset,n) {
if (keyset==1) return (n==keys[0]);
return ((n==press[0]) /*|| (n==press2[0])*/)
}
function checkdn(keyset,n) {
if (keyset==1) return (n==keys[2]);
return ((n==press[2]) /*|| (n==press2[2])*/)
}
function checkl(keyset,n) {
if (keyset==1) return (n==keys[1]);
return ((n==press[1]) /*|| (n==press2[1])*/)
}
function checkr(keyset,n) {
if (keyset==1) return (n==keys[3]);
return ((n==press[3]) /*|| (n==press2[3])*/)
}
function is_exp(n) {
if (n==null) return false;
return ((n.className=='exp') || (n.className=='exp_active'));
}
function is_col(n) {
if (n==null) return false;
return ((n.className=='col') || (n.className=='col_active'));
}
function is_basic(n) {
if (n==null) return false;
return ((n.className=='basic') || (n.className=='basic_active'));
}
/* returns i>=0 if true */
function is_active(node) {
if (node.className==null) return false
return node.className.indexOf('_active');
}
function toggle_class(node) {
if ((node==null) || (node.className==null)) return;
str=node.className;
result="";
i = str.indexOf('_active');
if (i>0)
result= str.substr(0,i);
else
result= str+"_active";
node.className=result;
return node;
}
function activate(node) {
node.style.backgroundColor='#eeeeff';
}
function deactivate(node) {
node.style.backgroundColor='#ffffff';
}
function is_list_node(n) {
if (n==null) return false;
if (n.className==null) return false;
if ( (is_exp(n)) ||
(is_col(n)) ||
(is_basic(n)) )
return true; else return false;
}
function get_href(n) {
alist=n.attributes;
if (alist!=null) {
hr = alist.getNamedItem('href');
if (hr!=null) return hr.nodeValue;
}
if (n.childNodes.length==0) return '';
for (var i=0; i<n.childNodes.length; i++) {
s = get_href(n.childNodes[i]);
if (s!='') return s;
}
return '';
}
function get_link(n) {
if (n==null) return null;
if (n.style==null) return null;
// disabling uncontrolled recursion to prevent error messages on IE
// when trying to focus to invisible links (readonly mode)
// alert(n.nodeName+' '+n.className);
if ((n.nodeName=='UL') && (n.className=='sub')) return null;
if (n.nodeName=='A') return n;
if (n.childNodes.length==0) return null;
for (var i=0; i<n.childNodes.length; i++) {
s = get_link(n.childNodes[i]);
if (s!=null) return s;
}
return null;
}
function set_lastnode(n) {
/*var d = new Date();
var t_mil = d.getMilliseconds();*/
// speed-tested with Explorer, no significant differences
if (lastnode==n) return;
/* deactivate(lastnode)
lastnode=n;
activate(lastnode);*/
if (is_active(lastnode)>=0)
toggle_class(lastnode);
lastnode=n;
if (!(is_active(lastnode)>=0))
toggle_class(lastnode);
/*var d2 = new Date();
var t_mil2 = d2.getMilliseconds();
window.alert(t_mil2-t_mil);*/
}
function next_list_node() {
tempIndex = list_index;
while (tempIndex<listnodes.length-1) {
tempIndex++;
var x = listnodes[tempIndex];
if (is_list_node(x)) {
list_index=tempIndex;
return;
}
}
}
function prev_list_node() {
tempIndex = list_index;
while (tempIndex>0) {
tempIndex--;
var x = listnodes[tempIndex];
if (is_list_node(x)) {
list_index=tempIndex;
return;
}
}
}
function getsub (li) {
if (li.childNodes.length==0) return null;
for (var c = 0; c < li.childNodes.length; c++)
if ( (li.childNodes[c].className == 'sub') || (li.childNodes[c].className == 'subexp') )
return li.childNodes[c];
}
function find_listnode_recursive (li) {
if (is_list_node(li)) return li;
if (li.childNodes.length==0) return null;
result=null;
for (var c = 0; c < li.childNodes.length; c++) {
result=find_listnode_recursive(li.childNodes[c]);
if (result!=null) return result;
}
return null;
}
function next_child_listnode(li) {
var result=null;
for (var i=0; i<li.childNodes.length; i++) {
result=find_listnode_recursive(li.childNodes[i]);
if (result!=null) return result;
}
return null;
}
function next_actual_sibling_listnode(li) {
if (li==null) return null;
var temp=li;
while (1) {
var n = temp.nextSibling;
if (n==null) {
n=parent_listnode(temp);
return next_actual_sibling_listnode(n);
}
if (is_list_node(n)) return n;
temp=n;
}
}
function next_sibling_listnode(li) {
if (li==null) return null;
var result=null;
var temp=li;
if (is_col(temp)) return next_child_listnode(temp);
while (1) {
var n = temp.nextSibling;
if (n==null) {
n=parent_listnode(temp);
return next_actual_sibling_listnode(n);
}
if (is_list_node(n)) return n;
temp=n;
}
}
function last_sibling_listnode(li) {
if (li==null) return null;
var temp=li;
var last=null;
while(1) {
var n = temp.nextSibling;
if (is_list_node(temp))
last = temp;
if (n==null) {
if (is_col(last)) return last_sibling_listnode(next_child_listnode(last));
else return last;
}
temp = n;
}
}
function prev_sibling_listnode(li) {
if (li==null) return null;
var temp=li;
var n = null;
while (1) {
n = temp.previousSibling;
if (n==null) {
return parent_listnode(li);
}
if (is_list_node(n)) {
if (is_col(n)) {
return last_sibling_listnode(next_child_listnode(n));
}
else {
return n;
}
}
temp=n;
}
}
function pa | i) {
// added 12.7.2004 to prevent IE error when readonly mode==true
if (li==null) return null;
n=li;
while (1) {
n=n.parentNode;
if (n==null) return null;
if (is_list_node(n)) return n;
}
}
function getVisibleParents(id) {
var n = document.getElementById(id);
while(1) {
expand(n);
n = parent_listnode(n);
if (n==null) return;
}
}
function onClickHandler (evt) {
if (lastnode==null)
{
listnodes = document.getElementsByTagName('li');
lastnode=listnodes[1];
temp=listnodes[1];
}
var target = evt ? evt.target : event.srcElement;
if (!is_list_node(target)) return;
toggle(target);
set_lastnode(target);
}
function expand(node) {
if (!is_exp(node)) return;
if (node.className=='exp_active')
node.className='col_active';
else
node.className='col';
setSubClass(node,'subexp');
// getsub(node).className='subexp';
}
function collapse(node) {
if (!is_col(node)) return;
if (node.className=='col_active')
node.className='exp_active'
else
node.className='exp';
setSubClass(node,'sub');
// getsub(node).className='sub';
}
function setSubClass(node,name) {
sub = getsub(node);
if (sub==null) return;
sub.className=name;
}
function toggle(target) {
if (!is_list_node(target)) return;
if (is_col(target)) {
target.className='exp';
setSubClass(target,'sub');
// getsub(target).className='sub';
}
else if (is_exp(target)) {
target.className='col';
setSubClass(target,'subexp');
// getsub(target).className='subexp';
}
}
function expandAll(node) {
if (node.className=='exp') {
node.className='col';
setSubClass(node,'subexp');
// getsub(node).className='subexp';
}
var i;
if (node.childNodes!=null)
// if (node.hasChildNodes())
for ( i = 0; i<node.childNodes.length; i++)
expandAll(node.childNodes[i]);
}
function collapseAll(node) {
if (node.className=='col') {
node.className='exp';
setSubClass(node,'sub');
// getsub(node).className='sub';
}
var i;
if (node.childNodes!=null)
// for opera if (node.hasChildNodes())
for ( i = 0; i<node.childNodes.length; i++)
collapseAll(node.childNodes[i]);
}
function unFocus(node) {
// unfocuses potential link that is to be hidden (if a==null there is no link so it should not be blurred).
// tested with mozilla 1.7, 12.7.2004. /mn
intemp=parent_listnode(node);
a = get_link(intemp); // added 6.4. to get keyboard working with
// moved before collapse to prevent an error message with IE when readonly==true
if (a!=null) a.blur(); // netscape after collapsing a focused node
return intemp;
}
// mode: 0==keypress, 1==keyup
function keyfunc(evt,mode) {
var c = get_keycode(evt);
var temp = null;
var a = null;
if (lastnode==null) {
listnodes = document.getElementsByTagName('li');
lastnode=listnodes[1];
temp=listnodes[1];
}
//window.alert(c);
if (checkup(mode,c)) { // i
temp=prev_sibling_listnode(lastnode);
}
else if (checkdn(mode,c)) { // k
temp=next_sibling_listnode(lastnode);
}
else if (checkr(mode,c)) { // l
expand(lastnode);
// temp=next_child_listnode(lastnode);
// if (temp==null) {
a = get_link(lastnode);
if (a!=null) a.focus(); else self.focus();
//}
}
else if (checkl(mode,c)) { // j
if (is_col(lastnode)) {
unFocus(lastnode);
collapse(lastnode);
}
else {
temp=unFocus(lastnode);
collapse(temp);
}
// if (temp==null) lastnode.focus(); // forces focus to correct div (try mozilla typesearch) (doesn't seem to work -mn/6.4.2004)
}
else return;
if (temp!=null) set_lastnode(temp);
// alert('pressed ' + String.fromCharCode(c) + '(' + c + ')');
return true;
}
function keytest (evt) {
return keyfunc(evt,1);
};
function presstest (evt) {
return keyfunc(evt,0);
};
document.onclick = onClickHandler;
document.onkeypress = presstest;
document.onkeyup = keytest;
| rent_listnode(l | identifier_name |
marktree.js | /* MarkTree JavaScript code
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* Miika Nurminen, 12.7.2004.
*/
/* cross-browser (tested with ie5, mozilla 1 and opera 5) keypress detection */
function get_keycode(evt) {
// IE
code = document.layers ? evt.which
: document.all ? event.keyCode // event.keyCode!=evt.keyCode!
: evt.keyCode;
if (code==0)
code=evt.which; // for NS
return code;
}
var lastnode=null;
var listnodes = null;
var list_index=1;
var lastnodetype=''; // determines if node is a link, input or text;
// up, left, down, right, keypress codes
//ijkl
//var keys = new Array(105,106,107,108);
//num arrows
//var keys = new Array(56,52,50,54);
//wasd
// var press2 = new Array(119,97,115,100);
var press = new Array(47,45,42,43);
// keydown codes
// var keys2=new Array(87,65,83,68);
var keys= new Array(38,37,40,39);
// keyset 1 = keydown, otherwise press
function checkup(keyset,n) {
if (keyset==1) return (n==keys[0]);
return ((n==press[0]) /*|| (n==press2[0])*/)
}
function checkdn(keyset,n) {
if (keyset==1) return (n==keys[2]);
return ((n==press[2]) /*|| (n==press2[2])*/)
}
function checkl(keyset,n) {
if (keyset==1) return (n==keys[1]);
return ((n==press[1]) /*|| (n==press2[1])*/)
}
function checkr(keyset,n) {
if (keyset==1) return (n==keys[3]);
return ((n==press[3]) /*|| (n==press2[3])*/)
}
function is_exp(n) {
if (n==null) return false;
return ((n.className=='exp') || (n.className=='exp_active'));
}
function is_col(n) {
if (n==null) return false;
return ((n.className=='col') || (n.className=='col_active'));
}
function is_basic(n) {
if (n==null) return false;
return ((n.className=='basic') || (n.className=='basic_active'));
}
/* returns i>=0 if true */
function is_active(node) {
if (node.className==null) return false
return node.className.indexOf('_active');
}
function toggle_class(node) {
if ((node==null) || (node.className==null)) return;
str=node.className;
result="";
i = str.indexOf('_active');
if (i>0)
result= str.substr(0,i);
else
result= str+"_active";
node.className=result;
return node;
}
function activate(node) {
node.style.backgroundColor='#eeeeff';
}
function deactivate(node) {
node.style.backgroundColor='#ffffff';
}
function is_list_node(n) {
if (n==null) return false;
if (n.className==null) return false;
if ( (is_exp(n)) ||
(is_col(n)) ||
(is_basic(n)) )
return true; else return false;
}
function get_href(n) {
alist=n.attributes;
if (alist!=null) {
hr = alist.getNamedItem('href');
if (hr!=null) return hr.nodeValue;
}
if (n.childNodes.length==0) return '';
for (var i=0; i<n.childNodes.length; i++) {
s = get_href(n.childNodes[i]);
if (s!='') return s;
}
return '';
}
function get_link(n) {
if (n==null) return null;
if (n.style==null) return null;
// disabling uncontrolled recursion to prevent error messages on IE
// when trying to focus to invisible links (readonly mode)
// alert(n.nodeName+' '+n.className);
if ((n.nodeName=='UL') && (n.className=='sub')) return null;
if (n.nodeName=='A') return n;
if (n.childNodes.length==0) return null;
for (var i=0; i<n.childNodes.length; i++) {
s = get_link(n.childNodes[i]);
if (s!=null) return s;
}
return null;
}
function set_lastnode(n) {
/*var d = new Date();
var t_mil = d.getMilliseconds();*/
// speed-tested with Explorer, no significant differences
if (lastnode==n) return;
/* deactivate(lastnode)
lastnode=n;
activate(lastnode);*/
if (is_active(lastnode)>=0)
toggle_class(lastnode);
lastnode=n;
if (!(is_active(lastnode)>=0))
toggle_class(lastnode);
/*var d2 = new Date();
var t_mil2 = d2.getMilliseconds();
window.alert(t_mil2-t_mil);*/
}
function next_list_node() {
tempIndex = list_index;
while (tempIndex<listnodes.length-1) {
tempIndex++;
var x = listnodes[tempIndex];
if (is_list_node(x)) {
list_index=tempIndex;
return;
}
}
}
function prev_list_node() {
tempIndex = list_index;
while (tempIndex>0) {
tempIndex--;
var x = listnodes[tempIndex];
if (is_list_node(x)) {
list_index=tempIndex;
return;
}
}
}
function getsub (li) {
if (li.childNodes.length==0) return null;
for (var c = 0; c < li.childNodes.length; c++)
if ( (li.childNodes[c].className == 'sub') || (li.childNodes[c].className == 'subexp') )
return li.childNodes[c];
}
function find_listnode_recursive (li) {
if (is_list_node(li)) return li;
if (li.childNodes.length==0) return null;
result=null;
for (var c = 0; c < li.childNodes.length; c++) {
result=find_listnode_recursive(li.childNodes[c]);
if (result!=null) return result;
}
return null;
}
function next_child_listnode(li) {
var result=null;
for (var i=0; i<li.childNodes.length; i++) {
result=find_listnode_recursive(li.childNodes[i]);
if (result!=null) return result;
}
return null;
}
function next_actual_sibling_listnode(li) {
if (li==null) return null;
var temp=li;
while (1) { | return next_actual_sibling_listnode(n);
}
if (is_list_node(n)) return n;
temp=n;
}
}
function next_sibling_listnode(li) {
if (li==null) return null;
var result=null;
var temp=li;
if (is_col(temp)) return next_child_listnode(temp);
while (1) {
var n = temp.nextSibling;
if (n==null) {
n=parent_listnode(temp);
return next_actual_sibling_listnode(n);
}
if (is_list_node(n)) return n;
temp=n;
}
}
function last_sibling_listnode(li) {
if (li==null) return null;
var temp=li;
var last=null;
while(1) {
var n = temp.nextSibling;
if (is_list_node(temp))
last = temp;
if (n==null) {
if (is_col(last)) return last_sibling_listnode(next_child_listnode(last));
else return last;
}
temp = n;
}
}
function prev_sibling_listnode(li) {
if (li==null) return null;
var temp=li;
var n = null;
while (1) {
n = temp.previousSibling;
if (n==null) {
return parent_listnode(li);
}
if (is_list_node(n)) {
if (is_col(n)) {
return last_sibling_listnode(next_child_listnode(n));
}
else {
return n;
}
}
temp=n;
}
}
function parent_listnode(li) {
// added 12.7.2004 to prevent IE error when readonly mode==true
if (li==null) return null;
n=li;
while (1) {
n=n.parentNode;
if (n==null) return null;
if (is_list_node(n)) return n;
}
}
function getVisibleParents(id) {
var n = document.getElementById(id);
while(1) {
expand(n);
n = parent_listnode(n);
if (n==null) return;
}
}
function onClickHandler (evt) {
if (lastnode==null)
{
listnodes = document.getElementsByTagName('li');
lastnode=listnodes[1];
temp=listnodes[1];
}
var target = evt ? evt.target : event.srcElement;
if (!is_list_node(target)) return;
toggle(target);
set_lastnode(target);
}
function expand(node) {
if (!is_exp(node)) return;
if (node.className=='exp_active')
node.className='col_active';
else
node.className='col';
setSubClass(node,'subexp');
// getsub(node).className='subexp';
}
function collapse(node) {
if (!is_col(node)) return;
if (node.className=='col_active')
node.className='exp_active'
else
node.className='exp';
setSubClass(node,'sub');
// getsub(node).className='sub';
}
function setSubClass(node,name) {
sub = getsub(node);
if (sub==null) return;
sub.className=name;
}
function toggle(target) {
if (!is_list_node(target)) return;
if (is_col(target)) {
target.className='exp';
setSubClass(target,'sub');
// getsub(target).className='sub';
}
else if (is_exp(target)) {
target.className='col';
setSubClass(target,'subexp');
// getsub(target).className='subexp';
}
}
function expandAll(node) {
if (node.className=='exp') {
node.className='col';
setSubClass(node,'subexp');
// getsub(node).className='subexp';
}
var i;
if (node.childNodes!=null)
// if (node.hasChildNodes())
for ( i = 0; i<node.childNodes.length; i++)
expandAll(node.childNodes[i]);
}
function collapseAll(node) {
if (node.className=='col') {
node.className='exp';
setSubClass(node,'sub');
// getsub(node).className='sub';
}
var i;
if (node.childNodes!=null)
// for opera if (node.hasChildNodes())
for ( i = 0; i<node.childNodes.length; i++)
collapseAll(node.childNodes[i]);
}
function unFocus(node) {
// unfocuses potential link that is to be hidden (if a==null there is no link so it should not be blurred).
// tested with mozilla 1.7, 12.7.2004. /mn
intemp=parent_listnode(node);
a = get_link(intemp); // added 6.4. to get keyboard working with
// moved before collapse to prevent an error message with IE when readonly==true
if (a!=null) a.blur(); // netscape after collapsing a focused node
return intemp;
}
// mode: 0==keypress, 1==keyup
function keyfunc(evt,mode) {
var c = get_keycode(evt);
var temp = null;
var a = null;
if (lastnode==null) {
listnodes = document.getElementsByTagName('li');
lastnode=listnodes[1];
temp=listnodes[1];
}
//window.alert(c);
if (checkup(mode,c)) { // i
temp=prev_sibling_listnode(lastnode);
}
else if (checkdn(mode,c)) { // k
temp=next_sibling_listnode(lastnode);
}
else if (checkr(mode,c)) { // l
expand(lastnode);
// temp=next_child_listnode(lastnode);
// if (temp==null) {
a = get_link(lastnode);
if (a!=null) a.focus(); else self.focus();
//}
}
else if (checkl(mode,c)) { // j
if (is_col(lastnode)) {
unFocus(lastnode);
collapse(lastnode);
}
else {
temp=unFocus(lastnode);
collapse(temp);
}
// if (temp==null) lastnode.focus(); // forces focus to correct div (try mozilla typesearch) (doesn't seem to work -mn/6.4.2004)
}
else return;
if (temp!=null) set_lastnode(temp);
// alert('pressed ' + String.fromCharCode(c) + '(' + c + ')');
return true;
}
function keytest (evt) {
return keyfunc(evt,1);
};
function presstest (evt) {
return keyfunc(evt,0);
};
document.onclick = onClickHandler;
document.onkeypress = presstest;
document.onkeyup = keytest; | var n = temp.nextSibling;
if (n==null) {
n=parent_listnode(temp); | random_line_split |
marktree.js | /* MarkTree JavaScript code
*
* The contents of this file are subject to the Mozilla Public License Version
* 1.1 (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
* http://www.mozilla.org/MPL/
*
* Software distributed under the License is distributed on an "AS IS" basis,
* WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License
* for the specific language governing rights and limitations under the
* License.
*
* Miika Nurminen, 12.7.2004.
*/
/* cross-browser (tested with ie5, mozilla 1 and opera 5) keypress detection */
function get_keycode(evt) {
// IE
code = document.layers ? evt.which
: document.all ? event.keyCode // event.keyCode!=evt.keyCode!
: evt.keyCode;
if (code==0)
code=evt.which; // for NS
return code;
}
var lastnode=null;
var listnodes = null;
var list_index=1;
var lastnodetype=''; // determines if node is a link, input or text;
// up, left, down, right, keypress codes
//ijkl
//var keys = new Array(105,106,107,108);
//num arrows
//var keys = new Array(56,52,50,54);
//wasd
// var press2 = new Array(119,97,115,100);
var press = new Array(47,45,42,43);
// keydown codes
// var keys2=new Array(87,65,83,68);
var keys= new Array(38,37,40,39);
// keyset 1 = keydown, otherwise press
function checkup(keyset,n) {
if (keyset==1) return (n==keys[0]);
return ((n==press[0]) /*|| (n==press2[0])*/)
}
function checkdn(keyset,n) {
if (keyset==1) return (n==keys[2]);
return ((n==press[2]) /*|| (n==press2[2])*/)
}
function checkl(keyset,n) {
if (keyset==1) return (n==keys[1]);
return ((n==press[1]) /*|| (n==press2[1])*/)
}
function checkr(keyset,n) {
if (keyset==1) return (n==keys[3]);
return ((n==press[3]) /*|| (n==press2[3])*/)
}
function is_exp(n) {
if (n==null) return false;
return ((n.className=='exp') || (n.className=='exp_active'));
}
function is_col(n) {
if (n==null) return false;
return ((n.className=='col') || (n.className=='col_active'));
}
function is_basic(n) {
if (n==null) return false;
return ((n.className=='basic') || (n.className=='basic_active'));
}
/* returns i>=0 if true */
function is_active(node) {
if (node.className==null) return false
return node.className.indexOf('_active');
}
function toggle_class(node) {
if ((node==null) || (node.className==null)) return;
str=node.className;
result="";
i = str.indexOf('_active');
if (i>0)
result= str.substr(0,i);
else
result= str+"_active";
node.className=result;
return node;
}
function activate(node) {
node.style.backgroundColor='#eeeeff';
}
function deactivate(node) {
node.style.backgroundColor='#ffffff';
}
function is_list_node(n) {
if (n==null) return false;
if (n.className==null) return false;
if ( (is_exp(n)) ||
(is_col(n)) ||
(is_basic(n)) )
return true; else return false;
}
function get_href(n) {
alist=n.attributes;
if (alist!=null) {
hr = alist.getNamedItem('href');
if (hr!=null) return hr.nodeValue;
}
if (n.childNodes.length==0) return '';
for (var i=0; i<n.childNodes.length; i++) {
s = get_href(n.childNodes[i]);
if (s!='') return s;
}
return '';
}
function get_link(n) {
if (n==null) return null;
if (n.style==null) return null;
// disabling uncontrolled recursion to prevent error messages on IE
// when trying to focus to invisible links (readonly mode)
// alert(n.nodeName+' '+n.className);
if ((n.nodeName=='UL') && (n.className=='sub')) return null;
if (n.nodeName=='A') return n;
if (n.childNodes.length==0) return null;
for (var i=0; i<n.childNodes.length; i++) {
s = get_link(n.childNodes[i]);
if (s!=null) return s;
}
return null;
}
function set_lastnode(n) {
/*var d = new Date();
var t_mil = d.getMilliseconds();*/
// speed-tested with Explorer, no significant differences
if (lastnode==n) return;
/* deactivate(lastnode)
lastnode=n;
activate(lastnode);*/
if (is_active(lastnode)>=0)
toggle_class(lastnode);
lastnode=n;
if (!(is_active(lastnode)>=0))
toggle_class(lastnode);
/*var d2 = new Date();
var t_mil2 = d2.getMilliseconds();
window.alert(t_mil2-t_mil);*/
}
function next_list_node() {
tempIndex = list_index;
while (tempIndex<listnodes.length-1) {
tempIndex++;
var x = listnodes[tempIndex];
if (is_list_node(x)) {
list_index=tempIndex;
return;
}
}
}
function prev_list_node() {
tempIndex = list_index;
while (tempIndex>0) {
tempIndex--;
var x = listnodes[tempIndex];
if (is_list_node(x)) {
list_index=tempIndex;
return;
}
}
}
function getsub (li) {
if (li.childNodes.length==0) return null;
for (var c = 0; c < li.childNodes.length; c++)
if ( (li.childNodes[c].className == 'sub') || (li.childNodes[c].className == 'subexp') )
return li.childNodes[c];
}
function find_listnode_recursive (li) {
if (is_list_node(li)) return li;
if (li.childNodes.length==0) return null;
result=null;
for (var c = 0; c < li.childNodes.length; c++) {
result=find_listnode_recursive(li.childNodes[c]);
if (result!=null) return result;
}
return null;
}
function next_child_listnode(li) {
var result=null;
for (var i=0; i<li.childNodes.length; i++) {
result=find_listnode_recursive(li.childNodes[i]);
if (result!=null) return result;
}
return null;
}
function next_actual_sibling_listnode(li) {
if (li==null) return null;
var temp=li;
while (1) {
var n = temp.nextSibling;
if (n==null) {
n=parent_listnode(temp);
return next_actual_sibling_listnode(n);
}
if (is_list_node(n)) return n;
temp=n;
}
}
function next_sibling_listnode(li) {
if (li==null) return null;
var result=null;
var temp=li;
if (is_col(temp)) return next_child_listnode(temp);
while (1) {
var n = temp.nextSibling;
if (n==null) {
n=parent_listnode(temp);
return next_actual_sibling_listnode(n);
}
if (is_list_node(n)) return n;
temp=n;
}
}
function last_sibling_listnode(li) {
if (li==null) return null;
var temp=li;
var last=null;
while(1) {
var n = temp.nextSibling;
if (is_list_node(temp))
last = temp;
if (n==null) {
if (is_col(last)) return last_sibling_listnode(next_child_listnode(last));
else return last;
}
temp = n;
}
}
function prev_sibling_listnode(li) {
if (li==null) return null;
var temp=li;
var n = null;
while (1) {
n = temp.previousSibling;
if (n==null) {
return parent_listnode(li);
}
if (is_list_node(n)) {
if (is_col(n)) {
return last_sibling_listnode(next_child_listnode(n));
}
else {
return n;
}
}
temp=n;
}
}
function parent_listnode(li) {
// added 12.7.2004 to prevent IE error when readonly mode==true
if (li==null) return null;
n=li;
while (1) {
n=n.parentNode;
if (n==null) return null;
if (is_list_node(n)) return n;
}
}
function getVisibleParents(id) {
var n = document.getElementById(id);
while(1) {
expand(n);
n = parent_listnode(n);
if (n==null) return;
}
}
function onClickHandler (evt) {
if (lastnode==null)
{
listnodes = document.getElementsByTagName('li');
lastnode=listnodes[1];
temp=listnodes[1];
}
var target = evt ? evt.target : event.srcElement;
if (!is_list_node(target)) return;
toggle(target);
set_lastnode(target);
}
function expand(node) {
if (!is_exp(node)) return;
if (node.className=='exp_active')
node.className='col_active';
else
node.className='col';
setSubClass(node,'subexp');
// getsub(node).className='subexp';
}
function collapse(node) {
if (!is_col(node)) return;
if (node.className=='col_active')
node.className='exp_active'
else
node.className='exp';
setSubClass(node,'sub');
// getsub(node).className='sub';
}
function setSubClass(node,name) {
sub = getsub(node);
if (sub==null) return;
sub.className=name;
}
function toggle(target) {
if (!is_list_node(target)) return;
if (is_col(target)) {
target.className='exp';
setSubClass(target,'sub');
// getsub(target).className='sub';
}
else if (is_exp(target)) {
target.className='col';
setSubClass(target,'subexp');
// getsub(target).className='subexp';
}
}
function expandAll(node) {
if (node.className=='exp') {
node.className='col';
setSubClass(node,'subexp');
// getsub(node).className='subexp';
}
var i;
if (node.childNodes!=null)
// if (node.hasChildNodes())
for ( i = 0; i<node.childNodes.length; i++)
expandAll(node.childNodes[i]);
}
function collapseAll(node) {
|
function unFocus(node) {
// unfocuses potential link that is to be hidden (if a==null there is no link so it should not be blurred).
// tested with mozilla 1.7, 12.7.2004. /mn
intemp=parent_listnode(node);
a = get_link(intemp); // added 6.4. to get keyboard working with
// moved before collapse to prevent an error message with IE when readonly==true
if (a!=null) a.blur(); // netscape after collapsing a focused node
return intemp;
}
// mode: 0==keypress, 1==keyup
function keyfunc(evt,mode) {
var c = get_keycode(evt);
var temp = null;
var a = null;
if (lastnode==null) {
listnodes = document.getElementsByTagName('li');
lastnode=listnodes[1];
temp=listnodes[1];
}
//window.alert(c);
if (checkup(mode,c)) { // i
temp=prev_sibling_listnode(lastnode);
}
else if (checkdn(mode,c)) { // k
temp=next_sibling_listnode(lastnode);
}
else if (checkr(mode,c)) { // l
expand(lastnode);
// temp=next_child_listnode(lastnode);
// if (temp==null) {
a = get_link(lastnode);
if (a!=null) a.focus(); else self.focus();
//}
}
else if (checkl(mode,c)) { // j
if (is_col(lastnode)) {
unFocus(lastnode);
collapse(lastnode);
}
else {
temp=unFocus(lastnode);
collapse(temp);
}
// if (temp==null) lastnode.focus(); // forces focus to correct div (try mozilla typesearch) (doesn't seem to work -mn/6.4.2004)
}
else return;
if (temp!=null) set_lastnode(temp);
// alert('pressed ' + String.fromCharCode(c) + '(' + c + ')');
return true;
}
function keytest (evt) {
return keyfunc(evt,1);
};
function presstest (evt) {
return keyfunc(evt,0);
};
document.onclick = onClickHandler;
document.onkeypress = presstest;
document.onkeyup = keytest;
| if (node.className=='col') {
node.className='exp';
setSubClass(node,'sub');
// getsub(node).className='sub';
}
var i;
if (node.childNodes!=null)
// for opera if (node.hasChildNodes())
for ( i = 0; i<node.childNodes.length; i++)
collapseAll(node.childNodes[i]);
}
| identifier_body |
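A short, hypothetical usage sketch for the marktree.js rows above, not part of the dataset. The element ids ('tree-root', 'expand-all', 'collapse-all') and the button-driven setup are assumptions; the script itself only requires li elements classed exp/col/basic with nested ul.sub lists. Only helpers defined in the script are called: expandAll, collapseAll and getVisibleParents.
// Assumed markup: an element with id "tree-root" wrapping the li.exp/li.col tree,
// plus two buttons with ids "expand-all" and "collapse-all" (all hypothetical).
window.onload = function () {
  var root = document.getElementById('tree-root');

  document.getElementById('expand-all').onclick = function () {
    expandAll(root);
  };
  document.getElementById('collapse-all').onclick = function () {
    collapseAll(root);
  };

  // Reveal a deep node referenced by the URL fragment by expanding its ancestors.
  if (location.hash.length > 1) {
    getVisibleParents(location.hash.substring(1));
  }
};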
xcb.rs | pub type AtomID = xcb::Atom;
pub type Color = u32;
pub type ScreenID = i32;
pub type WindowID = xcb::Window;
pub type DrawableID = xcb::Window;
pub type GraphicsContextID = xcb::Atom;
pub type EventKeyID = xcb::EventMask;
pub type ColorMapID = xcb::Atom;
pub type PixMapID = xcb::Atom;
pub type VisualID = xcb::Atom;
#[derive(Debug)]
pub struct Error
{
pub error_code: u8
}
pub enum Event
{
KeyEvent(KeyEvent),
ExposedEvent,
//TODO(fpalacios): Move the ClientMessageEvent data into its own struct (like KeyEvent)
ClientMessageEvent
{
window : WindowID,
event_type : AtomID,
data : [u32; 5],
},
UnknownEvent(xcb::EventMask, xcb::Event<xcb::ffi::xcb_generic_event_t>)
}
#[derive(Debug)]
pub enum KeyEvent
{
KeyPress,
KeyReleased,
}
#[derive(Debug)]
pub struct Property
{
pub key : AtomID,
pub value: PropertyValue,
}
#[derive(Debug)]
pub enum PropertyValue
{
String(String),
I32(i32),
U32(u32),
None,
Atom(AtomID),
UnknownAtom(AtomID),
}
impl PropertyValue
{
pub fn get_type_atom_id(&self) -> AtomID
{
return match self
{
PropertyValue::String(_) => xcb::ATOM_STRING,
PropertyValue::I32(_) => xcb::ATOM_INTEGER,
PropertyValue::U32(_) => xcb::ATOM_CARDINAL,
PropertyValue::Atom(_) => xcb::ATOM_ATOM,
PropertyValue::UnknownAtom(atom_id) => atom_id.clone(),
PropertyValue::None => xcb::ATOM_NONE
};
}
}
pub struct Client<'conn>
{
pub conn : &'conn xcb::Connection,
}
impl<'conn> Client<'conn>
{
pub fn new(conn: &'conn xcb::Connection) -> Client
{
return Client
{
conn,
};
}
pub fn find_atom_id_by_name(&self, name: &str) -> Option<AtomID>
{
let atom_id = xcb::intern_atom(&self.conn, false, name).get_reply().unwrap().atom();
return if atom_id == xcb::ATOM_NONE { None } else { Some(atom_id) };
}
pub fn find_atom_name(&self, atom_id: AtomID) -> String
{
return xcb::get_atom_name(&self.conn, atom_id).get_reply().unwrap().name().to_owned();
}
pub fn poll_events(&self) -> Option<Event>
{
let event = match self.conn.poll_for_event()
{
Some(event) => event,
None => return None,
};
match event.response_type() & !0x80
{
xcb::EXPOSE => return Some(Event::ExposedEvent),
xcb::KEY_PRESS => return Some(Event::KeyEvent(KeyEvent::KeyPress)),
xcb::KEY_RELEASE => return Some(Event::KeyEvent(KeyEvent::KeyReleased)),
event =>
{
println!("UNKOWN EVENT {:?}", event);
return None;
}
};
}
pub fn send_message(&self, destination: &Window, event: Event)
{
match event
{
Event::ClientMessageEvent {window, event_type, data , ..} =>
{
let message_data = xcb::ffi::xproto::xcb_client_message_data_t::from_data32(data);
let event = xcb::Event::<xcb::ffi::xproto::xcb_client_message_event_t>::new(
32,
window,
event_type,
message_data
);
xcb::send_event_checked(
&self.conn,
false,
destination.id,
xcb::EVENT_MASK_SUBSTRUCTURE_REDIRECT,
&event
).request_check().unwrap();
}
_ =>
{
//TODO(fpalacios): Figure out what to do here
}
};
self.flush().unwrap();
}
pub fn flush(&self) -> Result<(), ()>
{
return if self.conn.flush() { Ok(()) } else { Err(()) };
}
fn generate_id(&self) -> u32
{
return self.conn.generate_id();
}
}
pub struct Screen<'client, 'conn>
{
pub id : ScreenID,
pub client : &'client Client<'conn>,
pub xcb_screen: xcb::Screen<'client>,
}
impl<'client, 'conn> Screen<'client, 'conn>
{
pub fn from_id(client: &'client Client<'conn>, id: ScreenID) -> Option<Screen<'client, 'conn>>
{
let xcb_screen = client.conn.get_setup().roots().nth(std::convert::TryInto::try_into(id).unwrap())?;
return Some(
Screen
{
id,
client,
xcb_screen,
}
);
}
pub fn root_window(&self) -> Window
{
return Window
{
screen: self,
id: self.xcb_screen.root()
};
}
pub fn get_black_pixel(&self) -> Color
{
return self.xcb_screen.black_pixel();
}
pub fn get_white_pixel(&self) -> Color
{
return self.xcb_screen.white_pixel();
}
}
pub struct Window<'screen, 'client, 'conn>
{
pub screen: &'screen Screen<'client, 'conn>,
pub id: WindowID,
}
impl<'screen, 'client, 'conn> Window<'screen, 'client, 'conn>
{
pub fn children(&self) -> Vec<Window<'screen, 'client, 'conn>>
{
let tree = xcb::query_tree(&self.screen.client.conn, self.id).get_reply().unwrap();
let children = tree.children();
let mut result = Vec::with_capacity(children.len());
for child in children
{
result.push(Window { screen: self.screen, id: child.clone()});
}
return result;
}
pub fn get_property(&self, atom: AtomID) -> Result<Property, Error>
{
let property = match xcb::get_property(
&self.screen.client.conn,
false,
self.id,
atom,
xcb::ATOM_ANY,
0,
1024
).get_reply()
{
Ok(property) => property,
Err(err) =>
{
return Err(
Error
{
error_code: err.error_code()
}
);
}
};
let value = match property.type_()
{
xcb::ATOM_STRING => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()),
xcb::ATOM_INTEGER => PropertyValue::I32(property.value::<i32>()[0]),
xcb::ATOM_NONE => PropertyValue::None,
xcb::ATOM_ATOM => PropertyValue::Atom(property.value::<u32>()[0]),
xcb::ATOM_CARDINAL => PropertyValue::U32(property.value::<u32>()[0]),
unknown_atom =>
{
match self.screen.client.find_atom_name(unknown_atom).as_ref()
{
"UTF8_STRING" => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()),
_ => PropertyValue::UnknownAtom(unknown_atom)
}
}
};
return Ok(Property{ key: atom, value });
}
pub fn set_property(&self, property: &Property)
{
let atom_type = property.value.get_type_atom_id();
match &property.value
{
PropertyValue::String(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
8,
val.as_bytes()
);
},
PropertyValue::Atom(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::I32(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::U32(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::None =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[xcb::ATOM_NONE]
);
},
PropertyValue::UnknownAtom(_) =>
{
//TODO(fpalacios): What do we do here?
panic!("What do we do here?");
},
};
}
pub fn geometry(&self) -> (i16, i16, u16, u16)
{
let geometry = match xcb::get_geometry(&self.screen.client.conn, self.id).get_reply()
{
Ok(geomerty) => geomerty,
Err(error) =>
{
println!("Error al obtener la geometria. Error code [{}]", error.error_code());
panic!();
}
};
return (geometry.x(), geometry.y(), geometry.width(), geometry.height());
}
pub fn map(&self)
{
xcb::map_window(&self.screen.client.conn, self.id);
self.screen.client.flush().unwrap();
}
pub fn create_child_window(
&self,
(x, y, width, height): (i16, i16, u16, u16),
depth : u8,
colormap : Option<&ColorMap>,
background_pixel : Option<u32>,
border_pixel : Option<u32>,
visual_id : Option<VisualID>
)
-> Result<Window<'screen, 'client, 'conn>, Error>
{
let child_id = self.screen.client.generate_id();
let mut window_attributes = vec![
(
xcb::CW_EVENT_MASK,
xcb::GC_GRAPHICS_EXPOSURES | xcb::EVENT_MASK_KEY_PRESS
)
];
if let Some(colormap) = colormap
{
window_attributes.push((xcb::CW_COLORMAP, colormap.id));
}
if let Some(background_pixel) = background_pixel
{
window_attributes.push((xcb::CW_BACK_PIXEL, background_pixel))
}
if let Some(border_pixel) = border_pixel
{
window_attributes.push((xcb::CW_BORDER_PIXEL, border_pixel));
}
let visual_id = match visual_id
{
Some(visual_id) => visual_id,
None => self.screen.xcb_screen.root_visual()
};
if let Err(e) = xcb::create_window_checked(
&self.screen.client.conn,
depth,
child_id,
self.id,
x,
y,
width,
height,
1,
xcb::WINDOW_CLASS_INPUT_OUTPUT as u16,
visual_id,
&window_attributes
).request_check()
{
| self.screen.client.flush().unwrap();
let window = Window
{
screen: self.screen,
id : child_id,
};
window.map();
return Ok(window);
}
}
pub struct GraphicsContext<'client, 'conn>
{
id : GraphicsContextID,
client: &'client Client<'conn>
}
impl<'client, 'conn> GraphicsContext<'client, 'conn>
{
pub fn generate(window: &Window<'_, 'client, 'conn>, foreground: Color, background: Color) -> GraphicsContext<'client, 'conn>
{
let id = window.screen.client.generate_id();
xcb::create_gc_checked(
&window.screen.client.conn,
id,
window.id,
&[
(xcb::GC_FOREGROUND, foreground),
(xcb::GC_BACKGROUND, background),
(xcb::GC_LINE_WIDTH, 1),
(xcb::GC_LINE_STYLE, xcb::LINE_STYLE_SOLID),
(xcb::GC_GRAPHICS_EXPOSURES, 0)
]
).request_check().unwrap();
return GraphicsContext
{
client: window.screen.client,
id,
};
}
pub fn draw_rects(&self, window: &'conn Window, rectangles: &[xcb::Rectangle])
{
xcb::poly_rectangle(&self.client.conn, window.id, self.id, &rectangles);
}
pub fn fill_rects(&self, window: &'conn Window, rectangles: &[xcb::Rectangle])
{
xcb::poly_fill_rectangle(&self.client.conn, window.id, self.id, &rectangles);
}
    //TODO(fpalacios): Figure out how to draw an image in pure xcb
pub fn draw_image(
&self,
drawable: DrawableID,
_image: &image::RgbaImage,
(x, y): (i16, i16),
)
{
        //NOTE(fpalacios): Generating and filling this array is just a one-off test for now
let mut a = [0_u8; 100 * 100 * 4];
for i in (0 .. 100).step_by(4)
{
a[i + 0] = 0x00;
a[i + 1] = 0xFF;
a[i + 2] = 0xFF;
a[i + 3] = 0x00;
}
if let Err(e) = xcb::put_image_checked(
&self.client.conn,
xcb::IMAGE_FORMAT_Z_PIXMAP as u8,
drawable,
self.id,
100,
100,
x,
y,
0,
32,
&a,
).request_check()
{
            println!("Failed to draw the image {:?}", e);
}
}
pub fn clear_window(&self, window: &'conn Window)
{
let (x, y, width, height) = window.geometry();
xcb::clear_area(&self.client.conn, true, window.id, x, y, width, height);
}
pub fn clear_area(&self, window: &'conn Window, (x, y, width, height): (i16, i16, u16, u16))
{
xcb::clear_area(&self.client.conn, true, window.id, x, y, width, height);
}
}
pub struct ColorMap<'client, 'conn>
{
pub client: &'client Client<'conn>,
pub id : ColorMapID
}
impl<'client, 'conn> ColorMap<'client, 'conn>
{
pub fn create(window: &'conn Window, visual: VisualID) -> ColorMap<'client, 'conn>
{
let client = window.screen.client;
let id = client.generate_id();
xcb::create_colormap_checked(
&client.conn,
xcb::COLORMAP_ALLOC_NONE as u8,
id,
window.id,
visual
).request_check().unwrap();
return ColorMap
{
client,
id
};
}
}
pub struct PixMap<'client, 'conn>
{
pub client: &'client Client<'conn>,
pub id : PixMapID,
pub width : u16,
pub height: u16
}
impl<'client, 'conn> PixMap<'client, 'conn>
{
pub fn create(screen: &Screen<'client, 'conn>, drawable: DrawableID, width: u16, height: u16) -> PixMap<'client, 'conn>
{
let client = screen.client;
let id = client.generate_id();
xcb::create_pixmap_checked(
&client.conn,
32,
id,
drawable,
width,
height
).request_check().unwrap();
return PixMap
{
client,
id,
width,
height
};
}
} | return Err(Error{error_code: e.error_code()})
};
| conditional_block |
xcb.rs | pub type AtomID = xcb::Atom;
pub type Color = u32;
pub type ScreenID = i32;
pub type WindowID = xcb::Window;
pub type DrawableID = xcb::Window;
pub type GraphicsContextID = xcb::Atom;
pub type EventKeyID = xcb::EventMask;
pub type ColorMapID = xcb::Atom;
pub type PixMapID = xcb::Atom;
pub type VisualID = xcb::Atom;
#[derive(Debug)]
pub struct Error
{
pub error_code: u8
}
pub enum Event
{
KeyEvent(KeyEvent),
ExposedEvent,
    //TODO(fpalacios): Move ClientMessageEvent's data into its own struct (like KeyEvent)
ClientMessageEvent
{
window : WindowID,
event_type : AtomID,
data : [u32; 5],
},
UnknownEvent(xcb::EventMask, xcb::Event<xcb::ffi::xcb_generic_event_t>)
}
#[derive(Debug)]
pub enum KeyEvent
{
KeyPress,
KeyReleased,
}
#[derive(Debug)]
pub struct Property
{
pub key : AtomID,
pub value: PropertyValue,
}
#[derive(Debug)]
pub enum PropertyValue
{
String(String),
I32(i32),
U32(u32), | impl PropertyValue
{
pub fn get_type_atom_id(&self) -> AtomID
{
return match self
{
PropertyValue::String(_) => xcb::ATOM_STRING,
PropertyValue::I32(_) => xcb::ATOM_INTEGER,
PropertyValue::U32(_) => xcb::ATOM_CARDINAL,
PropertyValue::Atom(_) => xcb::ATOM_ATOM,
PropertyValue::UnknownAtom(atom_id) => atom_id.clone(),
PropertyValue::None => xcb::ATOM_NONE
};
}
}
pub struct Client<'conn>
{
pub conn : &'conn xcb::Connection,
}
impl<'conn> Client<'conn>
{
pub fn new(conn: &'conn xcb::Connection) -> Client
{
return Client
{
conn,
};
}
pub fn find_atom_id_by_name(&self, name: &str) -> Option<AtomID>
{
let atom_id = xcb::intern_atom(&self.conn, false, name).get_reply().unwrap().atom();
return if atom_id == xcb::ATOM_NONE { None } else { Some(atom_id) };
}
pub fn find_atom_name(&self, atom_id: AtomID) -> String
{
return xcb::get_atom_name(&self.conn, atom_id).get_reply().unwrap().name().to_owned();
}
pub fn poll_events(&self) -> Option<Event>
{
let event = match self.conn.poll_for_event()
{
Some(event) => event,
None => return None,
};
match event.response_type() & !0x80
{
xcb::EXPOSE => return Some(Event::ExposedEvent),
xcb::KEY_PRESS => return Some(Event::KeyEvent(KeyEvent::KeyPress)),
xcb::KEY_RELEASE => return Some(Event::KeyEvent(KeyEvent::KeyReleased)),
event =>
{
                println!("UNKNOWN EVENT {:?}", event);
return None;
}
};
}
pub fn send_message(&self, destination: &Window, event: Event)
{
match event
{
Event::ClientMessageEvent {window, event_type, data , ..} =>
{
let message_data = xcb::ffi::xproto::xcb_client_message_data_t::from_data32(data);
let event = xcb::Event::<xcb::ffi::xproto::xcb_client_message_event_t>::new(
32,
window,
event_type,
message_data
);
xcb::send_event_checked(
&self.conn,
false,
destination.id,
xcb::EVENT_MASK_SUBSTRUCTURE_REDIRECT,
&event
).request_check().unwrap();
}
_ =>
{
                //TODO(fpalacios): Decide what to do here
}
};
self.flush().unwrap();
}
pub fn flush(&self) -> Result<(), ()>
{
return if self.conn.flush() { Ok(()) } else { Err(()) };
}
fn generate_id(&self) -> u32
{
return self.conn.generate_id();
}
}
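// Editor's note: the function below is a hypothetical usage sketch, not part of the
// original xcb.rs. It assumes the xcb 0.9-style API used throughout this file, where
// xcb::Connection::connect(None) returns a (Connection, screen_number) pair.
pub fn example_event_loop() -> Result<(), ()>
{
    let (conn, screen_num) = xcb::Connection::connect(None).map_err(|_| ())?;
    let client = Client::new(&conn);
    let screen = Screen::from_id(&client, screen_num).ok_or(())?;
    println!("root window id: {}", screen.root_window().id);
    loop
    {
        // poll_events returns None when the queue is empty; a real caller would block
        // on wait_for_event or sleep between polls instead of spinning.
        match client.poll_events()
        {
            Some(Event::KeyEvent(KeyEvent::KeyPress)) => return Ok(()),
            Some(Event::ExposedEvent) => println!("window exposed"),
            _ => {}
        }
    }
}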
pub struct Screen<'client, 'conn>
{
pub id : ScreenID,
pub client : &'client Client<'conn>,
pub xcb_screen: xcb::Screen<'client>,
}
impl<'client, 'conn> Screen<'client, 'conn>
{
pub fn from_id(client: &'client Client<'conn>, id: ScreenID) -> Option<Screen<'client, 'conn>>
{
let xcb_screen = client.conn.get_setup().roots().nth(std::convert::TryInto::try_into(id).unwrap())?;
return Some(
Screen
{
id,
client,
xcb_screen,
}
);
}
pub fn root_window(&self) -> Window
{
return Window
{
screen: self,
id: self.xcb_screen.root()
};
}
pub fn get_black_pixel(&self) -> Color
{
return self.xcb_screen.black_pixel();
}
pub fn get_white_pixel(&self) -> Color
{
return self.xcb_screen.white_pixel();
}
}
pub struct Window<'screen, 'client, 'conn>
{
pub screen: &'screen Screen<'client, 'conn>,
pub id: WindowID,
}
impl<'screen, 'client, 'conn> Window<'screen, 'client, 'conn>
{
pub fn children(&self) -> Vec<Window<'screen, 'client, 'conn>>
{
let tree = xcb::query_tree(&self.screen.client.conn, self.id).get_reply().unwrap();
let children = tree.children();
let mut result = Vec::with_capacity(children.len());
for child in children
{
result.push(Window { screen: self.screen, id: child.clone()});
}
return result;
}
pub fn get_property(&self, atom: AtomID) -> Result<Property, Error>
{
let property = match xcb::get_property(
&self.screen.client.conn,
false,
self.id,
atom,
xcb::ATOM_ANY,
0,
1024
).get_reply()
{
Ok(property) => property,
Err(err) =>
{
return Err(
Error
{
error_code: err.error_code()
}
);
}
};
let value = match property.type_()
{
xcb::ATOM_STRING => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()),
xcb::ATOM_INTEGER => PropertyValue::I32(property.value::<i32>()[0]),
xcb::ATOM_NONE => PropertyValue::None,
xcb::ATOM_ATOM => PropertyValue::Atom(property.value::<u32>()[0]),
xcb::ATOM_CARDINAL => PropertyValue::U32(property.value::<u32>()[0]),
unknown_atom =>
{
match self.screen.client.find_atom_name(unknown_atom).as_ref()
{
"UTF8_STRING" => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()),
_ => PropertyValue::UnknownAtom(unknown_atom)
}
}
};
return Ok(Property{ key: atom, value });
}
pub fn set_property(&self, property: &Property)
{
let atom_type = property.value.get_type_atom_id();
match &property.value
{
PropertyValue::String(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
8,
val.as_bytes()
);
},
PropertyValue::Atom(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::I32(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::U32(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::None =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[xcb::ATOM_NONE]
);
},
PropertyValue::UnknownAtom(_) =>
{
                //TODO(fpalacios): What do we do here?
                panic!("What do we do here?");
},
};
}
pub fn geometry(&self) -> (i16, i16, u16, u16)
{
let geometry = match xcb::get_geometry(&self.screen.client.conn, self.id).get_reply()
{
            Ok(geometry) => geometry,
            Err(error) =>
            {
                println!("Failed to get the geometry. Error code [{}]", error.error_code());
panic!();
}
};
return (geometry.x(), geometry.y(), geometry.width(), geometry.height());
}
pub fn map(&self)
{
xcb::map_window(&self.screen.client.conn, self.id);
self.screen.client.flush().unwrap();
}
pub fn create_child_window(
&self,
(x, y, width, height): (i16, i16, u16, u16),
depth : u8,
colormap : Option<&ColorMap>,
background_pixel : Option<u32>,
border_pixel : Option<u32>,
visual_id : Option<VisualID>
)
-> Result<Window<'screen, 'client, 'conn>, Error>
{
let child_id = self.screen.client.generate_id();
let mut window_attributes = vec![
(
xcb::CW_EVENT_MASK,
xcb::GC_GRAPHICS_EXPOSURES | xcb::EVENT_MASK_KEY_PRESS
)
];
if let Some(colormap) = colormap
{
window_attributes.push((xcb::CW_COLORMAP, colormap.id));
}
if let Some(background_pixel) = background_pixel
{
window_attributes.push((xcb::CW_BACK_PIXEL, background_pixel))
}
if let Some(border_pixel) = border_pixel
{
window_attributes.push((xcb::CW_BORDER_PIXEL, border_pixel));
}
let visual_id = match visual_id
{
Some(visual_id) => visual_id,
None => self.screen.xcb_screen.root_visual()
};
if let Err(e) = xcb::create_window_checked(
&self.screen.client.conn,
depth,
child_id,
self.id,
x,
y,
width,
height,
1,
xcb::WINDOW_CLASS_INPUT_OUTPUT as u16,
visual_id,
&window_attributes
).request_check()
{
return Err(Error{error_code: e.error_code()})
};
self.screen.client.flush().unwrap();
let window = Window
{
screen: self.screen,
id : child_id,
};
window.map();
return Ok(window);
}
}
pub struct GraphicsContext<'client, 'conn>
{
id : GraphicsContextID,
client: &'client Client<'conn>
}
impl<'client, 'conn> GraphicsContext<'client, 'conn>
{
pub fn generate(window: &Window<'_, 'client, 'conn>, foreground: Color, background: Color) -> GraphicsContext<'client, 'conn>
{
let id = window.screen.client.generate_id();
xcb::create_gc_checked(
&window.screen.client.conn,
id,
window.id,
&[
(xcb::GC_FOREGROUND, foreground),
(xcb::GC_BACKGROUND, background),
(xcb::GC_LINE_WIDTH, 1),
(xcb::GC_LINE_STYLE, xcb::LINE_STYLE_SOLID),
(xcb::GC_GRAPHICS_EXPOSURES, 0)
]
).request_check().unwrap();
return GraphicsContext
{
client: window.screen.client,
id,
};
}
pub fn draw_rects(&self, window: &'conn Window, rectangles: &[xcb::Rectangle])
{
xcb::poly_rectangle(&self.client.conn, window.id, self.id, &rectangles);
}
pub fn fill_rects(&self, window: &'conn Window, rectangles: &[xcb::Rectangle])
{
xcb::poly_fill_rectangle(&self.client.conn, window.id, self.id, &rectangles);
}
    //TODO(fpalacios): Figure out how to draw an image in pure xcb
pub fn draw_image(
&self,
drawable: DrawableID,
_image: &image::RgbaImage,
(x, y): (i16, i16),
)
{
        //NOTE(fpalacios): Generating and filling this array is just a one-off test for now
let mut a = [0_u8; 100 * 100 * 4];
for i in (0 .. 100).step_by(4)
{
a[i + 0] = 0x00;
a[i + 1] = 0xFF;
a[i + 2] = 0xFF;
a[i + 3] = 0x00;
}
if let Err(e) = xcb::put_image_checked(
&self.client.conn,
xcb::IMAGE_FORMAT_Z_PIXMAP as u8,
drawable,
self.id,
100,
100,
x,
y,
0,
32,
&a,
).request_check()
{
            println!("Failed to draw the image {:?}", e);
}
}
pub fn clear_window(&self, window: &'conn Window)
{
let (x, y, width, height) = window.geometry();
xcb::clear_area(&self.client.conn, true, window.id, x, y, width, height);
}
pub fn clear_area(&self, window: &'conn Window, (x, y, width, height): (i16, i16, u16, u16))
{
xcb::clear_area(&self.client.conn, true, window.id, x, y, width, height);
}
}
pub struct ColorMap<'client, 'conn>
{
pub client: &'client Client<'conn>,
pub id : ColorMapID
}
impl<'client, 'conn> ColorMap<'client, 'conn>
{
pub fn create(window: &'conn Window, visual: VisualID) -> ColorMap<'client, 'conn>
{
let client = window.screen.client;
let id = client.generate_id();
xcb::create_colormap_checked(
&client.conn,
xcb::COLORMAP_ALLOC_NONE as u8,
id,
window.id,
visual
).request_check().unwrap();
return ColorMap
{
client,
id
};
}
}
pub struct PixMap<'client, 'conn>
{
pub client: &'client Client<'conn>,
pub id : PixMapID,
pub width : u16,
pub height: u16
}
impl<'client, 'conn> PixMap<'client, 'conn>
{
pub fn create(screen: &Screen<'client, 'conn>, drawable: DrawableID, width: u16, height: u16) -> PixMap<'client, 'conn>
{
let client = screen.client;
let id = client.generate_id();
xcb::create_pixmap_checked(
&client.conn,
32,
id,
drawable,
width,
height
).request_check().unwrap();
return PixMap
{
client,
id,
width,
height
};
}
} | None,
Atom(AtomID),
UnknownAtom(AtomID),
}
| random_line_split |
xcb.rs | pub type AtomID = xcb::Atom;
pub type Color = u32;
pub type ScreenID = i32;
pub type WindowID = xcb::Window;
pub type DrawableID = xcb::Window;
pub type GraphicsContextID = xcb::Atom;
pub type EventKeyID = xcb::EventMask;
pub type ColorMapID = xcb::Atom;
pub type PixMapID = xcb::Atom;
pub type VisualID = xcb::Atom;
#[derive(Debug)]
pub struct Error
{
pub error_code: u8
}
pub enum Event
{
KeyEvent(KeyEvent),
ExposedEvent,
    //TODO(fpalacios): Move ClientMessageEvent's data into its own struct (like KeyEvent)
ClientMessageEvent
{
window : WindowID,
event_type : AtomID,
data : [u32; 5],
},
UnknownEvent(xcb::EventMask, xcb::Event<xcb::ffi::xcb_generic_event_t>)
}
#[derive(Debug)]
pub enum KeyEvent
{
KeyPress,
KeyReleased,
}
#[derive(Debug)]
pub struct Property
{
pub key : AtomID,
pub value: PropertyValue,
}
#[derive(Debug)]
pub enum PropertyValue
{
String(String),
I32(i32),
U32(u32),
None,
Atom(AtomID),
UnknownAtom(AtomID),
}
impl PropertyValue
{
pub fn get_type_atom_id(&self) -> AtomID
{
return match self
{
PropertyValue::String(_) => xcb::ATOM_STRING,
PropertyValue::I32(_) => xcb::ATOM_INTEGER,
PropertyValue::U32(_) => xcb::ATOM_CARDINAL,
PropertyValue::Atom(_) => xcb::ATOM_ATOM,
PropertyValue::UnknownAtom(atom_id) => atom_id.clone(),
PropertyValue::None => xcb::ATOM_NONE
};
}
}
pub struct Client<'conn>
{
pub conn : &'conn xcb::Connection,
}
impl<'conn> Client<'conn>
{
pub fn new(conn: &'conn xcb::Connection) -> Client
{
return Client
{
conn,
};
}
pub fn find_atom_id_by_name(&self, name: &str) -> Option<AtomID>
{
let atom_id = xcb::intern_atom(&self.conn, false, name).get_reply().unwrap().atom();
return if atom_id == xcb::ATOM_NONE { None } else { Some(atom_id) };
}
pub fn find_atom_name(&self, atom_id: AtomID) -> String
{
return xcb::get_atom_name(&self.conn, atom_id).get_reply().unwrap().name().to_owned();
}
pub fn poll_events(&self) -> Option<Event>
{
let event = match self.conn.poll_for_event()
{
Some(event) => event,
None => return None,
};
match event.response_type() & !0x80
{
xcb::EXPOSE => return Some(Event::ExposedEvent),
xcb::KEY_PRESS => return Some(Event::KeyEvent(KeyEvent::KeyPress)),
xcb::KEY_RELEASE => return Some(Event::KeyEvent(KeyEvent::KeyReleased)),
event =>
{
                println!("UNKNOWN EVENT {:?}", event);
return None;
}
};
}
pub fn send_message(&self, destination: &Window, event: Event)
{
match event
{
Event::ClientMessageEvent {window, event_type, data , ..} =>
{
let message_data = xcb::ffi::xproto::xcb_client_message_data_t::from_data32(data);
let event = xcb::Event::<xcb::ffi::xproto::xcb_client_message_event_t>::new(
32,
window,
event_type,
message_data
);
xcb::send_event_checked(
&self.conn,
false,
destination.id,
xcb::EVENT_MASK_SUBSTRUCTURE_REDIRECT,
&event
).request_check().unwrap();
}
_ =>
{
                //TODO(fpalacios): Decide what to do here
}
};
self.flush().unwrap();
}
pub fn flush(&self) -> Result<(), ()>
{
return if self.conn.flush() { Ok(()) } else { Err(()) };
}
fn generate_id(&self) -> u32
{
return self.conn.generate_id();
}
}
pub struct Screen<'client, 'conn>
{
pub id : ScreenID,
pub client : &'client Client<'conn>,
pub xcb_screen: xcb::Screen<'client>,
}
impl<'client, 'conn> Screen<'client, 'conn>
{
pub fn from_id(client: &'client Client<'conn>, id: ScreenID) -> Option<Screen<'client, 'conn>>
{
let xcb_screen = client.conn.get_setup().roots().nth(std::convert::TryInto::try_into(id).unwrap())?;
return Some(
Screen
{
id,
client,
xcb_screen,
}
);
}
pub fn root_window(&self) -> Window
{
return Window
{
screen: self,
id: self.xcb_screen.root()
};
}
pub fn get_black_pixel(&self) -> Color
{
return self.xcb_screen.black_pixel();
}
pub fn get_white_pixel(&self) -> Color
{
return self.xcb_screen.white_pixel();
}
}
pub struct Window<'screen, 'client, 'conn>
{
pub screen: &'screen Screen<'client, 'conn>,
pub id: WindowID,
}
impl<'screen, 'client, 'conn> Window<'screen, 'client, 'conn>
{
pub fn children(&self) -> Vec<Window<'screen, 'client, 'conn>>
{
let tree = xcb::query_tree(&self.screen.client.conn, self.id).get_reply().unwrap();
let children = tree.children();
let mut result = Vec::with_capacity(children.len());
for child in children
{
result.push(Window { screen: self.screen, id: child.clone()});
}
return result;
}
pub fn get_property(&self, atom: AtomID) -> Result<Property, Error>
{
let property = match xcb::get_property(
&self.screen.client.conn,
false,
self.id,
atom,
xcb::ATOM_ANY,
0,
1024
).get_reply()
{
Ok(property) => property,
Err(err) =>
{
return Err(
Error
{
error_code: err.error_code()
}
);
}
};
let value = match property.type_()
{
xcb::ATOM_STRING => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()),
xcb::ATOM_INTEGER => PropertyValue::I32(property.value::<i32>()[0]),
xcb::ATOM_NONE => PropertyValue::None,
xcb::ATOM_ATOM => PropertyValue::Atom(property.value::<u32>()[0]),
xcb::ATOM_CARDINAL => PropertyValue::U32(property.value::<u32>()[0]),
unknown_atom =>
{
match self.screen.client.find_atom_name(unknown_atom).as_ref()
{
"UTF8_STRING" => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()),
_ => PropertyValue::UnknownAtom(unknown_atom)
}
}
};
return Ok(Property{ key: atom, value });
}
pub fn set_property(&self, property: &Property)
{
let atom_type = property.value.get_type_atom_id();
match &property.value
{
PropertyValue::String(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
8,
val.as_bytes()
);
},
PropertyValue::Atom(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::I32(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::U32(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::None =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[xcb::ATOM_NONE]
);
},
PropertyValue::UnknownAtom(_) =>
{
                //TODO(fpalacios): What do we do here?
                panic!("What do we do here?");
},
};
}
pub fn geometry(&self) -> (i16, i16, u16, u16)
{
let geometry = match xcb::get_geometry(&self.screen.client.conn, self.id).get_reply()
{
            Ok(geometry) => geometry,
            Err(error) =>
            {
                println!("Failed to get the geometry. Error code [{}]", error.error_code());
panic!();
}
};
return (geometry.x(), geometry.y(), geometry.width(), geometry.height());
}
pub fn map(&self)
{
xcb::map_window(&self.screen.client.conn, self.id);
self.screen.client.flush().unwrap();
}
pub fn create_child_window(
&self,
(x, y, width, height): (i16, i16, u16, u16),
depth : u8,
colormap : Option<&ColorMap>,
background_pixel : Option<u32>,
border_pixel : Option<u32>,
visual_id : Option<VisualID>
)
-> Result<Window<'screen, 'client, 'conn>, Error>
{
let child_id = self.screen.client.generate_id();
let mut window_attributes = vec![
(
xcb::CW_EVENT_MASK,
xcb::GC_GRAPHICS_EXPOSURES | xcb::EVENT_MASK_KEY_PRESS
)
];
if let Some(colormap) = colormap
{
window_attributes.push((xcb::CW_COLORMAP, colormap.id));
}
if let Some(background_pixel) = background_pixel
{
window_attributes.push((xcb::CW_BACK_PIXEL, background_pixel))
}
if let Some(border_pixel) = border_pixel
{
window_attributes.push((xcb::CW_BORDER_PIXEL, border_pixel));
}
let visual_id = match visual_id
{
Some(visual_id) => visual_id,
None => self.screen.xcb_screen.root_visual()
};
if let Err(e) = xcb::create_window_checked(
&self.screen.client.conn,
depth,
child_id,
self.id,
x,
y,
width,
height,
1,
xcb::WINDOW_CLASS_INPUT_OUTPUT as u16,
visual_id,
&window_attributes
).request_check()
{
return Err(Error{error_code: e.error_code()})
};
self.screen.client.flush().unwrap();
let window = Window
{
screen: self.screen,
id : child_id,
};
window.map();
return Ok(window);
}
}
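// Editor's note: hypothetical sketch, not part of the original xcb.rs, showing the
// intended get/set property round trip on a Window. It only uses types defined in this
// file plus the WM_NAME atom, which find_atom_id_by_name resolves by name.
pub fn example_property_round_trip(window: &Window) -> Result<(), Error>
{
    let client = window.screen.client;
    if let Some(atom) = client.find_atom_id_by_name("WM_NAME")
    {
        // Write a string property, then read it back through get_property.
        window.set_property(&Property { key: atom, value: PropertyValue::String("demo".to_owned()) });
        client.flush().unwrap();
        let read_back = window.get_property(atom)?;
        println!("WM_NAME is now {:?}", read_back.value);
    }
    return Ok(());
}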
pub struct GraphicsContext<'client, 'conn>
{
id : GraphicsContextID,
client: &'client Client<'conn>
}
impl<'client, 'conn> GraphicsContext<'client, 'conn>
{
pub fn gen | ndow: &Window<'_, 'client, 'conn>, foreground: Color, background: Color) -> GraphicsContext<'client, 'conn>
{
let id = window.screen.client.generate_id();
xcb::create_gc_checked(
&window.screen.client.conn,
id,
window.id,
&[
(xcb::GC_FOREGROUND, foreground),
(xcb::GC_BACKGROUND, background),
(xcb::GC_LINE_WIDTH, 1),
(xcb::GC_LINE_STYLE, xcb::LINE_STYLE_SOLID),
(xcb::GC_GRAPHICS_EXPOSURES, 0)
]
).request_check().unwrap();
return GraphicsContext
{
client: window.screen.client,
id,
};
}
pub fn draw_rects(&self, window: &'conn Window, rectangles: &[xcb::Rectangle])
{
xcb::poly_rectangle(&self.client.conn, window.id, self.id, &rectangles);
}
pub fn fill_rects(&self, window: &'conn Window, rectangles: &[xcb::Rectangle])
{
xcb::poly_fill_rectangle(&self.client.conn, window.id, self.id, &rectangles);
}
    //TODO(fpalacios): Figure out how to draw an image in pure xcb
pub fn draw_image(
&self,
drawable: DrawableID,
_image: &image::RgbaImage,
(x, y): (i16, i16),
)
{
        //NOTE(fpalacios): Generating and filling this array is just a one-off test for now
let mut a = [0_u8; 100 * 100 * 4];
for i in (0 .. 100).step_by(4)
{
a[i + 0] = 0x00;
a[i + 1] = 0xFF;
a[i + 2] = 0xFF;
a[i + 3] = 0x00;
}
if let Err(e) = xcb::put_image_checked(
&self.client.conn,
xcb::IMAGE_FORMAT_Z_PIXMAP as u8,
drawable,
self.id,
100,
100,
x,
y,
0,
32,
&a,
).request_check()
{
            println!("Failed to draw the image {:?}", e);
}
}
pub fn clear_window(&self, window: &'conn Window)
{
let (x, y, width, height) = window.geometry();
xcb::clear_area(&self.client.conn, true, window.id, x, y, width, height);
}
pub fn clear_area(&self, window: &'conn Window, (x, y, width, height): (i16, i16, u16, u16))
{
xcb::clear_area(&self.client.conn, true, window.id, x, y, width, height);
}
}
pub struct ColorMap<'client, 'conn>
{
pub client: &'client Client<'conn>,
pub id : ColorMapID
}
impl<'client, 'conn> ColorMap<'client, 'conn>
{
pub fn create(window: &'conn Window, visual: VisualID) -> ColorMap<'client, 'conn>
{
let client = window.screen.client;
let id = client.generate_id();
xcb::create_colormap_checked(
&client.conn,
xcb::COLORMAP_ALLOC_NONE as u8,
id,
window.id,
visual
).request_check().unwrap();
return ColorMap
{
client,
id
};
}
}
pub struct PixMap<'client, 'conn>
{
pub client: &'client Client<'conn>,
pub id : PixMapID,
pub width : u16,
pub height: u16
}
impl<'client, 'conn> PixMap<'client, 'conn>
{
pub fn create(screen: &Screen<'client, 'conn>, drawable: DrawableID, width: u16, height: u16) -> PixMap<'client, 'conn>
{
let client = screen.client;
let id = client.generate_id();
xcb::create_pixmap_checked(
&client.conn,
32,
id,
drawable,
width,
height
).request_check().unwrap();
return PixMap
{
client,
id,
width,
height
};
}
} | erate(wi | identifier_name |
xcb.rs | pub type AtomID = xcb::Atom;
pub type Color = u32;
pub type ScreenID = i32;
pub type WindowID = xcb::Window;
pub type DrawableID = xcb::Window;
pub type GraphicsContextID = xcb::Atom;
pub type EventKeyID = xcb::EventMask;
pub type ColorMapID = xcb::Atom;
pub type PixMapID = xcb::Atom;
pub type VisualID = xcb::Atom;
#[derive(Debug)]
pub struct Error
{
pub error_code: u8
}
pub enum Event
{
KeyEvent(KeyEvent),
ExposedEvent,
    //TODO(fpalacios): Move ClientMessageEvent's data into its own struct (like KeyEvent)
ClientMessageEvent
{
window : WindowID,
event_type : AtomID,
data : [u32; 5],
},
UnknownEvent(xcb::EventMask, xcb::Event<xcb::ffi::xcb_generic_event_t>)
}
#[derive(Debug)]
pub enum KeyEvent
{
KeyPress,
KeyReleased,
}
#[derive(Debug)]
pub struct Property
{
pub key : AtomID,
pub value: PropertyValue,
}
#[derive(Debug)]
pub enum PropertyValue
{
String(String),
I32(i32),
U32(u32),
None,
Atom(AtomID),
UnknownAtom(AtomID),
}
impl PropertyValue
{
pub fn get_type_atom_id(&self) -> AtomID
{
return match self
{
PropertyValue::String(_) => xcb::ATOM_STRING,
PropertyValue::I32(_) => xcb::ATOM_INTEGER,
PropertyValue::U32(_) => xcb::ATOM_CARDINAL,
PropertyValue::Atom(_) => xcb::ATOM_ATOM,
PropertyValue::UnknownAtom(atom_id) => atom_id.clone(),
PropertyValue::None => xcb::ATOM_NONE
};
}
}
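// Editor's note: hypothetical sketch, not part of the original xcb.rs, showing how
// get_type_atom_id pairs each PropertyValue variant with the atom that change_property
// expects as the property's type.
pub fn example_type_atoms()
{
    let values = [
        PropertyValue::String("hello".to_owned()),
        PropertyValue::U32(7),
        PropertyValue::None,
    ];
    for value in &values
    {
        println!("{:?} -> type atom {}", value, value.get_type_atom_id());
    }
}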
pub struct Client<'conn>
{
pub conn : &'conn xcb::Connection,
}
impl<'conn> Client<'conn>
{
pub fn new(conn: &'conn xcb::Connection) -> Client
{
return Client
{
conn,
};
}
pub fn find_atom_id_by_name(&self, name: &str) -> Option<AtomID>
{
let atom_id = xcb::intern_atom(&self.conn, false, name).get_reply().unwrap().atom();
return if atom_id == xcb::ATOM_NONE { None } else { Some(atom_id) };
}
pub fn find_atom_name(&self, atom_id: AtomID) -> String
{
return xcb::get_atom_name(&self.conn, atom_id).get_reply().unwrap().name().to_owned();
}
pub fn poll_events(&self) -> Option<Event>
|
pub fn send_message(&self, destination: &Window, event: Event)
{
match event
{
Event::ClientMessageEvent {window, event_type, data , ..} =>
{
let message_data = xcb::ffi::xproto::xcb_client_message_data_t::from_data32(data);
let event = xcb::Event::<xcb::ffi::xproto::xcb_client_message_event_t>::new(
32,
window,
event_type,
message_data
);
xcb::send_event_checked(
&self.conn,
false,
destination.id,
xcb::EVENT_MASK_SUBSTRUCTURE_REDIRECT,
&event
).request_check().unwrap();
}
_ =>
{
                //TODO(fpalacios): Decide what to do here
}
};
self.flush().unwrap();
}
pub fn flush(&self) -> Result<(), ()>
{
return if self.conn.flush() { Ok(()) } else { Err(()) };
}
fn generate_id(&self) -> u32
{
return self.conn.generate_id();
}
}
pub struct Screen<'client, 'conn>
{
pub id : ScreenID,
pub client : &'client Client<'conn>,
pub xcb_screen: xcb::Screen<'client>,
}
impl<'client, 'conn> Screen<'client, 'conn>
{
pub fn from_id(client: &'client Client<'conn>, id: ScreenID) -> Option<Screen<'client, 'conn>>
{
let xcb_screen = client.conn.get_setup().roots().nth(std::convert::TryInto::try_into(id).unwrap())?;
return Some(
Screen
{
id,
client,
xcb_screen,
}
);
}
pub fn root_window(&self) -> Window
{
return Window
{
screen: self,
id: self.xcb_screen.root()
};
}
pub fn get_black_pixel(&self) -> Color
{
return self.xcb_screen.black_pixel();
}
pub fn get_white_pixel(&self) -> Color
{
return self.xcb_screen.white_pixel();
}
}
pub struct Window<'screen, 'client, 'conn>
{
pub screen: &'screen Screen<'client, 'conn>,
pub id: WindowID,
}
impl<'screen, 'client, 'conn> Window<'screen, 'client, 'conn>
{
pub fn children(&self) -> Vec<Window<'screen, 'client, 'conn>>
{
let tree = xcb::query_tree(&self.screen.client.conn, self.id).get_reply().unwrap();
let children = tree.children();
let mut result = Vec::with_capacity(children.len());
for child in children
{
result.push(Window { screen: self.screen, id: child.clone()});
}
return result;
}
pub fn get_property(&self, atom: AtomID) -> Result<Property, Error>
{
let property = match xcb::get_property(
&self.screen.client.conn,
false,
self.id,
atom,
xcb::ATOM_ANY,
0,
1024
).get_reply()
{
Ok(property) => property,
Err(err) =>
{
return Err(
Error
{
error_code: err.error_code()
}
);
}
};
let value = match property.type_()
{
xcb::ATOM_STRING => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()),
xcb::ATOM_INTEGER => PropertyValue::I32(property.value::<i32>()[0]),
xcb::ATOM_NONE => PropertyValue::None,
xcb::ATOM_ATOM => PropertyValue::Atom(property.value::<u32>()[0]),
xcb::ATOM_CARDINAL => PropertyValue::U32(property.value::<u32>()[0]),
unknown_atom =>
{
match self.screen.client.find_atom_name(unknown_atom).as_ref()
{
"UTF8_STRING" => PropertyValue::String((*String::from_utf8_lossy(property.value::<u8>())).to_owned()),
_ => PropertyValue::UnknownAtom(unknown_atom)
}
}
};
return Ok(Property{ key: atom, value });
}
pub fn set_property(&self, property: &Property)
{
let atom_type = property.value.get_type_atom_id();
match &property.value
{
PropertyValue::String(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
8,
val.as_bytes()
);
},
PropertyValue::Atom(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::I32(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::U32(val) =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[val.clone()]
);
},
PropertyValue::None =>
{
xcb::change_property(
&self.screen.client.conn,
xcb::PROP_MODE_REPLACE as u8,
self.id,
property.key,
atom_type,
32,
&[xcb::ATOM_NONE]
);
},
PropertyValue::UnknownAtom(_) =>
{
                //TODO(fpalacios): What do we do here?
                panic!("What do we do here?");
},
};
}
pub fn geometry(&self) -> (i16, i16, u16, u16)
{
let geometry = match xcb::get_geometry(&self.screen.client.conn, self.id).get_reply()
{
            Ok(geometry) => geometry,
            Err(error) =>
            {
                println!("Failed to get the geometry. Error code [{}]", error.error_code());
panic!();
}
};
return (geometry.x(), geometry.y(), geometry.width(), geometry.height());
}
pub fn map(&self)
{
xcb::map_window(&self.screen.client.conn, self.id);
self.screen.client.flush().unwrap();
}
pub fn create_child_window(
&self,
(x, y, width, height): (i16, i16, u16, u16),
depth : u8,
colormap : Option<&ColorMap>,
background_pixel : Option<u32>,
border_pixel : Option<u32>,
visual_id : Option<VisualID>
)
-> Result<Window<'screen, 'client, 'conn>, Error>
{
let child_id = self.screen.client.generate_id();
let mut window_attributes = vec![
(
xcb::CW_EVENT_MASK,
xcb::GC_GRAPHICS_EXPOSURES | xcb::EVENT_MASK_KEY_PRESS
)
];
if let Some(colormap) = colormap
{
window_attributes.push((xcb::CW_COLORMAP, colormap.id));
}
if let Some(background_pixel) = background_pixel
{
window_attributes.push((xcb::CW_BACK_PIXEL, background_pixel))
}
if let Some(border_pixel) = border_pixel
{
window_attributes.push((xcb::CW_BORDER_PIXEL, border_pixel));
}
let visual_id = match visual_id
{
Some(visual_id) => visual_id,
None => self.screen.xcb_screen.root_visual()
};
if let Err(e) = xcb::create_window_checked(
&self.screen.client.conn,
depth,
child_id,
self.id,
x,
y,
width,
height,
1,
xcb::WINDOW_CLASS_INPUT_OUTPUT as u16,
visual_id,
&window_attributes
).request_check()
{
return Err(Error{error_code: e.error_code()})
};
self.screen.client.flush().unwrap();
let window = Window
{
screen: self.screen,
id : child_id,
};
window.map();
return Ok(window);
}
}
pub struct GraphicsContext<'client, 'conn>
{
id : GraphicsContextID,
client: &'client Client<'conn>
}
impl<'client, 'conn> GraphicsContext<'client, 'conn>
{
pub fn generate(window: &Window<'_, 'client, 'conn>, foreground: Color, background: Color) -> GraphicsContext<'client, 'conn>
{
let id = window.screen.client.generate_id();
xcb::create_gc_checked(
&window.screen.client.conn,
id,
window.id,
&[
(xcb::GC_FOREGROUND, foreground),
(xcb::GC_BACKGROUND, background),
(xcb::GC_LINE_WIDTH, 1),
(xcb::GC_LINE_STYLE, xcb::LINE_STYLE_SOLID),
(xcb::GC_GRAPHICS_EXPOSURES, 0)
]
).request_check().unwrap();
return GraphicsContext
{
client: window.screen.client,
id,
};
}
pub fn draw_rects(&self, window: &'conn Window, rectangles: &[xcb::Rectangle])
{
xcb::poly_rectangle(&self.client.conn, window.id, self.id, &rectangles);
}
pub fn fill_rects(&self, window: &'conn Window, rectangles: &[xcb::Rectangle])
{
xcb::poly_fill_rectangle(&self.client.conn, window.id, self.id, &rectangles);
}
    //TODO(fpalacios): Figure out how to draw an image in pure xcb
pub fn draw_image(
&self,
drawable: DrawableID,
_image: &image::RgbaImage,
(x, y): (i16, i16),
)
{
        //NOTE(fpalacios): Generating and filling this array is just a one-off test for now
let mut a = [0_u8; 100 * 100 * 4];
for i in (0 .. 100).step_by(4)
{
a[i + 0] = 0x00;
a[i + 1] = 0xFF;
a[i + 2] = 0xFF;
a[i + 3] = 0x00;
}
if let Err(e) = xcb::put_image_checked(
&self.client.conn,
xcb::IMAGE_FORMAT_Z_PIXMAP as u8,
drawable,
self.id,
100,
100,
x,
y,
0,
32,
&a,
).request_check()
{
            println!("Failed to draw the image {:?}", e);
}
}
pub fn clear_window(&self, window: &'conn Window)
{
let (x, y, width, height) = window.geometry();
xcb::clear_area(&self.client.conn, true, window.id, x, y, width, height);
}
pub fn clear_area(&self, window: &'conn Window, (x, y, width, height): (i16, i16, u16, u16))
{
xcb::clear_area(&self.client.conn, true, window.id, x, y, width, height);
}
}
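// Editor's note: hypothetical sketch, not part of the original xcb.rs, showing how a
// GraphicsContext is meant to be used: build one from a window, draw and fill a couple
// of rectangles, then flush so the server processes the queued requests. The explicit
// lifetimes only restate what draw_rects/fill_rects already require from the window
// reference; Rectangle::new is assumed from the xcb 0.9-style API used in this file.
pub fn example_draw<'client, 'conn>(window: &'conn Window<'_, 'client, 'conn>)
{
    let gc = GraphicsContext::generate(
        window,
        window.screen.get_white_pixel(),
        window.screen.get_black_pixel()
    );
    let rects = [
        xcb::Rectangle::new(10, 10, 50, 30),
        xcb::Rectangle::new(80, 10, 50, 30),
    ];
    gc.draw_rects(window, &rects);
    gc.fill_rects(window, &rects);
    window.screen.client.flush().unwrap();
}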
pub struct ColorMap<'client, 'conn>
{
pub client: &'client Client<'conn>,
pub id : ColorMapID
}
impl<'client, 'conn> ColorMap<'client, 'conn>
{
pub fn create(window: &'conn Window, visual: VisualID) -> ColorMap<'client, 'conn>
{
let client = window.screen.client;
let id = client.generate_id();
xcb::create_colormap_checked(
&client.conn,
xcb::COLORMAP_ALLOC_NONE as u8,
id,
window.id,
visual
).request_check().unwrap();
return ColorMap
{
client,
id
};
}
}
pub struct PixMap<'client, 'conn>
{
pub client: &'client Client<'conn>,
pub id : PixMapID,
pub width : u16,
pub height: u16
}
impl<'client, 'conn> PixMap<'client, 'conn>
{
pub fn create(screen: &Screen<'client, 'conn>, drawable: DrawableID, width: u16, height: u16) -> PixMap<'client, 'conn>
{
let client = screen.client;
let id = client.generate_id();
xcb::create_pixmap_checked(
&client.conn,
32,
id,
drawable,
width,
height
).request_check().unwrap();
return PixMap
{
client,
id,
width,
height
};
}
} | {
let event = match self.conn.poll_for_event()
{
Some(event) => event,
None => return None,
};
match event.response_type() & !0x80
{
xcb::EXPOSE => return Some(Event::ExposedEvent),
xcb::KEY_PRESS => return Some(Event::KeyEvent(KeyEvent::KeyPress)),
xcb::KEY_RELEASE => return Some(Event::KeyEvent(KeyEvent::KeyReleased)),
event =>
{
                println!("UNKNOWN EVENT {:?}", event);
return None;
}
};
} | identifier_body |
models.py | '''
Updated Models for multilevel approvals
'''
from django.utils.encoding import python_2_unicode_compatible
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.utils.html import escape
import cbhooks
from accounts.models import Role
from orders.models import Order, get_current_time
import orders.mail
from utilities.logger import ThreadLogger
from utilities.exceptions import CloudBoltException
from quota.exceptions import QuotaError
from quota.quota_set import QuotaSetError
logger = ThreadLogger(__name__)
@python_2_unicode_compatible
class CustomOrder(Order):
class Meta:
app_label = 'orders'
def approve_my_grms(self, profile=None):
'''
        In a multilevel approval, a user partially (or fully) approves an order
        based on their GroupRoleMembership mappings, excluding "approver"
        (the default/single-level approval role).
'''
if self.status != 'PENDING':
return
if not profile:
profile = self.owner
oi = self.orderitem_set.first().cast()
bpoi = oi.blueprintitemarguments_set.first()
for grm in CustomOrder.get_my_grms(self, profile):
grmcfv = bpoi.custom_field_values.get(field__name=f'{grm.role.name}_approver_id')
if grmcfv and not grmcfv.int_value:
grmcfv.int_value = grm.profile.user.id
grmcfv.save()
history_msg = _("The '{order}' order has been partially approved by {role_label}.").format(order=escape(self), role_label=grm.role.label)
self.add_event('APPROVED', history_msg, profile=profile)
def get_my_grms(self, profile=None):
|
def is_multilevel_approval(self):
"""
multilevel approvals need to display the roles that have order.approve permissions
based on a BPOI custom_field_value where the field name has an "_approver_id" at the
end, and a valid role exists on the Group for that cfv field name
returns a dictionary of the roles or an empty dict
"""
if not self.orderitem_set.first():
return {}
oi = self.orderitem_set.first().cast()
if not oi or not hasattr(oi, 'blueprintitemarguments_set'):
return {}
bpoi = oi.blueprintitemarguments_set.first()
approval_levels = {}
if not bpoi:
return {}
for cfv in bpoi.custom_field_values.filter(field__name__endswith='_approver_id'):
role_name = cfv.field.name.replace('_approver_id', '')
ml_approver_role = Role.objects.get(name=role_name, permissions__name='order.approve')
if ml_approver_role:
approval_levels[ml_approver_role] = cfv.value
return approval_levels
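    def _example_pending_levels(self):
        # Editor's note: hypothetical helper added for illustration only; it is not part of
        # the original models.py. is_multilevel_approval() keys off CustomFieldValues named
        # "<role_name>_approver_id" and returns a dict such as
        # {<Role: finance>: None, <Role: security>: 1042}; this sketch reduces that dict to
        # the labels of the roles still waiting for sign-off.
        return [role.label for role, approver_id in self.is_multilevel_approval().items()
                if not approver_id]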
def should_auto_approve(self):
"""
        Return True if this order should be automatically approved. An order
        should be auto approved if its group has auto approve enabled,
        if the submitter is also an approver on this group,
        if all of its order items have environments with auto approve enabled,
        or if all of the multilevel approval roles are granted to this user profile.
"""
if self.group and self.group.allow_auto_approval:
return True
# some orders (like those duplicated by CIT) will not have owners
if self.is_multilevel_approval():
if self.has_all_approver_roles(self.owner, self.group):
return True
return False
else:
if self.owner and self.owner.has_permission('order.approve', self.group):
return True
return False
def has_all_approver_roles(self, profile, group):
'''
for multi_level approvals we want to know if we can approve the order
as part of should_auto_approve()
'''
#Roles
r_needed = Role.objects.filter(grouprolemembership__group=group,
permissions__name='order.approve')
if len(r_needed) > 1:
r_needed = r_needed.exclude(name='approver').distinct()
#GroupRoleMemberships
r_owned = CustomOrder.get_my_grms(self, profile)
if len(r_needed) == len(r_owned):
#if the number of GRMs == the number of Roles for that group
return True
return False
def start_approval_process(self, request=None):
"""
This method determines what order process should be taken, and
takes it. By default, the process is to email the approvers, but
this can be overriden by customers to instead call out to a hook,
and that can be overridden by auto-approval (set on the group or
env, or by the owner being an approver or a super admin).
This method returns a message summarizing what action was taken.
`request` is needed to determine the current portal URL; if not
passed, default portal URL is used.
"""
# done here to avoid circular import
from cbhooks.models import HookPoint
hook_point = HookPoint.objects.filter(name="order_approval").first()
orch_actions = cbhooks._get_orchestration_actions_to_run(hook_point)
if orch_actions:
            # the orchestration action needs to be first in order to allow a hook
# to model the approval process correctly and not have something
# auto-approve before the hook is run
logger.debug("Order Approval orchestration actions exist, so bypassing built-in approver emails.")
try:
cbhooks.run_hooks("order_approval", order=self)
except cbhooks.exceptions.HookFailureException as e:
msg = _("Failed to run hook for order approval. Status: {status},"
" Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors)
raise CloudBoltException(msg)
return ""
#now that the hooks have run, check if it should be auto-approved
profile = request.get_user_profile()
if self.is_multilevel_approval():
self.approve_my_grms(profile)
if self.should_auto_approve():
logger.debug("Order can be automatically approved, attempting approval by {}".format(self.owner))
jobs, msg = self.approve(self.owner)
if jobs:
msg = render_to_string(
'orders/approved_msg.html', {
'order': self,
'autoapproved': True,
'num_jobs': len(jobs),
'extramsg': msg,
})
return msg
else:
# No auto approval and no approval hooks, so go with
# the default process of emailing a set of approvers, unless the
# owner is an approver.
msg = _("Order #{order_id} has been submitted for approval. ").format(order_id=self.id)
msg += orders.mail.email_approvers(self, request)
logger.debug(msg)
return msg
def approve(self, approver=None, parent_job=None):
"""
Sets this order to the "Active" status and kicks off the jobs needed
to complete this order.
One job of the appropriate type ('provision' or 'decom') is kicked
off per OrderItem for this order. An exception to this statement is
if the "quantity" field on the OrderItem is set, then a set of
identical jobs will be kicked off (however many are specified by
quantity).
Returns list of jobs and error messages from any cleanup of order
items.
"""
if self.status != 'PENDING':
msg = _(
"Only orders that are in 'PENDING' state can be approved. "
"Current state of order is '{status}'."
).format(status=self.status)
raise CloudBoltException(msg)
approve_this_order = False
if self.is_multilevel_approval():
logger.info('models.approve is multilevel!')
self.approve_my_grms(approver)
logger.info(f'models.approve after approve_my_grms ({approver})!')
if self.is_multilevel_approval():
logger.info('models.approve ml approval complete!')
approve_this_order = True
else:
logger.info('models.approve is NOT multilevel!')
#single-level approval
approve_this_order = True
if not approve_this_order:
            # this branch should only trigger for incomplete multilevel approvals
msg = _(
"Cannot fully approve this order. Multilevel approvals not complete. "
"Current state of order is '{status}'."
).format(status=self.status)
return [], msg
try:
# Raise an error to bubble up specific reason as part of the exception
self.group.quota_set.can_use(raise_error=True, **self.net_usage())
except QuotaSetError as quota_set_error:
raise QuotaError(_(
"Cannot approve order #{order_id} because doing so would exceed the "
"quota for group '{group}'. {error}"
).format(order_id=self.id, group=self.group, error=quota_set_error))
# Before we create job records, order the order items to make
# sure decom jobs are queued before prov jobs. the job engine
# may still parallelize them, that's something we can revisit
# later. In the meantime, customers can set the concurrency
# level to 1 to prevent this.
# we're taking advantage of the fact that "decom" comes before
# "prov" in the alphabet here.
order_items = [oi.cast() for oi in self.top_level_items.order_by(
"real_type", "add_date")]
order_items, msg = self.__filter_illegal_order_items(order_items)
if not order_items:
msg = _("{message} There are no valid order items left. This order is "
"being marked as complete.").format(message=msg)
self.complete("SUCCESS")
return [], msg
self.status = "ACTIVE"
self.approved_by = approver
self.approve_date = get_current_time()
self.save()
history_msg = _("The '{order}' order has been approved.").format(order=escape(self))
self.add_event('APPROVED', history_msg, profile=self.owner)
# run pre order execution hook
try:
cbhooks.run_hooks("pre_order_execution", order=self)
except cbhooks.exceptions.HookFailureException as e:
self.status = "FAILURE"
self.save()
msg = _("Failed to run hook for order approval. Status: {status},"
" Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors)
history_msg = _("The '{order}' order has failed.").format(order=escape(self))
self.add_event('FAILED', history_msg, profile=self.owner)
raise CloudBoltException(msg)
from jobs.models import Job
# Saving job objects will cause them to be kicked off by the
# job engine within a minute
jobs = []
for order_item in order_items:
jobtype = getattr(order_item, 'job_type', None)
if not jobtype:
# the job type will default to the first word of the class type
# ex. "provision", "decom"
jobtype = str(order_item.real_type).split(" ", 1)[0]
quantity = 1
# quantity is a special field on order_items. If an
# order_item has the quantity field, kick off that many
# jobs
if hasattr(order_item, 'quantity') and \
order_item.quantity is not None and \
order_item.quantity != '':
quantity = int(order_item.quantity)
for i in range(quantity):
job = Job(job_parameters=order_item,
type=jobtype,
owner=self.owner,
parent_job=parent_job)
job.save()
# Associate the job with any server(s)
# This may seem unnecessary because it's done when most jobs
# run, but it's needed at the very least for scheduled server
# modification jobs (for changing resources) so they show up on
# the server as scheduled before they actually run
servers = []
if hasattr(order_item, "server"):
servers = [order_item.server]
elif hasattr(order_item, "servers"):
servers = order_item.servers.all()
for server in servers:
server.jobs.add(job)
jobs.append(job)
# If it didn't make any jobs, just call it done
if not jobs:
self.complete("SUCCESS")
return jobs, msg
| '''
        In a multilevel approval, we need to get the GroupRoleMembership mappings
        and exclude the default "approver" role, unless it is the only role
        with the order.approve permission.
'''
if not profile:
profile = self.owner
owned_grms = profile.grouprolemembership_set.filter(group=self.group,
role__permissions__name='order.approve')
if len(owned_grms) > 1:
#multilevel approvals ignore the "approver" GRM
owned_grms = owned_grms.exclude(role__name='approver')
return owned_grms | identifier_body |
models.py | '''
Updated Models for multilevel approvals
'''
from django.utils.encoding import python_2_unicode_compatible
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.utils.html import escape
import cbhooks
from accounts.models import Role
from orders.models import Order, get_current_time
import orders.mail
from utilities.logger import ThreadLogger
from utilities.exceptions import CloudBoltException
from quota.exceptions import QuotaError
from quota.quota_set import QuotaSetError
logger = ThreadLogger(__name__)
@python_2_unicode_compatible
class CustomOrder(Order):
class Meta:
app_label = 'orders'
def approve_my_grms(self, profile=None):
'''
        In a multilevel approval, a user partially (or fully) approves an order
        based on their GroupRoleMembership mappings, excluding "approver"
        (the default/single-level approval role).
'''
if self.status != 'PENDING':
return
if not profile:
profile = self.owner
oi = self.orderitem_set.first().cast()
bpoi = oi.blueprintitemarguments_set.first()
for grm in CustomOrder.get_my_grms(self, profile):
grmcfv = bpoi.custom_field_values.get(field__name=f'{grm.role.name}_approver_id')
if grmcfv and not grmcfv.int_value:
grmcfv.int_value = grm.profile.user.id
grmcfv.save()
history_msg = _("The '{order}' order has been partially approved by {role_label}.").format(order=escape(self), role_label=grm.role.label)
self.add_event('APPROVED', history_msg, profile=profile)
def get_my_grms(self, profile=None):
'''
        In a multilevel approval, we need to get the GroupRoleMembership mappings
        and exclude the default "approver" role, unless it is the only role
        with the order.approve permission.
'''
if not profile:
profile = self.owner
owned_grms = profile.grouprolemembership_set.filter(group=self.group,
role__permissions__name='order.approve')
if len(owned_grms) > 1:
#multilevel approvals ignore the "approver" GRM
owned_grms = owned_grms.exclude(role__name='approver')
return owned_grms
def is_multilevel_approval(self):
"""
multilevel approvals need to display the roles that have order.approve permissions
based on a BPOI custom_field_value where the field name has an "_approver_id" at the
end, and a valid role exists on the Group for that cfv field name
returns a dictionary of the roles or an empty dict
"""
if not self.orderitem_set.first():
return {}
oi = self.orderitem_set.first().cast()
if not oi or not hasattr(oi, 'blueprintitemarguments_set'):
return {}
bpoi = oi.blueprintitemarguments_set.first()
approval_levels = {}
if not bpoi:
return {}
for cfv in bpoi.custom_field_values.filter(field__name__endswith='_approver_id'):
role_name = cfv.field.name.replace('_approver_id', '')
ml_approver_role = Role.objects.get(name=role_name, permissions__name='order.approve')
if ml_approver_role:
approval_levels[ml_approver_role] = cfv.value
return approval_levels
def should_auto_approve(self):
"""
        Return True if this order should be automatically approved. An order
        should be auto approved if its group has auto approve enabled,
        if the submitter is also an approver on this group,
        if all of its order items have environments with auto approve enabled,
        or if all of the multilevel approval roles are granted to this user profile.
"""
if self.group and self.group.allow_auto_approval:
return True
# some orders (like those duplicated by CIT) will not have owners
if self.is_multilevel_approval():
if self.has_all_approver_roles(self.owner, self.group):
return True
return False
else:
if self.owner and self.owner.has_permission('order.approve', self.group):
return True
return False
def has_all_approver_roles(self, profile, group):
'''
for multi_level approvals we want to know if we can approve the order
as part of should_auto_approve()
'''
#Roles
r_needed = Role.objects.filter(grouprolemembership__group=group,
permissions__name='order.approve')
if len(r_needed) > 1:
r_needed = r_needed.exclude(name='approver').distinct()
#GroupRoleMemberships
r_owned = CustomOrder.get_my_grms(self, profile)
if len(r_needed) == len(r_owned):
#if the number of GRMs == the number of Roles for that group
return True
return False
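    def _example_missing_roles(self, profile):
        # Editor's note: hypothetical helper for illustration only; it is not part of the
        # original models.py. has_all_approver_roles() only compares the *counts* of roles
        # and memberships; this sketch lists the approve-capable roles the profile does not
        # hold in the order's group, which is handier when debugging a stuck approval.
        needed = Role.objects.filter(grouprolemembership__group=self.group,
                                     permissions__name='order.approve')
        if len(needed) > 1:
            needed = needed.exclude(name='approver').distinct()
        owned_role_ids = set(CustomOrder.get_my_grms(self, profile).values_list('role_id', flat=True))
        return [role for role in needed if role.id not in owned_role_ids]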
def start_approval_process(self, request=None):
"""
This method determines what order process should be taken, and
takes it. By default, the process is to email the approvers, but
        this can be overridden by customers to instead call out to a hook,
and that can be overridden by auto-approval (set on the group or
env, or by the owner being an approver or a super admin).
This method returns a message summarizing what action was taken.
`request` is needed to determine the current portal URL; if not
passed, default portal URL is used.
"""
# done here to avoid circular import
from cbhooks.models import HookPoint
hook_point = HookPoint.objects.filter(name="order_approval").first()
orch_actions = cbhooks._get_orchestration_actions_to_run(hook_point)
if orch_actions:
            # the orchestration action needs to be first in order to allow a hook
# to model the approval process correctly and not have something
# auto-approve before the hook is run
logger.debug("Order Approval orchestration actions exist, so bypassing built-in approver emails.")
try:
cbhooks.run_hooks("order_approval", order=self)
except cbhooks.exceptions.HookFailureException as e:
msg = _("Failed to run hook for order approval. Status: {status},"
" Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors)
raise CloudBoltException(msg)
return ""
#now that the hooks have run, check if it should be auto-approved
profile = request.get_user_profile()
if self.is_multilevel_approval():
self.approve_my_grms(profile)
if self.should_auto_approve():
logger.debug("Order can be automatically approved, attempting approval by {}".format(self.owner))
jobs, msg = self.approve(self.owner)
if jobs:
msg = render_to_string(
'orders/approved_msg.html', {
'order': self,
'autoapproved': True,
'num_jobs': len(jobs),
'extramsg': msg,
})
return msg
else:
# No auto approval and no approval hooks, so go with
# the default process of emailing a set of approvers, unless the
# owner is an approver.
msg = _("Order #{order_id} has been submitted for approval. ").format(order_id=self.id)
msg += orders.mail.email_approvers(self, request)
logger.debug(msg)
return msg
def approve(self, approver=None, parent_job=None):
"""
Sets this order to the "Active" status and kicks off the jobs needed
to complete this order.
One job of the appropriate type ('provision' or 'decom') is kicked
off per OrderItem for this order. An exception to this statement is
if the "quantity" field on the OrderItem is set, then a set of
identical jobs will be kicked off (however many are specified by
quantity).
Returns list of jobs and error messages from any cleanup of order
items.
"""
if self.status != 'PENDING':
msg = _(
"Only orders that are in 'PENDING' state can be approved. "
"Current state of order is '{status}'."
).format(status=self.status)
raise CloudBoltException(msg)
approve_this_order = False
if self.is_multilevel_approval():
logger.info('models.approve is multilevel!')
self.approve_my_grms(approver)
logger.info(f'models.approve after approve_my_grms ({approver})!')
if self.is_multilevel_approval():
logger.info('models.approve ml approval complete!')
approve_this_order = True
else:
logger.info('models.approve is NOT multilevel!')
#single-level approval
approve_this_order = True
if not approve_this_order:
            # this branch should only trigger for incomplete multilevel approvals
msg = _(
"Cannot fully approve this order. Multilevel approvals not complete. "
"Current state of order is '{status}'."
).format(status=self.status)
return [], msg
try:
# Raise an error to bubble up specific reason as part of the exception
self.group.quota_set.can_use(raise_error=True, **self.net_usage())
except QuotaSetError as quota_set_error:
raise QuotaError(_(
"Cannot approve order #{order_id} because doing so would exceed the "
"quota for group '{group}'. {error}"
).format(order_id=self.id, group=self.group, error=quota_set_error))
# Before we create job records, order the order items to make
# sure decom jobs are queued before prov jobs. the job engine
# may still parallelize them, that's something we can revisit
# later. In the meantime, customers can set the concurrency
# level to 1 to prevent this.
# we're taking advantage of the fact that "decom" comes before
# "prov" in the alphabet here.
order_items = [oi.cast() for oi in self.top_level_items.order_by(
"real_type", "add_date")]
order_items, msg = self.__filter_illegal_order_items(order_items)
if not order_items:
msg = _("{message} There are no valid order items left. This order is "
"being marked as complete.").format(message=msg)
self.complete("SUCCESS")
return [], msg
self.status = "ACTIVE"
self.approved_by = approver
| self.save()
history_msg = _("The '{order}' order has been approved.").format(order=escape(self))
self.add_event('APPROVED', history_msg, profile=self.owner)
# run pre order execution hook
try:
cbhooks.run_hooks("pre_order_execution", order=self)
except cbhooks.exceptions.HookFailureException as e:
self.status = "FAILURE"
self.save()
msg = _("Failed to run hook for order approval. Status: {status},"
" Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors)
history_msg = _("The '{order}' order has failed.").format(order=escape(self))
self.add_event('FAILED', history_msg, profile=self.owner)
raise CloudBoltException(msg)
from jobs.models import Job
# Saving job objects will cause them to be kicked off by the
# job engine within a minute
jobs = []
for order_item in order_items:
jobtype = getattr(order_item, 'job_type', None)
if not jobtype:
# the job type will default to the first word of the class type
# ex. "provision", "decom"
jobtype = str(order_item.real_type).split(" ", 1)[0]
quantity = 1
# quantity is a special field on order_items. If an
# order_item has the quantity field, kick off that many
# jobs
if hasattr(order_item, 'quantity') and \
order_item.quantity is not None and \
order_item.quantity != '':
quantity = int(order_item.quantity)
for i in range(quantity):
job = Job(job_parameters=order_item,
type=jobtype,
owner=self.owner,
parent_job=parent_job)
job.save()
# Associate the job with any server(s)
# This may seem unnecessary because it's done when most jobs
# run, but it's needed at the very least for scheduled server
# modification jobs (for changing resources) so they show up on
# the server as scheduled before they actually run
servers = []
if hasattr(order_item, "server"):
servers = [order_item.server]
elif hasattr(order_item, "servers"):
servers = order_item.servers.all()
for server in servers:
server.jobs.add(job)
jobs.append(job)
# If it didn't make any jobs, just call it done
if not jobs:
self.complete("SUCCESS")
return jobs, msg | self.approve_date = get_current_time()
| random_line_split |
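The row above ends with its middle (`self.approve_date = get_current_time()`) and its fim_type (`random_line_split`). As a quick orientation aid — and assuming only that each row is available as a Python dict of strings keyed by the column names in the header — here is a minimal sketch of how a row reassembles into the original file and how it could be formatted as a fill-in-the-middle prompt. The sentinel strings are illustrative placeholders, not tokens defined by this dataset.

```python
def reassemble(row):
    """Rebuild the original source text from one row: prefix + middle + suffix."""
    return row["prefix"] + row["middle"] + row["suffix"]


def to_psm_prompt(row, pre="<fim_prefix>", suf="<fim_suffix>", mid="<fim_middle>"):
    """Format a row in prefix-suffix-middle order; a model would be asked to emit row['middle']."""
    return pre + row["prefix"] + suf + row["suffix"] + mid


# Toy row shaped like the ones in this table (values heavily truncated).
row = {
    "file_name": "models.py",
    "prefix": "        self.status = \"ACTIVE\"\n        self.approved_by = approver\n        ",
    "middle": "self.approve_date = get_current_time()\n        ",
    "suffix": "self.save()\n",
    "fim_type": "random_line_split",
}
print(reassemble(row))
print(to_psm_prompt(row))
```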
models.py | '''
Updated Models for multilevel approvals
'''
from django.utils.encoding import python_2_unicode_compatible
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.utils.html import escape
import cbhooks
from accounts.models import Role
from orders.models import Order, get_current_time
import orders.mail
from utilities.logger import ThreadLogger
from utilities.exceptions import CloudBoltException
from quota.exceptions import QuotaError
from quota.quota_set import QuotaSetError
logger = ThreadLogger(__name__)
@python_2_unicode_compatible
class CustomOrder(Order):
class Meta:
app_label = 'orders'
def approve_my_grms(self, profile=None):
'''
in a multilevel approval, we need a user to partially (or fully)
        approve an order based on the GroupRoleMembership mappings, excluding
        "approvers" (the default/single-level approval role)
'''
if self.status != 'PENDING':
return
if not profile:
profile = self.owner
oi = self.orderitem_set.first().cast()
bpoi = oi.blueprintitemarguments_set.first()
for grm in CustomOrder.get_my_grms(self, profile):
grmcfv = bpoi.custom_field_values.get(field__name=f'{grm.role.name}_approver_id')
if grmcfv and not grmcfv.int_value:
grmcfv.int_value = grm.profile.user.id
grmcfv.save()
history_msg = _("The '{order}' order has been partially approved by {role_label}.").format(order=escape(self), role_label=grm.role.label)
self.add_event('APPROVED', history_msg, profile=profile)
def get_my_grms(self, profile=None):
'''
        in a multilevel approval, we need to get the GroupRoleMembership mappings
        and exclude the default 'approver' role, unless it is the only role in
        this group that grants order.approve
'''
if not profile:
profile = self.owner
owned_grms = profile.grouprolemembership_set.filter(group=self.group,
role__permissions__name='order.approve')
if len(owned_grms) > 1:
#multilevel approvals ignore the "approver" GRM
owned_grms = owned_grms.exclude(role__name='approver')
return owned_grms
def is_multilevel_approval(self):
"""
multilevel approvals need to display the roles that have order.approve permissions
based on a BPOI custom_field_value where the field name has an "_approver_id" at the
end, and a valid role exists on the Group for that cfv field name
returns a dictionary of the roles or an empty dict
"""
if not self.orderitem_set.first():
return {}
oi = self.orderitem_set.first().cast()
if not oi or not hasattr(oi, 'blueprintitemarguments_set'):
return {}
bpoi = oi.blueprintitemarguments_set.first()
approval_levels = {}
if not bpoi:
return {}
for cfv in bpoi.custom_field_values.filter(field__name__endswith='_approver_id'):
role_name = cfv.field.name.replace('_approver_id', '')
ml_approver_role = Role.objects.get(name=role_name, permissions__name='order.approve')
if ml_approver_role:
approval_levels[ml_approver_role] = cfv.value
return approval_levels
def should_auto_approve(self):
"""
        Return True if this order should be automatically approved. An order
        should be auto-approved if its group has auto-approval enabled,
        if the submitter is also an approver on this group, or if all of its
        order items have environments with auto-approval enabled. For
        multilevel approvals, the order is also auto-approved when all of the
        required approval roles are granted to the submitter's profile.
"""
if self.group and self.group.allow_auto_approval:
return True
# some orders (like those duplicated by CIT) will not have owners
if self.is_multilevel_approval():
if self.has_all_approver_roles(self.owner, self.group):
return True
return False
else:
if self.owner and self.owner.has_permission('order.approve', self.group):
return True
return False
def has_all_approver_roles(self, profile, group):
'''
for multi_level approvals we want to know if we can approve the order
as part of should_auto_approve()
'''
#Roles
r_needed = Role.objects.filter(grouprolemembership__group=group,
permissions__name='order.approve')
if len(r_needed) > 1:
r_needed = r_needed.exclude(name='approver').distinct()
#GroupRoleMemberships
r_owned = CustomOrder.get_my_grms(self, profile)
if len(r_needed) == len(r_owned):
#if the number of GRMs == the number of Roles for that group
return True
return False
def | (self, request=None):
"""
This method determines what order process should be taken, and
takes it. By default, the process is to email the approvers, but
        this can be overridden by customers to instead call out to a hook,
and that can be overridden by auto-approval (set on the group or
env, or by the owner being an approver or a super admin).
This method returns a message summarizing what action was taken.
`request` is needed to determine the current portal URL; if not
passed, default portal URL is used.
"""
# done here to avoid circular import
from cbhooks.models import HookPoint
hook_point = HookPoint.objects.filter(name="order_approval").first()
orch_actions = cbhooks._get_orchestration_actions_to_run(hook_point)
if orch_actions:
            # the orchestration action needs to be first in order to allow a hook
# to model the approval process correctly and not have something
# auto-approve before the hook is run
logger.debug("Order Approval orchestration actions exist, so bypassing built-in approver emails.")
try:
cbhooks.run_hooks("order_approval", order=self)
except cbhooks.exceptions.HookFailureException as e:
msg = _("Failed to run hook for order approval. Status: {status},"
" Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors)
raise CloudBoltException(msg)
return ""
#now that the hooks have run, check if it should be auto-approved
profile = request.get_user_profile()
if self.is_multilevel_approval():
self.approve_my_grms(profile)
if self.should_auto_approve():
logger.debug("Order can be automatically approved, attempting approval by {}".format(self.owner))
jobs, msg = self.approve(self.owner)
if jobs:
msg = render_to_string(
'orders/approved_msg.html', {
'order': self,
'autoapproved': True,
'num_jobs': len(jobs),
'extramsg': msg,
})
return msg
else:
# No auto approval and no approval hooks, so go with
# the default process of emailing a set of approvers, unless the
# owner is an approver.
msg = _("Order #{order_id} has been submitted for approval. ").format(order_id=self.id)
msg += orders.mail.email_approvers(self, request)
logger.debug(msg)
return msg
def approve(self, approver=None, parent_job=None):
"""
Sets this order to the "Active" status and kicks off the jobs needed
to complete this order.
One job of the appropriate type ('provision' or 'decom') is kicked
off per OrderItem for this order. An exception to this statement is
if the "quantity" field on the OrderItem is set, then a set of
identical jobs will be kicked off (however many are specified by
quantity).
Returns list of jobs and error messages from any cleanup of order
items.
"""
if self.status != 'PENDING':
msg = _(
"Only orders that are in 'PENDING' state can be approved. "
"Current state of order is '{status}'."
).format(status=self.status)
raise CloudBoltException(msg)
approve_this_order = False
if self.is_multilevel_approval():
logger.info('models.approve is multilevel!')
self.approve_my_grms(approver)
logger.info(f'models.approve after approve_my_grms ({approver})!')
if self.is_multilevel_approval():
logger.info('models.approve ml approval complete!')
approve_this_order = True
else:
logger.info('models.approve is NOT multilevel!')
#single-level approval
approve_this_order = True
if not approve_this_order:
            # only reached when multilevel approvals are not yet complete
msg = _(
"Cannot fully approve this order. Multilevel approvals not complete. "
"Current state of order is '{status}'."
).format(status=self.status)
return [], msg
try:
# Raise an error to bubble up specific reason as part of the exception
self.group.quota_set.can_use(raise_error=True, **self.net_usage())
except QuotaSetError as quota_set_error:
raise QuotaError(_(
"Cannot approve order #{order_id} because doing so would exceed the "
"quota for group '{group}'. {error}"
).format(order_id=self.id, group=self.group, error=quota_set_error))
# Before we create job records, order the order items to make
# sure decom jobs are queued before prov jobs. the job engine
# may still parallelize them, that's something we can revisit
# later. In the meantime, customers can set the concurrency
# level to 1 to prevent this.
# we're taking advantage of the fact that "decom" comes before
# "prov" in the alphabet here.
order_items = [oi.cast() for oi in self.top_level_items.order_by(
"real_type", "add_date")]
order_items, msg = self.__filter_illegal_order_items(order_items)
if not order_items:
msg = _("{message} There are no valid order items left. This order is "
"being marked as complete.").format(message=msg)
self.complete("SUCCESS")
return [], msg
self.status = "ACTIVE"
self.approved_by = approver
self.approve_date = get_current_time()
self.save()
history_msg = _("The '{order}' order has been approved.").format(order=escape(self))
self.add_event('APPROVED', history_msg, profile=self.owner)
# run pre order execution hook
try:
cbhooks.run_hooks("pre_order_execution", order=self)
except cbhooks.exceptions.HookFailureException as e:
self.status = "FAILURE"
self.save()
msg = _("Failed to run hook for order approval. Status: {status},"
" Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors)
history_msg = _("The '{order}' order has failed.").format(order=escape(self))
self.add_event('FAILED', history_msg, profile=self.owner)
raise CloudBoltException(msg)
from jobs.models import Job
# Saving job objects will cause them to be kicked off by the
# job engine within a minute
jobs = []
for order_item in order_items:
jobtype = getattr(order_item, 'job_type', None)
if not jobtype:
# the job type will default to the first word of the class type
# ex. "provision", "decom"
jobtype = str(order_item.real_type).split(" ", 1)[0]
quantity = 1
# quantity is a special field on order_items. If an
# order_item has the quantity field, kick off that many
# jobs
if hasattr(order_item, 'quantity') and \
order_item.quantity is not None and \
order_item.quantity != '':
quantity = int(order_item.quantity)
for i in range(quantity):
job = Job(job_parameters=order_item,
type=jobtype,
owner=self.owner,
parent_job=parent_job)
job.save()
# Associate the job with any server(s)
# This may seem unnecessary because it's done when most jobs
# run, but it's needed at the very least for scheduled server
# modification jobs (for changing resources) so they show up on
# the server as scheduled before they actually run
servers = []
if hasattr(order_item, "server"):
servers = [order_item.server]
elif hasattr(order_item, "servers"):
servers = order_item.servers.all()
for server in servers:
server.jobs.add(job)
jobs.append(job)
# If it didn't make any jobs, just call it done
if not jobs:
self.complete("SUCCESS")
return jobs, msg
| start_approval_process | identifier_name |
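This row's fim_type is identifier_name: the held-out middle is just the method name (start_approval_process). For rows like this, a cheap sanity check on a predicted completion is that it is a legal Python identifier and not a keyword; the snippet below is only an illustrative sketch under that assumption, not tooling that ships with the dataset.

```python
import keyword


def is_plausible_identifier(prediction):
    """True if the predicted middle could be a Python identifier (e.g. a method name)."""
    candidate = prediction.strip()
    return candidate.isidentifier() and not keyword.iskeyword(candidate)


print(is_plausible_identifier("start_approval_process"))  # True
print(is_plausible_identifier("return"))                  # False: keyword
print(is_plausible_identifier("start approval"))          # False: not an identifier
```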
models.py | '''
Updated Models for multilevel approvals
'''
from django.utils.encoding import python_2_unicode_compatible
from django.template.loader import render_to_string
from django.utils.translation import ugettext as _
from django.utils.html import escape
import cbhooks
from accounts.models import Role
from orders.models import Order, get_current_time
import orders.mail
from utilities.logger import ThreadLogger
from utilities.exceptions import CloudBoltException
from quota.exceptions import QuotaError
from quota.quota_set import QuotaSetError
logger = ThreadLogger(__name__)
@python_2_unicode_compatible
class CustomOrder(Order):
class Meta:
app_label = 'orders'
def approve_my_grms(self, profile=None):
'''
in a multilevel approval, we need a user to partially (or fully)
        approve an order based on the GroupRoleMembership mappings, excluding
        "approvers" (the default/single-level approval role)
'''
if self.status != 'PENDING':
return
if not profile:
profile = self.owner
oi = self.orderitem_set.first().cast()
bpoi = oi.blueprintitemarguments_set.first()
for grm in CustomOrder.get_my_grms(self, profile):
grmcfv = bpoi.custom_field_values.get(field__name=f'{grm.role.name}_approver_id')
if grmcfv and not grmcfv.int_value:
grmcfv.int_value = grm.profile.user.id
grmcfv.save()
history_msg = _("The '{order}' order has been partially approved by {role_label}.").format(order=escape(self), role_label=grm.role.label)
self.add_event('APPROVED', history_msg, profile=profile)
def get_my_grms(self, profile=None):
'''
        in a multilevel approval, we need to get the GroupRoleMembership mappings
        and exclude the default 'approver' role, unless it is the only role in
        this group that grants order.approve
'''
if not profile:
profile = self.owner
owned_grms = profile.grouprolemembership_set.filter(group=self.group,
role__permissions__name='order.approve')
if len(owned_grms) > 1:
#multilevel approvals ignore the "approver" GRM
owned_grms = owned_grms.exclude(role__name='approver')
return owned_grms
def is_multilevel_approval(self):
"""
multilevel approvals need to display the roles that have order.approve permissions
based on a BPOI custom_field_value where the field name has an "_approver_id" at the
end, and a valid role exists on the Group for that cfv field name
returns a dictionary of the roles or an empty dict
"""
if not self.orderitem_set.first():
return {}
oi = self.orderitem_set.first().cast()
if not oi or not hasattr(oi, 'blueprintitemarguments_set'):
return {}
bpoi = oi.blueprintitemarguments_set.first()
approval_levels = {}
if not bpoi:
return {}
for cfv in bpoi.custom_field_values.filter(field__name__endswith='_approver_id'):
role_name = cfv.field.name.replace('_approver_id', '')
ml_approver_role = Role.objects.get(name=role_name, permissions__name='order.approve')
if ml_approver_role:
approval_levels[ml_approver_role] = cfv.value
return approval_levels
def should_auto_approve(self):
"""
        Return True if this order should be automatically approved. An order
        should be auto-approved if its group has auto-approval enabled,
        if the submitter is also an approver on this group, or if all of its
        order items have environments with auto-approval enabled. For
        multilevel approvals, the order is also auto-approved when all of the
        required approval roles are granted to the submitter's profile.
"""
if self.group and self.group.allow_auto_approval:
|
# some orders (like those duplicated by CIT) will not have owners
if self.is_multilevel_approval():
if self.has_all_approver_roles(self.owner, self.group):
return True
return False
else:
if self.owner and self.owner.has_permission('order.approve', self.group):
return True
return False
def has_all_approver_roles(self, profile, group):
'''
for multi_level approvals we want to know if we can approve the order
as part of should_auto_approve()
'''
#Roles
r_needed = Role.objects.filter(grouprolemembership__group=group,
permissions__name='order.approve')
if len(r_needed) > 1:
r_needed = r_needed.exclude(name='approver').distinct()
#GroupRoleMemberships
r_owned = CustomOrder.get_my_grms(self, profile)
if len(r_needed) == len(r_owned):
#if the number of GRMs == the number of Roles for that group
return True
return False
def start_approval_process(self, request=None):
"""
This method determines what order process should be taken, and
takes it. By default, the process is to email the approvers, but
        this can be overridden by customers to instead call out to a hook,
and that can be overridden by auto-approval (set on the group or
env, or by the owner being an approver or a super admin).
This method returns a message summarizing what action was taken.
`request` is needed to determine the current portal URL; if not
passed, default portal URL is used.
"""
# done here to avoid circular import
from cbhooks.models import HookPoint
hook_point = HookPoint.objects.filter(name="order_approval").first()
orch_actions = cbhooks._get_orchestration_actions_to_run(hook_point)
if orch_actions:
            # the orchestration action needs to be first in order to allow a hook
# to model the approval process correctly and not have something
# auto-approve before the hook is run
logger.debug("Order Approval orchestration actions exist, so bypassing built-in approver emails.")
try:
cbhooks.run_hooks("order_approval", order=self)
except cbhooks.exceptions.HookFailureException as e:
msg = _("Failed to run hook for order approval. Status: {status},"
" Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors)
raise CloudBoltException(msg)
return ""
#now that the hooks have run, check if it should be auto-approved
profile = request.get_user_profile()
if self.is_multilevel_approval():
self.approve_my_grms(profile)
if self.should_auto_approve():
logger.debug("Order can be automatically approved, attempting approval by {}".format(self.owner))
jobs, msg = self.approve(self.owner)
if jobs:
msg = render_to_string(
'orders/approved_msg.html', {
'order': self,
'autoapproved': True,
'num_jobs': len(jobs),
'extramsg': msg,
})
return msg
else:
# No auto approval and no approval hooks, so go with
# the default process of emailing a set of approvers, unless the
# owner is an approver.
msg = _("Order #{order_id} has been submitted for approval. ").format(order_id=self.id)
msg += orders.mail.email_approvers(self, request)
logger.debug(msg)
return msg
def approve(self, approver=None, parent_job=None):
"""
Sets this order to the "Active" status and kicks off the jobs needed
to complete this order.
One job of the appropriate type ('provision' or 'decom') is kicked
off per OrderItem for this order. An exception to this statement is
if the "quantity" field on the OrderItem is set, then a set of
identical jobs will be kicked off (however many are specified by
quantity).
Returns list of jobs and error messages from any cleanup of order
items.
"""
if self.status != 'PENDING':
msg = _(
"Only orders that are in 'PENDING' state can be approved. "
"Current state of order is '{status}'."
).format(status=self.status)
raise CloudBoltException(msg)
approve_this_order = False
if self.is_multilevel_approval():
logger.info('models.approve is multilevel!')
self.approve_my_grms(approver)
logger.info(f'models.approve after approve_my_grms ({approver})!')
if self.is_multilevel_approval():
logger.info('models.approve ml approval complete!')
approve_this_order = True
else:
logger.info('models.approve is NOT multilevel!')
#single-level approval
approve_this_order = True
if not approve_this_order:
            # only reached when multilevel approvals are not yet complete
msg = _(
"Cannot fully approve this order. Multilevel approvals not complete. "
"Current state of order is '{status}'."
).format(status=self.status)
return [], msg
try:
# Raise an error to bubble up specific reason as part of the exception
self.group.quota_set.can_use(raise_error=True, **self.net_usage())
except QuotaSetError as quota_set_error:
raise QuotaError(_(
"Cannot approve order #{order_id} because doing so would exceed the "
"quota for group '{group}'. {error}"
).format(order_id=self.id, group=self.group, error=quota_set_error))
# Before we create job records, order the order items to make
# sure decom jobs are queued before prov jobs. the job engine
# may still parallelize them, that's something we can revisit
# later. In the meantime, customers can set the concurrency
# level to 1 to prevent this.
# we're taking advantage of the fact that "decom" comes before
# "prov" in the alphabet here.
order_items = [oi.cast() for oi in self.top_level_items.order_by(
"real_type", "add_date")]
order_items, msg = self.__filter_illegal_order_items(order_items)
if not order_items:
msg = _("{message} There are no valid order items left. This order is "
"being marked as complete.").format(message=msg)
self.complete("SUCCESS")
return [], msg
self.status = "ACTIVE"
self.approved_by = approver
self.approve_date = get_current_time()
self.save()
history_msg = _("The '{order}' order has been approved.").format(order=escape(self))
self.add_event('APPROVED', history_msg, profile=self.owner)
# run pre order execution hook
try:
cbhooks.run_hooks("pre_order_execution", order=self)
except cbhooks.exceptions.HookFailureException as e:
self.status = "FAILURE"
self.save()
msg = _("Failed to run hook for order approval. Status: {status},"
" Output: {output}, Errors: {errors}").format(status=e.status, output=e.output, errors=e.errors)
history_msg = _("The '{order}' order has failed.").format(order=escape(self))
self.add_event('FAILED', history_msg, profile=self.owner)
raise CloudBoltException(msg)
from jobs.models import Job
# Saving job objects will cause them to be kicked off by the
# job engine within a minute
jobs = []
for order_item in order_items:
jobtype = getattr(order_item, 'job_type', None)
if not jobtype:
# the job type will default to the first word of the class type
# ex. "provision", "decom"
jobtype = str(order_item.real_type).split(" ", 1)[0]
quantity = 1
# quantity is a special field on order_items. If an
# order_item has the quantity field, kick off that many
# jobs
if hasattr(order_item, 'quantity') and \
order_item.quantity is not None and \
order_item.quantity != '':
quantity = int(order_item.quantity)
for i in range(quantity):
job = Job(job_parameters=order_item,
type=jobtype,
owner=self.owner,
parent_job=parent_job)
job.save()
# Associate the job with any server(s)
# This may seem unnecessary because it's done when most jobs
# run, but it's needed at the very least for scheduled server
# modification jobs (for changing resources) so they show up on
# the server as scheduled before they actually run
servers = []
if hasattr(order_item, "server"):
servers = [order_item.server]
elif hasattr(order_item, "servers"):
servers = order_item.servers.all()
for server in servers:
server.jobs.add(job)
jobs.append(job)
# If it didn't make any jobs, just call it done
if not jobs:
self.complete("SUCCESS")
return jobs, msg
| return True | conditional_block |
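The held-out middle of this row is the single line return True under the group-level auto-approval check. Read as plain booleans, the branching in should_auto_approve() amounts to the small pure function below; this is just a restatement for clarity with stand-in arguments, not code from the models themselves.

```python
def should_auto_approve(group_auto_approval, is_multilevel, has_all_roles, owner_can_approve):
    """Mirror the decision order: group setting first, then multilevel roles, then single-level permission."""
    if group_auto_approval:
        return True
    if is_multilevel:
        return has_all_roles
    return owner_can_approve


print(should_auto_approve(False, True, True, False))   # True: every multilevel approval role is held
print(should_auto_approve(False, False, False, True))  # True: single-level, owner can order.approve
print(should_auto_approve(False, True, False, True))   # False: multilevel, but roles are missing
```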
usage.py | # Generate reports showing AWS snapshots, AMIs, volumes, and instances; and their KEEP-tags and if PROD-tagged
# Snapshots report shows the associated AMIs and the KEEP-tags thereof
# Volumes report shows the associated instances and the KEEP-tags thereof
# Code borrowed heavily from Niall's previous script: volume_cleanup.py
import os
import sys
import boto
from boto import ec2
# Name your output files
volumes_data_output_file = "volumes.tsv"
snapshots_data_output_file = "snapshots.tsv"
instances_data_output_file = "instances.tsv"
images_data_output_file = "images.tsv"
def getRegions():
regions = ec2.regions()
region_names = []
for region in regions:
region_names.append(region.name)
return region_names
def credentials():
|
def getInstances(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
instances = []
reservations = conn.get_all_reservations()
for reservation in reservations:
for instance in reservation.instances:
instances.append(instance)
except boto.exception.EC2ResponseError:
return []
return instances
def getVolumes(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
volumes = conn.get_all_volumes()
except boto.exception.EC2ResponseError:
return []
return volumes
# Snapshots can be public, private, or owned by this account; get_all_snapshots() defaults to everything visible.
# We are only interested in the ones owned by us, so filter on 'owner_id' = 794321122735,
# or simply pass owner='self' to get_all_snapshots().
def getSnapshots(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
snapshots = conn.get_all_snapshots(owner='self')
except boto.exception.EC2ResponseError:
return []
return snapshots
def getImages(region):
"""Return images for one given region, owned by self"""
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
images = conn.get_all_images(owners=['self'])
except boto.exception.EC2ResponseError:
return []
return images
def getSnapshotsOf(image):
"""Return list of snapshot_ids associated with the given image"""
snapshotIds = []
deviceMapping = image.block_device_mapping # dict of devices
devices = deviceMapping.keys()
for d in devices:
snapshotId = deviceMapping[d].snapshot_id
if snapshotId is not None:
snapshotIds.append(snapshotId.encode())
return snapshotIds
def getImagesD(region):
"""Use dictionaries 'cos we'll have to cross-reference to get snapshots that go with the AMIs
returns list of dictionaries representing images from one region
"""
images = getImages(region)
imageDicts = []
for im in images:
imageDict = {"name": im.name,
"id": im.id,
"region": im.region.name,
"state": im.state,
"created": im.creationDate,
"type": im.type,
"KEEP": getKeepTag(im),
"name_tag": get_name_tag(im),
"snapshots": getSnapshotsOf(im),
"description": im.description,
"PROD": isProduction(im)
}
imageDicts.append(imageDict)
return imageDicts
def getSnapshotsD(region):
""" return a list of dictionaries representing snapshots from one region """
# Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)
snapshots = getSnapshots(region)
snapshotsDicts = []
ims = getImages(region)
for s in snapshots:
amis = getAmisOf(s, ims)
amiIds = []
amiKeeps = []
if len(amis) == 1:
amiIds = amis[0].id.encode()
amiKeeps = getKeepTag(amis[0])
elif len(amis) == 0:
amiIds = "-------no-AMI-found"
amiKeeps = "-------no-AMI-found"
else:
for a in amis:
amiIds.append(a.id.encode())
amiKeeps.append(getKeepTag(a))
snapshotsDict = {"id": s.id,
"status": s.status,
"region": s.region.name,
"progress": s.progress,
"start_time": s.start_time,
"volume_id": s.volume_id,
"volume_size": s.volume_size,
"KEEP-tag": getKeepTag(s),
"Name": get_name_tag(s),
"AMI(s)": amiIds,
"AMI_KEEP-tags": amiKeeps,
"PROD": isProduction(s),
"Description": s.description
}
snapshotsDicts.append(snapshotsDict)
return snapshotsDicts
def getVolumesD(region):
""" return a list of dictionaries representing volumes from one region """
volumes = getVolumes(region)
    volumesDicts = []
    # iterate over the actual volumes (not the result list) and collect one dict per volume
    for v in volumes:
        volumesDict = {"id": v.id,
                       "KEEP-tag": getKeepTag(v),
                       "instance_KEEP-tag": getKeepTag(getInstanceOf(v)),
                       "instance": v.attach_data.instance_id,
                       "status": v.status,
                       "size": v.size,
                       "create-time": v.create_time,
                       "region": v.region.name,
                       "zone": v.zone,
                       "snapshot_id": v.snapshot_id,
                       "PROD": isProduction(v)
                       }
        volumesDicts.append(volumesDict)
    return volumesDicts
def getInstancesD(region):
""" return a list of dictionaries representing instances for one region, will help with volume-instance-KEEP-tag look-up. Maybe. """
instances = getInstances(region)
instancesDicts = {"id": i.id,
"KEEP-tag": getKeepTag(i),
"instance_type": i.instance_type,
"state": i.state,
"launch_time": i.launch_time,
"security_groups": getGroups(i),
"region": i.region.name,
"PROD": isProduction(i)
}
########## Seems to work ###################
def getAmisOf(snapshot, images):
"""retrieve list of AMIs that refer to a given snapshot"""
amis = []
for im in images:
snapshotsOfThisIm = getSnapshotsOf(im)
for soti in snapshotsOfThisIm:
if soti == snapshot.id:
amis.append(im)
return amis
def getKeepTag(obj):
"""If tag with key='KEEP' exists, return its value (can be an empty string), else it's '-------no-tag'"""
if 'KEEP' in obj.tags:
return obj.tags['KEEP']
else:
return "-------no-tag"
# try:
# tag = obj.tags['KEEP']
# except:
# # Note: some with empty KEEP-tags, through web console they look the same as those untagged
# return "-----"
# return tag
def isProduction(obj):
"""Returns true if the object (instance, volume, snapshot, AMI) has a tag with 'PROD' for key"""
return 'PROD' in obj.tags # This is deprecated? obj.tags.has_key('PROD')
def get_name_tag(obj):
"""Name is a tag that might not exist, but if it does, it's very helpful for users to identify their resources"""
if 'Name' in obj.tags:
return obj.tags['Name']
else:
return ""
def getGroups(instance):
if len(instance.groups) == 1:
# if there's only one group, then unpack it
return instance.groups[0].name
    else:  # in the unexpected case where there is more than one group, deal with it
groupList = []
for g in instance.groups:
groupList.append(g.name)
return groupList
def getInstanceOf(volume):
""" Returns the actual instance
(if only instance_id is needed, can access directly from volume)
(if KEEP tag is needed, maybe it's better to grab it from a local dictionary list of instances)
"""
# ughhhhhhhh refactor later (shouldn't do this for every single volume, takes forever)
creds = credentials()
conn = ec2.connect_to_region(volume.region.name, **creds)
ins_id = volume.attach_data.instance_id
reservation = conn.get_all_instances(instance_ids=ins_id)[0]
return reservation.instances[0]
###############################################################################################################################
def generateInfoVolumes(regions):
""" Write volumes to file """
print "\nWriting volumes info to output file %s" % volumes_data_output_file
with open(volumes_data_output_file, 'w') as f1:
f1.write("VOLUMES\n")
f1.write(
"Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\tassociated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n")
for r in regions:
volumes = getVolumes(r)
print "." # give some feedback to the user
for v in volumes:
f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size,
v.create_time, v.region.name, v.zone, v.snapshot_id))
def generateInfoSnapshots(regions):
""" Write snapshots to file """
print "Writing snapshots info to output file %s" % snapshots_data_output_file
snapshots = []
for r in regions:
snapshots += getSnapshotsD(r)
print "." # feedback for the user
with open(snapshots_data_output_file, 'w') as f2:
f2.write("SNAPSHOTS\n")
f2.write(
"Name\tsnapshot_id\tKEEP-tag_of_snapshot\tKEEP-tag_of_AMI\tproduction?\tassociated_AMI\tstart_time\tstatus"
"\tregion\tprogress\tassociated_volume\tvolume_size\tdescription\n\n")
for s in snapshots:
f2.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'], s['PROD'], s['AMI(s)'], s['start_time'],
s['status'], s['region'], s['progress'], s['volume_id'], s['volume_size'], s['Description']))
def generateInfoInstances(regions):
""" Write snapshots to file """
print "Writing instances info to output file %s" % instances_data_output_file
with open(instances_data_output_file, 'w') as f3:
f3.write("INSTANCES\n")
f3.write("Name\tinstance ID\tKEEP-tag\tproduction\tinstance_type\tstate\tlaunched\tsecurity_groups\tregion\n\n")
for region in regions:
print "." # feedback for user
instances = getInstances(region)
for i in instances:
f3.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (get_name_tag(i), i.id, getKeepTag(i), isProduction(i), i.instance_type, i.state,
i.launch_time, getGroups(i), i.region.name))
def generateInfoImages(regions):
print "Writing images info to output file %s" % images_data_output_file
with open(images_data_output_file, 'w') as f4:
f4.write("IMAGES\n")
f4.write("AMI_name\talternative_name\timage_id\tKEEP-tag\tproduction?\tregion\tstate\tcreated\ttype\tassociated_snapshots\tdescription\n\n")
for r in regions:
print "." # feedback for user
images = getImagesD(r)
for im in images:
                # format multiple snapshots better (only a handful, but it will mess up columns if comma-delimited)
if len(im['snapshots']) == 1:
snaps = im['snapshots'][0]
else:
snaps = ""
for s in im['snapshots']:
snaps += s + " "
f4.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (im['name'], im['name_tag'], im['id'], im['KEEP'], im['PROD'], im['region'], im['state'],
im['created'], im['type'], snaps, im['description']))
# TODO: possibility? have these reports accessible from s3, public url, cronjob
def main():
regions = getRegions()
#################################################
# debugging goodies #
# reg = regions[3] # ireland #
# ims = getImages(reg)
# im = ims[0]
# vols = getVolumes(reg)
# vol = vols[0]
# ins = getInstances(reg)
# ins0 = ins[0]
# snaps = getSnapshots(reg)
# snap = snaps[0]
# ireland ims[19] has empty string for PROD-tag
# import pdb; pdb.set_trace()
# #
#################################################
# generateInfoVolumes(regions)
# generateInfoSnapshots(regions)
# generateInfoInstances(regions)
generateInfoImages(regions)
if __name__ == '__main__':
main()
| return {"aws_access_key_id": os.environ['AWS_ACCESS_KEY'],
"aws_secret_access_key": os.environ['AWS_SECRET_KEY']} | identifier_body |
usage.py | # Generate reports showing AWS snapshots, AMIs, volumes, and instances; and their KEEP-tags and if PROD-tagged
# Snapshots report shows the associated AMIs and the KEEP-tags thereof
# Volumes report shows the associated instances and the KEEP-tags thereof
# Code borrowed heavily from Niall's previous script: volume_cleanup.py
import os
import sys
import boto
from boto import ec2
# Name your output files
volumes_data_output_file = "volumes.tsv"
snapshots_data_output_file = "snapshots.tsv"
instances_data_output_file = "instances.tsv"
images_data_output_file = "images.tsv"
def getRegions():
regions = ec2.regions()
region_names = []
for region in regions:
region_names.append(region.name)
return region_names
def credentials():
return {"aws_access_key_id": os.environ['AWS_ACCESS_KEY'],
"aws_secret_access_key": os.environ['AWS_SECRET_KEY']}
def getInstances(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
instances = []
reservations = conn.get_all_reservations()
for reservation in reservations:
for instance in reservation.instances:
instances.append(instance)
except boto.exception.EC2ResponseError:
return []
return instances
def getVolumes(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
volumes = conn.get_all_volumes()
except boto.exception.EC2ResponseError:
return []
return volumes
# Snapshots can be public, private, or owned by this account; get_all_snapshots() defaults to everything visible.
# We are only interested in the ones owned by us, so filter on 'owner_id' = 794321122735,
# or simply pass owner='self' to get_all_snapshots().
def getSnapshots(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
snapshots = conn.get_all_snapshots(owner='self')
except boto.exception.EC2ResponseError:
return []
return snapshots
def getImages(region):
"""Return images for one given region, owned by self"""
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
images = conn.get_all_images(owners=['self'])
except boto.exception.EC2ResponseError:
return []
return images
def getSnapshotsOf(image):
"""Return list of snapshot_ids associated with the given image"""
snapshotIds = []
deviceMapping = image.block_device_mapping # dict of devices
devices = deviceMapping.keys()
for d in devices:
snapshotId = deviceMapping[d].snapshot_id
if snapshotId is not None:
snapshotIds.append(snapshotId.encode())
return snapshotIds
def getImagesD(region):
"""Use dictionaries 'cos we'll have to cross-reference to get snapshots that go with the AMIs
returns list of dictionaries representing images from one region
"""
images = getImages(region)
imageDicts = []
for im in images:
imageDict = {"name": im.name,
"id": im.id,
"region": im.region.name,
"state": im.state,
"created": im.creationDate,
"type": im.type,
"KEEP": getKeepTag(im),
"name_tag": get_name_tag(im),
"snapshots": getSnapshotsOf(im),
"description": im.description,
"PROD": isProduction(im)
}
imageDicts.append(imageDict)
return imageDicts
def getSnapshotsD(region):
""" return a list of dictionaries representing snapshots from one region """
# Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)
snapshots = getSnapshots(region)
snapshotsDicts = []
ims = getImages(region)
for s in snapshots:
amis = getAmisOf(s, ims)
amiIds = []
amiKeeps = []
if len(amis) == 1:
amiIds = amis[0].id.encode()
amiKeeps = getKeepTag(amis[0])
elif len(amis) == 0:
amiIds = "-------no-AMI-found"
amiKeeps = "-------no-AMI-found"
else:
|
snapshotsDict = {"id": s.id,
"status": s.status,
"region": s.region.name,
"progress": s.progress,
"start_time": s.start_time,
"volume_id": s.volume_id,
"volume_size": s.volume_size,
"KEEP-tag": getKeepTag(s),
"Name": get_name_tag(s),
"AMI(s)": amiIds,
"AMI_KEEP-tags": amiKeeps,
"PROD": isProduction(s),
"Description": s.description
}
snapshotsDicts.append(snapshotsDict)
return snapshotsDicts
def getVolumesD(region):
""" return a list of dictionaries representing volumes from one region """
volumes = getVolumes(region)
    volumesDicts = []
    # iterate over the actual volumes (not the result list) and collect one dict per volume
    for v in volumes:
        volumesDict = {"id": v.id,
                       "KEEP-tag": getKeepTag(v),
                       "instance_KEEP-tag": getKeepTag(getInstanceOf(v)),
                       "instance": v.attach_data.instance_id,
                       "status": v.status,
                       "size": v.size,
                       "create-time": v.create_time,
                       "region": v.region.name,
                       "zone": v.zone,
                       "snapshot_id": v.snapshot_id,
                       "PROD": isProduction(v)
                       }
        volumesDicts.append(volumesDict)
    return volumesDicts
def getInstancesD(region):
""" return a list of dictionaries representing instances for one region, will help with volume-instance-KEEP-tag look-up. Maybe. """
instances = getInstances(region)
instancesDicts = {"id": i.id,
"KEEP-tag": getKeepTag(i),
"instance_type": i.instance_type,
"state": i.state,
"launch_time": i.launch_time,
"security_groups": getGroups(i),
"region": i.region.name,
"PROD": isProduction(i)
}
########## Seems to work ###################
def getAmisOf(snapshot, images):
"""retrieve list of AMIs that refer to a given snapshot"""
amis = []
for im in images:
snapshotsOfThisIm = getSnapshotsOf(im)
for soti in snapshotsOfThisIm:
if soti == snapshot.id:
amis.append(im)
return amis
def getKeepTag(obj):
"""If tag with key='KEEP' exists, return its value (can be an empty string), else it's '-------no-tag'"""
if 'KEEP' in obj.tags:
return obj.tags['KEEP']
else:
return "-------no-tag"
# try:
# tag = obj.tags['KEEP']
# except:
# # Note: some with empty KEEP-tags, through web console they look the same as those untagged
# return "-----"
# return tag
def isProduction(obj):
"""Returns true if the object (instance, volume, snapshot, AMI) has a tag with 'PROD' for key"""
return 'PROD' in obj.tags # This is deprecated? obj.tags.has_key('PROD')
def get_name_tag(obj):
"""Name is a tag that might not exist, but if it does, it's very helpful for users to identify their resources"""
if 'Name' in obj.tags:
return obj.tags['Name']
else:
return ""
def getGroups(instance):
if len(instance.groups) == 1:
# if there's only one group, then unpack it
return instance.groups[0].name
    else:  # in the unexpected case where there is more than one group, deal with it
groupList = []
for g in instance.groups:
groupList.append(g.name)
return groupList
def getInstanceOf(volume):
""" Returns the actual instance
(if only instance_id is needed, can access directly from volume)
(if KEEP tag is needed, maybe it's better to grab it from a local dictionary list of instances)
"""
# ughhhhhhhh refactor later (shouldn't do this for every single volume, takes forever)
creds = credentials()
conn = ec2.connect_to_region(volume.region.name, **creds)
ins_id = volume.attach_data.instance_id
reservation = conn.get_all_instances(instance_ids=ins_id)[0]
return reservation.instances[0]
###############################################################################################################################
def generateInfoVolumes(regions):
""" Write volumes to file """
print "\nWriting volumes info to output file %s" % volumes_data_output_file
with open(volumes_data_output_file, 'w') as f1:
f1.write("VOLUMES\n")
f1.write(
"Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\tassociated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n")
for r in regions:
volumes = getVolumes(r)
print "." # give some feedback to the user
for v in volumes:
f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size,
v.create_time, v.region.name, v.zone, v.snapshot_id))
def generateInfoSnapshots(regions):
""" Write snapshots to file """
print "Writing snapshots info to output file %s" % snapshots_data_output_file
snapshots = []
for r in regions:
snapshots += getSnapshotsD(r)
print "." # feedback for the user
with open(snapshots_data_output_file, 'w') as f2:
f2.write("SNAPSHOTS\n")
f2.write(
"Name\tsnapshot_id\tKEEP-tag_of_snapshot\tKEEP-tag_of_AMI\tproduction?\tassociated_AMI\tstart_time\tstatus"
"\tregion\tprogress\tassociated_volume\tvolume_size\tdescription\n\n")
for s in snapshots:
f2.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'], s['PROD'], s['AMI(s)'], s['start_time'],
s['status'], s['region'], s['progress'], s['volume_id'], s['volume_size'], s['Description']))
def generateInfoInstances(regions):
""" Write snapshots to file """
print "Writing instances info to output file %s" % instances_data_output_file
with open(instances_data_output_file, 'w') as f3:
f3.write("INSTANCES\n")
f3.write("Name\tinstance ID\tKEEP-tag\tproduction\tinstance_type\tstate\tlaunched\tsecurity_groups\tregion\n\n")
for region in regions:
print "." # feedback for user
instances = getInstances(region)
for i in instances:
f3.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (get_name_tag(i), i.id, getKeepTag(i), isProduction(i), i.instance_type, i.state,
i.launch_time, getGroups(i), i.region.name))
def generateInfoImages(regions):
print "Writing images info to output file %s" % images_data_output_file
with open(images_data_output_file, 'w') as f4:
f4.write("IMAGES\n")
f4.write("AMI_name\talternative_name\timage_id\tKEEP-tag\tproduction?\tregion\tstate\tcreated\ttype\tassociated_snapshots\tdescription\n\n")
for r in regions:
print "." # feedback for user
images = getImagesD(r)
for im in images:
                # format multiple snapshots better (only a handful, but it will mess up columns if comma-delimited)
if len(im['snapshots']) == 1:
snaps = im['snapshots'][0]
else:
snaps = ""
for s in im['snapshots']:
snaps += s + " "
f4.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (im['name'], im['name_tag'], im['id'], im['KEEP'], im['PROD'], im['region'], im['state'],
im['created'], im['type'], snaps, im['description']))
# TODO: possibility? have these reports accessible from s3, public url, cronjob
def main():
regions = getRegions()
#################################################
# debugging goodies #
# reg = regions[3] # ireland #
# ims = getImages(reg)
# im = ims[0]
# vols = getVolumes(reg)
# vol = vols[0]
# ins = getInstances(reg)
# ins0 = ins[0]
# snaps = getSnapshots(reg)
# snap = snaps[0]
# ireland ims[19] has empty string for PROD-tag
# import pdb; pdb.set_trace()
# #
#################################################
# generateInfoVolumes(regions)
# generateInfoSnapshots(regions)
# generateInfoInstances(regions)
generateInfoImages(regions)
if __name__ == '__main__':
main()
| for a in amis:
amiIds.append(a.id.encode())
amiKeeps.append(getKeepTag(a)) | conditional_block |
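Here the held-out middle is the three-line loop under the final else in getSnapshotsD(). If you wanted to grade completions for rows like this, one deliberately strict baseline is whitespace-normalized exact match against the reference middle; the snippet below is only an illustration of that idea, not an evaluation that accompanies the dataset.

```python
def normalized(text):
    """Strip each line so that pure indentation differences do not count as mismatches."""
    return "\n".join(line.strip() for line in text.strip().splitlines())


def exact_match(prediction, reference):
    """True when the prediction reproduces the reference middle up to surrounding whitespace."""
    return normalized(prediction) == normalized(reference)


reference = ("            for a in amis:\n"
             "                amiIds.append(a.id.encode())\n"
             "                amiKeeps.append(getKeepTag(a))\n")
prediction = "for a in amis:\n    amiIds.append(a.id.encode())\n    amiKeeps.append(getKeepTag(a))"
print(exact_match(prediction, reference))  # True
```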
usage.py | # Generate reports showing AWS snapshots, AMIs, volumes, and instances; and their KEEP-tags and if PROD-tagged
# Snapshots report shows the associated AMIs and the KEEP-tags thereof
# Volumes report shows the associated instances and the KEEP-tags thereof
# Code borrowed heavily from Niall's previous script: volume_cleanup.py
import os
import sys
import boto
from boto import ec2
# Name your output files
volumes_data_output_file = "volumes.tsv"
snapshots_data_output_file = "snapshots.tsv"
instances_data_output_file = "instances.tsv"
images_data_output_file = "images.tsv"
def getRegions():
regions = ec2.regions()
region_names = []
for region in regions:
region_names.append(region.name)
return region_names
def credentials():
return {"aws_access_key_id": os.environ['AWS_ACCESS_KEY'],
"aws_secret_access_key": os.environ['AWS_SECRET_KEY']}
def getInstances(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
instances = []
reservations = conn.get_all_reservations()
for reservation in reservations:
for instance in reservation.instances:
instances.append(instance)
except boto.exception.EC2ResponseError:
return []
return instances
def getVolumes(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
volumes = conn.get_all_volumes()
except boto.exception.EC2ResponseError:
return []
return volumes
# Snapshots can be public, private, or owned by this account; get_all_snapshots() defaults to everything visible.
# We are only interested in the ones owned by us, so filter on 'owner_id' = 794321122735,
# or simply pass owner='self' to get_all_snapshots().
def getSnapshots(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
snapshots = conn.get_all_snapshots(owner='self')
except boto.exception.EC2ResponseError:
return []
return snapshots
def getImages(region):
"""Return images for one given region, owned by self"""
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
images = conn.get_all_images(owners=['self'])
except boto.exception.EC2ResponseError:
return []
return images
def getSnapshotsOf(image):
"""Return list of snapshot_ids associated with the given image"""
snapshotIds = []
deviceMapping = image.block_device_mapping # dict of devices
devices = deviceMapping.keys()
for d in devices:
snapshotId = deviceMapping[d].snapshot_id
if snapshotId is not None:
snapshotIds.append(snapshotId.encode())
return snapshotIds
def getImagesD(region):
"""Use dictionaries 'cos we'll have to cross-reference to get snapshots that go with the AMIs
returns list of dictionaries representing images from one region
"""
images = getImages(region)
imageDicts = []
for im in images:
imageDict = {"name": im.name,
"id": im.id,
"region": im.region.name,
"state": im.state,
"created": im.creationDate,
"type": im.type,
"KEEP": getKeepTag(im),
"name_tag": get_name_tag(im),
"snapshots": getSnapshotsOf(im),
"description": im.description,
"PROD": isProduction(im)
}
imageDicts.append(imageDict)
return imageDicts
def getSnapshotsD(region):
""" return a list of dictionaries representing snapshots from one region """
# Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)
snapshots = getSnapshots(region)
snapshotsDicts = []
ims = getImages(region)
for s in snapshots:
amis = getAmisOf(s, ims)
amiIds = []
amiKeeps = []
if len(amis) == 1:
amiIds = amis[0].id.encode()
amiKeeps = getKeepTag(amis[0])
elif len(amis) == 0:
amiIds = "-------no-AMI-found"
amiKeeps = "-------no-AMI-found"
else:
for a in amis:
amiIds.append(a.id.encode())
amiKeeps.append(getKeepTag(a))
snapshotsDict = {"id": s.id,
"status": s.status,
"region": s.region.name,
"progress": s.progress,
"start_time": s.start_time,
"volume_id": s.volume_id,
"volume_size": s.volume_size,
"KEEP-tag": getKeepTag(s),
"Name": get_name_tag(s),
"AMI(s)": amiIds,
"AMI_KEEP-tags": amiKeeps, | "Description": s.description
}
snapshotsDicts.append(snapshotsDict)
return snapshotsDicts
def getVolumesD(region):
""" return a list of dictionaries representing volumes from one region """
volumes = getVolumes(region)
    volumesDicts = []
    # iterate over the actual volumes (not the result list) and collect one dict per volume
    for v in volumes:
        volumesDict = {"id": v.id,
                       "KEEP-tag": getKeepTag(v),
                       "instance_KEEP-tag": getKeepTag(getInstanceOf(v)),
                       "instance": v.attach_data.instance_id,
                       "status": v.status,
                       "size": v.size,
                       "create-time": v.create_time,
                       "region": v.region.name,
                       "zone": v.zone,
                       "snapshot_id": v.snapshot_id,
                       "PROD": isProduction(v)
                       }
        volumesDicts.append(volumesDict)
    return volumesDicts
def getInstancesD(region):
""" return a list of dictionaries representing instances for one region, will help with volume-instance-KEEP-tag look-up. Maybe. """
instances = getInstances(region)
instancesDicts = {"id": i.id,
"KEEP-tag": getKeepTag(i),
"instance_type": i.instance_type,
"state": i.state,
"launch_time": i.launch_time,
"security_groups": getGroups(i),
"region": i.region.name,
"PROD": isProduction(i)
}
########## Seems to work ###################
def getAmisOf(snapshot, images):
"""retrieve list of AMIs that refer to a given snapshot"""
amis = []
for im in images:
snapshotsOfThisIm = getSnapshotsOf(im)
for soti in snapshotsOfThisIm:
if soti == snapshot.id:
amis.append(im)
return amis
def getKeepTag(obj):
"""If tag with key='KEEP' exists, return its value (can be an empty string), else it's '-------no-tag'"""
if 'KEEP' in obj.tags:
return obj.tags['KEEP']
else:
return "-------no-tag"
# try:
# tag = obj.tags['KEEP']
# except:
# # Note: some with empty KEEP-tags, through web console they look the same as those untagged
# return "-----"
# return tag
def isProduction(obj):
"""Returns true if the object (instance, volume, snapshot, AMI) has a tag with 'PROD' for key"""
return 'PROD' in obj.tags # This is deprecated? obj.tags.has_key('PROD')
def get_name_tag(obj):
"""Name is a tag that might not exist, but if it does, it's very helpful for users to identify their resources"""
if 'Name' in obj.tags:
return obj.tags['Name']
else:
return ""
def getGroups(instance):
if len(instance.groups) == 1:
# if there's only one group, then unpack it
return instance.groups[0].name
    else:  # in the unexpected case where there is more than one group, deal with it
groupList = []
for g in instance.groups:
groupList.append(g.name)
return groupList
def getInstanceOf(volume):
""" Returns the actual instance
(if only instance_id is needed, can access directly from volume)
(if KEEP tag is needed, maybe it's better to grab it from a local dictionary list of instances)
"""
# ughhhhhhhh refactor later (shouldn't do this for every single volume, takes forever)
creds = credentials()
conn = ec2.connect_to_region(volume.region.name, **creds)
ins_id = volume.attach_data.instance_id
reservation = conn.get_all_instances(instance_ids=ins_id)[0]
return reservation.instances[0]
###############################################################################################################################
def generateInfoVolumes(regions):
""" Write volumes to file """
print "\nWriting volumes info to output file %s" % volumes_data_output_file
with open(volumes_data_output_file, 'w') as f1:
f1.write("VOLUMES\n")
f1.write(
"Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\tassociated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n")
for r in regions:
volumes = getVolumes(r)
print "." # give some feedback to the user
for v in volumes:
f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size,
v.create_time, v.region.name, v.zone, v.snapshot_id))
def generateInfoSnapshots(regions):
""" Write snapshots to file """
print "Writing snapshots info to output file %s" % snapshots_data_output_file
snapshots = []
for r in regions:
snapshots += getSnapshotsD(r)
print "." # feedback for the user
with open(snapshots_data_output_file, 'w') as f2:
f2.write("SNAPSHOTS\n")
f2.write(
"Name\tsnapshot_id\tKEEP-tag_of_snapshot\tKEEP-tag_of_AMI\tproduction?\tassociated_AMI\tstart_time\tstatus"
"\tregion\tprogress\tassociated_volume\tvolume_size\tdescription\n\n")
for s in snapshots:
f2.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'], s['PROD'], s['AMI(s)'], s['start_time'],
s['status'], s['region'], s['progress'], s['volume_id'], s['volume_size'], s['Description']))
def generateInfoInstances(regions):
""" Write snapshots to file """
print "Writing instances info to output file %s" % instances_data_output_file
with open(instances_data_output_file, 'w') as f3:
f3.write("INSTANCES\n")
f3.write("Name\tinstance ID\tKEEP-tag\tproduction\tinstance_type\tstate\tlaunched\tsecurity_groups\tregion\n\n")
for region in regions:
print "." # feedback for user
instances = getInstances(region)
for i in instances:
f3.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (get_name_tag(i), i.id, getKeepTag(i), isProduction(i), i.instance_type, i.state,
i.launch_time, getGroups(i), i.region.name))
def generateInfoImages(regions):
print "Writing images info to output file %s" % images_data_output_file
with open(images_data_output_file, 'w') as f4:
f4.write("IMAGES\n")
f4.write("AMI_name\talternative_name\timage_id\tKEEP-tag\tproduction?\tregion\tstate\tcreated\ttype\tassociated_snapshots\tdescription\n\n")
for r in regions:
print "." # feedback for user
images = getImagesD(r)
for im in images:
                # format multiple snapshots better (only a handful, but it will mess up columns if comma-delimited)
if len(im['snapshots']) == 1:
snaps = im['snapshots'][0]
else:
snaps = ""
for s in im['snapshots']:
snaps += s + " "
f4.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (im['name'], im['name_tag'], im['id'], im['KEEP'], im['PROD'], im['region'], im['state'],
im['created'], im['type'], snaps, im['description']))
# TODO: possibility? have these reports accessible from s3, public url, cronjob
def main():
regions = getRegions()
#################################################
# debugging goodies #
# reg = regions[3] # ireland #
# ims = getImages(reg)
# im = ims[0]
# vols = getVolumes(reg)
# vol = vols[0]
# ins = getInstances(reg)
# ins0 = ins[0]
# snaps = getSnapshots(reg)
# snap = snaps[0]
# ireland ims[19] has empty string for PROD-tag
# import pdb; pdb.set_trace()
# #
#################################################
# generateInfoVolumes(regions)
# generateInfoSnapshots(regions)
# generateInfoInstances(regions)
generateInfoImages(regions)
if __name__ == '__main__':
main() | "PROD": isProduction(s), | random_line_split |
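The rows shown so far cover all four fim_type classes named in the header: random_line_split, identifier_name, identifier_body, and conditional_block. Assuming the rows have already been loaded into Python dicts by whatever reader you use (an assumption, since no loader is specified here), the class balance can be tallied in a few lines:

```python
from collections import Counter


def fim_type_histogram(rows):
    """Count how many rows fall into each fim_type class."""
    return Counter(row["fim_type"] for row in rows)


# Toy rows mirroring the file_name / fim_type pairs seen above.
rows = [
    {"file_name": "models.py", "fim_type": "random_line_split"},
    {"file_name": "models.py", "fim_type": "identifier_name"},
    {"file_name": "models.py", "fim_type": "conditional_block"},
    {"file_name": "usage.py", "fim_type": "identifier_body"},
    {"file_name": "usage.py", "fim_type": "conditional_block"},
    {"file_name": "usage.py", "fim_type": "random_line_split"},
]
print(fim_type_histogram(rows))
```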
usage.py | # Generate reports showing AWS snapshots, AMIs, volumes, and instances; and their KEEP-tags and if PROD-tagged
# Snapshots report shows the associated AMIs and the KEEP-tags thereof
# Volumes report shows the associated instances and the KEEP-tags thereof
# Code borrowed heavily from Niall's previous script: volume_cleanup.py
import os
import sys
import boto
from boto import ec2
# Name your output files
volumes_data_output_file = "volumes.tsv"
snapshots_data_output_file = "snapshots.tsv"
instances_data_output_file = "instances.tsv"
images_data_output_file = "images.tsv"
def getRegions():
regions = ec2.regions()
region_names = []
for region in regions:
region_names.append(region.name)
return region_names
def credentials():
return {"aws_access_key_id": os.environ['AWS_ACCESS_KEY'],
"aws_secret_access_key": os.environ['AWS_SECRET_KEY']}
def getInstances(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
instances = []
reservations = conn.get_all_reservations()
for reservation in reservations:
for instance in reservation.instances:
instances.append(instance)
except boto.exception.EC2ResponseError:
return []
return instances
def getVolumes(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
volumes = conn.get_all_volumes()
except boto.exception.EC2ResponseError:
return []
return volumes
# Snapshots can be public, private, or owned by this account; get_all_snapshots() defaults to everything visible.
# We are only interested in the ones owned by us, so filter on 'owner_id' = 794321122735,
# or simply pass owner='self' to get_all_snapshots().
def getSnapshots(region):
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
snapshots = conn.get_all_snapshots(owner='self')
except boto.exception.EC2ResponseError:
return []
return snapshots
def getImages(region):
"""Return images for one given region, owned by self"""
creds = credentials()
try:
conn = ec2.connect_to_region(region, **creds)
images = conn.get_all_images(owners=['self'])
except boto.exception.EC2ResponseError:
return []
return images
def getSnapshotsOf(image):
"""Return list of snapshot_ids associated with the given image"""
snapshotIds = []
deviceMapping = image.block_device_mapping # dict of devices
devices = deviceMapping.keys()
for d in devices:
snapshotId = deviceMapping[d].snapshot_id
if snapshotId is not None:
snapshotIds.append(snapshotId.encode())
return snapshotIds
def getImagesD(region):
"""Use dictionaries 'cos we'll have to cross-reference to get snapshots that go with the AMIs
returns list of dictionaries representing images from one region
"""
images = getImages(region)
imageDicts = []
for im in images:
imageDict = {"name": im.name,
"id": im.id,
"region": im.region.name,
"state": im.state,
"created": im.creationDate,
"type": im.type,
"KEEP": getKeepTag(im),
"name_tag": get_name_tag(im),
"snapshots": getSnapshotsOf(im),
"description": im.description,
"PROD": isProduction(im)
}
imageDicts.append(imageDict)
return imageDicts
def getSnapshotsD(region):
""" return a list of dictionaries representing snapshots from one region """
# Can a snapshot belong to more than one AMI? Dunno, keep list just in case (so it never breaks due to it)
snapshots = getSnapshots(region)
snapshotsDicts = []
ims = getImages(region)
for s in snapshots:
amis = getAmisOf(s, ims)
amiIds = []
amiKeeps = []
if len(amis) == 1:
amiIds = amis[0].id.encode()
amiKeeps = getKeepTag(amis[0])
elif len(amis) == 0:
amiIds = "-------no-AMI-found"
amiKeeps = "-------no-AMI-found"
else:
for a in amis:
amiIds.append(a.id.encode())
amiKeeps.append(getKeepTag(a))
snapshotsDict = {"id": s.id,
"status": s.status,
"region": s.region.name,
"progress": s.progress,
"start_time": s.start_time,
"volume_id": s.volume_id,
"volume_size": s.volume_size,
"KEEP-tag": getKeepTag(s),
"Name": get_name_tag(s),
"AMI(s)": amiIds,
"AMI_KEEP-tags": amiKeeps,
"PROD": isProduction(s),
"Description": s.description
}
snapshotsDicts.append(snapshotsDict)
return snapshotsDicts
def getVolumesD(region):
""" return a list of dictionaries representing volumes from one region """
volumes = getVolumes(region)
instances = getInstancesD(region)
volumesDicts = []
    for v in volumes:
        volumesDict = {"id": v.id,
                       "KEEP-tag": getKeepTag(v),
                       "instance_KEEP-tag": getKeepTag(getInstanceOf(v)),
                       "instance": v.attach_data.instance_id,
                       "status": v.status,
                       "size": v.size,
                       "create-time": v.create_time,
                       "region": v.region.name,
                       "zone": v.zone,
                       "snapshot_id": v.snapshot_id,
                       "PROD": isProduction(v)
                       }
        volumesDicts.append(volumesDict)
    return volumesDicts
def getInstancesD(region):
    """ return a list of dictionaries representing instances for one region, will help with volume-instance-KEEP-tag look-up. Maybe. """
    instances = getInstances(region)
    instancesDicts = []
    for i in instances:
        instancesDict = {"id": i.id,
                         "KEEP-tag": getKeepTag(i),
                         "instance_type": i.instance_type,
                         "state": i.state,
                         "launch_time": i.launch_time,
                         "security_groups": getGroups(i),
                         "region": i.region.name,
                         "PROD": isProduction(i)
                         }
        instancesDicts.append(instancesDict)
    return instancesDicts
########## Seems to work ###################
def getAmisOf(snapshot, images):
"""retrieve list of AMIs that refer to a given snapshot"""
amis = []
for im in images:
snapshotsOfThisIm = getSnapshotsOf(im)
for soti in snapshotsOfThisIm:
if soti == snapshot.id:
amis.append(im)
return amis
def getKeepTag(obj):
"""If tag with key='KEEP' exists, return its value (can be an empty string), else it's '-------no-tag'"""
if 'KEEP' in obj.tags:
return obj.tags['KEEP']
else:
return "-------no-tag"
# try:
# tag = obj.tags['KEEP']
# except:
# # Note: some with empty KEEP-tags, through web console they look the same as those untagged
# return "-----"
# return tag
def isProduction(obj):
"""Returns true if the object (instance, volume, snapshot, AMI) has a tag with 'PROD' for key"""
return 'PROD' in obj.tags # This is deprecated? obj.tags.has_key('PROD')
def get_name_tag(obj):
"""Name is a tag that might not exist, but if it does, it's very helpful for users to identify their resources"""
if 'Name' in obj.tags:
return obj.tags['Name']
else:
return ""
def getGroups(instance):
if len(instance.groups) == 1:
# if there's only one group, then unpack it
return instance.groups[0].name
else: # in the not-expected case where there is more than one groups, deal with it
groupList = []
for g in instance.groups:
groupList.append(g.name)
return groupList
def getInstanceOf(volume):
""" Returns the actual instance
(if only instance_id is needed, can access directly from volume)
(if KEEP tag is needed, maybe it's better to grab it from a local dictionary list of instances)
"""
# ughhhhhhhh refactor later (shouldn't do this for every single volume, takes forever)
creds = credentials()
conn = ec2.connect_to_region(volume.region.name, **creds)
ins_id = volume.attach_data.instance_id
reservation = conn.get_all_instances(instance_ids=ins_id)[0]
return reservation.instances[0]
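# Illustrative alternative only (not part of the original script): the
# "refactor later" note in getInstanceOf above suggests avoiding one EC2 call
# per volume. One way is to fetch a region's instances once and look them up
# locally; the helper name below is an assumption.
def getInstancesById(region):
    """Map instance_id -> instance for one region, fetched in a single pass"""
    byId = {}
    for i in getInstances(region):
        byId[i.id] = i
    return byId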
###############################################################################################################################
def | (regions):
""" Write volumes to file """
print "\nWriting volumes info to output file %s" % volumes_data_output_file
with open(volumes_data_output_file, 'w') as f1:
f1.write("VOLUMES\n")
f1.write(
"Name\tvolume_ID\tKEEP-tag_of_volume\tKEEP-tag_of_instance\tproduction?\tvolume_attachment_state\tassociated_instance\tinstance_state\tsize\tcreate_time\tregion\tzone\tassociated_snapshot\n\n")
for r in regions:
volumes = getVolumes(r)
print "." # give some feedback to the user
for v in volumes:
f1.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (get_name_tag(v), v.id, getKeepTag(v), getKeepTag(getInstanceOf(v)), isProduction(v), v.attachment_state(), v.attach_data.instance_id, v.status, v.size,
v.create_time, v.region.name, v.zone, v.snapshot_id))
def generateInfoSnapshots(regions):
""" Write snapshots to file """
print "Writing snapshots info to output file %s" % snapshots_data_output_file
snapshots = []
for r in regions:
snapshots += getSnapshotsD(r)
print "." # feedback for the user
with open(snapshots_data_output_file, 'w') as f2:
f2.write("SNAPSHOTS\n")
f2.write(
"Name\tsnapshot_id\tKEEP-tag_of_snapshot\tKEEP-tag_of_AMI\tproduction?\tassociated_AMI\tstart_time\tstatus"
"\tregion\tprogress\tassociated_volume\tvolume_size\tdescription\n\n")
for s in snapshots:
f2.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (s['Name'], s['id'], s['KEEP-tag'], s['AMI_KEEP-tags'], s['PROD'], s['AMI(s)'], s['start_time'],
s['status'], s['region'], s['progress'], s['volume_id'], s['volume_size'], s['Description']))
def generateInfoInstances(regions):
""" Write snapshots to file """
print "Writing instances info to output file %s" % instances_data_output_file
with open(instances_data_output_file, 'w') as f3:
f3.write("INSTANCES\n")
f3.write("Name\tinstance ID\tKEEP-tag\tproduction\tinstance_type\tstate\tlaunched\tsecurity_groups\tregion\n\n")
for region in regions:
print "." # feedback for user
instances = getInstances(region)
for i in instances:
f3.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (get_name_tag(i), i.id, getKeepTag(i), isProduction(i), i.instance_type, i.state,
i.launch_time, getGroups(i), i.region.name))
def generateInfoImages(regions):
print "Writing images info to output file %s" % images_data_output_file
with open(images_data_output_file, 'w') as f4:
f4.write("IMAGES\n")
f4.write("AMI_name\talternative_name\timage_id\tKEEP-tag\tproduction?\tregion\tstate\tcreated\ttype\tassociated_snapshots\tdescription\n\n")
for r in regions:
print "." # feedback for user
images = getImagesD(r)
for im in images:
                # format multiple snapshots better (only a handful, but it will mess up columns if comma-delimited)
if len(im['snapshots']) == 1:
snaps = im['snapshots'][0]
else:
snaps = ""
for s in im['snapshots']:
snaps += s + " "
f4.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n"
% (im['name'], im['name_tag'], im['id'], im['KEEP'], im['PROD'], im['region'], im['state'],
im['created'], im['type'], snaps, im['description']))
# TODO: possibility? have these reports accessible from s3, public url, cronjob
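# Illustrative sketch only (not part of the original script): one way to act on
# the TODO above and publish the generated TSVs to S3 with boto2. The bucket
# name and helper name are assumptions.
def uploadReportsToS3(bucket_name):
    conn = boto.connect_s3(**credentials())
    bucket = conn.get_bucket(bucket_name)
    for report in (volumes_data_output_file, snapshots_data_output_file,
                   instances_data_output_file, images_data_output_file):
        key = bucket.new_key(report)
        key.set_contents_from_filename(report)
        key.make_public()  # gives each report a public URL, per the TODO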
def main():
regions = getRegions()
#################################################
# debugging goodies #
# reg = regions[3] # ireland #
# ims = getImages(reg)
# im = ims[0]
# vols = getVolumes(reg)
# vol = vols[0]
# ins = getInstances(reg)
# ins0 = ins[0]
# snaps = getSnapshots(reg)
# snap = snaps[0]
# ireland ims[19] has empty string for PROD-tag
# import pdb; pdb.set_trace()
# #
#################################################
# generateInfoVolumes(regions)
# generateInfoSnapshots(regions)
# generateInfoInstances(regions)
generateInfoImages(regions)
if __name__ == '__main__':
main()
| generateInfoVolumes | identifier_name |
enum.go | // Copyright (c) 2017-2018 Alexander Eichhorn
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/QuickTime.html
// http://shadowofged.blogspot.ca/2008/06/itunes-content-ratings.html
package itunes
import (
"bytes"
"strconv"
)
type MediaType int
const (
MediaTypeHomeVideo MediaType = 0 // 0 = Movie (deprecated, use 9 instead)
MediaTypeMusic MediaType = 1 // 1 = Normal (Music)
MediaTypeAudiobook MediaType = 2 // 2 = Audiobook
MediaTypeBookmark MediaType = 5 // 5 = Whacked Bookmark
MediaTypeMusicVideo MediaType = 6 // 6 = Music Video
MediaTypeMovie MediaType = 9 // 9 = Short Film / Movie
MediaTypeTVShow MediaType = 10 // 10 = TV Show
MediaTypeBooklet MediaType = 11 // 11 = Booklet
MediaTypeRingtone MediaType = 14 // 14 = Ringtone
MediaTypePodcast MediaType = 21 // 21 = Podcast
)
func (x MediaType) String() string {
switch x {
case MediaTypeHomeVideo:
return "Home Video"
case MediaTypeMusic:
return "Music"
case MediaTypeAudiobook:
return "Audiobook"
case MediaTypeBookmark:
return "Whacked Bookmark"
case MediaTypeMusicVideo:
return "Music Video"
case MediaTypeMovie:
return "Movie"
case MediaTypeTVShow:
return "TV Show"
case MediaTypeBooklet:
return "Booklet"
case MediaTypeRingtone:
return "Ringtone"
case MediaTypePodcast:
return "Podcast"
default:
buf := bytes.Buffer{}
buf.WriteByte('(')
buf.WriteString(strconv.FormatInt(int64(x), 10))
buf.WriteByte(')')
return buf.String()
}
}
type RatingCode int
const (
RatingCodeNone RatingCode = 0 // 0 = None
RatingCodeExplicit RatingCode = 1 // 1 = Explicit
RatingCodeClean RatingCode = 2 // 2 = Clean
RatingCodeExplicitOld RatingCode = 4 // 4 = Explicit (old)
)
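// Illustrative sketch (not part of the original file): RatingCode could expose
// a String method mirroring MediaType.String above; the labels are assumptions
// taken from the constant comments.
func (x RatingCode) String() string {
	switch x {
	case RatingCodeNone:
		return "None"
	case RatingCodeExplicit:
		return "Explicit"
	case RatingCodeClean:
		return "Clean"
	case RatingCodeExplicitOld:
		return "Explicit (old)"
	default:
		return "(" + strconv.FormatInt(int64(x), 10) + ")"
	}
}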
type PlayGapMode int
const (
PlayGapInsertGap PlayGapMode = 0 // Insert Gap
PlayGapNoGap PlayGapMode = 1 // No Gap
)
type AppleStoreAccountType int
const (
AppleStoreAccountTypeITunes AppleStoreAccountType = 0
AppleStoreAccountTypeAOL AppleStoreAccountType = 1
)
type LocationRole int
const (
LocationRoleShooting LocationRole = 0
LocationRoleReal LocationRole = 1
LocationRoleFictional LocationRole = 2
)
type AppleStoreCountry int
const (
AppleStoreUSA AppleStoreCountry = 143441 // United States
AppleStoreFRA AppleStoreCountry = 143442 // France
AppleStoreDEU AppleStoreCountry = 143443 // Germany
AppleStoreGBR AppleStoreCountry = 143444 // United Kingdom
AppleStoreAUT AppleStoreCountry = 143445 // Austria
AppleStoreBEL AppleStoreCountry = 143446 // Belgium
AppleStoreFIN AppleStoreCountry = 143447 // Finland
AppleStoreGRC AppleStoreCountry = 143448 // Greece
AppleStoreIRL AppleStoreCountry = 143449 // Ireland
AppleStoreITA AppleStoreCountry = 143450 // Italy
AppleStoreLUX AppleStoreCountry = 143451 // Luxembourg
AppleStoreNLD AppleStoreCountry = 143452 // Netherlands
AppleStorePRT AppleStoreCountry = 143453 // Portugal
AppleStoreESP AppleStoreCountry = 143454 // Spain
AppleStoreCAN AppleStoreCountry = 143455 // Canada
AppleStoreSWE AppleStoreCountry = 143456 // Sweden
AppleStoreNOR AppleStoreCountry = 143457 // Norway
AppleStoreDNK AppleStoreCountry = 143458 // Denmark
AppleStoreCHE AppleStoreCountry = 143459 // Switzerland
AppleStoreAUS AppleStoreCountry = 143460 // Australia
AppleStoreNZL AppleStoreCountry = 143461 // New Zealand
AppleStoreJPN AppleStoreCountry = 143462 // Japan
AppleStoreHKG AppleStoreCountry = 143463 // Hong Kong
AppleStoreSGP AppleStoreCountry = 143464 // Singapore
AppleStoreCHN AppleStoreCountry = 143465 // China
AppleStoreKOR AppleStoreCountry = 143466 // Republic of Korea
AppleStoreIND AppleStoreCountry = 143467 // India
AppleStoreMEX AppleStoreCountry = 143468 // Mexico
AppleStoreRUS AppleStoreCountry = 143469 // Russia
AppleStoreTWN AppleStoreCountry = 143470 // Taiwan
AppleStoreVNM AppleStoreCountry = 143471 // Vietnam
AppleStoreZAF AppleStoreCountry = 143472 // South Africa
AppleStoreMYS AppleStoreCountry = 143473 // Malaysia
AppleStorePHL AppleStoreCountry = 143474 // Philippines
AppleStoreTHA AppleStoreCountry = 143475 // Thailand
AppleStoreIDN AppleStoreCountry = 143476 // Indonesia
AppleStorePAK AppleStoreCountry = 143477 // Pakistan
AppleStorePOL AppleStoreCountry = 143478 // Poland
AppleStoreSAU AppleStoreCountry = 143479 // Saudi Arabia
AppleStoreTUR AppleStoreCountry = 143480 // Turkey
AppleStoreARE AppleStoreCountry = 143481 // United Arab Emirates
AppleStoreHUN AppleStoreCountry = 143482 // Hungary
AppleStoreCHL AppleStoreCountry = 143483 // Chile
AppleStoreNPL AppleStoreCountry = 143484 // Nepal
AppleStorePAN AppleStoreCountry = 143485 // Panama
AppleStoreLKA AppleStoreCountry = 143486 // Sri Lanka
AppleStoreROU AppleStoreCountry = 143487 // Romania
AppleStoreCZE AppleStoreCountry = 143489 // Czech Republic
AppleStoreISR AppleStoreCountry = 143491 // Israel
AppleStoreUKR AppleStoreCountry = 143492 // Ukraine
AppleStoreKWT AppleStoreCountry = 143493 // Kuwait
AppleStoreHRV AppleStoreCountry = 143494 // Croatia
AppleStoreCRI AppleStoreCountry = 143495 // Costa Rica
AppleStoreSVK AppleStoreCountry = 143496 // Slovakia
AppleStoreLBN AppleStoreCountry = 143497 // Lebanon
AppleStoreQAT AppleStoreCountry = 143498 // Qatar
AppleStoreSVN AppleStoreCountry = 143499 // Slovenia
AppleStoreCOL AppleStoreCountry = 143501 // Colombia
AppleStoreVEN AppleStoreCountry = 143502 // Venezuela
AppleStoreBRA AppleStoreCountry = 143503 // Brazil
AppleStoreGTM AppleStoreCountry = 143504 // Guatemala
AppleStoreARG AppleStoreCountry = 143505 // Argentina
AppleStoreSLV AppleStoreCountry = 143506 // El Salvador
AppleStorePER AppleStoreCountry = 143507 // Peru
AppleStoreDOM AppleStoreCountry = 143508 // Dominican Republic
AppleStoreECU AppleStoreCountry = 143509 // Ecuador
AppleStoreHND AppleStoreCountry = 143510 // Honduras
AppleStoreJAM AppleStoreCountry = 143511 // Jamaica
AppleStoreNIC AppleStoreCountry = 143512 // Nicaragua
AppleStorePRY AppleStoreCountry = 143513 // Paraguay
AppleStoreURY AppleStoreCountry = 143514 // Uruguay
AppleStoreMAC AppleStoreCountry = 143515 // Macau
AppleStoreEGY AppleStoreCountry = 143516 // Egypt
AppleStoreKAZ AppleStoreCountry = 143517 // Kazakhstan
AppleStoreEST AppleStoreCountry = 143518 // Estonia
AppleStoreLVA AppleStoreCountry = 143519 // Latvia
AppleStoreLTU AppleStoreCountry = 143520 // Lithuania
AppleStoreMLT AppleStoreCountry = 143521 // Malta
AppleStoreMDA AppleStoreCountry = 143523 // Moldova
AppleStoreARM AppleStoreCountry = 143524 // Armenia
AppleStoreBWA AppleStoreCountry = 143525 // Botswana
AppleStoreBGR AppleStoreCountry = 143526 // Bulgaria
AppleStoreJOR AppleStoreCountry = 143528 // Jordan
AppleStoreKEN AppleStoreCountry = 143529 // Kenya
AppleStoreMKD AppleStoreCountry = 143530 // Macedonia
AppleStoreMDG AppleStoreCountry = 143531 // Madagascar
AppleStoreMLI AppleStoreCountry = 143532 // Mali
AppleStoreMUS AppleStoreCountry = 143533 // Mauritius
AppleStoreNER AppleStoreCountry = 143534 // Niger
AppleStoreSEN AppleStoreCountry = 143535 // Senegal
AppleStoreTUN AppleStoreCountry = 143536 // Tunisia
AppleStoreUGA AppleStoreCountry = 143537 // Uganda
AppleStoreAIA AppleStoreCountry = 143538 // Anguilla
AppleStoreBHS AppleStoreCountry = 143539 // Bahamas
AppleStoreATG AppleStoreCountry = 143540 // Antigua and Barbuda
AppleStoreBRB AppleStoreCountry = 143541 // Barbados
AppleStoreBMU AppleStoreCountry = 143542 // Bermuda
AppleStoreVGB AppleStoreCountry = 143543 // British Virgin Islands
AppleStoreCYM AppleStoreCountry = 143544 // Cayman Islands | AppleStoreDMA AppleStoreCountry = 143545 // Dominica
AppleStoreGRD AppleStoreCountry = 143546 // Grenada
AppleStoreMSR AppleStoreCountry = 143547 // Montserrat
AppleStoreKNA AppleStoreCountry = 143548 // St. Kitts and Nevis
AppleStoreLCA AppleStoreCountry = 143549 // St. Lucia
AppleStoreVCT AppleStoreCountry = 143550 // St. Vincent and The Grenadines
AppleStoreTTO AppleStoreCountry = 143551 // Trinidad and Tobago
AppleStoreTCA AppleStoreCountry = 143552 // Turks and Caicos
AppleStoreGUY AppleStoreCountry = 143553 // Guyana
AppleStoreSUR AppleStoreCountry = 143554 // Suriname
AppleStoreBLZ AppleStoreCountry = 143555 // Belize
AppleStoreBOL AppleStoreCountry = 143556 // Bolivia
AppleStoreCYP AppleStoreCountry = 143557 // Cyprus
AppleStoreISL AppleStoreCountry = 143558 // Iceland
AppleStoreBHR AppleStoreCountry = 143559 // Bahrain
AppleStoreBRN AppleStoreCountry = 143560 // Brunei Darussalam
AppleStoreNGA AppleStoreCountry = 143561 // Nigeria
AppleStoreOMN AppleStoreCountry = 143562 // Oman
AppleStoreDZA AppleStoreCountry = 143563 // Algeria
AppleStoreAGO AppleStoreCountry = 143564 // Angola
AppleStoreBLR AppleStoreCountry = 143565 // Belarus
AppleStoreUZB AppleStoreCountry = 143566 // Uzbekistan
AppleStoreAZE AppleStoreCountry = 143568 // Azerbaijan
AppleStoreYEM AppleStoreCountry = 143571 // Yemen
AppleStoreTZA AppleStoreCountry = 143572 // Tanzania
AppleStoreGHA AppleStoreCountry = 143573 // Ghana
AppleStoreALB AppleStoreCountry = 143575 // Albania
AppleStoreBEN AppleStoreCountry = 143576 // Benin
AppleStoreBTN AppleStoreCountry = 143577 // Bhutan
AppleStoreBFA AppleStoreCountry = 143578 // Burkina Faso
AppleStoreKHM AppleStoreCountry = 143579 // Cambodia
AppleStoreCPV AppleStoreCountry = 143580 // Cape Verde
AppleStoreTCD AppleStoreCountry = 143581 // Chad
AppleStoreCOG AppleStoreCountry = 143582 // Republic of the Congo
AppleStoreFJI AppleStoreCountry = 143583 // Fiji
AppleStoreGMB AppleStoreCountry = 143584 // Gambia
AppleStoreGNB AppleStoreCountry = 143585 // Guinea-Bissau
AppleStoreKGZ AppleStoreCountry = 143586 // Kyrgyzstan
AppleStoreLAO AppleStoreCountry = 143587 // Lao People's Democratic Republic
AppleStoreLBR AppleStoreCountry = 143588 // Liberia
AppleStoreMWI AppleStoreCountry = 143589 // Malawi
AppleStoreMRT AppleStoreCountry = 143590 // Mauritania
AppleStoreFSM AppleStoreCountry = 143591 // Federated States of Micronesia
AppleStoreMNG AppleStoreCountry = 143592 // Mongolia
AppleStoreMOZ AppleStoreCountry = 143593 // Mozambique
AppleStoreNAM AppleStoreCountry = 143594 // Namibia
AppleStorePLW AppleStoreCountry = 143595 // Palau
AppleStorePNG AppleStoreCountry = 143597 // Papua New Guinea
AppleStoreSTP AppleStoreCountry = 143598 // Sao Tome and Principe
AppleStoreSYC AppleStoreCountry = 143599 // Seychelles
AppleStoreSLE AppleStoreCountry = 143600 // Sierra Leone
AppleStoreSLB AppleStoreCountry = 143601 // Solomon Islands
AppleStoreSWZ AppleStoreCountry = 143602 // Swaziland
AppleStoreTJK AppleStoreCountry = 143603 // Tajikistan
	AppleStoreTKM AppleStoreCountry = 143604 // Turkmenistan
AppleStoreZWE AppleStoreCountry = 143605 // Zimbabwe
)
// iTunes Genre category, genre and subgenre
// https://affiliate.itunes.apple.com/resources/documentation/genre-mapping/
// https://itunes.apple.com/WebObjects/MZStoreServices.woa/ws/genres
type GenreID int
// ID3v1 Genre id
type GenreCode byte | random_line_split | |
enum.go | // Copyright (c) 2017-2018 Alexander Eichhorn
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/QuickTime.html
// http://shadowofged.blogspot.ca/2008/06/itunes-content-ratings.html
package itunes
import (
"bytes"
"strconv"
)
type MediaType int
const (
MediaTypeHomeVideo MediaType = 0 // 0 = Movie (deprecated, use 9 instead)
MediaTypeMusic MediaType = 1 // 1 = Normal (Music)
MediaTypeAudiobook MediaType = 2 // 2 = Audiobook
MediaTypeBookmark MediaType = 5 // 5 = Whacked Bookmark
MediaTypeMusicVideo MediaType = 6 // 6 = Music Video
MediaTypeMovie MediaType = 9 // 9 = Short Film / Movie
MediaTypeTVShow MediaType = 10 // 10 = TV Show
MediaTypeBooklet MediaType = 11 // 11 = Booklet
MediaTypeRingtone MediaType = 14 // 14 = Ringtone
MediaTypePodcast MediaType = 21 // 21 = Podcast
)
func (x MediaType) String() string |
type RatingCode int
const (
RatingCodeNone RatingCode = 0 // 0 = None
RatingCodeExplicit RatingCode = 1 // 1 = Explicit
RatingCodeClean RatingCode = 2 // 2 = Clean
RatingCodeExplicitOld RatingCode = 4 // 4 = Explicit (old)
)
type PlayGapMode int
const (
PlayGapInsertGap PlayGapMode = 0 // Insert Gap
PlayGapNoGap PlayGapMode = 1 // No Gap
)
type AppleStoreAccountType int
const (
AppleStoreAccountTypeITunes AppleStoreAccountType = 0
AppleStoreAccountTypeAOL AppleStoreAccountType = 1
)
type LocationRole int
const (
LocationRoleShooting LocationRole = 0
LocationRoleReal LocationRole = 1
LocationRoleFictional LocationRole = 2
)
type AppleStoreCountry int
const (
AppleStoreUSA AppleStoreCountry = 143441 // United States
AppleStoreFRA AppleStoreCountry = 143442 // France
AppleStoreDEU AppleStoreCountry = 143443 // Germany
AppleStoreGBR AppleStoreCountry = 143444 // United Kingdom
AppleStoreAUT AppleStoreCountry = 143445 // Austria
AppleStoreBEL AppleStoreCountry = 143446 // Belgium
AppleStoreFIN AppleStoreCountry = 143447 // Finland
AppleStoreGRC AppleStoreCountry = 143448 // Greece
AppleStoreIRL AppleStoreCountry = 143449 // Ireland
AppleStoreITA AppleStoreCountry = 143450 // Italy
AppleStoreLUX AppleStoreCountry = 143451 // Luxembourg
AppleStoreNLD AppleStoreCountry = 143452 // Netherlands
AppleStorePRT AppleStoreCountry = 143453 // Portugal
AppleStoreESP AppleStoreCountry = 143454 // Spain
AppleStoreCAN AppleStoreCountry = 143455 // Canada
AppleStoreSWE AppleStoreCountry = 143456 // Sweden
AppleStoreNOR AppleStoreCountry = 143457 // Norway
AppleStoreDNK AppleStoreCountry = 143458 // Denmark
AppleStoreCHE AppleStoreCountry = 143459 // Switzerland
AppleStoreAUS AppleStoreCountry = 143460 // Australia
AppleStoreNZL AppleStoreCountry = 143461 // New Zealand
AppleStoreJPN AppleStoreCountry = 143462 // Japan
AppleStoreHKG AppleStoreCountry = 143463 // Hong Kong
AppleStoreSGP AppleStoreCountry = 143464 // Singapore
AppleStoreCHN AppleStoreCountry = 143465 // China
AppleStoreKOR AppleStoreCountry = 143466 // Republic of Korea
AppleStoreIND AppleStoreCountry = 143467 // India
AppleStoreMEX AppleStoreCountry = 143468 // Mexico
AppleStoreRUS AppleStoreCountry = 143469 // Russia
AppleStoreTWN AppleStoreCountry = 143470 // Taiwan
AppleStoreVNM AppleStoreCountry = 143471 // Vietnam
AppleStoreZAF AppleStoreCountry = 143472 // South Africa
AppleStoreMYS AppleStoreCountry = 143473 // Malaysia
AppleStorePHL AppleStoreCountry = 143474 // Philippines
AppleStoreTHA AppleStoreCountry = 143475 // Thailand
AppleStoreIDN AppleStoreCountry = 143476 // Indonesia
AppleStorePAK AppleStoreCountry = 143477 // Pakistan
AppleStorePOL AppleStoreCountry = 143478 // Poland
AppleStoreSAU AppleStoreCountry = 143479 // Saudi Arabia
AppleStoreTUR AppleStoreCountry = 143480 // Turkey
AppleStoreARE AppleStoreCountry = 143481 // United Arab Emirates
AppleStoreHUN AppleStoreCountry = 143482 // Hungary
AppleStoreCHL AppleStoreCountry = 143483 // Chile
AppleStoreNPL AppleStoreCountry = 143484 // Nepal
AppleStorePAN AppleStoreCountry = 143485 // Panama
AppleStoreLKA AppleStoreCountry = 143486 // Sri Lanka
AppleStoreROU AppleStoreCountry = 143487 // Romania
AppleStoreCZE AppleStoreCountry = 143489 // Czech Republic
AppleStoreISR AppleStoreCountry = 143491 // Israel
AppleStoreUKR AppleStoreCountry = 143492 // Ukraine
AppleStoreKWT AppleStoreCountry = 143493 // Kuwait
AppleStoreHRV AppleStoreCountry = 143494 // Croatia
AppleStoreCRI AppleStoreCountry = 143495 // Costa Rica
AppleStoreSVK AppleStoreCountry = 143496 // Slovakia
AppleStoreLBN AppleStoreCountry = 143497 // Lebanon
AppleStoreQAT AppleStoreCountry = 143498 // Qatar
AppleStoreSVN AppleStoreCountry = 143499 // Slovenia
AppleStoreCOL AppleStoreCountry = 143501 // Colombia
AppleStoreVEN AppleStoreCountry = 143502 // Venezuela
AppleStoreBRA AppleStoreCountry = 143503 // Brazil
AppleStoreGTM AppleStoreCountry = 143504 // Guatemala
AppleStoreARG AppleStoreCountry = 143505 // Argentina
AppleStoreSLV AppleStoreCountry = 143506 // El Salvador
AppleStorePER AppleStoreCountry = 143507 // Peru
AppleStoreDOM AppleStoreCountry = 143508 // Dominican Republic
AppleStoreECU AppleStoreCountry = 143509 // Ecuador
AppleStoreHND AppleStoreCountry = 143510 // Honduras
AppleStoreJAM AppleStoreCountry = 143511 // Jamaica
AppleStoreNIC AppleStoreCountry = 143512 // Nicaragua
AppleStorePRY AppleStoreCountry = 143513 // Paraguay
AppleStoreURY AppleStoreCountry = 143514 // Uruguay
AppleStoreMAC AppleStoreCountry = 143515 // Macau
AppleStoreEGY AppleStoreCountry = 143516 // Egypt
AppleStoreKAZ AppleStoreCountry = 143517 // Kazakhstan
AppleStoreEST AppleStoreCountry = 143518 // Estonia
AppleStoreLVA AppleStoreCountry = 143519 // Latvia
AppleStoreLTU AppleStoreCountry = 143520 // Lithuania
AppleStoreMLT AppleStoreCountry = 143521 // Malta
AppleStoreMDA AppleStoreCountry = 143523 // Moldova
AppleStoreARM AppleStoreCountry = 143524 // Armenia
AppleStoreBWA AppleStoreCountry = 143525 // Botswana
AppleStoreBGR AppleStoreCountry = 143526 // Bulgaria
AppleStoreJOR AppleStoreCountry = 143528 // Jordan
AppleStoreKEN AppleStoreCountry = 143529 // Kenya
AppleStoreMKD AppleStoreCountry = 143530 // Macedonia
AppleStoreMDG AppleStoreCountry = 143531 // Madagascar
AppleStoreMLI AppleStoreCountry = 143532 // Mali
AppleStoreMUS AppleStoreCountry = 143533 // Mauritius
AppleStoreNER AppleStoreCountry = 143534 // Niger
AppleStoreSEN AppleStoreCountry = 143535 // Senegal
AppleStoreTUN AppleStoreCountry = 143536 // Tunisia
AppleStoreUGA AppleStoreCountry = 143537 // Uganda
AppleStoreAIA AppleStoreCountry = 143538 // Anguilla
AppleStoreBHS AppleStoreCountry = 143539 // Bahamas
AppleStoreATG AppleStoreCountry = 143540 // Antigua and Barbuda
AppleStoreBRB AppleStoreCountry = 143541 // Barbados
AppleStoreBMU AppleStoreCountry = 143542 // Bermuda
AppleStoreVGB AppleStoreCountry = 143543 // British Virgin Islands
AppleStoreCYM AppleStoreCountry = 143544 // Cayman Islands
AppleStoreDMA AppleStoreCountry = 143545 // Dominica
AppleStoreGRD AppleStoreCountry = 143546 // Grenada
AppleStoreMSR AppleStoreCountry = 143547 // Montserrat
AppleStoreKNA AppleStoreCountry = 143548 // St. Kitts and Nevis
AppleStoreLCA AppleStoreCountry = 143549 // St. Lucia
AppleStoreVCT AppleStoreCountry = 143550 // St. Vincent and The Grenadines
AppleStoreTTO AppleStoreCountry = 143551 // Trinidad and Tobago
AppleStoreTCA AppleStoreCountry = 143552 // Turks and Caicos
AppleStoreGUY AppleStoreCountry = 143553 // Guyana
AppleStoreSUR AppleStoreCountry = 143554 // Suriname
AppleStoreBLZ AppleStoreCountry = 143555 // Belize
AppleStoreBOL AppleStoreCountry = 143556 // Bolivia
AppleStoreCYP AppleStoreCountry = 143557 // Cyprus
AppleStoreISL AppleStoreCountry = 143558 // Iceland
AppleStoreBHR AppleStoreCountry = 143559 // Bahrain
AppleStoreBRN AppleStoreCountry = 143560 // Brunei Darussalam
AppleStoreNGA AppleStoreCountry = 143561 // Nigeria
AppleStoreOMN AppleStoreCountry = 143562 // Oman
AppleStoreDZA AppleStoreCountry = 143563 // Algeria
AppleStoreAGO AppleStoreCountry = 143564 // Angola
AppleStoreBLR AppleStoreCountry = 143565 // Belarus
AppleStoreUZB AppleStoreCountry = 143566 // Uzbekistan
AppleStoreAZE AppleStoreCountry = 143568 // Azerbaijan
AppleStoreYEM AppleStoreCountry = 143571 // Yemen
AppleStoreTZA AppleStoreCountry = 143572 // Tanzania
AppleStoreGHA AppleStoreCountry = 143573 // Ghana
AppleStoreALB AppleStoreCountry = 143575 // Albania
AppleStoreBEN AppleStoreCountry = 143576 // Benin
AppleStoreBTN AppleStoreCountry = 143577 // Bhutan
AppleStoreBFA AppleStoreCountry = 143578 // Burkina Faso
AppleStoreKHM AppleStoreCountry = 143579 // Cambodia
AppleStoreCPV AppleStoreCountry = 143580 // Cape Verde
AppleStoreTCD AppleStoreCountry = 143581 // Chad
AppleStoreCOG AppleStoreCountry = 143582 // Republic of the Congo
AppleStoreFJI AppleStoreCountry = 143583 // Fiji
AppleStoreGMB AppleStoreCountry = 143584 // Gambia
AppleStoreGNB AppleStoreCountry = 143585 // Guinea-Bissau
AppleStoreKGZ AppleStoreCountry = 143586 // Kyrgyzstan
AppleStoreLAO AppleStoreCountry = 143587 // Lao People's Democratic Republic
AppleStoreLBR AppleStoreCountry = 143588 // Liberia
AppleStoreMWI AppleStoreCountry = 143589 // Malawi
AppleStoreMRT AppleStoreCountry = 143590 // Mauritania
AppleStoreFSM AppleStoreCountry = 143591 // Federated States of Micronesia
AppleStoreMNG AppleStoreCountry = 143592 // Mongolia
AppleStoreMOZ AppleStoreCountry = 143593 // Mozambique
AppleStoreNAM AppleStoreCountry = 143594 // Namibia
AppleStorePLW AppleStoreCountry = 143595 // Palau
AppleStorePNG AppleStoreCountry = 143597 // Papua New Guinea
AppleStoreSTP AppleStoreCountry = 143598 // Sao Tome and Principe
AppleStoreSYC AppleStoreCountry = 143599 // Seychelles
AppleStoreSLE AppleStoreCountry = 143600 // Sierra Leone
AppleStoreSLB AppleStoreCountry = 143601 // Solomon Islands
AppleStoreSWZ AppleStoreCountry = 143602 // Swaziland
AppleStoreTJK AppleStoreCountry = 143603 // Tajikistan
	AppleStoreTKM AppleStoreCountry = 143604 // Turkmenistan
AppleStoreZWE AppleStoreCountry = 143605 // Zimbabwe
)
// iTunes Genre category, genre and subgenre
// https://affiliate.itunes.apple.com/resources/documentation/genre-mapping/
// https://itunes.apple.com/WebObjects/MZStoreServices.woa/ws/genres
type GenreID int
// ID3v1 Genre id
type GenreCode byte
| {
switch x {
case MediaTypeHomeVideo:
return "Home Video"
case MediaTypeMusic:
return "Music"
case MediaTypeAudiobook:
return "Audiobook"
case MediaTypeBookmark:
return "Whacked Bookmark"
case MediaTypeMusicVideo:
return "Music Video"
case MediaTypeMovie:
return "Movie"
case MediaTypeTVShow:
return "TV Show"
case MediaTypeBooklet:
return "Booklet"
case MediaTypeRingtone:
return "Ringtone"
case MediaTypePodcast:
return "Podcast"
default:
buf := bytes.Buffer{}
buf.WriteByte('(')
buf.WriteString(strconv.FormatInt(int64(x), 10))
buf.WriteByte(')')
return buf.String()
}
} | identifier_body |
enum.go | // Copyright (c) 2017-2018 Alexander Eichhorn
//
// Licensed under the Apache License, Version 2.0 (the "License"): you may
// not use this file except in compliance with the License. You may obtain
// a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations
// under the License.
// http://www.sno.phy.queensu.ca/~phil/exiftool/TagNames/QuickTime.html
// http://shadowofged.blogspot.ca/2008/06/itunes-content-ratings.html
package itunes
import (
"bytes"
"strconv"
)
type MediaType int
const (
MediaTypeHomeVideo MediaType = 0 // 0 = Movie (deprecated, use 9 instead)
MediaTypeMusic MediaType = 1 // 1 = Normal (Music)
MediaTypeAudiobook MediaType = 2 // 2 = Audiobook
MediaTypeBookmark MediaType = 5 // 5 = Whacked Bookmark
MediaTypeMusicVideo MediaType = 6 // 6 = Music Video
MediaTypeMovie MediaType = 9 // 9 = Short Film / Movie
MediaTypeTVShow MediaType = 10 // 10 = TV Show
MediaTypeBooklet MediaType = 11 // 11 = Booklet
MediaTypeRingtone MediaType = 14 // 14 = Ringtone
MediaTypePodcast MediaType = 21 // 21 = Podcast
)
func (x MediaType) | () string {
switch x {
case MediaTypeHomeVideo:
return "Home Video"
case MediaTypeMusic:
return "Music"
case MediaTypeAudiobook:
return "Audiobook"
case MediaTypeBookmark:
return "Whacked Bookmark"
case MediaTypeMusicVideo:
return "Music Video"
case MediaTypeMovie:
return "Movie"
case MediaTypeTVShow:
return "TV Show"
case MediaTypeBooklet:
return "Booklet"
case MediaTypeRingtone:
return "Ringtone"
case MediaTypePodcast:
return "Podcast"
default:
buf := bytes.Buffer{}
buf.WriteByte('(')
buf.WriteString(strconv.FormatInt(int64(x), 10))
buf.WriteByte(')')
return buf.String()
}
}
type RatingCode int
const (
RatingCodeNone RatingCode = 0 // 0 = None
RatingCodeExplicit RatingCode = 1 // 1 = Explicit
RatingCodeClean RatingCode = 2 // 2 = Clean
RatingCodeExplicitOld RatingCode = 4 // 4 = Explicit (old)
)
type PlayGapMode int
const (
PlayGapInsertGap PlayGapMode = 0 // Insert Gap
PlayGapNoGap PlayGapMode = 1 // No Gap
)
type AppleStoreAccountType int
const (
AppleStoreAccountTypeITunes AppleStoreAccountType = 0
AppleStoreAccountTypeAOL AppleStoreAccountType = 1
)
type LocationRole int
const (
LocationRoleShooting LocationRole = 0
LocationRoleReal LocationRole = 1
LocationRoleFictional LocationRole = 2
)
type AppleStoreCountry int
const (
AppleStoreUSA AppleStoreCountry = 143441 // United States
AppleStoreFRA AppleStoreCountry = 143442 // France
AppleStoreDEU AppleStoreCountry = 143443 // Germany
AppleStoreGBR AppleStoreCountry = 143444 // United Kingdom
AppleStoreAUT AppleStoreCountry = 143445 // Austria
AppleStoreBEL AppleStoreCountry = 143446 // Belgium
AppleStoreFIN AppleStoreCountry = 143447 // Finland
AppleStoreGRC AppleStoreCountry = 143448 // Greece
AppleStoreIRL AppleStoreCountry = 143449 // Ireland
AppleStoreITA AppleStoreCountry = 143450 // Italy
AppleStoreLUX AppleStoreCountry = 143451 // Luxembourg
AppleStoreNLD AppleStoreCountry = 143452 // Netherlands
AppleStorePRT AppleStoreCountry = 143453 // Portugal
AppleStoreESP AppleStoreCountry = 143454 // Spain
AppleStoreCAN AppleStoreCountry = 143455 // Canada
AppleStoreSWE AppleStoreCountry = 143456 // Sweden
AppleStoreNOR AppleStoreCountry = 143457 // Norway
AppleStoreDNK AppleStoreCountry = 143458 // Denmark
AppleStoreCHE AppleStoreCountry = 143459 // Switzerland
AppleStoreAUS AppleStoreCountry = 143460 // Australia
AppleStoreNZL AppleStoreCountry = 143461 // New Zealand
AppleStoreJPN AppleStoreCountry = 143462 // Japan
AppleStoreHKG AppleStoreCountry = 143463 // Hong Kong
AppleStoreSGP AppleStoreCountry = 143464 // Singapore
AppleStoreCHN AppleStoreCountry = 143465 // China
AppleStoreKOR AppleStoreCountry = 143466 // Republic of Korea
AppleStoreIND AppleStoreCountry = 143467 // India
AppleStoreMEX AppleStoreCountry = 143468 // Mexico
AppleStoreRUS AppleStoreCountry = 143469 // Russia
AppleStoreTWN AppleStoreCountry = 143470 // Taiwan
AppleStoreVNM AppleStoreCountry = 143471 // Vietnam
AppleStoreZAF AppleStoreCountry = 143472 // South Africa
AppleStoreMYS AppleStoreCountry = 143473 // Malaysia
AppleStorePHL AppleStoreCountry = 143474 // Philippines
AppleStoreTHA AppleStoreCountry = 143475 // Thailand
AppleStoreIDN AppleStoreCountry = 143476 // Indonesia
AppleStorePAK AppleStoreCountry = 143477 // Pakistan
AppleStorePOL AppleStoreCountry = 143478 // Poland
AppleStoreSAU AppleStoreCountry = 143479 // Saudi Arabia
AppleStoreTUR AppleStoreCountry = 143480 // Turkey
AppleStoreARE AppleStoreCountry = 143481 // United Arab Emirates
AppleStoreHUN AppleStoreCountry = 143482 // Hungary
AppleStoreCHL AppleStoreCountry = 143483 // Chile
AppleStoreNPL AppleStoreCountry = 143484 // Nepal
AppleStorePAN AppleStoreCountry = 143485 // Panama
AppleStoreLKA AppleStoreCountry = 143486 // Sri Lanka
AppleStoreROU AppleStoreCountry = 143487 // Romania
AppleStoreCZE AppleStoreCountry = 143489 // Czech Republic
AppleStoreISR AppleStoreCountry = 143491 // Israel
AppleStoreUKR AppleStoreCountry = 143492 // Ukraine
AppleStoreKWT AppleStoreCountry = 143493 // Kuwait
AppleStoreHRV AppleStoreCountry = 143494 // Croatia
AppleStoreCRI AppleStoreCountry = 143495 // Costa Rica
AppleStoreSVK AppleStoreCountry = 143496 // Slovakia
AppleStoreLBN AppleStoreCountry = 143497 // Lebanon
AppleStoreQAT AppleStoreCountry = 143498 // Qatar
AppleStoreSVN AppleStoreCountry = 143499 // Slovenia
AppleStoreCOL AppleStoreCountry = 143501 // Colombia
AppleStoreVEN AppleStoreCountry = 143502 // Venezuela
AppleStoreBRA AppleStoreCountry = 143503 // Brazil
AppleStoreGTM AppleStoreCountry = 143504 // Guatemala
AppleStoreARG AppleStoreCountry = 143505 // Argentina
AppleStoreSLV AppleStoreCountry = 143506 // El Salvador
AppleStorePER AppleStoreCountry = 143507 // Peru
AppleStoreDOM AppleStoreCountry = 143508 // Dominican Republic
AppleStoreECU AppleStoreCountry = 143509 // Ecuador
AppleStoreHND AppleStoreCountry = 143510 // Honduras
AppleStoreJAM AppleStoreCountry = 143511 // Jamaica
AppleStoreNIC AppleStoreCountry = 143512 // Nicaragua
AppleStorePRY AppleStoreCountry = 143513 // Paraguay
AppleStoreURY AppleStoreCountry = 143514 // Uruguay
AppleStoreMAC AppleStoreCountry = 143515 // Macau
AppleStoreEGY AppleStoreCountry = 143516 // Egypt
AppleStoreKAZ AppleStoreCountry = 143517 // Kazakhstan
AppleStoreEST AppleStoreCountry = 143518 // Estonia
AppleStoreLVA AppleStoreCountry = 143519 // Latvia
AppleStoreLTU AppleStoreCountry = 143520 // Lithuania
AppleStoreMLT AppleStoreCountry = 143521 // Malta
AppleStoreMDA AppleStoreCountry = 143523 // Moldova
AppleStoreARM AppleStoreCountry = 143524 // Armenia
AppleStoreBWA AppleStoreCountry = 143525 // Botswana
AppleStoreBGR AppleStoreCountry = 143526 // Bulgaria
AppleStoreJOR AppleStoreCountry = 143528 // Jordan
AppleStoreKEN AppleStoreCountry = 143529 // Kenya
AppleStoreMKD AppleStoreCountry = 143530 // Macedonia
AppleStoreMDG AppleStoreCountry = 143531 // Madagascar
AppleStoreMLI AppleStoreCountry = 143532 // Mali
AppleStoreMUS AppleStoreCountry = 143533 // Mauritius
AppleStoreNER AppleStoreCountry = 143534 // Niger
AppleStoreSEN AppleStoreCountry = 143535 // Senegal
AppleStoreTUN AppleStoreCountry = 143536 // Tunisia
AppleStoreUGA AppleStoreCountry = 143537 // Uganda
AppleStoreAIA AppleStoreCountry = 143538 // Anguilla
AppleStoreBHS AppleStoreCountry = 143539 // Bahamas
AppleStoreATG AppleStoreCountry = 143540 // Antigua and Barbuda
AppleStoreBRB AppleStoreCountry = 143541 // Barbados
AppleStoreBMU AppleStoreCountry = 143542 // Bermuda
AppleStoreVGB AppleStoreCountry = 143543 // British Virgin Islands
AppleStoreCYM AppleStoreCountry = 143544 // Cayman Islands
AppleStoreDMA AppleStoreCountry = 143545 // Dominica
AppleStoreGRD AppleStoreCountry = 143546 // Grenada
AppleStoreMSR AppleStoreCountry = 143547 // Montserrat
AppleStoreKNA AppleStoreCountry = 143548 // St. Kitts and Nevis
AppleStoreLCA AppleStoreCountry = 143549 // St. Lucia
AppleStoreVCT AppleStoreCountry = 143550 // St. Vincent and The Grenadines
AppleStoreTTO AppleStoreCountry = 143551 // Trinidad and Tobago
AppleStoreTCA AppleStoreCountry = 143552 // Turks and Caicos
AppleStoreGUY AppleStoreCountry = 143553 // Guyana
AppleStoreSUR AppleStoreCountry = 143554 // Suriname
AppleStoreBLZ AppleStoreCountry = 143555 // Belize
AppleStoreBOL AppleStoreCountry = 143556 // Bolivia
AppleStoreCYP AppleStoreCountry = 143557 // Cyprus
AppleStoreISL AppleStoreCountry = 143558 // Iceland
AppleStoreBHR AppleStoreCountry = 143559 // Bahrain
AppleStoreBRN AppleStoreCountry = 143560 // Brunei Darussalam
AppleStoreNGA AppleStoreCountry = 143561 // Nigeria
AppleStoreOMN AppleStoreCountry = 143562 // Oman
AppleStoreDZA AppleStoreCountry = 143563 // Algeria
AppleStoreAGO AppleStoreCountry = 143564 // Angola
AppleStoreBLR AppleStoreCountry = 143565 // Belarus
AppleStoreUZB AppleStoreCountry = 143566 // Uzbekistan
AppleStoreAZE AppleStoreCountry = 143568 // Azerbaijan
AppleStoreYEM AppleStoreCountry = 143571 // Yemen
AppleStoreTZA AppleStoreCountry = 143572 // Tanzania
AppleStoreGHA AppleStoreCountry = 143573 // Ghana
AppleStoreALB AppleStoreCountry = 143575 // Albania
AppleStoreBEN AppleStoreCountry = 143576 // Benin
AppleStoreBTN AppleStoreCountry = 143577 // Bhutan
AppleStoreBFA AppleStoreCountry = 143578 // Burkina Faso
AppleStoreKHM AppleStoreCountry = 143579 // Cambodia
AppleStoreCPV AppleStoreCountry = 143580 // Cape Verde
AppleStoreTCD AppleStoreCountry = 143581 // Chad
AppleStoreCOG AppleStoreCountry = 143582 // Republic of the Congo
AppleStoreFJI AppleStoreCountry = 143583 // Fiji
AppleStoreGMB AppleStoreCountry = 143584 // Gambia
AppleStoreGNB AppleStoreCountry = 143585 // Guinea-Bissau
AppleStoreKGZ AppleStoreCountry = 143586 // Kyrgyzstan
AppleStoreLAO AppleStoreCountry = 143587 // Lao People's Democratic Republic
AppleStoreLBR AppleStoreCountry = 143588 // Liberia
AppleStoreMWI AppleStoreCountry = 143589 // Malawi
AppleStoreMRT AppleStoreCountry = 143590 // Mauritania
AppleStoreFSM AppleStoreCountry = 143591 // Federated States of Micronesia
AppleStoreMNG AppleStoreCountry = 143592 // Mongolia
AppleStoreMOZ AppleStoreCountry = 143593 // Mozambique
AppleStoreNAM AppleStoreCountry = 143594 // Namibia
AppleStorePLW AppleStoreCountry = 143595 // Palau
AppleStorePNG AppleStoreCountry = 143597 // Papua New Guinea
AppleStoreSTP AppleStoreCountry = 143598 // Sao Tome and Principe
AppleStoreSYC AppleStoreCountry = 143599 // Seychelles
AppleStoreSLE AppleStoreCountry = 143600 // Sierra Leone
AppleStoreSLB AppleStoreCountry = 143601 // Solomon Islands
AppleStoreSWZ AppleStoreCountry = 143602 // Swaziland
AppleStoreTJK AppleStoreCountry = 143603 // Tajikistan
	AppleStoreTKM AppleStoreCountry = 143604 // Turkmenistan
AppleStoreZWE AppleStoreCountry = 143605 // Zimbabwe
)
// iTunes Genre category, genre and subgenre
// https://affiliate.itunes.apple.com/resources/documentation/genre-mapping/
// https://itunes.apple.com/WebObjects/MZStoreServices.woa/ws/genres
type GenreID int
// ID3v1 Genre id
type GenreCode byte
| String | identifier_name |
insert_organisations.py | #!/usr/bin/env python3
# pylint: disable=wrong-import-position
# Adding working directory to system path
import sys
import time
import json
import logging
import argparse
import Levenshtein
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
sys.path.append(".")
from mysql import mysql
from model import CONF_PATH, attach_search, sanitise_name
from model import User, Org, Orgalias, Note, Address, Orgtag, Contact, Medium
LOG = logging.getLogger('insert_organisation')
LOG_SEARCH = logging.getLogger('search')
def text_to_ngrams(text, size=5):
ngrams = []
for word in text.lower().split():
length = len(word)
space = " " * (size - 1)
word = space + word + space
for i in range(length + size - 1):
ngrams.append(word[i: i + size])
return ngrams
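# Worked example (illustrative, not part of the original script): with size=3,
# text_to_ngrams("Ab", size=3) lowercases and pads the word to "  ab  " and
# returns ["  a", " ab", "ab ", "b  "] -- one window per character plus padding.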
def get_names(orm):
names = {}
for org in orm.query(Org).all():
if org.org_id not in names:
names[org.org_id] = []
names[org.org_id].append(org.name)
for orgalias in orm.query(Orgalias).all():
org_id = orgalias.org.org_id
if org_id not in names:
names[org_id] = []
names[org_id].append(orgalias.name)
return names
def select_from_list(matches):
for m, (name, alias) in enumerate(matches):
print(
" %4d %s %s" % (m, name, (alias and ("[%s]" % alias) or ""))
)
print()
print("Choose name or non-numeric to exit: ", end=' ')
choice = input()
try:
choice = int(choice)
except ValueError:
LOG.warning("Could not convert %s to integer.", choice)
return None
if choice >= len(matches) or choice < 0:
LOG.error("%d is out of range.", choice)
return None
return matches[choice][0]
def closest_names(name, names, orm):
matches = set()
lower = orm.query(Org.name) \
.filter(Org.name > name) \
.order_by(Org.name.asc()) \
.limit(3) \
.all()
higher = orm.query(Org.name) \
.filter(Org.name < name) \
.order_by(Org.name.desc()) \
.limit(3) \
.all()
for (name2, ) in lower + higher:
matches.add((name2, None))
for name2, alias in names:
ratio = Levenshtein.ratio(name.lower(), name2.lower())
if ratio > 0.8:
matches.add((name2, alias))
if not matches:
return None
matches = sorted(list(matches))
print()
print("\n%s\n" % name)
existing_name = select_from_list(matches)
return existing_name
def get_org(orm, name):
name = name.lower()
query = orm.query(Org) \
.filter(func.lower(Org.name) == name)
try:
return query.one()
except NoResultFound:
pass
except MultipleResultsFound:
LOG.warning("Multiple results found for name '%s'.", name)
return query.first()
query = orm.query(Orgalias) \
.filter(func.lower(Orgalias.name) == name)
try:
return query.one().org
except NoResultFound:
pass
except MultipleResultsFound:
LOG.warning("Multiple results found for alias '%s'.", name)
return query.first().org
return None
def get_candidates(es, text):
data = {
"query": {
"multi_match": {
"fields": [
"alias_all.straight^3",
"alias_all.fuzzy",
],
"query": text
}
}
}
LOG.debug("Search query: %s", repr(data))
results = es.search(data, index="mango", doc_type="org")
LOG.debug("Results: %s", repr(results))
org_list = []
for hit in results["hits"]["hits"]:
source = hit["_source"]
source["score"] = hit["_score"]
org_list.append(source)
return org_list
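# Illustrative shape of the list returned above (keys follow the code -- each
# hit's _source plus its _score; the values here are made up):
#   [{"org_id": 123, "alias_all": ["Example Org", "Example"], "score": 4.2}, ...]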
def | (es, text_orig, context=None, just_search=False):
"""Returns False to skip"""
# pylint: disable=redefined-variable-type
# `org_id` may be `None`, `False` or string.
org_id = None
text_search = text_orig
while True:
if context and context.get("refresh", None):
# Necessarily imprecise way of allowing recently
# inserted alias to appear in results
time.sleep(1)
context["refresh"] = False
candidates = get_candidates(es, text_search)
if not candidates:
break
sys.stderr.write(
("\nFind: '\033[92m%s\033[0m'\n\n" % (text_orig))
)
for i, org in enumerate(candidates, 1):
sys.stderr.write(
" %4d: \033[37m%-5d %s\033[0m\n" % (
i, org["org_id"], org["score"])
)
for name in org["alias_all"]:
sys.stderr.write(
(" \033[94m%s\033[0m\n" % name)
)
sys.stderr.write("\n")
sys.stderr.write(" Empty: None of the above\n")
sys.stderr.write(" Text: Alternative search\n: ")
sys.stderr.write(" '-': Skip\n\n: ")
if just_search:
return
choice = input()
choice = choice.strip()
if not len(choice):
org_id = None
break
sys.stderr.write("\n")
if choice == "-":
org_id = False
break
sys.stderr.write("\n")
try:
choice = int(choice)
except ValueError:
text_search = choice
continue
if choice == 0:
org_id = " "
break
if choice > len(candidates):
continue
org_id = candidates[choice - 1]["org_id"]
break
return org_id
def select_org(orm, name, context, search=True):
"""Returns False to skip"""
name = sanitise_name(name)
org = get_org(orm, name)
if org:
return org
if not search:
return
es = orm.get_bind().search
if es is None:
LOG.error("Cannot connect to Elasticsearch.")
sys.exit(1)
org_id = search_org(es, name, context=context)
if not org_id:
return org_id
try:
org = orm.query(Org).filter_by(org_id=org_id).one()
except NoResultFound as e:
LOG.warning("No result found for '%s', org_id '%d'.", name, org_id)
raise e
# Adds new `Orgalias` to `Org`.
Orgalias(name, org, moderation_user=context["user"], public=None)
context["refresh"] = True
es.refresh()
# Calling `refresh` here appears not to make any difference, but in
# theory should be a good idea.
# Waiting for inserted org to be searchable here doesn't seem to work.
return org
def insert_fast(
data, orm,
public=None, tag_names=None, dry_run=None, address_exclusive=None,
search=True, org_id_whitelist=None
):
user = orm.query(User).filter_by(user_id=-1).one()
tag_names = tag_names or []
tags = []
for tag_name in tag_names:
tag = Orgtag.get(
orm,
tag_name,
moderation_user=user,
public=public,
)
tags.append(tag)
context = {
"refresh": False,
"user": user
}
for chunk in data:
# pylint: disable=maybe-no-member
has_address = None
LOG.info("\n%s\n", chunk["name"])
org = select_org(orm, chunk["name"], context, search)
if (
org is False or
(org_id_whitelist and
((not org) or (org.org_id not in org_id_whitelist)))
):
LOG.info("Skipping org: %s", org and org.org_id)
orm.rollback()
continue
if not org:
LOG.warning("\nCreating org %s\n", chunk["name"])
org = Org(chunk["name"], moderation_user=user, public=public,)
orm.add(org)
# Querying org address list on a new org would trigger a commit
has_address = False
else:
has_address = bool(org.address_list)
if tags:
org.orgtag_list = list(set(tags + org.orgtag_list))
if "tag" in chunk:
for tag_name in chunk["tag"]:
tag = Orgtag.get(
orm, tag_name,
moderation_user=user, public=public,
)
if tag not in org.orgtag_list:
org.orgtag_list.append(tag)
if "address" in chunk and not (address_exclusive and has_address):
for address_data in chunk["address"]:
if address_data["postal"] in \
[address.postal for address in org.address_list]:
continue
address = Address(
address_data["postal"], address_data["source"],
moderation_user=user, public=None,
)
address.geocode()
LOG.debug(address)
orm.add(address)
org.address_list.append(address)
if "contact" in chunk:
for contact_data in chunk["contact"]:
text = sanitise_name(contact_data["text"])
match = False
for contact in org.contact_list:
if (
contact.text == text and
contact.medium.name == contact_data["medium"]
):
match = True
break
if match:
continue
try:
medium = orm.query(Medium) \
.filter_by(name=contact_data["medium"]) \
.one()
except NoResultFound:
LOG.warning("%s: No such medium", contact_data["medium"])
continue
contact = Contact(
medium, text,
source=contact_data["source"],
moderation_user=user, public=None,
)
LOG.debug(contact)
orm.add(contact)
org.contact_list.append(contact)
if "note" in chunk:
for note_data in chunk["note"]:
if note_data["text"] in [note.text for note in org.note_list]:
continue
note = Note(
note_data["text"], note_data["source"],
moderation_user=user, public=None,
)
LOG.debug(note)
orm.add(note)
org.note_list.append(note)
if not (orm.new or orm.dirty or orm.deleted):
LOG.info("Nothing to commit.")
continue
if dry_run is True:
LOG.warning("rolling back")
orm.rollback()
continue
LOG.info("Committing.")
orm.commit()
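# Illustrative input shape for insert_fast (keys inferred from the loop above;
# every value below is made up):
# [
#     {
#         "name": "Example Org",
#         "tag": ["activity|example"],
#         "address": [{"postal": "1 Example St, London", "source": "example.org"}],
#         "contact": [{"medium": "Email", "text": "info@example.org", "source": "example.org"}],
#         "note": [{"text": "Imported from an example list.", "source": "example.org"}]
#     }
# ]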
def main():
LOG.addHandler(logging.StreamHandler())
LOG_SEARCH.addHandler(logging.StreamHandler())
parser = argparse.ArgumentParser(description="__DESC__")
parser.add_argument(
"--verbose", "-v",
action="count", default=0,
help="Print verbose information for debugging.")
parser.add_argument(
"--quiet", "-q",
action="count", default=0,
help="Suppress warnings.")
parser.add_argument(
"-t", "--tag",
action="append",
help="Tag to apply to all insertions.", default=[])
parser.add_argument(
"-p", "--public",
action="store", type=int,
help="Public state of new items (True, False, None).")
parser.add_argument(
"-s", "--search",
action="store_true",
help="Search string using import merge tool.")
parser.add_argument(
"-d", "--do-not-search",
action="store_true",
help="Do not search for similar org names.")
parser.add_argument(
"-A", "--address-exclusive",
action="store_true",
help="Only import addresses if org has no existing "
"address.")
parser.add_argument(
"-L", "--limit-org",
action="store",
help="Only apply changes to orgs whose IDs are "
"supplied (a comma separated string)")
parser.add_argument(
"-n", "--dry-run",
action="store_true",
help="Dry run.")
parser.add_argument(
"json_path", metavar="JSON",
nargs="+",
help="Path to JSON file.")
args = parser.parse_args()
log_level = (logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)[
max(0, min(3, 1 + args.verbose - args.quiet))]
LOG.setLevel(log_level)
LOG_SEARCH.setLevel(log_level)
connection_url = mysql.connection_url_app(CONF_PATH)
engine = create_engine(connection_url,)
mysql.engine_disable_mode(engine, "ONLY_FULL_GROUP_BY")
session_ = sessionmaker(bind=engine, autocommit=False, autoflush=False)
orm = session_()
attach_search(engine, orm)
if args.public != None:
args.public = bool(args.public)
if args.search:
es = orm.get_bind().search
        for arg in args.json_path:
search_org(es, arg, just_search=True)
sys.exit(0)
org_id_whitelist = None
if args.limit_org:
org_id_whitelist = []
for id_ in args.limit_org.split(","):
org_id_whitelist.append(int(id_))
for arg in args.json_path:
try:
data = json.load(open(arg, "r", encoding="utf8"))
except ValueError:
LOG.error("%s: Could not decode JSON data.", arg)
continue
insert_fast(
data, orm, args.public, args.tag, args.dry_run,
args.address_exclusive, (not args.do_not_search),
org_id_whitelist
)
if __name__ == "__main__":
main()
| search_org | identifier_name |
insert_organisations.py | #!/usr/bin/env python3
# pylint: disable=wrong-import-position
# Adding working directory to system path
import sys
import time
import json
import logging
import argparse
import Levenshtein
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
sys.path.append(".")
from mysql import mysql
from model import CONF_PATH, attach_search, sanitise_name
from model import User, Org, Orgalias, Note, Address, Orgtag, Contact, Medium
LOG = logging.getLogger('insert_organisation')
LOG_SEARCH = logging.getLogger('search')
def text_to_ngrams(text, size=5):
ngrams = []
for word in text.lower().split():
length = len(word)
space = " " * (size - 1)
word = space + word + space
for i in range(length + size - 1):
ngrams.append(word[i: i + size])
return ngrams
def get_names(orm):
names = {}
for org in orm.query(Org).all():
if org.org_id not in names:
names[org.org_id] = []
names[org.org_id].append(org.name)
for orgalias in orm.query(Orgalias).all():
org_id = orgalias.org.org_id
if org_id not in names:
names[org_id] = []
names[org_id].append(orgalias.name)
return names
def select_from_list(matches):
for m, (name, alias) in enumerate(matches):
print(
" %4d %s %s" % (m, name, (alias and ("[%s]" % alias) or ""))
)
print()
print("Choose name or non-numeric to exit: ", end=' ')
choice = input()
try:
choice = int(choice)
except ValueError:
LOG.warning("Could not convert %s to integer.", choice)
return None
if choice >= len(matches) or choice < 0:
LOG.error("%d is out of range.", choice)
return None
return matches[choice][0]
def closest_names(name, names, orm):
matches = set()
lower = orm.query(Org.name) \
.filter(Org.name > name) \
.order_by(Org.name.asc()) \
.limit(3) \
.all()
higher = orm.query(Org.name) \
.filter(Org.name < name) \
.order_by(Org.name.desc()) \
.limit(3) \
.all()
for (name2, ) in lower + higher:
matches.add((name2, None))
for name2, alias in names:
ratio = Levenshtein.ratio(name.lower(), name2.lower())
if ratio > 0.8:
matches.add((name2, alias))
if not matches:
return None
matches = sorted(list(matches))
print()
print("\n%s\n" % name)
existing_name = select_from_list(matches)
return existing_name
def get_org(orm, name):
name = name.lower()
query = orm.query(Org) \
.filter(func.lower(Org.name) == name)
try:
return query.one()
except NoResultFound:
pass
except MultipleResultsFound:
LOG.warning("Multiple results found for name '%s'.", name)
return query.first()
query = orm.query(Orgalias) \
.filter(func.lower(Orgalias.name) == name)
try:
return query.one().org
except NoResultFound:
pass
except MultipleResultsFound:
LOG.warning("Multiple results found for alias '%s'.", name)
return query.first().org
return None
def get_candidates(es, text):
data = {
"query": {
"multi_match": {
"fields": [
"alias_all.straight^3",
"alias_all.fuzzy",
],
"query": text
}
}
}
LOG.debug("Search query: %s", repr(data))
results = es.search(data, index="mango", doc_type="org")
LOG.debug("Results: %s", repr(results))
org_list = []
for hit in results["hits"]["hits"]:
source = hit["_source"]
source["score"] = hit["_score"]
org_list.append(source)
return org_list
def search_org(es, text_orig, context=None, just_search=False):
"""Returns False to skip"""
# pylint: disable=redefined-variable-type
# `org_id` may be `None`, `False` or string.
org_id = None
text_search = text_orig
while True:
if context and context.get("refresh", None):
# Necessarily imprecise way of allowing recently
# inserted alias to appear in results
time.sleep(1)
context["refresh"] = False
candidates = get_candidates(es, text_search)
if not candidates:
break
sys.stderr.write(
("\nFind: '\033[92m%s\033[0m'\n\n" % (text_orig))
)
for i, org in enumerate(candidates, 1):
sys.stderr.write(
" %4d: \033[37m%-5d %s\033[0m\n" % (
i, org["org_id"], org["score"])
)
for name in org["alias_all"]:
sys.stderr.write(
(" \033[94m%s\033[0m\n" % name)
)
sys.stderr.write("\n")
sys.stderr.write(" Empty: None of the above\n")
sys.stderr.write(" Text: Alternative search\n: ")
sys.stderr.write(" '-': Skip\n\n: ")
if just_search:
return
choice = input()
choice = choice.strip()
if not len(choice):
org_id = None
break
sys.stderr.write("\n")
if choice == "-":
org_id = False
break
sys.stderr.write("\n")
try:
choice = int(choice)
except ValueError:
text_search = choice
continue
if choice == 0:
org_id = " "
break
if choice > len(candidates):
continue
org_id = candidates[choice - 1]["org_id"]
break
return org_id
def select_org(orm, name, context, search=True):
"""Returns False to skip"""
name = sanitise_name(name)
org = get_org(orm, name)
if org:
return org
if not search:
return
es = orm.get_bind().search
if es is None:
LOG.error("Cannot connect to Elasticsearch.")
sys.exit(1)
org_id = search_org(es, name, context=context)
if not org_id:
return org_id
try:
org = orm.query(Org).filter_by(org_id=org_id).one()
except NoResultFound as e:
LOG.warning("No result found for '%s', org_id '%d'.", name, org_id)
raise e
# Adds new `Orgalias` to `Org`.
Orgalias(name, org, moderation_user=context["user"], public=None)
context["refresh"] = True
es.refresh()
# Calling `refresh` here appears not to make any difference, but in
# theory should be a good idea.
# Waiting for inserted org to be searchable here doesn't seem to work.
return org
def insert_fast(
data, orm,
public=None, tag_names=None, dry_run=None, address_exclusive=None,
search=True, org_id_whitelist=None
):
user = orm.query(User).filter_by(user_id=-1).one()
tag_names = tag_names or []
tags = []
for tag_name in tag_names:
tag = Orgtag.get(
orm,
tag_name,
moderation_user=user,
public=public,
)
tags.append(tag)
context = {
"refresh": False,
"user": user
}
for chunk in data:
# pylint: disable=maybe-no-member
has_address = None
LOG.info("\n%s\n", chunk["name"])
org = select_org(orm, chunk["name"], context, search)
if (
org is False or
(org_id_whitelist and
((not org) or (org.org_id not in org_id_whitelist)))
):
LOG.info("Skipping org: %s", org and org.org_id)
orm.rollback()
continue
if not org:
LOG.warning("\nCreating org %s\n", chunk["name"])
org = Org(chunk["name"], moderation_user=user, public=public,)
orm.add(org)
# Querying org address list on a new org would trigger a commit
has_address = False
else:
has_address = bool(org.address_list)
if tags:
org.orgtag_list = list(set(tags + org.orgtag_list))
if "tag" in chunk:
for tag_name in chunk["tag"]:
tag = Orgtag.get(
orm, tag_name,
moderation_user=user, public=public,
)
if tag not in org.orgtag_list:
org.orgtag_list.append(tag)
if "address" in chunk and not (address_exclusive and has_address):
for address_data in chunk["address"]:
if address_data["postal"] in \
[address.postal for address in org.address_list]:
continue
address = Address(
address_data["postal"], address_data["source"],
moderation_user=user, public=None,
)
address.geocode()
LOG.debug(address)
orm.add(address)
org.address_list.append(address)
if "contact" in chunk:
for contact_data in chunk["contact"]:
text = sanitise_name(contact_data["text"])
match = False
for contact in org.contact_list:
if (
contact.text == text and
contact.medium.name == contact_data["medium"]
):
match = True
break
if match:
continue
try:
medium = orm.query(Medium) \
.filter_by(name=contact_data["medium"]) \
.one()
except NoResultFound:
LOG.warning("%s: No such medium", contact_data["medium"])
continue
contact = Contact(
medium, text,
source=contact_data["source"],
moderation_user=user, public=None,
)
LOG.debug(contact)
orm.add(contact)
org.contact_list.append(contact)
| for note_data in chunk["note"]:
if note_data["text"] in [note.text for note in org.note_list]:
continue
note = Note(
note_data["text"], note_data["source"],
moderation_user=user, public=None,
)
LOG.debug(note)
orm.add(note)
org.note_list.append(note)
if not (orm.new or orm.dirty or orm.deleted):
LOG.info("Nothing to commit.")
continue
if dry_run is True:
LOG.warning("rolling back")
orm.rollback()
continue
LOG.info("Committing.")
orm.commit()
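# Shape of a record that insert_fast expects, reconstructed from the keys it reads
# above (the values are hypothetical; only "name" is required, the other keys are
# optional):
#
#     {
#         "name": "Example Org",
#         "tag": ["sector:example"],
#         "address": [{"postal": "1 Example Street", "source": "import"}],
#         "contact": [{"medium": "email", "text": "info@example.org", "source": "import"}],
#         "note": [{"text": "Imported as an example", "source": "import"}]
#     }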
def main():
LOG.addHandler(logging.StreamHandler())
LOG_SEARCH.addHandler(logging.StreamHandler())
parser = argparse.ArgumentParser(description="__DESC__")
parser.add_argument(
"--verbose", "-v",
action="count", default=0,
help="Print verbose information for debugging.")
parser.add_argument(
"--quiet", "-q",
action="count", default=0,
help="Suppress warnings.")
parser.add_argument(
"-t", "--tag",
action="append",
help="Tag to apply to all insertions.", default=[])
parser.add_argument(
"-p", "--public",
action="store", type=int,
help="Public state of new items (True, False, None).")
parser.add_argument(
"-s", "--search",
action="store_true",
help="Search string using import merge tool.")
parser.add_argument(
"-d", "--do-not-search",
action="store_true",
help="Do not search for similar org names.")
parser.add_argument(
"-A", "--address-exclusive",
action="store_true",
help="Only import addresses if org has no existing "
"address.")
parser.add_argument(
"-L", "--limit-org",
action="store",
help="Only apply changes to orgs whose IDs are "
"supplied (a comma separated string)")
parser.add_argument(
"-n", "--dry-run",
action="store_true",
help="Dry run.")
parser.add_argument(
"json_path", metavar="JSON",
nargs="+",
help="Path to JSON file.")
args = parser.parse_args()
log_level = (logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)[
max(0, min(3, 1 + args.verbose - args.quiet))]
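    # With the defaults this indexes to WARNING; each -v moves one step towards DEBUG
    # and each -q one step towards ERROR, clamped at both ends (so -v gives INFO,
    # -vv gives DEBUG and -q gives ERROR).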
LOG.setLevel(log_level)
LOG_SEARCH.setLevel(log_level)
connection_url = mysql.connection_url_app(CONF_PATH)
engine = create_engine(connection_url,)
mysql.engine_disable_mode(engine, "ONLY_FULL_GROUP_BY")
session_ = sessionmaker(bind=engine, autocommit=False, autoflush=False)
orm = session_()
attach_search(engine, orm)
if args.public != None:
args.public = bool(args.public)
if args.search:
es = orm.get_bind().search
        for arg in args.json_path:  # in search mode the positional arguments are the search strings
search_org(es, arg, just_search=True)
sys.exit(0)
org_id_whitelist = None
if args.limit_org:
org_id_whitelist = []
for id_ in args.limit_org.split(","):
org_id_whitelist.append(int(id_))
for arg in args.json_path:
try:
data = json.load(open(arg, "r", encoding="utf8"))
except ValueError:
LOG.error("%s: Could not decode JSON data.", arg)
continue
insert_fast(
data, orm, args.public, args.tag, args.dry_run,
args.address_exclusive, (not args.do_not_search),
org_id_whitelist
)
if __name__ == "__main__":
main() | if "note" in chunk: | random_line_split |
insert_organisations.py | #!/usr/bin/env python3
# pylint: disable=wrong-import-position
# Adding working directory to system path
import sys
import time
import json
import logging
import argparse
import Levenshtein
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
sys.path.append(".")
from mysql import mysql
from model import CONF_PATH, attach_search, sanitise_name
from model import User, Org, Orgalias, Note, Address, Orgtag, Contact, Medium
LOG = logging.getLogger('insert_organisation')
LOG_SEARCH = logging.getLogger('search')
def text_to_ngrams(text, size=5):
ngrams = []
for word in text.lower().split():
length = len(word)
space = " " * (size - 1)
word = space + word + space
for i in range(length + size - 1):
ngrams.append(word[i: i + size])
return ngrams
def get_names(orm):
names = {}
for org in orm.query(Org).all():
if org.org_id not in names:
names[org.org_id] = []
names[org.org_id].append(org.name)
for orgalias in orm.query(Orgalias).all():
org_id = orgalias.org.org_id
if org_id not in names:
names[org_id] = []
names[org_id].append(orgalias.name)
return names
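# get_names returns a plain dict mapping each org_id to every known spelling, the
# primary name first and any aliases after it, for example (hypothetical ids and
# names): {1: ["Example Org", "ExOrg"], 2: ["Another Org"]}.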
def select_from_list(matches):
for m, (name, alias) in enumerate(matches):
print(
" %4d %s %s" % (m, name, (alias and ("[%s]" % alias) or ""))
)
print()
print("Choose name or non-numeric to exit: ", end=' ')
choice = input()
try:
choice = int(choice)
except ValueError:
LOG.warning("Could not convert %s to integer.", choice)
return None
if choice >= len(matches) or choice < 0:
LOG.error("%d is out of range.", choice)
return None
return matches[choice][0]
def closest_names(name, names, orm):
|
def get_org(orm, name):
name = name.lower()
query = orm.query(Org) \
.filter(func.lower(Org.name) == name)
try:
return query.one()
except NoResultFound:
pass
except MultipleResultsFound:
LOG.warning("Multiple results found for name '%s'.", name)
return query.first()
query = orm.query(Orgalias) \
.filter(func.lower(Orgalias.name) == name)
try:
return query.one().org
except NoResultFound:
pass
except MultipleResultsFound:
LOG.warning("Multiple results found for alias '%s'.", name)
return query.first().org
return None
def get_candidates(es, text):
data = {
"query": {
"multi_match": {
"fields": [
"alias_all.straight^3",
"alias_all.fuzzy",
],
"query": text
}
}
}
LOG.debug("Search query: %s", repr(data))
results = es.search(data, index="mango", doc_type="org")
LOG.debug("Results: %s", repr(results))
org_list = []
for hit in results["hits"]["hits"]:
source = hit["_source"]
source["score"] = hit["_score"]
org_list.append(source)
return org_list
def search_org(es, text_orig, context=None, just_search=False):
"""Returns False to skip"""
# pylint: disable=redefined-variable-type
# `org_id` may be `None`, `False` or string.
org_id = None
text_search = text_orig
while True:
if context and context.get("refresh", None):
# Necessarily imprecise way of allowing recently
# inserted alias to appear in results
time.sleep(1)
context["refresh"] = False
candidates = get_candidates(es, text_search)
if not candidates:
break
sys.stderr.write(
("\nFind: '\033[92m%s\033[0m'\n\n" % (text_orig))
)
for i, org in enumerate(candidates, 1):
sys.stderr.write(
" %4d: \033[37m%-5d %s\033[0m\n" % (
i, org["org_id"], org["score"])
)
for name in org["alias_all"]:
sys.stderr.write(
(" \033[94m%s\033[0m\n" % name)
)
sys.stderr.write("\n")
sys.stderr.write(" Empty: None of the above\n")
sys.stderr.write(" Text: Alternative search\n: ")
sys.stderr.write(" '-': Skip\n\n: ")
if just_search:
return
choice = input()
choice = choice.strip()
if not len(choice):
org_id = None
break
sys.stderr.write("\n")
if choice == "-":
org_id = False
break
sys.stderr.write("\n")
try:
choice = int(choice)
except ValueError:
text_search = choice
continue
if choice == 0:
org_id = " "
break
if choice > len(candidates):
continue
org_id = candidates[choice - 1]["org_id"]
break
return org_id
def select_org(orm, name, context, search=True):
"""Returns False to skip"""
name = sanitise_name(name)
org = get_org(orm, name)
if org:
return org
if not search:
return
es = orm.get_bind().search
if es is None:
LOG.error("Cannot connect to Elasticsearch.")
sys.exit(1)
org_id = search_org(es, name, context=context)
if not org_id:
return org_id
try:
org = orm.query(Org).filter_by(org_id=org_id).one()
except NoResultFound as e:
LOG.warning("No result found for '%s', org_id '%d'.", name, org_id)
raise e
# Adds new `Orgalias` to `Org`.
Orgalias(name, org, moderation_user=context["user"], public=None)
context["refresh"] = True
es.refresh()
# Calling `refresh` here appears not to make any difference, but in
# theory should be a good idea.
# Waiting for inserted org to be searchable here doesn't seem to work.
return org
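# In normal use select_org therefore has three outcomes: an Org instance (found
# locally or confirmed interactively through Elasticsearch), None when nothing
# suitable exists or searching is disabled, and False when the operator chose to
# skip the record. insert_fast below relies on that distinction.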
def insert_fast(
data, orm,
public=None, tag_names=None, dry_run=None, address_exclusive=None,
search=True, org_id_whitelist=None
):
user = orm.query(User).filter_by(user_id=-1).one()
tag_names = tag_names or []
tags = []
for tag_name in tag_names:
tag = Orgtag.get(
orm,
tag_name,
moderation_user=user,
public=public,
)
tags.append(tag)
context = {
"refresh": False,
"user": user
}
for chunk in data:
# pylint: disable=maybe-no-member
has_address = None
LOG.info("\n%s\n", chunk["name"])
org = select_org(orm, chunk["name"], context, search)
if (
org is False or
(org_id_whitelist and
((not org) or (org.org_id not in org_id_whitelist)))
):
LOG.info("Skipping org: %s", org and org.org_id)
orm.rollback()
continue
if not org:
LOG.warning("\nCreating org %s\n", chunk["name"])
org = Org(chunk["name"], moderation_user=user, public=public,)
orm.add(org)
# Querying org address list on a new org would trigger a commit
has_address = False
else:
has_address = bool(org.address_list)
if tags:
org.orgtag_list = list(set(tags + org.orgtag_list))
if "tag" in chunk:
for tag_name in chunk["tag"]:
tag = Orgtag.get(
orm, tag_name,
moderation_user=user, public=public,
)
if tag not in org.orgtag_list:
org.orgtag_list.append(tag)
if "address" in chunk and not (address_exclusive and has_address):
for address_data in chunk["address"]:
if address_data["postal"] in \
[address.postal for address in org.address_list]:
continue
address = Address(
address_data["postal"], address_data["source"],
moderation_user=user, public=None,
)
address.geocode()
LOG.debug(address)
orm.add(address)
org.address_list.append(address)
if "contact" in chunk:
for contact_data in chunk["contact"]:
text = sanitise_name(contact_data["text"])
match = False
for contact in org.contact_list:
if (
contact.text == text and
contact.medium.name == contact_data["medium"]
):
match = True
break
if match:
continue
try:
medium = orm.query(Medium) \
.filter_by(name=contact_data["medium"]) \
.one()
except NoResultFound:
LOG.warning("%s: No such medium", contact_data["medium"])
continue
contact = Contact(
medium, text,
source=contact_data["source"],
moderation_user=user, public=None,
)
LOG.debug(contact)
orm.add(contact)
org.contact_list.append(contact)
if "note" in chunk:
for note_data in chunk["note"]:
if note_data["text"] in [note.text for note in org.note_list]:
continue
note = Note(
note_data["text"], note_data["source"],
moderation_user=user, public=None,
)
LOG.debug(note)
orm.add(note)
org.note_list.append(note)
if not (orm.new or orm.dirty or orm.deleted):
LOG.info("Nothing to commit.")
continue
if dry_run is True:
LOG.warning("rolling back")
orm.rollback()
continue
LOG.info("Committing.")
orm.commit()
def main():
LOG.addHandler(logging.StreamHandler())
LOG_SEARCH.addHandler(logging.StreamHandler())
parser = argparse.ArgumentParser(description="__DESC__")
parser.add_argument(
"--verbose", "-v",
action="count", default=0,
help="Print verbose information for debugging.")
parser.add_argument(
"--quiet", "-q",
action="count", default=0,
help="Suppress warnings.")
parser.add_argument(
"-t", "--tag",
action="append",
help="Tag to apply to all insertions.", default=[])
parser.add_argument(
"-p", "--public",
action="store", type=int,
help="Public state of new items (True, False, None).")
parser.add_argument(
"-s", "--search",
action="store_true",
help="Search string using import merge tool.")
parser.add_argument(
"-d", "--do-not-search",
action="store_true",
help="Do not search for similar org names.")
parser.add_argument(
"-A", "--address-exclusive",
action="store_true",
help="Only import addresses if org has no existing "
"address.")
parser.add_argument(
"-L", "--limit-org",
action="store",
help="Only apply changes to orgs whose IDs are "
"supplied (a comma separated string)")
parser.add_argument(
"-n", "--dry-run",
action="store_true",
help="Dry run.")
parser.add_argument(
"json_path", metavar="JSON",
nargs="+",
help="Path to JSON file.")
args = parser.parse_args()
log_level = (logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)[
max(0, min(3, 1 + args.verbose - args.quiet))]
LOG.setLevel(log_level)
LOG_SEARCH.setLevel(log_level)
connection_url = mysql.connection_url_app(CONF_PATH)
engine = create_engine(connection_url,)
mysql.engine_disable_mode(engine, "ONLY_FULL_GROUP_BY")
session_ = sessionmaker(bind=engine, autocommit=False, autoflush=False)
orm = session_()
attach_search(engine, orm)
if args.public != None:
args.public = bool(args.public)
if args.search:
es = orm.get_bind().search
        for arg in args.json_path:  # in search mode the positional arguments are the search strings
search_org(es, arg, just_search=True)
sys.exit(0)
org_id_whitelist = None
if args.limit_org:
org_id_whitelist = []
for id_ in args.limit_org.split(","):
org_id_whitelist.append(int(id_))
for arg in args.json_path:
try:
data = json.load(open(arg, "r", encoding="utf8"))
except ValueError:
LOG.error("%s: Could not decode JSON data.", arg)
continue
insert_fast(
data, orm, args.public, args.tag, args.dry_run,
args.address_exclusive, (not args.do_not_search),
org_id_whitelist
)
if __name__ == "__main__":
main()
| matches = set()
lower = orm.query(Org.name) \
.filter(Org.name > name) \
.order_by(Org.name.asc()) \
.limit(3) \
.all()
higher = orm.query(Org.name) \
.filter(Org.name < name) \
.order_by(Org.name.desc()) \
.limit(3) \
.all()
for (name2, ) in lower + higher:
matches.add((name2, None))
for name2, alias in names:
ratio = Levenshtein.ratio(name.lower(), name2.lower())
if ratio > 0.8:
matches.add((name2, alias))
if not matches:
return None
matches = sorted(list(matches))
print()
print("\n%s\n" % name)
existing_name = select_from_list(matches)
return existing_name | identifier_body |
insert_organisations.py | #!/usr/bin/env python3
# pylint: disable=wrong-import-position
# Adding working directory to system path
import sys
import time
import json
import logging
import argparse
import Levenshtein
from sqlalchemy import create_engine, func
from sqlalchemy.orm import sessionmaker
from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
sys.path.append(".")
from mysql import mysql
from model import CONF_PATH, attach_search, sanitise_name
from model import User, Org, Orgalias, Note, Address, Orgtag, Contact, Medium
LOG = logging.getLogger('insert_organisation')
LOG_SEARCH = logging.getLogger('search')
def text_to_ngrams(text, size=5):
ngrams = []
for word in text.lower().split():
length = len(word)
space = " " * (size - 1)
word = space + word + space
for i in range(length + size - 1):
ngrams.append(word[i: i + size])
return ngrams
def get_names(orm):
names = {}
for org in orm.query(Org).all():
if org.org_id not in names:
names[org.org_id] = []
names[org.org_id].append(org.name)
for orgalias in orm.query(Orgalias).all():
org_id = orgalias.org.org_id
if org_id not in names:
names[org_id] = []
names[org_id].append(orgalias.name)
return names
def select_from_list(matches):
for m, (name, alias) in enumerate(matches):
print(
" %4d %s %s" % (m, name, (alias and ("[%s]" % alias) or ""))
)
print()
print("Choose name or non-numeric to exit: ", end=' ')
choice = input()
try:
choice = int(choice)
except ValueError:
LOG.warning("Could not convert %s to integer.", choice)
return None
if choice >= len(matches) or choice < 0:
LOG.error("%d is out of range.", choice)
return None
return matches[choice][0]
def closest_names(name, names, orm):
matches = set()
lower = orm.query(Org.name) \
.filter(Org.name > name) \
.order_by(Org.name.asc()) \
.limit(3) \
.all()
higher = orm.query(Org.name) \
.filter(Org.name < name) \
.order_by(Org.name.desc()) \
.limit(3) \
.all()
for (name2, ) in lower + higher:
matches.add((name2, None))
for name2, alias in names:
ratio = Levenshtein.ratio(name.lower(), name2.lower())
if ratio > 0.8:
matches.add((name2, alias))
if not matches:
return None
matches = sorted(list(matches))
print()
print("\n%s\n" % name)
existing_name = select_from_list(matches)
return existing_name
def get_org(orm, name):
name = name.lower()
query = orm.query(Org) \
.filter(func.lower(Org.name) == name)
try:
return query.one()
except NoResultFound:
pass
except MultipleResultsFound:
LOG.warning("Multiple results found for name '%s'.", name)
return query.first()
query = orm.query(Orgalias) \
.filter(func.lower(Orgalias.name) == name)
try:
return query.one().org
except NoResultFound:
pass
except MultipleResultsFound:
LOG.warning("Multiple results found for alias '%s'.", name)
return query.first().org
return None
def get_candidates(es, text):
data = {
"query": {
"multi_match": {
"fields": [
"alias_all.straight^3",
"alias_all.fuzzy",
],
"query": text
}
}
}
LOG.debug("Search query: %s", repr(data))
results = es.search(data, index="mango", doc_type="org")
LOG.debug("Results: %s", repr(results))
org_list = []
for hit in results["hits"]["hits"]:
source = hit["_source"]
source["score"] = hit["_score"]
org_list.append(source)
return org_list
def search_org(es, text_orig, context=None, just_search=False):
"""Returns False to skip"""
# pylint: disable=redefined-variable-type
# `org_id` may be `None`, `False` or string.
org_id = None
text_search = text_orig
while True:
if context and context.get("refresh", None):
# Necessarily imprecise way of allowing recently
# inserted alias to appear in results
time.sleep(1)
context["refresh"] = False
candidates = get_candidates(es, text_search)
if not candidates:
break
sys.stderr.write(
("\nFind: '\033[92m%s\033[0m'\n\n" % (text_orig))
)
for i, org in enumerate(candidates, 1):
sys.stderr.write(
" %4d: \033[37m%-5d %s\033[0m\n" % (
i, org["org_id"], org["score"])
)
for name in org["alias_all"]:
sys.stderr.write(
(" \033[94m%s\033[0m\n" % name)
)
sys.stderr.write("\n")
sys.stderr.write(" Empty: None of the above\n")
sys.stderr.write(" Text: Alternative search\n: ")
sys.stderr.write(" '-': Skip\n\n: ")
if just_search:
return
choice = input()
choice = choice.strip()
if not len(choice):
org_id = None
break
sys.stderr.write("\n")
if choice == "-":
org_id = False
break
sys.stderr.write("\n")
try:
choice = int(choice)
except ValueError:
text_search = choice
continue
if choice == 0:
org_id = " "
break
if choice > len(candidates):
continue
org_id = candidates[choice - 1]["org_id"]
break
return org_id
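# Summary of the interactive choices above (as implemented): an empty reply leaves
# org_id as None ("none of the above"), "-" returns False to skip the record, any
# other non-numeric text restarts the search with that text, a number picks the
# matching candidate's org_id, and 0 returns the placeholder string " ".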
def select_org(orm, name, context, search=True):
"""Returns False to skip"""
name = sanitise_name(name)
org = get_org(orm, name)
if org:
return org
if not search:
|
es = orm.get_bind().search
if es is None:
LOG.error("Cannot connect to Elasticsearch.")
sys.exit(1)
org_id = search_org(es, name, context=context)
if not org_id:
return org_id
try:
org = orm.query(Org).filter_by(org_id=org_id).one()
except NoResultFound as e:
LOG.warning("No result found for '%s', org_id '%d'.", name, org_id)
raise e
# Adds new `Orgalias` to `Org`.
Orgalias(name, org, moderation_user=context["user"], public=None)
context["refresh"] = True
es.refresh()
# Calling `refresh` here appears not to make any difference, but in
# theory should be a good idea.
# Waiting for inserted org to be searchable here doesn't seem to work.
return org
def insert_fast(
data, orm,
public=None, tag_names=None, dry_run=None, address_exclusive=None,
search=True, org_id_whitelist=None
):
user = orm.query(User).filter_by(user_id=-1).one()
tag_names = tag_names or []
tags = []
for tag_name in tag_names:
tag = Orgtag.get(
orm,
tag_name,
moderation_user=user,
public=public,
)
tags.append(tag)
context = {
"refresh": False,
"user": user
}
for chunk in data:
# pylint: disable=maybe-no-member
has_address = None
LOG.info("\n%s\n", chunk["name"])
org = select_org(orm, chunk["name"], context, search)
if (
org is False or
(org_id_whitelist and
((not org) or (org.org_id not in org_id_whitelist)))
):
LOG.info("Skipping org: %s", org and org.org_id)
orm.rollback()
continue
if not org:
LOG.warning("\nCreating org %s\n", chunk["name"])
org = Org(chunk["name"], moderation_user=user, public=public,)
orm.add(org)
# Querying org address list on a new org would trigger a commit
has_address = False
else:
has_address = bool(org.address_list)
if tags:
org.orgtag_list = list(set(tags + org.orgtag_list))
if "tag" in chunk:
for tag_name in chunk["tag"]:
tag = Orgtag.get(
orm, tag_name,
moderation_user=user, public=public,
)
if tag not in org.orgtag_list:
org.orgtag_list.append(tag)
if "address" in chunk and not (address_exclusive and has_address):
for address_data in chunk["address"]:
if address_data["postal"] in \
[address.postal for address in org.address_list]:
continue
address = Address(
address_data["postal"], address_data["source"],
moderation_user=user, public=None,
)
address.geocode()
LOG.debug(address)
orm.add(address)
org.address_list.append(address)
if "contact" in chunk:
for contact_data in chunk["contact"]:
text = sanitise_name(contact_data["text"])
match = False
for contact in org.contact_list:
if (
contact.text == text and
contact.medium.name == contact_data["medium"]
):
match = True
break
if match:
continue
try:
medium = orm.query(Medium) \
.filter_by(name=contact_data["medium"]) \
.one()
except NoResultFound:
LOG.warning("%s: No such medium", contact_data["medium"])
continue
contact = Contact(
medium, text,
source=contact_data["source"],
moderation_user=user, public=None,
)
LOG.debug(contact)
orm.add(contact)
org.contact_list.append(contact)
if "note" in chunk:
for note_data in chunk["note"]:
if note_data["text"] in [note.text for note in org.note_list]:
continue
note = Note(
note_data["text"], note_data["source"],
moderation_user=user, public=None,
)
LOG.debug(note)
orm.add(note)
org.note_list.append(note)
if not (orm.new or orm.dirty or orm.deleted):
LOG.info("Nothing to commit.")
continue
if dry_run is True:
LOG.warning("rolling back")
orm.rollback()
continue
LOG.info("Committing.")
orm.commit()
def main():
LOG.addHandler(logging.StreamHandler())
LOG_SEARCH.addHandler(logging.StreamHandler())
parser = argparse.ArgumentParser(description="__DESC__")
parser.add_argument(
"--verbose", "-v",
action="count", default=0,
help="Print verbose information for debugging.")
parser.add_argument(
"--quiet", "-q",
action="count", default=0,
help="Suppress warnings.")
parser.add_argument(
"-t", "--tag",
action="append",
help="Tag to apply to all insertions.", default=[])
parser.add_argument(
"-p", "--public",
action="store", type=int,
help="Public state of new items (True, False, None).")
parser.add_argument(
"-s", "--search",
action="store_true",
help="Search string using import merge tool.")
parser.add_argument(
"-d", "--do-not-search",
action="store_true",
help="Do not search for similar org names.")
parser.add_argument(
"-A", "--address-exclusive",
action="store_true",
help="Only import addresses if org has no existing "
"address.")
parser.add_argument(
"-L", "--limit-org",
action="store",
help="Only apply changes to orgs whose IDs are "
"supplied (a comma separated string)")
parser.add_argument(
"-n", "--dry-run",
action="store_true",
help="Dry run.")
parser.add_argument(
"json_path", metavar="JSON",
nargs="+",
help="Path to JSON file.")
args = parser.parse_args()
log_level = (logging.ERROR, logging.WARNING, logging.INFO, logging.DEBUG)[
max(0, min(3, 1 + args.verbose - args.quiet))]
LOG.setLevel(log_level)
LOG_SEARCH.setLevel(log_level)
connection_url = mysql.connection_url_app(CONF_PATH)
engine = create_engine(connection_url,)
mysql.engine_disable_mode(engine, "ONLY_FULL_GROUP_BY")
session_ = sessionmaker(bind=engine, autocommit=False, autoflush=False)
orm = session_()
attach_search(engine, orm)
if args.public != None:
args.public = bool(args.public)
if args.search:
es = orm.get_bind().search
        for arg in args.json_path:  # in search mode the positional arguments are the search strings
search_org(es, arg, just_search=True)
sys.exit(0)
org_id_whitelist = None
if args.limit_org:
org_id_whitelist = []
for id_ in args.limit_org.split(","):
org_id_whitelist.append(int(id_))
for arg in args.json_path:
try:
data = json.load(open(arg, "r", encoding="utf8"))
except ValueError:
LOG.error("%s: Could not decode JSON data.", arg)
continue
insert_fast(
data, orm, args.public, args.tag, args.dry_run,
args.address_exclusive, (not args.do_not_search),
org_id_whitelist
)
if __name__ == "__main__":
main()
| return | conditional_block |
29.js | "1": "<sup>1</sup> Fjala e Zotit që iu drejtua Joelit, birit të Pethuelit.",
"2": "<sup>2</sup> Dëgjoni këtë, o pleq, dëgjoni, ju të gjithë banorë të vendit. A ka ndodhur vallë një gjë e tillë në ditët tuaja apo në ditët e etërve tuaj?",
"3": "<sup>3</sup> Tregojani bijve tuaj, dhe bijtë tuaj bijve të tyre, dhe bijtë e tyre brezit tjetër.",
"4": "<sup>4</sup> Atë që la krimbi e hëngri karkaleci, atë që la karkaleci e hëngri larva e karkalecit, atë që la larva e hëngri bulkthi.",
"5": "<sup>5</sup> Zgjohuni, o të dehur, dhe qani; vajtoni ju të gjithë, që pini verë, për mushtin që ju hoqën nga goja.",
"6": "<sup>6</sup> Sepse një komb i fortë dhe i panumërt ka dalë kundër vendit tim. Dhëmbët e tij janë dhëmbë luani, dhe ka stërdhëmbë luaneshe.",
"7": "<sup>7</sup> Ka shkatërruar hardhinë time, e ka bërë copë-copë fikun tim, ia ka hequr lëvoren krejt dhe e ka hedhur tutje; degët e tij kanë mbetur të bardha.",
"8": "<sup>8</sup> Vajto si një virgjëreshë e veshur me thes për dhëndrin e rinisë së saj.",
"9": "<sup>9</sup> Nga shtëpia e Zotit janë zhdukur ofertat e ushqimit dhe libacionet; priftërinjtë, ministrat e Zotit, pikëllohen.",
"10": "<sup>10</sup> Fusha është shkretuar, vendi është në zi, sepse gruri u prish, mushti u tha dhe vaji humbi.",
"11": "<sup>11</sup> Pikëllohuni, o bujq, vajtoni, o vreshtarë, për grurin dhe për elbin, sepse të korrat e arave humbën.",
"12": "<sup>12</sup> Hardhia u tha, fiku u tha, shega, hurma, molla dhe tërë drurët e fushës u thanë; nuk ka gëzim midis bijve të njerëzve.",
"13": "<sup>13</sup> Ngjeshuni me thes dhe mbani zi, o priftërinj, vajtoni, ministra të altarit. Ejani, rrini tërë natën të veshur me thasë, o ministra të Perëndisë tim, sepse oferta e ushqimit dhe libacioni u zhduk nga shtëpia e Perëndisë tuaj.",
"14": "<sup>14</sup> Shpallni agjërim, thërrisni një kuvend solemn. Mblidhni pleqtë dhe tërë banorët e vendit në shtëpinë e Zotit, Perëndisë tuaj, dhe i klithni Zotit.",
"15": "<sup>15</sup> Mjerë ajo ditë! Sepse dita e Zotit është e afërt; po, do të vijë si një shkatërrim nga i Plotfuqishmi.",
"16": "<sup>16</sup> A nuk u hoq vallë ushqimi para syve tona, dhe gëzimi dhe hareja nga shtëpia e Perëndisë tonë?",
"17": "<sup>17</sup> Farërat po thahen nën plisa, depot janë katandisur të shkreta, hambarët e grurit po rrënohen, sepse gruri u tha.",
"18": "<sup>18</sup> Sa vuajnë kafshët! Kopetë e gjedhëve sillen më kot, sepse nuk ka kullotë për ta; lëngojnë edhe kopetë e deleve.",
"19": "<sup>19</sup> Te ty, o Zot, unë këlthas, sepse një zjarr ka gllabëruar të gjitha tokat për kullotë dhe një flakë ka djegur të gjithë drurët e fushës.",
"20": "<sup>20</sup> Edhe kafshët e fushave i ngrenë sytë drejt teje, sepse rrjedhat e ujit janë tharë dhe zjarri ka gllabëruar tokat për kullotë."
},
"2": {
"1": "<sup>1</sup> I bini borisë në Sion dhe jepni kushtrimin në malin tim të shenjtë! Le të dridhen të gjithë banorët e vendit, sepse dita e Zotit po vjen, është e afërt,",
"2": "<sup>2</sup> po vjen dita e territ dhe e errësirës së dendur, ditë resh dhe mjegulle. Ashtu si përhapet agimi mbi malet, po vjen një popull i shumtë dhe i fuqishëm, të cilit kurrkush nuk i ka ngjarë më parë dhe as nuk do të ketë më kurrë për shumë breza që do të vijnë.",
"3": "<sup>3</sup> Para tij një zjarr po gllabëron dhe pas tij një flakë po djeg. Përpara tij vendi është si kopshti i Edenit; dhe pas tij është si një shkretëtirë e mjeruar; po, asgjë nuk i shpëton atij.",
"4": "<sup>4</sup> Pamja e tyre është si pamja e kuajve, dhe rendin si kuaj të shpejtë.",
"5": "<sup>5</sup> Ata hidhen mbi majat e maleve me zhurmë qerresh, si brambullima e flakës së zjarrit që djeg kallamishtet, si një popull i fortë që është rreshtuar për betejë.",
"6": "<sup>6</sup> Përpara tyre popujt përpëliten nga dhembja, çdo fytyrë zbehet.",
"7": "<sup>7</sup> Rendin si njerëz trima, ngjiten mbi muret si luftëtarë; secili ndjek rrugën e vet pa devijuar prej saj.",
"8": "<sup>8</sup> Askush nuk e shtyn fqinjin e tij, secili ndjek shtegun e vet; sulen në mes të shigjetave, por nuk plagosen.",
"9": "<sup>9</sup> I bien qytetit kryq e tërthor, rendin mbi muret, ngjiten në shtëpi, hyjnë në to nga dritaret si vjedhës.",
"10": "<sup>10</sup> Para tyre dridhet toka, dridhen qiejt, dielli dhe hëna erren dhe yjet humbin shkëlqimin e tyre.",
"11": "<sup>11</sup> Zoti bën që t'i dëgjohet zëri para ushtrisë së tij, sepse fusha e tij është shumë madhe dhe zbatuesi i fjalës së tij është i fuqishëm. Po, dita e Zotit është e madhe dhe fort e tmerrshme; kush mund ta durojë?",
"12": "<sup>12</sup> \"Prandaj tani\", thotë Zoti, \"kthehuni tek unë me gjithë zemrën tuaj, me agjërime, me lot dhe me vajtime\".",
"13": "<sup>13</sup> Grisni zemrën tuaj dhe jo rrobat tuaja dhe kthehuni tek Zoti, Perëndia juaj, sepse ai është i mëshirshëm dhe plot dhembshuri, i ngadalshëm në zemërim dhe me shumë dashamirësi, dhe pendohet për të keqen që ka dërguar.",
"14": "<sup>14</sup> Kush e di, ndoshta kthehet dhe pendohet, dhe lë pas tij një bekim, një ofertë ushqimesh dhe një libacion për Zotin, Perëndinë tuaj?",
"15": "<sup>15</sup> I bini borisë në Sion, shpallni një agjërim, thërrisni një kuvend solemn.",
"16": "<sup>16</sup> Mblidhni popullin, shenjtëroni kuvendin, mblidhni pleqtë, mblidhni fëmijët dhe foshnjat e gjirit. Të dalë dhëndri nga dhoma e tij dhe nusja nga dhoma e saj e nusërisë.",
"17": "<sup>17</sup> Midis portikut dhe altarit le të qajnë priftërinjtë, ministrat e Zotit, dhe le të thonë: \"Fale, o Zot, popullin tënd dhe mos ia lër trashëgiminë tënde turpit apo nënshtrimit nga ana e kombeve. Sepse do të thonë midis popujve: \"Ku është Perëndia i tyre?\"\".",
"18": "<sup>18</sup> Atëherë Zoti u bë xheloz për vendin e tij dhe i erdhi keq për popullin e tij.",
"19": "<sup>19</sup> Zoti do të përgjigjet dhe do t'i thotë popullit të tij: \"Ja, unë do t'ju dërgoj grurë, musht dhe vaj, do t'i keni me bollëk dhe nuk do të jeni më turpi i kombeve.",
"20": "<sup>20</sup> Do të largoj nga ju ushtrinë e veriut dhe do ta çoj në një tokë të zhuritur dhe të shkretë; pararojën e saj drejt detit lindor dhe praparojën e saj drejt detit perëndimor; era e keqe e saj do të përhapet, duhma e saj do të përhapet, sepse ka bërë gjëra të mëdha.",
"21": "<sup>21</sup> Mos kij frikë, o tokë, gëzohu, ngazëllohu, sepse Zoti ka bërë gjëra të mëdha.",
"22": "<sup>22</sup> Mos kini frikë, o kafshë të fushave, sepse tokat për kullotë do të gjelbërojnë, drurët mbajnë frytin e tyre, fiku dhe hardhia japin tërë bollëkun e tyre.",
"23": "<sup>23</sup> Prandaj gëzohuni, o bij të Sionit, dhe kënaquni, ngazëllohuni tek Zoti, Perëndia juaj, sepse ju ka dhënë shiun e parë me drejtësi, dhe do të bëjë të bjerë për ju shiun, shiun e parë dhe shiun e fundit në muajin e parë.",
"24": "<sup>24</sup> Lëmenjtë do të jenë plot me grurë dhe në butet do të grafullojë mushti dhe vaji;",
"25": "<sup>25</sup> kështu do t'ju kompensoj për të korrat që kanë ngrënë karkaleci, larva e karkalecit, bulkthi dhe krimbi, ushtria ime e madhe që kisha dërguar kundër jush.",
"26": "<sup>26</sup> Dhe ju do të hani me bollëk dhe do të ngopeni, dhe do të lëvdoni emrin e Zotit, Perëndisë tuaj, që për ju ka bërë mrekulli, dhe popullin tim nuk do ta mbulojë më turpi.",
"27": "<sup>27</sup> Atëherë ju do të pranoni që unë jam në mes të Izraelit dhe që jam Zoti, Perëndia juaj, dhe nuk ka asnjë tjetër; popullin tim nuk do ta mbulojë më turpi\".",
"28": "<sup>28</sup> \"Mbas kësaj do të ndodhë që unë do të përhap Frymën tim mbi çdo mish; bijtë tuaj dhe bijat tuaja do të profetizojnë, pleqtë tuaj do të shohin ëndrra, të rinjtë tuaj do të kenë vegime.",
"29": "<sup>29</sup> Në ato ditë do të përhap Frymën time edhe mbi shërbëtorët dhe shërbëtoret.",
"30": "<sup>30</sup> Do të bëj mrekulli në qiejt dhe mbi tokë: gjak, zjarr dhe shtëllunga tymi.",
"31": "<sup>31</sup> Dielli do të shndërrohet në terr dhe hëna në gjak, para se të vijë dita e madhe dhe e tmerrshme e Zotit.",
"32": "<sup>32</sup> Dhe do të ndodhë që kushdo që do t'i drejtohet emrit të Zotit do të shpëtojë, sepse në malin Sion dhe në Jeruzalem do të ketë shpëtim, siç e ka thënë Zoti, edhe për ata që kanë mbetur gjallë dhe që Zoti do t'i thërrasë\"."
},
"3": {
"1": "<sup>1</sup> \"Sepse ja, në ato ditë dhe në atë kohë, kur do të bëj që të kthehen nga robëria ata të Judës dhe të Jeruzalemit,",
"2": "<sup>2</sup> do të mbledh tërë kombet dhe do t'i bëj të zbresin në luginën e Jozafatit, dhe atje do të zbatoj gjykimin tim mbi ta, për Izraelin, popullin tim dhe trashëgiminë time, që e kanë shpërndarë midis kombeve, duke e ndarë kështu vendin tim.",
"3": "<sup>3</sup> Kanë hedhur shortin mbi popullin tim, kanë dhënë një djalë në këmbim të një prostitute dhe kanë shitur një vajzë në këmbim të verës, që të mund të pinin.",
"4": "<sup>4</sup> Përveç kësaj çfarë jeni ju për mua, Tiro dhe Sidoni, dhe ju, gjithë krahinat e Filistisë? Mos doni vallë të hakmerreni me mua për ndonjë gjë që kam bërë? Por në rast se hakmerreni, do të bëj që të bjerë shpejt dhe pa ngurrim mbi kokën tuaj ligësia që keni bërë.",
"5": "<sup>5</sup> Sepse ju keni marrë argjendin dhe arin tim dhe keni çuar në tempujt tuaj pjesën më të mirë të sendeve të mia të çmuara,",
"6": "<sup>6</sup> dhe ua keni shitur bijtë e Judës dhe të Jeruzalemit bijve të javanitëve, për t'i larguar nga vendi i tyre.",
"7": "<sup>7</sup> Ja, unë do t'i bëj të zgjohen nga vendi ku i keni shitur dhe do të bëj që të bjerë mbi kokën tuaj ajo që keni bërë.",
"8": "<sup>8</sup> Do t'i shes bijtë tuaj dhe bijat tuaja në duart e bijve të Judës, që do t'ua shesin sabenjve, një komb i largët, sepse Zoti ka folur\".",
"9": "<sup>9</sup> Shpallni këtë midis kombeve: \"Përgatitni luftën, zgjoni njerëzit trima, le të afrohen, le të dalin gjithë luftëtarët!",
"10": "<sup>10</sup> Farkëtoni shpata me ploret tuaj dhe ushta me drapërinjtë tuaj. I dobëti le të thotë: \"Jam i Fortë!\"\".",
"11": "<sup>11</sup> Nxitoni dhe ejani, kombe rreth e rrotull, dhe mblidhuni! O Zot, bëj që të zbresin atje njerëzit e tu trima!",
"12": "<sup>12</sup> \"Le të ngrihen dhe të dalin kombet në luginën e Jozafatit, sepse atje unë do të ulem për të gjykuar të gjitha kombet që janë përreth.",
"13": "<sup>13</sup> Merrni në dorë drapërinjtë, sepse të korrat janë gati. Ejani, zbrisni, sepse trokulla është plot, butet grafullojnë, sepse e madhe është ligësia e tyre\".",
"14": "<sup>14</sup> Turma pas turmash në Luginën e vendimit. Sepse dita e Zotit është e afërt, në Luginën e vendimit\".",
"15": "<sup>15</sup> Dielli dhe hëna po erren dhe yjet po humbin shkëlqimin e tyre.",
"16": "<sup>16</sup> Zoti do të vrumbullojë nga Sioni dhe do të bëjë që t'i dëgjohet zëri nga Jeruzalemi, aq sa qiejt dhe toka do të dridhen. Por Zoti do të jetë një strehë për popullin e tij dhe një fortesë për bijtë e Izraelit.",
"17": "<sup>17</sup> \"Atëherë ju do të pranoni që unë jam Zoti, Perëndia juaj, që banon në Sion, mali im i shenjtë. Kështu Jeruzalemi do të jetë i shenjtë dhe të huajt nuk do të kalojnë më andej\".",
"18": "<sup>18</sup> Atë ditë do të ndodhë që malet të pikojnë musht, qumështi do të rrjedhë nga kodrat dhe uji do të rrjedhë në të gjitha rrëketë e Judës. Nga shtëpia e Zotit do të dalë një burim, që do të ujitë luginën e Shitimit.",
"19": "<sup>19</sup> \"Egjipti do të bëhet shkreti dhe Edomi një shkretëtirë e mjeruar për shkak të dhunës kundër bijve të Judës, sepse kanë derdhur gjak të pafajshëm në vendin e tyre.",
"20": "<sup>20</sup> Por Juda do të mbetet përjetë, edhe Jeruzalemi brez pas brezi.",
"21": "<sup>21</sup> Do t'i pastroj nga gjaku i tyre i derdhur, nga i cili nuk i kisha pastruar, dhe Zoti do të banojë në Sion\"."
}
}
};
module.exports = book; | var book = {
"name": "Joeli",
"numChapters": 3,
"chapters": {
"1": { | random_line_split | |
simulationlf.py | from time import process_time
import numpy as np
from scipy.sparse import block_diag
from app.models.timestamp import Timestamp
import os
class SimulationLF:
def __init__(self, nb=1, tol=0.01, delta=Timestamp.QUARTERS):
"""
        :param nb: number of iterations to run for the Monte Carlo loop
:param tol: the value to which the voltages have to converge
:param delta: a Timestamp value which can be: Timestamp.QUARTERS, Timestamp.HOURS, Timestamp.DAYS.
"""
if nb > 0:
self.__nb_iterations = nb
if tol >= 0:
self.__tolerance = tol
if delta in Timestamp:
self.__delta_time = delta
self.__q = self.calculate_Q()
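    # Construction sketch (hypothetical settings): SimulationLF(nb=10, tol=0.001,
    # delta=Timestamp.HOURS) would use 24 samples per day, while the defaults keep a
    # single iteration, a 0.01 relative tolerance and quarter-hour steps. Arguments
    # that fail the guards above are simply never assigned.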
def calculate_Q(self):
"""
This method will check the value of the simulation's delta_time and will set the Q index value for Monte Carlo
loop.
:return: q, which can be 96 if we use Quarters, 24 for Hours and 1 for Days.
"""
if self.__delta_time is Timestamp.QUARTERS:
q = 96
elif self.__delta_time is Timestamp.HOURS:
q = 24
else:
q = 1
return q
# GETTERS/ACCESSORS
def get_nb_iterations(self): return self.__nb_iterations
def get_tolerance(self): return self.__tolerance
def get_delta_time(self): return self.__delta_time
# SETTERS/MUTATORS
def set_nb_iterations(self, nb): self.__nb_iterations = nb
def set_tolerance(self, t): self.__tolerance = t
def set_delta_time(self, d): self.__delta_time = d
def grid_definition(self, network):
zeros = np.zeros
        # As each bracket contains a node and a branch, the number of brackets corresponds
        # to the number of nodes.
nb_brackets = network.get_nb_brackets()-1
# Boolean vector of nodes phases [ [1, 1, 1], [1, 1, 1], ...]
vec_phases = np.ones((1, 3 * nb_brackets))
vec_phases = vec_phases[0]
# Number of phases for each node/bracket
num_phases = zeros((1, nb_brackets+1))
num_phases = num_phases[0]
        # Parent line impedances (intermediate step for the Zbr construction)
z = [0 for i in range(nb_brackets)]
# Power matrix (3 possible phases, number of nodes)
# p = zeros(3, nb_brackets)
# K = cell(nb_brackets, nb_brackets): This kind of Matlab variable can be translated into a nested list.
K = [[np.zeros((3, 3), int) for j in range(nb_brackets)]
for i in range(nb_brackets)]
# As we are going to use the list a certain number of times, we are assigning it to a variable for more
# efficiency
brackets = network.get_brackets()
vec_phases[0:3] = brackets[0].get_branch().get_phases()
num_phases[0] = np.sum(brackets[0].get_branch().get_phases())
for i in range(1, nb_brackets+1):
current_bracket = brackets[i-1]
vec_phases[3*(i-1):3*(i-1)+3] = current_bracket.get_branch().get_phases()
num_phases[i] = np.sum(current_bracket.get_branch().get_phases())
vec_phases_index = np.nonzero(vec_phases)[0]
# We won't work on the slack node because it is pointless, so we're only taking nodes starting with the second
brackets = brackets[1:]
for i in range(nb_brackets):
current_bracket = brackets[i]
# p = self.power_definition()
z[i] = (current_bracket.get_branch().calculate_impedance())
for j in range(nb_brackets):
# If there is a path, we change K[i,j,k,k] to -1
if i + 2 in network.find_path(0, j):
for k in range(3):
K[i][j][k][k] = -1
K = np.vstack([np.hstack(c) for c in K])
        # Keep both the rows and the columns of the phases that are actually present.
        K = K[np.ix_(vec_phases_index, vec_phases_index)]
# z = np.reshape(z, (19,1))
Zbr = block_diag(z).toarray()
        Zbr = Zbr[np.ix_(vec_phases_index, vec_phases_index)]
# Transforming all of our matrixes into real matrixes. At this point, they were just arrays.
K = np.mat(K)
Zbr = np.mat(Zbr)
# Zbr = Zbr[vec_phases_index[:], vec_phases_index[:]]
# np.resize(Zbr, (len(Zbr)*3, len(Zbr)*3))
# End of Grid_definition
return {
'K': K,
'Zbr': Zbr,
'vec_phases_index': vec_phases_index
}
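    # Sketch of the structures returned above (illustrative): K is made of 3x3 blocks
    # where block (i, j) is minus the identity whenever branch i lies on the path from
    # the slack node to node j and zero otherwise, while Zbr stacks the per-branch 3x3
    # impedance matrices along its diagonal; both are then restricted to the rows and
    # columns of the phases that are actually present.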
# LOAD FLOW METHOD
def load_flow(self, network):
"""
This method will implement the Load Flow algorithm.
        :param network: the network on which we want to run the load flow.
        :return: dic: A dictionary containing every matrix/array involved in the load flow resolution.
"""
# main.m
alpha = 1
nb_brackets = network.get_nb_brackets()-1
# Battery settings
bat_node = 2
bat_phase = 2
bat = (bat_node-2)*3 + bat_phase
Ebat = 0
Ebat_max = 120000
Pbat = 60000
# End
# Grid_definition.m
grid = self.grid_definition(network)
K = grid['K']
Zbr = grid['Zbr']
vec_phases_index = grid['vec_phases_index']
# End of Grid_Definition
brackets = network.get_brackets()[1:]
network_nodes = [brackets[i].get_node() for i in range(nb_brackets)]
# load_flow.m
Ibus = np.zeros((3 * nb_brackets), dtype=np.complex128)
Ibus = Ibus[:, np.newaxis]
Vnl = network.get_slack_voltage()
Vnl = Vnl[vec_phases_index]
Vbus = Vnl
Vbr_prev = Vnl
# If we don't define Tmp as a N-Dim Array, the Tile function will broadcast it to a N-Dim Array of shape
# (1, 1, 57) instead of letting it be (57, 1, 1). This will result by producing a new matrix of shape
# (1, 570, 96). I guess that the tile function will perform some multiplication on the dimensions
# and then will join'em. If Vnl(57,1) & Newmat(10,96):
# Result = (1, 57*10, 96)... Which is not really what we want.
Tmp = (Vnl * 0)
Tmp = Tmp[:, np.newaxis]
V = np.tile(Tmp, (1,1,1))
I = np.tile(Tmp, (1,1,1))
# We don't use the Tmp matrix here because Vnl won't be broadcasted to a 3D matrix but to a 1D. So the bug
# that has been resolved earlier won't happen here
# Imean = np.tile(Vnl*0, (96))
# Vmean = np.tile(Vnl*0, (96))
powers = []
for node in network_nodes:
n_pow = []
for user in node.get_users():
n_pow.append(user.get_P())
powers.extend(n_pow)
"""
        Here we bind the NumPy functions used inside the load flow loop to local names,
        to gain a little efficiency on repeated attribute lookups.
"""
# NumPy Functions
conj = np.conj
divide = np.divide
absolute = np.abs
less = np.less
zeros = np.zeros
# Here is the wrapping of the load flow:
# h = 0, nb iterations
# q = 0, 96
P = np.asarray(powers)
P = divide(P, 2)
        Q = P * 0  # reactive powers are all taken as zero here
# Initializing arrays to optimize
Ibr = zeros((nb_brackets, 1))
Vbr = zeros((nb_brackets, 1))
# Before we enter the loop, we make sure we are going to work with matrices instead of arrays.
Ibr = np.matrix(Ibr)
Vbr = np.matrix(Vbr)
# LOAD FLOW LOOP
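        # One sweep per iteration, matching the statements below: injection currents
        # are built from the complex powers, Ibus[i] = -conj(S_i / Vbus[i]); branch
        # currents follow from the topology, Ibr = K * Ibus; branch voltage drops from
        # the impedances, Vbr = Zbr * Ibr; and node voltages are rebuilt as
        # Vbus = Vnl + K^H * Vbr. The loop stops once the relative change of Vbr drops
        # below the configured tolerance.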
k = 0
t = process_time()
while True:
k += 1
bal = 0
for i in range(len(P)):
if k == 1:
Ibus[i] = -(np.matrix(np.complex(P[i], Q[i])/Vbus[i]).conj())
else:
Ibus[i] = -(np.matrix(np.complex(P[i], Q[i]) / Vbus[i]).conj())
if i % 3 == bat:
bal = bal + P[i]
if bat != 0:
if bal < 0:
if Ebat < Ebat_max:
Ibus[bat] = min([conj(-Pbat/Vbus[bat]),
conj(bal/Vbus[bat]),
conj(-(Ebat_max - Ebat)/(Vbus[bat]*0.25))])
Ebat += absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25
elif Ebat > 0:
Ibus[bat] = min([conj(Pbat/Vbus[bat]),
conj(bal/Vbus[bat]),
conj(Ebat/(Vbus[bat]*0.25))])
                    Ebat -= absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25
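            # Battery rule used above (as coded): with a local surplus (bal < 0) the
            # battery charges, limited by its power rating Pbat, by the surplus itself
            # and by the free capacity Ebat_max - Ebat; otherwise, while energy is
            # stored, it discharges under the mirrored limits. The 0.25 factor turns
            # power over a quarter-hour step into energy.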
Ibr = K * Ibus
Vbr = Zbr * Ibr
if (less(divide(absolute(Vbr - Vbr_prev), absolute(Vbr + 0.0000000000000001)), self.__tolerance)).all():
break
Vbr = Vbr_prev + (alpha * (Vbr - Vbr_prev))
Vbr_prev = Vbr
Vbus = Vnl + np.dot(K.conj().T, Vbr)
Vbus = Vnl + np.dot(K.conj().T, Vbr)
V[:] = Vbus[:, :, np.newaxis]
I[:] = Ibr[:, :, np.newaxis]
        # Pbr and Qbr need to be two distinct float arrays; chaining the assignment
        # would make them two names for the same integer array.
        Pbr = np.zeros((nb_brackets, len(vec_phases_index), 2))
        Qbr = np.zeros((nb_brackets, len(vec_phases_index), 2))
for i in range(nb_brackets):
for j in range(len(vec_phases_index)):
i_to_j = self.powerflow(Vbus[i], Ibr[i])
j_to_i = self.powerflow(Vbus[i+1], Ibr[i])
Pbr[i][j][0] = i_to_j['active']
Pbr[i][j][1] = j_to_i['active']
Qbr[i][j][0] = i_to_j['reactive']
Qbr[i][j][1] = j_to_i['reactive']
print(np.shape(Pbr), Qbr.shape)
# END OF LOAD FLOW
# End of load_flow.m
print("Process executed in", process_time() - t, "s")
dic = {
'Ibus_bat': Ibus[bat],
'Ebat': Ebat,
'V': V,
'Vbr': Vbr,
'Vbus': Vbus,
'I': I,
'Ibus': Ibus,
'Ibr': Ibr,
'Zbr': Zbr,
'P': P,
'K': K,
'Vnl': Vnl,
'Pbr': Pbr,
'Qbr': Qbr
}
return dic
def powerflow(self, voltage, intensity, conj=np.conj, real=np.real, imag=np.imag): | 'reactive': imag(flow)
}
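    # powerflow returns the two components of the complex power S = V * conj(I):
    # 'active' is Re(S) and 'reactive' is Im(S); load_flow uses it above to fill the
    # per-branch Pbr and Qbr arrays.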
def printMenu(self, network):
np.set_printoptions(threshold=np.nan, suppress=True, precision=10)
# import re
while True:
# This block is relevant if we use a timestamp.
# It will check the user's input.
# If you uncomment the block, don't forget to uncomment "import re"
"""print("Which time do you want to simulate over? (hh:mm:ss) (q or Q to quit)")
try:
timestamp = str(input())
if timestamp is 'q' or timestamp is 'Q':
print("Goodbye!")
break
# The following is called a Regular Expression:
# It will check if a user input matches a given pattern.
# In this case, the pattern is:
# [0-2 digit][0-9 digit]:[0-5 digit][0-9 digit]:[0 digit][0 digit]
# (?!00:00:00) will ensure that the following pattern won't result in the string "00:00:00"
# (We use the format 01:00:00 -> 24:00:00 and not 00:00:00 -> 23:59:59)
# The first group ([0-1][0-9]|2[0-3]):
# It will check if we enter an hour between 00 and 19, or between 20 and 23.
# Then if we entered the first group, it will check if we enters mins and secs between 00 and 59.
# Finally, if we did not enter a matching hour (between 00 and 23), it will check if the expression
# matches the last group: 24:00:00.
if not re.compile("(?!00:00:00)((([0-1][0-9]|2[0-3]):[0-5][0-9]:00)|(24:00:00))") \
.match(timestamp):
print("Please enter a valid time : hh:mm:ss")
continue
except TypeError:
continue"""
print(" GELEC MAIN MENU")
print("Low Voltage Tool ------------------------- Debug Mode")
print("-----------------------------------------------------")
print("What would you want to do?")
print(" 0. Exit debug mode")
print(" 1. Print Network")
print(" 2. Print Voltages (V[i][j]([h][q]))")
print(" 3. Print Voltages at branches (Vbr)")
print(" 4. Print Voltages at nodes (Vbus)")
print(" 5. Print Intensities (I[i][j]([h][q]))")
print(" 6. Print Intensities at nodes (Ibus)")
print(" 7. Print Intensities at branches(Ibr)")
print(" 8. Print Power Flows on Branches")
print(" 9. Print Topology (K)")
print("10. Print Voltages at slack node (Vnl)")
print("11. Print Impendances on branches (Zbr)")
print("12. Print Intensity at battery node(Ibus[bat])")
print("13. Print Energy at battery(Ebat)")
try:
choice = int(input())
except ValueError:
continue
if choice == 0:
print("Goodbye!")
break
if choice == 1:
from anytree.render import RenderTree
root = network.get_slack_node()
tree = RenderTree(root)
for pre, _, node in tree:
print("%s%s" % (pre, "Node " + str(node.get_id())))
else:
print("calculating load flow...")
load_flow = self.load_flow(network)
if choice == 2:
voltages = load_flow['V']
mods = []
phases = []
print(voltages)
"""for el in voltages:
mods.append(np.absolute(el))
phases.append(np.angle(el))
for i, e in enumerate(voltages):
print("Node %s: %s e^%s" % ((i//3)+1,mods[i][0], phases[i][0]))"""
print(np.shape(voltages))
elif choice == 3:
Vbr = load_flow['Vbr']
mods = []
phases = []
for el in Vbr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vbr):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vbr))
elif choice == 4:
Vbus = load_flow['Vbus']
mods = []
phases = []
for el in Vbus:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vbus):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vbus))
elif choice == 5:
intensities = load_flow['I']
for i, e in enumerate(intensities):
print("Node", (i//3)+1, e)
print(np.shape(intensities))
elif choice == 6:
Ibus = load_flow['Ibus']
mods = []
phases = []
for el in Ibus:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Ibus):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Ibus))
elif choice == 7:
Ibr = load_flow['Ibr']
mods = []
phases = []
for el in Ibr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Ibr):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Ibr))
elif choice == 8:
Pbr = load_flow['Pbr']
Qbr = load_flow['Qbr']
print("Active powers:\n")
for i, e in enumerate(Pbr):
print("Branch between %s and %s: %s" %(i, i+1, e))
print("Reactive power:\n", Qbr)
elif choice == 9:
K = load_flow['K']
print(K)
print(np.shape(K))
elif choice == 10:
Vnl = load_flow['Vnl']
mods = []
phases = []
for el in Vnl:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vnl):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vnl))
elif choice == 11:
Zbr = load_flow['Zbr']
Zbr = Zbr[np.nonzero(Zbr)]
mods = []
phases = []
for el in Zbr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Zbr):
print("Modules\n: %s \n Phases:\n%s" % (mods[i][0], phases[i][0]))
print(np.shape(Zbr))
elif choice == 12:
Ibat = load_flow['Ibus_bat']
print(Ibat)
print(Ibat.shape)
elif choice == 13:
Ebat = load_flow['Ebat']
print("Ebat =", Ebat)
else:
print("Please enter a valid number (see Main Menu)")
if __name__ == '__main__':
from app.models.singleton import NetworkManager
from app.serialization.serialization import Serialization
sim = SimulationLF(tol=0.01)
manager = NetworkManager()
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Block comment to uncomment before packaging
"""while True:
print("Veuillez entrer le chemin du réseau à charger:")
path = str(input())
try:
path = os.path.normpath(path)
break
except Exception:
continue"""
dir = os.path.dirname(__file__)
path = os.path.normpath(os.path.join(dir, '..', '..', 'ressources', 'Network_Flobecq.xlsx'))
t0 = process_time()
network = Serialization.load_network_from_XLSX(path)
t1 = process_time()
print("Network read in", t1 - t0, "seconds")
manager.add_network(network)
sim.printMenu(network) | flow = voltage * conj(intensity)
return {
'active': real(flow), | random_line_split |
simulationlf.py | from time import process_time
import numpy as np
from scipy.sparse import block_diag
from app.models.timestamp import Timestamp
import os
class SimulationLF:
def __init__(self, nb=1, tol=0.01, delta=Timestamp.QUARTERS):
"""
:param nb: for number of iterations to do for Monte Carlo
:param tol: the value to which the voltages have to converge
:param delta: a Timestamp value which can be: Timestamp.QUARTERS, Timestamp.HOURS, Timestamp.DAYS.
"""
if nb > 0:
self.__nb_iterations = nb
if tol >= 0:
self.__tolerance = tol
if delta in Timestamp:
self.__delta_time = delta
self.__q = self.calculate_Q()
def calculate_Q(self):
"""
This method will check the value of the simulation's delta_time and will set the Q index value for Monte Carlo
loop.
:return: q, which can be 96 if we use Quarters, 24 for Hours and 1 for Days.
"""
if self.__delta_time is Timestamp.QUARTERS:
q = 96
elif self.__delta_time is Timestamp.HOURS:
q = 24
else:
q = 1
return q
# GETTERS/ACCESSORS
def get_nb_iterations(self): return self.__nb_iterations
def get_tolerance(self): |
def get_delta_time(self): return self.__delta_time
# SETTERS/MUTATORS
def set_nb_iterations(self, nb): self.__nb_iterations = nb
def set_tolerance(self, t): self.__tolerance = t
def set_delta_time(self, d): self.__delta_time = d
def grid_definition(self, network):
zeros = np.zeros
# As we are working with brackets that are containing a node and a branch, the number of brackets corresponds
# to the number of nodes
nb_brackets = network.get_nb_brackets()-1
# Boolean vector of nodes phases [ [1, 1, 1], [1, 1, 1], ...]
vec_phases = np.ones((1, 3 * nb_brackets))
vec_phases = vec_phases[0]
# Number of phases for each node/bracket
num_phases = zeros((1, nb_brackets+1))
num_phases = num_phases[0]
# Parent line impedances (intermediate step for Zbr construction
z = [0 for i in range(nb_brackets)]
# Power matrix (3 possible phases, number of nodes)
# p = zeros(3, nb_brackets)
# K = cell(nb_brackets, nb_brackets): This kind of Matlab variable can be translated into a nested list.
K = [[np.zeros((3, 3), int) for j in range(nb_brackets)]
for i in range(nb_brackets)]
# As we are going to use the list a certain number of times, we are assigning it to a variable for more
# efficiency
brackets = network.get_brackets()
vec_phases[0:3] = brackets[0].get_branch().get_phases()
num_phases[0] = np.sum(brackets[0].get_branch().get_phases())
for i in range(1, nb_brackets+1):
current_bracket = brackets[i-1]
vec_phases[3*(i-1):3*(i-1)+3] = current_bracket.get_branch().get_phases()
num_phases[i] = np.sum(current_bracket.get_branch().get_phases())
vec_phases_index = np.nonzero(vec_phases)[0]
# We won't work on the slack node because it is pointless, so we're only taking nodes starting with the second
brackets = brackets[1:]
for i in range(nb_brackets):
current_bracket = brackets[i]
# p = self.power_definition()
z[i] = (current_bracket.get_branch().calculate_impedance())
for j in range(nb_brackets):
# If there is a path, we change K[i,j,k,k] to -1
if i + 2 in network.find_path(0, j):
for k in range(3):
K[i][j][k][k] = -1
K = np.vstack([np.hstack(c) for c in K])
        # Keep both the rows and the columns of the phases that are actually present.
        K = K[np.ix_(vec_phases_index, vec_phases_index)]
# z = np.reshape(z, (19,1))
Zbr = block_diag(z).toarray()
        Zbr = Zbr[np.ix_(vec_phases_index, vec_phases_index)]
# Transforming all of our matrixes into real matrixes. At this point, they were just arrays.
K = np.mat(K)
Zbr = np.mat(Zbr)
# Zbr = Zbr[vec_phases_index[:], vec_phases_index[:]]
# np.resize(Zbr, (len(Zbr)*3, len(Zbr)*3))
# End of Grid_definition
return {
'K': K,
'Zbr': Zbr,
'vec_phases_index': vec_phases_index
}
# LOAD FLOW METHOD
def load_flow(self, network):
"""
This method will implement the Load Flow algorithm.
        :param network: the network on which we want to run the load flow.
        :return: dic: A dictionary containing every matrix/array involved in the load flow resolution.
"""
# main.m
alpha = 1
nb_brackets = network.get_nb_brackets()-1
# Battery settings
bat_node = 2
bat_phase = 2
bat = (bat_node-2)*3 + bat_phase
Ebat = 0
Ebat_max = 120000
Pbat = 60000
# End
# Grid_definition.m
grid = self.grid_definition(network)
K = grid['K']
Zbr = grid['Zbr']
vec_phases_index = grid['vec_phases_index']
# End of Grid_Definition
brackets = network.get_brackets()[1:]
network_nodes = [brackets[i].get_node() for i in range(nb_brackets)]
# load_flow.m
Ibus = np.zeros((3 * nb_brackets), dtype=np.complex128)
Ibus = Ibus[:, np.newaxis]
Vnl = network.get_slack_voltage()
Vnl = Vnl[vec_phases_index]
Vbus = Vnl
Vbr_prev = Vnl
        # If we don't define Tmp as an N-dim (3-D) array, np.tile broadcasts it to
        # shape (1, 1, 57) instead of keeping it (57, 1, 1), producing a new matrix of
        # shape (1, 570, 96): tile seems to multiply the matching dimensions and then
        # join them. With Vnl of shape (57, 1) and a new matrix of shape (10, 96), the
        # result would be (1, 57*10, 96), which is not what we want.
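        # A quick sanity check of np.tile's promotion rule (assumed 57 entries, as in
        # the comment above):
        #   np.tile(np.zeros(57), (1, 1, 1)).shape         == (1, 1, 57)
        #   np.tile(np.zeros((57, 1, 1)), (1, 1, 1)).shape == (57, 1, 1)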
Tmp = (Vnl * 0)
Tmp = Tmp[:, np.newaxis]
V = np.tile(Tmp, (1,1,1))
I = np.tile(Tmp, (1,1,1))
        # We don't reuse the Tmp matrix here because Vnl would be broadcast to a 1-D
        # array rather than a 3-D one, so the bug fixed above cannot occur here.
# Imean = np.tile(Vnl*0, (96))
# Vmean = np.tile(Vnl*0, (96))
powers = []
for node in network_nodes:
n_pow = []
for user in node.get_users():
n_pow.append(user.get_P())
powers.extend(n_pow)
"""
Here, we are assigning the NumPy functions we are going to use into the load flow loop to gain
a little bit more efficiency.
"""
# NumPy Functions
conj = np.conj
divide = np.divide
absolute = np.abs
less = np.less
zeros = np.zeros
# Here is the wrapping of the load flow:
# h = 0, nb iterations
# q = 0, 96
P = np.asarray(powers)
P = divide(P, 2)
        Q = np.zeros_like(P)  # the loads' reactive power is taken as zero
# Initializing arrays to optimize
Ibr = zeros((nb_brackets, 1))
Vbr = zeros((nb_brackets, 1))
# Before we enter the loop, we make sure we are going to work with matrices instead of arrays.
Ibr = np.matrix(Ibr)
Vbr = np.matrix(Vbr)
# LOAD FLOW LOOP
k = 0
t = process_time()
while True:
k += 1
bal = 0
            for i in range(len(P)):
                # Nodal current injection: I = -conj(S / V) with S = P + jQ.
                Ibus[i] = -conj(complex(P[i], Q[i]) / Vbus[i])
if i % 3 == bat:
bal = bal + P[i]
if bat != 0:
if bal < 0:
if Ebat < Ebat_max:
Ibus[bat] = min([conj(-Pbat/Vbus[bat]),
conj(bal/Vbus[bat]),
conj(-(Ebat_max - Ebat)/(Vbus[bat]*0.25))])
Ebat += absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25
elif Ebat > 0:
Ibus[bat] = min([conj(Pbat/Vbus[bat]),
conj(bal/Vbus[bat]),
conj(Ebat/(Vbus[bat]*0.25))])
                        Ebat -= absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25
Ibr = K * Ibus
Vbr = Zbr * Ibr
            if (less(divide(absolute(Vbr - Vbr_prev), absolute(Vbr + 1e-16)), self.__tolerance)).all():
break
Vbr = Vbr_prev + (alpha * (Vbr - Vbr_prev))
Vbr_prev = Vbr
Vbus = Vnl + np.dot(K.conj().T, Vbr)
Vbus = Vnl + np.dot(K.conj().T, Vbr)
V[:] = Vbus[:, :, np.newaxis]
I[:] = Ibr[:, :, np.newaxis]
        # Pbr and Qbr must be distinct arrays (chained assignment would alias them).
        Pbr = np.zeros((nb_brackets, len(vec_phases_index), 2))
        Qbr = np.zeros((nb_brackets, len(vec_phases_index), 2))
for i in range(nb_brackets):
for j in range(len(vec_phases_index)):
i_to_j = self.powerflow(Vbus[i], Ibr[i])
j_to_i = self.powerflow(Vbus[i+1], Ibr[i])
Pbr[i][j][0] = i_to_j['active']
Pbr[i][j][1] = j_to_i['active']
Qbr[i][j][0] = i_to_j['reactive']
Qbr[i][j][1] = j_to_i['reactive']
print(np.shape(Pbr), Qbr.shape)
# END OF LOAD FLOW
# End of load_flow.m
print("Process executed in", process_time() - t, "s")
dic = {
'Ibus_bat': Ibus[bat],
'Ebat': Ebat,
'V': V,
'Vbr': Vbr,
'Vbus': Vbus,
'I': I,
'Ibus': Ibus,
'Ibr': Ibr,
'Zbr': Zbr,
'P': P,
'K': K,
'Vnl': Vnl,
'Pbr': Pbr,
'Qbr': Qbr
}
return dic
def powerflow(self, voltage, intensity, conj=np.conj, real=np.real, imag=np.imag):
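        # Complex power S = V * conj(I): its real part is the active power and its
        # imaginary part the reactive power.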
flow = voltage * conj(intensity)
return {
'active': real(flow),
'reactive': imag(flow)
}
def printMenu(self, network):
        np.set_printoptions(threshold=np.inf, suppress=True, precision=10)
# import re
while True:
# This block is relevant if we use a timestamp.
# It will check the user's input.
# If you uncomment the block, don't forget to uncomment "import re"
"""print("Which time do you want to simulate over? (hh:mm:ss) (q or Q to quit)")
try:
timestamp = str(input())
            if timestamp == 'q' or timestamp == 'Q':
print("Goodbye!")
break
# The following is called a Regular Expression:
# It will check if a user input matches a given pattern.
# In this case, the pattern is:
# [0-2 digit][0-9 digit]:[0-5 digit][0-9 digit]:[0 digit][0 digit]
# (?!00:00:00) will ensure that the following pattern won't result in the string "00:00:00"
# (We use the format 01:00:00 -> 24:00:00 and not 00:00:00 -> 23:59:59)
# The first group ([0-1][0-9]|2[0-3]):
# It will check if we enter an hour between 00 and 19, or between 20 and 23.
            # Then, if the first group matched, it checks that minutes and seconds are between 00 and 59.
# Finally, if we did not enter a matching hour (between 00 and 23), it will check if the expression
# matches the last group: 24:00:00.
if not re.compile("(?!00:00:00)((([0-1][0-9]|2[0-3]):[0-5][0-9]:00)|(24:00:00))") \
.match(timestamp):
print("Please enter a valid time : hh:mm:ss")
continue
except TypeError:
continue"""
print(" GELEC MAIN MENU")
print("Low Voltage Tool ------------------------- Debug Mode")
print("-----------------------------------------------------")
print("What would you want to do?")
print(" 0. Exit debug mode")
print(" 1. Print Network")
print(" 2. Print Voltages (V[i][j]([h][q]))")
print(" 3. Print Voltages at branches (Vbr)")
print(" 4. Print Voltages at nodes (Vbus)")
print(" 5. Print Intensities (I[i][j]([h][q]))")
print(" 6. Print Intensities at nodes (Ibus)")
print(" 7. Print Intensities at branches(Ibr)")
print(" 8. Print Power Flows on Branches")
print(" 9. Print Topology (K)")
print("10. Print Voltages at slack node (Vnl)")
print("11. Print Impendances on branches (Zbr)")
print("12. Print Intensity at battery node(Ibus[bat])")
print("13. Print Energy at battery(Ebat)")
try:
choice = int(input())
except ValueError:
continue
if choice == 0:
print("Goodbye!")
break
if choice == 1:
from anytree.render import RenderTree
root = network.get_slack_node()
tree = RenderTree(root)
for pre, _, node in tree:
print("%s%s" % (pre, "Node " + str(node.get_id())))
else:
print("calculating load flow...")
load_flow = self.load_flow(network)
if choice == 2:
voltages = load_flow['V']
mods = []
phases = []
print(voltages)
"""for el in voltages:
mods.append(np.absolute(el))
phases.append(np.angle(el))
for i, e in enumerate(voltages):
print("Node %s: %s e^%s" % ((i//3)+1,mods[i][0], phases[i][0]))"""
print(np.shape(voltages))
elif choice == 3:
Vbr = load_flow['Vbr']
mods = []
phases = []
for el in Vbr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vbr):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vbr))
elif choice == 4:
Vbus = load_flow['Vbus']
mods = []
phases = []
for el in Vbus:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vbus):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vbus))
elif choice == 5:
intensities = load_flow['I']
for i, e in enumerate(intensities):
print("Node", (i//3)+1, e)
print(np.shape(intensities))
elif choice == 6:
Ibus = load_flow['Ibus']
mods = []
phases = []
for el in Ibus:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Ibus):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Ibus))
elif choice == 7:
Ibr = load_flow['Ibr']
mods = []
phases = []
for el in Ibr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Ibr):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Ibr))
elif choice == 8:
Pbr = load_flow['Pbr']
Qbr = load_flow['Qbr']
print("Active powers:\n")
for i, e in enumerate(Pbr):
print("Branch between %s and %s: %s" %(i, i+1, e))
print("Reactive power:\n", Qbr)
elif choice == 9:
K = load_flow['K']
print(K)
print(np.shape(K))
elif choice == 10:
Vnl = load_flow['Vnl']
mods = []
phases = []
for el in Vnl:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vnl):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vnl))
elif choice == 11:
Zbr = load_flow['Zbr']
Zbr = Zbr[np.nonzero(Zbr)]
mods = []
phases = []
for el in Zbr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Zbr):
print("Modules\n: %s \n Phases:\n%s" % (mods[i][0], phases[i][0]))
print(np.shape(Zbr))
elif choice == 12:
Ibat = load_flow['Ibus_bat']
print(Ibat)
print(Ibat.shape)
elif choice == 13:
Ebat = load_flow['Ebat']
print("Ebat =", Ebat)
else:
print("Please enter a valid number (see Main Menu)")
if __name__ == '__main__':
from app.models.singleton import NetworkManager
from app.serialization.serialization import Serialization
sim = SimulationLF(tol=0.01)
manager = NetworkManager()
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Block comment to uncomment before packaging
"""while True:
print("Veuillez entrer le chemin du réseau à charger:")
path = str(input())
try:
path = os.path.normpath(path)
break
except Exception:
continue"""
        base_dir = os.path.dirname(__file__)  # avoid shadowing the built-in dir()
        path = os.path.normpath(os.path.join(base_dir, '..', '..', 'ressources', 'Network_Flobecq.xlsx'))
t0 = process_time()
network = Serialization.load_network_from_XLSX(path)
t1 = process_time()
print("Network read in", t1 - t0, "seconds")
manager.add_network(network)
sim.printMenu(network)
| return self.__tolerance | identifier_body |
simulationlf.py | from time import process_time
import numpy as np
from scipy.sparse import block_diag
from app.models.timestamp import Timestamp
import os
class SimulationLF:
def __init__(self, nb=1, tol=0.01, delta=Timestamp.QUARTERS):
"""
        :param nb: the number of Monte Carlo iterations to run
        :param tol: the relative tolerance the voltages must reach for convergence
:param delta: a Timestamp value which can be: Timestamp.QUARTERS, Timestamp.HOURS, Timestamp.DAYS.
"""
if nb > 0:
self.__nb_iterations = nb
if tol >= 0:
self.__tolerance = tol
if delta in Timestamp:
self.__delta_time = delta
self.__q = self.calculate_Q()
def calculate_Q(self):
"""
        This method checks the simulation's delta_time and sets the Q index value for
        the Monte Carlo loop.
        :return: q, which is 96 for Quarters, 24 for Hours and 1 for Days.
"""
if self.__delta_time is Timestamp.QUARTERS:
q = 96
elif self.__delta_time is Timestamp.HOURS:
q = 24
else:
q = 1
return q
# GETTERS/ACCESSORS
def | (self): return self.__nb_iterations
def get_tolerance(self): return self.__tolerance
def get_delta_time(self): return self.__delta_time
# SETTERS/MUTATORS
def set_nb_iterations(self, nb): self.__nb_iterations = nb
def set_tolerance(self, t): self.__tolerance = t
def set_delta_time(self, d): self.__delta_time = d
def grid_definition(self, network):
zeros = np.zeros
        # Since each bracket contains a node and a branch, the number of brackets
        # matches the number of nodes
nb_brackets = network.get_nb_brackets()-1
# Boolean vector of nodes phases [ [1, 1, 1], [1, 1, 1], ...]
vec_phases = np.ones((1, 3 * nb_brackets))
vec_phases = vec_phases[0]
# Number of phases for each node/bracket
num_phases = zeros((1, nb_brackets+1))
num_phases = num_phases[0]
        # Parent line impedances (intermediate step for the Zbr construction)
z = [0 for i in range(nb_brackets)]
# Power matrix (3 possible phases, number of nodes)
# p = zeros(3, nb_brackets)
# K = cell(nb_brackets, nb_brackets): This kind of Matlab variable can be translated into a nested list.
K = [[np.zeros((3, 3), int) for j in range(nb_brackets)]
for i in range(nb_brackets)]
# As we are going to use the list a certain number of times, we are assigning it to a variable for more
# efficiency
brackets = network.get_brackets()
vec_phases[0:3] = brackets[0].get_branch().get_phases()
num_phases[0] = np.sum(brackets[0].get_branch().get_phases())
for i in range(1, nb_brackets+1):
current_bracket = brackets[i-1]
vec_phases[3*(i-1):3*(i-1)+3] = current_bracket.get_branch().get_phases()
num_phases[i] = np.sum(current_bracket.get_branch().get_phases())
vec_phases_index = np.nonzero(vec_phases)[0]
# We won't work on the slack node because it is pointless, so we're only taking nodes starting with the second
brackets = brackets[1:]
for i in range(nb_brackets):
current_bracket = brackets[i]
# p = self.power_definition()
z[i] = (current_bracket.get_branch().calculate_impedance())
for j in range(nb_brackets):
# If there is a path, we change K[i,j,k,k] to -1
if i + 2 in network.find_path(0, j):
for k in range(3):
K[i][j][k][k] = -1
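        # K now appears to encode a branch-to-node incidence (BIBC-style) matrix:
        # block K[i][j] is minus the 3x3 identity when branch i lies on the path from
        # the slack node to node j, and the zero block otherwise.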
K = np.vstack([np.hstack(c) for c in K])
        K = K[np.ix_(vec_phases_index, vec_phases_index)]  # select active-phase rows and columns
# z = np.reshape(z, (19,1))
Zbr = block_diag(z).toarray()
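        # block_diag stacks the per-branch 3x3 impedance matrices into a single
        # block-diagonal branch-impedance matrix.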
        Zbr = Zbr[np.ix_(vec_phases_index, vec_phases_index)]
        # Converting all of our matrices into np.matrix objects. Until this point, they were plain arrays.
K = np.mat(K)
Zbr = np.mat(Zbr)
# Zbr = Zbr[vec_phases_index[:], vec_phases_index[:]]
# np.resize(Zbr, (len(Zbr)*3, len(Zbr)*3))
# End of Grid_definition
return {
'K': K,
'Zbr': Zbr,
'vec_phases_index': vec_phases_index
}
# LOAD FLOW METHOD
def load_flow(self, network):
"""
This method will implement the Load Flow algorithm.
        :param network: the network on which we want to run the load flow.
        :return: dic: A dictionary containing every matrix/array involved in the load flow resolution.
"""
# main.m
alpha = 1
nb_brackets = network.get_nb_brackets()-1
# Battery settings
bat_node = 2
bat_phase = 2
bat = (bat_node-2)*3 + bat_phase
Ebat = 0
Ebat_max = 120000
Pbat = 60000
# End
# Grid_definition.m
grid = self.grid_definition(network)
K = grid['K']
Zbr = grid['Zbr']
vec_phases_index = grid['vec_phases_index']
# End of Grid_Definition
brackets = network.get_brackets()[1:]
network_nodes = [brackets[i].get_node() for i in range(nb_brackets)]
# load_flow.m
Ibus = np.zeros((3 * nb_brackets), dtype=np.complex128)
Ibus = Ibus[:, np.newaxis]
Vnl = network.get_slack_voltage()
Vnl = Vnl[vec_phases_index]
Vbus = Vnl
Vbr_prev = Vnl
        # If we don't define Tmp as an N-dim (3-D) array, np.tile broadcasts it to
        # shape (1, 1, 57) instead of keeping it (57, 1, 1), producing a new matrix of
        # shape (1, 570, 96): tile seems to multiply the matching dimensions and then
        # join them. With Vnl of shape (57, 1) and a new matrix of shape (10, 96), the
        # result would be (1, 57*10, 96), which is not what we want.
Tmp = (Vnl * 0)
Tmp = Tmp[:, np.newaxis]
V = np.tile(Tmp, (1,1,1))
I = np.tile(Tmp, (1,1,1))
        # We don't reuse the Tmp matrix here because Vnl would be broadcast to a 1-D
        # array rather than a 3-D one, so the bug fixed above cannot occur here.
# Imean = np.tile(Vnl*0, (96))
# Vmean = np.tile(Vnl*0, (96))
powers = []
for node in network_nodes:
n_pow = []
for user in node.get_users():
n_pow.append(user.get_P())
powers.extend(n_pow)
"""
Here, we are assigning the NumPy functions we are going to use into the load flow loop to gain
a little bit more efficiency.
"""
# NumPy Functions
conj = np.conj
divide = np.divide
absolute = np.abs
less = np.less
zeros = np.zeros
# Here is the wrapping of the load flow:
# h = 0, nb iterations
# q = 0, 96
P = np.asarray(powers)
P = divide(P, 2)
        Q = np.zeros_like(P)  # the loads' reactive power is taken as zero
# Initializing arrays to optimize
Ibr = zeros((nb_brackets, 1))
Vbr = zeros((nb_brackets, 1))
# Before we enter the loop, we make sure we are going to work with matrices instead of arrays.
Ibr = np.matrix(Ibr)
Vbr = np.matrix(Vbr)
# LOAD FLOW LOOP
k = 0
t = process_time()
while True:
k += 1
bal = 0
            for i in range(len(P)):
                # Nodal current injection: I = -conj(S / V) with S = P + jQ.
                Ibus[i] = -conj(complex(P[i], Q[i]) / Vbus[i])
if i % 3 == bat:
bal = bal + P[i]
if bat != 0:
if bal < 0:
if Ebat < Ebat_max:
Ibus[bat] = min([conj(-Pbat/Vbus[bat]),
conj(bal/Vbus[bat]),
conj(-(Ebat_max - Ebat)/(Vbus[bat]*0.25))])
Ebat += absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25
elif Ebat > 0:
Ibus[bat] = min([conj(Pbat/Vbus[bat]),
conj(bal/Vbus[bat]),
conj(Ebat/(Vbus[bat]*0.25))])
                        Ebat -= absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25
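            # Battery dispatch (assumed convention): the charge/discharge current is
            # capped by the converter rating (Pbat), by the local power balance (bal),
            # and by the energy headroom available over the 15-minute step (0.25 h).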
Ibr = K * Ibus
Vbr = Zbr * Ibr
            if (less(divide(absolute(Vbr - Vbr_prev), absolute(Vbr + 1e-16)), self.__tolerance)).all():
break
Vbr = Vbr_prev + (alpha * (Vbr - Vbr_prev))
Vbr_prev = Vbr
Vbus = Vnl + np.dot(K.conj().T, Vbr)
Vbus = Vnl + np.dot(K.conj().T, Vbr)
V[:] = Vbus[:, :, np.newaxis]
I[:] = Ibr[:, :, np.newaxis]
        # Pbr and Qbr must be distinct arrays (chained assignment would alias them).
        Pbr = np.zeros((nb_brackets, len(vec_phases_index), 2))
        Qbr = np.zeros((nb_brackets, len(vec_phases_index), 2))
for i in range(nb_brackets):
for j in range(len(vec_phases_index)):
i_to_j = self.powerflow(Vbus[i], Ibr[i])
j_to_i = self.powerflow(Vbus[i+1], Ibr[i])
Pbr[i][j][0] = i_to_j['active']
Pbr[i][j][1] = j_to_i['active']
Qbr[i][j][0] = i_to_j['reactive']
Qbr[i][j][1] = j_to_i['reactive']
print(np.shape(Pbr), Qbr.shape)
# END OF LOAD FLOW
# End of load_flow.m
print("Process executed in", process_time() - t, "s")
dic = {
'Ibus_bat': Ibus[bat],
'Ebat': Ebat,
'V': V,
'Vbr': Vbr,
'Vbus': Vbus,
'I': I,
'Ibus': Ibus,
'Ibr': Ibr,
'Zbr': Zbr,
'P': P,
'K': K,
'Vnl': Vnl,
'Pbr': Pbr,
'Qbr': Qbr
}
return dic
def powerflow(self, voltage, intensity, conj=np.conj, real=np.real, imag=np.imag):
flow = voltage * conj(intensity)
return {
'active': real(flow),
'reactive': imag(flow)
}
def printMenu(self, network):
        np.set_printoptions(threshold=np.inf, suppress=True, precision=10)
# import re
while True:
# This block is relevant if we use a timestamp.
# It will check the user's input.
# If you uncomment the block, don't forget to uncomment "import re"
"""print("Which time do you want to simulate over? (hh:mm:ss) (q or Q to quit)")
try:
timestamp = str(input())
            if timestamp == 'q' or timestamp == 'Q':
print("Goodbye!")
break
# The following is called a Regular Expression:
# It will check if a user input matches a given pattern.
# In this case, the pattern is:
# [0-2 digit][0-9 digit]:[0-5 digit][0-9 digit]:[0 digit][0 digit]
# (?!00:00:00) will ensure that the following pattern won't result in the string "00:00:00"
# (We use the format 01:00:00 -> 24:00:00 and not 00:00:00 -> 23:59:59)
# The first group ([0-1][0-9]|2[0-3]):
# It will check if we enter an hour between 00 and 19, or between 20 and 23.
            # Then, if the first group matched, it checks that minutes and seconds are between 00 and 59.
# Finally, if we did not enter a matching hour (between 00 and 23), it will check if the expression
# matches the last group: 24:00:00.
if not re.compile("(?!00:00:00)((([0-1][0-9]|2[0-3]):[0-5][0-9]:00)|(24:00:00))") \
.match(timestamp):
print("Please enter a valid time : hh:mm:ss")
continue
except TypeError:
continue"""
print(" GELEC MAIN MENU")
print("Low Voltage Tool ------------------------- Debug Mode")
print("-----------------------------------------------------")
print("What would you want to do?")
print(" 0. Exit debug mode")
print(" 1. Print Network")
print(" 2. Print Voltages (V[i][j]([h][q]))")
print(" 3. Print Voltages at branches (Vbr)")
print(" 4. Print Voltages at nodes (Vbus)")
print(" 5. Print Intensities (I[i][j]([h][q]))")
print(" 6. Print Intensities at nodes (Ibus)")
print(" 7. Print Intensities at branches(Ibr)")
print(" 8. Print Power Flows on Branches")
print(" 9. Print Topology (K)")
print("10. Print Voltages at slack node (Vnl)")
print("11. Print Impendances on branches (Zbr)")
print("12. Print Intensity at battery node(Ibus[bat])")
print("13. Print Energy at battery(Ebat)")
try:
choice = int(input())
except ValueError:
continue
if choice == 0:
print("Goodbye!")
break
if choice == 1:
from anytree.render import RenderTree
root = network.get_slack_node()
tree = RenderTree(root)
for pre, _, node in tree:
print("%s%s" % (pre, "Node " + str(node.get_id())))
else:
print("calculating load flow...")
load_flow = self.load_flow(network)
if choice == 2:
voltages = load_flow['V']
mods = []
phases = []
print(voltages)
"""for el in voltages:
mods.append(np.absolute(el))
phases.append(np.angle(el))
for i, e in enumerate(voltages):
print("Node %s: %s e^%s" % ((i//3)+1,mods[i][0], phases[i][0]))"""
print(np.shape(voltages))
elif choice == 3:
Vbr = load_flow['Vbr']
mods = []
phases = []
for el in Vbr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vbr):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vbr))
elif choice == 4:
Vbus = load_flow['Vbus']
mods = []
phases = []
for el in Vbus:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vbus):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vbus))
elif choice == 5:
intensities = load_flow['I']
for i, e in enumerate(intensities):
print("Node", (i//3)+1, e)
print(np.shape(intensities))
elif choice == 6:
Ibus = load_flow['Ibus']
mods = []
phases = []
for el in Ibus:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Ibus):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Ibus))
elif choice == 7:
Ibr = load_flow['Ibr']
mods = []
phases = []
for el in Ibr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Ibr):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Ibr))
elif choice == 8:
Pbr = load_flow['Pbr']
Qbr = load_flow['Qbr']
print("Active powers:\n")
for i, e in enumerate(Pbr):
print("Branch between %s and %s: %s" %(i, i+1, e))
print("Reactive power:\n", Qbr)
elif choice == 9:
K = load_flow['K']
print(K)
print(np.shape(K))
elif choice == 10:
Vnl = load_flow['Vnl']
mods = []
phases = []
for el in Vnl:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vnl):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vnl))
elif choice == 11:
Zbr = load_flow['Zbr']
Zbr = Zbr[np.nonzero(Zbr)]
mods = []
phases = []
for el in Zbr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Zbr):
print("Modules\n: %s \n Phases:\n%s" % (mods[i][0], phases[i][0]))
print(np.shape(Zbr))
elif choice == 12:
Ibat = load_flow['Ibus_bat']
print(Ibat)
print(Ibat.shape)
elif choice == 13:
Ebat = load_flow['Ebat']
print("Ebat =", Ebat)
else:
print("Please enter a valid number (see Main Menu)")
if __name__ == '__main__':
from app.models.singleton import NetworkManager
from app.serialization.serialization import Serialization
sim = SimulationLF(tol=0.01)
manager = NetworkManager()
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Block comment to uncomment before packaging
"""while True:
print("Veuillez entrer le chemin du réseau à charger:")
path = str(input())
try:
path = os.path.normpath(path)
break
except Exception:
continue"""
        base_dir = os.path.dirname(__file__)  # avoid shadowing the built-in dir()
        path = os.path.normpath(os.path.join(base_dir, '..', '..', 'ressources', 'Network_Flobecq.xlsx'))
t0 = process_time()
network = Serialization.load_network_from_XLSX(path)
t1 = process_time()
print("Network read in", t1 - t0, "seconds")
manager.add_network(network)
sim.printMenu(network)
| get_nb_iterations | identifier_name |
simulationlf.py | from time import process_time
import numpy as np
from scipy.sparse import block_diag
from app.models.timestamp import Timestamp
import os
class SimulationLF:
def __init__(self, nb=1, tol=0.01, delta=Timestamp.QUARTERS):
"""
        :param nb: the number of Monte Carlo iterations to run
        :param tol: the relative tolerance the voltages must reach for convergence
:param delta: a Timestamp value which can be: Timestamp.QUARTERS, Timestamp.HOURS, Timestamp.DAYS.
"""
if nb > 0:
self.__nb_iterations = nb
if tol >= 0:
self.__tolerance = tol
if delta in Timestamp:
self.__delta_time = delta
self.__q = self.calculate_Q()
def calculate_Q(self):
"""
        This method checks the simulation's delta_time and sets the Q index value for
        the Monte Carlo loop.
        :return: q, which is 96 for Quarters, 24 for Hours and 1 for Days.
"""
if self.__delta_time is Timestamp.QUARTERS:
q = 96
elif self.__delta_time is Timestamp.HOURS:
q = 24
else:
q = 1
return q
# GETTERS/ACCESSORS
def get_nb_iterations(self): return self.__nb_iterations
def get_tolerance(self): return self.__tolerance
def get_delta_time(self): return self.__delta_time
# SETTERS/MUTATORS
def set_nb_iterations(self, nb): self.__nb_iterations = nb
def set_tolerance(self, t): self.__tolerance = t
def set_delta_time(self, d): self.__delta_time = d
def grid_definition(self, network):
zeros = np.zeros
        # Since each bracket contains a node and a branch, the number of brackets
        # matches the number of nodes
nb_brackets = network.get_nb_brackets()-1
# Boolean vector of nodes phases [ [1, 1, 1], [1, 1, 1], ...]
vec_phases = np.ones((1, 3 * nb_brackets))
vec_phases = vec_phases[0]
# Number of phases for each node/bracket
num_phases = zeros((1, nb_brackets+1))
num_phases = num_phases[0]
        # Parent line impedances (intermediate step for the Zbr construction)
z = [0 for i in range(nb_brackets)]
# Power matrix (3 possible phases, number of nodes)
# p = zeros(3, nb_brackets)
# K = cell(nb_brackets, nb_brackets): This kind of Matlab variable can be translated into a nested list.
K = [[np.zeros((3, 3), int) for j in range(nb_brackets)]
for i in range(nb_brackets)]
# As we are going to use the list a certain number of times, we are assigning it to a variable for more
# efficiency
brackets = network.get_brackets()
vec_phases[0:3] = brackets[0].get_branch().get_phases()
num_phases[0] = np.sum(brackets[0].get_branch().get_phases())
for i in range(1, nb_brackets+1):
current_bracket = brackets[i-1]
vec_phases[3*(i-1):3*(i-1)+3] = current_bracket.get_branch().get_phases()
num_phases[i] = np.sum(current_bracket.get_branch().get_phases())
vec_phases_index = np.nonzero(vec_phases)[0]
# We won't work on the slack node because it is pointless, so we're only taking nodes starting with the second
brackets = brackets[1:]
for i in range(nb_brackets):
current_bracket = brackets[i]
# p = self.power_definition()
z[i] = (current_bracket.get_branch().calculate_impedance())
for j in range(nb_brackets):
# If there is a path, we change K[i,j,k,k] to -1
if i + 2 in network.find_path(0, j):
for k in range(3):
K[i][j][k][k] = -1
K = np.vstack([np.hstack(c) for c in K])
        K = K[np.ix_(vec_phases_index, vec_phases_index)]  # select active-phase rows and columns
# z = np.reshape(z, (19,1))
Zbr = block_diag(z).toarray()
        Zbr = Zbr[np.ix_(vec_phases_index, vec_phases_index)]
        # Converting all of our matrices into np.matrix objects. Until this point, they were plain arrays.
K = np.mat(K)
Zbr = np.mat(Zbr)
# Zbr = Zbr[vec_phases_index[:], vec_phases_index[:]]
# np.resize(Zbr, (len(Zbr)*3, len(Zbr)*3))
# End of Grid_definition
return {
'K': K,
'Zbr': Zbr,
'vec_phases_index': vec_phases_index
}
# LOAD FLOW METHOD
def load_flow(self, network):
"""
This method will implement the Load Flow algorithm.
        :param network: the network on which we want to run the load flow.
        :return: dic: A dictionary containing every matrix/array involved in the load flow resolution.
"""
# main.m
alpha = 1
nb_brackets = network.get_nb_brackets()-1
# Battery settings
bat_node = 2
bat_phase = 2
bat = (bat_node-2)*3 + bat_phase
Ebat = 0
Ebat_max = 120000
Pbat = 60000
# End
# Grid_definition.m
grid = self.grid_definition(network)
K = grid['K']
Zbr = grid['Zbr']
vec_phases_index = grid['vec_phases_index']
# End of Grid_Definition
brackets = network.get_brackets()[1:]
network_nodes = [brackets[i].get_node() for i in range(nb_brackets)]
# load_flow.m
Ibus = np.zeros((3 * nb_brackets), dtype=np.complex128)
Ibus = Ibus[:, np.newaxis]
Vnl = network.get_slack_voltage()
Vnl = Vnl[vec_phases_index]
Vbus = Vnl
Vbr_prev = Vnl
        # If we don't define Tmp as an N-dim (3-D) array, np.tile broadcasts it to
        # shape (1, 1, 57) instead of keeping it (57, 1, 1), producing a new matrix of
        # shape (1, 570, 96): tile seems to multiply the matching dimensions and then
        # join them. With Vnl of shape (57, 1) and a new matrix of shape (10, 96), the
        # result would be (1, 57*10, 96), which is not what we want.
Tmp = (Vnl * 0)
Tmp = Tmp[:, np.newaxis]
V = np.tile(Tmp, (1,1,1))
I = np.tile(Tmp, (1,1,1))
        # We don't reuse the Tmp matrix here because Vnl would be broadcast to a 1-D
        # array rather than a 3-D one, so the bug fixed above cannot occur here.
# Imean = np.tile(Vnl*0, (96))
# Vmean = np.tile(Vnl*0, (96))
powers = []
for node in network_nodes:
n_pow = []
for user in node.get_users():
n_pow.append(user.get_P())
powers.extend(n_pow)
"""
Here, we are assigning the NumPy functions we are going to use into the load flow loop to gain
a little bit more efficiency.
"""
# NumPy Functions
conj = np.conj
divide = np.divide
absolute = np.abs
less = np.less
zeros = np.zeros
# Here is the wrapping of the load flow:
# h = 0, nb iterations
# q = 0, 96
P = np.asarray(powers)
P = divide(P, 2)
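        # Reactive power is taken as zero for every load in this simulation.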
        Q = np.zeros_like(P)
# Initializing arrays to optimize
Ibr = zeros((nb_brackets, 1))
Vbr = zeros((nb_brackets, 1))
# Before we enter the loop, we make sure we are going to work with matrices instead of arrays.
Ibr = np.matrix(Ibr)
Vbr = np.matrix(Vbr)
# LOAD FLOW LOOP
k = 0
t = process_time()
while True:
|
Vbus = Vnl + np.dot(K.conj().T, Vbr)
V[:] = Vbus[:, :, np.newaxis]
I[:] = Ibr[:, :, np.newaxis]
        # Pbr and Qbr must be distinct arrays (chained assignment would alias them).
        Pbr = np.zeros((nb_brackets, len(vec_phases_index), 2))
        Qbr = np.zeros((nb_brackets, len(vec_phases_index), 2))
for i in range(nb_brackets):
for j in range(len(vec_phases_index)):
i_to_j = self.powerflow(Vbus[i], Ibr[i])
j_to_i = self.powerflow(Vbus[i+1], Ibr[i])
Pbr[i][j][0] = i_to_j['active']
Pbr[i][j][1] = j_to_i['active']
Qbr[i][j][0] = i_to_j['reactive']
Qbr[i][j][1] = j_to_i['reactive']
print(np.shape(Pbr), Qbr.shape)
# END OF LOAD FLOW
# End of load_flow.m
print("Process executed in", process_time() - t, "s")
dic = {
'Ibus_bat': Ibus[bat],
'Ebat': Ebat,
'V': V,
'Vbr': Vbr,
'Vbus': Vbus,
'I': I,
'Ibus': Ibus,
'Ibr': Ibr,
'Zbr': Zbr,
'P': P,
'K': K,
'Vnl': Vnl,
'Pbr': Pbr,
'Qbr': Qbr
}
return dic
def powerflow(self, voltage, intensity, conj=np.conj, real=np.real, imag=np.imag):
flow = voltage * conj(intensity)
return {
'active': real(flow),
'reactive': imag(flow)
}
def printMenu(self, network):
        np.set_printoptions(threshold=np.inf, suppress=True, precision=10)
# import re
while True:
# This block is relevant if we use a timestamp.
# It will check the user's input.
# If you uncomment the block, don't forget to uncomment "import re"
"""print("Which time do you want to simulate over? (hh:mm:ss) (q or Q to quit)")
try:
timestamp = str(input())
            if timestamp == 'q' or timestamp == 'Q':
print("Goodbye!")
break
# The following is called a Regular Expression:
# It will check if a user input matches a given pattern.
# In this case, the pattern is:
# [0-2 digit][0-9 digit]:[0-5 digit][0-9 digit]:[0 digit][0 digit]
# (?!00:00:00) will ensure that the following pattern won't result in the string "00:00:00"
# (We use the format 01:00:00 -> 24:00:00 and not 00:00:00 -> 23:59:59)
# The first group ([0-1][0-9]|2[0-3]):
# It will check if we enter an hour between 00 and 19, or between 20 and 23.
            # Then, if the first group matched, it checks that minutes and seconds are between 00 and 59.
# Finally, if we did not enter a matching hour (between 00 and 23), it will check if the expression
# matches the last group: 24:00:00.
if not re.compile("(?!00:00:00)((([0-1][0-9]|2[0-3]):[0-5][0-9]:00)|(24:00:00))") \
.match(timestamp):
print("Please enter a valid time : hh:mm:ss")
continue
except TypeError:
continue"""
print(" GELEC MAIN MENU")
print("Low Voltage Tool ------------------------- Debug Mode")
print("-----------------------------------------------------")
print("What would you want to do?")
print(" 0. Exit debug mode")
print(" 1. Print Network")
print(" 2. Print Voltages (V[i][j]([h][q]))")
print(" 3. Print Voltages at branches (Vbr)")
print(" 4. Print Voltages at nodes (Vbus)")
print(" 5. Print Intensities (I[i][j]([h][q]))")
print(" 6. Print Intensities at nodes (Ibus)")
print(" 7. Print Intensities at branches(Ibr)")
print(" 8. Print Power Flows on Branches")
print(" 9. Print Topology (K)")
print("10. Print Voltages at slack node (Vnl)")
print("11. Print Impendances on branches (Zbr)")
print("12. Print Intensity at battery node(Ibus[bat])")
print("13. Print Energy at battery(Ebat)")
try:
choice = int(input())
except ValueError:
continue
if choice == 0:
print("Goodbye!")
break
if choice == 1:
from anytree.render import RenderTree
root = network.get_slack_node()
tree = RenderTree(root)
for pre, _, node in tree:
print("%s%s" % (pre, "Node " + str(node.get_id())))
else:
print("calculating load flow...")
load_flow = self.load_flow(network)
if choice == 2:
voltages = load_flow['V']
mods = []
phases = []
print(voltages)
"""for el in voltages:
mods.append(np.absolute(el))
phases.append(np.angle(el))
for i, e in enumerate(voltages):
print("Node %s: %s e^%s" % ((i//3)+1,mods[i][0], phases[i][0]))"""
print(np.shape(voltages))
elif choice == 3:
Vbr = load_flow['Vbr']
mods = []
phases = []
for el in Vbr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vbr):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vbr))
elif choice == 4:
Vbus = load_flow['Vbus']
mods = []
phases = []
for el in Vbus:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vbus):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vbus))
elif choice == 5:
intensities = load_flow['I']
for i, e in enumerate(intensities):
print("Node", (i//3)+1, e)
print(np.shape(intensities))
elif choice == 6:
Ibus = load_flow['Ibus']
mods = []
phases = []
for el in Ibus:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Ibus):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Ibus))
elif choice == 7:
Ibr = load_flow['Ibr']
mods = []
phases = []
for el in Ibr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Ibr):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Ibr))
elif choice == 8:
Pbr = load_flow['Pbr']
Qbr = load_flow['Qbr']
print("Active powers:\n")
for i, e in enumerate(Pbr):
print("Branch between %s and %s: %s" %(i, i+1, e))
print("Reactive power:\n", Qbr)
elif choice == 9:
K = load_flow['K']
print(K)
print(np.shape(K))
elif choice == 10:
Vnl = load_flow['Vnl']
mods = []
phases = []
for el in Vnl:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Vnl):
print("Node %s: %s e^%s" % ((i // 3) + 1, mods[i][0], phases[i][0]))
print(np.shape(Vnl))
elif choice == 11:
Zbr = load_flow['Zbr']
Zbr = Zbr[np.nonzero(Zbr)]
mods = []
phases = []
for el in Zbr:
mods.append(np.abs(el))
phases.append(np.angle(el))
for i, e in enumerate(Zbr):
print("Modules\n: %s \n Phases:\n%s" % (mods[i][0], phases[i][0]))
print(np.shape(Zbr))
elif choice == 12:
Ibat = load_flow['Ibus_bat']
print(Ibat)
print(Ibat.shape)
elif choice == 13:
Ebat = load_flow['Ebat']
print("Ebat =", Ebat)
else:
print("Please enter a valid number (see Main Menu)")
if __name__ == '__main__':
from app.models.singleton import NetworkManager
from app.serialization.serialization import Serialization
sim = SimulationLF(tol=0.01)
manager = NetworkManager()
import warnings
with warnings.catch_warnings():
warnings.simplefilter("ignore")
# Block comment to uncomment before packaging
"""while True:
print("Veuillez entrer le chemin du réseau à charger:")
path = str(input())
try:
path = os.path.normpath(path)
break
except Exception:
continue"""
        base_dir = os.path.dirname(__file__)  # avoid shadowing the built-in dir()
        path = os.path.normpath(os.path.join(base_dir, '..', '..', 'ressources', 'Network_Flobecq.xlsx'))
t0 = process_time()
network = Serialization.load_network_from_XLSX(path)
t1 = process_time()
print("Network read in", t1 - t0, "seconds")
manager.add_network(network)
sim.printMenu(network)
| k += 1
bal = 0
            for i in range(len(P)):
                # Nodal current injection: I = -conj(S / V) with S = P + jQ.
                Ibus[i] = -conj(complex(P[i], Q[i]) / Vbus[i])
if i % 3 == bat:
bal = bal + P[i]
if bat != 0:
if bal < 0:
if Ebat < Ebat_max:
Ibus[bat] = min([conj(-Pbat/Vbus[bat]),
conj(bal/Vbus[bat]),
conj(-(Ebat_max - Ebat)/(Vbus[bat]*0.25))])
Ebat += absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25
elif Ebat > 0:
Ibus[bat] = min([conj(Pbat/Vbus[bat]),
conj(bal/Vbus[bat]),
conj(Ebat/(Vbus[bat]*0.25))])
                        Ebat -= absolute(np.dot(Ibus[bat], Vbus[bat])) * 0.25
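            # Backward/forward sweep: branch currents from the nodal injections, branch
            # voltage drops through Zbr, then a relative-change convergence test and a
            # relaxed voltage update.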
Ibr = K * Ibus
Vbr = Zbr * Ibr
            if (less(divide(absolute(Vbr - Vbr_prev), absolute(Vbr + 1e-16)), self.__tolerance)).all():
break
Vbr = Vbr_prev + (alpha * (Vbr - Vbr_prev))
Vbr_prev = Vbr
Vbus = Vnl + np.dot(K.conj().T, Vbr) | conditional_block |
mortgage_pandas.py | # Derived from https://github.com/fschlimb/scale-out-benchs
import numpy as np
import pandas as pd
import mysql.connector  # used below to store benchmark results in MySQL
from pandas.api.types import CategoricalDtype
from io import StringIO
from glob import glob
import os
import time
import pathlib
import sys
import argparse
def run_pd_workflow(quarter=1, year=2000, perf_file="", **kwargs):
t1 = time.time()
names = pd_load_names()
year_string = str(year) + "Q" + str(quarter) + ".txt"
acq_file = os.path.join(data_directory, "acq", "Acquisition_" + year_string)
print("READING DATAFILE", acq_file)
acq_pdf = pd_load_acquisition_csv(acq_file)
print("READING DATAFILE", perf_file)
perf_df_tmp = pd_load_performance_csv(perf_file)
print("read time", (time.time() - t1) * 1000)
t1 = time.time()
acq_pdf = acq_pdf.merge(names, how='left', on=['seller_name'])
acq_pdf.drop(columns=['seller_name'], inplace=True)
acq_pdf['seller_name'] = acq_pdf['new']
acq_pdf.drop(columns=['new'], inplace=True)
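    # The merge/drop/rename sequence above swaps the raw seller_name for the
    # canonical name looked up in names.csv.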
pdf = perf_df_tmp
everdf = create_ever_features(pdf)
delinq_merge = create_delinq_features(pdf)
everdf = join_ever_delinq_features(everdf, delinq_merge)
del(delinq_merge)
joined_df = create_joined_df(pdf, everdf)
testdf = create_12_mon_features(joined_df)
joined_df = combine_joined_12_mon(joined_df, testdf)
del(testdf)
perf_df = final_performance_delinquency(pdf, joined_df)
del(pdf, joined_df)
final_pdf = join_perf_acq_pdfs(perf_df, acq_pdf)
del(perf_df)
del(acq_pdf)
print("compute time", (time.time() - t1) * 1000)
final_pdf = last_mile_cleaning(final_pdf)
exec_time = (time.time() - t1) * 1000
print("compute time with copy to host", exec_time)
return final_pdf, exec_time
def pd_load_performance_csv(performance_path, **kwargs):
""" Loads performance data
Returns
-------
PD DataFrame
"""
cols = [
"loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb",
"loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity",
"maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code",
"zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after",
"disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs",
"asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds",
"credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds",
"non_interest_bearing_upb", "principal_forgiveness_upb", "repurchase_make_whole_proceeds_flag",
"foreclosure_principal_write_off_amount", "servicing_activity_indicator"
]
dtypes = {
"loan_id": np.int64,
"monthly_reporting_period": str,
"servicer": str,
"interest_rate": np.float64,
"current_actual_upb": np.float64,
"loan_age": np.float64,
"remaining_months_to_legal_maturity": np.float64,
"adj_remaining_months_to_maturity": np.float64,
"maturity_date": str,
"msa": np.float64,
"current_loan_delinquency_status": np.int32,
"mod_flag": CategoricalDtype(['N', 'Y']),
"zero_balance_code": CategoricalDtype(['01', '02', '06', '09', '03', '15', '16']),
"zero_balance_effective_date": str,
"last_paid_installment_date": str,
"foreclosed_after": str,
"disposition_date": str,
"foreclosure_costs": np.float64,
"prop_preservation_and_repair_costs": np.float64,
"asset_recovery_costs": np.float64,
"misc_holding_expenses": np.float64,
"holding_taxes": np.float64,
"net_sale_proceeds": np.float64,
"credit_enhancement_proceeds": np.float64,
"repurchase_make_whole_proceeds": np.float64,
"other_foreclosure_proceeds": np.float64,
"non_interest_bearing_upb": np.float64,
"principal_forgiveness_upb": np.float64,
"repurchase_make_whole_proceeds_flag": CategoricalDtype(['N', 'Y']),
"foreclosure_principal_write_off_amount": np.float64,
"servicing_activity_indicator": CategoricalDtype(['N', 'Y']),
}
return pd.read_csv(performance_path, names=cols, delimiter='|', dtype=dtypes, parse_dates=[1,8,13,14,15,16])
def pd_load_acquisition_csv(acquisition_path, **kwargs):
""" Loads acquisition data
Returns
-------
PD DataFrame
"""
columns = [
'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',
'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',
'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',
'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',
'relocation_mortgage_indicator', 'year_quarter'
]
dtypes = {
'loan_id': np.int64,
'orig_channel': CategoricalDtype(['B', 'C', 'R']),
'seller_name': str,
'orig_interest_rate': np.float64,
'orig_upb': np.int64,
'orig_loan_term': np.int64,
'orig_date': str,
'first_pay_date': str,
'orig_ltv': np.float64,
'orig_cltv': np.float64,
'num_borrowers': np.float64,
'dti': np.float64,
'borrower_credit_score': np.float64,
'first_home_buyer': CategoricalDtype(['N', 'U', 'Y']),
'loan_purpose': CategoricalDtype(['C', 'P', 'R', 'U']),
'property_type': CategoricalDtype(['CO', 'CP', 'MH', 'PU', 'SF']),
'num_units': np.int64,
'occupancy_status': CategoricalDtype(['I', 'P', 'S']),
'property_state': CategoricalDtype(
['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI',
'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN',
'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',
'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI',
'VT', 'WA', 'WI', 'WV', 'WY']),
'zip': np.int64,
'mortgage_insurance_percent': np.float64,
'product_type': CategoricalDtype(['FRM']),
'coborrow_credit_score': np.float64,
'mortgage_insurance_type': np.float64,
'relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']),
'year_quarter': np.int64
}
a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes, parse_dates=[6,7], error_bad_lines=True, warn_bad_lines=True, na_filter=True)
return a
def pd_load_names(**kwargs):
""" Loads names used for renaming the banks
Returns
-------
PD DataFrame
"""
cols = [
'seller_name', 'new'
]
dtypes = {'seller_name':str, 'new':str}
return pd.read_csv(os.path.join(data_directory, "names.csv"), names=cols, delimiter='|', dtype=dtypes)
def create_ever_features(pdf, **kwargs):
everdf = pdf[['loan_id', 'current_loan_delinquency_status']]
everdf = everdf.groupby('loan_id').max()
del(pdf)
everdf['ever_30'] = (everdf['current_loan_delinquency_status'] >= 1).astype('int8')
everdf['ever_90'] = (everdf['current_loan_delinquency_status'] >= 3).astype('int8')
everdf['ever_180'] = (everdf['current_loan_delinquency_status'] >= 6).astype('int8')
everdf.drop(columns=['current_loan_delinquency_status'], inplace=True)
return everdf
def create_delinq_features(pdf, **kwargs):
delinq_pdf = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status']]
del(pdf)
delinq_30 = delinq_pdf.query('current_loan_delinquency_status >= 1')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_30['delinquency_30'] = delinq_30['monthly_reporting_period']
delinq_30.drop(columns=['monthly_reporting_period'], inplace=True)
delinq_90 = delinq_pdf.query('current_loan_delinquency_status >= 3')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_90['delinquency_90'] = delinq_90['monthly_reporting_period']
delinq_90.drop(columns=['monthly_reporting_period'], inplace=True)
delinq_180 = delinq_pdf.query('current_loan_delinquency_status >= 6')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_180['delinquency_180'] = delinq_180['monthly_reporting_period']
delinq_180.drop(columns=['monthly_reporting_period'], inplace=True)
del(delinq_pdf)
delinq_merge = delinq_30.merge(delinq_90, how='left', on=['loan_id'])
delinq_merge['delinquency_90'] = delinq_merge['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
delinq_merge = delinq_merge.merge(delinq_180, how='left', on=['loan_id'])
delinq_merge['delinquency_180'] = delinq_merge['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
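    # Loans that never reached a bucket keep the Unix epoch (1970-01-01) as a sentinel date.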
del(delinq_30)
del(delinq_90)
del(delinq_180)
return delinq_merge
def join_ever_delinq_features(everdf_tmp, delinq_merge, **kwargs):
everdf = everdf_tmp.merge(delinq_merge, on=['loan_id'], how='left')
del(everdf_tmp)
del(delinq_merge)
everdf['delinquency_30'] = everdf['delinquency_30'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
everdf['delinquency_90'] = everdf['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
everdf['delinquency_180'] = everdf['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
return everdf
def create_joined_df(pdf, everdf, **kwargs):
test = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status', 'current_actual_upb']]
del(pdf)
test['timestamp'] = test['monthly_reporting_period']
test.drop(columns=['monthly_reporting_period'], inplace=True)
test['timestamp_month'] = test['timestamp'].dt.month
test['timestamp_year'] = test['timestamp'].dt.year
test['delinquency_12'] = test['current_loan_delinquency_status']
test.drop(columns=['current_loan_delinquency_status'], inplace=True)
test['upb_12'] = test['current_actual_upb']
test.drop(columns=['current_actual_upb'], inplace=True)
test['upb_12'] = test['upb_12'].fillna(999999999)
test['delinquency_12'] = test['delinquency_12'].fillna(-1)
joined_df = test.merge(everdf, how='left', on=['loan_id'])
del(everdf)
del(test)
joined_df['ever_30'] = joined_df['ever_30'].fillna(-1)
joined_df['ever_90'] = joined_df['ever_90'].fillna(-1)
joined_df['ever_180'] = joined_df['ever_180'].fillna(-1)
joined_df['delinquency_30'] = joined_df['delinquency_30'].fillna(-1)
joined_df['delinquency_90'] = joined_df['delinquency_90'].fillna(-1)
joined_df['delinquency_180'] = joined_df['delinquency_180'].fillna(-1)
joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int32')
joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int32')
return joined_df
def create_12_mon_features(joined_df, **kwargs):
testdfs = []
n_months = 12
for y in range(1, n_months + 1):
tmpdf = joined_df[['loan_id', 'timestamp_year', 'timestamp_month', 'delinquency_12', 'upb_12']]
tmpdf['josh_months'] = tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month']
tmpdf['josh_mody_n'] = np.floor((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12)
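        # josh_months is a month counter (timestamp_year * 12 + timestamp_month);
        # josh_mody_n then buckets observations into 12-month windows offset by y, so
        # the groupby below takes, per window, the worst delinquency and the lowest
        # unpaid balance over the following year.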
tmpdf = tmpdf.groupby(['loan_id', 'josh_mody_n'], as_index=False).agg({'delinquency_12': 'max','upb_12': 'min'})
tmpdf['delinquency_12'] = (tmpdf['delinquency_12']>3).astype('int32')
tmpdf['delinquency_12'] +=(tmpdf['upb_12']==0).astype('int32')
tmpdf['timestamp_year'] = np.floor(((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12).astype('int16')
tmpdf['timestamp_month'] = np.int8(y)
tmpdf.drop(columns=['josh_mody_n'], inplace=True)
testdfs.append(tmpdf)
del(tmpdf)
del(joined_df)
return pd.concat(testdfs)
def combine_joined_12_mon(joined_df, testdf, **kwargs):
joined_df.drop(columns=['delinquency_12', 'upb_12'], inplace=True)
joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int16')
joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int8')
return joined_df.merge(testdf, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'])
def final_performance_delinquency(merged, joined_df, **kwargs):
merged['timestamp_month'] = merged['monthly_reporting_period'].dt.month
merged['timestamp_month'] = merged['timestamp_month'].astype('int8')
merged['timestamp_year'] = merged['monthly_reporting_period'].dt.year
merged['timestamp_year'] = merged['timestamp_year'].astype('int16')
merged = merged.merge(joined_df, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'])
merged.drop(columns=['timestamp_year'], inplace=True)
merged.drop(columns=['timestamp_month'], inplace=True)
return merged
def join_perf_acq_pdfs(perf, acq, **kwargs):
return perf.merge(acq, how='left', on=['loan_id'])
def last_mile_cleaning(df, **kwargs):
#for col, dtype in df.dtypes.iteritems():
# if str(dtype)=='category':
# df[col] = df[col].cat.codes
df['delinquency_12'] = df['delinquency_12'] > 0
df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
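    # delinquency_12 is now the binary target used downstream: 1 if the 12-month
    # window showed serious delinquency (or a zeroed UPB), else 0.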
return df #.to_arrow(index=False)
# Load database reporting functions
pathToReportDir = os.path.join(pathlib.Path(__file__).parent, "..", "report")
print(pathToReportDir)
sys.path.insert(1, pathToReportDir)
import report
parser = argparse.ArgumentParser(description='Run Mortgage benchmark using pandas')
parser.add_argument('-r', default="report_pandas.csv", help="Report file name.")
parser.add_argument('-df', default=1, type=int, help="Number of datafiles (quarters) to input into database for processing.")
parser.add_argument('-dp', required=True, help="Path to root of mortgage datafiles directory (contains names.csv).")
parser.add_argument('-i', dest="iterations", default=5, type=int, help="Number of iterations to run every benchmark. Best result is selected.")
parser.add_argument("-db-server", default="localhost", help="Host name of MySQL server")
parser.add_argument("-db-port", default=3306, type=int, help="Port number of MySQL server")
parser.add_argument("-db-user", default="", help="Username to use to connect to MySQL database. If user name is specified, script attempts to store results in MySQL database using other -db-* parameters.")
parser.add_argument("-db-pass", default="omniscidb", help="Password to use to connect to MySQL database")
parser.add_argument("-db-name", default="omniscidb", help="MySQL database to use to store benchmark results")
parser.add_argument("-db-table", help="Table to use to store results for this benchmark.")
parser.add_argument("-commit", default="1234567890123456789012345678901234567890", help="Commit hash to use to record this benchmark results")
args = parser.parse_args()
if args.df <= 0:
print("Bad number of data files specified", args.df)
sys.exit(1)
if args.iterations < 1:
    print("Bad number of iterations specified", args.iterations)
    sys.exit(1)
db_reporter = None
if args.db_user != "":
print("Connecting to database")
db = mysql.connector.connect(host=args.db_server, port=args.db_port, user=args.db_user, passwd=args.db_pass, db=args.db_name);
db_reporter = report.DbReport(db, args.db_table, {
'FilesNumber': 'INT UNSIGNED NOT NULL',
'FragmentSize': 'BIGINT UNSIGNED NOT NULL',
'BenchName': 'VARCHAR(500) NOT NULL',
'BestExecTimeMS': 'BIGINT UNSIGNED',
'BestTotalTimeMS': 'BIGINT UNSIGNED',
'WorstExecTimeMS': 'BIGINT UNSIGNED',
'WorstTotalTimeMS': 'BIGINT UNSIGNED',
'AverageExecTimeMS': 'BIGINT UNSIGNED',
'AverageTotalTimeMS': 'BIGINT UNSIGNED'
}, {
'ScriptName': 'mortgage_pandas.py',
'CommitHash': args.commit
})
data_directory = args.dp
benchName = "mortgage_pandas"
perf_data_path = os.path.join(data_directory, "perf")
perf_format_path = os.path.join(perf_data_path, "Performance_%sQ%s.txt")
bestExecTime = float("inf")
bestTotalTime = float("inf")
worstExecTime = 0
worstTotalTime = 0
avgExecTime = 0
avgTotalTime = 0
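# Best/worst/average exec and total times are accumulated across the -i iterations;
# the best result is reported as the headline number.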
for iii in range(1, args.iterations + 1):
|
avgExecTime /= args.iterations
avgTotalTime /= args.iterations
try:
    with open(args.r, "w") as report_file:  # a name distinct from the imported report module
        print("BENCHMARK", benchName, "EXEC TIME", bestExecTime, "TOTAL TIME", bestTotalTime)
        print("datafiles,fragment_size,query,query_exec_min,query_total_min,query_exec_max,query_total_max,query_exec_avg,query_total_avg,query_error_info", file=report_file, flush=True)
        print(dataFilesNumber, ",",
              0, ",",
              benchName, ",",
              bestExecTime, ",",
              bestTotalTime, ",",
              worstExecTime, ",",
              worstTotalTime, ",",
              avgExecTime, ",",
              avgTotalTime, ",",
              "", '\n', file=report_file, sep='', end='', flush=True)
if db_reporter is not None:
db_reporter.submit({
'FilesNumber': dataFilesNumber,
'FragmentSize': 0,
'BenchName': benchName,
'BestExecTimeMS': bestExecTime,
'BestTotalTimeMS': bestTotalTime,
'WorstExecTimeMS': worstExecTime,
'WorstTotalTimeMS': worstTotalTime,
'AverageExecTimeMS': avgExecTime,
'AverageTotalTimeMS': avgTotalTime})
except IOError as err:
print("Failed writing report file", args.r, err)
| dataFilesNumber = 0
time_ETL = time.time()
exec_time_total = 0
print("RUNNING BENCHMARK NUMBER", benchName, "ITERATION NUMBER", iii)
for quarter in range(0, args.df):
year = 2000 + quarter // 4
perf_file = perf_format_path % (str(year), str(quarter % 4 + 1))
files = [f for f in pathlib.Path(perf_data_path).iterdir() if f.match('Performance_%sQ%s.txt*' % (str(year), str(quarter % 4 + 1)))]
for f in files:
dataframe, exec_time = run_pd_workflow(year = year, quarter = (quarter % 4 + 1), perf_file = str(f))
exec_time_total += exec_time
dataFilesNumber += 1
time_ETL_end = time.time()
ttt = (time_ETL_end - time_ETL) * 1000
print("ITERATION", iii, "EXEC TIME: ", exec_time_total, "TOTAL TIME: ", ttt)
if bestExecTime > exec_time_total:
bestExecTime = exec_time_total
if worstExecTime < exec_time_total:
worstExecTime = exec_time_total
avgExecTime += exec_time_total
if bestTotalTime > ttt:
bestTotalTime = ttt
    if worstTotalTime < ttt:
        worstTotalTime = ttt
avgTotalTime += ttt | conditional_block |
mortgage_pandas.py | # Derived from https://github.com/fschlimb/scale-out-benchs
import numpy as np
import pandas as pd
import mysql.connector  # used below to store benchmark results in MySQL
from pandas.api.types import CategoricalDtype
from io import StringIO
from glob import glob
import os
import time
import pathlib
import sys
import argparse
def run_pd_workflow(quarter=1, year=2000, perf_file="", **kwargs):
t1 = time.time()
names = pd_load_names()
year_string = str(year) + "Q" + str(quarter) + ".txt"
acq_file = os.path.join(data_directory, "acq", "Acquisition_" + year_string)
print("READING DATAFILE", acq_file)
acq_pdf = pd_load_acquisition_csv(acq_file)
print("READING DATAFILE", perf_file)
perf_df_tmp = pd_load_performance_csv(perf_file)
print("read time", (time.time() - t1) * 1000)
t1 = time.time()
acq_pdf = acq_pdf.merge(names, how='left', on=['seller_name'])
acq_pdf.drop(columns=['seller_name'], inplace=True)
acq_pdf['seller_name'] = acq_pdf['new']
acq_pdf.drop(columns=['new'], inplace=True)
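    # Replace the raw seller_name with the canonical name looked up in names.csv.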
pdf = perf_df_tmp
everdf = create_ever_features(pdf)
delinq_merge = create_delinq_features(pdf)
everdf = join_ever_delinq_features(everdf, delinq_merge)
del(delinq_merge)
joined_df = create_joined_df(pdf, everdf)
testdf = create_12_mon_features(joined_df)
joined_df = combine_joined_12_mon(joined_df, testdf)
del(testdf)
perf_df = final_performance_delinquency(pdf, joined_df)
del(pdf, joined_df)
final_pdf = join_perf_acq_pdfs(perf_df, acq_pdf)
del(perf_df)
del(acq_pdf)
print("compute time", (time.time() - t1) * 1000)
final_pdf = last_mile_cleaning(final_pdf)
exec_time = (time.time() - t1) * 1000
print("compute time with copy to host", exec_time)
return final_pdf, exec_time
def pd_load_performance_csv(performance_path, **kwargs):
""" Loads performance data
Returns
-------
PD DataFrame
"""
cols = [
"loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb",
"loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity",
"maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code",
"zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after",
"disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs",
"asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds",
"credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds",
"non_interest_bearing_upb", "principal_forgiveness_upb", "repurchase_make_whole_proceeds_flag",
"foreclosure_principal_write_off_amount", "servicing_activity_indicator"
]
dtypes = {
"loan_id": np.int64,
"monthly_reporting_period": str,
"servicer": str,
"interest_rate": np.float64,
"current_actual_upb": np.float64,
"loan_age": np.float64,
"remaining_months_to_legal_maturity": np.float64,
"adj_remaining_months_to_maturity": np.float64,
"maturity_date": str,
"msa": np.float64,
"current_loan_delinquency_status": np.int32,
"mod_flag": CategoricalDtype(['N', 'Y']),
"zero_balance_code": CategoricalDtype(['01', '02', '06', '09', '03', '15', '16']),
"zero_balance_effective_date": str,
"last_paid_installment_date": str,
"foreclosed_after": str,
"disposition_date": str,
"foreclosure_costs": np.float64,
"prop_preservation_and_repair_costs": np.float64,
"asset_recovery_costs": np.float64,
"misc_holding_expenses": np.float64,
"holding_taxes": np.float64,
"net_sale_proceeds": np.float64,
"credit_enhancement_proceeds": np.float64,
"repurchase_make_whole_proceeds": np.float64,
"other_foreclosure_proceeds": np.float64,
"non_interest_bearing_upb": np.float64,
"principal_forgiveness_upb": np.float64,
"repurchase_make_whole_proceeds_flag": CategoricalDtype(['N', 'Y']),
"foreclosure_principal_write_off_amount": np.float64,
"servicing_activity_indicator": CategoricalDtype(['N', 'Y']),
}
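# parse_dates indices 1, 8, 13-16 correspond to monthly_reporting_period, maturity_date, zero_balance_effective_date, last_paid_installment_date, foreclosed_after and disposition_date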
return pd.read_csv(performance_path, names=cols, delimiter='|', dtype=dtypes, parse_dates=[1,8,13,14,15,16])
def pd_load_acquisition_csv(acquisition_path, **kwargs):
""" Loads acquisition data
Returns
-------
PD DataFrame
"""
columns = [
'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',
'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',
'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',
'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',
'relocation_mortgage_indicator', 'year_quarter'
]
dtypes = {
'loan_id': np.int64,
'orig_channel': CategoricalDtype(['B', 'C', 'R']),
'seller_name': str,
'orig_interest_rate': np.float64,
'orig_upb': np.int64,
'orig_loan_term': np.int64,
'orig_date': str,
'first_pay_date': str,
'orig_ltv': np.float64,
'orig_cltv': np.float64,
'num_borrowers': np.float64,
'dti': np.float64,
'borrower_credit_score': np.float64,
'first_home_buyer': CategoricalDtype(['N', 'U', 'Y']),
'loan_purpose': CategoricalDtype(['C', 'P', 'R', 'U']),
'property_type': CategoricalDtype(['CO', 'CP', 'MH', 'PU', 'SF']),
'num_units': np.int64,
'occupancy_status': CategoricalDtype(['I', 'P', 'S']),
'property_state': CategoricalDtype(
['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI',
'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN',
'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',
'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI',
'VT', 'WA', 'WI', 'WV', 'WY']),
'zip': np.int64,
'mortgage_insurance_percent': np.float64,
'product_type': CategoricalDtype(['FRM']),
'coborrow_credit_score': np.float64,
'mortgage_insurance_type': np.float64,
'relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']),
'year_quarter': np.int64
}
a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes, parse_dates=[6,7], error_bad_lines=True, warn_bad_lines=True, na_filter=True)
return a
def pd_load_names(**kwargs):
""" Loads names used for renaming the banks
Returns
-------
PD DataFrame
"""
cols = [
'seller_name', 'new'
]
dtypes = {'seller_name':str, 'new':str}
return pd.read_csv(os.path.join(data_directory, "names.csv"), names=cols, delimiter='|', dtype=dtypes)
def create_ever_features(pdf, **kwargs):
everdf = pdf[['loan_id', 'current_loan_delinquency_status']]
everdf = everdf.groupby('loan_id').max()
del(pdf)
everdf['ever_30'] = (everdf['current_loan_delinquency_status'] >= 1).astype('int8')
everdf['ever_90'] = (everdf['current_loan_delinquency_status'] >= 3).astype('int8')
everdf['ever_180'] = (everdf['current_loan_delinquency_status'] >= 6).astype('int8')
everdf.drop(columns=['current_loan_delinquency_status'], inplace=True)
return everdf
def create_delinq_features(pdf, **kwargs):
delinq_pdf = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status']]
del(pdf)
delinq_30 = delinq_pdf.query('current_loan_delinquency_status >= 1')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_30['delinquency_30'] = delinq_30['monthly_reporting_period']
delinq_30.drop(columns=['monthly_reporting_period'], inplace=True)
delinq_90 = delinq_pdf.query('current_loan_delinquency_status >= 3')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_90['delinquency_90'] = delinq_90['monthly_reporting_period']
delinq_90.drop(columns=['monthly_reporting_period'], inplace=True)
delinq_180 = delinq_pdf.query('current_loan_delinquency_status >= 6')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_180['delinquency_180'] = delinq_180['monthly_reporting_period']
delinq_180.drop(columns=['monthly_reporting_period'], inplace=True)
del(delinq_pdf)
delinq_merge = delinq_30.merge(delinq_90, how='left', on=['loan_id'])
delinq_merge['delinquency_90'] = delinq_merge['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
delinq_merge = delinq_merge.merge(delinq_180, how='left', on=['loan_id'])
delinq_merge['delinquency_180'] = delinq_merge['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
del(delinq_30)
del(delinq_90)
del(delinq_180)
return delinq_merge
def join_ever_delinq_features(everdf_tmp, delinq_merge, **kwargs):
everdf = everdf_tmp.merge(delinq_merge, on=['loan_id'], how='left')
del(everdf_tmp)
del(delinq_merge)
everdf['delinquency_30'] = everdf['delinquency_30'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
everdf['delinquency_90'] = everdf['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
everdf['delinquency_180'] = everdf['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
return everdf
def create_joined_df(pdf, everdf, **kwargs):
test = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status', 'current_actual_upb']]
del(pdf)
test['timestamp'] = test['monthly_reporting_period']
test.drop(columns=['monthly_reporting_period'], inplace=True)
test['timestamp_month'] = test['timestamp'].dt.month
test['timestamp_year'] = test['timestamp'].dt.year
test['delinquency_12'] = test['current_loan_delinquency_status']
test.drop(columns=['current_loan_delinquency_status'], inplace=True)
test['upb_12'] = test['current_actual_upb']
test.drop(columns=['current_actual_upb'], inplace=True)
test['upb_12'] = test['upb_12'].fillna(999999999)
test['delinquency_12'] = test['delinquency_12'].fillna(-1)
joined_df = test.merge(everdf, how='left', on=['loan_id'])
del(everdf)
del(test)
joined_df['ever_30'] = joined_df['ever_30'].fillna(-1)
joined_df['ever_90'] = joined_df['ever_90'].fillna(-1)
joined_df['ever_180'] = joined_df['ever_180'].fillna(-1)
joined_df['delinquency_30'] = joined_df['delinquency_30'].fillna(-1)
joined_df['delinquency_90'] = joined_df['delinquency_90'].fillna(-1)
joined_df['delinquency_180'] = joined_df['delinquency_180'].fillna(-1)
joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int32')
joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int32')
return joined_df
def create_12_mon_features(joined_df, **kwargs):
testdfs = []
n_months = 12
for y in range(1, n_months + 1):
tmpdf = joined_df[['loan_id', 'timestamp_year', 'timestamp_month', 'delinquency_12', 'upb_12']]
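# josh_months is a running month counter (year*12 + month); subtracting 24000 (Jan 2000) and the offset y,
# then flooring by 12, buckets each row into a 12-month window so delinquency/upb can be aggregated per loan and window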
tmpdf['josh_months'] = tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month']
tmpdf['josh_mody_n'] = np.floor((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12)
tmpdf = tmpdf.groupby(['loan_id', 'josh_mody_n'], as_index=False).agg({'delinquency_12': 'max','upb_12': 'min'})
tmpdf['delinquency_12'] = (tmpdf['delinquency_12']>3).astype('int32')
tmpdf['delinquency_12'] +=(tmpdf['upb_12']==0).astype('int32')
tmpdf['timestamp_year'] = np.floor(((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12).astype('int16')
tmpdf['timestamp_month'] = np.int8(y)
tmpdf.drop(columns=['josh_mody_n'], inplace=True)
testdfs.append(tmpdf)
del(tmpdf)
del(joined_df)
return pd.concat(testdfs)
def combine_joined_12_mon(joined_df, testdf, **kwargs):
joined_df.drop(columns=['delinquency_12', 'upb_12'], inplace=True)
joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int16')
joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int8')
return joined_df.merge(testdf, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'])
def final_performance_delinquency(merged, joined_df, **kwargs):
|
def join_perf_acq_pdfs(perf, acq, **kwargs):
return perf.merge(acq, how='left', on=['loan_id'])
def last_mile_cleaning(df, **kwargs):
#for col, dtype in df.dtypes.iteritems():
# if str(dtype)=='category':
# df[col] = df[col].cat.codes
df['delinquency_12'] = df['delinquency_12'] > 0
df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
return df #.to_arrow(index=False)
# Load database reporting functions
pathToReportDir = os.path.join(pathlib.Path(__file__).parent, "..", "report")
print(pathToReportDir)
sys.path.insert(1, pathToReportDir)
import report
parser = argparse.ArgumentParser(description='Run Mortgage benchmark using pandas')
parser.add_argument('-r', default="report_pandas.csv", help="Report file name.")
parser.add_argument('-df', default=1, type=int, help="Number of datafiles (quarters) to input into database for processing.")
parser.add_argument('-dp', required=True, help="Path to root of mortgage datafiles directory (contains names.csv).")
parser.add_argument('-i', dest="iterations", default=5, type=int, help="Number of iterations to run every benchmark. Best result is selected.")
parser.add_argument("-db-server", default="localhost", help="Host name of MySQL server")
parser.add_argument("-db-port", default=3306, type=int, help="Port number of MySQL server")
parser.add_argument("-db-user", default="", help="Username to use to connect to MySQL database. If user name is specified, script attempts to store results in MySQL database using other -db-* parameters.")
parser.add_argument("-db-pass", default="omniscidb", help="Password to use to connect to MySQL database")
parser.add_argument("-db-name", default="omniscidb", help="MySQL database to use to store benchmark results")
parser.add_argument("-db-table", help="Table to use to store results for this benchmark.")
parser.add_argument("-commit", default="1234567890123456789012345678901234567890", help="Commit hash to use to record this benchmark results")
args = parser.parse_args()
if args.df <= 0:
print("Bad number of data files specified", args.df)
sys.exit(1)
if args.iterations < 1:
print("Bad number of iterations specified", args.t)
db_reporter = None
if args.db_user != "":
print("Connecting to database")
import mysql.connector  # only needed when results are stored in MySQL, so import it lazily here
db = mysql.connector.connect(host=args.db_server, port=args.db_port, user=args.db_user, passwd=args.db_pass, db=args.db_name)
db_reporter = report.DbReport(db, args.db_table, {
'FilesNumber': 'INT UNSIGNED NOT NULL',
'FragmentSize': 'BIGINT UNSIGNED NOT NULL',
'BenchName': 'VARCHAR(500) NOT NULL',
'BestExecTimeMS': 'BIGINT UNSIGNED',
'BestTotalTimeMS': 'BIGINT UNSIGNED',
'WorstExecTimeMS': 'BIGINT UNSIGNED',
'WorstTotalTimeMS': 'BIGINT UNSIGNED',
'AverageExecTimeMS': 'BIGINT UNSIGNED',
'AverageTotalTimeMS': 'BIGINT UNSIGNED'
}, {
'ScriptName': 'mortgage_pandas.py',
'CommitHash': args.commit
})
data_directory = args.dp
benchName = "mortgage_pandas"
perf_data_path = os.path.join(data_directory, "perf")
perf_format_path = os.path.join(perf_data_path, "Performance_%sQ%s.txt")
bestExecTime = float("inf")
bestTotalTime = float("inf")
worstExecTime = 0
worstTotalTime = 0
avgExecTime = 0
avgTotalTime = 0
for iii in range(1, args.iterations + 1):
dataFilesNumber = 0
time_ETL = time.time()
exec_time_total = 0
print("RUNNING BENCHMARK NUMBER", benchName, "ITERATION NUMBER", iii)
for quarter in range(0, args.df):
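# quarters are numbered consecutively from 2000Q1: indices 0-3 map to year 2000, 4-7 to 2001, and so on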
year = 2000 + quarter // 4
perf_file = perf_format_path % (str(year), str(quarter % 4 + 1))
files = [f for f in pathlib.Path(perf_data_path).iterdir() if f.match('Performance_%sQ%s.txt*' % (str(year), str(quarter % 4 + 1)))]
for f in files:
dataframe, exec_time = run_pd_workflow(year = year, quarter = (quarter % 4 + 1), perf_file = str(f))
exec_time_total += exec_time
dataFilesNumber += 1
time_ETL_end = time.time()
ttt = (time_ETL_end - time_ETL) * 1000
print("ITERATION", iii, "EXEC TIME: ", exec_time_total, "TOTAL TIME: ", ttt)
if bestExecTime > exec_time_total:
bestExecTime = exec_time_total
if worstExecTime < exec_time_total:
worstExecTime = exec_time_total
avgExecTime += exec_time_total
if bestTotalTime > ttt:
bestTotalTime = ttt
if worstTotalTime < ttt:
worstTotalTime = ttt
avgTotalTime += ttt
avgExecTime /= args.iterations
avgTotalTime /= args.iterations
try:
with open(args.r, "w") as report:
print("BENCHMARK", benchName, "EXEC TIME", bestExecTime, "TOTAL TIME", bestTotalTime)
print("datafiles,fragment_size,query,query_exec_min,query_total_min,query_exec_max,query_total_max,query_exec_avg,query_total_avg,query_error_info", file=report, flush=True)
print(dataFilesNumber, ",",
0, ",",
benchName, ",",
bestExecTime, ",",
bestTotalTime, ",",
worstExecTime, ",",
worstTotalTime, ",",
avgExecTime, ",",
avgTotalTime, ",",
"", '\n', file=report, sep='', end='', flush=True)
if db_reporter is not None:
db_reporter.submit({
'FilesNumber': dataFilesNumber,
'FragmentSize': 0,
'BenchName': benchName,
'BestExecTimeMS': bestExecTime,
'BestTotalTimeMS': bestTotalTime,
'WorstExecTimeMS': worstExecTime,
'WorstTotalTimeMS': worstTotalTime,
'AverageExecTimeMS': avgExecTime,
'AverageTotalTimeMS': avgTotalTime})
except IOError as err:
print("Failed writing report file", args.r, err)
| merged['timestamp_month'] = merged['monthly_reporting_period'].dt.month
merged['timestamp_month'] = merged['timestamp_month'].astype('int8')
merged['timestamp_year'] = merged['monthly_reporting_period'].dt.year
merged['timestamp_year'] = merged['timestamp_year'].astype('int16')
merged = merged.merge(joined_df, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'])
merged.drop(columns=['timestamp_year'], inplace=True)
merged.drop(columns=['timestamp_month'], inplace=True)
return merged | identifier_body |
mortgage_pandas.py | # Derived from https://github.com/fschlimb/scale-out-benchs
import numpy as np
import pandas as pd
from pymapd import connect | import pathlib
import sys
import argparse
def run_pd_workflow(quarter=1, year=2000, perf_file="", **kwargs):
t1 = time.time()
names = pd_load_names()
year_string = str(year) + "Q" + str(quarter) + ".txt"
acq_file = os.path.join(data_directory, "acq", "Acquisition_" + year_string)
print("READING DATAFILE", acq_file)
acq_pdf = pd_load_acquisition_csv(acq_file)
print("READING DATAFILE", perf_file)
perf_df_tmp = pd_load_performance_csv(perf_file)
print("read time", (time.time() - t1) * 1000)
t1 = time.time()
acq_pdf = acq_pdf.merge(names, how='left', on=['seller_name'])
acq_pdf.drop(columns=['seller_name'], inplace=True)
acq_pdf['seller_name'] = acq_pdf['new']
acq_pdf.drop(columns=['new'], inplace=True)
pdf = perf_df_tmp
everdf = create_ever_features(pdf)
delinq_merge = create_delinq_features(pdf)
everdf = join_ever_delinq_features(everdf, delinq_merge)
del(delinq_merge)
joined_df = create_joined_df(pdf, everdf)
testdf = create_12_mon_features(joined_df)
joined_df = combine_joined_12_mon(joined_df, testdf)
del(testdf)
perf_df = final_performance_delinquency(pdf, joined_df)
del(pdf, joined_df)
final_pdf = join_perf_acq_pdfs(perf_df, acq_pdf)
del(perf_df)
del(acq_pdf)
print("compute time", (time.time() - t1) * 1000)
final_pdf = last_mile_cleaning(final_pdf)
exec_time = (time.time() - t1) * 1000
print("compute time with copy to host", exec_time)
return final_pdf, exec_time
def pd_load_performance_csv(performance_path, **kwargs):
""" Loads performance data
Returns
-------
PD DataFrame
"""
cols = [
"loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb",
"loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity",
"maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code",
"zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after",
"disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs",
"asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds",
"credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds",
"non_interest_bearing_upb", "principal_forgiveness_upb", "repurchase_make_whole_proceeds_flag",
"foreclosure_principal_write_off_amount", "servicing_activity_indicator"
]
dtypes = {
"loan_id": np.int64,
"monthly_reporting_period": str,
"servicer": str,
"interest_rate": np.float64,
"current_actual_upb": np.float64,
"loan_age": np.float64,
"remaining_months_to_legal_maturity": np.float64,
"adj_remaining_months_to_maturity": np.float64,
"maturity_date": str,
"msa": np.float64,
"current_loan_delinquency_status": np.int32,
"mod_flag": CategoricalDtype(['N', 'Y']),
"zero_balance_code": CategoricalDtype(['01', '02', '06', '09', '03', '15', '16']),
"zero_balance_effective_date": str,
"last_paid_installment_date": str,
"foreclosed_after": str,
"disposition_date": str,
"foreclosure_costs": np.float64,
"prop_preservation_and_repair_costs": np.float64,
"asset_recovery_costs": np.float64,
"misc_holding_expenses": np.float64,
"holding_taxes": np.float64,
"net_sale_proceeds": np.float64,
"credit_enhancement_proceeds": np.float64,
"repurchase_make_whole_proceeds": np.float64,
"other_foreclosure_proceeds": np.float64,
"non_interest_bearing_upb": np.float64,
"principal_forgiveness_upb": np.float64,
"repurchase_make_whole_proceeds_flag": CategoricalDtype(['N', 'Y']),
"foreclosure_principal_write_off_amount": np.float64,
"servicing_activity_indicator": CategoricalDtype(['N', 'Y']),
}
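# parse_dates indices 1, 8, 13-16 correspond to monthly_reporting_period, maturity_date, zero_balance_effective_date, last_paid_installment_date, foreclosed_after and disposition_date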
return pd.read_csv(performance_path, names=cols, delimiter='|', dtype=dtypes, parse_dates=[1,8,13,14,15,16])
def pd_load_acquisition_csv(acquisition_path, **kwargs):
""" Loads acquisition data
Returns
-------
PD DataFrame
"""
columns = [
'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',
'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',
'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',
'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',
'relocation_mortgage_indicator', 'year_quarter'
]
dtypes = {
'loan_id': np.int64,
'orig_channel': CategoricalDtype(['B', 'C', 'R']),
'seller_name': str,
'orig_interest_rate': np.float64,
'orig_upb': np.int64,
'orig_loan_term': np.int64,
'orig_date': str,
'first_pay_date': str,
'orig_ltv': np.float64,
'orig_cltv': np.float64,
'num_borrowers': np.float64,
'dti': np.float64,
'borrower_credit_score': np.float64,
'first_home_buyer': CategoricalDtype(['N', 'U', 'Y']),
'loan_purpose': CategoricalDtype(['C', 'P', 'R', 'U']),
'property_type': CategoricalDtype(['CO', 'CP', 'MH', 'PU', 'SF']),
'num_units': np.int64,
'occupancy_status': CategoricalDtype(['I', 'P', 'S']),
'property_state': CategoricalDtype(
['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI',
'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN',
'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',
'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI',
'VT', 'WA', 'WI', 'WV', 'WY']),
'zip': np.int64,
'mortgage_insurance_percent': np.float64,
'product_type': CategoricalDtype(['FRM']),
'coborrow_credit_score': np.float64,
'mortgage_insurance_type': np.float64,
'relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']),
'year_quarter': np.int64
}
a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes, parse_dates=[6,7], error_bad_lines=True, warn_bad_lines=True, na_filter=True)
return a
def pd_load_names(**kwargs):
""" Loads names used for renaming the banks
Returns
-------
PD DataFrame
"""
cols = [
'seller_name', 'new'
]
dtypes = {'seller_name':str, 'new':str}
return pd.read_csv(os.path.join(data_directory, "names.csv"), names=cols, delimiter='|', dtype=dtypes)
def create_ever_features(pdf, **kwargs):
everdf = pdf[['loan_id', 'current_loan_delinquency_status']]
everdf = everdf.groupby('loan_id').max()
del(pdf)
everdf['ever_30'] = (everdf['current_loan_delinquency_status'] >= 1).astype('int8')
everdf['ever_90'] = (everdf['current_loan_delinquency_status'] >= 3).astype('int8')
everdf['ever_180'] = (everdf['current_loan_delinquency_status'] >= 6).astype('int8')
everdf.drop(columns=['current_loan_delinquency_status'], inplace=True)
return everdf
def create_delinq_features(pdf, **kwargs):
delinq_pdf = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status']]
del(pdf)
delinq_30 = delinq_pdf.query('current_loan_delinquency_status >= 1')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_30['delinquency_30'] = delinq_30['monthly_reporting_period']
delinq_30.drop(columns=['monthly_reporting_period'], inplace=True)
delinq_90 = delinq_pdf.query('current_loan_delinquency_status >= 3')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_90['delinquency_90'] = delinq_90['monthly_reporting_period']
delinq_90.drop(columns=['monthly_reporting_period'], inplace=True)
delinq_180 = delinq_pdf.query('current_loan_delinquency_status >= 6')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_180['delinquency_180'] = delinq_180['monthly_reporting_period']
delinq_180.drop(columns=['monthly_reporting_period'], inplace=True)
del(delinq_pdf)
delinq_merge = delinq_30.merge(delinq_90, how='left', on=['loan_id'])
delinq_merge['delinquency_90'] = delinq_merge['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
delinq_merge = delinq_merge.merge(delinq_180, how='left', on=['loan_id'])
delinq_merge['delinquency_180'] = delinq_merge['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
del(delinq_30)
del(delinq_90)
del(delinq_180)
return delinq_merge
def join_ever_delinq_features(everdf_tmp, delinq_merge, **kwargs):
everdf = everdf_tmp.merge(delinq_merge, on=['loan_id'], how='left')
del(everdf_tmp)
del(delinq_merge)
everdf['delinquency_30'] = everdf['delinquency_30'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
everdf['delinquency_90'] = everdf['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
everdf['delinquency_180'] = everdf['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
return everdf
def create_joined_df(pdf, everdf, **kwargs):
test = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status', 'current_actual_upb']]
del(pdf)
test['timestamp'] = test['monthly_reporting_period']
test.drop(columns=['monthly_reporting_period'], inplace=True)
test['timestamp_month'] = test['timestamp'].dt.month
test['timestamp_year'] = test['timestamp'].dt.year
test['delinquency_12'] = test['current_loan_delinquency_status']
test.drop(columns=['current_loan_delinquency_status'], inplace=True)
test['upb_12'] = test['current_actual_upb']
test.drop(columns=['current_actual_upb'], inplace=True)
test['upb_12'] = test['upb_12'].fillna(999999999)
test['delinquency_12'] = test['delinquency_12'].fillna(-1)
joined_df = test.merge(everdf, how='left', on=['loan_id'])
del(everdf)
del(test)
joined_df['ever_30'] = joined_df['ever_30'].fillna(-1)
joined_df['ever_90'] = joined_df['ever_90'].fillna(-1)
joined_df['ever_180'] = joined_df['ever_180'].fillna(-1)
joined_df['delinquency_30'] = joined_df['delinquency_30'].fillna(-1)
joined_df['delinquency_90'] = joined_df['delinquency_90'].fillna(-1)
joined_df['delinquency_180'] = joined_df['delinquency_180'].fillna(-1)
joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int32')
joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int32')
return joined_df
def create_12_mon_features(joined_df, **kwargs):
testdfs = []
n_months = 12
for y in range(1, n_months + 1):
tmpdf = joined_df[['loan_id', 'timestamp_year', 'timestamp_month', 'delinquency_12', 'upb_12']]
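# josh_months is a running month counter (year*12 + month); subtracting 24000 (Jan 2000) and the offset y,
# then flooring by 12, buckets each row into a 12-month window so delinquency/upb can be aggregated per loan and window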
tmpdf['josh_months'] = tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month']
tmpdf['josh_mody_n'] = np.floor((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12)
tmpdf = tmpdf.groupby(['loan_id', 'josh_mody_n'], as_index=False).agg({'delinquency_12': 'max','upb_12': 'min'})
tmpdf['delinquency_12'] = (tmpdf['delinquency_12']>3).astype('int32')
tmpdf['delinquency_12'] +=(tmpdf['upb_12']==0).astype('int32')
tmpdf['timestamp_year'] = np.floor(((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12).astype('int16')
tmpdf['timestamp_month'] = np.int8(y)
tmpdf.drop(columns=['josh_mody_n'], inplace=True)
testdfs.append(tmpdf)
del(tmpdf)
del(joined_df)
return pd.concat(testdfs)
def combine_joined_12_mon(joined_df, testdf, **kwargs):
joined_df.drop(columns=['delinquency_12', 'upb_12'], inplace=True)
joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int16')
joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int8')
return joined_df.merge(testdf, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'])
def final_performance_delinquency(merged, joined_df, **kwargs):
merged['timestamp_month'] = merged['monthly_reporting_period'].dt.month
merged['timestamp_month'] = merged['timestamp_month'].astype('int8')
merged['timestamp_year'] = merged['monthly_reporting_period'].dt.year
merged['timestamp_year'] = merged['timestamp_year'].astype('int16')
merged = merged.merge(joined_df, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'])
merged.drop(columns=['timestamp_year'], inplace=True)
merged.drop(columns=['timestamp_month'], inplace=True)
return merged
def join_perf_acq_pdfs(perf, acq, **kwargs):
return perf.merge(acq, how='left', on=['loan_id'])
def last_mile_cleaning(df, **kwargs):
#for col, dtype in df.dtypes.iteritems():
# if str(dtype)=='category':
# df[col] = df[col].cat.codes
df['delinquency_12'] = df['delinquency_12'] > 0
df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
return df #.to_arrow(index=False)
# Load database reporting functions
pathToReportDir = os.path.join(pathlib.Path(__file__).parent, "..", "report")
print(pathToReportDir)
sys.path.insert(1, pathToReportDir)
import report
parser = argparse.ArgumentParser(description='Run Mortgage benchmark using pandas')
parser.add_argument('-r', default="report_pandas.csv", help="Report file name.")
parser.add_argument('-df', default=1, type=int, help="Number of datafiles (quarters) to input into database for processing.")
parser.add_argument('-dp', required=True, help="Path to root of mortgage datafiles directory (contains names.csv).")
parser.add_argument('-i', dest="iterations", default=5, type=int, help="Number of iterations to run every benchmark. Best result is selected.")
parser.add_argument("-db-server", default="localhost", help="Host name of MySQL server")
parser.add_argument("-db-port", default=3306, type=int, help="Port number of MySQL server")
parser.add_argument("-db-user", default="", help="Username to use to connect to MySQL database. If user name is specified, script attempts to store results in MySQL database using other -db-* parameters.")
parser.add_argument("-db-pass", default="omniscidb", help="Password to use to connect to MySQL database")
parser.add_argument("-db-name", default="omniscidb", help="MySQL database to use to store benchmark results")
parser.add_argument("-db-table", help="Table to use to store results for this benchmark.")
parser.add_argument("-commit", default="1234567890123456789012345678901234567890", help="Commit hash to use to record this benchmark results")
args = parser.parse_args()
if args.df <= 0:
print("Bad number of data files specified", args.df)
sys.exit(1)
if args.iterations < 1:
print("Bad number of iterations specified", args.t)
db_reporter = None
if args.db_user != "":
print("Connecting to database")
import mysql.connector  # only needed when results are stored in MySQL, so import it lazily here
db = mysql.connector.connect(host=args.db_server, port=args.db_port, user=args.db_user, passwd=args.db_pass, db=args.db_name)
db_reporter = report.DbReport(db, args.db_table, {
'FilesNumber': 'INT UNSIGNED NOT NULL',
'FragmentSize': 'BIGINT UNSIGNED NOT NULL',
'BenchName': 'VARCHAR(500) NOT NULL',
'BestExecTimeMS': 'BIGINT UNSIGNED',
'BestTotalTimeMS': 'BIGINT UNSIGNED',
'WorstExecTimeMS': 'BIGINT UNSIGNED',
'WorstTotalTimeMS': 'BIGINT UNSIGNED',
'AverageExecTimeMS': 'BIGINT UNSIGNED',
'AverageTotalTimeMS': 'BIGINT UNSIGNED'
}, {
'ScriptName': 'mortgage_pandas.py',
'CommitHash': args.commit
})
data_directory = args.dp
benchName = "mortgage_pandas"
perf_data_path = os.path.join(data_directory, "perf")
perf_format_path = os.path.join(perf_data_path, "Performance_%sQ%s.txt")
bestExecTime = float("inf")
bestTotalTime = float("inf")
worstExecTime = 0
worstTotalTime = 0
avgExecTime = 0
avgTotalTime = 0
for iii in range(1, args.iterations + 1):
dataFilesNumber = 0
time_ETL = time.time()
exec_time_total = 0
print("RUNNING BENCHMARK NUMBER", benchName, "ITERATION NUMBER", iii)
for quarter in range(0, args.df):
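# quarters are numbered consecutively from 2000Q1: indices 0-3 map to year 2000, 4-7 to 2001, and so on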
year = 2000 + quarter // 4
perf_file = perf_format_path % (str(year), str(quarter % 4 + 1))
files = [f for f in pathlib.Path(perf_data_path).iterdir() if f.match('Performance_%sQ%s.txt*' % (str(year), str(quarter % 4 + 1)))]
for f in files:
dataframe, exec_time = run_pd_workflow(year = year, quarter = (quarter % 4 + 1), perf_file = str(f))
exec_time_total += exec_time
dataFilesNumber += 1
time_ETL_end = time.time()
ttt = (time_ETL_end - time_ETL) * 1000
print("ITERATION", iii, "EXEC TIME: ", exec_time_total, "TOTAL TIME: ", ttt)
if bestExecTime > exec_time_total:
bestExecTime = exec_time_total
if worstExecTime < exec_time_total:
worstExecTime = exec_time_total
avgExecTime += exec_time_total
if bestTotalTime > ttt:
bestTotalTime = ttt
if worstTotalTime < ttt:
worstTotalTime = ttt
avgTotalTime += ttt
avgExecTime /= args.iterations
avgTotalTime /= args.iterations
try:
with open(args.r, "w") as report:
print("BENCHMARK", benchName, "EXEC TIME", bestExecTime, "TOTAL TIME", bestTotalTime)
print("datafiles,fragment_size,query,query_exec_min,query_total_min,query_exec_max,query_total_max,query_exec_avg,query_total_avg,query_error_info", file=report, flush=True)
print(dataFilesNumber, ",",
0, ",",
benchName, ",",
bestExecTime, ",",
bestTotalTime, ",",
worstExecTime, ",",
worstTotalTime, ",",
avgExecTime, ",",
avgTotalTime, ",",
"", '\n', file=report, sep='', end='', flush=True)
if db_reporter is not None:
db_reporter.submit({
'FilesNumber': dataFilesNumber,
'FragmentSize': 0,
'BenchName': benchName,
'BestExecTimeMS': bestExecTime,
'BestTotalTimeMS': bestTotalTime,
'WorstExecTimeMS': worstExecTime,
'WorstTotalTimeMS': worstTotalTime,
'AverageExecTimeMS': avgExecTime,
'AverageTotalTimeMS': avgTotalTime})
except IOError as err:
print("Failed writing report file", args.r, err) | from pandas.api.types import CategoricalDtype
from io import StringIO
from glob import glob
import os
import time | random_line_split |
mortgage_pandas.py | # Derived from https://github.com/fschlimb/scale-out-benchs
import numpy as np
import pandas as pd
from pymapd import connect
from pandas.api.types import CategoricalDtype
from io import StringIO
from glob import glob
import os
import time
import pathlib
import sys
import argparse
def run_pd_workflow(quarter=1, year=2000, perf_file="", **kwargs):
t1 = time.time()
names = pd_load_names()
year_string = str(year) + "Q" + str(quarter) + ".txt"
acq_file = os.path.join(data_directory, "acq", "Acquisition_" + year_string)
print("READING DATAFILE", acq_file)
acq_pdf = pd_load_acquisition_csv(acq_file)
print("READING DATAFILE", perf_file)
perf_df_tmp = pd_load_performance_csv(perf_file)
print("read time", (time.time() - t1) * 1000)
t1 = time.time()
acq_pdf = acq_pdf.merge(names, how='left', on=['seller_name'])
acq_pdf.drop(columns=['seller_name'], inplace=True)
acq_pdf['seller_name'] = acq_pdf['new']
acq_pdf.drop(columns=['new'], inplace=True)
pdf = perf_df_tmp
everdf = create_ever_features(pdf)
delinq_merge = create_delinq_features(pdf)
everdf = join_ever_delinq_features(everdf, delinq_merge)
del(delinq_merge)
joined_df = create_joined_df(pdf, everdf)
testdf = create_12_mon_features(joined_df)
joined_df = combine_joined_12_mon(joined_df, testdf)
del(testdf)
perf_df = final_performance_delinquency(pdf, joined_df)
del(pdf, joined_df)
final_pdf = join_perf_acq_pdfs(perf_df, acq_pdf)
del(perf_df)
del(acq_pdf)
print("compute time", (time.time() - t1) * 1000)
final_pdf = last_mile_cleaning(final_pdf)
exec_time = (time.time() - t1) * 1000
print("compute time with copy to host", exec_time)
return final_pdf, exec_time
def pd_load_performance_csv(performance_path, **kwargs):
""" Loads performance data
Returns
-------
PD DataFrame
"""
cols = [
"loan_id", "monthly_reporting_period", "servicer", "interest_rate", "current_actual_upb",
"loan_age", "remaining_months_to_legal_maturity", "adj_remaining_months_to_maturity",
"maturity_date", "msa", "current_loan_delinquency_status", "mod_flag", "zero_balance_code",
"zero_balance_effective_date", "last_paid_installment_date", "foreclosed_after",
"disposition_date", "foreclosure_costs", "prop_preservation_and_repair_costs",
"asset_recovery_costs", "misc_holding_expenses", "holding_taxes", "net_sale_proceeds",
"credit_enhancement_proceeds", "repurchase_make_whole_proceeds", "other_foreclosure_proceeds",
"non_interest_bearing_upb", "principal_forgiveness_upb", "repurchase_make_whole_proceeds_flag",
"foreclosure_principal_write_off_amount", "servicing_activity_indicator"
]
dtypes = {
"loan_id": np.int64,
"monthly_reporting_period": str,
"servicer": str,
"interest_rate": np.float64,
"current_actual_upb": np.float64,
"loan_age": np.float64,
"remaining_months_to_legal_maturity": np.float64,
"adj_remaining_months_to_maturity": np.float64,
"maturity_date": str,
"msa": np.float64,
"current_loan_delinquency_status": np.int32,
"mod_flag": CategoricalDtype(['N', 'Y']),
"zero_balance_code": CategoricalDtype(['01', '02', '06', '09', '03', '15', '16']),
"zero_balance_effective_date": str,
"last_paid_installment_date": str,
"foreclosed_after": str,
"disposition_date": str,
"foreclosure_costs": np.float64,
"prop_preservation_and_repair_costs": np.float64,
"asset_recovery_costs": np.float64,
"misc_holding_expenses": np.float64,
"holding_taxes": np.float64,
"net_sale_proceeds": np.float64,
"credit_enhancement_proceeds": np.float64,
"repurchase_make_whole_proceeds": np.float64,
"other_foreclosure_proceeds": np.float64,
"non_interest_bearing_upb": np.float64,
"principal_forgiveness_upb": np.float64,
"repurchase_make_whole_proceeds_flag": CategoricalDtype(['N', 'Y']),
"foreclosure_principal_write_off_amount": np.float64,
"servicing_activity_indicator": CategoricalDtype(['N', 'Y']),
}
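# parse_dates indices 1, 8, 13-16 correspond to monthly_reporting_period, maturity_date, zero_balance_effective_date, last_paid_installment_date, foreclosed_after and disposition_date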
return pd.read_csv(performance_path, names=cols, delimiter='|', dtype=dtypes, parse_dates=[1,8,13,14,15,16])
def | (acquisition_path, **kwargs):
""" Loads acquisition data
Returns
-------
PD DataFrame
"""
columns = [
'loan_id', 'orig_channel', 'seller_name', 'orig_interest_rate', 'orig_upb', 'orig_loan_term',
'orig_date', 'first_pay_date', 'orig_ltv', 'orig_cltv', 'num_borrowers', 'dti', 'borrower_credit_score',
'first_home_buyer', 'loan_purpose', 'property_type', 'num_units', 'occupancy_status', 'property_state',
'zip', 'mortgage_insurance_percent', 'product_type', 'coborrow_credit_score', 'mortgage_insurance_type',
'relocation_mortgage_indicator', 'year_quarter'
]
dtypes = {
'loan_id': np.int64,
'orig_channel': CategoricalDtype(['B', 'C', 'R']),
'seller_name': str,
'orig_interest_rate': np.float64,
'orig_upb': np.int64,
'orig_loan_term': np.int64,
'orig_date': str,
'first_pay_date': str,
'orig_ltv': np.float64,
'orig_cltv': np.float64,
'num_borrowers': np.float64,
'dti': np.float64,
'borrower_credit_score': np.float64,
'first_home_buyer': CategoricalDtype(['N', 'U', 'Y']),
'loan_purpose': CategoricalDtype(['C', 'P', 'R', 'U']),
'property_type': CategoricalDtype(['CO', 'CP', 'MH', 'PU', 'SF']),
'num_units': np.int64,
'occupancy_status': CategoricalDtype(['I', 'P', 'S']),
'property_state': CategoricalDtype(
['AK', 'AL', 'AR', 'AZ', 'CA', 'CO', 'CT', 'DC', 'DE', 'FL', 'GA', 'HI',
'IA', 'ID', 'IL', 'IN', 'KS', 'KY', 'LA', 'MA', 'MD', 'ME', 'MI', 'MN',
'MO', 'MS', 'MT', 'NC', 'ND', 'NE', 'NH', 'NJ', 'NM', 'NV', 'NY', 'OH',
'OK', 'OR', 'PA', 'PR', 'RI', 'SC', 'SD', 'TN', 'TX', 'UT', 'VA', 'VI',
'VT', 'WA', 'WI', 'WV', 'WY']),
'zip': np.int64,
'mortgage_insurance_percent': np.float64,
'product_type': CategoricalDtype(['FRM']),
'coborrow_credit_score': np.float64,
'mortgage_insurance_type': np.float64,
'relocation_mortgage_indicator': CategoricalDtype(['N', 'Y']),
'year_quarter': np.int64
}
a = pd.read_csv(acquisition_path, names=columns, delimiter='|', dtype=dtypes, parse_dates=[6,7], error_bad_lines=True, warn_bad_lines=True, na_filter=True)
return a
def pd_load_names(**kwargs):
""" Loads names used for renaming the banks
Returns
-------
PD DataFrame
"""
cols = [
'seller_name', 'new'
]
dtypes = {'seller_name':str, 'new':str}
return pd.read_csv(os.path.join(data_directory, "names.csv"), names=cols, delimiter='|', dtype=dtypes)
def create_ever_features(pdf, **kwargs):
everdf = pdf[['loan_id', 'current_loan_delinquency_status']]
everdf = everdf.groupby('loan_id').max()
del(pdf)
everdf['ever_30'] = (everdf['current_loan_delinquency_status'] >= 1).astype('int8')
everdf['ever_90'] = (everdf['current_loan_delinquency_status'] >= 3).astype('int8')
everdf['ever_180'] = (everdf['current_loan_delinquency_status'] >= 6).astype('int8')
everdf.drop(columns=['current_loan_delinquency_status'], inplace=True)
return everdf
def create_delinq_features(pdf, **kwargs):
delinq_pdf = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status']]
del(pdf)
delinq_30 = delinq_pdf.query('current_loan_delinquency_status >= 1')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_30['delinquency_30'] = delinq_30['monthly_reporting_period']
delinq_30.drop(columns=['monthly_reporting_period'], inplace=True)
delinq_90 = delinq_pdf.query('current_loan_delinquency_status >= 3')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_90['delinquency_90'] = delinq_90['monthly_reporting_period']
delinq_90.drop(columns=['monthly_reporting_period'], inplace=True)
delinq_180 = delinq_pdf.query('current_loan_delinquency_status >= 6')[['loan_id', 'monthly_reporting_period']].groupby('loan_id').min()
delinq_180['delinquency_180'] = delinq_180['monthly_reporting_period']
delinq_180.drop(columns=['monthly_reporting_period'], inplace=True)
del(delinq_pdf)
delinq_merge = delinq_30.merge(delinq_90, how='left', on=['loan_id'])
delinq_merge['delinquency_90'] = delinq_merge['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
delinq_merge = delinq_merge.merge(delinq_180, how='left', on=['loan_id'])
delinq_merge['delinquency_180'] = delinq_merge['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
del(delinq_30)
del(delinq_90)
del(delinq_180)
return delinq_merge
def join_ever_delinq_features(everdf_tmp, delinq_merge, **kwargs):
everdf = everdf_tmp.merge(delinq_merge, on=['loan_id'], how='left')
del(everdf_tmp)
del(delinq_merge)
everdf['delinquency_30'] = everdf['delinquency_30'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
everdf['delinquency_90'] = everdf['delinquency_90'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
everdf['delinquency_180'] = everdf['delinquency_180'].fillna(np.dtype('datetime64[ms]').type('1970-01-01').astype('datetime64[ms]'))
return everdf
def create_joined_df(pdf, everdf, **kwargs):
test = pdf[['loan_id', 'monthly_reporting_period', 'current_loan_delinquency_status', 'current_actual_upb']]
del(pdf)
test['timestamp'] = test['monthly_reporting_period']
test.drop(columns=['monthly_reporting_period'], inplace=True)
test['timestamp_month'] = test['timestamp'].dt.month
test['timestamp_year'] = test['timestamp'].dt.year
test['delinquency_12'] = test['current_loan_delinquency_status']
test.drop(columns=['current_loan_delinquency_status'], inplace=True)
test['upb_12'] = test['current_actual_upb']
test.drop(columns=['current_actual_upb'], inplace=True)
test['upb_12'] = test['upb_12'].fillna(999999999)
test['delinquency_12'] = test['delinquency_12'].fillna(-1)
joined_df = test.merge(everdf, how='left', on=['loan_id'])
del(everdf)
del(test)
joined_df['ever_30'] = joined_df['ever_30'].fillna(-1)
joined_df['ever_90'] = joined_df['ever_90'].fillna(-1)
joined_df['ever_180'] = joined_df['ever_180'].fillna(-1)
joined_df['delinquency_30'] = joined_df['delinquency_30'].fillna(-1)
joined_df['delinquency_90'] = joined_df['delinquency_90'].fillna(-1)
joined_df['delinquency_180'] = joined_df['delinquency_180'].fillna(-1)
joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int32')
joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int32')
return joined_df
def create_12_mon_features(joined_df, **kwargs):
testdfs = []
n_months = 12
for y in range(1, n_months + 1):
tmpdf = joined_df[['loan_id', 'timestamp_year', 'timestamp_month', 'delinquency_12', 'upb_12']]
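# josh_months is a running month counter (year*12 + month); subtracting 24000 (Jan 2000) and the offset y,
# then flooring by 12, buckets each row into a 12-month window so delinquency/upb can be aggregated per loan and window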
tmpdf['josh_months'] = tmpdf['timestamp_year'] * 12 + tmpdf['timestamp_month']
tmpdf['josh_mody_n'] = np.floor((tmpdf['josh_months'].astype('float64') - 24000 - y) / 12)
tmpdf = tmpdf.groupby(['loan_id', 'josh_mody_n'], as_index=False).agg({'delinquency_12': 'max','upb_12': 'min'})
tmpdf['delinquency_12'] = (tmpdf['delinquency_12']>3).astype('int32')
tmpdf['delinquency_12'] +=(tmpdf['upb_12']==0).astype('int32')
tmpdf['timestamp_year'] = np.floor(((tmpdf['josh_mody_n'] * n_months) + 24000 + (y - 1)) / 12).astype('int16')
tmpdf['timestamp_month'] = np.int8(y)
tmpdf.drop(columns=['josh_mody_n'], inplace=True)
testdfs.append(tmpdf)
del(tmpdf)
del(joined_df)
return pd.concat(testdfs)
def combine_joined_12_mon(joined_df, testdf, **kwargs):
joined_df.drop(columns=['delinquency_12', 'upb_12'], inplace=True)
joined_df['timestamp_year'] = joined_df['timestamp_year'].astype('int16')
joined_df['timestamp_month'] = joined_df['timestamp_month'].astype('int8')
return joined_df.merge(testdf, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'])
def final_performance_delinquency(merged, joined_df, **kwargs):
merged['timestamp_month'] = merged['monthly_reporting_period'].dt.month
merged['timestamp_month'] = merged['timestamp_month'].astype('int8')
merged['timestamp_year'] = merged['monthly_reporting_period'].dt.year
merged['timestamp_year'] = merged['timestamp_year'].astype('int16')
merged = merged.merge(joined_df, how='left', on=['loan_id', 'timestamp_year', 'timestamp_month'])
merged.drop(columns=['timestamp_year'], inplace=True)
merged.drop(columns=['timestamp_month'], inplace=True)
return merged
def join_perf_acq_pdfs(perf, acq, **kwargs):
return perf.merge(acq, how='left', on=['loan_id'])
def last_mile_cleaning(df, **kwargs):
#for col, dtype in df.dtypes.iteritems():
# if str(dtype)=='category':
# df[col] = df[col].cat.codes
df['delinquency_12'] = df['delinquency_12'] > 0
df['delinquency_12'] = df['delinquency_12'].fillna(False).astype('int32')
return df #.to_arrow(index=False)
# Load database reporting functions
pathToReportDir = os.path.join(pathlib.Path(__file__).parent, "..", "report")
print(pathToReportDir)
sys.path.insert(1, pathToReportDir)
import report
parser = argparse.ArgumentParser(description='Run Mortgage benchmark using pandas')
parser.add_argument('-r', default="report_pandas.csv", help="Report file name.")
parser.add_argument('-df', default=1, type=int, help="Number of datafiles (quarters) to input into database for processing.")
parser.add_argument('-dp', required=True, help="Path to root of mortgage datafiles directory (contains names.csv).")
parser.add_argument('-i', dest="iterations", default=5, type=int, help="Number of iterations to run every benchmark. Best result is selected.")
parser.add_argument("-db-server", default="localhost", help="Host name of MySQL server")
parser.add_argument("-db-port", default=3306, type=int, help="Port number of MySQL server")
parser.add_argument("-db-user", default="", help="Username to use to connect to MySQL database. If user name is specified, script attempts to store results in MySQL database using other -db-* parameters.")
parser.add_argument("-db-pass", default="omniscidb", help="Password to use to connect to MySQL database")
parser.add_argument("-db-name", default="omniscidb", help="MySQL database to use to store benchmark results")
parser.add_argument("-db-table", help="Table to use to store results for this benchmark.")
parser.add_argument("-commit", default="1234567890123456789012345678901234567890", help="Commit hash to use to record this benchmark results")
args = parser.parse_args()
if args.df <= 0:
print("Bad number of data files specified", args.df)
sys.exit(1)
if args.iterations < 1:
print("Bad number of iterations specified", args.t)
db_reporter = None
if args.db_user != "":
print("Connecting to database")
import mysql.connector  # only needed when results are stored in MySQL, so import it lazily here
db = mysql.connector.connect(host=args.db_server, port=args.db_port, user=args.db_user, passwd=args.db_pass, db=args.db_name)
db_reporter = report.DbReport(db, args.db_table, {
'FilesNumber': 'INT UNSIGNED NOT NULL',
'FragmentSize': 'BIGINT UNSIGNED NOT NULL',
'BenchName': 'VARCHAR(500) NOT NULL',
'BestExecTimeMS': 'BIGINT UNSIGNED',
'BestTotalTimeMS': 'BIGINT UNSIGNED',
'WorstExecTimeMS': 'BIGINT UNSIGNED',
'WorstTotalTimeMS': 'BIGINT UNSIGNED',
'AverageExecTimeMS': 'BIGINT UNSIGNED',
'AverageTotalTimeMS': 'BIGINT UNSIGNED'
}, {
'ScriptName': 'mortgage_pandas.py',
'CommitHash': args.commit
})
data_directory = args.dp
benchName = "mortgage_pandas"
perf_data_path = os.path.join(data_directory, "perf")
perf_format_path = os.path.join(perf_data_path, "Performance_%sQ%s.txt")
bestExecTime = float("inf")
bestTotalTime = float("inf")
worstExecTime = 0
worstTotalTime = 0
avgExecTime = 0
avgTotalTime = 0
for iii in range(1, args.iterations + 1):
dataFilesNumber = 0
time_ETL = time.time()
exec_time_total = 0
print("RUNNING BENCHMARK NUMBER", benchName, "ITERATION NUMBER", iii)
for quarter in range(0, args.df):
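# quarters are numbered consecutively from 2000Q1: indices 0-3 map to year 2000, 4-7 to 2001, and so on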
year = 2000 + quarter // 4
perf_file = perf_format_path % (str(year), str(quarter % 4 + 1))
files = [f for f in pathlib.Path(perf_data_path).iterdir() if f.match('Performance_%sQ%s.txt*' % (str(year), str(quarter % 4 + 1)))]
for f in files:
dataframe, exec_time = run_pd_workflow(year = year, quarter = (quarter % 4 + 1), perf_file = str(f))
exec_time_total += exec_time
dataFilesNumber += 1
time_ETL_end = time.time()
ttt = (time_ETL_end - time_ETL) * 1000
print("ITERATION", iii, "EXEC TIME: ", exec_time_total, "TOTAL TIME: ", ttt)
if bestExecTime > exec_time_total:
bestExecTime = exec_time_total
if worstExecTime < exec_time_total:
worstExecTime = exec_time_total
avgExecTime += exec_time_total
if bestTotalTime > ttt:
bestTotalTime = ttt
if worstTotalTime < ttt:
worstTotalTime = ttt
avgTotalTime += ttt
avgExecTime /= args.iterations
avgTotalTime /= args.iterations
try:
with open(args.r, "w") as report:
print("BENCHMARK", benchName, "EXEC TIME", bestExecTime, "TOTAL TIME", bestTotalTime)
print("datafiles,fragment_size,query,query_exec_min,query_total_min,query_exec_max,query_total_max,query_exec_avg,query_total_avg,query_error_info", file=report, flush=True)
print(dataFilesNumber, ",",
0, ",",
benchName, ",",
bestExecTime, ",",
bestTotalTime, ",",
worstExecTime, ",",
worstTotalTime, ",",
avgExecTime, ",",
avgTotalTime, ",",
"", '\n', file=report, sep='', end='', flush=True)
if db_reporter is not None:
db_reporter.submit({
'FilesNumber': dataFilesNumber,
'FragmentSize': 0,
'BenchName': benchName,
'BestExecTimeMS': bestExecTime,
'BestTotalTimeMS': bestTotalTime,
'WorstExecTimeMS': worstExecTime,
'WorstTotalTimeMS': worstTotalTime,
'AverageExecTimeMS': avgExecTime,
'AverageTotalTimeMS': avgTotalTime})
except IOError as err:
print("Failed writing report file", args.r, err)
| pd_load_acquisition_csv | identifier_name |
api.rs | use std::io::{self, Read, Error, ErrorKind};
use std::borrow::Cow;
use hyper;
use hyper::{client, Client, Url };
use hyper::net::HttpsConnector;
use hyper_native_tls::NativeTlsClient;
use std::time::Duration;
use serde_json;
use product;
use configs::{Configs, ApiConfigs, ProxyConfigs};
const HOST_URL: &'static str = "https://www.versioneye.com";
//builds the URL to the product page (SaaS or Enterprise)
pub fn to_product_url(api_confs: &ApiConfigs, lang: &str, prod_key: &str, version: &str) -> String {
let scheme = match api_confs.scheme.clone() {
Some(val) => val,
None => "http".to_string()
};
let host = match api_confs.host.clone() {
Some(val) => val,
None => HOST_URL.to_string()
};
let host_url = match api_confs.port.clone() {
Some(port) => format!("{}://{}:{}", scheme, host, port),
None => format!("{}://{}", scheme, host )
};
format!("{}/{}/{}/{}", host_url, lang, prod_key, version)
}
//builds the API URL from the configured scheme, host, port and path
fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str)
-> Result<hyper::Url, hyper::error::ParseError> {
let url_str = match api_confs.port {
None => {
format!(
"{}://{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
api_confs.path.clone().unwrap(), resource_path,
)
},
Some(port) => format!(
"{}://{}:{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
port, api_confs.path.clone().unwrap(), resource_path
)
};
Url::parse(url_str.as_str())
}
fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> {
let ssl = NativeTlsClient::new().unwrap();
let connector = HttpsConnector::new(ssl);
//use proxy only iff user has defined proxy host and port
let mut client = if proxy_confs.is_complete() {
let host = Cow::from(proxy_confs.host.clone().unwrap());
let port = proxy_confs.port.clone().unwrap();
let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string());
let ssl_proxy = NativeTlsClient::new().unwrap();
let proxy = client::ProxyConfig::new (
scheme.as_str(), host, port, connector, ssl_proxy
);
Client::with_proxy_config(proxy)
} else {
Client::with_connector(connector)
};
client.set_read_timeout(Some(Duration::new(5,0)));
let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url");
let mut body = String::new();
res.read_to_string(&mut body).expect("Failed to read response body");
Some(body)
}
pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str)
-> Result<product::ProductMatch, Error> {
let sha_res = fetch_product_by_sha(&confs, file_sha);
match sha_res {
Ok(m) => {
let sha = m.sha.expect("No product sha from SHA result");
let product = m.product.expect("No product info from SHA result");
match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) {
Ok(mut m) => {
m.sha = Some(sha);
Ok(m)
},
Err(e) => {
println!("Failed to fetch product details for sha: {}", file_sha);
Err(e)
}
}
},
Err(e) => Err(e)
}
}
pub fn fetch_product_by_sha(confs: &Configs, sha: &str)
-> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let resource_path = format!("products/sha/{}", encode_sha(sha) );
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_sha_response(json_txt)
}
//replaces base64 special characters with HTML safe percentage encoding
//source: https://en.wikipedia.org/wiki/Base64#URL_applications
pub fn encode_sha<'a>(sha: &'a str) -> String {
let encoded_sha = sha.to_string();
encoded_sha.replace("+", "%2B")
.replace("/", "%2F")
.replace("=", "%3D")
.trim().to_string()
}
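// product keys may contain '.' and '/', which would clash with the URL path segments built in configs_to_url;
// they are therefore encoded as '~' and ':' before the request URL is assembled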
pub fn encode_prod_key<'b>(prod_key: &'b str) -> String {
let encoded_prod_key = prod_key.to_string();
encoded_prod_key
.replace(".", "~")
.replace("/", ":")
.trim().to_string()
}
pub fn encode_language<'b>(lang: &'b str) -> String {
let encoded_lang = lang.to_string();
encoded_lang.replace(".", "").trim().to_lowercase().to_string()
}
pub fn fetch_product<'a>(
confs: &Configs, lang: &str, prod_key: &str, version: &str
) -> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let encoded_prod_key = encode_prod_key(&prod_key);
let encoded_lang = encode_language(lang);
let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone());
let prod_url = to_product_url(
&confs.api,
encoded_lang.clone().as_str(),
prod_key,
version
);
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("prod_version", version)
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_product_response(json_txt, Some(prod_url))
}
#[derive(Serialize, Deserialize, Debug)]
struct | {
error: String
}
#[derive(Serialize, Deserialize, Debug)]
struct ShaItem {
language: String,
prod_key: String,
version: String,
sha_value: String,
sha_method: String,
prod_type: Option<String>,
group_id: Option<String>,
artifact_id: Option<String>,
classifier: Option<String>,
packaging: Option<String>
}
//-- helper functions
pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new(ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
if !res.is_array() {
let e = Error::new( ErrorKind::Other, "Unsupported SHA response - expected array");
return Err(e);
}
let shas = res.as_array().unwrap();
if shas.len() == 0 {
let e = Error::new( ErrorKind::Other, "No match for the SHA");
return Err(e);
}
let doc:ShaItem = serde_json::from_value(shas[0].clone()).unwrap();
let the_prod = product::Product {
name: "".to_string(),
language: doc.language,
prod_key: doc.prod_key,
version: doc.version,
prod_type: doc.prod_type
};
let the_sha = product::ProductSHA {
packaging: doc.packaging.unwrap_or("unknown".to_string()),
method: doc.sha_method,
value: doc.sha_value,
filepath: None
};
Ok(product::ProductMatch::new(the_prod, the_sha))
}
// converts the response of product endpoint into ProductMatch struct
#[derive(Serialize, Deserialize, Debug)]
struct ProductItem {
name: String,
language: String,
prod_key: String,
version: String,
prod_type: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct LicenseItem {
name: String,
url: Option<String>
}
pub fn process_product_response(
json_text: Option<String>, prod_url: Option<String>
) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new( ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str( &json_text.unwrap().as_str() )?;
if !res.is_object() {
return Err(Error::new(ErrorKind::Other, "No product details"));
}
//if response includes error field in HTTP200 response
// NB! it may include other errors than limit, but @Rob asked to see custom Limit error message
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
let product_doc:ProductItem = serde_json::from_value(res.clone())?;
let the_prod = product::Product {
name: product_doc.name,
language: product_doc.language,
prod_key: product_doc.prod_key,
version: product_doc.version,
prod_type: Some( product_doc.prod_type )
};
//extract license details
let licenses = match res["licenses"].as_array() {
Some(arr) => arr.iter().fold(vec![], |mut acc, ref x| {
let lic_doc = x.as_object().unwrap();
acc.push(product::ProductLicense {
name: lic_doc["name"].as_str().unwrap_or("unknown").to_string(),
url: lic_doc["url"].as_str().unwrap_or("").to_string()
});
acc
}),
None => vec![]
};
//count number of vulnerabilities
let n_vulns = match res["security_vulnerabilities"].as_array() {
Some(arr) => arr.len() as u32,
None => 0 as u32
};
let the_match = product::ProductMatch {
sha: None,
product: Some(the_prod),
url: prod_url,
licenses : licenses,
n_vulns : n_vulns,
error: None
};
Ok(the_match)
}
| ApiError | identifier_name |
api.rs | use std::io::{self, Read, Error, ErrorKind};
use std::borrow::Cow;
use hyper;
use hyper::{client, Client, Url };
use hyper::net::HttpsConnector;
use hyper_native_tls::NativeTlsClient;
use std::time::Duration;
use serde_json;
use product;
use configs::{Configs, ApiConfigs, ProxyConfigs};
const HOST_URL: &'static str = "https://www.versioneye.com";
//builds the URL to the product page (SaaS or Enterprise)
pub fn to_product_url(api_confs: &ApiConfigs, lang: &str, prod_key: &str, version: &str) -> String {
let scheme = match api_confs.scheme.clone() {
Some(val) => val,
None => "http".to_string()
};
let host = match api_confs.host.clone() {
Some(val) => val,
None => HOST_URL.to_string()
};
let host_url = match api_confs.port.clone() {
Some(port) => format!("{}://{}:{}", scheme, host, port),
None => format!("{}://{}", scheme, host )
};
format!("{}/{}/{}/{}", host_url, lang, prod_key, version)
}
// Builds the API URL from the API configs.
fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str)
-> Result<hyper::Url, hyper::error::ParseError> {
let url_str = match api_confs.port {
None => {
format!(
"{}://{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
api_confs.path.clone().unwrap(), resource_path,
)
},
Some(port) => format!(
"{}://{}:{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
port, api_confs.path.clone().unwrap(), resource_path
)
};
Url::parse(url_str.as_str())
}
fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> {
let ssl = NativeTlsClient::new().unwrap();
let connector = HttpsConnector::new(ssl);
    // Use the proxy only if the user has defined both a proxy host and port.
let mut client = if proxy_confs.is_complete() {
let host = Cow::from(proxy_confs.host.clone().unwrap());
let port = proxy_confs.port.clone().unwrap();
let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string());
let ssl_proxy = NativeTlsClient::new().unwrap();
let proxy = client::ProxyConfig::new (
scheme.as_str(), host, port, connector, ssl_proxy
);
Client::with_proxy_config(proxy)
} else {
Client::with_connector(connector)
};
client.set_read_timeout(Some(Duration::new(5,0)));
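    // Note: send/read failures panic via expect() rather than being surfaced to the
    // caller, since this helper only returns Option<String>.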
let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url");
let mut body = String::new();
res.read_to_string(&mut body).expect("Failed to read response body");
Some(body)
}
pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str)
-> Result<product::ProductMatch, Error> {
let sha_res = fetch_product_by_sha(&confs, file_sha);
match sha_res {
Ok(m) => {
let sha = m.sha.expect("No product sha from SHA result");
let product = m.product.expect("No product info from SHA result");
match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) {
Ok(mut m) => {
m.sha = Some(sha);
Ok(m)
},
Err(e) => {
println!("Failed to fetch product details for sha: {}", file_sha);
Err(e)
}
}
},
Err(e) => Err(e)
}
}
pub fn fetch_product_by_sha(confs: &Configs, sha: &str)
-> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let resource_path = format!("products/sha/{}", encode_sha(sha) );
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_sha_response(json_txt)
}
// Replaces base64 special characters with URL-safe percent encoding.
// Source: https://en.wikipedia.org/wiki/Base64#URL_applications
pub fn encode_sha<'a>(sha: &'a str) -> String {
let encoded_sha = sha.to_string();
encoded_sha.replace("+", "%2B")
.replace("/", "%2F")
.replace("=", "%3D")
.trim().to_string()
}
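// Illustrative example: encode_sha("abc+d/e=") yields "abc%2Bd%2Fe%3D".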
pub fn encode_prod_key<'b>(prod_key: &'b str) -> String {
let encoded_prod_key = prod_key.to_string();
encoded_prod_key
.replace(".", "~")
.replace("/", ":")
.trim().to_string()
}
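// Illustrative example: encode_prod_key("com.foo/bar") yields "com~foo:bar".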
pub fn encode_language<'b>(lang: &'b str) -> String {
let encoded_lang = lang.to_string();
encoded_lang.replace(".", "").trim().to_lowercase().to_string()
}
pub fn fetch_product<'a>(
confs: &Configs, lang: &str, prod_key: &str, version: &str
) -> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let encoded_prod_key = encode_prod_key(&prod_key);
let encoded_lang = encode_language(lang);
let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone());
let prod_url = to_product_url(
&confs.api,
encoded_lang.clone().as_str(),
prod_key,
version
);
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("prod_version", version)
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_product_response(json_txt, Some(prod_url))
}
#[derive(Serialize, Deserialize, Debug)]
struct ApiError {
error: String
}
#[derive(Serialize, Deserialize, Debug)]
struct ShaItem {
language: String,
prod_key: String,
version: String,
sha_value: String,
sha_method: String,
prod_type: Option<String>,
group_id: Option<String>,
artifact_id: Option<String>,
classifier: Option<String>,
packaging: Option<String>
}
//-- helper functions
pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error> |
// Converts the product endpoint response into a ProductMatch struct.
#[derive(Serialize, Deserialize, Debug)]
struct ProductItem {
name: String,
language: String,
prod_key: String,
version: String,
prod_type: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct LicenseItem {
name: String,
url: Option<String>
}
pub fn process_product_response(
json_text: Option<String>, prod_url: Option<String>
) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new( ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str( &json_text.unwrap().as_str() )?;
if !res.is_object() {
return Err(Error::new(ErrorKind::Other, "No product details"));
}
    // The API may embed an error field inside an HTTP 200 response.
    // NB: the error may be something other than rate limiting, but per @Rob's request the custom limit message is shown.
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
let product_doc:ProductItem = serde_json::from_value(res.clone())?;
let the_prod = product::Product {
name: product_doc.name,
language: product_doc.language,
prod_key: product_doc.prod_key,
version: product_doc.version,
prod_type: Some( product_doc.prod_type )
};
//extract license details
let licenses = match res["licenses"].as_array() {
Some(arr) => arr.iter().fold(vec![], |mut acc, ref x| {
let lic_doc = x.as_object().unwrap();
acc.push(product::ProductLicense {
name: lic_doc["name"].as_str().unwrap_or("unknown").to_string(),
url: lic_doc["url"].as_str().unwrap_or("").to_string()
});
acc
}),
None => vec![]
};
//count number of vulnerabilities
let n_vulns = match res["security_vulnerabilities"].as_array() {
Some(arr) => arr.len() as u32,
None => 0 as u32
};
let the_match = product::ProductMatch {
sha: None,
product: Some(the_prod),
url: prod_url,
licenses : licenses,
n_vulns : n_vulns,
error: None
};
Ok(the_match)
}
| {
if json_text.is_none() {
return Err(
Error::new(ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
if !res.is_array() {
let e = Error::new( ErrorKind::Other, "Unsupported SHA response - expected array");
return Err(e);
}
let shas = res.as_array().unwrap();
if shas.len() == 0 {
let e = Error::new( ErrorKind::Other, "No match for the SHA");
return Err(e);
}
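    // Only the first SHA match is used; any additional matches returned by the API are ignored.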
let doc:ShaItem = serde_json::from_value(shas[0].clone()).unwrap();
let the_prod = product::Product {
name: "".to_string(),
language: doc.language,
prod_key: doc.prod_key,
version: doc.version,
prod_type: doc.prod_type
};
let the_sha = product::ProductSHA {
packaging: doc.packaging.unwrap_or("unknown".to_string()),
method: doc.sha_method,
value: doc.sha_value,
filepath: None
};
Ok(product::ProductMatch::new(the_prod, the_sha))
} | identifier_body |
api.rs | use std::io::{self, Read, Error, ErrorKind};
use std::borrow::Cow;
use hyper;
use hyper::{client, Client, Url };
use hyper::net::HttpsConnector;
use hyper_native_tls::NativeTlsClient;
use std::time::Duration;
use serde_json;
use product;
use configs::{Configs, ApiConfigs, ProxyConfigs};
const HOST_URL: &'static str = "https://www.versioneye.com";
// Builds the URL to the product page (SaaS or Enterprise).
pub fn to_product_url(api_confs: &ApiConfigs, lang: &str, prod_key: &str, version: &str) -> String {
let scheme = match api_confs.scheme.clone() {
Some(val) => val,
None => "http".to_string()
};
let host = match api_confs.host.clone() {
Some(val) => val,
None => HOST_URL.to_string()
};
let host_url = match api_confs.port.clone() {
Some(port) => format!("{}://{}:{}", scheme, host, port),
None => format!("{}://{}", scheme, host )
};
format!("{}/{}/{}/{}", host_url, lang, prod_key, version)
}
// Builds the API URL from the API configs.
fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str)
-> Result<hyper::Url, hyper::error::ParseError> {
let url_str = match api_confs.port {
None => {
format!(
"{}://{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
api_confs.path.clone().unwrap(), resource_path,
)
},
Some(port) => format!(
"{}://{}:{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
port, api_confs.path.clone().unwrap(), resource_path
)
};
Url::parse(url_str.as_str())
}
fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> {
let ssl = NativeTlsClient::new().unwrap();
let connector = HttpsConnector::new(ssl);
    // Use the proxy only if the user has defined both a proxy host and port.
let mut client = if proxy_confs.is_complete() {
let host = Cow::from(proxy_confs.host.clone().unwrap());
let port = proxy_confs.port.clone().unwrap();
let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string());
let ssl_proxy = NativeTlsClient::new().unwrap();
let proxy = client::ProxyConfig::new (
scheme.as_str(), host, port, connector, ssl_proxy
);
Client::with_proxy_config(proxy)
} else {
Client::with_connector(connector)
};
client.set_read_timeout(Some(Duration::new(5,0)));
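    // Note: send/read failures panic via expect() rather than being surfaced to the
    // caller, since this helper only returns Option<String>.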
let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url");
let mut body = String::new();
res.read_to_string(&mut body).expect("Failed to read response body");
Some(body)
}
pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str)
-> Result<product::ProductMatch, Error> {
let sha_res = fetch_product_by_sha(&confs, file_sha);
match sha_res {
Ok(m) => {
let sha = m.sha.expect("No product sha from SHA result");
let product = m.product.expect("No product info from SHA result");
match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) {
Ok(mut m) => {
m.sha = Some(sha);
Ok(m)
},
Err(e) => {
println!("Failed to fetch product details for sha: {}", file_sha);
Err(e)
}
}
},
Err(e) => Err(e)
}
}
pub fn fetch_product_by_sha(confs: &Configs, sha: &str)
-> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let resource_path = format!("products/sha/{}", encode_sha(sha) );
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
}; | .clear()
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_sha_response(json_txt)
}
// Replaces base64 special characters with URL-safe percent encoding.
// Source: https://en.wikipedia.org/wiki/Base64#URL_applications
pub fn encode_sha<'a>(sha: &'a str) -> String {
let encoded_sha = sha.to_string();
encoded_sha.replace("+", "%2B")
.replace("/", "%2F")
.replace("=", "%3D")
.trim().to_string()
}
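// Illustrative example: encode_sha("abc+d/e=") yields "abc%2Bd%2Fe%3D".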
pub fn encode_prod_key<'b>(prod_key: &'b str) -> String {
let encoded_prod_key = prod_key.to_string();
encoded_prod_key
.replace(".", "~")
.replace("/", ":")
.trim().to_string()
}
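// Illustrative example: encode_prod_key("com.foo/bar") yields "com~foo:bar".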
pub fn encode_language<'b>(lang: &'b str) -> String {
let encoded_lang = lang.to_string();
encoded_lang.replace(".", "").trim().to_lowercase().to_string()
}
pub fn fetch_product<'a>(
confs: &Configs, lang: &str, prod_key: &str, version: &str
) -> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let encoded_prod_key = encode_prod_key(&prod_key);
let encoded_lang = encode_language(lang);
let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone());
let prod_url = to_product_url(
&confs.api,
encoded_lang.clone().as_str(),
prod_key,
version
);
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("prod_version", version)
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_product_response(json_txt, Some(prod_url))
}
#[derive(Serialize, Deserialize, Debug)]
struct ApiError {
error: String
}
#[derive(Serialize, Deserialize, Debug)]
struct ShaItem {
language: String,
prod_key: String,
version: String,
sha_value: String,
sha_method: String,
prod_type: Option<String>,
group_id: Option<String>,
artifact_id: Option<String>,
classifier: Option<String>,
packaging: Option<String>
}
//-- helper functions
pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new(ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
if !res.is_array() {
let e = Error::new( ErrorKind::Other, "Unsupported SHA response - expected array");
return Err(e);
}
let shas = res.as_array().unwrap();
if shas.len() == 0 {
let e = Error::new( ErrorKind::Other, "No match for the SHA");
return Err(e);
}
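    // Only the first SHA match is used; any additional matches returned by the API are ignored.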
let doc:ShaItem = serde_json::from_value(shas[0].clone()).unwrap();
let the_prod = product::Product {
name: "".to_string(),
language: doc.language,
prod_key: doc.prod_key,
version: doc.version,
prod_type: doc.prod_type
};
let the_sha = product::ProductSHA {
packaging: doc.packaging.unwrap_or("unknown".to_string()),
method: doc.sha_method,
value: doc.sha_value,
filepath: None
};
Ok(product::ProductMatch::new(the_prod, the_sha))
}
// Converts the product endpoint response into a ProductMatch struct.
#[derive(Serialize, Deserialize, Debug)]
struct ProductItem {
name: String,
language: String,
prod_key: String,
version: String,
prod_type: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct LicenseItem {
name: String,
url: Option<String>
}
pub fn process_product_response(
json_text: Option<String>, prod_url: Option<String>
) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new( ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str( &json_text.unwrap().as_str() )?;
if !res.is_object() {
return Err(Error::new(ErrorKind::Other, "No product details"));
}
    // The API may embed an error field inside an HTTP 200 response.
    // NB: the error may be something other than rate limiting, but per @Rob's request the custom limit message is shown.
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
let product_doc:ProductItem = serde_json::from_value(res.clone())?;
let the_prod = product::Product {
name: product_doc.name,
language: product_doc.language,
prod_key: product_doc.prod_key,
version: product_doc.version,
prod_type: Some( product_doc.prod_type )
};
//extract license details
let licenses = match res["licenses"].as_array() {
Some(arr) => arr.iter().fold(vec![], |mut acc, ref x| {
let lic_doc = x.as_object().unwrap();
acc.push(product::ProductLicense {
name: lic_doc["name"].as_str().unwrap_or("unknown").to_string(),
url: lic_doc["url"].as_str().unwrap_or("").to_string()
});
acc
}),
None => vec![]
};
//count number of vulnerabilities
let n_vulns = match res["security_vulnerabilities"].as_array() {
Some(arr) => arr.len() as u32,
None => 0 as u32
};
let the_match = product::ProductMatch {
sha: None,
product: Some(the_prod),
url: prod_url,
licenses : licenses,
n_vulns : n_vulns,
error: None
};
Ok(the_match)
} |
//attach query params
resource_url
.query_pairs_mut() | random_line_split |
api.rs | use std::io::{self, Read, Error, ErrorKind};
use std::borrow::Cow;
use hyper;
use hyper::{client, Client, Url };
use hyper::net::HttpsConnector;
use hyper_native_tls::NativeTlsClient;
use std::time::Duration;
use serde_json;
use product;
use configs::{Configs, ApiConfigs, ProxyConfigs};
const HOST_URL: &'static str = "https://www.versioneye.com";
// Builds the URL to the product page (SaaS or Enterprise).
pub fn to_product_url(api_confs: &ApiConfigs, lang: &str, prod_key: &str, version: &str) -> String {
let scheme = match api_confs.scheme.clone() {
Some(val) => val,
None => "http".to_string()
};
let host = match api_confs.host.clone() {
Some(val) => val,
None => HOST_URL.to_string()
};
let host_url = match api_confs.port.clone() {
Some(port) => format!("{}://{}:{}", scheme, host, port),
None => format!("{}://{}", scheme, host )
};
format!("{}/{}/{}/{}", host_url, lang, prod_key, version)
}
// Builds the API URL from the API configs.
fn configs_to_url(api_confs: &ApiConfigs, resource_path: &str)
-> Result<hyper::Url, hyper::error::ParseError> {
let url_str = match api_confs.port {
None => {
format!(
"{}://{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
api_confs.path.clone().unwrap(), resource_path,
)
},
Some(port) => format!(
"{}://{}:{}/{}/{}",
api_confs.scheme.clone().unwrap(), api_confs.host.clone().unwrap(),
port, api_confs.path.clone().unwrap(), resource_path
)
};
Url::parse(url_str.as_str())
}
fn request_json<'a>(uri: &Url, proxy_confs: &'a ProxyConfigs) -> Option<String> {
let ssl = NativeTlsClient::new().unwrap();
let connector = HttpsConnector::new(ssl);
    // Use the proxy only if the user has defined both a proxy host and port.
let mut client = if proxy_confs.is_complete() {
let host = Cow::from(proxy_confs.host.clone().unwrap());
let port = proxy_confs.port.clone().unwrap();
let scheme = proxy_confs.scheme.clone().unwrap_or("http".to_string());
let ssl_proxy = NativeTlsClient::new().unwrap();
let proxy = client::ProxyConfig::new (
scheme.as_str(), host, port, connector, ssl_proxy
);
Client::with_proxy_config(proxy)
} else {
Client::with_connector(connector)
};
client.set_read_timeout(Some(Duration::new(5,0)));
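    // Note: send/read failures panic via expect() rather than being surfaced to the
    // caller, since this helper only returns Option<String>.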
let mut res = client.get(uri.as_str()).send().expect("Failed to fetch results from the url");
let mut body = String::new();
res.read_to_string(&mut body).expect("Failed to read response body");
Some(body)
}
pub fn fetch_product_details_by_sha(confs: &Configs, file_sha: &str)
-> Result<product::ProductMatch, Error> {
let sha_res = fetch_product_by_sha(&confs, file_sha);
match sha_res {
Ok(m) => | ,
Err(e) => Err(e)
}
}
pub fn fetch_product_by_sha(confs: &Configs, sha: &str)
-> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let resource_path = format!("products/sha/{}", encode_sha(sha) );
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_sha_response(json_txt)
}
// Replaces base64 special characters with URL-safe percent encoding.
// Source: https://en.wikipedia.org/wiki/Base64#URL_applications
pub fn encode_sha<'a>(sha: &'a str) -> String {
let encoded_sha = sha.to_string();
encoded_sha.replace("+", "%2B")
.replace("/", "%2F")
.replace("=", "%3D")
.trim().to_string()
}
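// Illustrative example: encode_sha("abc+d/e=") yields "abc%2Bd%2Fe%3D".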
pub fn encode_prod_key<'b>(prod_key: &'b str) -> String {
let encoded_prod_key = prod_key.to_string();
encoded_prod_key
.replace(".", "~")
.replace("/", ":")
.trim().to_string()
}
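// Illustrative example: encode_prod_key("com.foo/bar") yields "com~foo:bar".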
pub fn encode_language<'b>(lang: &'b str) -> String {
let encoded_lang = lang.to_string();
encoded_lang.replace(".", "").trim().to_lowercase().to_string()
}
pub fn fetch_product<'a>(
confs: &Configs, lang: &str, prod_key: &str, version: &str
) -> Result<product::ProductMatch, io::Error> {
let api_confs = confs.api.clone();
let encoded_prod_key = encode_prod_key(&prod_key);
let encoded_lang = encode_language(lang);
let resource_path = format!("products/{}/{}", encoded_lang.clone(), encoded_prod_key.clone());
let prod_url = to_product_url(
&confs.api,
encoded_lang.clone().as_str(),
prod_key,
version
);
let mut resource_url = match configs_to_url(&api_confs, resource_path.as_str()) {
Ok(the_url) => the_url,
Err(_) => {
return Err(
Error::new(
ErrorKind::InvalidData,
"The values of API configs make up non-valid URL"
)
)
}
};
//attach query params
resource_url
.query_pairs_mut()
.clear()
.append_pair("prod_version", version)
.append_pair("api_key", api_confs.key.clone().unwrap().as_str());
let json_txt = request_json( &resource_url, &confs.proxy );
process_product_response(json_txt, Some(prod_url))
}
#[derive(Serialize, Deserialize, Debug)]
struct ApiError {
error: String
}
#[derive(Serialize, Deserialize, Debug)]
struct ShaItem {
language: String,
prod_key: String,
version: String,
sha_value: String,
sha_method: String,
prod_type: Option<String>,
group_id: Option<String>,
artifact_id: Option<String>,
classifier: Option<String>,
packaging: Option<String>
}
//-- helper functions
pub fn process_sha_response(json_text: Option<String> ) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new(ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str(json_text.unwrap().as_str())?;
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
if !res.is_array() {
let e = Error::new( ErrorKind::Other, "Unsupported SHA response - expected array");
return Err(e);
}
let shas = res.as_array().unwrap();
if shas.len() == 0 {
let e = Error::new( ErrorKind::Other, "No match for the SHA");
return Err(e);
}
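    // Only the first SHA match is used; any additional matches returned by the API are ignored.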
let doc:ShaItem = serde_json::from_value(shas[0].clone()).unwrap();
let the_prod = product::Product {
name: "".to_string(),
language: doc.language,
prod_key: doc.prod_key,
version: doc.version,
prod_type: doc.prod_type
};
let the_sha = product::ProductSHA {
packaging: doc.packaging.unwrap_or("unknown".to_string()),
method: doc.sha_method,
value: doc.sha_value,
filepath: None
};
Ok(product::ProductMatch::new(the_prod, the_sha))
}
// Converts the product endpoint response into a ProductMatch struct.
#[derive(Serialize, Deserialize, Debug)]
struct ProductItem {
name: String,
language: String,
prod_key: String,
version: String,
prod_type: String,
}
#[derive(Serialize, Deserialize, Debug)]
struct LicenseItem {
name: String,
url: Option<String>
}
pub fn process_product_response(
json_text: Option<String>, prod_url: Option<String>
) -> Result<product::ProductMatch, io::Error> {
if json_text.is_none() {
return Err(
Error::new( ErrorKind::Other, "No response from API")
)
}
let res: serde_json::Value = serde_json::from_str( &json_text.unwrap().as_str() )?;
if !res.is_object() {
return Err(Error::new(ErrorKind::Other, "No product details"));
}
    // The API may embed an error field inside an HTTP 200 response.
    // NB: the error may be something other than rate limiting, but per @Rob's request the custom limit message is shown.
if res.is_object() && res.get("error").is_some() {
let e = Error::new(
ErrorKind::Other,
r#"API rate limit reached. Go to https://www.versioneye.com and upgrade your
subscription to a higher plan."#
);
return Err(e);
}
let product_doc:ProductItem = serde_json::from_value(res.clone())?;
let the_prod = product::Product {
name: product_doc.name,
language: product_doc.language,
prod_key: product_doc.prod_key,
version: product_doc.version,
prod_type: Some( product_doc.prod_type )
};
//extract license details
let licenses = match res["licenses"].as_array() {
Some(arr) => arr.iter().fold(vec![], |mut acc, ref x| {
let lic_doc = x.as_object().unwrap();
acc.push(product::ProductLicense {
name: lic_doc["name"].as_str().unwrap_or("unknown").to_string(),
url: lic_doc["url"].as_str().unwrap_or("").to_string()
});
acc
}),
None => vec![]
};
//count number of vulnerabilities
let n_vulns = match res["security_vulnerabilities"].as_array() {
Some(arr) => arr.len() as u32,
None => 0 as u32
};
let the_match = product::ProductMatch {
sha: None,
product: Some(the_prod),
url: prod_url,
licenses : licenses,
n_vulns : n_vulns,
error: None
};
Ok(the_match)
}
| {
let sha = m.sha.expect("No product sha from SHA result");
let product = m.product.expect("No product info from SHA result");
match fetch_product( &confs, &product.language, &product.prod_key, &product.version ) {
Ok(mut m) => {
m.sha = Some(sha);
Ok(m)
},
Err(e) => {
println!("Failed to fetch product details for sha: {}", file_sha);
Err(e)
}
}
} | conditional_block |
txn_ext.rs | // Copyright 2023 TiKV Project Authors. Licensed under Apache-2.0.
//! This module contains everything related to transaction hook.
//!
//! This is a temporary (efficient) solution; it should be implemented as one
//! type of coprocessor.
use std::sync::{atomic::Ordering, Arc};
use crossbeam::atomic::AtomicCell;
use engine_traits::{KvEngine, RaftEngine, CF_LOCK};
use kvproto::{kvrpcpb::ExtraOp, metapb::Region, raft_cmdpb::RaftRequestHeader};
use parking_lot::RwLockWriteGuard;
use raft::eraftpb;
use raftstore::store::{
LocksStatus, PeerPessimisticLocks, TxnExt, TRANSFER_LEADER_COMMAND_REPLY_CTX,
};
use slog::{error, info, Logger};
use crate::{
batch::StoreContext,
raft::Peer,
router::{PeerMsg, PeerTick},
worker::pd,
SimpleWriteEncoder,
};
pub struct TxnContext {
ext: Arc<TxnExt>,
extra_op: Arc<AtomicCell<ExtraOp>>,
reactivate_memory_lock_ticks: usize,
}
impl Default for TxnContext {
#[inline]
fn default() -> Self {
Self {
ext: Arc::default(),
extra_op: Arc::new(AtomicCell::new(ExtraOp::Noop)),
reactivate_memory_lock_ticks: 0,
}
}
}
impl TxnContext {
#[inline]
pub fn on_region_changed(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &mut StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// A more recent read may happen on the old leader. So max ts should
// be updated after a peer becomes leader.
self.require_updating_max_ts(ctx, term, region, logger);
// Init the in-memory pessimistic lock table when the peer becomes leader.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::Normal;
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// If a follower merges into a leader, a more recent read may happen
// on the leader of the follower. So max ts should be updated after
// a region merge.
self.require_updating_max_ts(ctx, term, region, logger);
}
#[inline]
pub fn on_became_follower(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::NotLeader;
pessimistic_locks.clear();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn ext(&self) -> &Arc<TxnExt> {
&self.ext
}
#[inline]
pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> {
&self.extra_op
}
fn require_updating_max_ts<EK, ER, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) where
EK: KvEngine,
ER: RaftEngine,
{
let epoch = region.get_region_epoch();
let term_low_bits = term & ((1 << 32) - 1); // 32 bits
let version_lot_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits
let initial_status = (term_low_bits << 32) | (version_lot_bits << 1);
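        // Bit layout implied by the packing above: bits 32..=63 hold the low term bits,
        // bits 1..=31 hold the low region-epoch version bits, and bit 0 is left as 0,
        // presumably to be flipped once the max-ts update succeeds.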
self.ext
.max_ts_sync_status
.store(initial_status, Ordering::SeqCst);
info!(
logger,
"require updating max ts";
"initial_status" => initial_status,
);
let task = pd::Task::UpdateMaxTimestamp {
region_id: region.get_id(),
initial_status,
txn_ext: self.ext.clone(),
};
if let Err(e) = ctx.schedulers.pd.schedule(task) {
error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" => ?e);
}
}
pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> {
// Group in-memory pessimistic locks in the original region into new regions.
// The locks of new regions will be put into the corresponding new regions
// later. And the locks belonging to the old region will stay in the original
// map.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
// Update the version so the concurrent reader will fail due to EpochNotMatch
// instead of PessimisticLockNotFound.
pessimistic_locks.version = derived.get_region_epoch().get_version();
pessimistic_locks.group_by_regions(regions, derived)
}
pub fn init_with_lock(&self, locks: PeerPessimisticLocks) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
*pessimistic_locks = locks;
}
}
impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> {
    /// Returns true if the tick is consumed; otherwise the tick should be
    /// rescheduled.
pub fn on_reactivate_memory_lock_tick<T>(&mut self, ctx: &mut StoreContext<EK, ER, T>) |
// Returns whether we should propose another TransferLeader command. This is
// for:
    // - Since the number of pessimistic locks can be large, it can reduce the
    // unavailable time caused by waiting for the transferee to catch up on logs.
    // - It makes transferring leader happen strictly after write commands that execute
    // before proposing the locks, preventing unexpected lock loss.
pub fn propose_locks_before_transfer_leader<T>(
&mut self,
ctx: &mut StoreContext<EK, ER, T>,
msg: &eraftpb::Message,
) -> bool {
// 1. Disable in-memory pessimistic locks.
// Clone to make borrow checker happy when registering ticks.
let txn_ext = self.txn_context().ext.clone();
let mut pessimistic_locks = txn_ext.pessimistic_locks.write();
// If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message
// is a reply to a transfer leader command before. If the locks status remain
// in the TransferringLeader status, we can safely initiate transferring leader
// now.
// If it's not in TransferringLeader status now, it is probably because several
// ticks have passed after proposing the locks in the last time and we
// reactivate the memory locks. Then, we should propose the locks again.
if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX
&& pessimistic_locks.status == LocksStatus::TransferringLeader
{
return false;
}
// If it is not writable, it's probably because it's a retried TransferLeader
// and the locks have been proposed. But we still need to return true to
// propose another TransferLeader command. Otherwise, some write requests that
// have marked some locks as deleted will fail because raft rejects more
// proposals.
// It is OK to return true here if it's in other states like MergingRegion or
// NotLeader. In those cases, the locks will fail to propose and nothing will
// happen.
if !pessimistic_locks.is_writable() {
return true;
}
pessimistic_locks.status = LocksStatus::TransferringLeader;
self.txn_context_mut().reactivate_memory_lock_ticks = 0;
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
// 2. Propose pessimistic locks
if pessimistic_locks.is_empty() {
return false;
}
// FIXME: Raft command has size limit. Either limit the total size of
// pessimistic locks in a region, or split commands here.
let mut encoder = SimpleWriteEncoder::with_capacity(512);
let mut lock_count = 0;
{
// Downgrade to a read guard, do not block readers in the scheduler as far as
// possible.
let pessimistic_locks = RwLockWriteGuard::downgrade(pessimistic_locks);
fail::fail_point!("invalidate_locks_before_transfer_leader");
for (key, (lock, deleted)) in &*pessimistic_locks {
if *deleted {
continue;
}
lock_count += 1;
encoder.put(CF_LOCK, key.as_encoded(), &lock.to_lock().to_bytes());
}
}
if lock_count == 0 {
// If the map is not empty but all locks are deleted, it is possible that a
// write command has just marked locks deleted but not proposed yet.
// It might cause that command to fail if we skip proposing the
// extra TransferLeader command here.
return true;
}
let mut header = Box::<RaftRequestHeader>::default();
header.set_region_id(self.region_id());
header.set_region_epoch(self.region().get_region_epoch().clone());
header.set_peer(self.peer().clone());
info!(
self.logger,
"propose {} locks before transferring leader", lock_count;
);
let PeerMsg::SimpleWrite(write) = PeerMsg::simple_write(header, encoder.encode()).0 else {unreachable!()};
self.on_simple_write(ctx, write.header, write.data, write.ch);
true
}
}
| {
// If it is not leader, we needn't reactivate by tick. In-memory pessimistic
// lock will be enabled when this region becomes leader again.
if !self.is_leader() {
return;
}
let transferring_leader = self.raft_group().raft.lead_transferee.is_some();
let txn_context = self.txn_context_mut();
let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write();
// And this tick is currently only used for the leader transfer failure case.
if pessimistic_locks.status != LocksStatus::TransferringLeader {
return;
}
txn_context.reactivate_memory_lock_ticks += 1;
// `lead_transferee` is not set immediately after the lock status changes. So,
// we need the tick count condition to avoid reactivating too early.
if !transferring_leader
&& txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick
{
pessimistic_locks.status = LocksStatus::Normal;
txn_context.reactivate_memory_lock_ticks = 0;
} else {
drop(pessimistic_locks);
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
}
} | identifier_body |
txn_ext.rs | // Copyright 2023 TiKV Project Authors. Licensed under Apache-2.0.
//! This module contains everything related to transaction hook.
//!
//! This is a temporary (efficient) solution; it should be implemented as one
//! type of coprocessor.
use std::sync::{atomic::Ordering, Arc};
use crossbeam::atomic::AtomicCell;
use engine_traits::{KvEngine, RaftEngine, CF_LOCK};
use kvproto::{kvrpcpb::ExtraOp, metapb::Region, raft_cmdpb::RaftRequestHeader};
use parking_lot::RwLockWriteGuard;
use raft::eraftpb;
use raftstore::store::{
LocksStatus, PeerPessimisticLocks, TxnExt, TRANSFER_LEADER_COMMAND_REPLY_CTX,
};
use slog::{error, info, Logger};
use crate::{
batch::StoreContext,
raft::Peer,
router::{PeerMsg, PeerTick},
worker::pd,
SimpleWriteEncoder,
};
pub struct TxnContext {
ext: Arc<TxnExt>,
extra_op: Arc<AtomicCell<ExtraOp>>,
reactivate_memory_lock_ticks: usize,
}
impl Default for TxnContext {
#[inline]
fn default() -> Self {
Self {
ext: Arc::default(),
extra_op: Arc::new(AtomicCell::new(ExtraOp::Noop)),
reactivate_memory_lock_ticks: 0,
}
}
}
impl TxnContext {
#[inline]
pub fn on_region_changed(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &mut StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// A more recent read may happen on the old leader. So max ts should
// be updated after a peer becomes leader.
self.require_updating_max_ts(ctx, term, region, logger);
// Init the in-memory pessimistic lock table when the peer becomes leader.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::Normal;
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// If a follower merges into a leader, a more recent read may happen
// on the leader of the follower. So max ts should be updated after
// a region merge.
self.require_updating_max_ts(ctx, term, region, logger);
}
#[inline]
pub fn on_became_follower(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::NotLeader;
pessimistic_locks.clear();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn ext(&self) -> &Arc<TxnExt> {
&self.ext
}
#[inline]
pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> {
&self.extra_op
}
fn require_updating_max_ts<EK, ER, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) where
EK: KvEngine,
ER: RaftEngine,
{
let epoch = region.get_region_epoch();
let term_low_bits = term & ((1 << 32) - 1); // 32 bits
let version_lot_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits
let initial_status = (term_low_bits << 32) | (version_lot_bits << 1);
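        // Bit layout implied by the packing above: bits 32..=63 hold the low term bits,
        // bits 1..=31 hold the low region-epoch version bits, and bit 0 is left as 0,
        // presumably to be flipped once the max-ts update succeeds.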
self.ext
.max_ts_sync_status
.store(initial_status, Ordering::SeqCst);
info!(
logger,
"require updating max ts";
"initial_status" => initial_status,
);
let task = pd::Task::UpdateMaxTimestamp {
region_id: region.get_id(),
initial_status,
txn_ext: self.ext.clone(),
};
if let Err(e) = ctx.schedulers.pd.schedule(task) {
error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" => ?e);
}
}
pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> {
// Group in-memory pessimistic locks in the original region into new regions.
// The locks of new regions will be put into the corresponding new regions
// later. And the locks belonging to the old region will stay in the original
// map.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
// Update the version so the concurrent reader will fail due to EpochNotMatch
// instead of PessimisticLockNotFound.
pessimistic_locks.version = derived.get_region_epoch().get_version();
pessimistic_locks.group_by_regions(regions, derived)
}
pub fn init_with_lock(&self, locks: PeerPessimisticLocks) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
*pessimistic_locks = locks;
}
}
impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> {
    /// Returns true if the tick is consumed; otherwise the tick should be
    /// rescheduled.
pub fn | <T>(&mut self, ctx: &mut StoreContext<EK, ER, T>) {
// If it is not leader, we needn't reactivate by tick. In-memory pessimistic
// lock will be enabled when this region becomes leader again.
if !self.is_leader() {
return;
}
let transferring_leader = self.raft_group().raft.lead_transferee.is_some();
let txn_context = self.txn_context_mut();
let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write();
// And this tick is currently only used for the leader transfer failure case.
if pessimistic_locks.status != LocksStatus::TransferringLeader {
return;
}
txn_context.reactivate_memory_lock_ticks += 1;
// `lead_transferee` is not set immediately after the lock status changes. So,
// we need the tick count condition to avoid reactivating too early.
if !transferring_leader
&& txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick
{
pessimistic_locks.status = LocksStatus::Normal;
txn_context.reactivate_memory_lock_ticks = 0;
} else {
drop(pessimistic_locks);
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
}
}
// Returns whether we should propose another TransferLeader command. This is
// for:
    // - Since the number of pessimistic locks can be large, it can reduce the
    // unavailable time caused by waiting for the transferee to catch up on logs.
    // - It makes transferring leader happen strictly after write commands that execute
    // before proposing the locks, preventing unexpected lock loss.
pub fn propose_locks_before_transfer_leader<T>(
&mut self,
ctx: &mut StoreContext<EK, ER, T>,
msg: &eraftpb::Message,
) -> bool {
// 1. Disable in-memory pessimistic locks.
// Clone to make borrow checker happy when registering ticks.
let txn_ext = self.txn_context().ext.clone();
let mut pessimistic_locks = txn_ext.pessimistic_locks.write();
// If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message
// is a reply to a transfer leader command before. If the locks status remain
// in the TransferringLeader status, we can safely initiate transferring leader
// now.
// If it's not in TransferringLeader status now, it is probably because several
// ticks have passed after proposing the locks in the last time and we
// reactivate the memory locks. Then, we should propose the locks again.
if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX
&& pessimistic_locks.status == LocksStatus::TransferringLeader
{
return false;
}
// If it is not writable, it's probably because it's a retried TransferLeader
// and the locks have been proposed. But we still need to return true to
// propose another TransferLeader command. Otherwise, some write requests that
// have marked some locks as deleted will fail because raft rejects more
// proposals.
// It is OK to return true here if it's in other states like MergingRegion or
// NotLeader. In those cases, the locks will fail to propose and nothing will
// happen.
if !pessimistic_locks.is_writable() {
return true;
}
pessimistic_locks.status = LocksStatus::TransferringLeader;
self.txn_context_mut().reactivate_memory_lock_ticks = 0;
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
// 2. Propose pessimistic locks
if pessimistic_locks.is_empty() {
return false;
}
// FIXME: Raft command has size limit. Either limit the total size of
// pessimistic locks in a region, or split commands here.
let mut encoder = SimpleWriteEncoder::with_capacity(512);
let mut lock_count = 0;
{
// Downgrade to a read guard, do not block readers in the scheduler as far as
// possible.
let pessimistic_locks = RwLockWriteGuard::downgrade(pessimistic_locks);
fail::fail_point!("invalidate_locks_before_transfer_leader");
for (key, (lock, deleted)) in &*pessimistic_locks {
if *deleted {
continue;
}
lock_count += 1;
encoder.put(CF_LOCK, key.as_encoded(), &lock.to_lock().to_bytes());
}
}
if lock_count == 0 {
// If the map is not empty but all locks are deleted, it is possible that a
// write command has just marked locks deleted but not proposed yet.
// It might cause that command to fail if we skip proposing the
// extra TransferLeader command here.
return true;
}
let mut header = Box::<RaftRequestHeader>::default();
header.set_region_id(self.region_id());
header.set_region_epoch(self.region().get_region_epoch().clone());
header.set_peer(self.peer().clone());
info!(
self.logger,
"propose {} locks before transferring leader", lock_count;
);
let PeerMsg::SimpleWrite(write) = PeerMsg::simple_write(header, encoder.encode()).0 else {unreachable!()};
self.on_simple_write(ctx, write.header, write.data, write.ch);
true
}
}
| on_reactivate_memory_lock_tick | identifier_name |
txn_ext.rs | // Copyright 2023 TiKV Project Authors. Licensed under Apache-2.0.
//! This module contains everything related to transaction hook.
//!
//! This is a temporary (efficient) solution; it should be implemented as one
//! type of coprocessor.
use std::sync::{atomic::Ordering, Arc};
use crossbeam::atomic::AtomicCell;
use engine_traits::{KvEngine, RaftEngine, CF_LOCK};
use kvproto::{kvrpcpb::ExtraOp, metapb::Region, raft_cmdpb::RaftRequestHeader};
use parking_lot::RwLockWriteGuard;
use raft::eraftpb;
use raftstore::store::{
LocksStatus, PeerPessimisticLocks, TxnExt, TRANSFER_LEADER_COMMAND_REPLY_CTX,
};
use slog::{error, info, Logger};
use crate::{
batch::StoreContext,
raft::Peer,
router::{PeerMsg, PeerTick},
worker::pd,
SimpleWriteEncoder,
};
pub struct TxnContext {
ext: Arc<TxnExt>,
extra_op: Arc<AtomicCell<ExtraOp>>,
reactivate_memory_lock_ticks: usize,
}
impl Default for TxnContext {
#[inline]
fn default() -> Self {
Self {
ext: Arc::default(),
extra_op: Arc::new(AtomicCell::new(ExtraOp::Noop)),
reactivate_memory_lock_ticks: 0,
}
}
}
impl TxnContext {
#[inline]
pub fn on_region_changed(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &mut StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// A more recent read may happen on the old leader. So max ts should
// be updated after a peer becomes leader.
self.require_updating_max_ts(ctx, term, region, logger);
// Init the in-memory pessimistic lock table when the peer becomes leader.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::Normal;
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// If a follower merges into a leader, a more recent read may happen
// on the leader of the follower. So max ts should be updated after
// a region merge.
self.require_updating_max_ts(ctx, term, region, logger);
}
#[inline]
pub fn on_became_follower(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::NotLeader;
pessimistic_locks.clear();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn ext(&self) -> &Arc<TxnExt> {
&self.ext
}
#[inline]
pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> {
&self.extra_op
}
fn require_updating_max_ts<EK, ER, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) where
EK: KvEngine,
ER: RaftEngine,
{
let epoch = region.get_region_epoch();
let term_low_bits = term & ((1 << 32) - 1); // 32 bits
let version_lot_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits
let initial_status = (term_low_bits << 32) | (version_lot_bits << 1);
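        // Bit layout implied by the packing above: bits 32..=63 hold the low term bits,
        // bits 1..=31 hold the low region-epoch version bits, and bit 0 is left as 0,
        // presumably to be flipped once the max-ts update succeeds.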
self.ext
.max_ts_sync_status
.store(initial_status, Ordering::SeqCst);
info!(
logger,
"require updating max ts";
"initial_status" => initial_status,
);
let task = pd::Task::UpdateMaxTimestamp {
region_id: region.get_id(),
initial_status,
txn_ext: self.ext.clone(),
};
if let Err(e) = ctx.schedulers.pd.schedule(task) {
error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" => ?e);
}
}
pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> {
// Group in-memory pessimistic locks in the original region into new regions.
// The locks of new regions will be put into the corresponding new regions
// later. And the locks belonging to the old region will stay in the original
// map.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
// Update the version so the concurrent reader will fail due to EpochNotMatch
// instead of PessimisticLockNotFound.
pessimistic_locks.version = derived.get_region_epoch().get_version();
pessimistic_locks.group_by_regions(regions, derived)
}
pub fn init_with_lock(&self, locks: PeerPessimisticLocks) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
*pessimistic_locks = locks;
}
}
impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> {
    /// Returns true if the tick is consumed; otherwise the tick should be
    /// rescheduled.
pub fn on_reactivate_memory_lock_tick<T>(&mut self, ctx: &mut StoreContext<EK, ER, T>) {
// If it is not leader, we needn't reactivate by tick. In-memory pessimistic
// lock will be enabled when this region becomes leader again.
if !self.is_leader() {
return;
}
let transferring_leader = self.raft_group().raft.lead_transferee.is_some();
let txn_context = self.txn_context_mut();
let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write();
// And this tick is currently only used for the leader transfer failure case.
if pessimistic_locks.status != LocksStatus::TransferringLeader {
return;
}
txn_context.reactivate_memory_lock_ticks += 1;
// `lead_transferee` is not set immediately after the lock status changes. So,
// we need the tick count condition to avoid reactivating too early.
if !transferring_leader
&& txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick
{
pessimistic_locks.status = LocksStatus::Normal;
txn_context.reactivate_memory_lock_ticks = 0;
} else {
drop(pessimistic_locks);
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
}
}
// Returns whether we should propose another TransferLeader command. This is
// for:
    // - Since the number of pessimistic locks can be large, it can reduce the
    // unavailable time caused by waiting for the transferee to catch up on logs.
    // - It makes transferring leader happen strictly after write commands that execute
    // before proposing the locks, preventing unexpected lock loss.
pub fn propose_locks_before_transfer_leader<T>(
&mut self,
ctx: &mut StoreContext<EK, ER, T>,
msg: &eraftpb::Message,
) -> bool {
// 1. Disable in-memory pessimistic locks.
// Clone to make borrow checker happy when registering ticks.
let txn_ext = self.txn_context().ext.clone();
let mut pessimistic_locks = txn_ext.pessimistic_locks.write();
// If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message
// is a reply to a transfer leader command before. If the locks status remain
// in the TransferringLeader status, we can safely initiate transferring leader
// now.
// If it's not in TransferringLeader status now, it is probably because several
// ticks have passed after proposing the locks in the last time and we
// reactivate the memory locks. Then, we should propose the locks again.
if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX
&& pessimistic_locks.status == LocksStatus::TransferringLeader
{
return false;
}
// If it is not writable, it's probably because it's a retried TransferLeader
// and the locks have been proposed. But we still need to return true to
// propose another TransferLeader command. Otherwise, some write requests that
// have marked some locks as deleted will fail because raft rejects more
// proposals.
// It is OK to return true here if it's in other states like MergingRegion or
// NotLeader. In those cases, the locks will fail to propose and nothing will
// happen.
if !pessimistic_locks.is_writable() {
return true;
}
pessimistic_locks.status = LocksStatus::TransferringLeader;
self.txn_context_mut().reactivate_memory_lock_ticks = 0;
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
// 2. Propose pessimistic locks
if pessimistic_locks.is_empty() {
return false;
}
// FIXME: Raft command has size limit. Either limit the total size of
// pessimistic locks in a region, or split commands here.
let mut encoder = SimpleWriteEncoder::with_capacity(512);
let mut lock_count = 0;
{
// Downgrade to a read guard, do not block readers in the scheduler as far as
// possible. | continue;
}
lock_count += 1;
encoder.put(CF_LOCK, key.as_encoded(), &lock.to_lock().to_bytes());
}
}
if lock_count == 0 {
// If the map is not empty but all locks are deleted, it is possible that a
// write command has just marked locks deleted but not proposed yet.
// It might cause that command to fail if we skip proposing the
// extra TransferLeader command here.
return true;
}
let mut header = Box::<RaftRequestHeader>::default();
header.set_region_id(self.region_id());
header.set_region_epoch(self.region().get_region_epoch().clone());
header.set_peer(self.peer().clone());
info!(
self.logger,
"propose {} locks before transferring leader", lock_count;
);
let PeerMsg::SimpleWrite(write) = PeerMsg::simple_write(header, encoder.encode()).0 else {unreachable!()};
self.on_simple_write(ctx, write.header, write.data, write.ch);
true
}
} | let pessimistic_locks = RwLockWriteGuard::downgrade(pessimistic_locks);
fail::fail_point!("invalidate_locks_before_transfer_leader");
for (key, (lock, deleted)) in &*pessimistic_locks {
if *deleted { | random_line_split |
txn_ext.rs | // Copyright 2023 TiKV Project Authors. Licensed under Apache-2.0.
//! This module contains everything related to transaction hook.
//!
//! This is a temporary (efficient) solution; it should be implemented as one
//! type of coprocessor.
use std::sync::{atomic::Ordering, Arc};
use crossbeam::atomic::AtomicCell;
use engine_traits::{KvEngine, RaftEngine, CF_LOCK};
use kvproto::{kvrpcpb::ExtraOp, metapb::Region, raft_cmdpb::RaftRequestHeader};
use parking_lot::RwLockWriteGuard;
use raft::eraftpb;
use raftstore::store::{
LocksStatus, PeerPessimisticLocks, TxnExt, TRANSFER_LEADER_COMMAND_REPLY_CTX,
};
use slog::{error, info, Logger};
use crate::{
batch::StoreContext,
raft::Peer,
router::{PeerMsg, PeerTick},
worker::pd,
SimpleWriteEncoder,
};
pub struct TxnContext {
ext: Arc<TxnExt>,
extra_op: Arc<AtomicCell<ExtraOp>>,
reactivate_memory_lock_ticks: usize,
}
impl Default for TxnContext {
#[inline]
fn default() -> Self {
Self {
ext: Arc::default(),
extra_op: Arc::new(AtomicCell::new(ExtraOp::Noop)),
reactivate_memory_lock_ticks: 0,
}
}
}
impl TxnContext {
#[inline]
pub fn on_region_changed(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn on_became_leader<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &mut StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// A more recent read may happen on the old leader. So max ts should
// be updated after a peer becomes leader.
self.require_updating_max_ts(ctx, term, region, logger);
// Init the in-memory pessimistic lock table when the peer becomes leader.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::Normal;
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn after_commit_merge<EK: KvEngine, ER: RaftEngine, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) {
// If a follower merges into a leader, a more recent read may happen
// on the leader of the follower. So max ts should be updated after
// a region merge.
self.require_updating_max_ts(ctx, term, region, logger);
}
#[inline]
pub fn on_became_follower(&self, term: u64, region: &Region) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
pessimistic_locks.status = LocksStatus::NotLeader;
pessimistic_locks.clear();
pessimistic_locks.term = term;
pessimistic_locks.version = region.get_region_epoch().get_version();
}
#[inline]
pub fn ext(&self) -> &Arc<TxnExt> {
&self.ext
}
#[inline]
pub fn extra_op(&self) -> &Arc<AtomicCell<ExtraOp>> {
&self.extra_op
}
fn require_updating_max_ts<EK, ER, T>(
&self,
ctx: &StoreContext<EK, ER, T>,
term: u64,
region: &Region,
logger: &Logger,
) where
EK: KvEngine,
ER: RaftEngine,
{
let epoch = region.get_region_epoch();
let term_low_bits = term & ((1 << 32) - 1); // 32 bits
let version_lot_bits = epoch.get_version() & ((1 << 31) - 1); // 31 bits
let initial_status = (term_low_bits << 32) | (version_lot_bits << 1);
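        // Bit layout implied by the packing above: bits 32..=63 hold the low term bits,
        // bits 1..=31 hold the low region-epoch version bits, and bit 0 is left as 0,
        // presumably to be flipped once the max-ts update succeeds.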
self.ext
.max_ts_sync_status
.store(initial_status, Ordering::SeqCst);
info!(
logger,
"require updating max ts";
"initial_status" => initial_status,
);
let task = pd::Task::UpdateMaxTimestamp {
region_id: region.get_id(),
initial_status,
txn_ext: self.ext.clone(),
};
if let Err(e) = ctx.schedulers.pd.schedule(task) {
error!(logger, "failed to notify pd with UpdateMaxTimestamp"; "err" => ?e);
}
}
pub fn split(&self, regions: &[Region], derived: &Region) -> Vec<PeerPessimisticLocks> {
// Group in-memory pessimistic locks in the original region into new regions.
// The locks of new regions will be put into the corresponding new regions
// later. And the locks belonging to the old region will stay in the original
// map.
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
// Update the version so the concurrent reader will fail due to EpochNotMatch
// instead of PessimisticLockNotFound.
pessimistic_locks.version = derived.get_region_epoch().get_version();
pessimistic_locks.group_by_regions(regions, derived)
}
pub fn init_with_lock(&self, locks: PeerPessimisticLocks) {
let mut pessimistic_locks = self.ext.pessimistic_locks.write();
*pessimistic_locks = locks;
}
}
impl<EK: KvEngine, ER: RaftEngine> Peer<EK, ER> {
/// Returns true if the tick is consumed; otherwise the tick should be
/// rescheduled.
pub fn on_reactivate_memory_lock_tick<T>(&mut self, ctx: &mut StoreContext<EK, ER, T>) {
// If it is not leader, we needn't reactivate by tick. In-memory pessimistic
// lock will be enabled when this region becomes leader again.
if !self.is_leader() {
return;
}
let transferring_leader = self.raft_group().raft.lead_transferee.is_some();
let txn_context = self.txn_context_mut();
let mut pessimistic_locks = txn_context.ext.pessimistic_locks.write();
// And this tick is currently only used for the leader transfer failure case.
if pessimistic_locks.status != LocksStatus::TransferringLeader {
return;
}
txn_context.reactivate_memory_lock_ticks += 1;
// `lead_transferee` is not set immediately after the lock status changes. So,
// we need the tick count condition to avoid reactivating too early.
if !transferring_leader
&& txn_context.reactivate_memory_lock_ticks >= ctx.cfg.reactive_memory_lock_timeout_tick
{
pessimistic_locks.status = LocksStatus::Normal;
txn_context.reactivate_memory_lock_ticks = 0;
} else |
}
// Returns whether we should propose another TransferLeader command. This is
// for:
// - Considering the number of pessimistic locks can be large, it can reduce the
// unavailable time caused by waiting for the transferee to catch up on logs.
// - Making the leader transfer happen strictly after the write commands that
// execute before proposing the locks, preventing unexpected lock loss.
pub fn propose_locks_before_transfer_leader<T>(
&mut self,
ctx: &mut StoreContext<EK, ER, T>,
msg: &eraftpb::Message,
) -> bool {
// 1. Disable in-memory pessimistic locks.
// Clone to make borrow checker happy when registering ticks.
let txn_ext = self.txn_context().ext.clone();
let mut pessimistic_locks = txn_ext.pessimistic_locks.write();
// If the message context == TRANSFER_LEADER_COMMAND_REPLY_CTX, the message
// is a reply to a previous transfer leader command. If the locks status is still
// TransferringLeader, we can safely initiate transferring leader now.
// If it's not in TransferringLeader status now, it is probably because several
// ticks have passed since the locks were last proposed and the memory locks
// have been reactivated. In that case, we should propose the locks again.
if msg.get_context() == TRANSFER_LEADER_COMMAND_REPLY_CTX
&& pessimistic_locks.status == LocksStatus::TransferringLeader
{
return false;
}
// If it is not writable, it's probably because it's a retried TransferLeader
// and the locks have been proposed. But we still need to return true to
// propose another TransferLeader command. Otherwise, some write requests that
// have marked some locks as deleted will fail because raft rejects more
// proposals.
// It is OK to return true here if it's in other states like MergingRegion or
// NotLeader. In those cases, the locks will fail to propose and nothing will
// happen.
if !pessimistic_locks.is_writable() {
return true;
}
pessimistic_locks.status = LocksStatus::TransferringLeader;
self.txn_context_mut().reactivate_memory_lock_ticks = 0;
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
// 2. Propose pessimistic locks
if pessimistic_locks.is_empty() {
return false;
}
// FIXME: Raft command has size limit. Either limit the total size of
// pessimistic locks in a region, or split commands here.
let mut encoder = SimpleWriteEncoder::with_capacity(512);
let mut lock_count = 0;
{
// Downgrade to a read guard so that readers in the scheduler are blocked as
// little as possible.
let pessimistic_locks = RwLockWriteGuard::downgrade(pessimistic_locks);
fail::fail_point!("invalidate_locks_before_transfer_leader");
for (key, (lock, deleted)) in &*pessimistic_locks {
if *deleted {
continue;
}
lock_count += 1;
encoder.put(CF_LOCK, key.as_encoded(), &lock.to_lock().to_bytes());
}
}
if lock_count == 0 {
// If the map is not empty but all locks are deleted, it is possible that a
// write command has just marked locks deleted but not proposed yet.
// It might cause that command to fail if we skip proposing the
// extra TransferLeader command here.
return true;
}
let mut header = Box::<RaftRequestHeader>::default();
header.set_region_id(self.region_id());
header.set_region_epoch(self.region().get_region_epoch().clone());
header.set_peer(self.peer().clone());
info!(
self.logger,
"propose {} locks before transferring leader", lock_count;
);
let PeerMsg::SimpleWrite(write) = PeerMsg::simple_write(header, encoder.encode()).0 else {unreachable!()};
self.on_simple_write(ctx, write.header, write.data, write.ch);
true
}
}
| {
drop(pessimistic_locks);
self.add_pending_tick(PeerTick::ReactivateMemoryLock);
} | conditional_block |
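The status word built in require_updating_max_ts above packs two fields into a single 64-bit value: the low 32 bits of the term occupy the upper half, and the low 31 bits of the region-epoch version are shifted left by one, leaving bit 0 clear. A minimal, self-contained sketch of that layout, written in Go purely for illustration; the idea that bit 0 is later used as a "synced" flag is my assumption, not something stated in the snippet:
package main

import "fmt"

// packStatus mirrors the bit layout used by require_updating_max_ts above:
// bits 32..63 hold the low 32 bits of the term, bits 1..31 hold the low 31 bits
// of the region-epoch version, and bit 0 is left clear (assumed to be flipped
// once the max-ts update completes).
func packStatus(term, version uint64) uint64 {
	termLow := term & ((1 << 32) - 1)       // 32 bits
	versionLow := version & ((1 << 31) - 1) // 31 bits
	return (termLow << 32) | (versionLow << 1)
}

func main() {
	s := packStatus(7, 3)
	fmt.Printf("status=%#016x term=%d version=%d synced=%d\n",
		s, s>>32, (s>>1)&((1<<31)-1), s&1)
}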
trx_mgr.go | package app
import (
"errors"
"fmt"
"github.com/coschain/contentos-go/common"
"github.com/coschain/contentos-go/common/constants"
"github.com/coschain/contentos-go/iservices"
"github.com/coschain/contentos-go/prototype"
"github.com/gogo/protobuf/proto"
"github.com/sirupsen/logrus"
"sync"
"sync/atomic"
"time"
)
// TrxCallback is the type of callback function reporting transaction process results.
type TrxCallback func(result *prototype.TransactionWrapperWithInfo)
// TrxEntry is a wrapper of a transaction with extra information.
type TrxEntry struct {
chainId prototype.ChainId // id of block chain to which the transaction is sent
result *prototype.TransactionWrapperWithInfo // process result involving the transaction
trxId string // transaction id
size int // transaction size
signer string // requested account to sign the transaction
signerKey *prototype.PublicKeyType // the actual public key which signed the transaction
callback TrxCallback // callback function
}
// NewTrxMgrEntry creates an instance of TrxEntry.
func NewTrxMgrEntry(chainId prototype.ChainId, trx *prototype.SignedTransaction, callback TrxCallback) *TrxEntry {
return &TrxEntry{
chainId: chainId,
result: &prototype.TransactionWrapperWithInfo{
SigTrx: trx,
Receipt: &prototype.TransactionReceiptWithInfo{Status: prototype.StatusSuccess},
},
callback: callback,
}
}
// SetError sets the entry's result as given error, and returns the error.
func (e *TrxEntry) SetError(err error) error {
e.result.Receipt.Status = prototype.StatusError
e.result.Receipt.ErrorInfo = err.Error()
return err
}
// Deliver calls entry's callback function.
func (e *TrxEntry) Deliver() {
if e.callback != nil {
e.callback(e.result)
}
}
// InitCheck fills in extra information of the entry and does a basic validation check.
// Note that InitCheck is independent from chain state. We should do it only once for each transaction.
func (e *TrxEntry) InitCheck() error {
trx := e.result.SigTrx
// basic check
if err := trx.Validate(); err != nil {
return e.SetError(err)
}
if trxId, err := trx.Id(); err != nil {
return e.SetError(err)
} else {
e.trxId = string(trxId.Hash)
}
// transaction size limit check
e.size = proto.Size(trx)
if e.size > constants.MaxTransactionSize {
return e.SetError(fmt.Errorf("trx too large, size = %d > %d", e.size, constants.MaxTransactionSize))
}
// get the signer account name
creator := ""
if creators := trx.GetOpCreatorsMap(); len(creators) != 1 {
return e.SetError(fmt.Errorf("non-unique trx creators, found %d", len(creators)))
} else {
for creator = range creators {
break
}
}
e.signer = creator
// recover the signing public key from signature
if signKey, err := trx.ExportPubKeys(e.chainId); err != nil {
return e.SetError(fmt.Errorf("cannot export signing key: %s", err.Error()))
} else {
e.signerKey = signKey
}
return nil
}
// CheckExpiration checks if the transaction is valid based on its expiration.
func (e *TrxEntry) CheckExpiration(blockTime uint32) error {
expiration := e.result.SigTrx.GetTrx().GetExpiration().GetUtcSeconds()
if expiration < blockTime {
return e.SetError(fmt.Errorf("trx expired, %d < %d", expiration, blockTime))
}
if expiration > blockTime + constants.TrxMaxExpirationTime {
return e.SetError(fmt.Errorf("trx expiration too long, %d > %d + %d", expiration, blockTime, constants.TrxMaxExpirationTime))
}
return nil
}
// CheckTapos checks if the transaction is valid based on its tapos information.
func (e *TrxEntry) CheckTapos(checker *TaposChecker) error {
if err := checker.Check(e.result.SigTrx.Trx); err != nil |
return nil
}
// CheckSignerKey checks if the transaction is signed by correct public key.
func (e *TrxEntry) CheckSignerKey(fetcher *AuthFetcher) error {
if err := fetcher.CheckPublicKey(e.signer, e.signerKey); err != nil {
return e.SetError(fmt.Errorf("signature failed: %s", err.Error()))
}
return nil
}
// CheckInBlockTrxs checks if the transaction is a duplicate of any old transaction.
func (e *TrxEntry) CheckInBlockTrxs(checker *InBlockTrxChecker) error {
if checker.Has(e.result.SigTrx) {
return e.SetError(errors.New("found duplicate in-block trx"))
}
return nil
}
func (e *TrxEntry) GetTrxResult() *prototype.TransactionWrapperWithInfo {
return e.result
}
func (e *TrxEntry) GetTrxSize() int {
return e.size
}
func (e *TrxEntry) GetTrxSigner() string {
return e.signer
}
func (e *TrxEntry) GetTrxSigningKey() *prototype.PublicKeyType {
return e.signerKey
}
const (
// maximum count of transactions that are waiting to be packed to blocks.
// if this limit is reached, any incoming transaction will be refused directly.
sMaxWaitingCount = constants.TrxMaxExpirationTime * 2000
// threshold over which cleanings are necessary
sWaitingCountWaterMark = sMaxWaitingCount / 10
// minimal interval between cleanings
sMinCleanupInterval = 10 * time.Second
// shrink the waiting/fetched pools every 100K transactions
sShrinkCountWaterMark = 100000
)
// ITrxMgrPlugin is an interface of manager plugins.
type ITrxMgrPlugin interface {
BlockApplied(b *prototype.SignedBlock) // called once after a block is successfully applied.
BlockReverted(blockNum uint64) // called once after a block is successfully reverted.
BlockCommitted(blockNum uint64) // called once after a block is successfully committed.
}
// The transaction manager.
type TrxMgr struct {
chainId prototype.ChainId // the chain
db iservices.IDatabaseRW // the database
log *logrus.Logger // the logger
headTime uint32 // timestamp of head block, in seconds
waiting map[string]*TrxEntry // transactions waiting to be packed to blocks, trxId -> entry
waitingLock sync.RWMutex // lock of waiting transactions
fetched map[string]*TrxEntry // transactions being packed to a block, trxId -> entry
fetchedLock sync.RWMutex // lock of fetched transactions
auth *AuthFetcher // checker of transaction signatures
tapos *TaposChecker // checker of transaction tapos
history *InBlockTrxChecker // checker of transaction duplication
plugins []ITrxMgrPlugin // manager plugins, consisting of above checkers
lastCleanTime time.Time // last time we clean up expired waiting transactions
shrinkCounter uint64 // a counter to determine when to shrink pools
}
// NewTrxMgr creates an instance of TrxMgr.
func NewTrxMgr(chainId prototype.ChainId, db iservices.IDatabaseRW, logger *logrus.Logger, lastBlock, commitBlock uint64) *TrxMgr {
auth := NewAuthFetcher(db, logger, lastBlock, commitBlock)
tapos := NewTaposChecker(db, logger, lastBlock)
history := NewInBlockTrxChecker(db, logger, lastBlock)
return &TrxMgr{
chainId: chainId,
db: db,
log: logger,
headTime: (&DynamicGlobalPropsRW{db:db}).GetProps().GetTime().GetUtcSeconds(),
waiting: make(map[string]*TrxEntry),
fetched: make(map[string]*TrxEntry),
auth: auth,
tapos: tapos,
history: history,
plugins: []ITrxMgrPlugin{ auth, tapos, history },
lastCleanTime: time.Now(),
}
}
// AddTrx processes an incoming transaction.
// AddTrx returns nil if the incoming transaction is accepted, otherwise an error is returned.
// If a non-nil callback is given, it will be called once asynchronously with the final process result.
func (m *TrxMgr) AddTrx(trx *prototype.SignedTransaction, callback TrxCallback) error {
entry := NewTrxMgrEntry(m.chainId, trx, callback)
// very basic nil pointer check
if trx == nil || trx.Signature == nil {
err := entry.SetError(errors.New("invalid trx"))
m.deliverEntry(entry)
return err
}
// very basic duplication check
if m.isProcessingTrx(trx) != nil {
err := entry.SetError(errors.New("trx already in process"))
m.deliverEntry(entry)
return err
}
c := make(chan error)
go func() {
ok := false
// check the transaction
if entry.InitCheck() != nil || m.checkTrx(entry, atomic.LoadUint32(&m.headTime), false) != nil {
// deliver if failed
m.deliverEntry(entry)
} else {
// if passed, try adding it to the waiting pool
m.waitingLock.Lock()
m.fetchedLock.RLock()
ok = m.addToWaiting(entry) > 0
m.fetchedLock.RUnlock()
m.waitingLock.Unlock()
}
if !ok {
c <- errors.New(entry.result.Receipt.ErrorInfo)
} else {
c <- nil
}
}()
return <-c
}
// WaitingCount returns number of transactions that are waiting to be packed to blocks.
func (m *TrxMgr) WaitingCount() int {
m.waitingLock.RLock()
defer m.waitingLock.RUnlock()
return len(m.waiting)
}
// FetchTrx fetches a batch of transactions from waiting pool.
// Block producer should call FetchTrx to collect transactions of new blocks.
func (m *TrxMgr) FetchTrx(blockTime uint32, maxCount, maxSize int) (entries []*TrxEntry) {
m.waitingLock.Lock()
defer m.waitingLock.Unlock()
m.fetchedLock.Lock()
defer m.fetchedLock.Unlock()
counter, size := 0, 0
// traverse the waiting pool
for s, e := range m.waiting {
// check count limit
if maxCount > 0 && counter >= maxCount {
break
}
// check size limit
if maxSize > 0 && size >= maxSize {
break
}
// check the transaction again
// although transactions in the waiting pool passed checks when they entered,
// chain state keeps changing, so we have to redo state-dependent checks.
if err := m.checkTrx(e, blockTime, true); err != nil {
// if failed, deliver the transaction.
m.log.Debugf("TRXMGR: FetchTrx check failed: %v, trxId=%x", err, []byte(e.trxId))
m.deliverEntry(e)
} else {
// if passed, pick it
entries = append(entries, e)
// add it to the fetched pool
m.fetched[s] = e
counter++
size += e.size
}
// remove from waiting pool
delete(m.waiting, s)
}
return
}
// ReturnTrx notifies that some previously fetched transactions can't be packed into a block due to errors.
// Block producer should call ReturnTrx for transactions that failed to be applied.
func (m *TrxMgr) ReturnTrx(entries ...*TrxEntry) {
m.log.Debug("TRXMGR: ReturnTrx begin")
timing := common.NewTiming()
timing.Begin()
m.fetchedLock.Lock()
defer m.fetchedLock.Unlock()
timing.Mark()
for _, e := range entries {
// any returning transaction should be previously fetched
f := m.fetched[e.trxId]
if f != nil {
m.deliverEntry(f)
delete(m.fetched, e.trxId)
}
}
timing.End()
m.log.Debugf("TRXMGR: ReturnTrx end: #tx=%d, %s", len(entries), timing.String())
}
// CheckBlockTrxs checks if transactions of a block are valid.
// If everything is ok, CheckBlockTrxs returns a TrxEntry slice for transactions and nil error, otherwise, a nil slice
// and an error is returned.
func (m *TrxMgr) CheckBlockTrxs(b *prototype.SignedBlock) (entries []*TrxEntry, err error) {
m.log.Debugf("TRXMGR: CheckBlockTrxs begin %d", b.SignedHeader.Number())
t0 := common.EasyTimer()
if count := len(b.Transactions); count > 0 {
blockTime := b.SignedHeader.Header.Timestamp.UtcSeconds
errs := make([]error, count)
entries = make([]*TrxEntry, count)
errIdx := int32(-1)
var wg sync.WaitGroup
wg.Add(count)
// check transactions asynchronously
for i := 0; i < count; i++ {
go func(idx int) {
defer wg.Done()
var err error
trx := b.Transactions[idx].SigTrx
e := NewTrxMgrEntry(m.chainId, trx, nil)
// do we need the initial check?
// yes for transactions that we have never met, otherwise no.
needInitCheck := true
// if we have met this transaction before, skip initial check and fill up extra information.
// this avoids doing the expensive public key recovery again.
if ptrx := m.isProcessingTrx(trx); ptrx != nil {
needInitCheck = false
e.trxId = ptrx.trxId
e.size = ptrx.size
e.signer = ptrx.signer
e.signerKey = ptrx.signerKey
}
// do initial check if necessary
if needInitCheck {
err = e.InitCheck()
}
// do state-dependent checks
if err == nil {
err = m.checkTrx(e, blockTime, true)
}
// finalization works
if err != nil {
errs[idx] = err
// remember the first error we met
atomic.CompareAndSwapInt32(&errIdx, -1, int32(idx))
} else {
entries[idx] = e
}
}(i)
}
wg.Wait()
if errIdx >= 0 {
entries = nil
err = fmt.Errorf("block %d trxs[%d] check failed: %s", b.SignedHeader.Number(), errIdx, errs[errIdx].Error())
}
// check duplicate transactions inside the block.
// it's a must to prevent malicious block producers from mounting replay attacks.
// m.history won't help here because it updates at block level instead of transaction level.
trxSigs, dupTrx := make(map[string]bool), -1
for idx, e := range entries {
if trxSigs[e.trxId] {
dupTrx = idx
break
}
trxSigs[e.trxId] = true
}
if dupTrx >= 0 {
entries = nil
err = fmt.Errorf("block %d trxs[%d] duplicates", b.SignedHeader.Number(), dupTrx)
}
}
m.log.Debugf("TRXMGR: CheckBlockTrxs end %d: #tx=%d, %v", b.SignedHeader.Number(), len(b.Transactions), t0)
return
}
// BlockApplied *MUST* be called *AFTER* a block was successfully applied.
func (m *TrxMgr) BlockApplied(b *prototype.SignedBlock) {
m.log.Debugf("TRXMGR: BlockApplied begin %d", b.SignedHeader.Number())
timing := common.NewTiming()
timing.Begin()
// update head block time
atomic.StoreUint32(&m.headTime, b.SignedHeader.Header.Timestamp.UtcSeconds)
// deliver transactions that are waiting final results
m.waitingLock.Lock()
m.fetchedLock.Lock()
timing.Mark()
for _, txw := range b.Transactions {
trxId, _ := txw.SigTrx.Id()
s := string(trxId.Hash)
if e := m.fetched[s]; e != nil {
m.deliverEntry(e)
delete(m.fetched, s)
}
if e := m.waiting[s]; e != nil {
m.deliverEntry(e)
delete(m.waiting, s)
}
}
// clean expired waiting trxs if necessary
m.cleanExpiredWaiting()
// shrink pool memory if necessary
m.shrinkPoolMemories()
timing.Mark()
m.fetchedLock.Unlock()
m.waitingLock.Unlock()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockApplied(b)
})
timing.End()
m.log.Debugf("TRXMGR: BlockApplied end %d: #tx=%d, %s", b.SignedHeader.Number(), len(b.Transactions), timing.String())
m.log.Debugf("TRXMGR: auth-hit=%v", m.auth.HitRate())
}
// BlockCommitted *MUST* be called *AFTER* a block was successfully committed.
func (m *TrxMgr) BlockCommitted(blockNum uint64) {
m.log.Debugf("TRXMGR: BlockCommitted begin %d", blockNum)
t0 := common.EasyTimer()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockCommitted(blockNum)
})
m.log.Debugf("TRXMGR: BlockCommitted end %d: %v", blockNum, t0)
}
// BlockReverted *MUST* be called *AFTER* a block was successfully reverted.
func (m *TrxMgr) BlockReverted(blockNum uint64) {
m.log.Debugf("TRXMGR: BlockReverted begin %d", blockNum)
t0 := common.EasyTimer()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockReverted(blockNum)
})
m.log.Debugf("TRXMGR: BlockReverted end %d: %v", blockNum, t0)
}
// addToWaiting adds given transaction entries to the waiting pool, and returns the actual number added.
func (m *TrxMgr) addToWaiting(entries...*TrxEntry) (count int) {
// clean expired waiting trxs if necessary
m.cleanExpiredWaiting()
for _, e := range entries {
// check the max waiting count limit
if len(m.waiting) > sMaxWaitingCount {
_ = e.SetError(errors.New("too many waiting trxs"))
m.deliverEntry(e)
continue
}
// check duplication
if m.isProcessingNoLock(e.result.SigTrx) != nil {
_ = e.SetError(errors.New("trx already in process"))
m.deliverEntry(e)
continue
}
m.waiting[e.trxId] = e
count++
}
atomic.AddUint64(&m.shrinkCounter, uint64(count))
return
}
// isProcessingTrx is a thread safe version of isProcessingNoLock.
func (m *TrxMgr) isProcessingTrx(trx *prototype.SignedTransaction) *TrxEntry {
m.waitingLock.RLock()
defer m.waitingLock.RUnlock()
m.fetchedLock.RLock()
defer m.fetchedLock.RUnlock()
return m.isProcessingNoLock(trx)
}
// isProcessingNoLock checks if given transaction is being processed by TrxMgr.
// It returns the transaction entry if given transaction is in the waiting pool or the fetched pool,
// otherwise, nil is returned.
func (m *TrxMgr) isProcessingNoLock(trx *prototype.SignedTransaction) *TrxEntry {
if trx == nil {
return nil
}
if trxId, err := trx.Id(); err == nil {
s := string(trxId.Hash)
if e := m.waiting[s]; e != nil {
return e
}
return m.fetched[s]
} else {
return nil
}
}
// checkTrx does state-dependent checks on given transaction.
func (m *TrxMgr) checkTrx(e *TrxEntry, blockTime uint32, checkTapos bool) (err error) {
if err = e.CheckExpiration(blockTime); err != nil {
return err
}
if checkTapos {
if err = e.CheckTapos(m.tapos); err != nil {
return err
}
}
if err = e.CheckSignerKey(m.auth); err != nil {
return err
}
if err = e.CheckInBlockTrxs(m.history); err != nil {
return err
}
return
}
// deliverEntry delivers given transaction asynchronously.
func (m *TrxMgr) deliverEntry(e *TrxEntry) {
go func() {
e.Deliver()
}()
}
// callPlugins is a helper method that calls given functor with each plugin as its argument.
func (m *TrxMgr) callPlugins(f func(plugin ITrxMgrPlugin)) {
var wg sync.WaitGroup
wg.Add(len(m.plugins))
for i := range m.plugins {
go func(idx int) {
defer wg.Done()
f(m.plugins[idx])
}(i)
}
wg.Wait()
}
func (m *TrxMgr) DiscardAccountCache(name string) {
m.auth.Discard(name)
}
//
// clean expired transactions from waiting pool if waiting pool is large enough.
//
// We need a cleaning procedure, especially for non-producer nodes.
// A non-producer node checks each block it applied and removes in-block transactions from the waiting pool.
// Without waiting pool cleaning, erroneous transactions will remain in the pool forever because they will never
// be packed into blocks. This can eventually fill up the waiting pool, leading to huge memory consumption and
// DoS for new transactions.
//
func (m *TrxMgr) cleanExpiredWaiting() {
// when the waiting pool is small, we don't need cleaning
if len(m.waiting) < sWaitingCountWaterMark {
return
}
// we avoid frequent cleaning
if headBlockTime := atomic.LoadUint32(&m.headTime); headBlockTime > 0 && time.Since(m.lastCleanTime) > sMinCleanupInterval {
m.lastCleanTime = time.Now()
for k, e := range m.waiting {
if err := e.CheckExpiration(headBlockTime); err != nil {
m.deliverEntry(e)
delete(m.waiting, k)
}
}
}
}
// delete(map, key) won't release any memory occupied by a map,
// so we need to re-copy our pools from time to time; otherwise they keep eating memory slowly but indefinitely.
func (m *TrxMgr) shrinkPoolMemories() {
if atomic.LoadUint64(&m.shrinkCounter) > sShrinkCountWaterMark {
atomic.StoreUint64(&m.shrinkCounter, 0)
waiting, fetched := make(map[string]*TrxEntry), make(map[string]*TrxEntry)
for k, e := range m.waiting {
waiting[k] = e
}
for k, e := range m.fetched {
fetched[k] = e
}
m.waiting, m.fetched = waiting, fetched
}
}
| {
return e.SetError(fmt.Errorf("tapos failed: %s", err.Error()))
} | conditional_block |
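AddTrx above returns a synchronous accept/reject error, while the final receipt arrives later through the TrxCallback that deliverEntry invokes on its own goroutine. A small, self-contained sketch of how a caller can bridge that asynchronous callback back into a blocking wait; the receipt type and submit function are stand-ins for illustration, not the real prototype structs or TrxMgr API:
package main

import "fmt"

// receipt is a stand-in for prototype.TransactionWrapperWithInfo.
type receipt struct {
	status string
}

// submit imitates the shape of TrxMgr.AddTrx: it accepts a callback and reports
// the final result asynchronously on a separate goroutine, as deliverEntry does.
func submit(cb func(*receipt)) error {
	go func() { cb(&receipt{status: "success"}) }()
	return nil // accepted
}

func main() {
	done := make(chan *receipt, 1) // buffered so the callback never blocks
	if err := submit(func(r *receipt) { done <- r }); err != nil {
		fmt.Println("rejected:", err)
		return
	}
	fmt.Println("final status:", (<-done).status)
}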
trx_mgr.go | package app
import (
"errors"
"fmt"
"github.com/coschain/contentos-go/common"
"github.com/coschain/contentos-go/common/constants"
"github.com/coschain/contentos-go/iservices"
"github.com/coschain/contentos-go/prototype"
"github.com/gogo/protobuf/proto"
"github.com/sirupsen/logrus"
"sync"
"sync/atomic"
"time"
)
// TrxCallback is the type of callback function reporting transaction process results.
type TrxCallback func(result *prototype.TransactionWrapperWithInfo)
// TrxEntry is a wrapper of a transaction with extra information.
type TrxEntry struct {
chainId prototype.ChainId // id of block chain to which the transaction is sent
result *prototype.TransactionWrapperWithInfo // process result involving the transaction
trxId string // transaction id
size int // transaction size
signer string // requested account to sign the transaction
signerKey *prototype.PublicKeyType // the actual public key which signed the transaction
callback TrxCallback // callback function
}
// NewTrxMgrEntry creates an instance of TrxEntry.
func NewTrxMgrEntry(chainId prototype.ChainId, trx *prototype.SignedTransaction, callback TrxCallback) *TrxEntry {
return &TrxEntry{
chainId: chainId,
result: &prototype.TransactionWrapperWithInfo{
SigTrx: trx,
Receipt: &prototype.TransactionReceiptWithInfo{Status: prototype.StatusSuccess},
},
callback: callback,
}
}
// SetError sets the entry's result as given error, and returns the error.
func (e *TrxEntry) SetError(err error) error {
e.result.Receipt.Status = prototype.StatusError
e.result.Receipt.ErrorInfo = err.Error()
return err
}
// Deliver calls entry's callback function.
func (e *TrxEntry) Deliver() {
if e.callback != nil {
e.callback(e.result)
}
}
// InitCheck fills in extra information of the entry and does a basic validation check.
// Note that InitCheck is independent from chain state. We should do it only once for each transaction.
func (e *TrxEntry) InitCheck() error {
trx := e.result.SigTrx
// basic check
if err := trx.Validate(); err != nil {
return e.SetError(err)
}
if trxId, err := trx.Id(); err != nil {
return e.SetError(err)
} else {
e.trxId = string(trxId.Hash)
}
// transaction size limit check
e.size = proto.Size(trx)
if e.size > constants.MaxTransactionSize {
return e.SetError(fmt.Errorf("trx too large, size = %d > %d", e.size, constants.MaxTransactionSize))
}
// get the signer account name
creator := ""
if creators := trx.GetOpCreatorsMap(); len(creators) != 1 {
return e.SetError(fmt.Errorf("non-unique trx creators, found %d", len(creators)))
} else {
for creator = range creators {
break
}
}
e.signer = creator
// recover the signing public key from signature
if signKey, err := trx.ExportPubKeys(e.chainId); err != nil {
return e.SetError(fmt.Errorf("cannot export signing key: %s", err.Error()))
} else {
e.signerKey = signKey
}
return nil
}
// CheckExpiration checks if the transaction is valid based on its expiration.
func (e *TrxEntry) CheckExpiration(blockTime uint32) error {
expiration := e.result.SigTrx.GetTrx().GetExpiration().GetUtcSeconds()
if expiration < blockTime {
return e.SetError(fmt.Errorf("trx expired, %d < %d", expiration, blockTime))
}
if expiration > blockTime + constants.TrxMaxExpirationTime {
return e.SetError(fmt.Errorf("trx expiration too long, %d > %d + %d", expiration, blockTime, constants.TrxMaxExpirationTime))
}
return nil
}
// CheckTapos checks if the transaction is valid based on its tapos information.
func (e *TrxEntry) CheckTapos(checker *TaposChecker) error {
if err := checker.Check(e.result.SigTrx.Trx); err != nil {
return e.SetError(fmt.Errorf("tapos failed: %s", err.Error()))
}
return nil
}
// CheckSignerKey checks if the transaction is signed by correct public key.
func (e *TrxEntry) CheckSignerKey(fetcher *AuthFetcher) error {
if err := fetcher.CheckPublicKey(e.signer, e.signerKey); err != nil {
return e.SetError(fmt.Errorf("signature failed: %s", err.Error()))
}
return nil
}
// CheckInBlockTrxs checks if the transaction is a duplicate of any old transaction.
func (e *TrxEntry) CheckInBlockTrxs(checker *InBlockTrxChecker) error {
if checker.Has(e.result.SigTrx) {
return e.SetError(errors.New("found duplicate in-block trx"))
}
return nil
}
func (e *TrxEntry) GetTrxResult() *prototype.TransactionWrapperWithInfo {
return e.result
}
func (e *TrxEntry) GetTrxSize() int {
return e.size
}
func (e *TrxEntry) GetTrxSigner() string {
return e.signer
}
func (e *TrxEntry) GetTrxSigningKey() *prototype.PublicKeyType {
return e.signerKey
}
const (
// maximum count of transactions that are waiting to be packed to blocks.
// if this limit is reached, any incoming transaction will be refused directly.
sMaxWaitingCount = constants.TrxMaxExpirationTime * 2000
// threshold over which cleanings are necessary
sWaitingCountWaterMark = sMaxWaitingCount / 10
// minimal interval between cleanings
sMinCleanupInterval = 10 * time.Second
// shrink the waiting/fetched pools every 100K transactions
sShrinkCountWaterMark = 100000
)
// ITrxMgrPlugin is an interface of manager plugins.
type ITrxMgrPlugin interface {
BlockApplied(b *prototype.SignedBlock) // called once after a block is successfully applied.
BlockReverted(blockNum uint64) // called once after a block is successfully reverted.
BlockCommitted(blockNum uint64) // called once after a block is successfully committed.
}
// The transaction manager.
type TrxMgr struct {
chainId prototype.ChainId // the chain
db iservices.IDatabaseRW // the database
log *logrus.Logger // the logger
headTime uint32 // timestamp of head block, in seconds
waiting map[string]*TrxEntry // transactions waiting to be packed to blocks, trxId -> entry
waitingLock sync.RWMutex // lock of waiting transactions
fetched map[string]*TrxEntry // transactions being packed to a block, trxId -> entry
fetchedLock sync.RWMutex // lock of fetched transactions
auth *AuthFetcher // checker of transaction signatures
tapos *TaposChecker // checker of transaction tapos
history *InBlockTrxChecker // checker of transaction duplication
plugins []ITrxMgrPlugin // manager plugins, consisting of above checkers
lastCleanTime time.Time // last time we clean up expired waiting transactions
shrinkCounter uint64 // a counter to determine when to shrink pools
}
// NewTrxMgr creates an instance of TrxMgr.
func NewTrxMgr(chainId prototype.ChainId, db iservices.IDatabaseRW, logger *logrus.Logger, lastBlock, commitBlock uint64) *TrxMgr {
auth := NewAuthFetcher(db, logger, lastBlock, commitBlock)
tapos := NewTaposChecker(db, logger, lastBlock)
history := NewInBlockTrxChecker(db, logger, lastBlock)
return &TrxMgr{
chainId: chainId,
db: db,
log: logger,
headTime: (&DynamicGlobalPropsRW{db:db}).GetProps().GetTime().GetUtcSeconds(),
waiting: make(map[string]*TrxEntry),
fetched: make(map[string]*TrxEntry),
auth: auth,
tapos: tapos,
history: history,
plugins: []ITrxMgrPlugin{ auth, tapos, history },
lastCleanTime: time.Now(),
}
}
// AddTrx processes an incoming transaction.
// AddTrx returns nil if the incoming transaction is accepted, otherwise an error is returned.
// If a non-nil callback is given, it will be called once asynchronously with the final process result.
func (m *TrxMgr) AddTrx(trx *prototype.SignedTransaction, callback TrxCallback) error {
entry := NewTrxMgrEntry(m.chainId, trx, callback)
// very basic nil pointer check
if trx == nil || trx.Signature == nil {
err := entry.SetError(errors.New("invalid trx"))
m.deliverEntry(entry)
return err
}
// very basic duplication check
if m.isProcessingTrx(trx) != nil {
err := entry.SetError(errors.New("trx already in process"))
m.deliverEntry(entry)
return err
}
c := make(chan error)
go func() {
ok := false
// check the transaction
if entry.InitCheck() != nil || m.checkTrx(entry, atomic.LoadUint32(&m.headTime), false) != nil {
// deliver if failed
m.deliverEntry(entry)
} else {
// if passed, try adding it to the waiting pool
m.waitingLock.Lock()
m.fetchedLock.RLock()
ok = m.addToWaiting(entry) > 0
m.fetchedLock.RUnlock()
m.waitingLock.Unlock()
}
if !ok {
c <- errors.New(entry.result.Receipt.ErrorInfo)
} else {
c <- nil
}
}()
return <-c
}
// WaitingCount returns number of transactions that are waiting to be packed to blocks.
func (m *TrxMgr) WaitingCount() int {
m.waitingLock.RLock()
defer m.waitingLock.RUnlock()
return len(m.waiting)
}
// FetchTrx fetches a batch of transactions from waiting pool.
// Block producer should call FetchTrx to collect transactions of new blocks.
func (m *TrxMgr) FetchTrx(blockTime uint32, maxCount, maxSize int) (entries []*TrxEntry) {
m.waitingLock.Lock()
defer m.waitingLock.Unlock()
m.fetchedLock.Lock()
defer m.fetchedLock.Unlock()
counter, size := 0, 0
// traverse the waiting pool
for s, e := range m.waiting {
// check count limit
if maxCount > 0 && counter >= maxCount {
break
}
// check size limit
if maxSize > 0 && size >= maxSize {
break
}
// check the transaction again
// although transactions in the waiting pool passed checks when they entered,
// chain state keeps changing, so we have to redo state-dependent checks.
if err := m.checkTrx(e, blockTime, true); err != nil {
// if failed, deliver the transaction.
m.log.Debugf("TRXMGR: FetchTrx check failed: %v, trxId=%x", err, []byte(e.trxId))
m.deliverEntry(e)
} else {
// if passed, pick it
entries = append(entries, e)
// add it to the fetched pool
m.fetched[s] = e
counter++
size += e.size
}
// remove from waiting pool
delete(m.waiting, s)
}
return
}
// ReturnTrx notifies that some previously fetched transactions can't be packed into a block due to errors.
// Block producer should call ReturnTrx for transactions that failed to be applied.
func (m *TrxMgr) ReturnTrx(entries ...*TrxEntry) {
m.log.Debug("TRXMGR: ReturnTrx begin")
timing := common.NewTiming()
timing.Begin()
m.fetchedLock.Lock()
defer m.fetchedLock.Unlock()
timing.Mark()
for _, e := range entries {
// any returning transaction should be previously fetched
f := m.fetched[e.trxId]
if f != nil {
m.deliverEntry(f)
delete(m.fetched, e.trxId)
}
}
timing.End()
m.log.Debugf("TRXMGR: ReturnTrx end: #tx=%d, %s", len(entries), timing.String())
}
// CheckBlockTrxs checks if transactions of a block are valid.
// If everything is ok, CheckBlockTrxs returns a TrxEntry slice for transactions and nil error, otherwise, a nil slice
// and an error is returned.
func (m *TrxMgr) CheckBlockTrxs(b *prototype.SignedBlock) (entries []*TrxEntry, err error) {
m.log.Debugf("TRXMGR: CheckBlockTrxs begin %d", b.SignedHeader.Number())
t0 := common.EasyTimer()
if count := len(b.Transactions); count > 0 {
blockTime := b.SignedHeader.Header.Timestamp.UtcSeconds
errs := make([]error, count)
entries = make([]*TrxEntry, count)
errIdx := int32(-1)
var wg sync.WaitGroup
wg.Add(count)
// check transactions asynchronously
for i := 0; i < count; i++ {
go func(idx int) {
defer wg.Done()
var err error
trx := b.Transactions[idx].SigTrx
e := NewTrxMgrEntry(m.chainId, trx, nil)
// do we need the initial check?
// yes for transactions that we have never met, otherwise no.
needInitCheck := true
| if ptrx := m.isProcessingTrx(trx); ptrx != nil {
needInitCheck = false
e.trxId = ptrx.trxId
e.size = ptrx.size
e.signer = ptrx.signer
e.signerKey = ptrx.signerKey
}
// do initial check if necessary
if needInitCheck {
err = e.InitCheck()
}
// do state-dependent checks
if err == nil {
err = m.checkTrx(e, blockTime, true)
}
// finalization works
if err != nil {
errs[idx] = err
// remember the first error we met
atomic.CompareAndSwapInt32(&errIdx, -1, int32(idx))
} else {
entries[idx] = e
}
}(i)
}
wg.Wait()
if errIdx >= 0 {
entries = nil
err = fmt.Errorf("block %d trxs[%d] check failed: %s", b.SignedHeader.Number(), errIdx, errs[errIdx].Error())
}
// check duplicate transactions inside the block.
// it's a must to prevent malicious block producers from mounting replay attacks.
// m.history won't help here because it updates at block level instead of transaction level.
trxSigs, dupTrx := make(map[string]bool), -1
for idx, e := range entries {
if trxSigs[e.trxId] {
dupTrx = idx
break
}
trxSigs[e.trxId] = true
}
if dupTrx >= 0 {
entries = nil
err = fmt.Errorf("block %d trxs[%d] duplicates", b.SignedHeader.Number(), dupTrx)
}
}
m.log.Debugf("TRXMGR: CheckBlockTrxs end %d: #tx=%d, %v", b.SignedHeader.Number(), len(b.Transactions), t0)
return
}
// BlockApplied *MUST* be called *AFTER* a block was successfully applied.
func (m *TrxMgr) BlockApplied(b *prototype.SignedBlock) {
m.log.Debugf("TRXMGR: BlockApplied begin %d", b.SignedHeader.Number())
timing := common.NewTiming()
timing.Begin()
// update head block time
atomic.StoreUint32(&m.headTime, b.SignedHeader.Header.Timestamp.UtcSeconds)
// deliver transactions that are waiting final results
m.waitingLock.Lock()
m.fetchedLock.Lock()
timing.Mark()
for _, txw := range b.Transactions {
trxId, _ := txw.SigTrx.Id()
s := string(trxId.Hash)
if e := m.fetched[s]; e != nil {
m.deliverEntry(e)
delete(m.fetched, s)
}
if e := m.waiting[s]; e != nil {
m.deliverEntry(e)
delete(m.waiting, s)
}
}
// clean expired waiting trxs if necessary
m.cleanExpiredWaiting()
// shrink pool memory if necessary
m.shrinkPoolMemories()
timing.Mark()
m.fetchedLock.Unlock()
m.waitingLock.Unlock()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockApplied(b)
})
timing.End()
m.log.Debugf("TRXMGR: BlockApplied end %d: #tx=%d, %s", b.SignedHeader.Number(), len(b.Transactions), timing.String())
m.log.Debugf("TRXMGR: auth-hit=%v", m.auth.HitRate())
}
// BlockCommitted *MUST* be called *AFTER* a block was successfully committed.
func (m *TrxMgr) BlockCommitted(blockNum uint64) {
m.log.Debugf("TRXMGR: BlockCommitted begin %d", blockNum)
t0 := common.EasyTimer()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockCommitted(blockNum)
})
m.log.Debugf("TRXMGR: BlockCommitted end %d: %v", blockNum, t0)
}
// BlockReverted *MUST* be called *AFTER* a block was successfully reverted.
func (m *TrxMgr) BlockReverted(blockNum uint64) {
m.log.Debugf("TRXMGR: BlockReverted begin %d", blockNum)
t0 := common.EasyTimer()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockReverted(blockNum)
})
m.log.Debugf("TRXMGR: BlockReverted end %d: %v", blockNum, t0)
}
// addToWaiting adds given transaction entries to the waiting pool, and returns the actual number added.
func (m *TrxMgr) addToWaiting(entries...*TrxEntry) (count int) {
// clean expired waiting trxs if necessary
m.cleanExpiredWaiting()
for _, e := range entries {
// check the max waiting count limit
if len(m.waiting) > sMaxWaitingCount {
_ = e.SetError(errors.New("too many waiting trxs"))
m.deliverEntry(e)
continue
}
// check duplication
if m.isProcessingNoLock(e.result.SigTrx) != nil {
_ = e.SetError(errors.New("trx already in process"))
m.deliverEntry(e)
continue
}
m.waiting[e.trxId] = e
count++
}
atomic.AddUint64(&m.shrinkCounter, uint64(count))
return
}
// isProcessingTrx is a thread safe version of isProcessingNoLock.
func (m *TrxMgr) isProcessingTrx(trx *prototype.SignedTransaction) *TrxEntry {
m.waitingLock.RLock()
defer m.waitingLock.RUnlock()
m.fetchedLock.RLock()
defer m.fetchedLock.RUnlock()
return m.isProcessingNoLock(trx)
}
// isProcessingNoLock checks if given transaction is being processed by TrxMgr.
// It returns the transaction entry if given transaction is in the waiting pool or the fetched pool,
// otherwise, nil is returned.
func (m *TrxMgr) isProcessingNoLock(trx *prototype.SignedTransaction) *TrxEntry {
if trx == nil {
return nil
}
if trxId, err := trx.Id(); err == nil {
s := string(trxId.Hash)
if e := m.waiting[s]; e != nil {
return e
}
return m.fetched[s]
} else {
return nil
}
}
// checkTrx does state-dependent checks on given transaction.
func (m *TrxMgr) checkTrx(e *TrxEntry, blockTime uint32, checkTapos bool) (err error) {
if err = e.CheckExpiration(blockTime); err != nil {
return err
}
if checkTapos {
if err = e.CheckTapos(m.tapos); err != nil {
return err
}
}
if err = e.CheckSignerKey(m.auth); err != nil {
return err
}
if err = e.CheckInBlockTrxs(m.history); err != nil {
return err
}
return
}
// deliverEntry delivers given transaction asynchronously.
func (m *TrxMgr) deliverEntry(e *TrxEntry) {
go func() {
e.Deliver()
}()
}
// callPlugins is a helper method that calls given functor with each plugin as its argument.
func (m *TrxMgr) callPlugins(f func(plugin ITrxMgrPlugin)) {
var wg sync.WaitGroup
wg.Add(len(m.plugins))
for i := range m.plugins {
go func(idx int) {
defer wg.Done()
f(m.plugins[idx])
}(i)
}
wg.Wait()
}
func (m *TrxMgr) DiscardAccountCache(name string) {
m.auth.Discard(name)
}
//
// clean expired transactions from waiting pool if waiting pool is large enough.
//
// We need a cleaning procedure, especially for non-producer nodes.
// A non-producer node checks each block it applied and removes in-block transactions from the waiting pool.
// Without waiting pool cleaning, erroneous transactions will remain in the pool forever because they will never
// be packed into blocks. This can eventually fill up the waiting pool, leading to huge memory consumption and
// DoS for new transactions.
//
func (m *TrxMgr) cleanExpiredWaiting() {
// when the waiting pool is small, we don't need cleaning
if len(m.waiting) < sWaitingCountWaterMark {
return
}
// we avoid frequent cleaning
if headBlockTime := atomic.LoadUint32(&m.headTime); headBlockTime > 0 && time.Since(m.lastCleanTime) > sMinCleanupInterval {
m.lastCleanTime = time.Now()
for k, e := range m.waiting {
if err := e.CheckExpiration(headBlockTime); err != nil {
m.deliverEntry(e)
delete(m.waiting, k)
}
}
}
}
// delete(map, key) won't release any memory occupied by a map,
// so we need to re-copy our pools from time to time; otherwise they keep eating memory slowly but indefinitely.
func (m *TrxMgr) shrinkPoolMemories() {
if atomic.LoadUint64(&m.shrinkCounter) > sShrinkCountWaterMark {
atomic.StoreUint64(&m.shrinkCounter, 0)
waiting, fetched := make(map[string]*TrxEntry), make(map[string]*TrxEntry)
for k, e := range m.waiting {
waiting[k] = e
}
for k, e := range m.fetched {
fetched[k] = e
}
m.waiting, m.fetched = waiting, fetched
}
} | // if we have met this transaction before, skip initial check and fill up extra information.
// this avoids doing the expensive public key recovery again. | random_line_split
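shrinkPoolMemories above exists because delete on a Go map removes entries but never shrinks the underlying buckets, so a pool that once held many transactions keeps that memory until the map itself is replaced. A minimal sketch of the same re-copy idiom with stand-in types; the threshold mirrors sShrinkCountWaterMark but the pool struct and values are purely illustrative:
package main

import "fmt"

const shrinkThreshold = 100000 // stand-in for sShrinkCountWaterMark

type pool struct {
	entries map[string]int
	counter uint64
}

// add inserts an entry and re-copies the map once enough insertions have
// accumulated, mirroring the intent of TrxMgr.shrinkPoolMemories.
func (p *pool) add(k string, v int) {
	p.entries[k] = v
	p.counter++
	if p.counter > shrinkThreshold {
		p.counter = 0
		fresh := make(map[string]int, len(p.entries))
		for key, val := range p.entries {
			fresh[key] = val
		}
		p.entries = fresh // old, bloated buckets become garbage and can be collected
	}
}

func main() {
	p := &pool{entries: make(map[string]int)}
	for i := 0; i < 200000; i++ {
		k := fmt.Sprintf("trx-%d", i)
		p.add(k, i)
		delete(p.entries, k) // churn: without re-copying, the map would never shrink
	}
	fmt.Println("entries left:", len(p.entries))
}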
trx_mgr.go | package app
import (
"errors"
"fmt"
"github.com/coschain/contentos-go/common"
"github.com/coschain/contentos-go/common/constants"
"github.com/coschain/contentos-go/iservices"
"github.com/coschain/contentos-go/prototype"
"github.com/gogo/protobuf/proto"
"github.com/sirupsen/logrus"
"sync"
"sync/atomic"
"time"
)
// TrxCallback is the type of callback function reporting transaction process results.
type TrxCallback func(result *prototype.TransactionWrapperWithInfo)
// TrxEntry is a wrapper of a transaction with extra information.
type TrxEntry struct {
chainId prototype.ChainId // id of block chain to which the transaction is sent
result *prototype.TransactionWrapperWithInfo // process result involving the transaction
trxId string // transaction id
size int // transaction size
signer string // requested account to sign the transaction
signerKey *prototype.PublicKeyType // the actual public key which signed the transaction
callback TrxCallback // callback function
}
// NewTrxMgrEntry creates an instance of TrxEntry.
func NewTrxMgrEntry(chainId prototype.ChainId, trx *prototype.SignedTransaction, callback TrxCallback) *TrxEntry {
return &TrxEntry{
chainId: chainId,
result: &prototype.TransactionWrapperWithInfo{
SigTrx: trx,
Receipt: &prototype.TransactionReceiptWithInfo{Status: prototype.StatusSuccess},
},
callback: callback,
}
}
// SetError sets the entry's result as given error, and returns the error.
func (e *TrxEntry) SetError(err error) error {
e.result.Receipt.Status = prototype.StatusError
e.result.Receipt.ErrorInfo = err.Error()
return err
}
// Deliver calls entry's callback function.
func (e *TrxEntry) Deliver() {
if e.callback != nil {
e.callback(e.result)
}
}
// InitCheck fills in extra information of the entry and does a basic validation check.
// Note that InitCheck is independent from chain state. We should do it only once for each transaction.
func (e *TrxEntry) InitCheck() error {
trx := e.result.SigTrx
// basic check
if err := trx.Validate(); err != nil {
return e.SetError(err)
}
if trxId, err := trx.Id(); err != nil {
return e.SetError(err)
} else {
e.trxId = string(trxId.Hash)
}
// transaction size limit check
e.size = proto.Size(trx)
if e.size > constants.MaxTransactionSize {
return e.SetError(fmt.Errorf("trx too large, size = %d > %d", e.size, constants.MaxTransactionSize))
}
// get the signer account name
creator := ""
if creators := trx.GetOpCreatorsMap(); len(creators) != 1 {
return e.SetError(fmt.Errorf("non-unique trx creators, found %d", len(creators)))
} else {
for creator = range creators {
break
}
}
e.signer = creator
// recover the signing public key from signature
if signKey, err := trx.ExportPubKeys(e.chainId); err != nil {
return e.SetError(fmt.Errorf("cannot export signing key: %s", err.Error()))
} else {
e.signerKey = signKey
}
return nil
}
// CheckExpiration checks if the transaction is valid based on its expiration.
func (e *TrxEntry) CheckExpiration(blockTime uint32) error {
expiration := e.result.SigTrx.GetTrx().GetExpiration().GetUtcSeconds()
if expiration < blockTime {
return e.SetError(fmt.Errorf("trx expired, %d < %d", expiration, blockTime))
}
if expiration > blockTime + constants.TrxMaxExpirationTime {
return e.SetError(fmt.Errorf("trx expiration too long, %d > %d + %d", expiration, blockTime, constants.TrxMaxExpirationTime))
}
return nil
}
// CheckTapos checks if the transaction is valid based on its tapos information.
func (e *TrxEntry) CheckTapos(checker *TaposChecker) error {
if err := checker.Check(e.result.SigTrx.Trx); err != nil {
return e.SetError(fmt.Errorf("tapos failed: %s", err.Error()))
}
return nil
}
// CheckSignerKey checks if the transaction is signed by correct public key.
func (e *TrxEntry) CheckSignerKey(fetcher *AuthFetcher) error {
if err := fetcher.CheckPublicKey(e.signer, e.signerKey); err != nil {
return e.SetError(fmt.Errorf("signature failed: %s", err.Error()))
}
return nil
}
// CheckInBlockTrxs checks if the transaction is a duplicate of any old transaction.
func (e *TrxEntry) CheckInBlockTrxs(checker *InBlockTrxChecker) error {
if checker.Has(e.result.SigTrx) {
return e.SetError(errors.New("found duplicate in-block trx"))
}
return nil
}
func (e *TrxEntry) GetTrxResult() *prototype.TransactionWrapperWithInfo {
return e.result
}
func (e *TrxEntry) GetTrxSize() int {
return e.size
}
func (e *TrxEntry) GetTrxSigner() string {
return e.signer
}
func (e *TrxEntry) GetTrxSigningKey() *prototype.PublicKeyType {
return e.signerKey
}
const (
// maximum count of transactions that are waiting to be packed to blocks.
// if this limit is reached, any incoming transaction will be refused directly.
sMaxWaitingCount = constants.TrxMaxExpirationTime * 2000
// threshold over which cleanings are necessary
sWaitingCountWaterMark = sMaxWaitingCount / 10
// minimal interval between cleanings
sMinCleanupInterval = 10 * time.Second
// shrink the waiting/fetched pools every 100K transactions
sShrinkCountWaterMark = 100000
)
// ITrxMgrPlugin is an interface of manager plugins.
type ITrxMgrPlugin interface {
BlockApplied(b *prototype.SignedBlock) // called once after a block is successfully applied.
BlockReverted(blockNum uint64) // called once after a block is successfully reverted.
BlockCommitted(blockNum uint64) // called once after a block is successfully committed.
}
// The transaction manager.
type TrxMgr struct {
chainId prototype.ChainId // the chain
db iservices.IDatabaseRW // the database
log *logrus.Logger // the logger
headTime uint32 // timestamp of head block, in seconds
waiting map[string]*TrxEntry // transactions waiting to be packed to blocks, trxId -> entry
waitingLock sync.RWMutex // lock of waiting transactions
fetched map[string]*TrxEntry // transactions being packed to a block, trxId -> entry
fetchedLock sync.RWMutex // lock of fetched transactions
auth *AuthFetcher // checker of transaction signatures
tapos *TaposChecker // checker of transaction tapos
history *InBlockTrxChecker // checker of transaction duplication
plugins []ITrxMgrPlugin // manager plugins, consisting of above checkers
lastCleanTime time.Time // last time we clean up expired waiting transactions
shrinkCounter uint64 // a counter to determine when to shrink pools
}
// NewTrxMgr creates an instance of TrxMgr.
func NewTrxMgr(chainId prototype.ChainId, db iservices.IDatabaseRW, logger *logrus.Logger, lastBlock, commitBlock uint64) *TrxMgr {
auth := NewAuthFetcher(db, logger, lastBlock, commitBlock)
tapos := NewTaposChecker(db, logger, lastBlock)
history := NewInBlockTrxChecker(db, logger, lastBlock)
return &TrxMgr{
chainId: chainId,
db: db,
log: logger,
headTime: (&DynamicGlobalPropsRW{db:db}).GetProps().GetTime().GetUtcSeconds(),
waiting: make(map[string]*TrxEntry),
fetched: make(map[string]*TrxEntry),
auth: auth,
tapos: tapos,
history: history,
plugins: []ITrxMgrPlugin{ auth, tapos, history },
lastCleanTime: time.Now(),
}
}
// AddTrx processes an incoming transaction.
// AddTrx returns nil if the incoming transaction is accepted, otherwise an error is returned.
// If a non-nil callback is given, it will be called once asynchronously with the final process result.
func (m *TrxMgr) AddTrx(trx *prototype.SignedTransaction, callback TrxCallback) error {
entry := NewTrxMgrEntry(m.chainId, trx, callback)
// very basic nil pointer check
if trx == nil || trx.Signature == nil {
err := entry.SetError(errors.New("invalid trx"))
m.deliverEntry(entry)
return err
}
// very basic duplication check
if m.isProcessingTrx(trx) != nil {
err := entry.SetError(errors.New("trx already in process"))
m.deliverEntry(entry)
return err
}
c := make(chan error)
go func() {
ok := false
// check the transaction
if entry.InitCheck() != nil || m.checkTrx(entry, atomic.LoadUint32(&m.headTime), false) != nil {
// deliver if failed
m.deliverEntry(entry)
} else {
// if passed, try adding it to the waiting pool
m.waitingLock.Lock()
m.fetchedLock.RLock()
ok = m.addToWaiting(entry) > 0
m.fetchedLock.RUnlock()
m.waitingLock.Unlock()
}
if !ok {
c <- errors.New(entry.result.Receipt.ErrorInfo)
} else {
c <- nil
}
}()
return <-c
}
// WaitingCount returns number of transactions that are waiting to be packed to blocks.
func (m *TrxMgr) | () int {
m.waitingLock.RLock()
defer m.waitingLock.RUnlock()
return len(m.waiting)
}
// FetchTrx fetches a batch of transactions from waiting pool.
// Block producer should call FetchTrx to collect transactions of new blocks.
func (m *TrxMgr) FetchTrx(blockTime uint32, maxCount, maxSize int) (entries []*TrxEntry) {
m.waitingLock.Lock()
defer m.waitingLock.Unlock()
m.fetchedLock.Lock()
defer m.fetchedLock.Unlock()
counter, size := 0, 0
// traverse the waiting pool
for s, e := range m.waiting {
// check count limit
if maxCount > 0 && counter >= maxCount {
break
}
// check size limit
if maxSize > 0 && size >= maxSize {
break
}
// check the transaction again
// although transactions in the waiting pool passed checks when they entered,
// chain state keeps changing, so we have to redo state-dependent checks.
if err := m.checkTrx(e, blockTime, true); err != nil {
// if failed, deliver the transaction.
m.log.Debugf("TRXMGR: FetchTrx check failed: %v, trxId=%x", err, []byte(e.trxId))
m.deliverEntry(e)
} else {
// if passed, pick it
entries = append(entries, e)
// add it to the fetched pool
m.fetched[s] = e
counter++
size += e.size
}
// remove from waiting pool
delete(m.waiting, s)
}
return
}
// ReturnTrx notifies that some previously fetched transactions can't be packed into a block due to errors.
// Block producer should call ReturnTrx for transactions that failed being applied.
func (m *TrxMgr) ReturnTrx(entries ...*TrxEntry) {
m.log.Debug("TRXMGR: ReturnTrx begin")
timing := common.NewTiming()
timing.Begin()
m.fetchedLock.Lock()
defer m.fetchedLock.Unlock()
timing.Mark()
for _, e := range entries {
// any returning transaction should be previously fetched
f := m.fetched[e.trxId]
if f != nil {
m.deliverEntry(f)
delete(m.fetched, e.trxId)
}
}
timing.End()
m.log.Debugf("TRXMGR: ReturnTrx end: #tx=%d, %s", len(entries), timing.String())
}
// CheckBlockTrxs checks if transactions of a block are valid.
// If everything is ok, CheckBlockTrxs returns a TrxEntry slice for transactions and nil error, otherwise, a nil slice
// and an error is returned.
func (m *TrxMgr) CheckBlockTrxs(b *prototype.SignedBlock) (entries []*TrxEntry, err error) {
m.log.Debugf("TRXMGR: CheckBlockTrxs begin %d", b.SignedHeader.Number())
t0 := common.EasyTimer()
if count := len(b.Transactions); count > 0 {
blockTime := b.SignedHeader.Header.Timestamp.UtcSeconds
errs := make([]error, count)
entries = make([]*TrxEntry, count)
errIdx := int32(-1)
var wg sync.WaitGroup
wg.Add(count)
// check transactions asynchronously
for i := 0; i < count; i++ {
go func(idx int) {
defer wg.Done()
var err error
trx := b.Transactions[idx].SigTrx
e := NewTrxMgrEntry(m.chainId, trx, nil)
// do we need the initial check?
// yes for transactions that we have never met, otherwise no.
needInitCheck := true
// if we have met this transaction before, skip initial check and fill up extra information.
// this avoids doing the expensive public key recovery again.
if ptrx := m.isProcessingTrx(trx); ptrx != nil {
needInitCheck = false
e.trxId = ptrx.trxId
e.size = ptrx.size
e.signer = ptrx.signer
e.signerKey = ptrx.signerKey
}
// do initial check if necessary
if needInitCheck {
err = e.InitCheck()
}
// do state-dependent checks
if err == nil {
err = m.checkTrx(e, blockTime, true)
}
// finalization works
if err != nil {
errs[idx] = err
// remember the first error we met
atomic.CompareAndSwapInt32(&errIdx, -1, int32(idx))
} else {
entries[idx] = e
}
}(i)
}
wg.Wait()
if errIdx >= 0 {
entries = nil
err = fmt.Errorf("block %d trxs[%d] check failed: %s", b.SignedHeader.Number(), errIdx, errs[errIdx].Error())
}
// check duplicate transactions inside the block.
// it's a must to prevent malicious block producers from mounting replay attacks.
// m.history won't help here because it updates at block level instead of transaction level.
trxSigs, dupTrx := make(map[string]bool), -1
for idx, e := range entries {
if trxSigs[e.trxId] {
dupTrx = idx
break
}
trxSigs[e.trxId] = true
}
if dupTrx >= 0 {
entries = nil
err = fmt.Errorf("block %d trxs[%d] duplicates", b.SignedHeader.Number(), dupTrx)
}
}
m.log.Debugf("TRXMGR: CheckBlockTrxs end %d: #tx=%d, %v", b.SignedHeader.Number(), len(b.Transactions), t0)
return
}
// BlockApplied *MUST* be called *AFTER* a block was successfully applied.
func (m *TrxMgr) BlockApplied(b *prototype.SignedBlock) {
m.log.Debugf("TRXMGR: BlockApplied begin %d", b.SignedHeader.Number())
timing := common.NewTiming()
timing.Begin()
// update head block time
atomic.StoreUint32(&m.headTime, b.SignedHeader.Header.Timestamp.UtcSeconds)
// deliver transactions that are waiting final results
m.waitingLock.Lock()
m.fetchedLock.Lock()
timing.Mark()
for _, txw := range b.Transactions {
trxId, _ := txw.SigTrx.Id()
s := string(trxId.Hash)
if e := m.fetched[s]; e != nil {
m.deliverEntry(e)
delete(m.fetched, s)
}
if e := m.waiting[s]; e != nil {
m.deliverEntry(e)
delete(m.waiting, s)
}
}
// clean expired waiting trxs if necessary
m.cleanExpiredWaiting()
// shrink pool memory if necessary
m.shrinkPoolMemories()
timing.Mark()
m.fetchedLock.Unlock()
m.waitingLock.Unlock()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockApplied(b)
})
timing.End()
m.log.Debugf("TRXMGR: BlockApplied end %d: #tx=%d, %s", b.SignedHeader.Number(), len(b.Transactions), timing.String())
m.log.Debugf("TRXMGR: auth-hit=%v", m.auth.HitRate())
}
// BlockCommitted *MUST* be called *AFTER* a block was successfully committed.
func (m *TrxMgr) BlockCommitted(blockNum uint64) {
m.log.Debugf("TRXMGR: BlockCommitted begin %d", blockNum)
t0 := common.EasyTimer()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockCommitted(blockNum)
})
m.log.Debugf("TRXMGR: BlockCommitted end %d: %v", blockNum, t0)
}
// BlockReverted *MUST* be called *AFTER* a block was successfully reverted.
func (m *TrxMgr) BlockReverted(blockNum uint64) {
m.log.Debugf("TRXMGR: BlockReverted begin %d", blockNum)
t0 := common.EasyTimer()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockReverted(blockNum)
})
m.log.Debugf("TRXMGR: BlockReverted end %d: %v", blockNum, t0)
}
// addToWaiting adds given transaction entries to the waiting pool, and returns the actual number added.
func (m *TrxMgr) addToWaiting(entries...*TrxEntry) (count int) {
// clean expired waiting trxs if necessary
m.cleanExpiredWaiting()
for _, e := range entries {
// check the max waiting count limit
if len(m.waiting) > sMaxWaitingCount {
_ = e.SetError(errors.New("too many waiting trxs"))
m.deliverEntry(e)
continue
}
// check duplication
if m.isProcessingNoLock(e.result.SigTrx) != nil {
_ = e.SetError(errors.New("trx already in process"))
m.deliverEntry(e)
continue
}
m.waiting[e.trxId] = e
count++
}
atomic.AddUint64(&m.shrinkCounter, uint64(count))
return
}
// isProcessingTrx is a thread safe version of isProcessingNoLock.
func (m *TrxMgr) isProcessingTrx(trx *prototype.SignedTransaction) *TrxEntry {
m.waitingLock.RLock()
defer m.waitingLock.RUnlock()
m.fetchedLock.RLock()
defer m.fetchedLock.RUnlock()
return m.isProcessingNoLock(trx)
}
// isProcessingNoLock checks if given transaction is being processed by TrxMgr.
// It returns the transaction entry if given transaction is in the waiting pool or the fetched pool,
// otherwise, nil is returned.
func (m *TrxMgr) isProcessingNoLock(trx *prototype.SignedTransaction) *TrxEntry {
if trx == nil {
return nil
}
if trxId, err := trx.Id(); err == nil {
s := string(trxId.Hash)
if e := m.waiting[s]; e != nil {
return e
}
return m.fetched[s]
} else {
return nil
}
}
// checkTrx does state-dependent checks on given transaction.
func (m *TrxMgr) checkTrx(e *TrxEntry, blockTime uint32, checkTapos bool) (err error) {
if err = e.CheckExpiration(blockTime); err != nil {
return err
}
if checkTapos {
if err = e.CheckTapos(m.tapos); err != nil {
return err
}
}
if err = e.CheckSignerKey(m.auth); err != nil {
return err
}
if err = e.CheckInBlockTrxs(m.history); err != nil {
return err
}
return
}
// deliverEntry delivers given transaction asynchronously.
func (m *TrxMgr) deliverEntry(e *TrxEntry) {
go func() {
e.Deliver()
}()
}
// callPlugins is a helper method that calls given functor with each plugin as its argument.
func (m *TrxMgr) callPlugins(f func(plugin ITrxMgrPlugin)) {
var wg sync.WaitGroup
wg.Add(len(m.plugins))
for i := range m.plugins {
go func(idx int) {
defer wg.Done()
f(m.plugins[idx])
}(i)
}
wg.Wait()
}
func (m *TrxMgr) DiscardAccountCache(name string) {
m.auth.Discard(name)
}
//
// clean expired transactions from waiting pool if waiting pool is large enough.
//
// We need a cleaning procedure, especially for non-producer nodes.
// A non-producer node checks each block it applied and removes in-block transactions from the waiting pool.
// Without waiting pool cleaning, erroneous transactions will remain in the pool forever because they will never
// be packed into blocks. This can eventually fill up the waiting pool, leading to huge memory consumption and
// DoS for new transactions.
//
func (m *TrxMgr) cleanExpiredWaiting() {
// when the waiting pool is small, we don't need cleaning
if len(m.waiting) < sWaitingCountWaterMark {
return
}
// we avoid frequent cleaning
if headBlockTime := atomic.LoadUint32(&m.headTime); headBlockTime > 0 && time.Since(m.lastCleanTime) > sMinCleanupInterval {
m.lastCleanTime = time.Now()
for k, e := range m.waiting {
if err := e.CheckExpiration(headBlockTime); err != nil {
m.deliverEntry(e)
delete(m.waiting, k) // remove only the expired entry; it has just been delivered with an expiration error
}
}
}
}
// delete(map, key) won't release any memory occupied by a map.
// so we need to re-copy our pools from time to time; otherwise they slowly but permanently accumulate memory.
func (m *TrxMgr) shrinkPoolMemories() {
if atomic.LoadUint64(&m.shrinkCounter) > sShrinkCountWaterMark {
atomic.StoreUint64(&m.shrinkCounter, 0)
waiting, fetched := make(map[string]*TrxEntry), make(map[string]*TrxEntry)
for k, e := range m.waiting {
waiting[k] = e
}
for k, e := range m.fetched {
fetched[k] = e
}
m.waiting, m.fetched = waiting, fetched
}
}
| WaitingCount | identifier_name |
trx_mgr.go | package app
import (
"errors"
"fmt"
"github.com/coschain/contentos-go/common"
"github.com/coschain/contentos-go/common/constants"
"github.com/coschain/contentos-go/iservices"
"github.com/coschain/contentos-go/prototype"
"github.com/gogo/protobuf/proto"
"github.com/sirupsen/logrus"
"sync"
"sync/atomic"
"time"
)
// TrxCallback is the type of callback function reporting transaction process results.
type TrxCallback func(result *prototype.TransactionWrapperWithInfo)
// TrxEntry is a wrapper of a transaction with extra information.
type TrxEntry struct {
chainId prototype.ChainId // id of block chain to which the transaction is sent
result *prototype.TransactionWrapperWithInfo // process result involving the transaction
trxId string // transaction id
size int // transaction size
signer string // requested account to sign the transaction
signerKey *prototype.PublicKeyType // the actual public key which signed the transaction
callback TrxCallback // callback function
}
// NewTrxMgrEntry creates an instance of TrxEntry.
func NewTrxMgrEntry(chainId prototype.ChainId, trx *prototype.SignedTransaction, callback TrxCallback) *TrxEntry {
return &TrxEntry{
chainId: chainId,
result: &prototype.TransactionWrapperWithInfo{
SigTrx: trx,
Receipt: &prototype.TransactionReceiptWithInfo{Status: prototype.StatusSuccess},
},
callback: callback,
}
}
// SetError sets the entry's result as given error, and returns the error.
func (e *TrxEntry) SetError(err error) error |
// Deliver calls entry's callback function.
func (e *TrxEntry) Deliver() {
if e.callback != nil {
e.callback(e.result)
}
}
// InitCheck fills extra information of the entry, and do a basic validation check.
// Note that InitCheck is independent from chain state. We should do it only once for each transaction.
func (e *TrxEntry) InitCheck() error {
trx := e.result.SigTrx
// basic check
if err := trx.Validate(); err != nil {
return e.SetError(err)
}
if trxId, err := trx.Id(); err != nil {
return e.SetError(err)
} else {
e.trxId = string(trxId.Hash)
}
// transaction size limit check
e.size = proto.Size(trx)
if e.size > constants.MaxTransactionSize {
return e.SetError(fmt.Errorf("trx too large, size = %d > %d", e.size, constants.MaxTransactionSize))
}
// get the signer account name
creator := ""
if creators := trx.GetOpCreatorsMap(); len(creators) != 1 {
return e.SetError(fmt.Errorf("non-unique trx creators, found %d", len(creators)))
} else {
for creator = range creators {
break
}
}
e.signer = creator
// recover the signing public key from signature
if signKey, err := trx.ExportPubKeys(e.chainId); err != nil {
return e.SetError(fmt.Errorf("cannot export signing key: %s", err.Error()))
} else {
e.signerKey = signKey
}
return nil
}
// CheckExpiration checks if the transaction is valid based on its expiration.
func (e *TrxEntry) CheckExpiration(blockTime uint32) error {
expiration := e.result.SigTrx.GetTrx().GetExpiration().GetUtcSeconds()
if expiration < blockTime {
return e.SetError(fmt.Errorf("trx expired, %d < %d", expiration, blockTime))
}
if expiration > blockTime + constants.TrxMaxExpirationTime {
return e.SetError(fmt.Errorf("trx expiration too long, %d > %d + %d", expiration, blockTime, constants.TrxMaxExpirationTime))
}
return nil
}
// CheckTapos checks if the transaction is valid based on its tapos information.
func (e *TrxEntry) CheckTapos(checker *TaposChecker) error {
if err := checker.Check(e.result.SigTrx.Trx); err != nil {
return e.SetError(fmt.Errorf("tapos failed: %s", err.Error()))
}
return nil
}
// CheckSignerKey checks if the transaction is signed by correct public key.
func (e *TrxEntry) CheckSignerKey(fetcher *AuthFetcher) error {
if err := fetcher.CheckPublicKey(e.signer, e.signerKey); err != nil {
return e.SetError(fmt.Errorf("signature failed: %s", err.Error()))
}
return nil
}
// CheckInBlockTrxs checks if the transaction is a duplicate of any old transaction.
func (e *TrxEntry) CheckInBlockTrxs(checker *InBlockTrxChecker) error {
if checker.Has(e.result.SigTrx) {
return e.SetError(errors.New("found duplicate in-block trx"))
}
return nil
}
func (e *TrxEntry) GetTrxResult() *prototype.TransactionWrapperWithInfo {
return e.result
}
func (e *TrxEntry) GetTrxSize() int {
return e.size
}
func (e *TrxEntry) GetTrxSigner() string {
return e.signer
}
func (e *TrxEntry) GetTrxSigningKey() *prototype.PublicKeyType {
return e.signerKey
}
const (
// maximum count of transactions that are waiting to be packed to blocks.
// if this limit is reached, any incoming transaction will be refused directly.
sMaxWaitingCount = constants.TrxMaxExpirationTime * 2000
// threshold over which cleanings are necessary
sWaitingCountWaterMark = sMaxWaitingCount / 10
// minimal interval between cleanings
sMinCleanupInterval = 10 * time.Second
// shrink the waiting/fetched pools every 100K transactions
sShrinkCountWaterMark = 100000
)
// ITrxMgrPlugin is an interface of manager plugins.
type ITrxMgrPlugin interface {
BlockApplied(b *prototype.SignedBlock) // called once after a block is successfully applied.
BlockReverted(blockNum uint64) // called once after a block is successfully reverted.
BlockCommitted(blockNum uint64) // called once after a block is successfully committed.
}
// The transaction manager.
type TrxMgr struct {
chainId prototype.ChainId // the chain
db iservices.IDatabaseRW // the database
log *logrus.Logger // the logger
headTime uint32 // timestamp of head block, in seconds
waiting map[string]*TrxEntry // transactions waiting to be packed to blocks, trxId -> entry
waitingLock sync.RWMutex // lock of waiting transactions
fetched map[string]*TrxEntry // transactions being packed to a block, trxId -> entry
fetchedLock sync.RWMutex // lock of fetched transactions
auth *AuthFetcher // checker of transaction signatures
tapos *TaposChecker // checker of transaction tapos
history *InBlockTrxChecker // checker of transaction duplication
plugins []ITrxMgrPlugin // manager plugins, consisting of above checkers
lastCleanTime time.Time // last time we clean up expired waiting transactions
shrinkCounter uint64 // a counter to determine when to shrink pools
}
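// Note on locking (an observation drawn from the methods below, not an original comment of this file):
// whenever both pools are locked together, waitingLock is always acquired before fetchedLock
// (see AddTrx, FetchTrx, BlockApplied and isProcessingTrx), so the lock ordering stays consistent
// and the two pool locks cannot deadlock against each other.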
// NewTrxMgr creates an instance of TrxMgr.
func NewTrxMgr(chainId prototype.ChainId, db iservices.IDatabaseRW, logger *logrus.Logger, lastBlock, commitBlock uint64) *TrxMgr {
auth := NewAuthFetcher(db, logger, lastBlock, commitBlock)
tapos := NewTaposChecker(db, logger, lastBlock)
history := NewInBlockTrxChecker(db, logger, lastBlock)
return &TrxMgr{
chainId: chainId,
db: db,
log: logger,
headTime: (&DynamicGlobalPropsRW{db:db}).GetProps().GetTime().GetUtcSeconds(),
waiting: make(map[string]*TrxEntry),
fetched: make(map[string]*TrxEntry),
auth: auth,
tapos: tapos,
history: history,
plugins: []ITrxMgrPlugin{ auth, tapos, history },
lastCleanTime: time.Now(),
}
}
// AddTrx processes an incoming transaction.
// AddTrx returns nil if the incoming transaction is accepted, otherwise an error is returned.
// If a non-nil callback is given, it will be called once asynchronously with the final process result.
func (m *TrxMgr) AddTrx(trx *prototype.SignedTransaction, callback TrxCallback) error {
entry := NewTrxMgrEntry(m.chainId, trx, callback)
// very basic nil pointer check
if trx == nil || trx.Signature == nil {
err := entry.SetError(errors.New("invalid trx"))
m.deliverEntry(entry)
return err
}
// very basic duplication check
if m.isProcessingTrx(trx) != nil {
err := entry.SetError(errors.New("trx already in process"))
m.deliverEntry(entry)
return err
}
c := make(chan error)
go func() {
ok := false
// check the transaction
if entry.InitCheck() != nil || m.checkTrx(entry, atomic.LoadUint32(&m.headTime), false) != nil {
// deliver if failed
m.deliverEntry(entry)
} else {
// if passed, try adding it to the waiting pool
m.waitingLock.Lock()
m.fetchedLock.RLock()
ok = m.addToWaiting(entry) > 0
m.fetchedLock.RUnlock()
m.waitingLock.Unlock()
}
if !ok {
c <- errors.New(entry.result.Receipt.ErrorInfo)
} else {
c <- nil
}
}()
return <-c
}
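// Usage sketch (illustrative only, not part of the original file; `mgr` and `signedTrx` are
// assumed to exist in the caller's scope):
//
//   err := mgr.AddTrx(signedTrx, func(result *prototype.TransactionWrapperWithInfo) {
//       // invoked asynchronously with the final process result
//       fmt.Println("trx receipt status:", result.Receipt.Status)
//   })
//   if err != nil {
//       // the transaction was rejected: nil trx, already in process, or failed checks
//   }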
// WaitingCount returns number of transactions that are waiting to be packed to blocks.
func (m *TrxMgr) WaitingCount() int {
m.waitingLock.RLock()
defer m.waitingLock.RUnlock()
return len(m.waiting)
}
// FetchTrx fetches a batch of transactions from waiting pool.
// Block producer should call FetchTrx to collect transactions of new blocks.
func (m *TrxMgr) FetchTrx(blockTime uint32, maxCount, maxSize int) (entries []*TrxEntry) {
m.waitingLock.Lock()
defer m.waitingLock.Unlock()
m.fetchedLock.Lock()
defer m.fetchedLock.Unlock()
counter, size := 0, 0
// traverse the waiting pool
for s, e := range m.waiting {
// check count limit
if maxCount > 0 && counter >= maxCount {
break
}
// check size limit
if maxSize > 0 && size >= maxSize {
break
}
// check the transaction again
// although transactions in the waiting pool passed checks when they entered,
// chain state keeps changing, so we have to redo state-dependent checks.
if err := m.checkTrx(e, blockTime, true); err != nil {
// if failed, deliver the transaction.
m.log.Debugf("TRXMGR: FetchTrx check failed: %v, trxId=%x", err, []byte(e.trxId))
m.deliverEntry(e)
} else {
// if passed, pick it
entries = append(entries, e)
// add it to the fetched pool
m.fetched[s] = e
counter++
size += e.size
}
// remove from waiting pool
delete(m.waiting, s)
}
return
}
// ReturnTrx notifies that some previously fetched transactions can't be packed into a block due to errors.
// Block producer should call ReturnTrx for transactions that failed to be applied.
func (m *TrxMgr) ReturnTrx(entries ...*TrxEntry) {
m.log.Debug("TRXMGR: ReturnTrx begin")
timing := common.NewTiming()
timing.Begin()
m.fetchedLock.Lock()
defer m.fetchedLock.Unlock()
timing.Mark()
for _, e := range entries {
// any returning transaction should be previously fetched
f := m.fetched[e.trxId]
if f != nil {
m.deliverEntry(f)
delete(m.fetched, e.trxId)
}
}
timing.End()
m.log.Debugf("TRXMGR: ReturnTrx end: #tx=%d, %s", len(entries), timing.String())
}
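// Producer-side flow sketch (assumed usage pieced together from the doc comments above;
// `mgr`, `blockTime`, `maxCount`, `maxSize`, `failedEntries` and `newBlock` are placeholders):
//
//   entries := mgr.FetchTrx(blockTime, maxCount, maxSize) // collect candidates for the new block
//   // ... apply entries while producing the block ...
//   mgr.ReturnTrx(failedEntries...) // hand back entries that failed to be applied
//   mgr.BlockApplied(newBlock)      // after the block was successfully applied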
// CheckBlockTrxs checks if transactions of a block are valid.
// If everything is ok, CheckBlockTrxs returns a TrxEntry slice for the transactions and a nil error; otherwise,
// a nil slice and an error are returned.
func (m *TrxMgr) CheckBlockTrxs(b *prototype.SignedBlock) (entries []*TrxEntry, err error) {
m.log.Debugf("TRXMGR: CheckBlockTrxs begin %d", b.SignedHeader.Number())
t0 := common.EasyTimer()
if count := len(b.Transactions); count > 0 {
blockTime := b.SignedHeader.Header.Timestamp.UtcSeconds
errs := make([]error, count)
entries = make([]*TrxEntry, count)
errIdx := int32(-1)
var wg sync.WaitGroup
wg.Add(count)
// check transactions asynchronously
for i := 0; i < count; i++ {
go func(idx int) {
defer wg.Done()
var err error
trx := b.Transactions[idx].SigTrx
e := NewTrxMgrEntry(m.chainId, trx, nil)
// do we need the initial check?
// yes for transactions we have never seen before, otherwise no.
needInitCheck := true
// if we have seen this transaction before, skip the initial check and fill in the extra information.
// this avoids doing the expensive public key recovery again.
if ptrx := m.isProcessingTrx(trx); ptrx != nil {
needInitCheck = false
e.trxId = ptrx.trxId
e.size = ptrx.size
e.signer = ptrx.signer
e.signerKey = ptrx.signerKey
}
// do initial check if necessary
if needInitCheck {
err = e.InitCheck()
}
// do state-dependent checks
if err == nil {
err = m.checkTrx(e, blockTime, true)
}
// finalization works
if err != nil {
errs[idx] = err
// remember the first error we met
atomic.CompareAndSwapInt32(&errIdx, -1, int32(idx))
} else {
entries[idx] = e
}
}(i)
}
wg.Wait()
if errIdx >= 0 {
entries = nil
err = fmt.Errorf("block %d trxs[%d] check failed: %s", b.SignedHeader.Number(), errIdx, errs[errIdx].Error())
}
// check duplicate transactions inside the block.
// this is a must to prevent malicious block producers from mounting replay attacks.
// m.history won't help here because it updates at block level instead of transaction level.
trxSigs, dupTrx := make(map[string]bool), -1
for idx, e := range entries {
if trxSigs[e.trxId] {
dupTrx = idx
break
}
trxSigs[e.trxId] = true
}
if dupTrx >= 0 {
entries = nil
err = fmt.Errorf("block %d trxs[%d] duplicates", b.SignedHeader.Number(), dupTrx)
}
}
m.log.Debugf("TRXMGR: CheckBlockTrxs end %d: #tx=%d, %v", b.SignedHeader.Number(), len(b.Transactions), t0)
return
}
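// Validation-side flow sketch (assumed usage; `mgr` and `blk` are placeholders): a non-producer
// node checks a received block before applying it.
//
//   if entries, err := mgr.CheckBlockTrxs(blk); err != nil {
//       // reject the block: a transaction failed its checks or duplicates another one
//   } else {
//       _ = entries // apply the block, then:
//       mgr.BlockApplied(blk)
//   }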
// BlockApplied *MUST* be called *AFTER* a block was successfully applied.
func (m *TrxMgr) BlockApplied(b *prototype.SignedBlock) {
m.log.Debugf("TRXMGR: BlockApplied begin %d", b.SignedHeader.Number())
timing := common.NewTiming()
timing.Begin()
// update head block time
atomic.StoreUint32(&m.headTime, b.SignedHeader.Header.Timestamp.UtcSeconds)
// deliver transactions that are waiting final results
m.waitingLock.Lock()
m.fetchedLock.Lock()
timing.Mark()
for _, txw := range b.Transactions {
trxId, _ := txw.SigTrx.Id()
s := string(trxId.Hash)
if e := m.fetched[s]; e != nil {
m.deliverEntry(e)
delete(m.fetched, s)
}
if e := m.waiting[s]; e != nil {
m.deliverEntry(e)
delete(m.waiting, s)
}
}
// clean expired waiting trxs if necessary
m.cleanExpiredWaiting()
// shrink pool memory if necessary
m.shrinkPoolMemories()
timing.Mark()
m.fetchedLock.Unlock()
m.waitingLock.Unlock()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockApplied(b)
})
timing.End()
m.log.Debugf("TRXMGR: BlockApplied end %d: #tx=%d, %s", b.SignedHeader.Number(), len(b.Transactions), timing.String())
m.log.Debugf("TRXMGR: auth-hit=%v", m.auth.HitRate())
}
// BlockCommitted *MUST* be called *AFTER* a block was successfully committed.
func (m *TrxMgr) BlockCommitted(blockNum uint64) {
m.log.Debugf("TRXMGR: BlockCommitted begin %d", blockNum)
t0 := common.EasyTimer()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockCommitted(blockNum)
})
m.log.Debugf("TRXMGR: BlockCommitted end %d: %v", blockNum, t0)
}
// BlockReverted *MUST* be called *AFTER* a block was successfully reverted.
func (m *TrxMgr) BlockReverted(blockNum uint64) {
m.log.Debugf("TRXMGR: BlockReverted begin %d", blockNum)
t0 := common.EasyTimer()
// plugin notifications
m.callPlugins(func(plugin ITrxMgrPlugin) {
plugin.BlockReverted(blockNum)
})
m.log.Debugf("TRXMGR: BlockReverted end %d: %v", blockNum, t0)
}
// addToWaiting adds given transaction entries to the waiting pool, and returns the actual number added.
func (m *TrxMgr) addToWaiting(entries...*TrxEntry) (count int) {
// clean expired waiting trxs if necessary
m.cleanExpiredWaiting()
for _, e := range entries {
// check the max waiting count limit
if len(m.waiting) > sMaxWaitingCount {
_ = e.SetError(errors.New("too many waiting trxs"))
m.deliverEntry(e)
continue
}
// check duplication
if m.isProcessingNoLock(e.result.SigTrx) != nil {
_ = e.SetError(errors.New("trx already in process"))
m.deliverEntry(e)
continue
}
m.waiting[e.trxId] = e
count++
}
atomic.AddUint64(&m.shrinkCounter, uint64(count))
return
}
// isProcessingTrx is a thread safe version of isProcessingNoLock.
func (m *TrxMgr) isProcessingTrx(trx *prototype.SignedTransaction) *TrxEntry {
m.waitingLock.RLock()
defer m.waitingLock.RUnlock()
m.fetchedLock.RLock()
defer m.fetchedLock.RUnlock()
return m.isProcessingNoLock(trx)
}
// isProcessingNoLock checks if given transaction is being processed by TrxMgr.
// It returns the transaction entry if given transaction is in the waiting pool or the fetched pool,
// otherwise, nil is returned.
func (m *TrxMgr) isProcessingNoLock(trx *prototype.SignedTransaction) *TrxEntry {
if trx == nil {
return nil
}
if trxId, err := trx.Id(); err == nil {
s := string(trxId.Hash)
if e := m.waiting[s]; e != nil {
return e
}
return m.fetched[s]
} else {
return nil
}
}
// checkTrx does state-dependent checks on given transaction.
func (m *TrxMgr) checkTrx(e *TrxEntry, blockTime uint32, checkTapos bool) (err error) {
if err = e.CheckExpiration(blockTime); err != nil {
return err
}
if checkTapos {
if err = e.CheckTapos(m.tapos); err != nil {
return err
}
}
if err = e.CheckSignerKey(m.auth); err != nil {
return err
}
if err = e.CheckInBlockTrxs(m.history); err != nil {
return err
}
return
}
// deliverEntry delivers given transaction asynchronously.
func (m *TrxMgr) deliverEntry(e *TrxEntry) {
go func() {
e.Deliver()
}()
}
// callPlugins is a helper method that calls given functor with each plugin as its argument.
func (m *TrxMgr) callPlugins(f func(plugin ITrxMgrPlugin)) {
var wg sync.WaitGroup
wg.Add(len(m.plugins))
for i := range m.plugins {
go func(idx int) {
defer wg.Done()
f(m.plugins[idx])
}(i)
}
wg.Wait()
}
func (m *TrxMgr) DiscardAccountCache(name string) {
m.auth.Discard(name)
}
//
// clean expired transactions from waiting pool if waiting pool is large enough.
//
// We need a cleaning procedure, especially for non-producer nodes.
// A non-producer node checks each block it applied and removes in-block transactions from the waiting pool.
// Without waiting pool cleaning, erroneous transactions will remain in the pool forever because they will never
// be packed into blocks. This can eventually fill up the waiting pool, leading to huge memory consumption and
// DoS for new transactions.
//
func (m *TrxMgr) cleanExpiredWaiting() {
// when the waiting pool is small, we don't need cleaning
if len(m.waiting) < sWaitingCountWaterMark {
return
}
// we avoid frequent cleaning
if headBlockTime := atomic.LoadUint32(&m.headTime); headBlockTime > 0 && time.Since(m.lastCleanTime) > sMinCleanupInterval {
m.lastCleanTime = time.Now()
for k, e := range m.waiting {
if err := e.CheckExpiration(headBlockTime); err != nil {
m.deliverEntry(e)
delete(m.waiting, k) // remove only the expired entry; it has just been delivered with an expiration error
}
}
}
}
// delete(map, key) won't release any memory occupied by a map.
// so we need to re-copy our pools from time to time; otherwise they slowly but permanently accumulate memory.
func (m *TrxMgr) shrinkPoolMemories() {
if atomic.LoadUint64(&m.shrinkCounter) > sShrinkCountWaterMark {
atomic.StoreUint64(&m.shrinkCounter, 0)
waiting, fetched := make(map[string]*TrxEntry), make(map[string]*TrxEntry)
for k, e := range m.waiting {
waiting[k] = e
}
for k, e := range m.fetched {
fetched[k] = e
}
m.waiting, m.fetched = waiting, fetched
}
}
| {
e.result.Receipt.Status = prototype.StatusError
e.result.Receipt.ErrorInfo = err.Error()
return err
} | identifier_body |
mixhop_trainer.py |
# Standard imports.
import collections
import json
import os
import pickle
# Third-party imports.
from absl import app
from absl import flags
import numpy
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.keras import regularizers as keras_regularizers
# Project imports.
import mixhop_dataset
import mixhop_model
# IO Flags.
flags.DEFINE_string('dataset_dir',
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/planetoid/data'),
'Directory containing all datasets. We assume the format '
'of Planetoid')
flags.DEFINE_string('results_dir', 'results',
'Evaluation results will be written here.')
flags.DEFINE_string('train_dir', 'trained_models',
'Directory where trained models will be written.')
flags.DEFINE_string('run_id', '',
'Will be included in output filenames for model (in '
'--train_dir) and results (in --results_dir).')
flags.DEFINE_boolean('retrain', False,
'If set, model will retrain even if its results file '
'exists')
# Dataset Flags.
flags.DEFINE_string('dataset_name', 'ind.cora', '')
# flags.DEFINE_integer('num_train_nodes', -20,
# 'Number of training nodes. If < 0, then the number is '
# 'converted to positive and that many training nodes are '
# 'used per class. -20 recovers setting in Kipf & Welling.')
flags.DEFINE_integer('num_validate_nodes', 500, '')
# Model Architecture Flags.
flags.DEFINE_string('architecture', '',
'(Optional) path to model architecture JSON file. '
'If given, none of the architecture flags matter anymore: '
'the contents of the file will entirely specify the '
'architecture. For example, see architectures/pubmed.json')
flags.DEFINE_string('hidden_dims_csv', '60',
'Comma-separated list of hidden layer sizes.')
flags.DEFINE_string('output_layer', 'wsum',
'One of: "wsum" (weighted sum) or "fc" (fully-connected).')
flags.DEFINE_string('nonlinearity', 'relu', '')
flags.DEFINE_string('adj_pows', '1',
'Comma-separated list of Adjacency powers. Setting to "1" '
'recovers vanilla GCN. Setting to "0,1,2" uses '
'[A^0, A^1, A^2]. Further, you can feed as '
'"0:20:10,1:10:10", where the syntax is '
'<pow>:<capacity in layer1>:<capacity in layer2>. The '
'number of layers equals number of entries in '
'--hidden_dims_csv, plus one (for the output layer). The '
'capacities do *NOT* have to add-up to the corresponding '
'entry in hidden_dims_csv. They will be re-scaled if '
'necessary.')
# Training Flags.
flags.DEFINE_integer('num_train_steps', 400, 'Number of training steps.')
flags.DEFINE_integer('early_stop_steps', 50, 'If the validation accuracy does '
'not increase for this many steps, training is halted.')
flags.DEFINE_float('l2reg', 5e-4, 'L2 Regularization on Kernels.')
flags.DEFINE_float('input_dropout', 0.7, 'Dropout applied at input layer')
flags.DEFINE_float('layer_dropout', 0.9, 'Dropout applied at hidden layers')
flags.DEFINE_string('optimizer', 'GradientDescentOptimizer',
'Name of optimizer to use. Must be member of tf.train.')
flags.DEFINE_float('learn_rate', 0.5, 'Learning Rate for the optimizer.')
flags.DEFINE_float('lr_decrement_ratio_of_initial', 0.01,
'Learning rate will be decremented by '
'this value * --learn_rate.')
flags.DEFINE_float('lr_decrement_every', 40,
'Learning rate will be decremented every this many steps.')
flags.DEFINE_bool('use_signac', False, 'Use signac and put all args into signac workspace.')
flags.DEFINE_string('signac_root', None, 'Root path for signac project.')
flags.DEFINE_bool('debug', False, 'Debug code in VS Code')
flags.DEFINE_bool("_l2_normalization", True, "")
flags.DEFINE_bool("_batch_normalization", True, "")
flags.DEFINE_bool("_psum_output", True, "")
flags.DEFINE_bool("identity_feature", False, "")
FLAGS = flags.FLAGS
def GetEncodedParams():
"""Summarizes all flag values in a string, to be used in output filenames."""
if FLAGS.debug:
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=('localhost', 5678), redirect_output=True)
ptvsd.wait_for_attach()
breakpoint()
if FLAGS.use_signac:
import signac
project = signac.get_project(root=FLAGS.signac_root)
job = project.open_job(dict(
dataset_name=FLAGS.dataset_name, run_id=FLAGS.run_id,
optimizer=FLAGS.optimizer, learn_rate=FLAGS.learn_rate,
l2reg=FLAGS.l2reg, output_layer=FLAGS.output_layer,
nonlinearity=FLAGS.nonlinearity,
adj_powers=FLAGS.adj_pows.replace(',', 'x').replace(':', '.'),
architecture=FLAGS.architecture, identity_feature=FLAGS.identity_feature
)).init()
FLAGS.results_dir = job.fn(FLAGS.results_dir)
FLAGS.train_dir = job.fn(FLAGS.train_dir)
FLAGS.run_id = ""
params = '_'.join([
'ds-%s' % FLAGS.dataset_name,
'r-%s' % FLAGS.run_id,
'opt-%s' % FLAGS.optimizer,
'lr-%g' % FLAGS.learn_rate,
'l2-%g' % FLAGS.l2reg,
'o-%s' % FLAGS.output_layer,
'act-%s' % FLAGS.nonlinearity,
'pows-%s' % FLAGS.adj_pows.replace(',', 'x').replace(':', '.'),
])
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
for gpu in gpus:
|
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
return params
class AccuracyMonitor(object):
"""Monitors and remembers model parameters @ best validation accuracy."""
def __init__(self, sess, early_stop_steps):
"""Initializes AccuracyMonitor.
Args:
sess: (singleton) instance of tf.Session that is used for training.
early_stop_steps: int with number of steps to allow without any
improvement on the validation accuracy.
"""
self._early_stop_steps = early_stop_steps
self._sess = sess
# (validate accuracy, test accuracy, step #), recorded at best validate
# accuracy.
self.best = (0, 0, 0)
# Will be populated to dict of all tensorflow variable names to their values
# as numpy arrays.
self.params_at_best = None
def mark_accuracy(self, validate_accuracy, test_accuracy, i):
curr_accuracy = (float(validate_accuracy), float(test_accuracy), i)
self.curr_accuracy = curr_accuracy
if curr_accuracy > self.best:
self.best = curr_accuracy
all_variables = tf.global_variables()
all_variable_values = self._sess.run(all_variables)
params_at_best_validate = (
{var.name: val
for var, val in zip(all_variables, all_variable_values)})
self.params_at_best = params_at_best_validate
if i > self.best[-1] + self._early_stop_steps:
return False
return True
# TODO(haija): move to utils.
class AdjacencyPowersParser(object):
def __init__(self):
powers = FLAGS.adj_pows.split(',')
has_colon = None
self._powers = []
self._ratios = []
for i, p in enumerate(powers):
if i == 0:
has_colon = (':' in p)
else:
if has_colon != (':' in p):
raise ValueError(
'Error in flag --adj_pows. Either all powers or none should '
'include ":"')
#
components = p.split(':')
self._powers.append(int(components[0]))
if has_colon:
self._ratios.append(list(map(float, components[1:])))
else:
self._ratios.append([1])
def powers(self):
return self._powers
def output_capacity(self, num_classes):
if all([len(s) == 1 and s[0] == 1 for s in self._ratios]):
return num_classes * len(self._powers)
else:
return sum([s[-1] for s in self._ratios])
def divide_capacity(self, layer_index, total_dim):
sizes = [l[min(layer_index, len(l)-1)] for l in self._ratios]
sum_units = numpy.sum(sizes)
size_per_unit = total_dim / float(sum_units)
dims = []
for s in sizes[:-1]:
dim = int(numpy.round(s * size_per_unit))
dims.append(dim)
dims.append(total_dim - sum(dims))
return dims
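# Worked example (a sketch based on the parsing and splitting logic above, with the default
# hidden_dims_csv='60' and 7 output classes): --adj_pows="0:20:10,1:10:10" yields powers=[0, 1]
# and ratios=[[20, 10], [10, 10]]; divide_capacity(0, 60) splits the 60 hidden units
# proportionally to [20, 10] -> [40, 20]; output_capacity(7) = 10 + 10 = 20 (the last ratio of
# each power); and divide_capacity(1, 20) splits the output layer as [10, 10]. With the plain
# form --adj_pows="0,1,2" every ratio is [1], so output_capacity(7) = 7 * 3 = 21.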
def main(unused_argv):
encoded_params = GetEncodedParams()
output_results_file = os.path.join(
FLAGS.results_dir, encoded_params + '.json')
output_model_file = os.path.join(
FLAGS.train_dir, encoded_params + '.pkl')
if os.path.exists(output_results_file) and not FLAGS.retrain:
print('Exiting early. Results are already computed: %s. Pass flag '
'--retrain to override' % output_results_file)
return 0
### LOAD DATASET
# The adjacency matrix is also normalized in this step
dataset = mixhop_dataset.ReadDataset(FLAGS.dataset_dir, FLAGS.dataset_name,
use_identity=FLAGS.identity_feature)
### MODEL REQUIREMENTS (Placeholders, adjacency tensor, regularizers)
x = dataset.sparse_allx_tensor()
y = tf.placeholder(tf.float32, [None, dataset.ally.shape[1]], name='y')
ph_indices = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder_with_default(True, [], name='is_training')
pows_parser = AdjacencyPowersParser() # Parses flag --adj_pows
num_x_entries = dataset.x_indices.shape[0]
sparse_adj = dataset.sparse_adj_tensor()
kernel_regularizer = keras_regularizers.l2(FLAGS.l2reg)
### BUILD MODEL
model = mixhop_model.MixHopModel(
sparse_adj, x, is_training, kernel_regularizer)
if FLAGS.architecture:
model.load_architecture_from_file(FLAGS.architecture)
else:
model.add_layer('mixhop_model', 'sparse_dropout', FLAGS.input_dropout,
num_x_entries, pass_is_training=True)
model.add_layer('tf', 'sparse_tensor_to_dense')
if FLAGS._l2_normalization:
model.add_layer('tf.nn', 'l2_normalize', axis=1)
power_parser = AdjacencyPowersParser()
layer_dims = list(map(int, FLAGS.hidden_dims_csv.split(',')))
layer_dims.append(power_parser.output_capacity(dataset.ally.shape[1]))
for j, dim in enumerate(layer_dims):
if j != 0:
model.add_layer('tf.layers', 'dropout', FLAGS.layer_dropout,
pass_training=True)
capacities = power_parser.divide_capacity(j, dim)
model.add_layer('self', 'mixhop_layer', power_parser.powers(), capacities,
layer_id=j, pass_kernel_regularizer=True)
if j != len(layer_dims) - 1:
if FLAGS._batch_normalization:
model.add_layer('tf.contrib.layers', 'batch_norm')
model.add_layer('tf.nn', FLAGS.nonlinearity)
#
model.add_layer('mixhop_model', 'psum_output_layer', dataset.ally.shape[1],
use_softmax=FLAGS._psum_output)
net = model.activations[-1]
### TRAINING.
sliced_output = tf.gather(net, ph_indices)
learn_rate = tf.placeholder(tf.float32, [], 'learn_rate')
label_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.stop_gradient(y), logits=sliced_output))
tf.losses.add_loss(label_loss)
loss = tf.losses.get_total_loss()
if FLAGS.optimizer == 'MomentumOptimizer':
optimizer = tf.train.MomentumOptimizer(learn_rate, 0.7, use_nesterov=True)
else:
optimizer_class = getattr(tf.train, FLAGS.optimizer)
optimizer = optimizer_class(learn_rate)
train_op = slim.learning.create_train_op(
loss, optimizer, gradient_multipliers=[])
### CREATE SESSION
# Now that the graph is frozen, create the session and initialize all variables.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print("Number of parameters: ",
numpy.sum([numpy.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
print({v.name: v.get_shape().as_list() for v in tf.trainable_variables()})
### PREPARE FOR TRAINING
# Get indices of {train, validate, test} nodes.
train_indices, validate_indices, test_indices = dataset.get_partition_indices(FLAGS.num_validate_nodes)
feed_dict = {y: dataset.ally[train_indices]}
dataset.populate_feed_dict(feed_dict)
LAST_STEP = collections.Counter()
accuracy_monitor = AccuracyMonitor(sess, FLAGS.early_stop_steps)
# Step function makes a single update, prints accuracies, and invokes
# accuracy_monitor to keep track of test accuracy and parameters @ best
# validation accuracy
def step(lr=None, columns=None):
if lr is not None:
feed_dict[learn_rate] = lr
i = LAST_STEP['step']
LAST_STEP['step'] += 1
feed_dict[is_training] = True
feed_dict[ph_indices] = train_indices
# Train step
train_preds, loss_value, _ = sess.run((sliced_output, label_loss, train_op), feed_dict)
if numpy.isnan(loss_value).any():
print('NaN value reached. Debug please.')
import IPython; IPython.embed()
train_accuracy = numpy.mean(
train_preds.argmax(axis=1) == dataset.ally[train_indices].argmax(axis=1))
feed_dict[is_training] = False
feed_dict[ph_indices] = test_indices
test_preds = sess.run(sliced_output, feed_dict)
test_accuracy = numpy.mean(
test_preds.argmax(axis=1) == dataset.ally[test_indices].argmax(axis=1))
feed_dict[ph_indices] = validate_indices
validate_preds = sess.run(sliced_output, feed_dict)
validate_accuracy = numpy.mean(
validate_preds.argmax(axis=1) == dataset.ally[validate_indices].argmax(axis=1))
keep_going = accuracy_monitor.mark_accuracy(validate_accuracy, test_accuracy, i)
print('%i. (loss=%g). Acc: train=%f val=%f test=%f (@ best val test=%f)' % (
i, loss_value, train_accuracy, validate_accuracy, test_accuracy,
accuracy_monitor.best[1]))
if keep_going:
return True
else:
print('Early stopping')
return False
### TRAINING LOOP
lr = FLAGS.learn_rate
lr_decrement = FLAGS.lr_decrement_ratio_of_initial * FLAGS.learn_rate
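# Schedule note (derived from the flag defaults above): with learn_rate=0.5,
# lr_decrement_ratio_of_initial=0.01 and lr_decrement_every=40, lr drops by 0.5 * 0.01 = 0.005
# once every 40 steps, so over the default 400 training steps it decays from 0.5 to about 0.455
# unless early stopping triggers first.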
for i in range(FLAGS.num_train_steps):
if not step(lr=lr):
break
if i > 0 and i % FLAGS.lr_decrement_every == 0:
lr -= lr_decrement
if lr <= 0:
break
if not os.path.exists(FLAGS.results_dir):
os.makedirs(FLAGS.results_dir)
if not os.path.exists(FLAGS.train_dir):
os.makedirs(FLAGS.train_dir)
with open(output_results_file, 'w') as fout:
results = {
'at_best_validate': accuracy_monitor.best,
'current': accuracy_monitor.curr_accuracy,
}
fout.write(json.dumps(results))
with open(output_model_file, 'wb') as fout:
pickle.dump(accuracy_monitor.params_at_best, fout)
print('Wrote model to ' + output_model_file)
print('Wrote results to ' + output_results_file)
if __name__ == '__main__':
app.run(main)
| tf.config.experimental.set_memory_growth(gpu, True) | conditional_block |
mixhop_trainer.py |
# Standard imports.
import collections
import json
import os
import pickle
# Third-party imports.
from absl import app
from absl import flags
import numpy
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.keras import regularizers as keras_regularizers
# Project imports.
import mixhop_dataset
import mixhop_model
# IO Flags.
flags.DEFINE_string('dataset_dir',
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/planetoid/data'),
'Directory containing all datasets. We assume the format '
'of Planetoid')
flags.DEFINE_string('results_dir', 'results',
'Evaluation results will be written here.')
flags.DEFINE_string('train_dir', 'trained_models',
'Directory where trained models will be written.')
flags.DEFINE_string('run_id', '',
'Will be included in output filenames for model (in '
'--train_dir) and results (in --results_dir).')
flags.DEFINE_boolean('retrain', False,
'If set, model will retrain even if its results file '
'exists')
# Dataset Flags.
flags.DEFINE_string('dataset_name', 'ind.cora', '')
# flags.DEFINE_integer('num_train_nodes', -20,
# 'Number of training nodes. If < 0, then the number is '
# 'converted to positive and that many training nodes are '
# 'used per class. -20 recovers setting in Kipf & Welling.')
flags.DEFINE_integer('num_validate_nodes', 500, '')
# Model Architecture Flags.
flags.DEFINE_string('architecture', '',
'(Optional) path to model architecture JSON file. '
'If given, none of the architecture flags matter anymore: '
'the contents of the file will entirely specify the '
'architecture. For example, see architectures/pubmed.json')
flags.DEFINE_string('hidden_dims_csv', '60',
'Comma-separated list of hidden layer sizes.')
flags.DEFINE_string('output_layer', 'wsum',
'One of: "wsum" (weighted sum) or "fc" (fully-connected).')
flags.DEFINE_string('nonlinearity', 'relu', '')
flags.DEFINE_string('adj_pows', '1',
'Comma-separated list of Adjacency powers. Setting to "1" '
'recovers vanilla GCN. Setting to "0,1,2" uses '
'[A^0, A^1, A^2]. Further, you can feed as '
'"0:20:10,1:10:10", where the syntax is '
'<pow>:<capacity in layer1>:<capacity in layer2>. The '
'number of layers equals number of entries in '
'--hidden_dims_csv, plus one (for the output layer). The '
'capacities do *NOT* have to add-up to the corresponding '
'entry in hidden_dims_csv. They will be re-scaled if '
'necessary.')
# Training Flags.
flags.DEFINE_integer('num_train_steps', 400, 'Number of training steps.')
flags.DEFINE_integer('early_stop_steps', 50, 'If the validation accuracy does '
'not increase for this many steps, training is halted.')
flags.DEFINE_float('l2reg', 5e-4, 'L2 Regularization on Kernels.')
flags.DEFINE_float('input_dropout', 0.7, 'Dropout applied at input layer')
flags.DEFINE_float('layer_dropout', 0.9, 'Dropout applied at hidden layers')
flags.DEFINE_string('optimizer', 'GradientDescentOptimizer',
'Name of optimizer to use. Must be member of tf.train.')
flags.DEFINE_float('learn_rate', 0.5, 'Learning Rate for the optimizer.')
flags.DEFINE_float('lr_decrement_ratio_of_initial', 0.01,
'Learning rate will be decremented by '
'this value * --learn_rate.')
flags.DEFINE_float('lr_decrement_every', 40,
'Learning rate will be decremented every this many steps.')
flags.DEFINE_bool('use_signac', False, 'Use signac and put all args into signac workspace.')
flags.DEFINE_string('signac_root', None, 'Root path for signac project.')
flags.DEFINE_bool('debug', False, 'Debug code in VS Code')
flags.DEFINE_bool("_l2_normalization", True, "")
flags.DEFINE_bool("_batch_normalization", True, "")
flags.DEFINE_bool("_psum_output", True, "")
flags.DEFINE_bool("identity_feature", False, "")
FLAGS = flags.FLAGS
def GetEncodedParams():
"""Summarizes all flag values in a string, to be used in output filenames."""
if FLAGS.debug:
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=('localhost', 5678), redirect_output=True)
ptvsd.wait_for_attach()
breakpoint()
if FLAGS.use_signac:
import signac
project = signac.get_project(root=FLAGS.signac_root)
job = project.open_job(dict(
dataset_name=FLAGS.dataset_name, run_id=FLAGS.run_id,
optimizer=FLAGS.optimizer, learn_rate=FLAGS.learn_rate,
l2reg=FLAGS.l2reg, output_layer=FLAGS.output_layer,
nonlinearity=FLAGS.nonlinearity,
adj_powers=FLAGS.adj_pows.replace(',', 'x').replace(':', '.'),
architecture=FLAGS.architecture, identity_feature=FLAGS.identity_feature
)).init()
FLAGS.results_dir = job.fn(FLAGS.results_dir)
FLAGS.train_dir = job.fn(FLAGS.train_dir)
FLAGS.run_id = ""
params = '_'.join([
'ds-%s' % FLAGS.dataset_name,
'r-%s' % FLAGS.run_id,
'opt-%s' % FLAGS.optimizer,
'lr-%g' % FLAGS.learn_rate,
'l2-%g' % FLAGS.l2reg,
'o-%s' % FLAGS.output_layer,
'act-%s' % FLAGS.nonlinearity,
'pows-%s' % FLAGS.adj_pows.replace(',', 'x').replace(':', '.'),
])
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
return params
class AccuracyMonitor(object):
"""Monitors and remembers model parameters @ best validation accuracy."""
def __init__(self, sess, early_stop_steps):
"""Initializes AccuracyMonitor.
Args:
sess: (singleton) instance of tf.Session that is used for training.
early_stop_steps: int with number of steps to allow without any
improvement on the validation accuracy.
"""
self._early_stop_steps = early_stop_steps
self._sess = sess
# (validate accuracy, test accuracy, step #), recorded at best validate
# accuracy.
self.best = (0, 0, 0)
# Will be populated to dict of all tensorflow variable names to their values
# as numpy arrays.
self.params_at_best = None
def mark_accuracy(self, validate_accuracy, test_accuracy, i):
curr_accuracy = (float(validate_accuracy), float(test_accuracy), i)
self.curr_accuracy = curr_accuracy
if curr_accuracy > self.best:
self.best = curr_accuracy
all_variables = tf.global_variables()
all_variable_values = self._sess.run(all_variables)
params_at_best_validate = (
{var.name: val
for var, val in zip(all_variables, all_variable_values)})
self.params_at_best = params_at_best_validate
if i > self.best[-1] + self._early_stop_steps:
return False
return True
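# Note (a reading of the code above, not original documentation): `best` is a
# (validate_accuracy, test_accuracy, step) tuple compared lexicographically, so ties in
# validation accuracy are broken by test accuracy and then by step number; training is
# halted once `early_stop_steps` steps pass without a new best being recorded.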
# TODO(haija): move to utils.
class AdjacencyPowersParser(object):
def __init__(self):
powers = FLAGS.adj_pows.split(',')
has_colon = None
self._powers = []
self._ratios = []
for i, p in enumerate(powers):
if i == 0:
has_colon = (':' in p)
else:
if has_colon != (':' in p):
raise ValueError(
'Error in flag --adj_pows. Either all powers or none should '
'include ":"')
#
components = p.split(':')
self._powers.append(int(components[0]))
if has_colon:
self._ratios.append(list(map(float, components[1:])))
else:
self._ratios.append([1])
def powers(self):
return self._powers
def output_capacity(self, num_classes):
if all([len(s) == 1 and s[0] == 1 for s in self._ratios]):
return num_classes * len(self._powers)
else:
return sum([s[-1] for s in self._ratios])
def divide_capacity(self, layer_index, total_dim):
|
def main(unused_argv):
encoded_params = GetEncodedParams()
output_results_file = os.path.join(
FLAGS.results_dir, encoded_params + '.json')
output_model_file = os.path.join(
FLAGS.train_dir, encoded_params + '.pkl')
if os.path.exists(output_results_file) and not FLAGS.retrain:
print('Exiting early. Results are already computed: %s. Pass flag '
'--retrain to override' % output_results_file)
return 0
### LOAD DATASET
# The adjacency matrix is also normalized in this step
dataset = mixhop_dataset.ReadDataset(FLAGS.dataset_dir, FLAGS.dataset_name,
use_identity=FLAGS.identity_feature)
### MODEL REQUIREMENTS (Placeholders, adjacency tensor, regularizers)
x = dataset.sparse_allx_tensor()
y = tf.placeholder(tf.float32, [None, dataset.ally.shape[1]], name='y')
ph_indices = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder_with_default(True, [], name='is_training')
pows_parser = AdjacencyPowersParser() # Parses flag --adj_pows
num_x_entries = dataset.x_indices.shape[0]
sparse_adj = dataset.sparse_adj_tensor()
kernel_regularizer = keras_regularizers.l2(FLAGS.l2reg)
### BUILD MODEL
model = mixhop_model.MixHopModel(
sparse_adj, x, is_training, kernel_regularizer)
if FLAGS.architecture:
model.load_architecture_from_file(FLAGS.architecture)
else:
model.add_layer('mixhop_model', 'sparse_dropout', FLAGS.input_dropout,
num_x_entries, pass_is_training=True)
model.add_layer('tf', 'sparse_tensor_to_dense')
if FLAGS._l2_normalization:
model.add_layer('tf.nn', 'l2_normalize', axis=1)
power_parser = AdjacencyPowersParser()
layer_dims = list(map(int, FLAGS.hidden_dims_csv.split(',')))
layer_dims.append(power_parser.output_capacity(dataset.ally.shape[1]))
for j, dim in enumerate(layer_dims):
if j != 0:
model.add_layer('tf.layers', 'dropout', FLAGS.layer_dropout,
pass_training=True)
capacities = power_parser.divide_capacity(j, dim)
model.add_layer('self', 'mixhop_layer', power_parser.powers(), capacities,
layer_id=j, pass_kernel_regularizer=True)
if j != len(layer_dims) - 1:
if FLAGS._batch_normalization:
model.add_layer('tf.contrib.layers', 'batch_norm')
model.add_layer('tf.nn', FLAGS.nonlinearity)
#
model.add_layer('mixhop_model', 'psum_output_layer', dataset.ally.shape[1],
use_softmax=FLAGS._psum_output)
net = model.activations[-1]
### TRAINING.
sliced_output = tf.gather(net, ph_indices)
learn_rate = tf.placeholder(tf.float32, [], 'learn_rate')
label_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.stop_gradient(y), logits=sliced_output))
tf.losses.add_loss(label_loss)
loss = tf.losses.get_total_loss()
if FLAGS.optimizer == 'MomentumOptimizer':
optimizer = tf.train.MomentumOptimizer(learn_rate, 0.7, use_nesterov=True)
else:
optimizer_class = getattr(tf.train, FLAGS.optimizer)
optimizer = optimizer_class(learn_rate)
train_op = slim.learning.create_train_op(
loss, optimizer, gradient_multipliers=[])
### CREATE SESSION
# Now that the graph is frozen, create the session and initialize all variables.
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print("Number of parameters: ",
numpy.sum([numpy.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
print({v.name: v.get_shape().as_list() for v in tf.trainable_variables()})
### PREPARE FOR TRAINING
# Get indices of {train, validate, test} nodes.
train_indices, validate_indices, test_indices = dataset.get_partition_indices(FLAGS.num_validate_nodes)
feed_dict = {y: dataset.ally[train_indices]}
dataset.populate_feed_dict(feed_dict)
LAST_STEP = collections.Counter()
accuracy_monitor = AccuracyMonitor(sess, FLAGS.early_stop_steps)
# Step function makes a single update, prints accuracies, and invokes
# accuracy_monitor to keep track of test accuracy and parameters @ best
# validation accuracy
def step(lr=None, columns=None):
if lr is not None:
feed_dict[learn_rate] = lr
i = LAST_STEP['step']
LAST_STEP['step'] += 1
feed_dict[is_training] = True
feed_dict[ph_indices] = train_indices
# Train step
train_preds, loss_value, _ = sess.run((sliced_output, label_loss, train_op), feed_dict)
if numpy.isnan(loss_value).any():
print('NaN value reached. Debug please.')
import IPython; IPython.embed()
train_accuracy = numpy.mean(
train_preds.argmax(axis=1) == dataset.ally[train_indices].argmax(axis=1))
feed_dict[is_training] = False
feed_dict[ph_indices] = test_indices
test_preds = sess.run(sliced_output, feed_dict)
test_accuracy = numpy.mean(
test_preds.argmax(axis=1) == dataset.ally[test_indices].argmax(axis=1))
feed_dict[ph_indices] = validate_indices
validate_preds = sess.run(sliced_output, feed_dict)
validate_accuracy = numpy.mean(
validate_preds.argmax(axis=1) == dataset.ally[validate_indices].argmax(axis=1))
keep_going = accuracy_monitor.mark_accuracy(validate_accuracy, test_accuracy, i)
print('%i. (loss=%g). Acc: train=%f val=%f test=%f (@ best val test=%f)' % (
i, loss_value, train_accuracy, validate_accuracy, test_accuracy,
accuracy_monitor.best[1]))
if keep_going:
return True
else:
print('Early stopping')
return False
### TRAINING LOOP
lr = FLAGS.learn_rate
lr_decrement = FLAGS.lr_decrement_ratio_of_initial * FLAGS.learn_rate
for i in range(FLAGS.num_train_steps):
if not step(lr=lr):
break
if i > 0 and i % FLAGS.lr_decrement_every == 0:
lr -= lr_decrement
if lr <= 0:
break
if not os.path.exists(FLAGS.results_dir):
os.makedirs(FLAGS.results_dir)
if not os.path.exists(FLAGS.train_dir):
os.makedirs(FLAGS.train_dir)
with open(output_results_file, 'w') as fout:
results = {
'at_best_validate': accuracy_monitor.best,
'current': accuracy_monitor.curr_accuracy,
}
fout.write(json.dumps(results))
with open(output_model_file, 'wb') as fout:
pickle.dump(accuracy_monitor.params_at_best, fout)
print('Wrote model to ' + output_model_file)
print('Wrote results to ' + output_results_file)
if __name__ == '__main__':
app.run(main)
| sizes = [l[min(layer_index, len(l)-1)] for l in self._ratios]
sum_units = numpy.sum(sizes)
size_per_unit = total_dim / float(sum_units)
dims = []
for s in sizes[:-1]:
dim = int(numpy.round(s * size_per_unit))
dims.append(dim)
dims.append(total_dim - sum(dims))
return dims | identifier_body |
mixhop_trainer.py |
# Standard imports.
import collections
import json
import os
import pickle
# Third-party imports.
from absl import app
from absl import flags
import numpy
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.keras import regularizers as keras_regularizers
# Project imports.
import mixhop_dataset
import mixhop_model
# IO Flags.
flags.DEFINE_string('dataset_dir',
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/planetoid/data'),
'Directory containing all datasets. We assume the format '
'of Planetoid')
flags.DEFINE_string('results_dir', 'results',
'Evaluation results will be written here.')
flags.DEFINE_string('train_dir', 'trained_models',
'Directory where trained models will be written.')
flags.DEFINE_string('run_id', '',
'Will be included in output filenames for model (in '
'--train_dir) and results (in --results_dir).')
flags.DEFINE_boolean('retrain', False,
'If set, model will retrain even if its results file '
'exists')
# Dataset Flags.
flags.DEFINE_string('dataset_name', 'ind.cora', '')
# flags.DEFINE_integer('num_train_nodes', -20,
# 'Number of training nodes. If < 0, then the number is '
# 'converted to positive and that many training nodes are '
# 'used per class. -20 recovers setting in Kipf & Welling.')
flags.DEFINE_integer('num_validate_nodes', 500, '')
# Model Architecture Flags.
flags.DEFINE_string('architecture', '',
'(Optional) path to model architecture JSON file. '
'If given, none of the architecture flags matter anymore: '
'the contents of the file will entirely specify the '
'architecture. For example, see architectures/pubmed.json')
flags.DEFINE_string('hidden_dims_csv', '60',
'Comma-separated list of hidden layer sizes.')
flags.DEFINE_string('output_layer', 'wsum',
'One of: "wsum" (weighted sum) or "fc" (fully-connected).')
flags.DEFINE_string('nonlinearity', 'relu', '')
flags.DEFINE_string('adj_pows', '1',
'Comma-separated list of Adjacency powers. Setting to "1" '
'recovers vanilla GCN. Setting to "0,1,2" uses '
'[A^0, A^1, A^2]. Further, you can feed as '
'"0:20:10,1:10:10", where the syntax is '
'<pow>:<capacity in layer1>:<capacity in layer2>. The '
'number of layers equals number of entries in '
'--hidden_dims_csv, plus one (for the output layer). The '
'capacities do *NOT* have to add-up to the corresponding '
'entry in hidden_dims_csv. They will be re-scaled if '
'necessary.')
# Training Flags.
flags.DEFINE_integer('num_train_steps', 400, 'Number of training steps.')
flags.DEFINE_integer('early_stop_steps', 50, 'If the validation accuracy does '
'not increase for this many steps, training is halted.')
flags.DEFINE_float('l2reg', 5e-4, 'L2 Regularization on Kernels.')
flags.DEFINE_float('input_dropout', 0.7, 'Dropout applied at input layer')
flags.DEFINE_float('layer_dropout', 0.9, 'Dropout applied at hidden layers')
flags.DEFINE_string('optimizer', 'GradientDescentOptimizer',
'Name of optimizer to use. Must be member of tf.train.')
flags.DEFINE_float('learn_rate', 0.5, 'Learning Rate for the optimizer.')
flags.DEFINE_float('lr_decrement_ratio_of_initial', 0.01,
'Learning rate will be decremented by '
'this value * --learn_rate.')
flags.DEFINE_float('lr_decrement_every', 40,
'Learning rate will be decremented every this many steps.')
flags.DEFINE_bool('use_signac', False, 'Use signac and put all args into signac workspace.')
flags.DEFINE_string('signac_root', None, 'Root path for signac project.')
flags.DEFINE_bool('debug', False, 'Debug code in VS Code')
flags.DEFINE_bool("_l2_normalization", True, "")
flags.DEFINE_bool("_batch_normalization", True, "")
flags.DEFINE_bool("_psum_output", True, "")
flags.DEFINE_bool("identity_feature", False, "")
FLAGS = flags.FLAGS
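# Example invocation (illustrative only; all flags are defined above, and the values here are
# arbitrary examples rather than recommended settings):
#   python mixhop_trainer.py --dataset_name=ind.cora --hidden_dims_csv=60 \
#       --adj_pows=0:20:10,1:10:10 --learn_rate=0.5 --num_train_steps=400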
def GetEncodedParams():
"""Summarizes all flag values in a string, to be used in output filenames."""
if FLAGS.debug:
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=('localhost', 5678), redirect_output=True)
ptvsd.wait_for_attach()
breakpoint()
if FLAGS.use_signac:
import signac
project = signac.get_project(root=FLAGS.signac_root)
job = project.open_job(dict(
dataset_name=FLAGS.dataset_name, run_id=FLAGS.run_id,
optimizer=FLAGS.optimizer, learn_rate=FLAGS.learn_rate,
l2reg=FLAGS.l2reg, output_layer=FLAGS.output_layer,
nonlinearity=FLAGS.nonlinearity,
adj_powers=FLAGS.adj_pows.replace(',', 'x').replace(':', '.'),
architecture=FLAGS.architecture, identity_feature=FLAGS.identity_feature
)).init()
FLAGS.results_dir = job.fn(FLAGS.results_dir)
FLAGS.train_dir = job.fn(FLAGS.train_dir)
FLAGS.run_id = ""
params = '_'.join([
'ds-%s' % FLAGS.dataset_name,
'r-%s' % FLAGS.run_id,
'opt-%s' % FLAGS.optimizer,
'lr-%g' % FLAGS.learn_rate,
'l2-%g' % FLAGS.l2reg,
'o-%s' % FLAGS.output_layer,
'act-%s' % FLAGS.nonlinearity,
'pows-%s' % FLAGS.adj_pows.replace(',', 'x').replace(':', '.'),
])
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
return params
class AccuracyMonitor(object):
"""Monitors and remembers model parameters @ best validation accuracy."""
def __init__(self, sess, early_stop_steps):
"""Initializes AccuracyMonitor.
Args:
sess: (singleton) instance of tf.Session that is used for training.
early_stop_steps: int with number of steps to allow without any
improvement on the validation accuracy.
"""
self._early_stop_steps = early_stop_steps
self._sess = sess
# (validate accuracy, test accuracy, step #), recorded at best validate
# accuracy.
self.best = (0, 0, 0)
# Will be populated to dict of all tensorflow variable names to their values
# as numpy arrays.
self.params_at_best = None
def mark_accuracy(self, validate_accuracy, test_accuracy, i):
curr_accuracy = (float(validate_accuracy), float(test_accuracy), i)
self.curr_accuracy = curr_accuracy
if curr_accuracy > self.best:
self.best = curr_accuracy
all_variables = tf.global_variables()
all_variable_values = self._sess.run(all_variables)
params_at_best_validate = (
{var.name: val
for var, val in zip(all_variables, all_variable_values)})
self.params_at_best = params_at_best_validate
if i > self.best[-1] + self._early_stop_steps:
return False
return True
# TODO(haija): move to utils.
class AdjacencyPowersParser(object):
def | (self):
powers = FLAGS.adj_pows.split(',')
has_colon = None
self._powers = []
self._ratios = []
for i, p in enumerate(powers):
if i == 0:
has_colon = (':' in p)
else:
if has_colon != (':' in p):
raise ValueError(
'Error in flag --adj_pows. Either all powers or none should '
'include ":"')
#
components = p.split(':')
self._powers.append(int(components[0]))
if has_colon:
self._ratios.append(list(map(float, components[1:])))
else:
self._ratios.append([1])
def powers(self):
return self._powers
def output_capacity(self, num_classes):
if all([len(s) == 1 and s[0] == 1 for s in self._ratios]):
return num_classes * len(self._powers)
else:
return sum([s[-1] for s in self._ratios])
def divide_capacity(self, layer_index, total_dim):
sizes = [l[min(layer_index, len(l)-1)] for l in self._ratios]
sum_units = numpy.sum(sizes)
size_per_unit = total_dim / float(sum_units)
dims = []
for s in sizes[:-1]:
dim = int(numpy.round(s * size_per_unit))
dims.append(dim)
dims.append(total_dim - sum(dims))
return dims
def main(unused_argv):
encoded_params = GetEncodedParams()
output_results_file = os.path.join(
FLAGS.results_dir, encoded_params + '.json')
output_model_file = os.path.join(
FLAGS.train_dir, encoded_params + '.pkl')
if os.path.exists(output_results_file) and not FLAGS.retrain:
print('Exiting early. Results are already computed: %s. Pass flag '
'--retrain to override' % output_results_file)
return 0
### LOAD DATASET
# The adjacency matrix is also normalized in this step
dataset = mixhop_dataset.ReadDataset(FLAGS.dataset_dir, FLAGS.dataset_name,
use_identity=FLAGS.identity_feature)
### MODEL REQUIREMENTS (Placeholders, adjacency tensor, regularizers)
x = dataset.sparse_allx_tensor()
y = tf.placeholder(tf.float32, [None, dataset.ally.shape[1]], name='y')
ph_indices = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder_with_default(True, [], name='is_training')
pows_parser = AdjacencyPowersParser() # Parses flag --adj_pows
num_x_entries = dataset.x_indices.shape[0]
sparse_adj = dataset.sparse_adj_tensor()
kernel_regularizer = keras_regularizers.l2(FLAGS.l2reg)
### BUILD MODEL
model = mixhop_model.MixHopModel(
sparse_adj, x, is_training, kernel_regularizer)
if FLAGS.architecture:
model.load_architecture_from_file(FLAGS.architecture)
else:
model.add_layer('mixhop_model', 'sparse_dropout', FLAGS.input_dropout,
num_x_entries, pass_is_training=True)
model.add_layer('tf', 'sparse_tensor_to_dense')
if FLAGS._l2_normalization:
model.add_layer('tf.nn', 'l2_normalize', axis=1)
power_parser = AdjacencyPowersParser()
layer_dims = list(map(int, FLAGS.hidden_dims_csv.split(',')))
layer_dims.append(power_parser.output_capacity(dataset.ally.shape[1]))
for j, dim in enumerate(layer_dims):
if j != 0:
model.add_layer('tf.layers', 'dropout', FLAGS.layer_dropout,
pass_training=True)
capacities = power_parser.divide_capacity(j, dim)
model.add_layer('self', 'mixhop_layer', power_parser.powers(), capacities,
layer_id=j, pass_kernel_regularizer=True)
if j != len(layer_dims) - 1:
if FLAGS._batch_normalization:
model.add_layer('tf.contrib.layers', 'batch_norm')
model.add_layer('tf.nn', FLAGS.nonlinearity)
#
model.add_layer('mixhop_model', 'psum_output_layer', dataset.ally.shape[1],
use_softmax=FLAGS._psum_output)
net = model.activations[-1]
### TRAINING.
sliced_output = tf.gather(net, ph_indices)
learn_rate = tf.placeholder(tf.float32, [], 'learn_rate')
label_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.stop_gradient(y), logits=sliced_output))
tf.losses.add_loss(label_loss)
loss = tf.losses.get_total_loss()
if FLAGS.optimizer == 'MomentumOptimizer':
    optimizer = tf.train.MomentumOptimizer(learn_rate, 0.7, use_nesterov=True)
else:
optimizer_class = getattr(tf.train, FLAGS.optimizer)
optimizer = optimizer_class(learn_rate)
train_op = slim.learning.create_train_op(
loss, optimizer, gradient_multipliers=[])
  ### CREATE SESSION
# Now that the graph is frozen
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print("Number of parameters: ",
numpy.sum([numpy.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
print({v.name: v.get_shape().as_list() for v in tf.trainable_variables()})
### PREPARE FOR TRAINING
# Get indices of {train, validate, test} nodes.
train_indices, validate_indices, test_indices = dataset.get_partition_indices(FLAGS.num_validate_nodes)
feed_dict = {y: dataset.ally[train_indices]}
dataset.populate_feed_dict(feed_dict)
LAST_STEP = collections.Counter()
accuracy_monitor = AccuracyMonitor(sess, FLAGS.early_stop_steps)
# Step function makes a single update, prints accuracies, and invokes
# accuracy_monitor to keep track of test accuracy and parameters @ best
# validation accuracy
def step(lr=None, columns=None):
if lr is not None:
feed_dict[learn_rate] = lr
i = LAST_STEP['step']
LAST_STEP['step'] += 1
feed_dict[is_training] = True
feed_dict[ph_indices] = train_indices
# Train step
train_preds, loss_value, _ = sess.run((sliced_output, label_loss, train_op), feed_dict)
if numpy.isnan(loss_value).any():
print('NaN value reached. Debug please.')
import IPython; IPython.embed()
train_accuracy = numpy.mean(
train_preds.argmax(axis=1) == dataset.ally[train_indices].argmax(axis=1))
feed_dict[is_training] = False
feed_dict[ph_indices] = test_indices
test_preds = sess.run(sliced_output, feed_dict)
test_accuracy = numpy.mean(
test_preds.argmax(axis=1) == dataset.ally[test_indices].argmax(axis=1))
feed_dict[ph_indices] = validate_indices
validate_preds = sess.run(sliced_output, feed_dict)
validate_accuracy = numpy.mean(
validate_preds.argmax(axis=1) == dataset.ally[validate_indices].argmax(axis=1))
keep_going = accuracy_monitor.mark_accuracy(validate_accuracy, test_accuracy, i)
print('%i. (loss=%g). Acc: train=%f val=%f test=%f (@ best val test=%f)' % (
i, loss_value, train_accuracy, validate_accuracy, test_accuracy,
accuracy_monitor.best[1]))
if keep_going:
return True
else:
print('Early stopping')
return False
### TRAINING LOOP
lr = FLAGS.learn_rate
lr_decrement = FLAGS.lr_decrement_ratio_of_initial * FLAGS.learn_rate
for i in range(FLAGS.num_train_steps):
if not step(lr=lr):
break
if i > 0 and i % FLAGS.lr_decrement_every == 0:
lr -= lr_decrement
if lr <= 0:
break
if not os.path.exists(FLAGS.results_dir):
os.makedirs(FLAGS.results_dir)
if not os.path.exists(FLAGS.train_dir):
os.makedirs(FLAGS.train_dir)
with open(output_results_file, 'w') as fout:
results = {
'at_best_validate': accuracy_monitor.best,
'current': accuracy_monitor.curr_accuracy,
}
fout.write(json.dumps(results))
with open(output_model_file, 'wb') as fout:
pickle.dump(accuracy_monitor.params_at_best, fout)
print('Wrote model to ' + output_model_file)
print('Wrote results to ' + output_results_file)
if __name__ == '__main__':
app.run(main)
| __init__ | identifier_name |
mixhop_trainer.py | # Standard imports.
import collections
import json
import os
import pickle
# Third-party imports.
from absl import app
from absl import flags
import numpy
import tensorflow as tf
import tensorflow.contrib.slim as slim
from tensorflow.python.keras import regularizers as keras_regularizers
# Project imports.
import mixhop_dataset
import mixhop_model
# IO Flags.
flags.DEFINE_string('dataset_dir',
os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data/planetoid/data'),
'Directory containing all datasets. We assume the format '
'of Planetoid')
flags.DEFINE_string('results_dir', 'results',
'Evaluation results will be written here.')
flags.DEFINE_string('train_dir', 'trained_models',
'Directory where trained models will be written.')
flags.DEFINE_string('run_id', '',
'Will be included in output filenames for model (in '
'--train_dir) and results (in --results_dir).')
flags.DEFINE_boolean('retrain', False,
'If set, model will retrain even if its results file '
'exists')
# Dataset Flags.
flags.DEFINE_string('dataset_name', 'ind.cora', '')
# flags.DEFINE_integer('num_train_nodes', -20,
# 'Number of training nodes. If < 0, then the number is '
# 'converted to positive and that many training nodes are '
# 'used per class. -20 recovers setting in Kipf & Welling.')
flags.DEFINE_integer('num_validate_nodes', 500, '')
# Model Architecture Flags.
flags.DEFINE_string('architecture', '',
'(Optional) path to model architecture JSON file. '
'If given, none of the architecture flags matter anymore: '
'the contents of the file will entirely specify the '
'architecture. For example, see architectures/pubmed.json')
flags.DEFINE_string('hidden_dims_csv', '60',
'Comma-separated list of hidden layer sizes.')
flags.DEFINE_string('output_layer', 'wsum',
'One of: "wsum" (weighted sum) or "fc" (fully-connected).')
flags.DEFINE_string('nonlinearity', 'relu', '')
flags.DEFINE_string('adj_pows', '1',
'Comma-separated list of Adjacency powers. Setting to "1" '
                    'recovers vanilla GCN. Setting to "0,1,2" uses '
'[A^0, A^1, A^2]. Further, you can feed as '
'"0:20:10,1:10:10", where the syntax is '
'<pow>:<capacity in layer1>:<capacity in layer2>. The '
'number of layers equals number of entries in '
'--hidden_dims_csv, plus one (for the output layer). The '
'capacities do *NOT* have to add-up to the corresponding '
'entry in hidden_dims_csv. They will be re-scaled if '
'necessary.')
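# Illustrative walk-through of the --adj_pows syntax above (a sketch, not an official
# example): "0:20:10,1:10:10" is parsed by AdjacencyPowersParser (defined below) into
# powers [0, 1] and per-layer ratios [[20.0, 10.0], [10.0, 10.0]]; output_capacity()
# then sums the last ratios (10 + 10 = 20), and divide_capacity(0, 60) splits a 60-unit
# layer as [40, 20], proportional to the layer-0 ratios 20:10.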
# Training Flags.
flags.DEFINE_integer('num_train_steps', 400, 'Number of training steps.')
flags.DEFINE_integer('early_stop_steps', 50, 'If the validation accuracy does '
'not increase for this many steps, training is halted.')
flags.DEFINE_float('l2reg', 5e-4, 'L2 Regularization on Kernels.')
flags.DEFINE_float('input_dropout', 0.7, 'Dropout applied at input layer')
flags.DEFINE_float('layer_dropout', 0.9, 'Dropout applied at hidden layers')
flags.DEFINE_string('optimizer', 'GradientDescentOptimizer',
'Name of optimizer to use. Must be member of tf.train.')
flags.DEFINE_float('learn_rate', 0.5, 'Learning Rate for the optimizer.')
flags.DEFINE_float('lr_decrement_ratio_of_initial', 0.01,
'Learning rate will be decremented by '
'this value * --learn_rate.')
flags.DEFINE_float('lr_decrement_every', 40,
'Learning rate will be decremented every this many steps.')
flags.DEFINE_bool('use_signac', False, 'Use signac and put all args into signac workspace.')
flags.DEFINE_string('signac_root', None, 'Root path for signac project.')
flags.DEFINE_bool('debug', False, 'Debug code in VS Code')
flags.DEFINE_bool("_l2_normalization", True, "")
flags.DEFINE_bool("_batch_normalization", True, "")
flags.DEFINE_bool("_psum_output", True, "")
flags.DEFINE_bool("identity_feature", False, "")
FLAGS = flags.FLAGS
def GetEncodedParams():
"""Summarizes all flag values in a string, to be used in output filenames."""
if FLAGS.debug:
import ptvsd
print("Waiting for debugger attach")
ptvsd.enable_attach(address=('localhost', 5678), redirect_output=True)
ptvsd.wait_for_attach()
breakpoint()
if FLAGS.use_signac:
import signac
project = signac.get_project(root=FLAGS.signac_root)
job = project.open_job(dict(
dataset_name=FLAGS.dataset_name, run_id=FLAGS.run_id,
optimizer=FLAGS.optimizer, learn_rate=FLAGS.learn_rate,
l2reg=FLAGS.l2reg, output_layer=FLAGS.output_layer,
nonlinearity=FLAGS.nonlinearity,
adj_powers=FLAGS.adj_pows.replace(',', 'x').replace(':', '.'),
architecture=FLAGS.architecture, identity_feature=FLAGS.identity_feature
)).init()
FLAGS.results_dir = job.fn(FLAGS.results_dir)
FLAGS.train_dir = job.fn(FLAGS.train_dir)
FLAGS.run_id = ""
params = '_'.join([
'ds-%s' % FLAGS.dataset_name,
'r-%s' % FLAGS.run_id,
'opt-%s' % FLAGS.optimizer,
'lr-%g' % FLAGS.learn_rate,
'l2-%g' % FLAGS.l2reg,
'o-%s' % FLAGS.output_layer,
'act-%s' % FLAGS.nonlinearity,
'pows-%s' % FLAGS.adj_pows.replace(',', 'x').replace(':', '.'),
])
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
return params
class AccuracyMonitor(object):
"""Monitors and remembers model parameters @ best validation accuracy."""
def __init__(self, sess, early_stop_steps):
"""Initializes AccuracyMonitor.
Args:
sess: (singleton) instance of tf.Session that is used for training.
early_stop_steps: int with number of steps to allow without any
improvement on the validation accuracy.
"""
self._early_stop_steps = early_stop_steps
self._sess = sess
# (validate accuracy, test accuracy, step #), recorded at best validate
# accuracy.
self.best = (0, 0, 0)
# Will be populated to dict of all tensorflow variable names to their values
# as numpy arrays.
self.params_at_best = None
def mark_accuracy(self, validate_accuracy, test_accuracy, i):
curr_accuracy = (float(validate_accuracy), float(test_accuracy), i)
self.curr_accuracy = curr_accuracy
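    # Note: these tuples compare lexicographically, so a new best requires a higher
    # validation accuracy, with ties broken by test accuracy and then by step index.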
if curr_accuracy > self.best:
self.best = curr_accuracy
all_variables = tf.global_variables()
all_variable_values = self._sess.run(all_variables)
params_at_best_validate = (
{var.name: val
for var, val in zip(all_variables, all_variable_values)})
self.params_at_best = params_at_best_validate
if i > self.best[-1] + self._early_stop_steps:
return False
return True
# TODO(haija): move to utils.
class AdjacencyPowersParser(object):
def __init__(self):
powers = FLAGS.adj_pows.split(',')
has_colon = None
self._powers = []
self._ratios = []
for i, p in enumerate(powers):
if i == 0:
has_colon = (':' in p)
else:
if has_colon != (':' in p):
raise ValueError(
              'Error in flag --adj_pows. Either all powers or none should '
'include ":"')
#
components = p.split(':')
self._powers.append(int(components[0]))
if has_colon:
self._ratios.append(list(map(float, components[1:])))
else:
self._ratios.append([1])
def powers(self):
return self._powers
def output_capacity(self, num_classes):
if all([len(s) == 1 and s[0] == 1 for s in self._ratios]):
return num_classes * len(self._powers)
else:
return sum([s[-1] for s in self._ratios])
def divide_capacity(self, layer_index, total_dim):
sizes = [l[min(layer_index, len(l)-1)] for l in self._ratios]
sum_units = numpy.sum(sizes)
size_per_unit = total_dim / float(sum_units)
dims = []
for s in sizes[:-1]:
dim = int(numpy.round(s * size_per_unit))
dims.append(dim)
dims.append(total_dim - sum(dims))
return dims
def main(unused_argv):
encoded_params = GetEncodedParams()
output_results_file = os.path.join(
FLAGS.results_dir, encoded_params + '.json')
output_model_file = os.path.join(
FLAGS.train_dir, encoded_params + '.pkl')
if os.path.exists(output_results_file) and not FLAGS.retrain:
print('Exiting early. Results are already computed: %s. Pass flag '
'--retrain to override' % output_results_file)
return 0
### LOAD DATASET
# The adjacency matrix is also normalized in this step
dataset = mixhop_dataset.ReadDataset(FLAGS.dataset_dir, FLAGS.dataset_name,
use_identity=FLAGS.identity_feature)
### MODEL REQUIREMENTS (Placeholders, adjacency tensor, regularizers)
x = dataset.sparse_allx_tensor()
y = tf.placeholder(tf.float32, [None, dataset.ally.shape[1]], name='y')
ph_indices = tf.placeholder(tf.int64, [None])
is_training = tf.placeholder_with_default(True, [], name='is_training')
pows_parser = AdjacencyPowersParser() # Parses flag --adj_pows
num_x_entries = dataset.x_indices.shape[0]
sparse_adj = dataset.sparse_adj_tensor()
kernel_regularizer = keras_regularizers.l2(FLAGS.l2reg)
### BUILD MODEL
model = mixhop_model.MixHopModel(
sparse_adj, x, is_training, kernel_regularizer)
if FLAGS.architecture:
model.load_architecture_from_file(FLAGS.architecture)
else:
model.add_layer('mixhop_model', 'sparse_dropout', FLAGS.input_dropout,
num_x_entries, pass_is_training=True)
model.add_layer('tf', 'sparse_tensor_to_dense')
if FLAGS._l2_normalization: | layer_dims = list(map(int, FLAGS.hidden_dims_csv.split(',')))
layer_dims.append(power_parser.output_capacity(dataset.ally.shape[1]))
for j, dim in enumerate(layer_dims):
if j != 0:
model.add_layer('tf.layers', 'dropout', FLAGS.layer_dropout,
pass_training=True)
capacities = power_parser.divide_capacity(j, dim)
model.add_layer('self', 'mixhop_layer', power_parser.powers(), capacities,
layer_id=j, pass_kernel_regularizer=True)
if j != len(layer_dims) - 1:
if FLAGS._batch_normalization:
model.add_layer('tf.contrib.layers', 'batch_norm')
model.add_layer('tf.nn', FLAGS.nonlinearity)
#
model.add_layer('mixhop_model', 'psum_output_layer', dataset.ally.shape[1],
use_softmax=FLAGS._psum_output)
net = model.activations[-1]
### TRAINING.
sliced_output = tf.gather(net, ph_indices)
learn_rate = tf.placeholder(tf.float32, [], 'learn_rate')
label_loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
labels=tf.stop_gradient(y), logits=sliced_output))
tf.losses.add_loss(label_loss)
loss = tf.losses.get_total_loss()
if FLAGS.optimizer == 'MomentumOptimizer':
    optimizer = tf.train.MomentumOptimizer(learn_rate, 0.7, use_nesterov=True)
else:
optimizer_class = getattr(tf.train, FLAGS.optimizer)
optimizer = optimizer_class(learn_rate)
train_op = slim.learning.create_train_op(
loss, optimizer, gradient_multipliers=[])
  ### CREATE SESSION
# Now that the graph is frozen
sess = tf.Session()
sess.run(tf.global_variables_initializer())
print("Number of parameters: ",
numpy.sum([numpy.prod(v.get_shape().as_list()) for v in tf.trainable_variables()]))
print({v.name: v.get_shape().as_list() for v in tf.trainable_variables()})
### PREPARE FOR TRAINING
# Get indices of {train, validate, test} nodes.
train_indices, validate_indices, test_indices = dataset.get_partition_indices(FLAGS.num_validate_nodes)
feed_dict = {y: dataset.ally[train_indices]}
dataset.populate_feed_dict(feed_dict)
LAST_STEP = collections.Counter()
accuracy_monitor = AccuracyMonitor(sess, FLAGS.early_stop_steps)
# Step function makes a single update, prints accuracies, and invokes
# accuracy_monitor to keep track of test accuracy and parameters @ best
# validation accuracy
def step(lr=None, columns=None):
if lr is not None:
feed_dict[learn_rate] = lr
i = LAST_STEP['step']
LAST_STEP['step'] += 1
feed_dict[is_training] = True
feed_dict[ph_indices] = train_indices
# Train step
train_preds, loss_value, _ = sess.run((sliced_output, label_loss, train_op), feed_dict)
if numpy.isnan(loss_value).any():
print('NaN value reached. Debug please.')
import IPython; IPython.embed()
train_accuracy = numpy.mean(
train_preds.argmax(axis=1) == dataset.ally[train_indices].argmax(axis=1))
feed_dict[is_training] = False
feed_dict[ph_indices] = test_indices
test_preds = sess.run(sliced_output, feed_dict)
test_accuracy = numpy.mean(
test_preds.argmax(axis=1) == dataset.ally[test_indices].argmax(axis=1))
feed_dict[ph_indices] = validate_indices
validate_preds = sess.run(sliced_output, feed_dict)
validate_accuracy = numpy.mean(
validate_preds.argmax(axis=1) == dataset.ally[validate_indices].argmax(axis=1))
keep_going = accuracy_monitor.mark_accuracy(validate_accuracy, test_accuracy, i)
print('%i. (loss=%g). Acc: train=%f val=%f test=%f (@ best val test=%f)' % (
i, loss_value, train_accuracy, validate_accuracy, test_accuracy,
accuracy_monitor.best[1]))
if keep_going:
return True
else:
print('Early stopping')
return False
### TRAINING LOOP
lr = FLAGS.learn_rate
lr_decrement = FLAGS.lr_decrement_ratio_of_initial * FLAGS.learn_rate
for i in range(FLAGS.num_train_steps):
if not step(lr=lr):
break
if i > 0 and i % FLAGS.lr_decrement_every == 0:
lr -= lr_decrement
if lr <= 0:
break
if not os.path.exists(FLAGS.results_dir):
os.makedirs(FLAGS.results_dir)
if not os.path.exists(FLAGS.train_dir):
os.makedirs(FLAGS.train_dir)
with open(output_results_file, 'w') as fout:
results = {
'at_best_validate': accuracy_monitor.best,
'current': accuracy_monitor.curr_accuracy,
}
fout.write(json.dumps(results))
with open(output_model_file, 'wb') as fout:
pickle.dump(accuracy_monitor.params_at_best, fout)
print('Wrote model to ' + output_model_file)
print('Wrote results to ' + output_results_file)
if __name__ == '__main__':
app.run(main) | model.add_layer('tf.nn', 'l2_normalize', axis=1)
power_parser = AdjacencyPowersParser() | random_line_split |
lib.rs | use std::fmt;
use std::time::{Duration, SystemTime, SystemTimeError};
/// Enum with the seven days of the week.
#[derive(Debug, Clone, Copy)]
pub enum Day {
Sunday,
Monday,
Tuesday,
Wednesday,
Thursday,
Friday,
Saturday,
}
/// Maps the `Day` enum to a string representation, e.g. "Monday".
pub fn day_string(day: Day) -> &'static str {
match day {
Day::Sunday => "Sunday",
Day::Monday => "Monday",
Day::Tuesday => "Tuesday",
Day::Wednesday => "Wednesday",
Day::Thursday => "Thursday",
Day::Friday => "Friday",
Day::Saturday => "Saturday",
}
}
/// Maps the `Day` enum to a shortened string representation, e.g. "Mon".
pub fn day_abbrev_string(day: Day) -> &'static str {
&day_string(day)[0..3]
}
impl fmt::Display for Day {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", day_string(*self))
}
}
/// Enum with the months of the year.
#[derive(Debug, Clone, Copy)]
pub enum Month {
January,
February,
March,
April,
May,
June,
July,
August,
September,
October,
November,
December,
}
/// Maps the `Month` enum to a string representation, e.g. "January".
pub fn month_string(month: Month) -> &'static str {
match month {
Month::January => "January",
Month::February => "February",
Month::March => "March",
Month::April => "April",
Month::May => "May",
Month::June => "June",
Month::July => "July",
Month::August => "August",
Month::September => "September",
Month::October => "October",
Month::November => "November",
Month::December => "December",
}
}
/// Maps the `Month` enum to a shortened string representation, e.g. "Jan".
pub fn month_abbrev_string(month: Month) -> &'static str {
&month_string(month)[0..3]
}
impl fmt::Display for Month {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", month_string(*self))
}
}
| if year % 400 == 0 {
366
} else if year % 100 == 0 {
365
} else if year % 4 == 0 {
366
} else {
365
}
}
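// Worked example (illustrative, not from the original source): 2000 is divisible by 400,
// so it has 366 days; 1900 hits the `% 100` branch and has 365; 2020 hits the `% 4`
// branch and has 366; 2019 falls through to 365.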
/// Takes in a year and month (e.g. 2020, February) and returns the number of days in that month.
pub fn days_in_month(year: u64, month: Month) -> u64 {
match month {
Month::January => 31,
Month::February if days_in_year(year) == 366 => 29,
Month::February => 28,
Month::March => 31,
Month::April => 30,
Month::May => 31,
Month::June => 30,
Month::July => 31,
Month::August => 31,
Month::September => 30,
Month::October => 31,
Month::November => 30,
Month::December => 31,
}
}
/// Converts a `Month` enum to an integer in the range 1-12.
pub fn index_from_month(month: Month) -> u64 {
match month {
Month::January => 1,
Month::February => 2,
Month::March => 3,
Month::April => 4,
Month::May => 5,
Month::June => 6,
Month::July => 7,
Month::August => 8,
Month::September => 9,
Month::October => 10,
Month::November => 11,
Month::December => 12,
}
}
/// Converts an integer in the range 1-12 into the corresponding `Month` enum.
/// Values outside the 1-12 range are converted to `None`.
pub fn month_from_index(index: u64) -> Option<Month> {
match index {
1 => Some(Month::January),
2 => Some(Month::February),
3 => Some(Month::March),
4 => Some(Month::April),
5 => Some(Month::May),
6 => Some(Month::June),
7 => Some(Month::July),
8 => Some(Month::August),
9 => Some(Month::September),
10 => Some(Month::October),
11 => Some(Month::November),
12 => Some(Month::December),
_ => None,
}
}
/// Returns the number of seconds in a day.
pub fn seconds_in_day() -> u64 {
24 * 60 * 60
}
/// Returns the number of seconds in an hour.
pub fn seconds_in_hour() -> u64 {
60 * 60
}
/// Returns the number of seconds in a minute.
pub fn seconds_in_minute() -> u64 {
60
}
/// Conceptually this is a thin wrapper for `std::time::SystemTime`, but provides
/// more useful functions. The impl of this struct has functions that allow easily
/// extracting the year/month/date/etc. for the given point in time. In actual fact
/// the internal representation of this struct is a `Duration` since the unix epoch,
/// so that error-handling is only required once upon creating the instance, and
/// not for each attempt at extracting date/time fields.
pub struct PostEpochTime {
delta: Duration,
}
impl PostEpochTime {
/// Create a `PostEpochTime` from a `SystemTime`. The `SystemTime` must be temporally
/// in the future relative to the unix epoch, or an error will be returned.
pub fn from(st: &SystemTime) -> Result<Self, SystemTimeError> {
Ok(PostEpochTime {
delta: st.duration_since(SystemTime::UNIX_EPOCH)?,
})
}
/// Create a `PostEpochTime` for the current instant. The current instant must be
/// in the future relative to the unix epoch, or an error will be returned.
pub fn now() -> Result<Self, SystemTimeError> {
Self::from(&SystemTime::now())
}
/// Returns the number of milliseconds passed since the unix epoch.
pub fn milliseconds_since_epoch(&self) -> u128 {
self.delta.as_millis()
}
/// Returns the number of microseconds passed since the unix epoch.
pub fn microseconds_since_epoch(&self) -> u128 {
self.delta.as_micros()
}
/// Returns the number of nanoseconds passed since the unix epoch.
pub fn nanoseconds_since_epoch(&self) -> u128 {
self.delta.as_nanos()
}
/// Returns the number of complete seconds passed since the unix epoch.
pub fn seconds_since_epoch(&self) -> u64 {
self.delta.as_secs()
}
/// Returns the number of complete days passed since the unix epoch.
pub fn days_since_epoch(&self) -> u64 {
self.delta.as_secs() / seconds_in_day()
}
/// Returns the day of the week that this point in time falls on.
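    /// The unix epoch (January 1, 1970) fell on a Thursday, which is why a remainder
    /// of 0 below maps to `Day::Thursday`.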
pub fn day_of_week(&self) -> Day {
match self.days_since_epoch() % 7 {
0 => Day::Thursday,
1 => Day::Friday,
2 => Day::Saturday,
3 => Day::Sunday,
4 => Day::Monday,
5 => Day::Tuesday,
6 => Day::Wednesday,
_ => panic!("Modulo operator is broken"),
}
}
fn year_split(&self) -> (u64, u64) {
let mut days = self.days_since_epoch();
let mut year = 1970;
loop {
let in_year = days_in_year(year);
if days < in_year {
break;
}
days -= in_year;
year += 1;
}
(year, days)
}
/// Returns the year (e.g. 2020) this point in time falls on.
pub fn year(&self) -> u64 {
self.year_split().0
}
/// Returns the day of the year for this point in time (1-indexed).
/// A return value of 1 indicates January 1, a value of 2 indicates January 2,
/// and so on. If the year is a leap year the largest returned value
/// would be 366, and for non-leap years it would be 365.
pub fn day_of_year(&self) -> u64 {
self.year_split().1 + 1
}
fn month_split(&self) -> (Month, u64) {
let (year, mut days) = self.year_split();
let mut month = Month::January;
loop {
let in_month = days_in_month(year, month);
if days < in_month {
break;
}
days -= in_month;
month =
month_from_index(index_from_month(month) + 1).expect("Month should never overflow");
}
(month, days)
}
/// Returns the month this point in time falls on.
pub fn month(&self) -> Month {
self.month_split().0
}
/// Returns the day of the month for this point in time (1-indexed).
/// A return value of 1 means it falls on the first of the month. The maximum
/// returned value will be 31.
pub fn day_of_month(&self) -> u64 {
self.month_split().1 + 1
}
/// Returns the second within the day (0-indexed). This will be in the range
/// 0..86399 (inclusive).
pub fn second_in_day(&self) -> u64 {
self.delta.as_secs() % seconds_in_day()
}
/// Returns the hour within the day (0-indexed). This will be in the range
/// 0..23 (inclusive).
pub fn hour(&self) -> u64 {
self.second_in_day() / seconds_in_hour()
}
/// Returns the second within the hour (0-indexed). This will be in the range
/// 0..3599 (inclusive).
pub fn second_in_hour(&self) -> u64 {
self.second_in_day() % seconds_in_hour()
}
/// Returns the minute within the hour (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn minute(&self) -> u64 {
self.second_in_hour() / seconds_in_minute()
}
/// Returns the second within the minute (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn second(&self) -> u64 {
self.delta.as_secs() % seconds_in_minute()
}
}
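// Usage sketch (illustrative): `PostEpochTime::now()?.year()` yields the current year,
// and the Display impl below renders e.g. "Sun, 2 Feb 2020 02:25:40", matching the
// smoke test at the end of this file.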
impl fmt::Display for PostEpochTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}, {} {} {} {:02}:{:02}:{:02}",
day_abbrev_string(self.day_of_week()),
self.day_of_month(),
month_abbrev_string(self.month()),
self.year(),
self.hour(),
self.minute(),
self.second()
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smoke_test() {
let timestamp = SystemTime::UNIX_EPOCH + Duration::new(1580610340, 123);
let pet = PostEpochTime::from(×tamp).unwrap();
assert_eq!(format!("{}", pet), "Sun, 2 Feb 2020 02:25:40".to_string());
}
} | /// Takes in a year (e.g. 2019) and returns the number of days in that year.
pub fn days_in_year(year: u64) -> u64 { | random_line_split |
lib.rs | use std::fmt;
use std::time::{Duration, SystemTime, SystemTimeError};
/// Enum with the seven days of the week.
#[derive(Debug, Clone, Copy)]
pub enum Day {
Sunday,
Monday,
Tuesday,
Wednesday,
Thursday,
Friday,
Saturday,
}
/// Maps the `Day` enum to a string representation, e.g. "Monday".
pub fn day_string(day: Day) -> &'static str {
match day {
Day::Sunday => "Sunday",
Day::Monday => "Monday",
Day::Tuesday => "Tuesday",
Day::Wednesday => "Wednesday",
Day::Thursday => "Thursday",
Day::Friday => "Friday",
Day::Saturday => "Saturday",
}
}
/// Maps the `Day` enum to a shortened string representation, e.g. "Mon".
pub fn day_abbrev_string(day: Day) -> &'static str {
&day_string(day)[0..3]
}
impl fmt::Display for Day {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", day_string(*self))
}
}
/// Enum with the months of the year.
#[derive(Debug, Clone, Copy)]
pub enum Month {
January,
February,
March,
April,
May,
June,
July,
August,
September,
October,
November,
December,
}
/// Maps the `Month` enum to a string representation, e.g. "January".
pub fn month_string(month: Month) -> &'static str {
match month {
Month::January => "January",
Month::February => "February",
Month::March => "March",
Month::April => "April",
Month::May => "May",
Month::June => "June",
Month::July => "July",
Month::August => "August",
Month::September => "September",
Month::October => "October",
Month::November => "November",
Month::December => "December",
}
}
/// Maps the `Month` enum to a shortened string representation, e.g. "Jan".
pub fn month_abbrev_string(month: Month) -> &'static str {
&month_string(month)[0..3]
}
impl fmt::Display for Month {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", month_string(*self))
}
}
/// Takes in a year (e.g. 2019) and returns the number of days in that year.
pub fn days_in_year(year: u64) -> u64 {
if year % 400 == 0 {
366
} else if year % 100 == 0 {
365
} else if year % 4 == 0 {
366
} else {
365
}
}
/// Takes in a year and month (e.g. 2020, February) and returns the number of days in that month.
pub fn days_in_month(year: u64, month: Month) -> u64 {
match month {
Month::January => 31,
Month::February if days_in_year(year) == 366 => 29,
Month::February => 28,
Month::March => 31,
Month::April => 30,
Month::May => 31,
Month::June => 30,
Month::July => 31,
Month::August => 31,
Month::September => 30,
Month::October => 31,
Month::November => 30,
Month::December => 31,
}
}
/// Converts a `Month` enum to an integer in the range 1-12.
pub fn index_from_month(month: Month) -> u64 {
match month {
Month::January => 1,
Month::February => 2,
Month::March => 3,
Month::April => 4,
Month::May => 5,
Month::June => 6,
Month::July => 7,
Month::August => 8,
Month::September => 9,
Month::October => 10,
Month::November => 11,
Month::December => 12,
}
}
/// Converts an integer in the range 1-12 into the corresponding `Month` enum.
/// Values outside the 1-12 range are converted to `None`.
pub fn month_from_index(index: u64) -> Option<Month> {
match index {
1 => Some(Month::January),
2 => Some(Month::February),
3 => Some(Month::March),
4 => Some(Month::April),
5 => Some(Month::May),
6 => Some(Month::June),
7 => Some(Month::July),
8 => Some(Month::August),
9 => Some(Month::September),
10 => Some(Month::October),
11 => Some(Month::November),
12 => Some(Month::December),
_ => None,
}
}
/// Returns the number of seconds in a day.
pub fn seconds_in_day() -> u64 {
24 * 60 * 60
}
/// Returns the number of seconds in an hour.
pub fn seconds_in_hour() -> u64 {
60 * 60
}
/// Returns the number of seconds in a minute.
pub fn seconds_in_minute() -> u64 {
60
}
/// Conceptually this is a thin wrapper for `std::time::SystemTime`, but provides
/// more useful functions. The impl of this struct has functions that allow easily
/// extracting the year/month/date/etc. for the given point in time. In actual fact
/// the internal representation of this struct is a `Duration` since the unix epoch,
/// so that error-handling is only required once upon creating the instance, and
/// not for each attempt at extracting date/time fields.
pub struct PostEpochTime {
delta: Duration,
}
impl PostEpochTime {
/// Create a `PostEpochTime` from a `SystemTime`. The `SystemTime` must be temporally
/// in the future relative to the unix epoch, or an error will be returned.
pub fn from(st: &SystemTime) -> Result<Self, SystemTimeError> {
Ok(PostEpochTime {
delta: st.duration_since(SystemTime::UNIX_EPOCH)?,
})
}
/// Create a `PostEpochTime` for the current instant. The current instant must be
/// in the future relative to the unix epoch, or an error will be returned.
pub fn now() -> Result<Self, SystemTimeError> |
/// Returns the number of milliseconds passed since the unix epoch.
pub fn milliseconds_since_epoch(&self) -> u128 {
self.delta.as_millis()
}
/// Returns the number of microseconds passed since the unix epoch.
pub fn microseconds_since_epoch(&self) -> u128 {
self.delta.as_micros()
}
/// Returns the number of nanoseconds passed since the unix epoch.
pub fn nanoseconds_since_epoch(&self) -> u128 {
self.delta.as_nanos()
}
/// Returns the number of complete seconds passed since the unix epoch.
pub fn seconds_since_epoch(&self) -> u64 {
self.delta.as_secs()
}
/// Returns the number of complete days passed since the unix epoch.
pub fn days_since_epoch(&self) -> u64 {
self.delta.as_secs() / seconds_in_day()
}
/// Returns the day of the week that this point in time falls on.
pub fn day_of_week(&self) -> Day {
match self.days_since_epoch() % 7 {
0 => Day::Thursday,
1 => Day::Friday,
2 => Day::Saturday,
3 => Day::Sunday,
4 => Day::Monday,
5 => Day::Tuesday,
6 => Day::Wednesday,
_ => panic!("Modulo operator is broken"),
}
}
fn year_split(&self) -> (u64, u64) {
let mut days = self.days_since_epoch();
let mut year = 1970;
loop {
let in_year = days_in_year(year);
if days < in_year {
break;
}
days -= in_year;
year += 1;
}
(year, days)
}
/// Returns the year (e.g. 2020) this point in time falls on.
pub fn year(&self) -> u64 {
self.year_split().0
}
/// Returns the day of the year for this point in time (1-indexed).
/// A return value of 1 indicates January 1, a value of 2 indicates January 2,
/// and so on. If the year is a leap year the largest returned value
/// would be 366, and for non-leap years it would be 365.
pub fn day_of_year(&self) -> u64 {
self.year_split().1 + 1
}
fn month_split(&self) -> (Month, u64) {
let (year, mut days) = self.year_split();
let mut month = Month::January;
loop {
let in_month = days_in_month(year, month);
if days < in_month {
break;
}
days -= in_month;
month =
month_from_index(index_from_month(month) + 1).expect("Month should never overflow");
}
(month, days)
}
/// Returns the month this point in time falls on.
pub fn month(&self) -> Month {
self.month_split().0
}
/// Returns the day of the month for this point in time (1-indexed).
/// A return value of 1 means it falls on the first of the month. The maximum
/// returned value will be 31.
pub fn day_of_month(&self) -> u64 {
self.month_split().1 + 1
}
/// Returns the second within the day (0-indexed). This will be in the range
/// 0..86399 (inclusive).
pub fn second_in_day(&self) -> u64 {
self.delta.as_secs() % seconds_in_day()
}
/// Returns the hour within the day (0-indexed). This will be in the range
/// 0..23 (inclusive).
pub fn hour(&self) -> u64 {
self.second_in_day() / seconds_in_hour()
}
/// Returns the second within the hour (0-indexed). This will be in the range
/// 0..3599 (inclusive).
pub fn second_in_hour(&self) -> u64 {
self.second_in_day() % seconds_in_hour()
}
/// Returns the minute within the hour (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn minute(&self) -> u64 {
self.second_in_hour() / seconds_in_minute()
}
/// Returns the second within the minute (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn second(&self) -> u64 {
self.delta.as_secs() % seconds_in_minute()
}
}
impl fmt::Display for PostEpochTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}, {} {} {} {:02}:{:02}:{:02}",
day_abbrev_string(self.day_of_week()),
self.day_of_month(),
month_abbrev_string(self.month()),
self.year(),
self.hour(),
self.minute(),
self.second()
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smoke_test() {
let timestamp = SystemTime::UNIX_EPOCH + Duration::new(1580610340, 123);
let pet = PostEpochTime::from(×tamp).unwrap();
assert_eq!(format!("{}", pet), "Sun, 2 Feb 2020 02:25:40".to_string());
}
}
| {
Self::from(&SystemTime::now())
} | identifier_body |
lib.rs | use std::fmt;
use std::time::{Duration, SystemTime, SystemTimeError};
/// Enum with the seven days of the week.
#[derive(Debug, Clone, Copy)]
pub enum Day {
Sunday,
Monday,
Tuesday,
Wednesday,
Thursday,
Friday,
Saturday,
}
/// Maps the `Day` enum to a string representation, e.g. "Monday".
pub fn day_string(day: Day) -> &'static str {
match day {
Day::Sunday => "Sunday",
Day::Monday => "Monday",
Day::Tuesday => "Tuesday",
Day::Wednesday => "Wednesday",
Day::Thursday => "Thursday",
Day::Friday => "Friday",
Day::Saturday => "Saturday",
}
}
/// Maps the `Day` enum to a shortened string representation, e.g. "Mon".
pub fn day_abbrev_string(day: Day) -> &'static str {
&day_string(day)[0..3]
}
impl fmt::Display for Day {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", day_string(*self))
}
}
/// Enum with the months of the year.
#[derive(Debug, Clone, Copy)]
pub enum Month {
January,
February,
March,
April,
May,
June,
July,
August,
September,
October,
November,
December,
}
/// Maps the `Month` enum to a string representation, e.g. "January".
pub fn month_string(month: Month) -> &'static str {
match month {
Month::January => "January",
Month::February => "February",
Month::March => "March",
Month::April => "April",
Month::May => "May",
Month::June => "June",
Month::July => "July",
Month::August => "August",
Month::September => "September",
Month::October => "October",
Month::November => "November",
Month::December => "December",
}
}
/// Maps the `Month` enum to a shortened string representation, e.g. "Jan".
pub fn | (month: Month) -> &'static str {
&month_string(month)[0..3]
}
impl fmt::Display for Month {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "{}", month_string(*self))
}
}
/// Takes in a year (e.g. 2019) and returns the number of days in that year.
pub fn days_in_year(year: u64) -> u64 {
if year % 400 == 0 {
366
} else if year % 100 == 0 {
365
} else if year % 4 == 0 {
366
} else {
365
}
}
/// Takes in a year and month (e.g. 2020, February) and returns the number of days in that month.
pub fn days_in_month(year: u64, month: Month) -> u64 {
match month {
Month::January => 31,
Month::February if days_in_year(year) == 366 => 29,
Month::February => 28,
Month::March => 31,
Month::April => 30,
Month::May => 31,
Month::June => 30,
Month::July => 31,
Month::August => 31,
Month::September => 30,
Month::October => 31,
Month::November => 30,
Month::December => 31,
}
}
/// Converts a `Month` enum to an integer in the range 1-12.
pub fn index_from_month(month: Month) -> u64 {
match month {
Month::January => 1,
Month::February => 2,
Month::March => 3,
Month::April => 4,
Month::May => 5,
Month::June => 6,
Month::July => 7,
Month::August => 8,
Month::September => 9,
Month::October => 10,
Month::November => 11,
Month::December => 12,
}
}
/// Converts an integer in the range 1-12 into the corresponding `Month` enum.
/// Values outside the 1-12 range are converted to `None`.
pub fn month_from_index(index: u64) -> Option<Month> {
match index {
1 => Some(Month::January),
2 => Some(Month::February),
3 => Some(Month::March),
4 => Some(Month::April),
5 => Some(Month::May),
6 => Some(Month::June),
7 => Some(Month::July),
8 => Some(Month::August),
9 => Some(Month::September),
10 => Some(Month::October),
11 => Some(Month::November),
12 => Some(Month::December),
_ => None,
}
}
/// Returns the number of seconds in a day.
pub fn seconds_in_day() -> u64 {
24 * 60 * 60
}
/// Returns the number of seconds in an hour.
pub fn seconds_in_hour() -> u64 {
60 * 60
}
/// Returns the number of seconds in a minute.
pub fn seconds_in_minute() -> u64 {
60
}
/// Conceptually this is a thin wrapper for `std::time::SystemTime`, but provides
/// more useful functions. The impl of this struct has functions that allow easily
/// extracting the year/month/date/etc. for the given point in time. In actual fact
/// the internal representation of this struct is a `Duration` since the unix epoch,
/// so that error-handling is only required once upon creating the instance, and
/// not for each attempt at extracting date/time fields.
pub struct PostEpochTime {
delta: Duration,
}
impl PostEpochTime {
/// Create a `PostEpochTime` from a `SystemTime`. The `SystemTime` must be temporally
/// in the future relative to the unix epoch, or an error will be returned.
pub fn from(st: &SystemTime) -> Result<Self, SystemTimeError> {
Ok(PostEpochTime {
delta: st.duration_since(SystemTime::UNIX_EPOCH)?,
})
}
/// Create a `PostEpochTime` for the current instant. The current instant must be
/// in the future relative to the unix epoch, or an error will be returned.
pub fn now() -> Result<Self, SystemTimeError> {
Self::from(&SystemTime::now())
}
/// Returns the number of milliseconds passed since the unix epoch.
pub fn milliseconds_since_epoch(&self) -> u128 {
self.delta.as_millis()
}
/// Returns the number of microseconds passed since the unix epoch.
pub fn microseconds_since_epoch(&self) -> u128 {
self.delta.as_micros()
}
/// Returns the number of nanoseconds passed since the unix epoch.
pub fn nanoseconds_since_epoch(&self) -> u128 {
self.delta.as_nanos()
}
/// Returns the number of complete seconds passed since the unix epoch.
pub fn seconds_since_epoch(&self) -> u64 {
self.delta.as_secs()
}
/// Returns the number of complete days passed since the unix epoch.
pub fn days_since_epoch(&self) -> u64 {
self.delta.as_secs() / seconds_in_day()
}
/// Returns the day of the week that this point in time falls on.
pub fn day_of_week(&self) -> Day {
match self.days_since_epoch() % 7 {
0 => Day::Thursday,
1 => Day::Friday,
2 => Day::Saturday,
3 => Day::Sunday,
4 => Day::Monday,
5 => Day::Tuesday,
6 => Day::Wednesday,
_ => panic!("Modulo operator is broken"),
}
}
fn year_split(&self) -> (u64, u64) {
let mut days = self.days_since_epoch();
let mut year = 1970;
loop {
let in_year = days_in_year(year);
if days < in_year {
break;
}
days -= in_year;
year += 1;
}
(year, days)
}
/// Returns the year (e.g. 2020) this point in time falls on.
pub fn year(&self) -> u64 {
self.year_split().0
}
/// Returns the day of the year for this point in time (1-indexed).
/// A return value of 1 indicates January 1, a value of 2 indicates January 2,
/// and so on. If the year is a leap year the largest returned value
/// would be 366, and for non-leap years it would be 365.
pub fn day_of_year(&self) -> u64 {
self.year_split().1 + 1
}
fn month_split(&self) -> (Month, u64) {
let (year, mut days) = self.year_split();
let mut month = Month::January;
loop {
let in_month = days_in_month(year, month);
if days < in_month {
break;
}
days -= in_month;
month =
month_from_index(index_from_month(month) + 1).expect("Month should never overflow");
}
(month, days)
}
/// Returns the month this point in time falls on.
pub fn month(&self) -> Month {
self.month_split().0
}
/// Returns the day of the month for this point in time (1-indexed).
/// A return value of 1 means it falls on the first of the month. The maximum
/// returned value will be 31.
pub fn day_of_month(&self) -> u64 {
self.month_split().1 + 1
}
/// Returns the second within the day (0-indexed). This will be in the range
/// 0..86399 (inclusive).
pub fn second_in_day(&self) -> u64 {
self.delta.as_secs() % seconds_in_day()
}
/// Returns the hour within the day (0-indexed). This will be in the range
/// 0..23 (inclusive).
pub fn hour(&self) -> u64 {
self.second_in_day() / seconds_in_hour()
}
/// Returns the second within the hour (0-indexed). This will be in the range
/// 0..3599 (inclusive).
pub fn second_in_hour(&self) -> u64 {
self.second_in_day() % seconds_in_hour()
}
/// Returns the minute within the hour (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn minute(&self) -> u64 {
self.second_in_hour() / seconds_in_minute()
}
/// Returns the second within the minute (0-indexed). This will be in the range
/// 0..59 (inclusive).
pub fn second(&self) -> u64 {
self.delta.as_secs() % seconds_in_minute()
}
}
impl fmt::Display for PostEpochTime {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(
f,
"{}, {} {} {} {:02}:{:02}:{:02}",
day_abbrev_string(self.day_of_week()),
self.day_of_month(),
month_abbrev_string(self.month()),
self.year(),
self.hour(),
self.minute(),
self.second()
)
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn smoke_test() {
let timestamp = SystemTime::UNIX_EPOCH + Duration::new(1580610340, 123);
let pet = PostEpochTime::from(×tamp).unwrap();
assert_eq!(format!("{}", pet), "Sun, 2 Feb 2020 02:25:40".to_string());
}
}
| month_abbrev_string | identifier_name |
Main.py | '''
Author: 程东洲
Date: 2021-05-19 21:33:22
LastEditTime: 2021-06-11 11:16:35
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \testcamera\testCamera03.py
'''
from typing import List
import numpy as np
from PIL import Image
import base64
import time
from aip import AipFace, face
import os
import sys
# print(sys.path)
from PyQt5.QtCore import QThread, QTimer, Qt, pyqtSignal
import cv2
from PyQt5.QtGui import QPixmap,QImage
from PyQt5.QtWidgets import QDialog, QRadioButton, QWidget,QApplication,QLabel,QHBoxLayout,QVBoxLayout,\
QPushButton,QMessageBox,QLineEdit,QGridLayout,QInputDialog
from urllib import request
######################################################################################
# The following (commented-out) paths are for the Raspberry Pi; on the Pi, enable them and comment out the block further below.
# cap_id = 0
# user_pw={ 'cheng':'123456' }
# path = 'haarcascade_frontalface_default.xml'
# user_path= "user_names.txt"
# passwd_path = 'password.txt'
# trainer_path = r'face_trainer\trainer.yml'
# data_path = 'Facedata'
# image_name = 'youtemp.png'
# The paths below may not match your machine, so adjust them as needed. When running in VS Code on a PC, enable these variables and comment out the Raspberry Pi block above.
cap_id = 0
# Path to the Haar cascade face detector
path = r'D:\Pyqt5\testcamera\haarcascade_frontalface_default.xml'
# Path to the user "database" (a text file holding a Python dict of name -> password)
user_path= r"D:\Pyqt5\testcamera\testDIR.txt"
# passwd_path = r'D:\Pyqt5\testcamera\password.txt'
# Path to the trained LBPH model
trainer_path = r'D:\Pyqt5\testcamera\trainer.yml'
# Directory where face images are collected for local (offline) recognition
data_path = r'D:\Pyqt5\testcamera\Facedata'
# Path of the snapshot used for online (Baidu API) recognition
image_name = r'D:\Pyqt5\testcamera\youtemp.png'
#######################################################################################
""" 你的 APPID AK SK """
APP_ID = ''
API_KEY = ''
SECRET_KEY = ''
client = AipFace(APP_ID, API_KEY, SECRET_KEY)
# Image encoding type
IMAGE_TYPE='BASE64'
# Fill in your user group ID
GROUP = ''
######################################################################################
# Check the network status. This check runs only once at startup and is never refreshed;
# to keep monitoring the network while the program runs, move it into a thread (threading).
# Snapshot file name
# exit_code = os.system('ping www.baidu.com')
try:
ret = request.urlopen(url="https://www.baidu.com", timeout=3.0)
exit_code = 0
print("开启网络识别")
except:
exit_code = 1
print( "开启本地识别 ")
######################################################################################
# This class manages the login logic
class Demo( QWidget ):
def __init__( self ):
super().__init__()
        # Initialize the widgets
self.resize( 800 , 500 )
self.setWindowTitle( '基于Opencv和树莓派的人脸识别系统')
self.user_name_label = QLabel( "用户名:" ,self )
self.user_line = QLineEdit( self )
self.passwd_label = QLabel( '密码:' , self )
self.passwd_line = QLineEdit( self )
self.login_button = QPushButton( 'log in' , self )
self.signin_button = QPushButton( "sign in" ,self )
self.face_recongition_button = QPushButton( "人脸识别" ,self )
self.collect_buttton = QPushButton( "人脸录入" , self )
        # Instantiate the layout objects
self.v_layout = QVBoxLayout()
self.h_layout = QHBoxLayout()
self.h_face_layout = QHBoxLayout()
self.grid_layout = QGridLayout()
        # Initialize the layouts
self.layput_init()
        # Initialize the placeholder text of the input fields
self.line_init()
        # Disable the login button until both input fields have values
self.login_input_init()
        # Wire the login button to the credential check and the sign-in button to the registration page
self.button_init()
        # Instantiate the page opened by the sign in button
self.sginin_page = Signin_Dialog()
        # Instantiate the page opened by the face recognition button
self.face_page = Face_start( )
        # Instantiate the page used to collect the face dataset
self.colleet_page = CollectPicture_Page( )
def layput_init( self ):
self.grid_layout.addWidget( self.user_name_label , 0 , 0 )
self.grid_layout.addWidget( self.user_line , 0 , 1 )
self.grid_layout.addWidget( self.passwd_label , 1 , 0 )
self.grid_layout.addWidget( self.passwd_line , 1 , 1 )
self.h_layout.addWidget( self.login_button )
self.h_layout.addWidget( self.signin_button )
self.h_face_layout.addWidget( self.face_recongition_button )
self.h_face_layout.addWidget( self.collect_buttton )
self.v_layout.addLayout( self.grid_layout )
self.v_layout.addLayout( self.h_layout )
self.v_layout.addLayout( self.h_face_layout )
self.setLayout( self.v_layout )
def line_init( self ):
self.user_line.setPlaceholderText( "请输入你的用户账号" )
self.passwd_line.setPlaceholderText( "请输入你的密码")
self.passwd_line.setEchoMode( QLineEdit.Password )
self.user_line.textChanged.connect( self.check_input )
self.passwd_line.textChanged.connect( self.check_input )
    # Check the inputs: enable the login button only when both fields have values, otherwise disable it
def check_input( self ):
if self.user_line.text() and self.passwd_line.text() :
self.login_button.setEnabled( True )
else:
self.login_button.setEnabled( False )
def button_init( self ):
self.login_button.clicked.connect( self.check_login_info )
self.signin_button.clicked.connect( self.SigninPage_exe )
self.face_recongition_button.clicked.connect( self.Face_start_exe )
self.collect_buttton.clicked.connect( self.Collect_page_exe )
def SigninPage_exe( self ):
self.sginin_page.exec( )#启动注册页面
    # Face recognition page
def Face_start_exe( self ):
self.face_page.exec( )
    # Launch the face collection page
def Collect_page_exe( self ):
self.colleet_page.exec( )
def check_login_info( self ):
f_all = open( user_path , 'r+')
read_dict = eval( f_all.read( ) )
f_all.close()
if self.user_line.text() not in read_dict:
QMessageBox.critical( self , '登录消息' , "登录失败,请填写正确的用户名" , QMessageBox.Ok )
elif read_dict[ self.user_line.text() ] == self.passwd_line.text():
QMessageBox.information( self , '登录消息' , "登录成功" , QMessageBox.Ok )
else:
QMessageBox.critical( self , '登录消息' , "登录失败,请填写正确的密码" , QMessageBox.Ok )
def login_input_init( self ):
self.login_button.setEnabled( False )
# def facepass( self ):
# get_name=self.recognize_face()#返回识别的人名
# if get_name=="unknown":
# reply = QMessageBox.information(self, '提示', '人脸识别失败', QMessageBox.Close)
# else:
# reply = QMessageBox.information(self, '提示', "欢迎您:"+get_name, QMessageBox.Ok)
# print("编写其他程序")
# #多线程进行网络监听
# class My_theard( QThread ):
# my_signal = pyqtSignal( int )
# def __init__( self ):
# super().__init__()
# def run( self ):
# while True:
# exit_code = os.system('ping www.baidu.com')
# if exit_code:
# print("----------没网,启动本地识别-------------------")
# else:
# print("----------有网,启动网络识别-------------------")
# self.my_signal.emit( exit_code )
# self.sleep( 10 )
faceCascade = cv2.CascadeClassifier( path )
class Face_start( QDialog ):
def __init__( self ):
super().__init__()
self.setWindowTitle( '人脸识别' )
self.resize( 1000 ,500 )
self.cameraLabel = QLabel( 'camera', self )
self.cameraLabel.resize(480 ,320 )
self.cameraLabel.setAlignment( Qt.AlignCenter )
self.timer_camera = QTimer()
        self.cap = cv2.VideoCapture() # initialize the camera capture object
self.recognizer = cv2.face.LBPHFaceRecognizer_create()
self.recognizer.read( trainer_path )
        # Intended behaviour: recognize for up to 10 seconds; if the confidence exceeds 60% the face is accepted and the dialog closes, otherwise recognition fails after 10 seconds
self.font = cv2.FONT_HERSHEY_SIMPLEX
        # Initialize the worker thread that monitors the network
# self.my_thread = My_theard( )
# self.my_thread.start()
fl = open( user_path , 'r+')
read_dict = eval( fl.read() )
self.names = list( read_dict.keys( ) )
fl.close()
# tag = []
# for i in range( len( names ) ) :
# tag.append( eval("False") )
self.minW = 0.1 * self.cap.get(3)
self.minH = 0.1 * self.cap.get(4)
        # Flag for the one-shot online recognition call
self.OnceBaiduAPI_flag = False
self.layout_main = QVBoxLayout()
self.layout_fun_button = QHBoxLayout()
self.layout_data_show = QHBoxLayout()
self.cameraButton = QPushButton(u'打开相机')
# self.button_close.setMinimumHeight(50)
self.layout_init()
self.slot_init()
def layout_init( self ):
self.layout_data_show.addWidget( self.cameraLabel )
self.layout_fun_button.addWidget( self.cameraButton )
# self.layout_fun_button.addStretch(1)
self.layout_main.addLayout( self.layout_data_show )
self.layout_main.addLayout( self.layout_fun_button )
self.setLayout( self.layout_main )
def slot_init(self):
self.timer_camera.timeout.connect(self.show_camera)
        # Connect signals and slots
# self.returnButton.clicked.connect(self.returnSignal)
self.cameraButton.clicked.connect(self.slotCameraButton)
# self.cameraButton.clicked.connect( self.recognize_face )
# self.my_thread.my_signal.connect( self.get_Intnet_code )
def get_Intnet_code( self , exitcode ):
self.exit_code = exitcode
    # Toggle the camera on/off
def slotCameraButton(self):
if self.timer_camera.isActive() == False:
            # Open the camera and start displaying frames
self.openCamera()
else:
            # Close the camera and clear the display
self.closeCamera()
def show_camera(self):
        if exit_code : # no network: use local OpenCV recognition
self.recognize_face()
        else: # network available: use the Baidu API
self.recognize_face_intnet()
        self.image = cv2.cvtColor(self.image,cv2.COLOR_BGR2RGB ) # convert back to RGB so the displayed colors are correct
        # PyQt display logic
showImage = QImage( self.image.data, self.image.shape[1] , self.image.shape[0], QImage.Format_RGB888 )
self.cameraLabel.setPixmap(QPixmap.fromImage(showImage))
    # Open the camera
def openCamera(self):
flag = self.cap.open( cap_id )
if flag == False:
            msg = QMessageBox.warning(self, u'Warning', u'请检测相机与电脑是否连接正确',
                QMessageBox.Ok, QMessageBox.Ok)
else:
self.timer_camera.start(30)
self.cameraButton.setText('关闭摄像头')
def face_recongnition_start( self ):
faces = faceCascade.detectMultiScale(
self.gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(20, 20) )
for (x,y,w,h) in faces:
cv2.rectangle(self.gray, (x, y), (x + w, y + w), (255,0,0),2 )
roi_gray = self.gray [y:y+h, x:x+w]
roi_color = self.image [y:y+h, x:x+w]
def recognize_face( self ):
        flag,self.image = self.cap.read() # read a frame from the video stream
        self.image = cv2.resize(self.image,(480,320)) # resize the captured frame to 480x320
        result = "unknown" # default to recognition failure
gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(int( self.minW), int( self.minH ) )
)
        face_num= None # initialize the face index
for (x, y, w, h) in faces:
cv2.rectangle( self.image , (x, y), (x + w, y + h), (0, 0 , 255), 2)
id, confidence = self.recognizer.predict(gray[y:y + h, x:x + w])
            if confidence < 100 : # LBPH confidence is a distance (lower is better); shown below as 100 - confidence
result= self.names[id]
confidencestr = "{0}%".format(round(100 - confidence))
# go_api( round(100 - confidence) , int( idnum ) , tag , names)
else:
confidencestr = "{0}%".format(round(100 - confidence))
cv2.putText( self.image, result , (x + 5, y - 5), self.font, 1, (0, 0, 255), 2 )
cv2.putText( self.image, confidencestr , (x + 5, y + h - 5), self.font, 1, (0, 0, 0), 1)
    # Helper functions for online (Baidu API) recognition
    # Convert the image file to a base64 payload
def transimage( self , image_name ):
f = open( image_name ,'rb')
img = base64.b64encode(f.read())
return img
def logging( self , name ):
curren_time = time.asctime(time.localtime(time.time()))
f = open('Log.txt','a+')
f.write("Person: " + name + " " + "Time:" + str(curren_time)+'\n')
f.close()
    # Upload to the Baidu API and search for a matching face
def go_api( self , image):
result = client.search(str(image, 'utf-8'), IMAGE_TYPE, GROUP)
if result['error_msg'] == 'SUCCESS':
name = result['result']['user_list'][0]['user_id']
score = result['result']['user_list'][0]['score']
if score > 80:
print("Welcome %s !" % name)
self.logging( name )
# recong_result=QMessageBox.information( self ,\
# "登录消息" , "识别成功,是否进入相应功能区" , QMessageBox.Ok |QMessageBox.Cancel )
# if recong_result == QMessageBox.Ok :
# pass
# else:
# self.close()
# self.closeCamera()
else:
print("Sorry...I don't know you !")
name = 'Unknow'
return name,score
if result['error_msg'] == 'pic not has face':
print('There is no face in image!')
return "NO FACE", None
else:
print(result['error_code']+' ' + result['error_code'])
return "ERROR" , None
def recognize_face_intnet( self ):
font = cv2.FONT_HERSHEY_SIMPLEX
        flag,self.image = self.cap.read() # read a frame from the video stream
        self.image = cv2.resize(self.image, (480,320) ) # resize the captured frame to 480x320
gray = cv2.cvtColor( self.image , cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.3, 5)
if self.OnceBaiduAPI_flag == False :
self.OnceBaiduAPI_flag = True
cv2.imwrite("youtemp.png", self.image )
self.name , self.score = self.go_api( self.transimage( image_name ) )
for (x,y,w,h) in faces:
cv2.rectangle(self.image,(x,y),(x+w,y+h), (0, 0 , 255), 2)
# roi_gray = gray[y:y+h, x:x+w]
roi_color = self.image[y:y+h, x:x+w]
cv2.putText(self.image, self.name , (x+5,y-5), font, 1, (255,255,255), 2 )
cv2.putText(self.image, str( self.score ), (x+5,y+h-5), font, 1, (255,255,0), 1 )
def closeCamera(self):
self.timer_camera.stop()
self.cap.release()
self.OnceBaiduAPI_flag = False
self.cameraLabel.clear()
self.cameraButton.setText('打开摄像头')
# self.my_thread.terminate()
    # Placeholder for the feature to run after a successful recognition
def Function_run( self ):
pass
# This class manages the registration logic. QDialog is used here instead of QWidget because
# QDialog provides exec() while QWidget does not: exec_() shows the window as a modal dialog,
# whereas show() is non-modal. A modal window keeps control of the program, so other windows
# can only be used after the current one is closed.
class Signin_Dialog( QDialog ):
def __init__( self ):
super().__init__()
        # Initialize the widgets
self.setWindowTitle('注册系统')
self.resize( 300 , 250 )
self.user_name_label = QLabel( "user_namer:" ,self )
self.user_line_dialog = QLineEdit( self )
self.passwd_label = QLabel( 'password:' , self )
self.passwd_line_dialog = QLineEdit( self )
self.passwd_re_label = QLabel( 're_password' , self )
self.passwd_re_line = QLineEdit( self )
self.sure_signin_botton = QPushButton( '确认' , self )
self.cancel_button = QPushButton( '取消', self )
        # Instantiate the layout objects
self.v_layout = QVBoxLayout()
self.h_layout = QHBoxLayout()
self.grid_layout = QGridLayout()
        # Initialize the layout
self.__layput_init()
        # Initialize the confirm button
self.sure_siginin_botton_init()
self.line_init()
self.sure_botton_init()
def __layput_init( self ):
self.grid_layout.addWidget( self.user_name_label , 0 , 0 )
self.grid_layout.addWidget( self.user_line_dialog , 0 , 1 )
self.grid_layout.addWidget( self.passwd_label , 1 , 0 )
self.grid_layout.addWidget( self.passwd_line_dialog , 1 , 1 )
self.grid_layout.addWidget( self.passwd_re_label , 2 , 0 )
self.grid_layout.addWidget( self.passwd_re_line , 2 , 1 )
self.h_layout.addWidget( self.sure_signin_botton )
self.h_layout.addWidget( self.cancel_button )
self.v_layout.addLayout( self.grid_layout )
self.v_layout.addLayout( self.h_layout )
self.setLayout( self.v_layout )
def line_init( self ):
self.user_line_dialog.setPlaceholderText( "请输入你的用户账号" )
self.passwd_line_dialog.setPlaceholderText( "请输入你的密码")
self.passwd_re_line.setPlaceholderText( '请再次输入你的密码' )
self.passwd_line_dialog.setEchoMode( QLineEdit.Password )
self.passwd_re_line.setEchoMode( QLineEdit.Password )
self.user_line_dialog.textChanged.connect( self.check_input )
self.passwd_line_dialog.textChanged.connect( self.check_input )
self.passwd_re_line.textChanged.connect( self.check_input )
def check_input( self ):
if self.user_line_dialog.text() and self.passwd_line_dialog.text() and self.passwd_re_line.text():
self.sure_signin_botton.setEnabled( True )
else:
self.sure_signin_botton.setEnabled( False )
def sure_siginin_botton_init( self ):
self.sure_signin_botton.setEnabled( False )
    # Connect the confirm button to the user-file check
def sure_botton_init( self ):
self.sure_signin_botton.clicked.connect( self.check_data )
# def clearText( text_path ):
# with open(text_path, 'w') as f1:
# f1.seek(0)
# f1.truncate()
# # print("清空数据")
#如果按钮按下
def check_data( self ):
#--------判断用户是否存在--------------
f_all = open( user_path , 'r+')
read_dict = eval( f_all.read() )
f_all.close()
if self.passwd_line_dialog.text( ) != self.passwd_re_line.text( ) :
QMessageBox.critical( self , '注册消息' ,'两次密码输入不一致' , QMessageBox.Ok | QMessageBox.Cancel )
elif self.user_line_dialog.text() not in read_dict :
read_dict[self.user_line_dialog.text()] = self.passwd_line_dialog.text()
# self.clearText( user_path )
with open(user_path, 'w') as f1:
f1.write( str( read_dict ) )
QMessageBox.information( self , '注册消息' , '注册成功' , QMessageBox.Ok )
self.close()
else:
QMessageBox.critical( self , '注册消息', '注册失败,操作有误' , QMessageBox.Ok )
self.user_line_dialog.clear()
self.passwd_line_dialog.clear()
self.passwd_re_line.clear()
|
class CollectPicture_Page( QDialog ):
mysignal = pyqtSignal( )
def __init__( self ):
super().__init__()
self.setWindowTitle('人脸数据集收集和训练')
self.resize( 1000 ,500 )
self.IsHome_button = QRadioButton( "本地收集" , self)
self.IsInternet_button = QRadioButton( "网络收集" ,self )
self.collect_start_button = QPushButton("开始收集", self )
self.train_run_button = QPushButton( "开始训练" ,self )
self.return_button = QPushButton( "取消" , self )
self.cameraLabel = QLabel( 'camera' ,self )
self.cameraLabel.resize( 480,320 )
self.cameraLabel.setAlignment( Qt.AlignCenter )
self.h_col_style_layout = QHBoxLayout()
self.v_col_styly_layout = QVBoxLayout()
self.h_col_layout = QHBoxLayout()
self.v_layout = QVBoxLayout()
self.cap = cv2.VideoCapture( )
self.collect_time = QTimer()
self.layout_init()
self.button_init()
self.slot_init()
def layout_init( self ):
self.h_col_style_layout.addWidget( self.IsHome_button )
self.h_col_style_layout.addWidget( self.IsInternet_button )
self.h_col_style_layout.addStretch(1)
self.h_col_layout.addWidget( self.collect_start_button )
self.h_col_layout.addWidget( self.train_run_button )
self.h_col_layout.addWidget( self.return_button )
self.v_layout.addWidget( self.cameraLabel )
self.v_layout.addLayout( self.h_col_style_layout )
self.v_layout.addLayout( self.h_col_layout )
self.setLayout( self.v_layout )
def button_init( self ):
self.return_button.clicked.connect( self.cancel_task )
self.collect_start_button.clicked.connect( self.openCamera )
self.train_run_button.clicked.connect( self.Training_faces )
self.IsHome_button.setChecked( True )
def slot_init( self ):
self.collect_time.timeout.connect( self.show_camera )
self.mysignal.connect( self.collect_signal_run )
def camera_init( self ):
self.unregisterFlag = False
self.face_detector = cv2.CascadeClassifier( path )
self.count = 0
fl = open( user_path , 'r+')
real_dict = eval( fl.read() )
names = list( real_dict.keys() )
fl.close()
self.collect_name , ok = QInputDialog.getText( self , '请输入你的名字' ,'必须是已经注册的名字!' )
if self.collect_name in names:
self.face_id = names.index( self.collect_name ) + 1
#face_id = input('\n enter user id:') #输入序号,表示某人的一些列照片
print('\n Initializing face capture. Look at the camera and wait ...')
else:
QMessageBox.warning( self ,'异常状态' , '请去注册' , QMessageBox.Ok )
self.unregisterFlag = True
def cancel_task( self ):
self.collect_time.stop()
self.cap.release()
self.cameraLabel.clear()
self.close()
#打开摄像头
def openCamera(self):
flag = self.cap.open( cap_id )
self.camera_init()
if flag == False:
            msg = QMessageBox.warning(self, u'Warning', u'请检测相机与电脑是否连接正确',
                                      QMessageBox.Ok, QMessageBox.Ok)
elif self.unregisterFlag == True:
pass
else:
self.Intnet_flag = False
self.collect_time.start(30)
def show_camera(self):
# self.face_recongnition_start()
sucess, self.img = self.cap.read()
self.collect_result = None
if self.IsHome_button.isChecked():
if self.count < 30 :
self.Collect_faces()
else:
self.collect_time.stop()
self.mysignal.emit()
else:
if self.Intnet_flag == False :
cv2.imwrite( image_name , self.img )
self.baidu_addUser()
self.collect_result = QMessageBox.information( self , '注册消息','注册完毕', QMessageBox.Ok )
if self.collect_result == QMessageBox.Ok:
self.closeCamera()
self.collect_result = None
self.img = cv2.cvtColor(self.img,cv2.COLOR_BGR2RGB ) #视频色彩转换回RGB,这样才是现实的颜色
#pyqt显示逻辑
showImage = QImage( self.img.data, self.img.shape[1] , self.img.shape[0], QImage.Format_RGB888 )
self.cameraLabel.setPixmap( QPixmap.fromImage(showImage) )
def Collect_faces( self ):
# 转为灰度图片
gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
# 检测人脸
faces = self.face_detector.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
            cv2.rectangle(self.img, (x, y), (x + w, y + h), (255, 0, 0), 2)
self.count += 1
# 保存图像,从原始照片中截取人脸尺寸
cv2.imwrite("Facedata/User." +str(self.face_id) + '.' + str(self.count) + '.jpg', gray[y: y + h, x: x + w])
def Training_faces( self ):
# 人脸数据路径
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier(path )
def getImagesAndLabels( data_path ):
imagePaths = [os.path.join( data_path , f) for f in os.listdir( data_path )] # join函数的作用?
faceSamples = []
ids = []
for imagePath in imagePaths:
PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale
img_numpy = np.array(PIL_img, 'uint8')
id = int(os.path.split(imagePath)[-1].split(".")[1])
faces = detector.detectMultiScale(img_numpy)
for (x, y, w, h) in faces:
faceSamples.append(img_numpy[y:y + h, x: x + w])
ids.append(id)
return faceSamples, ids
print('Training faces. It will take a few seconds. Wait ...')
faces, ids = getImagesAndLabels( data_path )
recognizer.train(faces, np.array(ids))
recognizer.write( trainer_path )
print("{0} faces trained. Exiting Program".format(len(np.unique(ids))))
QMessageBox.information( self , "训练消息","训练完毕" , QMessageBox.Ok )
def collect_signal_run( self ):
self.collect_result = QMessageBox.information( self , '训练' , '收集完成' , QMessageBox.Ok )
#对图片的格式进行转换
def transimage( self , image_name ):
f = open( image_name ,'rb')
img = base64.b64encode(f.read())
return img
def baidu_addUser( self ):
client.addUser( str( self.transimage(image_name) , 'utf-8') , IMAGE_TYPE, GROUP , self.collect_name )
self.Intnet_flag = True
def closeCamera(self):
self.collect_time.stop()
self.cap.release()
self.cameraLabel.clear()
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = Demo()
demo.show()
sys.exit(app.exec_()) | random_line_split | |
Main.py | '''
Author: 程东洲
Date: 2021-05-19 21:33:22
LastEditTime: 2021-06-11 11:16:35
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \testcamera\testCamera03.py
'''
from typing import List
import numpy as np
from PIL import Image
import base64
import time
from aip import AipFace, face
import os
import sys
# print(sys.path)
from PyQt5.QtCore import QThread, QTimer, Qt, pyqtSignal
import cv2
from PyQt5.QtGui import QPixmap,QImage
from PyQt5.QtWidgets import QDialog, QRadioButton, QWidget,QApplication,QLabel,QHBoxLayout,QVBoxLayout,\
QPushButton,QMessageBox,QLineEdit,QGridLayout,QInputDialog
from urllib import request
######################################################################################
# These paths are for use on the Raspberry Pi; on the Pi, enable these variables and comment out the PC block below.
# cap_id = 0
# user_pw={ 'cheng':'123456' }
# path = 'haarcascade_frontalface_default.xml'
# user_path= "user_names.txt"
# passwd_path = 'password.txt'
# trainer_path = r'face_trainer\trainer.yml'
# data_path = 'Facedata'
# image_name = 'youtemp.png'
#The paths below will not match your machine; change them to your own paths. When running in VS Code on
#a PC, enable these variables and comment out the Raspberry Pi block above.
cap_id = 0
#path to the Haar cascade face detector
path = r'D:\Pyqt5\testcamera\haarcascade_frontalface_default.xml'
#path to the user "database" (a plain-text Python dict literal)
user_path= r"D:\Pyqt5\testcamera\testDIR.txt"
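#NOTE (illustrative): the file at user_path is expected to contain a single Python dict literal that is
#read back with eval(), e.g. {'cheng': '123456'}, matching the commented-out user_pw default above.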
# passwd_path = r'D:\Pyqt5\testcamera\password.txt'
#path of the trained LBPH model
trainer_path = r'D:\Pyqt5\testcamera\trainer.yml'
#folder where locally collected face images are stored
data_path = r'D:\Pyqt5\testcamera\Facedata'
#image path used by the online (Baidu API) recognition
image_name = r'D:\Pyqt5\testcamera\youtemp.png'
#######################################################################################
""" 你的 APPID AK SK """
APP_ID = ''
API_KEY = ''
SECRET_KEY = ''
client = AipFace(APP_ID, API_KEY, SECRET_KEY)
#图像编码方式
IMAGE_TYPE='BASE64'
#填写你的用户组信息
GROUP = ''
######################################################################################
#Check the network status. This is only done once at startup and is never refreshed afterwards; to keep
#monitoring the network while the program runs, a background thread (threading) could be used.
#photo file name
# exit_code = os.system('ping www.baidu.com')
try:
ret = request.urlopen(url="https://www.baidu.com", timeout=3.0)
exit_code = 0
print("开启网络识别")
except:
exit_code = 1
print( "开启本地识别 ")
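#A rough, commented-out sketch (assumption: re-check every 30 seconds and simply overwrite exit_code)
#of the continuous check mentioned above, using the standard threading module:
# import threading
# def _watch_network():
#     global exit_code
#     try:
#         request.urlopen(url="https://www.baidu.com", timeout=3.0)
#         exit_code = 0
#     except:
#         exit_code = 1
#     threading.Timer(30.0, _watch_network).start()
# _watch_network()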
######################################################################################
#This class manages the login logic
class Demo( QWidget ):
def __init__( self ):
super().__init__()
#控件的初始化
self.resize( 800 , 500 )
self.setWindowTitle( '基于Opencv和树莓派的人脸识别系统')
self.user_name_label = QLabel( "用户名:" ,self )
self.user_line = QLineEdit( self )
self.passwd_label = QLabel( '密码:' , self )
self.passwd_line = QLineEdit( self )
self.login_button = QPushButton( 'log in' , self )
self.signin_button = QPushButton( "sign in" ,self )
self.face_recongition_button = QPushButton( "人脸识别" ,self )
self.collect_buttton = QPushButton( "人脸录入" , self )
#将布局类实例化
self.v_layout = QVBoxLayout()
self.h_layout = QHBoxLayout()
self.h_face_layout = QHBoxLayout()
self.grid_layout = QGridLayout()
#初始化布局
self.layput_init()
#初始化输入框的隐含信息
self.line_init()
#初始化登录按钮的不可用,等待登录框有值时才会启用
self.login_input_init()
#初始化按下登录按钮后与数据库的检查程序,初始化按下注册按钮的注册页面的逻辑
self.button_init()
#实例化signin按钮按下后的管理页面
self.sginin_page = Signin_Dialog()
#实例化face recongition按钮按下的管理页面
self.face_page = Face_start( )
#实例化收集人脸数据集的管理页面
self.colleet_page = CollectPicture_Page( )
def layput_init( self ):
self.grid_layout.addWidget( self.user_name_label , 0 , 0 )
self.grid_layout.addWidget( self.user_line , 0 , 1 )
self.grid_layout.addWidget( self.passwd_label , 1 , 0 )
self.grid_layout.addWidget( self.passwd_line , 1 , 1 )
self.h_layout.addWidget( self.login_button )
self.h_layout.addWidget( self.signin_button )
self.h_face_layout.addWidget( self.face_recongition_button )
self.h_face_layout.addWidget( self.collect_buttton )
self.v_layout.addLayout( self.grid_layout )
self.v_layout.addLayout( self.h_layout )
self.v_layout.addLayout( self.h_face_layout )
self.setLayout( self.v_layout )
def line_init( self ):
self.user_line.setPlaceholderText( "请输入你的用户账号" )
self.passwd_line.setPlaceholderText( "请输入你的密码")
self.passwd_line.setEchoMode( QLineEdit.Password )
self.user_line.textChanged.connect( self.check_input )
self.passwd_line.textChanged.connect( self.check_input )
#检查输入信息,两个框有值就对按钮进行使能,单一或者无值就将其停滞
def check_input( self ):
if self.user_line.text() and self.passwd_line.text() :
self.login_button.setEnabled( True )
else:
self.login_button.setEnabled( False )
def button_init( self ):
self.login_button.clicked.connect( self.check_login_info )
self.signin_button.clicked.connect( self.SigninPage_exe )
self.face_recongition_button.clicked.connect( self.Face_start_exe )
self.collect_buttton.clicked.connect( self.Collect_page_exe )
def SigninPage_exe( self ):
self.sginin_page.exec( )#启动注册页面
# 人脸识别页面
def Face_start_exe( self ):
self.face_page.exec( )
# 人脸收集页面启动
def Collect_page_exe( self ):
self.colleet_page.exec( )
def check_login_info( self ):
f_all = open( user_path , 'r+')
read_dict = eval( f_all.read( ) )
f_all.close()
if self.user_line.text() not in read_dict:
QMessageBox.critical( self , '登录消息' , "登录失败,请填写正确的用户名" , QMessageBox.Ok )
elif read_dict[ self.user_line.text() ] == self.passwd_line.text():
QMessageBox.information( self , '登录消息' , "登录成功" , QMessageBox.Ok )
else:
QMessageBox.critical( self , '登录消息' , "登录失败,请填写正确的密码" , QMessageBox.Ok )
def login_input_init( self ):
self.login_button.setEnabled( False )
# def facepass( self ):
# get_name=self.recognize_face()#返回识别的人名
# if get_name=="unknown":
# reply = QMessageBox.information(self, '提示', '人脸识别失败', QMessageBox.Close)
# else:
# reply = QMessageBox.information(self, '提示', "欢迎您:"+get_name, QMessageBox.Ok)
# print("编写其他程序")
# #多线程进行网络监听
# class My_theard( QThread ):
# my_signal = pyqtSignal( int )
# def __init__( self ):
# super().__init__()
# def run( self ):
# while True:
# exit_code = os.system('ping www.baidu.com')
# if exit_code:
# print("----------没网,启动本地识别-------------------")
# else:
# print("----------有网,启动网络识别-------------------")
# self.my_signal.emit( exit_code )
# self.sleep( 10 )
faceCascade = cv2.CascadeClassifier( path )
class Face_start( QDialog ):
def __init__( self ):
super().__init__()
self.setWindowTitle( '人脸识别' )
self.resize( 1000 ,500 )
self.cameraLabel = QLabel( 'camera', self )
self.cameraLabel.resize(480 ,320 )
self.cameraLabel.setAlignment( Qt.AlignCenter )
self.timer_camera = QTimer()
self.cap = cv2.VideoCapture() #初始化摄像头
self.recognizer = cv2.face.LBPHFaceRecognizer_create()
self.recognizer.read( trainer_path )
#识别时间10秒;如果置信度大于60%,则识别成功并退出界面;否则至10秒后识别失败并退出
self.font = cv2.FONT_HERSHEY_SIMPLEX
#初始化多线程,进行网络监听
# self.my_thread = My_theard( )
# self.my_thread.start()
fl = open( user_path , 'r+')
read_dict = eval( fl.read() )
self.names = list( read_dict.keys( ) )
fl.close()
# tag = []
# for i in range( len( names ) ) :
# tag.append( eval("False") )
self.minW = 0.1 * self.cap.get(3)
self.minH = 0.1 * self.cap.get(4)
#网络识别一次的初始化标志位
self.OnceBaiduAPI_flag = False
self.layout_main = QVBoxLayout()
self.layout_fun_button = QHBoxLayout()
self.layout_data_show = QHBoxLayout()
self.cameraButton = QPushButton(u'打开相机')
# self.button_close.setMinimumHeight(50)
self.layout_init()
self.slot_init()
def layout_init( self ):
self.layout_data_show.addWidget( self.cameraLabel )
self.layout_fun_button.addWidget( self.cameraButton )
# self.layout_fun_button.addStretch(1)
self.layout_main.addLayout( self.layout_data_show )
self.layout_main.addLayout( self.layout_fun_button )
self.setLayout( self.layout_main )
def slot_init(self):
self.timer_camera.timeout.connect(self.show_camera)
#信号和槽连接
# self.returnButton.clicked.connect(self.returnSignal)
self.cameraButton.clicked.connect(self.slotCameraButton)
# self.cameraButton.clicked.connect( self.recognize_face )
# self.my_thread.my_signal.connect( self.get_Intnet_code )
def get_Intnet_code( self , exitcode ):
self.exit_code = exitcode
#打开关闭摄像头控制
def slotCameraButton(self):
if self.timer_camera.isActive() == False:
#打开摄像头并显示图像信息
self.openCamera()
else:
#关闭摄像头并清空显示信息
self.closeCamera()
def show_camera(self):
if exit_code :#没网就用OpenCV
self.recognize_face()
else: #有网的用百度api
self.recognize_face_intnet()
self.image = cv2.cvtColor(self.image,cv2.COLOR_BGR2RGB ) #视频色彩转换回RGB,这样才是现实的颜色
#pyqt显示逻辑
showImage = QImage( self.image.data, self.image.shape[1] , self.image.shape[0], QImage.Format_RGB888 )
self.cameraLabel.setPixmap(QPixmap.fromImage(showImage))
#打开摄像头
def openCamera(self):
flag = self.cap.open( cap_id )
if flag == False:
            msg = QMessageBox.warning(self, u'Warning', u'请检测相机与电脑是否连接正确',
                                      QMessageBox.Ok, QMessageBox.Ok)
else:
self.timer_camera.start(30)
self.cameraButton.setText('关闭摄像头')
def face_recongnition_start( self ):
faces = faceCascade.detectMultiScale(
self.gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(20, 20) )
for (x,y,w,h) in faces:
            cv2.rectangle(self.gray, (x, y), (x + w, y + h), (255, 0, 0), 2)
roi_gray = self.gray [y:y+h, x:x+w]
roi_color = self.image [y:y+h, x:x+w]
def recognize_face( self ):
flag,self.image = self.cap.read() #从视频流中读取
        self.image = cv2.resize(self.image,(480,320)) #resize the captured frame to 480x320
result = "unknown" #初始化识别失败
gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(int( self.minW), int( self.minH ) )
)
face_num= None #初始化人脸序号
for (x, y, w, h) in faces:
cv2.rectangle( self.image , (x, y), (x + w, y + h), (0, 0 , 255), 2)
id, confidence = self.recognizer.predict(gray[y:y + h, x:x + w])
            if confidence < 100 :  #LBPH predict returns a distance, lower is better; shown below as (100 - confidence)%
result= self.names[id]
confidencestr = "{0}%".format(round(100 - confidence))
# go_api( round(100 - confidence) , int( idnum ) , tag , names)
else:
confidencestr = "{0}%".format(round(100 - confidence))
cv2.putText( self.image, result , (x + 5, y - 5), self.font, 1, (0, 0, 255), 2 )
cv2.putText( self.image, confidencestr , (x + 5, y + h - 5), self.font, 1, (0, 0, 0), 1)
    #helper functions for the online (network) recognition
    #convert the image file into the format expected by the API
def transimage( self , image_name ):
f = open( image_name ,'rb')
img = base64.b64encode(f.read())
return img
def logging( self , name ):
curren_time = time.asctime(time.localtime(time.time()))
f = open('Log.txt','a+')
f.write("Person: " + name + " " + "Time:" + str(curren_time)+'\n')
f.close()
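    #Example of a resulting Log.txt line (illustrative values):
    #  Person: cheng Time:Fri Jun 11 11:16:35 2021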
    #upload to the Baidu API for face search
def go_api( self , image):
result = client.search(str(image, 'utf-8'), IMAGE_TYPE, GROUP)
if result['error_msg'] == 'SUCCESS':
name = result['result']['user_list'][0]['user_id']
score = result['result']['user_list'][0]['score']
if score > 80:
print("Welcome %s !" % name)
self.logging( name )
# recong_result=QMessageBox.information( self ,\
# "登录消息" , "识别成功,是否进入相应功能区" , QMessageBox.Ok |QMessageBox.Cancel )
# if recong_result == QMessageBox.Ok :
# pass
# else:
# self.close()
# self.closeCamera()
else:
print("Sorry...I don't know you !")
name = 'Unknow'
return name,score
if result['error_msg'] == 'pic not has face':
print('There is no face in image!')
return "NO FACE", None
else:
            print(str(result['error_code']) + ' ' + result['error_msg'])
return "ERROR" , None
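    #For reference, the fields read above correspond to a simplified, illustrative response shape
    #(only the keys actually accessed in go_api are shown; the real API returns more fields):
    #  {'error_msg': 'SUCCESS', 'error_code': 0,
    #   'result': {'user_list': [{'user_id': 'cheng', 'score': 92.5}]}}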
def recognize_face_intnet( self ):
font = cv2.FONT_HERSHEY_SIMPLEX
flag,self.image = self.cap.read() #从视频流中读取
self.image = cv2.resize(self.image, (480,320) ) #把读到的帧的大小重新设置为 480*320
gray = cv2.cvtColor( self.image , cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.3, 5)
if self.OnceBaiduAPI_flag == False :
self.OnceBaiduAPI_flag = True
            cv2.imwrite( image_name , self.image )  # write to the same path that transimage/go_api read back
self.name , self.score = self.go_api( self.transimage( image_name ) )
for (x,y,w,h) in faces:
cv2.rectangle(self.image,(x,y),(x+w,y+h), (0, 0 , 255), 2)
# roi_gray = gray[y:y+h, x:x+w]
roi_color = self.image[y:y+h, x:x+w]
cv2.putText(self.image, self.name , (x+5,y-5), font, 1, (255,255,255), 2 )
cv2.putText(self.image, str( self.score ), (x+5,y+h-5), font, 1, (255,255,0), 1 )
def closeCamera(self):
self.timer_camera.stop()
self.cap.release()
self.OnceBaiduAPI_flag = False
self.cameraLabel.clear()
self.cameraButton.setText('打开摄像头')
# self.my_thread.terminate()
#这里作为功能启动区,代表着识别成功后就启动该功能
def Function_run( self ):
pass
#This class manages the registration (sign-up) logic. Why use QDialog here? A QWidget would also work,
#since both are just bare containers, but QDialog has an exec method that QWidget lacks: exec_() makes
#the window modal, while show() makes it non-modal. A modal window takes over program control; the
#other windows can only be operated again after the current one is closed.
class Signin_Dialog( QDialog ):
def __init__( self ):
super().__init__()
#控件的初始化
self.setWindowTitle('注册系统')
self.resize( 300 , 250 )
self.user_name_label = QLabel( "user_namer:" ,self )
self.user_line_dialog = QLineEdit( self )
self.passwd_label = QLabel( 'password:' , self )
self.passwd_line_dialog = QLineEdit( self )
self.passwd_re_label = QLabel( 're_password' , self )
self.passwd_re_line = QLineEdit( self )
self.sure_signin_botton = QPushButton( '确认' , self )
self.cancel_button = QPushButton( '取消', self )
#将布局类实例化
self.v_layout = QVBoxLayout()
self.h_layout = QHBoxLayout()
self.grid_layout = QGridLayout()
#将布局初始化
self.__layput_init()
#将确认按钮初始化
self.sure_siginin_botton_init()
self.line_init()
self.sure_botton_init()
def __layput_init( self ):
self.grid_layout.addWidget( self.user_name_label , 0 , 0 )
self.grid_layout.addWidget( self.user_line_dialog , 0 , 1 )
self.grid_layout.addWidget( self.passwd_label , 1 , 0 )
self.grid_layout.addWidget( self.passwd_line_dialog , 1 , 1 )
self.grid_layout.addWidget( self.passwd_re_label , 2 , 0 )
self.grid_layout.addWidget( self.passwd_re_line , 2 , 1 )
self.h_layout.addWidget( self.sure_signin_botton )
self.h_layout.addWidget( self.cancel_button )
self.v_layout.addLayout( self.grid_layout )
self.v_layout.addLayout( self.h_layout )
self.setLayout( self.v_layout )
def line_init( self ):
self.user_line_dialog.setPlaceholderText( "请输入你的用户账号" )
self.passwd_line_dialog.setPlaceholderText( "请输入你的密码")
self.passwd_re_line.setPlaceholderText( '请再次输入你的密码' )
self.passwd_line_dialog.setEchoMode( QLineEdit.Password )
self.passwd_re_line.setEchoMode( QLineEdit.Password )
self.user_line_dialog.textChanged.connect( self.check_input )
self.passwd_line_dialog.textChanged.connect( self.check_input )
self.passwd_re_line.textChanged.connect( self.check_input )
def check_input( self ):
if self.user_line_dialog.text() and self.passwd_line_dialog.text() and self.passwd_re_line.text():
self.sure_signin_botton.setEnabled( True )
else:
self.sure_signin_botton.setEnabled( False )
def sure_siginin_botton_init( self ):
self.sure_signin_botton.setEnabled( False )
#确认按钮与数据库的关联初始化
def sure_botton_init( self ):
self.sure_signin_botton.clicked.connect( self.check_data )
# def clearText( text_path ):
# with open(text_path, 'w') as f1:
# f1.seek(0)
# f1.truncate()
# # print("清空数据")
#如果按钮按下
def check_data( self ):
#--------判断用户是否存在--------------
f_all = open( user_path , 'r+')
read_dict = eval( f_all.read() )
f_all.close()
if self.passwd_line_dialog.text( ) != self.passwd_re_line.text( ) :
QMessageBox.critical( self , '注册消息' ,'两次密码输入不一致' , QMessageBox.Ok | QMessageBox.Cancel )
elif self.user_line_dialog.text() not in read_dict :
read_dict[self.user_line_dialog.text()] = self.passwd_line_dialog.text()
# self.clearText( user_path )
with open(user_path, 'w') as f1:
f1.write( str( read_dict ) )
QMessageBox.information( self , '注册消息' , '注册成功' , QMessageBox.Ok )
self.close()
else:
QMessageBox.critical( self , '注册消息', '注册失败,操作有误' , QMessageBox.Ok )
self.user_line_dialog.clear()
self.passwd_line_dialog.clear()
self.passwd_re_line.clear()
class CollectPicture_Page( QDialog ):
mysignal = pyqtSignal( )
def __init__( self ):
super().__init__()
self.setWindowTitle('人脸数据集收集和训练')
self.resize( 1000 ,500 )
self.IsHome_button = QRadioButton( "本地收集" , self)
self.IsInternet_button = QRadioButton( "网络收集" ,self )
self.collect_start_button = QPushButton("开始收集", self )
self.train_run_button = QPushButton( "开始训练" ,self )
self.return_button = QPushButton( "取消" , self )
self.cameraLabel = QLabel( 'camera' ,self )
self.cameraLabel.resize( 480,320 )
self.cameraLabel.setAlignment( Qt.AlignCenter )
self.h_col_style_layout = QHBoxLayout()
self.v_col_styly_layout = QVBoxLayout()
self.h_col_layout = QHBoxLayout()
self.v_layout = QVBoxLayout()
self.cap = cv2.VideoCapture( )
self.collect_time = QTimer()
self.layout_init()
self.button_init()
self.slot_init()
def layout_init( self ):
self.h_col_style_layout.addWidget( self.IsHome_button )
self.h_col_style_layout.addWidget( self.IsInternet_button )
self.h_col_style_layout.addStretch(1)
self.h_col_layout.addWidget( self.collect_start_button )
self.h_col_layout.addWidget( self.train_run_button )
self.h_col_layout.addWidget( self.return_button )
self.v_layout.addWidget( self.cameraLabel )
self.v_layout.addLayout( self.h_col_style_layout )
self.v_layout.addLayout( self.h_col_layout )
self.setLayout( self.v_layout )
def button_init( self ):
self.return_button.clicked.connect( self.cancel_task )
self.collect_start_button.clicked.connect( self.openCamera )
self.train_run_button.clicked.connect( self.Training_faces )
self.IsHome_button.setChecked( True )
def slot_init( self ):
self.collect_time.timeout.connect( self.show_camera )
self.mysignal.connect( self.collect_signal_run )
def camera_init( self ):
self.unregisterFlag = False
self.face_detector = cv2.CascadeClassifier( path )
self.count = 0
fl = open( user_path , | 'r+')
real_dict = eval( fl.read() )
names = list( real_dict.keys() )
fl.close()
self.collect_name , ok = QInputDialog.getText( self , '请输入你的名字' ,'必须是已经注册的名字!' )
if self.collect_name in names:
self.face_id = names.index( self.collect_name ) + 1
#face_id = input('\n enter user id:') #输入序号,表示某人的一些列照片
print('\n Initializing face capture. Look at the camera and wait ...')
else:
QMessageBox.warning( self ,'异常状态' , '请去注册' , QMessageBox.Ok )
self.unregisterFlag = True
def cancel_task( self ):
self.collect_time.stop()
self.cap.release()
self.cameraLabel.clear()
self.close()
#打开摄像头
def openCamera(self):
flag = self.cap.open( cap_id )
self.camera_init()
if flag == False:
            msg = QMessageBox.warning(self, u'Warning', u'请检测相机与电脑是否连接正确',
                                      QMessageBox.Ok, QMessageBox.Ok)
elif self.unregisterFlag == True:
pass
else:
self.Intnet_flag = False
self.collect_time.start(30)
def show_camera(self):
# self.face_recongnition_start()
sucess, self.img = self.cap.read()
self.collect_result = None
if self.IsHome_button.isChecked():
if self.count < 30 :
self.Collect_faces()
else:
self.collect_time.stop()
self.mysignal.emit()
else:
if self.Intnet_flag == False :
cv2.imwrite( image_name , self.img )
self.baidu_addUser()
self.collect_result = QMessageBox.information( self , '注册消息','注册完毕', QMessageBox.Ok )
if self.collect_result == QMessageBox.Ok:
self.closeCamera()
self.collect_result = None
self.img = cv2.cvtColor(self.img,cv2.COLOR_BGR2RGB ) #视频色彩转换回RGB,这样才是现实的颜色
#pyqt显示逻辑
showImage = QImage( self.img.data, self.img.shape[1] , self.img.shape[0], QImage.Format_RGB888 )
self.cameraLabel.setPixmap( QPixmap.fromImage(showImage) )
def Collect_faces( self ):
        # convert to a grayscale image
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        # detect faces
        faces = self.face_detector.detectMultiScale(gray, 1.3, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(self.img, (x, y), (x + w, y + h), (255, 0, 0), 2)
            self.count += 1
            # save the face region cropped out of the original frame
            cv2.imwrite("Facedata/User." + str(self.face_id) + '.' + str(self.count) + '.jpg', gray[y: y + h, x: x + w])
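        #Naming convention produced above (illustrative example): Facedata/User.3.17.jpg,
        #where the "3" is the face_id that Training_faces later recovers via split(".")[1].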
def Training_faces( self ):
# 人脸数据路径
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier(path )
def getImagesAndLabels( data_path ):
imagePaths = [os.path.join( data_path , f) for f in os.listdir( data_path )] # join函数的作用?
faceSamples = []
ids = []
for imagePath in imagePaths:
PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale
img_numpy = np.array(PIL_img, 'uint8')
id = int(os.path.split(imagePath)[-1].split(".")[1])
faces = detector.detectMultiScale(img_numpy)
for (x, y, w, h) in faces:
faceSamples.append(img_numpy[y:y + h, x: x + w])
ids.append(id)
return faceSamples, ids
print('Training faces. It will take a few seconds. Wait ...')
faces, ids = getImagesAndLabels( data_path )
recognizer.train(faces, np.array(ids))
recognizer.write( trainer_path )
print("{0} faces trained. Exiting Program".format(len(np.unique(ids))))
QMessageBox.information( self , "训练消息","训练完毕" , QMessageBox.Ok )
def collect_signal_run( self ):
self.collect_result = QMessageBox.information( self , '训练' , '收集完成' , QMessageBox.Ok )
#对图片的格式进行转换
def transimage( self , image_name ):
f = open( image_name ,'rb')
img = base64.b64encode(f.read())
return img
def baidu_addUser( self ):
client.addUser( str( self.transimage(image_name) , 'utf-8') , IMAGE_TYPE, GROUP , self.collect_name )
self.Intnet_flag = True
def closeCamera(self):
self.collect_time.stop()
self.cap.release()
self.cameraLabel.clear()
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = Demo()
demo.show()
sys.exit(app.exec_()) | identifier_body | |
Main.py | '''
Author: 程东洲
Date: 2021-05-19 21:33:22
LastEditTime: 2021-06-11 11:16:35
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \testcamera\testCamera03.py
'''
from typing import List
import numpy as np
from PIL import Image
import base64
import time
from aip import AipFace, face
import os
import sys
# print(sys.path)
from PyQt5.QtCore import QThread, QTimer, Qt, pyqtSignal
import cv2
from PyQt5.QtGui import QPixmap,QImage
from PyQt5.QtWidgets import QDialog, QRadioButton, QWidget,QApplication,QLabel,QHBoxLayout,QVBoxLayout,\
QPushButton,QMessageBox,QLineEdit,QGridLayout,QInputDialog
from urllib import request
######################################################################################
# 这些路径是使用在树莓派上的,也就是说在树莓派上将这几行变量给启用,将下面的
# cap_id = 0
# user_pw={ 'cheng':'123456' }
# path = 'haarcascade_frontalface_default.xml'
# user_path= "user_names.txt"
# passwd_path = 'password.txt'
# trainer_path = r'face_trainer\trainer.yml'
# data_path = 'Facedata'
# image_name = 'youtemp.png'
#这里的路径是不对的,请自行修改对应的路径,在电脑端的vscode上使用时就启用这些变量,将上方的变量进行注释
cap_id = 0
#人脸检测器路径
path = r'D:\Pyqt5\testcamera\haarcascade_frontalface_default.xml'
#数据库路径
user_path= r"D:\Pyqt5\testcamera\testDIR.txt"
# passwd_path = r'D:\Pyqt5\testcamera\password.txt'
#模型路径
trainer_path = r'D:\Pyqt5\testcamera\trainer.yml'
#本地识别收集图片路径
data_path = r'D:\Pyqt5\testcamera\Facedata'
#网络识别图片路径
image_name = r'D:\Pyqt5\testcamera\youtemp.png'
#######################################################################################
""" 你的 APPID AK SK """
APP_ID = ''
API_KEY = ''
SECRET_KEY = ''
client = AipFace(APP_ID, API_KEY, SECRET_KEY)
#图像编码方式
IMAGE_TYPE='BASE64'
#填写你的用户组信息
GROUP = ''
######################################################################################
#判断网络状态,我设置的也只是一开始判断一次,之后不会更换网络状态,如果想要启用程序后能无间断
#检测网络状态,可以使用threading多线程一下
#照片名字
# exit_code = os.system('ping www.baidu.com')
try:
ret = request.urlopen(url="https://www.baidu.com", timeout=3.0)
exit_code = 0
print("开启网络识别")
except:
exit_code = 1
print( "开启本地识别 ")
######################################################################################
#这个类是用来管理登录逻辑
class Demo( QWidget ):
def __init__( self ):
super().__init__()
#控件的初始化
self.resize( 800 , 500 )
self.setWindowTitle( '基于Opencv和树莓派的人脸识别系统')
self.user_name_label = QLabel( "用户名:" ,self )
self.user_line = QLineEdit( self )
self.passwd_label = QLabel( '密码:' , self )
self.passwd_line = QLineEdit( self )
self.login_button = QPushButton( 'log in' , self )
self.signin_button = QPushButton( "sign in" ,self )
self.face_recongition_button = QPushButton( "人脸识别" ,self )
self.collect_buttton = QPushButton( "人脸录入" , self )
#将布局类实例化
self.v_layout = QVBoxLayout()
self.h_layout = QHBoxLayout()
self.h_face_layout = QHBoxLayout()
self.grid_layout = QGridLayout()
#初始化布局
self.layput_init()
#初始化输入框的隐含信息
self.line_init()
#初始化登录按钮的不可用,等待登录框有值时才会启用
self.login_input_init()
#初始化按下登录按钮后与数据库的检查程序,初始化按下注册按钮的注册页面的逻辑
self.button_init()
#实例化signin按钮按下后的管理页面
self.sginin_page = Signin_Dialog()
#实例化face recongition按钮按下的管理页面
self.face_page = Face_start( )
#实例化收集人脸数据集的管理页面
self.colleet_page = CollectPicture_Page( )
def layput_init( self ):
self.grid_layout.addWidget( self.user_name_label , 0 , 0 )
self.grid_layout.addWidget( self.user_line , 0 , 1 )
self.grid_layout.addWidget( self.passwd_label , 1 , 0 )
self.grid_layout.addWidget( self.passwd_line , 1 , 1 )
self.h_layout.addWidget( self.login_button )
self.h_layout.addWidget( self.signin_button )
self.h_face_layout.addWidget( self.face_recongition_button )
self.h_face_layout.addWidget( self.collect_buttton )
self.v_layout.addLayout( self.grid_layout )
self.v_layout.addLayout( self.h_layout )
self.v_layout.addLayout( self.h_face_layout )
self.setLayout( self.v_layout )
def line_init( self ):
self.user_line.setPlaceholderText( "请输入你的用户账号" )
self.passwd_line.setPlaceholderText( "请输入你的密码")
self.passwd_line.setEchoMode( QLineEdit.Password )
self.user_line.textChanged.connect( self.check_input )
self.passwd_line.textChanged.connect( self.check_input )
#检查输入信息,两个框有值就对按钮进行使能,单一或者无值就将其停滞
def check_input( self ):
if self.user_line.text() and self.passwd_line.text() :
self.login_button.setEnabled( True )
else:
self.login_button.setEnabled( False )
def button_init( self ):
self.login_button.clicked.connect( self.check_login_info )
self.signin_button.clicked.connect( self.SigninPage_exe )
self.face_recongition_button.clicked.connect( self.Face_start_exe )
self.collect_buttton.clicked.connect( self.Collect_page_exe )
def SigninPage_exe( self ):
self.sginin_page.exec( )#启动注册页面
# 人脸识别页面
def Face_start_exe( self ):
self.face_page.exec( )
# 人脸收集页面启动
def Collect_page_exe( self ):
self.colleet_page.exec( )
def check_login_info( self ):
f_all = open( user_path , 'r+')
read_dict = eval( f_all.read( ) )
f_all.close()
if self.user_line.text() not in read_dict:
QMessageBox.critical( self , '登录消息' , "登录失败,请填写正确的用户名" , QMessageBox.Ok )
elif read_dict[ self.user_line.text() ] == self.passwd_line.text():
QMessageBox.information( self , '登录消息' , "登录成功" , QMessageBox.Ok )
else:
QMessageBox.critical( self , '登录消息' , "登录失败,请填写正确的密码" , QMessageBox.Ok )
def login_input_init( self ):
self.login_button.setEnabled( False )
# def facepass( self ):
# get_name=self.recognize_face()#返回识别的人名
# if get_name=="unknown":
# reply = QMessageBox.information(self, '提示', '人脸识别失败', QMessageBox.Close)
# else:
# reply = QMessageBox.information(self, '提示', "欢迎您:"+get_name, QMessageBox.Ok)
# print("编写其他程序")
# #多线程进行网络监听
# class My_theard( QThread ):
# my_signal = pyqtSignal( int )
# def __init__( self ):
# super().__init__()
# def run( self ):
# while True:
# exit_code = os.system('ping www.baidu.com')
# if exit_code:
# print("----------没网,启动本地识别-------------------")
# else:
# print("----------有网,启动网络识别-------------------")
# self.my_signal.emit( exit_code )
# self.sleep( 10 )
faceCascade = cv2.CascadeClassifier( path )
class Face_start( QDialog ):
def __init__( self ):
super().__init__()
self.setWindowTitle( '人脸识别' )
self.resize( 1000 ,500 )
self.cameraLabel = QLabel( 'camera', self )
self.cameraLabel.resize(480 ,320 )
self.cameraLabel.setAlignment( Qt.AlignCenter )
self.timer_camera = QTimer()
self.cap = cv2.VideoCapture() #初始化摄像头
self.recognizer = cv2.face.LBPHFaceRecognizer_create()
self.recognizer.read( trainer_path )
#识别时间10秒;如果置信度大于60%,则识别成功并退出界面;否则至10秒后识别失败并退出
self.font = cv2.FONT_HERSHEY_SIMPLEX
#初始化多线程,进行网络监听
# self.my_thread = My_theard( )
# self.my_thread.start()
fl = open( user_path , 'r+')
read_dict = eval( fl.read() )
self.names = list( read_dict.keys( ) )
fl.close()
# tag = []
# for i in range( len( names ) ) :
# tag.append( eval("False") )
self.minW = 0.1 * self.cap.get(3)
self.minH = 0.1 * self.cap.get(4)
#网络识别一次的初始化标志位
self.OnceBaiduAPI_flag = False
self.layout_main = QVBoxLayout()
self.layout_fun_button = QHBoxLayout()
self.layout_data_show = QHBoxLayout()
self.cameraButton = QPushButton(u'打开相机')
# self.button_close.setMinimumHeight(50)
self.layout_init()
self.slot_init()
def layout_init( self ):
self.layout_data_show.addWidget( self.cameraLabel )
self.layout_fun_button.addWidget( self.cameraButton )
# self.layout_fun_button.addStretch(1)
self.layout_main.addLayout( self.layout_data_show )
self.layout_main.addLayout( self.layout_fun_button )
self.setLayout( self.layout_main )
def slot_init(self):
self.timer_camera.timeout.connect(self.show_camera)
#信号和槽连接
# self.returnButton.clicked.connect(self.returnSignal)
self.cameraButton.clicked.connect(self.slotCameraButton)
# self.cameraButton.clicked.connect( self.recognize_face )
# self.my_thread.my_signal.connect( self.get_Intnet_code )
def get_Intnet_code( self , exitcode ):
self.exit_code = exitcode
#打开关闭摄像头控制
def slotCameraButton(self):
if self.timer_camera.isActive() == False:
#打开摄像头并显示图像信息
self.openCamera()
else:
#关闭摄像头并清空显示信息
self.closeCamera()
def show_camera(self):
if exit_code :#没网就用OpenCV
self.recognize_face()
else: #有网的用百度api
self.recognize_face_intnet()
self.image = cv2.cvtCol | ge,cv2.COLOR_BGR2RGB ) #视频色彩转换回RGB,这样才是现实的颜色
#pyqt显示逻辑
showImage = QImage( self.image.data, self.image.shape[1] , self.image.shape[0], QImage.Format_RGB888 )
self.cameraLabel.setPixmap(QPixmap.fromImage(showImage))
#打开摄像头
def openCamera(self):
flag = self.cap.open( cap_id )
if flag == False:
            msg = QMessageBox.warning(self, u'Warning', u'请检测相机与电脑是否连接正确',
                                      QMessageBox.Ok, QMessageBox.Ok)
else:
self.timer_camera.start(30)
self.cameraButton.setText('关闭摄像头')
def face_recongnition_start( self ):
faces = faceCascade.detectMultiScale(
self.gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(20, 20) )
for (x,y,w,h) in faces:
            cv2.rectangle(self.gray, (x, y), (x + w, y + h), (255, 0, 0), 2)
roi_gray = self.gray [y:y+h, x:x+w]
roi_color = self.image [y:y+h, x:x+w]
def recognize_face( self ):
flag,self.image = self.cap.read() #从视频流中读取
        self.image = cv2.resize(self.image,(480,320)) #resize the captured frame to 480x320
result = "unknown" #初始化识别失败
gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(int( self.minW), int( self.minH ) )
)
face_num= None #初始化人脸序号
for (x, y, w, h) in faces:
cv2.rectangle( self.image , (x, y), (x + w, y + h), (0, 0 , 255), 2)
id, confidence = self.recognizer.predict(gray[y:y + h, x:x + w])
            if confidence < 100 :  #LBPH predict returns a distance, lower is better; shown below as (100 - confidence)%
result= self.names[id]
confidencestr = "{0}%".format(round(100 - confidence))
# go_api( round(100 - confidence) , int( idnum ) , tag , names)
else:
confidencestr = "{0}%".format(round(100 - confidence))
cv2.putText( self.image, result , (x + 5, y - 5), self.font, 1, (0, 0, 255), 2 )
cv2.putText( self.image, confidencestr , (x + 5, y + h - 5), self.font, 1, (0, 0, 0), 1)
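    #Note on the numbers above (illustrative): recognizer.predict returns an LBPH distance, so a
    #confidence of 35 is displayed as "65%"; smaller distances mean closer matches.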
#网络识别的工具函数
#对图片的格式进行转换
def transimage( self , image_name ):
f = open( image_name ,'rb')
img = base64.b64encode(f.read())
return img
def logging( self , name ):
curren_time = time.asctime(time.localtime(time.time()))
f = open('Log.txt','a+')
f.write("Person: " + name + " " + "Time:" + str(curren_time)+'\n')
f.close()
#上传到百度api进行人脸检测
def go_api( self , image):
result = client.search(str(image, 'utf-8'), IMAGE_TYPE, GROUP)
if result['error_msg'] == 'SUCCESS':
name = result['result']['user_list'][0]['user_id']
score = result['result']['user_list'][0]['score']
if score > 80:
print("Welcome %s !" % name)
self.logging( name )
# recong_result=QMessageBox.information( self ,\
# "登录消息" , "识别成功,是否进入相应功能区" , QMessageBox.Ok |QMessageBox.Cancel )
# if recong_result == QMessageBox.Ok :
# pass
# else:
# self.close()
# self.closeCamera()
else:
print("Sorry...I don't know you !")
name = 'Unknow'
return name,score
if result['error_msg'] == 'pic not has face':
print('There is no face in image!')
return "NO FACE", None
else:
            print(str(result['error_code']) + ' ' + result['error_msg'])
return "ERROR" , None
def recognize_face_intnet( self ):
font = cv2.FONT_HERSHEY_SIMPLEX
flag,self.image = self.cap.read() #从视频流中读取
self.image = cv2.resize(self.image, (480,320) ) #把读到的帧的大小重新设置为 480*320
gray = cv2.cvtColor( self.image , cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.3, 5)
if self.OnceBaiduAPI_flag == False :
self.OnceBaiduAPI_flag = True
            cv2.imwrite( image_name , self.image )  # write to the same path that transimage/go_api read back
self.name , self.score = self.go_api( self.transimage( image_name ) )
for (x,y,w,h) in faces:
cv2.rectangle(self.image,(x,y),(x+w,y+h), (0, 0 , 255), 2)
# roi_gray = gray[y:y+h, x:x+w]
roi_color = self.image[y:y+h, x:x+w]
cv2.putText(self.image, self.name , (x+5,y-5), font, 1, (255,255,255), 2 )
cv2.putText(self.image, str( self.score ), (x+5,y+h-5), font, 1, (255,255,0), 1 )
def closeCamera(self):
self.timer_camera.stop()
self.cap.release()
self.OnceBaiduAPI_flag = False
self.cameraLabel.clear()
self.cameraButton.setText('打开摄像头')
# self.my_thread.terminate()
#这里作为功能启动区,代表着识别成功后就启动该功能
def Function_run( self ):
pass
#这个类主要是管理注册逻辑,这里为什么要用QDialog呢,当然也可以用Qwidget,这俩都是毛坯房,但是
#QDialog有exec方法,Qwidget是没有的。exec_()方法可以让窗口成为模态窗口,而调用show()方法,
#窗口是非模态的。模态窗口将程序控制权占据,只有对当前窗口关闭后才能操作其他窗口;
class Signin_Dialog( QDialog ):
def __init__( self ):
super().__init__()
#控件的初始化
self.setWindowTitle('注册系统')
self.resize( 300 , 250 )
self.user_name_label = QLabel( "user_namer:" ,self )
self.user_line_dialog = QLineEdit( self )
self.passwd_label = QLabel( 'password:' , self )
self.passwd_line_dialog = QLineEdit( self )
self.passwd_re_label = QLabel( 're_password' , self )
self.passwd_re_line = QLineEdit( self )
self.sure_signin_botton = QPushButton( '确认' , self )
self.cancel_button = QPushButton( '取消', self )
#将布局类实例化
self.v_layout = QVBoxLayout()
self.h_layout = QHBoxLayout()
self.grid_layout = QGridLayout()
#将布局初始化
self.__layput_init()
#将确认按钮初始化
self.sure_siginin_botton_init()
self.line_init()
self.sure_botton_init()
def __layput_init( self ):
self.grid_layout.addWidget( self.user_name_label , 0 , 0 )
self.grid_layout.addWidget( self.user_line_dialog , 0 , 1 )
self.grid_layout.addWidget( self.passwd_label , 1 , 0 )
self.grid_layout.addWidget( self.passwd_line_dialog , 1 , 1 )
self.grid_layout.addWidget( self.passwd_re_label , 2 , 0 )
self.grid_layout.addWidget( self.passwd_re_line , 2 , 1 )
self.h_layout.addWidget( self.sure_signin_botton )
self.h_layout.addWidget( self.cancel_button )
self.v_layout.addLayout( self.grid_layout )
self.v_layout.addLayout( self.h_layout )
self.setLayout( self.v_layout )
def line_init( self ):
self.user_line_dialog.setPlaceholderText( "请输入你的用户账号" )
self.passwd_line_dialog.setPlaceholderText( "请输入你的密码")
self.passwd_re_line.setPlaceholderText( '请再次输入你的密码' )
self.passwd_line_dialog.setEchoMode( QLineEdit.Password )
self.passwd_re_line.setEchoMode( QLineEdit.Password )
self.user_line_dialog.textChanged.connect( self.check_input )
self.passwd_line_dialog.textChanged.connect( self.check_input )
self.passwd_re_line.textChanged.connect( self.check_input )
def check_input( self ):
if self.user_line_dialog.text() and self.passwd_line_dialog.text() and self.passwd_re_line.text():
self.sure_signin_botton.setEnabled( True )
else:
self.sure_signin_botton.setEnabled( False )
def sure_siginin_botton_init( self ):
self.sure_signin_botton.setEnabled( False )
#确认按钮与数据库的关联初始化
def sure_botton_init( self ):
self.sure_signin_botton.clicked.connect( self.check_data )
# def clearText( text_path ):
# with open(text_path, 'w') as f1:
# f1.seek(0)
# f1.truncate()
# # print("清空数据")
#如果按钮按下
def check_data( self ):
#--------判断用户是否存在--------------
f_all = open( user_path , 'r+')
read_dict = eval( f_all.read() )
f_all.close()
if self.passwd_line_dialog.text( ) != self.passwd_re_line.text( ) :
QMessageBox.critical( self , '注册消息' ,'两次密码输入不一致' , QMessageBox.Ok | QMessageBox.Cancel )
elif self.user_line_dialog.text() not in read_dict :
read_dict[self.user_line_dialog.text()] = self.passwd_line_dialog.text()
# self.clearText( user_path )
with open(user_path, 'w') as f1:
f1.write( str( read_dict ) )
QMessageBox.information( self , '注册消息' , '注册成功' , QMessageBox.Ok )
self.close()
else:
QMessageBox.critical( self , '注册消息', '注册失败,操作有误' , QMessageBox.Ok )
self.user_line_dialog.clear()
self.passwd_line_dialog.clear()
self.passwd_re_line.clear()
class CollectPicture_Page( QDialog ):
mysignal = pyqtSignal( )
def __init__( self ):
super().__init__()
self.setWindowTitle('人脸数据集收集和训练')
self.resize( 1000 ,500 )
self.IsHome_button = QRadioButton( "本地收集" , self)
self.IsInternet_button = QRadioButton( "网络收集" ,self )
self.collect_start_button = QPushButton("开始收集", self )
self.train_run_button = QPushButton( "开始训练" ,self )
self.return_button = QPushButton( "取消" , self )
self.cameraLabel = QLabel( 'camera' ,self )
self.cameraLabel.resize( 480,320 )
self.cameraLabel.setAlignment( Qt.AlignCenter )
self.h_col_style_layout = QHBoxLayout()
self.v_col_styly_layout = QVBoxLayout()
self.h_col_layout = QHBoxLayout()
self.v_layout = QVBoxLayout()
self.cap = cv2.VideoCapture( )
self.collect_time = QTimer()
self.layout_init()
self.button_init()
self.slot_init()
def layout_init( self ):
self.h_col_style_layout.addWidget( self.IsHome_button )
self.h_col_style_layout.addWidget( self.IsInternet_button )
self.h_col_style_layout.addStretch(1)
self.h_col_layout.addWidget( self.collect_start_button )
self.h_col_layout.addWidget( self.train_run_button )
self.h_col_layout.addWidget( self.return_button )
self.v_layout.addWidget( self.cameraLabel )
self.v_layout.addLayout( self.h_col_style_layout )
self.v_layout.addLayout( self.h_col_layout )
self.setLayout( self.v_layout )
def button_init( self ):
self.return_button.clicked.connect( self.cancel_task )
self.collect_start_button.clicked.connect( self.openCamera )
self.train_run_button.clicked.connect( self.Training_faces )
self.IsHome_button.setChecked( True )
def slot_init( self ):
self.collect_time.timeout.connect( self.show_camera )
self.mysignal.connect( self.collect_signal_run )
def camera_init( self ):
self.unregisterFlag = False
self.face_detector = cv2.CascadeClassifier( path )
self.count = 0
fl = open( user_path , 'r+')
real_dict = eval( fl.read() )
names = list( real_dict.keys() )
fl.close()
self.collect_name , ok = QInputDialog.getText( self , '请输入你的名字' ,'必须是已经注册的名字!' )
if self.collect_name in names:
self.face_id = names.index( self.collect_name ) + 1
#face_id = input('\n enter user id:') #输入序号,表示某人的一些列照片
print('\n Initializing face capture. Look at the camera and wait ...')
else:
QMessageBox.warning( self ,'异常状态' , '请去注册' , QMessageBox.Ok )
self.unregisterFlag = True
def cancel_task( self ):
self.collect_time.stop()
self.cap.release()
self.cameraLabel.clear()
self.close()
#打开摄像头
def openCamera(self):
flag = self.cap.open( cap_id )
self.camera_init()
if flag == False:
            msg = QMessageBox.warning(self, u'Warning', u'请检测相机与电脑是否连接正确',
                                      QMessageBox.Ok, QMessageBox.Ok)
elif self.unregisterFlag == True:
pass
else:
self.Intnet_flag = False
self.collect_time.start(30)
def show_camera(self):
# self.face_recongnition_start()
sucess, self.img = self.cap.read()
self.collect_result = None
if self.IsHome_button.isChecked():
if self.count < 30 :
self.Collect_faces()
else:
self.collect_time.stop()
self.mysignal.emit()
else:
if self.Intnet_flag == False :
cv2.imwrite( image_name , self.img )
self.baidu_addUser()
self.collect_result = QMessageBox.information( self , '注册消息','注册完毕', QMessageBox.Ok )
if self.collect_result == QMessageBox.Ok:
self.closeCamera()
self.collect_result = None
self.img = cv2.cvtColor(self.img,cv2.COLOR_BGR2RGB ) #视频色彩转换回RGB,这样才是现实的颜色
#pyqt显示逻辑
showImage = QImage( self.img.data, self.img.shape[1] , self.img.shape[0], QImage.Format_RGB888 )
self.cameraLabel.setPixmap( QPixmap.fromImage(showImage) )
def Collect_faces( self ):
# 转为灰度图片
gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
# 检测人脸
faces = self.face_detector.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
            cv2.rectangle(self.img, (x, y), (x + w, y + h), (255, 0, 0), 2)
self.count += 1
# 保存图像,从原始照片中截取人脸尺寸
cv2.imwrite("Facedata/User." +str(self.face_id) + '.' + str(self.count) + '.jpg', gray[y: y + h, x: x + w])
def Training_faces( self ):
# 人脸数据路径
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier(path )
def getImagesAndLabels( data_path ):
imagePaths = [os.path.join( data_path , f) for f in os.listdir( data_path )] # join函数的作用?
faceSamples = []
ids = []
for imagePath in imagePaths:
PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale
img_numpy = np.array(PIL_img, 'uint8')
id = int(os.path.split(imagePath)[-1].split(".")[1])
faces = detector.detectMultiScale(img_numpy)
for (x, y, w, h) in faces:
faceSamples.append(img_numpy[y:y + h, x: x + w])
ids.append(id)
return faceSamples, ids
print('Training faces. It will take a few seconds. Wait ...')
faces, ids = getImagesAndLabels( data_path )
recognizer.train(faces, np.array(ids))
recognizer.write( trainer_path )
print("{0} faces trained. Exiting Program".format(len(np.unique(ids))))
QMessageBox.information( self , "训练消息","训练完毕" , QMessageBox.Ok )
def collect_signal_run( self ):
self.collect_result = QMessageBox.information( self , '训练' , '收集完成' , QMessageBox.Ok )
#对图片的格式进行转换
def transimage( self , image_name ):
f = open( image_name ,'rb')
img = base64.b64encode(f.read())
return img
def baidu_addUser( self ):
client.addUser( str( self.transimage(image_name) , 'utf-8') , IMAGE_TYPE, GROUP , self.collect_name )
self.Intnet_flag = True
def closeCamera(self):
self.collect_time.stop()
self.cap.release()
self.cameraLabel.clear()
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = Demo()
demo.show()
sys.exit(app.exec_()) | or(self.ima | identifier_name |
Main.py | '''
Author: 程东洲
Date: 2021-05-19 21:33:22
LastEditTime: 2021-06-11 11:16:35
LastEditors: Please set LastEditors
Description: In User Settings Edit
FilePath: \testcamera\testCamera03.py
'''
from typing import List
import numpy as np
from PIL import Image
import base64
import time
from aip import AipFace, face
import os
import sys
# print(sys.path)
from PyQt5.QtCore import QThread, QTimer, Qt, pyqtSignal
import cv2
from PyQt5.QtGui import QPixmap,QImage
from PyQt5.QtWidgets import QDialog, QRadioButton, QWidget,QApplication,QLabel,QHBoxLayout,QVBoxLayout,\
QPushButton,QMessageBox,QLineEdit,QGridLayout,QInputDialog
from urllib import request
######################################################################################
# 这些路径是使用在树莓派上的,也就是说在树莓派上将这几行变量给启用,将下面的
# cap_id = 0
# user_pw={ 'cheng':'123456' }
# path = 'haarcascade_frontalface_default.xml'
# user_path= "user_names.txt"
# passwd_path = 'password.txt'
# trainer_path = r'face_trainer\trainer.yml'
# data_path = 'Facedata'
# image_name = 'youtemp.png'
#这里的路径是不对的,请自行修改对应的路径,在电脑端的vscode上使用时就启用这些变量,将上方的变量进行注释
cap_id = 0
#人脸检测器路径
path = r'D:\Pyqt5\testcamera\haarcascade_frontalface_default.xml'
#数据库路径
user_path= r"D:\Pyqt5\testcamera\testDIR.txt"
# passwd_path = r'D:\Pyqt5\testcamera\password.txt'
#模型路径
trainer_path = r'D:\Pyqt5\testcamera\trainer.yml'
#本地识别收集图片路径
data_path = r'D:\Pyqt5\testcamera\Facedata'
#网络识别图片路径
image_name = r'D:\Pyqt5\testcamera\youtemp.png'
#######################################################################################
""" 你的 APPID AK SK """
APP_ID = ''
API_KEY = ''
SECRET_KEY = ''
client = AipFace(APP_ID, API_KEY, SECRET_KEY)
#图像编码方式
IMAGE_TYPE='BASE64'
#填写你的用户组信息
GROUP = ''
######################################################################################
#判断网络状态,我设置的也只是一开始判断一次,之后不会更换网络状态,如果想要启用程序后能无间断
#检测网络状态,可以使用threading多线程一下
#照片名字
# exit_code = os.system('ping www.baidu.com')
try:
ret = request.urlopen(url="https://www.baidu.com", timeout=3.0)
exit_code = 0
print("开启网络识别")
except:
exit_code = 1
print( "开启本地识别 ")
######################################################################################
#这个类是用来管理登录逻辑
class Demo( QWidget ):
def __init__( self ):
super().__init__()
#控件的初始化
self.resize( 800 , 500 )
self.setWindowTitle( '基于Opencv和树莓派的人脸识别系统')
self.user_name_label = QLabel( "用户名:" ,self )
self.user_line = QLineEdit( self )
self.passwd_label = QLabel( '密码:' , self )
self.passwd_line = QLineEdit( self )
self.login_button = QPushButton( 'log in' , self )
self.signin_button = QPushButton( "sign in" ,self )
self.face_recongition_button = QPushButton( "人脸识别" ,self )
self.collect_buttton = QPushButton( "人脸录入" , self )
#将布局类实例化
self.v_layout = QVBoxLayout()
self.h_layout = QHBoxLayout()
self.h_face_layout = QHBoxLayout()
self.grid_layout = QGridLayout()
#初始化布局
self.layput_init()
#初始化输入框的隐含信息
self.line_init()
#初始化登录按钮的不可用,等待登录框有值时才会启用
self.login_input_init()
#初始化按下登录按钮后与数据库的检查程序,初始化按下注册按钮的注册页面的逻辑
self.button_init()
#实例化signin按钮按下后的管理页面
self.sginin_page = Signin_Dialog()
#实例化face recongition按钮按下的管理页面
self.face_page = Face_start( )
#实例化收集人脸数据集的管理页面
self.colleet_page = CollectPicture_Page( )
def layput_init( self ):
self.grid_layout.addWidget( self.user_name_label , 0 , 0 )
self.grid_layout.addWidget( self.user_line , 0 , 1 )
self.grid_layout.addWidget( self.passwd_label , 1 , 0 )
self.grid_layout.addWidget( self.passwd_line , 1 , 1 )
self.h_layout.addWidget( self.login_button )
self.h_layout.addWidget( self.signin_button )
self.h_face_layout.addWidget( self.face_recongition_button )
self.h_face_layout.addWidget( self.collect_buttton )
self.v_layout.addLayout( self.grid_layout )
self.v_layout.addLayout( self.h_layout )
self.v_layout.addLayout( self.h_face_layout )
self.setLayout( self.v_layout )
def line_init( self ):
self.user_line.setPlaceholderText( "请输入你的用户账号" )
self.passwd_line.setPlaceholderText( "请输入你的密码")
self.passwd_line.setEchoMode( QLineEdit.Password )
self.user_line.textChanged.connect( self.check_input )
self.passwd_line.textChanged.connect( self.check_input )
#检查输入信息,两个框有值就对按钮进行使能,单一或者无值就将其停滞
def check_input( self ):
if self.user_line.text() and self.passwd_line.text() :
self.login_button.setEnabled( True )
else:
self.login_button.setEnabled( False )
def button_init( self ):
self.login_button.clicked.connect( self.check_login_info )
self.signin_button.clicked.connect( self.SigninPage_exe )
self.face_recongition_button.clicked.connect( self.Face_start_exe )
self.collect_buttton.clicked.connect( self.Collect_page_exe )
def SigninPage_exe( self ):
self.sginin_page.exec( )#启动注册页面
# 人脸识别页面
def Face_start_exe( self ):
self.face_page.exec( )
# 人脸收集页面启动
def Collect_page_exe( self ):
self.colleet_page.exec( )
def check_login_info( self ):
f_all = open( user_path , 'r+')
read_dict = eval( f_all.read( ) )
f_all.close()
if self.user_line.text() not in read_dict:
QMessageBox.critical( self , '登录消息' , "登录失败,请填写正确的用户名" , QMessageBox.Ok )
elif read_dict[ self.user_line.text() ] == self.passwd_line.text():
QMessageBox.information( self , '登录消息' , "登录成功" , QMessageBox.Ok )
else:
QMessageBox.critical( self , '登录消息' , "登录失败,请填写正确的密码" , QMessageBox.Ok )
def login_input_init( self ):
self.login_button.setEnabled( False )
# def facepass( self ):
# get_name=self.recognize_face()#返回识别的人名
# if get_name=="unknown":
# reply = QMessageBox.information(self, '提示', '人脸识别失败', QMessageBox.Close)
# else:
# reply = QMessageBox.information(self, '提示', "欢迎您:"+get_name, QMessageBox.Ok)
# print("编写其他程序")
# #多线程进行网络监听
# class My_theard( QThread ):
# my_signal = pyqtSignal( int )
# def __init__( self ):
# super().__init__()
# def run( self ):
# while True:
# exit_code = os.system('ping www.baidu.com')
# if exit_code:
# print("----------没网,启动本地识别-------------------")
# else:
# print("----------有网,启动网络识别-------------------")
# self.my_signal.emit( exit_code )
# self.sleep( 10 )
faceCascade = cv2.CascadeClassifier( path )
class Face_start( QDialog ):
def __init__( self ):
super().__init__()
self.setWindowTitle( '人脸识别' )
self.resize( 1000 ,500 )
self.cameraLabel = QLabel( 'camera', self )
self.cameraLabel.resize(480 ,320 )
self.cameraLabel.setAlignment( Qt.AlignCenter )
self.timer_camera = QTimer()
self.cap = cv2.VideoCapture() #初始化摄像头
self.recognizer = cv2.face.LBPHFaceRecognizer_create()
self.recognizer.read( trainer_path )
#识别时间10秒;如果置信度大于60%,则识别成功并退出界面;否则至10秒后识别失败并退出
self.font = cv2.FONT_HERSHEY_SIMPLEX
#初始化多线程,进行网络监听
# self.my_thread = My_theard( )
# self.my_thread.start()
fl = open( user_path , 'r+')
read_dict = eval( fl.read() )
self.names = list( read_dict.keys( ) )
fl.close()
# tag = []
# for i in range( len( names ) ) :
# tag.append( eval("False") )
self.minW = 0.1 * self.cap.get(3)
self.minH = 0.1 * self.cap.get(4)
#网络识别一次的初始化标志位
self.OnceBaiduAPI_flag = False
self.layout_main = QVBoxLayout()
self.layout_fun_button = QHBoxLayout()
self.layout_data_show = QHBoxLayout()
self.cameraButton = QPushButton(u'打开相机')
# self.button_close.setMinimumHeight(50)
self.layout_init()
self.slot_init()
def layout_init( self ):
self.layout_data_show.addWidget( self.cameraLabel )
self.layout_fun_button.addWidget( self.cameraButton )
# self.layout_fun_button.addStretch(1)
self.layout_main.addLayout( self.layout_data_show )
self.layout_main.addLayout( self.layout_fun_button )
self.setLayout( self.layout_main )
def slot_init(self):
self.timer_camera.timeout.connect(self.show_camera)
#信号和槽连接
# self.returnButton.clicked.connect(self.returnSignal)
self.cameraButton.clicked.connect(self.slotCameraButton)
# self.cameraButton.clicked.connect( self.recognize_face )
# self.my_thread.my_signal.connect( self.get_Intnet_code )
def get_Intnet_code( self , exitcode ):
self.exit_code = exitcode
#打开关闭摄像头控制
def slotCameraButton(self):
if self.timer_camera.isActive() == False:
#打开摄像头并显示图像信息
self.openCamera()
else:
#关闭摄像头并清空显示信息
self.closeCamera()
def show_camera(self):
if exit_code :#没网就用OpenCV
self.recognize_face()
else: #有网的用百度api
self.recognize_face_intnet()
self.image = cv2.cvtColor(self.image,cv2.COLOR_BGR2RGB ) #视频色彩转换回RGB,这样才是现实的颜色
#pyqt显示逻辑
showImage = QImage( self.image.data, self.image.shape[1] , self.image.shape[0], QImage.Format_RGB888 )
self.cameraLabel.setPixmap(QPixmap.fromImage(showImage))
#打开摄像头
def openCamera(self):
flag = self.cap.open( cap_id )
if flag == False:
            msg = QMessageBox.warning(self, u'Warning', u'请检测相机与电脑是否连接正确',
                                      QMessageBox.Ok, QMessageBox.Ok)
else:
self.timer_camera.start(30)
self.cameraButton.setText('关闭摄像头')
def face_recongnition_start( self ):
faces = faceCascade.detectMultiScale(
self.gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(20, 20) )
for (x,y,w,h) in faces:
            cv2.rectangle(self.gray, (x, y), (x + w, y + h), (255, 0, 0), 2)
roi_gray = self.gray [y:y+h, x:x+w]
roi_color = self.image [y:y+h, x:x+w]
def recognize_face( self ):
flag,self.image = self.cap.read() #从视频流中读取
        self.image = cv2.resize(self.image,(480,320)) #resize the captured frame to 480x320
result = "unknown" #初始化识别失败
gray = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(
gray,
scaleFactor=1.2,
minNeighbors=5,
minSize=(int( self.minW), int( self.minH ) )
)
face_num= None #初始化人脸序号
for (x, y, w, h) in faces:
cv2.rectangle( self.image , (x, y), (x + w, y + h), (0, 0 , 255), 2)
id, confidence = self.recognizer.predict(gray[y:y + h, x:x + w])
            if confidence < 100 :  #LBPH predict returns a distance, lower is better; shown below as (100 - confidence)%
result= self.names[id]
confidencestr = "{0}%".format(round(100 - confidence))
# go_api( round(100 - confidence) , int( idnum ) , tag , names)
else:
confidencestr = "{0}%".format(round(100 - confidence))
cv2.putText( self.image, result , (x + 5, y - 5), self.font, 1, (0, 0, 255), 2 )
cv2.putText( self.image, confidencestr , (x + 5, y + h - 5), self.font, 1, (0, 0, 0), 1)
# Helper functions for online (Baidu API) recognition
# Convert the image file into the format expected by the API
def transimage( self , image_name ):
f = open( image_name ,'rb')
img = base64.b64encode(f.read())
return img
def logging( self , name ):
curren_time = time.asctime(time.localtime(time.time()))
f = open('Log.txt','a+')
f.write("Person: " + name + " " + "Time:" + str(curren_time)+'\n')
f.close()
# Upload the image to the Baidu API for face search
def go_api( self , image):
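# `client`, IMAGE_TYPE and GROUP are assumed to be module-level Baidu AipFace settings defined earlier in the file (not shown in this excerpt)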
result = client.search(str(image, 'utf-8'), IMAGE_TYPE, GROUP)
if result['error_msg'] == 'SUCCESS':
name = result['result']['user_list'][0]['user_id']
score = result['result']['user_list'][0]['score']
if score > 80:
print("Welcome %s !" % name)
self.logging( name )
# recong_result=QMessageBox.information( self ,\
# "登录消息" , "识别成功,是否进入相应功能区" , QMessageBox.Ok |QMessageBox.Cancel )
# if recong_result == QMessageBox.Ok :
# pass
# else:
# self.close()
# self.closeCamera()
else:
print("Sorry...I don't know you !")
name = 'Unknown'
return name,score
if result['error_msg'] == 'pic not has face':
print('There is no face in image!')
return "NO FACE", None
else:
print(str(result['error_code']) + ' ' + result['error_msg'])
return "ERROR" , None
def recognize_face_intnet( self ):
font = cv2.FONT_HERSHEY_SIMPLEX
flag, self.image = self.cap.read()  # Read a frame from the video stream
self.image = cv2.resize(self.image, (480, 320))  # Resize the captured frame to 480x320
gray = cv2.cvtColor( self.image , cv2.COLOR_BGR2GRAY)
faces = faceCascade.detectMultiScale(gray, 1.3, 5)
if self.OnceBaiduAPI_flag == False :
self.OnceBaiduAPI_flag = True
cv2.imwrite("youtemp.png", self.image )
self.name , self.score = self.go_api( self.transimage( image_name ) )
for (x,y,w,h) in faces:
cv2.rectangle(self.image,(x,y),(x+w,y+h), (0, 0 , 255), 2)
# roi_gray = gray[y:y+h, x:x+w]
roi_color = self.image[y:y+h, x:x+w]
| Launched once recognition has succeeded
def Function_run( self ):
pass
# This class manages the registration logic. Why QDialog instead of QWidget? Both are bare containers, but QDialog
# provides exec_(), which QWidget lacks. Calling exec_() makes the window modal, while show() makes it non-modal.
# A modal window takes over program control: other windows cannot be operated until the current one is closed.
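# A minimal sketch of the difference (illustrative only, not part of this app's flow):
#     dlg = QDialog()
#     dlg.exec_()   # blocks here; control returns only after the dialog is closed
#     w = QWidget()
#     w.show()      # returns immediately; the rest of the UI stays interactive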
class Signin_Dialog( QDialog ):
def __init__( self ):
super().__init__()
# Initialize the widgets
self.setWindowTitle('注册系统')
self.resize( 300 , 250 )
self.user_name_label = QLabel( "user_name:" ,self )
self.user_line_dialog = QLineEdit( self )
self.passwd_label = QLabel( 'password:' , self )
self.passwd_line_dialog = QLineEdit( self )
self.passwd_re_label = QLabel( 're_password' , self )
self.passwd_re_line = QLineEdit( self )
self.sure_signin_botton = QPushButton( '确认' , self )
self.cancel_button = QPushButton( '取消', self )
# Instantiate the layout objects
self.v_layout = QVBoxLayout()
self.h_layout = QHBoxLayout()
self.grid_layout = QGridLayout()
# Initialize the layout
self.__layput_init()
# Initialize the confirm button
self.sure_siginin_botton_init()
self.line_init()
self.sure_botton_init()
def __layput_init( self ):
self.grid_layout.addWidget( self.user_name_label , 0 , 0 )
self.grid_layout.addWidget( self.user_line_dialog , 0 , 1 )
self.grid_layout.addWidget( self.passwd_label , 1 , 0 )
self.grid_layout.addWidget( self.passwd_line_dialog , 1 , 1 )
self.grid_layout.addWidget( self.passwd_re_label , 2 , 0 )
self.grid_layout.addWidget( self.passwd_re_line , 2 , 1 )
self.h_layout.addWidget( self.sure_signin_botton )
self.h_layout.addWidget( self.cancel_button )
self.v_layout.addLayout( self.grid_layout )
self.v_layout.addLayout( self.h_layout )
self.setLayout( self.v_layout )
def line_init( self ):
self.user_line_dialog.setPlaceholderText( "请输入你的用户账号" )
self.passwd_line_dialog.setPlaceholderText( "请输入你的密码")
self.passwd_re_line.setPlaceholderText( '请再次输入你的密码' )
self.passwd_line_dialog.setEchoMode( QLineEdit.Password )
self.passwd_re_line.setEchoMode( QLineEdit.Password )
self.user_line_dialog.textChanged.connect( self.check_input )
self.passwd_line_dialog.textChanged.connect( self.check_input )
self.passwd_re_line.textChanged.connect( self.check_input )
def check_input( self ):
if self.user_line_dialog.text() and self.passwd_line_dialog.text() and self.passwd_re_line.text():
self.sure_signin_botton.setEnabled( True )
else:
self.sure_signin_botton.setEnabled( False )
def sure_siginin_botton_init( self ):
self.sure_signin_botton.setEnabled( False )
# Connect the confirm button to the user-store check
def sure_botton_init( self ):
self.sure_signin_botton.clicked.connect( self.check_data )
# def clearText( text_path ):
# with open(text_path, 'w') as f1:
# f1.seek(0)
# f1.truncate()
# # print("清空数据")
# Runs when the confirm button is pressed
def check_data( self ):
# -------- Check whether the user already exists --------
f_all = open( user_path , 'r+')
read_dict = eval( f_all.read() )
f_all.close()
if self.passwd_line_dialog.text( ) != self.passwd_re_line.text( ) :
QMessageBox.critical( self , '注册消息' ,'两次密码输入不一致' , QMessageBox.Ok | QMessageBox.Cancel )
elif self.user_line_dialog.text() not in read_dict :
read_dict[self.user_line_dialog.text()] = self.passwd_line_dialog.text()
# self.clearText( user_path )
with open(user_path, 'w') as f1:
f1.write( str( read_dict ) )
QMessageBox.information( self , '注册消息' , '注册成功' , QMessageBox.Ok )
self.close()
else:
QMessageBox.critical( self , '注册消息', '注册失败,操作有误' , QMessageBox.Ok )
self.user_line_dialog.clear()
self.passwd_line_dialog.clear()
self.passwd_re_line.clear()
class CollectPicture_Page( QDialog ):
mysignal = pyqtSignal( )
def __init__( self ):
super().__init__()
self.setWindowTitle('人脸数据集收集和训练')
self.resize( 1000 ,500 )
self.IsHome_button = QRadioButton( "本地收集" , self)
self.IsInternet_button = QRadioButton( "网络收集" ,self )
self.collect_start_button = QPushButton("开始收集", self )
self.train_run_button = QPushButton( "开始训练" ,self )
self.return_button = QPushButton( "取消" , self )
self.cameraLabel = QLabel( 'camera' ,self )
self.cameraLabel.resize( 480,320 )
self.cameraLabel.setAlignment( Qt.AlignCenter )
self.h_col_style_layout = QHBoxLayout()
self.v_col_styly_layout = QVBoxLayout()
self.h_col_layout = QHBoxLayout()
self.v_layout = QVBoxLayout()
self.cap = cv2.VideoCapture( )
self.collect_time = QTimer()
self.layout_init()
self.button_init()
self.slot_init()
def layout_init( self ):
self.h_col_style_layout.addWidget( self.IsHome_button )
self.h_col_style_layout.addWidget( self.IsInternet_button )
self.h_col_style_layout.addStretch(1)
self.h_col_layout.addWidget( self.collect_start_button )
self.h_col_layout.addWidget( self.train_run_button )
self.h_col_layout.addWidget( self.return_button )
self.v_layout.addWidget( self.cameraLabel )
self.v_layout.addLayout( self.h_col_style_layout )
self.v_layout.addLayout( self.h_col_layout )
self.setLayout( self.v_layout )
def button_init( self ):
self.return_button.clicked.connect( self.cancel_task )
self.collect_start_button.clicked.connect( self.openCamera )
self.train_run_button.clicked.connect( self.Training_faces )
self.IsHome_button.setChecked( True )
def slot_init( self ):
self.collect_time.timeout.connect( self.show_camera )
self.mysignal.connect( self.collect_signal_run )
def camera_init( self ):
self.unregisterFlag = False
self.face_detector = cv2.CascadeClassifier( path )
self.count = 0
fl = open( user_path , 'r+')
real_dict = eval( fl.read() )
names = list( real_dict.keys() )
fl.close()
self.collect_name , ok = QInputDialog.getText( self , '请输入你的名字' ,'必须是已经注册的名字!' )
if self.collect_name in names:
self.face_id = names.index( self.collect_name ) + 1
# face_id = input('\n enter user id:')  # index identifying this person's series of photos
print('\n Initializing face capture. Look at the camera and wait ...')
else:
QMessageBox.warning( self ,'异常状态' , '请去注册' , QMessageBox.Ok )
self.unregisterFlag = True
def cancel_task( self ):
self.collect_time.stop()
self.cap.release()
self.cameraLabel.clear()
self.close()
# Open the camera
def openCamera(self):
flag = self.cap.open( cap_id )
self.camera_init()
if flag == False:
msg = QMessageBox.warning(self, u'Warning', u'请检测相机与电脑是否连接正确',\
buttons=QMessageBox.Ok,
defaultButton=QMessageBox.Ok)
elif self.unregisterFlag == True:
pass
else:
self.Intnet_flag = False
self.collect_time.start(30)
def show_camera(self):
# self.face_recongnition_start()
sucess, self.img = self.cap.read()
self.collect_result = None
if self.IsHome_button.isChecked():
if self.count < 30 :
self.Collect_faces()
else:
self.collect_time.stop()
self.mysignal.emit()
else:
if self.Intnet_flag == False :
cv2.imwrite( image_name , self.img )
self.baidu_addUser()
self.collect_result = QMessageBox.information( self , '注册消息','注册完毕', QMessageBox.Ok )
if self.collect_result == QMessageBox.Ok:
self.closeCamera()
self.collect_result = None
self.img = cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB)  # Convert the frame back to RGB so the displayed colors are correct
# PyQt display logic
showImage = QImage( self.img.data, self.img.shape[1] , self.img.shape[0], QImage.Format_RGB888 )
self.cameraLabel.setPixmap( QPixmap.fromImage(showImage) )
def Collect_faces( self ):
# Convert to grayscale
gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
# Detect faces
faces = self.face_detector.detectMultiScale(gray, 1.3, 5)
for (x, y, w, h) in faces:
cv2.rectangle(self.img, (x, y), (x + w, y + h), (255, 0, 0) , 2 )
self.count += 1
# Save the face region cropped from the original frame
cv2.imwrite("Facedata/User." +str(self.face_id) + '.' + str(self.count) + '.jpg', gray[y: y + h, x: x + w])
def Training_faces( self ):
# Path to the face data
recognizer = cv2.face.LBPHFaceRecognizer_create()
detector = cv2.CascadeClassifier(path )
def getImagesAndLabels( data_path ):
imagePaths = [os.path.join( data_path , f) for f in os.listdir( data_path )]  # os.path.join builds the full path to each image file
faceSamples = []
ids = []
for imagePath in imagePaths:
PIL_img = Image.open(imagePath).convert('L') # convert it to grayscale
img_numpy = np.array(PIL_img, 'uint8')
id = int(os.path.split(imagePath)[-1].split(".")[1])
faces = detector.detectMultiScale(img_numpy)
for (x, y, w, h) in faces:
faceSamples.append(img_numpy[y:y + h, x: x + w])
ids.append(id)
return faceSamples, ids
print('Training faces. It will take a few seconds. Wait ...')
faces, ids = getImagesAndLabels( data_path )
recognizer.train(faces, np.array(ids))
recognizer.write( trainer_path )
print("{0} faces trained. Exiting Program".format(len(np.unique(ids))))
QMessageBox.information( self , "训练消息","训练完毕" , QMessageBox.Ok )
def collect_signal_run( self ):
self.collect_result = QMessageBox.information( self , '训练' , '收集完成' , QMessageBox.Ok )
# Convert the image file to base64 for the API
def transimage( self , image_name ):
f = open( image_name ,'rb')
img = base64.b64encode(f.read())
return img
def baidu_addUser( self ):
client.addUser( str( self.transimage(image_name) , 'utf-8') , IMAGE_TYPE, GROUP , self.collect_name )
self.Intnet_flag = True
def closeCamera(self):
self.collect_time.stop()
self.cap.release()
self.cameraLabel.clear()
if __name__ == '__main__':
app = QApplication(sys.argv)
demo = Demo()
demo.show()
sys.exit(app.exec_()) | cv2.putText(self.image, self.name , (x+5,y-5), font, 1, (255,255,255), 2 )
cv2.putText(self.image, str( self.score ), (x+5,y+h-5), font, 1, (255,255,0), 1 )
def closeCamera(self):
self.timer_camera.stop()
self.cap.release()
self.OnceBaiduAPI_flag = False
self.cameraLabel.clear()
self.cameraButton.setText('打开摄像头')
# self.my_thread.terminate()
# This serves as the feature launch area, | conditional_block
SPT_AGN_emcee_sampler_MPI.py | """
SPT_AGN_emcee_sampler_MPI.py
Author: Benjamin Floyd
This script will perform the Bayesian analysis on the SPT-AGN data to produce the posterior probability distributions
for all fitting parameters.
"""
import json
import os
from argparse import ArgumentParser
from time import time
import astropy.units as u
import emcee
import numpy as np
from astropy.cosmology import FlatLambdaCDM
from custom_math import trap_weight # Custom trapezoidal integration
from schwimmbad import MPIPool
from scipy.interpolate import lagrange
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
# Set up the luminosity and density evolution using the fits from Assef+11 Table 2
z_i = [0.25, 0.5, 1., 2., 4.]
m_star_z_i = [-23.51, -24.64, -26.10, -27.08]
phi_star_z_i = [-3.41, -3.73, -4.17, -4.65, -5.77]
m_star = lagrange(z_i[1:], m_star_z_i)
log_phi_star = lagrange(z_i, phi_star_z_i)
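# m_star and log_phi_star are Lagrange interpolating polynomials (numpy poly1d) in redshift for M*_J and log10(phi*)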
def luminosity_function(abs_mag, redshift):
|
def model_rate_opted(params, cluster_id, r_r500, j_mag, integral=False):
"""
Our generating model.
Parameters
----------
params : tuple
Tuple of (theta, eta, zeta, beta, rc, C)
cluster_id : str
SPT ID of our cluster in the catalog dictionary
r_r500 : array-like
A vector of radii of objects within the cluster normalized by the cluster's r500
j_mag : array-like
A vector of J-band absolute magnitudes to be used in the luminosity function
integral : bool, optional
Flag indicating if the luminosity function factor of the model should be integrated. Defaults to `False`.
Returns
-------
model
A surface density profile of objects as a function of radius and luminosity.
"""
if args.cluster_only:
# Unpack our parameters
theta, eta, zeta, beta, rc = params
# Set background parameter to 0
C = 0
elif args.background_only:
# Unpack our parameters
C, = params
# Set all other parameters to 0
theta, eta, zeta, beta, rc = [0.]*5
else:
# Unpack our parameters
theta, eta, zeta, beta, rc, C = params
# Extract our data from the catalog dictionary
z = catalog_dict[cluster_id]['redshift']
m = catalog_dict[cluster_id]['m500']
r500 = catalog_dict[cluster_id]['r500']
# Luminosity function number
if integral:
lum_funct_value = np.trapz(luminosity_function(j_mag, z), j_mag)
else:
lum_funct_value = luminosity_function(j_mag, z)
if args.no_luminosity or args.poisson_only:
LF = 1
else:
LF = cosmo.angular_diameter_distance(z) ** 2 * r500 * lum_funct_value
# Convert our background surface density from angular units into units of r500^-2
background = (C / u.arcmin ** 2) * cosmo.arcsec_per_kpc_proper(z).to(u.arcmin / u.Mpc) ** 2 * r500 ** 2
# Our amplitude is determined from the cluster data
a = theta * (1 + z) ** eta * (m / (1e15 * u.Msun)) ** zeta * LF
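# Isothermal beta-model surface-density profile scaled by the cluster amplitude, plus the uniform background term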
model = a * (1 + (r_r500 / rc) ** 2) ** (-1.5 * beta + 0.5) + background
return model.value
# Set our log-likelihood
def lnlike(param):
lnlike_list = []
for cluster_id in catalog_dict:
# Get the good pixel fraction for this cluster
gpf_all = catalog_dict[cluster_id]['gpf_rall']
# Get the radial positions of the AGN
radial_r500_maxr = catalog_dict[cluster_id]['radial_r500_maxr']
# Get the completeness weights for the AGN
completeness_weight_maxr = catalog_dict[cluster_id]['completeness_weight_maxr']
# Get the AGN sample degrees of membership
if args.no_selection_membership or args.poisson_only:
agn_membership = 1
else:
agn_membership = catalog_dict[cluster_id]['agn_membership_maxr']
# Get the J-band absolute magnitudes
j_band_abs_mag = catalog_dict[cluster_id]['j_abs_mag']
# Get the radial mesh for integration
rall = catalog_dict[cluster_id]['rall']
# Get the luminosity mesh for integration
jall = catalog_dict[cluster_id]['jall']
# Compute the completeness ratio for this cluster
if args.no_completeness or args.poisson_only:
completeness_ratio = 1.
else:
completeness_ratio = len(completeness_weight_maxr) / np.sum(completeness_weight_maxr)
# Compute the model rate at the locations of the AGN.
ni = model_rate_opted(param, cluster_id, radial_r500_maxr, j_band_abs_mag)
# Compute the full model along the radial direction.
# The completeness weight is set to `1` as the model in the integration is assumed to be complete.
n_mesh = model_rate_opted(param, cluster_id, rall, jall, integral=True)
# Use a spatial poisson point-process log-likelihood
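# ln L = sum_i ln(rate at AGN i) - integral of the rate over the cluster field (inhomogeneous Poisson point process)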
cluster_lnlike = (np.sum(np.log(ni * radial_r500_maxr) * agn_membership)
- completeness_ratio * trap_weight(n_mesh * 2 * np.pi * rall, rall, weight=gpf_all))
lnlike_list.append(cluster_lnlike)
total_lnlike = np.sum(lnlike_list)
return total_lnlike
# For our prior, we will choose uninformative priors for all our parameters and for the constant field value we will use
# a gaussian distribution set by the values obtained from the SDWFS data set.
def lnprior(params):
# Set our hyperparameters
# h_rc = 0.25
# h_rc_err = 0.1
h_C = 0.333
h_C_err = 0.024
# Extract our parameters
if args.cluster_only:
theta, eta, zeta, beta, rc = params
C = 0.
elif args.background_only:
C, = params
theta, eta, zeta, beta, rc = [0.]*5
else:
theta, eta, zeta, beta, rc, C = params
# Define all priors
if (0.0 <= theta <= np.inf and
-6. <= eta <= 6. and
-3. <= zeta <= 3. and
-3. <= beta <= 3. and
0.05 <= rc <= 0.5 and
0.0 <= C < np.inf):
theta_lnprior = 0.0
eta_lnprior = 0.0
beta_lnprior = 0.0
zeta_lnprior = 0.0
# rc_lnprior = -0.5 * np.sum((rc - h_rc) ** 2 / h_rc_err ** 2)
rc_lnprior = 0.0
if args.cluster_only:
C_lnprior = 0.
else:
C_lnprior = -0.5 * np.sum((C - h_C) ** 2 / h_C_err ** 2)
# C_lnprior = 0.0
else:
theta_lnprior = -np.inf
eta_lnprior = -np.inf
beta_lnprior = -np.inf
zeta_lnprior = -np.inf
rc_lnprior = -np.inf
C_lnprior = -np.inf
# Assuming all parameters are independent the joint log-prior is
total_lnprior = theta_lnprior + eta_lnprior + zeta_lnprior + beta_lnprior + rc_lnprior + C_lnprior
return total_lnprior
# Define the log-posterior probability
def lnpost(params):
lp = lnprior(params)
# Check the finiteness of the prior.
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(params)
hcc_prefix = '/work/mei/bfloyd/SPT_AGN/'
# hcc_prefix = ''
parser = ArgumentParser(description='Runs MCMC sampler')
parser.add_argument('--restart', help='Allows restarting the chain in place rather than resetting the chain.',
action='store_true')
parser.add_argument('name', help='Chain name', type=str)
parser.add_argument('--no-luminosity', action='store_true', help='Deactivate luminosity dependence in model.')
parser.add_argument('--no-selection-membership', action='store_true',
help='Deactivate fuzzy degree of membership for AGN selection in likelihood function.')
parser.add_argument('--no-completeness', action='store_true',
help='Deactivate photometric completeness correction in likelihood function.')
parser.add_argument('--poisson-only', action='store_true',
help='Use a pure Poisson likelihood function with a model that has no luminosity dependence.')
parser_grp = parser.add_mutually_exclusive_group()
parser_grp.add_argument('--cluster-only', action='store_true',
help='Sample only on cluster objects.')
parser_grp.add_argument('--background-only', action='store_true',
help='Sample only on background objects.')
args = parser.parse_args()
# Load in the preprocessing file
preprocess_file = os.path.abspath('SPTcl_IRAGN_preprocessing.json')
with open(preprocess_file, 'r') as f:
catalog_dict = json.load(f)
# Go through the catalog dictionary, recast the cluster's mass and r500 to astropy Quantity objects, and convert all
# list-type data to numpy arrays
for cluster_id, cluster_info in catalog_dict.items():
catalog_dict[cluster_id]['m500'] = cluster_info['m500'] * u.Msun
catalog_dict[cluster_id]['r500'] = cluster_info['r500'] * u.Mpc
for data_name, data in filter(lambda x: isinstance(x[1], list), cluster_info.items()):
catalog_dict[cluster_id][data_name] = np.array(data)
# Set up our MCMC sampler.
# Set the number of dimensions for the parameter space and the number of walkers to use to explore the space.
ndim = 5 if args.cluster_only else (1 if args.background_only else 6)
nwalkers = 6 * ndim
# Also, set the number of steps to run the sampler for.
nsteps = int(1e6)
# We will initialize our walkers in a tight ball near the initial parameter values.
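# pos0 has shape (nwalkers, ndim): one starting position per walker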
if args.cluster_only:
pos0 = np.vstack([[np.random.uniform(0., 12.), # theta
np.random.uniform(-1., 6.), # eta
np.random.uniform(-3., 3.), # zeta
np.random.uniform(-3., 3.), # beta
np.random.normal(loc=0.1, scale=6e-3) # rc
]
for i in range(nwalkers)])
elif args.background_only:
pos0 = np.vstack([[np.random.normal(loc=0.371, scale=0.157) # C
]
for i in range(nwalkers)])
else:
pos0 = np.vstack([[np.random.uniform(0., 12.), # theta
np.random.uniform(-1., 6.), # eta
np.random.uniform(-3., 3.), # zeta
np.random.uniform(-3., 3.), # beta
np.random.normal(loc=0.1, scale=6e-3), # rc
np.random.normal(loc=0.371, scale=0.157) # C
]
for i in range(nwalkers)])
# Set up the autocorrelation and convergence variables
autocorr = np.empty(nsteps)
old_tau = np.inf # For convergence
with MPIPool() as pool:
# if not pool.is_master():
# pool.wait()
# sys.exit(0)
# Filename for hd5 backend
chain_file = 'emcee_chains_mock_phot_features.h5'
backend = emcee.backends.HDFBackend(chain_file, name=f'{args.name}'
f'{"_no-LF" if args.no_luminosity else ""}'
f'{"_no-mu" if args.no_selection_membership else ""}'
f'{"_no-comp_corr" if args.no_completeness else ""}'
f'{"_poisson-only" if args.poisson_only else ""}')
if not args.restart:
backend.reset(nwalkers, ndim)
# Stretch move proposal. Manually specified to tune the `a` parameter.
moves = emcee.moves.StretchMove(a=2.75)
# Initialize the sampler
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost, backend=backend, moves=moves, pool=pool)
# Run the sampler.
print('Starting sampler.')
start_sampler_time = time()
# Sample up to nsteps.
for index, sample in enumerate(sampler.sample(pos0, iterations=nsteps)):
# Only check convergence every 100 steps
if sampler.iteration % 100:
continue
# Compute the autocorrelation time so far.
# Using tol = 0 means we will always get an estimate even if it isn't trustworthy
tau = sampler.get_autocorr_time(tol=0)
autocorr[index] = np.mean(tau)
# Check convergence
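# Converged once the chain is longer than 100 autocorrelation times and tau has changed by less than 1% since the last check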
converged = np.all(tau * 100 < sampler.iteration)
converged &= np.all(np.abs(old_tau - tau) / tau < 0.01)
if converged:
print(f'Chains have converged. Ending sampler early.\nIteration stopped at: {sampler.iteration}')
break
old_tau = tau
print(f'Sampler runtime: {time() - start_sampler_time:.2f} s')
| """
Assef+11 QLF using luminosity and density evolution.
Parameters
----------
abs_mag : astropy table-like
Rest-frame J-band absolute magnitude.
redshift : astropy table-like
Cluster redshift
Returns
-------
Phi : ndarray
Luminosity density
"""
# L/L_*(z) = 10**(0.4 * (M_*(z) - M))
L_L_star = 10 ** (0.4 * (m_star(redshift) - abs_mag))
# Phi*(z) = 10**(log10(Phi*(z)))
phi_star = 10 ** log_phi_star(redshift) * (cosmo.h / u.Mpc) ** 3
# QLF slopes
alpha1 = -3.35 # alpha in Table 2
alpha2 = -0.37 # beta in Table 2
Phi = 0.4 * np.log(10) * L_L_star * phi_star * (L_L_star ** -alpha1 + L_L_star ** -alpha2) ** -1
return Phi | identifier_body |
SPT_AGN_emcee_sampler_MPI.py | """
SPT_AGN_emcee_sampler_MPI.py
Author: Benjamin Floyd
This script will perform the Bayesian analysis on the SPT-AGN data to produce the posterior probability distributions
for all fitting parameters.
"""
import json
import os
from argparse import ArgumentParser
from time import time
import astropy.units as u
import emcee
import numpy as np
from astropy.cosmology import FlatLambdaCDM
from custom_math import trap_weight # Custom trapezoidal integration
from schwimmbad import MPIPool
from scipy.interpolate import lagrange
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
# Set up the luminosity and density evolution using the fits from Assef+11 Table 2
z_i = [0.25, 0.5, 1., 2., 4.]
m_star_z_i = [-23.51, -24.64, -26.10, -27.08]
phi_star_z_i = [-3.41, -3.73, -4.17, -4.65, -5.77]
m_star = lagrange(z_i[1:], m_star_z_i)
log_phi_star = lagrange(z_i, phi_star_z_i)
def luminosity_function(abs_mag, redshift):
"""
Assef+11 QLF using luminosity and density evolution.
Parameters
----------
abs_mag : astropy table-like
Rest-frame J-band absolute magnitude.
redshift : astropy table-like
Cluster redshift
Returns
-------
Phi : ndarray
Luminosity density
"""
# L/L_*(z) = 10**(0.4 * (M_*(z) - M))
L_L_star = 10 ** (0.4 * (m_star(redshift) - abs_mag))
# Phi*(z) = 10**(log10(Phi*(z)))
phi_star = 10 ** log_phi_star(redshift) * (cosmo.h / u.Mpc) ** 3
# QLF slopes
alpha1 = -3.35 # alpha in Table 2
alpha2 = -0.37 # beta in Table 2
Phi = 0.4 * np.log(10) * L_L_star * phi_star * (L_L_star ** -alpha1 + L_L_star ** -alpha2) ** -1
return Phi
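# Illustrative usage (not called elsewhere in this script): the rest-frame J-band QLF density at M_J = -25 and z = 1
#     phi = luminosity_function(-25.0, 1.0)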
def model_rate_opted(params, cluster_id, r_r500, j_mag, integral=False):
"""
Our generating model.
Parameters
----------
params : tuple
Tuple of (theta, eta, zeta, beta, rc, C)
cluster_id : str
SPT ID of our cluster in the catalog dictionary
r_r500 : array-like
A vector of radii of objects within the cluster normalized by the cluster's r500
j_mag : array-like
A vector of J-band absolute magnitudes to be used in the luminosity function
integral : bool, optional
Flag indicating if the luminosity function factor of the model should be integrated. Defaults to `False`.
Returns
-------
model
A surface density profile of objects as a function of radius and luminosity.
"""
if args.cluster_only:
# Unpack our parameters
theta, eta, zeta, beta, rc = params
# Set background parameter to 0
C = 0
elif args.background_only:
# Unpack our parameters
C, = params
# Set all other parameters to 0
theta, eta, zeta, beta, rc = [0.]*5
else:
# Unpack our parameters
theta, eta, zeta, beta, rc, C = params
# Extract our data from the catalog dictionary
z = catalog_dict[cluster_id]['redshift']
m = catalog_dict[cluster_id]['m500']
r500 = catalog_dict[cluster_id]['r500']
# Luminosity function number
if integral:
lum_funct_value = np.trapz(luminosity_function(j_mag, z), j_mag)
else:
lum_funct_value = luminosity_function(j_mag, z)
if args.no_luminosity or args.poisson_only:
LF = 1
else:
LF = cosmo.angular_diameter_distance(z) ** 2 * r500 * lum_funct_value
# Convert our background surface density from angular units into units of r500^-2
background = (C / u.arcmin ** 2) * cosmo.arcsec_per_kpc_proper(z).to(u.arcmin / u.Mpc) ** 2 * r500 ** 2
# Our amplitude is determined from the cluster data
a = theta * (1 + z) ** eta * (m / (1e15 * u.Msun)) ** zeta * LF
model = a * (1 + (r_r500 / rc) ** 2) ** (-1.5 * beta + 0.5) + background
return model.value
# Set our log-likelihood
def lnlike(param):
lnlike_list = []
for cluster_id in catalog_dict:
# Get the good pixel fraction for this cluster
gpf_all = catalog_dict[cluster_id]['gpf_rall']
# Get the radial positions of the AGN
radial_r500_maxr = catalog_dict[cluster_id]['radial_r500_maxr']
# Get the completeness weights for the AGN
completeness_weight_maxr = catalog_dict[cluster_id]['completeness_weight_maxr']
# Get the AGN sample degrees of membership
if args.no_selection_membership or args.poisson_only:
agn_membership = 1
else:
agn_membership = catalog_dict[cluster_id]['agn_membership_maxr']
# Get the J-band absolute magnitudes
j_band_abs_mag = catalog_dict[cluster_id]['j_abs_mag']
# Get the radial mesh for integration
rall = catalog_dict[cluster_id]['rall']
# Get the luminosity mesh for integration
jall = catalog_dict[cluster_id]['jall']
# Compute the completeness ratio for this cluster
if args.no_completeness or args.poisson_only:
completeness_ratio = 1.
else:
completeness_ratio = len(completeness_weight_maxr) / np.sum(completeness_weight_maxr)
# Compute the model rate at the locations of the AGN.
ni = model_rate_opted(param, cluster_id, radial_r500_maxr, j_band_abs_mag)
# Compute the full model along the radial direction.
# The completeness weight is set to `1` as the model in the integration is assumed to be complete.
n_mesh = model_rate_opted(param, cluster_id, rall, jall, integral=True)
# Use a spatial poisson point-process log-likelihood
cluster_lnlike = (np.sum(np.log(ni * radial_r500_maxr) * agn_membership)
- completeness_ratio * trap_weight(n_mesh * 2 * np.pi * rall, rall, weight=gpf_all))
lnlike_list.append(cluster_lnlike)
total_lnlike = np.sum(lnlike_list)
return total_lnlike
# For our prior, we will choose uninformative priors for all our parameters and for the constant field value we will use
# a gaussian distribution set by the values obtained from the SDWFS data set.
def lnprior(params):
# Set our hyperparameters
# h_rc = 0.25
# h_rc_err = 0.1
h_C = 0.333
h_C_err = 0.024
# Extract our parameters
if args.cluster_only:
theta, eta, zeta, beta, rc = params
C = 0.
elif args.background_only:
C, = params
theta, eta, zeta, beta, rc = [0.]*5
else:
|
# Define all priors
if (0.0 <= theta <= np.inf and
-6. <= eta <= 6. and
-3. <= zeta <= 3. and
-3. <= beta <= 3. and
0.05 <= rc <= 0.5 and
0.0 <= C < np.inf):
theta_lnprior = 0.0
eta_lnprior = 0.0
beta_lnprior = 0.0
zeta_lnprior = 0.0
# rc_lnprior = -0.5 * np.sum((rc - h_rc) ** 2 / h_rc_err ** 2)
rc_lnprior = 0.0
if args.cluster_only:
C_lnprior = 0.
else:
C_lnprior = -0.5 * np.sum((C - h_C) ** 2 / h_C_err ** 2)
# C_lnprior = 0.0
else:
theta_lnprior = -np.inf
eta_lnprior = -np.inf
beta_lnprior = -np.inf
zeta_lnprior = -np.inf
rc_lnprior = -np.inf
C_lnprior = -np.inf
# Assuming all parameters are independent the joint log-prior is
total_lnprior = theta_lnprior + eta_lnprior + zeta_lnprior + beta_lnprior + rc_lnprior + C_lnprior
return total_lnprior
# Define the log-posterior probability
def lnpost(params):
lp = lnprior(params)
# Check the finiteness of the prior.
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(params)
hcc_prefix = '/work/mei/bfloyd/SPT_AGN/'
# hcc_prefix = ''
parser = ArgumentParser(description='Runs MCMC sampler')
parser.add_argument('--restart', help='Allows restarting the chain in place rather than resetting the chain.',
action='store_true')
parser.add_argument('name', help='Chain name', type=str)
parser.add_argument('--no-luminosity', action='store_true', help='Deactivate luminosity dependence in model.')
parser.add_argument('--no-selection-membership', action='store_true',
help='Deactivate fuzzy degree of membership for AGN selection in likelihood function.')
parser.add_argument('--no-completeness', action='store_true',
help='Deactivate photometric completeness correction in likelihood function.')
parser.add_argument('--poisson-only', action='store_true',
help='Use a pure Poisson likelihood function with a model that has no luminosity dependence.')
parser_grp = parser.add_mutually_exclusive_group()
parser_grp.add_argument('--cluster-only', action='store_true',
help='Sample only on cluster objects.')
parser_grp.add_argument('--background-only', action='store_true',
help='Sample only on background objects.')
args = parser.parse_args()
# Load in the preprocessing file
preprocess_file = os.path.abspath('SPTcl_IRAGN_preprocessing.json')
with open(preprocess_file, 'r') as f:
catalog_dict = json.load(f)
# Go through the catalog dictionary, recast the cluster's mass and r500 to astropy Quantity objects, and convert all
# list-type data to numpy arrays
for cluster_id, cluster_info in catalog_dict.items():
catalog_dict[cluster_id]['m500'] = cluster_info['m500'] * u.Msun
catalog_dict[cluster_id]['r500'] = cluster_info['r500'] * u.Mpc
for data_name, data in filter(lambda x: isinstance(x[1], list), cluster_info.items()):
catalog_dict[cluster_id][data_name] = np.array(data)
# Set up our MCMC sampler.
# Set the number of dimensions for the parameter space and the number of walkers to use to explore the space.
ndim = 5 if args.cluster_only else (1 if args.background_only else 6)
nwalkers = 6 * ndim
# Also, set the number of steps to run the sampler for.
nsteps = int(1e6)
# We will initialize our walkers in a tight ball near the initial parameter values.
if args.cluster_only:
pos0 = np.vstack([[np.random.uniform(0., 12.), # theta
np.random.uniform(-1., 6.), # eta
np.random.uniform(-3., 3.), # zeta
np.random.uniform(-3., 3.), # beta
np.random.normal(loc=0.1, scale=6e-3) # rc
]
for i in range(nwalkers)])
elif args.background_only:
pos0 = np.vstack([[np.random.normal(loc=0.371, scale=0.157) # C
]
for i in range(nwalkers)])
else:
pos0 = np.vstack([[np.random.uniform(0., 12.), # theta
np.random.uniform(-1., 6.), # eta
np.random.uniform(-3., 3.), # zeta
np.random.uniform(-3., 3.), # beta
np.random.normal(loc=0.1, scale=6e-3), # rc
np.random.normal(loc=0.371, scale=0.157) # C
]
for i in range(nwalkers)])
# Set up the autocorrelation and convergence variables
autocorr = np.empty(nsteps)
old_tau = np.inf # For convergence
with MPIPool() as pool:
# if not pool.is_master():
# pool.wait()
# sys.exit(0)
# Filename for hd5 backend
chain_file = 'emcee_chains_mock_phot_features.h5'
backend = emcee.backends.HDFBackend(chain_file, name=f'{args.name}'
f'{"_no-LF" if args.no_luminosity else ""}'
f'{"_no-mu" if args.no_selection_membership else ""}'
f'{"_no-comp_corr" if args.no_completeness else ""}'
f'{"_poisson-only" if args.poisson_only else ""}')
if not args.restart:
backend.reset(nwalkers, ndim)
# Stretch move proposal. Manually specified to tune the `a` parameter.
moves = emcee.moves.StretchMove(a=2.75)
# Initialize the sampler
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost, backend=backend, moves=moves, pool=pool)
# Run the sampler.
print('Starting sampler.')
start_sampler_time = time()
# Sample up to nsteps.
for index, sample in enumerate(sampler.sample(pos0, iterations=nsteps)):
# Only check convergence every 100 steps
if sampler.iteration % 100:
continue
# Compute the autocorrelation time so far.
# Using tol = 0 means we will always get an estimate even if it isn't trustworthy
tau = sampler.get_autocorr_time(tol=0)
autocorr[index] = np.mean(tau)
# Check convergence
converged = np.all(tau * 100 < sampler.iteration)
converged &= np.all(np.abs(old_tau - tau) / tau < 0.01)
if converged:
print(f'Chains have converged. Ending sampler early.\nIteration stopped at: {sampler.iteration}')
break
old_tau = tau
print(f'Sampler runtime: {time() - start_sampler_time:.2f} s')
| theta, eta, zeta, beta, rc, C = params | conditional_block |
SPT_AGN_emcee_sampler_MPI.py | """
SPT_AGN_emcee_sampler_MPI.py
Author: Benjamin Floyd
This script will perform the Bayesian analysis on the SPT-AGN data to produce the posterior probability distributions
for all fitting parameters.
"""
import json
import os
from argparse import ArgumentParser
from time import time
import astropy.units as u
import emcee
import numpy as np
from astropy.cosmology import FlatLambdaCDM
from custom_math import trap_weight # Custom trapezoidal integration
from schwimmbad import MPIPool
from scipy.interpolate import lagrange
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
# Set up the luminosity and density evolution using the fits from Assef+11 Table 2
z_i = [0.25, 0.5, 1., 2., 4.]
m_star_z_i = [-23.51, -24.64, -26.10, -27.08]
phi_star_z_i = [-3.41, -3.73, -4.17, -4.65, -5.77]
m_star = lagrange(z_i[1:], m_star_z_i)
log_phi_star = lagrange(z_i, phi_star_z_i)
def luminosity_function(abs_mag, redshift):
"""
Assef+11 QLF using luminosity and density evolution.
Parameters
----------
abs_mag : astropy table-like
Rest-frame J-band absolute magnitude.
redshift : astropy table-like
Cluster redshift
Returns
-------
Phi : ndarray
Luminosity density
"""
# L/L_*(z) = 10**(0.4 * (M_*(z) - M))
L_L_star = 10 ** (0.4 * (m_star(redshift) - abs_mag))
# Phi*(z) = 10**(log10(Phi*(z)))
phi_star = 10 ** log_phi_star(redshift) * (cosmo.h / u.Mpc) ** 3
# QLF slopes
alpha1 = -3.35 # alpha in Table 2
alpha2 = -0.37 # beta in Table 2
Phi = 0.4 * np.log(10) * L_L_star * phi_star * (L_L_star ** -alpha1 + L_L_star ** -alpha2) ** -1
return Phi
def | (params, cluster_id, r_r500, j_mag, integral=False):
"""
Our generating model.
Parameters
----------
params : tuple
Tuple of (theta, eta, zeta, beta, rc, C)
cluster_id : str
SPT ID of our cluster in the catalog dictionary
r_r500 : array-like
A vector of radii of objects within the cluster normalized by the cluster's r500
j_mag : array-like
A vector of J-band absolute magnitudes to be used in the luminosity function
integral : bool, optional
Flag indicating if the luminosity function factor of the model should be integrated. Defaults to `False`.
Returns
-------
model
A surface density profile of objects as a function of radius and luminosity.
"""
if args.cluster_only:
# Unpack our parameters
theta, eta, zeta, beta, rc = params
# Set background parameter to 0
C = 0
elif args.background_only:
# Unpack our parameters
C, = params
# Set all other parameters to 0
theta, eta, zeta, beta, rc = [0.]*5
else:
# Unpack our parameters
theta, eta, zeta, beta, rc, C = params
# Extract our data from the catalog dictionary
z = catalog_dict[cluster_id]['redshift']
m = catalog_dict[cluster_id]['m500']
r500 = catalog_dict[cluster_id]['r500']
# Luminosity function number
if integral:
lum_funct_value = np.trapz(luminosity_function(j_mag, z), j_mag)
else:
lum_funct_value = luminosity_function(j_mag, z)
if args.no_luminosity or args.poisson_only:
LF = 1
else:
LF = cosmo.angular_diameter_distance(z) ** 2 * r500 * lum_funct_value
# Convert our background surface density from angular units into units of r500^-2
background = (C / u.arcmin ** 2) * cosmo.arcsec_per_kpc_proper(z).to(u.arcmin / u.Mpc) ** 2 * r500 ** 2
# Our amplitude is determined from the cluster data
a = theta * (1 + z) ** eta * (m / (1e15 * u.Msun)) ** zeta * LF
model = a * (1 + (r_r500 / rc) ** 2) ** (-1.5 * beta + 0.5) + background
return model.value
# Set our log-likelihood
def lnlike(param):
lnlike_list = []
for cluster_id in catalog_dict:
# Get the good pixel fraction for this cluster
gpf_all = catalog_dict[cluster_id]['gpf_rall']
# Get the radial positions of the AGN
radial_r500_maxr = catalog_dict[cluster_id]['radial_r500_maxr']
# Get the completeness weights for the AGN
completeness_weight_maxr = catalog_dict[cluster_id]['completeness_weight_maxr']
# Get the AGN sample degrees of membership
if args.no_selection_membership or args.poisson_only:
agn_membership = 1
else:
agn_membership = catalog_dict[cluster_id]['agn_membership_maxr']
# Get the J-band absolute magnitudes
j_band_abs_mag = catalog_dict[cluster_id]['j_abs_mag']
# Get the radial mesh for integration
rall = catalog_dict[cluster_id]['rall']
# Get the luminosity mesh for integration
jall = catalog_dict[cluster_id]['jall']
# Compute the completeness ratio for this cluster
if args.no_completeness or args.poisson_only:
completeness_ratio = 1.
else:
completeness_ratio = len(completeness_weight_maxr) / np.sum(completeness_weight_maxr)
# Compute the model rate at the locations of the AGN.
ni = model_rate_opted(param, cluster_id, radial_r500_maxr, j_band_abs_mag)
# Compute the full model along the radial direction.
# The completeness weight is set to `1` as the model in the integration is assumed to be complete.
n_mesh = model_rate_opted(param, cluster_id, rall, jall, integral=True)
# Use a spatial poisson point-process log-likelihood
cluster_lnlike = (np.sum(np.log(ni * radial_r500_maxr) * agn_membership)
- completeness_ratio * trap_weight(n_mesh * 2 * np.pi * rall, rall, weight=gpf_all))
lnlike_list.append(cluster_lnlike)
total_lnlike = np.sum(lnlike_list)
return total_lnlike
# For our prior, we will choose uninformative priors for all our parameters and for the constant field value we will use
# a gaussian distribution set by the values obtained from the SDWFS data set.
def lnprior(params):
# Set our hyperparameters
# h_rc = 0.25
# h_rc_err = 0.1
h_C = 0.333
h_C_err = 0.024
# Extract our parameters
if args.cluster_only:
theta, eta, zeta, beta, rc = params
C = 0.
elif args.background_only:
C, = params
theta, eta, zeta, beta, rc = [0.]*5
else:
theta, eta, zeta, beta, rc, C = params
# Define all priors
if (0.0 <= theta <= np.inf and
-6. <= eta <= 6. and
-3. <= zeta <= 3. and
-3. <= beta <= 3. and
0.05 <= rc <= 0.5 and
0.0 <= C < np.inf):
theta_lnprior = 0.0
eta_lnprior = 0.0
beta_lnprior = 0.0
zeta_lnprior = 0.0
# rc_lnprior = -0.5 * np.sum((rc - h_rc) ** 2 / h_rc_err ** 2)
rc_lnprior = 0.0
if args.cluster_only:
C_lnprior = 0.
else:
C_lnprior = -0.5 * np.sum((C - h_C) ** 2 / h_C_err ** 2)
# C_lnprior = 0.0
else:
theta_lnprior = -np.inf
eta_lnprior = -np.inf
beta_lnprior = -np.inf
zeta_lnprior = -np.inf
rc_lnprior = -np.inf
C_lnprior = -np.inf
# Assuming all parameters are independent the joint log-prior is
total_lnprior = theta_lnprior + eta_lnprior + zeta_lnprior + beta_lnprior + rc_lnprior + C_lnprior
return total_lnprior
# Define the log-posterior probability
def lnpost(params):
lp = lnprior(params)
# Check the finiteness of the prior.
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(params)
hcc_prefix = '/work/mei/bfloyd/SPT_AGN/'
# hcc_prefix = ''
parser = ArgumentParser(description='Runs MCMC sampler')
parser.add_argument('--restart', help='Allows restarting the chain in place rather than resetting the chain.',
action='store_true')
parser.add_argument('name', help='Chain name', type=str)
parser.add_argument('--no-luminosity', action='store_true', help='Deactivate luminosity dependence in model.')
parser.add_argument('--no-selection-membership', action='store_true',
help='Deactivate fuzzy degree of membership for AGN selection in likelihood function.')
parser.add_argument('--no-completeness', action='store_true',
help='Deactivate photometric completeness correction in likelihood function.')
parser.add_argument('--poisson-only', action='store_true',
help='Use a pure Poisson likelihood function with a model that has no luminosity dependence.')
parser_grp = parser.add_mutually_exclusive_group()
parser_grp.add_argument('--cluster-only', action='store_true',
help='Sample only on cluster objects.')
parser_grp.add_argument('--background-only', action='store_true',
help='Sample only on background objects.')
args = parser.parse_args()
# Load in the preprocessing file
preprocess_file = os.path.abspath('SPTcl_IRAGN_preprocessing.json')
with open(preprocess_file, 'r') as f:
catalog_dict = json.load(f)
# Go through the catalog dictionary, recast the cluster's mass and r500 to astropy Quantity objects, and convert all
# list-type data to numpy arrays
for cluster_id, cluster_info in catalog_dict.items():
catalog_dict[cluster_id]['m500'] = cluster_info['m500'] * u.Msun
catalog_dict[cluster_id]['r500'] = cluster_info['r500'] * u.Mpc
for data_name, data in filter(lambda x: isinstance(x[1], list), cluster_info.items()):
catalog_dict[cluster_id][data_name] = np.array(data)
# Set up our MCMC sampler.
# Set the number of dimensions for the parameter space and the number of walkers to use to explore the space.
ndim = 5 if args.cluster_only else (1 if args.background_only else 6)
nwalkers = 6 * ndim
# Also, set the number of steps to run the sampler for.
nsteps = int(1e6)
# We will initialize our walkers in a tight ball near the initial parameter values.
if args.cluster_only:
pos0 = np.vstack([[np.random.uniform(0., 12.), # theta
np.random.uniform(-1., 6.), # eta
np.random.uniform(-3., 3.), # zeta
np.random.uniform(-3., 3.), # beta
np.random.normal(loc=0.1, scale=6e-3) # rc
]
for i in range(nwalkers)])
elif args.background_only:
pos0 = np.vstack([[np.random.normal(loc=0.371, scale=0.157) # C
]
for i in range(nwalkers)])
else:
pos0 = np.vstack([[np.random.uniform(0., 12.), # theta
np.random.uniform(-1., 6.), # eta
np.random.uniform(-3., 3.), # zeta
np.random.uniform(-3., 3.), # beta
np.random.normal(loc=0.1, scale=6e-3), # rc
np.random.normal(loc=0.371, scale=0.157) # C
]
for i in range(nwalkers)])
# Set up the autocorrelation and convergence variables
autocorr = np.empty(nsteps)
old_tau = np.inf # For convergence
with MPIPool() as pool:
# if not pool.is_master():
# pool.wait()
# sys.exit(0)
# Filename for hd5 backend
chain_file = 'emcee_chains_mock_phot_features.h5'
backend = emcee.backends.HDFBackend(chain_file, name=f'{args.name}'
f'{"_no-LF" if args.no_luminosity else ""}'
f'{"_no-mu" if args.no_selection_membership else ""}'
f'{"_no-comp_corr" if args.no_completeness else ""}'
f'{"_poisson-only" if args.poisson_only else ""}')
if not args.restart:
backend.reset(nwalkers, ndim)
# Stretch move proposal. Manually specified to tune the `a` parameter.
moves = emcee.moves.StretchMove(a=2.75)
# Initialize the sampler
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost, backend=backend, moves=moves, pool=pool)
# Run the sampler.
print('Starting sampler.')
start_sampler_time = time()
# Sample up to nsteps.
for index, sample in enumerate(sampler.sample(pos0, iterations=nsteps)):
# Only check convergence every 100 steps
if sampler.iteration % 100:
continue
# Compute the autocorrelation time so far.
# Using tol = 0 means we will always get an estimate even if it isn't trustworthy
tau = sampler.get_autocorr_time(tol=0)
autocorr[index] = np.mean(tau)
# Check convergence
converged = np.all(tau * 100 < sampler.iteration)
converged &= np.all(np.abs(old_tau - tau) / tau < 0.01)
if converged:
print(f'Chains have converged. Ending sampler early.\nIteration stopped at: {sampler.iteration}')
break
old_tau = tau
print(f'Sampler runtime: {time() - start_sampler_time:.2f} s')
| model_rate_opted | identifier_name |
SPT_AGN_emcee_sampler_MPI.py | """
SPT_AGN_emcee_sampler_MPI.py
Author: Benjamin Floyd
This script will perform the Bayesian analysis on the SPT-AGN data to produce the posterior probability distributions
for all fitting parameters.
"""
import json
import os
from argparse import ArgumentParser
from time import time
import astropy.units as u
import emcee
import numpy as np
from astropy.cosmology import FlatLambdaCDM
from custom_math import trap_weight # Custom trapezoidal integration
from schwimmbad import MPIPool
from scipy.interpolate import lagrange
cosmo = FlatLambdaCDM(H0=70, Om0=0.3)
# Set up the luminosity and density evolution using the fits from Assef+11 Table 2
z_i = [0.25, 0.5, 1., 2., 4.]
m_star_z_i = [-23.51, -24.64, -26.10, -27.08]
phi_star_z_i = [-3.41, -3.73, -4.17, -4.65, -5.77]
m_star = lagrange(z_i[1:], m_star_z_i)
log_phi_star = lagrange(z_i, phi_star_z_i)
def luminosity_function(abs_mag, redshift):
"""
Assef+11 QLF using luminosity and density evolution.
Parameters
----------
abs_mag : astropy table-like
Rest-frame J-band absolute magnitude.
redshift : astropy table-like
Cluster redshift
Returns
-------
Phi : ndarray
Luminosity density
"""
# L/L_*(z) = 10**(0.4 * (M_*(z) - M))
L_L_star = 10 ** (0.4 * (m_star(redshift) - abs_mag))
# Phi*(z) = 10**(log10(Phi*(z)))
phi_star = 10 ** log_phi_star(redshift) * (cosmo.h / u.Mpc) ** 3
# QLF slopes
alpha1 = -3.35 # alpha in Table 2
alpha2 = -0.37 # beta in Table 2
Phi = 0.4 * np.log(10) * L_L_star * phi_star * (L_L_star ** -alpha1 + L_L_star ** -alpha2) ** -1
return Phi
def model_rate_opted(params, cluster_id, r_r500, j_mag, integral=False):
"""
Our generating model.
Parameters
----------
params : tuple
Tuple of (theta, eta, zeta, beta, rc, C)
cluster_id : str
SPT ID of our cluster in the catalog dictionary
r_r500 : array-like
A vector of radii of objects within the cluster normalized by the cluster's r500
j_mag : array-like
A vector of J-band absolute magnitudes to be used in the luminosity function
integral : bool, optional
Flag indicating if the luminosity function factor of the model should be integrated. Defaults to `False`.
Returns
-------
model
A surface density profile of objects as a function of radius and luminosity.
"""
if args.cluster_only:
# Unpack our parameters
theta, eta, zeta, beta, rc = params
# Set background parameter to 0
C = 0
elif args.background_only:
# Unpack our parameters
C, = params
# Set all other parameters to 0
theta, eta, zeta, beta, rc = [0.]*5
else:
# Unpack our parameters
theta, eta, zeta, beta, rc, C = params
# Extract our data from the catalog dictionary
z = catalog_dict[cluster_id]['redshift']
m = catalog_dict[cluster_id]['m500']
r500 = catalog_dict[cluster_id]['r500']
# Luminosity function number
if integral:
lum_funct_value = np.trapz(luminosity_function(j_mag, z), j_mag)
else:
lum_funct_value = luminosity_function(j_mag, z)
if args.no_luminosity or args.poisson_only:
LF = 1
else:
LF = cosmo.angular_diameter_distance(z) ** 2 * r500 * lum_funct_value
# Convert our background surface density from angular units into units of r500^-2
background = (C / u.arcmin ** 2) * cosmo.arcsec_per_kpc_proper(z).to(u.arcmin / u.Mpc) ** 2 * r500 ** 2
# Our amplitude is determined from the cluster data
a = theta * (1 + z) ** eta * (m / (1e15 * u.Msun)) ** zeta * LF
model = a * (1 + (r_r500 / rc) ** 2) ** (-1.5 * beta + 0.5) + background
return model.value
# Set our log-likelihood
def lnlike(param):
lnlike_list = []
for cluster_id in catalog_dict:
# Get the good pixel fraction for this cluster
gpf_all = catalog_dict[cluster_id]['gpf_rall']
# Get the radial positions of the AGN
radial_r500_maxr = catalog_dict[cluster_id]['radial_r500_maxr']
# Get the completeness weights for the AGN
completeness_weight_maxr = catalog_dict[cluster_id]['completeness_weight_maxr']
# Get the AGN sample degrees of membership
if args.no_selection_membership or args.poisson_only:
agn_membership = 1
else:
agn_membership = catalog_dict[cluster_id]['agn_membership_maxr']
# Get the J-band absolute magnitudes
j_band_abs_mag = catalog_dict[cluster_id]['j_abs_mag']
# Get the radial mesh for integration
rall = catalog_dict[cluster_id]['rall']
# Get the luminosity mesh for integration
jall = catalog_dict[cluster_id]['jall']
# Compute the completeness ratio for this cluster
if args.no_completeness or args.poisson_only:
completeness_ratio = 1.
else:
completeness_ratio = len(completeness_weight_maxr) / np.sum(completeness_weight_maxr)
# Compute the model rate at the locations of the AGN.
ni = model_rate_opted(param, cluster_id, radial_r500_maxr, j_band_abs_mag)
# Compute the full model along the radial direction.
# The completeness weight is set to `1` as the model in the integration is assumed to be complete.
n_mesh = model_rate_opted(param, cluster_id, rall, jall, integral=True)
# Use a spatial poisson point-process log-likelihood
cluster_lnlike = (np.sum(np.log(ni * radial_r500_maxr) * agn_membership)
- completeness_ratio * trap_weight(n_mesh * 2 * np.pi * rall, rall, weight=gpf_all))
lnlike_list.append(cluster_lnlike)
total_lnlike = np.sum(lnlike_list)
return total_lnlike
# For our prior, we will choose uninformative priors for all our parameters and for the constant field value we will use
# a gaussian distribution set by the values obtained from the SDWFS data set.
def lnprior(params):
# Set our hyperparameters
# h_rc = 0.25
# h_rc_err = 0.1
h_C = 0.333
h_C_err = 0.024
# Extract our parameters
if args.cluster_only:
theta, eta, zeta, beta, rc = params
C = 0.
elif args.background_only:
C, = params
theta, eta, zeta, beta, rc = [0.]*5
else:
theta, eta, zeta, beta, rc, C = params
# Define all priors
if (0.0 <= theta <= np.inf and
-6. <= eta <= 6. and
-3. <= zeta <= 3. and
-3. <= beta <= 3. and
0.05 <= rc <= 0.5 and
0.0 <= C < np.inf):
theta_lnprior = 0.0
eta_lnprior = 0.0
beta_lnprior = 0.0
zeta_lnprior = 0.0
# rc_lnprior = -0.5 * np.sum((rc - h_rc) ** 2 / h_rc_err ** 2)
rc_lnprior = 0.0
if args.cluster_only:
C_lnprior = 0.
else:
C_lnprior = -0.5 * np.sum((C - h_C) ** 2 / h_C_err ** 2)
# C_lnprior = 0.0
else:
theta_lnprior = -np.inf
eta_lnprior = -np.inf
beta_lnprior = -np.inf
zeta_lnprior = -np.inf
rc_lnprior = -np.inf
C_lnprior = -np.inf
# Assuming all parameters are independent the joint log-prior is
total_lnprior = theta_lnprior + eta_lnprior + zeta_lnprior + beta_lnprior + rc_lnprior + C_lnprior
return total_lnprior
# Define the log-posterior probability
def lnpost(params):
lp = lnprior(params)
# Check the finiteness of the prior.
if not np.isfinite(lp):
return -np.inf
return lp + lnlike(params)
hcc_prefix = '/work/mei/bfloyd/SPT_AGN/'
# hcc_prefix = ''
parser = ArgumentParser(description='Runs MCMC sampler')
parser.add_argument('--restart', help='Allows restarting the chain in place rather than resetting the chain.',
action='store_true')
parser.add_argument('name', help='Chain name', type=str)
parser.add_argument('--no-luminosity', action='store_true', help='Deactivate luminosity dependence in model.')
parser.add_argument('--no-selection-membership', action='store_true',
help='Deactivate fuzzy degree of membership for AGN selection in likelihood function.')
parser.add_argument('--no-completeness', action='store_true',
help='Deactivate photometric completeness correction in likelihood function.')
parser.add_argument('--poisson-only', action='store_true',
help='Use a pure Poisson likelihood function with a model that has no luminosity dependence.')
parser_grp = parser.add_mutually_exclusive_group()
parser_grp.add_argument('--cluster-only', action='store_true',
help='Sample only on cluster objects.')
parser_grp.add_argument('--background-only', action='store_true',
help='Sample only on background objects.')
args = parser.parse_args()
# Load in the preprocessing file
preprocess_file = os.path.abspath('SPTcl_IRAGN_preprocessing.json')
with open(preprocess_file, 'r') as f:
catalog_dict = json.load(f)
# Go through the catalog dictionary, recast the cluster's mass and r500 to astropy Quantity objects, and convert all
# list-type data to numpy arrays
for cluster_id, cluster_info in catalog_dict.items():
catalog_dict[cluster_id]['m500'] = cluster_info['m500'] * u.Msun
catalog_dict[cluster_id]['r500'] = cluster_info['r500'] * u.Mpc
for data_name, data in filter(lambda x: isinstance(x[1], list), cluster_info.items()):
catalog_dict[cluster_id][data_name] = np.array(data)
# Set up our MCMC sampler.
# Set the number of dimensions for the parameter space and the number of walkers to use to explore the space. | nwalkers = 6 * ndim
# Also, set the number of steps to run the sampler for.
nsteps = int(1e6)
# We will initialize our walkers in a tight ball near the initial parameter values.
if args.cluster_only:
pos0 = np.vstack([[np.random.uniform(0., 12.), # theta
np.random.uniform(-1., 6.), # eta
np.random.uniform(-3., 3.), # zeta
np.random.uniform(-3., 3.), # beta
np.random.normal(loc=0.1, scale=6e-3) # rc
]
for i in range(nwalkers)])
elif args.background_only:
pos0 = np.vstack([[np.random.normal(loc=0.371, scale=0.157) # C
]
for i in range(nwalkers)])
else:
pos0 = np.vstack([[np.random.uniform(0., 12.), # theta
np.random.uniform(-1., 6.), # eta
np.random.uniform(-3., 3.), # zeta
np.random.uniform(-3., 3.), # beta
np.random.normal(loc=0.1, scale=6e-3), # rc
np.random.normal(loc=0.371, scale=0.157) # C
]
for i in range(nwalkers)])
# Set up the autocorrelation and convergence variables
autocorr = np.empty(nsteps)
old_tau = np.inf # For convergence
with MPIPool() as pool:
# if not pool.is_master():
# pool.wait()
# sys.exit(0)
# Filename for hd5 backend
chain_file = 'emcee_chains_mock_phot_features.h5'
backend = emcee.backends.HDFBackend(chain_file, name=f'{args.name}'
f'{"_no-LF" if args.no_luminosity else ""}'
f'{"_no-mu" if args.no_selection_membership else ""}'
f'{"_no-comp_corr" if args.no_completeness else ""}'
f'{"_poisson-only" if args.poisson_only else ""}')
if not args.restart:
backend.reset(nwalkers, ndim)
# Stretch move proposal. Manually specified to tune the `a` parameter.
moves = emcee.moves.StretchMove(a=2.75)
# Initialize the sampler
sampler = emcee.EnsembleSampler(nwalkers, ndim, lnpost, backend=backend, moves=moves, pool=pool)
# Run the sampler.
print('Starting sampler.')
start_sampler_time = time()
# Sample up to nsteps.
for index, sample in enumerate(sampler.sample(pos0, iterations=nsteps)):
# Only check convergence every 100 steps
if sampler.iteration % 100:
continue
# Compute the autocorrelation time so far.
# Using tol = 0 means we will always get an estimate even if it isn't trustworthy
tau = sampler.get_autocorr_time(tol=0)
autocorr[index] = np.mean(tau)
# Check convergence
converged = np.all(tau * 100 < sampler.iteration)
converged &= np.all(np.abs(old_tau - tau) / tau < 0.01)
if converged:
print(f'Chains have converged. Ending sampler early.\nIteration stopped at: {sampler.iteration}')
break
old_tau = tau
print(f'Sampler runtime: {time() - start_sampler_time:.2f} s') | ndim = 5 if args.cluster_only else (1 if args.background_only else 6) | random_line_split |