file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
data_plt.py
# -*- coding: utf-8 -*- import numpy as np import pandas as pd from sklearn.ensemble import RandomForestRegressor, GradientBoostingRegressor, ExtraTreesRegressor from sklearn.svm import SVR, LinearSVR from sklearn.linear_model import ElasticNet, SGDRegressor, BayesianRidge,LinearRegression,Ridge,Lasso from sklearn.kernel_ridge import KernelRidge from xgboost import XGBRegressor import lightgbm as lgb import xgboost as xgb from sklearn.model_selection import KFold, RepeatedKFold, cross_val_score, GridSearchCV from sklearn.preprocessing import OneHotEncoder from scipy import sparse import warnings import re import plotly.offline as py py.init_notebook_mode(connected=True) from sklearn.metrics import mean_squared_error warnings.simplefilter(action='ignore', category=FutureWarning) warnings.filterwarnings("ignore") pd.set_option('display.max_columns',None) pd.set_option('max_colwidth',100) train = pd.read_csv('./jinnan_round1_train_20181227.csv') test = pd.read_csv('./jinnan_round1_testB_20190121.csv') class grid(): def __init__(self, model): self.model = model def grid_get(self, X, y, param_grid): grid_search = GridSearchCV(self.model, param_grid, cv=5, scoring="neg_mean_squared_error") grid_search.fit(X, y) print(grid_search.best_params_, np.sqrt(-grid_search.best_score_)) grid_search.cv_results_['mean_test_score'] = np.sqrt(-grid_search.cv_results_['mean_test_score']) print(pd.DataFrame(grid_search.cv_results_)[['params', 'mean_test_score', 'std_test_score']]) def get_phase(t1,t2): try: h1, m1, s1=t1.split(':') h2, m2, s2=t2.split(':') except: if t1 == -1 or t2 == -1: return -1 if int(h2) >= int(h1):
def timeTranSecond(t): try: t, m, s = t.split(":") except: if t == '1900/1/9 7:00': return 7 * 3600 / 3600 elif t == '1900/1/1 2:30': return (2 * 3600 + 30 * 60) / 3600 elif t == -1: return -1 else: return 0 try: tm = (int(t) * 3600 + int(m) * 60 + int(s)) / 3600 except: return (30 * 60) / 3600 return tm def getDuration(se): try: sh, sm, eh, em = re.findall(r"\d+\.?\d*", se) except: if se == -1: return -1 try: if int(sh) > int(eh): tm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600 + 24 else: tm = (int(eh) * 3600 + int(em) * 60 - int(sm) * 60 - int(sh) * 3600) / 3600 except: if se == '19:-20:05': return 1 elif se == '15:00-1600': return 1 return tm def rmse_cv(model,X,y): rmse = np.sqrt(-cross_val_score(model, X, y, scoring="neg_mean_squared_error", cv=5)) return rmse train.loc[train['B14'] == 40, 'B14'] = 400 train.drop(train[train['收率'] < 0.87].index, inplace=True) full = pd.concat([train, test], ignore_index=True) cols = ["A2", "A3", "A4"] for col in cols: full[col].fillna(0, inplace=True) cols1 = ["A7", "A8", "B10", "B11", "A20", "A24", "A26"] for col in cols1: full[col].fillna(-1, inplace=True) cols2 = ["B1", "B2", "B3", "B8", "B12", "B13", "A21", "A23"] for col in cols2: full[col].fillna(full[col].mode()[0], inplace=True) full['a21_a22_a23'] = full['A21']+full['A22']+full['A23'] cols3 = ["A25", "A27"] for col in cols3: full[col] = full.groupby(['a21_a22_a23'])[col].transform(lambda x: x.fillna(x.median())) full['a1_a3_a4']=full['A1']+full['A3']+full['A4'] full['a1_a3']=full['A1']+full['A3'] full['a1_a4']=full['A1']+full['A4'] full['a10_a6']=full['A10']-full['A6'] full['a12_a10']=full['A12']-full['A10'] full['a15_a12']=full['A15']-full['A12'] full['a17_a15']=full['A17']-full['A15'] full['a27_a25']=full['A27']-full['A25'] full['b6_b8']=full['B6']-full['B8'] full['a10_a6/a9_a5']=(full['A10']-full['A6'])/full.apply(lambda df:get_phase(df['A5'],df['A9']),axis=1) full['a12_a10/a11_a9']=(full['A12']-full['A10'])/full.apply(lambda 
df:get_phase(df['A9'],df['A11']),axis=1) full['a15_a12/a14_a11']=(full['A15']-full['A12'])/full.apply(lambda df:get_phase(df['A11'],df['A14']),axis=1) full['a17_a15/a16_a14']=(full['A17']-full['A15'])/full.apply(lambda df:get_phase(df['A14'],df['A16']),axis=1) full['a27_a25/a26_a24']=(full['A27']-full['A25'])/full.apply(lambda df:get_phase(df['A24'],df['A26']),axis=1) full['b6_b8/b7_b5']=(full['B6']-full['B8'])/full.apply(lambda df:get_phase(df['B5'],df['B7']),axis=1) full['b14/a1_a3_a4_a19_b1_b12'] = full['B14']/(full['A1']+full['A3']+full['A4']+full['A19']+full['B1']+full['B12']) full['b14/a1_a3_a4_a19_b1_b12_b14'] = full['B12']/(full['A1']+full['A3']+full['A4']+full['A19']+full['B1']+full['B14']) for f in ['A5', 'A7', 'A9', 'A11', 'A14', 'A16', 'A24', 'A26', 'B5', 'B7']: try: full[f] = full[f].apply(timeTranSecond) except: print(f, '应该在前面被删除了!') for f in ['A20', 'A28', 'B4', 'B9', 'B10', 'B11']: full[f] = full.apply(lambda df: getDuration(df[f]), axis=1) full['样本id'] = full['样本id'].apply(lambda x: int(x.split('_')[1])) good_cols=list(full.columns) good_cols.remove('样本id') good_cols.remove('收率') # for f in good_cols: # full[f] = full[f].map(dict(zip(full[f].unique(), range(0, full[f].nunique())))) n_train=train.shape[0] X = full[:n_train] test_X = full[n_train:] y= X.收率 X.drop(['收率'], axis=1, inplace=True) test_X.drop(['收率'], axis=1, inplace=True) # X_train = X[list(X.columns)].values # X_test = test_X[list(X.columns)].values # y_train = y.values # # grid(Lasso()).grid_get(X,y,{'alpha': [0.02,0.0002,0.000222,0.0000224],'max_iter':[10000]}) # grid(xgb.XGBRegressor()).grid_get(X_train,y_train,{'num_leaves': [100], # 'min_data_in_leaf': [9], # 'objective': ['regression'], # 'max_depth': [-1], # 'learning_rate': [0.01], # 'min_child_samples': [15], # "boosting": ['gbdt'], # "feature_fraction": [0.9], # "bagging_freq": [1], # "bagging_fraction": [0.9], # "bagging_seed": [5,13,40,50], # "metric": ['mse'], # "lambda_l1": [0.000001], # 'verbosity': [-1]}) # 
grid(xgb.XGBRegressor()).grid_get(X_train,y_train,{'eta': [0.1], 'max_depth': [6], 'subsample': [0.9], # 'colsample_bytree': [0.5],'objective': ['reg:linear'], # 'eval_metric': ['rmse'], 'silent': [True], 'nthread': [3]}) X_train = X[list(X.columns)].values X_test = test_X[list(X.columns)].values # one hot enc = OneHotEncoder() # for f in good_cols: # enc.fit(full[f].values.reshape(-1, 1)) # X_train = sparse.hstack((X_train, enc.transform(X[f].values.reshape(-1, 1))), 'csr') # X_test = sparse.hstack((X_test, enc.transform(test_X[f].values.reshape(-1, 1))), 'csr') print(X_train.shape) print(X_test.shape) y_train = y.values # param = {'num_leaves': 100, 'min_data_in_leaf': 9, 'objective': 'regression', 'max_depth': -1, 'learning_rate': 0.01, "min_child_samples": 15, "boosting": "gbdt", "feature_fraction": 0.9, "bagging_freq": 1, "bagging_fraction": 0.9, "bagging_seed": 13, "metric": 'mse', "lambda_l1": 0.000001, "verbosity": -1} folds = KFold(n_splits=5, shuffle=True, random_state=2018) oof_lgb = np.zeros(len(train)) predictions_lgb = np.zeros(len(test)) for fold_, (trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)): print("fold n°{}".format(fold_ + 1)) trn_data = lgb.Dataset(X_train[trn_idx], y_train[trn_idx]) val_data = lgb.Dataset(X_train[val_idx], y_train[val_idx]) num_round = 3000 clf = lgb.train(param, trn_data, num_round, valid_sets=[trn_data, val_data], verbose_eval=200, early_stopping_rounds=100) oof_lgb[val_idx] = clf.predict(X_train[val_idx], num_iteration=clf.best_iteration) predictions_lgb += clf.predict(X_test, num_iteration=clf.best_iteration) / folds.n_splits print("CV score: {:<8.8f}".format(mean_squared_error(oof_lgb, y))) ##### xgb xgb_params = {'eta': 0.1, 'max_depth': 6, 'subsample': 0.9, 'colsample_bytree': 0.5, 'objective': 'reg:linear', 'eval_metric': 'rmse', 'silent': True, 'nthread': 3} folds = KFold(n_splits=5, shuffle=True, random_state=2018) oof_xgb = np.zeros(len(train)) predictions_xgb = np.zeros(len(test)) for fold_, 
(trn_idx, val_idx) in enumerate(folds.split(X_train, y_train)): print("fold n°{}".format(fold_ + 1)) trn_data = xgb.DMatrix(X_train[trn_idx], y_train[trn_idx]) val_data = xgb.DMatrix(X_train[val_idx], y_train[val_idx]) watchlist = [(trn_data, 'train'), (val_data, 'valid_data')] clf = xgb.train(dtrain=trn_data, num_boost_round=20000, evals=watchlist, early_stopping_rounds=200, verbose_eval=100, params=xgb_params) oof_xgb[val_idx] = clf.predict(xgb.DMatrix(X_train[val_idx]), ntree_limit=clf.best_ntree_limit) predictions_xgb += clf.predict(xgb.DMatrix(X_test), ntree_limit=clf.best_ntree_limit) / folds.n_splits print("CV score: {:<8.8f}".format(mean_squared_error(oof_xgb, y))) # 将lgb和xgb的结果进行stacking train_stack = np.vstack([oof_lgb, oof_xgb]).transpose() test_stack = np.vstack([predictions_lgb, predictions_xgb]).transpose() folds_stack = RepeatedKFold(n_splits=5, n_repeats=2, random_state=4590) oof_stack1 = np.zeros(train_stack.shape[0]) predictions1 = np.zeros(test_stack.shape[0]) for fold_, (trn_idx, val_idx) in enumerate(folds_stack.split(train_stack, y)): print("fold {}".format(fold_)) trn_data, trn_y = train_stack[trn_idx], y.iloc[trn_idx].values val_data, val_y = train_stack[val_idx], y.iloc[val_idx].values clf_3 = BayesianRidge() clf_3.fit(trn_data, trn_y) oof_stack1[val_idx] = clf_3.predict(val_data) predictions1 += clf_3.predict(test_stack) / 10 print("CV score: {:<8.8f}".format(mean_squared_error(y.values, oof_stack1))) sub_df = pd.DataFrame() sub_df[0] = pd.read_csv('./jinnan_round1_testB_20190121.csv', header=None)[0][1:] sub_df[1] = predictions1 sub_df[1] = sub_df[1].apply(lambda x:round(x, 3)) sub_df.to_csv('./prediction.csv', index=False, header=None)
tm = (int(h2) * 3600 + int(m2) * 60 - int(m1) * 60 - int(h1) * 3600) / 3600 else: tm = (int(h2) * 3600 + int(m2) * 60 - int(m1) * 60 - int(h1) * 3600) / 3600 + 24 return tm
random_line_split
pkgindexer.py
# # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from mint import config from mint import helperfuncs from mint.db import projects from mint.lib import scriptlibrary from conary import conaryclient from conary import conarycfg from conary import dbstore from conary import versions from conary.repository import repository import os import time hiddenLabels = [ versions.Label('conary.rpath.com@rpl:rpl1'), versions.Label('conary.rpath.com@ravenous:bugblatterbeast') ] class PackageIndexer(scriptlibrary.SingletonScript): db = None cfgPath = config.RBUILDER_CONFIG def __init__(self, aLockPath = scriptlibrary.DEFAULT_LOCKPATH, aMintServer=None): self.cfg = config.MintConfig() self.cfg.read(self.cfgPath) if self.logFileName: self.logPath = os.path.join(self.cfg.logPath, self.logFileName) scriptlibrary.SingletonScript.__init__(self, aLockPath) def cleanup(self): if self.db: self.db.close() class UpdatePackageIndex(PackageIndexer): logFileName = 'package-index.log' def action(self): self.log.info("Updating package index") self.db = dbstore.connect(self.cfg.dbPath, driver = self.cfg.dbDriver) self.db.loadSchema() cu = self.db.cursor() try: cu.execute('SELECT COUNT(*) FROM PackageIndexMark') if not cu.fetchone()[0]: cu.execute('INSERT INTO PackageIndexMark VALUES(0)') oldMark = 0 else: cu.execute("SELECT MAX(mark) FROM PackageIndexMark") oldMark = cu.fetchone()[0] cu.execute("SELECT COALESCE(MAX(timestamp), 0) FROM Commits") newMark = cu.fetchone()[0] # 
If the oldMark was 1, and there have been no comits (newMark is # 0), preserve the oldMark of 1, so that the external package # index does not get blown away. if newMark == 0 and oldMark == 1: newMark = 1 # Clear out Package index if the timestamp in PackageIndexMark == 0 cu.execute("""DELETE FROM PackageIndex WHERE (SELECT mark FROM PackageIndexMark) = 0""") cu.execute("""SELECT Projects.projectId, troveName, Commits.version, timestamp FROM Commits LEFT JOIN Projects ON Commits.projectId=Projects.projectId WHERE (troveName NOT LIKE '%:%' OR troveName LIKE '%:source') AND NOT hidden AND NOT disabled AND timestamp >= (SELECT mark FROM PackageIndexMark)""") res = cu.fetchall() numPkgs = len(res) - 1 if numPkgs > 0: self.log.info("Indexing %d packages" % numPkgs) else: self.log.info("Package index is up to date") packageDict = {} labelMap = {} for projectId, troveName, verStr, timeStamp in res: troveEntry = packageDict.setdefault(troveName, {}) version = versions.VersionFromString(verStr) label = str(version.branch().label()) versionList = troveEntry.setdefault(label, []) version = version.copy() version.resetTimeStamps(timeStamp) versionList.append(version) labelMap[label] = projectId packageIndex = [] for troveName in packageDict: for label in packageDict[troveName]: packageIndex.append((labelMap[label], troveName, max(packageDict[troveName][label]))) inserts = [] updates = [] for projectId, troveName, version in packageIndex: cu.execute("""SELECT pkgId, version FROM PackageIndex WHERE projectId=? 
AND name=?""", projectId, troveName) res = [x[0] for x in cu.fetchall() if \ versions.VersionFromString(x[1]).branch().label() == \ version.branch().label()] label = version.branch().label() serverName = label.getHost() branchName = label.getNamespace() + ":" + label.getLabel() isSource = int(troveName.endswith(':source')) if not res: inserts.append((projectId, troveName, str(version), serverName, branchName, isSource)) else: pkgId = res[0] updates.append((projectId, troveName, str(version), serverName, branchName, isSource, pkgId)) if inserts or updates: self.db.transaction() if inserts: cu.executemany("""INSERT INTO PackageIndex (projectId, name, version, serverName, branchName, isSource) VALUES (?, ?, ?, ?, ?, ?)""", inserts) if updates: cu.executemany("""UPDATE PackageIndex SET projectId=?, name=?, version=?, serverName=?, branchName=?, isSource=? WHERE pkgId=?""", updates) self.db.commit() cu.execute("UPDATE PackageIndexMark SET mark=?", int(newMark)) except Exception, e: self.log.error("Error occurred: %s" % str(e)) self.db.rollback() exitcode = 1 raise
exitcode = 0 self.db.commit() return exitcode class UpdatePackageIndexExternal(PackageIndexer): logFileName = 'package-index-external.log' def updateMark(self): # This code exists to overcome the situation where there are no # internal projects on the rBuilder, or there are internal projects but # they haven't had any commits. internal package index code will delete # the package index if there is no mark or a mark of zero. this code # sets the mark to "1" to ensure no race conditions exist # sorrounding the setting of the mark. cu = self.db.transaction() cu.execute("SELECT COUNT(*) FROM Projects WHERE NOT external") internalProjects = cu.fetchone()[0] cu.execute("SELECT COUNT(*) FROM Commits") commits = cu.fetchone()[0] if not internalProjects or not commits: cu.execute("DELETE FROM PackageIndexMark") cu.execute("INSERT INTO PackageIndexMark ( mark ) VALUES ( 1 )") self.db.commit() else: self.db.rollback() def action(self, fqdn=None): self.log.info("Updating package index") self.db = dbstore.connect(self.cfg.dbPath, driver = self.cfg.dbDriver) self.db.connect() self.db.loadSchema() cu = self.db.cursor() labelsTable = projects.LabelsTable(self.db, self.cfg) self.db.commit() cu = self.db.cursor() sql = """SELECT projectId, fqdn, EXISTS(SELECT * FROM InboundMirrors WHERE projectId=targetProjectId) AS localMirror FROM Projects WHERE external AND NOT hidden AND NOT disabled""" args = [] if fqdn: sql += " AND fqdn = ?" 
args.append(fqdn) cu.execute(sql, args) labels = {} projectIds = {} netclients = {} hasErrors = False for projectId, hostname, localMirror in cu.fetchall(): try: self.log.info("Retrieving labels from %s...", hostname) l, repMap, userMap, entMap = labelsTable.getLabelsForProject(projectId) hostname = repMap.keys()[0] labels[hostname] = versions.Label(l.keys()[0]) projectIds[hostname] = projectId ccfg = conarycfg.ConaryConfiguration(False) ccfg.configLine('proxyMap * conarys://localhost') ccfg.root = ccfg.dbPath = ':memory:' ccfg.repositoryMap = repMap if not localMirror: for host, authInfo in userMap: ccfg.user.addServerGlob(host, authInfo[0], authInfo[1]) for host, entitlement in entMap: ccfg.entitlement.addEntitlement(host, entitlement[1]) ccfg = helperfuncs.configureClientProxies(ccfg, self.cfg.useInternalConaryProxy, self.cfg.proxy, self.cfg.getInternalProxies()) repos = conaryclient.ConaryClient(ccfg).getRepos() netclients[hostname] = repos except Exception, e: self.log.error("Exception from %s", hostname) self.log.error(str(e)) hasErrors = True rows = [] for host in netclients.keys(): newRows = 0 self.log.info("Retrieving trove list from %s...", host) try: names = netclients[host].troveNamesOnServer(host) names = dict((x, None) for x in names if ':' not in x or x.endswith(':source')) troves = netclients[host].getAllTroveLeaves(host, names) except repository.errors.OpenError, e: self.log.warning("unable to access %s: %s", host, str(e)) continue except repository.errors.InsufficientPermission, e: self.log.warning("unable to access %s: %s", host, str(e)) continue packageDict = {} for pkg in troves: troveEntry = packageDict.setdefault(pkg, {}) verList = troves[pkg].keys() for ver in verList: label = ver.branch().label() if label in hiddenLabels: continue versionList = troveEntry.setdefault(label, []) versionList.append(ver) for troveName in packageDict: for label in packageDict[troveName]: serverName = label.getHost() branchName = label.getNamespace() + ":" + 
label.getLabel() isSource = int(troveName.endswith(':source')) row = (projectIds[host], troveName, str(max(packageDict[troveName][label])), serverName, branchName, isSource) rows.append(row) newRows += 1 self.log.info("Retrieved %d trove%s from %s.", newRows, ((newRows != 1) and 's' or ''), host) self.log.info("Completed fetching %d trove%s.", len(rows), ((len(rows) != 1) and 's' or '')) self.log.info("Updating database...") placeholders = ', '.join('?' for x in projectIds) args = projectIds.values() cu.execute(""" SELECT projectId, name, version, pkgId FROM PackageIndex WHERE projectId IN (%s) """ % (placeholders,), args) troveLookup = dict((x[:3], x[3]) for x in cu) inserts = [] for row in rows: projectId, name, version, serverName, branchName, isSource = row pkgId = troveLookup.get((projectId, name, version), None) if not pkgId: inserts.append(row) st = time.time() if inserts: self.db.transaction() if inserts: cu.executemany(""" INSERT INTO PackageIndex (projectId, name, version, serverName, branchName, isSource) VALUES (?, ?, ?, ?, ?, ?)""", inserts) self.db.commit() if not hasErrors: self.updateMark() self.log.info("Database update complete, took %.2fs." % (time.time() - st)) else: self.log.info("Database update had errors. not updating the mark") return 0
else: self.log.info("Completed successfully: %d" % len(inserts))
random_line_split
pkgindexer.py
# # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from mint import config from mint import helperfuncs from mint.db import projects from mint.lib import scriptlibrary from conary import conaryclient from conary import conarycfg from conary import dbstore from conary import versions from conary.repository import repository import os import time hiddenLabels = [ versions.Label('conary.rpath.com@rpl:rpl1'), versions.Label('conary.rpath.com@ravenous:bugblatterbeast') ] class PackageIndexer(scriptlibrary.SingletonScript): db = None cfgPath = config.RBUILDER_CONFIG def __init__(self, aLockPath = scriptlibrary.DEFAULT_LOCKPATH, aMintServer=None): self.cfg = config.MintConfig() self.cfg.read(self.cfgPath) if self.logFileName: self.logPath = os.path.join(self.cfg.logPath, self.logFileName) scriptlibrary.SingletonScript.__init__(self, aLockPath) def cleanup(self): if self.db: self.db.close() class UpdatePackageIndex(PackageIndexer): logFileName = 'package-index.log' def action(self): self.log.info("Updating package index") self.db = dbstore.connect(self.cfg.dbPath, driver = self.cfg.dbDriver) self.db.loadSchema() cu = self.db.cursor() try: cu.execute('SELECT COUNT(*) FROM PackageIndexMark') if not cu.fetchone()[0]: cu.execute('INSERT INTO PackageIndexMark VALUES(0)') oldMark = 0 else: cu.execute("SELECT MAX(mark) FROM PackageIndexMark") oldMark = cu.fetchone()[0] cu.execute("SELECT COALESCE(MAX(timestamp), 0) FROM Commits") newMark = cu.fetchone()[0] # 
If the oldMark was 1, and there have been no comits (newMark is # 0), preserve the oldMark of 1, so that the external package # index does not get blown away. if newMark == 0 and oldMark == 1: newMark = 1 # Clear out Package index if the timestamp in PackageIndexMark == 0 cu.execute("""DELETE FROM PackageIndex WHERE (SELECT mark FROM PackageIndexMark) = 0""") cu.execute("""SELECT Projects.projectId, troveName, Commits.version, timestamp FROM Commits LEFT JOIN Projects ON Commits.projectId=Projects.projectId WHERE (troveName NOT LIKE '%:%' OR troveName LIKE '%:source') AND NOT hidden AND NOT disabled AND timestamp >= (SELECT mark FROM PackageIndexMark)""") res = cu.fetchall() numPkgs = len(res) - 1 if numPkgs > 0: self.log.info("Indexing %d packages" % numPkgs) else: self.log.info("Package index is up to date") packageDict = {} labelMap = {} for projectId, troveName, verStr, timeStamp in res: troveEntry = packageDict.setdefault(troveName, {}) version = versions.VersionFromString(verStr) label = str(version.branch().label()) versionList = troveEntry.setdefault(label, []) version = version.copy() version.resetTimeStamps(timeStamp) versionList.append(version) labelMap[label] = projectId packageIndex = [] for troveName in packageDict:
inserts = [] updates = [] for projectId, troveName, version in packageIndex: cu.execute("""SELECT pkgId, version FROM PackageIndex WHERE projectId=? AND name=?""", projectId, troveName) res = [x[0] for x in cu.fetchall() if \ versions.VersionFromString(x[1]).branch().label() == \ version.branch().label()] label = version.branch().label() serverName = label.getHost() branchName = label.getNamespace() + ":" + label.getLabel() isSource = int(troveName.endswith(':source')) if not res: inserts.append((projectId, troveName, str(version), serverName, branchName, isSource)) else: pkgId = res[0] updates.append((projectId, troveName, str(version), serverName, branchName, isSource, pkgId)) if inserts or updates: self.db.transaction() if inserts: cu.executemany("""INSERT INTO PackageIndex (projectId, name, version, serverName, branchName, isSource) VALUES (?, ?, ?, ?, ?, ?)""", inserts) if updates: cu.executemany("""UPDATE PackageIndex SET projectId=?, name=?, version=?, serverName=?, branchName=?, isSource=? WHERE pkgId=?""", updates) self.db.commit() cu.execute("UPDATE PackageIndexMark SET mark=?", int(newMark)) except Exception, e: self.log.error("Error occurred: %s" % str(e)) self.db.rollback() exitcode = 1 raise else: self.log.info("Completed successfully: %d" % len(inserts)) exitcode = 0 self.db.commit() return exitcode class UpdatePackageIndexExternal(PackageIndexer): logFileName = 'package-index-external.log' def updateMark(self): # This code exists to overcome the situation where there are no # internal projects on the rBuilder, or there are internal projects but # they haven't had any commits. internal package index code will delete # the package index if there is no mark or a mark of zero. this code # sets the mark to "1" to ensure no race conditions exist # sorrounding the setting of the mark. 
cu = self.db.transaction() cu.execute("SELECT COUNT(*) FROM Projects WHERE NOT external") internalProjects = cu.fetchone()[0] cu.execute("SELECT COUNT(*) FROM Commits") commits = cu.fetchone()[0] if not internalProjects or not commits: cu.execute("DELETE FROM PackageIndexMark") cu.execute("INSERT INTO PackageIndexMark ( mark ) VALUES ( 1 )") self.db.commit() else: self.db.rollback() def action(self, fqdn=None): self.log.info("Updating package index") self.db = dbstore.connect(self.cfg.dbPath, driver = self.cfg.dbDriver) self.db.connect() self.db.loadSchema() cu = self.db.cursor() labelsTable = projects.LabelsTable(self.db, self.cfg) self.db.commit() cu = self.db.cursor() sql = """SELECT projectId, fqdn, EXISTS(SELECT * FROM InboundMirrors WHERE projectId=targetProjectId) AS localMirror FROM Projects WHERE external AND NOT hidden AND NOT disabled""" args = [] if fqdn: sql += " AND fqdn = ?" args.append(fqdn) cu.execute(sql, args) labels = {} projectIds = {} netclients = {} hasErrors = False for projectId, hostname, localMirror in cu.fetchall(): try: self.log.info("Retrieving labels from %s...", hostname) l, repMap, userMap, entMap = labelsTable.getLabelsForProject(projectId) hostname = repMap.keys()[0] labels[hostname] = versions.Label(l.keys()[0]) projectIds[hostname] = projectId ccfg = conarycfg.ConaryConfiguration(False) ccfg.configLine('proxyMap * conarys://localhost') ccfg.root = ccfg.dbPath = ':memory:' ccfg.repositoryMap = repMap if not localMirror: for host, authInfo in userMap: ccfg.user.addServerGlob(host, authInfo[0], authInfo[1]) for host, entitlement in entMap: ccfg.entitlement.addEntitlement(host, entitlement[1]) ccfg = helperfuncs.configureClientProxies(ccfg, self.cfg.useInternalConaryProxy, self.cfg.proxy, self.cfg.getInternalProxies()) repos = conaryclient.ConaryClient(ccfg).getRepos() netclients[hostname] = repos except Exception, e: self.log.error("Exception from %s", hostname) self.log.error(str(e)) hasErrors = True rows = [] for host in 
netclients.keys(): newRows = 0 self.log.info("Retrieving trove list from %s...", host) try: names = netclients[host].troveNamesOnServer(host) names = dict((x, None) for x in names if ':' not in x or x.endswith(':source')) troves = netclients[host].getAllTroveLeaves(host, names) except repository.errors.OpenError, e: self.log.warning("unable to access %s: %s", host, str(e)) continue except repository.errors.InsufficientPermission, e: self.log.warning("unable to access %s: %s", host, str(e)) continue packageDict = {} for pkg in troves: troveEntry = packageDict.setdefault(pkg, {}) verList = troves[pkg].keys() for ver in verList: label = ver.branch().label() if label in hiddenLabels: continue versionList = troveEntry.setdefault(label, []) versionList.append(ver) for troveName in packageDict: for label in packageDict[troveName]: serverName = label.getHost() branchName = label.getNamespace() + ":" + label.getLabel() isSource = int(troveName.endswith(':source')) row = (projectIds[host], troveName, str(max(packageDict[troveName][label])), serverName, branchName, isSource) rows.append(row) newRows += 1 self.log.info("Retrieved %d trove%s from %s.", newRows, ((newRows != 1) and 's' or ''), host) self.log.info("Completed fetching %d trove%s.", len(rows), ((len(rows) != 1) and 's' or '')) self.log.info("Updating database...") placeholders = ', '.join('?' 
for x in projectIds) args = projectIds.values() cu.execute(""" SELECT projectId, name, version, pkgId FROM PackageIndex WHERE projectId IN (%s) """ % (placeholders,), args) troveLookup = dict((x[:3], x[3]) for x in cu) inserts = [] for row in rows: projectId, name, version, serverName, branchName, isSource = row pkgId = troveLookup.get((projectId, name, version), None) if not pkgId: inserts.append(row) st = time.time() if inserts: self.db.transaction() if inserts: cu.executemany(""" INSERT INTO PackageIndex (projectId, name, version, serverName, branchName, isSource) VALUES (?, ?, ?, ?, ?, ?)""", inserts) self.db.commit() if not hasErrors: self.updateMark() self.log.info("Database update complete, took %.2fs." % (time.time() - st)) else: self.log.info("Database update had errors. not updating the mark") return 0
for label in packageDict[troveName]: packageIndex.append((labelMap[label], troveName, max(packageDict[troveName][label])))
conditional_block
pkgindexer.py
# # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from mint import config from mint import helperfuncs from mint.db import projects from mint.lib import scriptlibrary from conary import conaryclient from conary import conarycfg from conary import dbstore from conary import versions from conary.repository import repository import os import time hiddenLabels = [ versions.Label('conary.rpath.com@rpl:rpl1'), versions.Label('conary.rpath.com@ravenous:bugblatterbeast') ] class PackageIndexer(scriptlibrary.SingletonScript): db = None cfgPath = config.RBUILDER_CONFIG def
(self, aLockPath = scriptlibrary.DEFAULT_LOCKPATH, aMintServer=None): self.cfg = config.MintConfig() self.cfg.read(self.cfgPath) if self.logFileName: self.logPath = os.path.join(self.cfg.logPath, self.logFileName) scriptlibrary.SingletonScript.__init__(self, aLockPath) def cleanup(self): if self.db: self.db.close() class UpdatePackageIndex(PackageIndexer): logFileName = 'package-index.log' def action(self): self.log.info("Updating package index") self.db = dbstore.connect(self.cfg.dbPath, driver = self.cfg.dbDriver) self.db.loadSchema() cu = self.db.cursor() try: cu.execute('SELECT COUNT(*) FROM PackageIndexMark') if not cu.fetchone()[0]: cu.execute('INSERT INTO PackageIndexMark VALUES(0)') oldMark = 0 else: cu.execute("SELECT MAX(mark) FROM PackageIndexMark") oldMark = cu.fetchone()[0] cu.execute("SELECT COALESCE(MAX(timestamp), 0) FROM Commits") newMark = cu.fetchone()[0] # If the oldMark was 1, and there have been no comits (newMark is # 0), preserve the oldMark of 1, so that the external package # index does not get blown away. 
if newMark == 0 and oldMark == 1: newMark = 1 # Clear out Package index if the timestamp in PackageIndexMark == 0 cu.execute("""DELETE FROM PackageIndex WHERE (SELECT mark FROM PackageIndexMark) = 0""") cu.execute("""SELECT Projects.projectId, troveName, Commits.version, timestamp FROM Commits LEFT JOIN Projects ON Commits.projectId=Projects.projectId WHERE (troveName NOT LIKE '%:%' OR troveName LIKE '%:source') AND NOT hidden AND NOT disabled AND timestamp >= (SELECT mark FROM PackageIndexMark)""") res = cu.fetchall() numPkgs = len(res) - 1 if numPkgs > 0: self.log.info("Indexing %d packages" % numPkgs) else: self.log.info("Package index is up to date") packageDict = {} labelMap = {} for projectId, troveName, verStr, timeStamp in res: troveEntry = packageDict.setdefault(troveName, {}) version = versions.VersionFromString(verStr) label = str(version.branch().label()) versionList = troveEntry.setdefault(label, []) version = version.copy() version.resetTimeStamps(timeStamp) versionList.append(version) labelMap[label] = projectId packageIndex = [] for troveName in packageDict: for label in packageDict[troveName]: packageIndex.append((labelMap[label], troveName, max(packageDict[troveName][label]))) inserts = [] updates = [] for projectId, troveName, version in packageIndex: cu.execute("""SELECT pkgId, version FROM PackageIndex WHERE projectId=? 
AND name=?""", projectId, troveName) res = [x[0] for x in cu.fetchall() if \ versions.VersionFromString(x[1]).branch().label() == \ version.branch().label()] label = version.branch().label() serverName = label.getHost() branchName = label.getNamespace() + ":" + label.getLabel() isSource = int(troveName.endswith(':source')) if not res: inserts.append((projectId, troveName, str(version), serverName, branchName, isSource)) else: pkgId = res[0] updates.append((projectId, troveName, str(version), serverName, branchName, isSource, pkgId)) if inserts or updates: self.db.transaction() if inserts: cu.executemany("""INSERT INTO PackageIndex (projectId, name, version, serverName, branchName, isSource) VALUES (?, ?, ?, ?, ?, ?)""", inserts) if updates: cu.executemany("""UPDATE PackageIndex SET projectId=?, name=?, version=?, serverName=?, branchName=?, isSource=? WHERE pkgId=?""", updates) self.db.commit() cu.execute("UPDATE PackageIndexMark SET mark=?", int(newMark)) except Exception, e: self.log.error("Error occurred: %s" % str(e)) self.db.rollback() exitcode = 1 raise else: self.log.info("Completed successfully: %d" % len(inserts)) exitcode = 0 self.db.commit() return exitcode class UpdatePackageIndexExternal(PackageIndexer): logFileName = 'package-index-external.log' def updateMark(self): # This code exists to overcome the situation where there are no # internal projects on the rBuilder, or there are internal projects but # they haven't had any commits. internal package index code will delete # the package index if there is no mark or a mark of zero. this code # sets the mark to "1" to ensure no race conditions exist # sorrounding the setting of the mark. 
cu = self.db.transaction() cu.execute("SELECT COUNT(*) FROM Projects WHERE NOT external") internalProjects = cu.fetchone()[0] cu.execute("SELECT COUNT(*) FROM Commits") commits = cu.fetchone()[0] if not internalProjects or not commits: cu.execute("DELETE FROM PackageIndexMark") cu.execute("INSERT INTO PackageIndexMark ( mark ) VALUES ( 1 )") self.db.commit() else: self.db.rollback() def action(self, fqdn=None): self.log.info("Updating package index") self.db = dbstore.connect(self.cfg.dbPath, driver = self.cfg.dbDriver) self.db.connect() self.db.loadSchema() cu = self.db.cursor() labelsTable = projects.LabelsTable(self.db, self.cfg) self.db.commit() cu = self.db.cursor() sql = """SELECT projectId, fqdn, EXISTS(SELECT * FROM InboundMirrors WHERE projectId=targetProjectId) AS localMirror FROM Projects WHERE external AND NOT hidden AND NOT disabled""" args = [] if fqdn: sql += " AND fqdn = ?" args.append(fqdn) cu.execute(sql, args) labels = {} projectIds = {} netclients = {} hasErrors = False for projectId, hostname, localMirror in cu.fetchall(): try: self.log.info("Retrieving labels from %s...", hostname) l, repMap, userMap, entMap = labelsTable.getLabelsForProject(projectId) hostname = repMap.keys()[0] labels[hostname] = versions.Label(l.keys()[0]) projectIds[hostname] = projectId ccfg = conarycfg.ConaryConfiguration(False) ccfg.configLine('proxyMap * conarys://localhost') ccfg.root = ccfg.dbPath = ':memory:' ccfg.repositoryMap = repMap if not localMirror: for host, authInfo in userMap: ccfg.user.addServerGlob(host, authInfo[0], authInfo[1]) for host, entitlement in entMap: ccfg.entitlement.addEntitlement(host, entitlement[1]) ccfg = helperfuncs.configureClientProxies(ccfg, self.cfg.useInternalConaryProxy, self.cfg.proxy, self.cfg.getInternalProxies()) repos = conaryclient.ConaryClient(ccfg).getRepos() netclients[hostname] = repos except Exception, e: self.log.error("Exception from %s", hostname) self.log.error(str(e)) hasErrors = True rows = [] for host in 
netclients.keys(): newRows = 0 self.log.info("Retrieving trove list from %s...", host) try: names = netclients[host].troveNamesOnServer(host) names = dict((x, None) for x in names if ':' not in x or x.endswith(':source')) troves = netclients[host].getAllTroveLeaves(host, names) except repository.errors.OpenError, e: self.log.warning("unable to access %s: %s", host, str(e)) continue except repository.errors.InsufficientPermission, e: self.log.warning("unable to access %s: %s", host, str(e)) continue packageDict = {} for pkg in troves: troveEntry = packageDict.setdefault(pkg, {}) verList = troves[pkg].keys() for ver in verList: label = ver.branch().label() if label in hiddenLabels: continue versionList = troveEntry.setdefault(label, []) versionList.append(ver) for troveName in packageDict: for label in packageDict[troveName]: serverName = label.getHost() branchName = label.getNamespace() + ":" + label.getLabel() isSource = int(troveName.endswith(':source')) row = (projectIds[host], troveName, str(max(packageDict[troveName][label])), serverName, branchName, isSource) rows.append(row) newRows += 1 self.log.info("Retrieved %d trove%s from %s.", newRows, ((newRows != 1) and 's' or ''), host) self.log.info("Completed fetching %d trove%s.", len(rows), ((len(rows) != 1) and 's' or '')) self.log.info("Updating database...") placeholders = ', '.join('?' 
for x in projectIds) args = projectIds.values() cu.execute(""" SELECT projectId, name, version, pkgId FROM PackageIndex WHERE projectId IN (%s) """ % (placeholders,), args) troveLookup = dict((x[:3], x[3]) for x in cu) inserts = [] for row in rows: projectId, name, version, serverName, branchName, isSource = row pkgId = troveLookup.get((projectId, name, version), None) if not pkgId: inserts.append(row) st = time.time() if inserts: self.db.transaction() if inserts: cu.executemany(""" INSERT INTO PackageIndex (projectId, name, version, serverName, branchName, isSource) VALUES (?, ?, ?, ?, ?, ?)""", inserts) self.db.commit() if not hasErrors: self.updateMark() self.log.info("Database update complete, took %.2fs." % (time.time() - st)) else: self.log.info("Database update had errors. not updating the mark") return 0
__init__
identifier_name
pkgindexer.py
# # Copyright (c) SAS Institute Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from mint import config from mint import helperfuncs from mint.db import projects from mint.lib import scriptlibrary from conary import conaryclient from conary import conarycfg from conary import dbstore from conary import versions from conary.repository import repository import os import time hiddenLabels = [ versions.Label('conary.rpath.com@rpl:rpl1'), versions.Label('conary.rpath.com@ravenous:bugblatterbeast') ] class PackageIndexer(scriptlibrary.SingletonScript): db = None cfgPath = config.RBUILDER_CONFIG def __init__(self, aLockPath = scriptlibrary.DEFAULT_LOCKPATH, aMintServer=None): self.cfg = config.MintConfig() self.cfg.read(self.cfgPath) if self.logFileName: self.logPath = os.path.join(self.cfg.logPath, self.logFileName) scriptlibrary.SingletonScript.__init__(self, aLockPath) def cleanup(self): if self.db: self.db.close() class UpdatePackageIndex(PackageIndexer): logFileName = 'package-index.log' def action(self): self.log.info("Updating package index") self.db = dbstore.connect(self.cfg.dbPath, driver = self.cfg.dbDriver) self.db.loadSchema() cu = self.db.cursor() try: cu.execute('SELECT COUNT(*) FROM PackageIndexMark') if not cu.fetchone()[0]: cu.execute('INSERT INTO PackageIndexMark VALUES(0)') oldMark = 0 else: cu.execute("SELECT MAX(mark) FROM PackageIndexMark") oldMark = cu.fetchone()[0] cu.execute("SELECT COALESCE(MAX(timestamp), 0) FROM Commits") newMark = cu.fetchone()[0] # 
If the oldMark was 1, and there have been no comits (newMark is # 0), preserve the oldMark of 1, so that the external package # index does not get blown away. if newMark == 0 and oldMark == 1: newMark = 1 # Clear out Package index if the timestamp in PackageIndexMark == 0 cu.execute("""DELETE FROM PackageIndex WHERE (SELECT mark FROM PackageIndexMark) = 0""") cu.execute("""SELECT Projects.projectId, troveName, Commits.version, timestamp FROM Commits LEFT JOIN Projects ON Commits.projectId=Projects.projectId WHERE (troveName NOT LIKE '%:%' OR troveName LIKE '%:source') AND NOT hidden AND NOT disabled AND timestamp >= (SELECT mark FROM PackageIndexMark)""") res = cu.fetchall() numPkgs = len(res) - 1 if numPkgs > 0: self.log.info("Indexing %d packages" % numPkgs) else: self.log.info("Package index is up to date") packageDict = {} labelMap = {} for projectId, troveName, verStr, timeStamp in res: troveEntry = packageDict.setdefault(troveName, {}) version = versions.VersionFromString(verStr) label = str(version.branch().label()) versionList = troveEntry.setdefault(label, []) version = version.copy() version.resetTimeStamps(timeStamp) versionList.append(version) labelMap[label] = projectId packageIndex = [] for troveName in packageDict: for label in packageDict[troveName]: packageIndex.append((labelMap[label], troveName, max(packageDict[troveName][label]))) inserts = [] updates = [] for projectId, troveName, version in packageIndex: cu.execute("""SELECT pkgId, version FROM PackageIndex WHERE projectId=? 
AND name=?""", projectId, troveName) res = [x[0] for x in cu.fetchall() if \ versions.VersionFromString(x[1]).branch().label() == \ version.branch().label()] label = version.branch().label() serverName = label.getHost() branchName = label.getNamespace() + ":" + label.getLabel() isSource = int(troveName.endswith(':source')) if not res: inserts.append((projectId, troveName, str(version), serverName, branchName, isSource)) else: pkgId = res[0] updates.append((projectId, troveName, str(version), serverName, branchName, isSource, pkgId)) if inserts or updates: self.db.transaction() if inserts: cu.executemany("""INSERT INTO PackageIndex (projectId, name, version, serverName, branchName, isSource) VALUES (?, ?, ?, ?, ?, ?)""", inserts) if updates: cu.executemany("""UPDATE PackageIndex SET projectId=?, name=?, version=?, serverName=?, branchName=?, isSource=? WHERE pkgId=?""", updates) self.db.commit() cu.execute("UPDATE PackageIndexMark SET mark=?", int(newMark)) except Exception, e: self.log.error("Error occurred: %s" % str(e)) self.db.rollback() exitcode = 1 raise else: self.log.info("Completed successfully: %d" % len(inserts)) exitcode = 0 self.db.commit() return exitcode class UpdatePackageIndexExternal(PackageIndexer): logFileName = 'package-index-external.log' def updateMark(self): # This code exists to overcome the situation where there are no # internal projects on the rBuilder, or there are internal projects but # they haven't had any commits. internal package index code will delete # the package index if there is no mark or a mark of zero. this code # sets the mark to "1" to ensure no race conditions exist # sorrounding the setting of the mark. 
cu = self.db.transaction() cu.execute("SELECT COUNT(*) FROM Projects WHERE NOT external") internalProjects = cu.fetchone()[0] cu.execute("SELECT COUNT(*) FROM Commits") commits = cu.fetchone()[0] if not internalProjects or not commits: cu.execute("DELETE FROM PackageIndexMark") cu.execute("INSERT INTO PackageIndexMark ( mark ) VALUES ( 1 )") self.db.commit() else: self.db.rollback() def action(self, fqdn=None):
self.log.info("Updating package index") self.db = dbstore.connect(self.cfg.dbPath, driver = self.cfg.dbDriver) self.db.connect() self.db.loadSchema() cu = self.db.cursor() labelsTable = projects.LabelsTable(self.db, self.cfg) self.db.commit() cu = self.db.cursor() sql = """SELECT projectId, fqdn, EXISTS(SELECT * FROM InboundMirrors WHERE projectId=targetProjectId) AS localMirror FROM Projects WHERE external AND NOT hidden AND NOT disabled""" args = [] if fqdn: sql += " AND fqdn = ?" args.append(fqdn) cu.execute(sql, args) labels = {} projectIds = {} netclients = {} hasErrors = False for projectId, hostname, localMirror in cu.fetchall(): try: self.log.info("Retrieving labels from %s...", hostname) l, repMap, userMap, entMap = labelsTable.getLabelsForProject(projectId) hostname = repMap.keys()[0] labels[hostname] = versions.Label(l.keys()[0]) projectIds[hostname] = projectId ccfg = conarycfg.ConaryConfiguration(False) ccfg.configLine('proxyMap * conarys://localhost') ccfg.root = ccfg.dbPath = ':memory:' ccfg.repositoryMap = repMap if not localMirror: for host, authInfo in userMap: ccfg.user.addServerGlob(host, authInfo[0], authInfo[1]) for host, entitlement in entMap: ccfg.entitlement.addEntitlement(host, entitlement[1]) ccfg = helperfuncs.configureClientProxies(ccfg, self.cfg.useInternalConaryProxy, self.cfg.proxy, self.cfg.getInternalProxies()) repos = conaryclient.ConaryClient(ccfg).getRepos() netclients[hostname] = repos except Exception, e: self.log.error("Exception from %s", hostname) self.log.error(str(e)) hasErrors = True rows = [] for host in netclients.keys(): newRows = 0 self.log.info("Retrieving trove list from %s...", host) try: names = netclients[host].troveNamesOnServer(host) names = dict((x, None) for x in names if ':' not in x or x.endswith(':source')) troves = netclients[host].getAllTroveLeaves(host, names) except repository.errors.OpenError, e: self.log.warning("unable to access %s: %s", host, str(e)) continue except 
repository.errors.InsufficientPermission, e: self.log.warning("unable to access %s: %s", host, str(e)) continue packageDict = {} for pkg in troves: troveEntry = packageDict.setdefault(pkg, {}) verList = troves[pkg].keys() for ver in verList: label = ver.branch().label() if label in hiddenLabels: continue versionList = troveEntry.setdefault(label, []) versionList.append(ver) for troveName in packageDict: for label in packageDict[troveName]: serverName = label.getHost() branchName = label.getNamespace() + ":" + label.getLabel() isSource = int(troveName.endswith(':source')) row = (projectIds[host], troveName, str(max(packageDict[troveName][label])), serverName, branchName, isSource) rows.append(row) newRows += 1 self.log.info("Retrieved %d trove%s from %s.", newRows, ((newRows != 1) and 's' or ''), host) self.log.info("Completed fetching %d trove%s.", len(rows), ((len(rows) != 1) and 's' or '')) self.log.info("Updating database...") placeholders = ', '.join('?' for x in projectIds) args = projectIds.values() cu.execute(""" SELECT projectId, name, version, pkgId FROM PackageIndex WHERE projectId IN (%s) """ % (placeholders,), args) troveLookup = dict((x[:3], x[3]) for x in cu) inserts = [] for row in rows: projectId, name, version, serverName, branchName, isSource = row pkgId = troveLookup.get((projectId, name, version), None) if not pkgId: inserts.append(row) st = time.time() if inserts: self.db.transaction() if inserts: cu.executemany(""" INSERT INTO PackageIndex (projectId, name, version, serverName, branchName, isSource) VALUES (?, ?, ?, ?, ?, ?)""", inserts) self.db.commit() if not hasErrors: self.updateMark() self.log.info("Database update complete, took %.2fs." % (time.time() - st)) else: self.log.info("Database update had errors. not updating the mark") return 0
identifier_body
libAIRSL3Data.py
""" ------------------------------------------------------------------------------ libAIRSL3Data.py ================= Author : Sylvie Dagoret-Campagne Date : November 21 2016 ----------------------------------------------------------------------------- """ import os import re import matplotlib as mpl import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap import numpy as np from pyhdf.SD import SD, SDC #LSST site Longitude_lsst = -70.7366833333333 # deg Latitude_lsst = -30.240741666666672 #deg Altitude_lsst = 2749.999999999238 #m #CTIO Site Longitude_ctio = -70.815 # deg Latitude_ctio = -30.165277777777778 #deg Altitude_ctio = 2214.9999999993697 #m # Cerro Paranal Longitude_paranal = -70.40300000000002 #deg Latitude_paranal = -24.625199999999996 #deg Altitude_paranal = 2635.0000000009704 #m # Observatoire de Haute Provence Longitude_ohp=5.71222222222 Latitude_ohp=43.9316666667 Altitude_ohp=650. #-------------------------------------------------------------------------- def ensure_dir(f): ''' ensure_dir : check if the directory f exist. If not, it is created ''' d = os.path.dirname(f) if not os.path.exists(f): os.makedirs(f) #----------------------------------------------------------------------------- def loc_ctio(): return(Longitude_ctio,Latitude_ctio,Altitude_ctio) def loc_lsst(): return(Longitude_lsst,Latitude_lsst,Altitude_lsst) def loc_ohp(): return(Longitude_lsst,Latitude_lsst,Altitude_lsst) def loc_none(): return(0,0,0) def observatory_location(obs): if obs== 'ctio':
elif obs=='lsst': loc=loc_lsst() elif obs=='ohp': loc=loc_ohp() else: loc=loc_none() return loc #--------------------------------------------------------------------------------- def GetData(file,datafield): ''' GetData(file,datafield) : read the data labeled datafield in file ================================================================= Retrieve data from a HDF file input : ------ file : name of input file datafield : label of required data field output: ------ data3D : output array ''' hdf = SD(file, SDC.READ) data3D=hdf.select(datafield) return data3D #----------------------------------------------------------------------------- #-------------------------------------------------------------------------------- def AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax): ''' AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax) ================================================== Select an area input: ------ X,Y : Longitude and lattitude 2D array data : Data array LongMin,LongMax,LatMin,LatMax : Longitude and Latitude boundaries output: ------- Xsel,Ysel : Longitude and lattitude 2D array of selected zone extracted_data : data array selected ''' flags_long=np.logical_and(X>=LongMin, X<=LongMax) # flags in X where are the selected longitudes flags_lat=np.logical_and(Y>=LatMin, Y<=LatMax) # flags in Y where are the selected longitudes flags_longlat=np.logical_and(flags_long,flags_lat) # flags where the region is selected in the long-lat matrix (selected_lat_indexes,selected_long_indexes)=np.where(flags_longlat==True) # list of indexes selected_long=longitude[:,selected_long_indexes] # all selected longitudes selected_lat=latitude[selected_lat_indexes,:] # all selected latitudes min_long_index=np.min(selected_long_indexes) max_long_index=np.max(selected_long_indexes) min_lat_index=np.min(selected_lat_indexes) max_lat_index=np.max(selected_lat_indexes) # output extracted_data=data[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the data 
Xsel=X[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the Long Ysel=Y[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the lat return Xsel,Ysel,extracted_data #--------------------------------------------------------------------------------- #--------------------------------------------------------------------------------- def SelectBin(X,Y,data,Long0,Lat0,DLong=1.0,DLat=1.0): ''' SelectBin(X,Y,data,Long0,Lat0,DLong=1.0,DLat=1.0) ================================================= Select one bin input: ----- X,Y : Longitude and Latitude 2D array data : 2D array of data Long0,Lat0 : The Longitude and Latitude of the bin DLong,DLat : The angular bin width output: ------ sel_min_long_index,sel_min_lat_index : index of selected bin extracted_data : data of the selected bin ''' sel_flags_long=np.logical_and(X>=Long0-float(DLong)/2., X<=Long0+float(DLong)/2.) # flags in X where are the selected longitudes sel_flags_lat=np.logical_and(Y>=Lat0-float(DLat)/2., Y<=Lat0+float(DLat)/2.) 
# flags in Y where are the selected longitudes sel_flags_longlat=np.logical_and(sel_flags_long,sel_flags_lat) # flags where the region is selected in the long-lat matrix (selected_lat_indexes,selected_long_indexes)=np.where(sel_flags_longlat==True) # list of indexes selected_X=X[:,selected_long_indexes] # all selected longitudes selected_Y=Y[selected_lat_indexes,:] sel_min_long_index=np.min(selected_long_indexes) sel_max_long_index=np.max(selected_long_indexes) sel_min_lat_index=np.min(selected_lat_indexes) sel_max_lat_index=np.max(selected_lat_indexes) extracted_data=data[sel_min_lat_index:sel_max_lat_index+1,sel_min_long_index:sel_max_long_index+1] # extract the data return sel_min_long_index,sel_min_lat_index,extracted_data[0][0] #--------------------------------------------------------------------------------- #--------------------------------------------------------------------------------- def PlotData(X,Y,data,sizex=8,sizey=8,labelx='longitude',labely='latitude',labelz='Unit',title=''): ''' PlotData(X,Y,data,sizex=8,sizey=8,labelx='longitude',labely='latitude',labelz='Unit',title='') ============================================================================================== Plot in matplotlib the 2D array of data input: ------ X,Y : 2D array of lontitude and latitude data : Data array sizex,sizey : size of figure labelx,labely,labelz,title : labels of axis and title output: figure in matplotlib ''' fig=plt.figure(figsize=(sizex,sizey)) im = plt.pcolormesh(X,Y,data) # im = plt.pcolormesh(X,Y,data, cmap='rainbow') cbar=plt.colorbar(im, orientation='vertical') cbar.set_label(labelz) Xmin=X.min() Xmax=X.max() Ymin=Y.min() Ymax=Y.max() plt.axis([Xmin, Xmax,Ymin,Ymax]) plt.xlabel(labelx) plt.ylabel(labely) plt.title(title) #plt.tight_layout() plt.show() #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------ 
#------------------------------------------------------------------------------ if __name__ == "__main__": DATAFIELD_NAME = 'TotO3_D' DATAFIELD_UNIT = DATAFIELD_NAME+' (Ozone:Db) ' os.environ["HDFEOS_ZOO_DIR"] = "/Users/dagoret-campagnesylvie/MacOsX/LSST/MyWork/GitHub/NASA_AIRS_AQUA_DATA/AIRH3STM/2016/h4" # If a certain environment variable is set, look there for the input # file, otherwise look in the current directory. #hdffile = 'AIRS.2002.08.01.L3.RetStd_H031.v4.0.21.0.G06104133732.hdf' hdffile = 'AIRS.2016.01.01.L3.RetStd031.v6.0.31.0.G16034171018.hdf' #hdffile = 'AIRS.2016.02.01.L3.RetStd029.v6.0.31.0.G16063171358.hdf' #hdffile = 'AIRS.2016.03.01.L3.RetStd031.v6.0.31.0.G16095175654.hdf' #hdffile = 'AIRS.2016.04.01.L3.RetStd030.v6.0.31.0.G16122185324.hdf' #hdffile = 'AIRS.2016.05.01.L3.RetStd031.v6.0.31.0.G16153184258.hdf' #hdffile = 'AIRS.2016.06.01.L3.RetStd030.v6.0.31.0.G16189154115.hdf' #hdffile = 'AIRS.2016.07.01.L3.RetStd031.v6.0.31.0.G16223152110.hdf' #hdffile = 'AIRS.2016.08.01.L3.RetStd031.v6.0.31.0.G16245202845.hdf' #hdffile = 'AIRS.2016.09.01.L3.RetStd030.v6.0.31.0.G16281134124.hdf' FILE_NAME= hdffile base_filename=os.path.basename(FILE_NAME).split('.hdf')[0] p = re.compile('[.]') root_filename=p.sub('_',base_filename) rootimg_dir=os.path.join('test_images',root_filename) try: FILE_NAME = os.path.join(os.environ['HDFEOS_ZOO_DIR'], hdffile) except KeyError: pass data3D=GetData(FILE_NAME,DATAFIELD_NAME) data= data3D[:,:] ## Ozone has no additional dimensions lat = GetData(FILE_NAME,'Latitude') latitude = lat[:,:] lon = GetData(FILE_NAME,'Longitude') longitude = lon[:,:] # Handle fill value. attrs = data3D.attributes(full=1) fillvalue=attrs["_FillValue"] # fillvalue[0] is the attribute value. 
fv = fillvalue[0] data[data == fv] = np.nan data = np.ma.masked_array(data, np.isnan(data)) # Plot world data #------------------------- PlotData(longitude,latitude,data,7,3.5,title=base_filename,labelz=DATAFIELD_UNIT) # Select area LongMin=-100 LongMax=-30 LatMin=-55 LatMax=15 X=longitude Y=latitude (Xsel,Ysel,extracted_data) = AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax) # plot area #------------ PlotData(Xsel,Ysel,extracted_data,6,6,title=base_filename,labelz=DATAFIELD_UNIT) #LSST site Longitude_lsst = -70.7366833333333 # deg Latitude_lsst = -30.240741666666672 #deg Altitude_lsst = 2749.999999999238 #m #CTIO Site Longitude_ctio = -70.815 # deg Latitude_ctio = -30.165277777777778 #deg Altitude_ctio = 2214.9999999993697 #m # Cerro Paranal Longitude_paranal = -70.40300000000002 #deg Latitude_paranal = -24.625199999999996 #deg Altitude_paranal = 2635.0000000009704 #m # Observatoire de Haute Provence Longitude_ohp=5.71222222222 Latitude_ohp=43.9316666667 Altitude_ohp=650. # Select one bin #----------------- (ctio_min_long_index, ctio_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_ctio,Latitude_ctio) print('ctio_min_lat_index=',ctio_min_lat_index) print('ctio_min_long_index=',ctio_min_long_index) print('ctio_data = ',extrdata,DATAFIELD_UNIT) (lsst_min_long_index, lsst_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_lsst,Latitude_lsst) print('lsst_min_lat_index=',lsst_min_lat_index) print('lsst_min_long_index=',lsst_min_long_index) print('lsst_data = ',extrdata,DATAFIELD_UNIT) (ohp_min_long_index, ohp_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_ohp,Latitude_ohp) print('ohp_min_lat_index=',ohp_min_lat_index) print('ohp_min_long_index=',ohp_min_long_index) print('ohp_data = ',extrdata,DATAFIELD_UNIT) #---------------------------------------------------------------------------------
loc=loc_ctio()
conditional_block
libAIRSL3Data.py
""" ------------------------------------------------------------------------------ libAIRSL3Data.py ================= Author : Sylvie Dagoret-Campagne Date : November 21 2016 ----------------------------------------------------------------------------- """ import os import re import matplotlib as mpl import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap import numpy as np from pyhdf.SD import SD, SDC #LSST site Longitude_lsst = -70.7366833333333 # deg Latitude_lsst = -30.240741666666672 #deg Altitude_lsst = 2749.999999999238 #m #CTIO Site Longitude_ctio = -70.815 # deg Latitude_ctio = -30.165277777777778 #deg Altitude_ctio = 2214.9999999993697 #m # Cerro Paranal Longitude_paranal = -70.40300000000002 #deg Latitude_paranal = -24.625199999999996 #deg Altitude_paranal = 2635.0000000009704 #m # Observatoire de Haute Provence Longitude_ohp=5.71222222222 Latitude_ohp=43.9316666667 Altitude_ohp=650. #-------------------------------------------------------------------------- def ensure_dir(f): ''' ensure_dir : check if the directory f exist. 
If not, it is created ''' d = os.path.dirname(f) if not os.path.exists(f): os.makedirs(f) #----------------------------------------------------------------------------- def loc_ctio(): return(Longitude_ctio,Latitude_ctio,Altitude_ctio) def loc_lsst(): return(Longitude_lsst,Latitude_lsst,Altitude_lsst) def loc_ohp(): return(Longitude_lsst,Latitude_lsst,Altitude_lsst) def loc_none(): return(0,0,0) def observatory_location(obs): if obs== 'ctio': loc=loc_ctio() elif obs=='lsst': loc=loc_lsst() elif obs=='ohp': loc=loc_ohp() else: loc=loc_none() return loc #--------------------------------------------------------------------------------- def GetData(file,datafield): ''' GetData(file,datafield) : read the data labeled datafield in file ================================================================= Retrieve data from a HDF file input : ------ file : name of input file datafield : label of required data field output: ------ data3D : output array ''' hdf = SD(file, SDC.READ) data3D=hdf.select(datafield) return data3D #----------------------------------------------------------------------------- #-------------------------------------------------------------------------------- def AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax): ''' AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax) ================================================== Select an area input: ------ X,Y : Longitude and lattitude 2D array data : Data array LongMin,LongMax,LatMin,LatMax : Longitude and Latitude boundaries output: ------- Xsel,Ysel : Longitude and lattitude 2D array of selected zone extracted_data : data array selected ''' flags_long=np.logical_and(X>=LongMin, X<=LongMax) # flags in X where are the selected longitudes flags_lat=np.logical_and(Y>=LatMin, Y<=LatMax) # flags in Y where are the selected longitudes flags_longlat=np.logical_and(flags_long,flags_lat) # flags where the region is selected in the long-lat matrix (selected_lat_indexes,selected_long_indexes)=np.where(flags_longlat==True) # 
list of indexes selected_long=longitude[:,selected_long_indexes] # all selected longitudes selected_lat=latitude[selected_lat_indexes,:] # all selected latitudes min_long_index=np.min(selected_long_indexes) max_long_index=np.max(selected_long_indexes) min_lat_index=np.min(selected_lat_indexes) max_lat_index=np.max(selected_lat_indexes) # output extracted_data=data[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the data Xsel=X[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the Long Ysel=Y[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the lat return Xsel,Ysel,extracted_data #--------------------------------------------------------------------------------- #--------------------------------------------------------------------------------- def SelectBin(X,Y,data,Long0,Lat0,DLong=1.0,DLat=1.0): ''' SelectBin(X,Y,data,Long0,Lat0,DLong=1.0,DLat=1.0) ================================================= Select one bin input: ----- X,Y : Longitude and Latitude 2D array data : 2D array of data Long0,Lat0 : The Longitude and Latitude of the bin DLong,DLat : The angular bin width output: ------ sel_min_long_index,sel_min_lat_index : index of selected bin extracted_data : data of the selected bin ''' sel_flags_long=np.logical_and(X>=Long0-float(DLong)/2., X<=Long0+float(DLong)/2.) # flags in X where are the selected longitudes sel_flags_lat=np.logical_and(Y>=Lat0-float(DLat)/2., Y<=Lat0+float(DLat)/2.) 
# flags in Y where are the selected longitudes sel_flags_longlat=np.logical_and(sel_flags_long,sel_flags_lat) # flags where the region is selected in the long-lat matrix (selected_lat_indexes,selected_long_indexes)=np.where(sel_flags_longlat==True) # list of indexes selected_X=X[:,selected_long_indexes] # all selected longitudes selected_Y=Y[selected_lat_indexes,:] sel_min_long_index=np.min(selected_long_indexes) sel_max_long_index=np.max(selected_long_indexes) sel_min_lat_index=np.min(selected_lat_indexes) sel_max_lat_index=np.max(selected_lat_indexes) extracted_data=data[sel_min_lat_index:sel_max_lat_index+1,sel_min_long_index:sel_max_long_index+1] # extract the data return sel_min_long_index,sel_min_lat_index,extracted_data[0][0] #--------------------------------------------------------------------------------- #--------------------------------------------------------------------------------- def
(X,Y,data,sizex=8,sizey=8,labelx='longitude',labely='latitude',labelz='Unit',title=''): ''' PlotData(X,Y,data,sizex=8,sizey=8,labelx='longitude',labely='latitude',labelz='Unit',title='') ============================================================================================== Plot in matplotlib the 2D array of data input: ------ X,Y : 2D array of lontitude and latitude data : Data array sizex,sizey : size of figure labelx,labely,labelz,title : labels of axis and title output: figure in matplotlib ''' fig=plt.figure(figsize=(sizex,sizey)) im = plt.pcolormesh(X,Y,data) # im = plt.pcolormesh(X,Y,data, cmap='rainbow') cbar=plt.colorbar(im, orientation='vertical') cbar.set_label(labelz) Xmin=X.min() Xmax=X.max() Ymin=Y.min() Ymax=Y.max() plt.axis([Xmin, Xmax,Ymin,Ymax]) plt.xlabel(labelx) plt.ylabel(labely) plt.title(title) #plt.tight_layout() plt.show() #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ if __name__ == "__main__": DATAFIELD_NAME = 'TotO3_D' DATAFIELD_UNIT = DATAFIELD_NAME+' (Ozone:Db) ' os.environ["HDFEOS_ZOO_DIR"] = "/Users/dagoret-campagnesylvie/MacOsX/LSST/MyWork/GitHub/NASA_AIRS_AQUA_DATA/AIRH3STM/2016/h4" # If a certain environment variable is set, look there for the input # file, otherwise look in the current directory. 
#hdffile = 'AIRS.2002.08.01.L3.RetStd_H031.v4.0.21.0.G06104133732.hdf' hdffile = 'AIRS.2016.01.01.L3.RetStd031.v6.0.31.0.G16034171018.hdf' #hdffile = 'AIRS.2016.02.01.L3.RetStd029.v6.0.31.0.G16063171358.hdf' #hdffile = 'AIRS.2016.03.01.L3.RetStd031.v6.0.31.0.G16095175654.hdf' #hdffile = 'AIRS.2016.04.01.L3.RetStd030.v6.0.31.0.G16122185324.hdf' #hdffile = 'AIRS.2016.05.01.L3.RetStd031.v6.0.31.0.G16153184258.hdf' #hdffile = 'AIRS.2016.06.01.L3.RetStd030.v6.0.31.0.G16189154115.hdf' #hdffile = 'AIRS.2016.07.01.L3.RetStd031.v6.0.31.0.G16223152110.hdf' #hdffile = 'AIRS.2016.08.01.L3.RetStd031.v6.0.31.0.G16245202845.hdf' #hdffile = 'AIRS.2016.09.01.L3.RetStd030.v6.0.31.0.G16281134124.hdf' FILE_NAME= hdffile base_filename=os.path.basename(FILE_NAME).split('.hdf')[0] p = re.compile('[.]') root_filename=p.sub('_',base_filename) rootimg_dir=os.path.join('test_images',root_filename) try: FILE_NAME = os.path.join(os.environ['HDFEOS_ZOO_DIR'], hdffile) except KeyError: pass data3D=GetData(FILE_NAME,DATAFIELD_NAME) data= data3D[:,:] ## Ozone has no additional dimensions lat = GetData(FILE_NAME,'Latitude') latitude = lat[:,:] lon = GetData(FILE_NAME,'Longitude') longitude = lon[:,:] # Handle fill value. attrs = data3D.attributes(full=1) fillvalue=attrs["_FillValue"] # fillvalue[0] is the attribute value. 
fv = fillvalue[0] data[data == fv] = np.nan data = np.ma.masked_array(data, np.isnan(data)) # Plot world data #------------------------- PlotData(longitude,latitude,data,7,3.5,title=base_filename,labelz=DATAFIELD_UNIT) # Select area LongMin=-100 LongMax=-30 LatMin=-55 LatMax=15 X=longitude Y=latitude (Xsel,Ysel,extracted_data) = AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax) # plot area #------------ PlotData(Xsel,Ysel,extracted_data,6,6,title=base_filename,labelz=DATAFIELD_UNIT) #LSST site Longitude_lsst = -70.7366833333333 # deg Latitude_lsst = -30.240741666666672 #deg Altitude_lsst = 2749.999999999238 #m #CTIO Site Longitude_ctio = -70.815 # deg Latitude_ctio = -30.165277777777778 #deg Altitude_ctio = 2214.9999999993697 #m # Cerro Paranal Longitude_paranal = -70.40300000000002 #deg Latitude_paranal = -24.625199999999996 #deg Altitude_paranal = 2635.0000000009704 #m # Observatoire de Haute Provence Longitude_ohp=5.71222222222 Latitude_ohp=43.9316666667 Altitude_ohp=650. # Select one bin #----------------- (ctio_min_long_index, ctio_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_ctio,Latitude_ctio) print('ctio_min_lat_index=',ctio_min_lat_index) print('ctio_min_long_index=',ctio_min_long_index) print('ctio_data = ',extrdata,DATAFIELD_UNIT) (lsst_min_long_index, lsst_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_lsst,Latitude_lsst) print('lsst_min_lat_index=',lsst_min_lat_index) print('lsst_min_long_index=',lsst_min_long_index) print('lsst_data = ',extrdata,DATAFIELD_UNIT) (ohp_min_long_index, ohp_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_ohp,Latitude_ohp) print('ohp_min_lat_index=',ohp_min_lat_index) print('ohp_min_long_index=',ohp_min_long_index) print('ohp_data = ',extrdata,DATAFIELD_UNIT) #---------------------------------------------------------------------------------
PlotData
identifier_name
libAIRSL3Data.py
""" ------------------------------------------------------------------------------ libAIRSL3Data.py ================= Author : Sylvie Dagoret-Campagne Date : November 21 2016 ----------------------------------------------------------------------------- """ import os import re import matplotlib as mpl import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap import numpy as np from pyhdf.SD import SD, SDC #LSST site Longitude_lsst = -70.7366833333333 # deg Latitude_lsst = -30.240741666666672 #deg Altitude_lsst = 2749.999999999238 #m #CTIO Site Longitude_ctio = -70.815 # deg Latitude_ctio = -30.165277777777778 #deg Altitude_ctio = 2214.9999999993697 #m # Cerro Paranal Longitude_paranal = -70.40300000000002 #deg Latitude_paranal = -24.625199999999996 #deg Altitude_paranal = 2635.0000000009704 #m # Observatoire de Haute Provence Longitude_ohp=5.71222222222 Latitude_ohp=43.9316666667 Altitude_ohp=650. #-------------------------------------------------------------------------- def ensure_dir(f): ''' ensure_dir : check if the directory f exist. If not, it is created ''' d = os.path.dirname(f) if not os.path.exists(f): os.makedirs(f) #----------------------------------------------------------------------------- def loc_ctio(): return(Longitude_ctio,Latitude_ctio,Altitude_ctio) def loc_lsst(): return(Longitude_lsst,Latitude_lsst,Altitude_lsst) def loc_ohp(): return(Longitude_lsst,Latitude_lsst,Altitude_lsst) def loc_none(): return(0,0,0) def observatory_location(obs): if obs== 'ctio': loc=loc_ctio() elif obs=='lsst': loc=loc_lsst() elif obs=='ohp': loc=loc_ohp() else: loc=loc_none() return loc #--------------------------------------------------------------------------------- def GetData(file,datafield):
#----------------------------------------------------------------------------- #-------------------------------------------------------------------------------- def AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax): ''' AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax) ================================================== Select an area input: ------ X,Y : Longitude and lattitude 2D array data : Data array LongMin,LongMax,LatMin,LatMax : Longitude and Latitude boundaries output: ------- Xsel,Ysel : Longitude and lattitude 2D array of selected zone extracted_data : data array selected ''' flags_long=np.logical_and(X>=LongMin, X<=LongMax) # flags in X where are the selected longitudes flags_lat=np.logical_and(Y>=LatMin, Y<=LatMax) # flags in Y where are the selected longitudes flags_longlat=np.logical_and(flags_long,flags_lat) # flags where the region is selected in the long-lat matrix (selected_lat_indexes,selected_long_indexes)=np.where(flags_longlat==True) # list of indexes selected_long=longitude[:,selected_long_indexes] # all selected longitudes selected_lat=latitude[selected_lat_indexes,:] # all selected latitudes min_long_index=np.min(selected_long_indexes) max_long_index=np.max(selected_long_indexes) min_lat_index=np.min(selected_lat_indexes) max_lat_index=np.max(selected_lat_indexes) # output extracted_data=data[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the data Xsel=X[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the Long Ysel=Y[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the lat return Xsel,Ysel,extracted_data #--------------------------------------------------------------------------------- #--------------------------------------------------------------------------------- def SelectBin(X,Y,data,Long0,Lat0,DLong=1.0,DLat=1.0): ''' SelectBin(X,Y,data,Long0,Lat0,DLong=1.0,DLat=1.0) ================================================= Select one bin input: ----- X,Y : Longitude and 
Latitude 2D array data : 2D array of data Long0,Lat0 : The Longitude and Latitude of the bin DLong,DLat : The angular bin width output: ------ sel_min_long_index,sel_min_lat_index : index of selected bin extracted_data : data of the selected bin ''' sel_flags_long=np.logical_and(X>=Long0-float(DLong)/2., X<=Long0+float(DLong)/2.) # flags in X where are the selected longitudes sel_flags_lat=np.logical_and(Y>=Lat0-float(DLat)/2., Y<=Lat0+float(DLat)/2.) # flags in Y where are the selected longitudes sel_flags_longlat=np.logical_and(sel_flags_long,sel_flags_lat) # flags where the region is selected in the long-lat matrix (selected_lat_indexes,selected_long_indexes)=np.where(sel_flags_longlat==True) # list of indexes selected_X=X[:,selected_long_indexes] # all selected longitudes selected_Y=Y[selected_lat_indexes,:] sel_min_long_index=np.min(selected_long_indexes) sel_max_long_index=np.max(selected_long_indexes) sel_min_lat_index=np.min(selected_lat_indexes) sel_max_lat_index=np.max(selected_lat_indexes) extracted_data=data[sel_min_lat_index:sel_max_lat_index+1,sel_min_long_index:sel_max_long_index+1] # extract the data return sel_min_long_index,sel_min_lat_index,extracted_data[0][0] #--------------------------------------------------------------------------------- #--------------------------------------------------------------------------------- def PlotData(X,Y,data,sizex=8,sizey=8,labelx='longitude',labely='latitude',labelz='Unit',title=''): ''' PlotData(X,Y,data,sizex=8,sizey=8,labelx='longitude',labely='latitude',labelz='Unit',title='') ============================================================================================== Plot in matplotlib the 2D array of data input: ------ X,Y : 2D array of lontitude and latitude data : Data array sizex,sizey : size of figure labelx,labely,labelz,title : labels of axis and title output: figure in matplotlib ''' fig=plt.figure(figsize=(sizex,sizey)) im = plt.pcolormesh(X,Y,data) # im = plt.pcolormesh(X,Y,data, 
cmap='rainbow') cbar=plt.colorbar(im, orientation='vertical') cbar.set_label(labelz) Xmin=X.min() Xmax=X.max() Ymin=Y.min() Ymax=Y.max() plt.axis([Xmin, Xmax,Ymin,Ymax]) plt.xlabel(labelx) plt.ylabel(labely) plt.title(title) #plt.tight_layout() plt.show() #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ if __name__ == "__main__": DATAFIELD_NAME = 'TotO3_D' DATAFIELD_UNIT = DATAFIELD_NAME+' (Ozone:Db) ' os.environ["HDFEOS_ZOO_DIR"] = "/Users/dagoret-campagnesylvie/MacOsX/LSST/MyWork/GitHub/NASA_AIRS_AQUA_DATA/AIRH3STM/2016/h4" # If a certain environment variable is set, look there for the input # file, otherwise look in the current directory. #hdffile = 'AIRS.2002.08.01.L3.RetStd_H031.v4.0.21.0.G06104133732.hdf' hdffile = 'AIRS.2016.01.01.L3.RetStd031.v6.0.31.0.G16034171018.hdf' #hdffile = 'AIRS.2016.02.01.L3.RetStd029.v6.0.31.0.G16063171358.hdf' #hdffile = 'AIRS.2016.03.01.L3.RetStd031.v6.0.31.0.G16095175654.hdf' #hdffile = 'AIRS.2016.04.01.L3.RetStd030.v6.0.31.0.G16122185324.hdf' #hdffile = 'AIRS.2016.05.01.L3.RetStd031.v6.0.31.0.G16153184258.hdf' #hdffile = 'AIRS.2016.06.01.L3.RetStd030.v6.0.31.0.G16189154115.hdf' #hdffile = 'AIRS.2016.07.01.L3.RetStd031.v6.0.31.0.G16223152110.hdf' #hdffile = 'AIRS.2016.08.01.L3.RetStd031.v6.0.31.0.G16245202845.hdf' #hdffile = 'AIRS.2016.09.01.L3.RetStd030.v6.0.31.0.G16281134124.hdf' FILE_NAME= hdffile base_filename=os.path.basename(FILE_NAME).split('.hdf')[0] p = re.compile('[.]') root_filename=p.sub('_',base_filename) rootimg_dir=os.path.join('test_images',root_filename) try: FILE_NAME = os.path.join(os.environ['HDFEOS_ZOO_DIR'], hdffile) except KeyError: pass data3D=GetData(FILE_NAME,DATAFIELD_NAME) data= data3D[:,:] ## Ozone has no additional dimensions lat = GetData(FILE_NAME,'Latitude') latitude = lat[:,:] lon = 
GetData(FILE_NAME,'Longitude') longitude = lon[:,:] # Handle fill value. attrs = data3D.attributes(full=1) fillvalue=attrs["_FillValue"] # fillvalue[0] is the attribute value. fv = fillvalue[0] data[data == fv] = np.nan data = np.ma.masked_array(data, np.isnan(data)) # Plot world data #------------------------- PlotData(longitude,latitude,data,7,3.5,title=base_filename,labelz=DATAFIELD_UNIT) # Select area LongMin=-100 LongMax=-30 LatMin=-55 LatMax=15 X=longitude Y=latitude (Xsel,Ysel,extracted_data) = AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax) # plot area #------------ PlotData(Xsel,Ysel,extracted_data,6,6,title=base_filename,labelz=DATAFIELD_UNIT) #LSST site Longitude_lsst = -70.7366833333333 # deg Latitude_lsst = -30.240741666666672 #deg Altitude_lsst = 2749.999999999238 #m #CTIO Site Longitude_ctio = -70.815 # deg Latitude_ctio = -30.165277777777778 #deg Altitude_ctio = 2214.9999999993697 #m # Cerro Paranal Longitude_paranal = -70.40300000000002 #deg Latitude_paranal = -24.625199999999996 #deg Altitude_paranal = 2635.0000000009704 #m # Observatoire de Haute Provence Longitude_ohp=5.71222222222 Latitude_ohp=43.9316666667 Altitude_ohp=650. 
# Select one bin #----------------- (ctio_min_long_index, ctio_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_ctio,Latitude_ctio) print('ctio_min_lat_index=',ctio_min_lat_index) print('ctio_min_long_index=',ctio_min_long_index) print('ctio_data = ',extrdata,DATAFIELD_UNIT) (lsst_min_long_index, lsst_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_lsst,Latitude_lsst) print('lsst_min_lat_index=',lsst_min_lat_index) print('lsst_min_long_index=',lsst_min_long_index) print('lsst_data = ',extrdata,DATAFIELD_UNIT) (ohp_min_long_index, ohp_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_ohp,Latitude_ohp) print('ohp_min_lat_index=',ohp_min_lat_index) print('ohp_min_long_index=',ohp_min_long_index) print('ohp_data = ',extrdata,DATAFIELD_UNIT) #---------------------------------------------------------------------------------
''' GetData(file,datafield) : read the data labeled datafield in file ================================================================= Retrieve data from a HDF file input : ------ file : name of input file datafield : label of required data field output: ------ data3D : output array ''' hdf = SD(file, SDC.READ) data3D=hdf.select(datafield) return data3D
identifier_body
libAIRSL3Data.py
""" ------------------------------------------------------------------------------ libAIRSL3Data.py ================= Author : Sylvie Dagoret-Campagne Date : November 21 2016 ----------------------------------------------------------------------------- """ import os import re import matplotlib as mpl import matplotlib.pyplot as plt from mpl_toolkits.basemap import Basemap import numpy as np from pyhdf.SD import SD, SDC #LSST site Longitude_lsst = -70.7366833333333 # deg Latitude_lsst = -30.240741666666672 #deg Altitude_lsst = 2749.999999999238 #m #CTIO Site Longitude_ctio = -70.815 # deg Latitude_ctio = -30.165277777777778 #deg Altitude_ctio = 2214.9999999993697 #m # Cerro Paranal Longitude_paranal = -70.40300000000002 #deg Latitude_paranal = -24.625199999999996 #deg Altitude_paranal = 2635.0000000009704 #m # Observatoire de Haute Provence Longitude_ohp=5.71222222222 Latitude_ohp=43.9316666667 Altitude_ohp=650. #-------------------------------------------------------------------------- def ensure_dir(f): ''' ensure_dir : check if the directory f exist. 
If not, it is created ''' d = os.path.dirname(f) if not os.path.exists(f): os.makedirs(f) #----------------------------------------------------------------------------- def loc_ctio(): return(Longitude_ctio,Latitude_ctio,Altitude_ctio) def loc_lsst(): return(Longitude_lsst,Latitude_lsst,Altitude_lsst) def loc_ohp(): return(Longitude_lsst,Latitude_lsst,Altitude_lsst) def loc_none(): return(0,0,0) def observatory_location(obs): if obs== 'ctio': loc=loc_ctio() elif obs=='lsst': loc=loc_lsst() elif obs=='ohp': loc=loc_ohp() else: loc=loc_none() return loc #--------------------------------------------------------------------------------- def GetData(file,datafield): ''' GetData(file,datafield) : read the data labeled datafield in file ================================================================= Retrieve data from a HDF file input : ------ file : name of input file datafield : label of required data field output: ------ data3D : output array ''' hdf = SD(file, SDC.READ) data3D=hdf.select(datafield) return data3D #----------------------------------------------------------------------------- #-------------------------------------------------------------------------------- def AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax): ''' AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax) ================================================== Select an area input: ------ X,Y : Longitude and lattitude 2D array data : Data array LongMin,LongMax,LatMin,LatMax : Longitude and Latitude boundaries output: ------- Xsel,Ysel : Longitude and lattitude 2D array of selected zone extracted_data : data array selected ''' flags_long=np.logical_and(X>=LongMin, X<=LongMax) # flags in X where are the selected longitudes flags_lat=np.logical_and(Y>=LatMin, Y<=LatMax) # flags in Y where are the selected longitudes flags_longlat=np.logical_and(flags_long,flags_lat) # flags where the region is selected in the long-lat matrix (selected_lat_indexes,selected_long_indexes)=np.where(flags_longlat==True) # 
list of indexes selected_long=longitude[:,selected_long_indexes] # all selected longitudes selected_lat=latitude[selected_lat_indexes,:] # all selected latitudes min_long_index=np.min(selected_long_indexes) max_long_index=np.max(selected_long_indexes) min_lat_index=np.min(selected_lat_indexes) max_lat_index=np.max(selected_lat_indexes) # output extracted_data=data[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the data Xsel=X[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the Long Ysel=Y[min_lat_index:max_lat_index,min_long_index:max_long_index] # extract the lat return Xsel,Ysel,extracted_data #--------------------------------------------------------------------------------- #--------------------------------------------------------------------------------- def SelectBin(X,Y,data,Long0,Lat0,DLong=1.0,DLat=1.0): ''' SelectBin(X,Y,data,Long0,Lat0,DLong=1.0,DLat=1.0) ================================================= Select one bin input: ----- X,Y : Longitude and Latitude 2D array data : 2D array of data Long0,Lat0 : The Longitude and Latitude of the bin DLong,DLat : The angular bin width output: ------ sel_min_long_index,sel_min_lat_index : index of selected bin extracted_data : data of the selected bin ''' sel_flags_long=np.logical_and(X>=Long0-float(DLong)/2., X<=Long0+float(DLong)/2.) # flags in X where are the selected longitudes sel_flags_lat=np.logical_and(Y>=Lat0-float(DLat)/2., Y<=Lat0+float(DLat)/2.) # flags in Y where are the selected longitudes sel_flags_longlat=np.logical_and(sel_flags_long,sel_flags_lat) # flags where the region is selected in the long-lat matrix
selected_Y=Y[selected_lat_indexes,:] sel_min_long_index=np.min(selected_long_indexes) sel_max_long_index=np.max(selected_long_indexes) sel_min_lat_index=np.min(selected_lat_indexes) sel_max_lat_index=np.max(selected_lat_indexes) extracted_data=data[sel_min_lat_index:sel_max_lat_index+1,sel_min_long_index:sel_max_long_index+1] # extract the data return sel_min_long_index,sel_min_lat_index,extracted_data[0][0] #--------------------------------------------------------------------------------- #--------------------------------------------------------------------------------- def PlotData(X,Y,data,sizex=8,sizey=8,labelx='longitude',labely='latitude',labelz='Unit',title=''): ''' PlotData(X,Y,data,sizex=8,sizey=8,labelx='longitude',labely='latitude',labelz='Unit',title='') ============================================================================================== Plot in matplotlib the 2D array of data input: ------ X,Y : 2D array of lontitude and latitude data : Data array sizex,sizey : size of figure labelx,labely,labelz,title : labels of axis and title output: figure in matplotlib ''' fig=plt.figure(figsize=(sizex,sizey)) im = plt.pcolormesh(X,Y,data) # im = plt.pcolormesh(X,Y,data, cmap='rainbow') cbar=plt.colorbar(im, orientation='vertical') cbar.set_label(labelz) Xmin=X.min() Xmax=X.max() Ymin=Y.min() Ymax=Y.max() plt.axis([Xmin, Xmax,Ymin,Ymax]) plt.xlabel(labelx) plt.ylabel(labely) plt.title(title) #plt.tight_layout() plt.show() #------------------------------------------------------------------------------------ #------------------------------------------------------------------------------ #------------------------------------------------------------------------------ if __name__ == "__main__": DATAFIELD_NAME = 'TotO3_D' DATAFIELD_UNIT = DATAFIELD_NAME+' (Ozone:Db) ' os.environ["HDFEOS_ZOO_DIR"] = "/Users/dagoret-campagnesylvie/MacOsX/LSST/MyWork/GitHub/NASA_AIRS_AQUA_DATA/AIRH3STM/2016/h4" # If a certain environment variable is set, look there for the input 
# file, otherwise look in the current directory. #hdffile = 'AIRS.2002.08.01.L3.RetStd_H031.v4.0.21.0.G06104133732.hdf' hdffile = 'AIRS.2016.01.01.L3.RetStd031.v6.0.31.0.G16034171018.hdf' #hdffile = 'AIRS.2016.02.01.L3.RetStd029.v6.0.31.0.G16063171358.hdf' #hdffile = 'AIRS.2016.03.01.L3.RetStd031.v6.0.31.0.G16095175654.hdf' #hdffile = 'AIRS.2016.04.01.L3.RetStd030.v6.0.31.0.G16122185324.hdf' #hdffile = 'AIRS.2016.05.01.L3.RetStd031.v6.0.31.0.G16153184258.hdf' #hdffile = 'AIRS.2016.06.01.L3.RetStd030.v6.0.31.0.G16189154115.hdf' #hdffile = 'AIRS.2016.07.01.L3.RetStd031.v6.0.31.0.G16223152110.hdf' #hdffile = 'AIRS.2016.08.01.L3.RetStd031.v6.0.31.0.G16245202845.hdf' #hdffile = 'AIRS.2016.09.01.L3.RetStd030.v6.0.31.0.G16281134124.hdf' FILE_NAME= hdffile base_filename=os.path.basename(FILE_NAME).split('.hdf')[0] p = re.compile('[.]') root_filename=p.sub('_',base_filename) rootimg_dir=os.path.join('test_images',root_filename) try: FILE_NAME = os.path.join(os.environ['HDFEOS_ZOO_DIR'], hdffile) except KeyError: pass data3D=GetData(FILE_NAME,DATAFIELD_NAME) data= data3D[:,:] ## Ozone has no additional dimensions lat = GetData(FILE_NAME,'Latitude') latitude = lat[:,:] lon = GetData(FILE_NAME,'Longitude') longitude = lon[:,:] # Handle fill value. attrs = data3D.attributes(full=1) fillvalue=attrs["_FillValue"] # fillvalue[0] is the attribute value. 
fv = fillvalue[0] data[data == fv] = np.nan data = np.ma.masked_array(data, np.isnan(data)) # Plot world data #------------------------- PlotData(longitude,latitude,data,7,3.5,title=base_filename,labelz=DATAFIELD_UNIT) # Select area LongMin=-100 LongMax=-30 LatMin=-55 LatMax=15 X=longitude Y=latitude (Xsel,Ysel,extracted_data) = AreaSelect(X,Y,data,LongMin,LongMax,LatMin,LatMax) # plot area #------------ PlotData(Xsel,Ysel,extracted_data,6,6,title=base_filename,labelz=DATAFIELD_UNIT) #LSST site Longitude_lsst = -70.7366833333333 # deg Latitude_lsst = -30.240741666666672 #deg Altitude_lsst = 2749.999999999238 #m #CTIO Site Longitude_ctio = -70.815 # deg Latitude_ctio = -30.165277777777778 #deg Altitude_ctio = 2214.9999999993697 #m # Cerro Paranal Longitude_paranal = -70.40300000000002 #deg Latitude_paranal = -24.625199999999996 #deg Altitude_paranal = 2635.0000000009704 #m # Observatoire de Haute Provence Longitude_ohp=5.71222222222 Latitude_ohp=43.9316666667 Altitude_ohp=650. # Select one bin #----------------- (ctio_min_long_index, ctio_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_ctio,Latitude_ctio) print('ctio_min_lat_index=',ctio_min_lat_index) print('ctio_min_long_index=',ctio_min_long_index) print('ctio_data = ',extrdata,DATAFIELD_UNIT) (lsst_min_long_index, lsst_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_lsst,Latitude_lsst) print('lsst_min_lat_index=',lsst_min_lat_index) print('lsst_min_long_index=',lsst_min_long_index) print('lsst_data = ',extrdata,DATAFIELD_UNIT) (ohp_min_long_index, ohp_min_lat_index,extrdata)=SelectBin(X,Y,data,Longitude_ohp,Latitude_ohp) print('ohp_min_lat_index=',ohp_min_lat_index) print('ohp_min_long_index=',ohp_min_long_index) print('ohp_data = ',extrdata,DATAFIELD_UNIT) #---------------------------------------------------------------------------------
(selected_lat_indexes,selected_long_indexes)=np.where(sel_flags_longlat==True) # list of indexes selected_X=X[:,selected_long_indexes] # all selected longitudes
random_line_split
main.go
// Calculate correlation functions (P2 and P4) from read mapping results. package main import ( "bufio" "bytes" "encoding/json" "fmt" "io" "log" "math/rand" "os" "runtime" "strings" "github.com/biogo/hts/sam" "github.com/mingzhi/biogo/seq" "github.com/mingzhi/ncbiftp/taxonomy" "gopkg.in/alecthomas/kingpin.v2" ) // SubProfile Substitution/mutation profile. type SubProfile struct { Pos int Profile []float64 } // ShowProgress show progress. var ShowProgress bool // MinBaseQuality min base quality var MinBaseQuality int // MinMapQuality min map quality var MinMapQuality int // MinAlleleDepth min allele depth. var MinAlleleDepth int // MinReadLength minimal read length var MinReadLength int func main() { // Command variables. var bamFile string // bam or sam file var outFile string // output file var maxl int // max length of correlation var ncpu int // number of CPUs var minDepth int // min depth var minCoverage float64 // min coveage var gffFile string // gff file var corrResFile string // corr result file. var geneFile string // gene file. var maxDepth float64 // max depth // Parse command arguments. 
app := kingpin.New("meta_p2", "Calculate mutation correlation from bacterial metagenomic sequence data") app.Version("v20170405") bamFileArg := app.Arg("bamfile", "bam file").Required().String() outFileArg := app.Arg("outfile", "out file").Required().String() maxlFlag := app.Flag("maxl", "max len of correlations").Default("100").Int() ncpuFlag := app.Flag("ncpu", "number of CPUs").Default("0").Int() minDepthFlag := app.Flag("min-depth", "min depth").Default("5").Int() minCoverageFlag := app.Flag("min-coverage", "min coverage").Default("0.5").Float64() progressFlag := app.Flag("progress", "show progress").Default("false").Bool() gffFileFlag := app.Flag("gff-file", "gff file").Default("").String() minBaseQFlag := app.Flag("min-base-qual", "min base quality").Default("30").Int() minMapQFlag := app.Flag("min-map-qual", "min mapping quality").Default("30").Int() corrResFileFlag := app.Flag("corr-res-file", "corr result file").Default("").String() geneFileFlag := app.Flag("gene-file", "gene file").Default("").String() minAlleleDepthFlag := app.Flag("min-allele-depth", "min allele depth").Default("0").Int() maxDepthFlag := app.Flag("max-depth", "max coverage depth for each gene").Default("0").Float64() minReadLenFlag := app.Flag("min-read-length", "minimal read length").Default("60").Int() kingpin.MustParse(app.Parse(os.Args[1:])) bamFile = *bamFileArg outFile = *outFileArg maxl = *maxlFlag if *ncpuFlag == 0 { ncpu = runtime.NumCPU() } else { ncpu = *ncpuFlag } ShowProgress = *progressFlag minDepth = *minDepthFlag minCoverage = *minCoverageFlag gffFile = *gffFileFlag MinBaseQuality = *minBaseQFlag MinMapQuality = *minMapQFlag corrResFile = *corrResFileFlag geneFile = *geneFileFlag MinAlleleDepth = *minAlleleDepthFlag maxDepth = *maxDepthFlag MinReadLength = *minReadLenFlag runtime.GOMAXPROCS(ncpu) // Read sequence reads. 
var header *sam.Header var recordsChan chan GeneSamRecords if gffFile != "" { gffRecMap := readGffs(gffFile) header, recordsChan = readStrainBamFile(bamFile, gffRecMap) } else { header, recordsChan = readPanGenomeBamFile(bamFile) } var geneSet map[string]bool if geneFile != "" { geneSet = make(map[string]bool) lines := readLines(geneFile) for _, line := range lines { gene := strings.Split(line, "\t")[0] geneSet[gene] = true } } codeTable := taxonomy.GeneticCodes()["11"] done := make(chan bool) p2Chan := make(chan CorrResults) for i := 0; i < ncpu; i++ { go func() { for geneRecords := range recordsChan { if geneFile != "" { if !geneSet[geneRecords.ID] { continue } } if maxDepth > 0 { geneRecords = subsample(geneRecords, maxDepth) } geneLen := geneRecords.End - geneRecords.Start gene := pileupCodons(geneRecords) ok := checkCoverage(gene, geneLen, minDepth, minCoverage) if ok { p2 := calcP2(gene, maxl, minDepth, codeTable) p4 := calcP4(gene, maxl, minDepth, codeTable) p2 = append(p2, p4...) p2Chan <- CorrResults{Results: p2, GeneID: geneRecords.ID, GeneLen: geneLen, ReadNum: len(geneRecords.Records)} } } done <- true }() } go func() { defer close(p2Chan) for i := 0; i < ncpu; i++ { <-done } }() var corrResEncoder *json.Encoder if corrResFile != "" { f, err := os.Create(corrResFile) if err != nil { log.Panic(err) } defer f.Close() corrResEncoder = json.NewEncoder(f) } collector := NewCollector() for corrResults := range p2Chan { collector.Add(corrResults) if corrResFile != "" { if err := corrResEncoder.Encode(corrResults); err != nil { log.Panic(err) } } } numJob := len(header.Refs()) log.Printf("Number of references: %d\n", numJob) w, err := os.Create(outFile) if err != nil { panic(err) } defer w.Close() w.WriteString("l,m,v,n,t,b\n") results := collector.Results() for _, res := range results { w.WriteString(fmt.Sprintf("%d,%g,%g,%d,%s,all\n", res.Lag, res.Value, res.Variance, res.Count, res.Type)) } } // pileupCodons pileup codons of a list of reads at a gene. 
func pileupCodons(geneRecords GeneSamRecords) (codonGene *CodonGene) { codonGene = NewCodonGene() for _, read := range geneRecords.Records { if checkReadQuality(read) { codonArray := getCodons(read, geneRecords.Start, geneRecords.Strand) for _, codon := range codonArray { if !codon.ContainsGap() { codonGene.AddCodon(codon) } } } } return } // checkReadQuality return false if the read fails quality check. func checkReadQuality(read *sam.Record) bool { if int(read.MapQ) < MinMapQuality || read.Len() < MinReadLength { return false } // for _, cigar := range read.Cigar { // if cigar.Type() != sam.CigarMatch && cigar.Type() != sam.CigarSoftClipped { // return false // } // } return true } // getCodons split a read into a list of Codon. func getCodons(read *sam.Record, offset, strand int) (codonArray []Codon) { // get the mapped sequence of the read onto the reference. mappedSeq, _ := Map2Ref(read) for i := 2; i < len(mappedSeq); { if (read.Pos+i-offset+1)%3 == 0 { codonSeq := mappedSeq[i-2 : i+1] genePos := (read.Pos+i-offset+1)/3 - 1 if genePos >= 0 { if strand == -1 { codonSeq = seq.Reverse(seq.Complement(codonSeq)) } codon := Codon{ReadID: read.Name, Seq: string(codonSeq), GenePos: genePos} codonArray = append(codonArray, codon) } i += 3 } else { i++ } } return } func isATGC(b byte) bool { if b == 'A' { return true } else if b == 'T' { return true } else if b == 'C' { return true } else if b == 'G' { return true } return false } // P2 stores p2 calculation results. type P2 struct { Total float64 Count int } // doubleCount count codon pairs. func
(nc *NuclCov, codonPairArray []CodonPair) { for _, cp := range codonPairArray { a := cp.A.Seq[2] b := cp.B.Seq[2] nc.Add(a, b) } } func calcP2(gene *CodonGene, maxl, minDepth int, codeTable *taxonomy.GeneticCode) (p2Res []CorrResult) { alphabet := []byte{'A', 'T', 'G', 'C'} for i := 0; i < gene.Len(); i++ { for j := i; j < gene.Len(); j++ { codonPairRaw := gene.PairCodonAt(i, j) if len(codonPairRaw) < 2 { continue } lag := codonPairRaw[0].B.GenePos - codonPairRaw[0].A.GenePos if lag < 0 { lag = -lag } if lag >= maxl { break } splittedCodonPairs := SynoumousSplitCodonPairs(codonPairRaw, codeTable) for _, synPairs := range splittedCodonPairs { if len(synPairs) > minDepth { nc := NewNuclCov(alphabet) doubleCount(nc, synPairs) for len(p2Res) <= lag { p2Res = append(p2Res, CorrResult{Type: "P2", Lag: len(p2Res)}) } xy, _, _, n := nc.Cov11(MinAlleleDepth) p2Res[lag].Count += int64(n) p2Res[lag].Value += xy } } } } return } func calcP4(gene *CodonGene, maxl, minDepth int, codeTable *taxonomy.GeneticCode) (p4Res []CorrResult) { var valueArray []float64 var countArray []int var posArray []int for i := 0; i < gene.Len(); i++ { value, count := autoCov(gene, i, minDepth, codeTable) if count > 0 { pos := gene.CodonPiles[i].GenePos() valueArray = append(valueArray, value) countArray = append(countArray, count) posArray = append(posArray, pos) } } for i := 0; i < len(valueArray); i++ { value1 := valueArray[i] count1 := countArray[i] xbar := value1 / float64(count1) for j := i; j < len(valueArray); j++ { value2 := valueArray[j] count2 := countArray[j] ybar := value2 / float64(count2) lag := posArray[j] - posArray[i] if lag < 0 { lag = -lag } if lag >= maxl { break } for len(p4Res) <= lag { p4Res = append(p4Res, CorrResult{Type: "P4", Lag: len(p4Res)}) } p4Res[lag].Value += xbar * ybar p4Res[lag].Count++ } } return } func autoCov(gene *CodonGene, i, minDepth int, codeTable *taxonomy.GeneticCode) (value float64, count int) { alphabet := []byte{'A', 'T', 'G', 'C'} codonPairRaw := 
gene.PairCodonAt(i, i) if len(codonPairRaw) < 2 { return } lag := codonPairRaw[0].B.GenePos - codonPairRaw[0].A.GenePos if lag < 0 { lag = -lag } splittedCodonPairs := SynoumousSplitCodonPairs(codonPairRaw, codeTable) for _, synPairs := range splittedCodonPairs { if len(synPairs) > minDepth { nc := NewNuclCov(alphabet) doubleCount(nc, synPairs) xy, _, _, n := nc.Cov11(MinAlleleDepth) value += xy count += n } } return } // Map2Ref Obtains a read mapping to the reference genome. func Map2Ref(r *sam.Record) (s []byte, q []byte) { p := 0 // position in the read sequence. read := r.Seq.Expand() // read sequence. qual := r.Qual length := 0 for _, c := range r.Cigar { switch c.Type() { case sam.CigarMatch, sam.CigarMismatch, sam.CigarEqual, sam.CigarSoftClipped: length += c.Len() } } if length != len(read) || len(read) != len(qual) { return } for _, c := range r.Cigar { switch c.Type() { case sam.CigarMatch, sam.CigarMismatch, sam.CigarEqual: s = append(s, read[p:p+c.Len()]...) q = append(q, qual[p:p+c.Len()]...) p += c.Len() case sam.CigarInsertion, sam.CigarSoftClipped: p += c.Len() case sam.CigarDeletion, sam.CigarSkipped: for i := 0; i < c.Len(); i++ { s = append(s, '-') q = append(q, 0) } } } s = bytes.ToUpper(s) for i, a := range q { if int(a) < MinBaseQuality { s[i] = '-' } } return } func checkCoverage(gene *CodonGene, geneLen, minDepth int, minCoverage float64) (ok bool) { num := 0 for _, pile := range gene.CodonPiles { if pile.Len() > minDepth { num++ } } coverage := float64(num) / float64(geneLen) * 3.0 // codon pile is in unit of codons (3) ok = coverage > minCoverage return } // readLines return all trimmed lines. 
func readLines(filename string) []string { f, err := os.Open(filename) if err != nil { log.Panic(err) } defer f.Close() rd := bufio.NewReader(f) var lines []string for { line, err := rd.ReadString('\n') if err != nil { if err != io.EOF { log.Panic(err) } break } lines = append(lines, strings.TrimSpace(line)) } return lines } // subsample func subsample(geneRecords GeneSamRecords, maxDepth float64) GeneSamRecords { length := float64(geneRecords.End - geneRecords.Start) readNum := len(geneRecords.Records) readLen := float64(geneRecords.Records[0].Len()) maxReadNum := int(length * maxDepth / readLen) if readNum <= maxReadNum { return geneRecords } oldRecords := geneRecords.Records geneRecords.Records = []*sam.Record{} ratio := float64(maxReadNum) / float64(readNum) for _, read := range oldRecords { if rand.Float64() < ratio { geneRecords.Records = append(geneRecords.Records, read) } } return geneRecords }
doubleCount
identifier_name
main.go
// Calculate correlation functions (P2 and P4) from read mapping results. package main import ( "bufio" "bytes" "encoding/json" "fmt" "io" "log" "math/rand" "os" "runtime" "strings" "github.com/biogo/hts/sam" "github.com/mingzhi/biogo/seq" "github.com/mingzhi/ncbiftp/taxonomy" "gopkg.in/alecthomas/kingpin.v2" ) // SubProfile Substitution/mutation profile. type SubProfile struct { Pos int Profile []float64 } // ShowProgress show progress. var ShowProgress bool // MinBaseQuality min base quality var MinBaseQuality int // MinMapQuality min map quality var MinMapQuality int // MinAlleleDepth min allele depth. var MinAlleleDepth int // MinReadLength minimal read length var MinReadLength int func main() { // Command variables. var bamFile string // bam or sam file var outFile string // output file var maxl int // max length of correlation var ncpu int // number of CPUs var minDepth int // min depth var minCoverage float64 // min coveage var gffFile string // gff file var corrResFile string // corr result file. var geneFile string // gene file. var maxDepth float64 // max depth // Parse command arguments. 
app := kingpin.New("meta_p2", "Calculate mutation correlation from bacterial metagenomic sequence data") app.Version("v20170405") bamFileArg := app.Arg("bamfile", "bam file").Required().String() outFileArg := app.Arg("outfile", "out file").Required().String() maxlFlag := app.Flag("maxl", "max len of correlations").Default("100").Int() ncpuFlag := app.Flag("ncpu", "number of CPUs").Default("0").Int() minDepthFlag := app.Flag("min-depth", "min depth").Default("5").Int() minCoverageFlag := app.Flag("min-coverage", "min coverage").Default("0.5").Float64() progressFlag := app.Flag("progress", "show progress").Default("false").Bool() gffFileFlag := app.Flag("gff-file", "gff file").Default("").String() minBaseQFlag := app.Flag("min-base-qual", "min base quality").Default("30").Int() minMapQFlag := app.Flag("min-map-qual", "min mapping quality").Default("30").Int() corrResFileFlag := app.Flag("corr-res-file", "corr result file").Default("").String() geneFileFlag := app.Flag("gene-file", "gene file").Default("").String() minAlleleDepthFlag := app.Flag("min-allele-depth", "min allele depth").Default("0").Int() maxDepthFlag := app.Flag("max-depth", "max coverage depth for each gene").Default("0").Float64() minReadLenFlag := app.Flag("min-read-length", "minimal read length").Default("60").Int() kingpin.MustParse(app.Parse(os.Args[1:])) bamFile = *bamFileArg outFile = *outFileArg maxl = *maxlFlag if *ncpuFlag == 0 { ncpu = runtime.NumCPU() } else { ncpu = *ncpuFlag } ShowProgress = *progressFlag minDepth = *minDepthFlag minCoverage = *minCoverageFlag gffFile = *gffFileFlag MinBaseQuality = *minBaseQFlag MinMapQuality = *minMapQFlag corrResFile = *corrResFileFlag geneFile = *geneFileFlag MinAlleleDepth = *minAlleleDepthFlag maxDepth = *maxDepthFlag MinReadLength = *minReadLenFlag runtime.GOMAXPROCS(ncpu) // Read sequence reads. 
var header *sam.Header var recordsChan chan GeneSamRecords if gffFile != "" { gffRecMap := readGffs(gffFile) header, recordsChan = readStrainBamFile(bamFile, gffRecMap) } else { header, recordsChan = readPanGenomeBamFile(bamFile) } var geneSet map[string]bool if geneFile != "" { geneSet = make(map[string]bool) lines := readLines(geneFile) for _, line := range lines { gene := strings.Split(line, "\t")[0] geneSet[gene] = true } } codeTable := taxonomy.GeneticCodes()["11"] done := make(chan bool) p2Chan := make(chan CorrResults) for i := 0; i < ncpu; i++ { go func() { for geneRecords := range recordsChan { if geneFile != "" { if !geneSet[geneRecords.ID] { continue } } if maxDepth > 0 { geneRecords = subsample(geneRecords, maxDepth) } geneLen := geneRecords.End - geneRecords.Start gene := pileupCodons(geneRecords) ok := checkCoverage(gene, geneLen, minDepth, minCoverage) if ok { p2 := calcP2(gene, maxl, minDepth, codeTable) p4 := calcP4(gene, maxl, minDepth, codeTable) p2 = append(p2, p4...) p2Chan <- CorrResults{Results: p2, GeneID: geneRecords.ID, GeneLen: geneLen, ReadNum: len(geneRecords.Records)} } } done <- true }() } go func() { defer close(p2Chan) for i := 0; i < ncpu; i++ { <-done } }() var corrResEncoder *json.Encoder if corrResFile != "" { f, err := os.Create(corrResFile) if err != nil { log.Panic(err) } defer f.Close() corrResEncoder = json.NewEncoder(f) } collector := NewCollector() for corrResults := range p2Chan { collector.Add(corrResults) if corrResFile != "" { if err := corrResEncoder.Encode(corrResults); err != nil { log.Panic(err) } } } numJob := len(header.Refs()) log.Printf("Number of references: %d\n", numJob) w, err := os.Create(outFile) if err != nil { panic(err) } defer w.Close() w.WriteString("l,m,v,n,t,b\n") results := collector.Results() for _, res := range results { w.WriteString(fmt.Sprintf("%d,%g,%g,%d,%s,all\n", res.Lag, res.Value, res.Variance, res.Count, res.Type)) } } // pileupCodons pileup codons of a list of reads at a gene. 
func pileupCodons(geneRecords GeneSamRecords) (codonGene *CodonGene) { codonGene = NewCodonGene() for _, read := range geneRecords.Records { if checkReadQuality(read) { codonArray := getCodons(read, geneRecords.Start, geneRecords.Strand) for _, codon := range codonArray { if !codon.ContainsGap() { codonGene.AddCodon(codon) } } } } return } // checkReadQuality return false if the read fails quality check. func checkReadQuality(read *sam.Record) bool { if int(read.MapQ) < MinMapQuality || read.Len() < MinReadLength { return false } // for _, cigar := range read.Cigar { // if cigar.Type() != sam.CigarMatch && cigar.Type() != sam.CigarSoftClipped { // return false // } // } return true } // getCodons split a read into a list of Codon. func getCodons(read *sam.Record, offset, strand int) (codonArray []Codon) { // get the mapped sequence of the read onto the reference. mappedSeq, _ := Map2Ref(read) for i := 2; i < len(mappedSeq); { if (read.Pos+i-offset+1)%3 == 0 { codonSeq := mappedSeq[i-2 : i+1] genePos := (read.Pos+i-offset+1)/3 - 1 if genePos >= 0 { if strand == -1 { codonSeq = seq.Reverse(seq.Complement(codonSeq)) } codon := Codon{ReadID: read.Name, Seq: string(codonSeq), GenePos: genePos} codonArray = append(codonArray, codon) } i += 3 } else { i++ } } return } func isATGC(b byte) bool { if b == 'A' { return true } else if b == 'T' { return true } else if b == 'C' { return true } else if b == 'G' { return true } return false } // P2 stores p2 calculation results. type P2 struct { Total float64 Count int } // doubleCount count codon pairs. 
func doubleCount(nc *NuclCov, codonPairArray []CodonPair) { for _, cp := range codonPairArray { a := cp.A.Seq[2] b := cp.B.Seq[2] nc.Add(a, b) } } func calcP2(gene *CodonGene, maxl, minDepth int, codeTable *taxonomy.GeneticCode) (p2Res []CorrResult) { alphabet := []byte{'A', 'T', 'G', 'C'} for i := 0; i < gene.Len(); i++ { for j := i; j < gene.Len(); j++ { codonPairRaw := gene.PairCodonAt(i, j) if len(codonPairRaw) < 2 { continue } lag := codonPairRaw[0].B.GenePos - codonPairRaw[0].A.GenePos if lag < 0 { lag = -lag } if lag >= maxl { break } splittedCodonPairs := SynoumousSplitCodonPairs(codonPairRaw, codeTable) for _, synPairs := range splittedCodonPairs { if len(synPairs) > minDepth { nc := NewNuclCov(alphabet) doubleCount(nc, synPairs) for len(p2Res) <= lag { p2Res = append(p2Res, CorrResult{Type: "P2", Lag: len(p2Res)}) } xy, _, _, n := nc.Cov11(MinAlleleDepth) p2Res[lag].Count += int64(n) p2Res[lag].Value += xy } } } } return } func calcP4(gene *CodonGene, maxl, minDepth int, codeTable *taxonomy.GeneticCode) (p4Res []CorrResult) { var valueArray []float64 var countArray []int var posArray []int for i := 0; i < gene.Len(); i++ { value, count := autoCov(gene, i, minDepth, codeTable) if count > 0 { pos := gene.CodonPiles[i].GenePos() valueArray = append(valueArray, value) countArray = append(countArray, count) posArray = append(posArray, pos) } } for i := 0; i < len(valueArray); i++ { value1 := valueArray[i] count1 := countArray[i] xbar := value1 / float64(count1) for j := i; j < len(valueArray); j++ { value2 := valueArray[j] count2 := countArray[j] ybar := value2 / float64(count2) lag := posArray[j] - posArray[i] if lag < 0 { lag = -lag } if lag >= maxl { break } for len(p4Res) <= lag { p4Res = append(p4Res, CorrResult{Type: "P4", Lag: len(p4Res)}) } p4Res[lag].Value += xbar * ybar p4Res[lag].Count++ } } return } func autoCov(gene *CodonGene, i, minDepth int, codeTable *taxonomy.GeneticCode) (value float64, count int) { alphabet := []byte{'A', 'T', 'G', 'C'} 
codonPairRaw := gene.PairCodonAt(i, i) if len(codonPairRaw) < 2
lag := codonPairRaw[0].B.GenePos - codonPairRaw[0].A.GenePos if lag < 0 { lag = -lag } splittedCodonPairs := SynoumousSplitCodonPairs(codonPairRaw, codeTable) for _, synPairs := range splittedCodonPairs { if len(synPairs) > minDepth { nc := NewNuclCov(alphabet) doubleCount(nc, synPairs) xy, _, _, n := nc.Cov11(MinAlleleDepth) value += xy count += n } } return } // Map2Ref Obtains a read mapping to the reference genome. func Map2Ref(r *sam.Record) (s []byte, q []byte) { p := 0 // position in the read sequence. read := r.Seq.Expand() // read sequence. qual := r.Qual length := 0 for _, c := range r.Cigar { switch c.Type() { case sam.CigarMatch, sam.CigarMismatch, sam.CigarEqual, sam.CigarSoftClipped: length += c.Len() } } if length != len(read) || len(read) != len(qual) { return } for _, c := range r.Cigar { switch c.Type() { case sam.CigarMatch, sam.CigarMismatch, sam.CigarEqual: s = append(s, read[p:p+c.Len()]...) q = append(q, qual[p:p+c.Len()]...) p += c.Len() case sam.CigarInsertion, sam.CigarSoftClipped: p += c.Len() case sam.CigarDeletion, sam.CigarSkipped: for i := 0; i < c.Len(); i++ { s = append(s, '-') q = append(q, 0) } } } s = bytes.ToUpper(s) for i, a := range q { if int(a) < MinBaseQuality { s[i] = '-' } } return } func checkCoverage(gene *CodonGene, geneLen, minDepth int, minCoverage float64) (ok bool) { num := 0 for _, pile := range gene.CodonPiles { if pile.Len() > minDepth { num++ } } coverage := float64(num) / float64(geneLen) * 3.0 // codon pile is in unit of codons (3) ok = coverage > minCoverage return } // readLines return all trimmed lines. 
func readLines(filename string) []string { f, err := os.Open(filename) if err != nil { log.Panic(err) } defer f.Close() rd := bufio.NewReader(f) var lines []string for { line, err := rd.ReadString('\n') if err != nil { if err != io.EOF { log.Panic(err) } break } lines = append(lines, strings.TrimSpace(line)) } return lines } // subsample func subsample(geneRecords GeneSamRecords, maxDepth float64) GeneSamRecords { length := float64(geneRecords.End - geneRecords.Start) readNum := len(geneRecords.Records) readLen := float64(geneRecords.Records[0].Len()) maxReadNum := int(length * maxDepth / readLen) if readNum <= maxReadNum { return geneRecords } oldRecords := geneRecords.Records geneRecords.Records = []*sam.Record{} ratio := float64(maxReadNum) / float64(readNum) for _, read := range oldRecords { if rand.Float64() < ratio { geneRecords.Records = append(geneRecords.Records, read) } } return geneRecords }
{ return }
conditional_block
main.go
// Calculate correlation functions (P2 and P4) from read mapping results. package main import ( "bufio" "bytes" "encoding/json" "fmt" "io" "log" "math/rand" "os" "runtime" "strings" "github.com/biogo/hts/sam" "github.com/mingzhi/biogo/seq" "github.com/mingzhi/ncbiftp/taxonomy" "gopkg.in/alecthomas/kingpin.v2" ) // SubProfile Substitution/mutation profile. type SubProfile struct { Pos int Profile []float64 } // ShowProgress show progress. var ShowProgress bool // MinBaseQuality min base quality var MinBaseQuality int // MinMapQuality min map quality var MinMapQuality int // MinAlleleDepth min allele depth. var MinAlleleDepth int // MinReadLength minimal read length var MinReadLength int func main() { // Command variables. var bamFile string // bam or sam file var outFile string // output file var maxl int // max length of correlation var ncpu int // number of CPUs var minDepth int // min depth var minCoverage float64 // min coveage var gffFile string // gff file var corrResFile string // corr result file. var geneFile string // gene file. var maxDepth float64 // max depth // Parse command arguments. 
app := kingpin.New("meta_p2", "Calculate mutation correlation from bacterial metagenomic sequence data") app.Version("v20170405") bamFileArg := app.Arg("bamfile", "bam file").Required().String() outFileArg := app.Arg("outfile", "out file").Required().String() maxlFlag := app.Flag("maxl", "max len of correlations").Default("100").Int() ncpuFlag := app.Flag("ncpu", "number of CPUs").Default("0").Int() minDepthFlag := app.Flag("min-depth", "min depth").Default("5").Int() minCoverageFlag := app.Flag("min-coverage", "min coverage").Default("0.5").Float64() progressFlag := app.Flag("progress", "show progress").Default("false").Bool() gffFileFlag := app.Flag("gff-file", "gff file").Default("").String() minBaseQFlag := app.Flag("min-base-qual", "min base quality").Default("30").Int() minMapQFlag := app.Flag("min-map-qual", "min mapping quality").Default("30").Int() corrResFileFlag := app.Flag("corr-res-file", "corr result file").Default("").String() geneFileFlag := app.Flag("gene-file", "gene file").Default("").String() minAlleleDepthFlag := app.Flag("min-allele-depth", "min allele depth").Default("0").Int() maxDepthFlag := app.Flag("max-depth", "max coverage depth for each gene").Default("0").Float64() minReadLenFlag := app.Flag("min-read-length", "minimal read length").Default("60").Int() kingpin.MustParse(app.Parse(os.Args[1:])) bamFile = *bamFileArg outFile = *outFileArg maxl = *maxlFlag if *ncpuFlag == 0 { ncpu = runtime.NumCPU() } else { ncpu = *ncpuFlag } ShowProgress = *progressFlag minDepth = *minDepthFlag minCoverage = *minCoverageFlag gffFile = *gffFileFlag MinBaseQuality = *minBaseQFlag MinMapQuality = *minMapQFlag corrResFile = *corrResFileFlag geneFile = *geneFileFlag MinAlleleDepth = *minAlleleDepthFlag maxDepth = *maxDepthFlag MinReadLength = *minReadLenFlag runtime.GOMAXPROCS(ncpu) // Read sequence reads. 
var header *sam.Header var recordsChan chan GeneSamRecords if gffFile != "" { gffRecMap := readGffs(gffFile) header, recordsChan = readStrainBamFile(bamFile, gffRecMap) } else { header, recordsChan = readPanGenomeBamFile(bamFile) } var geneSet map[string]bool if geneFile != "" { geneSet = make(map[string]bool) lines := readLines(geneFile) for _, line := range lines { gene := strings.Split(line, "\t")[0] geneSet[gene] = true } } codeTable := taxonomy.GeneticCodes()["11"] done := make(chan bool) p2Chan := make(chan CorrResults) for i := 0; i < ncpu; i++ { go func() { for geneRecords := range recordsChan { if geneFile != "" { if !geneSet[geneRecords.ID] { continue } } if maxDepth > 0 { geneRecords = subsample(geneRecords, maxDepth) } geneLen := geneRecords.End - geneRecords.Start gene := pileupCodons(geneRecords) ok := checkCoverage(gene, geneLen, minDepth, minCoverage) if ok { p2 := calcP2(gene, maxl, minDepth, codeTable) p4 := calcP4(gene, maxl, minDepth, codeTable) p2 = append(p2, p4...) p2Chan <- CorrResults{Results: p2, GeneID: geneRecords.ID, GeneLen: geneLen, ReadNum: len(geneRecords.Records)} } } done <- true }() } go func() { defer close(p2Chan) for i := 0; i < ncpu; i++ { <-done } }() var corrResEncoder *json.Encoder if corrResFile != "" { f, err := os.Create(corrResFile) if err != nil { log.Panic(err) } defer f.Close() corrResEncoder = json.NewEncoder(f) } collector := NewCollector() for corrResults := range p2Chan { collector.Add(corrResults) if corrResFile != "" { if err := corrResEncoder.Encode(corrResults); err != nil { log.Panic(err) } } } numJob := len(header.Refs()) log.Printf("Number of references: %d\n", numJob) w, err := os.Create(outFile) if err != nil { panic(err) } defer w.Close() w.WriteString("l,m,v,n,t,b\n") results := collector.Results() for _, res := range results { w.WriteString(fmt.Sprintf("%d,%g,%g,%d,%s,all\n", res.Lag, res.Value, res.Variance, res.Count, res.Type)) } } // pileupCodons pileup codons of a list of reads at a gene. 
func pileupCodons(geneRecords GeneSamRecords) (codonGene *CodonGene) { codonGene = NewCodonGene() for _, read := range geneRecords.Records { if checkReadQuality(read) { codonArray := getCodons(read, geneRecords.Start, geneRecords.Strand) for _, codon := range codonArray { if !codon.ContainsGap() { codonGene.AddCodon(codon) } } } } return } // checkReadQuality return false if the read fails quality check. func checkReadQuality(read *sam.Record) bool { if int(read.MapQ) < MinMapQuality || read.Len() < MinReadLength { return false } // for _, cigar := range read.Cigar { // if cigar.Type() != sam.CigarMatch && cigar.Type() != sam.CigarSoftClipped { // return false // } // } return true } // getCodons split a read into a list of Codon. func getCodons(read *sam.Record, offset, strand int) (codonArray []Codon) { // get the mapped sequence of the read onto the reference. mappedSeq, _ := Map2Ref(read) for i := 2; i < len(mappedSeq); { if (read.Pos+i-offset+1)%3 == 0 { codonSeq := mappedSeq[i-2 : i+1] genePos := (read.Pos+i-offset+1)/3 - 1 if genePos >= 0 { if strand == -1 { codonSeq = seq.Reverse(seq.Complement(codonSeq)) } codon := Codon{ReadID: read.Name, Seq: string(codonSeq), GenePos: genePos} codonArray = append(codonArray, codon) } i += 3 } else { i++ } } return } func isATGC(b byte) bool { if b == 'A' { return true } else if b == 'T' { return true } else if b == 'C' { return true } else if b == 'G' { return true } return false } // P2 stores p2 calculation results. type P2 struct { Total float64 Count int } // doubleCount count codon pairs. 
func doubleCount(nc *NuclCov, codonPairArray []CodonPair) { for _, cp := range codonPairArray { a := cp.A.Seq[2] b := cp.B.Seq[2] nc.Add(a, b) } } func calcP2(gene *CodonGene, maxl, minDepth int, codeTable *taxonomy.GeneticCode) (p2Res []CorrResult) { alphabet := []byte{'A', 'T', 'G', 'C'} for i := 0; i < gene.Len(); i++ { for j := i; j < gene.Len(); j++ { codonPairRaw := gene.PairCodonAt(i, j) if len(codonPairRaw) < 2 { continue } lag := codonPairRaw[0].B.GenePos - codonPairRaw[0].A.GenePos if lag < 0 { lag = -lag } if lag >= maxl { break } splittedCodonPairs := SynoumousSplitCodonPairs(codonPairRaw, codeTable) for _, synPairs := range splittedCodonPairs { if len(synPairs) > minDepth { nc := NewNuclCov(alphabet) doubleCount(nc, synPairs) for len(p2Res) <= lag { p2Res = append(p2Res, CorrResult{Type: "P2", Lag: len(p2Res)}) } xy, _, _, n := nc.Cov11(MinAlleleDepth) p2Res[lag].Count += int64(n) p2Res[lag].Value += xy } } } } return } func calcP4(gene *CodonGene, maxl, minDepth int, codeTable *taxonomy.GeneticCode) (p4Res []CorrResult) { var valueArray []float64 var countArray []int var posArray []int for i := 0; i < gene.Len(); i++ { value, count := autoCov(gene, i, minDepth, codeTable) if count > 0 { pos := gene.CodonPiles[i].GenePos() valueArray = append(valueArray, value) countArray = append(countArray, count) posArray = append(posArray, pos) } } for i := 0; i < len(valueArray); i++ { value1 := valueArray[i] count1 := countArray[i] xbar := value1 / float64(count1) for j := i; j < len(valueArray); j++ { value2 := valueArray[j] count2 := countArray[j] ybar := value2 / float64(count2) lag := posArray[j] - posArray[i] if lag < 0 { lag = -lag } if lag >= maxl { break } for len(p4Res) <= lag { p4Res = append(p4Res, CorrResult{Type: "P4", Lag: len(p4Res)}) } p4Res[lag].Value += xbar * ybar p4Res[lag].Count++ } } return } func autoCov(gene *CodonGene, i, minDepth int, codeTable *taxonomy.GeneticCode) (value float64, count int) { alphabet := []byte{'A', 'T', 'G', 'C'} 
codonPairRaw := gene.PairCodonAt(i, i) if len(codonPairRaw) < 2 { return } lag := codonPairRaw[0].B.GenePos - codonPairRaw[0].A.GenePos if lag < 0 { lag = -lag } splittedCodonPairs := SynoumousSplitCodonPairs(codonPairRaw, codeTable) for _, synPairs := range splittedCodonPairs { if len(synPairs) > minDepth { nc := NewNuclCov(alphabet) doubleCount(nc, synPairs) xy, _, _, n := nc.Cov11(MinAlleleDepth) value += xy count += n } } return } // Map2Ref Obtains a read mapping to the reference genome. func Map2Ref(r *sam.Record) (s []byte, q []byte) { p := 0 // position in the read sequence. read := r.Seq.Expand() // read sequence. qual := r.Qual length := 0 for _, c := range r.Cigar { switch c.Type() { case sam.CigarMatch, sam.CigarMismatch, sam.CigarEqual, sam.CigarSoftClipped: length += c.Len() } } if length != len(read) || len(read) != len(qual) { return } for _, c := range r.Cigar { switch c.Type() { case sam.CigarMatch, sam.CigarMismatch, sam.CigarEqual: s = append(s, read[p:p+c.Len()]...) q = append(q, qual[p:p+c.Len()]...) p += c.Len() case sam.CigarInsertion, sam.CigarSoftClipped: p += c.Len() case sam.CigarDeletion, sam.CigarSkipped: for i := 0; i < c.Len(); i++ { s = append(s, '-') q = append(q, 0) } } } s = bytes.ToUpper(s) for i, a := range q { if int(a) < MinBaseQuality { s[i] = '-' } } return } func checkCoverage(gene *CodonGene, geneLen, minDepth int, minCoverage float64) (ok bool) { num := 0 for _, pile := range gene.CodonPiles { if pile.Len() > minDepth { num++ } } coverage := float64(num) / float64(geneLen) * 3.0 // codon pile is in unit of codons (3) ok = coverage > minCoverage return } // readLines return all trimmed lines. func readLines(filename string) []string
// subsample func subsample(geneRecords GeneSamRecords, maxDepth float64) GeneSamRecords { length := float64(geneRecords.End - geneRecords.Start) readNum := len(geneRecords.Records) readLen := float64(geneRecords.Records[0].Len()) maxReadNum := int(length * maxDepth / readLen) if readNum <= maxReadNum { return geneRecords } oldRecords := geneRecords.Records geneRecords.Records = []*sam.Record{} ratio := float64(maxReadNum) / float64(readNum) for _, read := range oldRecords { if rand.Float64() < ratio { geneRecords.Records = append(geneRecords.Records, read) } } return geneRecords }
{ f, err := os.Open(filename) if err != nil { log.Panic(err) } defer f.Close() rd := bufio.NewReader(f) var lines []string for { line, err := rd.ReadString('\n') if err != nil { if err != io.EOF { log.Panic(err) } break } lines = append(lines, strings.TrimSpace(line)) } return lines }
identifier_body
main.go
// Calculate correlation functions (P2 and P4) from read mapping results. package main import ( "bufio" "bytes" "encoding/json" "fmt" "io" "log" "math/rand" "os" "runtime" "strings" "github.com/biogo/hts/sam" "github.com/mingzhi/biogo/seq" "github.com/mingzhi/ncbiftp/taxonomy" "gopkg.in/alecthomas/kingpin.v2" ) // SubProfile Substitution/mutation profile. type SubProfile struct { Pos int Profile []float64 } // ShowProgress show progress. var ShowProgress bool // MinBaseQuality min base quality var MinBaseQuality int // MinMapQuality min map quality var MinMapQuality int // MinAlleleDepth min allele depth. var MinAlleleDepth int // MinReadLength minimal read length var MinReadLength int func main() { // Command variables. var bamFile string // bam or sam file var outFile string // output file var maxl int // max length of correlation var ncpu int // number of CPUs var minDepth int // min depth var minCoverage float64 // min coveage var gffFile string // gff file var corrResFile string // corr result file. var geneFile string // gene file. var maxDepth float64 // max depth // Parse command arguments. 
app := kingpin.New("meta_p2", "Calculate mutation correlation from bacterial metagenomic sequence data") app.Version("v20170405") bamFileArg := app.Arg("bamfile", "bam file").Required().String() outFileArg := app.Arg("outfile", "out file").Required().String() maxlFlag := app.Flag("maxl", "max len of correlations").Default("100").Int() ncpuFlag := app.Flag("ncpu", "number of CPUs").Default("0").Int() minDepthFlag := app.Flag("min-depth", "min depth").Default("5").Int() minCoverageFlag := app.Flag("min-coverage", "min coverage").Default("0.5").Float64() progressFlag := app.Flag("progress", "show progress").Default("false").Bool() gffFileFlag := app.Flag("gff-file", "gff file").Default("").String() minBaseQFlag := app.Flag("min-base-qual", "min base quality").Default("30").Int() minMapQFlag := app.Flag("min-map-qual", "min mapping quality").Default("30").Int() corrResFileFlag := app.Flag("corr-res-file", "corr result file").Default("").String() geneFileFlag := app.Flag("gene-file", "gene file").Default("").String() minAlleleDepthFlag := app.Flag("min-allele-depth", "min allele depth").Default("0").Int() maxDepthFlag := app.Flag("max-depth", "max coverage depth for each gene").Default("0").Float64() minReadLenFlag := app.Flag("min-read-length", "minimal read length").Default("60").Int() kingpin.MustParse(app.Parse(os.Args[1:])) bamFile = *bamFileArg outFile = *outFileArg maxl = *maxlFlag if *ncpuFlag == 0 { ncpu = runtime.NumCPU() } else { ncpu = *ncpuFlag } ShowProgress = *progressFlag minDepth = *minDepthFlag minCoverage = *minCoverageFlag gffFile = *gffFileFlag MinBaseQuality = *minBaseQFlag MinMapQuality = *minMapQFlag corrResFile = *corrResFileFlag geneFile = *geneFileFlag MinAlleleDepth = *minAlleleDepthFlag maxDepth = *maxDepthFlag MinReadLength = *minReadLenFlag runtime.GOMAXPROCS(ncpu) // Read sequence reads. 
var header *sam.Header var recordsChan chan GeneSamRecords if gffFile != "" { gffRecMap := readGffs(gffFile) header, recordsChan = readStrainBamFile(bamFile, gffRecMap) } else { header, recordsChan = readPanGenomeBamFile(bamFile) } var geneSet map[string]bool if geneFile != "" { geneSet = make(map[string]bool) lines := readLines(geneFile) for _, line := range lines { gene := strings.Split(line, "\t")[0] geneSet[gene] = true } } codeTable := taxonomy.GeneticCodes()["11"] done := make(chan bool) p2Chan := make(chan CorrResults) for i := 0; i < ncpu; i++ { go func() { for geneRecords := range recordsChan { if geneFile != "" { if !geneSet[geneRecords.ID] { continue } } if maxDepth > 0 { geneRecords = subsample(geneRecords, maxDepth) } geneLen := geneRecords.End - geneRecords.Start gene := pileupCodons(geneRecords) ok := checkCoverage(gene, geneLen, minDepth, minCoverage) if ok { p2 := calcP2(gene, maxl, minDepth, codeTable) p4 := calcP4(gene, maxl, minDepth, codeTable) p2 = append(p2, p4...) p2Chan <- CorrResults{Results: p2, GeneID: geneRecords.ID, GeneLen: geneLen, ReadNum: len(geneRecords.Records)} } } done <- true }() } go func() { defer close(p2Chan) for i := 0; i < ncpu; i++ { <-done } }() var corrResEncoder *json.Encoder if corrResFile != "" { f, err := os.Create(corrResFile) if err != nil { log.Panic(err) } defer f.Close() corrResEncoder = json.NewEncoder(f) } collector := NewCollector() for corrResults := range p2Chan { collector.Add(corrResults) if corrResFile != "" { if err := corrResEncoder.Encode(corrResults); err != nil { log.Panic(err) } } } numJob := len(header.Refs()) log.Printf("Number of references: %d\n", numJob) w, err := os.Create(outFile) if err != nil { panic(err) } defer w.Close() w.WriteString("l,m,v,n,t,b\n") results := collector.Results()
for _, res := range results { w.WriteString(fmt.Sprintf("%d,%g,%g,%d,%s,all\n", res.Lag, res.Value, res.Variance, res.Count, res.Type)) } } // pileupCodons pileup codons of a list of reads at a gene. func pileupCodons(geneRecords GeneSamRecords) (codonGene *CodonGene) { codonGene = NewCodonGene() for _, read := range geneRecords.Records { if checkReadQuality(read) { codonArray := getCodons(read, geneRecords.Start, geneRecords.Strand) for _, codon := range codonArray { if !codon.ContainsGap() { codonGene.AddCodon(codon) } } } } return } // checkReadQuality return false if the read fails quality check. func checkReadQuality(read *sam.Record) bool { if int(read.MapQ) < MinMapQuality || read.Len() < MinReadLength { return false } // for _, cigar := range read.Cigar { // if cigar.Type() != sam.CigarMatch && cigar.Type() != sam.CigarSoftClipped { // return false // } // } return true } // getCodons split a read into a list of Codon. func getCodons(read *sam.Record, offset, strand int) (codonArray []Codon) { // get the mapped sequence of the read onto the reference. mappedSeq, _ := Map2Ref(read) for i := 2; i < len(mappedSeq); { if (read.Pos+i-offset+1)%3 == 0 { codonSeq := mappedSeq[i-2 : i+1] genePos := (read.Pos+i-offset+1)/3 - 1 if genePos >= 0 { if strand == -1 { codonSeq = seq.Reverse(seq.Complement(codonSeq)) } codon := Codon{ReadID: read.Name, Seq: string(codonSeq), GenePos: genePos} codonArray = append(codonArray, codon) } i += 3 } else { i++ } } return } func isATGC(b byte) bool { if b == 'A' { return true } else if b == 'T' { return true } else if b == 'C' { return true } else if b == 'G' { return true } return false } // P2 stores p2 calculation results. type P2 struct { Total float64 Count int } // doubleCount count codon pairs. 
func doubleCount(nc *NuclCov, codonPairArray []CodonPair) { for _, cp := range codonPairArray { a := cp.A.Seq[2] b := cp.B.Seq[2] nc.Add(a, b) } } func calcP2(gene *CodonGene, maxl, minDepth int, codeTable *taxonomy.GeneticCode) (p2Res []CorrResult) { alphabet := []byte{'A', 'T', 'G', 'C'} for i := 0; i < gene.Len(); i++ { for j := i; j < gene.Len(); j++ { codonPairRaw := gene.PairCodonAt(i, j) if len(codonPairRaw) < 2 { continue } lag := codonPairRaw[0].B.GenePos - codonPairRaw[0].A.GenePos if lag < 0 { lag = -lag } if lag >= maxl { break } splittedCodonPairs := SynoumousSplitCodonPairs(codonPairRaw, codeTable) for _, synPairs := range splittedCodonPairs { if len(synPairs) > minDepth { nc := NewNuclCov(alphabet) doubleCount(nc, synPairs) for len(p2Res) <= lag { p2Res = append(p2Res, CorrResult{Type: "P2", Lag: len(p2Res)}) } xy, _, _, n := nc.Cov11(MinAlleleDepth) p2Res[lag].Count += int64(n) p2Res[lag].Value += xy } } } } return } func calcP4(gene *CodonGene, maxl, minDepth int, codeTable *taxonomy.GeneticCode) (p4Res []CorrResult) { var valueArray []float64 var countArray []int var posArray []int for i := 0; i < gene.Len(); i++ { value, count := autoCov(gene, i, minDepth, codeTable) if count > 0 { pos := gene.CodonPiles[i].GenePos() valueArray = append(valueArray, value) countArray = append(countArray, count) posArray = append(posArray, pos) } } for i := 0; i < len(valueArray); i++ { value1 := valueArray[i] count1 := countArray[i] xbar := value1 / float64(count1) for j := i; j < len(valueArray); j++ { value2 := valueArray[j] count2 := countArray[j] ybar := value2 / float64(count2) lag := posArray[j] - posArray[i] if lag < 0 { lag = -lag } if lag >= maxl { break } for len(p4Res) <= lag { p4Res = append(p4Res, CorrResult{Type: "P4", Lag: len(p4Res)}) } p4Res[lag].Value += xbar * ybar p4Res[lag].Count++ } } return } func autoCov(gene *CodonGene, i, minDepth int, codeTable *taxonomy.GeneticCode) (value float64, count int) { alphabet := []byte{'A', 'T', 'G', 'C'} 
codonPairRaw := gene.PairCodonAt(i, i) if len(codonPairRaw) < 2 { return } lag := codonPairRaw[0].B.GenePos - codonPairRaw[0].A.GenePos if lag < 0 { lag = -lag } splittedCodonPairs := SynoumousSplitCodonPairs(codonPairRaw, codeTable) for _, synPairs := range splittedCodonPairs { if len(synPairs) > minDepth { nc := NewNuclCov(alphabet) doubleCount(nc, synPairs) xy, _, _, n := nc.Cov11(MinAlleleDepth) value += xy count += n } } return } // Map2Ref Obtains a read mapping to the reference genome. func Map2Ref(r *sam.Record) (s []byte, q []byte) { p := 0 // position in the read sequence. read := r.Seq.Expand() // read sequence. qual := r.Qual length := 0 for _, c := range r.Cigar { switch c.Type() { case sam.CigarMatch, sam.CigarMismatch, sam.CigarEqual, sam.CigarSoftClipped: length += c.Len() } } if length != len(read) || len(read) != len(qual) { return } for _, c := range r.Cigar { switch c.Type() { case sam.CigarMatch, sam.CigarMismatch, sam.CigarEqual: s = append(s, read[p:p+c.Len()]...) q = append(q, qual[p:p+c.Len()]...) p += c.Len() case sam.CigarInsertion, sam.CigarSoftClipped: p += c.Len() case sam.CigarDeletion, sam.CigarSkipped: for i := 0; i < c.Len(); i++ { s = append(s, '-') q = append(q, 0) } } } s = bytes.ToUpper(s) for i, a := range q { if int(a) < MinBaseQuality { s[i] = '-' } } return } func checkCoverage(gene *CodonGene, geneLen, minDepth int, minCoverage float64) (ok bool) { num := 0 for _, pile := range gene.CodonPiles { if pile.Len() > minDepth { num++ } } coverage := float64(num) / float64(geneLen) * 3.0 // codon pile is in unit of codons (3) ok = coverage > minCoverage return } // readLines return all trimmed lines. 
func readLines(filename string) []string { f, err := os.Open(filename) if err != nil { log.Panic(err) } defer f.Close() rd := bufio.NewReader(f) var lines []string for { line, err := rd.ReadString('\n') if err != nil { if err != io.EOF { log.Panic(err) } break } lines = append(lines, strings.TrimSpace(line)) } return lines } // subsample func subsample(geneRecords GeneSamRecords, maxDepth float64) GeneSamRecords { length := float64(geneRecords.End - geneRecords.Start) readNum := len(geneRecords.Records) readLen := float64(geneRecords.Records[0].Len()) maxReadNum := int(length * maxDepth / readLen) if readNum <= maxReadNum { return geneRecords } oldRecords := geneRecords.Records geneRecords.Records = []*sam.Record{} ratio := float64(maxReadNum) / float64(readNum) for _, read := range oldRecords { if rand.Float64() < ratio { geneRecords.Records = append(geneRecords.Records, read) } } return geneRecords }
random_line_split
inih.go
package ini import ( "io" ) const DEFAULT_SECTION = "default" type ini_break_t int // Line break types. const ( // Let the parser choose the break type. ini_ANY_BREAK ini_break_t = iota ini_CR_BREAK // Use CR for line breaks (Mac style). ini_LN_BREAK // Use LN for line breaks (Unix style). ini_CRLN_BREAK // Use CR LN for line breaks (DOS style). ) type ini_error_type_t int // Many bad things could happen with the parser and emitter. const ( // No error is produced. ini_NO_ERROR ini_error_type_t = iota ini_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. ini_READER_ERROR // Cannot read or decode the input stream. ini_SCANNER_ERROR // Cannot scan the input stream. ini_PARSER_ERROR // Cannot parse the input stream. ini_COMPOSER_ERROR // Cannot compose a YAML document. ini_WRITER_ERROR // Cannot write to the output stream. ini_EMITTER_ERROR // Cannot emit a YAML stream. ) // The pointer position. type ini_mark_t struct { index int // The position index. line int // The position line. column int // The position column. } // Node Styles type ini_style_t int8 type ini_scalar_style_t ini_style_t // Scalar styles. const ( // Let the emitter choose the style. ini_ANY_SCALAR_STYLE ini_scalar_style_t = iota ini_PLAIN_SCALAR_STYLE // The plain scalar style. ini_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. ini_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. ) // Tokens type ini_token_type_t int // Token types. const ( // An empty token. ini_NO_TOKEN ini_token_type_t = iota ini_DOCUMENT_START_TOKEN // A DOCUMENT-START token. ini_DOCUMENT_END_TOKEN // A DOCUMENT-START token. ini_SECTION_START_TOKEN // A SECTION-START token. ini_SECTION_INHERIT_TOKEN // A SECTION-INHERIT token. ini_SECTION_ENTRY_TOKEN // A SECTION-ENTRY token. ini_KEY_TOKEN // An VALUE token. ini_VALUE_TOKEN // An VALUE token. ini_SCALAR_TOKEN // A SCALAR token. ini_MAP_TOKEN // A MAP token. ini_COMMENT_START_TOKEN // A COMMENT-START token. 
ini_COMMENT_END_TOKEN // A COMMENT-END token. ) func (tt ini_token_type_t) String() string { switch tt { case ini_NO_TOKEN: return "ini_NO_TOKEN" case ini_DOCUMENT_START_TOKEN: return "ini_DOCUMENT_START_TOKEN" case ini_DOCUMENT_END_TOKEN: return "ini_DOCUMENT_END_TOKEN" case ini_SECTION_START_TOKEN: return "ini_SECTION_START_TOKEN" case ini_SECTION_INHERIT_TOKEN: return "ini_SECTION_INHERIT_TOKEN" case ini_SECTION_ENTRY_TOKEN: return "ini_SECTION_ENTRY_TOKEN" case ini_KEY_TOKEN: return "ini_KEY_TOKEN" case ini_VALUE_TOKEN: return "ini_VALUE_TOKEN" case ini_SCALAR_TOKEN: return "ini_SCALAR_TOKEN" case ini_COMMENT_START_TOKEN: return "ini_COMMENT_START_TOKEN" case ini_COMMENT_END_TOKEN: return "ini_COMMENT_END_TOKEN" } return "<unknown token>" } // The token structure. type ini_token_t struct { // The token type. typ ini_token_type_t // The start/end of the token. start_mark, end_mark ini_mark_t // The scalar value // (for ini_SCALAR_TOKEN). value []byte // The scalar style (for ini_SCALAR_TOKEN). style ini_scalar_style_t } // Events type ini_event_type_t int8 // Event types. const ( // An empty event. ini_NO_EVENT ini_event_type_t = iota ini_DOCUMENT_START_EVENT // A DOCUMENT-START event. ini_DOCUMENT_END_EVENT // A DOCUMENT-END event.
ini_MAPPING_EVENT // An MAPPING event. ini_SCALAR_EVENT // An SCALAR event. ini_COMMENT_EVENT // A COMMENT event. ) // The event structure. type ini_event_t struct { // The event type. typ ini_event_type_t // The start and end of the event. start_mark, end_mark ini_mark_t // The node value. value []byte // The tag (for ini_SCALAR_EVENT). tag []byte // The style (for ini_ELEMENT_START_EVENT). style ini_style_t } func (e *ini_event_t) event_type() string { switch e.typ { case ini_NO_EVENT: return "ini_NO_EVENT" case ini_DOCUMENT_START_EVENT: return "ini_DOCUMENT_START_EVENT" case ini_DOCUMENT_END_EVENT: return "ini_DOCUMENT_END_EVENT" case ini_SECTION_INHERIT_EVENT: return "ini_SECTION_INHERIT_EVENT" case ini_SECTION_ENTRY_EVENT: return "ini_SECTION_ENTRY_EVENT" case ini_MAPPING_EVENT: return "ini_MAPPING_EVENT" case ini_SCALAR_EVENT: return "ini_SCALAR_EVENT" case ini_COMMENT_EVENT: return "ini_COMMENT_EVENT" } return "<unknown token>" } func (e *ini_event_t) scalar_style() ini_scalar_style_t { return ini_scalar_style_t(e.style) } // Nodes const ( ini_NULL_TAG = "null" // The tag 'null' with the only possible value: null. ini_BOOL_TAG = "bool" // The tag 'bool' with the values: true and false. ini_STR_TAG = "str" // The tag 'str' for string values. ini_INT_TAG = "int" // The tag 'int' for integer values. ini_FLOAT_TAG = "float" // The tag 'float' for float values. ini_BINARY_TAG = "binary" ini_MAP_TAG = "map" ini_SECTION_TAG = "section" ini_DEFAULT_SCALAR_TAG = ini_STR_TAG // The default scalar tag is str ) // The prototype of a read handler. // // The read handler is called when the parser needs to read more bytes from the // source. The handler should write not more than size bytes to the buffer. // The number of written bytes should be set to the size_read variable. // // [in,out] data A pointer to an application data specified by // ini_parser_set_input(). // [out] buffer The buffer to write the data from the source. // [in] size The size of the buffer. 
// [out] size_read The actual number of bytes read from the source. // // On success, the handler should return 1. If the handler failed, // the returned value should be 0. On EOF, the handler should set the // size_read to 0 and return 1. type ini_read_handler_t func(parser *ini_parser_t, buffer []byte) (n int, err error) // The states of the parser. type ini_parser_state_t int const ( ini_PARSE_DOCUMENT_START_STATE ini_parser_state_t = iota // Expect START. ini_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-START. ini_PARSE_SECTION_FIRST_START_STATE // Expect SECTION-FIRST-ENTRY. ini_PARSE_SECTION_START_STATE // Expect SECTION-ENTRY. ini_PARSE_SECTION_INHERIT_STATE // Expect SECTION-INHERIT. ini_PARSE_SECTION_ENTRY_STATE // Expect SECTION-ENTRY. ini_PARSE_SECTION_KEY_STATE // Expect a KEY. ini_PARSE_SECTION_VALUE_STATE // Expect a VALUE. ini_PARSE_COMMENT_START_STATE // Expect COMMENT-START. ini_PARSE_COMMENT_CONTENT_STATE // Expect the content of a comment. ini_PARSE_COMMENT_END_STATE // Expect COMMENT-END. ) func (ps ini_parser_state_t) String() string { switch ps { case ini_PARSE_DOCUMENT_START_STATE: return "ini_PARSE_DOCUMENT_START_STATE" case ini_PARSE_DOCUMENT_END_STATE: return "ini_PARSE_DOCUMENT_END_STATE" case ini_PARSE_SECTION_FIRST_START_STATE: return "ini_PARSE_SECTION_FIRST_START_STATE" case ini_PARSE_SECTION_START_STATE: return "ini_PARSE_SECTION_START_STATE" case ini_PARSE_SECTION_INHERIT_STATE: return "ini_PARSE_SECTION_INHERIT_STATE" case ini_PARSE_SECTION_ENTRY_STATE: return "ini_PARSE_SECTION_ENTRY_STATE" case ini_PARSE_SECTION_KEY_STATE: return "ini_PARSE_SECTION_KEY_STATE" case ini_PARSE_SECTION_VALUE_STATE: return "ini_PARSE_SECTION_VALUE_STATE" case ini_PARSE_COMMENT_START_STATE: return "ini_PARSE_COMMENT_START_STATE" case ini_PARSE_COMMENT_CONTENT_STATE: return "ini_PARSE_COMMENT_CONTENT_STATE" case ini_PARSE_COMMENT_END_STATE: return "ini_PARSE_COMMENT_END_STATE" } return "<unknown parser state>" } // The parser structure. 
// // All members are internal. Manage the structure using the // ini_parser_ family of functions. type ini_parser_t struct { // Error handling error ini_error_type_t // Error type. problem string // Error description. // The byte about which the problem occured. problem_offset int problem_value int problem_mark ini_mark_t // The error context. context string context_mark ini_mark_t // Reader stuff read_handler ini_read_handler_t // Read handler. input_file io.Reader // File input data. input []byte // String input data. input_pos int eof bool // EOF flag buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. unread int // The number of unread characters in the buffer. raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. offset int // The offset of the current position (in bytes). mark ini_mark_t // The mark of the current position. key_level int // The current key level. // Scanner stuff document_start_produced bool // Have we started to scan the input stream? document_end_produced bool // Have we reached the end of the input stream? tokens []ini_token_t // The tokens queue. tokens_head int // The head of the tokens queue. tokens_parsed int // The number of tokens fetched from the queue. token_available bool // Does the tokens queue contain a token ready for dequeueing. // Parser stuff state ini_parser_state_t // The current parser state. states []ini_parser_state_t // The parser states stack. marks []ini_mark_t // The stack of marks. } // Emitter Definitions // The prototype of a write handler. // // The write handler is called when the emitter needs to flush the accumulated // characters to the output. The handler should write @a size bytes of the // @a buffer to the output. // // @param[in,out] data A pointer to an application data specified by // ini_emitter_set_output(). // @param[in] buffer The buffer with bytes to be written. // @param[in] size The size of the buffer. 
// // @returns On success, the handler should return @c 1. If the handler failed, // the returned value should be @c 0. // type ini_write_handler_t func(emitter *ini_emitter_t, buffer []byte) error type ini_emitter_state_t int // The emitter states. const ( // Expect DOCUMENT-START. ini_EMIT_DOCUMENT_START_STATE ini_emitter_state_t = iota ini_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. ini_EMIT_FIRST_SECTION_START_STATE // Expect the first section ini_EMIT_SECTION_START_STATE // Expect the start of section. ini_EMIT_SECTION_FIRST_NODE_KEY_STATE // Expect the start of section. ini_EMIT_ELEMENT_KEY_STATE // Expect the start of section. ini_EMIT_ELEMENT_VALUE_STATE // Expect the node. ini_EMIT_SECTION_END_STATE // Expect the end of section. ini_EMIT_COMMENT_START_STATE // Expect the start of section. ini_EMIT_COMMENT_VALUE_STATE // Expect the content of section. ini_EMIT_COMMENT_END_STATE // Expect the end of section. ) // The emitter structure. // // All members are internal. Manage the structure using the @c ini_emitter_ // family of functions. type ini_emitter_t struct { // Error handling error ini_error_type_t // Error type. problem string // Error description. // Writer stuff write_handler ini_write_handler_t // Write handler. output_buffer *[]byte // String output data. output_file io.Writer // File output data. buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. // Emitter stuff unicode bool // Allow unescaped non-ASCII characters? line_break ini_break_t // The preferred line break. state ini_emitter_state_t // The current emitter state. states []ini_emitter_state_t // The stack of states. events []ini_event_t // The event queue. events_head int // The head of the event queue. level int // The current flow level. root_context bool // Is it the document root context? mapping_context bool // Is it a mapping context? 
line int // The current line. column int // The current column. whitespace bool // If the last character was a whitespace? open_ended bool // If an explicit document end is required? // Scalar analysis. scalar_data struct { value []byte // The scalar value. multiline bool // Does the scalar contain line breaks? single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? style ini_scalar_style_t // The output style. } // Dumper stuff opened bool // If the document was already opened? closed bool // If the document was already closed? }
ini_SECTION_INHERIT_EVENT // A SECTION-INHERIT event. ini_SECTION_ENTRY_EVENT // A SECTION-ENTRY event.
random_line_split
inih.go
package ini import ( "io" ) const DEFAULT_SECTION = "default" type ini_break_t int // Line break types. const ( // Let the parser choose the break type. ini_ANY_BREAK ini_break_t = iota ini_CR_BREAK // Use CR for line breaks (Mac style). ini_LN_BREAK // Use LN for line breaks (Unix style). ini_CRLN_BREAK // Use CR LN for line breaks (DOS style). ) type ini_error_type_t int // Many bad things could happen with the parser and emitter. const ( // No error is produced. ini_NO_ERROR ini_error_type_t = iota ini_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. ini_READER_ERROR // Cannot read or decode the input stream. ini_SCANNER_ERROR // Cannot scan the input stream. ini_PARSER_ERROR // Cannot parse the input stream. ini_COMPOSER_ERROR // Cannot compose a YAML document. ini_WRITER_ERROR // Cannot write to the output stream. ini_EMITTER_ERROR // Cannot emit a YAML stream. ) // The pointer position. type ini_mark_t struct { index int // The position index. line int // The position line. column int // The position column. } // Node Styles type ini_style_t int8 type ini_scalar_style_t ini_style_t // Scalar styles. const ( // Let the emitter choose the style. ini_ANY_SCALAR_STYLE ini_scalar_style_t = iota ini_PLAIN_SCALAR_STYLE // The plain scalar style. ini_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. ini_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. ) // Tokens type ini_token_type_t int // Token types. const ( // An empty token. ini_NO_TOKEN ini_token_type_t = iota ini_DOCUMENT_START_TOKEN // A DOCUMENT-START token. ini_DOCUMENT_END_TOKEN // A DOCUMENT-START token. ini_SECTION_START_TOKEN // A SECTION-START token. ini_SECTION_INHERIT_TOKEN // A SECTION-INHERIT token. ini_SECTION_ENTRY_TOKEN // A SECTION-ENTRY token. ini_KEY_TOKEN // An VALUE token. ini_VALUE_TOKEN // An VALUE token. ini_SCALAR_TOKEN // A SCALAR token. ini_MAP_TOKEN // A MAP token. ini_COMMENT_START_TOKEN // A COMMENT-START token. 
ini_COMMENT_END_TOKEN // A COMMENT-END token. ) func (tt ini_token_type_t)
() string { switch tt { case ini_NO_TOKEN: return "ini_NO_TOKEN" case ini_DOCUMENT_START_TOKEN: return "ini_DOCUMENT_START_TOKEN" case ini_DOCUMENT_END_TOKEN: return "ini_DOCUMENT_END_TOKEN" case ini_SECTION_START_TOKEN: return "ini_SECTION_START_TOKEN" case ini_SECTION_INHERIT_TOKEN: return "ini_SECTION_INHERIT_TOKEN" case ini_SECTION_ENTRY_TOKEN: return "ini_SECTION_ENTRY_TOKEN" case ini_KEY_TOKEN: return "ini_KEY_TOKEN" case ini_VALUE_TOKEN: return "ini_VALUE_TOKEN" case ini_SCALAR_TOKEN: return "ini_SCALAR_TOKEN" case ini_COMMENT_START_TOKEN: return "ini_COMMENT_START_TOKEN" case ini_COMMENT_END_TOKEN: return "ini_COMMENT_END_TOKEN" } return "<unknown token>" } // The token structure. type ini_token_t struct { // The token type. typ ini_token_type_t // The start/end of the token. start_mark, end_mark ini_mark_t // The scalar value // (for ini_SCALAR_TOKEN). value []byte // The scalar style (for ini_SCALAR_TOKEN). style ini_scalar_style_t } // Events type ini_event_type_t int8 // Event types. const ( // An empty event. ini_NO_EVENT ini_event_type_t = iota ini_DOCUMENT_START_EVENT // A DOCUMENT-START event. ini_DOCUMENT_END_EVENT // A DOCUMENT-END event. ini_SECTION_INHERIT_EVENT // A SECTION-INHERIT event. ini_SECTION_ENTRY_EVENT // A SECTION-ENTRY event. ini_MAPPING_EVENT // An MAPPING event. ini_SCALAR_EVENT // An SCALAR event. ini_COMMENT_EVENT // A COMMENT event. ) // The event structure. type ini_event_t struct { // The event type. typ ini_event_type_t // The start and end of the event. start_mark, end_mark ini_mark_t // The node value. value []byte // The tag (for ini_SCALAR_EVENT). tag []byte // The style (for ini_ELEMENT_START_EVENT). 
style ini_style_t } func (e *ini_event_t) event_type() string { switch e.typ { case ini_NO_EVENT: return "ini_NO_EVENT" case ini_DOCUMENT_START_EVENT: return "ini_DOCUMENT_START_EVENT" case ini_DOCUMENT_END_EVENT: return "ini_DOCUMENT_END_EVENT" case ini_SECTION_INHERIT_EVENT: return "ini_SECTION_INHERIT_EVENT" case ini_SECTION_ENTRY_EVENT: return "ini_SECTION_ENTRY_EVENT" case ini_MAPPING_EVENT: return "ini_MAPPING_EVENT" case ini_SCALAR_EVENT: return "ini_SCALAR_EVENT" case ini_COMMENT_EVENT: return "ini_COMMENT_EVENT" } return "<unknown token>" } func (e *ini_event_t) scalar_style() ini_scalar_style_t { return ini_scalar_style_t(e.style) } // Nodes const ( ini_NULL_TAG = "null" // The tag 'null' with the only possible value: null. ini_BOOL_TAG = "bool" // The tag 'bool' with the values: true and false. ini_STR_TAG = "str" // The tag 'str' for string values. ini_INT_TAG = "int" // The tag 'int' for integer values. ini_FLOAT_TAG = "float" // The tag 'float' for float values. ini_BINARY_TAG = "binary" ini_MAP_TAG = "map" ini_SECTION_TAG = "section" ini_DEFAULT_SCALAR_TAG = ini_STR_TAG // The default scalar tag is str ) // The prototype of a read handler. // // The read handler is called when the parser needs to read more bytes from the // source. The handler should write not more than size bytes to the buffer. // The number of written bytes should be set to the size_read variable. // // [in,out] data A pointer to an application data specified by // ini_parser_set_input(). // [out] buffer The buffer to write the data from the source. // [in] size The size of the buffer. // [out] size_read The actual number of bytes read from the source. // // On success, the handler should return 1. If the handler failed, // the returned value should be 0. On EOF, the handler should set the // size_read to 0 and return 1. type ini_read_handler_t func(parser *ini_parser_t, buffer []byte) (n int, err error) // The states of the parser. 
type ini_parser_state_t int const ( ini_PARSE_DOCUMENT_START_STATE ini_parser_state_t = iota // Expect START. ini_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-START. ini_PARSE_SECTION_FIRST_START_STATE // Expect SECTION-FIRST-ENTRY. ini_PARSE_SECTION_START_STATE // Expect SECTION-ENTRY. ini_PARSE_SECTION_INHERIT_STATE // Expect SECTION-INHERIT. ini_PARSE_SECTION_ENTRY_STATE // Expect SECTION-ENTRY. ini_PARSE_SECTION_KEY_STATE // Expect a KEY. ini_PARSE_SECTION_VALUE_STATE // Expect a VALUE. ini_PARSE_COMMENT_START_STATE // Expect COMMENT-START. ini_PARSE_COMMENT_CONTENT_STATE // Expect the content of a comment. ini_PARSE_COMMENT_END_STATE // Expect COMMENT-END. ) func (ps ini_parser_state_t) String() string { switch ps { case ini_PARSE_DOCUMENT_START_STATE: return "ini_PARSE_DOCUMENT_START_STATE" case ini_PARSE_DOCUMENT_END_STATE: return "ini_PARSE_DOCUMENT_END_STATE" case ini_PARSE_SECTION_FIRST_START_STATE: return "ini_PARSE_SECTION_FIRST_START_STATE" case ini_PARSE_SECTION_START_STATE: return "ini_PARSE_SECTION_START_STATE" case ini_PARSE_SECTION_INHERIT_STATE: return "ini_PARSE_SECTION_INHERIT_STATE" case ini_PARSE_SECTION_ENTRY_STATE: return "ini_PARSE_SECTION_ENTRY_STATE" case ini_PARSE_SECTION_KEY_STATE: return "ini_PARSE_SECTION_KEY_STATE" case ini_PARSE_SECTION_VALUE_STATE: return "ini_PARSE_SECTION_VALUE_STATE" case ini_PARSE_COMMENT_START_STATE: return "ini_PARSE_COMMENT_START_STATE" case ini_PARSE_COMMENT_CONTENT_STATE: return "ini_PARSE_COMMENT_CONTENT_STATE" case ini_PARSE_COMMENT_END_STATE: return "ini_PARSE_COMMENT_END_STATE" } return "<unknown parser state>" } // The parser structure. // // All members are internal. Manage the structure using the // ini_parser_ family of functions. type ini_parser_t struct { // Error handling error ini_error_type_t // Error type. problem string // Error description. // The byte about which the problem occured. problem_offset int problem_value int problem_mark ini_mark_t // The error context. 
context string context_mark ini_mark_t // Reader stuff read_handler ini_read_handler_t // Read handler. input_file io.Reader // File input data. input []byte // String input data. input_pos int eof bool // EOF flag buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. unread int // The number of unread characters in the buffer. raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. offset int // The offset of the current position (in bytes). mark ini_mark_t // The mark of the current position. key_level int // The current key level. // Scanner stuff document_start_produced bool // Have we started to scan the input stream? document_end_produced bool // Have we reached the end of the input stream? tokens []ini_token_t // The tokens queue. tokens_head int // The head of the tokens queue. tokens_parsed int // The number of tokens fetched from the queue. token_available bool // Does the tokens queue contain a token ready for dequeueing. // Parser stuff state ini_parser_state_t // The current parser state. states []ini_parser_state_t // The parser states stack. marks []ini_mark_t // The stack of marks. } // Emitter Definitions // The prototype of a write handler. // // The write handler is called when the emitter needs to flush the accumulated // characters to the output. The handler should write @a size bytes of the // @a buffer to the output. // // @param[in,out] data A pointer to an application data specified by // ini_emitter_set_output(). // @param[in] buffer The buffer with bytes to be written. // @param[in] size The size of the buffer. // // @returns On success, the handler should return @c 1. If the handler failed, // the returned value should be @c 0. // type ini_write_handler_t func(emitter *ini_emitter_t, buffer []byte) error type ini_emitter_state_t int // The emitter states. const ( // Expect DOCUMENT-START. 
ini_EMIT_DOCUMENT_START_STATE ini_emitter_state_t = iota ini_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. ini_EMIT_FIRST_SECTION_START_STATE // Expect the first section ini_EMIT_SECTION_START_STATE // Expect the start of section. ini_EMIT_SECTION_FIRST_NODE_KEY_STATE // Expect the start of section. ini_EMIT_ELEMENT_KEY_STATE // Expect the start of section. ini_EMIT_ELEMENT_VALUE_STATE // Expect the node. ini_EMIT_SECTION_END_STATE // Expect the end of section. ini_EMIT_COMMENT_START_STATE // Expect the start of section. ini_EMIT_COMMENT_VALUE_STATE // Expect the content of section. ini_EMIT_COMMENT_END_STATE // Expect the end of section. ) // The emitter structure. // // All members are internal. Manage the structure using the @c ini_emitter_ // family of functions. type ini_emitter_t struct { // Error handling error ini_error_type_t // Error type. problem string // Error description. // Writer stuff write_handler ini_write_handler_t // Write handler. output_buffer *[]byte // String output data. output_file io.Writer // File output data. buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. // Emitter stuff unicode bool // Allow unescaped non-ASCII characters? line_break ini_break_t // The preferred line break. state ini_emitter_state_t // The current emitter state. states []ini_emitter_state_t // The stack of states. events []ini_event_t // The event queue. events_head int // The head of the event queue. level int // The current flow level. root_context bool // Is it the document root context? mapping_context bool // Is it a mapping context? line int // The current line. column int // The current column. whitespace bool // If the last character was a whitespace? open_ended bool // If an explicit document end is required? // Scalar analysis. scalar_data struct { value []byte // The scalar value. 
multiline bool // Does the scalar contain line breaks? single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? style ini_scalar_style_t // The output style. } // Dumper stuff opened bool // If the document was already opened? closed bool // If the document was already closed? }
String
identifier_name
inih.go
package ini import ( "io" ) const DEFAULT_SECTION = "default" type ini_break_t int // Line break types. const ( // Let the parser choose the break type. ini_ANY_BREAK ini_break_t = iota ini_CR_BREAK // Use CR for line breaks (Mac style). ini_LN_BREAK // Use LN for line breaks (Unix style). ini_CRLN_BREAK // Use CR LN for line breaks (DOS style). ) type ini_error_type_t int // Many bad things could happen with the parser and emitter. const ( // No error is produced. ini_NO_ERROR ini_error_type_t = iota ini_MEMORY_ERROR // Cannot allocate or reallocate a block of memory. ini_READER_ERROR // Cannot read or decode the input stream. ini_SCANNER_ERROR // Cannot scan the input stream. ini_PARSER_ERROR // Cannot parse the input stream. ini_COMPOSER_ERROR // Cannot compose a YAML document. ini_WRITER_ERROR // Cannot write to the output stream. ini_EMITTER_ERROR // Cannot emit a YAML stream. ) // The pointer position. type ini_mark_t struct { index int // The position index. line int // The position line. column int // The position column. } // Node Styles type ini_style_t int8 type ini_scalar_style_t ini_style_t // Scalar styles. const ( // Let the emitter choose the style. ini_ANY_SCALAR_STYLE ini_scalar_style_t = iota ini_PLAIN_SCALAR_STYLE // The plain scalar style. ini_SINGLE_QUOTED_SCALAR_STYLE // The single-quoted scalar style. ini_DOUBLE_QUOTED_SCALAR_STYLE // The double-quoted scalar style. ) // Tokens type ini_token_type_t int // Token types. const ( // An empty token. ini_NO_TOKEN ini_token_type_t = iota ini_DOCUMENT_START_TOKEN // A DOCUMENT-START token. ini_DOCUMENT_END_TOKEN // A DOCUMENT-START token. ini_SECTION_START_TOKEN // A SECTION-START token. ini_SECTION_INHERIT_TOKEN // A SECTION-INHERIT token. ini_SECTION_ENTRY_TOKEN // A SECTION-ENTRY token. ini_KEY_TOKEN // An VALUE token. ini_VALUE_TOKEN // An VALUE token. ini_SCALAR_TOKEN // A SCALAR token. ini_MAP_TOKEN // A MAP token. ini_COMMENT_START_TOKEN // A COMMENT-START token. 
ini_COMMENT_END_TOKEN // A COMMENT-END token. ) func (tt ini_token_type_t) String() string { switch tt { case ini_NO_TOKEN: return "ini_NO_TOKEN" case ini_DOCUMENT_START_TOKEN: return "ini_DOCUMENT_START_TOKEN" case ini_DOCUMENT_END_TOKEN: return "ini_DOCUMENT_END_TOKEN" case ini_SECTION_START_TOKEN: return "ini_SECTION_START_TOKEN" case ini_SECTION_INHERIT_TOKEN: return "ini_SECTION_INHERIT_TOKEN" case ini_SECTION_ENTRY_TOKEN: return "ini_SECTION_ENTRY_TOKEN" case ini_KEY_TOKEN: return "ini_KEY_TOKEN" case ini_VALUE_TOKEN: return "ini_VALUE_TOKEN" case ini_SCALAR_TOKEN: return "ini_SCALAR_TOKEN" case ini_COMMENT_START_TOKEN: return "ini_COMMENT_START_TOKEN" case ini_COMMENT_END_TOKEN: return "ini_COMMENT_END_TOKEN" } return "<unknown token>" } // The token structure. type ini_token_t struct { // The token type. typ ini_token_type_t // The start/end of the token. start_mark, end_mark ini_mark_t // The scalar value // (for ini_SCALAR_TOKEN). value []byte // The scalar style (for ini_SCALAR_TOKEN). style ini_scalar_style_t } // Events type ini_event_type_t int8 // Event types. const ( // An empty event. ini_NO_EVENT ini_event_type_t = iota ini_DOCUMENT_START_EVENT // A DOCUMENT-START event. ini_DOCUMENT_END_EVENT // A DOCUMENT-END event. ini_SECTION_INHERIT_EVENT // A SECTION-INHERIT event. ini_SECTION_ENTRY_EVENT // A SECTION-ENTRY event. ini_MAPPING_EVENT // An MAPPING event. ini_SCALAR_EVENT // An SCALAR event. ini_COMMENT_EVENT // A COMMENT event. ) // The event structure. type ini_event_t struct { // The event type. typ ini_event_type_t // The start and end of the event. start_mark, end_mark ini_mark_t // The node value. value []byte // The tag (for ini_SCALAR_EVENT). tag []byte // The style (for ini_ELEMENT_START_EVENT). style ini_style_t } func (e *ini_event_t) event_type() string
func (e *ini_event_t) scalar_style() ini_scalar_style_t { return ini_scalar_style_t(e.style) } // Nodes const ( ini_NULL_TAG = "null" // The tag 'null' with the only possible value: null. ini_BOOL_TAG = "bool" // The tag 'bool' with the values: true and false. ini_STR_TAG = "str" // The tag 'str' for string values. ini_INT_TAG = "int" // The tag 'int' for integer values. ini_FLOAT_TAG = "float" // The tag 'float' for float values. ini_BINARY_TAG = "binary" ini_MAP_TAG = "map" ini_SECTION_TAG = "section" ini_DEFAULT_SCALAR_TAG = ini_STR_TAG // The default scalar tag is str ) // The prototype of a read handler. // // The read handler is called when the parser needs to read more bytes from the // source. The handler should write not more than size bytes to the buffer. // The number of written bytes should be set to the size_read variable. // // [in,out] data A pointer to an application data specified by // ini_parser_set_input(). // [out] buffer The buffer to write the data from the source. // [in] size The size of the buffer. // [out] size_read The actual number of bytes read from the source. // // On success, the handler should return 1. If the handler failed, // the returned value should be 0. On EOF, the handler should set the // size_read to 0 and return 1. type ini_read_handler_t func(parser *ini_parser_t, buffer []byte) (n int, err error) // The states of the parser. type ini_parser_state_t int const ( ini_PARSE_DOCUMENT_START_STATE ini_parser_state_t = iota // Expect START. ini_PARSE_DOCUMENT_END_STATE // Expect DOCUMENT-START. ini_PARSE_SECTION_FIRST_START_STATE // Expect SECTION-FIRST-ENTRY. ini_PARSE_SECTION_START_STATE // Expect SECTION-ENTRY. ini_PARSE_SECTION_INHERIT_STATE // Expect SECTION-INHERIT. ini_PARSE_SECTION_ENTRY_STATE // Expect SECTION-ENTRY. ini_PARSE_SECTION_KEY_STATE // Expect a KEY. ini_PARSE_SECTION_VALUE_STATE // Expect a VALUE. ini_PARSE_COMMENT_START_STATE // Expect COMMENT-START. 
ini_PARSE_COMMENT_CONTENT_STATE // Expect the content of a comment. ini_PARSE_COMMENT_END_STATE // Expect COMMENT-END. ) func (ps ini_parser_state_t) String() string { switch ps { case ini_PARSE_DOCUMENT_START_STATE: return "ini_PARSE_DOCUMENT_START_STATE" case ini_PARSE_DOCUMENT_END_STATE: return "ini_PARSE_DOCUMENT_END_STATE" case ini_PARSE_SECTION_FIRST_START_STATE: return "ini_PARSE_SECTION_FIRST_START_STATE" case ini_PARSE_SECTION_START_STATE: return "ini_PARSE_SECTION_START_STATE" case ini_PARSE_SECTION_INHERIT_STATE: return "ini_PARSE_SECTION_INHERIT_STATE" case ini_PARSE_SECTION_ENTRY_STATE: return "ini_PARSE_SECTION_ENTRY_STATE" case ini_PARSE_SECTION_KEY_STATE: return "ini_PARSE_SECTION_KEY_STATE" case ini_PARSE_SECTION_VALUE_STATE: return "ini_PARSE_SECTION_VALUE_STATE" case ini_PARSE_COMMENT_START_STATE: return "ini_PARSE_COMMENT_START_STATE" case ini_PARSE_COMMENT_CONTENT_STATE: return "ini_PARSE_COMMENT_CONTENT_STATE" case ini_PARSE_COMMENT_END_STATE: return "ini_PARSE_COMMENT_END_STATE" } return "<unknown parser state>" } // The parser structure. // // All members are internal. Manage the structure using the // ini_parser_ family of functions. type ini_parser_t struct { // Error handling error ini_error_type_t // Error type. problem string // Error description. // The byte about which the problem occured. problem_offset int problem_value int problem_mark ini_mark_t // The error context. context string context_mark ini_mark_t // Reader stuff read_handler ini_read_handler_t // Read handler. input_file io.Reader // File input data. input []byte // String input data. input_pos int eof bool // EOF flag buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. unread int // The number of unread characters in the buffer. raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. offset int // The offset of the current position (in bytes). 
mark ini_mark_t // The mark of the current position. key_level int // The current key level. // Scanner stuff document_start_produced bool // Have we started to scan the input stream? document_end_produced bool // Have we reached the end of the input stream? tokens []ini_token_t // The tokens queue. tokens_head int // The head of the tokens queue. tokens_parsed int // The number of tokens fetched from the queue. token_available bool // Does the tokens queue contain a token ready for dequeueing. // Parser stuff state ini_parser_state_t // The current parser state. states []ini_parser_state_t // The parser states stack. marks []ini_mark_t // The stack of marks. } // Emitter Definitions // The prototype of a write handler. // // The write handler is called when the emitter needs to flush the accumulated // characters to the output. The handler should write @a size bytes of the // @a buffer to the output. // // @param[in,out] data A pointer to an application data specified by // ini_emitter_set_output(). // @param[in] buffer The buffer with bytes to be written. // @param[in] size The size of the buffer. // // @returns On success, the handler should return @c 1. If the handler failed, // the returned value should be @c 0. // type ini_write_handler_t func(emitter *ini_emitter_t, buffer []byte) error type ini_emitter_state_t int // The emitter states. const ( // Expect DOCUMENT-START. ini_EMIT_DOCUMENT_START_STATE ini_emitter_state_t = iota ini_EMIT_DOCUMENT_END_STATE // Expect DOCUMENT-END. ini_EMIT_FIRST_SECTION_START_STATE // Expect the first section ini_EMIT_SECTION_START_STATE // Expect the start of section. ini_EMIT_SECTION_FIRST_NODE_KEY_STATE // Expect the start of section. ini_EMIT_ELEMENT_KEY_STATE // Expect the start of section. ini_EMIT_ELEMENT_VALUE_STATE // Expect the node. ini_EMIT_SECTION_END_STATE // Expect the end of section. ini_EMIT_COMMENT_START_STATE // Expect the start of section. ini_EMIT_COMMENT_VALUE_STATE // Expect the content of section. 
ini_EMIT_COMMENT_END_STATE // Expect the end of section. ) // The emitter structure. // // All members are internal. Manage the structure using the @c ini_emitter_ // family of functions. type ini_emitter_t struct { // Error handling error ini_error_type_t // Error type. problem string // Error description. // Writer stuff write_handler ini_write_handler_t // Write handler. output_buffer *[]byte // String output data. output_file io.Writer // File output data. buffer []byte // The working buffer. buffer_pos int // The current position of the buffer. raw_buffer []byte // The raw buffer. raw_buffer_pos int // The current position of the buffer. // Emitter stuff unicode bool // Allow unescaped non-ASCII characters? line_break ini_break_t // The preferred line break. state ini_emitter_state_t // The current emitter state. states []ini_emitter_state_t // The stack of states. events []ini_event_t // The event queue. events_head int // The head of the event queue. level int // The current flow level. root_context bool // Is it the document root context? mapping_context bool // Is it a mapping context? line int // The current line. column int // The current column. whitespace bool // If the last character was a whitespace? open_ended bool // If an explicit document end is required? // Scalar analysis. scalar_data struct { value []byte // The scalar value. multiline bool // Does the scalar contain line breaks? single_quoted_allowed bool // Can the scalar be expressed in the single quoted style? style ini_scalar_style_t // The output style. } // Dumper stuff opened bool // If the document was already opened? closed bool // If the document was already closed? }
{ switch e.typ { case ini_NO_EVENT: return "ini_NO_EVENT" case ini_DOCUMENT_START_EVENT: return "ini_DOCUMENT_START_EVENT" case ini_DOCUMENT_END_EVENT: return "ini_DOCUMENT_END_EVENT" case ini_SECTION_INHERIT_EVENT: return "ini_SECTION_INHERIT_EVENT" case ini_SECTION_ENTRY_EVENT: return "ini_SECTION_ENTRY_EVENT" case ini_MAPPING_EVENT: return "ini_MAPPING_EVENT" case ini_SCALAR_EVENT: return "ini_SCALAR_EVENT" case ini_COMMENT_EVENT: return "ini_COMMENT_EVENT" } return "<unknown token>" }
identifier_body
AutoCapture_runtimelog.py
# import wx,time,pyvisa,os import time import subprocess as sp from paramiko import * import locale import os import re from socket import timeout as SocketTimeout # this is quote from the shlex module, added in py3.3 _find_unsafe = re.compile(br'[^\w@%+=:,./~-]').search def _sh_quote(s): """Return a shell-escaped version of the string `s`.""" if not s: return b"" if _find_unsafe(s) is None: return s # use single quotes, and put single quotes into double quotes # the string $'b is then quoted as '$'"'"'b' return b"'" + s.replace(b"'", b"'\"'\"'") + b"'" # Unicode conversion functions; assume UTF-8 def asbytes(s): """Turns unicode into bytes, if needed. Assumes UTF-8. """ if isinstance(s, bytes): return s else: return s.encode('utf-8') def asunicode(s): """Turns bytes into unicode, if needed. Uses UTF-8. """ if isinstance(s, bytes): return s.decode('utf-8', 'replace') else: return s # os.path.sep is unicode on Python 3, no matter the platform bytes_sep = asbytes(os.path.sep) # Unicode conversion function for Windows # Used to convert local paths if the local machine is Windows def asunicode_win(s): """Turns bytes into unicode, if needed. """ if isinstance(s, bytes): return s.decode(locale.getpreferredencoding()) else: return s class SCPClient(object): """ An scp1 implementation, compatible with openssh scp. Raises SCPException for all transport related errors. Local filesystem and OS errors pass through. Main public methods are .put and .get The get method is controlled by the remote scp instance, and behaves accordingly. This means that symlinks are resolved, and the transfer is halted after too many levels of symlinks are detected. The put method uses os.walk for recursion, and sends files accordingly. Since scp doesn't support symlinks, we send file symlinks as the file (matching scp behaviour), but we make no attempt at symlinked directories. 
""" def __init__(self, transport, buff_size=16384, socket_timeout=5.0, progress=None, sanitize=_sh_quote): """ Create an scp1 client. @param transport: an existing paramiko L{Transport} @type transport: L{Transport} @param buff_size: size of the scp send buffer. @type buff_size: int @param socket_timeout: channel socket timeout in seconds @type socket_timeout: float @param progress: callback - called with (filename, size, sent) during transfers @param sanitize: function - called with filename, should return safe or escaped string. Uses _sh_quote by default. @type progress: function(string, int, int) """ self.transport = transport self.buff_size = buff_size self.socket_timeout = socket_timeout self.channel = None self.preserve_times = False self._progress = progress self._recv_dir = b'' self._rename = False self._utime = None self.sanitize = sanitize self._dirtimes = {} def __enter__(self): self.channel = self._open() return self def __exit__(self, type, value, traceback): self.close() def put(self, files, remote_path=b'.', recursive=False, preserve_times=False): """ Transfer files to remote host. @param files: A single path, or a list of paths to be transfered. recursive must be True to transfer directories. @type files: string OR list of strings @param remote_path: path in which to receive the files on the remote host. defaults to '.' @type remote_path: str @param recursive: transfer files and directories recursively @type recursive: bool @param preserve_times: preserve mtime and atime of transfered files and directories. 
@type preserve_times: bool """ self.preserve_times = preserve_times self.channel = self._open() self._pushed = 0 self.channel.settimeout(self.socket_timeout) scp_command = (b'scp -t ', b'scp -r -t ')[recursive] self.channel.exec_command(scp_command + self.sanitize(asbytes(remote_path))) self._recv_confirm() if not isinstance(files, (list, tuple)): files = [files] if recursive: self._send_recursive(files) else: self._send_files(files) self.close() def get(self, remote_path, local_path='', recursive=False, preserve_times=False): """ Transfer files from remote host to localhost @param remote_path: path to retreive from remote host. since this is evaluated by scp on the remote host, shell wildcards and environment variables may be used. @type remote_path: str @param local_path: path in which to receive files locally @type local_path: str @param recursive: transfer files and directories recursively @type recursive: bool @param preserve_times: preserve mtime and atime of transfered files and directories. 
@type preserve_times: bool """ if not isinstance(remote_path, (list, tuple)): remote_path = [remote_path] remote_path = [self.sanitize(asbytes(r)) for r in remote_path] self._recv_dir = local_path or os.getcwd() self._rename = (len(remote_path) == 1 and not os.path.isdir(os.path.abspath(local_path))) if len(remote_path) > 1: if not os.path.exists(self._recv_dir): raise SCPException("Local path '%s' does not exist" % asunicode(self._recv_dir)) elif not os.path.isdir(self._recv_dir): raise SCPException("Local path '%s' is not a directory" % asunicode(self._recv_dir)) rcsv = (b'', b' -r')[recursive] prsv = (b'', b' -p')[preserve_times] self.channel = self._open() self._pushed = 0 self.channel.settimeout(self.socket_timeout) self.channel.exec_command(b"scp" + rcsv + prsv + b" -f " + b' '.join(remote_path)) self._recv_all() self.close() def _open(self): """open a scp channel""" if self.channel is None: self.channel = self.transport.open_session() return self.channel def close(self): """close scp channel""" if self.channel is not None: self.channel.close() self.channel = None def _read_stats(self, name): """return just the file stats needed for scp""" if os.name == 'nt': name = asunicode(name) stats = os.stat(name) mode = oct(stats.st_mode)[-4:] size = stats.st_size atime = int(stats.st_atime) mtime = int(stats.st_mtime) return (mode, size, mtime, atime) def _send_files(self, files): for name in files: basename = asbytes(os.path.basename(name)) (mode, size, mtime, atime) = self._read_stats(name) if self.preserve_times: self._send_time(mtime, atime) file_hdl = open(name, 'rb') # The protocol can't handle \n in the filename. # Quote them as the control sequence \^J for now, # which is how openssh handles it. 
self.channel.sendall(("C%s %d " % (mode, size)).encode('ascii') + basename.replace(b'\n', b'\\^J') + b"\n") self._recv_confirm() file_pos = 0 if self._progress: if size == 0: # avoid divide-by-zero self._progress(basename, 1, 1) else: self._progress(basename, size, 0) buff_size = self.buff_size chan = self.channel while file_pos < size: chan.sendall(file_hdl.read(buff_size)) file_pos = file_hdl.tell() if self._progress: self._progress(basename, size, file_pos) chan.sendall('\x00') file_hdl.close() self._recv_confirm() def _chdir(self, from_dir, to_dir): # Pop until we're one level up from our next push. # Push *once* into to_dir. # This is dependent on the depth-first traversal from os.walk # add path.sep to each when checking the prefix, so we can use # path.dirname after common = os.path.commonprefix([from_dir + bytes_sep, to_dir + bytes_sep]) # now take the dirname, since commonprefix is character based, # and we either have a seperator, or a partial name common = os.path.dirname(common) cur_dir = from_dir.rstrip(bytes_sep) while cur_dir != common: cur_dir = os.path.split(cur_dir)[0] self._send_popd() # now we're in our common base directory, so on self._send_pushd(to_dir) def _send_recursive(self, files): for base in files: if not os.path.isdir(base): # filename mixed into the bunch self._send_files([base]) continue last_dir = asbytes(base) for root, dirs, fls in os.walk(base): self._chdir(last_dir, asbytes(root)) self._send_files([os.path.join(root, f) for f in fls]) last_dir = asbytes(root) # back out of the directory while self._pushed > 0: self._send_popd() def _send_pushd(self, directory): (mode, size, mtime, atime) = self._read_stats(directory) basename = asbytes(os.path.basename(directory)) if self.preserve_times: self._send_time(mtime, atime) self.channel.sendall(('D%s 0 ' % mode).encode('ascii') + basename.replace(b'\n', b'\\^J') + b'\n') self._recv_confirm() self._pushed += 1 def _send_popd(self): self.channel.sendall('E\n') self._recv_confirm() 
self._pushed -= 1 def _send_time(self, mtime, atime): self.channel.sendall(('T%d 0 %d 0\n' % (mtime, atime)).encode('ascii')) self._recv_confirm() def _recv_confirm(self): # read scp response msg = b'' try: msg = self.channel.recv(512) except SocketTimeout: raise SCPException('Timout waiting for scp response') # slice off the first byte, so this compare will work in py2 and py3 if msg and msg[0:1] == b'\x00': return elif msg and msg[0:1] == b'\x01': raise SCPException(asunicode(msg[1:])) elif self.channel.recv_stderr_ready(): msg = self.channel.recv_stderr(512) raise SCPException(asunicode(msg)) elif not msg: raise SCPException('No response from server') else: raise SCPException('Invalid response from server', msg) def _recv_all(self): # loop over scp commands, and receive as necessary command = {b'C': self._recv_file, b'T': self._set_time, b'D': self._recv_pushd, b'E': self._recv_popd} while not self.channel.closed: # wait for command as long as we're open self.channel.sendall('\x00') msg = self.channel.recv(1024) if not msg: # chan closed while recving break assert msg[-1:] == b'\n' msg = msg[:-1] code = msg[0:1] try: command[code](msg[1:]) except KeyError: raise SCPException(asunicode(msg[1:])) # directory times can't be set until we're done writing files self._set_dirtimes() def _set_time(self, cmd): try: times = cmd.split(b' ') mtime = int(times[0]) atime = int(times[2]) or mtime except: self.channel.send(b'\x01') raise SCPException('Bad time format') # save for later self._utime = (atime, mtime) def _recv_file(self, cmd): chan = self.channel parts = cmd.strip().split(b' ', 2) try: mode = int(parts[0], 8) size = int(parts[1]) if self._rename: path = self._recv_dir self._rename = False elif os.name == 'nt': path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8')) else: path = os.path.join(asbytes(self._recv_dir), parts[2]) except: chan.send('\x01') chan.close() raise SCPException('Bad file format') try: file_hdl = open(path, 'wb') except 
IOError as e: chan.send(b'\x01' + str(e).encode('utf-8')) chan.close() raise if self._progress: if size == 0: # avoid divide-by-zero self._progress(path, 1, 1) else: self._progress(path, size, 0) buff_size = self.buff_size pos = 0 chan.send(b'\x00') try: while pos < size: # we have to make sure we don't read the final byte if size - pos <= buff_size: buff_size = size - pos file_hdl.write(chan.recv(buff_size)) pos = file_hdl.tell() if self._progress: self._progress(path, size, pos) msg = chan.recv(512) if msg and msg[0:1] != b'\x00': raise SCPException(asunicode(msg[1:])) except SocketTimeout: chan.close() raise SCPException('Error receiving, socket.timeout') file_hdl.truncate() try: os.utime(path, self._utime) self._utime = None os.chmod(path, mode) # should we notify the other end? finally: file_hdl.close() # '\x00' confirmation sent in _recv_all def _recv_pushd(self, cmd): parts = cmd.split(b' ', 2) try: mode = int(parts[0], 8) if self._rename: path = self._recv_dir self._rename = False elif os.name == 'nt': path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8')) else: path = os.path.join(asbytes(self._recv_dir), parts[2]) except: self.channel.send(b'\x01') raise SCPException('Bad directory format') try: if not os.path.exists(path): os.mkdir(path, mode) elif os.path.isdir(path): os.chmod(path, mode) else: raise SCPException('%s: Not a directory' % path) self._dirtimes[path] = (self._utime) self._utime = None self._recv_dir = path except (OSError, SCPException) as e: self.channel.send(b'\x01' + asbytes(str(e))) raise def _recv_popd(self, *cmd): self._recv_dir = os.path.split(self._recv_dir)[0] def _set_dirtimes(self): try: for d in self._dirtimes: os.utime(d, self._dirtimes[d]) finally: self._dirtimes = {} class SCPException(Exception): """SCP exception class""" pass class SSHClient_noauth(SSHClient): def _auth(self, username, *args): self._transport.auth_none(username) return class SCP(object): def __init__(self, host, port, username='root', 
password='root'): self.host = host self.port = port self.username = username self.password = password def createSSHClient(self,server,port,user,password): client = SSHClient() client.set_missing_host_key_policy(AutoAddPolicy()) client.connect(server, port, user, password) return client def createSSHClient_No_password(self,server,port,user,password = None): t = SSHClient_noauth() t.set_missing_host_key_policy(AutoAddPolicy()) t.connect(server,port,user,password) return t def connect(self): try: self.ssh= self.createSSHClient(self.host, self.port, self.username, self.password) self.scp = SCPClient(self.ssh.get_transport()) print('success to conenct SCP Client') except: self.ssh = False return self.ssh def upload(self, local_path, remote_path, mode='bin'): mode = mode and mode.lower() or 'bin' if(mode not in ('bin', 'text')): raise RuntimeError("Invalid Ftp mode (%s)" % mode) file_mode = (mode == 'bin') and 'rb' or 'r'#file_mode = 'rb',rb represent readonly file = open(local_path, file_mode) try: if(mode == 'bin'): ret = self.scp.put(local_path,remote_path) #store a file in binary mode print('success to upload %s to remote_path %s'%(local_path,remote_path)) else: print('error') #store a file in line mode return True finally: file.close() return False def download(self, local_path, remote_path, mode='bin'): mode = mode and mode.lower() or 'bin' if(mode not in ('bin', 'text')): raise RuntimeError("Invalid Ftp mode (%s)" % mode) file_mode = (mode == 'bin') and 'w+b' or 'w' file = open(local_path, file_mode) try: if(mode == 'bin'): ret = self.scp.get(remote_path,local_path) print('success to download remote file to local') else: print('error') # self.log.debug("get %s" % ftp_path) # self.log.trace(ret) file.close() return True except Exception as e: file.close() os.remove(local_path) raise e return False def close(self): self.ssh.close() class _SSH_Shell(): def __init__(self):
def open(self,hostname,port,username = 'root',password = None): if password == "None": password = None if password == None: self.ssh = SSHClient_noauth() else: self.ssh = SSHClient() self.ssh.set_missing_host_key_policy(AutoAddPolicy()) try: self.ssh.connect(hostname,port,username,password) return self except Exception: return False def write(self,command): stdin,stdout,stderr = self.ssh.exec_command(command) print("send command => %s"%command)#logging.info return stdin,stdout,stderr def write_long(self,command): stdin,stdout,stderr = self.ssh.exec_command(command) print("send command => %s"%command) result = "" for line1 in stderr.readlines(): print("stdout=>"+str(line1)) result = result+line1 for line1 in stdout.readlines(): print("stdout=>"+str(line1)) result = result+line1 return result def read(self,std): cmd_result = std[1].read().decode(encoding='utf-8'),std[2].read().decode(encoding='utf-8') print(time.strftime('%Y%m%d%H%M%S') + '=>'+cmd_result[0]) return cmd_result def command(self,command): std = self.write(command) time.sleep(0.5) res = self.read(std) return res def close(self): self.ssh.close() class Dialog(wx.App): def __init__(self,window): wx.App.__init__(self) self.window = window def erroralert(self,msg,title): dlg = wx.MessageDialog(None,msg,title,wx.OK|wx.CANCEL|wx.ICON_ERROR) result = dlg.ShowModal() if result == wx.CANCEL: dlg.Destroy() else: dlg.Destroy() return result def Infoalert(self,msg,title): dlg = wx.MessageDialog(None,msg,title,wx.OK|wx.CANCEL|wx.ICON_INFORMATION) result = dlg.ShowModal() if result == wx.CANCEL: dlg.Destroy() else: dlg.Destroy() return result def download_from_ftp_to_local_SCP(host,port,ftp_path, local_file, mode = 'bin', user = 'root', password = 'root'): """ This keyword download a file from Ftp server to local of test case running. 
example usage: | Download From Ftp To Local | ftp://10.56.117.112/etc/ipsec_configuration.xml | c:${/}ipsec_configuration.xml | BIN | | Return value | the output of command | """ try: scp_obj = SCP(host, port, user, password) scp_obj.connect() except Exception: scp_obj = SCP(host, port, user, password) if os.path.isdir(local_file): print('not support now!') else: scp_obj.download(local_file, ftp_path, mode) scp_obj.close() def PowerSuppy_Init(interface,address): ''' sn should be string, eg: Init_NRP('103123'), '103123' is from NRP_Z21 power viewer Z11 productID: 0x000c, Z21 productID: 0x0003 ''' visaDLL = 'c:/windows/system32/visa64.dll' resourceManager = pyvisa.ResourceManager(visaDLL) if interface == "GPIB": print("PowerSupply remote mode is:"+ interface+", GPIB Address is:"+address) try: PS_handler = resourceManager.open_resource("GPIB0::%s::INSTR"%(address)) return PS_handler except Exception: return False elif interface == "TCPIP": print("PowerSupply remote mode is:"+ interface+", IP Address is:"+address) try: PS_handler = resourceManager.open_resource("TCPIP0::%s::INSTR"%(address)) return PS_handler except Exception: return False else: print("Not Supply this remote mode now") return False def Set_Power_mode(handler,mode): if mode == "ON": handler.write("OUTPUT:STAT ON") else: handler.write("OUTPUT:STAT OFF") def check_ip_pingable(ip_address): status, result = sp.getstatusoutput("ping " + ip_address + " -w 2000") print(status, "result="+result) if ("timed out" in result) or ("fail" in result): return False return True def waitfor_pingable(ip_address,try_times): times = 1 while True: res = check_ip_pingable(ip_address) if not res: print("ping %s Failed %s times"%(ip_address,times)) if times > try_times: break else: print("ping %s Successfully!"%ip_address) break times+=1 if times > try_times: return False else: return True def Init_Breamer_shell(): #Init beamer port #Input: config_file #Output: beamer handler or false if one is not avaliable beamer_ip = 
"192.168.101.1" beamer_port = 22 beamer_username = "toor4nsn" beamer_pwd = "oZPS0POrRieRtu" beamer_c = _SSH_Shell() for try_time in range(1,10): beamer_handler = beamer_c.open(beamer_ip, beamer_port, beamer_username,beamer_pwd) print("init beamer:%s for %s times, result is %s"%(beamer_ip,try_time,beamer_handler)) if beamer_handler == False: if try_time == 9: break else: pass else: break time.sleep(1) #beamer_handler.write("su root") return beamer_handler def check_beamer_processer(beamer_handler): re = beamer_handler.command("ps")[0] print(re.find("libtestabilitytcp.so.1.0")) if re.find("libtestabilitytcp.so.1.0")>0: return True return False dialog = Dialog(wx.App) def Bring_up(): dialog.Infoalert("Make sure the RRU is power on", "info") sum_run_count = 0 fail_count = 0 while True: #self.dialog.Infoalert("We will bring up beamer from flash,Please make sure the WE is UNCONNECTED!!!", "Warning") # powersuppy_handler = PowerSuppy_Init(interface,address) # if not powersuppy_handler: # dialog.erroralert("The Power supply init failed! 
Please check", "ERROR") # return False # Set_Power_mode(powersuppy_handler,"OFF") # time.sleep(5) # Set_Power_msode(powersuppy_handler,"ON") beamer_handler = Init_Breamer_shell() if not beamer_handler: dialog.erroralert("Beamer handler init failed for 10 times", "ERROR") return False beamer_handler.write("/usr/bin/rfsw-ncfg-reboot") sum_run_count+=1 print("waiting for 60s...") time.sleep(120) beammer_ip = "192.168.101.1" T = waitfor_pingable(beammer_ip,1000) if not T: fail_count+=1 print("reboot for %s times, beamer run failed %s times"%(sum_run_count,fail_count)) continue print("Beamer Start up normally Now!") beamer_handler = Init_Breamer_shell() if not beamer_handler: dialog.erroralert("Beamer handler init failed for 10 times", "ERROR") return False retry_beamer = 0 for retry_beamer in range(20): res = check_beamer_processer(beamer_handler) if res: break else: if retry_beamer == 19: print("beamer can't start up correctly,will power down and up again") beamer_handler.write("/usr/bin/ccsShell.sh log -c full") time.sleep(10) t = time.strftime('%Y%m%d%H%M%S') folder = "./runtimelog" if not os.path.exists(folder): os.mkdir(folder) download_from_ftp_to_local_SCP(beammer_ip,22,"/ram/1011_runtime.zip", folder+"/"+t+"_runtime.zip", mode = 'bin', user = 'toor4nsn', password = 'oZPS0POrRieRtu') download_from_ftp_to_local_SCP(beammer_ip,22,"/ram/1011_startup.zip", folder+"/"+t+"_runtime.zip", mode = 'bin', user = 'toor4nsn', password = 'oZPS0POrRieRtu') fail_count+=1 break time.sleep(2) print("reboot for %s times, beamer run failed %s times"%(sum_run_count,fail_count)) def runtimelog(): beamer_handler = Init_Breamer_shell() beamer_handler.write("/usr/bin/ccsShell.sh log -c full") time.sleep(10) t = time.strftime('%Y%m%d%H%M%S') folder = "./runtimelog" if not os.path.exists(folder): os.mkdir(folder) download_from_ftp_to_local_SCP(beammer_ip, 22, "/ram/1011_runtime.zip", folder + "/" + t + "_runtime.zip", mode='bin', user='toor4nsn', password='oZPS0POrRieRtu') 
download_from_ftp_to_local_SCP(beammer_ip, 22, "/ram/1011_startup.zip", folder + "/" + t + "_runtime.zip", mode='bin', user='toor4nsn', password='oZPS0POrRieRtu') # runtimelog()
None
identifier_body
AutoCapture_runtimelog.py
# import wx,time,pyvisa,os import time import subprocess as sp from paramiko import * import locale import os import re from socket import timeout as SocketTimeout # this is quote from the shlex module, added in py3.3 _find_unsafe = re.compile(br'[^\w@%+=:,./~-]').search def _sh_quote(s): """Return a shell-escaped version of the string `s`.""" if not s: return b"" if _find_unsafe(s) is None:
# use single quotes, and put single quotes into double quotes # the string $'b is then quoted as '$'"'"'b' return b"'" + s.replace(b"'", b"'\"'\"'") + b"'" # Unicode conversion functions; assume UTF-8 def asbytes(s): """Turns unicode into bytes, if needed. Assumes UTF-8. """ if isinstance(s, bytes): return s else: return s.encode('utf-8') def asunicode(s): """Turns bytes into unicode, if needed. Uses UTF-8. """ if isinstance(s, bytes): return s.decode('utf-8', 'replace') else: return s # os.path.sep is unicode on Python 3, no matter the platform bytes_sep = asbytes(os.path.sep) # Unicode conversion function for Windows # Used to convert local paths if the local machine is Windows def asunicode_win(s): """Turns bytes into unicode, if needed. """ if isinstance(s, bytes): return s.decode(locale.getpreferredencoding()) else: return s class SCPClient(object): """ An scp1 implementation, compatible with openssh scp. Raises SCPException for all transport related errors. Local filesystem and OS errors pass through. Main public methods are .put and .get The get method is controlled by the remote scp instance, and behaves accordingly. This means that symlinks are resolved, and the transfer is halted after too many levels of symlinks are detected. The put method uses os.walk for recursion, and sends files accordingly. Since scp doesn't support symlinks, we send file symlinks as the file (matching scp behaviour), but we make no attempt at symlinked directories. """ def __init__(self, transport, buff_size=16384, socket_timeout=5.0, progress=None, sanitize=_sh_quote): """ Create an scp1 client. @param transport: an existing paramiko L{Transport} @type transport: L{Transport} @param buff_size: size of the scp send buffer. 
@type buff_size: int @param socket_timeout: channel socket timeout in seconds @type socket_timeout: float @param progress: callback - called with (filename, size, sent) during transfers @param sanitize: function - called with filename, should return safe or escaped string. Uses _sh_quote by default. @type progress: function(string, int, int) """ self.transport = transport self.buff_size = buff_size self.socket_timeout = socket_timeout self.channel = None self.preserve_times = False self._progress = progress self._recv_dir = b'' self._rename = False self._utime = None self.sanitize = sanitize self._dirtimes = {} def __enter__(self): self.channel = self._open() return self def __exit__(self, type, value, traceback): self.close() def put(self, files, remote_path=b'.', recursive=False, preserve_times=False): """ Transfer files to remote host. @param files: A single path, or a list of paths to be transfered. recursive must be True to transfer directories. @type files: string OR list of strings @param remote_path: path in which to receive the files on the remote host. defaults to '.' @type remote_path: str @param recursive: transfer files and directories recursively @type recursive: bool @param preserve_times: preserve mtime and atime of transfered files and directories. @type preserve_times: bool """ self.preserve_times = preserve_times self.channel = self._open() self._pushed = 0 self.channel.settimeout(self.socket_timeout) scp_command = (b'scp -t ', b'scp -r -t ')[recursive] self.channel.exec_command(scp_command + self.sanitize(asbytes(remote_path))) self._recv_confirm() if not isinstance(files, (list, tuple)): files = [files] if recursive: self._send_recursive(files) else: self._send_files(files) self.close() def get(self, remote_path, local_path='', recursive=False, preserve_times=False): """ Transfer files from remote host to localhost @param remote_path: path to retreive from remote host. 
since this is evaluated by scp on the remote host, shell wildcards and environment variables may be used. @type remote_path: str @param local_path: path in which to receive files locally @type local_path: str @param recursive: transfer files and directories recursively @type recursive: bool @param preserve_times: preserve mtime and atime of transfered files and directories. @type preserve_times: bool """ if not isinstance(remote_path, (list, tuple)): remote_path = [remote_path] remote_path = [self.sanitize(asbytes(r)) for r in remote_path] self._recv_dir = local_path or os.getcwd() self._rename = (len(remote_path) == 1 and not os.path.isdir(os.path.abspath(local_path))) if len(remote_path) > 1: if not os.path.exists(self._recv_dir): raise SCPException("Local path '%s' does not exist" % asunicode(self._recv_dir)) elif not os.path.isdir(self._recv_dir): raise SCPException("Local path '%s' is not a directory" % asunicode(self._recv_dir)) rcsv = (b'', b' -r')[recursive] prsv = (b'', b' -p')[preserve_times] self.channel = self._open() self._pushed = 0 self.channel.settimeout(self.socket_timeout) self.channel.exec_command(b"scp" + rcsv + prsv + b" -f " + b' '.join(remote_path)) self._recv_all() self.close() def _open(self): """open a scp channel""" if self.channel is None: self.channel = self.transport.open_session() return self.channel def close(self): """close scp channel""" if self.channel is not None: self.channel.close() self.channel = None def _read_stats(self, name): """return just the file stats needed for scp""" if os.name == 'nt': name = asunicode(name) stats = os.stat(name) mode = oct(stats.st_mode)[-4:] size = stats.st_size atime = int(stats.st_atime) mtime = int(stats.st_mtime) return (mode, size, mtime, atime) def _send_files(self, files): for name in files: basename = asbytes(os.path.basename(name)) (mode, size, mtime, atime) = self._read_stats(name) if self.preserve_times: self._send_time(mtime, atime) file_hdl = open(name, 'rb') # The protocol can't 
handle \n in the filename. # Quote them as the control sequence \^J for now, # which is how openssh handles it. self.channel.sendall(("C%s %d " % (mode, size)).encode('ascii') + basename.replace(b'\n', b'\\^J') + b"\n") self._recv_confirm() file_pos = 0 if self._progress: if size == 0: # avoid divide-by-zero self._progress(basename, 1, 1) else: self._progress(basename, size, 0) buff_size = self.buff_size chan = self.channel while file_pos < size: chan.sendall(file_hdl.read(buff_size)) file_pos = file_hdl.tell() if self._progress: self._progress(basename, size, file_pos) chan.sendall('\x00') file_hdl.close() self._recv_confirm() def _chdir(self, from_dir, to_dir): # Pop until we're one level up from our next push. # Push *once* into to_dir. # This is dependent on the depth-first traversal from os.walk # add path.sep to each when checking the prefix, so we can use # path.dirname after common = os.path.commonprefix([from_dir + bytes_sep, to_dir + bytes_sep]) # now take the dirname, since commonprefix is character based, # and we either have a seperator, or a partial name common = os.path.dirname(common) cur_dir = from_dir.rstrip(bytes_sep) while cur_dir != common: cur_dir = os.path.split(cur_dir)[0] self._send_popd() # now we're in our common base directory, so on self._send_pushd(to_dir) def _send_recursive(self, files): for base in files: if not os.path.isdir(base): # filename mixed into the bunch self._send_files([base]) continue last_dir = asbytes(base) for root, dirs, fls in os.walk(base): self._chdir(last_dir, asbytes(root)) self._send_files([os.path.join(root, f) for f in fls]) last_dir = asbytes(root) # back out of the directory while self._pushed > 0: self._send_popd() def _send_pushd(self, directory): (mode, size, mtime, atime) = self._read_stats(directory) basename = asbytes(os.path.basename(directory)) if self.preserve_times: self._send_time(mtime, atime) self.channel.sendall(('D%s 0 ' % mode).encode('ascii') + basename.replace(b'\n', b'\\^J') + b'\n') 
self._recv_confirm() self._pushed += 1 def _send_popd(self): self.channel.sendall('E\n') self._recv_confirm() self._pushed -= 1 def _send_time(self, mtime, atime): self.channel.sendall(('T%d 0 %d 0\n' % (mtime, atime)).encode('ascii')) self._recv_confirm() def _recv_confirm(self): # read scp response msg = b'' try: msg = self.channel.recv(512) except SocketTimeout: raise SCPException('Timout waiting for scp response') # slice off the first byte, so this compare will work in py2 and py3 if msg and msg[0:1] == b'\x00': return elif msg and msg[0:1] == b'\x01': raise SCPException(asunicode(msg[1:])) elif self.channel.recv_stderr_ready(): msg = self.channel.recv_stderr(512) raise SCPException(asunicode(msg)) elif not msg: raise SCPException('No response from server') else: raise SCPException('Invalid response from server', msg) def _recv_all(self): # loop over scp commands, and receive as necessary command = {b'C': self._recv_file, b'T': self._set_time, b'D': self._recv_pushd, b'E': self._recv_popd} while not self.channel.closed: # wait for command as long as we're open self.channel.sendall('\x00') msg = self.channel.recv(1024) if not msg: # chan closed while recving break assert msg[-1:] == b'\n' msg = msg[:-1] code = msg[0:1] try: command[code](msg[1:]) except KeyError: raise SCPException(asunicode(msg[1:])) # directory times can't be set until we're done writing files self._set_dirtimes() def _set_time(self, cmd): try: times = cmd.split(b' ') mtime = int(times[0]) atime = int(times[2]) or mtime except: self.channel.send(b'\x01') raise SCPException('Bad time format') # save for later self._utime = (atime, mtime) def _recv_file(self, cmd): chan = self.channel parts = cmd.strip().split(b' ', 2) try: mode = int(parts[0], 8) size = int(parts[1]) if self._rename: path = self._recv_dir self._rename = False elif os.name == 'nt': path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8')) else: path = os.path.join(asbytes(self._recv_dir), parts[2]) except: 
chan.send('\x01') chan.close() raise SCPException('Bad file format') try: file_hdl = open(path, 'wb') except IOError as e: chan.send(b'\x01' + str(e).encode('utf-8')) chan.close() raise if self._progress: if size == 0: # avoid divide-by-zero self._progress(path, 1, 1) else: self._progress(path, size, 0) buff_size = self.buff_size pos = 0 chan.send(b'\x00') try: while pos < size: # we have to make sure we don't read the final byte if size - pos <= buff_size: buff_size = size - pos file_hdl.write(chan.recv(buff_size)) pos = file_hdl.tell() if self._progress: self._progress(path, size, pos) msg = chan.recv(512) if msg and msg[0:1] != b'\x00': raise SCPException(asunicode(msg[1:])) except SocketTimeout: chan.close() raise SCPException('Error receiving, socket.timeout') file_hdl.truncate() try: os.utime(path, self._utime) self._utime = None os.chmod(path, mode) # should we notify the other end? finally: file_hdl.close() # '\x00' confirmation sent in _recv_all def _recv_pushd(self, cmd): parts = cmd.split(b' ', 2) try: mode = int(parts[0], 8) if self._rename: path = self._recv_dir self._rename = False elif os.name == 'nt': path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8')) else: path = os.path.join(asbytes(self._recv_dir), parts[2]) except: self.channel.send(b'\x01') raise SCPException('Bad directory format') try: if not os.path.exists(path): os.mkdir(path, mode) elif os.path.isdir(path): os.chmod(path, mode) else: raise SCPException('%s: Not a directory' % path) self._dirtimes[path] = (self._utime) self._utime = None self._recv_dir = path except (OSError, SCPException) as e: self.channel.send(b'\x01' + asbytes(str(e))) raise def _recv_popd(self, *cmd): self._recv_dir = os.path.split(self._recv_dir)[0] def _set_dirtimes(self): try: for d in self._dirtimes: os.utime(d, self._dirtimes[d]) finally: self._dirtimes = {} class SCPException(Exception): """SCP exception class""" pass class SSHClient_noauth(SSHClient): def _auth(self, username, *args): 
self._transport.auth_none(username) return class SCP(object): def __init__(self, host, port, username='root', password='root'): self.host = host self.port = port self.username = username self.password = password def createSSHClient(self,server,port,user,password): client = SSHClient() client.set_missing_host_key_policy(AutoAddPolicy()) client.connect(server, port, user, password) return client def createSSHClient_No_password(self,server,port,user,password = None): t = SSHClient_noauth() t.set_missing_host_key_policy(AutoAddPolicy()) t.connect(server,port,user,password) return t def connect(self): try: self.ssh= self.createSSHClient(self.host, self.port, self.username, self.password) self.scp = SCPClient(self.ssh.get_transport()) print('success to conenct SCP Client') except: self.ssh = False return self.ssh def upload(self, local_path, remote_path, mode='bin'): mode = mode and mode.lower() or 'bin' if(mode not in ('bin', 'text')): raise RuntimeError("Invalid Ftp mode (%s)" % mode) file_mode = (mode == 'bin') and 'rb' or 'r'#file_mode = 'rb',rb represent readonly file = open(local_path, file_mode) try: if(mode == 'bin'): ret = self.scp.put(local_path,remote_path) #store a file in binary mode print('success to upload %s to remote_path %s'%(local_path,remote_path)) else: print('error') #store a file in line mode return True finally: file.close() return False def download(self, local_path, remote_path, mode='bin'): mode = mode and mode.lower() or 'bin' if(mode not in ('bin', 'text')): raise RuntimeError("Invalid Ftp mode (%s)" % mode) file_mode = (mode == 'bin') and 'w+b' or 'w' file = open(local_path, file_mode) try: if(mode == 'bin'): ret = self.scp.get(remote_path,local_path) print('success to download remote file to local') else: print('error') # self.log.debug("get %s" % ftp_path) # self.log.trace(ret) file.close() return True except Exception as e: file.close() os.remove(local_path) raise e return False def close(self): self.ssh.close() class _SSH_Shell(): def 
__init__(self): None def open(self,hostname,port,username = 'root',password = None): if password == "None": password = None if password == None: self.ssh = SSHClient_noauth() else: self.ssh = SSHClient() self.ssh.set_missing_host_key_policy(AutoAddPolicy()) try: self.ssh.connect(hostname,port,username,password) return self except Exception: return False def write(self,command): stdin,stdout,stderr = self.ssh.exec_command(command) print("send command => %s"%command)#logging.info return stdin,stdout,stderr def write_long(self,command): stdin,stdout,stderr = self.ssh.exec_command(command) print("send command => %s"%command) result = "" for line1 in stderr.readlines(): print("stdout=>"+str(line1)) result = result+line1 for line1 in stdout.readlines(): print("stdout=>"+str(line1)) result = result+line1 return result def read(self,std): cmd_result = std[1].read().decode(encoding='utf-8'),std[2].read().decode(encoding='utf-8') print(time.strftime('%Y%m%d%H%M%S') + '=>'+cmd_result[0]) return cmd_result def command(self,command): std = self.write(command) time.sleep(0.5) res = self.read(std) return res def close(self): self.ssh.close() class Dialog(wx.App): def __init__(self,window): wx.App.__init__(self) self.window = window def erroralert(self,msg,title): dlg = wx.MessageDialog(None,msg,title,wx.OK|wx.CANCEL|wx.ICON_ERROR) result = dlg.ShowModal() if result == wx.CANCEL: dlg.Destroy() else: dlg.Destroy() return result def Infoalert(self,msg,title): dlg = wx.MessageDialog(None,msg,title,wx.OK|wx.CANCEL|wx.ICON_INFORMATION) result = dlg.ShowModal() if result == wx.CANCEL: dlg.Destroy() else: dlg.Destroy() return result def download_from_ftp_to_local_SCP(host,port,ftp_path, local_file, mode = 'bin', user = 'root', password = 'root'): """ This keyword download a file from Ftp server to local of test case running. 
example usage: | Download From Ftp To Local | ftp://10.56.117.112/etc/ipsec_configuration.xml | c:${/}ipsec_configuration.xml | BIN | | Return value | the output of command | """ try: scp_obj = SCP(host, port, user, password) scp_obj.connect() except Exception: scp_obj = SCP(host, port, user, password) if os.path.isdir(local_file): print('not support now!') else: scp_obj.download(local_file, ftp_path, mode) scp_obj.close() def PowerSuppy_Init(interface,address): ''' sn should be string, eg: Init_NRP('103123'), '103123' is from NRP_Z21 power viewer Z11 productID: 0x000c, Z21 productID: 0x0003 ''' visaDLL = 'c:/windows/system32/visa64.dll' resourceManager = pyvisa.ResourceManager(visaDLL) if interface == "GPIB": print("PowerSupply remote mode is:"+ interface+", GPIB Address is:"+address) try: PS_handler = resourceManager.open_resource("GPIB0::%s::INSTR"%(address)) return PS_handler except Exception: return False elif interface == "TCPIP": print("PowerSupply remote mode is:"+ interface+", IP Address is:"+address) try: PS_handler = resourceManager.open_resource("TCPIP0::%s::INSTR"%(address)) return PS_handler except Exception: return False else: print("Not Supply this remote mode now") return False def Set_Power_mode(handler,mode): if mode == "ON": handler.write("OUTPUT:STAT ON") else: handler.write("OUTPUT:STAT OFF") def check_ip_pingable(ip_address): status, result = sp.getstatusoutput("ping " + ip_address + " -w 2000") print(status, "result="+result) if ("timed out" in result) or ("fail" in result): return False return True def waitfor_pingable(ip_address,try_times): times = 1 while True: res = check_ip_pingable(ip_address) if not res: print("ping %s Failed %s times"%(ip_address,times)) if times > try_times: break else: print("ping %s Successfully!"%ip_address) break times+=1 if times > try_times: return False else: return True def Init_Breamer_shell(): #Init beamer port #Input: config_file #Output: beamer handler or false if one is not avaliable beamer_ip = 
"192.168.101.1" beamer_port = 22 beamer_username = "toor4nsn" beamer_pwd = "oZPS0POrRieRtu" beamer_c = _SSH_Shell() for try_time in range(1,10): beamer_handler = beamer_c.open(beamer_ip, beamer_port, beamer_username,beamer_pwd) print("init beamer:%s for %s times, result is %s"%(beamer_ip,try_time,beamer_handler)) if beamer_handler == False: if try_time == 9: break else: pass else: break time.sleep(1) #beamer_handler.write("su root") return beamer_handler def check_beamer_processer(beamer_handler): re = beamer_handler.command("ps")[0] print(re.find("libtestabilitytcp.so.1.0")) if re.find("libtestabilitytcp.so.1.0")>0: return True return False dialog = Dialog(wx.App) def Bring_up(): dialog.Infoalert("Make sure the RRU is power on", "info") sum_run_count = 0 fail_count = 0 while True: #self.dialog.Infoalert("We will bring up beamer from flash,Please make sure the WE is UNCONNECTED!!!", "Warning") # powersuppy_handler = PowerSuppy_Init(interface,address) # if not powersuppy_handler: # dialog.erroralert("The Power supply init failed! 
Please check", "ERROR") # return False # Set_Power_mode(powersuppy_handler,"OFF") # time.sleep(5) # Set_Power_msode(powersuppy_handler,"ON") beamer_handler = Init_Breamer_shell() if not beamer_handler: dialog.erroralert("Beamer handler init failed for 10 times", "ERROR") return False beamer_handler.write("/usr/bin/rfsw-ncfg-reboot") sum_run_count+=1 print("waiting for 60s...") time.sleep(120) beammer_ip = "192.168.101.1" T = waitfor_pingable(beammer_ip,1000) if not T: fail_count+=1 print("reboot for %s times, beamer run failed %s times"%(sum_run_count,fail_count)) continue print("Beamer Start up normally Now!") beamer_handler = Init_Breamer_shell() if not beamer_handler: dialog.erroralert("Beamer handler init failed for 10 times", "ERROR") return False retry_beamer = 0 for retry_beamer in range(20): res = check_beamer_processer(beamer_handler) if res: break else: if retry_beamer == 19: print("beamer can't start up correctly,will power down and up again") beamer_handler.write("/usr/bin/ccsShell.sh log -c full") time.sleep(10) t = time.strftime('%Y%m%d%H%M%S') folder = "./runtimelog" if not os.path.exists(folder): os.mkdir(folder) download_from_ftp_to_local_SCP(beammer_ip,22,"/ram/1011_runtime.zip", folder+"/"+t+"_runtime.zip", mode = 'bin', user = 'toor4nsn', password = 'oZPS0POrRieRtu') download_from_ftp_to_local_SCP(beammer_ip,22,"/ram/1011_startup.zip", folder+"/"+t+"_runtime.zip", mode = 'bin', user = 'toor4nsn', password = 'oZPS0POrRieRtu') fail_count+=1 break time.sleep(2) print("reboot for %s times, beamer run failed %s times"%(sum_run_count,fail_count)) def runtimelog(): beamer_handler = Init_Breamer_shell() beamer_handler.write("/usr/bin/ccsShell.sh log -c full") time.sleep(10) t = time.strftime('%Y%m%d%H%M%S') folder = "./runtimelog" if not os.path.exists(folder): os.mkdir(folder) download_from_ftp_to_local_SCP(beammer_ip, 22, "/ram/1011_runtime.zip", folder + "/" + t + "_runtime.zip", mode='bin', user='toor4nsn', password='oZPS0POrRieRtu') 
download_from_ftp_to_local_SCP(beammer_ip, 22, "/ram/1011_startup.zip", folder + "/" + t + "_runtime.zip", mode='bin', user='toor4nsn', password='oZPS0POrRieRtu') # runtimelog()
return s
conditional_block
AutoCapture_runtimelog.py
# import wx,time,pyvisa,os import time import subprocess as sp from paramiko import * import locale import os import re from socket import timeout as SocketTimeout # this is quote from the shlex module, added in py3.3 _find_unsafe = re.compile(br'[^\w@%+=:,./~-]').search def _sh_quote(s): """Return a shell-escaped version of the string `s`.""" if not s: return b"" if _find_unsafe(s) is None: return s # use single quotes, and put single quotes into double quotes # the string $'b is then quoted as '$'"'"'b' return b"'" + s.replace(b"'", b"'\"'\"'") + b"'" # Unicode conversion functions; assume UTF-8 def asbytes(s): """Turns unicode into bytes, if needed. Assumes UTF-8. """ if isinstance(s, bytes): return s else: return s.encode('utf-8') def asunicode(s): """Turns bytes into unicode, if needed. Uses UTF-8. """ if isinstance(s, bytes): return s.decode('utf-8', 'replace') else: return s # os.path.sep is unicode on Python 3, no matter the platform bytes_sep = asbytes(os.path.sep) # Unicode conversion function for Windows # Used to convert local paths if the local machine is Windows def asunicode_win(s): """Turns bytes into unicode, if needed. """ if isinstance(s, bytes): return s.decode(locale.getpreferredencoding()) else: return s class SCPClient(object): """ An scp1 implementation, compatible with openssh scp. Raises SCPException for all transport related errors. Local filesystem and OS errors pass through. Main public methods are .put and .get The get method is controlled by the remote scp instance, and behaves accordingly. This means that symlinks are resolved, and the transfer is halted after too many levels of symlinks are detected. The put method uses os.walk for recursion, and sends files accordingly. Since scp doesn't support symlinks, we send file symlinks as the file (matching scp behaviour), but we make no attempt at symlinked directories. 
""" def __init__(self, transport, buff_size=16384, socket_timeout=5.0, progress=None, sanitize=_sh_quote): """ Create an scp1 client. @param transport: an existing paramiko L{Transport} @type transport: L{Transport} @param buff_size: size of the scp send buffer. @type buff_size: int @param socket_timeout: channel socket timeout in seconds @type socket_timeout: float @param progress: callback - called with (filename, size, sent) during transfers @param sanitize: function - called with filename, should return safe or escaped string. Uses _sh_quote by default. @type progress: function(string, int, int) """ self.transport = transport self.buff_size = buff_size self.socket_timeout = socket_timeout self.channel = None self.preserve_times = False self._progress = progress self._recv_dir = b'' self._rename = False self._utime = None self.sanitize = sanitize self._dirtimes = {} def __enter__(self): self.channel = self._open() return self def __exit__(self, type, value, traceback): self.close() def put(self, files, remote_path=b'.', recursive=False, preserve_times=False): """ Transfer files to remote host. @param files: A single path, or a list of paths to be transfered. recursive must be True to transfer directories. @type files: string OR list of strings @param remote_path: path in which to receive the files on the remote host. defaults to '.' @type remote_path: str @param recursive: transfer files and directories recursively @type recursive: bool @param preserve_times: preserve mtime and atime of transfered files and directories. 
@type preserve_times: bool """ self.preserve_times = preserve_times self.channel = self._open() self._pushed = 0 self.channel.settimeout(self.socket_timeout) scp_command = (b'scp -t ', b'scp -r -t ')[recursive] self.channel.exec_command(scp_command + self.sanitize(asbytes(remote_path))) self._recv_confirm() if not isinstance(files, (list, tuple)): files = [files] if recursive: self._send_recursive(files) else: self._send_files(files) self.close() def get(self, remote_path, local_path='', recursive=False, preserve_times=False): """ Transfer files from remote host to localhost @param remote_path: path to retreive from remote host. since this is evaluated by scp on the remote host, shell wildcards and environment variables may be used. @type remote_path: str @param local_path: path in which to receive files locally @type local_path: str @param recursive: transfer files and directories recursively @type recursive: bool @param preserve_times: preserve mtime and atime of transfered files and directories. 
@type preserve_times: bool """ if not isinstance(remote_path, (list, tuple)): remote_path = [remote_path] remote_path = [self.sanitize(asbytes(r)) for r in remote_path] self._recv_dir = local_path or os.getcwd() self._rename = (len(remote_path) == 1 and not os.path.isdir(os.path.abspath(local_path))) if len(remote_path) > 1: if not os.path.exists(self._recv_dir): raise SCPException("Local path '%s' does not exist" % asunicode(self._recv_dir)) elif not os.path.isdir(self._recv_dir): raise SCPException("Local path '%s' is not a directory" % asunicode(self._recv_dir)) rcsv = (b'', b' -r')[recursive] prsv = (b'', b' -p')[preserve_times] self.channel = self._open() self._pushed = 0 self.channel.settimeout(self.socket_timeout) self.channel.exec_command(b"scp" + rcsv + prsv + b" -f " + b' '.join(remote_path)) self._recv_all() self.close() def _open(self): """open a scp channel""" if self.channel is None: self.channel = self.transport.open_session() return self.channel def close(self): """close scp channel""" if self.channel is not None: self.channel.close() self.channel = None def _read_stats(self, name): """return just the file stats needed for scp""" if os.name == 'nt': name = asunicode(name) stats = os.stat(name) mode = oct(stats.st_mode)[-4:] size = stats.st_size atime = int(stats.st_atime) mtime = int(stats.st_mtime) return (mode, size, mtime, atime) def _send_files(self, files): for name in files: basename = asbytes(os.path.basename(name)) (mode, size, mtime, atime) = self._read_stats(name) if self.preserve_times: self._send_time(mtime, atime) file_hdl = open(name, 'rb') # The protocol can't handle \n in the filename. # Quote them as the control sequence \^J for now, # which is how openssh handles it. 
self.channel.sendall(("C%s %d " % (mode, size)).encode('ascii') + basename.replace(b'\n', b'\\^J') + b"\n") self._recv_confirm() file_pos = 0 if self._progress: if size == 0: # avoid divide-by-zero self._progress(basename, 1, 1) else: self._progress(basename, size, 0) buff_size = self.buff_size chan = self.channel while file_pos < size: chan.sendall(file_hdl.read(buff_size)) file_pos = file_hdl.tell() if self._progress: self._progress(basename, size, file_pos) chan.sendall('\x00') file_hdl.close() self._recv_confirm() def _chdir(self, from_dir, to_dir): # Pop until we're one level up from our next push. # Push *once* into to_dir. # This is dependent on the depth-first traversal from os.walk # add path.sep to each when checking the prefix, so we can use # path.dirname after common = os.path.commonprefix([from_dir + bytes_sep, to_dir + bytes_sep]) # now take the dirname, since commonprefix is character based, # and we either have a seperator, or a partial name common = os.path.dirname(common) cur_dir = from_dir.rstrip(bytes_sep) while cur_dir != common: cur_dir = os.path.split(cur_dir)[0] self._send_popd() # now we're in our common base directory, so on self._send_pushd(to_dir) def _send_recursive(self, files): for base in files: if not os.path.isdir(base): # filename mixed into the bunch self._send_files([base]) continue last_dir = asbytes(base) for root, dirs, fls in os.walk(base): self._chdir(last_dir, asbytes(root)) self._send_files([os.path.join(root, f) for f in fls]) last_dir = asbytes(root) # back out of the directory while self._pushed > 0: self._send_popd() def _send_pushd(self, directory): (mode, size, mtime, atime) = self._read_stats(directory) basename = asbytes(os.path.basename(directory)) if self.preserve_times: self._send_time(mtime, atime) self.channel.sendall(('D%s 0 ' % mode).encode('ascii') + basename.replace(b'\n', b'\\^J') + b'\n') self._recv_confirm() self._pushed += 1 def _send_popd(self): self.channel.sendall('E\n') self._recv_confirm() 
self._pushed -= 1 def _send_time(self, mtime, atime): self.channel.sendall(('T%d 0 %d 0\n' % (mtime, atime)).encode('ascii')) self._recv_confirm() def _recv_confirm(self): # read scp response msg = b'' try: msg = self.channel.recv(512) except SocketTimeout: raise SCPException('Timout waiting for scp response') # slice off the first byte, so this compare will work in py2 and py3 if msg and msg[0:1] == b'\x00': return elif msg and msg[0:1] == b'\x01': raise SCPException(asunicode(msg[1:])) elif self.channel.recv_stderr_ready(): msg = self.channel.recv_stderr(512) raise SCPException(asunicode(msg)) elif not msg: raise SCPException('No response from server') else: raise SCPException('Invalid response from server', msg) def _recv_all(self): # loop over scp commands, and receive as necessary command = {b'C': self._recv_file, b'T': self._set_time, b'D': self._recv_pushd, b'E': self._recv_popd} while not self.channel.closed: # wait for command as long as we're open self.channel.sendall('\x00') msg = self.channel.recv(1024) if not msg: # chan closed while recving break assert msg[-1:] == b'\n' msg = msg[:-1] code = msg[0:1] try: command[code](msg[1:]) except KeyError: raise SCPException(asunicode(msg[1:])) # directory times can't be set until we're done writing files self._set_dirtimes() def _set_time(self, cmd): try: times = cmd.split(b' ') mtime = int(times[0]) atime = int(times[2]) or mtime except: self.channel.send(b'\x01') raise SCPException('Bad time format') # save for later self._utime = (atime, mtime) def _recv_file(self, cmd): chan = self.channel parts = cmd.strip().split(b' ', 2) try: mode = int(parts[0], 8) size = int(parts[1]) if self._rename: path = self._recv_dir self._rename = False elif os.name == 'nt': path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8')) else: path = os.path.join(asbytes(self._recv_dir), parts[2]) except: chan.send('\x01') chan.close() raise SCPException('Bad file format') try: file_hdl = open(path, 'wb') except 
IOError as e: chan.send(b'\x01' + str(e).encode('utf-8')) chan.close() raise if self._progress: if size == 0: # avoid divide-by-zero self._progress(path, 1, 1) else: self._progress(path, size, 0) buff_size = self.buff_size pos = 0 chan.send(b'\x00') try: while pos < size: # we have to make sure we don't read the final byte
buff_size = size - pos file_hdl.write(chan.recv(buff_size)) pos = file_hdl.tell() if self._progress: self._progress(path, size, pos) msg = chan.recv(512) if msg and msg[0:1] != b'\x00': raise SCPException(asunicode(msg[1:])) except SocketTimeout: chan.close() raise SCPException('Error receiving, socket.timeout') file_hdl.truncate() try: os.utime(path, self._utime) self._utime = None os.chmod(path, mode) # should we notify the other end? finally: file_hdl.close() # '\x00' confirmation sent in _recv_all def _recv_pushd(self, cmd): parts = cmd.split(b' ', 2) try: mode = int(parts[0], 8) if self._rename: path = self._recv_dir self._rename = False elif os.name == 'nt': path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8')) else: path = os.path.join(asbytes(self._recv_dir), parts[2]) except: self.channel.send(b'\x01') raise SCPException('Bad directory format') try: if not os.path.exists(path): os.mkdir(path, mode) elif os.path.isdir(path): os.chmod(path, mode) else: raise SCPException('%s: Not a directory' % path) self._dirtimes[path] = (self._utime) self._utime = None self._recv_dir = path except (OSError, SCPException) as e: self.channel.send(b'\x01' + asbytes(str(e))) raise def _recv_popd(self, *cmd): self._recv_dir = os.path.split(self._recv_dir)[0] def _set_dirtimes(self): try: for d in self._dirtimes: os.utime(d, self._dirtimes[d]) finally: self._dirtimes = {} class SCPException(Exception): """SCP exception class""" pass class SSHClient_noauth(SSHClient): def _auth(self, username, *args): self._transport.auth_none(username) return class SCP(object): def __init__(self, host, port, username='root', password='root'): self.host = host self.port = port self.username = username self.password = password def createSSHClient(self,server,port,user,password): client = SSHClient() client.set_missing_host_key_policy(AutoAddPolicy()) client.connect(server, port, user, password) return client def createSSHClient_No_password(self,server,port,user,password = 
None): t = SSHClient_noauth() t.set_missing_host_key_policy(AutoAddPolicy()) t.connect(server,port,user,password) return t def connect(self): try: self.ssh= self.createSSHClient(self.host, self.port, self.username, self.password) self.scp = SCPClient(self.ssh.get_transport()) print('success to conenct SCP Client') except: self.ssh = False return self.ssh def upload(self, local_path, remote_path, mode='bin'): mode = mode and mode.lower() or 'bin' if(mode not in ('bin', 'text')): raise RuntimeError("Invalid Ftp mode (%s)" % mode) file_mode = (mode == 'bin') and 'rb' or 'r'#file_mode = 'rb',rb represent readonly file = open(local_path, file_mode) try: if(mode == 'bin'): ret = self.scp.put(local_path,remote_path) #store a file in binary mode print('success to upload %s to remote_path %s'%(local_path,remote_path)) else: print('error') #store a file in line mode return True finally: file.close() return False def download(self, local_path, remote_path, mode='bin'): mode = mode and mode.lower() or 'bin' if(mode not in ('bin', 'text')): raise RuntimeError("Invalid Ftp mode (%s)" % mode) file_mode = (mode == 'bin') and 'w+b' or 'w' file = open(local_path, file_mode) try: if(mode == 'bin'): ret = self.scp.get(remote_path,local_path) print('success to download remote file to local') else: print('error') # self.log.debug("get %s" % ftp_path) # self.log.trace(ret) file.close() return True except Exception as e: file.close() os.remove(local_path) raise e return False def close(self): self.ssh.close() class _SSH_Shell(): def __init__(self): None def open(self,hostname,port,username = 'root',password = None): if password == "None": password = None if password == None: self.ssh = SSHClient_noauth() else: self.ssh = SSHClient() self.ssh.set_missing_host_key_policy(AutoAddPolicy()) try: self.ssh.connect(hostname,port,username,password) return self except Exception: return False def write(self,command): stdin,stdout,stderr = self.ssh.exec_command(command) print("send command => 
%s"%command)#logging.info return stdin,stdout,stderr def write_long(self,command): stdin,stdout,stderr = self.ssh.exec_command(command) print("send command => %s"%command) result = "" for line1 in stderr.readlines(): print("stdout=>"+str(line1)) result = result+line1 for line1 in stdout.readlines(): print("stdout=>"+str(line1)) result = result+line1 return result def read(self,std): cmd_result = std[1].read().decode(encoding='utf-8'),std[2].read().decode(encoding='utf-8') print(time.strftime('%Y%m%d%H%M%S') + '=>'+cmd_result[0]) return cmd_result def command(self,command): std = self.write(command) time.sleep(0.5) res = self.read(std) return res def close(self): self.ssh.close() class Dialog(wx.App): def __init__(self,window): wx.App.__init__(self) self.window = window def erroralert(self,msg,title): dlg = wx.MessageDialog(None,msg,title,wx.OK|wx.CANCEL|wx.ICON_ERROR) result = dlg.ShowModal() if result == wx.CANCEL: dlg.Destroy() else: dlg.Destroy() return result def Infoalert(self,msg,title): dlg = wx.MessageDialog(None,msg,title,wx.OK|wx.CANCEL|wx.ICON_INFORMATION) result = dlg.ShowModal() if result == wx.CANCEL: dlg.Destroy() else: dlg.Destroy() return result def download_from_ftp_to_local_SCP(host,port,ftp_path, local_file, mode = 'bin', user = 'root', password = 'root'): """ This keyword download a file from Ftp server to local of test case running. 
example usage: | Download From Ftp To Local | ftp://10.56.117.112/etc/ipsec_configuration.xml | c:${/}ipsec_configuration.xml | BIN | | Return value | the output of command | """ try: scp_obj = SCP(host, port, user, password) scp_obj.connect() except Exception: scp_obj = SCP(host, port, user, password) if os.path.isdir(local_file): print('not support now!') else: scp_obj.download(local_file, ftp_path, mode) scp_obj.close() def PowerSuppy_Init(interface,address): ''' sn should be string, eg: Init_NRP('103123'), '103123' is from NRP_Z21 power viewer Z11 productID: 0x000c, Z21 productID: 0x0003 ''' visaDLL = 'c:/windows/system32/visa64.dll' resourceManager = pyvisa.ResourceManager(visaDLL) if interface == "GPIB": print("PowerSupply remote mode is:"+ interface+", GPIB Address is:"+address) try: PS_handler = resourceManager.open_resource("GPIB0::%s::INSTR"%(address)) return PS_handler except Exception: return False elif interface == "TCPIP": print("PowerSupply remote mode is:"+ interface+", IP Address is:"+address) try: PS_handler = resourceManager.open_resource("TCPIP0::%s::INSTR"%(address)) return PS_handler except Exception: return False else: print("Not Supply this remote mode now") return False def Set_Power_mode(handler,mode): if mode == "ON": handler.write("OUTPUT:STAT ON") else: handler.write("OUTPUT:STAT OFF") def check_ip_pingable(ip_address): status, result = sp.getstatusoutput("ping " + ip_address + " -w 2000") print(status, "result="+result) if ("timed out" in result) or ("fail" in result): return False return True def waitfor_pingable(ip_address,try_times): times = 1 while True: res = check_ip_pingable(ip_address) if not res: print("ping %s Failed %s times"%(ip_address,times)) if times > try_times: break else: print("ping %s Successfully!"%ip_address) break times+=1 if times > try_times: return False else: return True def Init_Breamer_shell(): #Init beamer port #Input: config_file #Output: beamer handler or false if one is not avaliable beamer_ip = 
"192.168.101.1" beamer_port = 22 beamer_username = "toor4nsn" beamer_pwd = "oZPS0POrRieRtu" beamer_c = _SSH_Shell() for try_time in range(1,10): beamer_handler = beamer_c.open(beamer_ip, beamer_port, beamer_username,beamer_pwd) print("init beamer:%s for %s times, result is %s"%(beamer_ip,try_time,beamer_handler)) if beamer_handler == False: if try_time == 9: break else: pass else: break time.sleep(1) #beamer_handler.write("su root") return beamer_handler def check_beamer_processer(beamer_handler): re = beamer_handler.command("ps")[0] print(re.find("libtestabilitytcp.so.1.0")) if re.find("libtestabilitytcp.so.1.0")>0: return True return False dialog = Dialog(wx.App) def Bring_up(): dialog.Infoalert("Make sure the RRU is power on", "info") sum_run_count = 0 fail_count = 0 while True: #self.dialog.Infoalert("We will bring up beamer from flash,Please make sure the WE is UNCONNECTED!!!", "Warning") # powersuppy_handler = PowerSuppy_Init(interface,address) # if not powersuppy_handler: # dialog.erroralert("The Power supply init failed! 
Please check", "ERROR") # return False # Set_Power_mode(powersuppy_handler,"OFF") # time.sleep(5) # Set_Power_msode(powersuppy_handler,"ON") beamer_handler = Init_Breamer_shell() if not beamer_handler: dialog.erroralert("Beamer handler init failed for 10 times", "ERROR") return False beamer_handler.write("/usr/bin/rfsw-ncfg-reboot") sum_run_count+=1 print("waiting for 60s...") time.sleep(120) beammer_ip = "192.168.101.1" T = waitfor_pingable(beammer_ip,1000) if not T: fail_count+=1 print("reboot for %s times, beamer run failed %s times"%(sum_run_count,fail_count)) continue print("Beamer Start up normally Now!") beamer_handler = Init_Breamer_shell() if not beamer_handler: dialog.erroralert("Beamer handler init failed for 10 times", "ERROR") return False retry_beamer = 0 for retry_beamer in range(20): res = check_beamer_processer(beamer_handler) if res: break else: if retry_beamer == 19: print("beamer can't start up correctly,will power down and up again") beamer_handler.write("/usr/bin/ccsShell.sh log -c full") time.sleep(10) t = time.strftime('%Y%m%d%H%M%S') folder = "./runtimelog" if not os.path.exists(folder): os.mkdir(folder) download_from_ftp_to_local_SCP(beammer_ip,22,"/ram/1011_runtime.zip", folder+"/"+t+"_runtime.zip", mode = 'bin', user = 'toor4nsn', password = 'oZPS0POrRieRtu') download_from_ftp_to_local_SCP(beammer_ip,22,"/ram/1011_startup.zip", folder+"/"+t+"_runtime.zip", mode = 'bin', user = 'toor4nsn', password = 'oZPS0POrRieRtu') fail_count+=1 break time.sleep(2) print("reboot for %s times, beamer run failed %s times"%(sum_run_count,fail_count)) def runtimelog(): beamer_handler = Init_Breamer_shell() beamer_handler.write("/usr/bin/ccsShell.sh log -c full") time.sleep(10) t = time.strftime('%Y%m%d%H%M%S') folder = "./runtimelog" if not os.path.exists(folder): os.mkdir(folder) download_from_ftp_to_local_SCP(beammer_ip, 22, "/ram/1011_runtime.zip", folder + "/" + t + "_runtime.zip", mode='bin', user='toor4nsn', password='oZPS0POrRieRtu') 
download_from_ftp_to_local_SCP(beammer_ip, 22, "/ram/1011_startup.zip", folder + "/" + t + "_runtime.zip", mode='bin', user='toor4nsn', password='oZPS0POrRieRtu') # runtimelog()
if size - pos <= buff_size:
random_line_split
AutoCapture_runtimelog.py
# import wx,time,pyvisa,os import time import subprocess as sp from paramiko import * import locale import os import re from socket import timeout as SocketTimeout # this is quote from the shlex module, added in py3.3 _find_unsafe = re.compile(br'[^\w@%+=:,./~-]').search def _sh_quote(s): """Return a shell-escaped version of the string `s`.""" if not s: return b"" if _find_unsafe(s) is None: return s # use single quotes, and put single quotes into double quotes # the string $'b is then quoted as '$'"'"'b' return b"'" + s.replace(b"'", b"'\"'\"'") + b"'" # Unicode conversion functions; assume UTF-8 def asbytes(s): """Turns unicode into bytes, if needed. Assumes UTF-8. """ if isinstance(s, bytes): return s else: return s.encode('utf-8') def asunicode(s): """Turns bytes into unicode, if needed. Uses UTF-8. """ if isinstance(s, bytes): return s.decode('utf-8', 'replace') else: return s # os.path.sep is unicode on Python 3, no matter the platform bytes_sep = asbytes(os.path.sep) # Unicode conversion function for Windows # Used to convert local paths if the local machine is Windows def asunicode_win(s): """Turns bytes into unicode, if needed. """ if isinstance(s, bytes): return s.decode(locale.getpreferredencoding()) else: return s class SCPClient(object): """ An scp1 implementation, compatible with openssh scp. Raises SCPException for all transport related errors. Local filesystem and OS errors pass through. Main public methods are .put and .get The get method is controlled by the remote scp instance, and behaves accordingly. This means that symlinks are resolved, and the transfer is halted after too many levels of symlinks are detected. The put method uses os.walk for recursion, and sends files accordingly. Since scp doesn't support symlinks, we send file symlinks as the file (matching scp behaviour), but we make no attempt at symlinked directories. 
""" def __init__(self, transport, buff_size=16384, socket_timeout=5.0, progress=None, sanitize=_sh_quote): """ Create an scp1 client. @param transport: an existing paramiko L{Transport} @type transport: L{Transport} @param buff_size: size of the scp send buffer. @type buff_size: int @param socket_timeout: channel socket timeout in seconds @type socket_timeout: float @param progress: callback - called with (filename, size, sent) during transfers @param sanitize: function - called with filename, should return safe or escaped string. Uses _sh_quote by default. @type progress: function(string, int, int) """ self.transport = transport self.buff_size = buff_size self.socket_timeout = socket_timeout self.channel = None self.preserve_times = False self._progress = progress self._recv_dir = b'' self._rename = False self._utime = None self.sanitize = sanitize self._dirtimes = {} def __enter__(self): self.channel = self._open() return self def __exit__(self, type, value, traceback): self.close() def put(self, files, remote_path=b'.', recursive=False, preserve_times=False): """ Transfer files to remote host. @param files: A single path, or a list of paths to be transfered. recursive must be True to transfer directories. @type files: string OR list of strings @param remote_path: path in which to receive the files on the remote host. defaults to '.' @type remote_path: str @param recursive: transfer files and directories recursively @type recursive: bool @param preserve_times: preserve mtime and atime of transfered files and directories. 
@type preserve_times: bool """ self.preserve_times = preserve_times self.channel = self._open() self._pushed = 0 self.channel.settimeout(self.socket_timeout) scp_command = (b'scp -t ', b'scp -r -t ')[recursive] self.channel.exec_command(scp_command + self.sanitize(asbytes(remote_path))) self._recv_confirm() if not isinstance(files, (list, tuple)): files = [files] if recursive: self._send_recursive(files) else: self._send_files(files) self.close() def get(self, remote_path, local_path='', recursive=False, preserve_times=False): """ Transfer files from remote host to localhost @param remote_path: path to retreive from remote host. since this is evaluated by scp on the remote host, shell wildcards and environment variables may be used. @type remote_path: str @param local_path: path in which to receive files locally @type local_path: str @param recursive: transfer files and directories recursively @type recursive: bool @param preserve_times: preserve mtime and atime of transfered files and directories. 
@type preserve_times: bool """ if not isinstance(remote_path, (list, tuple)): remote_path = [remote_path] remote_path = [self.sanitize(asbytes(r)) for r in remote_path] self._recv_dir = local_path or os.getcwd() self._rename = (len(remote_path) == 1 and not os.path.isdir(os.path.abspath(local_path))) if len(remote_path) > 1: if not os.path.exists(self._recv_dir): raise SCPException("Local path '%s' does not exist" % asunicode(self._recv_dir)) elif not os.path.isdir(self._recv_dir): raise SCPException("Local path '%s' is not a directory" % asunicode(self._recv_dir)) rcsv = (b'', b' -r')[recursive] prsv = (b'', b' -p')[preserve_times] self.channel = self._open() self._pushed = 0 self.channel.settimeout(self.socket_timeout) self.channel.exec_command(b"scp" + rcsv + prsv + b" -f " + b' '.join(remote_path)) self._recv_all() self.close() def _open(self): """open a scp channel""" if self.channel is None: self.channel = self.transport.open_session() return self.channel def close(self): """close scp channel""" if self.channel is not None: self.channel.close() self.channel = None def _read_stats(self, name): """return just the file stats needed for scp""" if os.name == 'nt': name = asunicode(name) stats = os.stat(name) mode = oct(stats.st_mode)[-4:] size = stats.st_size atime = int(stats.st_atime) mtime = int(stats.st_mtime) return (mode, size, mtime, atime) def _send_files(self, files): for name in files: basename = asbytes(os.path.basename(name)) (mode, size, mtime, atime) = self._read_stats(name) if self.preserve_times: self._send_time(mtime, atime) file_hdl = open(name, 'rb') # The protocol can't handle \n in the filename. # Quote them as the control sequence \^J for now, # which is how openssh handles it. 
self.channel.sendall(("C%s %d " % (mode, size)).encode('ascii') + basename.replace(b'\n', b'\\^J') + b"\n") self._recv_confirm() file_pos = 0 if self._progress: if size == 0: # avoid divide-by-zero self._progress(basename, 1, 1) else: self._progress(basename, size, 0) buff_size = self.buff_size chan = self.channel while file_pos < size: chan.sendall(file_hdl.read(buff_size)) file_pos = file_hdl.tell() if self._progress: self._progress(basename, size, file_pos) chan.sendall('\x00') file_hdl.close() self._recv_confirm() def _chdir(self, from_dir, to_dir): # Pop until we're one level up from our next push. # Push *once* into to_dir. # This is dependent on the depth-first traversal from os.walk # add path.sep to each when checking the prefix, so we can use # path.dirname after common = os.path.commonprefix([from_dir + bytes_sep, to_dir + bytes_sep]) # now take the dirname, since commonprefix is character based, # and we either have a seperator, or a partial name common = os.path.dirname(common) cur_dir = from_dir.rstrip(bytes_sep) while cur_dir != common: cur_dir = os.path.split(cur_dir)[0] self._send_popd() # now we're in our common base directory, so on self._send_pushd(to_dir) def _send_recursive(self, files): for base in files: if not os.path.isdir(base): # filename mixed into the bunch self._send_files([base]) continue last_dir = asbytes(base) for root, dirs, fls in os.walk(base): self._chdir(last_dir, asbytes(root)) self._send_files([os.path.join(root, f) for f in fls]) last_dir = asbytes(root) # back out of the directory while self._pushed > 0: self._send_popd() def _send_pushd(self, directory): (mode, size, mtime, atime) = self._read_stats(directory) basename = asbytes(os.path.basename(directory)) if self.preserve_times: self._send_time(mtime, atime) self.channel.sendall(('D%s 0 ' % mode).encode('ascii') + basename.replace(b'\n', b'\\^J') + b'\n') self._recv_confirm() self._pushed += 1 def _send_popd(self): self.channel.sendall('E\n') self._recv_confirm() 
self._pushed -= 1 def _send_time(self, mtime, atime): self.channel.sendall(('T%d 0 %d 0\n' % (mtime, atime)).encode('ascii')) self._recv_confirm() def _recv_confirm(self): # read scp response msg = b'' try: msg = self.channel.recv(512) except SocketTimeout: raise SCPException('Timout waiting for scp response') # slice off the first byte, so this compare will work in py2 and py3 if msg and msg[0:1] == b'\x00': return elif msg and msg[0:1] == b'\x01': raise SCPException(asunicode(msg[1:])) elif self.channel.recv_stderr_ready(): msg = self.channel.recv_stderr(512) raise SCPException(asunicode(msg)) elif not msg: raise SCPException('No response from server') else: raise SCPException('Invalid response from server', msg) def _recv_all(self): # loop over scp commands, and receive as necessary command = {b'C': self._recv_file, b'T': self._set_time, b'D': self._recv_pushd, b'E': self._recv_popd} while not self.channel.closed: # wait for command as long as we're open self.channel.sendall('\x00') msg = self.channel.recv(1024) if not msg: # chan closed while recving break assert msg[-1:] == b'\n' msg = msg[:-1] code = msg[0:1] try: command[code](msg[1:]) except KeyError: raise SCPException(asunicode(msg[1:])) # directory times can't be set until we're done writing files self._set_dirtimes() def _set_time(self, cmd): try: times = cmd.split(b' ') mtime = int(times[0]) atime = int(times[2]) or mtime except: self.channel.send(b'\x01') raise SCPException('Bad time format') # save for later self._utime = (atime, mtime) def _recv_file(self, cmd): chan = self.channel parts = cmd.strip().split(b' ', 2) try: mode = int(parts[0], 8) size = int(parts[1]) if self._rename: path = self._recv_dir self._rename = False elif os.name == 'nt': path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8')) else: path = os.path.join(asbytes(self._recv_dir), parts[2]) except: chan.send('\x01') chan.close() raise SCPException('Bad file format') try: file_hdl = open(path, 'wb') except 
IOError as e: chan.send(b'\x01' + str(e).encode('utf-8')) chan.close() raise if self._progress: if size == 0: # avoid divide-by-zero self._progress(path, 1, 1) else: self._progress(path, size, 0) buff_size = self.buff_size pos = 0 chan.send(b'\x00') try: while pos < size: # we have to make sure we don't read the final byte if size - pos <= buff_size: buff_size = size - pos file_hdl.write(chan.recv(buff_size)) pos = file_hdl.tell() if self._progress: self._progress(path, size, pos) msg = chan.recv(512) if msg and msg[0:1] != b'\x00': raise SCPException(asunicode(msg[1:])) except SocketTimeout: chan.close() raise SCPException('Error receiving, socket.timeout') file_hdl.truncate() try: os.utime(path, self._utime) self._utime = None os.chmod(path, mode) # should we notify the other end? finally: file_hdl.close() # '\x00' confirmation sent in _recv_all def _recv_pushd(self, cmd): parts = cmd.split(b' ', 2) try: mode = int(parts[0], 8) if self._rename: path = self._recv_dir self._rename = False elif os.name == 'nt': path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8')) else: path = os.path.join(asbytes(self._recv_dir), parts[2]) except: self.channel.send(b'\x01') raise SCPException('Bad directory format') try: if not os.path.exists(path): os.mkdir(path, mode) elif os.path.isdir(path): os.chmod(path, mode) else: raise SCPException('%s: Not a directory' % path) self._dirtimes[path] = (self._utime) self._utime = None self._recv_dir = path except (OSError, SCPException) as e: self.channel.send(b'\x01' + asbytes(str(e))) raise def _recv_popd(self, *cmd): self._recv_dir = os.path.split(self._recv_dir)[0] def _set_dirtimes(self): try: for d in self._dirtimes: os.utime(d, self._dirtimes[d]) finally: self._dirtimes = {} class SCPException(Exception): """SCP exception class""" pass class SSHClient_noauth(SSHClient): def _auth(self, username, *args): self._transport.auth_none(username) return class SCP(object): def __init__(self, host, port, username='root', 
password='root'): self.host = host self.port = port self.username = username self.password = password def createSSHClient(self,server,port,user,password): client = SSHClient() client.set_missing_host_key_policy(AutoAddPolicy()) client.connect(server, port, user, password) return client def createSSHClient_No_password(self,server,port,user,password = None): t = SSHClient_noauth() t.set_missing_host_key_policy(AutoAddPolicy()) t.connect(server,port,user,password) return t def connect(self): try: self.ssh= self.createSSHClient(self.host, self.port, self.username, self.password) self.scp = SCPClient(self.ssh.get_transport()) print('success to conenct SCP Client') except: self.ssh = False return self.ssh def upload(self, local_path, remote_path, mode='bin'): mode = mode and mode.lower() or 'bin' if(mode not in ('bin', 'text')): raise RuntimeError("Invalid Ftp mode (%s)" % mode) file_mode = (mode == 'bin') and 'rb' or 'r'#file_mode = 'rb',rb represent readonly file = open(local_path, file_mode) try: if(mode == 'bin'): ret = self.scp.put(local_path,remote_path) #store a file in binary mode print('success to upload %s to remote_path %s'%(local_path,remote_path)) else: print('error') #store a file in line mode return True finally: file.close() return False def download(self, local_path, remote_path, mode='bin'): mode = mode and mode.lower() or 'bin' if(mode not in ('bin', 'text')): raise RuntimeError("Invalid Ftp mode (%s)" % mode) file_mode = (mode == 'bin') and 'w+b' or 'w' file = open(local_path, file_mode) try: if(mode == 'bin'): ret = self.scp.get(remote_path,local_path) print('success to download remote file to local') else: print('error') # self.log.debug("get %s" % ftp_path) # self.log.trace(ret) file.close() return True except Exception as e: file.close() os.remove(local_path) raise e return False def close(self): self.ssh.close() class _SSH_Shell(): def __init__(self): None def open(self,hostname,port,username = 'root',password = None): if password == "None": 
password = None if password == None: self.ssh = SSHClient_noauth() else: self.ssh = SSHClient() self.ssh.set_missing_host_key_policy(AutoAddPolicy()) try: self.ssh.connect(hostname,port,username,password) return self except Exception: return False def write(self,command): stdin,stdout,stderr = self.ssh.exec_command(command) print("send command => %s"%command)#logging.info return stdin,stdout,stderr def write_long(self,command): stdin,stdout,stderr = self.ssh.exec_command(command) print("send command => %s"%command) result = "" for line1 in stderr.readlines(): print("stdout=>"+str(line1)) result = result+line1 for line1 in stdout.readlines(): print("stdout=>"+str(line1)) result = result+line1 return result def
(self,std): cmd_result = std[1].read().decode(encoding='utf-8'),std[2].read().decode(encoding='utf-8') print(time.strftime('%Y%m%d%H%M%S') + '=>'+cmd_result[0]) return cmd_result def command(self,command): std = self.write(command) time.sleep(0.5) res = self.read(std) return res def close(self): self.ssh.close() class Dialog(wx.App): def __init__(self,window): wx.App.__init__(self) self.window = window def erroralert(self,msg,title): dlg = wx.MessageDialog(None,msg,title,wx.OK|wx.CANCEL|wx.ICON_ERROR) result = dlg.ShowModal() if result == wx.CANCEL: dlg.Destroy() else: dlg.Destroy() return result def Infoalert(self,msg,title): dlg = wx.MessageDialog(None,msg,title,wx.OK|wx.CANCEL|wx.ICON_INFORMATION) result = dlg.ShowModal() if result == wx.CANCEL: dlg.Destroy() else: dlg.Destroy() return result def download_from_ftp_to_local_SCP(host,port,ftp_path, local_file, mode = 'bin', user = 'root', password = 'root'): """ This keyword download a file from Ftp server to local of test case running. 
example usage: | Download From Ftp To Local | ftp://10.56.117.112/etc/ipsec_configuration.xml | c:${/}ipsec_configuration.xml | BIN | | Return value | the output of command | """ try: scp_obj = SCP(host, port, user, password) scp_obj.connect() except Exception: scp_obj = SCP(host, port, user, password) if os.path.isdir(local_file): print('not support now!') else: scp_obj.download(local_file, ftp_path, mode) scp_obj.close() def PowerSuppy_Init(interface,address): ''' sn should be string, eg: Init_NRP('103123'), '103123' is from NRP_Z21 power viewer Z11 productID: 0x000c, Z21 productID: 0x0003 ''' visaDLL = 'c:/windows/system32/visa64.dll' resourceManager = pyvisa.ResourceManager(visaDLL) if interface == "GPIB": print("PowerSupply remote mode is:"+ interface+", GPIB Address is:"+address) try: PS_handler = resourceManager.open_resource("GPIB0::%s::INSTR"%(address)) return PS_handler except Exception: return False elif interface == "TCPIP": print("PowerSupply remote mode is:"+ interface+", IP Address is:"+address) try: PS_handler = resourceManager.open_resource("TCPIP0::%s::INSTR"%(address)) return PS_handler except Exception: return False else: print("Not Supply this remote mode now") return False def Set_Power_mode(handler,mode): if mode == "ON": handler.write("OUTPUT:STAT ON") else: handler.write("OUTPUT:STAT OFF") def check_ip_pingable(ip_address): status, result = sp.getstatusoutput("ping " + ip_address + " -w 2000") print(status, "result="+result) if ("timed out" in result) or ("fail" in result): return False return True def waitfor_pingable(ip_address,try_times): times = 1 while True: res = check_ip_pingable(ip_address) if not res: print("ping %s Failed %s times"%(ip_address,times)) if times > try_times: break else: print("ping %s Successfully!"%ip_address) break times+=1 if times > try_times: return False else: return True def Init_Breamer_shell(): #Init beamer port #Input: config_file #Output: beamer handler or false if one is not avaliable beamer_ip = 
"192.168.101.1" beamer_port = 22 beamer_username = "toor4nsn" beamer_pwd = "oZPS0POrRieRtu" beamer_c = _SSH_Shell() for try_time in range(1,10): beamer_handler = beamer_c.open(beamer_ip, beamer_port, beamer_username,beamer_pwd) print("init beamer:%s for %s times, result is %s"%(beamer_ip,try_time,beamer_handler)) if beamer_handler == False: if try_time == 9: break else: pass else: break time.sleep(1) #beamer_handler.write("su root") return beamer_handler def check_beamer_processer(beamer_handler): re = beamer_handler.command("ps")[0] print(re.find("libtestabilitytcp.so.1.0")) if re.find("libtestabilitytcp.so.1.0")>0: return True return False dialog = Dialog(wx.App) def Bring_up(): dialog.Infoalert("Make sure the RRU is power on", "info") sum_run_count = 0 fail_count = 0 while True: #self.dialog.Infoalert("We will bring up beamer from flash,Please make sure the WE is UNCONNECTED!!!", "Warning") # powersuppy_handler = PowerSuppy_Init(interface,address) # if not powersuppy_handler: # dialog.erroralert("The Power supply init failed! 
Please check", "ERROR") # return False # Set_Power_mode(powersuppy_handler,"OFF") # time.sleep(5) # Set_Power_msode(powersuppy_handler,"ON") beamer_handler = Init_Breamer_shell() if not beamer_handler: dialog.erroralert("Beamer handler init failed for 10 times", "ERROR") return False beamer_handler.write("/usr/bin/rfsw-ncfg-reboot") sum_run_count+=1 print("waiting for 60s...") time.sleep(120) beammer_ip = "192.168.101.1" T = waitfor_pingable(beammer_ip,1000) if not T: fail_count+=1 print("reboot for %s times, beamer run failed %s times"%(sum_run_count,fail_count)) continue print("Beamer Start up normally Now!") beamer_handler = Init_Breamer_shell() if not beamer_handler: dialog.erroralert("Beamer handler init failed for 10 times", "ERROR") return False retry_beamer = 0 for retry_beamer in range(20): res = check_beamer_processer(beamer_handler) if res: break else: if retry_beamer == 19: print("beamer can't start up correctly,will power down and up again") beamer_handler.write("/usr/bin/ccsShell.sh log -c full") time.sleep(10) t = time.strftime('%Y%m%d%H%M%S') folder = "./runtimelog" if not os.path.exists(folder): os.mkdir(folder) download_from_ftp_to_local_SCP(beammer_ip,22,"/ram/1011_runtime.zip", folder+"/"+t+"_runtime.zip", mode = 'bin', user = 'toor4nsn', password = 'oZPS0POrRieRtu') download_from_ftp_to_local_SCP(beammer_ip,22,"/ram/1011_startup.zip", folder+"/"+t+"_runtime.zip", mode = 'bin', user = 'toor4nsn', password = 'oZPS0POrRieRtu') fail_count+=1 break time.sleep(2) print("reboot for %s times, beamer run failed %s times"%(sum_run_count,fail_count)) def runtimelog(): beamer_handler = Init_Breamer_shell() beamer_handler.write("/usr/bin/ccsShell.sh log -c full") time.sleep(10) t = time.strftime('%Y%m%d%H%M%S') folder = "./runtimelog" if not os.path.exists(folder): os.mkdir(folder) download_from_ftp_to_local_SCP(beammer_ip, 22, "/ram/1011_runtime.zip", folder + "/" + t + "_runtime.zip", mode='bin', user='toor4nsn', password='oZPS0POrRieRtu') 
download_from_ftp_to_local_SCP(beammer_ip, 22, "/ram/1011_startup.zip", folder + "/" + t + "_runtime.zip", mode='bin', user='toor4nsn', password='oZPS0POrRieRtu') # runtimelog()
read
identifier_name
structure.rs
mod iter; use super::{ drop_items::DropItem, dyn_iter::{DynIter, DynIterMut}, items::ItemType, underground_belt::UnderDirection, water_well::FluidBox, FactorishState, Inventory, InventoryTrait, Recipe, }; use rotate_enum::RotateEnum; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use wasm_bindgen::prelude::*; use web_sys::CanvasRenderingContext2d; #[macro_export] macro_rules! serialize_impl { () => { fn serialize(&self) -> serde_json::Result<serde_json::Value> { serde_json::to_value(self) } }; } #[macro_export] macro_rules! draw_fuel_alarm { ($self_:expr, $state:expr, $context:expr) => { if $self_.recipe.is_some() && $self_.power == 0. && $state.sim_time % 1. < 0.5 { if let Some(img) = $state.image_fuel_alarm.as_ref() { let (x, y) = ( $self_.position.x as f64 * 32., $self_.position.y as f64 * 32., ); $context.draw_image_with_image_bitmap(&img.bitmap, x, y)?; } else { return js_err!("fuel alarm image not available"); } } }; } #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)] pub(crate) struct StructureId { pub id: u32, pub gen: u32, } pub(crate) struct StructureEntryIterator<'a>(&'a mut [StructureEntry], &'a mut [StructureEntry]); impl<'a> DynIter for StructureEntryIterator<'a> { type Item = StructureEntry; fn dyn_iter(&self) -> Box<dyn Iterator<Item = &Self::Item> + '_> { Box::new(self.0.iter().chain(self.1.iter())) } fn as_dyn_iter(&self) -> &dyn DynIter<Item = Self::Item> { self } } impl<'a> DynIterMut for StructureEntryIterator<'a> { fn dyn_iter_mut(&mut self) -> Box<dyn Iterator<Item = &mut Self::Item> + '_> { Box::new(self.0.iter_mut().chain(self.1.iter_mut())) } } pub(crate) use self::iter::StructureDynIter; #[derive(Eq, PartialEq, Hash, Copy, Clone, Debug, Serialize, Deserialize)] pub(crate) struct Position { pub x: i32, pub y: i32,
impl Position { pub fn new(x: i32, y: i32) -> Self { Self { x, y } } pub(crate) fn div_mod(&self, size: i32) -> (Position, Position) { let div = Position::new(self.x.div_euclid(size), self.y.div_euclid(size)); let mod_ = Position::new(self.x.rem_euclid(size), self.y.rem_euclid(size)); (div, mod_) } pub(crate) fn add(&self, o: (i32, i32)) -> Position { Self { x: self.x + o.0, y: self.y + o.1, } } pub(crate) fn distance(&self, position: &Position) -> i32 { (position.x - self.x).abs().max((position.y - self.y).abs()) } /// Check whether the positions are neighbors. Return false if they are exactly the same. #[allow(dead_code)] pub(crate) fn is_neighbor(&self, pos2: &Position) -> bool { [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().any(|rel_pos| { let pos = Position { x: pos2.x + rel_pos[0], y: pos2.y + rel_pos[1], }; *self == pos }) } pub(crate) fn neighbor_index(&self, pos2: &Position) -> Option<u32> { for (i, rel_pos) in [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().enumerate() { let pos = Position { x: pos2.x + rel_pos[0], y: pos2.y + rel_pos[1], }; if *self == pos { return Some(i as u32); } } None } } impl From<&[i32; 2]> for Position { fn from(xy: &[i32; 2]) -> Self { Self { x: xy[0], y: xy[1] } } } pub(crate) struct Size { pub width: i32, pub height: i32, } pub(crate) struct BoundingBox { pub x0: i32, pub y0: i32, pub x1: i32, pub y1: i32, } #[derive(Copy, Clone, Serialize, Deserialize, RotateEnum, PartialEq)] pub(crate) enum Rotation { Left, Top, Right, Bottom, } impl Rotation { pub fn delta(&self) -> (i32, i32) { match self { Rotation::Left => (-1, 0), Rotation::Top => (0, -1), Rotation::Right => (1, 0), Rotation::Bottom => (0, 1), } } pub fn delta_inv(&self) -> (i32, i32) { let delta = self.delta(); (-delta.0, -delta.1) } pub fn angle_deg(&self) -> i32 { self.angle_4() * 90 } pub fn angle_4(&self) -> i32 { match self { Rotation::Left => 2, Rotation::Top => 3, Rotation::Right => 0, Rotation::Bottom => 1, } } pub fn angle_rad(&self) -> f64 { self.angle_deg() as f64 * 
std::f64::consts::PI / 180. } pub fn is_horizontal(&self) -> bool { matches!(self, Rotation::Left | Rotation::Right) } pub fn is_vertcial(&self) -> bool { !self.is_horizontal() } } pub(crate) enum FrameProcResult { None, InventoryChanged(Position), } pub(crate) enum ItemResponse { Move(i32, i32), Consume, } pub(crate) type ItemResponseResult = (ItemResponse, Option<FrameProcResult>); #[derive(Debug)] pub(crate) enum RotateErr { NotFound, NotSupported, Other(JsValue), } pub(crate) trait Structure { fn name(&self) -> &str; fn position(&self) -> &Position; fn rotation(&self) -> Option<Rotation> { None } /// Specialized method to get underground belt direction. /// We don't like to put this to Structure trait method, but we don't have an option /// as long as we use trait object polymorphism. /// TODO: Revise needed in ECS. fn under_direction(&self) -> Option<UnderDirection> { None } fn size(&self) -> Size { Size { width: 1, height: 1, } } fn bounding_box(&self) -> BoundingBox { let (position, size) = (self.position(), self.size()); BoundingBox { x0: position.x, y0: position.y, x1: position.x + size.width, y1: position.y + size.height, } } fn contains(&self, pos: &Position) -> bool { let bb = self.bounding_box(); bb.x0 <= pos.x && pos.x < bb.x1 && bb.y0 <= pos.y && pos.y < bb.y1 } fn draw( &self, state: &FactorishState, context: &CanvasRenderingContext2d, depth: i32, is_tooptip: bool, ) -> Result<(), JsValue>; fn desc(&self, _state: &FactorishState) -> String { String::from("") } fn frame_proc( &mut self, _me: StructureId, _state: &mut FactorishState, _structures: &mut StructureDynIter, ) -> Result<FrameProcResult, ()> { Ok(FrameProcResult::None) } /// event handler for costruction events around the structure. fn on_construction( &mut self, _other_id: StructureId, _other: &dyn Structure, _others: &StructureDynIter, _construct: bool, ) -> Result<(), JsValue> { Ok(()) } /// event handler for costruction events for this structure itself. 
fn on_construction_self( &mut self, _id: StructureId, _others: &StructureDynIter, _construct: bool, ) -> Result<(), JsValue> { Ok(()) } fn movable(&self) -> bool { false } fn rotate( &mut self, _state: &mut FactorishState, _others: &StructureDynIter, ) -> Result<(), RotateErr> { Err(RotateErr::NotSupported) } fn set_rotation(&mut self, _rotation: &Rotation) -> Result<(), ()> { Err(()) } /// Called every frame for each item that is on this structure. fn item_response(&mut self, _item: &DropItem) -> Result<ItemResponseResult, ()> { Err(()) } fn input(&mut self, _o: &DropItem) -> Result<(), JsValue> { Err(JsValue::from_str("Not supported")) } /// Returns wheter the structure can accept an item as the input. If this structure is a factory /// that returns recipes by get_selected_recipe(), it will check if it's in the inputs. fn can_input(&self, item_type: &ItemType) -> bool { if let Some(recipe) = self.get_selected_recipe() { recipe.input.get(item_type).is_some() } else { false } } /// Query a set of items that this structure can output. Actual output would not happen until `output()`, thus /// this method is immutable. It should return empty Inventory if it cannot output anything. fn can_output(&self, _structures: &StructureDynIter) -> Inventory { Inventory::new() } /// Perform actual output. The operation should always succeed since the output-tability is checked beforehand /// with `can_output`. fn output(&mut self, _state: &mut FactorishState, _item_type: &ItemType) -> Result<(), ()> { Err(()) } fn burner_inventory(&self) -> Option<&Inventory> { None } fn add_burner_inventory(&mut self, _item_type: &ItemType, _amount: isize) -> isize { 0 } fn burner_energy(&self) -> Option<(f64, f64)> { None } fn inventory(&self, _is_input: bool) -> Option<&Inventory> { None } fn inventory_mut(&mut self, _is_input: bool) -> Option<&mut Inventory> { None } /// Some structures don't have an inventory, but still can have some item, e.g. inserter hands. 
/// We need to retrieve them when we destory such a structure, or we might lose items into void. /// It will take away the inventory by default, destroying the instance's inventory. fn destroy_inventory(&mut self) -> Inventory { let mut ret = self .inventory_mut(true) .map_or(Inventory::new(), |inventory| std::mem::take(inventory)); ret.merge( self.inventory_mut(false) .map_or(Inventory::new(), |inventory| std::mem::take(inventory)), ); ret } /// Returns a list of recipes. The return value is wrapped in a Cow because some /// structures can return dynamically configured list of recipes, while some others /// have static fixed list of recipes. In reality, all our structures return a fixed list though. fn get_recipes(&self) -> Cow<[Recipe]> { Cow::from(&[][..]) } fn select_recipe(&mut self, _index: usize) -> Result<bool, JsValue> { Err(JsValue::from_str("recipes not available")) } fn get_selected_recipe(&self) -> Option<&Recipe> { None } fn fluid_box(&self) -> Option<Vec<&FluidBox>> { None } fn fluid_box_mut(&mut self) -> Option<Vec<&mut FluidBox>> { None } fn connection( &self, state: &FactorishState, structures: &dyn DynIter<Item = StructureEntry>, ) -> [bool; 4] { // let mut structures_copy = structures.clone(); let has_fluid_box = |x, y| { if x < 0 || state.width <= x as u32 || y < 0 || state.height <= y as u32 { return false; } if let Some(structure) = structures .dyn_iter() .filter_map(|s| s.dynamic.as_deref()) .find(|s| *s.position() == Position { x, y }) { return structure.fluid_box().is_some(); } false }; // Fluid containers connect to other containers let Position { x, y } = *self.position(); let l = has_fluid_box(x - 1, y); let t = has_fluid_box(x, y - 1); let r = has_fluid_box(x + 1, y); let b = has_fluid_box(x, y + 1); [l, t, r, b] } /// If this structure can connect to power grid. fn power_source(&self) -> bool { false } /// If this structure drains power from the grid fn power_sink(&self) -> bool { false } /// Try to drain power from this structure. 
/// @param demand in kilojoules. /// @returns None if it does not support power supply. fn power_outlet(&mut self, _demand: f64) -> Option<f64> { None } fn wire_reach(&self) -> u32 { 3 } fn serialize(&self) -> serde_json::Result<serde_json::Value>; } pub(crate) type StructureBoxed = Box<dyn Structure>; pub(crate) struct StructureEntry { pub gen: u32, pub dynamic: Option<StructureBoxed>, }
}
random_line_split
structure.rs
mod iter; use super::{ drop_items::DropItem, dyn_iter::{DynIter, DynIterMut}, items::ItemType, underground_belt::UnderDirection, water_well::FluidBox, FactorishState, Inventory, InventoryTrait, Recipe, }; use rotate_enum::RotateEnum; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use wasm_bindgen::prelude::*; use web_sys::CanvasRenderingContext2d; #[macro_export] macro_rules! serialize_impl { () => { fn serialize(&self) -> serde_json::Result<serde_json::Value> { serde_json::to_value(self) } }; } #[macro_export] macro_rules! draw_fuel_alarm { ($self_:expr, $state:expr, $context:expr) => { if $self_.recipe.is_some() && $self_.power == 0. && $state.sim_time % 1. < 0.5 { if let Some(img) = $state.image_fuel_alarm.as_ref() { let (x, y) = ( $self_.position.x as f64 * 32., $self_.position.y as f64 * 32., ); $context.draw_image_with_image_bitmap(&img.bitmap, x, y)?; } else { return js_err!("fuel alarm image not available"); } } }; } #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)] pub(crate) struct StructureId { pub id: u32, pub gen: u32, } pub(crate) struct StructureEntryIterator<'a>(&'a mut [StructureEntry], &'a mut [StructureEntry]); impl<'a> DynIter for StructureEntryIterator<'a> { type Item = StructureEntry; fn dyn_iter(&self) -> Box<dyn Iterator<Item = &Self::Item> + '_> { Box::new(self.0.iter().chain(self.1.iter())) } fn as_dyn_iter(&self) -> &dyn DynIter<Item = Self::Item> { self } } impl<'a> DynIterMut for StructureEntryIterator<'a> { fn dyn_iter_mut(&mut self) -> Box<dyn Iterator<Item = &mut Self::Item> + '_> { Box::new(self.0.iter_mut().chain(self.1.iter_mut())) } } pub(crate) use self::iter::StructureDynIter; #[derive(Eq, PartialEq, Hash, Copy, Clone, Debug, Serialize, Deserialize)] pub(crate) struct Position { pub x: i32, pub y: i32, } impl Position { pub fn new(x: i32, y: i32) -> Self { Self { x, y } } pub(crate) fn div_mod(&self, size: i32) -> (Position, Position) { let div = Position::new(self.x.div_euclid(size), 
self.y.div_euclid(size)); let mod_ = Position::new(self.x.rem_euclid(size), self.y.rem_euclid(size)); (div, mod_) } pub(crate) fn add(&self, o: (i32, i32)) -> Position { Self { x: self.x + o.0, y: self.y + o.1, } } pub(crate) fn distance(&self, position: &Position) -> i32 { (position.x - self.x).abs().max((position.y - self.y).abs()) } /// Check whether the positions are neighbors. Return false if they are exactly the same. #[allow(dead_code)] pub(crate) fn is_neighbor(&self, pos2: &Position) -> bool { [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().any(|rel_pos| { let pos = Position { x: pos2.x + rel_pos[0], y: pos2.y + rel_pos[1], }; *self == pos }) } pub(crate) fn neighbor_index(&self, pos2: &Position) -> Option<u32> { for (i, rel_pos) in [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().enumerate() { let pos = Position { x: pos2.x + rel_pos[0], y: pos2.y + rel_pos[1], }; if *self == pos { return Some(i as u32); } } None } } impl From<&[i32; 2]> for Position { fn from(xy: &[i32; 2]) -> Self { Self { x: xy[0], y: xy[1] } } } pub(crate) struct Size { pub width: i32, pub height: i32, } pub(crate) struct BoundingBox { pub x0: i32, pub y0: i32, pub x1: i32, pub y1: i32, } #[derive(Copy, Clone, Serialize, Deserialize, RotateEnum, PartialEq)] pub(crate) enum Rotation { Left, Top, Right, Bottom, } impl Rotation { pub fn delta(&self) -> (i32, i32) { match self { Rotation::Left => (-1, 0), Rotation::Top => (0, -1), Rotation::Right => (1, 0), Rotation::Bottom => (0, 1), } } pub fn delta_inv(&self) -> (i32, i32) { let delta = self.delta(); (-delta.0, -delta.1) } pub fn angle_deg(&self) -> i32 { self.angle_4() * 90 } pub fn angle_4(&self) -> i32 { match self { Rotation::Left => 2, Rotation::Top => 3, Rotation::Right => 0, Rotation::Bottom => 1, } } pub fn angle_rad(&self) -> f64 { self.angle_deg() as f64 * std::f64::consts::PI / 180. 
} pub fn is_horizontal(&self) -> bool { matches!(self, Rotation::Left | Rotation::Right) } pub fn is_vertcial(&self) -> bool { !self.is_horizontal() } } pub(crate) enum FrameProcResult { None, InventoryChanged(Position), } pub(crate) enum ItemResponse { Move(i32, i32), Consume, } pub(crate) type ItemResponseResult = (ItemResponse, Option<FrameProcResult>); #[derive(Debug)] pub(crate) enum RotateErr { NotFound, NotSupported, Other(JsValue), } pub(crate) trait Structure { fn name(&self) -> &str; fn position(&self) -> &Position; fn rotation(&self) -> Option<Rotation> { None } /// Specialized method to get underground belt direction. /// We don't like to put this to Structure trait method, but we don't have an option /// as long as we use trait object polymorphism. /// TODO: Revise needed in ECS. fn under_direction(&self) -> Option<UnderDirection> { None } fn size(&self) -> Size { Size { width: 1, height: 1, } } fn bounding_box(&self) -> BoundingBox { let (position, size) = (self.position(), self.size()); BoundingBox { x0: position.x, y0: position.y, x1: position.x + size.width, y1: position.y + size.height, } } fn contains(&self, pos: &Position) -> bool { let bb = self.bounding_box(); bb.x0 <= pos.x && pos.x < bb.x1 && bb.y0 <= pos.y && pos.y < bb.y1 } fn draw( &self, state: &FactorishState, context: &CanvasRenderingContext2d, depth: i32, is_tooptip: bool, ) -> Result<(), JsValue>; fn desc(&self, _state: &FactorishState) -> String { String::from("") } fn frame_proc( &mut self, _me: StructureId, _state: &mut FactorishState, _structures: &mut StructureDynIter, ) -> Result<FrameProcResult, ()> { Ok(FrameProcResult::None) } /// event handler for costruction events around the structure. fn on_construction( &mut self, _other_id: StructureId, _other: &dyn Structure, _others: &StructureDynIter, _construct: bool, ) -> Result<(), JsValue> { Ok(()) } /// event handler for costruction events for this structure itself. 
fn on_construction_self( &mut self, _id: StructureId, _others: &StructureDynIter, _construct: bool, ) -> Result<(), JsValue> { Ok(()) } fn movable(&self) -> bool { false } fn rotate( &mut self, _state: &mut FactorishState, _others: &StructureDynIter, ) -> Result<(), RotateErr> { Err(RotateErr::NotSupported) } fn set_rotation(&mut self, _rotation: &Rotation) -> Result<(), ()> { Err(()) } /// Called every frame for each item that is on this structure. fn item_response(&mut self, _item: &DropItem) -> Result<ItemResponseResult, ()> { Err(()) } fn input(&mut self, _o: &DropItem) -> Result<(), JsValue> { Err(JsValue::from_str("Not supported")) } /// Returns wheter the structure can accept an item as the input. If this structure is a factory /// that returns recipes by get_selected_recipe(), it will check if it's in the inputs. fn can_input(&self, item_type: &ItemType) -> bool { if let Some(recipe) = self.get_selected_recipe() { recipe.input.get(item_type).is_some() } else { false } } /// Query a set of items that this structure can output. Actual output would not happen until `output()`, thus /// this method is immutable. It should return empty Inventory if it cannot output anything. fn can_output(&self, _structures: &StructureDynIter) -> Inventory { Inventory::new() } /// Perform actual output. The operation should always succeed since the output-tability is checked beforehand /// with `can_output`. fn output(&mut self, _state: &mut FactorishState, _item_type: &ItemType) -> Result<(), ()> { Err(()) } fn burner_inventory(&self) -> Option<&Inventory> { None } fn add_burner_inventory(&mut self, _item_type: &ItemType, _amount: isize) -> isize { 0 } fn burner_energy(&self) -> Option<(f64, f64)> { None } fn inventory(&self, _is_input: bool) -> Option<&Inventory> { None } fn inventory_mut(&mut self, _is_input: bool) -> Option<&mut Inventory> { None } /// Some structures don't have an inventory, but still can have some item, e.g. inserter hands. 
/// We need to retrieve them when we destory such a structure, or we might lose items into void. /// It will take away the inventory by default, destroying the instance's inventory. fn destroy_inventory(&mut self) -> Inventory { let mut ret = self .inventory_mut(true) .map_or(Inventory::new(), |inventory| std::mem::take(inventory)); ret.merge( self.inventory_mut(false) .map_or(Inventory::new(), |inventory| std::mem::take(inventory)), ); ret } /// Returns a list of recipes. The return value is wrapped in a Cow because some /// structures can return dynamically configured list of recipes, while some others /// have static fixed list of recipes. In reality, all our structures return a fixed list though. fn get_recipes(&self) -> Cow<[Recipe]>
fn select_recipe(&mut self, _index: usize) -> Result<bool, JsValue> { Err(JsValue::from_str("recipes not available")) } fn get_selected_recipe(&self) -> Option<&Recipe> { None } fn fluid_box(&self) -> Option<Vec<&FluidBox>> { None } fn fluid_box_mut(&mut self) -> Option<Vec<&mut FluidBox>> { None } fn connection( &self, state: &FactorishState, structures: &dyn DynIter<Item = StructureEntry>, ) -> [bool; 4] { // let mut structures_copy = structures.clone(); let has_fluid_box = |x, y| { if x < 0 || state.width <= x as u32 || y < 0 || state.height <= y as u32 { return false; } if let Some(structure) = structures .dyn_iter() .filter_map(|s| s.dynamic.as_deref()) .find(|s| *s.position() == Position { x, y }) { return structure.fluid_box().is_some(); } false }; // Fluid containers connect to other containers let Position { x, y } = *self.position(); let l = has_fluid_box(x - 1, y); let t = has_fluid_box(x, y - 1); let r = has_fluid_box(x + 1, y); let b = has_fluid_box(x, y + 1); [l, t, r, b] } /// If this structure can connect to power grid. fn power_source(&self) -> bool { false } /// If this structure drains power from the grid fn power_sink(&self) -> bool { false } /// Try to drain power from this structure. /// @param demand in kilojoules. /// @returns None if it does not support power supply. fn power_outlet(&mut self, _demand: f64) -> Option<f64> { None } fn wire_reach(&self) -> u32 { 3 } fn serialize(&self) -> serde_json::Result<serde_json::Value>; } pub(crate) type StructureBoxed = Box<dyn Structure>; pub(crate) struct StructureEntry { pub gen: u32, pub dynamic: Option<StructureBoxed>, }
{ Cow::from(&[][..]) }
identifier_body
structure.rs
mod iter; use super::{ drop_items::DropItem, dyn_iter::{DynIter, DynIterMut}, items::ItemType, underground_belt::UnderDirection, water_well::FluidBox, FactorishState, Inventory, InventoryTrait, Recipe, }; use rotate_enum::RotateEnum; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use wasm_bindgen::prelude::*; use web_sys::CanvasRenderingContext2d; #[macro_export] macro_rules! serialize_impl { () => { fn serialize(&self) -> serde_json::Result<serde_json::Value> { serde_json::to_value(self) } }; } #[macro_export] macro_rules! draw_fuel_alarm { ($self_:expr, $state:expr, $context:expr) => { if $self_.recipe.is_some() && $self_.power == 0. && $state.sim_time % 1. < 0.5 { if let Some(img) = $state.image_fuel_alarm.as_ref() { let (x, y) = ( $self_.position.x as f64 * 32., $self_.position.y as f64 * 32., ); $context.draw_image_with_image_bitmap(&img.bitmap, x, y)?; } else { return js_err!("fuel alarm image not available"); } } }; } #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)] pub(crate) struct StructureId { pub id: u32, pub gen: u32, } pub(crate) struct StructureEntryIterator<'a>(&'a mut [StructureEntry], &'a mut [StructureEntry]); impl<'a> DynIter for StructureEntryIterator<'a> { type Item = StructureEntry; fn dyn_iter(&self) -> Box<dyn Iterator<Item = &Self::Item> + '_> { Box::new(self.0.iter().chain(self.1.iter())) } fn as_dyn_iter(&self) -> &dyn DynIter<Item = Self::Item> { self } } impl<'a> DynIterMut for StructureEntryIterator<'a> { fn dyn_iter_mut(&mut self) -> Box<dyn Iterator<Item = &mut Self::Item> + '_> { Box::new(self.0.iter_mut().chain(self.1.iter_mut())) } } pub(crate) use self::iter::StructureDynIter; #[derive(Eq, PartialEq, Hash, Copy, Clone, Debug, Serialize, Deserialize)] pub(crate) struct Position { pub x: i32, pub y: i32, } impl Position { pub fn new(x: i32, y: i32) -> Self { Self { x, y } } pub(crate) fn div_mod(&self, size: i32) -> (Position, Position) { let div = Position::new(self.x.div_euclid(size), 
self.y.div_euclid(size)); let mod_ = Position::new(self.x.rem_euclid(size), self.y.rem_euclid(size)); (div, mod_) } pub(crate) fn add(&self, o: (i32, i32)) -> Position { Self { x: self.x + o.0, y: self.y + o.1, } } pub(crate) fn distance(&self, position: &Position) -> i32 { (position.x - self.x).abs().max((position.y - self.y).abs()) } /// Check whether the positions are neighbors. Return false if they are exactly the same. #[allow(dead_code)] pub(crate) fn is_neighbor(&self, pos2: &Position) -> bool { [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().any(|rel_pos| { let pos = Position { x: pos2.x + rel_pos[0], y: pos2.y + rel_pos[1], }; *self == pos }) } pub(crate) fn neighbor_index(&self, pos2: &Position) -> Option<u32> { for (i, rel_pos) in [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().enumerate() { let pos = Position { x: pos2.x + rel_pos[0], y: pos2.y + rel_pos[1], }; if *self == pos { return Some(i as u32); } } None } } impl From<&[i32; 2]> for Position { fn from(xy: &[i32; 2]) -> Self { Self { x: xy[0], y: xy[1] } } } pub(crate) struct Size { pub width: i32, pub height: i32, } pub(crate) struct BoundingBox { pub x0: i32, pub y0: i32, pub x1: i32, pub y1: i32, } #[derive(Copy, Clone, Serialize, Deserialize, RotateEnum, PartialEq)] pub(crate) enum Rotation { Left, Top, Right, Bottom, } impl Rotation { pub fn delta(&self) -> (i32, i32) { match self { Rotation::Left => (-1, 0), Rotation::Top => (0, -1), Rotation::Right => (1, 0), Rotation::Bottom => (0, 1), } } pub fn delta_inv(&self) -> (i32, i32) { let delta = self.delta(); (-delta.0, -delta.1) } pub fn angle_deg(&self) -> i32 { self.angle_4() * 90 } pub fn angle_4(&self) -> i32 { match self { Rotation::Left => 2, Rotation::Top => 3, Rotation::Right => 0, Rotation::Bottom => 1, } } pub fn angle_rad(&self) -> f64 { self.angle_deg() as f64 * std::f64::consts::PI / 180. 
} pub fn is_horizontal(&self) -> bool { matches!(self, Rotation::Left | Rotation::Right) } pub fn is_vertcial(&self) -> bool { !self.is_horizontal() } } pub(crate) enum FrameProcResult { None, InventoryChanged(Position), } pub(crate) enum ItemResponse { Move(i32, i32), Consume, } pub(crate) type ItemResponseResult = (ItemResponse, Option<FrameProcResult>); #[derive(Debug)] pub(crate) enum RotateErr { NotFound, NotSupported, Other(JsValue), } pub(crate) trait Structure { fn name(&self) -> &str; fn position(&self) -> &Position; fn rotation(&self) -> Option<Rotation> { None } /// Specialized method to get underground belt direction. /// We don't like to put this to Structure trait method, but we don't have an option /// as long as we use trait object polymorphism. /// TODO: Revise needed in ECS. fn under_direction(&self) -> Option<UnderDirection> { None } fn size(&self) -> Size { Size { width: 1, height: 1, } } fn bounding_box(&self) -> BoundingBox { let (position, size) = (self.position(), self.size()); BoundingBox { x0: position.x, y0: position.y, x1: position.x + size.width, y1: position.y + size.height, } } fn contains(&self, pos: &Position) -> bool { let bb = self.bounding_box(); bb.x0 <= pos.x && pos.x < bb.x1 && bb.y0 <= pos.y && pos.y < bb.y1 } fn draw( &self, state: &FactorishState, context: &CanvasRenderingContext2d, depth: i32, is_tooptip: bool, ) -> Result<(), JsValue>; fn desc(&self, _state: &FactorishState) -> String { String::from("") } fn frame_proc( &mut self, _me: StructureId, _state: &mut FactorishState, _structures: &mut StructureDynIter, ) -> Result<FrameProcResult, ()> { Ok(FrameProcResult::None) } /// event handler for costruction events around the structure. fn on_construction( &mut self, _other_id: StructureId, _other: &dyn Structure, _others: &StructureDynIter, _construct: bool, ) -> Result<(), JsValue> { Ok(()) } /// event handler for costruction events for this structure itself. 
fn on_construction_self( &mut self, _id: StructureId, _others: &StructureDynIter, _construct: bool, ) -> Result<(), JsValue> { Ok(()) } fn movable(&self) -> bool { false } fn rotate( &mut self, _state: &mut FactorishState, _others: &StructureDynIter, ) -> Result<(), RotateErr> { Err(RotateErr::NotSupported) } fn set_rotation(&mut self, _rotation: &Rotation) -> Result<(), ()> { Err(()) } /// Called every frame for each item that is on this structure. fn item_response(&mut self, _item: &DropItem) -> Result<ItemResponseResult, ()> { Err(()) } fn input(&mut self, _o: &DropItem) -> Result<(), JsValue> { Err(JsValue::from_str("Not supported")) } /// Returns wheter the structure can accept an item as the input. If this structure is a factory /// that returns recipes by get_selected_recipe(), it will check if it's in the inputs. fn can_input(&self, item_type: &ItemType) -> bool { if let Some(recipe) = self.get_selected_recipe() { recipe.input.get(item_type).is_some() } else
} /// Query a set of items that this structure can output. Actual output would not happen until `output()`, thus /// this method is immutable. It should return empty Inventory if it cannot output anything. fn can_output(&self, _structures: &StructureDynIter) -> Inventory { Inventory::new() } /// Perform actual output. The operation should always succeed since the output-tability is checked beforehand /// with `can_output`. fn output(&mut self, _state: &mut FactorishState, _item_type: &ItemType) -> Result<(), ()> { Err(()) } fn burner_inventory(&self) -> Option<&Inventory> { None } fn add_burner_inventory(&mut self, _item_type: &ItemType, _amount: isize) -> isize { 0 } fn burner_energy(&self) -> Option<(f64, f64)> { None } fn inventory(&self, _is_input: bool) -> Option<&Inventory> { None } fn inventory_mut(&mut self, _is_input: bool) -> Option<&mut Inventory> { None } /// Some structures don't have an inventory, but still can have some item, e.g. inserter hands. /// We need to retrieve them when we destory such a structure, or we might lose items into void. /// It will take away the inventory by default, destroying the instance's inventory. fn destroy_inventory(&mut self) -> Inventory { let mut ret = self .inventory_mut(true) .map_or(Inventory::new(), |inventory| std::mem::take(inventory)); ret.merge( self.inventory_mut(false) .map_or(Inventory::new(), |inventory| std::mem::take(inventory)), ); ret } /// Returns a list of recipes. The return value is wrapped in a Cow because some /// structures can return dynamically configured list of recipes, while some others /// have static fixed list of recipes. In reality, all our structures return a fixed list though. 
fn get_recipes(&self) -> Cow<[Recipe]> { Cow::from(&[][..]) } fn select_recipe(&mut self, _index: usize) -> Result<bool, JsValue> { Err(JsValue::from_str("recipes not available")) } fn get_selected_recipe(&self) -> Option<&Recipe> { None } fn fluid_box(&self) -> Option<Vec<&FluidBox>> { None } fn fluid_box_mut(&mut self) -> Option<Vec<&mut FluidBox>> { None } fn connection( &self, state: &FactorishState, structures: &dyn DynIter<Item = StructureEntry>, ) -> [bool; 4] { // let mut structures_copy = structures.clone(); let has_fluid_box = |x, y| { if x < 0 || state.width <= x as u32 || y < 0 || state.height <= y as u32 { return false; } if let Some(structure) = structures .dyn_iter() .filter_map(|s| s.dynamic.as_deref()) .find(|s| *s.position() == Position { x, y }) { return structure.fluid_box().is_some(); } false }; // Fluid containers connect to other containers let Position { x, y } = *self.position(); let l = has_fluid_box(x - 1, y); let t = has_fluid_box(x, y - 1); let r = has_fluid_box(x + 1, y); let b = has_fluid_box(x, y + 1); [l, t, r, b] } /// If this structure can connect to power grid. fn power_source(&self) -> bool { false } /// If this structure drains power from the grid fn power_sink(&self) -> bool { false } /// Try to drain power from this structure. /// @param demand in kilojoules. /// @returns None if it does not support power supply. fn power_outlet(&mut self, _demand: f64) -> Option<f64> { None } fn wire_reach(&self) -> u32 { 3 } fn serialize(&self) -> serde_json::Result<serde_json::Value>; } pub(crate) type StructureBoxed = Box<dyn Structure>; pub(crate) struct StructureEntry { pub gen: u32, pub dynamic: Option<StructureBoxed>, }
{ false }
conditional_block
structure.rs
mod iter; use super::{ drop_items::DropItem, dyn_iter::{DynIter, DynIterMut}, items::ItemType, underground_belt::UnderDirection, water_well::FluidBox, FactorishState, Inventory, InventoryTrait, Recipe, }; use rotate_enum::RotateEnum; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use wasm_bindgen::prelude::*; use web_sys::CanvasRenderingContext2d; #[macro_export] macro_rules! serialize_impl { () => { fn serialize(&self) -> serde_json::Result<serde_json::Value> { serde_json::to_value(self) } }; } #[macro_export] macro_rules! draw_fuel_alarm { ($self_:expr, $state:expr, $context:expr) => { if $self_.recipe.is_some() && $self_.power == 0. && $state.sim_time % 1. < 0.5 { if let Some(img) = $state.image_fuel_alarm.as_ref() { let (x, y) = ( $self_.position.x as f64 * 32., $self_.position.y as f64 * 32., ); $context.draw_image_with_image_bitmap(&img.bitmap, x, y)?; } else { return js_err!("fuel alarm image not available"); } } }; } #[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, Hash)] pub(crate) struct StructureId { pub id: u32, pub gen: u32, } pub(crate) struct StructureEntryIterator<'a>(&'a mut [StructureEntry], &'a mut [StructureEntry]); impl<'a> DynIter for StructureEntryIterator<'a> { type Item = StructureEntry; fn dyn_iter(&self) -> Box<dyn Iterator<Item = &Self::Item> + '_> { Box::new(self.0.iter().chain(self.1.iter())) } fn as_dyn_iter(&self) -> &dyn DynIter<Item = Self::Item> { self } } impl<'a> DynIterMut for StructureEntryIterator<'a> { fn dyn_iter_mut(&mut self) -> Box<dyn Iterator<Item = &mut Self::Item> + '_> { Box::new(self.0.iter_mut().chain(self.1.iter_mut())) } } pub(crate) use self::iter::StructureDynIter; #[derive(Eq, PartialEq, Hash, Copy, Clone, Debug, Serialize, Deserialize)] pub(crate) struct Position { pub x: i32, pub y: i32, } impl Position { pub fn new(x: i32, y: i32) -> Self { Self { x, y } } pub(crate) fn div_mod(&self, size: i32) -> (Position, Position) { let div = Position::new(self.x.div_euclid(size), 
self.y.div_euclid(size)); let mod_ = Position::new(self.x.rem_euclid(size), self.y.rem_euclid(size)); (div, mod_) } pub(crate) fn add(&self, o: (i32, i32)) -> Position { Self { x: self.x + o.0, y: self.y + o.1, } } pub(crate) fn distance(&self, position: &Position) -> i32 { (position.x - self.x).abs().max((position.y - self.y).abs()) } /// Check whether the positions are neighbors. Return false if they are exactly the same. #[allow(dead_code)] pub(crate) fn is_neighbor(&self, pos2: &Position) -> bool { [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().any(|rel_pos| { let pos = Position { x: pos2.x + rel_pos[0], y: pos2.y + rel_pos[1], }; *self == pos }) } pub(crate) fn neighbor_index(&self, pos2: &Position) -> Option<u32> { for (i, rel_pos) in [[-1, 0], [0, -1], [1, 0], [0, 1]].iter().enumerate() { let pos = Position { x: pos2.x + rel_pos[0], y: pos2.y + rel_pos[1], }; if *self == pos { return Some(i as u32); } } None } } impl From<&[i32; 2]> for Position { fn from(xy: &[i32; 2]) -> Self { Self { x: xy[0], y: xy[1] } } } pub(crate) struct Size { pub width: i32, pub height: i32, } pub(crate) struct BoundingBox { pub x0: i32, pub y0: i32, pub x1: i32, pub y1: i32, } #[derive(Copy, Clone, Serialize, Deserialize, RotateEnum, PartialEq)] pub(crate) enum Rotation { Left, Top, Right, Bottom, } impl Rotation { pub fn delta(&self) -> (i32, i32) { match self { Rotation::Left => (-1, 0), Rotation::Top => (0, -1), Rotation::Right => (1, 0), Rotation::Bottom => (0, 1), } } pub fn delta_inv(&self) -> (i32, i32) { let delta = self.delta(); (-delta.0, -delta.1) } pub fn angle_deg(&self) -> i32 { self.angle_4() * 90 } pub fn angle_4(&self) -> i32 { match self { Rotation::Left => 2, Rotation::Top => 3, Rotation::Right => 0, Rotation::Bottom => 1, } } pub fn angle_rad(&self) -> f64 { self.angle_deg() as f64 * std::f64::consts::PI / 180. 
} pub fn is_horizontal(&self) -> bool { matches!(self, Rotation::Left | Rotation::Right) } pub fn is_vertcial(&self) -> bool { !self.is_horizontal() } } pub(crate) enum FrameProcResult { None, InventoryChanged(Position), } pub(crate) enum ItemResponse { Move(i32, i32), Consume, } pub(crate) type ItemResponseResult = (ItemResponse, Option<FrameProcResult>); #[derive(Debug)] pub(crate) enum RotateErr { NotFound, NotSupported, Other(JsValue), } pub(crate) trait Structure { fn name(&self) -> &str; fn position(&self) -> &Position; fn rotation(&self) -> Option<Rotation> { None } /// Specialized method to get underground belt direction. /// We don't like to put this to Structure trait method, but we don't have an option /// as long as we use trait object polymorphism. /// TODO: Revise needed in ECS. fn under_direction(&self) -> Option<UnderDirection> { None } fn size(&self) -> Size { Size { width: 1, height: 1, } } fn bounding_box(&self) -> BoundingBox { let (position, size) = (self.position(), self.size()); BoundingBox { x0: position.x, y0: position.y, x1: position.x + size.width, y1: position.y + size.height, } } fn contains(&self, pos: &Position) -> bool { let bb = self.bounding_box(); bb.x0 <= pos.x && pos.x < bb.x1 && bb.y0 <= pos.y && pos.y < bb.y1 } fn draw( &self, state: &FactorishState, context: &CanvasRenderingContext2d, depth: i32, is_tooptip: bool, ) -> Result<(), JsValue>; fn desc(&self, _state: &FactorishState) -> String { String::from("") } fn frame_proc( &mut self, _me: StructureId, _state: &mut FactorishState, _structures: &mut StructureDynIter, ) -> Result<FrameProcResult, ()> { Ok(FrameProcResult::None) } /// event handler for costruction events around the structure. fn on_construction( &mut self, _other_id: StructureId, _other: &dyn Structure, _others: &StructureDynIter, _construct: bool, ) -> Result<(), JsValue> { Ok(()) } /// event handler for costruction events for this structure itself. 
fn on_construction_self( &mut self, _id: StructureId, _others: &StructureDynIter, _construct: bool, ) -> Result<(), JsValue> { Ok(()) } fn movable(&self) -> bool { false } fn rotate( &mut self, _state: &mut FactorishState, _others: &StructureDynIter, ) -> Result<(), RotateErr> { Err(RotateErr::NotSupported) } fn set_rotation(&mut self, _rotation: &Rotation) -> Result<(), ()> { Err(()) } /// Called every frame for each item that is on this structure. fn item_response(&mut self, _item: &DropItem) -> Result<ItemResponseResult, ()> { Err(()) } fn input(&mut self, _o: &DropItem) -> Result<(), JsValue> { Err(JsValue::from_str("Not supported")) } /// Returns wheter the structure can accept an item as the input. If this structure is a factory /// that returns recipes by get_selected_recipe(), it will check if it's in the inputs. fn can_input(&self, item_type: &ItemType) -> bool { if let Some(recipe) = self.get_selected_recipe() { recipe.input.get(item_type).is_some() } else { false } } /// Query a set of items that this structure can output. Actual output would not happen until `output()`, thus /// this method is immutable. It should return empty Inventory if it cannot output anything. fn can_output(&self, _structures: &StructureDynIter) -> Inventory { Inventory::new() } /// Perform actual output. The operation should always succeed since the output-tability is checked beforehand /// with `can_output`. fn output(&mut self, _state: &mut FactorishState, _item_type: &ItemType) -> Result<(), ()> { Err(()) } fn burner_inventory(&self) -> Option<&Inventory> { None } fn
(&mut self, _item_type: &ItemType, _amount: isize) -> isize { 0 } fn burner_energy(&self) -> Option<(f64, f64)> { None } fn inventory(&self, _is_input: bool) -> Option<&Inventory> { None } fn inventory_mut(&mut self, _is_input: bool) -> Option<&mut Inventory> { None } /// Some structures don't have an inventory, but still can have some item, e.g. inserter hands. /// We need to retrieve them when we destory such a structure, or we might lose items into void. /// It will take away the inventory by default, destroying the instance's inventory. fn destroy_inventory(&mut self) -> Inventory { let mut ret = self .inventory_mut(true) .map_or(Inventory::new(), |inventory| std::mem::take(inventory)); ret.merge( self.inventory_mut(false) .map_or(Inventory::new(), |inventory| std::mem::take(inventory)), ); ret } /// Returns a list of recipes. The return value is wrapped in a Cow because some /// structures can return dynamically configured list of recipes, while some others /// have static fixed list of recipes. In reality, all our structures return a fixed list though. 
fn get_recipes(&self) -> Cow<[Recipe]> { Cow::from(&[][..]) } fn select_recipe(&mut self, _index: usize) -> Result<bool, JsValue> { Err(JsValue::from_str("recipes not available")) } fn get_selected_recipe(&self) -> Option<&Recipe> { None } fn fluid_box(&self) -> Option<Vec<&FluidBox>> { None } fn fluid_box_mut(&mut self) -> Option<Vec<&mut FluidBox>> { None } fn connection( &self, state: &FactorishState, structures: &dyn DynIter<Item = StructureEntry>, ) -> [bool; 4] { // let mut structures_copy = structures.clone(); let has_fluid_box = |x, y| { if x < 0 || state.width <= x as u32 || y < 0 || state.height <= y as u32 { return false; } if let Some(structure) = structures .dyn_iter() .filter_map(|s| s.dynamic.as_deref()) .find(|s| *s.position() == Position { x, y }) { return structure.fluid_box().is_some(); } false }; // Fluid containers connect to other containers let Position { x, y } = *self.position(); let l = has_fluid_box(x - 1, y); let t = has_fluid_box(x, y - 1); let r = has_fluid_box(x + 1, y); let b = has_fluid_box(x, y + 1); [l, t, r, b] } /// If this structure can connect to power grid. fn power_source(&self) -> bool { false } /// If this structure drains power from the grid fn power_sink(&self) -> bool { false } /// Try to drain power from this structure. /// @param demand in kilojoules. /// @returns None if it does not support power supply. fn power_outlet(&mut self, _demand: f64) -> Option<f64> { None } fn wire_reach(&self) -> u32 { 3 } fn serialize(&self) -> serde_json::Result<serde_json::Value>; } pub(crate) type StructureBoxed = Box<dyn Structure>; pub(crate) struct StructureEntry { pub gen: u32, pub dynamic: Option<StructureBoxed>, }
add_burner_inventory
identifier_name
main.py
#!/usr/bin/env python from PyQt5.QtCore import QDateTime, Qt, QTimer, QUrl, QAbstractTableModel, QVariant from PyQt5.QtGui import QDesktopServices from PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox, QDateTimeEdit, QDial, QDialog, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QProgressBar, QPushButton, QRadioButton, QScrollBar, QSizePolicy, QSlider, QSpinBox, QStyleFactory, QTableWidget, QTabWidget, QTextEdit, QVBoxLayout, QWidget, QFormLayout, QCompleter, QMainWindow, QTableView, QMessageBox) import webbrowser import os import pickle cargoNames = ["г/к рулоны", "х/к рулоны", "слябы", "профиль", "арматура", "г/к лист", "Нарезка", "Полимер", "Трубы"] zoneNames = [] DUMP_PATH = "cargo.pckl" class DateBase(): def __init__(self): self.rows = [] if os.path.exists(DUMP_PATH): with open(DUMP_PATH, "rb") as f: self.rows = pickle.load(f) def add(self, id, name, count, zone): for row in self.rows: if row["name"] == name and row["zone"] == zone and row["id"] == id: row["count"] += count self.save() return self.rows.append({ "id": id, "name": name, "count": count, "zone": zone }) self.save() def unload(self, id, zone, count): for row in self.rows: if row["zone"] == zone and row["id"] == id: row["count"] -= count self.sanitize() self.save() def sanitize(self): self.rows = list(filter(lambda x: x["count"] > 0, self.rows)) def save(self): with open(DUMP_PATH, "wb") as f: pickle.dump(self.rows, f) class MainLayout(QWidget): def __init__(self): super(MainLayout, self).__init__() self.createMenu() self.setLayout(self.menu) self.resize(400, 100) self.setWindowTitle("Main title"); def createMenu(self): self.menu = QFormLayout() self.addButton = QPushButton("Добавить", self) self.addButton.clicked.connect(self.showAddLayout) self.unloadButton = QPushButton("Выгрузить", self) self.unloadButton.clicked.connect(self.showFindLayout) self.listButton = QPushButton("Наличие", self) self.listButton.clicked.connect(self.showCargoListLayout) self.cargoCountInput = 
QSpinBox() self.menu.addRow(self.addButton) self.menu.addRow(self.unloadButton) self.menu.addRow(self.listButton) def setAddCargoLayout(self, addLayout): self.addLayout = addLayout def showAddLayout(self): self.addLayout.show() self.hide() def setFindCargoLayout(self, findLayout): self.findLayout = findLayout def showFindLayout(self): self.findLayout.show() self.hide() def setCargoListLayout(self, cargoListLayout): self.cargoListLayout = cargoListLayout def showCargoListLayout(self): self.cargoListLayout.reloadTable() self.cargoListLayout.show() ZONE_ROWS_COUNT = 3 class AddCargoLayout(QWidget): def __init__(self, db, mainLayout): super(AddCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.createInputForm() self.setLayout(self.inputForm) self.resize(400, 100) self.setWindowTitle("Add cargo title"); def createInputForm(self): self.inputForm = QFormLayout() self.idInput = QLineEdit() namesCompleter = QCompleter(cargoNames) namesCompleter.setCaseSensitivity(False) self.nameInput = QLineEdit() self.nameInput.setCompleter(namesCompleter) self.addButton = QPushButton("Добавить", self) self.addButton.clicked.connect(self.addCargo) self.backButton = QPushButton("Назад", self) self.backButton.clicked.connect(self.goBack) self.inputForm.addRow("Приёмный акт:", self.idInput) self.inputForm.addRow("Груз:", self.nameInput) self.zonesInput = [] self.cargosCountInput = [] for i in range(ZONE_ROWS_COUNT): self.zonesInput.append(QLineEdit()) self.cargosCountInput.append(QSpinBox()) self.cargosCountInput[-1].setMaximum(100000000) self.inputForm.addRow(QLabel(""), QLabel("")) self.inputForm.addRow("Зона {}:".format(i + 1), self.zonesInput[-1]) self.inputForm.addRow("Кол-во:", self.cargosCountInput[-1]) self.inputForm.addRow(self.addButton) self.inputForm.addRow(self.backButton) def addCargo(self): for i in range(ZONE_ROWS_COUNT): if self.zonesInput[i].text(): self.db.add( self.idInput.text(), self.nameInput.text(), int(self.cargosCountInput[i].text()), 
self.zonesInput[i].text()) self.goBack() def goBack(self): self.mainLayout.show() self.close() class UnloadCargoLayout(QDialog): def __init__(self, db, mainLayout, entries): super(UnloadCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.entries = entries self.resize(400, 100); self.createMap() self.createFindResult() self.setLayout(self.resultForm) self.setWindowTitle("Upload title"); def link(self, linkStr): webbrowser.open_new_tab(linkStr) def createMap(self): zones = [] for entry in self.entries: zones.append(entry["zone"]) map_html = None with open("map_template.html") as f: map_html = f.read() map_html = map_html.replace("%zones", str(zones).replace("'", '"')) with open("cargo.geojson") as f: map_html = map_html.replace("%geojson", f.read().replace("'", '"')) with open("map.html", "w") as f: f.write(map_html) def createFindResult(self): self.resultForm = QGridLayout() mapLink = QLabel("Карта") mapLink.linkActivated.connect(self.link) mapLink.setText('<a href="file://{}">Карта</a>'.format(os.path.join(os.getcwdb().decode("utf-8") , "map.html"))) self.cancleButton = QPushButton("Отмена", self) self.cancleButton.clicked.connect(self.closeWidget) self.resultForm.addWidget(mapLink) i = 1 self.unloadCounts = [] for entry in self.entries: zoneLabel = QLabel(entry["zone"]) countLabel = QLabel(str(entry["count"])) unloadCountInput = QSpinBox() unloadCountInput.setMaximum(entry["count"]) self.unloadCounts.append(unloadCountInput) self.resultForm.addWidget(zoneLabel, i, 0) self.resultForm.addWidget(countLabel, i, 1) self.resultForm.addWidget(unloadCountInput, i, 2) i += 1 self.unloadButton = QPushButton("Выгрузить", self) self.unloadButton.clicked.connect(self.unload) self.resultForm.addWidget(self.unloadButton) self.resultForm.addWidget(self.cancleButton) def closeWidget(self): self.mainLayout.show() self.close() def unload(self): for entry, countBox in zip(self.entries, self.unloadCounts): count = int(countBox.text()) if count == 0: continue 
self.db.unload(entry["id"], entry["zone"], count) self.closeWidget() class FindCargoLayout(QWidget): def __init__(self, db, mainLayout): super(FindCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.createInputForm() self.setLayout(self.inputForm) self.setWindowTitle("Find cargo title") self.resize(400, 100); def createInputForm(self): self.inputForm = QFormLayout() self.idInput = QLineEdit() self.findButton = QPushButton("Найти", self) self.findButton.clicked.connect(self.findCargo) self.backButton = QPushButton("Назад", self) self.backButton.clicked.connect(self.goBack) self.inputForm.addRow("Приёмный акт:", self.idInput) self.inputForm.addRow(self.findButton) self.inputForm.addRow(self.backButton) def findCargo(self): entries = [] for row in self.db.rows: if row["id"] == self.idInput.text(): entries.append({ "zone" : row["zone"], "count" : row["count"], "id": row["id"] }) if not entries: self.notFound() return unloadLayout = UnloadCargoLayout(self.db, self.mainLayout, entries) unloadLayout.exec() self.close() def notFound(self): msgBox = QMessageBox() msgBox.setIcon(QMessageBox.Information) msgBox.setText("Приёмный акт не найден") msgBox.setStandardButtons(QMessageBox.Ok) msgBox.exec() def goBack(self): self.mainLayout.show() self.close() class TableModel(QAbstractTableModel): def __init__(self, db): super(TableModel, self).__init__() self.db = db self.headers = ["Приёмный акт", "Груз", "Кол-во", "Зона"] self.column_keys = ["id", "name", "count", "zone"] def rowCount(self, parent): return len(self.db.rows) def columnCount(self, parent): return len(self.headers) def data(self, index, role): if role != Qt.DisplayRole: return QVariant() return self.db.rows[index.row()][self.column
.column()]] def headerData(self, section, orientation, role): if role != Qt.DisplayRole or orientation != Qt.Horizontal: return QVariant() return self.headers[section] class ListCargoLayout(QTableView): def __init__(self, db, mainLayout): super(ListCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.resize(400, 200); self.reloadTable() self.setWindowTitle("List cargo title"); def reloadTable(self): model = TableModel(db) self.setModel(model) if __name__ == '__main__': import sys abspath = os.path.abspath(__file__) dname = os.path.dirname(abspath) os.chdir(dname) db = DateBase() app = QApplication(sys.argv) mainLayout = MainLayout() addCargoLayout = AddCargoLayout(db, mainLayout) mainLayout.setAddCargoLayout(addCargoLayout) findCargoLayout = FindCargoLayout(db, mainLayout) mainLayout.setFindCargoLayout(findCargoLayout) listLayout = ListCargoLayout(db, mainLayout) mainLayout.setCargoListLayout(listLayout) mainLayout.show() sys.exit(app.exec_())
_keys[index
identifier_name
main.py
#!/usr/bin/env python from PyQt5.QtCore import QDateTime, Qt, QTimer, QUrl, QAbstractTableModel, QVariant from PyQt5.QtGui import QDesktopServices from PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox, QDateTimeEdit, QDial, QDialog, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QProgressBar, QPushButton, QRadioButton, QScrollBar, QSizePolicy, QSlider, QSpinBox, QStyleFactory, QTableWidget, QTabWidget, QTextEdit, QVBoxLayout, QWidget, QFormLayout, QCompleter, QMainWindow, QTableView, QMessageBox) import webbrowser import os import pickle cargoNames = ["г/к рулоны", "х/к рулоны", "слябы", "профиль", "арматура", "г/к лист", "Нарезка", "Полимер", "Трубы"] zoneNames = [] DUMP_PATH = "cargo.pckl" class DateBase(): def __init__(self): self.rows = [] if os.path.exists(DUMP_PATH): with open(DUMP_PATH, "rb") as f: self.rows = pickle.load(f) def add(self, id, name, count, zone): for row in self.rows: if row["name"] == name and row["zone"] == zone and row["id"] == id: row["count"] += count self.save() return self.rows.append({ "id": id, "name": name, "count": count, "zone": zone }) self.save() def unload(self, id, zone, count): for row in self.rows: if row["zone"] == zone and row["id"] == id: row["count"] -= count self.sanitize() self.save() def sanitize(self): self.rows = list(filter(lambda x: x["count"] > 0, self.rows)) def save(self): with open(DUMP_PATH, "wb") as f: pickle.dump(self.rows, f) class MainLayout(QWidget): def __init__(self): super(MainLayout, self).__init__() self.createMenu() self.setLayout(self.menu) self.resize(400, 100) self.setWindowTitle("Main title"); def createMenu(self): self.menu = QFormLayout() self.addButton = QPushButton("Добавить", self) self.addButton.clicked.connect(self.showAddLayout) self.unloadButton = QPushButton("Выгрузить", self) self.unloadButton.clicked.connect(self.showFindLayout) self.listButton = QPushButton("Наличие", self) self.listButton.clicked.connect(self.showCargoListLayout) self.cargoCountInput = 
QSpinBox() self.menu.addRow(self.addButton) self.menu.addRow(self.unloadButton) self.menu.addRow(self.listButton) def setAddCargoLayout(self, addLayout): self.addLayout = addLayout def showAddLayout(self): self.addLayout.show() self.hide() def setFindCargoLayout(self, findLayout): self.findLayout = findLayout def showFindLayout(self): self.findLayout.show() self.hide() def setCargoListLayout(self, cargoListLayout): self.cargoListLayout = cargoListLayout def showCargoListLayout(self): self.cargoListLayout.reloadTable() self.cargoListLayout.show() ZONE_ROWS_COUNT = 3 class AddCargoLayout(QWidget): def __init__(self, db, mainLayout): super(AddCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.createInputForm()
def createInputForm(self): self.inputForm = QFormLayout() self.idInput = QLineEdit() namesCompleter = QCompleter(cargoNames) namesCompleter.setCaseSensitivity(False) self.nameInput = QLineEdit() self.nameInput.setCompleter(namesCompleter) self.addButton = QPushButton("Добавить", self) self.addButton.clicked.connect(self.addCargo) self.backButton = QPushButton("Назад", self) self.backButton.clicked.connect(self.goBack) self.inputForm.addRow("Приёмный акт:", self.idInput) self.inputForm.addRow("Груз:", self.nameInput) self.zonesInput = [] self.cargosCountInput = [] for i in range(ZONE_ROWS_COUNT): self.zonesInput.append(QLineEdit()) self.cargosCountInput.append(QSpinBox()) self.cargosCountInput[-1].setMaximum(100000000) self.inputForm.addRow(QLabel(""), QLabel("")) self.inputForm.addRow("Зона {}:".format(i + 1), self.zonesInput[-1]) self.inputForm.addRow("Кол-во:", self.cargosCountInput[-1]) self.inputForm.addRow(self.addButton) self.inputForm.addRow(self.backButton) def addCargo(self): for i in range(ZONE_ROWS_COUNT): if self.zonesInput[i].text(): self.db.add( self.idInput.text(), self.nameInput.text(), int(self.cargosCountInput[i].text()), self.zonesInput[i].text()) self.goBack() def goBack(self): self.mainLayout.show() self.close() class UnloadCargoLayout(QDialog): def __init__(self, db, mainLayout, entries): super(UnloadCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.entries = entries self.resize(400, 100); self.createMap() self.createFindResult() self.setLayout(self.resultForm) self.setWindowTitle("Upload title"); def link(self, linkStr): webbrowser.open_new_tab(linkStr) def createMap(self): zones = [] for entry in self.entries: zones.append(entry["zone"]) map_html = None with open("map_template.html") as f: map_html = f.read() map_html = map_html.replace("%zones", str(zones).replace("'", '"')) with open("cargo.geojson") as f: map_html = map_html.replace("%geojson", f.read().replace("'", '"')) with open("map.html", "w") as f: 
f.write(map_html) def createFindResult(self): self.resultForm = QGridLayout() mapLink = QLabel("Карта") mapLink.linkActivated.connect(self.link) mapLink.setText('<a href="file://{}">Карта</a>'.format(os.path.join(os.getcwdb().decode("utf-8") , "map.html"))) self.cancleButton = QPushButton("Отмена", self) self.cancleButton.clicked.connect(self.closeWidget) self.resultForm.addWidget(mapLink) i = 1 self.unloadCounts = [] for entry in self.entries: zoneLabel = QLabel(entry["zone"]) countLabel = QLabel(str(entry["count"])) unloadCountInput = QSpinBox() unloadCountInput.setMaximum(entry["count"]) self.unloadCounts.append(unloadCountInput) self.resultForm.addWidget(zoneLabel, i, 0) self.resultForm.addWidget(countLabel, i, 1) self.resultForm.addWidget(unloadCountInput, i, 2) i += 1 self.unloadButton = QPushButton("Выгрузить", self) self.unloadButton.clicked.connect(self.unload) self.resultForm.addWidget(self.unloadButton) self.resultForm.addWidget(self.cancleButton) def closeWidget(self): self.mainLayout.show() self.close() def unload(self): for entry, countBox in zip(self.entries, self.unloadCounts): count = int(countBox.text()) if count == 0: continue self.db.unload(entry["id"], entry["zone"], count) self.closeWidget() class FindCargoLayout(QWidget): def __init__(self, db, mainLayout): super(FindCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.createInputForm() self.setLayout(self.inputForm) self.setWindowTitle("Find cargo title") self.resize(400, 100); def createInputForm(self): self.inputForm = QFormLayout() self.idInput = QLineEdit() self.findButton = QPushButton("Найти", self) self.findButton.clicked.connect(self.findCargo) self.backButton = QPushButton("Назад", self) self.backButton.clicked.connect(self.goBack) self.inputForm.addRow("Приёмный акт:", self.idInput) self.inputForm.addRow(self.findButton) self.inputForm.addRow(self.backButton) def findCargo(self): entries = [] for row in self.db.rows: if row["id"] == self.idInput.text(): 
entries.append({ "zone" : row["zone"], "count" : row["count"], "id": row["id"] }) if not entries: self.notFound() return unloadLayout = UnloadCargoLayout(self.db, self.mainLayout, entries) unloadLayout.exec() self.close() def notFound(self): msgBox = QMessageBox() msgBox.setIcon(QMessageBox.Information) msgBox.setText("Приёмный акт не найден") msgBox.setStandardButtons(QMessageBox.Ok) msgBox.exec() def goBack(self): self.mainLayout.show() self.close() class TableModel(QAbstractTableModel): def __init__(self, db): super(TableModel, self).__init__() self.db = db self.headers = ["Приёмный акт", "Груз", "Кол-во", "Зона"] self.column_keys = ["id", "name", "count", "zone"] def rowCount(self, parent): return len(self.db.rows) def columnCount(self, parent): return len(self.headers) def data(self, index, role): if role != Qt.DisplayRole: return QVariant() return self.db.rows[index.row()][self.column_keys[index.column()]] def headerData(self, section, orientation, role): if role != Qt.DisplayRole or orientation != Qt.Horizontal: return QVariant() return self.headers[section] class ListCargoLayout(QTableView): def __init__(self, db, mainLayout): super(ListCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.resize(400, 200); self.reloadTable() self.setWindowTitle("List cargo title"); def reloadTable(self): model = TableModel(db) self.setModel(model) if __name__ == '__main__': import sys abspath = os.path.abspath(__file__) dname = os.path.dirname(abspath) os.chdir(dname) db = DateBase() app = QApplication(sys.argv) mainLayout = MainLayout() addCargoLayout = AddCargoLayout(db, mainLayout) mainLayout.setAddCargoLayout(addCargoLayout) findCargoLayout = FindCargoLayout(db, mainLayout) mainLayout.setFindCargoLayout(findCargoLayout) listLayout = ListCargoLayout(db, mainLayout) mainLayout.setCargoListLayout(listLayout) mainLayout.show() sys.exit(app.exec_())
self.setLayout(self.inputForm) self.resize(400, 100) self.setWindowTitle("Add cargo title");
random_line_split
main.py
#!/usr/bin/env python from PyQt5.QtCore import QDateTime, Qt, QTimer, QUrl, QAbstractTableModel, QVariant from PyQt5.QtGui import QDesktopServices from PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox, QDateTimeEdit, QDial, QDialog, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QProgressBar, QPushButton, QRadioButton, QScrollBar, QSizePolicy, QSlider, QSpinBox, QStyleFactory, QTableWidget, QTabWidget, QTextEdit, QVBoxLayout, QWidget, QFormLayout, QCompleter, QMainWindow, QTableView, QMessageBox) import webbrowser import os import pickle cargoNames = ["г/к рулоны", "х/к рулоны", "слябы", "профиль", "арматура", "г/к лист", "Нарезка", "Полимер", "Трубы"] zoneNames = [] DUMP_PATH = "cargo.pckl" class DateBase(): def __init__(self): self.rows = [] if os.path.exists(DUMP_PATH): with open(DUMP_PATH, "rb") as f: self.rows = pickle.load(f) def add(self, id, name, count, zone): for row in self.rows: if row["name"] == name and row["zone"] == zone and row["id"] == id: row["count"] += count self.save() return self.rows.append({ "id": id, "name": name, "count": count, "zone": zone }) self.save() def unload(self, id, zone, count): for row in self.rows: if row["zone"] == zone and row["id"] == id: row["count"] -= count self.sanitize() self.save() def sanitize(self): self.rows = list(filter(lambda x: x["count"] > 0, self.rows)) def save(self): with open(DUMP_PATH, "wb") as f: pickle.dump(self.rows, f) class MainLayout(QWidget): def __init__(self): super(MainLayout, self).__init__() self.createMenu() self.setLayout(self.menu) self.resize(400, 100) self.setWindowTitle("Main title"); def createMenu(self): self.menu = QFormLayout() self.addButton = QPushButton("Добавить", self) self.addButton.clicked.connect(self.showAddLayout) self.unloadButton = QPushButton("Выгрузить", self) self.unloadButton.clicked.connect(self.showFindLayout) self.listButton = QPushButton("Наличие", self) self.listButton.clicked.connect(self.showCargoListLayout) self.cargoCountInput = 
QSpinBox() self.menu.addRow(self.addButton) self.menu.addRow(self.unloadButton) self.menu.addRow(self.listButton) def setAddCargoLayout(self, addLayout): self.addLayout = addLayout def showAddLayout(self): self.addLayout.show() self.hide() def setFindCargoLayout(self, findLayout): self.findLayout = findLayout def showFindLayout(self): self.findLayout.show() self.hide() def setCargoListLayout(self, cargoListLayout): self.cargoListLayout = cargoListLayout def showCargoListLayout(self): self.cargoListLayout.reloadTable() self.cargoListLayout.show() ZONE_ROWS_COUNT = 3 class AddCargoLayout(QWidget): def __init__(self, db, mainLayout): super(AddCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.createInputForm() self.setLayout(self.inputForm) self.resize(400, 100) self.setWindowTitle("Add cargo title"); def createInputForm(self): self.inputForm = QFormLayout() self.idInput = QLineEdit() namesCompleter = QCompleter(cargoNames) namesCompleter.setCaseSensitivity(False) self.nameInput = QLineEdit() self.nameInput.setCompleter(namesCompleter) self.addButton = QPushButton("Добавить", self) self.addButton.clicked.connect(self.addCargo) self.backButton = QPushButton("Назад", self) self.backButton.clicked.connect(self.goBack) self.inputForm.addRow("Приёмный акт:", self.idInput) self.inputForm.addRow("Груз:", self.nameInput) self.zonesInput = [] self.cargosCountInput = [] for i in range(ZONE_ROWS_COUNT): self.zonesInput.append(QLineEdit()) self.cargosCountInput.append(QSpinBox()) self.cargosCountInput[-1].setMaximum(100000000) self.inputForm.addRow(QLabel(""), QLabel("")) self.inputForm.addRow("Зона {}:".format(i + 1), self.zonesInput[-1]) self.inputForm.addRow("Кол-во:", self.cargosCountInput[-1]) self.inputForm.addRow(self.addButton) self.inputForm.addRow(self.backButton) def addCargo(self): for i in range(ZONE_ROWS_COUNT): if self.zonesInput[i].text(): self.db.add( self.idInput.text(), self.nameInput.text(), int(self.cargosCountInput[i].text()), 
self.zonesInput[i].text()) self.goBack() def goBack(self): self.mainLayout.show() self.close() class UnloadCargoLayout(QDialog): def __init__(self, db, mainLayout, entries): super(UnloadCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.entries = entries self.resize(400, 100); self.createMap() self.createFindResult() self.setLayout(self.resultForm) self.setWindowTitle("Upload title"); def link(self, linkStr): webbrowser.open_new_tab(linkStr) def createMap(self): zones = [] for entry in self.entries: zones.append(entry["zone"]) map_html = None wit
ink.linkActivated.connect(self.link) mapLink.setText('<a href="file://{}">Карта</a>'.format(os.path.join(os.getcwdb().decode("utf-8") , "map.html"))) self.cancleButton = QPushButton("Отмена", self) self.cancleButton.clicked.connect(self.closeWidget) self.resultForm.addWidget(mapLink) i = 1 self.unloadCounts = [] for entry in self.entries: zoneLabel = QLabel(entry["zone"]) countLabel = QLabel(str(entry["count"])) unloadCountInput = QSpinBox() unloadCountInput.setMaximum(entry["count"]) self.unloadCounts.append(unloadCountInput) self.resultForm.addWidget(zoneLabel, i, 0) self.resultForm.addWidget(countLabel, i, 1) self.resultForm.addWidget(unloadCountInput, i, 2) i += 1 self.unloadButton = QPushButton("Выгрузить", self) self.unloadButton.clicked.connect(self.unload) self.resultForm.addWidget(self.unloadButton) self.resultForm.addWidget(self.cancleButton) def closeWidget(self): self.mainLayout.show() self.close() def unload(self): for entry, countBox in zip(self.entries, self.unloadCounts): count = int(countBox.text()) if count == 0: continue self.db.unload(entry["id"], entry["zone"], count) self.closeWidget() class FindCargoLayout(QWidget): def __init__(self, db, mainLayout): super(FindCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.createInputForm() self.setLayout(self.inputForm) self.setWindowTitle("Find cargo title") self.resize(400, 100); def createInputForm(self): self.inputForm = QFormLayout() self.idInput = QLineEdit() self.findButton = QPushButton("Найти", self) self.findButton.clicked.connect(self.findCargo) self.backButton = QPushButton("Назад", self) self.backButton.clicked.connect(self.goBack) self.inputForm.addRow("Приёмный акт:", self.idInput) self.inputForm.addRow(self.findButton) self.inputForm.addRow(self.backButton) def findCargo(self): entries = [] for row in self.db.rows: if row["id"] == self.idInput.text(): entries.append({ "zone" : row["zone"], "count" : row["count"], "id": row["id"] }) if not entries: 
self.notFound() return unloadLayout = UnloadCargoLayout(self.db, self.mainLayout, entries) unloadLayout.exec() self.close() def notFound(self): msgBox = QMessageBox() msgBox.setIcon(QMessageBox.Information) msgBox.setText("Приёмный акт не найден") msgBox.setStandardButtons(QMessageBox.Ok) msgBox.exec() def goBack(self): self.mainLayout.show() self.close() class TableModel(QAbstractTableModel): def __init__(self, db): super(TableModel, self).__init__() self.db = db self.headers = ["Приёмный акт", "Груз", "Кол-во", "Зона"] self.column_keys = ["id", "name", "count", "zone"] def rowCount(self, parent): return len(self.db.rows) def columnCount(self, parent): return len(self.headers) def data(self, index, role): if role != Qt.DisplayRole: return QVariant() return self.db.rows[index.row()][self.column_keys[index.column()]] def headerData(self, section, orientation, role): if role != Qt.DisplayRole or orientation != Qt.Horizontal: return QVariant() return self.headers[section] class ListCargoLayout(QTableView): def __init__(self, db, mainLayout): super(ListCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.resize(400, 200); self.reloadTable() self.setWindowTitle("List cargo title"); def reloadTable(self): model = TableModel(db) self.setModel(model) if __name__ == '__main__': import sys abspath = os.path.abspath(__file__) dname = os.path.dirname(abspath) os.chdir(dname) db = DateBase() app = QApplication(sys.argv) mainLayout = MainLayout() addCargoLayout = AddCargoLayout(db, mainLayout) mainLayout.setAddCargoLayout(addCargoLayout) findCargoLayout = FindCargoLayout(db, mainLayout) mainLayout.setFindCargoLayout(findCargoLayout) listLayout = ListCargoLayout(db, mainLayout) mainLayout.setCargoListLayout(listLayout) mainLayout.show() sys.exit(app.exec_())
h open("map_template.html") as f: map_html = f.read() map_html = map_html.replace("%zones", str(zones).replace("'", '"')) with open("cargo.geojson") as f: map_html = map_html.replace("%geojson", f.read().replace("'", '"')) with open("map.html", "w") as f: f.write(map_html) def createFindResult(self): self.resultForm = QGridLayout() mapLink = QLabel("Карта") mapL
identifier_body
main.py
#!/usr/bin/env python from PyQt5.QtCore import QDateTime, Qt, QTimer, QUrl, QAbstractTableModel, QVariant from PyQt5.QtGui import QDesktopServices from PyQt5.QtWidgets import (QApplication, QCheckBox, QComboBox, QDateTimeEdit, QDial, QDialog, QGridLayout, QGroupBox, QHBoxLayout, QLabel, QLineEdit, QProgressBar, QPushButton, QRadioButton, QScrollBar, QSizePolicy, QSlider, QSpinBox, QStyleFactory, QTableWidget, QTabWidget, QTextEdit, QVBoxLayout, QWidget, QFormLayout, QCompleter, QMainWindow, QTableView, QMessageBox) import webbrowser import os import pickle cargoNames = ["г/к рулоны", "х/к рулоны", "слябы", "профиль", "арматура", "г/к лист", "Нарезка", "Полимер", "Трубы"] zoneNames = [] DUMP_PATH = "cargo.pckl" class DateBase(): def __init__(self): self.rows = [] if os.path.exists(DUMP_PATH): with open(DUMP_PATH, "rb") as f: self.rows = pickle.load(f) def add(self, id, name, count, zone): for row in self.rows: if row["name"] == name and row["zone"] == zone and row["id"] == id: row["count"] += count self.save() return self.rows.append({ "id": id, "name": name, "count": count, "zone": zone }) self.save() def unload(self, id, zone, count): for row in self.rows: if row["zone"] == zone and row["id"] == id: row["count"] -= count self.sanitize() self.save() def sanitize(self): self.rows = list(filter(lambda x: x["count"] > 0, self.rows)) def save(self): with open(DUMP_PATH, "wb") as f: pickle.dump(self.rows, f) class MainLayout(QWidget): def __init__(self): super(MainLayout, self).__init__() self.createMenu() self.setLayout(self.menu) self.resize(400, 100) self.setWindowTitle("Main title"); def createMenu(self): self.menu = QFormLayout() self.addButton = QPushButton("Добавить", self) self.addButton.clicked.connect(self.showAddLayout) self.unloadButton = QPushButton("Выгрузить", self) self.unloadButton.clicked.connect(self.showFindLayout) self.listButton = QPushButton("Наличие", self) self.listButton.clicked.connect(self.showCargoListLayout) self.cargoCountInput = 
QSpinBox() self.menu.addRow(self.addButton) self.menu.addRow(self.unloadButton) self.menu.addRow(self.listButton) def setAddCargoLayout(self, addLayout): self.addLayout = addLayout def showAddLayout(self): self.addLayout.show() self.hide() def setFindCargoLayout(self, findLayout): self.findLayout = findLayout def showFindLayout(self): self.findLayout.show() self.hide() def setCargoListLayout(self, cargoListLayout): self.cargoListLayout = cargoListLayout def showCargoListLayout(self): self.cargoListLayout.reloadTable() self.cargoListLayout.show() ZONE_ROWS_COUNT = 3 class AddCargoLayout(QWidget): def __init__(self, db, mainLayout): super(AddCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.createInputForm() self.setLayout(self.inputForm) self.resize(400, 100) self.setWindowTitle("Add cargo title"); def createInputForm(self): self.inputForm = QFormLayout() self.idInput = QLineEdit() namesCompleter = QCompleter(cargoNames) namesCompleter.setCaseSensitivity(False) self.nameInput = QLineEdit() self.nameInput.setCompleter(namesCompleter) self.addButton = QPushButton("Добавить", self) self.addButton.clicked.connect(self.addCargo) self.backButton = QPushButton("Назад", self) self.backButton.clicked.connect(self.goBack) self.inputForm.addRow("Приёмный акт:", self.idInput) self.inputForm.addRow("Груз:", self.nameInput) self.zonesInput = [] self.cargosCountInput = [] for i in range(ZONE_ROWS_COUNT): self.zonesInput.append(QLineEdit()) self.cargosCountInput.append(QSpinBox()) self.cargosCountInput[-1].setMaximum(100000000) self.inputForm.addRow(QLabel(""), QLabel("")) self.inputForm.addRow("Зона {}:".format(i + 1), self.zonesInput[-1]) self.inputForm.addRow("Кол-во:", self.cargosCountInput[-1]) self.inputForm.addRow(self.addButton) self.inputForm.addRow(self.backButton) def addCargo(self): for i in range(ZONE_ROWS_COUNT): if self.zonesInput[i].text(): self.db.add( self.idInput.text(), self.nameInput.text(), int(self.cargosCountInput[i].text()), 
self.zonesInput[i].text()) self.goBack() def goBack(self): self.mainLayout.show() self.close() class UnloadCargoLayout(QDialog): def __init__(self, db, mainLayout, entries): super(UnloadCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.entries = entries self.resize(400, 100); self.createMap() self.createFindResult() self.setLayout(self.resultForm) self.setWindowTitle("Upload title"); def link(self, linkStr): webbrowser.open_new_tab(linkStr) def createMap(self): zones = [] for entry in self.entries: zones.append(entry["zone"]) map_html = None with open("map_template.html") as f: map_html = f.read() map_html = map_html.replace("%zones", str(zones).replace("'", '"')) with open("cargo.geojson") as f: map_html = map_html.replace("%geojson", f.read().replace("'", '"')) with open("map.html", "w") as f: f.write(map_html) def createFindResult(self): self.resultForm = QGridLayout() mapLink = QLabel("Карта") mapLink.linkActivated.connect(self.link) mapLink.setText('<a href="file://{}">Карта</a>'.format(os.path.join(os.getcwdb().decode("utf-8") , "map.html"))) self.cancleButton = QPushButton("Отмена", self) self.cancleButton.clicked.connect(self.closeWidget) self.resultForm.addWidget(mapLink) i = 1 self.unloadCounts = [] for entry in self.entries: zoneLabel = QLabel(entry["zone"]) countLabel = QLabel(str(entry["count"])) unloadCountInput = QSpinBox() unloadCountInput.setMaximum(entry["count"]) self.unloadCounts.append(unloadCountInput) self.resultForm.addWidget(zoneLabel, i, 0) self.resultForm.addWidget(countLabel, i, 1) self.resultForm.addWidget(unloadCountInput, i, 2) i += 1 self.unloadButton = QPushButton("Выгрузить", self) self.unloadButton.clicked.connect(self.unload) self.resultForm.addWidget(self.unloadButton) self.resultForm.addWidget(self.cancleButton) def closeWidget(self): self.mainLayout.show() self.close() def unload(self): for entry, countBox in zip(self.entries, self.unloadCounts): count = int(countBox.text()) if count == 0: continue 
self.db.unload(entry["id"], entry["zone"], count) self.closeWidget() class FindCargoLayout(QWidget): def __init__(self, db, mainLayout): super(FindCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.createInputForm() self.setLayout(self.inputForm) self.setWindowTitle("Find cargo title") self.resize(400, 100); def createInputForm(self): self.inputForm = QFormLayout() self.idInput = QLineEdit() self.findButton = QPushButton("Найти", self) self.findButton.clicked.connect(self.findCargo) self.backButton = QPushButton("Назад", self) self.backButton.clicked.connect(self.goBack) self.inputForm.addRow("Приёмный акт:", self.idInput) self.inputForm.addRow(self.findButton) self.inputForm.addRow(self.backButton) def findCargo(self): entries = [] for row in self.db.rows: if row["id"] == self.idInput.text(): entries.append({ "zone" : row["zone"], "count" : row["count"], "id": row["id"] }) if not entries: self.notFound() return unloadLayout = UnloadCargoLayout(self.db, self.mainLayout, entries) unloadLayout.exec() self.close() def notFound(self): msgBox = QMessageBox() msgBox.setIcon(QMessageBox.Information) msgBox.setText("Приёмный акт не найден") msgBox.setStandardButtons(QMessageBox.Ok) msgBox.exec() def goBack(self): self.mainLayout.show() self.close() class TableModel(QAbstractTableModel): def __init__(self, db): super(TableModel, self).__init__() self.db = db self.headers = ["Приёмный акт", "Груз", "Кол-во", "Зона"] self.column_keys = ["id", "name", "count", "zone"] def rowCount(self, parent): return len(self.db.rows) def columnCount(self, parent): return len(self.headers) def data(self, index, role): if role != Qt.DisplayRole: return QVariant() return self.db.rows[index.row()][self.column_keys[index.column()]] def headerData(self, section, orientation, role): if role != Qt.DisplayRole or orientation != Qt.Horizont
eturn QVariant() return self.headers[section] class ListCargoLayout(QTableView): def __init__(self, db, mainLayout): super(ListCargoLayout, self).__init__() self.db = db self.mainLayout = mainLayout self.resize(400, 200); self.reloadTable() self.setWindowTitle("List cargo title"); def reloadTable(self): model = TableModel(db) self.setModel(model) if __name__ == '__main__': import sys abspath = os.path.abspath(__file__) dname = os.path.dirname(abspath) os.chdir(dname) db = DateBase() app = QApplication(sys.argv) mainLayout = MainLayout() addCargoLayout = AddCargoLayout(db, mainLayout) mainLayout.setAddCargoLayout(addCargoLayout) findCargoLayout = FindCargoLayout(db, mainLayout) mainLayout.setFindCargoLayout(findCargoLayout) listLayout = ListCargoLayout(db, mainLayout) mainLayout.setCargoListLayout(listLayout) mainLayout.show() sys.exit(app.exec_())
al: r
conditional_block
nfo.py
import glob import html import os import re import sys import textwrap from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union import pycountry import requests from pyd2v import D2V from pymediainfo import MediaInfo, Track from pynfogen.formatter import CustomFormats class NFO: AUDIO_CHANNEL_LAYOUT_WEIGHT = { "LFE": 0.1 } IMDB_ID_T = re.compile(r"^tt\d{7,8}$") TMDB_ID_T = re.compile(r"^(tv|movie)/\d+$") TVDB_ID_T = re.compile(r"^\d+$") def __init__(self) -> None: self.media_info: MediaInfo self.file: str self.season: Optional[Union[int, str]] self.episode: Optional[int] self.episode_name: Optional[str] self.videos: List[Track] self.audio: List[Track] self.subtitles: List[Track] self.chapters: Dict[str, str] self.chapters_numbered: bool self.fanart_api_key: Optional[str] self.source: Optional[str] self.note: Optional[str] self.preview: Optional[str] self.imdb: str self.tmdb: Optional[str] self.tvdb: Optional[int] self.title_name: str self.title_year: str self.episodes: int self.release_name: str self.preview_images: List[dict[str, str]] self.banner_image: Optional[str] self.session = self.get_session() def __repr__(self) -> str: return "<{c} {attrs}>".format( c=self.__class__.__name__, attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()), ) def run(self, template: str, art: Optional[str] = None, **kwargs: Any) -> str: """ Evaluate and apply formatting on template, apply any art if provided. Any additional parameters are passed as extra variables to the template. The extra variables have priority when there's conflicting variable names. """ variables = self.__dict__ variables.update(kwargs) template = CustomFormats().format(template, **variables) if art: art = art.format(nfo=template) template = art for m in re.finditer(r"<\?([01])\?([\D\d]*?)\?>", template): # TODO: This if check is quite yucky, look into alternative options. # Ideally a custom format spec would be great. 
template = template.replace( m.group(0), m.group(2) if int(m.group(1)) else "" ) template = "\n".join(map(str.rstrip, template.splitlines(keepends=False))) return template def set_config(self, file: str, **config: Any) -> None: self.file = file self.media_info = MediaInfo.parse(self.file) self.fanart_api_key = config.get("fanart_api_key") self.source = config.get("source") self.note = config.get("note") self.preview = config.get("preview") self.season = config.get("season") self.episode, self.episode_name = config.get("episode") or (None, None) self.episodes = self.get_tv_episodes() self.release_name = self.get_release_name() self.videos = self.media_info.video_tracks self.audio = self.media_info.audio_tracks self.subtitles = self.media_info.text_tracks tracks_without_language = [ x for x in self.videos + self.audio + self.subtitles if not x.language or x.language == "und" ] if tracks_without_language: print("The following tracks have no language tag! All tracks need a language tag!") for track in tracks_without_language: print(f"{track.track_type} Track #{track.track_id} ({track.format}, {track.bit_rate / 1000} kb/s)") print( "Yes, even Video Track's have language e.g., Credits, Signs, Letters, Different Intro Sequence, etc.\n" "Don't forget to verify and add language tags to the rest of the files too!" 
) sys.exit(1) chapters = next(iter(self.media_info.menu_tracks), None) if chapters: self.chapters = { ".".join([k.replace("_", ".")[:-3], k[-3:]]): v.strip(":") for k, v in chapters.to_data().items() if f"1{k.replace('_', '')}".isdigit() } self.chapters_numbered = all( x.split(":", 1)[-1].lower() in [f"chapter {i + 1}", f"chapter {str(i + 1).zfill(2)}"] for i, x in enumerate(self.chapters.values()) ) else: self.chapters = {} self.chapters_numbered = False self.imdb = self.get_imdb_id(config.get("imdb")) self.tmdb = self.get_tmdb_id(config.get("tmdb")) self.tvdb = self.get_tvdb_id(config.get("tvdb")) self.title_name, self.title_year = self.get_title_name_year() self.banner_image = self.get_banner_image(self.tvdb) if self.tvdb and self.fanart_api_key else None self.preview_images = self.get_preview_images(self.preview) if self.preview else [] def get_imdb_id(self, imdb_id: Any) -> str: """ Get an IMDB ID from either the media's global tags, or the config. Since IMDB IDs are required for this project, it will bug the user for one interactively if not found. """ if not imdb_id: general_track = self.media_info.general_tracks[0].to_data() imdb_id = general_track.get("imdb") if not imdb_id: print("No IMDB ID was provided but is required...") while not imdb_id or not isinstance(imdb_id, str): user_id = input("IMDB ID (e.g., 'tt0487831'): ") if not self.IMDB_ID_T.match(user_id): print(f"The provided IMDB ID {user_id!r} is not valid...") print("Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').") else: imdb_id = user_id return imdb_id def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]: """ Get a TMDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. 
""" if not tmdb_id: general_track = self.media_info.general_tracks[0].to_data() tmdb_id = general_track.get("tmdb") if not tmdb_id: print("Warning: No TMDB ID was provided...") return None if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str): print(f"The provided TMDB ID {tmdb_id!r} is not valid...") print("Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').") raise ValueError("Invalid TMDB ID") return tmdb_id def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]: """ Get a TVDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. """ if not tvdb_id: general_track = self.media_info.general_tracks[0].to_data() tvdb_id = general_track.get("tvdb") if not tvdb_id: print("Warning: No TVDB ID was provided...") return None if isinstance(tvdb_id, int): tvdb_id = str(tvdb_id) if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str): print(f"The provided TVDB ID {tvdb_id!r} is not valid...") print("Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').") raise ValueError("Invalid TVDB ID") return int(tvdb_id) def get_title_name_year(self) -> Tuple[str, str]: """Scrape Title Name and Year (including e.g. 
2019-) from IMDB""" r = self.session.get(f"https://www.imdb.com/title/{self.imdb}") if r.status_code != 200: raise ValueError(f"An unexpected error occurred getting IMDB Title Page [{r.status_code}]") imdb_page = html.unescape(r.text) imdb_title = re.search( # testing ground: https://regex101.com/r/bEoEDn/1 r"<title>(?P<name>.+) \(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)" r"(?P<year>(\d{4})(|– |–\d{4})))\) - IMDb</title>", imdb_page ) if not imdb_title: raise ValueError(f"Could not scrape Movie Title or Year for {self.imdb}...") return imdb_title.group("name").strip(), imdb_title.group("year").strip() def get_tv_episodes(self) -> int: """Calculate total episode count based on neighbouring same-extension files.""" return len(glob.glob(os.path.join( os.path.dirname(self.file), f"*{os.path.splitext(self.file)[-1]}" ))) def get_release_name(self) -> str: """ Retrieve the release name based on the file used during MediaInfo. If a season was specified, but an episode number was not, it presumes the release is a Pack. Hence when pack, it uses the parent folder's name as the release name. """ if self.season is not None and self.episode is None: return os.path.basename(os.path.dirname(self.file)) return os.path.splitext(os.path.basename(self.file))[0] def get_banner_image(self, tvdb_id: int) -> Optional[str]: """ Get a wide banner image from fanart.tv. Currently restricts banners to English-only. 
""" if not tvdb_id: return None if not self.fanart_api_key: raise ValueError("Need Fanart.tv api key for TV titles!") r = self.session.get(f"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}") if r.status_code == 404: return None res = r.json() error = res.get("error message") if error: if error == "Not found": return None raise ValueError(f"An unexpected error occurred while calling Fanart.tv, {res}") banner = next(( x["url"] for x in (res.get("tvbanner") or []) if x["lang"] == sorted(self.audio, key=lambda x: x.streamorder)[0].language ), None) return banner def get_preview_images(self, url: str) -> List[Dict[str, str]]: if not url: return [] images = [] for domain in ["imgbox.com", "beyondhd.co"]: if domain not in url.lower(): continue page = self.session.get(url).text if domain == "imgbox.com": for m in re.finditer('src="(https://thumbs2.imgbox.com.+/)(\\w+)_b.([^"]+)', page): images.append({ "url": f"https://imgbox.com/{m.group(2)}", "src": f"{m.group(1)}{m.group(2)}_t.{m.group(3)}" }) elif domain == "beyondhd.co": for m in re.finditer('/image/([^"]+)"\\D+src="(https://.*beyondhd.co/images.+/(\\w+).md.[^"]+)', page): images.append({ "url": f"https://beyondhd.co/image/{m.group(1)}", "src": m.group(2) }) break return images def get_video_print(self, videos: List[Track]) -> List[List[str]]: if not videos: return [["--"]] data = [] for video in videos: codec = { "MPEG Video": f"MPEG-{(video.format_version or '').replace('Version ', '')}" }.get(video.format, video.format) scan_overview = video.scan_type vst = False if codec in ["MPEG-1", "MPEG-2"]: # parse d2v file with pyd2v, generates D2V if needed d2v = D2V.load(Path(self.file)) self.file = d2v.path # get every frames' flag data, this contains information on displaying frames # add vob and cell number to each frames flag data as well flags = [f for line in [ [dict(**y, vob=x["vob"], cell=x["cell"]) for y in x["flags"]] for x in d2v.data ] for f in line] interlaced_percent = (sum(1 for f 
in flags if not f["progressive_frame"]) / len(flags)) * 100 if interlaced_percent == 100: scan_overview = "Interlaced (CST)" else: scan_overview = f"{round(interlaced_percent, 2)}% Interlaced (VST)" vst = True for ext in ["log", "d2v", "mpg", "mpeg"]: fp = os.path.splitext(self.file)[0] + "." + ext if os.path.exists(fp): os.unlink(fp) line_1 = "- {language}, {codec} ({profile}) {width}x{height} ({aspect}) @ {bitrate}".format( language=pycountry.languages.get(alpha_2=video.language).name, codec=codec, profile=video.format_profile, width=video.width, height=video.height, aspect=video.other_display_aspect_ratio[0], bitrate=f"{video.other_bit_rate[0]}{f' ({video.bit_rate_mode})' if video.bit_rate_mode else ''}" ) line_2 = " {fps} FPS ({fps_mode}), {color_space}{subsampling}P{bit_depth}, {scan}".format( fps=f"{video.framerate_num}/{video.framerate_den}" if video.framerate_num else video.frame_rate, fps_mode="VFR" if vst else video.frame_rate_mode, color_space=video.color_space, subsampling=video.chroma_subsampling.replace(":", ""), bit_depth=video.bit_depth, scan=scan_overview ) data.append([line_1, line_2]) return data def get_audio_print(self, audio: List[Track]) -> List[str]: if not audio: return ["--"] data = [] for t in audio: if t.title and "Commentary" in t.title: title = t.title else: title = pycountry.languages.get(alpha_2=t.language).name if t.channel_layout: channels = float(sum(self.AUDIO_CHANNEL_LAYOUT_WEIGHT.get(x, 1) for x in t.channel_layout.split(" "))) else: channels = float(t.channel_s) bit_rate_mode = f" ({t.bit_rate_mode})" if t.bit_rate_mode else "" l1 = f"- {title}, {t.format} {channels} @ {t.other_bit_rate[0]}{bit_rate_mode}" data += [(" " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(l1, 64))] return data @staticmethod def get_subtitle_print(subs: List[Track]) -> List[str]: """ Return a list of a brief subtitle overview per-subtitle. e.g. 
- English, Forced, SubRip (SRT) - English, SubRip (SRT) - English, SDH, SubRip (SRT) - Spanish, Latin American (SDH), SubRip (SRT) The bit of text between the Language and the Subtitle format is the Track Title. It can be of any format, but it is recommended to be used as shown above. It will be returned as a list of strings with the `- ` already pre-pended to each entry. """ data = [] if not subs: data.append("--") for sub in subs: line_items = []
# | Language | Track Title | Output | # | ------------ | ----------------------------- | --------------------------------------------- | # | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) | # | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) | # | es / Spanish | None | - Spanish, SubRip (SRT) | language = pycountry.languages.get(alpha_2=sub.language).name if sub.title: if language.lower() in sub.title.lower(): line_items.append(sub.title) else: line_items.append(f"{language}, {sub.title}") else: line_items.append(language) line_items.append(sub.format.replace("UTF-8", "SubRip (SRT)")) line = "- " + ", ".join(line_items) data += [ (" " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(line, 64)) ] return data @staticmethod def get_chapter_print(chapters: Dict[str, str]) -> List[str]: if not chapters: return ["--"] return [ f"- {k}: {v}" for k, v in chapters.items() ] def get_chapter_print_short(self, chapters: Dict[str, str]) -> str: if not chapters: return "No" if self.chapters_numbered: return f"Yes (Numbered 01-{str(len(chapters)).zfill(2)})" return "Yes (Named)" @staticmethod def get_session() -> requests.Session: session = requests.Session() session.headers.update({ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "DNT": "1", "UPGRADE-INSECURE-REQUESTS": "1" }) return session
# following sub.title tree checks and supports three different language and title scenarios # The second scenario is the recommended option to choose if you are open to choosing any # The third scenario should be used if you have nothing unique to state about the track
random_line_split
nfo.py
import glob import html import os import re import sys import textwrap from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union import pycountry import requests from pyd2v import D2V from pymediainfo import MediaInfo, Track from pynfogen.formatter import CustomFormats class NFO: AUDIO_CHANNEL_LAYOUT_WEIGHT = { "LFE": 0.1 } IMDB_ID_T = re.compile(r"^tt\d{7,8}$") TMDB_ID_T = re.compile(r"^(tv|movie)/\d+$") TVDB_ID_T = re.compile(r"^\d+$") def __init__(self) -> None: self.media_info: MediaInfo self.file: str self.season: Optional[Union[int, str]] self.episode: Optional[int] self.episode_name: Optional[str] self.videos: List[Track] self.audio: List[Track] self.subtitles: List[Track] self.chapters: Dict[str, str] self.chapters_numbered: bool self.fanart_api_key: Optional[str] self.source: Optional[str] self.note: Optional[str] self.preview: Optional[str] self.imdb: str self.tmdb: Optional[str] self.tvdb: Optional[int] self.title_name: str self.title_year: str self.episodes: int self.release_name: str self.preview_images: List[dict[str, str]] self.banner_image: Optional[str] self.session = self.get_session() def __repr__(self) -> str: return "<{c} {attrs}>".format( c=self.__class__.__name__, attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()), ) def run(self, template: str, art: Optional[str] = None, **kwargs: Any) -> str: """ Evaluate and apply formatting on template, apply any art if provided. Any additional parameters are passed as extra variables to the template. The extra variables have priority when there's conflicting variable names. """ variables = self.__dict__ variables.update(kwargs) template = CustomFormats().format(template, **variables) if art: art = art.format(nfo=template) template = art for m in re.finditer(r"<\?([01])\?([\D\d]*?)\?>", template): # TODO: This if check is quite yucky, look into alternative options. # Ideally a custom format spec would be great. 
template = template.replace( m.group(0), m.group(2) if int(m.group(1)) else "" ) template = "\n".join(map(str.rstrip, template.splitlines(keepends=False))) return template def
(self, file: str, **config: Any) -> None: self.file = file self.media_info = MediaInfo.parse(self.file) self.fanart_api_key = config.get("fanart_api_key") self.source = config.get("source") self.note = config.get("note") self.preview = config.get("preview") self.season = config.get("season") self.episode, self.episode_name = config.get("episode") or (None, None) self.episodes = self.get_tv_episodes() self.release_name = self.get_release_name() self.videos = self.media_info.video_tracks self.audio = self.media_info.audio_tracks self.subtitles = self.media_info.text_tracks tracks_without_language = [ x for x in self.videos + self.audio + self.subtitles if not x.language or x.language == "und" ] if tracks_without_language: print("The following tracks have no language tag! All tracks need a language tag!") for track in tracks_without_language: print(f"{track.track_type} Track #{track.track_id} ({track.format}, {track.bit_rate / 1000} kb/s)") print( "Yes, even Video Track's have language e.g., Credits, Signs, Letters, Different Intro Sequence, etc.\n" "Don't forget to verify and add language tags to the rest of the files too!" 
) sys.exit(1) chapters = next(iter(self.media_info.menu_tracks), None) if chapters: self.chapters = { ".".join([k.replace("_", ".")[:-3], k[-3:]]): v.strip(":") for k, v in chapters.to_data().items() if f"1{k.replace('_', '')}".isdigit() } self.chapters_numbered = all( x.split(":", 1)[-1].lower() in [f"chapter {i + 1}", f"chapter {str(i + 1).zfill(2)}"] for i, x in enumerate(self.chapters.values()) ) else: self.chapters = {} self.chapters_numbered = False self.imdb = self.get_imdb_id(config.get("imdb")) self.tmdb = self.get_tmdb_id(config.get("tmdb")) self.tvdb = self.get_tvdb_id(config.get("tvdb")) self.title_name, self.title_year = self.get_title_name_year() self.banner_image = self.get_banner_image(self.tvdb) if self.tvdb and self.fanart_api_key else None self.preview_images = self.get_preview_images(self.preview) if self.preview else [] def get_imdb_id(self, imdb_id: Any) -> str: """ Get an IMDB ID from either the media's global tags, or the config. Since IMDB IDs are required for this project, it will bug the user for one interactively if not found. """ if not imdb_id: general_track = self.media_info.general_tracks[0].to_data() imdb_id = general_track.get("imdb") if not imdb_id: print("No IMDB ID was provided but is required...") while not imdb_id or not isinstance(imdb_id, str): user_id = input("IMDB ID (e.g., 'tt0487831'): ") if not self.IMDB_ID_T.match(user_id): print(f"The provided IMDB ID {user_id!r} is not valid...") print("Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').") else: imdb_id = user_id return imdb_id def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]: """ Get a TMDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. 
""" if not tmdb_id: general_track = self.media_info.general_tracks[0].to_data() tmdb_id = general_track.get("tmdb") if not tmdb_id: print("Warning: No TMDB ID was provided...") return None if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str): print(f"The provided TMDB ID {tmdb_id!r} is not valid...") print("Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').") raise ValueError("Invalid TMDB ID") return tmdb_id def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]: """ Get a TVDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. """ if not tvdb_id: general_track = self.media_info.general_tracks[0].to_data() tvdb_id = general_track.get("tvdb") if not tvdb_id: print("Warning: No TVDB ID was provided...") return None if isinstance(tvdb_id, int): tvdb_id = str(tvdb_id) if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str): print(f"The provided TVDB ID {tvdb_id!r} is not valid...") print("Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').") raise ValueError("Invalid TVDB ID") return int(tvdb_id) def get_title_name_year(self) -> Tuple[str, str]: """Scrape Title Name and Year (including e.g. 
2019-) from IMDB""" r = self.session.get(f"https://www.imdb.com/title/{self.imdb}") if r.status_code != 200: raise ValueError(f"An unexpected error occurred getting IMDB Title Page [{r.status_code}]") imdb_page = html.unescape(r.text) imdb_title = re.search( # testing ground: https://regex101.com/r/bEoEDn/1 r"<title>(?P<name>.+) \(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)" r"(?P<year>(\d{4})(|– |–\d{4})))\) - IMDb</title>", imdb_page ) if not imdb_title: raise ValueError(f"Could not scrape Movie Title or Year for {self.imdb}...") return imdb_title.group("name").strip(), imdb_title.group("year").strip() def get_tv_episodes(self) -> int: """Calculate total episode count based on neighbouring same-extension files.""" return len(glob.glob(os.path.join( os.path.dirname(self.file), f"*{os.path.splitext(self.file)[-1]}" ))) def get_release_name(self) -> str: """ Retrieve the release name based on the file used during MediaInfo. If a season was specified, but an episode number was not, it presumes the release is a Pack. Hence when pack, it uses the parent folder's name as the release name. """ if self.season is not None and self.episode is None: return os.path.basename(os.path.dirname(self.file)) return os.path.splitext(os.path.basename(self.file))[0] def get_banner_image(self, tvdb_id: int) -> Optional[str]: """ Get a wide banner image from fanart.tv. Currently restricts banners to English-only. 
""" if not tvdb_id: return None if not self.fanart_api_key: raise ValueError("Need Fanart.tv api key for TV titles!") r = self.session.get(f"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}") if r.status_code == 404: return None res = r.json() error = res.get("error message") if error: if error == "Not found": return None raise ValueError(f"An unexpected error occurred while calling Fanart.tv, {res}") banner = next(( x["url"] for x in (res.get("tvbanner") or []) if x["lang"] == sorted(self.audio, key=lambda x: x.streamorder)[0].language ), None) return banner def get_preview_images(self, url: str) -> List[Dict[str, str]]: if not url: return [] images = [] for domain in ["imgbox.com", "beyondhd.co"]: if domain not in url.lower(): continue page = self.session.get(url).text if domain == "imgbox.com": for m in re.finditer('src="(https://thumbs2.imgbox.com.+/)(\\w+)_b.([^"]+)', page): images.append({ "url": f"https://imgbox.com/{m.group(2)}", "src": f"{m.group(1)}{m.group(2)}_t.{m.group(3)}" }) elif domain == "beyondhd.co": for m in re.finditer('/image/([^"]+)"\\D+src="(https://.*beyondhd.co/images.+/(\\w+).md.[^"]+)', page): images.append({ "url": f"https://beyondhd.co/image/{m.group(1)}", "src": m.group(2) }) break return images def get_video_print(self, videos: List[Track]) -> List[List[str]]: if not videos: return [["--"]] data = [] for video in videos: codec = { "MPEG Video": f"MPEG-{(video.format_version or '').replace('Version ', '')}" }.get(video.format, video.format) scan_overview = video.scan_type vst = False if codec in ["MPEG-1", "MPEG-2"]: # parse d2v file with pyd2v, generates D2V if needed d2v = D2V.load(Path(self.file)) self.file = d2v.path # get every frames' flag data, this contains information on displaying frames # add vob and cell number to each frames flag data as well flags = [f for line in [ [dict(**y, vob=x["vob"], cell=x["cell"]) for y in x["flags"]] for x in d2v.data ] for f in line] interlaced_percent = (sum(1 for f 
in flags if not f["progressive_frame"]) / len(flags)) * 100 if interlaced_percent == 100: scan_overview = "Interlaced (CST)" else: scan_overview = f"{round(interlaced_percent, 2)}% Interlaced (VST)" vst = True for ext in ["log", "d2v", "mpg", "mpeg"]: fp = os.path.splitext(self.file)[0] + "." + ext if os.path.exists(fp): os.unlink(fp) line_1 = "- {language}, {codec} ({profile}) {width}x{height} ({aspect}) @ {bitrate}".format( language=pycountry.languages.get(alpha_2=video.language).name, codec=codec, profile=video.format_profile, width=video.width, height=video.height, aspect=video.other_display_aspect_ratio[0], bitrate=f"{video.other_bit_rate[0]}{f' ({video.bit_rate_mode})' if video.bit_rate_mode else ''}" ) line_2 = " {fps} FPS ({fps_mode}), {color_space}{subsampling}P{bit_depth}, {scan}".format( fps=f"{video.framerate_num}/{video.framerate_den}" if video.framerate_num else video.frame_rate, fps_mode="VFR" if vst else video.frame_rate_mode, color_space=video.color_space, subsampling=video.chroma_subsampling.replace(":", ""), bit_depth=video.bit_depth, scan=scan_overview ) data.append([line_1, line_2]) return data def get_audio_print(self, audio: List[Track]) -> List[str]: if not audio: return ["--"] data = [] for t in audio: if t.title and "Commentary" in t.title: title = t.title else: title = pycountry.languages.get(alpha_2=t.language).name if t.channel_layout: channels = float(sum(self.AUDIO_CHANNEL_LAYOUT_WEIGHT.get(x, 1) for x in t.channel_layout.split(" "))) else: channels = float(t.channel_s) bit_rate_mode = f" ({t.bit_rate_mode})" if t.bit_rate_mode else "" l1 = f"- {title}, {t.format} {channels} @ {t.other_bit_rate[0]}{bit_rate_mode}" data += [(" " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(l1, 64))] return data @staticmethod def get_subtitle_print(subs: List[Track]) -> List[str]: """ Return a list of a brief subtitle overview per-subtitle. e.g. 
- English, Forced, SubRip (SRT) - English, SubRip (SRT) - English, SDH, SubRip (SRT) - Spanish, Latin American (SDH), SubRip (SRT) The bit of text between the Language and the Subtitle format is the Track Title. It can be of any format, but it is recommended to be used as shown above. It will be returned as a list of strings with the `- ` already pre-pended to each entry. """ data = [] if not subs: data.append("--") for sub in subs: line_items = [] # following sub.title tree checks and supports three different language and title scenarios # The second scenario is the recommended option to choose if you are open to choosing any # The third scenario should be used if you have nothing unique to state about the track # | Language | Track Title | Output | # | ------------ | ----------------------------- | --------------------------------------------- | # | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) | # | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) | # | es / Spanish | None | - Spanish, SubRip (SRT) | language = pycountry.languages.get(alpha_2=sub.language).name if sub.title: if language.lower() in sub.title.lower(): line_items.append(sub.title) else: line_items.append(f"{language}, {sub.title}") else: line_items.append(language) line_items.append(sub.format.replace("UTF-8", "SubRip (SRT)")) line = "- " + ", ".join(line_items) data += [ (" " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(line, 64)) ] return data @staticmethod def get_chapter_print(chapters: Dict[str, str]) -> List[str]: if not chapters: return ["--"] return [ f"- {k}: {v}" for k, v in chapters.items() ] def get_chapter_print_short(self, chapters: Dict[str, str]) -> str: if not chapters: return "No" if self.chapters_numbered: return f"Yes (Numbered 01-{str(len(chapters)).zfill(2)})" return "Yes (Named)" @staticmethod def get_session() -> requests.Session: session = requests.Session() 
session.headers.update({ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "DNT": "1", "UPGRADE-INSECURE-REQUESTS": "1" }) return session
set_config
identifier_name
nfo.py
import glob import html import os import re import sys import textwrap from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union import pycountry import requests from pyd2v import D2V from pymediainfo import MediaInfo, Track from pynfogen.formatter import CustomFormats class NFO: AUDIO_CHANNEL_LAYOUT_WEIGHT = { "LFE": 0.1 } IMDB_ID_T = re.compile(r"^tt\d{7,8}$") TMDB_ID_T = re.compile(r"^(tv|movie)/\d+$") TVDB_ID_T = re.compile(r"^\d+$") def __init__(self) -> None: self.media_info: MediaInfo self.file: str self.season: Optional[Union[int, str]] self.episode: Optional[int] self.episode_name: Optional[str] self.videos: List[Track] self.audio: List[Track] self.subtitles: List[Track] self.chapters: Dict[str, str] self.chapters_numbered: bool self.fanart_api_key: Optional[str] self.source: Optional[str] self.note: Optional[str] self.preview: Optional[str] self.imdb: str self.tmdb: Optional[str] self.tvdb: Optional[int] self.title_name: str self.title_year: str self.episodes: int self.release_name: str self.preview_images: List[dict[str, str]] self.banner_image: Optional[str] self.session = self.get_session() def __repr__(self) -> str: return "<{c} {attrs}>".format( c=self.__class__.__name__, attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()), ) def run(self, template: str, art: Optional[str] = None, **kwargs: Any) -> str: """ Evaluate and apply formatting on template, apply any art if provided. Any additional parameters are passed as extra variables to the template. The extra variables have priority when there's conflicting variable names. """ variables = self.__dict__ variables.update(kwargs) template = CustomFormats().format(template, **variables) if art: art = art.format(nfo=template) template = art for m in re.finditer(r"<\?([01])\?([\D\d]*?)\?>", template): # TODO: This if check is quite yucky, look into alternative options. # Ideally a custom format spec would be great. 
template = template.replace( m.group(0), m.group(2) if int(m.group(1)) else "" ) template = "\n".join(map(str.rstrip, template.splitlines(keepends=False))) return template def set_config(self, file: str, **config: Any) -> None: self.file = file self.media_info = MediaInfo.parse(self.file) self.fanart_api_key = config.get("fanart_api_key") self.source = config.get("source") self.note = config.get("note") self.preview = config.get("preview") self.season = config.get("season") self.episode, self.episode_name = config.get("episode") or (None, None) self.episodes = self.get_tv_episodes() self.release_name = self.get_release_name() self.videos = self.media_info.video_tracks self.audio = self.media_info.audio_tracks self.subtitles = self.media_info.text_tracks tracks_without_language = [ x for x in self.videos + self.audio + self.subtitles if not x.language or x.language == "und" ] if tracks_without_language: print("The following tracks have no language tag! All tracks need a language tag!") for track in tracks_without_language: print(f"{track.track_type} Track #{track.track_id} ({track.format}, {track.bit_rate / 1000} kb/s)") print( "Yes, even Video Track's have language e.g., Credits, Signs, Letters, Different Intro Sequence, etc.\n" "Don't forget to verify and add language tags to the rest of the files too!" 
) sys.exit(1) chapters = next(iter(self.media_info.menu_tracks), None) if chapters: self.chapters = { ".".join([k.replace("_", ".")[:-3], k[-3:]]): v.strip(":") for k, v in chapters.to_data().items() if f"1{k.replace('_', '')}".isdigit() } self.chapters_numbered = all( x.split(":", 1)[-1].lower() in [f"chapter {i + 1}", f"chapter {str(i + 1).zfill(2)}"] for i, x in enumerate(self.chapters.values()) ) else: self.chapters = {} self.chapters_numbered = False self.imdb = self.get_imdb_id(config.get("imdb")) self.tmdb = self.get_tmdb_id(config.get("tmdb")) self.tvdb = self.get_tvdb_id(config.get("tvdb")) self.title_name, self.title_year = self.get_title_name_year() self.banner_image = self.get_banner_image(self.tvdb) if self.tvdb and self.fanart_api_key else None self.preview_images = self.get_preview_images(self.preview) if self.preview else [] def get_imdb_id(self, imdb_id: Any) -> str: """ Get an IMDB ID from either the media's global tags, or the config. Since IMDB IDs are required for this project, it will bug the user for one interactively if not found. """ if not imdb_id: general_track = self.media_info.general_tracks[0].to_data() imdb_id = general_track.get("imdb") if not imdb_id: print("No IMDB ID was provided but is required...") while not imdb_id or not isinstance(imdb_id, str): user_id = input("IMDB ID (e.g., 'tt0487831'): ") if not self.IMDB_ID_T.match(user_id): print(f"The provided IMDB ID {user_id!r} is not valid...") print("Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').") else: imdb_id = user_id return imdb_id def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]: """ Get a TMDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. 
""" if not tmdb_id: general_track = self.media_info.general_tracks[0].to_data() tmdb_id = general_track.get("tmdb") if not tmdb_id: print("Warning: No TMDB ID was provided...") return None if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str): print(f"The provided TMDB ID {tmdb_id!r} is not valid...") print("Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').") raise ValueError("Invalid TMDB ID") return tmdb_id def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]: """ Get a TVDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. """ if not tvdb_id: general_track = self.media_info.general_tracks[0].to_data() tvdb_id = general_track.get("tvdb") if not tvdb_id: print("Warning: No TVDB ID was provided...") return None if isinstance(tvdb_id, int): tvdb_id = str(tvdb_id) if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str): print(f"The provided TVDB ID {tvdb_id!r} is not valid...") print("Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').") raise ValueError("Invalid TVDB ID") return int(tvdb_id) def get_title_name_year(self) -> Tuple[str, str]: """Scrape Title Name and Year (including e.g. 
2019-) from IMDB""" r = self.session.get(f"https://www.imdb.com/title/{self.imdb}") if r.status_code != 200: raise ValueError(f"An unexpected error occurred getting IMDB Title Page [{r.status_code}]") imdb_page = html.unescape(r.text) imdb_title = re.search( # testing ground: https://regex101.com/r/bEoEDn/1 r"<title>(?P<name>.+) \(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)" r"(?P<year>(\d{4})(|– |–\d{4})))\) - IMDb</title>", imdb_page ) if not imdb_title: raise ValueError(f"Could not scrape Movie Title or Year for {self.imdb}...") return imdb_title.group("name").strip(), imdb_title.group("year").strip() def get_tv_episodes(self) -> int: """Calculate total episode count based on neighbouring same-extension files.""" return len(glob.glob(os.path.join( os.path.dirname(self.file), f"*{os.path.splitext(self.file)[-1]}" ))) def get_release_name(self) -> str: """ Retrieve the release name based on the file used during MediaInfo. If a season was specified, but an episode number was not, it presumes the release is a Pack. Hence when pack, it uses the parent folder's name as the release name. """ if self.season is not None and self.episode is None: return os.path.basename(os.path.dirname(self.file)) return os.path.splitext(os.path.basename(self.file))[0] def get_banner_image(self, tvdb_id: int) -> Optional[str]: """ Get a wide banner image from fanart.tv. Currently restricts banners to English-only. 
""" if not tvdb_id: return None if not self.fanart_api_key: raise ValueError("Need Fanart.tv api key for TV titles!") r = self.session.get(f"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}") if r.status_code == 404: return None res = r.json() error = res.get("error message") if error: if error == "Not found": return None raise ValueError(f"An unexpected error occurred while calling Fanart.tv, {res}") banner = next(( x["url"] for x in (res.get("tvbanner") or []) if x["lang"] == sorted(self.audio, key=lambda x: x.streamorder)[0].language ), None) return banner def get_preview_images(self, url: str) -> List[Dict[str, str]]: if not url: return [] images = [] for domain in ["imgbox.com", "beyondhd.co"]: if domain not in url.lower(): continue page = self.session.get(url).text if domain == "imgbox.com": for m in re.finditer('src="(https://thumbs2.imgbox.com.+/)(\\w+)_b.([^"]+)', page): images.append({ "url": f"https://imgbox.com/{m.group(2)}", "src": f"{m.group(1)}{m.group(2)}_t.{m.group(3)}" }) elif domain == "beyondhd.co": for m in re.finditer('/image/([^"]+)"\\D+src="(https://.*beyondhd.co/images.+/(\\w+).md.[^"]+)', page): images.append({ "url": f"https://beyondhd.co/image/{m.group(1)}", "src": m.group(2) }) break return images def get_video_print(self, videos: List[Track]) -> List[List[str]]: if not videos: return [["--"]] data = [] for video in videos: codec = { "MPEG Video": f"MPEG-{(video.format_version or '').replace('Version ', '')}" }.get(video.format, video.format) scan_overview = video.scan_type vst = False if codec in ["MPEG-1", "MPEG-2"]: # parse d2v file with pyd2v, generates D2V if needed d2v = D2V.load(Path(self.file)) self.file = d2v.path # get every frames' flag data, this contains information on displaying frames # add vob and cell number to each frames flag data as well flags = [f for line in [ [dict(**y, vob=x["vob"], cell=x["cell"]) for y in x["flags"]] for x in d2v.data ] for f in line] interlaced_percent = (sum(1 for f 
in flags if not f["progressive_frame"]) / len(flags)) * 100 if interlaced_percent == 100: scan_overview = "Interlaced (CST)" else: scan_overview = f"{round(interlaced_percent, 2)}% Interlaced (VST)" vst = True for ext in ["log", "d2v", "mpg", "mpeg"]: fp = os.path.splitext(self.file)[0] + "." + ext if os.path.exists(fp): os.unlink(fp) line_1 = "- {language}, {codec} ({profile}) {width}x{height} ({aspect}) @ {bitrate}".format( language=pycountry.languages.get(alpha_2=video.language).name, codec=codec, profile=video.format_profile, width=video.width, height=video.height, aspect=video.other_display_aspect_ratio[0], bitrate=f"{video.other_bit_rate[0]}{f' ({video.bit_rate_mode})' if video.bit_rate_mode else ''}" ) line_2 = " {fps} FPS ({fps_mode}), {color_space}{subsampling}P{bit_depth}, {scan}".format( fps=f"{video.framerate_num}/{video.framerate_den}" if video.framerate_num else video.frame_rate, fps_mode="VFR" if vst else video.frame_rate_mode, color_space=video.color_space, subsampling=video.chroma_subsampling.replace(":", ""), bit_depth=video.bit_depth, scan=scan_overview ) data.append([line_1, line_2]) return data def get_audio_print(self, audio: List[Track]) -> List[str]: if not audio: return ["--"] data = [] for t in audio: if t.title and "Commentary" in t.title: title = t.title else: title = pycountry.languages.get(alpha_2=t.language).name if t.channel_layout: channels = float(sum(self.AUDIO_CHANNEL_LAYOUT_WEIGHT.get(x, 1) for x in t.channel_layout.split(" "))) else: channels = float(t.channel_s) bit_rate_mode = f" ({t.bit_rate_mode})" if t.bit_rate_mode else "" l1 = f"- {title}, {t.format} {channels} @ {t.other_bit_rate[0]}{bit_rate_mode}" data += [(" " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(l1, 64))] return data @staticmethod def get_subtitle_print(subs: List[Track]) -> List[str]: """
@staticmethod def get_chapter_print(chapters: Dict[str, str]) -> List[str]: if not chapters: return ["--"] return [ f"- {k}: {v}" for k, v in chapters.items() ] def get_chapter_print_short(self, chapters: Dict[str, str]) -> str: if not chapters: return "No" if self.chapters_numbered: return f"Yes (Numbered 01-{str(len(chapters)).zfill(2)})" return "Yes (Named)" @staticmethod def get_session() -> requests.Session: session = requests.Session() session.headers.update({ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "DNT": "1", "UPGRADE-INSECURE-REQUESTS": "1" }) return session
Return a list of a brief subtitle overview per-subtitle. e.g. - English, Forced, SubRip (SRT) - English, SubRip (SRT) - English, SDH, SubRip (SRT) - Spanish, Latin American (SDH), SubRip (SRT) The bit of text between the Language and the Subtitle format is the Track Title. It can be of any format, but it is recommended to be used as shown above. It will be returned as a list of strings with the `- ` already pre-pended to each entry. """ data = [] if not subs: data.append("--") for sub in subs: line_items = [] # following sub.title tree checks and supports three different language and title scenarios # The second scenario is the recommended option to choose if you are open to choosing any # The third scenario should be used if you have nothing unique to state about the track # | Language | Track Title | Output | # | ------------ | ----------------------------- | --------------------------------------------- | # | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) | # | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) | # | es / Spanish | None | - Spanish, SubRip (SRT) | language = pycountry.languages.get(alpha_2=sub.language).name if sub.title: if language.lower() in sub.title.lower(): line_items.append(sub.title) else: line_items.append(f"{language}, {sub.title}") else: line_items.append(language) line_items.append(sub.format.replace("UTF-8", "SubRip (SRT)")) line = "- " + ", ".join(line_items) data += [ (" " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(line, 64)) ] return data
identifier_body
nfo.py
import glob import html import os import re import sys import textwrap from pathlib import Path from typing import Any, Dict, List, Optional, Tuple, Union import pycountry import requests from pyd2v import D2V from pymediainfo import MediaInfo, Track from pynfogen.formatter import CustomFormats class NFO: AUDIO_CHANNEL_LAYOUT_WEIGHT = { "LFE": 0.1 } IMDB_ID_T = re.compile(r"^tt\d{7,8}$") TMDB_ID_T = re.compile(r"^(tv|movie)/\d+$") TVDB_ID_T = re.compile(r"^\d+$") def __init__(self) -> None: self.media_info: MediaInfo self.file: str self.season: Optional[Union[int, str]] self.episode: Optional[int] self.episode_name: Optional[str] self.videos: List[Track] self.audio: List[Track] self.subtitles: List[Track] self.chapters: Dict[str, str] self.chapters_numbered: bool self.fanart_api_key: Optional[str] self.source: Optional[str] self.note: Optional[str] self.preview: Optional[str] self.imdb: str self.tmdb: Optional[str] self.tvdb: Optional[int] self.title_name: str self.title_year: str self.episodes: int self.release_name: str self.preview_images: List[dict[str, str]] self.banner_image: Optional[str] self.session = self.get_session() def __repr__(self) -> str: return "<{c} {attrs}>".format( c=self.__class__.__name__, attrs=" ".join("{}={!r}".format(k, v) for k, v in self.__dict__.items()), ) def run(self, template: str, art: Optional[str] = None, **kwargs: Any) -> str: """ Evaluate and apply formatting on template, apply any art if provided. Any additional parameters are passed as extra variables to the template. The extra variables have priority when there's conflicting variable names. """ variables = self.__dict__ variables.update(kwargs) template = CustomFormats().format(template, **variables) if art: art = art.format(nfo=template) template = art for m in re.finditer(r"<\?([01])\?([\D\d]*?)\?>", template): # TODO: This if check is quite yucky, look into alternative options. # Ideally a custom format spec would be great. 
template = template.replace( m.group(0), m.group(2) if int(m.group(1)) else "" ) template = "\n".join(map(str.rstrip, template.splitlines(keepends=False))) return template def set_config(self, file: str, **config: Any) -> None: self.file = file self.media_info = MediaInfo.parse(self.file) self.fanart_api_key = config.get("fanart_api_key") self.source = config.get("source") self.note = config.get("note") self.preview = config.get("preview") self.season = config.get("season") self.episode, self.episode_name = config.get("episode") or (None, None) self.episodes = self.get_tv_episodes() self.release_name = self.get_release_name() self.videos = self.media_info.video_tracks self.audio = self.media_info.audio_tracks self.subtitles = self.media_info.text_tracks tracks_without_language = [ x for x in self.videos + self.audio + self.subtitles if not x.language or x.language == "und" ] if tracks_without_language: print("The following tracks have no language tag! All tracks need a language tag!") for track in tracks_without_language: print(f"{track.track_type} Track #{track.track_id} ({track.format}, {track.bit_rate / 1000} kb/s)") print( "Yes, even Video Track's have language e.g., Credits, Signs, Letters, Different Intro Sequence, etc.\n" "Don't forget to verify and add language tags to the rest of the files too!" 
) sys.exit(1) chapters = next(iter(self.media_info.menu_tracks), None) if chapters: self.chapters = { ".".join([k.replace("_", ".")[:-3], k[-3:]]): v.strip(":") for k, v in chapters.to_data().items() if f"1{k.replace('_', '')}".isdigit() } self.chapters_numbered = all( x.split(":", 1)[-1].lower() in [f"chapter {i + 1}", f"chapter {str(i + 1).zfill(2)}"] for i, x in enumerate(self.chapters.values()) ) else: self.chapters = {} self.chapters_numbered = False self.imdb = self.get_imdb_id(config.get("imdb")) self.tmdb = self.get_tmdb_id(config.get("tmdb")) self.tvdb = self.get_tvdb_id(config.get("tvdb")) self.title_name, self.title_year = self.get_title_name_year() self.banner_image = self.get_banner_image(self.tvdb) if self.tvdb and self.fanart_api_key else None self.preview_images = self.get_preview_images(self.preview) if self.preview else [] def get_imdb_id(self, imdb_id: Any) -> str: """ Get an IMDB ID from either the media's global tags, or the config. Since IMDB IDs are required for this project, it will bug the user for one interactively if not found. """ if not imdb_id: general_track = self.media_info.general_tracks[0].to_data() imdb_id = general_track.get("imdb") if not imdb_id: print("No IMDB ID was provided but is required...") while not imdb_id or not isinstance(imdb_id, str): user_id = input("IMDB ID (e.g., 'tt0487831'): ") if not self.IMDB_ID_T.match(user_id): print(f"The provided IMDB ID {user_id!r} is not valid...") print("Expected e.g., 'tt0487831', 'tt10810424', (include the 'tt').") else: imdb_id = user_id return imdb_id def get_tmdb_id(self, tmdb_id: Any) -> Optional[str]: """ Get a TMDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. 
""" if not tmdb_id: general_track = self.media_info.general_tracks[0].to_data() tmdb_id = general_track.get("tmdb") if not tmdb_id: print("Warning: No TMDB ID was provided...") return None if not self.TMDB_ID_T.match(tmdb_id) or not isinstance(tmdb_id, str): print(f"The provided TMDB ID {tmdb_id!r} is not valid...") print("Expected e.g., 'tv/2490', 'movie/14836', (include the 'tv/' or 'movie/').") raise ValueError("Invalid TMDB ID") return tmdb_id def get_tvdb_id(self, tvdb_id: Any) -> Optional[int]: """ Get a TVDB ID from either the media's global tags, or the config. It will raise a ValueError if the provided ID is invalid. """ if not tvdb_id: general_track = self.media_info.general_tracks[0].to_data() tvdb_id = general_track.get("tvdb") if not tvdb_id: print("Warning: No TVDB ID was provided...") return None if isinstance(tvdb_id, int): tvdb_id = str(tvdb_id) if not self.TVDB_ID_T.match(tvdb_id) or not isinstance(tvdb_id, str): print(f"The provided TVDB ID {tvdb_id!r} is not valid...") print("Expected e.g., '79216', '1395', (not the url slug e.g., 'the-office-us').") raise ValueError("Invalid TVDB ID") return int(tvdb_id) def get_title_name_year(self) -> Tuple[str, str]: """Scrape Title Name and Year (including e.g. 
2019-) from IMDB""" r = self.session.get(f"https://www.imdb.com/title/{self.imdb}") if r.status_code != 200: raise ValueError(f"An unexpected error occurred getting IMDB Title Page [{r.status_code}]") imdb_page = html.unescape(r.text) imdb_title = re.search( # testing ground: https://regex101.com/r/bEoEDn/1 r"<title>(?P<name>.+) \(((?P<type>TV (Movie|Series|Mini[- ]Series|Short|Episode) |Video |Short |)" r"(?P<year>(\d{4})(|– |–\d{4})))\) - IMDb</title>", imdb_page ) if not imdb_title: raise ValueError(f"Could not scrape Movie Title or Year for {self.imdb}...") return imdb_title.group("name").strip(), imdb_title.group("year").strip() def get_tv_episodes(self) -> int: """Calculate total episode count based on neighbouring same-extension files.""" return len(glob.glob(os.path.join( os.path.dirname(self.file), f"*{os.path.splitext(self.file)[-1]}" ))) def get_release_name(self) -> str: """ Retrieve the release name based on the file used during MediaInfo. If a season was specified, but an episode number was not, it presumes the release is a Pack. Hence when pack, it uses the parent folder's name as the release name. """ if self.season is not None and self.episode is None: return os.path.basename(os.path.dirname(self.file)) return os.path.splitext(os.path.basename(self.file))[0] def get_banner_image(self, tvdb_id: int) -> Optional[str]: """ Get a wide banner image from fanart.tv. Currently restricts banners to English-only. 
""" if not tvdb_id: return None if not self.fanart_api_key: raise ValueError("Need Fanart.tv api key for TV titles!") r = self.session.get(f"http://webservice.fanart.tv/v3/tv/{tvdb_id}?api_key={self.fanart_api_key}") if r.status_code == 404: return None res = r.json() error = res.get("error message") if error: if error == "Not found": return None raise ValueError(f"An unexpected error occurred while calling Fanart.tv, {res}") banner = next(( x["url"] for x in (res.get("tvbanner") or []) if x["lang"] == sorted(self.audio, key=lambda x: x.streamorder)[0].language ), None) return banner def get_preview_images(self, url: str) -> List[Dict[str, str]]: if not url: return [] images = [] for domain in ["imgbox.com", "beyondhd.co"]: if domain not in url.lower(): continue page = self.session.get(url).text if domain == "imgbox.com": for m in re.finditer('src="(https://thumbs2.imgbox.com.+/)(\\w+)_b.([^"]+)', page): images.append({ "url": f"https://imgbox.com/{m.group(2)}", "src": f"{m.group(1)}{m.group(2)}_t.{m.group(3)}" }) elif domain == "beyondhd.co": for m in re.finditer('/image/([^"]+)"\\D+src="(https://.*beyondhd.co/images.+/(\\w+).md.[^"]+)', page): images.append({ "url": f"https://beyondhd.co/image/{m.group(1)}", "src": m.group(2) }) break return images def get_video_print(self, videos: List[Track]) -> List[List[str]]: if not videos: return [["--"]] data = [] for video in videos: codec = { "MPEG Video": f"MPEG-{(video.format_version or '').replace('Version ', '')}" }.get(video.format, video.format) scan_overview = video.scan_type vst = False if codec in ["MPEG-1", "MPEG-2"]: # parse d2v file with pyd2v, generates D2V if needed d2v = D2V.load(Path(self.file)) self.file = d2v.path # get every frames' flag data, this contains information on displaying frames # add vob and cell number to each frames flag data as well flags = [f for line in [ [dict(**y, vob=x["vob"], cell=x["cell"]) for y in x["flags"]] for x in d2v.data ] for f in line] interlaced_percent = (sum(1 for f 
in flags if not f["progressive_frame"]) / len(flags)) * 100 if interlaced_percent == 100: scan_overview = "Interlaced (CST)" else: scan_overview = f"{round(interlaced_percent, 2)}% Interlaced (VST)" vst = True for ext in ["log", "d2v", "mpg", "mpeg"]: fp = os.path.splitext(self.file)[0] + "." + ext if os.path.exists(fp): os.u
line_1 = "- {language}, {codec} ({profile}) {width}x{height} ({aspect}) @ {bitrate}".format( language=pycountry.languages.get(alpha_2=video.language).name, codec=codec, profile=video.format_profile, width=video.width, height=video.height, aspect=video.other_display_aspect_ratio[0], bitrate=f"{video.other_bit_rate[0]}{f' ({video.bit_rate_mode})' if video.bit_rate_mode else ''}" ) line_2 = " {fps} FPS ({fps_mode}), {color_space}{subsampling}P{bit_depth}, {scan}".format( fps=f"{video.framerate_num}/{video.framerate_den}" if video.framerate_num else video.frame_rate, fps_mode="VFR" if vst else video.frame_rate_mode, color_space=video.color_space, subsampling=video.chroma_subsampling.replace(":", ""), bit_depth=video.bit_depth, scan=scan_overview ) data.append([line_1, line_2]) return data def get_audio_print(self, audio: List[Track]) -> List[str]: if not audio: return ["--"] data = [] for t in audio: if t.title and "Commentary" in t.title: title = t.title else: title = pycountry.languages.get(alpha_2=t.language).name if t.channel_layout: channels = float(sum(self.AUDIO_CHANNEL_LAYOUT_WEIGHT.get(x, 1) for x in t.channel_layout.split(" "))) else: channels = float(t.channel_s) bit_rate_mode = f" ({t.bit_rate_mode})" if t.bit_rate_mode else "" l1 = f"- {title}, {t.format} {channels} @ {t.other_bit_rate[0]}{bit_rate_mode}" data += [(" " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(l1, 64))] return data @staticmethod def get_subtitle_print(subs: List[Track]) -> List[str]: """ Return a list of a brief subtitle overview per-subtitle. e.g. - English, Forced, SubRip (SRT) - English, SubRip (SRT) - English, SDH, SubRip (SRT) - Spanish, Latin American (SDH), SubRip (SRT) The bit of text between the Language and the Subtitle format is the Track Title. It can be of any format, but it is recommended to be used as shown above. It will be returned as a list of strings with the `- ` already pre-pended to each entry. 
""" data = [] if not subs: data.append("--") for sub in subs: line_items = [] # following sub.title tree checks and supports three different language and title scenarios # The second scenario is the recommended option to choose if you are open to choosing any # The third scenario should be used if you have nothing unique to state about the track # | Language | Track Title | Output | # | ------------ | ----------------------------- | --------------------------------------------- | # | es / Spanish | Spanish (Latin American, SDH) | - Spanish (Latin American, SDH), SubRip (SRT) | # | es / Spanish | Latin American (SDH) | - Spanish, Latin American (SDH), SubRip (SRT) | # | es / Spanish | None | - Spanish, SubRip (SRT) | language = pycountry.languages.get(alpha_2=sub.language).name if sub.title: if language.lower() in sub.title.lower(): line_items.append(sub.title) else: line_items.append(f"{language}, {sub.title}") else: line_items.append(language) line_items.append(sub.format.replace("UTF-8", "SubRip (SRT)")) line = "- " + ", ".join(line_items) data += [ (" " + x if i > 0 else x) for i, x in enumerate(textwrap.wrap(line, 64)) ] return data @staticmethod def get_chapter_print(chapters: Dict[str, str]) -> List[str]: if not chapters: return ["--"] return [ f"- {k}: {v}" for k, v in chapters.items() ] def get_chapter_print_short(self, chapters: Dict[str, str]) -> str: if not chapters: return "No" if self.chapters_numbered: return f"Yes (Numbered 01-{str(len(chapters)).zfill(2)})" return "Yes (Named)" @staticmethod def get_session() -> requests.Session: session = requests.Session() session.headers.update({ "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:81.0) Gecko/20100101 Firefox/81.0", "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8", "Accept-Language": "en-US,en;q=0.5", "DNT": "1", "UPGRADE-INSECURE-REQUESTS": "1" }) return session
nlink(fp)
conditional_block
driver.rs
use eventsim::{Process, ProcessState, EventId}; use super::infrastructure::*; use input::staticinfrastructure::*; use smallvec::SmallVec; use super::dynamics::*; use output::history::TrainLogEvent; use super::Sim; enum ModelContainment { Inside, Outside, } enum Activation { Wait(EventId), Activate, Running, } #[derive(Debug)] struct Train { location: (NodeId, (Option<NodeId>, f64)), velocity: f64, params: TrainParams, under_train: SmallVec<[(NodeId, f64); 4]>, } pub struct
{ id :usize, train: Train, authority: f64, step: (DriverAction, f64), connected_signals: SmallVec<[(ObjectId, f64); 4]>, logger: Box<Fn(TrainLogEvent)>, activation: Activation, timestep: Option<f64>, } impl Driver { pub fn new(sim: &mut Sim, id :usize, activated: EventId, node: NodeId, auth: f64, params: TrainParams, logger: Box<Fn(TrainLogEvent)>, timestep: Option<f64>) -> Self { //println!("INITIAL AUTHORITY {:?}", auth); let train = Train { params: params, location: (0, (Some(node),0.0)), velocity: 0.0, under_train: SmallVec::new(), }; let d = Driver { id: id, train: train, authority: auth - 20.0, step: (DriverAction::Coast, *sim.time()), connected_signals: SmallVec::new(), logger: logger, activation: Activation::Wait(activated), timestep: timestep }; d } fn activate(&mut self, sim:&mut Sim) { if *sim.time() > 0.0 { (self.logger)(TrainLogEvent::Wait(*sim.time())); } self.step = (DriverAction::Coast, *sim.time()); self.move_train_discrete(sim); } fn goto_node(&mut self, sim: &mut Sim, node: NodeId) { //println!("TRAIN goto node {}", node); for obj in sim.world.statics.nodes[node].objects.clone() { if let Some(p) = sim.world.statics.objects[obj].arrive_front(node, self.id) { sim.start_process(p); } self.arrive_front(sim, obj); } self.train.under_train.push((node, self.train.params.length)); } fn arrive_front(&mut self, sim: &Sim, obj: ObjectId) { match sim.world.statics.objects[obj] { StaticObject::Sight { distance, signal } => { self.connected_signals.push((signal, distance)); (self.logger)(TrainLogEvent::Sight(signal,true)); } StaticObject::Signal { .. 
} => { let log = &mut self.logger; self.connected_signals.retain(|&mut (s, _d)| { let lost = s == obj; if lost { log(TrainLogEvent::Sight(s,false)); } !lost }); } _ => {} } } fn move_train(&mut self, sim: &mut Sim) -> ModelContainment { let dt = *sim.time() - self.step.1; if dt <= 1e-5 { return ModelContainment::Inside; } self.move_train_continuous(sim); self.move_train_discrete(sim); if (self.train.location.1).0.is_none() && self.train.under_train.len() == 0 { ModelContainment::Outside } else { ModelContainment::Inside } } fn move_train_continuous(&mut self, sim :&mut Sim) { let (action, action_time) = self.step; let dt = *sim.time() - action_time; let update = dynamic_update(&self.train.params, self.train.velocity, DriverPlan { action: action, dt: dt, }); //println!("DYNAMIC UPDATE {:?}", (action,dt)); //println!("{:?}", update); (self.logger)(TrainLogEvent::Move(dt, action, update)); self.train.velocity = update.v; //println!("train loc {:?}", self.train.location); (self.train.location.1).1 -= update.dx; //println!("train loc {:?}", self.train.location); // In case there are no signals in sight, // the remembered authority is updated. self.authority -= update.dx; let id = self.id; self.train.under_train.retain(|&mut (node, ref mut dist)| { *dist -= update.dx; if *dist < 1e-5 { // Cleared a node. for obj in sim.world.statics.nodes[node].objects.clone() { if let Some(p) = sim.world.statics.objects[obj].arrive_back(node, id) { sim.start_process(p); } } false } else { true } }); { let log = &mut self.logger; self.connected_signals.retain(|&mut (obj, ref mut dist)| { *dist -= update.dx; let lost = *dist < 10.0; // If closer than 10 m, signal should already be green // and seeing a red for a very short time should be because // detector is placed in front of signal and this should not // bother the driver. 
if lost { log(TrainLogEvent::Sight(obj, false)); } !lost }); } } fn move_train_discrete(&mut self, sim :&mut Sim) { loop { let (_, (end_node, dist)) = self.train.location; if dist > 1e-5 || end_node.is_none() { break; } let new_start = sim.world.statics.nodes[end_node.unwrap()].other_node; (self.logger)(TrainLogEvent::Node(end_node.unwrap())); self.goto_node(sim, new_start); (self.logger)(TrainLogEvent::Node(new_start)); match sim.world.edge_from(new_start) { Some((Some(new_end_node), d)) => { self.train.location = (new_start, (Some(new_end_node), d)); (self.logger)(TrainLogEvent::Edge(new_start, Some(new_end_node))); } Some((None, d)) => { self.train.location = (new_start, (None, d)); (self.logger)(TrainLogEvent::Edge(new_start, None)); } None => panic!("Derailed"), } } } fn plan_ahead(&mut self, sim: &Sim) -> DriverPlan { // Travel distance is limited by next node //println!("Travel distance is limited by next node"); //println!("{:?}", (self.train.location.1).1); let mut max_dist = (self.train.location.1).1; // Travel distance is limited by nodes under train //println!("Travel distance is limited by nodes under train"); //println!("{:?}", self.train.under_train); for &(_n, d) in self.train.under_train.iter() { max_dist = max_dist.min(d); } // Travel distance is limited by sight distances //println!("Travel distance is limited by sight distances"); //println!("{:?}", self.connected_signals); for &(_n, d) in self.connected_signals.iter() { max_dist = max_dist.min(d); } // Authority is updated by signals for &(sig, dist) in self.connected_signals.iter() { match sim.world.state[sig] { ObjectState::Signal { ref authority } => { match *authority.get() { (Some(auth_dist), distant_sig) => { //println!("Signal green in sight dist{} sigauth{} self.auth{}", dist, d, dist+d-20.0); self.authority = dist + auth_dist + distant_sig.unwrap_or(0.0) - 20.0; if self.authority < 0.0 { self.authority = 0.0; } } (None,_) => { //println!("Signal red in sight dist{} self.auth{}", 
dist,dist-20.0); self.authority = dist - 20.0; if self.authority < 0.0 { self.authority = 0.0; } break; } } } _ => panic!("Not a signal"), } } //println!("Updated authority {}", self.authority); // Static maximum speed profile ahead from current position // TODO: other speed limitations let static_speed_profile = StaticMaximumVelocityProfile { local_max_velocity: self.train.params.max_vel, max_velocity_ahead: SmallVec::from_slice(&[DistanceVelocity { dx: self.authority, v: 0.0}]), }; let plan = dynamic_plan_step(&self.train.params, max_dist, self.train.velocity, &static_speed_profile); //println!("PLAN: {:?} {:?} {:?} {:?} {:?} ", self.train.params, max_dist, self.train.velocity, static_speed_profile,plan); plan } } impl<'a> Process<Infrastructure<'a>> for Driver { fn resume(&mut self, sim: &mut Sim) -> ProcessState { match self.activation { Activation::Wait(ev) => { self.activation = Activation::Activate; return ProcessState::Wait(SmallVec::from_slice(&[ev])); }, Activation::Activate => { self.activate(sim); self.activation = Activation::Running; }, Activation::Running => { } }; //println!("resume train"); let modelcontainment = self.move_train(sim); match modelcontainment { ModelContainment::Outside => { //println!("TRAIN FINISHED"); ProcessState::Finished }, ModelContainment::Inside => { let plan = self.plan_ahead(sim); self.step = (plan.action, *sim.time()); //println!("PLAN {:?}", plan); let mut events = SmallVec::new(); if plan.dt > 1e-5 { let dt = match self.timestep { Some(m) => if m < plan.dt && plan.dt.is_normal() { m } else { plan.dt }, None => plan.dt, }; //println!("SET TIMOUT {:?} {:?}", plan.dt, dt); events.push(sim.create_timeout(dt)); } else { if self.train.velocity > 1e-5 { panic!("Velocity, but no plan."); } self.train.velocity = 0.0; self.step.0 = DriverAction::Coast; } //println!("Connected signals: {:?}", self.connected_signals); for &(ref sig, _) in self.connected_signals.iter() { match sim.world.state[*sig] { ObjectState::Signal { ref 
authority } => events.push(authority.event()), _ => panic!("Object is not a signal"), } } ProcessState::Wait(events) } } } }
Driver
identifier_name
driver.rs
use eventsim::{Process, ProcessState, EventId}; use super::infrastructure::*; use input::staticinfrastructure::*; use smallvec::SmallVec; use super::dynamics::*; use output::history::TrainLogEvent; use super::Sim; enum ModelContainment { Inside, Outside, } enum Activation { Wait(EventId), Activate, Running, } #[derive(Debug)] struct Train { location: (NodeId, (Option<NodeId>, f64)), velocity: f64, params: TrainParams, under_train: SmallVec<[(NodeId, f64); 4]>, } pub struct Driver { id :usize, train: Train, authority: f64, step: (DriverAction, f64), connected_signals: SmallVec<[(ObjectId, f64); 4]>, logger: Box<Fn(TrainLogEvent)>, activation: Activation, timestep: Option<f64>, } impl Driver { pub fn new(sim: &mut Sim, id :usize, activated: EventId, node: NodeId, auth: f64, params: TrainParams, logger: Box<Fn(TrainLogEvent)>, timestep: Option<f64>) -> Self { //println!("INITIAL AUTHORITY {:?}", auth); let train = Train { params: params, location: (0, (Some(node),0.0)), velocity: 0.0, under_train: SmallVec::new(), }; let d = Driver { id: id, train: train, authority: auth - 20.0, step: (DriverAction::Coast, *sim.time()), connected_signals: SmallVec::new(), logger: logger, activation: Activation::Wait(activated), timestep: timestep }; d } fn activate(&mut self, sim:&mut Sim) { if *sim.time() > 0.0 { (self.logger)(TrainLogEvent::Wait(*sim.time())); } self.step = (DriverAction::Coast, *sim.time()); self.move_train_discrete(sim); } fn goto_node(&mut self, sim: &mut Sim, node: NodeId) { //println!("TRAIN goto node {}", node); for obj in sim.world.statics.nodes[node].objects.clone() { if let Some(p) = sim.world.statics.objects[obj].arrive_front(node, self.id) { sim.start_process(p); } self.arrive_front(sim, obj); } self.train.under_train.push((node, self.train.params.length)); } fn arrive_front(&mut self, sim: &Sim, obj: ObjectId) { match sim.world.statics.objects[obj] { StaticObject::Sight { distance, signal } => { self.connected_signals.push((signal, distance)); 
(self.logger)(TrainLogEvent::Sight(signal,true)); } StaticObject::Signal { .. } => { let log = &mut self.logger; self.connected_signals.retain(|&mut (s, _d)| { let lost = s == obj; if lost { log(TrainLogEvent::Sight(s,false)); } !lost }); } _ => {} } } fn move_train(&mut self, sim: &mut Sim) -> ModelContainment { let dt = *sim.time() - self.step.1; if dt <= 1e-5 { return ModelContainment::Inside; } self.move_train_continuous(sim); self.move_train_discrete(sim); if (self.train.location.1).0.is_none() && self.train.under_train.len() == 0 { ModelContainment::Outside } else { ModelContainment::Inside } } fn move_train_continuous(&mut self, sim :&mut Sim) { let (action, action_time) = self.step; let dt = *sim.time() - action_time; let update = dynamic_update(&self.train.params, self.train.velocity, DriverPlan { action: action, dt: dt, }); //println!("DYNAMIC UPDATE {:?}", (action,dt)); //println!("{:?}", update); (self.logger)(TrainLogEvent::Move(dt, action, update)); self.train.velocity = update.v; //println!("train loc {:?}", self.train.location); (self.train.location.1).1 -= update.dx; //println!("train loc {:?}", self.train.location); // In case there are no signals in sight, // the remembered authority is updated. self.authority -= update.dx; let id = self.id; self.train.under_train.retain(|&mut (node, ref mut dist)| { *dist -= update.dx; if *dist < 1e-5 { // Cleared a node. for obj in sim.world.statics.nodes[node].objects.clone() { if let Some(p) = sim.world.statics.objects[obj].arrive_back(node, id) { sim.start_process(p); } } false } else { true } }); { let log = &mut self.logger; self.connected_signals.retain(|&mut (obj, ref mut dist)| { *dist -= update.dx; let lost = *dist < 10.0; // If closer than 10 m, signal should already be green // and seeing a red for a very short time should be because // detector is placed in front of signal and this should not // bother the driver. 
if lost { log(TrainLogEvent::Sight(obj, false)); } !lost }); } } fn move_train_discrete(&mut self, sim :&mut Sim) { loop { let (_, (end_node, dist)) = self.train.location; if dist > 1e-5 || end_node.is_none() { break; } let new_start = sim.world.statics.nodes[end_node.unwrap()].other_node; (self.logger)(TrainLogEvent::Node(end_node.unwrap())); self.goto_node(sim, new_start); (self.logger)(TrainLogEvent::Node(new_start)); match sim.world.edge_from(new_start) { Some((Some(new_end_node), d)) => { self.train.location = (new_start, (Some(new_end_node), d)); (self.logger)(TrainLogEvent::Edge(new_start, Some(new_end_node))); } Some((None, d)) => { self.train.location = (new_start, (None, d)); (self.logger)(TrainLogEvent::Edge(new_start, None)); } None => panic!("Derailed"), } } } fn plan_ahead(&mut self, sim: &Sim) -> DriverPlan { // Travel distance is limited by next node //println!("Travel distance is limited by next node"); //println!("{:?}", (self.train.location.1).1); let mut max_dist = (self.train.location.1).1; // Travel distance is limited by nodes under train //println!("Travel distance is limited by nodes under train"); //println!("{:?}", self.train.under_train); for &(_n, d) in self.train.under_train.iter() { max_dist = max_dist.min(d); } // Travel distance is limited by sight distances //println!("Travel distance is limited by sight distances"); //println!("{:?}", self.connected_signals); for &(_n, d) in self.connected_signals.iter() { max_dist = max_dist.min(d); } // Authority is updated by signals for &(sig, dist) in self.connected_signals.iter() { match sim.world.state[sig] { ObjectState::Signal { ref authority } => { match *authority.get() { (Some(auth_dist), distant_sig) => { //println!("Signal green in sight dist{} sigauth{} self.auth{}", dist, d, dist+d-20.0); self.authority = dist + auth_dist + distant_sig.unwrap_or(0.0) - 20.0; if self.authority < 0.0 { self.authority = 0.0; } } (None,_) =>
} } _ => panic!("Not a signal"), } } //println!("Updated authority {}", self.authority); // Static maximum speed profile ahead from current position // TODO: other speed limitations let static_speed_profile = StaticMaximumVelocityProfile { local_max_velocity: self.train.params.max_vel, max_velocity_ahead: SmallVec::from_slice(&[DistanceVelocity { dx: self.authority, v: 0.0}]), }; let plan = dynamic_plan_step(&self.train.params, max_dist, self.train.velocity, &static_speed_profile); //println!("PLAN: {:?} {:?} {:?} {:?} {:?} ", self.train.params, max_dist, self.train.velocity, static_speed_profile,plan); plan } } impl<'a> Process<Infrastructure<'a>> for Driver { fn resume(&mut self, sim: &mut Sim) -> ProcessState { match self.activation { Activation::Wait(ev) => { self.activation = Activation::Activate; return ProcessState::Wait(SmallVec::from_slice(&[ev])); }, Activation::Activate => { self.activate(sim); self.activation = Activation::Running; }, Activation::Running => { } }; //println!("resume train"); let modelcontainment = self.move_train(sim); match modelcontainment { ModelContainment::Outside => { //println!("TRAIN FINISHED"); ProcessState::Finished }, ModelContainment::Inside => { let plan = self.plan_ahead(sim); self.step = (plan.action, *sim.time()); //println!("PLAN {:?}", plan); let mut events = SmallVec::new(); if plan.dt > 1e-5 { let dt = match self.timestep { Some(m) => if m < plan.dt && plan.dt.is_normal() { m } else { plan.dt }, None => plan.dt, }; //println!("SET TIMOUT {:?} {:?}", plan.dt, dt); events.push(sim.create_timeout(dt)); } else { if self.train.velocity > 1e-5 { panic!("Velocity, but no plan."); } self.train.velocity = 0.0; self.step.0 = DriverAction::Coast; } //println!("Connected signals: {:?}", self.connected_signals); for &(ref sig, _) in self.connected_signals.iter() { match sim.world.state[*sig] { ObjectState::Signal { ref authority } => events.push(authority.event()), _ => panic!("Object is not a signal"), } } 
ProcessState::Wait(events) } } } }
{ //println!("Signal red in sight dist{} self.auth{}", dist,dist-20.0); self.authority = dist - 20.0; if self.authority < 0.0 { self.authority = 0.0; } break; }
conditional_block
driver.rs
use eventsim::{Process, ProcessState, EventId}; use super::infrastructure::*; use input::staticinfrastructure::*; use smallvec::SmallVec; use super::dynamics::*; use output::history::TrainLogEvent; use super::Sim; enum ModelContainment { Inside, Outside, } enum Activation { Wait(EventId), Activate, Running, } #[derive(Debug)] struct Train { location: (NodeId, (Option<NodeId>, f64)), velocity: f64, params: TrainParams, under_train: SmallVec<[(NodeId, f64); 4]>, } pub struct Driver { id :usize, train: Train, authority: f64, step: (DriverAction, f64), connected_signals: SmallVec<[(ObjectId, f64); 4]>, logger: Box<Fn(TrainLogEvent)>, activation: Activation, timestep: Option<f64>, } impl Driver { pub fn new(sim: &mut Sim, id :usize, activated: EventId, node: NodeId, auth: f64, params: TrainParams, logger: Box<Fn(TrainLogEvent)>, timestep: Option<f64>) -> Self { //println!("INITIAL AUTHORITY {:?}", auth); let train = Train { params: params, location: (0, (Some(node),0.0)), velocity: 0.0, under_train: SmallVec::new(), }; let d = Driver { id: id, train: train, authority: auth - 20.0, step: (DriverAction::Coast, *sim.time()), connected_signals: SmallVec::new(), logger: logger, activation: Activation::Wait(activated), timestep: timestep }; d } fn activate(&mut self, sim:&mut Sim) { if *sim.time() > 0.0 { (self.logger)(TrainLogEvent::Wait(*sim.time())); } self.step = (DriverAction::Coast, *sim.time()); self.move_train_discrete(sim); } fn goto_node(&mut self, sim: &mut Sim, node: NodeId) { //println!("TRAIN goto node {}", node); for obj in sim.world.statics.nodes[node].objects.clone() { if let Some(p) = sim.world.statics.objects[obj].arrive_front(node, self.id) { sim.start_process(p); } self.arrive_front(sim, obj); } self.train.under_train.push((node, self.train.params.length)); } fn arrive_front(&mut self, sim: &Sim, obj: ObjectId) { match sim.world.statics.objects[obj] { StaticObject::Sight { distance, signal } => { self.connected_signals.push((signal, distance)); 
(self.logger)(TrainLogEvent::Sight(signal,true)); } StaticObject::Signal { .. } => { let log = &mut self.logger; self.connected_signals.retain(|&mut (s, _d)| { let lost = s == obj; if lost { log(TrainLogEvent::Sight(s,false)); } !lost }); } _ => {} } } fn move_train(&mut self, sim: &mut Sim) -> ModelContainment { let dt = *sim.time() - self.step.1; if dt <= 1e-5 { return ModelContainment::Inside; } self.move_train_continuous(sim); self.move_train_discrete(sim); if (self.train.location.1).0.is_none() && self.train.under_train.len() == 0 { ModelContainment::Outside } else { ModelContainment::Inside } } fn move_train_continuous(&mut self, sim :&mut Sim) { let (action, action_time) = self.step; let dt = *sim.time() - action_time; let update = dynamic_update(&self.train.params, self.train.velocity, DriverPlan { action: action, dt: dt, }); //println!("DYNAMIC UPDATE {:?}", (action,dt)); //println!("{:?}", update); (self.logger)(TrainLogEvent::Move(dt, action, update)); self.train.velocity = update.v; //println!("train loc {:?}", self.train.location); (self.train.location.1).1 -= update.dx; //println!("train loc {:?}", self.train.location); // In case there are no signals in sight, // the remembered authority is updated. self.authority -= update.dx; let id = self.id; self.train.under_train.retain(|&mut (node, ref mut dist)| { *dist -= update.dx; if *dist < 1e-5 { // Cleared a node. for obj in sim.world.statics.nodes[node].objects.clone() { if let Some(p) = sim.world.statics.objects[obj].arrive_back(node, id) {
} false } else { true } }); { let log = &mut self.logger; self.connected_signals.retain(|&mut (obj, ref mut dist)| { *dist -= update.dx; let lost = *dist < 10.0; // If closer than 10 m, signal should already be green // and seeing a red for a very short time should be because // detector is placed in front of signal and this should not // bother the driver. if lost { log(TrainLogEvent::Sight(obj, false)); } !lost }); } } fn move_train_discrete(&mut self, sim :&mut Sim) { loop { let (_, (end_node, dist)) = self.train.location; if dist > 1e-5 || end_node.is_none() { break; } let new_start = sim.world.statics.nodes[end_node.unwrap()].other_node; (self.logger)(TrainLogEvent::Node(end_node.unwrap())); self.goto_node(sim, new_start); (self.logger)(TrainLogEvent::Node(new_start)); match sim.world.edge_from(new_start) { Some((Some(new_end_node), d)) => { self.train.location = (new_start, (Some(new_end_node), d)); (self.logger)(TrainLogEvent::Edge(new_start, Some(new_end_node))); } Some((None, d)) => { self.train.location = (new_start, (None, d)); (self.logger)(TrainLogEvent::Edge(new_start, None)); } None => panic!("Derailed"), } } } fn plan_ahead(&mut self, sim: &Sim) -> DriverPlan { // Travel distance is limited by next node //println!("Travel distance is limited by next node"); //println!("{:?}", (self.train.location.1).1); let mut max_dist = (self.train.location.1).1; // Travel distance is limited by nodes under train //println!("Travel distance is limited by nodes under train"); //println!("{:?}", self.train.under_train); for &(_n, d) in self.train.under_train.iter() { max_dist = max_dist.min(d); } // Travel distance is limited by sight distances //println!("Travel distance is limited by sight distances"); //println!("{:?}", self.connected_signals); for &(_n, d) in self.connected_signals.iter() { max_dist = max_dist.min(d); } // Authority is updated by signals for &(sig, dist) in self.connected_signals.iter() { match sim.world.state[sig] { ObjectState::Signal { ref 
authority } => { match *authority.get() { (Some(auth_dist), distant_sig) => { //println!("Signal green in sight dist{} sigauth{} self.auth{}", dist, d, dist+d-20.0); self.authority = dist + auth_dist + distant_sig.unwrap_or(0.0) - 20.0; if self.authority < 0.0 { self.authority = 0.0; } } (None,_) => { //println!("Signal red in sight dist{} self.auth{}", dist,dist-20.0); self.authority = dist - 20.0; if self.authority < 0.0 { self.authority = 0.0; } break; } } } _ => panic!("Not a signal"), } } //println!("Updated authority {}", self.authority); // Static maximum speed profile ahead from current position // TODO: other speed limitations let static_speed_profile = StaticMaximumVelocityProfile { local_max_velocity: self.train.params.max_vel, max_velocity_ahead: SmallVec::from_slice(&[DistanceVelocity { dx: self.authority, v: 0.0}]), }; let plan = dynamic_plan_step(&self.train.params, max_dist, self.train.velocity, &static_speed_profile); //println!("PLAN: {:?} {:?} {:?} {:?} {:?} ", self.train.params, max_dist, self.train.velocity, static_speed_profile,plan); plan } } impl<'a> Process<Infrastructure<'a>> for Driver { fn resume(&mut self, sim: &mut Sim) -> ProcessState { match self.activation { Activation::Wait(ev) => { self.activation = Activation::Activate; return ProcessState::Wait(SmallVec::from_slice(&[ev])); }, Activation::Activate => { self.activate(sim); self.activation = Activation::Running; }, Activation::Running => { } }; //println!("resume train"); let modelcontainment = self.move_train(sim); match modelcontainment { ModelContainment::Outside => { //println!("TRAIN FINISHED"); ProcessState::Finished }, ModelContainment::Inside => { let plan = self.plan_ahead(sim); self.step = (plan.action, *sim.time()); //println!("PLAN {:?}", plan); let mut events = SmallVec::new(); if plan.dt > 1e-5 { let dt = match self.timestep { Some(m) => if m < plan.dt && plan.dt.is_normal() { m } else { plan.dt }, None => plan.dt, }; //println!("SET TIMOUT {:?} {:?}", plan.dt, dt); 
events.push(sim.create_timeout(dt)); } else { if self.train.velocity > 1e-5 { panic!("Velocity, but no plan."); } self.train.velocity = 0.0; self.step.0 = DriverAction::Coast; } //println!("Connected signals: {:?}", self.connected_signals); for &(ref sig, _) in self.connected_signals.iter() { match sim.world.state[*sig] { ObjectState::Signal { ref authority } => events.push(authority.event()), _ => panic!("Object is not a signal"), } } ProcessState::Wait(events) } } } }
sim.start_process(p); }
random_line_split
lib.rs
// Copyright 2018 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. mod codec; use std::{cmp, iter, mem}; use std::io::{Error as IoError, ErrorKind as IoErrorKind}; use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc}; use bytes::Bytes; use libp2p_core::{ Endpoint, StreamMuxer, upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo, Negotiated} }; use log::{debug, trace}; use parking_lot::Mutex; use fnv::{FnvHashMap, FnvHashSet}; use futures::{prelude::*, executor, future, stream::Fuse, task, task_local, try_ready}; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; /// Configuration for the multiplexer. #[derive(Debug, Clone)] pub struct MplexConfig { /// Maximum number of simultaneously-open substreams. max_substreams: usize, /// Maximum number of elements in the internal buffer. max_buffer_len: usize, /// Behaviour when the buffer size limit is reached. 
max_buffer_behaviour: MaxBufferBehaviour, /// When sending data, split it into frames whose maximum size is this value /// (max 1MByte, as per the Mplex spec). split_send_size: usize, } impl MplexConfig { /// Builds the default configuration. #[inline] pub fn new() -> MplexConfig { Default::default() } /// Sets the maximum number of simultaneously opened substreams, after which an error is /// generated and the connection closes. /// /// A limit is necessary in order to avoid DoS attacks. #[inline] pub fn max_substreams(&mut self, max: usize) -> &mut Self { self.max_substreams = max; self } /// Sets the maximum number of pending incoming messages. /// /// A limit is necessary in order to avoid DoS attacks. #[inline] pub fn max_buffer_len(&mut self, max: usize) -> &mut Self { self.max_buffer_len = max; self } /// Sets the behaviour when the maximum buffer length has been reached. /// /// See the documentation of `MaxBufferBehaviour`. #[inline] pub fn max_buffer_len_behaviour(&mut self, behaviour: MaxBufferBehaviour) -> &mut Self { self.max_buffer_behaviour = behaviour; self } /// Sets the frame size used when sending data. Capped at 1Mbyte as per the /// Mplex spec. 
pub fn split_send_size(&mut self, size: usize) -> &mut Self { let size = cmp::min(size, codec::MAX_FRAME_SIZE); self.split_send_size = size; self } #[inline] fn upgrade<C>(self, i: C) -> Multiplex<C> where C: AsyncRead + AsyncWrite { let max_buffer_len = self.max_buffer_len; Multiplex { inner: Mutex::new(MultiplexInner { error: Ok(()), inner: executor::spawn(Framed::new(i, codec::Codec::new()).fuse()), config: self, buffer: Vec::with_capacity(cmp::min(max_buffer_len, 512)), opened_substreams: Default::default(), next_outbound_stream_id: 0, notifier_read: Arc::new(Notifier { to_notify: Mutex::new(Default::default()), }), notifier_write: Arc::new(Notifier { to_notify: Mutex::new(Default::default()), }), is_shutdown: false, is_acknowledged: false, }) } } } impl Default for MplexConfig { #[inline] fn default() -> MplexConfig { MplexConfig { max_substreams: 128, max_buffer_len: 4096, max_buffer_behaviour: MaxBufferBehaviour::CloseAll, split_send_size: 1024, } } } /// Behaviour when the maximum length of the buffer is reached. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum MaxBufferBehaviour { /// Produce an error on all the substreams. CloseAll, /// No new message will be read from the underlying connection if the buffer is full. /// /// This can potentially introduce a deadlock if you are waiting for a message from a substream /// before processing the messages received on another substream. 
Block, } impl UpgradeInfo for MplexConfig { type Info = &'static [u8]; type InfoIter = iter::Once<Self::Info>; #[inline] fn protocol_info(&self) -> Self::InfoIter { iter::once(b"/mplex/6.7.0") } } impl<C> InboundUpgrade<C> for MplexConfig where C: AsyncRead + AsyncWrite, { type Output = Multiplex<Negotiated<C>>; type Error = IoError; type Future = future::FutureResult<Self::Output, IoError>; fn upgrade_inbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future { future::ok(self.upgrade(socket)) } } impl<C> OutboundUpgrade<C> for MplexConfig where C: AsyncRead + AsyncWrite, { type Output = Multiplex<Negotiated<C>>; type Error = IoError; type Future = future::FutureResult<Self::Output, IoError>; fn upgrade_outbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future { future::ok(self.upgrade(socket)) } } /// Multiplexer. Implements the `StreamMuxer` trait. pub struct Multiplex<C> { inner: Mutex<MultiplexInner<C>>, } // Struct shared throughout the implementation. struct MultiplexInner<C> { // Error that happened earlier. Should poison any attempt to use this `MultiplexError`. error: Result<(), IoError>, // Underlying stream. inner: executor::Spawn<Fuse<Framed<C, codec::Codec>>>, /// The original configuration. config: MplexConfig, // Buffer of elements pulled from the stream but not processed yet. buffer: Vec<codec::Elem>, // List of Ids of opened substreams. Used to filter out messages that don't belong to any // substream. Note that this is handled exclusively by `next_match`. // The `Endpoint` value denotes who initiated the substream from our point of view // (see note [StreamId]). opened_substreams: FnvHashSet<(u32, Endpoint)>, // Id of the next outgoing substream. next_outbound_stream_id: u32, /// List of tasks to notify when a read event happens on the underlying stream. notifier_read: Arc<Notifier>, /// List of tasks to notify when a write event happens on the underlying stream. 
notifier_write: Arc<Notifier>, /// If true, the connection has been shut down. We need to be careful not to accidentally /// call `Sink::poll_complete` or `Sink::start_send` after `Sink::close`. is_shutdown: bool, /// If true, the remote has sent data to us. is_acknowledged: bool, } struct Notifier { /// List of tasks to notify. to_notify: Mutex<FnvHashMap<usize, task::Task>>, } impl executor::Notify for Notifier { fn notify(&self, _: usize) { let tasks = mem::replace(&mut *self.to_notify.lock(), Default::default()); for (_, task) in tasks { task.notify(); } } } // TODO: replace with another system static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0); task_local!{ static TASK_ID: usize = NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed) } // Note [StreamId]: mplex no longer partitions stream IDs into odd (for initiators) and // even ones (for receivers). Streams are instead identified by a number and whether the flag // is odd (for receivers) or even (for initiators). `Open` frames do not have a flag, but are // sent unidirectional. As a consequence, we need to remember if the stream was initiated by us // or remotely and we store the information from our point of view, i.e. receiving an `Open` frame // is stored as `(<u32>, Listener)`, sending an `Open` frame as `(<u32>, Dialer)`. Receiving // a `Data` frame with flag `MessageReceiver` (= 1) means that we initiated the stream, so the // entry has been stored as `(<u32>, Dialer)`. So, when looking up streams based on frames // received, we have to invert the `Endpoint`, except for `Open`. /// Processes elements in `inner` until one matching `filter` is found. /// /// If `NotReady` is returned, the current task is scheduled for later, just like with any `Poll`. /// `Ready(Some())` is almost always returned. An error is returned if the stream is EOF. 
fn next_match<C, F, O>(inner: &mut MultiplexInner<C>, mut filter: F) -> Poll<O, IoError> where C: AsyncRead + AsyncWrite, F: FnMut(&codec::Elem) -> Option<O>, { // If an error happened earlier, immediately return it. if let Err(ref err) = inner.error { return Err(IoError::new(err.kind(), err.to_string())); } if let Some((offset, out)) = inner.buffer.iter().enumerate().filter_map(|(n, v)| filter(v).map(|v| (n, v))).next() { // The buffer was full and no longer is, so let's notify everything. if inner.buffer.len() == inner.config.max_buffer_len { executor::Notify::notify(&*inner.notifier_read, 0); } inner.buffer.remove(offset); return Ok(Async::Ready(out)); } loop { // Check if we reached max buffer length first. debug_assert!(inner.buffer.len() <= inner.config.max_buffer_len); if inner.buffer.len() == inner.config.max_buffer_len { debug!("Reached mplex maximum buffer length"); match inner.config.max_buffer_behaviour { MaxBufferBehaviour::CloseAll => { inner.error = Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length")); return Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length")); }, MaxBufferBehaviour::Block => { inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady); }, } } let elem = match inner.inner.poll_stream_notify(&inner.notifier_read, 0) { Ok(Async::Ready(Some(item))) => item, Ok(Async::Ready(None)) => return Err(IoErrorKind::BrokenPipe.into()), Ok(Async::NotReady) => { inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady); }, Err(err) => { let err2 = IoError::new(err.kind(), err.to_string()); inner.error = Err(err); return Err(err2); }, }; trace!("Received message: {:?}", elem); inner.is_acknowledged = true; // Handle substreams opening/closing. 
match elem { codec::Elem::Open { substream_id } => { if !inner.opened_substreams.insert((substream_id, Endpoint::Listener)) { debug!("Received open message for substream {} which was already open", substream_id) } } codec::Elem::Close { substream_id, endpoint, .. } | codec::Elem::Reset { substream_id, endpoint, .. } => { inner.opened_substreams.remove(&(substream_id, !endpoint)); } _ => () } if let Some(out) = filter(&elem) { return Ok(Async::Ready(out)); } else { let endpoint = elem.endpoint().unwrap_or(Endpoint::Dialer); if inner.opened_substreams.contains(&(elem.substream_id(), !endpoint)) || elem.is_open_msg() { inner.buffer.push(elem); } else if !elem.is_close_or_reset_msg() { debug!("Ignored message {:?} because the substream wasn't open", elem); } } } } // Small convenience function that tries to write `elem` to the stream. fn poll_send<C>(inner: &mut MultiplexInner<C>, elem: codec::Elem) -> Poll<(), IoError> where C: AsyncRead + AsyncWrite { if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } match inner.inner.start_send_notify(elem, &inner.notifier_write, 0) { Ok(AsyncSink::Ready) => { Ok(Async::Ready(())) }, Ok(AsyncSink::NotReady(_)) => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); Ok(Async::NotReady) }, Err(err) => Err(err) } } impl<C> StreamMuxer for Multiplex<C> where C: AsyncRead + AsyncWrite { type Substream = Substream; type OutboundSubstream = OutboundSubstream; type Error = IoError; fn poll_inbound(&self) -> Poll<Self::Substream, IoError> { let mut inner = self.inner.lock(); if inner.opened_substreams.len() >= inner.config.max_substreams { debug!("Refused substream; reached maximum number of substreams {}", inner.config.max_substreams); return Err(IoError::new(IoErrorKind::ConnectionRefused, "exceeded maximum number of open substreams")); } let num = try_ready!(next_match(&mut inner, |elem| { match elem { codec::Elem::Open { substream_id } => 
Some(*substream_id), _ => None, } })); debug!("Successfully opened inbound substream {}", num); Ok(Async::Ready(Substream { current_data: Bytes::new(), num, endpoint: Endpoint::Listener, local_open: true, remote_open: true, })) } fn open_outbound(&self) -> Self::OutboundSubstream { let mut inner = self.inner.lock(); // Assign a substream ID now. let substream_id = { let n = inner.next_outbound_stream_id; inner.next_outbound_stream_id = inner.next_outbound_stream_id.checked_add(1) .expect("Mplex substream ID overflowed"); n }; inner.opened_substreams.insert((substream_id, Endpoint::Dialer)); OutboundSubstream { num: substream_id, state: OutboundSubstreamState::SendElem(codec::Elem::Open { substream_id }), } } fn poll_outbound(&self, substream: &mut Self::OutboundSubstream) -> Poll<Self::Substream, IoError> { loop { let mut inner = self.inner.lock(); let polling = match substream.state { OutboundSubstreamState::SendElem(ref elem) => { poll_send(&mut inner, elem.clone()) }, OutboundSubstreamState::Flush => { if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } let inner = &mut *inner; // Avoids borrow errors inner.inner.poll_flush_notify(&inner.notifier_write, 0) }, OutboundSubstreamState::Done => { panic!("Polling outbound substream after it's been succesfully open"); }, }; match polling { Ok(Async::Ready(())) => (), Ok(Async::NotReady) => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady) }, Err(err) => { debug!("Failed to open outbound substream {}", substream.num); inner.buffer.retain(|elem| { elem.substream_id() != substream.num || elem.endpoint() == Some(Endpoint::Dialer) }); return Err(err) }, }; drop(inner); // Going to next step. 
match substream.state { OutboundSubstreamState::SendElem(_) => { substream.state = OutboundSubstreamState::Flush; }, OutboundSubstreamState::Flush => { debug!("Successfully opened outbound substream {}", substream.num); substream.state = OutboundSubstreamState::Done; return Ok(Async::Ready(Substream { num: substream.num, current_data: Bytes::new(), endpoint: Endpoint::Dialer, local_open: true, remote_open: true, })); }, OutboundSubstreamState::Done => unreachable!(), } } } #[inline] fn destroy_outbound(&self, _substream: Self::OutboundSubstream) { // Nothing to do. } unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { false } fn read_substream(&self, substream: &mut Self::Substream, buf: &mut [u8]) -> Poll<usize, IoError> { loop { // First, transfer from `current_data`. if !substream.current_data.is_empty() { let len = cmp::min(substream.current_data.len(), buf.len()); buf[..len].copy_from_slice(&substream.current_data.split_to(len)); return Ok(Async::Ready(len)); } // If the remote writing side is closed, return EOF. if !substream.remote_open { return Ok(Async::Ready(0)); } // Try to find a packet of data in the buffer. let mut inner = self.inner.lock(); let next_data_poll = next_match(&mut inner, |elem| { match elem { codec::Elem::Data { substream_id, endpoint, data, .. } if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId] { Some(Some(data.clone())) } codec::Elem::Close { substream_id, endpoint } if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId] { Some(None) } _ => None } }); // We're in a loop, so all we need to do is set `substream.current_data` to the data we // just read and wait for the next iteration. match next_data_poll? { Async::Ready(Some(data)) => substream.current_data = data, Async::Ready(None) => { substream.remote_open = false; return Ok(Async::Ready(0)); }, Async::NotReady =>
, } } } fn write_substream(&self, substream: &mut Self::Substream, buf: &[u8]) -> Poll<usize, IoError> { if !substream.local_open { return Err(IoErrorKind::BrokenPipe.into()); } let mut inner = self.inner.lock(); let to_write = cmp::min(buf.len(), inner.config.split_send_size); let elem = codec::Elem::Data { substream_id: substream.num, data: From::from(&buf[..to_write]), endpoint: substream.endpoint, }; match poll_send(&mut inner, elem)? { Async::Ready(()) => Ok(Async::Ready(to_write)), Async::NotReady => Ok(Async::NotReady) } } fn flush_substream(&self, _substream: &mut Self::Substream) -> Poll<(), IoError> { let mut inner = self.inner.lock(); if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } let inner = &mut *inner; // Avoids borrow errors match inner.inner.poll_flush_notify(&inner.notifier_write, 0)? { Async::Ready(()) => Ok(Async::Ready(())), Async::NotReady => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); Ok(Async::NotReady) } } } fn shutdown_substream(&self, sub: &mut Self::Substream) -> Poll<(), IoError> { if !sub.local_open { return Ok(Async::Ready(())); } let elem = codec::Elem::Close { substream_id: sub.num, endpoint: sub.endpoint, }; let mut inner = self.inner.lock(); let result = poll_send(&mut inner, elem); if let Ok(Async::Ready(())) = result { sub.local_open = false; } result } fn destroy_substream(&self, sub: Self::Substream) { self.inner.lock().buffer.retain(|elem| { elem.substream_id() != sub.num || elem.endpoint() == Some(sub.endpoint) }) } fn is_remote_acknowledged(&self) -> bool { self.inner.lock().is_acknowledged } #[inline] fn close(&self) -> Poll<(), IoError> { let inner = &mut *self.inner.lock(); try_ready!(inner.inner.close_notify(&inner.notifier_write, 0)); inner.is_shutdown = true; Ok(Async::Ready(())) } #[inline] fn flush_all(&self) -> Poll<(), IoError> { let inner = &mut *self.inner.lock(); if inner.is_shutdown { return Ok(Async::Ready(())) } 
inner.inner.poll_flush_notify(&inner.notifier_write, 0) } } /// Active attempt to open an outbound substream. pub struct OutboundSubstream { /// Substream number. num: u32, state: OutboundSubstreamState, } enum OutboundSubstreamState { /// We need to send `Elem` on the underlying stream. SendElem(codec::Elem), /// We need to flush the underlying stream. Flush, /// The substream is open and the `OutboundSubstream` is now useless. Done, } /// Active substream to the remote. pub struct Substream { /// Substream number. num: u32, // Read buffer. Contains data read from `inner` but not yet dispatched by a call to `read()`. current_data: Bytes, endpoint: Endpoint, /// If true, our writing side is still open. local_open: bool, /// If true, the remote writing side is still open. remote_open: bool, }
{ // There was no data packet in the buffer about this substream; maybe it's // because it has been closed. if inner.opened_substreams.contains(&(substream.num, substream.endpoint)) { return Ok(Async::NotReady) } else { return Ok(Async::Ready(0)) } }
conditional_block
lib.rs
// Copyright 2018 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. mod codec; use std::{cmp, iter, mem}; use std::io::{Error as IoError, ErrorKind as IoErrorKind}; use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc}; use bytes::Bytes; use libp2p_core::{ Endpoint, StreamMuxer, upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo, Negotiated} }; use log::{debug, trace}; use parking_lot::Mutex; use fnv::{FnvHashMap, FnvHashSet}; use futures::{prelude::*, executor, future, stream::Fuse, task, task_local, try_ready}; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; /// Configuration for the multiplexer. #[derive(Debug, Clone)] pub struct MplexConfig { /// Maximum number of simultaneously-open substreams. max_substreams: usize, /// Maximum number of elements in the internal buffer. max_buffer_len: usize, /// Behaviour when the buffer size limit is reached. 
max_buffer_behaviour: MaxBufferBehaviour, /// When sending data, split it into frames whose maximum size is this value /// (max 1MByte, as per the Mplex spec). split_send_size: usize, } impl MplexConfig { /// Builds the default configuration. #[inline] pub fn new() -> MplexConfig { Default::default() } /// Sets the maximum number of simultaneously opened substreams, after which an error is /// generated and the connection closes. /// /// A limit is necessary in order to avoid DoS attacks. #[inline] pub fn max_substreams(&mut self, max: usize) -> &mut Self { self.max_substreams = max; self } /// Sets the maximum number of pending incoming messages. /// /// A limit is necessary in order to avoid DoS attacks. #[inline] pub fn max_buffer_len(&mut self, max: usize) -> &mut Self { self.max_buffer_len = max; self } /// Sets the behaviour when the maximum buffer length has been reached. /// /// See the documentation of `MaxBufferBehaviour`. #[inline] pub fn max_buffer_len_behaviour(&mut self, behaviour: MaxBufferBehaviour) -> &mut Self { self.max_buffer_behaviour = behaviour; self } /// Sets the frame size used when sending data. Capped at 1Mbyte as per the /// Mplex spec. 
pub fn split_send_size(&mut self, size: usize) -> &mut Self { let size = cmp::min(size, codec::MAX_FRAME_SIZE); self.split_send_size = size; self } #[inline] fn upgrade<C>(self, i: C) -> Multiplex<C> where C: AsyncRead + AsyncWrite { let max_buffer_len = self.max_buffer_len; Multiplex { inner: Mutex::new(MultiplexInner { error: Ok(()), inner: executor::spawn(Framed::new(i, codec::Codec::new()).fuse()), config: self, buffer: Vec::with_capacity(cmp::min(max_buffer_len, 512)), opened_substreams: Default::default(), next_outbound_stream_id: 0, notifier_read: Arc::new(Notifier { to_notify: Mutex::new(Default::default()), }), notifier_write: Arc::new(Notifier { to_notify: Mutex::new(Default::default()), }), is_shutdown: false, is_acknowledged: false, }) } } } impl Default for MplexConfig { #[inline] fn default() -> MplexConfig
} /// Behaviour when the maximum length of the buffer is reached. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum MaxBufferBehaviour { /// Produce an error on all the substreams. CloseAll, /// No new message will be read from the underlying connection if the buffer is full. /// /// This can potentially introduce a deadlock if you are waiting for a message from a substream /// before processing the messages received on another substream. Block, } impl UpgradeInfo for MplexConfig { type Info = &'static [u8]; type InfoIter = iter::Once<Self::Info>; #[inline] fn protocol_info(&self) -> Self::InfoIter { iter::once(b"/mplex/6.7.0") } } impl<C> InboundUpgrade<C> for MplexConfig where C: AsyncRead + AsyncWrite, { type Output = Multiplex<Negotiated<C>>; type Error = IoError; type Future = future::FutureResult<Self::Output, IoError>; fn upgrade_inbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future { future::ok(self.upgrade(socket)) } } impl<C> OutboundUpgrade<C> for MplexConfig where C: AsyncRead + AsyncWrite, { type Output = Multiplex<Negotiated<C>>; type Error = IoError; type Future = future::FutureResult<Self::Output, IoError>; fn upgrade_outbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future { future::ok(self.upgrade(socket)) } } /// Multiplexer. Implements the `StreamMuxer` trait. pub struct Multiplex<C> { inner: Mutex<MultiplexInner<C>>, } // Struct shared throughout the implementation. struct MultiplexInner<C> { // Error that happened earlier. Should poison any attempt to use this `MultiplexError`. error: Result<(), IoError>, // Underlying stream. inner: executor::Spawn<Fuse<Framed<C, codec::Codec>>>, /// The original configuration. config: MplexConfig, // Buffer of elements pulled from the stream but not processed yet. buffer: Vec<codec::Elem>, // List of Ids of opened substreams. Used to filter out messages that don't belong to any // substream. Note that this is handled exclusively by `next_match`. 
// The `Endpoint` value denotes who initiated the substream from our point of view // (see note [StreamId]). opened_substreams: FnvHashSet<(u32, Endpoint)>, // Id of the next outgoing substream. next_outbound_stream_id: u32, /// List of tasks to notify when a read event happens on the underlying stream. notifier_read: Arc<Notifier>, /// List of tasks to notify when a write event happens on the underlying stream. notifier_write: Arc<Notifier>, /// If true, the connection has been shut down. We need to be careful not to accidentally /// call `Sink::poll_complete` or `Sink::start_send` after `Sink::close`. is_shutdown: bool, /// If true, the remote has sent data to us. is_acknowledged: bool, } struct Notifier { /// List of tasks to notify. to_notify: Mutex<FnvHashMap<usize, task::Task>>, } impl executor::Notify for Notifier { fn notify(&self, _: usize) { let tasks = mem::replace(&mut *self.to_notify.lock(), Default::default()); for (_, task) in tasks { task.notify(); } } } // TODO: replace with another system static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0); task_local!{ static TASK_ID: usize = NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed) } // Note [StreamId]: mplex no longer partitions stream IDs into odd (for initiators) and // even ones (for receivers). Streams are instead identified by a number and whether the flag // is odd (for receivers) or even (for initiators). `Open` frames do not have a flag, but are // sent unidirectional. As a consequence, we need to remember if the stream was initiated by us // or remotely and we store the information from our point of view, i.e. receiving an `Open` frame // is stored as `(<u32>, Listener)`, sending an `Open` frame as `(<u32>, Dialer)`. Receiving // a `Data` frame with flag `MessageReceiver` (= 1) means that we initiated the stream, so the // entry has been stored as `(<u32>, Dialer)`. So, when looking up streams based on frames // received, we have to invert the `Endpoint`, except for `Open`. 
/// Processes elements in `inner` until one matching `filter` is found. /// /// If `NotReady` is returned, the current task is scheduled for later, just like with any `Poll`. /// `Ready(Some())` is almost always returned. An error is returned if the stream is EOF. fn next_match<C, F, O>(inner: &mut MultiplexInner<C>, mut filter: F) -> Poll<O, IoError> where C: AsyncRead + AsyncWrite, F: FnMut(&codec::Elem) -> Option<O>, { // If an error happened earlier, immediately return it. if let Err(ref err) = inner.error { return Err(IoError::new(err.kind(), err.to_string())); } if let Some((offset, out)) = inner.buffer.iter().enumerate().filter_map(|(n, v)| filter(v).map(|v| (n, v))).next() { // The buffer was full and no longer is, so let's notify everything. if inner.buffer.len() == inner.config.max_buffer_len { executor::Notify::notify(&*inner.notifier_read, 0); } inner.buffer.remove(offset); return Ok(Async::Ready(out)); } loop { // Check if we reached max buffer length first. debug_assert!(inner.buffer.len() <= inner.config.max_buffer_len); if inner.buffer.len() == inner.config.max_buffer_len { debug!("Reached mplex maximum buffer length"); match inner.config.max_buffer_behaviour { MaxBufferBehaviour::CloseAll => { inner.error = Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length")); return Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length")); }, MaxBufferBehaviour::Block => { inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady); }, } } let elem = match inner.inner.poll_stream_notify(&inner.notifier_read, 0) { Ok(Async::Ready(Some(item))) => item, Ok(Async::Ready(None)) => return Err(IoErrorKind::BrokenPipe.into()), Ok(Async::NotReady) => { inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady); }, Err(err) => { let err2 = IoError::new(err.kind(), err.to_string()); inner.error = Err(err); return Err(err2); }, }; 
trace!("Received message: {:?}", elem); inner.is_acknowledged = true; // Handle substreams opening/closing. match elem { codec::Elem::Open { substream_id } => { if !inner.opened_substreams.insert((substream_id, Endpoint::Listener)) { debug!("Received open message for substream {} which was already open", substream_id) } } codec::Elem::Close { substream_id, endpoint, .. } | codec::Elem::Reset { substream_id, endpoint, .. } => { inner.opened_substreams.remove(&(substream_id, !endpoint)); } _ => () } if let Some(out) = filter(&elem) { return Ok(Async::Ready(out)); } else { let endpoint = elem.endpoint().unwrap_or(Endpoint::Dialer); if inner.opened_substreams.contains(&(elem.substream_id(), !endpoint)) || elem.is_open_msg() { inner.buffer.push(elem); } else if !elem.is_close_or_reset_msg() { debug!("Ignored message {:?} because the substream wasn't open", elem); } } } } // Small convenience function that tries to write `elem` to the stream. fn poll_send<C>(inner: &mut MultiplexInner<C>, elem: codec::Elem) -> Poll<(), IoError> where C: AsyncRead + AsyncWrite { if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } match inner.inner.start_send_notify(elem, &inner.notifier_write, 0) { Ok(AsyncSink::Ready) => { Ok(Async::Ready(())) }, Ok(AsyncSink::NotReady(_)) => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); Ok(Async::NotReady) }, Err(err) => Err(err) } } impl<C> StreamMuxer for Multiplex<C> where C: AsyncRead + AsyncWrite { type Substream = Substream; type OutboundSubstream = OutboundSubstream; type Error = IoError; fn poll_inbound(&self) -> Poll<Self::Substream, IoError> { let mut inner = self.inner.lock(); if inner.opened_substreams.len() >= inner.config.max_substreams { debug!("Refused substream; reached maximum number of substreams {}", inner.config.max_substreams); return Err(IoError::new(IoErrorKind::ConnectionRefused, "exceeded maximum number of open substreams")); } let num = 
try_ready!(next_match(&mut inner, |elem| { match elem { codec::Elem::Open { substream_id } => Some(*substream_id), _ => None, } })); debug!("Successfully opened inbound substream {}", num); Ok(Async::Ready(Substream { current_data: Bytes::new(), num, endpoint: Endpoint::Listener, local_open: true, remote_open: true, })) } fn open_outbound(&self) -> Self::OutboundSubstream { let mut inner = self.inner.lock(); // Assign a substream ID now. let substream_id = { let n = inner.next_outbound_stream_id; inner.next_outbound_stream_id = inner.next_outbound_stream_id.checked_add(1) .expect("Mplex substream ID overflowed"); n }; inner.opened_substreams.insert((substream_id, Endpoint::Dialer)); OutboundSubstream { num: substream_id, state: OutboundSubstreamState::SendElem(codec::Elem::Open { substream_id }), } } fn poll_outbound(&self, substream: &mut Self::OutboundSubstream) -> Poll<Self::Substream, IoError> { loop { let mut inner = self.inner.lock(); let polling = match substream.state { OutboundSubstreamState::SendElem(ref elem) => { poll_send(&mut inner, elem.clone()) }, OutboundSubstreamState::Flush => { if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } let inner = &mut *inner; // Avoids borrow errors inner.inner.poll_flush_notify(&inner.notifier_write, 0) }, OutboundSubstreamState::Done => { panic!("Polling outbound substream after it's been succesfully open"); }, }; match polling { Ok(Async::Ready(())) => (), Ok(Async::NotReady) => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady) }, Err(err) => { debug!("Failed to open outbound substream {}", substream.num); inner.buffer.retain(|elem| { elem.substream_id() != substream.num || elem.endpoint() == Some(Endpoint::Dialer) }); return Err(err) }, }; drop(inner); // Going to next step. 
match substream.state { OutboundSubstreamState::SendElem(_) => { substream.state = OutboundSubstreamState::Flush; }, OutboundSubstreamState::Flush => { debug!("Successfully opened outbound substream {}", substream.num); substream.state = OutboundSubstreamState::Done; return Ok(Async::Ready(Substream { num: substream.num, current_data: Bytes::new(), endpoint: Endpoint::Dialer, local_open: true, remote_open: true, })); }, OutboundSubstreamState::Done => unreachable!(), } } } #[inline] fn destroy_outbound(&self, _substream: Self::OutboundSubstream) { // Nothing to do. } unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { false } fn read_substream(&self, substream: &mut Self::Substream, buf: &mut [u8]) -> Poll<usize, IoError> { loop { // First, transfer from `current_data`. if !substream.current_data.is_empty() { let len = cmp::min(substream.current_data.len(), buf.len()); buf[..len].copy_from_slice(&substream.current_data.split_to(len)); return Ok(Async::Ready(len)); } // If the remote writing side is closed, return EOF. if !substream.remote_open { return Ok(Async::Ready(0)); } // Try to find a packet of data in the buffer. let mut inner = self.inner.lock(); let next_data_poll = next_match(&mut inner, |elem| { match elem { codec::Elem::Data { substream_id, endpoint, data, .. } if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId] { Some(Some(data.clone())) } codec::Elem::Close { substream_id, endpoint } if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId] { Some(None) } _ => None } }); // We're in a loop, so all we need to do is set `substream.current_data` to the data we // just read and wait for the next iteration. match next_data_poll? 
{ Async::Ready(Some(data)) => substream.current_data = data, Async::Ready(None) => { substream.remote_open = false; return Ok(Async::Ready(0)); }, Async::NotReady => { // There was no data packet in the buffer about this substream; maybe it's // because it has been closed. if inner.opened_substreams.contains(&(substream.num, substream.endpoint)) { return Ok(Async::NotReady) } else { return Ok(Async::Ready(0)) } }, } } } fn write_substream(&self, substream: &mut Self::Substream, buf: &[u8]) -> Poll<usize, IoError> { if !substream.local_open { return Err(IoErrorKind::BrokenPipe.into()); } let mut inner = self.inner.lock(); let to_write = cmp::min(buf.len(), inner.config.split_send_size); let elem = codec::Elem::Data { substream_id: substream.num, data: From::from(&buf[..to_write]), endpoint: substream.endpoint, }; match poll_send(&mut inner, elem)? { Async::Ready(()) => Ok(Async::Ready(to_write)), Async::NotReady => Ok(Async::NotReady) } } fn flush_substream(&self, _substream: &mut Self::Substream) -> Poll<(), IoError> { let mut inner = self.inner.lock(); if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } let inner = &mut *inner; // Avoids borrow errors match inner.inner.poll_flush_notify(&inner.notifier_write, 0)? 
{ Async::Ready(()) => Ok(Async::Ready(())), Async::NotReady => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); Ok(Async::NotReady) } } } fn shutdown_substream(&self, sub: &mut Self::Substream) -> Poll<(), IoError> { if !sub.local_open { return Ok(Async::Ready(())); } let elem = codec::Elem::Close { substream_id: sub.num, endpoint: sub.endpoint, }; let mut inner = self.inner.lock(); let result = poll_send(&mut inner, elem); if let Ok(Async::Ready(())) = result { sub.local_open = false; } result } fn destroy_substream(&self, sub: Self::Substream) { self.inner.lock().buffer.retain(|elem| { elem.substream_id() != sub.num || elem.endpoint() == Some(sub.endpoint) }) } fn is_remote_acknowledged(&self) -> bool { self.inner.lock().is_acknowledged } #[inline] fn close(&self) -> Poll<(), IoError> { let inner = &mut *self.inner.lock(); try_ready!(inner.inner.close_notify(&inner.notifier_write, 0)); inner.is_shutdown = true; Ok(Async::Ready(())) } #[inline] fn flush_all(&self) -> Poll<(), IoError> { let inner = &mut *self.inner.lock(); if inner.is_shutdown { return Ok(Async::Ready(())) } inner.inner.poll_flush_notify(&inner.notifier_write, 0) } } /// Active attempt to open an outbound substream. pub struct OutboundSubstream { /// Substream number. num: u32, state: OutboundSubstreamState, } enum OutboundSubstreamState { /// We need to send `Elem` on the underlying stream. SendElem(codec::Elem), /// We need to flush the underlying stream. Flush, /// The substream is open and the `OutboundSubstream` is now useless. Done, } /// Active substream to the remote. pub struct Substream { /// Substream number. num: u32, // Read buffer. Contains data read from `inner` but not yet dispatched by a call to `read()`. current_data: Bytes, endpoint: Endpoint, /// If true, our writing side is still open. local_open: bool, /// If true, the remote writing side is still open. remote_open: bool, }
{ MplexConfig { max_substreams: 128, max_buffer_len: 4096, max_buffer_behaviour: MaxBufferBehaviour::CloseAll, split_send_size: 1024, } }
identifier_body
lib.rs
// Copyright 2018 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. mod codec; use std::{cmp, iter, mem}; use std::io::{Error as IoError, ErrorKind as IoErrorKind}; use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc}; use bytes::Bytes; use libp2p_core::{ Endpoint, StreamMuxer, upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo, Negotiated} }; use log::{debug, trace}; use parking_lot::Mutex; use fnv::{FnvHashMap, FnvHashSet}; use futures::{prelude::*, executor, future, stream::Fuse, task, task_local, try_ready}; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; /// Configuration for the multiplexer. #[derive(Debug, Clone)] pub struct MplexConfig { /// Maximum number of simultaneously-open substreams. max_substreams: usize, /// Maximum number of elements in the internal buffer. max_buffer_len: usize, /// Behaviour when the buffer size limit is reached. 
max_buffer_behaviour: MaxBufferBehaviour, /// When sending data, split it into frames whose maximum size is this value /// (max 1MByte, as per the Mplex spec). split_send_size: usize, } impl MplexConfig { /// Builds the default configuration. #[inline] pub fn new() -> MplexConfig { Default::default() } /// Sets the maximum number of simultaneously opened substreams, after which an error is /// generated and the connection closes. /// /// A limit is necessary in order to avoid DoS attacks. #[inline] pub fn max_substreams(&mut self, max: usize) -> &mut Self { self.max_substreams = max; self } /// Sets the maximum number of pending incoming messages. /// /// A limit is necessary in order to avoid DoS attacks. #[inline] pub fn max_buffer_len(&mut self, max: usize) -> &mut Self { self.max_buffer_len = max; self } /// Sets the behaviour when the maximum buffer length has been reached. /// /// See the documentation of `MaxBufferBehaviour`. #[inline] pub fn max_buffer_len_behaviour(&mut self, behaviour: MaxBufferBehaviour) -> &mut Self { self.max_buffer_behaviour = behaviour; self } /// Sets the frame size used when sending data. Capped at 1Mbyte as per the /// Mplex spec. pub fn
(&mut self, size: usize) -> &mut Self { let size = cmp::min(size, codec::MAX_FRAME_SIZE); self.split_send_size = size; self } #[inline] fn upgrade<C>(self, i: C) -> Multiplex<C> where C: AsyncRead + AsyncWrite { let max_buffer_len = self.max_buffer_len; Multiplex { inner: Mutex::new(MultiplexInner { error: Ok(()), inner: executor::spawn(Framed::new(i, codec::Codec::new()).fuse()), config: self, buffer: Vec::with_capacity(cmp::min(max_buffer_len, 512)), opened_substreams: Default::default(), next_outbound_stream_id: 0, notifier_read: Arc::new(Notifier { to_notify: Mutex::new(Default::default()), }), notifier_write: Arc::new(Notifier { to_notify: Mutex::new(Default::default()), }), is_shutdown: false, is_acknowledged: false, }) } } } impl Default for MplexConfig { #[inline] fn default() -> MplexConfig { MplexConfig { max_substreams: 128, max_buffer_len: 4096, max_buffer_behaviour: MaxBufferBehaviour::CloseAll, split_send_size: 1024, } } } /// Behaviour when the maximum length of the buffer is reached. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum MaxBufferBehaviour { /// Produce an error on all the substreams. CloseAll, /// No new message will be read from the underlying connection if the buffer is full. /// /// This can potentially introduce a deadlock if you are waiting for a message from a substream /// before processing the messages received on another substream. 
Block, } impl UpgradeInfo for MplexConfig { type Info = &'static [u8]; type InfoIter = iter::Once<Self::Info>; #[inline] fn protocol_info(&self) -> Self::InfoIter { iter::once(b"/mplex/6.7.0") } } impl<C> InboundUpgrade<C> for MplexConfig where C: AsyncRead + AsyncWrite, { type Output = Multiplex<Negotiated<C>>; type Error = IoError; type Future = future::FutureResult<Self::Output, IoError>; fn upgrade_inbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future { future::ok(self.upgrade(socket)) } } impl<C> OutboundUpgrade<C> for MplexConfig where C: AsyncRead + AsyncWrite, { type Output = Multiplex<Negotiated<C>>; type Error = IoError; type Future = future::FutureResult<Self::Output, IoError>; fn upgrade_outbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future { future::ok(self.upgrade(socket)) } } /// Multiplexer. Implements the `StreamMuxer` trait. pub struct Multiplex<C> { inner: Mutex<MultiplexInner<C>>, } // Struct shared throughout the implementation. struct MultiplexInner<C> { // Error that happened earlier. Should poison any attempt to use this `MultiplexError`. error: Result<(), IoError>, // Underlying stream. inner: executor::Spawn<Fuse<Framed<C, codec::Codec>>>, /// The original configuration. config: MplexConfig, // Buffer of elements pulled from the stream but not processed yet. buffer: Vec<codec::Elem>, // List of Ids of opened substreams. Used to filter out messages that don't belong to any // substream. Note that this is handled exclusively by `next_match`. // The `Endpoint` value denotes who initiated the substream from our point of view // (see note [StreamId]). opened_substreams: FnvHashSet<(u32, Endpoint)>, // Id of the next outgoing substream. next_outbound_stream_id: u32, /// List of tasks to notify when a read event happens on the underlying stream. notifier_read: Arc<Notifier>, /// List of tasks to notify when a write event happens on the underlying stream. 
notifier_write: Arc<Notifier>, /// If true, the connection has been shut down. We need to be careful not to accidentally /// call `Sink::poll_complete` or `Sink::start_send` after `Sink::close`. is_shutdown: bool, /// If true, the remote has sent data to us. is_acknowledged: bool, } struct Notifier { /// List of tasks to notify. to_notify: Mutex<FnvHashMap<usize, task::Task>>, } impl executor::Notify for Notifier { fn notify(&self, _: usize) { let tasks = mem::replace(&mut *self.to_notify.lock(), Default::default()); for (_, task) in tasks { task.notify(); } } } // TODO: replace with another system static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0); task_local!{ static TASK_ID: usize = NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed) } // Note [StreamId]: mplex no longer partitions stream IDs into odd (for initiators) and // even ones (for receivers). Streams are instead identified by a number and whether the flag // is odd (for receivers) or even (for initiators). `Open` frames do not have a flag, but are // sent unidirectional. As a consequence, we need to remember if the stream was initiated by us // or remotely and we store the information from our point of view, i.e. receiving an `Open` frame // is stored as `(<u32>, Listener)`, sending an `Open` frame as `(<u32>, Dialer)`. Receiving // a `Data` frame with flag `MessageReceiver` (= 1) means that we initiated the stream, so the // entry has been stored as `(<u32>, Dialer)`. So, when looking up streams based on frames // received, we have to invert the `Endpoint`, except for `Open`. /// Processes elements in `inner` until one matching `filter` is found. /// /// If `NotReady` is returned, the current task is scheduled for later, just like with any `Poll`. /// `Ready(Some())` is almost always returned. An error is returned if the stream is EOF. 
fn next_match<C, F, O>(inner: &mut MultiplexInner<C>, mut filter: F) -> Poll<O, IoError> where C: AsyncRead + AsyncWrite, F: FnMut(&codec::Elem) -> Option<O>, { // If an error happened earlier, immediately return it. if let Err(ref err) = inner.error { return Err(IoError::new(err.kind(), err.to_string())); } if let Some((offset, out)) = inner.buffer.iter().enumerate().filter_map(|(n, v)| filter(v).map(|v| (n, v))).next() { // The buffer was full and no longer is, so let's notify everything. if inner.buffer.len() == inner.config.max_buffer_len { executor::Notify::notify(&*inner.notifier_read, 0); } inner.buffer.remove(offset); return Ok(Async::Ready(out)); } loop { // Check if we reached max buffer length first. debug_assert!(inner.buffer.len() <= inner.config.max_buffer_len); if inner.buffer.len() == inner.config.max_buffer_len { debug!("Reached mplex maximum buffer length"); match inner.config.max_buffer_behaviour { MaxBufferBehaviour::CloseAll => { inner.error = Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length")); return Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length")); }, MaxBufferBehaviour::Block => { inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady); }, } } let elem = match inner.inner.poll_stream_notify(&inner.notifier_read, 0) { Ok(Async::Ready(Some(item))) => item, Ok(Async::Ready(None)) => return Err(IoErrorKind::BrokenPipe.into()), Ok(Async::NotReady) => { inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady); }, Err(err) => { let err2 = IoError::new(err.kind(), err.to_string()); inner.error = Err(err); return Err(err2); }, }; trace!("Received message: {:?}", elem); inner.is_acknowledged = true; // Handle substreams opening/closing. 
match elem { codec::Elem::Open { substream_id } => { if !inner.opened_substreams.insert((substream_id, Endpoint::Listener)) { debug!("Received open message for substream {} which was already open", substream_id) } } codec::Elem::Close { substream_id, endpoint, .. } | codec::Elem::Reset { substream_id, endpoint, .. } => { inner.opened_substreams.remove(&(substream_id, !endpoint)); } _ => () } if let Some(out) = filter(&elem) { return Ok(Async::Ready(out)); } else { let endpoint = elem.endpoint().unwrap_or(Endpoint::Dialer); if inner.opened_substreams.contains(&(elem.substream_id(), !endpoint)) || elem.is_open_msg() { inner.buffer.push(elem); } else if !elem.is_close_or_reset_msg() { debug!("Ignored message {:?} because the substream wasn't open", elem); } } } } // Small convenience function that tries to write `elem` to the stream. fn poll_send<C>(inner: &mut MultiplexInner<C>, elem: codec::Elem) -> Poll<(), IoError> where C: AsyncRead + AsyncWrite { if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } match inner.inner.start_send_notify(elem, &inner.notifier_write, 0) { Ok(AsyncSink::Ready) => { Ok(Async::Ready(())) }, Ok(AsyncSink::NotReady(_)) => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); Ok(Async::NotReady) }, Err(err) => Err(err) } } impl<C> StreamMuxer for Multiplex<C> where C: AsyncRead + AsyncWrite { type Substream = Substream; type OutboundSubstream = OutboundSubstream; type Error = IoError; fn poll_inbound(&self) -> Poll<Self::Substream, IoError> { let mut inner = self.inner.lock(); if inner.opened_substreams.len() >= inner.config.max_substreams { debug!("Refused substream; reached maximum number of substreams {}", inner.config.max_substreams); return Err(IoError::new(IoErrorKind::ConnectionRefused, "exceeded maximum number of open substreams")); } let num = try_ready!(next_match(&mut inner, |elem| { match elem { codec::Elem::Open { substream_id } => 
Some(*substream_id), _ => None, } })); debug!("Successfully opened inbound substream {}", num); Ok(Async::Ready(Substream { current_data: Bytes::new(), num, endpoint: Endpoint::Listener, local_open: true, remote_open: true, })) } fn open_outbound(&self) -> Self::OutboundSubstream { let mut inner = self.inner.lock(); // Assign a substream ID now. let substream_id = { let n = inner.next_outbound_stream_id; inner.next_outbound_stream_id = inner.next_outbound_stream_id.checked_add(1) .expect("Mplex substream ID overflowed"); n }; inner.opened_substreams.insert((substream_id, Endpoint::Dialer)); OutboundSubstream { num: substream_id, state: OutboundSubstreamState::SendElem(codec::Elem::Open { substream_id }), } } fn poll_outbound(&self, substream: &mut Self::OutboundSubstream) -> Poll<Self::Substream, IoError> { loop { let mut inner = self.inner.lock(); let polling = match substream.state { OutboundSubstreamState::SendElem(ref elem) => { poll_send(&mut inner, elem.clone()) }, OutboundSubstreamState::Flush => { if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } let inner = &mut *inner; // Avoids borrow errors inner.inner.poll_flush_notify(&inner.notifier_write, 0) }, OutboundSubstreamState::Done => { panic!("Polling outbound substream after it's been succesfully open"); }, }; match polling { Ok(Async::Ready(())) => (), Ok(Async::NotReady) => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady) }, Err(err) => { debug!("Failed to open outbound substream {}", substream.num); inner.buffer.retain(|elem| { elem.substream_id() != substream.num || elem.endpoint() == Some(Endpoint::Dialer) }); return Err(err) }, }; drop(inner); // Going to next step. 
match substream.state { OutboundSubstreamState::SendElem(_) => { substream.state = OutboundSubstreamState::Flush; }, OutboundSubstreamState::Flush => { debug!("Successfully opened outbound substream {}", substream.num); substream.state = OutboundSubstreamState::Done; return Ok(Async::Ready(Substream { num: substream.num, current_data: Bytes::new(), endpoint: Endpoint::Dialer, local_open: true, remote_open: true, })); }, OutboundSubstreamState::Done => unreachable!(), } } } #[inline] fn destroy_outbound(&self, _substream: Self::OutboundSubstream) { // Nothing to do. } unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { false } fn read_substream(&self, substream: &mut Self::Substream, buf: &mut [u8]) -> Poll<usize, IoError> { loop { // First, transfer from `current_data`. if !substream.current_data.is_empty() { let len = cmp::min(substream.current_data.len(), buf.len()); buf[..len].copy_from_slice(&substream.current_data.split_to(len)); return Ok(Async::Ready(len)); } // If the remote writing side is closed, return EOF. if !substream.remote_open { return Ok(Async::Ready(0)); } // Try to find a packet of data in the buffer. let mut inner = self.inner.lock(); let next_data_poll = next_match(&mut inner, |elem| { match elem { codec::Elem::Data { substream_id, endpoint, data, .. } if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId] { Some(Some(data.clone())) } codec::Elem::Close { substream_id, endpoint } if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId] { Some(None) } _ => None } }); // We're in a loop, so all we need to do is set `substream.current_data` to the data we // just read and wait for the next iteration. match next_data_poll? 
{ Async::Ready(Some(data)) => substream.current_data = data, Async::Ready(None) => { substream.remote_open = false; return Ok(Async::Ready(0)); }, Async::NotReady => { // There was no data packet in the buffer about this substream; maybe it's // because it has been closed. if inner.opened_substreams.contains(&(substream.num, substream.endpoint)) { return Ok(Async::NotReady) } else { return Ok(Async::Ready(0)) } }, } } } fn write_substream(&self, substream: &mut Self::Substream, buf: &[u8]) -> Poll<usize, IoError> { if !substream.local_open { return Err(IoErrorKind::BrokenPipe.into()); } let mut inner = self.inner.lock(); let to_write = cmp::min(buf.len(), inner.config.split_send_size); let elem = codec::Elem::Data { substream_id: substream.num, data: From::from(&buf[..to_write]), endpoint: substream.endpoint, }; match poll_send(&mut inner, elem)? { Async::Ready(()) => Ok(Async::Ready(to_write)), Async::NotReady => Ok(Async::NotReady) } } fn flush_substream(&self, _substream: &mut Self::Substream) -> Poll<(), IoError> { let mut inner = self.inner.lock(); if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } let inner = &mut *inner; // Avoids borrow errors match inner.inner.poll_flush_notify(&inner.notifier_write, 0)? 
{ Async::Ready(()) => Ok(Async::Ready(())), Async::NotReady => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); Ok(Async::NotReady) } } } fn shutdown_substream(&self, sub: &mut Self::Substream) -> Poll<(), IoError> { if !sub.local_open { return Ok(Async::Ready(())); } let elem = codec::Elem::Close { substream_id: sub.num, endpoint: sub.endpoint, }; let mut inner = self.inner.lock(); let result = poll_send(&mut inner, elem); if let Ok(Async::Ready(())) = result { sub.local_open = false; } result } fn destroy_substream(&self, sub: Self::Substream) { self.inner.lock().buffer.retain(|elem| { elem.substream_id() != sub.num || elem.endpoint() == Some(sub.endpoint) }) } fn is_remote_acknowledged(&self) -> bool { self.inner.lock().is_acknowledged } #[inline] fn close(&self) -> Poll<(), IoError> { let inner = &mut *self.inner.lock(); try_ready!(inner.inner.close_notify(&inner.notifier_write, 0)); inner.is_shutdown = true; Ok(Async::Ready(())) } #[inline] fn flush_all(&self) -> Poll<(), IoError> { let inner = &mut *self.inner.lock(); if inner.is_shutdown { return Ok(Async::Ready(())) } inner.inner.poll_flush_notify(&inner.notifier_write, 0) } } /// Active attempt to open an outbound substream. pub struct OutboundSubstream { /// Substream number. num: u32, state: OutboundSubstreamState, } enum OutboundSubstreamState { /// We need to send `Elem` on the underlying stream. SendElem(codec::Elem), /// We need to flush the underlying stream. Flush, /// The substream is open and the `OutboundSubstream` is now useless. Done, } /// Active substream to the remote. pub struct Substream { /// Substream number. num: u32, // Read buffer. Contains data read from `inner` but not yet dispatched by a call to `read()`. current_data: Bytes, endpoint: Endpoint, /// If true, our writing side is still open. local_open: bool, /// If true, the remote writing side is still open. remote_open: bool, }
split_send_size
identifier_name
lib.rs
// Copyright 2018 Parity Technologies (UK) Ltd. // // Permission is hereby granted, free of charge, to any person obtaining a // copy of this software and associated documentation files (the "Software"), // to deal in the Software without restriction, including without limitation // the rights to use, copy, modify, merge, publish, distribute, sublicense, // and/or sell copies of the Software, and to permit persons to whom the // Software is furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS // OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING // FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER // DEALINGS IN THE SOFTWARE. mod codec; use std::{cmp, iter, mem}; use std::io::{Error as IoError, ErrorKind as IoErrorKind}; use std::sync::{atomic::AtomicUsize, atomic::Ordering, Arc}; use bytes::Bytes; use libp2p_core::{ Endpoint, StreamMuxer, upgrade::{InboundUpgrade, OutboundUpgrade, UpgradeInfo, Negotiated} }; use log::{debug, trace}; use parking_lot::Mutex; use fnv::{FnvHashMap, FnvHashSet}; use futures::{prelude::*, executor, future, stream::Fuse, task, task_local, try_ready}; use tokio_codec::Framed; use tokio_io::{AsyncRead, AsyncWrite}; /// Configuration for the multiplexer. #[derive(Debug, Clone)] pub struct MplexConfig { /// Maximum number of simultaneously-open substreams. max_substreams: usize, /// Maximum number of elements in the internal buffer. max_buffer_len: usize, /// Behaviour when the buffer size limit is reached. 
max_buffer_behaviour: MaxBufferBehaviour, /// When sending data, split it into frames whose maximum size is this value /// (max 1MByte, as per the Mplex spec). split_send_size: usize, } impl MplexConfig { /// Builds the default configuration. #[inline] pub fn new() -> MplexConfig { Default::default() } /// Sets the maximum number of simultaneously opened substreams, after which an error is /// generated and the connection closes. /// /// A limit is necessary in order to avoid DoS attacks. #[inline] pub fn max_substreams(&mut self, max: usize) -> &mut Self { self.max_substreams = max; self } /// Sets the maximum number of pending incoming messages. /// /// A limit is necessary in order to avoid DoS attacks. #[inline] pub fn max_buffer_len(&mut self, max: usize) -> &mut Self { self.max_buffer_len = max; self } /// Sets the behaviour when the maximum buffer length has been reached. /// /// See the documentation of `MaxBufferBehaviour`. #[inline] pub fn max_buffer_len_behaviour(&mut self, behaviour: MaxBufferBehaviour) -> &mut Self { self.max_buffer_behaviour = behaviour; self } /// Sets the frame size used when sending data. Capped at 1Mbyte as per the /// Mplex spec. 
pub fn split_send_size(&mut self, size: usize) -> &mut Self { let size = cmp::min(size, codec::MAX_FRAME_SIZE); self.split_send_size = size; self } #[inline] fn upgrade<C>(self, i: C) -> Multiplex<C> where C: AsyncRead + AsyncWrite { let max_buffer_len = self.max_buffer_len; Multiplex { inner: Mutex::new(MultiplexInner { error: Ok(()), inner: executor::spawn(Framed::new(i, codec::Codec::new()).fuse()), config: self, buffer: Vec::with_capacity(cmp::min(max_buffer_len, 512)), opened_substreams: Default::default(), next_outbound_stream_id: 0, notifier_read: Arc::new(Notifier { to_notify: Mutex::new(Default::default()), }), notifier_write: Arc::new(Notifier { to_notify: Mutex::new(Default::default()), }), is_shutdown: false, is_acknowledged: false, }) } } } impl Default for MplexConfig { #[inline] fn default() -> MplexConfig { MplexConfig { max_substreams: 128, max_buffer_len: 4096, max_buffer_behaviour: MaxBufferBehaviour::CloseAll, split_send_size: 1024, } } } /// Behaviour when the maximum length of the buffer is reached. #[derive(Debug, Copy, Clone, PartialEq, Eq)] pub enum MaxBufferBehaviour { /// Produce an error on all the substreams. CloseAll, /// No new message will be read from the underlying connection if the buffer is full. /// /// This can potentially introduce a deadlock if you are waiting for a message from a substream /// before processing the messages received on another substream. 
Block, } impl UpgradeInfo for MplexConfig { type Info = &'static [u8]; type InfoIter = iter::Once<Self::Info>; #[inline] fn protocol_info(&self) -> Self::InfoIter { iter::once(b"/mplex/6.7.0") } } impl<C> InboundUpgrade<C> for MplexConfig where C: AsyncRead + AsyncWrite, { type Output = Multiplex<Negotiated<C>>; type Error = IoError; type Future = future::FutureResult<Self::Output, IoError>; fn upgrade_inbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future { future::ok(self.upgrade(socket)) } } impl<C> OutboundUpgrade<C> for MplexConfig where C: AsyncRead + AsyncWrite, { type Output = Multiplex<Negotiated<C>>; type Error = IoError; type Future = future::FutureResult<Self::Output, IoError>; fn upgrade_outbound(self, socket: Negotiated<C>, _: Self::Info) -> Self::Future { future::ok(self.upgrade(socket)) } } /// Multiplexer. Implements the `StreamMuxer` trait. pub struct Multiplex<C> { inner: Mutex<MultiplexInner<C>>, } // Struct shared throughout the implementation. struct MultiplexInner<C> { // Error that happened earlier. Should poison any attempt to use this `MultiplexError`. error: Result<(), IoError>, // Underlying stream. inner: executor::Spawn<Fuse<Framed<C, codec::Codec>>>, /// The original configuration. config: MplexConfig, // Buffer of elements pulled from the stream but not processed yet. buffer: Vec<codec::Elem>, // List of Ids of opened substreams. Used to filter out messages that don't belong to any // substream. Note that this is handled exclusively by `next_match`. // The `Endpoint` value denotes who initiated the substream from our point of view // (see note [StreamId]). opened_substreams: FnvHashSet<(u32, Endpoint)>, // Id of the next outgoing substream. next_outbound_stream_id: u32, /// List of tasks to notify when a read event happens on the underlying stream. notifier_read: Arc<Notifier>, /// List of tasks to notify when a write event happens on the underlying stream. 
notifier_write: Arc<Notifier>, /// If true, the connection has been shut down. We need to be careful not to accidentally /// call `Sink::poll_complete` or `Sink::start_send` after `Sink::close`. is_shutdown: bool, /// If true, the remote has sent data to us. is_acknowledged: bool, } struct Notifier { /// List of tasks to notify. to_notify: Mutex<FnvHashMap<usize, task::Task>>, } impl executor::Notify for Notifier { fn notify(&self, _: usize) { let tasks = mem::replace(&mut *self.to_notify.lock(), Default::default()); for (_, task) in tasks { task.notify(); } } } // TODO: replace with another system static NEXT_TASK_ID: AtomicUsize = AtomicUsize::new(0); task_local!{ static TASK_ID: usize = NEXT_TASK_ID.fetch_add(1, Ordering::Relaxed) } // Note [StreamId]: mplex no longer partitions stream IDs into odd (for initiators) and // even ones (for receivers). Streams are instead identified by a number and whether the flag // is odd (for receivers) or even (for initiators). `Open` frames do not have a flag, but are // sent unidirectional. As a consequence, we need to remember if the stream was initiated by us // or remotely and we store the information from our point of view, i.e. receiving an `Open` frame // is stored as `(<u32>, Listener)`, sending an `Open` frame as `(<u32>, Dialer)`. Receiving // a `Data` frame with flag `MessageReceiver` (= 1) means that we initiated the stream, so the // entry has been stored as `(<u32>, Dialer)`. So, when looking up streams based on frames // received, we have to invert the `Endpoint`, except for `Open`. /// Processes elements in `inner` until one matching `filter` is found. /// /// If `NotReady` is returned, the current task is scheduled for later, just like with any `Poll`. /// `Ready(Some())` is almost always returned. An error is returned if the stream is EOF. 
fn next_match<C, F, O>(inner: &mut MultiplexInner<C>, mut filter: F) -> Poll<O, IoError> where C: AsyncRead + AsyncWrite, F: FnMut(&codec::Elem) -> Option<O>, { // If an error happened earlier, immediately return it. if let Err(ref err) = inner.error { return Err(IoError::new(err.kind(), err.to_string())); } if let Some((offset, out)) = inner.buffer.iter().enumerate().filter_map(|(n, v)| filter(v).map(|v| (n, v))).next() { // The buffer was full and no longer is, so let's notify everything. if inner.buffer.len() == inner.config.max_buffer_len { executor::Notify::notify(&*inner.notifier_read, 0); } inner.buffer.remove(offset); return Ok(Async::Ready(out)); } loop { // Check if we reached max buffer length first. debug_assert!(inner.buffer.len() <= inner.config.max_buffer_len); if inner.buffer.len() == inner.config.max_buffer_len { debug!("Reached mplex maximum buffer length"); match inner.config.max_buffer_behaviour { MaxBufferBehaviour::CloseAll => { inner.error = Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length")); return Err(IoError::new(IoErrorKind::Other, "reached maximum buffer length")); }, MaxBufferBehaviour::Block => { inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady); }, } } let elem = match inner.inner.poll_stream_notify(&inner.notifier_read, 0) { Ok(Async::Ready(Some(item))) => item, Ok(Async::Ready(None)) => return Err(IoErrorKind::BrokenPipe.into()), Ok(Async::NotReady) => { inner.notifier_read.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady); }, Err(err) => { let err2 = IoError::new(err.kind(), err.to_string()); inner.error = Err(err); return Err(err2); }, }; trace!("Received message: {:?}", elem); inner.is_acknowledged = true; // Handle substreams opening/closing. 
match elem { codec::Elem::Open { substream_id } => { if !inner.opened_substreams.insert((substream_id, Endpoint::Listener)) { debug!("Received open message for substream {} which was already open", substream_id) } } codec::Elem::Close { substream_id, endpoint, .. } | codec::Elem::Reset { substream_id, endpoint, .. } => { inner.opened_substreams.remove(&(substream_id, !endpoint)); } _ => () } if let Some(out) = filter(&elem) { return Ok(Async::Ready(out)); } else { let endpoint = elem.endpoint().unwrap_or(Endpoint::Dialer); if inner.opened_substreams.contains(&(elem.substream_id(), !endpoint)) || elem.is_open_msg() { inner.buffer.push(elem); } else if !elem.is_close_or_reset_msg() { debug!("Ignored message {:?} because the substream wasn't open", elem); } } } }
// Small convenience function that tries to write `elem` to the stream. fn poll_send<C>(inner: &mut MultiplexInner<C>, elem: codec::Elem) -> Poll<(), IoError> where C: AsyncRead + AsyncWrite { if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } match inner.inner.start_send_notify(elem, &inner.notifier_write, 0) { Ok(AsyncSink::Ready) => { Ok(Async::Ready(())) }, Ok(AsyncSink::NotReady(_)) => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); Ok(Async::NotReady) }, Err(err) => Err(err) } } impl<C> StreamMuxer for Multiplex<C> where C: AsyncRead + AsyncWrite { type Substream = Substream; type OutboundSubstream = OutboundSubstream; type Error = IoError; fn poll_inbound(&self) -> Poll<Self::Substream, IoError> { let mut inner = self.inner.lock(); if inner.opened_substreams.len() >= inner.config.max_substreams { debug!("Refused substream; reached maximum number of substreams {}", inner.config.max_substreams); return Err(IoError::new(IoErrorKind::ConnectionRefused, "exceeded maximum number of open substreams")); } let num = try_ready!(next_match(&mut inner, |elem| { match elem { codec::Elem::Open { substream_id } => Some(*substream_id), _ => None, } })); debug!("Successfully opened inbound substream {}", num); Ok(Async::Ready(Substream { current_data: Bytes::new(), num, endpoint: Endpoint::Listener, local_open: true, remote_open: true, })) } fn open_outbound(&self) -> Self::OutboundSubstream { let mut inner = self.inner.lock(); // Assign a substream ID now. 
let substream_id = { let n = inner.next_outbound_stream_id; inner.next_outbound_stream_id = inner.next_outbound_stream_id.checked_add(1) .expect("Mplex substream ID overflowed"); n }; inner.opened_substreams.insert((substream_id, Endpoint::Dialer)); OutboundSubstream { num: substream_id, state: OutboundSubstreamState::SendElem(codec::Elem::Open { substream_id }), } } fn poll_outbound(&self, substream: &mut Self::OutboundSubstream) -> Poll<Self::Substream, IoError> { loop { let mut inner = self.inner.lock(); let polling = match substream.state { OutboundSubstreamState::SendElem(ref elem) => { poll_send(&mut inner, elem.clone()) }, OutboundSubstreamState::Flush => { if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } let inner = &mut *inner; // Avoids borrow errors inner.inner.poll_flush_notify(&inner.notifier_write, 0) }, OutboundSubstreamState::Done => { panic!("Polling outbound substream after it's been succesfully open"); }, }; match polling { Ok(Async::Ready(())) => (), Ok(Async::NotReady) => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); return Ok(Async::NotReady) }, Err(err) => { debug!("Failed to open outbound substream {}", substream.num); inner.buffer.retain(|elem| { elem.substream_id() != substream.num || elem.endpoint() == Some(Endpoint::Dialer) }); return Err(err) }, }; drop(inner); // Going to next step. match substream.state { OutboundSubstreamState::SendElem(_) => { substream.state = OutboundSubstreamState::Flush; }, OutboundSubstreamState::Flush => { debug!("Successfully opened outbound substream {}", substream.num); substream.state = OutboundSubstreamState::Done; return Ok(Async::Ready(Substream { num: substream.num, current_data: Bytes::new(), endpoint: Endpoint::Dialer, local_open: true, remote_open: true, })); }, OutboundSubstreamState::Done => unreachable!(), } } } #[inline] fn destroy_outbound(&self, _substream: Self::OutboundSubstream) { // Nothing to do. 
} unsafe fn prepare_uninitialized_buffer(&self, _: &mut [u8]) -> bool { false } fn read_substream(&self, substream: &mut Self::Substream, buf: &mut [u8]) -> Poll<usize, IoError> { loop { // First, transfer from `current_data`. if !substream.current_data.is_empty() { let len = cmp::min(substream.current_data.len(), buf.len()); buf[..len].copy_from_slice(&substream.current_data.split_to(len)); return Ok(Async::Ready(len)); } // If the remote writing side is closed, return EOF. if !substream.remote_open { return Ok(Async::Ready(0)); } // Try to find a packet of data in the buffer. let mut inner = self.inner.lock(); let next_data_poll = next_match(&mut inner, |elem| { match elem { codec::Elem::Data { substream_id, endpoint, data, .. } if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId] { Some(Some(data.clone())) } codec::Elem::Close { substream_id, endpoint } if *substream_id == substream.num && *endpoint != substream.endpoint => // see note [StreamId] { Some(None) } _ => None } }); // We're in a loop, so all we need to do is set `substream.current_data` to the data we // just read and wait for the next iteration. match next_data_poll? { Async::Ready(Some(data)) => substream.current_data = data, Async::Ready(None) => { substream.remote_open = false; return Ok(Async::Ready(0)); }, Async::NotReady => { // There was no data packet in the buffer about this substream; maybe it's // because it has been closed. 
if inner.opened_substreams.contains(&(substream.num, substream.endpoint)) { return Ok(Async::NotReady) } else { return Ok(Async::Ready(0)) } }, } } } fn write_substream(&self, substream: &mut Self::Substream, buf: &[u8]) -> Poll<usize, IoError> { if !substream.local_open { return Err(IoErrorKind::BrokenPipe.into()); } let mut inner = self.inner.lock(); let to_write = cmp::min(buf.len(), inner.config.split_send_size); let elem = codec::Elem::Data { substream_id: substream.num, data: From::from(&buf[..to_write]), endpoint: substream.endpoint, }; match poll_send(&mut inner, elem)? { Async::Ready(()) => Ok(Async::Ready(to_write)), Async::NotReady => Ok(Async::NotReady) } } fn flush_substream(&self, _substream: &mut Self::Substream) -> Poll<(), IoError> { let mut inner = self.inner.lock(); if inner.is_shutdown { return Err(IoError::new(IoErrorKind::Other, "connection is shut down")) } let inner = &mut *inner; // Avoids borrow errors match inner.inner.poll_flush_notify(&inner.notifier_write, 0)? 
{ Async::Ready(()) => Ok(Async::Ready(())), Async::NotReady => { inner.notifier_write.to_notify.lock().insert(TASK_ID.with(|&t| t), task::current()); Ok(Async::NotReady) } } } fn shutdown_substream(&self, sub: &mut Self::Substream) -> Poll<(), IoError> { if !sub.local_open { return Ok(Async::Ready(())); } let elem = codec::Elem::Close { substream_id: sub.num, endpoint: sub.endpoint, }; let mut inner = self.inner.lock(); let result = poll_send(&mut inner, elem); if let Ok(Async::Ready(())) = result { sub.local_open = false; } result } fn destroy_substream(&self, sub: Self::Substream) { self.inner.lock().buffer.retain(|elem| { elem.substream_id() != sub.num || elem.endpoint() == Some(sub.endpoint) }) } fn is_remote_acknowledged(&self) -> bool { self.inner.lock().is_acknowledged } #[inline] fn close(&self) -> Poll<(), IoError> { let inner = &mut *self.inner.lock(); try_ready!(inner.inner.close_notify(&inner.notifier_write, 0)); inner.is_shutdown = true; Ok(Async::Ready(())) } #[inline] fn flush_all(&self) -> Poll<(), IoError> { let inner = &mut *self.inner.lock(); if inner.is_shutdown { return Ok(Async::Ready(())) } inner.inner.poll_flush_notify(&inner.notifier_write, 0) } } /// Active attempt to open an outbound substream. pub struct OutboundSubstream { /// Substream number. num: u32, state: OutboundSubstreamState, } enum OutboundSubstreamState { /// We need to send `Elem` on the underlying stream. SendElem(codec::Elem), /// We need to flush the underlying stream. Flush, /// The substream is open and the `OutboundSubstream` is now useless. Done, } /// Active substream to the remote. pub struct Substream { /// Substream number. num: u32, // Read buffer. Contains data read from `inner` but not yet dispatched by a call to `read()`. current_data: Bytes, endpoint: Endpoint, /// If true, our writing side is still open. local_open: bool, /// If true, the remote writing side is still open. remote_open: bool, }
random_line_split
app.component.ts
// @ts-nocheck import { Component, OnInit } from '@angular/core'; import * as tf from '@tensorflow/tfjs'; import * as tfvis from '@tensorflow/tfjs-vis'; class CharacterTable { /** * Constructor of CharacterTable. * @param chars A string that contains the characters that can appear * in the input. */ constructor(chars) { this.chars = chars; this.charIndices = {}; this.indicesChar = {}; this.size = this.chars.length; for (let i = 0; i < this.size; ++i) { const char = this.chars[i]; if (this.charIndices[char] != null) { throw new Error(`Duplicate character '${char}'`); } this.charIndices[this.chars[i]] = i; this.indicesChar[i] = this.chars[i]; } } /** * Convert a string into a one-hot encoded tensor. * * @param str The input string. * @param numRows Number of rows of the output tensor. * @returns The one-hot encoded 2D tensor. * @throws If `str` contains any characters outside the `CharacterTable`'s * vocabulary. */ encode(str, numRows) { const buf = tf.buffer([numRows, this.size]); for (let i = 0; i < str.length; ++i) { const char = str[i]; if (this.charIndices[char] == null) { throw new Error(`Unknown character: '${char}'`); } buf.set(1, i, this.charIndices[char]); } return buf.toTensor().as2D(numRows, this.size); } encodeBatch(strings, numRows) { const numExamples = strings.length; const buf = tf.buffer([numExamples, numRows, this.size]); for (let n = 0; n < numExamples; ++n) { const str = strings[n]; for (let i = 0; i < str.length; ++i) { const char = str[i]; if (this.charIndices[char] == null) { throw new Error(`Unknown character: '${char}'`); } buf.set(1, n, i, this.charIndices[char]); } } return buf.toTensor().as3D(numExamples, numRows, this.size); } /** * Convert a 2D tensor into a string with the CharacterTable's vocabulary. * * @param x Input 2D tensor. * @param calcArgmax Whether to perform `argMax` operation on `x` before * indexing into the `CharacterTable`'s vocabulary. * @returns The decoded string. 
*/ decode(x, calcArgmax = true) { return tf.tidy(() => { if (calcArgmax) { x = x.argMax(1); } const xData = x.dataSync(); // TODO(cais): Performance implication? let output = ''; for (const index of Array.from(xData)) { output += this.indicesChar[index]; } return output; }); } } /** * Generate examples. * * Each example consists of a question, e.g., '123+456' and and an * answer, e.g., '579'. * * @param digits Maximum number of digits of each operand of the * @param numExamples Number of examples to generate. * @param invert Whether to invert the strings in the question. * @returns The generated examples. */ function generateData(digits, numExamples, invert) { const digitArray = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']; const arraySize = digitArray.length; const output = []; const maxLen = digits + 1 + digits; const f = () => { let str = ''; while (str.length < digits) { const index = Math.floor(Math.random() * arraySize); str += digitArray[index]; } return Number.parseInt(str); }; const seen = new Set(); while (output.length < numExamples) { const a = f(); const b = f(); const sorted = b > a ? [a, b] : [b, a]; const key = sorted[0] + '`' + sorted[1]; if (seen.has(key)) { continue; } seen.add(key); // Pad the data with spaces such that it is always maxLen. const q = `${a}+${b}`; const query = q + ' '.repeat(maxLen - q.length); let ans = (a + b).toString(); // Answer can be of maximum size `digits + 1`. 
ans += ' '.repeat(digits + 1 - ans.length); if (invert) { throw new Error('invert is not implemented yet'); } output.push([query, ans]); } return output; } function convertDataToTensors(data, charTable, digits) { const maxLen = digits + 1 + digits; const questions = data.map((datum) => datum[0]); const answers = data.map((datum) => datum[1]); return [ charTable.encodeBatch(questions, maxLen), charTable.encodeBatch(answers, digits + 1), ]; } function createAndCompileModel( layers, hiddenSize, rnnType, digits, vocabularySize ) { const maxLen = digits + 1 + digits; const model = tf.sequential(); switch (rnnType) { case 'SimpleRNN': model.add( tf.layers.simpleRNN({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; case 'GRU': model.add( tf.layers.gru({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; case 'LSTM': model.add( tf.layers.lstm({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; default: throw new Error(`Unsupported RNN type: '${rnnType}'`); } model.add(tf.layers.repeatVector({ n: digits + 1 })); switch (rnnType) { case 'SimpleRNN': model.add( tf.layers.simpleRNN({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; case 'GRU': model.add( tf.layers.gru({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; case 'LSTM': model.add( tf.layers.lstm({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; default: throw new Error(`Unsupported RNN type: '${rnnType}'`); } model.add( tf.layers.timeDistributed({ layer: tf.layers.dense({ units: vocabularySize }), }) ); model.add(tf.layers.activation({ activation: 'softmax' })); model.compile({ loss: 'categoricalCrossentropy', optimizer: 'adam', metrics: ['accuracy'], }); return model; } class AdditionRNNDemo {
(digits, trainingSize, rnnType, layers, hiddenSize) { // Prepare training data. const chars = '0123456789+ '; this.charTable = new CharacterTable(chars); console.log('Generating training data'); const data = generateData(digits, trainingSize, false); const split = Math.floor(trainingSize * 0.9); this.trainData = data.slice(0, split); this.testData = data.slice(split); [this.trainXs, this.trainYs] = convertDataToTensors( this.trainData, this.charTable, digits ); [this.testXs, this.testYs] = convertDataToTensors( this.testData, this.charTable, digits ); this.model = createAndCompileModel( layers, hiddenSize, rnnType, digits, chars.length ); } async train(iterations, batchSize, numTestExamples) { const lossValues = [[], []]; const accuracyValues = [[], []]; for (let i = 0; i < iterations; ++i) { const beginMs = performance.now(); const history = await this.model.fit(this.trainXs, this.trainYs, { epochs: 1, batchSize, validationData: [this.testXs, this.testYs], yieldEvery: 'epoch', }); const elapsedMs = performance.now() - beginMs; const modelFitTime = elapsedMs / 1000; const trainLoss = history.history['loss'][0]; const trainAccuracy = history.history['acc'][0]; const valLoss = history.history['val_loss'][0]; const valAccuracy = history.history['val_acc'][0]; lossValues[0].push({ x: i, y: trainLoss }); lossValues[1].push({ x: i, y: valLoss }); accuracyValues[0].push({ x: i, y: trainAccuracy }); accuracyValues[1].push({ x: i, y: valAccuracy }); document.getElementById('trainStatus').textContent = `Iteration ${i + 1} of ${iterations}: ` + `Time per iteration: ${modelFitTime.toFixed(3)} (seconds)`; const lossContainer = document.getElementById('lossChart'); tfvis.render.linechart( lossContainer, { values: lossValues, series: ['train', 'validation'] }, { width: 420, height: 300, xLabel: 'epoch', yLabel: 'loss', } ); const accuracyContainer = document.getElementById('accuracyChart'); tfvis.render.linechart( accuracyContainer, { values: accuracyValues, series: ['train', 
'validation'] }, { width: 420, height: 300, xLabel: 'epoch', yLabel: 'accuracy', } ); if ( this.testXsForDisplay == null || this.testXsForDisplay.shape[0] !== numTestExamples ) { if (this.textXsForDisplay) { this.textXsForDisplay.dispose(); } this.testXsForDisplay = this.testXs.slice( [0, 0, 0], [numTestExamples, this.testXs.shape[1], this.testXs.shape[2]] ); } const examples = []; const isCorrect = []; tf.tidy(() => { const predictOut = this.model.predict(this.testXsForDisplay); for (let k = 0; k < numTestExamples; ++k) { const scores = predictOut .slice([k, 0, 0], [1, predictOut.shape[1], predictOut.shape[2]]) .as2D(predictOut.shape[1], predictOut.shape[2]); const decoded = this.charTable.decode(scores); examples.push(this.testData[k][0] + ' = ' + decoded); isCorrect.push(this.testData[k][1].trim() === decoded.trim()); } }); const examplesDiv = document.getElementById('testExamples'); const examplesContent = examples.map( (example, i) => `<div class="${isCorrect[i] ? 'answer-correct' : 'answer-wrong'}">` + `${example}` + `</div>` ); examplesDiv.innerHTML = examplesContent.join('\n'); } } } async function runAdditionRNNDemo() { document.getElementById('trainModel').addEventListener('click', async () => { const digits = +document.getElementById('digits').value; const trainingSize = +document.getElementById('trainingSize').value; const rnnTypeSelect = document.getElementById('rnnType'); const rnnType = rnnTypeSelect.options[ rnnTypeSelect.selectedIndex ].getAttribute('value'); const layers = +document.getElementById('rnnLayers').value; const hiddenSize = +document.getElementById('rnnLayerSize').value; const batchSize = +document.getElementById('batchSize').value; const trainIterations = +document.getElementById('trainIterations').value; const numTestExamples = +document.getElementById('numTestExamples').value; // Do some checks on the user-specified parameters. 
const status = document.getElementById('trainStatus'); if (digits < 1 || digits > 5) { status.textContent = 'digits must be >= 1 and <= 5'; return; } const trainingSizeLimit = Math.pow(Math.pow(10, digits), 2); if (trainingSize > trainingSizeLimit) { status.textContent = `With digits = ${digits}, you cannot have more than ` + `${trainingSizeLimit} examples`; return; } const demo = new AdditionRNNDemo( digits, trainingSize, rnnType, layers, hiddenSize ); await demo.train(trainIterations, batchSize, numTestExamples); }); } @Component({ selector: 'app-root', templateUrl: './app.component.html', styleUrls: ['./app.component.scss'], }) export class AppComponent implements OnInit { title = 'tfjs-with-angular'; ngOnInit() { runAdditionRNNDemo(); } }
constructor
identifier_name
app.component.ts
// @ts-nocheck import { Component, OnInit } from '@angular/core'; import * as tf from '@tensorflow/tfjs'; import * as tfvis from '@tensorflow/tfjs-vis'; class CharacterTable { /** * Constructor of CharacterTable. * @param chars A string that contains the characters that can appear * in the input. */ constructor(chars) { this.chars = chars; this.charIndices = {}; this.indicesChar = {}; this.size = this.chars.length; for (let i = 0; i < this.size; ++i) { const char = this.chars[i]; if (this.charIndices[char] != null) { throw new Error(`Duplicate character '${char}'`); } this.charIndices[this.chars[i]] = i; this.indicesChar[i] = this.chars[i]; } } /** * Convert a string into a one-hot encoded tensor. * * @param str The input string. * @param numRows Number of rows of the output tensor. * @returns The one-hot encoded 2D tensor. * @throws If `str` contains any characters outside the `CharacterTable`'s * vocabulary. */ encode(str, numRows) { const buf = tf.buffer([numRows, this.size]); for (let i = 0; i < str.length; ++i) { const char = str[i]; if (this.charIndices[char] == null) { throw new Error(`Unknown character: '${char}'`); } buf.set(1, i, this.charIndices[char]); } return buf.toTensor().as2D(numRows, this.size); } encodeBatch(strings, numRows) { const numExamples = strings.length; const buf = tf.buffer([numExamples, numRows, this.size]); for (let n = 0; n < numExamples; ++n) { const str = strings[n]; for (let i = 0; i < str.length; ++i) { const char = str[i]; if (this.charIndices[char] == null) { throw new Error(`Unknown character: '${char}'`); } buf.set(1, n, i, this.charIndices[char]); } } return buf.toTensor().as3D(numExamples, numRows, this.size); } /** * Convert a 2D tensor into a string with the CharacterTable's vocabulary. * * @param x Input 2D tensor. * @param calcArgmax Whether to perform `argMax` operation on `x` before * indexing into the `CharacterTable`'s vocabulary. * @returns The decoded string. 
*/ decode(x, calcArgmax = true) { return tf.tidy(() => { if (calcArgmax) { x = x.argMax(1); } const xData = x.dataSync(); // TODO(cais): Performance implication? let output = ''; for (const index of Array.from(xData)) { output += this.indicesChar[index]; } return output; }); } } /** * Generate examples. * * Each example consists of a question, e.g., '123+456' and and an * answer, e.g., '579'. * * @param digits Maximum number of digits of each operand of the * @param numExamples Number of examples to generate. * @param invert Whether to invert the strings in the question. * @returns The generated examples. */ function generateData(digits, numExamples, invert) { const digitArray = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']; const arraySize = digitArray.length; const output = []; const maxLen = digits + 1 + digits; const f = () => { let str = ''; while (str.length < digits) { const index = Math.floor(Math.random() * arraySize); str += digitArray[index]; } return Number.parseInt(str); }; const seen = new Set(); while (output.length < numExamples) { const a = f(); const b = f(); const sorted = b > a ? [a, b] : [b, a]; const key = sorted[0] + '`' + sorted[1]; if (seen.has(key)) { continue; } seen.add(key); // Pad the data with spaces such that it is always maxLen. const q = `${a}+${b}`; const query = q + ' '.repeat(maxLen - q.length); let ans = (a + b).toString(); // Answer can be of maximum size `digits + 1`. ans += ' '.repeat(digits + 1 - ans.length); if (invert) { throw new Error('invert is not implemented yet'); } output.push([query, ans]); } return output; } function convertDataToTensors(data, charTable, digits)
function createAndCompileModel( layers, hiddenSize, rnnType, digits, vocabularySize ) { const maxLen = digits + 1 + digits; const model = tf.sequential(); switch (rnnType) { case 'SimpleRNN': model.add( tf.layers.simpleRNN({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; case 'GRU': model.add( tf.layers.gru({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; case 'LSTM': model.add( tf.layers.lstm({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; default: throw new Error(`Unsupported RNN type: '${rnnType}'`); } model.add(tf.layers.repeatVector({ n: digits + 1 })); switch (rnnType) { case 'SimpleRNN': model.add( tf.layers.simpleRNN({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; case 'GRU': model.add( tf.layers.gru({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; case 'LSTM': model.add( tf.layers.lstm({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; default: throw new Error(`Unsupported RNN type: '${rnnType}'`); } model.add( tf.layers.timeDistributed({ layer: tf.layers.dense({ units: vocabularySize }), }) ); model.add(tf.layers.activation({ activation: 'softmax' })); model.compile({ loss: 'categoricalCrossentropy', optimizer: 'adam', metrics: ['accuracy'], }); return model; } class AdditionRNNDemo { constructor(digits, trainingSize, rnnType, layers, hiddenSize) { // Prepare training data. 
const chars = '0123456789+ '; this.charTable = new CharacterTable(chars); console.log('Generating training data'); const data = generateData(digits, trainingSize, false); const split = Math.floor(trainingSize * 0.9); this.trainData = data.slice(0, split); this.testData = data.slice(split); [this.trainXs, this.trainYs] = convertDataToTensors( this.trainData, this.charTable, digits ); [this.testXs, this.testYs] = convertDataToTensors( this.testData, this.charTable, digits ); this.model = createAndCompileModel( layers, hiddenSize, rnnType, digits, chars.length ); } async train(iterations, batchSize, numTestExamples) { const lossValues = [[], []]; const accuracyValues = [[], []]; for (let i = 0; i < iterations; ++i) { const beginMs = performance.now(); const history = await this.model.fit(this.trainXs, this.trainYs, { epochs: 1, batchSize, validationData: [this.testXs, this.testYs], yieldEvery: 'epoch', }); const elapsedMs = performance.now() - beginMs; const modelFitTime = elapsedMs / 1000; const trainLoss = history.history['loss'][0]; const trainAccuracy = history.history['acc'][0]; const valLoss = history.history['val_loss'][0]; const valAccuracy = history.history['val_acc'][0]; lossValues[0].push({ x: i, y: trainLoss }); lossValues[1].push({ x: i, y: valLoss }); accuracyValues[0].push({ x: i, y: trainAccuracy }); accuracyValues[1].push({ x: i, y: valAccuracy }); document.getElementById('trainStatus').textContent = `Iteration ${i + 1} of ${iterations}: ` + `Time per iteration: ${modelFitTime.toFixed(3)} (seconds)`; const lossContainer = document.getElementById('lossChart'); tfvis.render.linechart( lossContainer, { values: lossValues, series: ['train', 'validation'] }, { width: 420, height: 300, xLabel: 'epoch', yLabel: 'loss', } ); const accuracyContainer = document.getElementById('accuracyChart'); tfvis.render.linechart( accuracyContainer, { values: accuracyValues, series: ['train', 'validation'] }, { width: 420, height: 300, xLabel: 'epoch', yLabel: 'accuracy', } 
); if ( this.testXsForDisplay == null || this.testXsForDisplay.shape[0] !== numTestExamples ) { if (this.textXsForDisplay) { this.textXsForDisplay.dispose(); } this.testXsForDisplay = this.testXs.slice( [0, 0, 0], [numTestExamples, this.testXs.shape[1], this.testXs.shape[2]] ); } const examples = []; const isCorrect = []; tf.tidy(() => { const predictOut = this.model.predict(this.testXsForDisplay); for (let k = 0; k < numTestExamples; ++k) { const scores = predictOut .slice([k, 0, 0], [1, predictOut.shape[1], predictOut.shape[2]]) .as2D(predictOut.shape[1], predictOut.shape[2]); const decoded = this.charTable.decode(scores); examples.push(this.testData[k][0] + ' = ' + decoded); isCorrect.push(this.testData[k][1].trim() === decoded.trim()); } }); const examplesDiv = document.getElementById('testExamples'); const examplesContent = examples.map( (example, i) => `<div class="${isCorrect[i] ? 'answer-correct' : 'answer-wrong'}">` + `${example}` + `</div>` ); examplesDiv.innerHTML = examplesContent.join('\n'); } } } async function runAdditionRNNDemo() { document.getElementById('trainModel').addEventListener('click', async () => { const digits = +document.getElementById('digits').value; const trainingSize = +document.getElementById('trainingSize').value; const rnnTypeSelect = document.getElementById('rnnType'); const rnnType = rnnTypeSelect.options[ rnnTypeSelect.selectedIndex ].getAttribute('value'); const layers = +document.getElementById('rnnLayers').value; const hiddenSize = +document.getElementById('rnnLayerSize').value; const batchSize = +document.getElementById('batchSize').value; const trainIterations = +document.getElementById('trainIterations').value; const numTestExamples = +document.getElementById('numTestExamples').value; // Do some checks on the user-specified parameters. 
const status = document.getElementById('trainStatus'); if (digits < 1 || digits > 5) { status.textContent = 'digits must be >= 1 and <= 5'; return; } const trainingSizeLimit = Math.pow(Math.pow(10, digits), 2); if (trainingSize > trainingSizeLimit) { status.textContent = `With digits = ${digits}, you cannot have more than ` + `${trainingSizeLimit} examples`; return; } const demo = new AdditionRNNDemo( digits, trainingSize, rnnType, layers, hiddenSize ); await demo.train(trainIterations, batchSize, numTestExamples); }); } @Component({ selector: 'app-root', templateUrl: './app.component.html', styleUrls: ['./app.component.scss'], }) export class AppComponent implements OnInit { title = 'tfjs-with-angular'; ngOnInit() { runAdditionRNNDemo(); } }
{ const maxLen = digits + 1 + digits; const questions = data.map((datum) => datum[0]); const answers = data.map((datum) => datum[1]); return [ charTable.encodeBatch(questions, maxLen), charTable.encodeBatch(answers, digits + 1), ]; }
identifier_body
app.component.ts
// @ts-nocheck import { Component, OnInit } from '@angular/core'; import * as tf from '@tensorflow/tfjs'; import * as tfvis from '@tensorflow/tfjs-vis'; class CharacterTable { /** * Constructor of CharacterTable. * @param chars A string that contains the characters that can appear * in the input. */ constructor(chars) { this.chars = chars; this.charIndices = {}; this.indicesChar = {}; this.size = this.chars.length; for (let i = 0; i < this.size; ++i) { const char = this.chars[i]; if (this.charIndices[char] != null) { throw new Error(`Duplicate character '${char}'`); } this.charIndices[this.chars[i]] = i; this.indicesChar[i] = this.chars[i]; } } /** * Convert a string into a one-hot encoded tensor. * * @param str The input string. * @param numRows Number of rows of the output tensor. * @returns The one-hot encoded 2D tensor. * @throws If `str` contains any characters outside the `CharacterTable`'s * vocabulary. */ encode(str, numRows) { const buf = tf.buffer([numRows, this.size]); for (let i = 0; i < str.length; ++i) { const char = str[i]; if (this.charIndices[char] == null) { throw new Error(`Unknown character: '${char}'`); } buf.set(1, i, this.charIndices[char]); } return buf.toTensor().as2D(numRows, this.size); } encodeBatch(strings, numRows) { const numExamples = strings.length; const buf = tf.buffer([numExamples, numRows, this.size]); for (let n = 0; n < numExamples; ++n)
return buf.toTensor().as3D(numExamples, numRows, this.size); } /** * Convert a 2D tensor into a string with the CharacterTable's vocabulary. * * @param x Input 2D tensor. * @param calcArgmax Whether to perform `argMax` operation on `x` before * indexing into the `CharacterTable`'s vocabulary. * @returns The decoded string. */ decode(x, calcArgmax = true) { return tf.tidy(() => { if (calcArgmax) { x = x.argMax(1); } const xData = x.dataSync(); // TODO(cais): Performance implication? let output = ''; for (const index of Array.from(xData)) { output += this.indicesChar[index]; } return output; }); } } /** * Generate examples. * * Each example consists of a question, e.g., '123+456' and and an * answer, e.g., '579'. * * @param digits Maximum number of digits of each operand of the * @param numExamples Number of examples to generate. * @param invert Whether to invert the strings in the question. * @returns The generated examples. */ function generateData(digits, numExamples, invert) { const digitArray = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']; const arraySize = digitArray.length; const output = []; const maxLen = digits + 1 + digits; const f = () => { let str = ''; while (str.length < digits) { const index = Math.floor(Math.random() * arraySize); str += digitArray[index]; } return Number.parseInt(str); }; const seen = new Set(); while (output.length < numExamples) { const a = f(); const b = f(); const sorted = b > a ? [a, b] : [b, a]; const key = sorted[0] + '`' + sorted[1]; if (seen.has(key)) { continue; } seen.add(key); // Pad the data with spaces such that it is always maxLen. const q = `${a}+${b}`; const query = q + ' '.repeat(maxLen - q.length); let ans = (a + b).toString(); // Answer can be of maximum size `digits + 1`. 
ans += ' '.repeat(digits + 1 - ans.length); if (invert) { throw new Error('invert is not implemented yet'); } output.push([query, ans]); } return output; } function convertDataToTensors(data, charTable, digits) { const maxLen = digits + 1 + digits; const questions = data.map((datum) => datum[0]); const answers = data.map((datum) => datum[1]); return [ charTable.encodeBatch(questions, maxLen), charTable.encodeBatch(answers, digits + 1), ]; } function createAndCompileModel( layers, hiddenSize, rnnType, digits, vocabularySize ) { const maxLen = digits + 1 + digits; const model = tf.sequential(); switch (rnnType) { case 'SimpleRNN': model.add( tf.layers.simpleRNN({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; case 'GRU': model.add( tf.layers.gru({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; case 'LSTM': model.add( tf.layers.lstm({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; default: throw new Error(`Unsupported RNN type: '${rnnType}'`); } model.add(tf.layers.repeatVector({ n: digits + 1 })); switch (rnnType) { case 'SimpleRNN': model.add( tf.layers.simpleRNN({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; case 'GRU': model.add( tf.layers.gru({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; case 'LSTM': model.add( tf.layers.lstm({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; default: throw new Error(`Unsupported RNN type: '${rnnType}'`); } model.add( tf.layers.timeDistributed({ layer: tf.layers.dense({ units: vocabularySize }), }) ); model.add(tf.layers.activation({ activation: 'softmax' })); model.compile({ loss: 'categoricalCrossentropy', optimizer: 'adam', metrics: ['accuracy'], }); return model; } class AdditionRNNDemo { constructor(digits, 
trainingSize, rnnType, layers, hiddenSize) { // Prepare training data. const chars = '0123456789+ '; this.charTable = new CharacterTable(chars); console.log('Generating training data'); const data = generateData(digits, trainingSize, false); const split = Math.floor(trainingSize * 0.9); this.trainData = data.slice(0, split); this.testData = data.slice(split); [this.trainXs, this.trainYs] = convertDataToTensors( this.trainData, this.charTable, digits ); [this.testXs, this.testYs] = convertDataToTensors( this.testData, this.charTable, digits ); this.model = createAndCompileModel( layers, hiddenSize, rnnType, digits, chars.length ); } async train(iterations, batchSize, numTestExamples) { const lossValues = [[], []]; const accuracyValues = [[], []]; for (let i = 0; i < iterations; ++i) { const beginMs = performance.now(); const history = await this.model.fit(this.trainXs, this.trainYs, { epochs: 1, batchSize, validationData: [this.testXs, this.testYs], yieldEvery: 'epoch', }); const elapsedMs = performance.now() - beginMs; const modelFitTime = elapsedMs / 1000; const trainLoss = history.history['loss'][0]; const trainAccuracy = history.history['acc'][0]; const valLoss = history.history['val_loss'][0]; const valAccuracy = history.history['val_acc'][0]; lossValues[0].push({ x: i, y: trainLoss }); lossValues[1].push({ x: i, y: valLoss }); accuracyValues[0].push({ x: i, y: trainAccuracy }); accuracyValues[1].push({ x: i, y: valAccuracy }); document.getElementById('trainStatus').textContent = `Iteration ${i + 1} of ${iterations}: ` + `Time per iteration: ${modelFitTime.toFixed(3)} (seconds)`; const lossContainer = document.getElementById('lossChart'); tfvis.render.linechart( lossContainer, { values: lossValues, series: ['train', 'validation'] }, { width: 420, height: 300, xLabel: 'epoch', yLabel: 'loss', } ); const accuracyContainer = document.getElementById('accuracyChart'); tfvis.render.linechart( accuracyContainer, { values: accuracyValues, series: ['train', 
'validation'] }, { width: 420, height: 300, xLabel: 'epoch', yLabel: 'accuracy', } ); if ( this.testXsForDisplay == null || this.testXsForDisplay.shape[0] !== numTestExamples ) { if (this.textXsForDisplay) { this.textXsForDisplay.dispose(); } this.testXsForDisplay = this.testXs.slice( [0, 0, 0], [numTestExamples, this.testXs.shape[1], this.testXs.shape[2]] ); } const examples = []; const isCorrect = []; tf.tidy(() => { const predictOut = this.model.predict(this.testXsForDisplay); for (let k = 0; k < numTestExamples; ++k) { const scores = predictOut .slice([k, 0, 0], [1, predictOut.shape[1], predictOut.shape[2]]) .as2D(predictOut.shape[1], predictOut.shape[2]); const decoded = this.charTable.decode(scores); examples.push(this.testData[k][0] + ' = ' + decoded); isCorrect.push(this.testData[k][1].trim() === decoded.trim()); } }); const examplesDiv = document.getElementById('testExamples'); const examplesContent = examples.map( (example, i) => `<div class="${isCorrect[i] ? 'answer-correct' : 'answer-wrong'}">` + `${example}` + `</div>` ); examplesDiv.innerHTML = examplesContent.join('\n'); } } } async function runAdditionRNNDemo() { document.getElementById('trainModel').addEventListener('click', async () => { const digits = +document.getElementById('digits').value; const trainingSize = +document.getElementById('trainingSize').value; const rnnTypeSelect = document.getElementById('rnnType'); const rnnType = rnnTypeSelect.options[ rnnTypeSelect.selectedIndex ].getAttribute('value'); const layers = +document.getElementById('rnnLayers').value; const hiddenSize = +document.getElementById('rnnLayerSize').value; const batchSize = +document.getElementById('batchSize').value; const trainIterations = +document.getElementById('trainIterations').value; const numTestExamples = +document.getElementById('numTestExamples').value; // Do some checks on the user-specified parameters. 
const status = document.getElementById('trainStatus'); if (digits < 1 || digits > 5) { status.textContent = 'digits must be >= 1 and <= 5'; return; } const trainingSizeLimit = Math.pow(Math.pow(10, digits), 2); if (trainingSize > trainingSizeLimit) { status.textContent = `With digits = ${digits}, you cannot have more than ` + `${trainingSizeLimit} examples`; return; } const demo = new AdditionRNNDemo( digits, trainingSize, rnnType, layers, hiddenSize ); await demo.train(trainIterations, batchSize, numTestExamples); }); } @Component({ selector: 'app-root', templateUrl: './app.component.html', styleUrls: ['./app.component.scss'], }) export class AppComponent implements OnInit { title = 'tfjs-with-angular'; ngOnInit() { runAdditionRNNDemo(); } }
{ const str = strings[n]; for (let i = 0; i < str.length; ++i) { const char = str[i]; if (this.charIndices[char] == null) { throw new Error(`Unknown character: '${char}'`); } buf.set(1, n, i, this.charIndices[char]); } }
conditional_block
app.component.ts
// @ts-nocheck import { Component, OnInit } from '@angular/core'; import * as tf from '@tensorflow/tfjs'; import * as tfvis from '@tensorflow/tfjs-vis'; class CharacterTable { /** * Constructor of CharacterTable. * @param chars A string that contains the characters that can appear * in the input. */ constructor(chars) { this.chars = chars; this.charIndices = {}; this.indicesChar = {}; this.size = this.chars.length; for (let i = 0; i < this.size; ++i) { const char = this.chars[i]; if (this.charIndices[char] != null) { throw new Error(`Duplicate character '${char}'`); } this.charIndices[this.chars[i]] = i; this.indicesChar[i] = this.chars[i]; } } /** * Convert a string into a one-hot encoded tensor. * * @param str The input string. * @param numRows Number of rows of the output tensor. * @returns The one-hot encoded 2D tensor. * @throws If `str` contains any characters outside the `CharacterTable`'s * vocabulary. */ encode(str, numRows) { const buf = tf.buffer([numRows, this.size]); for (let i = 0; i < str.length; ++i) { const char = str[i]; if (this.charIndices[char] == null) { throw new Error(`Unknown character: '${char}'`); } buf.set(1, i, this.charIndices[char]); } return buf.toTensor().as2D(numRows, this.size); } encodeBatch(strings, numRows) { const numExamples = strings.length; const buf = tf.buffer([numExamples, numRows, this.size]); for (let n = 0; n < numExamples; ++n) { const str = strings[n]; for (let i = 0; i < str.length; ++i) { const char = str[i]; if (this.charIndices[char] == null) { throw new Error(`Unknown character: '${char}'`); } buf.set(1, n, i, this.charIndices[char]); } } return buf.toTensor().as3D(numExamples, numRows, this.size); } /** * Convert a 2D tensor into a string with the CharacterTable's vocabulary. * * @param x Input 2D tensor. * @param calcArgmax Whether to perform `argMax` operation on `x` before * indexing into the `CharacterTable`'s vocabulary. * @returns The decoded string. 
*/ decode(x, calcArgmax = true) { return tf.tidy(() => { if (calcArgmax) { x = x.argMax(1); } const xData = x.dataSync(); // TODO(cais): Performance implication? let output = ''; for (const index of Array.from(xData)) { output += this.indicesChar[index]; } return output; }); } } /** * Generate examples. * * Each example consists of a question, e.g., '123+456' and and an * answer, e.g., '579'. * * @param digits Maximum number of digits of each operand of the * @param numExamples Number of examples to generate. * @param invert Whether to invert the strings in the question. * @returns The generated examples. */ function generateData(digits, numExamples, invert) { const digitArray = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']; const arraySize = digitArray.length; const output = []; const maxLen = digits + 1 + digits; const f = () => { let str = ''; while (str.length < digits) { const index = Math.floor(Math.random() * arraySize); str += digitArray[index]; } return Number.parseInt(str); }; const seen = new Set(); while (output.length < numExamples) { const a = f(); const b = f(); const sorted = b > a ? [a, b] : [b, a]; const key = sorted[0] + '`' + sorted[1]; if (seen.has(key)) { continue; } seen.add(key); // Pad the data with spaces such that it is always maxLen. const q = `${a}+${b}`; const query = q + ' '.repeat(maxLen - q.length); let ans = (a + b).toString(); // Answer can be of maximum size `digits + 1`. 
ans += ' '.repeat(digits + 1 - ans.length); if (invert) { throw new Error('invert is not implemented yet'); } output.push([query, ans]); } return output; } function convertDataToTensors(data, charTable, digits) { const maxLen = digits + 1 + digits; const questions = data.map((datum) => datum[0]); const answers = data.map((datum) => datum[1]); return [ charTable.encodeBatch(questions, maxLen), charTable.encodeBatch(answers, digits + 1), ]; } function createAndCompileModel( layers, hiddenSize, rnnType, digits, vocabularySize ) { const maxLen = digits + 1 + digits; const model = tf.sequential(); switch (rnnType) { case 'SimpleRNN': model.add( tf.layers.simpleRNN({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; case 'GRU': model.add( tf.layers.gru({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; case 'LSTM': model.add( tf.layers.lstm({ units: hiddenSize, recurrentInitializer: 'glorotNormal', inputShape: [maxLen, vocabularySize], }) ); break; default: throw new Error(`Unsupported RNN type: '${rnnType}'`); } model.add(tf.layers.repeatVector({ n: digits + 1 })); switch (rnnType) { case 'SimpleRNN': model.add( tf.layers.simpleRNN({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; case 'GRU': model.add( tf.layers.gru({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; case 'LSTM': model.add( tf.layers.lstm({ units: hiddenSize, recurrentInitializer: 'glorotNormal', returnSequences: true, }) ); break; default: throw new Error(`Unsupported RNN type: '${rnnType}'`); } model.add( tf.layers.timeDistributed({ layer: tf.layers.dense({ units: vocabularySize }), }) ); model.add(tf.layers.activation({ activation: 'softmax' })); model.compile({ loss: 'categoricalCrossentropy', optimizer: 'adam', metrics: ['accuracy'], }); return model; } class AdditionRNNDemo { constructor(digits, 
trainingSize, rnnType, layers, hiddenSize) { // Prepare training data. const chars = '0123456789+ '; this.charTable = new CharacterTable(chars); console.log('Generating training data'); const data = generateData(digits, trainingSize, false); const split = Math.floor(trainingSize * 0.9); this.trainData = data.slice(0, split); this.testData = data.slice(split); [this.trainXs, this.trainYs] = convertDataToTensors( this.trainData, this.charTable, digits ); [this.testXs, this.testYs] = convertDataToTensors( this.testData, this.charTable, digits ); this.model = createAndCompileModel( layers, hiddenSize, rnnType, digits, chars.length ); } async train(iterations, batchSize, numTestExamples) { const lossValues = [[], []]; const accuracyValues = [[], []]; for (let i = 0; i < iterations; ++i) { const beginMs = performance.now(); const history = await this.model.fit(this.trainXs, this.trainYs, { epochs: 1, batchSize, validationData: [this.testXs, this.testYs], yieldEvery: 'epoch', }); const elapsedMs = performance.now() - beginMs;
const trainLoss = history.history['loss'][0]; const trainAccuracy = history.history['acc'][0]; const valLoss = history.history['val_loss'][0]; const valAccuracy = history.history['val_acc'][0]; lossValues[0].push({ x: i, y: trainLoss }); lossValues[1].push({ x: i, y: valLoss }); accuracyValues[0].push({ x: i, y: trainAccuracy }); accuracyValues[1].push({ x: i, y: valAccuracy }); document.getElementById('trainStatus').textContent = `Iteration ${i + 1} of ${iterations}: ` + `Time per iteration: ${modelFitTime.toFixed(3)} (seconds)`; const lossContainer = document.getElementById('lossChart'); tfvis.render.linechart( lossContainer, { values: lossValues, series: ['train', 'validation'] }, { width: 420, height: 300, xLabel: 'epoch', yLabel: 'loss', } ); const accuracyContainer = document.getElementById('accuracyChart'); tfvis.render.linechart( accuracyContainer, { values: accuracyValues, series: ['train', 'validation'] }, { width: 420, height: 300, xLabel: 'epoch', yLabel: 'accuracy', } ); if ( this.testXsForDisplay == null || this.testXsForDisplay.shape[0] !== numTestExamples ) { if (this.textXsForDisplay) { this.textXsForDisplay.dispose(); } this.testXsForDisplay = this.testXs.slice( [0, 0, 0], [numTestExamples, this.testXs.shape[1], this.testXs.shape[2]] ); } const examples = []; const isCorrect = []; tf.tidy(() => { const predictOut = this.model.predict(this.testXsForDisplay); for (let k = 0; k < numTestExamples; ++k) { const scores = predictOut .slice([k, 0, 0], [1, predictOut.shape[1], predictOut.shape[2]]) .as2D(predictOut.shape[1], predictOut.shape[2]); const decoded = this.charTable.decode(scores); examples.push(this.testData[k][0] + ' = ' + decoded); isCorrect.push(this.testData[k][1].trim() === decoded.trim()); } }); const examplesDiv = document.getElementById('testExamples'); const examplesContent = examples.map( (example, i) => `<div class="${isCorrect[i] ? 
'answer-correct' : 'answer-wrong'}">` + `${example}` + `</div>` ); examplesDiv.innerHTML = examplesContent.join('\n'); } } } async function runAdditionRNNDemo() { document.getElementById('trainModel').addEventListener('click', async () => { const digits = +document.getElementById('digits').value; const trainingSize = +document.getElementById('trainingSize').value; const rnnTypeSelect = document.getElementById('rnnType'); const rnnType = rnnTypeSelect.options[ rnnTypeSelect.selectedIndex ].getAttribute('value'); const layers = +document.getElementById('rnnLayers').value; const hiddenSize = +document.getElementById('rnnLayerSize').value; const batchSize = +document.getElementById('batchSize').value; const trainIterations = +document.getElementById('trainIterations').value; const numTestExamples = +document.getElementById('numTestExamples').value; // Do some checks on the user-specified parameters. const status = document.getElementById('trainStatus'); if (digits < 1 || digits > 5) { status.textContent = 'digits must be >= 1 and <= 5'; return; } const trainingSizeLimit = Math.pow(Math.pow(10, digits), 2); if (trainingSize > trainingSizeLimit) { status.textContent = `With digits = ${digits}, you cannot have more than ` + `${trainingSizeLimit} examples`; return; } const demo = new AdditionRNNDemo( digits, trainingSize, rnnType, layers, hiddenSize ); await demo.train(trainIterations, batchSize, numTestExamples); }); } @Component({ selector: 'app-root', templateUrl: './app.component.html', styleUrls: ['./app.component.scss'], }) export class AppComponent implements OnInit { title = 'tfjs-with-angular'; ngOnInit() { runAdditionRNNDemo(); } }
const modelFitTime = elapsedMs / 1000;
random_line_split
lib.rs
//! Data Encryption Standard Rust implementation. //! //! The only supported mode is Electronic Codebook (ECB). //! //! # Example //! //! ``` //! extern crate des_rs_krautcat; //! //! let key = [0x13, 0x34, 0x57, 0x79, 0x9B, 0xBC, 0xDF, 0xF1]; //! let message = [0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]; //! let cipher = des_rs_krautcat::encrypt(&message, &key); //! let message = des_rs_krautcat::decrypt(&cipher, &key); //! ``` //! //! # Usage //! //! Des exports two functions: `encrypt` and `decrypt`. //! Use the former to encrypt some data with a key and the later to decrypt the data. pub type Key = [u8; 8]; const FIRST_BIT: u32 = 1 << 31; const HALF_KEY_SIZE: i64 = KEY_SIZE / 2; const KEY_SIZE: i64 = 56; enum Ip { Direct, Reverse } /// Циклический сдвиг влево половины ключа fn circular_left_shift(ci: u32, di: u32, shift_count: u8) -> (u32, u32) { let mut ci_next = ci; let mut di_next = di; for _ in 0 .. shift_count { ci_next = (ci_next << 1) | ((ci_next & FIRST_BIT) >> (HALF_KEY_SIZE - 1)); di_next = (di_next << 1) | ((di_next & FIRST_BIT) >> (HALF_KEY_SIZE - 1)); } (ci_next, di_next) } /// Обмен битов с расстоянием delta и маской mask в числе a fn delta_swap(a: u64, delta: u8, mask: u64) -> u64 { let b = (a ^ (a >> delta)) & mask; a ^ b ^ (b << delta) } /// Конвертирование ключа из массива u8 в одно число типа u64 fn key_to_u64(key: &Key) -> u64 { let mut result = 0; for &part in key { result <<= 8; result += part as u64; } result } /// Конвертирование сообщения из массива u8 в вектор u64 fn message_to_u64s(message: &[u8]) -> Vec<u64> { message.chunks(8) .map(|m| { let mut result: u64 = 0; for &part in m { result <<= 8; result += part as u64; } if m.len() < 8 { result <<= 8 * (8 - m.len()); } result }) .collect() } /// Конвертирование u64 в вектор u8 fn to_u8_vec(num: u64) -> Vec<u8> { vec![ ((num & 0xFF00000000000000) >> 56) as u8, ((num & 0x00FF000000000000) >> 48) as u8, ((num & 0x0000FF0000000000) >> 40) as u8, ((num & 0x000000FF00000000) >> 32) as 
u8, ((num & 0x00000000FF000000) >> 24) as u8, ((num & 0x0000000000FF0000) >> 16) as u8, ((num & 0x000000000000FF00) >> 8) as u8, ((num & 0x00000000000000FF) >> 0) as u8 ] } /// Процедура создания 16 подключей fn compute_subkeys(key: u64) -> Vec<u64> { let table = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]; let k0 = pc1(key); let mut subkeys = vec![k0]; for shift_count in &table { let last_key = *subkeys.last().unwrap(); let last_ci = ((last_key & 0xFFFFFFF000000000) >> 32) as u32; let last_di = (last_key >> 4) as u32; let (ci, di) = circular_left_shift(last_ci, last_di, *shift_count); let current_key = ((ci as u64) << 32) | ((di as u64) << 4); subkeys.push(current_key); } subkeys.remove(0); subkeys.iter().map(|&n| { pc2(n) }).collect() } /// Перестановка согласно таблице PC-1 fn pc1(key: u64) -> u64 { let key = delta_swap(key, 2, 0x3333000033330000); let key = delta_swap(key, 4, 0x0F0F0F0F00000000); let key = delta_swap(key, 8, 0x009A000A00A200A8); let key = delta_swap(key, 16, 0x00006C6C0000CCCC); let key = delta_swap(key, 1, 0x1045500500550550); let key = delta_swap(key, 32, 0x00000000F0F0F5FA); let key = delta_swap(key, 8, 0x00550055006A00AA); let key = delta_swap(key, 2, 0x0000333330000300); key & 0xFFFFFFFFFFFFFF00 } /// Перестановка согласно таблице PC-2 fn pc2(key: u64) -> u64 { const PC_2_TABLE: [u8; 48] = [ 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34 ,53, 46, 42, 50, 36, 29, 32 ]; const OUT_SIZE: u8 = 64; let mut result: u64 = 0; for m in 0 .. 
PC_2_TABLE.len() as usize { if PC_2_TABLE[m] > m as u8 { result |= (key & (0x01 << OUT_SIZE - PC_2_TABLE[m])) << PC_2_TABLE[m] - (m as u8 + 1); } else { result |= (key & (0x01 << OUT_SIZE - PC_2_TABLE[m])) >> (m as u8 + 1) - PC_2_TABLE[m]; } } result & 0xFFFFFFFFFFFF0000 } /// Перестановка согласно таблице E fn e(block: u32) -> u64 { const BLOCK_LEN: usize = 32; const RESULT_LEN: usize = 48; let block_exp = (block as u64) << 32; let b1 = ((block_exp << (BLOCK_LEN - 1)) & 0x8000000000000000) as u64; let b2 = ((block_exp >> 1) & 0x7C00000000000000) as u64;; let b3 = ((block_exp >> 3) & 0x03F0000000000000) as u64;; let b4 = ((block_exp >> 5) & 0x000FC00000000000) as u64;; let b5 = ((block_exp >> 7) & 0x00003F0000000000) as u64;; let b6 = ((block_exp >> 9) & 0x000000FC00000000) as u64;; let b7 = ((block_exp >> 11) & 0x00000003F0000000) as u64;; let b8 = ((block_exp >> 13) & 0x000000000FC00000) as u64;; let b9 = ((block_exp >> 15) & 0x00000000003E0000) as u64;; let b10 = ((block_exp >> (RESULT_LEN - 1)) & 0x0000000000010000) as u64;; b1 | b2 | b3 | b4 | b5 | b6 | b7 | b8 | b9 | b10 } /// Перестановка согласно таблицек P fn p(block: u32) -> u32 { const P_TABLE: [u8; 32] = [ 16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25 ]; const BLOCK_SIZE: u8 = 32; let mut result: u32 = 0; for m in 0 .. 
P_TABLE.len() as usize { if P_TABLE[m] > m as u8 { result |= (block & (0x01 << BLOCK_SIZE - P_TABLE[m])) << P_TABLE[m] - (m as u8 + 1); } else { result |= (block & (0x01 << BLOCK_SIZE - P_TABLE[m])) >> (m as u8 + 1) - P_TABLE[m]; } } result } /// Реализация S-блоков fn s(box_id: usize, block: u8) -> u8 { const TABLES: [[[u8; 16]; 4]; 8] = [ [ [ 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7], [ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8], [ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0], [ 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13] ], [ [ 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10], [ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5], [ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], [ 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9] ], [ [ 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8], [ 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1], [ 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7], [ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12] ], [ [ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15], [ 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9], [ 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4], [ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14] ], [ [ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9], [ 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6], [ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14], [ 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3] ], [ [ 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11], [ 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8], [ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6], [ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13] ], [ [ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1], [ 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6], [ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2], [ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12] ], [ [ 13, 2, 8, 4, 6, 15, 11, 1, 
10, 9, 3, 14, 5, 0, 12, 7], [ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2], [ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8], [ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11] ] ]; let i = ((block & 0x20) >> 4 | (block & 0x01)) as usize; let j = ((block & 0x1E) >> 1) as usize; TABLES[box_id][i][j] } /// --------------------------------------------------------------- /// # Функции, используемые непосредственно в главном алгоритме DES /// --------------------------------------------------------------- /// IP-перестановка (прямая и обратная) fn ip(message: u64, dir: Ip) -> u64 { const COUNT: usize = 5; const MASK: [u64; COUNT] = [ 0x0055005500550055, 0x0000333300003333, 0x000000000F0F0F0F, 0x00000000FF00FF00, 0x000000FF000000FF ]; const DELTA: [u8; COUNT] = [ 9, 18, 36, 24, 24]; let mut result: u64 = message; match dir { Ip::Direct => for i in 0 .. COUNT { result = delta_swap(result, DELTA[i], MASK[i]) }, Ip::Reverse => for i in (0 .. COUNT).rev() { result = delta_swap(result, DELTA[i], MASK[i]) } } result } /// Функция Фейстеля fn feistel(half_block: u32, subkey: u64) -> u32 { let expanded = e(half_block); let mut intermediate = expanded ^ subkey; let mut result = 0 as u32; for i in 0 .. 8 { let block = ((intermediate & 0xFC00000000000000) >> 58) as u8; intermediate <<= 6; result <<= 4; result |= s(i, block) as u32; } p(result) } /// Алгоритм DES fn des(message: &[u8], subkeys: Vec<u64>) -> Vec<u8> {
let mut blocks = vec![]; for msg in message { let permuted = ip(msg, Ip::Direct); let mut li: u32 = ((permuted & 0xFFFFFFFF00000000) >> 32) as u32; let mut ri: u32 = ((permuted & 0x00000000FFFFFFFF)) as u32; for subkey in &subkeys { let last_li = li; li = ri; ri = last_li ^ feistel(ri, *subkey); } let r16l16 = ( ( ri as u64 ) << 32 ) | li as u64; blocks.push(to_u8_vec(ip(r16l16, Ip::Reverse))); } let mut result = Vec::with_capacity(message_len); for mut block in blocks.into_iter() { result.append(&mut block); } result } /// Шифрование pub fn encrypt(message: &[u8], key: &Key) -> Vec<u8> { let key = key_to_u64(key); let subkeys = compute_subkeys(key); des(message, subkeys) } /// Расшифрование pub fn decrypt(cipher: &[u8], key: &Key) -> Vec<u8> { let key = key_to_u64(key); let mut subkeys = compute_subkeys(key); subkeys.reverse(); des(cipher, subkeys) } #[cfg(test)] mod tests { use super::{decrypt, encrypt}; use super::{e, p, pc1, pc2}; #[test] fn test_e() { let result: [u64; 3] = [ e(0b1111_0000_1010_1010_1111_0000_1010_1010), e(0b1111_0000_1010_1010_1111_0000_1010_1011), e(0b1111_1111_1111_1111_1111_1111_1111_1111), ]; let expect: [u64; 3] = [ 0b011110_100001_010101_010101_011110_100001_010101_010101u64 << 16, 0b111110_100001_010101_010101_011110_100001_010101_010111u64 << 16, 0b111111_111111_111111_111111_111111_111111_111111_111111u64 << 16, ]; for i in 0 .. 3 { assert_eq!(expect[i], result[i]); } } #[test] fn test_p() { let result: [u32; 2] = [ p(0b1111_0000_0101_1010_1110_0111_1100_0011), p(0b1011_0111_0001_1000_0000_1011_0110_1010), ]; let expect: [u32; 2] = [ 0b0000_0101_1111_0111_1010_1010_1100_1011, 0b0101_1100_1011_0010_0110_0110_0101_0010, ]; for i in 0 .. 
2 { assert_eq!(expect[i], result[i]); } } #[test] fn test_pc1() { let result = pc1(0b00010011_00110100_01010111_01111001_10011011_10111100_11011111_11110001); assert_eq!(0b1111000_0110011_0010101_0101111_0101010_1011001_1001111_0001111 << 8, result); } #[test] fn test_pc2() { let result = pc2(0b1110000_1100110_0101010_1011111_1010101_0110011_0011110_0011110 << 8); assert_eq!(0b000110_110000_001011_101111_111111_000111_000001_110010 << 16, result); } #[test] fn test_encrypt_decrypt() { let key = [0x13, 0x34, 0x57, 0x79, 0x9B, 0xBC, 0xDF, 0xF1]; let message = [0x52, 0x75, 0x73, 0x74, 0x20, 0x44, 0x45, 0x53]; let expected_cipher = vec![0x27, 0xC1, 0x4F, 0xA6, 0x9A, 0x04, 0x4E, 0x28]; let cipher = encrypt(&message, &key); assert_eq!(cipher, expected_cipher); let cipher = expected_cipher; let expected_message = message; let message = decrypt(&cipher, &key); assert_eq!(message, expected_message); let message = [0x64, 0x65, 0x73, 0x2D, 0x72, 0x73, 0x2D, 0x6B, 0x72, 0x61, 0x75, 0x74, 0x63, 0x61, 0x74, 0x20, 0x69, 0x73, 0x20, 0x6D, 0x79, 0x20, 0x69, 0x6D, 0x70, 0x6C, 0x65, 0x6D, 0x65, 0x6E, 0x74, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6F, 0x66, 0x20, 0x44, 0x45, 0x53, 0x20, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x69, 0x74, 0x68, 0x6D, 0x20, 0x69, 0x6E, 0x20, 0x52, 0x75, 0x73, 0x74]; let expected_cipher = vec![0x82, 0x8D, 0xB8, 0xD5, 0xFF, 0x41, 0xDF, 0xF7, 0x91, 0x34, 0xCC, 0x88, 0xFB, 0x52, 0xCB, 0xB7, 0x3C, 0x30, 0x17, 0x36, 0x9C, 0x3A, 0x70, 0xE0, 0x17, 0x64, 0x25, 0xDB, 0x17, 0xF5, 0x10, 0x80, 0x02, 0xAF, 0x08, 0x04, 0x6F, 0x3A, 0xA9, 0xB1, 0x3D, 0x74, 0x5C, 0xA7, 0x05, 0x8A, 0x13, 0x46, 0xB8, 0x0B, 0x5C, 0x9C, 0xE6, 0x01, 0x76, 0x92, 0x1C, 0x42, 0x30, 0x7E, 0xB6, 0xFA, 0xE4, 0xD3]; let cipher = encrypt(&message, &key); assert_eq!(cipher, expected_cipher); let cipher = expected_cipher; let expected_message = message; let message = decrypt(&cipher, &key); assert_eq!(&message[..expected_message.len()], &expected_message[..]); } }
let message_len = message.len(); let message = message_to_u64s(message);
random_line_split
lib.rs
//! Data Encryption Standard Rust implementation. //! //! The only supported mode is Electronic Codebook (ECB). //! //! # Example //! //! ``` //! extern crate des_rs_krautcat; //! //! let key = [0x13, 0x34, 0x57, 0x79, 0x9B, 0xBC, 0xDF, 0xF1]; //! let message = [0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]; //! let cipher = des_rs_krautcat::encrypt(&message, &key); //! let message = des_rs_krautcat::decrypt(&cipher, &key); //! ``` //! //! # Usage //! //! Des exports two functions: `encrypt` and `decrypt`. //! Use the former to encrypt some data with a key and the later to decrypt the data. pub type Key = [u8; 8]; const FIRST_BIT: u32 = 1 << 31; const HALF_KEY_SIZE: i64 = KEY_SIZE / 2; const KEY_SIZE: i64 = 56; enum Ip { Direct, Reverse } /// Циклический сдвиг влево половины ключа fn circular_left_shift(ci: u32, di: u32, shift_count: u8) -> (u32, u32) { let mut ci_next = ci; let mut di_next = di; for _ in 0 .. shift_count { ci_next = (ci_next << 1) | ((ci_next & FIRST_BIT) >> (HALF_KEY_SIZE - 1)); di_next = (di_next << 1) | ((di_next & FIRST_BIT) >> (HALF_KEY_SIZE - 1)); } (ci_next, di_next) } /// Обмен битов с расстоянием delta и маской mask в числе a fn delta_swap(a: u64, delta: u8, mask: u64) -> u64 { let b = (a ^ (a >> delta)) & mask; a ^ b ^ (b << delta) } /// Конвертирование ключа из массива u8 в одно число типа u64 fn key_to_u64(key: &Key) -> u64 { let mut result = 0; for &part in key { result <<= 8; result += part as u64; } result } /// Конвертирование сообщения из массива u8 в вектор u64 fn message_to_u64s(message: &[u8]) -> Vec<u64> { message.chunks(8) .map(|m| { let mut result: u64 = 0; for &part in m { result <<= 8; result += part as u64; } if m.len() < 8 { result <<= 8 * (8 - m.len()); } result }) .collect() } /// Конвертирование u64 в вектор u8 fn to_u8_vec(num: u64) -> Vec<u8> { vec![ ((num & 0xFF00000000000000) >> 56) as u8, ((num & 0x00FF000000000000) >> 48) as u8, ((num & 0x0000FF0000000
0) as u8, ((num & 0x000000FF00000000) >> 32) as u8, ((num & 0x00000000FF000000) >> 24) as u8, ((num & 0x0000000000FF0000) >> 16) as u8, ((num & 0x000000000000FF00) >> 8) as u8, ((num & 0x00000000000000FF) >> 0) as u8 ] } /// Процедура создания 16 подключей fn compute_subkeys(key: u64) -> Vec<u64> { let table = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]; let k0 = pc1(key); let mut subkeys = vec![k0]; for shift_count in &table { let last_key = *subkeys.last().unwrap(); let last_ci = ((last_key & 0xFFFFFFF000000000) >> 32) as u32; let last_di = (last_key >> 4) as u32; let (ci, di) = circular_left_shift(last_ci, last_di, *shift_count); let current_key = ((ci as u64) << 32) | ((di as u64) << 4); subkeys.push(current_key); } subkeys.remove(0); subkeys.iter().map(|&n| { pc2(n) }).collect() } /// Перестановка согласно таблице PC-1 fn pc1(key: u64) -> u64 { let key = delta_swap(key, 2, 0x3333000033330000); let key = delta_swap(key, 4, 0x0F0F0F0F00000000); let key = delta_swap(key, 8, 0x009A000A00A200A8); let key = delta_swap(key, 16, 0x00006C6C0000CCCC); let key = delta_swap(key, 1, 0x1045500500550550); let key = delta_swap(key, 32, 0x00000000F0F0F5FA); let key = delta_swap(key, 8, 0x00550055006A00AA); let key = delta_swap(key, 2, 0x0000333330000300); key & 0xFFFFFFFFFFFFFF00 } /// Перестановка согласно таблице PC-2 fn pc2(key: u64) -> u64 { const PC_2_TABLE: [u8; 48] = [ 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34 ,53, 46, 42, 50, 36, 29, 32 ]; const OUT_SIZE: u8 = 64; let mut result: u64 = 0; for m in 0 .. 
PC_2_TABLE.len() as usize { if PC_2_TABLE[m] > m as u8 { result |= (key & (0x01 << OUT_SIZE - PC_2_TABLE[m])) << PC_2_TABLE[m] - (m as u8 + 1); } else { result |= (key & (0x01 << OUT_SIZE - PC_2_TABLE[m])) >> (m as u8 + 1) - PC_2_TABLE[m]; } } result & 0xFFFFFFFFFFFF0000 } /// Перестановка согласно таблице E fn e(block: u32) -> u64 { const BLOCK_LEN: usize = 32; const RESULT_LEN: usize = 48; let block_exp = (block as u64) << 32; let b1 = ((block_exp << (BLOCK_LEN - 1)) & 0x8000000000000000) as u64; let b2 = ((block_exp >> 1) & 0x7C00000000000000) as u64;; let b3 = ((block_exp >> 3) & 0x03F0000000000000) as u64;; let b4 = ((block_exp >> 5) & 0x000FC00000000000) as u64;; let b5 = ((block_exp >> 7) & 0x00003F0000000000) as u64;; let b6 = ((block_exp >> 9) & 0x000000FC00000000) as u64;; let b7 = ((block_exp >> 11) & 0x00000003F0000000) as u64;; let b8 = ((block_exp >> 13) & 0x000000000FC00000) as u64;; let b9 = ((block_exp >> 15) & 0x00000000003E0000) as u64;; let b10 = ((block_exp >> (RESULT_LEN - 1)) & 0x0000000000010000) as u64;; b1 | b2 | b3 | b4 | b5 | b6 | b7 | b8 | b9 | b10 } /// Перестановка согласно таблицек P fn p(block: u32) -> u32 { const P_TABLE: [u8; 32] = [ 16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25 ]; const BLOCK_SIZE: u8 = 32; let mut result: u32 = 0; for m in 0 .. 
P_TABLE.len() as usize { if P_TABLE[m] > m as u8 { result |= (block & (0x01 << BLOCK_SIZE - P_TABLE[m])) << P_TABLE[m] - (m as u8 + 1); } else { result |= (block & (0x01 << BLOCK_SIZE - P_TABLE[m])) >> (m as u8 + 1) - P_TABLE[m]; } } result } /// Реализация S-блоков fn s(box_id: usize, block: u8) -> u8 { const TABLES: [[[u8; 16]; 4]; 8] = [ [ [ 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7], [ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8], [ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0], [ 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13] ], [ [ 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10], [ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5], [ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], [ 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9] ], [ [ 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8], [ 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1], [ 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7], [ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12] ], [ [ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15], [ 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9], [ 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4], [ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14] ], [ [ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9], [ 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6], [ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14], [ 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3] ], [ [ 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11], [ 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8], [ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6], [ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13] ], [ [ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1], [ 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6], [ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2], [ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12] ], [ [ 13, 2, 8, 4, 6, 15, 11, 1, 
10, 9, 3, 14, 5, 0, 12, 7], [ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2], [ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8], [ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11] ] ]; let i = ((block & 0x20) >> 4 | (block & 0x01)) as usize; let j = ((block & 0x1E) >> 1) as usize; TABLES[box_id][i][j] } /// --------------------------------------------------------------- /// # Функции, используемые непосредственно в главном алгоритме DES /// --------------------------------------------------------------- /// IP-перестановка (прямая и обратная) fn ip(message: u64, dir: Ip) -> u64 { const COUNT: usize = 5; const MASK: [u64; COUNT] = [ 0x0055005500550055, 0x0000333300003333, 0x000000000F0F0F0F, 0x00000000FF00FF00, 0x000000FF000000FF ]; const DELTA: [u8; COUNT] = [ 9, 18, 36, 24, 24]; let mut result: u64 = message; match dir { Ip::Direct => for i in 0 .. COUNT { result = delta_swap(result, DELTA[i], MASK[i]) }, Ip::Reverse => for i in (0 .. COUNT).rev() { result = delta_swap(result, DELTA[i], MASK[i]) } } result } /// Функция Фейстеля fn feistel(half_block: u32, subkey: u64) -> u32 { let expanded = e(half_block); let mut intermediate = expanded ^ subkey; let mut result = 0 as u32; for i in 0 .. 
8 { let block = ((intermediate & 0xFC00000000000000) >> 58) as u8; intermediate <<= 6; result <<= 4; result |= s(i, block) as u32; } p(result) } /// Алгоритм DES fn des(message: &[u8], subkeys: Vec<u64>) -> Vec<u8> { let message_len = message.len(); let message = message_to_u64s(message); let mut blocks = vec![]; for msg in message { let permuted = ip(msg, Ip::Direct); let mut li: u32 = ((permuted & 0xFFFFFFFF00000000) >> 32) as u32; let mut ri: u32 = ((permuted & 0x00000000FFFFFFFF)) as u32; for subkey in &subkeys { let last_li = li; li = ri; ri = last_li ^ feistel(ri, *subkey); } let r16l16 = ( ( ri as u64 ) << 32 ) | li as u64; blocks.push(to_u8_vec(ip(r16l16, Ip::Reverse))); } let mut result = Vec::with_capacity(message_len); for mut block in blocks.into_iter() { result.append(&mut block); } result } /// Шифрование pub fn encrypt(message: &[u8], key: &Key) -> Vec<u8> { let key = key_to_u64(key); let subkeys = compute_subkeys(key); des(message, subkeys) } /// Расшифрование pub fn decrypt(cipher: &[u8], key: &Key) -> Vec<u8> { let key = key_to_u64(key); let mut subkeys = compute_subkeys(key); subkeys.reverse(); des(cipher, subkeys) } #[cfg(test)] mod tests { use super::{decrypt, encrypt}; use super::{e, p, pc1, pc2}; #[test] fn test_e() { let result: [u64; 3] = [ e(0b1111_0000_1010_1010_1111_0000_1010_1010), e(0b1111_0000_1010_1010_1111_0000_1010_1011), e(0b1111_1111_1111_1111_1111_1111_1111_1111), ]; let expect: [u64; 3] = [ 0b011110_100001_010101_010101_011110_100001_010101_010101u64 << 16, 0b111110_100001_010101_010101_011110_100001_010101_010111u64 << 16, 0b111111_111111_111111_111111_111111_111111_111111_111111u64 << 16, ]; for i in 0 .. 3 { assert_eq!(expect[i], result[i]); } } #[test] fn test_p() { let result: [u32; 2] = [ p(0b1111_0000_0101_1010_1110_0111_1100_0011), p(0b1011_0111_0001_1000_0000_1011_0110_1010), ]; let expect: [u32; 2] = [ 0b0000_0101_1111_0111_1010_1010_1100_1011, 0b0101_1100_1011_0010_0110_0110_0101_0010, ]; for i in 0 .. 
2 { assert_eq!(expect[i], result[i]); } } #[test] fn test_pc1() { let result = pc1(0b00010011_00110100_01010111_01111001_10011011_10111100_11011111_11110001); assert_eq!(0b1111000_0110011_0010101_0101111_0101010_1011001_1001111_0001111 << 8, result); } #[test] fn test_pc2() { let result = pc2(0b1110000_1100110_0101010_1011111_1010101_0110011_0011110_0011110 << 8); assert_eq!(0b000110_110000_001011_101111_111111_000111_000001_110010 << 16, result); } #[test] fn test_encrypt_decrypt() { let key = [0x13, 0x34, 0x57, 0x79, 0x9B, 0xBC, 0xDF, 0xF1]; let message = [0x52, 0x75, 0x73, 0x74, 0x20, 0x44, 0x45, 0x53]; let expected_cipher = vec![0x27, 0xC1, 0x4F, 0xA6, 0x9A, 0x04, 0x4E, 0x28]; let cipher = encrypt(&message, &key); assert_eq!(cipher, expected_cipher); let cipher = expected_cipher; let expected_message = message; let message = decrypt(&cipher, &key); assert_eq!(message, expected_message); let message = [0x64, 0x65, 0x73, 0x2D, 0x72, 0x73, 0x2D, 0x6B, 0x72, 0x61, 0x75, 0x74, 0x63, 0x61, 0x74, 0x20, 0x69, 0x73, 0x20, 0x6D, 0x79, 0x20, 0x69, 0x6D, 0x70, 0x6C, 0x65, 0x6D, 0x65, 0x6E, 0x74, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6F, 0x66, 0x20, 0x44, 0x45, 0x53, 0x20, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x69, 0x74, 0x68, 0x6D, 0x20, 0x69, 0x6E, 0x20, 0x52, 0x75, 0x73, 0x74]; let expected_cipher = vec![0x82, 0x8D, 0xB8, 0xD5, 0xFF, 0x41, 0xDF, 0xF7, 0x91, 0x34, 0xCC, 0x88, 0xFB, 0x52, 0xCB, 0xB7, 0x3C, 0x30, 0x17, 0x36, 0x9C, 0x3A, 0x70, 0xE0, 0x17, 0x64, 0x25, 0xDB, 0x17, 0xF5, 0x10, 0x80, 0x02, 0xAF, 0x08, 0x04, 0x6F, 0x3A, 0xA9, 0xB1, 0x3D, 0x74, 0x5C, 0xA7, 0x05, 0x8A, 0x13, 0x46, 0xB8, 0x0B, 0x5C, 0x9C, 0xE6, 0x01, 0x76, 0x92, 0x1C, 0x42, 0x30, 0x7E, 0xB6, 0xFA, 0xE4, 0xD3]; let cipher = encrypt(&message, &key); assert_eq!(cipher, expected_cipher); let cipher = expected_cipher; let expected_message = message; let message = decrypt(&cipher, &key); assert_eq!(&message[..expected_message.len()], &expected_message[..]); } }
000) >> 4
identifier_name
lib.rs
//! Data Encryption Standard Rust implementation. //! //! The only supported mode is Electronic Codebook (ECB). //! //! # Example //! //! ``` //! extern crate des_rs_krautcat; //! //! let key = [0x13, 0x34, 0x57, 0x79, 0x9B, 0xBC, 0xDF, 0xF1]; //! let message = [0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]; //! let cipher = des_rs_krautcat::encrypt(&message, &key); //! let message = des_rs_krautcat::decrypt(&cipher, &key); //! ``` //! //! # Usage //! //! Des exports two functions: `encrypt` and `decrypt`. //! Use the former to encrypt some data with a key and the later to decrypt the data. pub type Key = [u8; 8]; const FIRST_BIT: u32 = 1 << 31; const HALF_KEY_SIZE: i64 = KEY_SIZE / 2; const KEY_SIZE: i64 = 56; enum Ip { Direct, Reverse } /// Циклический сдвиг влево половины ключа fn circular_left_shift(ci: u32, di: u32, shift_count: u8) -> (u32, u32) { let mut ci_next = ci; let mut di_next = di; for _ in 0 .. shift_count { ci_next = (ci_next << 1) | ((ci_next & FIRST_BIT) >> (HALF_KEY_SIZE - 1)); di_next = (di_next << 1) | ((di_next & FIRST_BIT) >> (HALF_KEY_SIZE - 1)); } (ci_next, di_next) } /// Обмен битов с расстоянием delta и маской mask в числе a fn delta_swap(a: u64, delta: u8, mask: u64) -> u64 { let b = (a ^ (a >> delta)) & mask; a ^ b ^ (b << delta) } /// Конвертирование ключа из массива u8 в одно число типа u64 fn key_to_u64(key: &Key) -> u64 { let mut result = 0; for &part in key { result <<= 8; result += part as u64; } result } /// Конвертирование сообщения из массива u8 в вектор u64 fn message_to_u64s(message: &[u8]) -> Vec<u64> { message.chunks(8) .map(|m| { let mut result: u64 = 0; for &part in m { result <<= 8; result += part as u64; } if m.len() < 8 { result <<= 8 * (8 - m.len()); } result }) .collect() } /// Конвертирование u64 в вектор u8 fn to_u8_vec(num: u64) -> Vec<u8> { vec![ ((num & 0xFF00000000000000) >> 56) as u8, ((num & 0x00FF000000000000) >> 48) as u8, ((num & 0x0000FF0000000000) >> 40) as u8, ((num & 0x000000FF00000000) >> 32) as 
u8, ((num & 0x00000000FF000000) >> 24) as u8, ((num & 0x0000000000FF0000) >> 16) as u8, ((num & 0x000000000000FF00) >> 8) as u8, ((num & 0x00000000000000FF) >> 0) as u8 ] } /// Процедура создания 16 подключей fn compute_subkeys(key: u64) -> Vec<u64> { let table = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]; let k0 = pc1(key); let mut subkeys = vec![k0]; for shift_count in &table { let last_key = *subkeys.last().unwrap(); let last_ci = ((last_key & 0xFFFFFFF000000000) >> 32) as u32; let last_di = (last_key >> 4) as u32; let (ci, di) = circular_left_shift(last_ci, last_di, *shift_count); let current_key = ((ci as u64) << 32) | ((di as u64) << 4); subkeys.push(current_key); } subkeys.remove(0); subkeys.iter().map(|&n| { pc2(n) }).collect() } /// Перестановка согласно таблице PC-1 fn pc1(key: u64) -> u64 { let key = delta_swap(key, 2, 0x3333000033330000); let key = delta_swap(key, 4, 0x0F0F0F0F00000000); let key = delta_swap(key, 8, 0x009A000A00A200A8); let key = delta_swap(key, 16, 0x00006C6C0000CCCC); let key = delta_swap(key, 1, 0x1045500500550550); let key = delta_swap(key, 32, 0x00000000F0F0F5FA); let key = delta_swap(key, 8, 0x00550055006A00AA); let key = delta_swap(key, 2, 0x0000333330000300); key & 0xFFFFFFFFFFFFFF00 } /// Перестановка согласно таблице PC-2 fn pc2(key: u64) -> u64 { const PC_2_TABLE: [u8; 48] = [ 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34 ,53, 46, 42, 50, 36, 29, 32 ]; const OUT_SIZE: u8 = 64; let mut result: u64 = 0; for m in 0 .. 
PC_2_TABLE.len() as usize { if PC_2_TABLE[m] > m as u8 { result |= (key & (0x01 << OUT_SIZE - PC_2_TABLE[m])) << PC_2_TABLE[m] - (m as u8 + 1); } else { result |= (key & (0x01 << OUT_SIZE - PC_2_TABLE[m])) >> (m as u8 + 1) - PC_2_TABLE[m]; } } result & 0xFFFFFFFFFFFF0000 } /// Перестановка согласно таблице E fn e(block: u32) -> u64 { const BLOCK_LEN: usize = 32; const RESULT_LEN: usize = 48; let block_exp = (block as u64) << 32; let b1 = ((block_exp << (BLOCK_LEN - 1)) & 0x8000000000000000) as u64; let b2 = ((block_exp >> 1) & 0x7C00000000000000) as u64;; let b3 = ((block_exp >> 3) & 0x03F0000000000000) as u64;; let b4 = ((block_exp >> 5) & 0x000FC00000000000) as u64;; let b5 = ((block_exp >> 7) & 0x00003F0000000000) as u64;; let b6 = ((block_exp >> 9) & 0x000000FC00000000) as u64;; let b7 = ((block_exp >> 11) & 0x00000003F0000000) as u64;; let b8 = ((block_exp >> 13) & 0x000000000FC00000) as u64;; let b9 = ((block_exp >> 15) & 0x00000000003E0000) as u64;; let b10 = ((block_exp >> (RESULT_LEN - 1)) & 0x0000000000010000) as u64;; b1 | b2 | b3 | b4 | b5 | b6 | b7 | b8 | b9 | b10 } /// Перестановка согласно таблицек P fn p(block: u32) -> u32 { const P_TABLE: [u8; 32] = [ 16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25 ]; const BLOCK_SIZE: u8 = 32; let mut result: u32 = 0; for m in 0 .. 
P_TABLE.len() as usize { if P_TABLE[m] > m as u8 { result |= (block & (0x01 << BLOCK_SIZE - P_TABLE[m])) << P_TABLE[m] - (m as u8 + 1); } else { result |= (block & (0x01 << BLOCK_SIZE - P_TABLE[m])) >> (m as u8 + 1) - P_TABLE[m]; } } result } /// Реализация S-блоков fn s(box_id: usize, block: u8) -> u8 { const TABLES: [[[u8; 16]; 4]; 8] = [ [ [ 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7], [ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8], [ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0], [ 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13] ], [ [ 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10], [ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5], [ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], [ 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9] ], [ [ 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8], [ 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1], [ 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7], [ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12] ], [ [ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 15], [ 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9], [ 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4], [ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14] ], [ [ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9], [ 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6], [ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14], [ 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3] ], [ [ 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11], [ 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8], [ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6], [ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13] ], [ [ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1], [ 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6], [ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2], [ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12] ], [ [ 13, 2, 8, 4, 6, 15, 11, 1, 
10, 9, 3, 14, 5, 0, 12, 7], [ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2], [ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8], [ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11] ] ]; let i = ((block & 0x20) >> 4 | (block & 0x01)) as usize; let j = ((block & 0x1E) >> 1) as usize; TABLES[box_id][i][j] } /// --------------------------------------------------------------- /// # Функции, используемые непосредственно в главном алгоритме DES /// --------------------------------------------------------------- /// IP-перестановка (прямая и обратная) fn ip(message: u64, dir: Ip) -> u64 { const COUNT: usize = 5; const MASK: [u64; COUNT] = [ 0x0055005500550055, 0x0000333300003333, 0x000000000F0F0F0F, 0x00000000FF00FF00, 0x000000FF000000FF ]; const DELTA: [u8; COUNT] = [ 9, 18, 36, 24, 24]; let mut result: u64 = message; match dir { Ip::Direct => for i in 0 .. COUNT { result = delta_swap(result, DELTA[i], MASK[i]) }, Ip::Reverse => for i in (0 .. COUNT).rev() { result = delta_swap(result, DELTA[i], MASK[i]) } } result } /// Функция Фейстеля fn feistel(half_block: u32, subkey: u64) -> u32 { let expanded = e(half_block); let mut intermediate = expanded ^ subkey; let mut result = 0 as u32; for i in 0 .. 
8 { let block = ((intermediate & 0xFC00000000000000) >> 58) as u8; intermediate <<= 6; result <<= 4; result |= s(i, block) as u32; } p(result) } /// Алгоритм DES fn des(message: &[u8], subkeys: Vec<u64>) -> Vec<u8> { let message_len = message.len(); let message = message_to_u64s(message); let mut blocks = vec![]; for msg in message { let permuted = ip(msg, Ip::Direct); let mut li: u32 = ((permuted & 0xFFFFFFFF00000000) >> 32) as u32; let mut ri: u32 = ((permuted & 0x00000000FFFFFFFF)) as u32; for subkey in &subkeys { let last_li = li; li = ri; ri = last_li ^ feistel(ri, *subkey); } let r16l16 = ( ( ri as u64 ) << 32 ) | li as u64; blocks.push(to_u8_vec(ip(r16l16, Ip::Reverse))); } let mut result = Vec::with_capacity(message_len); for mut block in blocks.into_iter() { result.append(&mut block); } result } /// Шифрование pub fn encrypt(message: &[u8], key: &Key) -> Vec<u8> { let key = key_to_u64(key); let subkeys = compute_subkeys(key); des(message, subkeys) } /// Расшифрование pub fn decrypt(cipher: &[u8], key: &Key) -> Vec<u8> { let key = key_to_u64(key); let mut subkeys = compute_subkeys(key); subkeys.reverse(); des(cipher, subkeys) } #[cfg(test)] mod tests { use super::{decrypt, encrypt}; use super::{e, p, pc1, pc2}; #[test] fn test_e() { let result: [u64; 3] = [ e(0b1111_0000_1010_1010_1111_0000_1010_1010), e(0b1111_0000_1010_1010_1111_0000_1010_1011), e(0b1111_1111_1111_1111_1111_1111_1111_1111), ]; let expect: [u64; 3] = [ 0b011110_100001_010101_010101_011110_100001_010101_010101u64 << 16, 0b111110_100001_010101_010101_011110_100001_010101_010111u64 << 16, 0b111111_111111_111111_111111_111111_111111_111111_111111u64 << 16, ]; for i in 0 .. 3 { assert_eq!(expect[i], result[i]); } } #[test] fn test_p() { let result: [u32; 2] = [ p(0b1111_0000_0101_1010_1110_0111_1100_0011), p(0b1011_0111_0001_1000_0000_1011_0110_1010), ]; let expect: [u32; 2] = [ 0b0000_0101_1111_0111_1010_1010_1100_1011, 0b0101_1100_1011_0010_0110_0110_0101_0010, ]; for i in 0 .. 
2 { assert_eq!(expect[i], result[i]); } } #[test] fn test_pc1() { let
<< 16, result); } #[test] fn test_encrypt_decrypt() { let key = [0x13, 0x34, 0x57, 0x79, 0x9B, 0xBC, 0xDF, 0xF1]; let message = [0x52, 0x75, 0x73, 0x74, 0x20, 0x44, 0x45, 0x53]; let expected_cipher = vec![0x27, 0xC1, 0x4F, 0xA6, 0x9A, 0x04, 0x4E, 0x28]; let cipher = encrypt(&message, &key); assert_eq!(cipher, expected_cipher); let cipher = expected_cipher; let expected_message = message; let message = decrypt(&cipher, &key); assert_eq!(message, expected_message); let message = [0x64, 0x65, 0x73, 0x2D, 0x72, 0x73, 0x2D, 0x6B, 0x72, 0x61, 0x75, 0x74, 0x63, 0x61, 0x74, 0x20, 0x69, 0x73, 0x20, 0x6D, 0x79, 0x20, 0x69, 0x6D, 0x70, 0x6C, 0x65, 0x6D, 0x65, 0x6E, 0x74, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6F, 0x66, 0x20, 0x44, 0x45, 0x53, 0x20, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x69, 0x74, 0x68, 0x6D, 0x20, 0x69, 0x6E, 0x20, 0x52, 0x75, 0x73, 0x74]; let expected_cipher = vec![0x82, 0x8D, 0xB8, 0xD5, 0xFF, 0x41, 0xDF, 0xF7, 0x91, 0x34, 0xCC, 0x88, 0xFB, 0x52, 0xCB, 0xB7, 0x3C, 0x30, 0x17, 0x36, 0x9C, 0x3A, 0x70, 0xE0, 0x17, 0x64, 0x25, 0xDB, 0x17, 0xF5, 0x10, 0x80, 0x02, 0xAF, 0x08, 0x04, 0x6F, 0x3A, 0xA9, 0xB1, 0x3D, 0x74, 0x5C, 0xA7, 0x05, 0x8A, 0x13, 0x46, 0xB8, 0x0B, 0x5C, 0x9C, 0xE6, 0x01, 0x76, 0x92, 0x1C, 0x42, 0x30, 0x7E, 0xB6, 0xFA, 0xE4, 0xD3]; let cipher = encrypt(&message, &key); assert_eq!(cipher, expected_cipher); let cipher = expected_cipher; let expected_message = message; let message = decrypt(&cipher, &key); assert_eq!(&message[..expected_message.len()], &expected_message[..]); } }
result = pc1(0b00010011_00110100_01010111_01111001_10011011_10111100_11011111_11110001); assert_eq!(0b1111000_0110011_0010101_0101111_0101010_1011001_1001111_0001111 << 8, result); } #[test] fn test_pc2() { let result = pc2(0b1110000_1100110_0101010_1011111_1010101_0110011_0011110_0011110 << 8); assert_eq!(0b000110_110000_001011_101111_111111_000111_000001_110010
identifier_body
lib.rs
//! Data Encryption Standard Rust implementation. //! //! The only supported mode is Electronic Codebook (ECB). //! //! # Example //! //! ``` //! extern crate des_rs_krautcat; //! //! let key = [0x13, 0x34, 0x57, 0x79, 0x9B, 0xBC, 0xDF, 0xF1]; //! let message = [0x01, 0x23, 0x45, 0x67, 0x89, 0xAB, 0xCD, 0xEF]; //! let cipher = des_rs_krautcat::encrypt(&message, &key); //! let message = des_rs_krautcat::decrypt(&cipher, &key); //! ``` //! //! # Usage //! //! Des exports two functions: `encrypt` and `decrypt`. //! Use the former to encrypt some data with a key and the later to decrypt the data. pub type Key = [u8; 8]; const FIRST_BIT: u32 = 1 << 31; const HALF_KEY_SIZE: i64 = KEY_SIZE / 2; const KEY_SIZE: i64 = 56; enum Ip { Direct, Reverse } /// Циклический сдвиг влево половины ключа fn circular_left_shift(ci: u32, di: u32, shift_count: u8) -> (u32, u32) { let mut ci_next = ci; let mut di_next = di; for _ in 0 .. shift_count { ci_next = (ci_next << 1) | ((ci_next & FIRST_BIT) >> (HALF_KEY_SIZE - 1)); di_next = (di_next << 1) | ((di_next & FIRST_BIT) >> (HALF_KEY_SIZE - 1)); } (ci_next, di_next) } /// Обмен битов с расстоянием delta и маской mask в числе a fn delta_swap(a: u64, delta: u8, mask: u64) -> u64 { let b = (a ^ (a >> delta)) & mask; a ^ b ^ (b << delta) } /// Конвертирование ключа из массива u8 в одно число типа u64 fn key_to_u64(key: &Key) -> u64 { let mut result = 0; for &part in key { result <<= 8; result += part as u64; } result } /// Конвертирование сообщения из массива u8 в вектор u64 fn message_to_u64s(message: &[u8]) -> Vec<u64> { message.chunks(8) .map(|m| { let mut result: u64 = 0; for &part in m { result <<= 8; result += part as u64; } if m.len() < 8 { result <<= 8 * (8 - m.len()); } result }) .collect() } /// Конвертирование u64 в вектор u8 fn to_u8_vec(num: u64) -> Vec<u8> { vec![ ((num & 0xFF00000000000000) >> 56) as u8, ((num & 0x00FF000000000000) >> 48) as u8, ((num & 0x0000FF0000000000) >> 40) as u8, ((num & 0x000000FF00000000) >> 32) as 
u8, ((num & 0x00000000FF000000) >> 24) as u8, ((num & 0x0000000000FF0000) >> 16) as u8, ((num & 0x000000000000FF00) >> 8) as u8, ((num & 0x00000000000000FF) >> 0) as u8 ] } /// Процедура создания 16 подключей fn compute_subkeys(key: u64) -> Vec<u64> { let table = [1, 1, 2, 2, 2, 2, 2, 2, 1, 2, 2, 2, 2, 2, 2, 1]; let k0 = pc1(key); let mut subkeys = vec![k0]; for shift_count in &table { let last_key = *subkeys.last().unwrap(); let last_ci = ((last_key & 0xFFFFFFF000000000) >> 32) as u32; let last_di = (last_key >> 4) as u32; let (ci, di) = circular_left_shift(last_ci, last_di, *shift_count); let current_key = ((ci as u64) << 32) | ((di as u64) << 4); subkeys.push(current_key); } subkeys.remove(0); subkeys.iter().map(|&n| { pc2(n) }).collect() } /// Перестановка согласно таблице PC-1 fn pc1(key: u64) -> u64 { let key = delta_swap(key, 2, 0x3333000033330000); let key = delta_swap(key, 4, 0x0F0F0F0F00000000); let key = delta_swap(key, 8, 0x009A000A00A200A8); let key = delta_swap(key, 16, 0x00006C6C0000CCCC); let key = delta_swap(key, 1, 0x1045500500550550); let key = delta_swap(key, 32, 0x00000000F0F0F5FA); let key = delta_swap(key, 8, 0x00550055006A00AA); let key = delta_swap(key, 2, 0x0000333330000300); key & 0xFFFFFFFFFFFFFF00 } /// Перестановка согласно таблице PC-2 fn pc2(key: u64) -> u64 { const PC_2_TABLE: [u8; 48] = [ 14, 17, 11, 24, 1, 5, 3, 28, 15, 6, 21, 10, 23, 19, 12, 4, 26, 8, 16, 7, 27, 20, 13, 2, 41, 52, 31, 37, 47, 55, 30, 40, 51, 45, 33, 48, 44, 49, 39, 56, 34 ,53, 46, 42, 50, 36, 29, 32 ]; const OUT_SIZE: u8 = 64; let mut result: u64 = 0; for m in 0 .. PC_2_TABLE.len() as usize { if PC_2_TABLE[m] > m as u8 { result |= (key & (0x01 << OUT_SIZE - PC_2_TABLE[m])) << PC_2_TABLE[m] - (m as u8 + 1); } else { result |= (key & (0x01 << OUT_SIZE - PC_2_TABLE[m])) >> (m as u8 + 1) - PC_2_TABLE[m]; } } result & 0xFFFFFFFFFFFF0000 } /// Перестановка согласно таблице E fn e(block: u32) -> u64 { const BLOCK_LEN: usize = 32; con
1)) & 0x8000000000000000) as u64; let b2 = ((block_exp >> 1) & 0x7C00000000000000) as u64;; let b3 = ((block_exp >> 3) & 0x03F0000000000000) as u64;; let b4 = ((block_exp >> 5) & 0x000FC00000000000) as u64;; let b5 = ((block_exp >> 7) & 0x00003F0000000000) as u64;; let b6 = ((block_exp >> 9) & 0x000000FC00000000) as u64;; let b7 = ((block_exp >> 11) & 0x00000003F0000000) as u64;; let b8 = ((block_exp >> 13) & 0x000000000FC00000) as u64;; let b9 = ((block_exp >> 15) & 0x00000000003E0000) as u64;; let b10 = ((block_exp >> (RESULT_LEN - 1)) & 0x0000000000010000) as u64;; b1 | b2 | b3 | b4 | b5 | b6 | b7 | b8 | b9 | b10 } /// Перестановка согласно таблицек P fn p(block: u32) -> u32 { const P_TABLE: [u8; 32] = [ 16, 7, 20, 21, 29, 12, 28, 17, 1, 15, 23, 26, 5, 18, 31, 10, 2, 8, 24, 14, 32, 27, 3, 9, 19, 13, 30, 6, 22, 11, 4, 25 ]; const BLOCK_SIZE: u8 = 32; let mut result: u32 = 0; for m in 0 .. P_TABLE.len() as usize { if P_TABLE[m] > m as u8 { result |= (block & (0x01 << BLOCK_SIZE - P_TABLE[m])) << P_TABLE[m] - (m as u8 + 1); } else { result |= (block & (0x01 << BLOCK_SIZE - P_TABLE[m])) >> (m as u8 + 1) - P_TABLE[m]; } } result } /// Реализация S-блоков fn s(box_id: usize, block: u8) -> u8 { const TABLES: [[[u8; 16]; 4]; 8] = [ [ [ 14, 4, 13, 1, 2, 15, 11, 8, 3, 10, 6, 12, 5, 9, 0, 7], [ 0, 15, 7, 4, 14, 2, 13, 1, 10, 6, 12, 11, 9, 5, 3, 8], [ 4, 1, 14, 8, 13, 6, 2, 11, 15, 12, 9, 7, 3, 10, 5, 0], [ 15, 12, 8, 2, 4, 9, 1, 7, 5, 11, 3, 14, 10, 0, 6, 13] ], [ [ 15, 1, 8, 14, 6, 11, 3, 4, 9, 7, 2, 13, 12, 0, 5, 10], [ 3, 13, 4, 7, 15, 2, 8, 14, 12, 0, 1, 10, 6, 9, 11, 5], [ 0, 14, 7, 11, 10, 4, 13, 1, 5, 8, 12, 6, 9, 3, 2, 15], [ 13, 8, 10, 1, 3, 15, 4, 2, 11, 6, 7, 12, 0, 5, 14, 9] ], [ [ 10, 0, 9, 14, 6, 3, 15, 5, 1, 13, 12, 7, 11, 4, 2, 8], [ 13, 7, 0, 9, 3, 4, 6, 10, 2, 8, 5, 14, 12, 11, 15, 1], [ 13, 6, 4, 9, 8, 15, 3, 0, 11, 1, 2, 12, 5, 10, 14, 7], [ 1, 10, 13, 0, 6, 9, 8, 7, 4, 15, 14, 3, 11, 5, 2, 12] ], [ [ 7, 13, 14, 3, 0, 6, 9, 10, 1, 2, 8, 5, 11, 12, 4, 
15], [ 13, 8, 11, 5, 6, 15, 0, 3, 4, 7, 2, 12, 1, 10, 14, 9], [ 10, 6, 9, 0, 12, 11, 7, 13, 15, 1, 3, 14, 5, 2, 8, 4], [ 3, 15, 0, 6, 10, 1, 13, 8, 9, 4, 5, 11, 12, 7, 2, 14] ], [ [ 2, 12, 4, 1, 7, 10, 11, 6, 8, 5, 3, 15, 13, 0, 14, 9], [ 14, 11, 2, 12, 4, 7, 13, 1, 5, 0, 15, 10, 3, 9, 8, 6], [ 4, 2, 1, 11, 10, 13, 7, 8, 15, 9, 12, 5, 6, 3, 0, 14], [ 11, 8, 12, 7, 1, 14, 2, 13, 6, 15, 0, 9, 10, 4, 5, 3] ], [ [ 12, 1, 10, 15, 9, 2, 6, 8, 0, 13, 3, 4, 14, 7, 5, 11], [ 10, 15, 4, 2, 7, 12, 9, 5, 6, 1, 13, 14, 0, 11, 3, 8], [ 9, 14, 15, 5, 2, 8, 12, 3, 7, 0, 4, 10, 1, 13, 11, 6], [ 4, 3, 2, 12, 9, 5, 15, 10, 11, 14, 1, 7, 6, 0, 8, 13] ], [ [ 4, 11, 2, 14, 15, 0, 8, 13, 3, 12, 9, 7, 5, 10, 6, 1], [ 13, 0, 11, 7, 4, 9, 1, 10, 14, 3, 5, 12, 2, 15, 8, 6], [ 1, 4, 11, 13, 12, 3, 7, 14, 10, 15, 6, 8, 0, 5, 9, 2], [ 6, 11, 13, 8, 1, 4, 10, 7, 9, 5, 0, 15, 14, 2, 3, 12] ], [ [ 13, 2, 8, 4, 6, 15, 11, 1, 10, 9, 3, 14, 5, 0, 12, 7], [ 1, 15, 13, 8, 10, 3, 7, 4, 12, 5, 6, 11, 0, 14, 9, 2], [ 7, 11, 4, 1, 9, 12, 14, 2, 0, 6, 10, 13, 15, 3, 5, 8], [ 2, 1, 14, 7, 4, 10, 8, 13, 15, 12, 9, 0, 3, 5, 6, 11] ] ]; let i = ((block & 0x20) >> 4 | (block & 0x01)) as usize; let j = ((block & 0x1E) >> 1) as usize; TABLES[box_id][i][j] } /// --------------------------------------------------------------- /// # Функции, используемые непосредственно в главном алгоритме DES /// --------------------------------------------------------------- /// IP-перестановка (прямая и обратная) fn ip(message: u64, dir: Ip) -> u64 { const COUNT: usize = 5; const MASK: [u64; COUNT] = [ 0x0055005500550055, 0x0000333300003333, 0x000000000F0F0F0F, 0x00000000FF00FF00, 0x000000FF000000FF ]; const DELTA: [u8; COUNT] = [ 9, 18, 36, 24, 24]; let mut result: u64 = message; match dir { Ip::Direct => for i in 0 .. COUNT { result = delta_swap(result, DELTA[i], MASK[i]) }, Ip::Reverse => for i in (0 .. 
COUNT).rev() { result = delta_swap(result, DELTA[i], MASK[i]) } } result } /// Функция Фейстеля fn feistel(half_block: u32, subkey: u64) -> u32 { let expanded = e(half_block); let mut intermediate = expanded ^ subkey; let mut result = 0 as u32; for i in 0 .. 8 { let block = ((intermediate & 0xFC00000000000000) >> 58) as u8; intermediate <<= 6; result <<= 4; result |= s(i, block) as u32; } p(result) } /// Алгоритм DES fn des(message: &[u8], subkeys: Vec<u64>) -> Vec<u8> { let message_len = message.len(); let message = message_to_u64s(message); let mut blocks = vec![]; for msg in message { let permuted = ip(msg, Ip::Direct); let mut li: u32 = ((permuted & 0xFFFFFFFF00000000) >> 32) as u32; let mut ri: u32 = ((permuted & 0x00000000FFFFFFFF)) as u32; for subkey in &subkeys { let last_li = li; li = ri; ri = last_li ^ feistel(ri, *subkey); } let r16l16 = ( ( ri as u64 ) << 32 ) | li as u64; blocks.push(to_u8_vec(ip(r16l16, Ip::Reverse))); } let mut result = Vec::with_capacity(message_len); for mut block in blocks.into_iter() { result.append(&mut block); } result } /// Шифрование pub fn encrypt(message: &[u8], key: &Key) -> Vec<u8> { let key = key_to_u64(key); let subkeys = compute_subkeys(key); des(message, subkeys) } /// Расшифрование pub fn decrypt(cipher: &[u8], key: &Key) -> Vec<u8> { let key = key_to_u64(key); let mut subkeys = compute_subkeys(key); subkeys.reverse(); des(cipher, subkeys) } #[cfg(test)] mod tests { use super::{decrypt, encrypt}; use super::{e, p, pc1, pc2}; #[test] fn test_e() { let result: [u64; 3] = [ e(0b1111_0000_1010_1010_1111_0000_1010_1010), e(0b1111_0000_1010_1010_1111_0000_1010_1011), e(0b1111_1111_1111_1111_1111_1111_1111_1111), ]; let expect: [u64; 3] = [ 0b011110_100001_010101_010101_011110_100001_010101_010101u64 << 16, 0b111110_100001_010101_010101_011110_100001_010101_010111u64 << 16, 0b111111_111111_111111_111111_111111_111111_111111_111111u64 << 16, ]; for i in 0 .. 
3 { assert_eq!(expect[i], result[i]); } } #[test] fn test_p() { let result: [u32; 2] = [ p(0b1111_0000_0101_1010_1110_0111_1100_0011), p(0b1011_0111_0001_1000_0000_1011_0110_1010), ]; let expect: [u32; 2] = [ 0b0000_0101_1111_0111_1010_1010_1100_1011, 0b0101_1100_1011_0010_0110_0110_0101_0010, ]; for i in 0 .. 2 { assert_eq!(expect[i], result[i]); } } #[test] fn test_pc1() { let result = pc1(0b00010011_00110100_01010111_01111001_10011011_10111100_11011111_11110001); assert_eq!(0b1111000_0110011_0010101_0101111_0101010_1011001_1001111_0001111 << 8, result); } #[test] fn test_pc2() { let result = pc2(0b1110000_1100110_0101010_1011111_1010101_0110011_0011110_0011110 << 8); assert_eq!(0b000110_110000_001011_101111_111111_000111_000001_110010 << 16, result); } #[test] fn test_encrypt_decrypt() { let key = [0x13, 0x34, 0x57, 0x79, 0x9B, 0xBC, 0xDF, 0xF1]; let message = [0x52, 0x75, 0x73, 0x74, 0x20, 0x44, 0x45, 0x53]; let expected_cipher = vec![0x27, 0xC1, 0x4F, 0xA6, 0x9A, 0x04, 0x4E, 0x28]; let cipher = encrypt(&message, &key); assert_eq!(cipher, expected_cipher); let cipher = expected_cipher; let expected_message = message; let message = decrypt(&cipher, &key); assert_eq!(message, expected_message); let message = [0x64, 0x65, 0x73, 0x2D, 0x72, 0x73, 0x2D, 0x6B, 0x72, 0x61, 0x75, 0x74, 0x63, 0x61, 0x74, 0x20, 0x69, 0x73, 0x20, 0x6D, 0x79, 0x20, 0x69, 0x6D, 0x70, 0x6C, 0x65, 0x6D, 0x65, 0x6E, 0x74, 0x61, 0x74, 0x69, 0x6F, 0x6E, 0x20, 0x6F, 0x66, 0x20, 0x44, 0x45, 0x53, 0x20, 0x61, 0x6C, 0x67, 0x6F, 0x72, 0x69, 0x74, 0x68, 0x6D, 0x20, 0x69, 0x6E, 0x20, 0x52, 0x75, 0x73, 0x74]; let expected_cipher = vec![0x82, 0x8D, 0xB8, 0xD5, 0xFF, 0x41, 0xDF, 0xF7, 0x91, 0x34, 0xCC, 0x88, 0xFB, 0x52, 0xCB, 0xB7, 0x3C, 0x30, 0x17, 0x36, 0x9C, 0x3A, 0x70, 0xE0, 0x17, 0x64, 0x25, 0xDB, 0x17, 0xF5, 0x10, 0x80, 0x02, 0xAF, 0x08, 0x04, 0x6F, 0x3A, 0xA9, 0xB1, 0x3D, 0x74, 0x5C, 0xA7, 0x05, 0x8A, 0x13, 0x46, 0xB8, 0x0B, 0x5C, 0x9C, 0xE6, 0x01, 0x76, 0x92, 0x1C, 0x42, 0x30, 0x7E, 0xB6, 0xFA, 
0xE4, 0xD3]; let cipher = encrypt(&message, &key); assert_eq!(cipher, expected_cipher); let cipher = expected_cipher; let expected_message = message; let message = decrypt(&cipher, &key); assert_eq!(&message[..expected_message.len()], &expected_message[..]); } }
st RESULT_LEN: usize = 48; let block_exp = (block as u64) << 32; let b1 = ((block_exp << (BLOCK_LEN -
conditional_block
functions.py
from sys import meta_path import pandas as pd import numpy as np from datetime import datetime import pytz #Modulo necessário para trabalhar com fuso horário import time import MetaTrader5 as mt5 import plotly.express as px from collections import OrderedDict from workadays import workdays as wd # default='warn' from numpy.core.fromnumeric import var from py_vollib.black.implied_volatility import implied_volatility from py_vollib.black.implied_volatility import implied_volatility_of_discounted_option_price as ivdp from py_vollib.black_scholes import black_scholes from py_vollib.black_scholes_merton import black_scholes_merton from py_vollib.black.greeks.analytical import * # Inicializando mt5 path = r'C:\Program Files\MetaTrader 5 Terminal\terminal64.exe' # path do terminal login = 66304787 password = 'MT5@#!4500' server = "XPMT5-DEMO" # timezone pytz.timezone('America/Sao_Paulo') # fuso horario como utc timing = datetime.now() # Inicializar a conexão if not mt5.initialize(path=path,login=login,server=server,password=password): print("initialize() failed, error code=",mt5.last_error()) mt5.shutdown() #ativos e similares ativo_e_similar = {} def vol_e_preco_max(ativo): """ Retorna preço e volatilidade realizada do ativo OBS: MUDAR O PATH """ rates_frames = pd.read_csv('Dados Históricos\{}_historico.csv'.format(ativo)) vol = rates_frames['retorno'].std() * 252 ** (1/2) # volatilidade realizada anualizada preco_max = rates_frames['close'].max() # maxima historica return vol, preco_max def call_negociadas(ativo, data_do_vencimento=[]): """ Retorna tickers de todas as calls negociadas com vencimento até 160 dias. Ativo: nome do ativo sem o número de on ou pn, exemplo: Se quiser calls de PETROBRAS, o input deve ser "PETR" """ ativo = ativo.rstrip('123456789') calls_codigos = "ABCDEFGHIJKLN" # Pegar o ticks de todas as opçoes negociadas. 
calls_names = [] nomes_calls = [] for codigo in calls_codigos: calls_name = "*{}".format(ativo)+"{}*".format(codigo) calls_names.append(calls_name) for calls in calls_names: data = mt5.symbols_get(calls) for s in data: expiration_date = datetime.fromtimestamp(s.expiration_time).strftime("%Y-%m-%d") # string_date = str(expiration_date) # now_to_expired_days = (expiration_date - TIME_NOW).days if string_date in data_do_vencimento: #print(s.name) nomes_calls.append(s.name) return nomes_calls def streaming(input_ativo,input_data=[]): """ Retorna os dados de opções do ativo em real(se o mercado estiver aberto) input_ativo: ativo desejado input_data: Datas de vencimento das opções """ vol, preco_max = vol_e_preco_max(ativo=input_ativo) dados = call_negociadas(ativo=input_ativo, data_do_vencimento=input_data) # time.sleep(30) # garantir que a conexão foi garantida antes de a chamar a função de streaming mt5.market_book_add(input_ativo) for ticker in dados: mt5.market_book_add(ticker) mt5.market_book_add('DI1@') while(True): df = pd.DataFrame() last_subjacente = float(mt5.symbol_info_tick(input_ativo).last) # ultimos preço do ativo subjacente for s in dados: # informações do ativo name = s # nome do ativo simbol = mt5.symbol_info(s) last = float(simbol.last) # ultimo preço da call strike = float(mt5.symbol_info(s).option_strike) # strike expiration_date = wd.networkdays(timing,(datetime.fromtimestamp(simbol.expiration_time)),country='BR',state='SP') / 365 volume = float(simbol.session_volume) n_negocios = float(simbol.session_deals) di = mt5.symbol_info_tick('DI1@').last / 100 # di # calculo preco_teorico = round(black_scholes_merton('c',last_subjacente,strike,expiration_date,di,vol,q=0),2) vol_implicita = implied_volatility(preco_teorico,last_subjacente,strike,expiration_date,di,'c') dl = delta('c',last_subjacente,strike,expiration_date,di,vol) distancia_preco_max = (preco_max / last_subjacente - 1) diferenças_volatilidades = ( vol_implicita - vol ) lista = 
{'ticker':name, 'ultimo':round(last,2), 'Ativo Subjacente':last_subjacente, 'Strike':strike, 'Volume':volume, 'N° de Negocios': n_negocios, 'Vencimento em(dias)':round((expiration_date*365)), 'Preco Teorico':preco_teorico, 'Implicita':vol_implicita, 'Distancia do Max Historico(%)': distancia_preco_max, 'Diferença entre Implicita e Realizada(%)': diferenças_volatilidades, 'Delta':round(dl,4)} df = df.append(lista,ignore_index=True) tabela = df time.sleep(0.5) return tabela.to_dict('records') def payoffs(preco, strike,
tivo, tipo='c'): """ Retorna os payoffs da call, dado o preço da call, strike, e último preço do ativo subjacente e se a opção foi comprada o vendida. """ ## Gera uma array com preços do ativo subjacente baseado no preço do momento do ativo. p_min, p_max = int(preco_ativo * 0.70), int(preco_ativo * 1.2) step = (p_max - p_min) * 100 ativo_subjacente = np.round(np.linspace(p_min,p_max,step), decimals=2) payoffs = [] for price in ativo_subjacente: if tipo == 'v': if price > strike: payoff = - (price - strike - preco) payoffs.append(round(payoff, 2)) else: payoff = preco payoffs.append(round(payoff, 2)) elif tipo == 'c': if price > strike: payoff = price - strike - preco payoffs.append(round(payoff, 2)) else: payoff = - preco payoffs.append(round(payoff, 2)) payoffs = pd.DataFrame({'AtivoSubjacente': ativo_subjacente, 'Payoffs': payoffs}) return payoffs def posicoes_montandas(input_ativo, input_data=[]): while True: df = streaming(input_ativo,input_data) df = pd.DataFrame(df) itm = df[(df['Delta'] > 0.85) & (df['Delta'] <= 1.)].copy() atm = df[(df['Delta'] > 0.50) & (df['Delta'] <= 0.75)].copy() dataframe = list() ativo = itm['Ativo Subjacente'].iloc[0] for i in itm.index: if itm['Preco Teorico'].loc[i] != 0: preco_comprado = itm['Preco Teorico'].loc[i].item() strike_comprado = itm['Strike'].loc[i].item() vencimento = itm['Vencimento em(dias)'].loc[i].item() tickers_c = itm['ticker'].loc[i] delta_comprado = itm['Delta'].loc[i].item() vol_implicita_c = itm['Implicita'].loc[i].item() implicita_historica_c = itm['Diferença entre Implicita e Realizada(%)'].loc[i].item() for j in atm.index: if atm['Preco Teorico'].loc[j].item() != 0 and atm['Vencimento em(dias)'].loc[j].item() == vencimento: preco_vendido = atm['Preco Teorico'].loc[j] strike_vendido = atm['Strike'].loc[j] try: tickers_v = atm['ticker'].loc[j] except: tickers_v = np.nan delta_vendido = atm['Delta'].loc[j].item() vol_implicita_v = atm['Implicita'].loc[j].item() implicita_historica_v = atm['Diferença entre 
Implicita e Realizada(%)'].loc[j].item() payoff_comprada = payoffs(preco_comprado,strike_comprado,ativo,tipo='c') payoff_vendida = payoffs(preco_vendido,strike_vendido,ativo,tipo='v') payoff_operacao = pd.merge(payoff_comprada, payoff_vendida, on='AtivoSubjacente', suffixes=('_comprado', '_vendido')) payoff_operacao['Payoff_total'] = payoff_operacao.Payoffs_comprado + payoff_operacao.Payoffs_vendido try: breakeven = payoff_operacao[payoff_operacao.Payoff_total == 0] preco_breakeven = breakeven.AtivoSubjacente.item() precos_diferenca = preco_comprado - preco_vendido strike_diferenca = strike_comprado - strike_vendido queda_para_prejuizo = (breakeven.AtivoSubjacente.item()/ativo - 1) ganho_maximo = ((preco_comprado - preco_vendido + payoff_operacao.Payoff_total.iloc[-1]) / (preco_comprado - preco_vendido) - 1) except ValueError: breakeven = np.nan preco_breakeven = np.nan ganho_maximo = np.nan strike_diferenca = np.nan queda_para_prejuizo = np.nan pass dados = OrderedDict([('Call 1',tickers_c), ('Call 2',tickers_v), ('Vencimento',vencimento), ('Preço Teórico(Call 1)',preco_comprado), ('Preço Teórico(Call 2)',preco_vendido), ('Diferença entre Preços',precos_diferenca), ('Strike Call 1',strike_comprado), ('Strike Call 2',strike_vendido), ('Diferença entre strikes',strike_diferenca), ('Ganho Máximo',ganho_maximo), ('Queda para Prejuízo',queda_para_prejuizo), ('Preco Breakeven',preco_breakeven), ('Delta Call 1',delta_comprado), ('Delta Call 2',delta_vendido), ('Vol Implicita(Call 1)',vol_implicita_c), ('Vol Implicita(Call 2)',vol_implicita_v), ('Implicita vs Histórica(Call 1)',implicita_historica_c), ('Implicita vs Histórica(Call 2)',implicita_historica_v), ]) dataframe.append(dados) df = pd.DataFrame(dataframe) return df.to_dict('records') def option_figure(ativo_sub,call_1,call_2): "Retorna o grafico de payoff das opções selecionadas." 
preco_itm, strike_itm = float(mt5.symbol_info_tick(call_1).last), float(mt5.symbol_info(call_1).option_strike) preco_atm, strike_atm = float(mt5.symbol_info_tick(call_2).last), float(mt5.symbol_info(call_2).option_strike) ativo = float(mt5.symbol_info_tick(ativo_sub).last) payoff_comprada = payoffs(preco_itm,strike_itm,ativo,tipo='c') payoff_vendida = payoffs(preco_atm,strike_atm,ativo,tipo='v') payoff_operacao = pd.merge(payoff_comprada, payoff_vendida, on='AtivoSubjacente', suffixes=('_comprado', '_vendido')) payoff_operacao['Payoff_total'] = payoff_operacao.Payoffs_comprado + payoff_operacao.Payoffs_vendido breakeven = payoff_operacao.loc[payoff_operacao.Payoff_total == 0] #Figure fig = px.line( title="Payoff Opções", x=payoff_operacao.AtivoSubjacente, y=payoff_operacao.Payoff_total, labels={"x": "Preço do Ativo", "y": "Payoff"}, ) fig.add_annotation(x=ativo, y=payoff_operacao.loc[payoff_operacao.AtivoSubjacente == ativo, 'Payoff_total'].item(), text="Resultado atual", showarrow=True, arrowhead=2) fig.update_yaxes(zeroline=True, zerolinecolor="#FF0000", spikedash='dot') fig.update_layout(hovermode="x") return fig def variacao_indicadores(dados): ''' Retorna o dicionario contendo a variação dos principais indicadores para cada ativo na base de dados(arquivo referencia ibov-top40-volume.xlsx) ''' financeiros = pd.read_csv(dados,thousands=r',') variacoes = {} for i in financeiros['ATIVO'].unique(): data = financeiros[financeiros['ATIVO'] == i]['Data'].reset_index(drop=True) var_lpa = financeiros['LPA'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) var_pl = financeiros['P/L'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) var_pvpa = financeiros['P/VPA'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) var_psales = financeiros['P/SALES'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) ativo = [i]*len(var_pl) variacao = {'ATIVO':ativo, 'Data':data, 'Variacao LPA':var_lpa, 'Variacao P/L':var_pl, 'Variacao 
P/VPA':var_pvpa, 'Variacao P/SALES': var_psales} variacoes[i] = variacao return variacoes def retornos_volatilidade(input_ativo): dia_hoje = timing.strftime('%Y-%m-%d') setores = pd.read_excel('setores.xlsx').dropna(axis=1) historico = pd.DataFrame(mt5.copy_rates_from_pos(input_ativo,mt5.TIMEFRAME_D1,0,252)) historico.index = pd.to_datetime(historico['time'],unit='s') historico.sort_index(ascending=False,inplace=True) ativo_setor = setores['Subsetor Bovespa'][setores['Código'] == input_ativo].item() ativo_g = input_ativo.rstrip('123456789') ativos_similares = [i for i in setores['Código'][(setores['Subsetor Bovespa'] == ativo_setor) & (~setores['Código'].str.contains(ativo_g))]] max_52s = historico['close'].max() # 52 semanas retorno_52s = historico['close'][0]/historico['close'][-1] - 1 # retorno 52 semanas vol_52s = historico['close'].pct_change().std() * 252 ** (1/2) # volatilidade de 52 semanas retorno_30d = historico['close'].head(21)[0]/historico['close'].head(21)[-1] - 1 # retorno 1 mês retorno_1d = historico['close'][0]/historico['close'][1] - 1 # retorno dia ativo_e_similar[input_ativo+'_'+dia_hoje]= {'Código':input_ativo, 'Data':dia_hoje, 'Max 52s':max_52s, 'Retorno 52s':retorno_52s, 'Volatilidade 52s':vol_52s, 'Retorno 30d':retorno_30d, 'Retorno 1d':retorno_1d} for ativo in ativos_similares: name = ativo +'_'+ dia_hoje if name not in ativo_e_similar.keys(): historico = pd.DataFrame(mt5.copy_rates_from_pos(ativo,mt5.TIMEFRAME_D1,0,252)) historico.index = pd.to_datetime(historico['time'],unit='s') historico.sort_index(ascending=False,inplace=True) max_52s = historico['close'].max() # 52 semanas retorno_52s = historico['close'][0]/historico['close'][-1] - 1 # retorno 52 semanas vol_52s = historico['close'].pct_change().std() * 252 ** (1/2) # volatilidade de 52 semanas retorno_30d = historico['close'].head(21)[0]/historico['close'].head(21)[-1] - 1 # retorno 1 mês retorno_1d = historico['close'][0]/historico['close'][1] - 1 # retorno dia 
ativo_e_similar[name]= { 'Código':ativo, 'Max 52s':max_52s, 'Data': dia_hoje, 'Retorno 52s':retorno_52s, 'Volatilidade 52s':vol_52s, 'Retorno 30d':retorno_30d, 'Retorno 1d':retorno_1d} #novos_similares = [i+'_'+dia_hoje for i in ativos_similares] retornos = pd.DataFrame([ativo_e_similar.get(key) for key in ativo_e_similar.keys()]) ativo = retornos[retornos['Código']==input_ativo].to_dict('records') similares = retornos[retornos['Código'].isin(ativos_similares)].to_dict('records') return ativo, similares
preco_a
identifier_name
functions.py
from sys import meta_path import pandas as pd import numpy as np from datetime import datetime import pytz #Modulo necessário para trabalhar com fuso horário import time import MetaTrader5 as mt5 import plotly.express as px from collections import OrderedDict from workadays import workdays as wd # default='warn' from numpy.core.fromnumeric import var from py_vollib.black.implied_volatility import implied_volatility from py_vollib.black.implied_volatility import implied_volatility_of_discounted_option_price as ivdp from py_vollib.black_scholes import black_scholes from py_vollib.black_scholes_merton import black_scholes_merton from py_vollib.black.greeks.analytical import * # Inicializando mt5 path = r'C:\Program Files\MetaTrader 5 Terminal\terminal64.exe' # path do terminal login = 66304787 password = 'MT5@#!4500' server = "XPMT5-DEMO" # timezone pytz.timezone('America/Sao_Paulo') # fuso horario como utc timing = datetime.now() # Inicializar a conexão if not mt5.initialize(path=path,login=login,server=server,password=password): print("initialize() failed, error code=",mt5.last_error()) mt5.shutdown() #ativos e similares ativo_e_similar = {} def vol_e_preco_max(ativo): """ Retorna preço e volatilidade realizada do ativo OBS: MUDAR O PATH """ rates_frames = pd.read_csv('Dados Históricos\{}_historico.csv'.format(ativo)) vol = rates_frames['retorno'].std() * 252 ** (1/2) # volatilidade realizada anualizada preco_max = rates_frames['close'].max() # maxima historica return vol, preco_max def call_negociadas(ativo, data_do_vencimento=[]): """ Retorna tickers de todas as calls negociadas com vencimento até 160 dias. Ativo: nome do ativo sem o número de on ou pn, exemplo: Se quiser calls de PETROBRAS, o input deve ser "PETR" """ ativo = ativo.rstrip('123456789') calls_codigos = "ABCDEFGHIJKLN" # Pegar o ticks de todas as opçoes negociadas. 
calls_names = [] nomes_calls = [] for codigo in calls_codigos: calls_name = "*{}".format(ativo)+"{}*".format(codigo) calls_names.append(calls_name) for calls in calls_names: data = mt5.symbols_get(calls) for s in data: expiration_date = datetime.fromtimestamp(s.expiration_time).strftime("%Y-%m-%d") # string_date = str(expiration_date) # now_to_expired_days = (expiration_date - TIME_NOW).days if string_date in data_do_vencimento: #print(s.name) nomes_calls.append(s.name) return nomes_calls def streaming(input_ativo,input_data=[]): """ Retorna os dados de opções do ativo em real(se o mercado estiver aberto) input_ativo: ativo desejado input_data: Datas de vencimento das opções """ vol, preco_max = vol_e_preco_max(ativo=input_ativo) dados = call_negociadas(ativo=input_ativo, data_do_vencimento=input_data) # time.sleep(30) # garantir que a conexão foi garantida antes de a chamar a função de streaming mt5.market_book_add(input_ativo) for ticker in dados: mt5.market_book_add(ticker) mt5.market_book_add('DI1@') while(True): df = pd.DataFrame() last_subjacente = float(mt5.symbol_info_tick(input_ativo).last) # ultimos preço do ativo subjacente for s in dados: # informações do ativo name = s # nome do ativo simbol = mt5.symbol_info(s) last = float(simbol.last) # ultimo preço da call strike = float(mt5.symbol_info(s).option_strike) # strike expiration_date = wd.networkdays(timing,(datetime.fromtimestamp(simbol.expiration_time)),country='BR',state='SP') / 365 volume = float(simbol.session_volume) n_negocios = float(simbol.session_deals) di = mt5.symbol_info_tick('DI1@').last / 100 # di # calculo preco_teorico = round(black_scholes_merton('c',last_subjacente,strike,expiration_date,di,vol,q=0),2) vol_implicita = implied_volatility(preco_teorico,last_subjacente,strike,expiration_date,di,'c') dl = delta('c',last_subjacente,strike,expiration_date,di,vol) distancia_preco_max = (preco_max / last_subjacente - 1) diferenças_volatilidades = ( vol_implicita - vol ) lista = 
{'ticker':name, 'ultimo':round(last,2), 'Ativo Subjacente':last_subjacente, 'Strike':strike, 'Volume':volume, 'N° de Negocios': n_negocios, 'Vencimento em(dias)':round((expiration_date*365)), 'Preco Teorico':preco_teorico, 'Implicita':vol_implicita, 'Distancia do Max Historico(%)': distancia_preco_max, 'Diferença entre Implicita e Realizada(%)': diferenças_volatilidades, 'Delta':round(dl,4)} df = df.append(lista,ignore_index=True) tabela = df time.sleep(0.5) return tabela.to_dict('records') def payoffs(preco, strike, preco_ativo, tipo='c'): """ Retorna os payoffs da call, dado o preço da call, strike, e último preço do ativo subjacente e se a opção foi comprada o vendida. """ ## Gera uma array com preços do ativo subjacente baseado no preço do momento do ativo. p_min, p_max = int(preco_ativo * 0.70), int(preco_ativo * 1.2) step = (p_max - p_min) * 100 ativo_subjacente = np.round(np.linspace(p_min,p_max,step), decimals=2) payoffs = [] for price in ativo_subjacente: if tipo == 'v': if price > strike: payoff = - (price - strike - preco) payoffs.append(round(payoff, 2)) else: payoff = preco payoffs.append(round(payoff, 2)) elif tipo == 'c': if price > strike: payoff = price - strike - preco payoffs.append(round(payoff, 2)) else: payoff = - preco payoffs.append(round(payoff, 2)) payoffs = pd.DataFrame({'AtivoSubjacente': ativo_subjacente, 'Payoffs': payoffs}) return payoffs def posicoes_montandas(input_ativo, input_data=[]): while True: df = streaming(input_ativo,input_data) df = pd.DataFrame(df) itm = df[(df['Delta'] > 0.85) & (df['Delta'] <= 1.)].copy() atm = df[(df['Delta'] > 0.50) & (df['Delta'] <= 0.75)].copy() dataframe = list() ativo = itm['Ativo Subjacente'].iloc[0] for i in itm.index: if itm['Preco Teorico'].loc[i] != 0: preco_comprado = itm['Preco Teorico'].loc[i].item() strike_comprado = itm['Strike'].loc[i].item() vencimento = itm['Vencimento em(dias)'].loc[i].item() tickers_c = itm['ticker'].loc[i] delta_comprado = itm['Delta'].loc[i].item() vol_implicita_c 
= itm['Implicita'].loc[i].item() implicita_historica_c = itm['Diferença entre Implicita e Realizada(%)'].loc[i].item() for j in atm.index: if atm['Preco Teorico'].loc[j].item() != 0 and atm['Vencimento em(dias)'].loc[j].item() == vencimento: preco_vendido = atm['Preco Teorico'].loc[j] strike_vendido = atm['Strike'].loc[j] try: tickers_v = atm['ticker'].loc[j] except: tickers_v = np.nan delta_vendido = atm['Delta'].loc[j].item() vol_implicita_v = atm['Implicita'].loc[j].item() implicita_historica_v = atm['Diferença entre Implicita e Realizada(%)'].loc[j].item() payoff_comprada = payoffs(preco_comprado,strike_comprado,ativo,tipo='c') payoff_vendida = payoffs(preco_vendido,strike_vendido,ativo,tipo='v') payoff_operacao = pd.merge(payoff_comprada, payoff_vendida, on='AtivoSubjacente', suffixes=('_comprado', '_vendido')) payoff_operacao['Payoff_total'] = payoff_operacao.Payoffs_comprado + payoff_operacao.Payoffs_vendido try: breakeven = payoff_operacao[payoff_operacao.Payoff_total == 0] preco_breakeven = breakeven.AtivoSubjacente.item() precos_diferenca = preco_comprado - preco_vendido strike_diferenca = strike_comprado - strike_vendido queda_para_prejuizo = (breakeven.AtivoSubjacente.item()/ativo - 1) ganho_maximo = ((preco_comprado - preco_vendido + payoff_operacao.Payoff_total.iloc[-1]) / (preco_comprado - preco_vendido) - 1) except ValueError: breakeven = np.nan preco_breakeven = np.nan ganho_maximo = np.nan strike_diferenca = np.nan queda_para_prejuizo = np.nan pass dados = OrderedDict([('Call 1',tickers_c), ('Call 2',tickers_v), ('Vencimento',vencimento), ('Preço Teórico(Call 1)',preco_comprado), ('Preço Teórico(Call 2)',preco_vendido), ('Diferença entre Preços',precos_diferenca), ('Strike Call 1',strike_comprado), ('Strike Call 2',strike_vendido), ('Diferença entre strikes',strike_diferenca), ('Ganho Máximo',ganho_maximo), ('Queda para Prejuízo',queda_para_prejuizo), ('Preco Breakeven',preco_breakeven), ('Delta Call 1',delta_comprado), ('Delta Call 
2',delta_vendido), ('Vol Implicita(Call 1)',vol_implicita_c), ('Vol Implicita(Call 2)',vol_implicita_v), ('Implicita vs Histórica(Call 1)',implicita_historica_c), ('Implicita vs Histórica(Call 2)',implicita_historica_v), ]) dataframe.append(dados) df = pd.DataFrame(dataframe) return df.to_dict('records') def option_figure(ativo_sub,call_1,call_2): "Retorna o grafico de payoff das opções selecionadas." preco_itm, strike_itm = float(mt5.symbol_info_tick(call_1).last), float(mt5.symbol_info(call_1).option_strike) preco_atm, strike_atm = float(mt5.symbol_info_tick(call_2).last), float(mt5.symbol_info(call_2).option_strike) ativo = float(mt5.symbol_info_tick(ativo_sub).last) payoff_comprada = payoffs(preco_itm,strike_itm,ativo,tipo='c') payoff_vendida = payoffs(preco_atm,strike_atm,ativo,tipo='v') payoff_operacao = pd.merge(payoff_comprada, payoff_vendida, on='AtivoSubjacente', suffixes=('_comprado', '_vendido')) payoff_operacao['Payoff_total'] = payoff_operacao.Payoffs_comprado + payoff_operacao.Payoffs_vendido breakeven = payoff_operacao.loc[payoff_operacao.Payoff_total == 0] #Figure fig = px.line( title="Payoff Opções", x=payoff_operacao.AtivoSubjacente, y=payoff_operacao.Payoff_total, labels={"x": "Preço do Ativo", "y": "Payoff"}, ) fig.add_annotation(x=ativo, y=payoff_operacao.loc[payoff_operacao.AtivoSubjacente == ativo, 'Payoff_total'].item(), text="Resultado atual", showarrow=True, arrowhead=2) fig.update_yaxes(zeroline=True, zerolinecolor="#FF0000", spikedash='dot') fig.update_layout(hovermode="x") return fig def variacao_indicadores(dados): ''' Retorna o dicionario contendo a variação dos principais indicadores para cada ativo na base de dados(arquivo referencia ibov-top40-volume.xlsx) ''' financeiros = pd.read_csv(dados,thousands=r',') variacoes = {} for i in financeiros['ATIVO'].unique(): data = financeiros[financeiros['ATIVO'] == i]['Data'].reset_index(drop=True) var_lpa = financeiros['LPA'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) 
var_pl = financeiros['P/L'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) var_pvpa = financeiros['P/VPA'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) var_psales = financeiros['P/SALES'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) ativo = [i]*len(var_pl) variacao = {'ATIVO':ativo, 'Data':data, 'Variacao LPA':var_lpa, 'Variacao P/L':var_pl, 'Variacao P/VPA':var_pvpa, 'Variacao P/SALES': var_psales} variacoes[i] = variacao return variacoes def retornos_volatilidade(input_ativo): dia_hoje = timing.strftime('%Y-%m-%d') setores = pd.read_excel('setores.xlsx').dropna(axis=1) historico = pd.DataFrame(mt5.copy_rates_from_pos(input_ativo,mt5.TIMEFRAME_D1,0,252)) historico.index = pd.to_datetime(historico['time'],unit='s') historico.sort_index(ascending=False,inplace=True) ativo_setor = setores['Subsetor Bovespa'][setores['Código'] == input_ativo].item() ativo_g = input_ativo.rstrip('123456789') ativos_similares = [i for i in setores['Código'][(setores['Subsetor Bovespa'] == ativo_setor) & (~setores['Código'].str.contains(ativo_g))]] max_52s = historico['close'].max() # 52 semanas retorno_52s = historico['close'][0]/historico['close'][-1] - 1 # retorno 52 semanas vol_52s = historico['close'].pct_change().std() * 252 ** (1/2) # volatilidade de 52 semanas retorno_30d = historico['close'].head(21)[0]/historico['close'].head(21)[-1] - 1 # retorno 1 mês retorno_1d = historico['close'][0]/historico['close'][1] - 1 # retorno dia ativo_e_similar[input_ativo+'_'+dia_hoje]= {'Código':input_ativo, 'Data':dia_hoje, 'Max 52s':max_52s, 'Retorno 52s':retorno_52s, 'Volatilidade 52s':vol_52s, 'Retorno 30d':retorno_30d, 'Retorno 1d':retorno_1d} for ativo in ativos_similares: name = ativo +'_'+ dia_hoje if name not
r i in ativos_similares] retornos = pd.DataFrame([ativo_e_similar.get(key) for key in ativo_e_similar.keys()]) ativo = retornos[retornos['Código']==input_ativo].to_dict('records') similares = retornos[retornos['Código'].isin(ativos_similares)].to_dict('records') return ativo, similares
in ativo_e_similar.keys(): historico = pd.DataFrame(mt5.copy_rates_from_pos(ativo,mt5.TIMEFRAME_D1,0,252)) historico.index = pd.to_datetime(historico['time'],unit='s') historico.sort_index(ascending=False,inplace=True) max_52s = historico['close'].max() # 52 semanas retorno_52s = historico['close'][0]/historico['close'][-1] - 1 # retorno 52 semanas vol_52s = historico['close'].pct_change().std() * 252 ** (1/2) # volatilidade de 52 semanas retorno_30d = historico['close'].head(21)[0]/historico['close'].head(21)[-1] - 1 # retorno 1 mês retorno_1d = historico['close'][0]/historico['close'][1] - 1 # retorno dia ativo_e_similar[name]= { 'Código':ativo, 'Max 52s':max_52s, 'Data': dia_hoje, 'Retorno 52s':retorno_52s, 'Volatilidade 52s':vol_52s, 'Retorno 30d':retorno_30d, 'Retorno 1d':retorno_1d} #novos_similares = [i+'_'+dia_hoje fo
conditional_block
functions.py
from sys import meta_path import pandas as pd import numpy as np from datetime import datetime import pytz #Modulo necessário para trabalhar com fuso horário import time import MetaTrader5 as mt5 import plotly.express as px from collections import OrderedDict from workadays import workdays as wd # default='warn' from numpy.core.fromnumeric import var from py_vollib.black.implied_volatility import implied_volatility from py_vollib.black.implied_volatility import implied_volatility_of_discounted_option_price as ivdp from py_vollib.black_scholes import black_scholes from py_vollib.black_scholes_merton import black_scholes_merton from py_vollib.black.greeks.analytical import * # Inicializando mt5 path = r'C:\Program Files\MetaTrader 5 Terminal\terminal64.exe' # path do terminal login = 66304787 password = 'MT5@#!4500' server = "XPMT5-DEMO" # timezone pytz.timezone('America/Sao_Paulo') # fuso horario como utc timing = datetime.now() # Inicializar a conexão if not mt5.initialize(path=path,login=login,server=server,password=password): print("initialize() failed, error code=",mt5.last_error()) mt5.shutdown() #ativos e similares ativo_e_similar = {} def vol_e_preco_max(ativo): """ Retorna preço e volatilidade realizada do ativo OBS: MUDAR O PATH """ rates_frames = pd.read_csv('Dados Históricos\{}_historico.csv'.format(ativo)) vol = rates_frames['retorno'].std() * 252 ** (1/2) # volatilidade realizada anualizada preco_max = rates_frames['close'].max() # maxima historica return vol, preco_max def call_negociadas(ativo, data_do_vencimento=[]): """ Retorna tickers de todas as calls negociadas com vencimento até 160 dias. Ativo: nome do ativo sem o número de on ou pn, exemplo: Se quiser calls de PETROBRAS, o input deve ser "PETR" """ ativo = ativo.rstrip('123456789') calls_codigos = "ABCDEFGHIJKLN" # Pegar o ticks de todas as opçoes negociadas. 
calls_names = [] nomes_calls = [] for codigo in calls_codigos: calls_name = "*{}".format(ativo)+"{}*".format(codigo) calls_names.append(calls_name) for calls in calls_names: data = mt5.symbols_get(calls) for s in data: expiration_date = datetime.fromtimestamp(s.expiration_time).strftime("%Y-%m-%d") # string_date = str(expiration_date) # now_to_expired_days = (expiration_date - TIME_NOW).days if string_date in data_do_vencimento: #print(s.name) nomes_calls.append(s.name) return nomes_calls def streaming(input_ativo,input_data=[]): """ Retorna os dados de opções do ativo em real(se o mercado estiver aberto) input_ativo: ativo desejado
""" vol, preco_max = vol_e_preco_max(ativo=input_ativo) dados = call_negociadas(ativo=input_ativo, data_do_vencimento=input_data) # time.sleep(30) # garantir que a conexão foi garantida antes de a chamar a função de streaming mt5.market_book_add(input_ativo) for ticker in dados: mt5.market_book_add(ticker) mt5.market_book_add('DI1@') while(True): df = pd.DataFrame() last_subjacente = float(mt5.symbol_info_tick(input_ativo).last) # ultimos preço do ativo subjacente for s in dados: # informações do ativo name = s # nome do ativo simbol = mt5.symbol_info(s) last = float(simbol.last) # ultimo preço da call strike = float(mt5.symbol_info(s).option_strike) # strike expiration_date = wd.networkdays(timing,(datetime.fromtimestamp(simbol.expiration_time)),country='BR',state='SP') / 365 volume = float(simbol.session_volume) n_negocios = float(simbol.session_deals) di = mt5.symbol_info_tick('DI1@').last / 100 # di # calculo preco_teorico = round(black_scholes_merton('c',last_subjacente,strike,expiration_date,di,vol,q=0),2) vol_implicita = implied_volatility(preco_teorico,last_subjacente,strike,expiration_date,di,'c') dl = delta('c',last_subjacente,strike,expiration_date,di,vol) distancia_preco_max = (preco_max / last_subjacente - 1) diferenças_volatilidades = ( vol_implicita - vol ) lista = {'ticker':name, 'ultimo':round(last,2), 'Ativo Subjacente':last_subjacente, 'Strike':strike, 'Volume':volume, 'N° de Negocios': n_negocios, 'Vencimento em(dias)':round((expiration_date*365)), 'Preco Teorico':preco_teorico, 'Implicita':vol_implicita, 'Distancia do Max Historico(%)': distancia_preco_max, 'Diferença entre Implicita e Realizada(%)': diferenças_volatilidades, 'Delta':round(dl,4)} df = df.append(lista,ignore_index=True) tabela = df time.sleep(0.5) return tabela.to_dict('records') def payoffs(preco, strike, preco_ativo, tipo='c'): """ Retorna os payoffs da call, dado o preço da call, strike, e último preço do ativo subjacente e se a opção foi comprada o vendida. 
""" ## Gera uma array com preços do ativo subjacente baseado no preço do momento do ativo. p_min, p_max = int(preco_ativo * 0.70), int(preco_ativo * 1.2) step = (p_max - p_min) * 100 ativo_subjacente = np.round(np.linspace(p_min,p_max,step), decimals=2) payoffs = [] for price in ativo_subjacente: if tipo == 'v': if price > strike: payoff = - (price - strike - preco) payoffs.append(round(payoff, 2)) else: payoff = preco payoffs.append(round(payoff, 2)) elif tipo == 'c': if price > strike: payoff = price - strike - preco payoffs.append(round(payoff, 2)) else: payoff = - preco payoffs.append(round(payoff, 2)) payoffs = pd.DataFrame({'AtivoSubjacente': ativo_subjacente, 'Payoffs': payoffs}) return payoffs def posicoes_montandas(input_ativo, input_data=[]): while True: df = streaming(input_ativo,input_data) df = pd.DataFrame(df) itm = df[(df['Delta'] > 0.85) & (df['Delta'] <= 1.)].copy() atm = df[(df['Delta'] > 0.50) & (df['Delta'] <= 0.75)].copy() dataframe = list() ativo = itm['Ativo Subjacente'].iloc[0] for i in itm.index: if itm['Preco Teorico'].loc[i] != 0: preco_comprado = itm['Preco Teorico'].loc[i].item() strike_comprado = itm['Strike'].loc[i].item() vencimento = itm['Vencimento em(dias)'].loc[i].item() tickers_c = itm['ticker'].loc[i] delta_comprado = itm['Delta'].loc[i].item() vol_implicita_c = itm['Implicita'].loc[i].item() implicita_historica_c = itm['Diferença entre Implicita e Realizada(%)'].loc[i].item() for j in atm.index: if atm['Preco Teorico'].loc[j].item() != 0 and atm['Vencimento em(dias)'].loc[j].item() == vencimento: preco_vendido = atm['Preco Teorico'].loc[j] strike_vendido = atm['Strike'].loc[j] try: tickers_v = atm['ticker'].loc[j] except: tickers_v = np.nan delta_vendido = atm['Delta'].loc[j].item() vol_implicita_v = atm['Implicita'].loc[j].item() implicita_historica_v = atm['Diferença entre Implicita e Realizada(%)'].loc[j].item() payoff_comprada = payoffs(preco_comprado,strike_comprado,ativo,tipo='c') payoff_vendida = 
payoffs(preco_vendido,strike_vendido,ativo,tipo='v') payoff_operacao = pd.merge(payoff_comprada, payoff_vendida, on='AtivoSubjacente', suffixes=('_comprado', '_vendido')) payoff_operacao['Payoff_total'] = payoff_operacao.Payoffs_comprado + payoff_operacao.Payoffs_vendido try: breakeven = payoff_operacao[payoff_operacao.Payoff_total == 0] preco_breakeven = breakeven.AtivoSubjacente.item() precos_diferenca = preco_comprado - preco_vendido strike_diferenca = strike_comprado - strike_vendido queda_para_prejuizo = (breakeven.AtivoSubjacente.item()/ativo - 1) ganho_maximo = ((preco_comprado - preco_vendido + payoff_operacao.Payoff_total.iloc[-1]) / (preco_comprado - preco_vendido) - 1) except ValueError: breakeven = np.nan preco_breakeven = np.nan ganho_maximo = np.nan strike_diferenca = np.nan queda_para_prejuizo = np.nan pass dados = OrderedDict([('Call 1',tickers_c), ('Call 2',tickers_v), ('Vencimento',vencimento), ('Preço Teórico(Call 1)',preco_comprado), ('Preço Teórico(Call 2)',preco_vendido), ('Diferença entre Preços',precos_diferenca), ('Strike Call 1',strike_comprado), ('Strike Call 2',strike_vendido), ('Diferença entre strikes',strike_diferenca), ('Ganho Máximo',ganho_maximo), ('Queda para Prejuízo',queda_para_prejuizo), ('Preco Breakeven',preco_breakeven), ('Delta Call 1',delta_comprado), ('Delta Call 2',delta_vendido), ('Vol Implicita(Call 1)',vol_implicita_c), ('Vol Implicita(Call 2)',vol_implicita_v), ('Implicita vs Histórica(Call 1)',implicita_historica_c), ('Implicita vs Histórica(Call 2)',implicita_historica_v), ]) dataframe.append(dados) df = pd.DataFrame(dataframe) return df.to_dict('records') def option_figure(ativo_sub,call_1,call_2): "Retorna o grafico de payoff das opções selecionadas." 
preco_itm, strike_itm = float(mt5.symbol_info_tick(call_1).last), float(mt5.symbol_info(call_1).option_strike) preco_atm, strike_atm = float(mt5.symbol_info_tick(call_2).last), float(mt5.symbol_info(call_2).option_strike) ativo = float(mt5.symbol_info_tick(ativo_sub).last) payoff_comprada = payoffs(preco_itm,strike_itm,ativo,tipo='c') payoff_vendida = payoffs(preco_atm,strike_atm,ativo,tipo='v') payoff_operacao = pd.merge(payoff_comprada, payoff_vendida, on='AtivoSubjacente', suffixes=('_comprado', '_vendido')) payoff_operacao['Payoff_total'] = payoff_operacao.Payoffs_comprado + payoff_operacao.Payoffs_vendido breakeven = payoff_operacao.loc[payoff_operacao.Payoff_total == 0] #Figure fig = px.line( title="Payoff Opções", x=payoff_operacao.AtivoSubjacente, y=payoff_operacao.Payoff_total, labels={"x": "Preço do Ativo", "y": "Payoff"}, ) fig.add_annotation(x=ativo, y=payoff_operacao.loc[payoff_operacao.AtivoSubjacente == ativo, 'Payoff_total'].item(), text="Resultado atual", showarrow=True, arrowhead=2) fig.update_yaxes(zeroline=True, zerolinecolor="#FF0000", spikedash='dot') fig.update_layout(hovermode="x") return fig def variacao_indicadores(dados): ''' Retorna o dicionario contendo a variação dos principais indicadores para cada ativo na base de dados(arquivo referencia ibov-top40-volume.xlsx) ''' financeiros = pd.read_csv(dados,thousands=r',') variacoes = {} for i in financeiros['ATIVO'].unique(): data = financeiros[financeiros['ATIVO'] == i]['Data'].reset_index(drop=True) var_lpa = financeiros['LPA'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) var_pl = financeiros['P/L'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) var_pvpa = financeiros['P/VPA'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) var_psales = financeiros['P/SALES'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) ativo = [i]*len(var_pl) variacao = {'ATIVO':ativo, 'Data':data, 'Variacao LPA':var_lpa, 'Variacao P/L':var_pl, 'Variacao 
P/VPA':var_pvpa, 'Variacao P/SALES': var_psales} variacoes[i] = variacao return variacoes def retornos_volatilidade(input_ativo): dia_hoje = timing.strftime('%Y-%m-%d') setores = pd.read_excel('setores.xlsx').dropna(axis=1) historico = pd.DataFrame(mt5.copy_rates_from_pos(input_ativo,mt5.TIMEFRAME_D1,0,252)) historico.index = pd.to_datetime(historico['time'],unit='s') historico.sort_index(ascending=False,inplace=True) ativo_setor = setores['Subsetor Bovespa'][setores['Código'] == input_ativo].item() ativo_g = input_ativo.rstrip('123456789') ativos_similares = [i for i in setores['Código'][(setores['Subsetor Bovespa'] == ativo_setor) & (~setores['Código'].str.contains(ativo_g))]] max_52s = historico['close'].max() # 52 semanas retorno_52s = historico['close'][0]/historico['close'][-1] - 1 # retorno 52 semanas vol_52s = historico['close'].pct_change().std() * 252 ** (1/2) # volatilidade de 52 semanas retorno_30d = historico['close'].head(21)[0]/historico['close'].head(21)[-1] - 1 # retorno 1 mês retorno_1d = historico['close'][0]/historico['close'][1] - 1 # retorno dia ativo_e_similar[input_ativo+'_'+dia_hoje]= {'Código':input_ativo, 'Data':dia_hoje, 'Max 52s':max_52s, 'Retorno 52s':retorno_52s, 'Volatilidade 52s':vol_52s, 'Retorno 30d':retorno_30d, 'Retorno 1d':retorno_1d} for ativo in ativos_similares: name = ativo +'_'+ dia_hoje if name not in ativo_e_similar.keys(): historico = pd.DataFrame(mt5.copy_rates_from_pos(ativo,mt5.TIMEFRAME_D1,0,252)) historico.index = pd.to_datetime(historico['time'],unit='s') historico.sort_index(ascending=False,inplace=True) max_52s = historico['close'].max() # 52 semanas retorno_52s = historico['close'][0]/historico['close'][-1] - 1 # retorno 52 semanas vol_52s = historico['close'].pct_change().std() * 252 ** (1/2) # volatilidade de 52 semanas retorno_30d = historico['close'].head(21)[0]/historico['close'].head(21)[-1] - 1 # retorno 1 mês retorno_1d = historico['close'][0]/historico['close'][1] - 1 # retorno dia 
ativo_e_similar[name]= { 'Código':ativo, 'Max 52s':max_52s, 'Data': dia_hoje, 'Retorno 52s':retorno_52s, 'Volatilidade 52s':vol_52s, 'Retorno 30d':retorno_30d, 'Retorno 1d':retorno_1d} #novos_similares = [i+'_'+dia_hoje for i in ativos_similares] retornos = pd.DataFrame([ativo_e_similar.get(key) for key in ativo_e_similar.keys()]) ativo = retornos[retornos['Código']==input_ativo].to_dict('records') similares = retornos[retornos['Código'].isin(ativos_similares)].to_dict('records') return ativo, similares
input_data: Datas de vencimento das opções
random_line_split
functions.py
from sys import meta_path import pandas as pd import numpy as np from datetime import datetime import pytz #Modulo necessário para trabalhar com fuso horário import time import MetaTrader5 as mt5 import plotly.express as px from collections import OrderedDict from workadays import workdays as wd # default='warn' from numpy.core.fromnumeric import var from py_vollib.black.implied_volatility import implied_volatility from py_vollib.black.implied_volatility import implied_volatility_of_discounted_option_price as ivdp from py_vollib.black_scholes import black_scholes from py_vollib.black_scholes_merton import black_scholes_merton from py_vollib.black.greeks.analytical import * # Inicializando mt5 path = r'C:\Program Files\MetaTrader 5 Terminal\terminal64.exe' # path do terminal login = 66304787 password = 'MT5@#!4500' server = "XPMT5-DEMO" # timezone pytz.timezone('America/Sao_Paulo') # fuso horario como utc timing = datetime.now() # Inicializar a conexão if not mt5.initialize(path=path,login=login,server=server,password=password): print("initialize() failed, error code=",mt5.last_error()) mt5.shutdown() #ativos e similares ativo_e_similar = {} def vol_e_preco_max(ativo): """ Retorna preço e volatilidade realizada do ativo OBS: MUDAR O PATH """ rates_frames = pd.read_csv('Dados Históricos\{}_historico.csv'.format(ativo)) vol = rates_frames['retorno'].std() * 252 ** (1/2) # volatilidade realizada anualizada preco_max = rates_frames['close'].max() # maxima historica return vol, preco_max def call_negociadas(ativo, data_do_vencimento=[]): """ Retorna tickers de todas as calls negociadas com vencimento até 160 dias. Ativo: nome do ativo sem o número de on ou pn, exemplo: Se quiser calls de PETROBRAS, o input deve ser "PETR" """ ativo = ativo.rstrip('123456789') calls_codigos = "ABCDEFGHIJKLN" # Pegar o ticks de todas as opçoes negociadas. 
calls_names = [] nomes_calls = [] for codigo in calls_codigos: calls_name = "*{}".format(ativo)+"{}*".format(codigo) calls_names.append(calls_name) for calls in calls_names: data = mt5.symbols_get(calls) for s in data: expiration_date = datetime.fromtimestamp(s.expiration_time).strftime("%Y-%m-%d") # string_date = str(expiration_date) # now_to_expired_days = (expiration_date - TIME_NOW).days if string_date in data_do_vencimento: #print(s.name) nomes_calls.append(s.name) return nomes_calls def streaming(input_ativo,input_data=[]): """ Retorna os dados de opções do ativo em real(se o mercado estiver aberto) input_ativo: ativo desejado input_data: Datas de vencimento das opções """ vol, preco_max = vol_e_preco_max(ativo=input_ativo) dados = call_negociadas(ativo=input_ativo, data_do_vencimento=input_data) # time.sleep(30) # garantir que a conexão foi garantida antes de a chamar a função de streaming mt5.market_book_add(input_ativo) for ticker in dados: mt5.market_book_add(ticker) mt5.market_book_add('DI1@') while(True): df = pd.DataFrame() last_subjacente = float(mt5.symbol_info_tick(input_ativo).last) # ultimos preço do ativo subjacente for s in dados: # informações do ativo name = s # nome do ativo simbol = mt5.symbol_info(s) last = float(simbol.last) # ultimo preço da call strike = float(mt5.symbol_info(s).option_strike) # strike expiration_date = wd.networkdays(timing,(datetime.fromtimestamp(simbol.expiration_time)),country='BR',state='SP') / 365 volume = float(simbol.session_volume) n_negocios = float(simbol.session_deals) di = mt5.symbol_info_tick('DI1@').last / 100 # di # calculo preco_teorico = round(black_scholes_merton('c',last_subjacente,strike,expiration_date,di,vol,q=0),2) vol_implicita = implied_volatility(preco_teorico,last_subjacente,strike,expiration_date,di,'c') dl = delta('c',last_subjacente,strike,expiration_date,di,vol) distancia_preco_max = (preco_max / last_subjacente - 1) diferenças_volatilidades = ( vol_implicita - vol ) lista = 
{'ticker':name, 'ultimo':round(last,2), 'Ativo Subjacente':last_subjacente, 'Strike':strike, 'Volume':volume, 'N° de Negocios': n_negocios, 'Vencimento em(dias)':round((expiration_date*365)), 'Preco Teorico':preco_teorico, 'Implicita':vol_implicita, 'Distancia do Max Historico(%)': distancia_preco_max, 'Diferença entre Implicita e Realizada(%)': diferenças_volatilidades, 'Delta':round(dl,4)} df = df.append(lista,ignore_index=True) tabela = df time.sleep(0.5) return tabela.to_dict('records') def payoffs(preco, strike, preco_ativo, tipo='c'): """ Retorna os payoffs da call, dado o preço da call, strike, e último preço do ativo subjacente e se a opção foi comprada o vendida. """ ## Gera uma array com preços do ativo subjacente baseado no preço do momento do ativo. p_min, p_max = int(preco_ativo * 0.70), int(preco_ativo * 1.2) step = (p_max - p_min) * 100 ativo_subjacente = np.round(np.linspace(p_min,p_max,step), decimals=2) payoffs = [] for price in ativo_subjacente: if tipo == 'v': if price > strike: payoff = - (price - strike - preco) payoffs.append(round(payoff, 2)) else: payoff = preco payoffs.append(round(payoff, 2)) elif tipo == 'c': if price > strike: payoff = price - strike - preco payoffs.append(round(payoff, 2)) else: payoff = - preco payoffs.append(round(payoff, 2)) payoffs = pd.DataFrame({'AtivoSubjacente': ativo_subjacente, 'Payoffs': payoffs}) return payoffs def posicoes_montandas(input_ativo, input_data=[]): while True: df = stre
def option_figure(ativo_sub,call_1,call_2): "Retorna o grafico de payoff das opções selecionadas." preco_itm, strike_itm = float(mt5.symbol_info_tick(call_1).last), float(mt5.symbol_info(call_1).option_strike) preco_atm, strike_atm = float(mt5.symbol_info_tick(call_2).last), float(mt5.symbol_info(call_2).option_strike) ativo = float(mt5.symbol_info_tick(ativo_sub).last) payoff_comprada = payoffs(preco_itm,strike_itm,ativo,tipo='c') payoff_vendida = payoffs(preco_atm,strike_atm,ativo,tipo='v') payoff_operacao = pd.merge(payoff_comprada, payoff_vendida, on='AtivoSubjacente', suffixes=('_comprado', '_vendido')) payoff_operacao['Payoff_total'] = payoff_operacao.Payoffs_comprado + payoff_operacao.Payoffs_vendido breakeven = payoff_operacao.loc[payoff_operacao.Payoff_total == 0] #Figure fig = px.line( title="Payoff Opções", x=payoff_operacao.AtivoSubjacente, y=payoff_operacao.Payoff_total, labels={"x": "Preço do Ativo", "y": "Payoff"}, ) fig.add_annotation(x=ativo, y=payoff_operacao.loc[payoff_operacao.AtivoSubjacente == ativo, 'Payoff_total'].item(), text="Resultado atual", showarrow=True, arrowhead=2) fig.update_yaxes(zeroline=True, zerolinecolor="#FF0000", spikedash='dot') fig.update_layout(hovermode="x") return fig def variacao_indicadores(dados): ''' Retorna o dicionario contendo a variação dos principais indicadores para cada ativo na base de dados(arquivo referencia ibov-top40-volume.xlsx) ''' financeiros = pd.read_csv(dados,thousands=r',') variacoes = {} for i in financeiros['ATIVO'].unique(): data = financeiros[financeiros['ATIVO'] == i]['Data'].reset_index(drop=True) var_lpa = financeiros['LPA'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) var_pl = financeiros['P/L'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) var_pvpa = financeiros['P/VPA'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) var_psales = financeiros['P/SALES'][financeiros['ATIVO'] == i].pct_change().reset_index(drop=True) ativo = [i]*len(var_pl) 
variacao = {'ATIVO':ativo, 'Data':data, 'Variacao LPA':var_lpa, 'Variacao P/L':var_pl, 'Variacao P/VPA':var_pvpa, 'Variacao P/SALES': var_psales} variacoes[i] = variacao return variacoes def retornos_volatilidade(input_ativo): dia_hoje = timing.strftime('%Y-%m-%d') setores = pd.read_excel('setores.xlsx').dropna(axis=1) historico = pd.DataFrame(mt5.copy_rates_from_pos(input_ativo,mt5.TIMEFRAME_D1,0,252)) historico.index = pd.to_datetime(historico['time'],unit='s') historico.sort_index(ascending=False,inplace=True) ativo_setor = setores['Subsetor Bovespa'][setores['Código'] == input_ativo].item() ativo_g = input_ativo.rstrip('123456789') ativos_similares = [i for i in setores['Código'][(setores['Subsetor Bovespa'] == ativo_setor) & (~setores['Código'].str.contains(ativo_g))]] max_52s = historico['close'].max() # 52 semanas retorno_52s = historico['close'][0]/historico['close'][-1] - 1 # retorno 52 semanas vol_52s = historico['close'].pct_change().std() * 252 ** (1/2) # volatilidade de 52 semanas retorno_30d = historico['close'].head(21)[0]/historico['close'].head(21)[-1] - 1 # retorno 1 mês retorno_1d = historico['close'][0]/historico['close'][1] - 1 # retorno dia ativo_e_similar[input_ativo+'_'+dia_hoje]= {'Código':input_ativo, 'Data':dia_hoje, 'Max 52s':max_52s, 'Retorno 52s':retorno_52s, 'Volatilidade 52s':vol_52s, 'Retorno 30d':retorno_30d, 'Retorno 1d':retorno_1d} for ativo in ativos_similares: name = ativo +'_'+ dia_hoje if name not in ativo_e_similar.keys(): historico = pd.DataFrame(mt5.copy_rates_from_pos(ativo,mt5.TIMEFRAME_D1,0,252)) historico.index = pd.to_datetime(historico['time'],unit='s') historico.sort_index(ascending=False,inplace=True) max_52s = historico['close'].max() # 52 semanas retorno_52s = historico['close'][0]/historico['close'][-1] - 1 # retorno 52 semanas vol_52s = historico['close'].pct_change().std() * 252 ** (1/2) # volatilidade de 52 semanas retorno_30d = historico['close'].head(21)[0]/historico['close'].head(21)[-1] - 1 # retorno 1 
mês retorno_1d = historico['close'][0]/historico['close'][1] - 1 # retorno dia ativo_e_similar[name]= { 'Código':ativo, 'Max 52s':max_52s, 'Data': dia_hoje, 'Retorno 52s':retorno_52s, 'Volatilidade 52s':vol_52s, 'Retorno 30d':retorno_30d, 'Retorno 1d':retorno_1d} #novos_similares = [i+'_'+dia_hoje for i in ativos_similares] retornos = pd.DataFrame([ativo_e_similar.get(key) for key in ativo_e_similar.keys()]) ativo = retornos[retornos['Código']==input_ativo].to_dict('records') similares = retornos[retornos['Código'].isin(ativos_similares)].to_dict('records') return ativo, similares
aming(input_ativo,input_data) df = pd.DataFrame(df) itm = df[(df['Delta'] > 0.85) & (df['Delta'] <= 1.)].copy() atm = df[(df['Delta'] > 0.50) & (df['Delta'] <= 0.75)].copy() dataframe = list() ativo = itm['Ativo Subjacente'].iloc[0] for i in itm.index: if itm['Preco Teorico'].loc[i] != 0: preco_comprado = itm['Preco Teorico'].loc[i].item() strike_comprado = itm['Strike'].loc[i].item() vencimento = itm['Vencimento em(dias)'].loc[i].item() tickers_c = itm['ticker'].loc[i] delta_comprado = itm['Delta'].loc[i].item() vol_implicita_c = itm['Implicita'].loc[i].item() implicita_historica_c = itm['Diferença entre Implicita e Realizada(%)'].loc[i].item() for j in atm.index: if atm['Preco Teorico'].loc[j].item() != 0 and atm['Vencimento em(dias)'].loc[j].item() == vencimento: preco_vendido = atm['Preco Teorico'].loc[j] strike_vendido = atm['Strike'].loc[j] try: tickers_v = atm['ticker'].loc[j] except: tickers_v = np.nan delta_vendido = atm['Delta'].loc[j].item() vol_implicita_v = atm['Implicita'].loc[j].item() implicita_historica_v = atm['Diferença entre Implicita e Realizada(%)'].loc[j].item() payoff_comprada = payoffs(preco_comprado,strike_comprado,ativo,tipo='c') payoff_vendida = payoffs(preco_vendido,strike_vendido,ativo,tipo='v') payoff_operacao = pd.merge(payoff_comprada, payoff_vendida, on='AtivoSubjacente', suffixes=('_comprado', '_vendido')) payoff_operacao['Payoff_total'] = payoff_operacao.Payoffs_comprado + payoff_operacao.Payoffs_vendido try: breakeven = payoff_operacao[payoff_operacao.Payoff_total == 0] preco_breakeven = breakeven.AtivoSubjacente.item() precos_diferenca = preco_comprado - preco_vendido strike_diferenca = strike_comprado - strike_vendido queda_para_prejuizo = (breakeven.AtivoSubjacente.item()/ativo - 1) ganho_maximo = ((preco_comprado - preco_vendido + payoff_operacao.Payoff_total.iloc[-1]) / (preco_comprado - preco_vendido) - 1) except ValueError: breakeven = np.nan preco_breakeven = np.nan ganho_maximo = np.nan strike_diferenca = np.nan 
queda_para_prejuizo = np.nan pass dados = OrderedDict([('Call 1',tickers_c), ('Call 2',tickers_v), ('Vencimento',vencimento), ('Preço Teórico(Call 1)',preco_comprado), ('Preço Teórico(Call 2)',preco_vendido), ('Diferença entre Preços',precos_diferenca), ('Strike Call 1',strike_comprado), ('Strike Call 2',strike_vendido), ('Diferença entre strikes',strike_diferenca), ('Ganho Máximo',ganho_maximo), ('Queda para Prejuízo',queda_para_prejuizo), ('Preco Breakeven',preco_breakeven), ('Delta Call 1',delta_comprado), ('Delta Call 2',delta_vendido), ('Vol Implicita(Call 1)',vol_implicita_c), ('Vol Implicita(Call 2)',vol_implicita_v), ('Implicita vs Histórica(Call 1)',implicita_historica_c), ('Implicita vs Histórica(Call 2)',implicita_historica_v), ]) dataframe.append(dados) df = pd.DataFrame(dataframe) return df.to_dict('records')
identifier_body
proc_fork.rs
use super::*; use crate::{ capture_snapshot, os::task::OwnedTaskStatus, runtime::task_manager::{TaskWasm, TaskWasmRunProperties}, syscalls::*, WasiThreadHandle, }; use serde::{Deserialize, Serialize}; use wasmer::Memory; #[derive(Serialize, Deserialize)] pub(crate) struct ForkResult { pub pid: Pid, pub ret: Errno, } /// ### `proc_fork()` /// Forks the current process into a new subprocess. If the function /// returns a zero then its the new subprocess. If it returns a positive /// number then its the current process and the $pid represents the child. #[instrument(level = "debug", skip_all, fields(pid = ctx.data().process.pid().raw()), ret, err)] pub fn
<M: MemorySize>( mut ctx: FunctionEnvMut<'_, WasiEnv>, mut copy_memory: Bool, pid_ptr: WasmPtr<Pid, M>, ) -> Result<Errno, WasiError> { wasi_try_ok!(WasiEnv::process_signals_and_exit(&mut ctx)?); // If we were just restored then we need to return the value instead if let Some(result) = unsafe { handle_rewind::<M, ForkResult>(&mut ctx) } { if result.pid == 0 { trace!("handle_rewind - i am child (ret={})", result.ret); } else { trace!( "handle_rewind - i am parent (child={}, ret={})", result.pid, result.ret ); } let memory = unsafe { ctx.data().memory_view(&ctx) }; wasi_try_mem_ok!(pid_ptr.write(&memory, result.pid)); return Ok(result.ret); } trace!(%copy_memory, "capturing"); // Fork the environment which will copy all the open file handlers // and associate a new context but otherwise shares things like the // file system interface. The handle to the forked process is stored // in the parent process context let (mut child_env, mut child_handle) = match ctx.data().fork() { Ok(p) => p, Err(err) => { debug!("could not fork process: {err}"); // TODO: evaluate the appropriate error code, document it in the spec. 
return Ok(Errno::Perm); } }; let child_pid = child_env.process.pid(); let child_finished = child_env.process.finished.clone(); // We write a zero to the PID before we capture the stack // so that this is what will be returned to the child { let mut inner = ctx.data().process.inner.write().unwrap(); inner.children.push(child_env.process.clone()); } let env = ctx.data(); let memory = unsafe { env.memory_view(&ctx) }; // Setup some properties in the child environment wasi_try_mem_ok!(pid_ptr.write(&memory, 0)); let pid = child_env.pid(); let tid = child_env.tid(); // Pass some offsets to the unwind function let pid_offset = pid_ptr.offset(); // If we are not copying the memory then we act like a `vfork` // instead which will pretend to be the new process for a period // of time until `proc_exec` is called at which point the fork // actually occurs if copy_memory == Bool::False { // Perform the unwind action return unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| { // Grab all the globals and serialize them let store_data = crate::utils::store::capture_snapshot(&mut ctx.as_store_mut()) .serialize() .unwrap(); let store_data = Bytes::from(store_data); // We first fork the environment and replace the current environment // so that the process can continue to prepare for the real fork as // if it had actually forked child_env.swap_inner(ctx.data_mut()); std::mem::swap(ctx.data_mut(), &mut child_env); ctx.data_mut().vfork.replace(WasiVFork { rewind_stack: rewind_stack.clone(), memory_stack: memory_stack.clone(), store_data: store_data.clone(), env: Box::new(child_env), handle: child_handle, }); // Carry on as if the fork had taken place (which basically means // it prevents to be the new process with the old one suspended) // Rewind the stack and carry on match rewind::<M, _>( ctx, memory_stack.freeze(), rewind_stack.freeze(), store_data, ForkResult { pid: 0, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!("failed 
- could not rewind the stack - errno={}", err); OnCalledAction::Trap(Box::new(WasiError::Exit(err.into()))) } } }); } // Create the thread that will back this forked process let state = env.state.clone(); let bin_factory = env.bin_factory.clone(); // Perform the unwind action let snapshot = capture_snapshot(&mut ctx.as_store_mut()); unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| { let tasks = ctx.data().tasks().clone(); let span = debug_span!( "unwind", memory_stack_len = memory_stack.len(), rewind_stack_len = rewind_stack.len() ); let _span_guard = span.enter(); let memory_stack = memory_stack.freeze(); let rewind_stack = rewind_stack.freeze(); // Grab all the globals and serialize them let store_data = snapshot.serialize().unwrap(); let store_data = Bytes::from(store_data); // Now we use the environment and memory references let runtime = child_env.runtime.clone(); let tasks = child_env.tasks().clone(); let child_memory_stack = memory_stack.clone(); let child_rewind_stack = rewind_stack.clone(); let module = unsafe { ctx.data().inner() }.module_clone(); let memory = unsafe { ctx.data().inner() }.memory_clone(); let spawn_type = SpawnMemoryType::CopyMemory(memory, ctx.as_store_ref()); // Spawn a new process with this current execution environment let signaler = Box::new(child_env.process.clone()); { let runtime = runtime.clone(); let tasks = tasks.clone(); let tasks_outer = tasks.clone(); let store_data = store_data.clone(); let run = move |mut props: TaskWasmRunProperties| { let ctx = props.ctx; let mut store = props.store; // Rewind the stack and carry on { trace!("rewinding child"); let mut ctx = ctx.env.clone().into_mut(&mut store); let (data, mut store) = ctx.data_and_store_mut(); match rewind::<M, _>( ctx, child_memory_stack, child_rewind_stack, store_data.clone(), ForkResult { pid: 0, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!( "wasm rewind failed - could not rewind the stack - errno={}", 
err ); return; } }; } // Invoke the start function run::<M>(ctx, store, child_handle, None); }; tasks_outer .task_wasm( TaskWasm::new(Box::new(run), child_env, module, false) .with_snapshot(&snapshot) .with_memory(spawn_type), ) .map_err(|err| { warn!( "failed to fork as the process could not be spawned - {}", err ); err }) .ok(); }; // Rewind the stack and carry on match rewind::<M, _>( ctx, memory_stack, rewind_stack, store_data, ForkResult { pid: child_pid.raw() as Pid, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!("failed - could not rewind the stack - errno={}", err); OnCalledAction::Trap(Box::new(WasiError::Exit(err.into()))) } } }) } fn run<M: MemorySize>( ctx: WasiFunctionEnv, mut store: Store, child_handle: WasiThreadHandle, rewind_state: Option<(RewindState, Bytes)>, ) -> ExitCode { let env = ctx.data(&store); let tasks = env.tasks().clone(); let pid = env.pid(); let tid = env.tid(); // If we need to rewind then do so if let Some((rewind_state, rewind_result)) = rewind_state { let res = rewind_ext::<M>( ctx.env.clone().into_mut(&mut store), rewind_state.memory_stack, rewind_state.rewind_stack, rewind_state.store_data, rewind_result, ); if res != Errno::Success { return res.into(); } } let mut ret: ExitCode = Errno::Success.into(); let err = if ctx.data(&store).thread.is_main() { trace!(%pid, %tid, "re-invoking main"); let start = unsafe { ctx.data(&store).inner() }.start.clone().unwrap(); start.call(&mut store) } else { trace!(%pid, %tid, "re-invoking thread_spawn"); let start = unsafe { ctx.data(&store).inner() } .thread_spawn .clone() .unwrap(); start.call(&mut store, 0, 0) }; if let Err(err) = err { match err.downcast::<WasiError>() { Ok(WasiError::Exit(exit_code)) => { ret = exit_code; } Ok(WasiError::DeepSleep(deep)) => { trace!(%pid, %tid, "entered a deep sleep"); // Create the respawn function let respawn = { let tasks = tasks.clone(); let rewind_state = deep.rewind; move |ctx, store, rewind_result| { 
run::<M>( ctx, store, child_handle, Some((rewind_state, rewind_result)), ); } }; /// Spawns the WASM process after a trigger unsafe { tasks.resume_wasm_after_poller(Box::new(respawn), ctx, store, deep.trigger) }; return Errno::Success.into(); } _ => {} } } trace!(%pid, %tid, "child exited (code = {})", ret); // Clean up the environment and return the result ctx.cleanup((&mut store), Some(ret)); // We drop the handle at the last moment which will close the thread drop(child_handle); ret }
proc_fork
identifier_name
proc_fork.rs
use super::*; use crate::{ capture_snapshot, os::task::OwnedTaskStatus, runtime::task_manager::{TaskWasm, TaskWasmRunProperties}, syscalls::*, WasiThreadHandle, }; use serde::{Deserialize, Serialize}; use wasmer::Memory; #[derive(Serialize, Deserialize)] pub(crate) struct ForkResult { pub pid: Pid, pub ret: Errno, } /// ### `proc_fork()` /// Forks the current process into a new subprocess. If the function /// returns a zero then its the new subprocess. If it returns a positive /// number then its the current process and the $pid represents the child. #[instrument(level = "debug", skip_all, fields(pid = ctx.data().process.pid().raw()), ret, err)] pub fn proc_fork<M: MemorySize>( mut ctx: FunctionEnvMut<'_, WasiEnv>, mut copy_memory: Bool, pid_ptr: WasmPtr<Pid, M>, ) -> Result<Errno, WasiError> { wasi_try_ok!(WasiEnv::process_signals_and_exit(&mut ctx)?); // If we were just restored then we need to return the value instead if let Some(result) = unsafe { handle_rewind::<M, ForkResult>(&mut ctx) } { if result.pid == 0 { trace!("handle_rewind - i am child (ret={})", result.ret); } else { trace!( "handle_rewind - i am parent (child={}, ret={})", result.pid, result.ret ); } let memory = unsafe { ctx.data().memory_view(&ctx) }; wasi_try_mem_ok!(pid_ptr.write(&memory, result.pid)); return Ok(result.ret); } trace!(%copy_memory, "capturing"); // Fork the environment which will copy all the open file handlers // and associate a new context but otherwise shares things like the // file system interface. The handle to the forked process is stored // in the parent process context let (mut child_env, mut child_handle) = match ctx.data().fork() { Ok(p) => p, Err(err) => { debug!("could not fork process: {err}"); // TODO: evaluate the appropriate error code, document it in the spec. 
return Ok(Errno::Perm); } }; let child_pid = child_env.process.pid(); let child_finished = child_env.process.finished.clone(); // We write a zero to the PID before we capture the stack // so that this is what will be returned to the child { let mut inner = ctx.data().process.inner.write().unwrap(); inner.children.push(child_env.process.clone()); } let env = ctx.data(); let memory = unsafe { env.memory_view(&ctx) }; // Setup some properties in the child environment wasi_try_mem_ok!(pid_ptr.write(&memory, 0)); let pid = child_env.pid(); let tid = child_env.tid(); // Pass some offsets to the unwind function let pid_offset = pid_ptr.offset(); // If we are not copying the memory then we act like a `vfork` // instead which will pretend to be the new process for a period // of time until `proc_exec` is called at which point the fork // actually occurs if copy_memory == Bool::False { // Perform the unwind action return unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| { // Grab all the globals and serialize them let store_data = crate::utils::store::capture_snapshot(&mut ctx.as_store_mut()) .serialize() .unwrap(); let store_data = Bytes::from(store_data); // We first fork the environment and replace the current environment // so that the process can continue to prepare for the real fork as // if it had actually forked child_env.swap_inner(ctx.data_mut()); std::mem::swap(ctx.data_mut(), &mut child_env); ctx.data_mut().vfork.replace(WasiVFork { rewind_stack: rewind_stack.clone(), memory_stack: memory_stack.clone(), store_data: store_data.clone(), env: Box::new(child_env), handle: child_handle, }); // Carry on as if the fork had taken place (which basically means // it prevents to be the new process with the old one suspended) // Rewind the stack and carry on match rewind::<M, _>( ctx, memory_stack.freeze(), rewind_stack.freeze(), store_data, ForkResult { pid: 0, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!("failed 
- could not rewind the stack - errno={}", err); OnCalledAction::Trap(Box::new(WasiError::Exit(err.into()))) } } }); } // Create the thread that will back this forked process let state = env.state.clone(); let bin_factory = env.bin_factory.clone(); // Perform the unwind action let snapshot = capture_snapshot(&mut ctx.as_store_mut()); unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| { let tasks = ctx.data().tasks().clone(); let span = debug_span!( "unwind", memory_stack_len = memory_stack.len(), rewind_stack_len = rewind_stack.len() ); let _span_guard = span.enter(); let memory_stack = memory_stack.freeze(); let rewind_stack = rewind_stack.freeze(); // Grab all the globals and serialize them let store_data = snapshot.serialize().unwrap(); let store_data = Bytes::from(store_data); // Now we use the environment and memory references let runtime = child_env.runtime.clone(); let tasks = child_env.tasks().clone(); let child_memory_stack = memory_stack.clone(); let child_rewind_stack = rewind_stack.clone(); let module = unsafe { ctx.data().inner() }.module_clone(); let memory = unsafe { ctx.data().inner() }.memory_clone(); let spawn_type = SpawnMemoryType::CopyMemory(memory, ctx.as_store_ref()); // Spawn a new process with this current execution environment let signaler = Box::new(child_env.process.clone()); { let runtime = runtime.clone(); let tasks = tasks.clone(); let tasks_outer = tasks.clone(); let store_data = store_data.clone(); let run = move |mut props: TaskWasmRunProperties| { let ctx = props.ctx; let mut store = props.store; // Rewind the stack and carry on { trace!("rewinding child"); let mut ctx = ctx.env.clone().into_mut(&mut store); let (data, mut store) = ctx.data_and_store_mut(); match rewind::<M, _>( ctx, child_memory_stack, child_rewind_stack, store_data.clone(), ForkResult { pid: 0, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!( "wasm rewind failed - could not rewind the stack - errno={}", 
err ); return; } }; } // Invoke the start function run::<M>(ctx, store, child_handle, None); }; tasks_outer .task_wasm( TaskWasm::new(Box::new(run), child_env, module, false) .with_snapshot(&snapshot) .with_memory(spawn_type), ) .map_err(|err| { warn!( "failed to fork as the process could not be spawned - {}", err ); err }) .ok(); }; // Rewind the stack and carry on match rewind::<M, _>( ctx, memory_stack, rewind_stack, store_data, ForkResult { pid: child_pid.raw() as Pid, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!("failed - could not rewind the stack - errno={}", err); OnCalledAction::Trap(Box::new(WasiError::Exit(err.into()))) } } }) } fn run<M: MemorySize>( ctx: WasiFunctionEnv, mut store: Store, child_handle: WasiThreadHandle, rewind_state: Option<(RewindState, Bytes)>, ) -> ExitCode { let env = ctx.data(&store); let tasks = env.tasks().clone(); let pid = env.pid(); let tid = env.tid(); // If we need to rewind then do so if let Some((rewind_state, rewind_result)) = rewind_state { let res = rewind_ext::<M>( ctx.env.clone().into_mut(&mut store), rewind_state.memory_stack, rewind_state.rewind_stack, rewind_state.store_data, rewind_result, ); if res != Errno::Success { return res.into(); } } let mut ret: ExitCode = Errno::Success.into(); let err = if ctx.data(&store).thread.is_main() { trace!(%pid, %tid, "re-invoking main"); let start = unsafe { ctx.data(&store).inner() }.start.clone().unwrap(); start.call(&mut store) } else { trace!(%pid, %tid, "re-invoking thread_spawn"); let start = unsafe { ctx.data(&store).inner() } .thread_spawn .clone() .unwrap(); start.call(&mut store, 0, 0) }; if let Err(err) = err { match err.downcast::<WasiError>() { Ok(WasiError::Exit(exit_code)) => { ret = exit_code; } Ok(WasiError::DeepSleep(deep)) => { trace!(%pid, %tid, "entered a deep sleep"); // Create the respawn function let respawn = { let tasks = tasks.clone(); let rewind_state = deep.rewind; move |ctx, store, rewind_result| { 
run::<M>( ctx, store, child_handle, Some((rewind_state, rewind_result)), ); } }; /// Spawns the WASM process after a trigger unsafe { tasks.resume_wasm_after_poller(Box::new(respawn), ctx, store, deep.trigger) }; return Errno::Success.into(); } _ =>
} } trace!(%pid, %tid, "child exited (code = {})", ret); // Clean up the environment and return the result ctx.cleanup((&mut store), Some(ret)); // We drop the handle at the last moment which will close the thread drop(child_handle); ret }
{}
conditional_block
proc_fork.rs
use super::*; use crate::{ capture_snapshot, os::task::OwnedTaskStatus, runtime::task_manager::{TaskWasm, TaskWasmRunProperties}, syscalls::*, WasiThreadHandle, }; use serde::{Deserialize, Serialize}; use wasmer::Memory; #[derive(Serialize, Deserialize)] pub(crate) struct ForkResult { pub pid: Pid, pub ret: Errno, } /// ### `proc_fork()` /// Forks the current process into a new subprocess. If the function /// returns a zero then its the new subprocess. If it returns a positive /// number then its the current process and the $pid represents the child. #[instrument(level = "debug", skip_all, fields(pid = ctx.data().process.pid().raw()), ret, err)] pub fn proc_fork<M: MemorySize>( mut ctx: FunctionEnvMut<'_, WasiEnv>, mut copy_memory: Bool, pid_ptr: WasmPtr<Pid, M>, ) -> Result<Errno, WasiError> { wasi_try_ok!(WasiEnv::process_signals_and_exit(&mut ctx)?); // If we were just restored then we need to return the value instead if let Some(result) = unsafe { handle_rewind::<M, ForkResult>(&mut ctx) } { if result.pid == 0 { trace!("handle_rewind - i am child (ret={})", result.ret); } else { trace!( "handle_rewind - i am parent (child={}, ret={})", result.pid, result.ret ); } let memory = unsafe { ctx.data().memory_view(&ctx) }; wasi_try_mem_ok!(pid_ptr.write(&memory, result.pid)); return Ok(result.ret); } trace!(%copy_memory, "capturing"); // Fork the environment which will copy all the open file handlers // and associate a new context but otherwise shares things like the // file system interface. The handle to the forked process is stored // in the parent process context let (mut child_env, mut child_handle) = match ctx.data().fork() { Ok(p) => p, Err(err) => { debug!("could not fork process: {err}"); // TODO: evaluate the appropriate error code, document it in the spec. 
return Ok(Errno::Perm); } }; let child_pid = child_env.process.pid(); let child_finished = child_env.process.finished.clone(); // We write a zero to the PID before we capture the stack // so that this is what will be returned to the child { let mut inner = ctx.data().process.inner.write().unwrap(); inner.children.push(child_env.process.clone()); } let env = ctx.data(); let memory = unsafe { env.memory_view(&ctx) }; // Setup some properties in the child environment wasi_try_mem_ok!(pid_ptr.write(&memory, 0)); let pid = child_env.pid(); let tid = child_env.tid(); // Pass some offsets to the unwind function let pid_offset = pid_ptr.offset(); // If we are not copying the memory then we act like a `vfork` // instead which will pretend to be the new process for a period // of time until `proc_exec` is called at which point the fork // actually occurs if copy_memory == Bool::False { // Perform the unwind action return unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| { // Grab all the globals and serialize them let store_data = crate::utils::store::capture_snapshot(&mut ctx.as_store_mut()) .serialize() .unwrap(); let store_data = Bytes::from(store_data); // We first fork the environment and replace the current environment // so that the process can continue to prepare for the real fork as // if it had actually forked child_env.swap_inner(ctx.data_mut()); std::mem::swap(ctx.data_mut(), &mut child_env); ctx.data_mut().vfork.replace(WasiVFork { rewind_stack: rewind_stack.clone(), memory_stack: memory_stack.clone(), store_data: store_data.clone(), env: Box::new(child_env), handle: child_handle, }); // Carry on as if the fork had taken place (which basically means // it prevents to be the new process with the old one suspended) // Rewind the stack and carry on match rewind::<M, _>( ctx, memory_stack.freeze(), rewind_stack.freeze(), store_data, ForkResult { pid: 0, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!("failed 
- could not rewind the stack - errno={}", err); OnCalledAction::Trap(Box::new(WasiError::Exit(err.into()))) } } }); } // Create the thread that will back this forked process let state = env.state.clone(); let bin_factory = env.bin_factory.clone(); // Perform the unwind action let snapshot = capture_snapshot(&mut ctx.as_store_mut()); unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| { let tasks = ctx.data().tasks().clone(); let span = debug_span!( "unwind", memory_stack_len = memory_stack.len(), rewind_stack_len = rewind_stack.len() ); let _span_guard = span.enter(); let memory_stack = memory_stack.freeze(); let rewind_stack = rewind_stack.freeze(); // Grab all the globals and serialize them let store_data = snapshot.serialize().unwrap(); let store_data = Bytes::from(store_data); // Now we use the environment and memory references let runtime = child_env.runtime.clone(); let tasks = child_env.tasks().clone(); let child_memory_stack = memory_stack.clone(); let child_rewind_stack = rewind_stack.clone(); let module = unsafe { ctx.data().inner() }.module_clone(); let memory = unsafe { ctx.data().inner() }.memory_clone(); let spawn_type = SpawnMemoryType::CopyMemory(memory, ctx.as_store_ref()); // Spawn a new process with this current execution environment let signaler = Box::new(child_env.process.clone()); { let runtime = runtime.clone(); let tasks = tasks.clone(); let tasks_outer = tasks.clone(); let store_data = store_data.clone(); let run = move |mut props: TaskWasmRunProperties| { let ctx = props.ctx; let mut store = props.store; // Rewind the stack and carry on { trace!("rewinding child"); let mut ctx = ctx.env.clone().into_mut(&mut store); let (data, mut store) = ctx.data_and_store_mut(); match rewind::<M, _>( ctx, child_memory_stack, child_rewind_stack, store_data.clone(), ForkResult { pid: 0, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!( "wasm rewind failed - could not rewind the stack - errno={}", 
err ); return; } }; } // Invoke the start function run::<M>(ctx, store, child_handle, None); }; tasks_outer .task_wasm( TaskWasm::new(Box::new(run), child_env, module, false) .with_snapshot(&snapshot) .with_memory(spawn_type), ) .map_err(|err| { warn!( "failed to fork as the process could not be spawned - {}", err ); err }) .ok(); }; // Rewind the stack and carry on match rewind::<M, _>( ctx, memory_stack, rewind_stack, store_data, ForkResult { pid: child_pid.raw() as Pid, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!("failed - could not rewind the stack - errno={}", err); OnCalledAction::Trap(Box::new(WasiError::Exit(err.into()))) } } }) } fn run<M: MemorySize>( ctx: WasiFunctionEnv, mut store: Store, child_handle: WasiThreadHandle, rewind_state: Option<(RewindState, Bytes)>, ) -> ExitCode
{ let env = ctx.data(&store); let tasks = env.tasks().clone(); let pid = env.pid(); let tid = env.tid(); // If we need to rewind then do so if let Some((rewind_state, rewind_result)) = rewind_state { let res = rewind_ext::<M>( ctx.env.clone().into_mut(&mut store), rewind_state.memory_stack, rewind_state.rewind_stack, rewind_state.store_data, rewind_result, ); if res != Errno::Success { return res.into(); } } let mut ret: ExitCode = Errno::Success.into(); let err = if ctx.data(&store).thread.is_main() { trace!(%pid, %tid, "re-invoking main"); let start = unsafe { ctx.data(&store).inner() }.start.clone().unwrap(); start.call(&mut store) } else { trace!(%pid, %tid, "re-invoking thread_spawn"); let start = unsafe { ctx.data(&store).inner() } .thread_spawn .clone() .unwrap(); start.call(&mut store, 0, 0) }; if let Err(err) = err { match err.downcast::<WasiError>() { Ok(WasiError::Exit(exit_code)) => { ret = exit_code; } Ok(WasiError::DeepSleep(deep)) => { trace!(%pid, %tid, "entered a deep sleep"); // Create the respawn function let respawn = { let tasks = tasks.clone(); let rewind_state = deep.rewind; move |ctx, store, rewind_result| { run::<M>( ctx, store, child_handle, Some((rewind_state, rewind_result)), ); } }; /// Spawns the WASM process after a trigger unsafe { tasks.resume_wasm_after_poller(Box::new(respawn), ctx, store, deep.trigger) }; return Errno::Success.into(); } _ => {} } } trace!(%pid, %tid, "child exited (code = {})", ret); // Clean up the environment and return the result ctx.cleanup((&mut store), Some(ret)); // We drop the handle at the last moment which will close the thread drop(child_handle); ret }
identifier_body
proc_fork.rs
use super::*; use crate::{ capture_snapshot, os::task::OwnedTaskStatus, runtime::task_manager::{TaskWasm, TaskWasmRunProperties}, syscalls::*, WasiThreadHandle, }; use serde::{Deserialize, Serialize}; use wasmer::Memory; #[derive(Serialize, Deserialize)] pub(crate) struct ForkResult { pub pid: Pid, pub ret: Errno, } /// ### `proc_fork()` /// Forks the current process into a new subprocess. If the function /// returns a zero then its the new subprocess. If it returns a positive /// number then its the current process and the $pid represents the child. #[instrument(level = "debug", skip_all, fields(pid = ctx.data().process.pid().raw()), ret, err)] pub fn proc_fork<M: MemorySize>( mut ctx: FunctionEnvMut<'_, WasiEnv>, mut copy_memory: Bool, pid_ptr: WasmPtr<Pid, M>, ) -> Result<Errno, WasiError> { wasi_try_ok!(WasiEnv::process_signals_and_exit(&mut ctx)?); // If we were just restored then we need to return the value instead if let Some(result) = unsafe { handle_rewind::<M, ForkResult>(&mut ctx) } { if result.pid == 0 { trace!("handle_rewind - i am child (ret={})", result.ret); } else { trace!( "handle_rewind - i am parent (child={}, ret={})", result.pid, result.ret ); } let memory = unsafe { ctx.data().memory_view(&ctx) }; wasi_try_mem_ok!(pid_ptr.write(&memory, result.pid)); return Ok(result.ret); } trace!(%copy_memory, "capturing"); // Fork the environment which will copy all the open file handlers // and associate a new context but otherwise shares things like the // file system interface. The handle to the forked process is stored // in the parent process context let (mut child_env, mut child_handle) = match ctx.data().fork() { Ok(p) => p, Err(err) => { debug!("could not fork process: {err}"); // TODO: evaluate the appropriate error code, document it in the spec. 
return Ok(Errno::Perm); } }; let child_pid = child_env.process.pid(); let child_finished = child_env.process.finished.clone(); // We write a zero to the PID before we capture the stack // so that this is what will be returned to the child { let mut inner = ctx.data().process.inner.write().unwrap(); inner.children.push(child_env.process.clone()); } let env = ctx.data(); let memory = unsafe { env.memory_view(&ctx) }; // Setup some properties in the child environment wasi_try_mem_ok!(pid_ptr.write(&memory, 0)); let pid = child_env.pid(); let tid = child_env.tid(); // Pass some offsets to the unwind function let pid_offset = pid_ptr.offset(); // If we are not copying the memory then we act like a `vfork` // instead which will pretend to be the new process for a period // of time until `proc_exec` is called at which point the fork // actually occurs if copy_memory == Bool::False { // Perform the unwind action return unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| { // Grab all the globals and serialize them let store_data = crate::utils::store::capture_snapshot(&mut ctx.as_store_mut()) .serialize() .unwrap(); let store_data = Bytes::from(store_data); // We first fork the environment and replace the current environment // so that the process can continue to prepare for the real fork as // if it had actually forked child_env.swap_inner(ctx.data_mut()); std::mem::swap(ctx.data_mut(), &mut child_env); ctx.data_mut().vfork.replace(WasiVFork { rewind_stack: rewind_stack.clone(), memory_stack: memory_stack.clone(), store_data: store_data.clone(), env: Box::new(child_env), handle: child_handle, }); // Carry on as if the fork had taken place (which basically means // it prevents to be the new process with the old one suspended) // Rewind the stack and carry on match rewind::<M, _>( ctx, memory_stack.freeze(), rewind_stack.freeze(), store_data, ForkResult { pid: 0, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!("failed 
- could not rewind the stack - errno={}", err); OnCalledAction::Trap(Box::new(WasiError::Exit(err.into()))) } } }); } // Create the thread that will back this forked process let state = env.state.clone(); let bin_factory = env.bin_factory.clone(); // Perform the unwind action let snapshot = capture_snapshot(&mut ctx.as_store_mut()); unwind::<M, _>(ctx, move |mut ctx, mut memory_stack, rewind_stack| { let tasks = ctx.data().tasks().clone(); let span = debug_span!( "unwind", memory_stack_len = memory_stack.len(), rewind_stack_len = rewind_stack.len() ); let _span_guard = span.enter(); let memory_stack = memory_stack.freeze(); let rewind_stack = rewind_stack.freeze(); // Grab all the globals and serialize them let store_data = snapshot.serialize().unwrap(); let store_data = Bytes::from(store_data); // Now we use the environment and memory references let runtime = child_env.runtime.clone(); let tasks = child_env.tasks().clone(); let child_memory_stack = memory_stack.clone(); let child_rewind_stack = rewind_stack.clone(); let module = unsafe { ctx.data().inner() }.module_clone(); let memory = unsafe { ctx.data().inner() }.memory_clone(); let spawn_type = SpawnMemoryType::CopyMemory(memory, ctx.as_store_ref()); // Spawn a new process with this current execution environment let signaler = Box::new(child_env.process.clone()); { let runtime = runtime.clone(); let tasks = tasks.clone(); let tasks_outer = tasks.clone(); let store_data = store_data.clone(); let run = move |mut props: TaskWasmRunProperties| { let ctx = props.ctx; let mut store = props.store; // Rewind the stack and carry on { trace!("rewinding child"); let mut ctx = ctx.env.clone().into_mut(&mut store); let (data, mut store) = ctx.data_and_store_mut(); match rewind::<M, _>( ctx, child_memory_stack, child_rewind_stack, store_data.clone(), ForkResult { pid: 0, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!( "wasm rewind failed - could not rewind the stack - errno={}", 
err ); return; } }; } // Invoke the start function run::<M>(ctx, store, child_handle, None); }; tasks_outer .task_wasm( TaskWasm::new(Box::new(run), child_env, module, false) .with_snapshot(&snapshot) .with_memory(spawn_type), ) .map_err(|err| { warn!( "failed to fork as the process could not be spawned - {}", err ); err }) .ok(); }; // Rewind the stack and carry on match rewind::<M, _>( ctx, memory_stack, rewind_stack, store_data, ForkResult { pid: child_pid.raw() as Pid, ret: Errno::Success, }, ) { Errno::Success => OnCalledAction::InvokeAgain, err => { warn!("failed - could not rewind the stack - errno={}", err); OnCalledAction::Trap(Box::new(WasiError::Exit(err.into()))) } } }) } fn run<M: MemorySize>( ctx: WasiFunctionEnv, mut store: Store, child_handle: WasiThreadHandle, rewind_state: Option<(RewindState, Bytes)>, ) -> ExitCode { let env = ctx.data(&store); let tasks = env.tasks().clone(); let pid = env.pid(); let tid = env.tid(); // If we need to rewind then do so if let Some((rewind_state, rewind_result)) = rewind_state { let res = rewind_ext::<M>( ctx.env.clone().into_mut(&mut store), rewind_state.memory_stack, rewind_state.rewind_stack, rewind_state.store_data, rewind_result, ); if res != Errno::Success { return res.into(); } } let mut ret: ExitCode = Errno::Success.into(); let err = if ctx.data(&store).thread.is_main() { trace!(%pid, %tid, "re-invoking main"); let start = unsafe { ctx.data(&store).inner() }.start.clone().unwrap(); start.call(&mut store) } else { trace!(%pid, %tid, "re-invoking thread_spawn"); let start = unsafe { ctx.data(&store).inner() } .thread_spawn .clone() .unwrap(); start.call(&mut store, 0, 0) }; if let Err(err) = err { match err.downcast::<WasiError>() { Ok(WasiError::Exit(exit_code)) => { ret = exit_code; } Ok(WasiError::DeepSleep(deep)) => { trace!(%pid, %tid, "entered a deep sleep");
let rewind_state = deep.rewind; move |ctx, store, rewind_result| { run::<M>( ctx, store, child_handle, Some((rewind_state, rewind_result)), ); } }; /// Spawns the WASM process after a trigger unsafe { tasks.resume_wasm_after_poller(Box::new(respawn), ctx, store, deep.trigger) }; return Errno::Success.into(); } _ => {} } } trace!(%pid, %tid, "child exited (code = {})", ret); // Clean up the environment and return the result ctx.cleanup((&mut store), Some(ret)); // We drop the handle at the last moment which will close the thread drop(child_handle); ret }
// Create the respawn function let respawn = { let tasks = tasks.clone();
random_line_split
phase1b_stack.py
import glob import numpy as np import astropy.io.fits as pyfits import commands import sys from drizzlepac import tweakreg, astrodrizzle import collections import copy set_num = sys.argv[1] def do_it(cmd): print cmd print commands.getoutput(cmd) def get_filter(the_header): try: return the_header["FILTER"] except: filt = the_header["FILTER1"] if filt.find("CLEAR") == -1: return filt else: return the_header["FILTER2"] def find_filter(flt_list, the_filters):
def find_best_ref(all_flc_list, filt_priority=["F110W", "F105W", "F140W", "F125W", "F814W", "F775W", "F606W", "F160W"]): flc_list = [] for filt in filt_priority: if flc_list == []: flc_list = find_filter(all_flc_list, filt) print "flc_list for ", filt, flc_list print "Find ref with least maximum disagreement." print "In princple, this should take rotation into account." xlist = np.array([], dtype=np.float64) ylist = np.array([], dtype=np.float64) for fl in flc_list: f = pyfits.open(fl) ra = f[0].header["RA_TARG"] dec = f[0].header["DEC_TARG"] f.close() x = ra*np.cos(dec/57.3)*3600*20 y = dec*3600*20 xlist = np.append(xlist, x) ylist = np.append(ylist, y) besttotal = 1.e10 for i in range(len(xlist)): new = np.sqrt((xlist - xlist[i])**2. + (ylist - ylist[i])**2.) if max(new) < besttotal: besttotal = max(new) besti = i print "Ref to use ", flc_list[besti], besti return flc_list[besti], besti def transfer_header(infl, outfl): """I don't know why Eli's version of this doesn't work...""" print "Transfer", infl, "to", outfl fin = pyfits.open(infl) fout = pyfits.open(outfl, 'update') dont_transfer = ["HSTSLAC", "MDRIZSKY", "LACOSMIC", "HISTORY", "COMMENT", ""] print "Transferring: ", for i in range(len(fin)): for key in fin[i].header: if dont_transfer.count(key) == 0: if fin[i].header[key] != fout[i].header.get(key, default = None): print key, fout[i].header[key] = fin[i].header[key] fout.flush() fout.close() fin.close() print def do_tweak(flt_list, besti, lowthreshold = 0): f = open(bad_pix_list_wfc3) lines = f.read().split('\n') f.close() lines = [item.split(None) for item in lines] lines = [item for item in lines if item != []] bad_pix = [(int(item[0]), int(item[1])) for item in lines] tmp_ims = [] for i in range(len(flt_list)): f = pyfits.open(flt_list[i]) if f[0].header["INSTRUME"] == "ACS": tmp_ims.append(flt_list[i].replace(".fits", "_lac.fits")) acs = True else: tmp_ims.append(flt_list[i].replace(".fits", "_filter.fits")) if flt_list[i] == tmp_ims[i]: print "Error 
with ", flt_list[i] sys.exit(1) print "Median Filtering ", flt_list[i] f = pyfits.open(flt_list[i]) tmpdata = copy.deepcopy(f["SCI"].data) LTV1 = f["SCI"].header["LTV1"] LTV2 = f["SCI"].header["LTV2"] for this_x, this_y in bad_pix: this_x += LTV1 this_y += LTV2 if this_x > 1 and this_x < len(tmpdata[0]) and this_y > 1 and this_y < len(tmpdata): f["SCI"].data[int(np.around(this_y - 1)), int(np.around(this_x - 1))] = np.median(tmpdata[int(np.around(this_y - 2)): int(np.around(this_y + 1)), int(np.around(this_x - 2)): int(np.around(this_x + 1))]) f.writeto(tmp_ims[i], clobber = True) f.close() acs = False do_it("cp -f " + tmp_ims[i] + " " + tmp_ims[i].replace("/orig_files/", "/")) tmp_ims[i] = tmp_ims[i].replace("/orig_files/", "/") print "tmp_ims ", tmp_ims tweakref = tmp_ims[besti] tweakreg.TweakReg(','.join(tmp_ims), updatehdr=True, shiftfile=True, # This is just for show ############ Change This Between Iterations: ########## refimage=tweakref, updatewcs=False, # I think this should always be false. searchrad=4, searchunits='arcseconds', threshold=(1. + 7.*acs)/(lowthreshold + 1.), conv_width=(2.5 + 1.*acs), # 3.5 for optical, 2.5 for IR ######### Change This Between Iterations: ############## wcsname="TWEAK_rough", residplot='No plot', see2dplot=False, fitgeometry='shift') # Have to change this for that one epoch, G cluster? f = open("shifts.txt") lines = f.read() f.close() if lines.find(" nan ") != -1: print "Couldn't match!" if lowthreshold == 0: # First iteration print "Trying lower threshold..." 
do_tweak(flt_list, besti, lowthreshold = 1) else: print "...even though lowthreshold is ", lowthreshold sys.exit(1) for i in range(len(flt_list)): print "Transferring from ", tmp_ims[i], flt_list[i] transfer_header(tmp_ims[i], flt_list[i]) def do_drizzle(flc_list, outputname, clean = True, refimage = "", build = True, cr_sensitive = False, outputscale = 0.05): print "overriding cr_sensitive", cr_sensitive cr_sensitive = True n_img = len(flc_list) combine_type = "minmed"*(n_img <= 4.) + "median"*(n_img > 4) print "Number of images ", n_img, combine_type if refimage != "": print "Using refimage", refimage nicmos = (flc_list[0].split("/")[-1][0] == "n") if nicmos: combine_type = "minmed" wfc3 = (flc_list[0].split("/")[-1][0] == "i") print "flc_list, nicmos, wfc3 ", flc_list, nicmos, wfc3 astrodrizzle.AstroDrizzle(','.join(flc_list), preserve=False, build=build, output=outputname, clean=clean*0, # Clean up tmp files updatewcs=nicmos, # This is right proc_unit='native', driz_sep_kernel='square', driz_sep_pixfrac=1.0, driz_sep_scale=0.128, driz_sep_bits=(0 + (512+1024+2048)*nicmos + (2048+8192)*wfc3), combine_type=combine_type, driz_cr=(n_img > 1), median=(n_img > 1), blot=(n_img > 1), static=(n_img > 1), #driz_cr_snr = "3.5 3.0", driz_cr_scale=("3 2"*(1 - cr_sensitive) + "2 1.5"*cr_sensitive), # Up from default 1.2, 0.7 #driz_cr_scale = "2. 1.5", #final_wht_type = "ERR", # This is very wrong! Why do they even include it? final_wht_type="EXP", # This one works! final_kernel="gaussian", final_pixfrac=1.0, # Should be default. final_wcs=True, final_rot=0., final_bits=(0 + (512+1024+2048)*nicmos + (2048+8192)*wfc3), final_scale=outputscale, final_refimage=refimage) if nicmos: f = pyfits.open(outputname + "_drz.fits", 'update') expend = f[0].header["EXPEND"] print outputname, "EXPEND", expend if expend > 51544: print "Multiplying by 1.007!" 
f["SCI"].data *= 1.007 f.flush() f.close() def get_fls_by_filter_date(globpath = ""): files_by_filter_date = collections.OrderedDict() if globpath == "": origfls = glob.glob(data_path + "set_%s/orig_files/*flt.fits" % set_num) simfls = [] #glob.glob("simulated_ims/*flt.fits") else: origfls = glob.glob(globpath) simfls = [] for i in range(len(origfls))[::-1]: foundsim = 0 for simfl in simfls: if origfls[i].split("/")[-1] == simfl.split("/")[-1]: foundsim = 1 if foundsim: del origfls[i] fls_sorted_by_date = [] for fl in origfls + simfls: f = pyfits.open(fl) EXPEND = f[0].header["EXPEND"] f.close() fls_sorted_by_date.append((EXPEND, fl)) fls_sorted_by_date.sort() # print fls_sorted_by_date fls_sorted_by_date = [item[1] for item in fls_sorted_by_date] for fl in fls_sorted_by_date: f = pyfits.open(fl) EXPEND = f[0].header["EXPEND"] FILTER = f[0].header["FILTER"] f.close() found = 0 for key in files_by_filter_date: if (key[0] == FILTER) and (abs(EXPEND - key[1]) < 1.): files_by_filter_date[key].append(fl) found += 1 assert found < 2 if found == 0: files_by_filter_date[(FILTER, EXPEND)] = [fl] # for key in files_by_filter_date: # print key, files_by_filter_date[key] return files_by_filter_date def sort_ims(ims_path): origfls = glob.glob(ims_path+'/*flt.fits') print origfls ims_dict = {} for fl in origfls: f = pyfits.open(fl) EXPEND = int(f[0].header["EXPEND"]) FILTER = f[0].header["FILTER"] f.close() just_fl = fl.split('/')[-1] print just_fl, FILTER, EXPEND try: ims_dict[FILTER] except: ims_dict[FILTER] = {} try: ims_dict[FILTER][EXPEND].append(just_fl) except: ims_dict[FILTER][EXPEND] = [] ims_dict[FILTER][EXPEND].append(just_fl) filt1, filt2 = ims_dict.keys() filt1_e1 = np.min(ims_dict[filt1].keys()) filt1_e2 = np.max(ims_dict[filt1].keys()) filt2_e1 = np.min(ims_dict[filt2].keys()) filt2_e2 = np.max(ims_dict[filt2].keys()) filt1_epoch1_fls = ims_dict[filt1][filt1_e1] filt1_epoch2_fls = ims_dict[filt1][filt1_e2] filt2_epoch1_fls = ims_dict[filt2][filt2_e1] 
filt2_epoch2_fls = ims_dict[filt2][filt2_e2] return filt1, filt2, filt1_epoch1_fls, filt1_epoch2_fls, \ filt2_epoch1_fls, filt2_epoch2_fls def get_filters(ims_path): origfls = glob.glob(ims_path+'/*flt.fits') print ims_path print origfls filts = [] for fl in origfls: f = pyfits.open(fl) FILTER = f[0].header["FILTER"] f.close() filts.append(FILTER) unique_filters = np.unique(filts) return unique_filters #path = '/Users/mcurrie/Projects/TransiNet/data/set_%s/orig_files' % set_num #data_path = '/Users/mcurrie/Projects/TransiNet/data/' #path = '/Volumes/My_book/TransiNet/data/set_%s/orig_files' % set_num #data_path = '/Volumes/My_book/TransiNet/data/' data_path = '/Volumes/My_Book/TransiNet/data/sets_newbadpix/' # step 0: stack images with open('obj_coords.dat', 'wb') as f: f.write('set_%s 0 0' % set_num) outputscale = 0.09 sky_nlc_order = 'nlcsky' bad_pix_list_wfc3 = data_path + 'bad_pix_list_wfc3.txt' set_num = sys.argv[1] set_dir = 'set_' + set_num userrefimage = '' do_it("mkdir %s/%s/orig_files" % (data_path, set_dir)) do_it("mv %s/*fits %s/orig_files" % (data_path + set_dir, data_path + set_dir)) print "Aligning Images..." for filter in [["F606W", "F775W", "F814W"], ["F105W", "F110W", "F125W", "F140W", "F160W"]]: flt_list = glob.glob(data_path + set_dir + "/orig_files/i*flt.fits") + \ glob.glob(data_path + set_dir + "/orig_files/j*flc.fits") flt_list.sort() flt_list = find_filter(flt_list, filter) if flt_list != []: best_ref, besti = find_best_ref(flt_list) do_tweak(flt_list, besti) do_it("rm -f %s/*.coo %s/*.match %s/*catfile.list" % (data_path + set_dir, data_path + set_dir, data_path + set_dir)) do_it("mv shifts.txt " + data_path + set_dir + "/shifts_%s.txt" % "_".join(filter)) print 'Finished alignment' print "Drizzling WFC3..." 
for filter in ["F105W", "F110W", "F125W", "F140W", "F160W"]: files = find_filter(glob.glob(data_path + set_dir + "/orig_files/i*flt.fits"), filter) print "filter, files", filter, files if len(files) > 0: for cr_sensitive in [0]: new_files = [item.replace("/orig_files", "") for item in files] for file, new_file in zip(files, new_files): if new_file == file: print "Error,", new_file, "is the same!" sys.exit(1) do_it("cp -vf " + file + " " + new_file) driz_filename = filter + "_stack" + "_CRsens"*cr_sensitive do_drizzle(new_files, driz_filename, clean=True, refimage=(userrefimage != "None")*userrefimage, build = True, cr_sensitive=cr_sensitive, outputscale=outputscale) do_it("mv " + driz_filename + "_drz.fits " + data_path + set_dir) do_it("rm -fv " + " ".join(new_files)) print "Drizzling ACS..." for filter in ["F775W", "F814W", "F606W", "F850LP"]: files = find_filter(glob.glob(data_path + set_dir + "/orig_files/j*flc.fits"), filter) print "filter, files", filter, files if len(files) > 0: for cr_sensitive in [0]: new_files = [item.replace("/orig_files", "") for item in files] for file, new_file in zip(files, new_files): if new_file == file: print "Error,", new_file, "is the same!" 
sys.exit(1) do_it("cp -vf " + file + " " + new_file) driz_filename = filter + "_stack" + "_CRsens"*cr_sensitive do_drizzle(new_files, driz_filename, clean=True, refimage=(userrefimage != "None")*userrefimage, build=True, cr_sensitive=cr_sensitive, outputscale=outputscale) do_it("mv " + driz_filename + "_drc.fits " + data_path + set_dir) do_it("rm -fv " + " ".join(new_files)) unique_filters = get_filters(data_path+set_dir+'/orig_files/') origfls = glob.glob(data_path+'/orig_files/*flt.fits') with open(data_path + 'paramfile_%s.txt' % set_num, 'wb') as paramfl: paramfl.write('drz\t%s/set_%s/%s_stack_drz.fits\n' % (data_path, set_num, unique_filters[0])) paramfl.write('aligned\t%s\n' % ' '.join(origfls)) paramfl.write('F125W_zp\t26.23\n') paramfl.write('F105W_zp\t26.24\n') paramfl.write('F140W_zp\t26.44\n') paramfl.write('F160W_zp\t25.92\n') paramfl.write('min_mag\t25.0\n') paramfl.write('max_mag\t27.0\n') paramfl.write('step_mag\t0.2\n') paramfl.write('gauss_r\t4\n') paramfl.write('frac_real\t0.5\n') paramfl.write('F125W_highz\t26.8\n') paramfl.write('F105W_highz\t26.8\n') paramfl.write('F140W_highz\t26.0\n') paramfl.write('F160W_highz\t25.9\n') paramfl.write('frac_highz\t0.003\n') # stack epochs fls_by_filter_date = get_fls_by_filter_date() commands.getoutput("rm -f %s/set_%s_epochs/*" % (data_path, set_num)) commands.getoutput("mkdir %s/set_%s_epochs" % (data_path, set_num)) filter_counter = [] for item in fls_by_filter_date: print item for im in fls_by_filter_date[item]: commands.getoutput("cp " + im + " %s/set_%s_epochs" % (data_path, set_num)) filter_counter.append(item[0]) refimage = commands.getoutput("grep drz "+data_path+"paramfile_%s.txt" % set_num).split(None)[1] + "[SCI]" print "refimage", refimage do_drizzle([data_path + "set_"+set_num+"_epochs/" + subitem.split("/")[-1] for subitem in fls_by_filter_date[item]], outputname = data_path + "set_"+set_num+"_epochs/" + item[0] + "_epoch%02i" % (filter_counter.count(filter_counter[-1])), refimage=refimage, 
outputscale=outputscale)
if type(the_filters) == type("a"): filt_list = [copy.deepcopy(the_filters)] else: filt_list = copy.deepcopy(the_filters) f125w_list = [] for item in flt_list: f = pyfits.open(item) if filt_list.count(get_filter(f[0].header)): f125w_list.append(item) return f125w_list
identifier_body
phase1b_stack.py
import glob import numpy as np import astropy.io.fits as pyfits import commands import sys from drizzlepac import tweakreg, astrodrizzle import collections import copy set_num = sys.argv[1] def do_it(cmd): print cmd print commands.getoutput(cmd) def get_filter(the_header): try: return the_header["FILTER"] except: filt = the_header["FILTER1"] if filt.find("CLEAR") == -1: return filt else: return the_header["FILTER2"] def find_filter(flt_list, the_filters): if type(the_filters) == type("a"): filt_list = [copy.deepcopy(the_filters)] else: filt_list = copy.deepcopy(the_filters) f125w_list = [] for item in flt_list: f = pyfits.open(item) if filt_list.count(get_filter(f[0].header)): f125w_list.append(item) return f125w_list def find_best_ref(all_flc_list, filt_priority=["F110W", "F105W", "F140W", "F125W", "F814W", "F775W", "F606W", "F160W"]): flc_list = [] for filt in filt_priority: if flc_list == []: flc_list = find_filter(all_flc_list, filt) print "flc_list for ", filt, flc_list print "Find ref with least maximum disagreement." print "In princple, this should take rotation into account." xlist = np.array([], dtype=np.float64) ylist = np.array([], dtype=np.float64) for fl in flc_list: f = pyfits.open(fl) ra = f[0].header["RA_TARG"] dec = f[0].header["DEC_TARG"] f.close() x = ra*np.cos(dec/57.3)*3600*20 y = dec*3600*20 xlist = np.append(xlist, x) ylist = np.append(ylist, y) besttotal = 1.e10 for i in range(len(xlist)): new = np.sqrt((xlist - xlist[i])**2. + (ylist - ylist[i])**2.) 
if max(new) < besttotal: besttotal = max(new) besti = i print "Ref to use ", flc_list[besti], besti return flc_list[besti], besti def transfer_header(infl, outfl): """I don't know why Eli's version of this doesn't work...""" print "Transfer", infl, "to", outfl fin = pyfits.open(infl) fout = pyfits.open(outfl, 'update') dont_transfer = ["HSTSLAC", "MDRIZSKY", "LACOSMIC", "HISTORY", "COMMENT", ""] print "Transferring: ", for i in range(len(fin)): for key in fin[i].header: if dont_transfer.count(key) == 0: if fin[i].header[key] != fout[i].header.get(key, default = None): print key, fout[i].header[key] = fin[i].header[key] fout.flush() fout.close() fin.close() print def do_tweak(flt_list, besti, lowthreshold = 0): f = open(bad_pix_list_wfc3) lines = f.read().split('\n') f.close() lines = [item.split(None) for item in lines] lines = [item for item in lines if item != []] bad_pix = [(int(item[0]), int(item[1])) for item in lines] tmp_ims = [] for i in range(len(flt_list)): f = pyfits.open(flt_list[i]) if f[0].header["INSTRUME"] == "ACS": tmp_ims.append(flt_list[i].replace(".fits", "_lac.fits")) acs = True else: tmp_ims.append(flt_list[i].replace(".fits", "_filter.fits")) if flt_list[i] == tmp_ims[i]: print "Error with ", flt_list[i] sys.exit(1) print "Median Filtering ", flt_list[i] f = pyfits.open(flt_list[i]) tmpdata = copy.deepcopy(f["SCI"].data) LTV1 = f["SCI"].header["LTV1"] LTV2 = f["SCI"].header["LTV2"] for this_x, this_y in bad_pix: this_x += LTV1 this_y += LTV2 if this_x > 1 and this_x < len(tmpdata[0]) and this_y > 1 and this_y < len(tmpdata): f["SCI"].data[int(np.around(this_y - 1)), int(np.around(this_x - 1))] = np.median(tmpdata[int(np.around(this_y - 2)): int(np.around(this_y + 1)), int(np.around(this_x - 2)): int(np.around(this_x + 1))]) f.writeto(tmp_ims[i], clobber = True) f.close() acs = False do_it("cp -f " + tmp_ims[i] + " " + tmp_ims[i].replace("/orig_files/", "/")) tmp_ims[i] = tmp_ims[i].replace("/orig_files/", "/") print "tmp_ims ", tmp_ims 
tweakref = tmp_ims[besti] tweakreg.TweakReg(','.join(tmp_ims), updatehdr=True, shiftfile=True, # This is just for show ############ Change This Between Iterations: ########## refimage=tweakref, updatewcs=False, # I think this should always be false. searchrad=4, searchunits='arcseconds', threshold=(1. + 7.*acs)/(lowthreshold + 1.), conv_width=(2.5 + 1.*acs), # 3.5 for optical, 2.5 for IR ######### Change This Between Iterations: ############## wcsname="TWEAK_rough", residplot='No plot', see2dplot=False, fitgeometry='shift') # Have to change this for that one epoch, G cluster? f = open("shifts.txt") lines = f.read() f.close() if lines.find(" nan ") != -1: print "Couldn't match!" if lowthreshold == 0: # First iteration print "Trying lower threshold..." do_tweak(flt_list, besti, lowthreshold = 1) else: print "...even though lowthreshold is ", lowthreshold sys.exit(1) for i in range(len(flt_list)): print "Transferring from ", tmp_ims[i], flt_list[i] transfer_header(tmp_ims[i], flt_list[i]) def do_drizzle(flc_list, outputname, clean = True, refimage = "", build = True, cr_sensitive = False, outputscale = 0.05): print "overriding cr_sensitive", cr_sensitive cr_sensitive = True n_img = len(flc_list) combine_type = "minmed"*(n_img <= 4.) 
+ "median"*(n_img > 4) print "Number of images ", n_img, combine_type if refimage != "": print "Using refimage", refimage nicmos = (flc_list[0].split("/")[-1][0] == "n") if nicmos: combine_type = "minmed" wfc3 = (flc_list[0].split("/")[-1][0] == "i") print "flc_list, nicmos, wfc3 ", flc_list, nicmos, wfc3 astrodrizzle.AstroDrizzle(','.join(flc_list), preserve=False, build=build, output=outputname, clean=clean*0, # Clean up tmp files updatewcs=nicmos, # This is right proc_unit='native', driz_sep_kernel='square', driz_sep_pixfrac=1.0, driz_sep_scale=0.128, driz_sep_bits=(0 + (512+1024+2048)*nicmos + (2048+8192)*wfc3), combine_type=combine_type, driz_cr=(n_img > 1), median=(n_img > 1), blot=(n_img > 1), static=(n_img > 1), #driz_cr_snr = "3.5 3.0", driz_cr_scale=("3 2"*(1 - cr_sensitive) + "2 1.5"*cr_sensitive), # Up from default 1.2, 0.7 #driz_cr_scale = "2. 1.5", #final_wht_type = "ERR", # This is very wrong! Why do they even include it? final_wht_type="EXP", # This one works! final_kernel="gaussian", final_pixfrac=1.0, # Should be default. final_wcs=True, final_rot=0., final_bits=(0 + (512+1024+2048)*nicmos + (2048+8192)*wfc3), final_scale=outputscale, final_refimage=refimage) if nicmos: f = pyfits.open(outputname + "_drz.fits", 'update') expend = f[0].header["EXPEND"] print outputname, "EXPEND", expend if expend > 51544: print "Multiplying by 1.007!" 
f["SCI"].data *= 1.007 f.flush() f.close() def get_fls_by_filter_date(globpath = ""): files_by_filter_date = collections.OrderedDict() if globpath == "": origfls = glob.glob(data_path + "set_%s/orig_files/*flt.fits" % set_num) simfls = [] #glob.glob("simulated_ims/*flt.fits") else: origfls = glob.glob(globpath) simfls = [] for i in range(len(origfls))[::-1]: foundsim = 0 for simfl in simfls: if origfls[i].split("/")[-1] == simfl.split("/")[-1]: foundsim = 1 if foundsim: del origfls[i] fls_sorted_by_date = [] for fl in origfls + simfls: f = pyfits.open(fl) EXPEND = f[0].header["EXPEND"] f.close() fls_sorted_by_date.append((EXPEND, fl)) fls_sorted_by_date.sort() # print fls_sorted_by_date fls_sorted_by_date = [item[1] for item in fls_sorted_by_date] for fl in fls_sorted_by_date: f = pyfits.open(fl) EXPEND = f[0].header["EXPEND"] FILTER = f[0].header["FILTER"] f.close() found = 0 for key in files_by_filter_date: if (key[0] == FILTER) and (abs(EXPEND - key[1]) < 1.): files_by_filter_date[key].append(fl) found += 1 assert found < 2 if found == 0: files_by_filter_date[(FILTER, EXPEND)] = [fl] # for key in files_by_filter_date: # print key, files_by_filter_date[key] return files_by_filter_date def
(ims_path): origfls = glob.glob(ims_path+'/*flt.fits') print origfls ims_dict = {} for fl in origfls: f = pyfits.open(fl) EXPEND = int(f[0].header["EXPEND"]) FILTER = f[0].header["FILTER"] f.close() just_fl = fl.split('/')[-1] print just_fl, FILTER, EXPEND try: ims_dict[FILTER] except: ims_dict[FILTER] = {} try: ims_dict[FILTER][EXPEND].append(just_fl) except: ims_dict[FILTER][EXPEND] = [] ims_dict[FILTER][EXPEND].append(just_fl) filt1, filt2 = ims_dict.keys() filt1_e1 = np.min(ims_dict[filt1].keys()) filt1_e2 = np.max(ims_dict[filt1].keys()) filt2_e1 = np.min(ims_dict[filt2].keys()) filt2_e2 = np.max(ims_dict[filt2].keys()) filt1_epoch1_fls = ims_dict[filt1][filt1_e1] filt1_epoch2_fls = ims_dict[filt1][filt1_e2] filt2_epoch1_fls = ims_dict[filt2][filt2_e1] filt2_epoch2_fls = ims_dict[filt2][filt2_e2] return filt1, filt2, filt1_epoch1_fls, filt1_epoch2_fls, \ filt2_epoch1_fls, filt2_epoch2_fls def get_filters(ims_path): origfls = glob.glob(ims_path+'/*flt.fits') print ims_path print origfls filts = [] for fl in origfls: f = pyfits.open(fl) FILTER = f[0].header["FILTER"] f.close() filts.append(FILTER) unique_filters = np.unique(filts) return unique_filters #path = '/Users/mcurrie/Projects/TransiNet/data/set_%s/orig_files' % set_num #data_path = '/Users/mcurrie/Projects/TransiNet/data/' #path = '/Volumes/My_book/TransiNet/data/set_%s/orig_files' % set_num #data_path = '/Volumes/My_book/TransiNet/data/' data_path = '/Volumes/My_Book/TransiNet/data/sets_newbadpix/' # step 0: stack images with open('obj_coords.dat', 'wb') as f: f.write('set_%s 0 0' % set_num) outputscale = 0.09 sky_nlc_order = 'nlcsky' bad_pix_list_wfc3 = data_path + 'bad_pix_list_wfc3.txt' set_num = sys.argv[1] set_dir = 'set_' + set_num userrefimage = '' do_it("mkdir %s/%s/orig_files" % (data_path, set_dir)) do_it("mv %s/*fits %s/orig_files" % (data_path + set_dir, data_path + set_dir)) print "Aligning Images..." 
for filter in [["F606W", "F775W", "F814W"], ["F105W", "F110W", "F125W", "F140W", "F160W"]]: flt_list = glob.glob(data_path + set_dir + "/orig_files/i*flt.fits") + \ glob.glob(data_path + set_dir + "/orig_files/j*flc.fits") flt_list.sort() flt_list = find_filter(flt_list, filter) if flt_list != []: best_ref, besti = find_best_ref(flt_list) do_tweak(flt_list, besti) do_it("rm -f %s/*.coo %s/*.match %s/*catfile.list" % (data_path + set_dir, data_path + set_dir, data_path + set_dir)) do_it("mv shifts.txt " + data_path + set_dir + "/shifts_%s.txt" % "_".join(filter)) print 'Finished alignment' print "Drizzling WFC3..." for filter in ["F105W", "F110W", "F125W", "F140W", "F160W"]: files = find_filter(glob.glob(data_path + set_dir + "/orig_files/i*flt.fits"), filter) print "filter, files", filter, files if len(files) > 0: for cr_sensitive in [0]: new_files = [item.replace("/orig_files", "") for item in files] for file, new_file in zip(files, new_files): if new_file == file: print "Error,", new_file, "is the same!" sys.exit(1) do_it("cp -vf " + file + " " + new_file) driz_filename = filter + "_stack" + "_CRsens"*cr_sensitive do_drizzle(new_files, driz_filename, clean=True, refimage=(userrefimage != "None")*userrefimage, build = True, cr_sensitive=cr_sensitive, outputscale=outputscale) do_it("mv " + driz_filename + "_drz.fits " + data_path + set_dir) do_it("rm -fv " + " ".join(new_files)) print "Drizzling ACS..." for filter in ["F775W", "F814W", "F606W", "F850LP"]: files = find_filter(glob.glob(data_path + set_dir + "/orig_files/j*flc.fits"), filter) print "filter, files", filter, files if len(files) > 0: for cr_sensitive in [0]: new_files = [item.replace("/orig_files", "") for item in files] for file, new_file in zip(files, new_files): if new_file == file: print "Error,", new_file, "is the same!" 
sys.exit(1) do_it("cp -vf " + file + " " + new_file) driz_filename = filter + "_stack" + "_CRsens"*cr_sensitive do_drizzle(new_files, driz_filename, clean=True, refimage=(userrefimage != "None")*userrefimage, build=True, cr_sensitive=cr_sensitive, outputscale=outputscale) do_it("mv " + driz_filename + "_drc.fits " + data_path + set_dir) do_it("rm -fv " + " ".join(new_files)) unique_filters = get_filters(data_path+set_dir+'/orig_files/') origfls = glob.glob(data_path+'/orig_files/*flt.fits') with open(data_path + 'paramfile_%s.txt' % set_num, 'wb') as paramfl: paramfl.write('drz\t%s/set_%s/%s_stack_drz.fits\n' % (data_path, set_num, unique_filters[0])) paramfl.write('aligned\t%s\n' % ' '.join(origfls)) paramfl.write('F125W_zp\t26.23\n') paramfl.write('F105W_zp\t26.24\n') paramfl.write('F140W_zp\t26.44\n') paramfl.write('F160W_zp\t25.92\n') paramfl.write('min_mag\t25.0\n') paramfl.write('max_mag\t27.0\n') paramfl.write('step_mag\t0.2\n') paramfl.write('gauss_r\t4\n') paramfl.write('frac_real\t0.5\n') paramfl.write('F125W_highz\t26.8\n') paramfl.write('F105W_highz\t26.8\n') paramfl.write('F140W_highz\t26.0\n') paramfl.write('F160W_highz\t25.9\n') paramfl.write('frac_highz\t0.003\n') # stack epochs fls_by_filter_date = get_fls_by_filter_date() commands.getoutput("rm -f %s/set_%s_epochs/*" % (data_path, set_num)) commands.getoutput("mkdir %s/set_%s_epochs" % (data_path, set_num)) filter_counter = [] for item in fls_by_filter_date: print item for im in fls_by_filter_date[item]: commands.getoutput("cp " + im + " %s/set_%s_epochs" % (data_path, set_num)) filter_counter.append(item[0]) refimage = commands.getoutput("grep drz "+data_path+"paramfile_%s.txt" % set_num).split(None)[1] + "[SCI]" print "refimage", refimage do_drizzle([data_path + "set_"+set_num+"_epochs/" + subitem.split("/")[-1] for subitem in fls_by_filter_date[item]], outputname = data_path + "set_"+set_num+"_epochs/" + item[0] + "_epoch%02i" % (filter_counter.count(filter_counter[-1])), refimage=refimage, 
outputscale=outputscale)
sort_ims
identifier_name
phase1b_stack.py
import glob import numpy as np import astropy.io.fits as pyfits import commands import sys from drizzlepac import tweakreg, astrodrizzle import collections import copy set_num = sys.argv[1] def do_it(cmd): print cmd print commands.getoutput(cmd) def get_filter(the_header): try: return the_header["FILTER"] except: filt = the_header["FILTER1"] if filt.find("CLEAR") == -1: return filt else: return the_header["FILTER2"] def find_filter(flt_list, the_filters): if type(the_filters) == type("a"): filt_list = [copy.deepcopy(the_filters)] else: filt_list = copy.deepcopy(the_filters) f125w_list = [] for item in flt_list: f = pyfits.open(item) if filt_list.count(get_filter(f[0].header)): f125w_list.append(item) return f125w_list def find_best_ref(all_flc_list, filt_priority=["F110W", "F105W", "F140W", "F125W", "F814W", "F775W", "F606W", "F160W"]): flc_list = [] for filt in filt_priority: if flc_list == []: flc_list = find_filter(all_flc_list, filt) print "flc_list for ", filt, flc_list print "Find ref with least maximum disagreement." print "In princple, this should take rotation into account." xlist = np.array([], dtype=np.float64) ylist = np.array([], dtype=np.float64) for fl in flc_list: f = pyfits.open(fl) ra = f[0].header["RA_TARG"] dec = f[0].header["DEC_TARG"] f.close() x = ra*np.cos(dec/57.3)*3600*20 y = dec*3600*20 xlist = np.append(xlist, x) ylist = np.append(ylist, y) besttotal = 1.e10 for i in range(len(xlist)): new = np.sqrt((xlist - xlist[i])**2. + (ylist - ylist[i])**2.) 
if max(new) < besttotal: besttotal = max(new) besti = i print "Ref to use ", flc_list[besti], besti return flc_list[besti], besti def transfer_header(infl, outfl): """I don't know why Eli's version of this doesn't work...""" print "Transfer", infl, "to", outfl fin = pyfits.open(infl) fout = pyfits.open(outfl, 'update') dont_transfer = ["HSTSLAC", "MDRIZSKY", "LACOSMIC", "HISTORY", "COMMENT", ""] print "Transferring: ", for i in range(len(fin)): for key in fin[i].header: if dont_transfer.count(key) == 0: if fin[i].header[key] != fout[i].header.get(key, default = None): print key, fout[i].header[key] = fin[i].header[key] fout.flush() fout.close() fin.close() print def do_tweak(flt_list, besti, lowthreshold = 0): f = open(bad_pix_list_wfc3) lines = f.read().split('\n') f.close() lines = [item.split(None) for item in lines] lines = [item for item in lines if item != []] bad_pix = [(int(item[0]), int(item[1])) for item in lines] tmp_ims = [] for i in range(len(flt_list)): f = pyfits.open(flt_list[i]) if f[0].header["INSTRUME"] == "ACS": tmp_ims.append(flt_list[i].replace(".fits", "_lac.fits")) acs = True else: tmp_ims.append(flt_list[i].replace(".fits", "_filter.fits")) if flt_list[i] == tmp_ims[i]: print "Error with ", flt_list[i] sys.exit(1) print "Median Filtering ", flt_list[i] f = pyfits.open(flt_list[i]) tmpdata = copy.deepcopy(f["SCI"].data) LTV1 = f["SCI"].header["LTV1"] LTV2 = f["SCI"].header["LTV2"] for this_x, this_y in bad_pix: this_x += LTV1 this_y += LTV2 if this_x > 1 and this_x < len(tmpdata[0]) and this_y > 1 and this_y < len(tmpdata): f["SCI"].data[int(np.around(this_y - 1)), int(np.around(this_x - 1))] = np.median(tmpdata[int(np.around(this_y - 2)): int(np.around(this_y + 1)), int(np.around(this_x - 2)): int(np.around(this_x + 1))]) f.writeto(tmp_ims[i], clobber = True) f.close() acs = False do_it("cp -f " + tmp_ims[i] + " " + tmp_ims[i].replace("/orig_files/", "/")) tmp_ims[i] = tmp_ims[i].replace("/orig_files/", "/") print "tmp_ims ", tmp_ims 
tweakref = tmp_ims[besti] tweakreg.TweakReg(','.join(tmp_ims), updatehdr=True, shiftfile=True, # This is just for show ############ Change This Between Iterations: ########## refimage=tweakref, updatewcs=False, # I think this should always be false. searchrad=4, searchunits='arcseconds', threshold=(1. + 7.*acs)/(lowthreshold + 1.), conv_width=(2.5 + 1.*acs), # 3.5 for optical, 2.5 for IR ######### Change This Between Iterations: ############## wcsname="TWEAK_rough", residplot='No plot', see2dplot=False, fitgeometry='shift') # Have to change this for that one epoch, G cluster? f = open("shifts.txt") lines = f.read() f.close() if lines.find(" nan ") != -1: print "Couldn't match!" if lowthreshold == 0: # First iteration print "Trying lower threshold..." do_tweak(flt_list, besti, lowthreshold = 1) else: print "...even though lowthreshold is ", lowthreshold sys.exit(1) for i in range(len(flt_list)): print "Transferring from ", tmp_ims[i], flt_list[i] transfer_header(tmp_ims[i], flt_list[i]) def do_drizzle(flc_list, outputname, clean = True, refimage = "", build = True, cr_sensitive = False, outputscale = 0.05): print "overriding cr_sensitive", cr_sensitive cr_sensitive = True n_img = len(flc_list) combine_type = "minmed"*(n_img <= 4.) 
+ "median"*(n_img > 4) print "Number of images ", n_img, combine_type if refimage != "": print "Using refimage", refimage nicmos = (flc_list[0].split("/")[-1][0] == "n") if nicmos: combine_type = "minmed" wfc3 = (flc_list[0].split("/")[-1][0] == "i") print "flc_list, nicmos, wfc3 ", flc_list, nicmos, wfc3 astrodrizzle.AstroDrizzle(','.join(flc_list), preserve=False, build=build, output=outputname, clean=clean*0, # Clean up tmp files updatewcs=nicmos, # This is right proc_unit='native', driz_sep_kernel='square', driz_sep_pixfrac=1.0, driz_sep_scale=0.128, driz_sep_bits=(0 + (512+1024+2048)*nicmos + (2048+8192)*wfc3), combine_type=combine_type, driz_cr=(n_img > 1), median=(n_img > 1), blot=(n_img > 1), static=(n_img > 1), #driz_cr_snr = "3.5 3.0", driz_cr_scale=("3 2"*(1 - cr_sensitive) + "2 1.5"*cr_sensitive), # Up from default 1.2, 0.7 #driz_cr_scale = "2. 1.5", #final_wht_type = "ERR", # This is very wrong! Why do they even include it? final_wht_type="EXP", # This one works! final_kernel="gaussian", final_pixfrac=1.0, # Should be default. final_wcs=True, final_rot=0., final_bits=(0 + (512+1024+2048)*nicmos + (2048+8192)*wfc3), final_scale=outputscale, final_refimage=refimage) if nicmos: f = pyfits.open(outputname + "_drz.fits", 'update') expend = f[0].header["EXPEND"] print outputname, "EXPEND", expend if expend > 51544: print "Multiplying by 1.007!"
def get_fls_by_filter_date(globpath = ""): files_by_filter_date = collections.OrderedDict() if globpath == "": origfls = glob.glob(data_path + "set_%s/orig_files/*flt.fits" % set_num) simfls = [] #glob.glob("simulated_ims/*flt.fits") else: origfls = glob.glob(globpath) simfls = [] for i in range(len(origfls))[::-1]: foundsim = 0 for simfl in simfls: if origfls[i].split("/")[-1] == simfl.split("/")[-1]: foundsim = 1 if foundsim: del origfls[i] fls_sorted_by_date = [] for fl in origfls + simfls: f = pyfits.open(fl) EXPEND = f[0].header["EXPEND"] f.close() fls_sorted_by_date.append((EXPEND, fl)) fls_sorted_by_date.sort() # print fls_sorted_by_date fls_sorted_by_date = [item[1] for item in fls_sorted_by_date] for fl in fls_sorted_by_date: f = pyfits.open(fl) EXPEND = f[0].header["EXPEND"] FILTER = f[0].header["FILTER"] f.close() found = 0 for key in files_by_filter_date: if (key[0] == FILTER) and (abs(EXPEND - key[1]) < 1.): files_by_filter_date[key].append(fl) found += 1 assert found < 2 if found == 0: files_by_filter_date[(FILTER, EXPEND)] = [fl] # for key in files_by_filter_date: # print key, files_by_filter_date[key] return files_by_filter_date def sort_ims(ims_path): origfls = glob.glob(ims_path+'/*flt.fits') print origfls ims_dict = {} for fl in origfls: f = pyfits.open(fl) EXPEND = int(f[0].header["EXPEND"]) FILTER = f[0].header["FILTER"] f.close() just_fl = fl.split('/')[-1] print just_fl, FILTER, EXPEND try: ims_dict[FILTER] except: ims_dict[FILTER] = {} try: ims_dict[FILTER][EXPEND].append(just_fl) except: ims_dict[FILTER][EXPEND] = [] ims_dict[FILTER][EXPEND].append(just_fl) filt1, filt2 = ims_dict.keys() filt1_e1 = np.min(ims_dict[filt1].keys()) filt1_e2 = np.max(ims_dict[filt1].keys()) filt2_e1 = np.min(ims_dict[filt2].keys()) filt2_e2 = np.max(ims_dict[filt2].keys()) filt1_epoch1_fls = ims_dict[filt1][filt1_e1] filt1_epoch2_fls = ims_dict[filt1][filt1_e2] filt2_epoch1_fls = ims_dict[filt2][filt2_e1] filt2_epoch2_fls = ims_dict[filt2][filt2_e2] return 
filt1, filt2, filt1_epoch1_fls, filt1_epoch2_fls, \ filt2_epoch1_fls, filt2_epoch2_fls def get_filters(ims_path): origfls = glob.glob(ims_path+'/*flt.fits') print ims_path print origfls filts = [] for fl in origfls: f = pyfits.open(fl) FILTER = f[0].header["FILTER"] f.close() filts.append(FILTER) unique_filters = np.unique(filts) return unique_filters #path = '/Users/mcurrie/Projects/TransiNet/data/set_%s/orig_files' % set_num #data_path = '/Users/mcurrie/Projects/TransiNet/data/' #path = '/Volumes/My_book/TransiNet/data/set_%s/orig_files' % set_num #data_path = '/Volumes/My_book/TransiNet/data/' data_path = '/Volumes/My_Book/TransiNet/data/sets_newbadpix/' # step 0: stack images with open('obj_coords.dat', 'wb') as f: f.write('set_%s 0 0' % set_num) outputscale = 0.09 sky_nlc_order = 'nlcsky' bad_pix_list_wfc3 = data_path + 'bad_pix_list_wfc3.txt' set_num = sys.argv[1] set_dir = 'set_' + set_num userrefimage = '' do_it("mkdir %s/%s/orig_files" % (data_path, set_dir)) do_it("mv %s/*fits %s/orig_files" % (data_path + set_dir, data_path + set_dir)) print "Aligning Images..." for filter in [["F606W", "F775W", "F814W"], ["F105W", "F110W", "F125W", "F140W", "F160W"]]: flt_list = glob.glob(data_path + set_dir + "/orig_files/i*flt.fits") + \ glob.glob(data_path + set_dir + "/orig_files/j*flc.fits") flt_list.sort() flt_list = find_filter(flt_list, filter) if flt_list != []: best_ref, besti = find_best_ref(flt_list) do_tweak(flt_list, besti) do_it("rm -f %s/*.coo %s/*.match %s/*catfile.list" % (data_path + set_dir, data_path + set_dir, data_path + set_dir)) do_it("mv shifts.txt " + data_path + set_dir + "/shifts_%s.txt" % "_".join(filter)) print 'Finished alignment' print "Drizzling WFC3..." 
for filter in ["F105W", "F110W", "F125W", "F140W", "F160W"]: files = find_filter(glob.glob(data_path + set_dir + "/orig_files/i*flt.fits"), filter) print "filter, files", filter, files if len(files) > 0: for cr_sensitive in [0]: new_files = [item.replace("/orig_files", "") for item in files] for file, new_file in zip(files, new_files): if new_file == file: print "Error,", new_file, "is the same!" sys.exit(1) do_it("cp -vf " + file + " " + new_file) driz_filename = filter + "_stack" + "_CRsens"*cr_sensitive do_drizzle(new_files, driz_filename, clean=True, refimage=(userrefimage != "None")*userrefimage, build = True, cr_sensitive=cr_sensitive, outputscale=outputscale) do_it("mv " + driz_filename + "_drz.fits " + data_path + set_dir) do_it("rm -fv " + " ".join(new_files)) print "Drizzling ACS..." for filter in ["F775W", "F814W", "F606W", "F850LP"]: files = find_filter(glob.glob(data_path + set_dir + "/orig_files/j*flc.fits"), filter) print "filter, files", filter, files if len(files) > 0: for cr_sensitive in [0]: new_files = [item.replace("/orig_files", "") for item in files] for file, new_file in zip(files, new_files): if new_file == file: print "Error,", new_file, "is the same!" 
sys.exit(1) do_it("cp -vf " + file + " " + new_file) driz_filename = filter + "_stack" + "_CRsens"*cr_sensitive do_drizzle(new_files, driz_filename, clean=True, refimage=(userrefimage != "None")*userrefimage, build=True, cr_sensitive=cr_sensitive, outputscale=outputscale) do_it("mv " + driz_filename + "_drc.fits " + data_path + set_dir) do_it("rm -fv " + " ".join(new_files)) unique_filters = get_filters(data_path+set_dir+'/orig_files/') origfls = glob.glob(data_path+'/orig_files/*flt.fits') with open(data_path + 'paramfile_%s.txt' % set_num, 'wb') as paramfl: paramfl.write('drz\t%s/set_%s/%s_stack_drz.fits\n' % (data_path, set_num, unique_filters[0])) paramfl.write('aligned\t%s\n' % ' '.join(origfls)) paramfl.write('F125W_zp\t26.23\n') paramfl.write('F105W_zp\t26.24\n') paramfl.write('F140W_zp\t26.44\n') paramfl.write('F160W_zp\t25.92\n') paramfl.write('min_mag\t25.0\n') paramfl.write('max_mag\t27.0\n') paramfl.write('step_mag\t0.2\n') paramfl.write('gauss_r\t4\n') paramfl.write('frac_real\t0.5\n') paramfl.write('F125W_highz\t26.8\n') paramfl.write('F105W_highz\t26.8\n') paramfl.write('F140W_highz\t26.0\n') paramfl.write('F160W_highz\t25.9\n') paramfl.write('frac_highz\t0.003\n') # stack epochs fls_by_filter_date = get_fls_by_filter_date() commands.getoutput("rm -f %s/set_%s_epochs/*" % (data_path, set_num)) commands.getoutput("mkdir %s/set_%s_epochs" % (data_path, set_num)) filter_counter = [] for item in fls_by_filter_date: print item for im in fls_by_filter_date[item]: commands.getoutput("cp " + im + " %s/set_%s_epochs" % (data_path, set_num)) filter_counter.append(item[0]) refimage = commands.getoutput("grep drz "+data_path+"paramfile_%s.txt" % set_num).split(None)[1] + "[SCI]" print "refimage", refimage do_drizzle([data_path + "set_"+set_num+"_epochs/" + subitem.split("/")[-1] for subitem in fls_by_filter_date[item]], outputname = data_path + "set_"+set_num+"_epochs/" + item[0] + "_epoch%02i" % (filter_counter.count(filter_counter[-1])), refimage=refimage, 
outputscale=outputscale)
f["SCI"].data *= 1.007 f.flush() f.close()
random_line_split
phase1b_stack.py
import glob import numpy as np import astropy.io.fits as pyfits import commands import sys from drizzlepac import tweakreg, astrodrizzle import collections import copy set_num = sys.argv[1] def do_it(cmd): print cmd print commands.getoutput(cmd) def get_filter(the_header): try: return the_header["FILTER"] except: filt = the_header["FILTER1"] if filt.find("CLEAR") == -1: return filt else: return the_header["FILTER2"] def find_filter(flt_list, the_filters): if type(the_filters) == type("a"): filt_list = [copy.deepcopy(the_filters)] else: filt_list = copy.deepcopy(the_filters) f125w_list = [] for item in flt_list: f = pyfits.open(item) if filt_list.count(get_filter(f[0].header)): f125w_list.append(item) return f125w_list def find_best_ref(all_flc_list, filt_priority=["F110W", "F105W", "F140W", "F125W", "F814W", "F775W", "F606W", "F160W"]): flc_list = [] for filt in filt_priority: if flc_list == []: flc_list = find_filter(all_flc_list, filt) print "flc_list for ", filt, flc_list print "Find ref with least maximum disagreement." print "In princple, this should take rotation into account." xlist = np.array([], dtype=np.float64) ylist = np.array([], dtype=np.float64) for fl in flc_list: f = pyfits.open(fl) ra = f[0].header["RA_TARG"] dec = f[0].header["DEC_TARG"] f.close() x = ra*np.cos(dec/57.3)*3600*20 y = dec*3600*20 xlist = np.append(xlist, x) ylist = np.append(ylist, y) besttotal = 1.e10 for i in range(len(xlist)): new = np.sqrt((xlist - xlist[i])**2. + (ylist - ylist[i])**2.) 
if max(new) < besttotal: besttotal = max(new) besti = i print "Ref to use ", flc_list[besti], besti return flc_list[besti], besti def transfer_header(infl, outfl): """I don't know why Eli's version of this doesn't work...""" print "Transfer", infl, "to", outfl fin = pyfits.open(infl) fout = pyfits.open(outfl, 'update') dont_transfer = ["HSTSLAC", "MDRIZSKY", "LACOSMIC", "HISTORY", "COMMENT", ""] print "Transferring: ", for i in range(len(fin)): for key in fin[i].header: if dont_transfer.count(key) == 0: if fin[i].header[key] != fout[i].header.get(key, default = None): print key, fout[i].header[key] = fin[i].header[key] fout.flush() fout.close() fin.close() print def do_tweak(flt_list, besti, lowthreshold = 0): f = open(bad_pix_list_wfc3) lines = f.read().split('\n') f.close() lines = [item.split(None) for item in lines] lines = [item for item in lines if item != []] bad_pix = [(int(item[0]), int(item[1])) for item in lines] tmp_ims = [] for i in range(len(flt_list)):
print "tmp_ims ", tmp_ims tweakref = tmp_ims[besti] tweakreg.TweakReg(','.join(tmp_ims), updatehdr=True, shiftfile=True, # This is just for show ############ Change This Between Iterations: ########## refimage=tweakref, updatewcs=False, # I think this should always be false. searchrad=4, searchunits='arcseconds', threshold=(1. + 7.*acs)/(lowthreshold + 1.), conv_width=(2.5 + 1.*acs), # 3.5 for optical, 2.5 for IR ######### Change This Between Iterations: ############## wcsname="TWEAK_rough", residplot='No plot', see2dplot=False, fitgeometry='shift') # Have to change this for that one epoch, G cluster? f = open("shifts.txt") lines = f.read() f.close() if lines.find(" nan ") != -1: print "Couldn't match!" if lowthreshold == 0: # First iteration print "Trying lower threshold..." do_tweak(flt_list, besti, lowthreshold = 1) else: print "...even though lowthreshold is ", lowthreshold sys.exit(1) for i in range(len(flt_list)): print "Transferring from ", tmp_ims[i], flt_list[i] transfer_header(tmp_ims[i], flt_list[i]) def do_drizzle(flc_list, outputname, clean = True, refimage = "", build = True, cr_sensitive = False, outputscale = 0.05): print "overriding cr_sensitive", cr_sensitive cr_sensitive = True n_img = len(flc_list) combine_type = "minmed"*(n_img <= 4.) 
+ "median"*(n_img > 4) print "Number of images ", n_img, combine_type if refimage != "": print "Using refimage", refimage nicmos = (flc_list[0].split("/")[-1][0] == "n") if nicmos: combine_type = "minmed" wfc3 = (flc_list[0].split("/")[-1][0] == "i") print "flc_list, nicmos, wfc3 ", flc_list, nicmos, wfc3 astrodrizzle.AstroDrizzle(','.join(flc_list), preserve=False, build=build, output=outputname, clean=clean*0, # Clean up tmp files updatewcs=nicmos, # This is right proc_unit='native', driz_sep_kernel='square', driz_sep_pixfrac=1.0, driz_sep_scale=0.128, driz_sep_bits=(0 + (512+1024+2048)*nicmos + (2048+8192)*wfc3), combine_type=combine_type, driz_cr=(n_img > 1), median=(n_img > 1), blot=(n_img > 1), static=(n_img > 1), #driz_cr_snr = "3.5 3.0", driz_cr_scale=("3 2"*(1 - cr_sensitive) + "2 1.5"*cr_sensitive), # Up from default 1.2, 0.7 #driz_cr_scale = "2. 1.5", #final_wht_type = "ERR", # This is very wrong! Why do they even include it? final_wht_type="EXP", # This one works! final_kernel="gaussian", final_pixfrac=1.0, # Should be default. final_wcs=True, final_rot=0., final_bits=(0 + (512+1024+2048)*nicmos + (2048+8192)*wfc3), final_scale=outputscale, final_refimage=refimage) if nicmos: f = pyfits.open(outputname + "_drz.fits", 'update') expend = f[0].header["EXPEND"] print outputname, "EXPEND", expend if expend > 51544: print "Multiplying by 1.007!" 
f["SCI"].data *= 1.007 f.flush() f.close() def get_fls_by_filter_date(globpath = ""): files_by_filter_date = collections.OrderedDict() if globpath == "": origfls = glob.glob(data_path + "set_%s/orig_files/*flt.fits" % set_num) simfls = [] #glob.glob("simulated_ims/*flt.fits") else: origfls = glob.glob(globpath) simfls = [] for i in range(len(origfls))[::-1]: foundsim = 0 for simfl in simfls: if origfls[i].split("/")[-1] == simfl.split("/")[-1]: foundsim = 1 if foundsim: del origfls[i] fls_sorted_by_date = [] for fl in origfls + simfls: f = pyfits.open(fl) EXPEND = f[0].header["EXPEND"] f.close() fls_sorted_by_date.append((EXPEND, fl)) fls_sorted_by_date.sort() # print fls_sorted_by_date fls_sorted_by_date = [item[1] for item in fls_sorted_by_date] for fl in fls_sorted_by_date: f = pyfits.open(fl) EXPEND = f[0].header["EXPEND"] FILTER = f[0].header["FILTER"] f.close() found = 0 for key in files_by_filter_date: if (key[0] == FILTER) and (abs(EXPEND - key[1]) < 1.): files_by_filter_date[key].append(fl) found += 1 assert found < 2 if found == 0: files_by_filter_date[(FILTER, EXPEND)] = [fl] # for key in files_by_filter_date: # print key, files_by_filter_date[key] return files_by_filter_date def sort_ims(ims_path): origfls = glob.glob(ims_path+'/*flt.fits') print origfls ims_dict = {} for fl in origfls: f = pyfits.open(fl) EXPEND = int(f[0].header["EXPEND"]) FILTER = f[0].header["FILTER"] f.close() just_fl = fl.split('/')[-1] print just_fl, FILTER, EXPEND try: ims_dict[FILTER] except: ims_dict[FILTER] = {} try: ims_dict[FILTER][EXPEND].append(just_fl) except: ims_dict[FILTER][EXPEND] = [] ims_dict[FILTER][EXPEND].append(just_fl) filt1, filt2 = ims_dict.keys() filt1_e1 = np.min(ims_dict[filt1].keys()) filt1_e2 = np.max(ims_dict[filt1].keys()) filt2_e1 = np.min(ims_dict[filt2].keys()) filt2_e2 = np.max(ims_dict[filt2].keys()) filt1_epoch1_fls = ims_dict[filt1][filt1_e1] filt1_epoch2_fls = ims_dict[filt1][filt1_e2] filt2_epoch1_fls = ims_dict[filt2][filt2_e1] 
filt2_epoch2_fls = ims_dict[filt2][filt2_e2] return filt1, filt2, filt1_epoch1_fls, filt1_epoch2_fls, \ filt2_epoch1_fls, filt2_epoch2_fls def get_filters(ims_path): origfls = glob.glob(ims_path+'/*flt.fits') print ims_path print origfls filts = [] for fl in origfls: f = pyfits.open(fl) FILTER = f[0].header["FILTER"] f.close() filts.append(FILTER) unique_filters = np.unique(filts) return unique_filters #path = '/Users/mcurrie/Projects/TransiNet/data/set_%s/orig_files' % set_num #data_path = '/Users/mcurrie/Projects/TransiNet/data/' #path = '/Volumes/My_book/TransiNet/data/set_%s/orig_files' % set_num #data_path = '/Volumes/My_book/TransiNet/data/' data_path = '/Volumes/My_Book/TransiNet/data/sets_newbadpix/' # step 0: stack images with open('obj_coords.dat', 'wb') as f: f.write('set_%s 0 0' % set_num) outputscale = 0.09 sky_nlc_order = 'nlcsky' bad_pix_list_wfc3 = data_path + 'bad_pix_list_wfc3.txt' set_num = sys.argv[1] set_dir = 'set_' + set_num userrefimage = '' do_it("mkdir %s/%s/orig_files" % (data_path, set_dir)) do_it("mv %s/*fits %s/orig_files" % (data_path + set_dir, data_path + set_dir)) print "Aligning Images..." for filter in [["F606W", "F775W", "F814W"], ["F105W", "F110W", "F125W", "F140W", "F160W"]]: flt_list = glob.glob(data_path + set_dir + "/orig_files/i*flt.fits") + \ glob.glob(data_path + set_dir + "/orig_files/j*flc.fits") flt_list.sort() flt_list = find_filter(flt_list, filter) if flt_list != []: best_ref, besti = find_best_ref(flt_list) do_tweak(flt_list, besti) do_it("rm -f %s/*.coo %s/*.match %s/*catfile.list" % (data_path + set_dir, data_path + set_dir, data_path + set_dir)) do_it("mv shifts.txt " + data_path + set_dir + "/shifts_%s.txt" % "_".join(filter)) print 'Finished alignment' print "Drizzling WFC3..." 
for filter in ["F105W", "F110W", "F125W", "F140W", "F160W"]: files = find_filter(glob.glob(data_path + set_dir + "/orig_files/i*flt.fits"), filter) print "filter, files", filter, files if len(files) > 0: for cr_sensitive in [0]: new_files = [item.replace("/orig_files", "") for item in files] for file, new_file in zip(files, new_files): if new_file == file: print "Error,", new_file, "is the same!" sys.exit(1) do_it("cp -vf " + file + " " + new_file) driz_filename = filter + "_stack" + "_CRsens"*cr_sensitive do_drizzle(new_files, driz_filename, clean=True, refimage=(userrefimage != "None")*userrefimage, build = True, cr_sensitive=cr_sensitive, outputscale=outputscale) do_it("mv " + driz_filename + "_drz.fits " + data_path + set_dir) do_it("rm -fv " + " ".join(new_files)) print "Drizzling ACS..." for filter in ["F775W", "F814W", "F606W", "F850LP"]: files = find_filter(glob.glob(data_path + set_dir + "/orig_files/j*flc.fits"), filter) print "filter, files", filter, files if len(files) > 0: for cr_sensitive in [0]: new_files = [item.replace("/orig_files", "") for item in files] for file, new_file in zip(files, new_files): if new_file == file: print "Error,", new_file, "is the same!" 
sys.exit(1) do_it("cp -vf " + file + " " + new_file) driz_filename = filter + "_stack" + "_CRsens"*cr_sensitive do_drizzle(new_files, driz_filename, clean=True, refimage=(userrefimage != "None")*userrefimage, build=True, cr_sensitive=cr_sensitive, outputscale=outputscale) do_it("mv " + driz_filename + "_drc.fits " + data_path + set_dir) do_it("rm -fv " + " ".join(new_files)) unique_filters = get_filters(data_path+set_dir+'/orig_files/') origfls = glob.glob(data_path+'/orig_files/*flt.fits') with open(data_path + 'paramfile_%s.txt' % set_num, 'wb') as paramfl: paramfl.write('drz\t%s/set_%s/%s_stack_drz.fits\n' % (data_path, set_num, unique_filters[0])) paramfl.write('aligned\t%s\n' % ' '.join(origfls)) paramfl.write('F125W_zp\t26.23\n') paramfl.write('F105W_zp\t26.24\n') paramfl.write('F140W_zp\t26.44\n') paramfl.write('F160W_zp\t25.92\n') paramfl.write('min_mag\t25.0\n') paramfl.write('max_mag\t27.0\n') paramfl.write('step_mag\t0.2\n') paramfl.write('gauss_r\t4\n') paramfl.write('frac_real\t0.5\n') paramfl.write('F125W_highz\t26.8\n') paramfl.write('F105W_highz\t26.8\n') paramfl.write('F140W_highz\t26.0\n') paramfl.write('F160W_highz\t25.9\n') paramfl.write('frac_highz\t0.003\n') # stack epochs fls_by_filter_date = get_fls_by_filter_date() commands.getoutput("rm -f %s/set_%s_epochs/*" % (data_path, set_num)) commands.getoutput("mkdir %s/set_%s_epochs" % (data_path, set_num)) filter_counter = [] for item in fls_by_filter_date: print item for im in fls_by_filter_date[item]: commands.getoutput("cp " + im + " %s/set_%s_epochs" % (data_path, set_num)) filter_counter.append(item[0]) refimage = commands.getoutput("grep drz "+data_path+"paramfile_%s.txt" % set_num).split(None)[1] + "[SCI]" print "refimage", refimage do_drizzle([data_path + "set_"+set_num+"_epochs/" + subitem.split("/")[-1] for subitem in fls_by_filter_date[item]], outputname = data_path + "set_"+set_num+"_epochs/" + item[0] + "_epoch%02i" % (filter_counter.count(filter_counter[-1])), refimage=refimage, 
outputscale=outputscale)
f = pyfits.open(flt_list[i]) if f[0].header["INSTRUME"] == "ACS": tmp_ims.append(flt_list[i].replace(".fits", "_lac.fits")) acs = True else: tmp_ims.append(flt_list[i].replace(".fits", "_filter.fits")) if flt_list[i] == tmp_ims[i]: print "Error with ", flt_list[i] sys.exit(1) print "Median Filtering ", flt_list[i] f = pyfits.open(flt_list[i]) tmpdata = copy.deepcopy(f["SCI"].data) LTV1 = f["SCI"].header["LTV1"] LTV2 = f["SCI"].header["LTV2"] for this_x, this_y in bad_pix: this_x += LTV1 this_y += LTV2 if this_x > 1 and this_x < len(tmpdata[0]) and this_y > 1 and this_y < len(tmpdata): f["SCI"].data[int(np.around(this_y - 1)), int(np.around(this_x - 1))] = np.median(tmpdata[int(np.around(this_y - 2)): int(np.around(this_y + 1)), int(np.around(this_x - 2)): int(np.around(this_x + 1))]) f.writeto(tmp_ims[i], clobber = True) f.close() acs = False do_it("cp -f " + tmp_ims[i] + " " + tmp_ims[i].replace("/orig_files/", "/")) tmp_ims[i] = tmp_ims[i].replace("/orig_files/", "/")
conditional_block
main.rs
#[cfg(test)] extern crate memsec; use std::collections::BTreeMap; use std::env; use std::fmt::{self, Display, Formatter}; use std::fs; use std::iter; use std::process::{Command, Stdio}; use std::str::{self, FromStr}; use std::sync::{Arc, Mutex}; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use std::thread::{self, JoinHandle}; // The number of space characters (" ") between table columns. const COLUMN_BUFFER: usize = 8; // Ignore child processes with the following names. const IGNORE_CHILD_PROCS: [&str; 3] = ["rustc", "[rustc]", "rustdoc"]; type Pid = u32; type Pname = String; #[derive(Clone, Debug)] struct Pinfo { pname: Pname, max_locked: u64, } #[derive(Debug)] struct Database(BTreeMap<Pid, Pinfo>); impl Database { fn new() -> Self { Database(BTreeMap::new()) } fn contains(&self, pid: &Pid) -> bool { self.0.contains_key(pid) } fn new_child_process(&mut self, pid: Pid, pname: Pname) { self.0.insert(pid, Pinfo { pname, max_locked: 0 }); } fn update(&mut self, pid: Pid, kbs_locked: u64)
fn table(&self) -> String { let col1_heading = "Process Name"; let col2_heading = "Max Locked Memory (kb)"; let col1_heading_len = col1_heading.chars().count(); let col2_heading_len = col2_heading.chars().count(); let min_col2_start = col1_heading_len + COLUMN_BUFFER; let col2_start = self.0 .values() .fold(min_col2_start, |longest, pinfo| { match pinfo.pname.chars().count() + COLUMN_BUFFER { n_chars if n_chars > longest => n_chars, _ => longest, } }); let heading_whitespace: String = (0..col2_start - col1_heading_len) .map(|_| ' ') .collect(); let heading = format!( "{}{}{}", col1_heading, heading_whitespace, col2_heading, ); let top_border = format!( "{}{}{}", (0..col1_heading_len).map(|_| '=').collect::<String>(), heading_whitespace, (0..col2_heading_len).map(|_| '=').collect::<String>(), ); let mut stdout = format!("\n{}\n{}\n", heading, top_border); for Pinfo { pname, max_locked } in self.0.values() { let pname_len = pname.chars().count(); let whitespace: String = (0..col2_start - pname_len) .map(|_| ' ') .collect(); let line = format!("{}{}{}\n", pname, whitespace, max_locked); stdout.push_str(&line); } let table_width = col2_start + col2_heading_len; let bottom_border: String = (0..table_width).map(|_| '=').collect(); stdout.push_str(&bottom_border); stdout } } #[derive(Debug)] enum Limit { Kb(u64), Unlimited, } impl Display for Limit { fn fmt(&self, f: &mut Formatter) -> fmt::Result { match self { Limit::Kb(kbs) => write!(f, "{}", kbs), _ => write!(f, "unlimited"), } } } impl FromStr for Limit { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { if s == "unlimited" { Ok(Limit::Unlimited) } else { let n_bytes: u64 = s.parse::<u64>().map_err(|_| ())?; Ok(Limit::Kb(n_bytes / 1024)) } } } #[derive(Debug)] struct MlockLimit { soft: Limit, hard: Limit, } fn run_prlimit() -> MlockLimit { let output = Command::new("prlimit") .args(&["--memlock", "--output=SOFT,HARD", "--noheadings"]) .output() .map(|output| String::from_utf8(output.stdout).unwrap()) 
.unwrap_or_else(|e| panic!("Subprocess failed: `ulimit`: {:?}", e)); let split: Vec<&str> = output.split_whitespace().collect(); let soft = Limit::from_str(split[0]).unwrap(); let hard = Limit::from_str(split[1]).unwrap(); MlockLimit { soft, hard } } fn run_ps(cargo_test_pid: Pid) -> Vec<(Pid, Pname)> { let mut ps = vec![]; let ppid = cargo_test_pid.to_string(); let output = Command::new("ps") .args(&["-f", "--ppid", &ppid]) .output() .map(|output| String::from_utf8(output.stdout).unwrap()) .expect("Subprocess failed: `ps`"); for line in output.trim().lines().skip(1) { let split: Vec<&str> = line.split_whitespace().collect(); let pid: Pid = split[1].parse().unwrap(); let pname: Pname = split[7] .split_whitespace() .nth(0) .unwrap() .split('/') .last() .unwrap() .to_string(); if !IGNORE_CHILD_PROCS.contains(&pname.as_ref()) { ps.push((pid, pname)); } } ps } // Launches a thread that continuously calls `ps`, updates the shared // `child_pids` vector, and inserts the child processes' pids and names // into the measurements database. fn launch_ps_thread( cargo_test_pid: Arc<Mutex<Option<Pid>>>, child_pids: Arc<Mutex<Vec<Pid>>>, db: Arc<Mutex<Database>>, done: Arc<AtomicBool>, ) -> JoinHandle<()> { thread::spawn(move || { let cargo_test_pid = loop { if let Some(pid) = *cargo_test_pid.lock().unwrap() { break pid; } }; while !done.load(Ordering::Relaxed) { let ps = run_ps(cargo_test_pid); *child_pids.lock().unwrap() = ps.iter().map(|(pid, _pname)| *pid).collect(); let mut db = db.lock().unwrap(); for (pid, pname) in ps { if !db.contains(&pid) { db.new_child_process(pid, pname); } } thread::sleep(Duration::from_millis(100)); } }) } // Launches a thread that continuously reads each child processes' // "status" file, parses each file to get the ammount memory locked by that // child process, then updates the database with the locked memory // information. 
fn launch_measurements_thread( cargo_test_pid: Arc<Mutex<Option<Pid>>>, child_pids: Arc<Mutex<Vec<Pid>>>, db: Arc<Mutex<Database>>, done: Arc<AtomicBool>, ) -> JoinHandle<()> { thread::spawn(move || { while cargo_test_pid.lock().unwrap().is_none() { thread::sleep(Duration::from_millis(1)); } while !done.load(Ordering::Relaxed) { for child_pid in child_pids.lock().unwrap().iter() { if let Some(kbs_locked) = parse_status_file(*child_pid) { db.lock().unwrap().update(*child_pid, kbs_locked); } } thread::sleep(Duration::from_millis(1)); } }) } // Reads a processes' "status" file; parsing it for the ammount of memory // currently locked by the process. fn parse_status_file(pid: Pid) -> Option<u64> { let path = format!("/proc/{}/status", pid); let file = fs::read_to_string(path).ok()?; for line in file.lines() { if line.starts_with("VmLck") { match line.trim().split_whitespace().nth(1) { Some(s) => return s.parse().ok(), _ => return None, }; } } None } fn main() { println!("CURRENT CWD => {:?}", env::current_dir()); println!("CURRENT EXE => {:?}", env::current_exe()); // Initialize the values that will be shared between threads. let cargo_test_pid: Arc<Mutex<Option<Pid>>> = Arc::new(Mutex::new(None)); let child_pids: Arc<Mutex<Vec<Pid>>> = Arc::new(Mutex::new(vec![])); let db = Arc::new(Mutex::new(Database::new())); let done = Arc::new(AtomicBool::new(false)); // Start the worker threads. let ps_thread = launch_ps_thread( cargo_test_pid.clone(), child_pids.clone(), db.clone(), done.clone() ); let file_reader_thread = launch_measurements_thread( cargo_test_pid.clone(), child_pids.clone(), db.clone(), done.clone() ); // Get the system's locked memory limit. let mlock_limit = run_prlimit(); println!("\nMlock Monitor for `cargo test`"); println!("==============================="); println!("Locked memory limit (soft, kb): {}", mlock_limit.soft); println!("Lock memory limit (hard, kb): {}", mlock_limit.hard); print!("\nRunning `cargo test` ... "); // Run `cargo test`. 
let cwd = env::current_dir().unwrap(); let mut cargo_test_args = vec![ "test".to_string(), format!("--manifest-path={}/Cargo.toml", cwd.to_str().unwrap()), ]; cargo_test_args.extend(env::args().skip(1)); /* let mut cargo_test_cmd = Command::new("cargo") .args(&cargo_test_args) .envs(env::vars()) .stdout(Stdio::piped()) .stderr(Stdio::piped()); println!("Running `cargo test`: {:?}", cargo_test_cmd); let cargo_test_output = cargo_test_cmd .spawn() .and_then(|child| { *cargo_test_pid.lock().unwrap() = Some(child.id()); child.wait_with_output() }) .unwrap(); */ let cargo_test_output = Command::new("cargo") .args(&cargo_test_args) .envs(env::vars()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() .and_then(|child| { *cargo_test_pid.lock().unwrap() = Some(child.id()); child.wait_with_output() }) .unwrap(); // Once `cargo test` has finished, stop the worker the threads and // print the measurement results. println!("done!"); done.store(true, Ordering::Relaxed); let _ = ps_thread.join(); let _ = file_reader_thread.join(); println!("{}", db.lock().unwrap().table()); println!("\nOutput `cargo test`"); println!("===================="); println!("{}", String::from_utf8_lossy(&cargo_test_output.stdout)); } #[cfg(test)] mod tests { use std::mem::size_of_val; use std::thread; use std::time::Duration; use memsec::mlock; #[test] fn test_mlock() { println!("TEST TEST TEST"); let buf: [u64; 600] = [555; 600]; let ptr = (&buf).as_ptr() as *mut u8; unsafe { mlock(ptr, size_of_val(&buf)); } thread::sleep(Duration::from_secs(2)); assert!(true); } }
{ if let Some(pinfo) = self.0.get_mut(&pid) { if kbs_locked > pinfo.max_locked { pinfo.max_locked = kbs_locked; } } }
identifier_body
main.rs
#[cfg(test)] extern crate memsec; use std::collections::BTreeMap; use std::env; use std::fmt::{self, Display, Formatter}; use std::fs; use std::iter; use std::process::{Command, Stdio}; use std::str::{self, FromStr}; use std::sync::{Arc, Mutex}; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use std::thread::{self, JoinHandle}; // The number of space characters (" ") between table columns. const COLUMN_BUFFER: usize = 8; // Ignore child processes with the following names. const IGNORE_CHILD_PROCS: [&str; 3] = ["rustc", "[rustc]", "rustdoc"]; type Pid = u32; type Pname = String; #[derive(Clone, Debug)] struct Pinfo { pname: Pname, max_locked: u64, } #[derive(Debug)] struct Database(BTreeMap<Pid, Pinfo>); impl Database { fn new() -> Self { Database(BTreeMap::new()) } fn contains(&self, pid: &Pid) -> bool { self.0.contains_key(pid) } fn new_child_process(&mut self, pid: Pid, pname: Pname) { self.0.insert(pid, Pinfo { pname, max_locked: 0 }); } fn update(&mut self, pid: Pid, kbs_locked: u64) { if let Some(pinfo) = self.0.get_mut(&pid) { if kbs_locked > pinfo.max_locked { pinfo.max_locked = kbs_locked; } } } fn table(&self) -> String { let col1_heading = "Process Name"; let col2_heading = "Max Locked Memory (kb)"; let col1_heading_len = col1_heading.chars().count(); let col2_heading_len = col2_heading.chars().count(); let min_col2_start = col1_heading_len + COLUMN_BUFFER; let col2_start = self.0 .values() .fold(min_col2_start, |longest, pinfo| { match pinfo.pname.chars().count() + COLUMN_BUFFER { n_chars if n_chars > longest => n_chars, _ => longest, } }); let heading_whitespace: String = (0..col2_start - col1_heading_len) .map(|_| ' ') .collect(); let heading = format!( "{}{}{}", col1_heading, heading_whitespace, col2_heading, ); let top_border = format!( "{}{}{}", (0..col1_heading_len).map(|_| '=').collect::<String>(), heading_whitespace, (0..col2_heading_len).map(|_| '=').collect::<String>(), ); let mut stdout = format!("\n{}\n{}\n", heading, 
top_border); for Pinfo { pname, max_locked } in self.0.values() { let pname_len = pname.chars().count(); let whitespace: String = (0..col2_start - pname_len) .map(|_| ' ') .collect(); let line = format!("{}{}{}\n", pname, whitespace, max_locked); stdout.push_str(&line); } let table_width = col2_start + col2_heading_len; let bottom_border: String = (0..table_width).map(|_| '=').collect(); stdout.push_str(&bottom_border); stdout } } #[derive(Debug)] enum Limit { Kb(u64), Unlimited, } impl Display for Limit { fn fmt(&self, f: &mut Formatter) -> fmt::Result { match self { Limit::Kb(kbs) => write!(f, "{}", kbs), _ => write!(f, "unlimited"), } } } impl FromStr for Limit { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { if s == "unlimited" { Ok(Limit::Unlimited) } else { let n_bytes: u64 = s.parse::<u64>().map_err(|_| ())?; Ok(Limit::Kb(n_bytes / 1024)) } } } #[derive(Debug)] struct
{ soft: Limit, hard: Limit, } fn run_prlimit() -> MlockLimit { let output = Command::new("prlimit") .args(&["--memlock", "--output=SOFT,HARD", "--noheadings"]) .output() .map(|output| String::from_utf8(output.stdout).unwrap()) .unwrap_or_else(|e| panic!("Subprocess failed: `ulimit`: {:?}", e)); let split: Vec<&str> = output.split_whitespace().collect(); let soft = Limit::from_str(split[0]).unwrap(); let hard = Limit::from_str(split[1]).unwrap(); MlockLimit { soft, hard } } fn run_ps(cargo_test_pid: Pid) -> Vec<(Pid, Pname)> { let mut ps = vec![]; let ppid = cargo_test_pid.to_string(); let output = Command::new("ps") .args(&["-f", "--ppid", &ppid]) .output() .map(|output| String::from_utf8(output.stdout).unwrap()) .expect("Subprocess failed: `ps`"); for line in output.trim().lines().skip(1) { let split: Vec<&str> = line.split_whitespace().collect(); let pid: Pid = split[1].parse().unwrap(); let pname: Pname = split[7] .split_whitespace() .nth(0) .unwrap() .split('/') .last() .unwrap() .to_string(); if !IGNORE_CHILD_PROCS.contains(&pname.as_ref()) { ps.push((pid, pname)); } } ps } // Launches a thread that continuously calls `ps`, updates the shared // `child_pids` vector, and inserts the child processes' pids and names // into the measurements database. 
fn launch_ps_thread( cargo_test_pid: Arc<Mutex<Option<Pid>>>, child_pids: Arc<Mutex<Vec<Pid>>>, db: Arc<Mutex<Database>>, done: Arc<AtomicBool>, ) -> JoinHandle<()> { thread::spawn(move || { let cargo_test_pid = loop { if let Some(pid) = *cargo_test_pid.lock().unwrap() { break pid; } }; while !done.load(Ordering::Relaxed) { let ps = run_ps(cargo_test_pid); *child_pids.lock().unwrap() = ps.iter().map(|(pid, _pname)| *pid).collect(); let mut db = db.lock().unwrap(); for (pid, pname) in ps { if !db.contains(&pid) { db.new_child_process(pid, pname); } } thread::sleep(Duration::from_millis(100)); } }) } // Launches a thread that continuously reads each child processes' // "status" file, parses each file to get the ammount memory locked by that // child process, then updates the database with the locked memory // information. fn launch_measurements_thread( cargo_test_pid: Arc<Mutex<Option<Pid>>>, child_pids: Arc<Mutex<Vec<Pid>>>, db: Arc<Mutex<Database>>, done: Arc<AtomicBool>, ) -> JoinHandle<()> { thread::spawn(move || { while cargo_test_pid.lock().unwrap().is_none() { thread::sleep(Duration::from_millis(1)); } while !done.load(Ordering::Relaxed) { for child_pid in child_pids.lock().unwrap().iter() { if let Some(kbs_locked) = parse_status_file(*child_pid) { db.lock().unwrap().update(*child_pid, kbs_locked); } } thread::sleep(Duration::from_millis(1)); } }) } // Reads a processes' "status" file; parsing it for the ammount of memory // currently locked by the process. fn parse_status_file(pid: Pid) -> Option<u64> { let path = format!("/proc/{}/status", pid); let file = fs::read_to_string(path).ok()?; for line in file.lines() { if line.starts_with("VmLck") { match line.trim().split_whitespace().nth(1) { Some(s) => return s.parse().ok(), _ => return None, }; } } None } fn main() { println!("CURRENT CWD => {:?}", env::current_dir()); println!("CURRENT EXE => {:?}", env::current_exe()); // Initialize the values that will be shared between threads. 
let cargo_test_pid: Arc<Mutex<Option<Pid>>> = Arc::new(Mutex::new(None)); let child_pids: Arc<Mutex<Vec<Pid>>> = Arc::new(Mutex::new(vec![])); let db = Arc::new(Mutex::new(Database::new())); let done = Arc::new(AtomicBool::new(false)); // Start the worker threads. let ps_thread = launch_ps_thread( cargo_test_pid.clone(), child_pids.clone(), db.clone(), done.clone() ); let file_reader_thread = launch_measurements_thread( cargo_test_pid.clone(), child_pids.clone(), db.clone(), done.clone() ); // Get the system's locked memory limit. let mlock_limit = run_prlimit(); println!("\nMlock Monitor for `cargo test`"); println!("==============================="); println!("Locked memory limit (soft, kb): {}", mlock_limit.soft); println!("Lock memory limit (hard, kb): {}", mlock_limit.hard); print!("\nRunning `cargo test` ... "); // Run `cargo test`. let cwd = env::current_dir().unwrap(); let mut cargo_test_args = vec![ "test".to_string(), format!("--manifest-path={}/Cargo.toml", cwd.to_str().unwrap()), ]; cargo_test_args.extend(env::args().skip(1)); /* let mut cargo_test_cmd = Command::new("cargo") .args(&cargo_test_args) .envs(env::vars()) .stdout(Stdio::piped()) .stderr(Stdio::piped()); println!("Running `cargo test`: {:?}", cargo_test_cmd); let cargo_test_output = cargo_test_cmd .spawn() .and_then(|child| { *cargo_test_pid.lock().unwrap() = Some(child.id()); child.wait_with_output() }) .unwrap(); */ let cargo_test_output = Command::new("cargo") .args(&cargo_test_args) .envs(env::vars()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() .and_then(|child| { *cargo_test_pid.lock().unwrap() = Some(child.id()); child.wait_with_output() }) .unwrap(); // Once `cargo test` has finished, stop the worker the threads and // print the measurement results. 
println!("done!"); done.store(true, Ordering::Relaxed); let _ = ps_thread.join(); let _ = file_reader_thread.join(); println!("{}", db.lock().unwrap().table()); println!("\nOutput `cargo test`"); println!("===================="); println!("{}", String::from_utf8_lossy(&cargo_test_output.stdout)); } #[cfg(test)] mod tests { use std::mem::size_of_val; use std::thread; use std::time::Duration; use memsec::mlock; #[test] fn test_mlock() { println!("TEST TEST TEST"); let buf: [u64; 600] = [555; 600]; let ptr = (&buf).as_ptr() as *mut u8; unsafe { mlock(ptr, size_of_val(&buf)); } thread::sleep(Duration::from_secs(2)); assert!(true); } }
MlockLimit
identifier_name
main.rs
#[cfg(test)] extern crate memsec; use std::collections::BTreeMap; use std::env; use std::fmt::{self, Display, Formatter}; use std::fs; use std::iter; use std::process::{Command, Stdio}; use std::str::{self, FromStr}; use std::sync::{Arc, Mutex}; use std::sync::atomic::{AtomicBool, Ordering}; use std::time::Duration; use std::thread::{self, JoinHandle}; // The number of space characters (" ") between table columns. const COLUMN_BUFFER: usize = 8; // Ignore child processes with the following names. const IGNORE_CHILD_PROCS: [&str; 3] = ["rustc", "[rustc]", "rustdoc"]; type Pid = u32; type Pname = String; #[derive(Clone, Debug)] struct Pinfo { pname: Pname, max_locked: u64, } #[derive(Debug)] struct Database(BTreeMap<Pid, Pinfo>); impl Database { fn new() -> Self { Database(BTreeMap::new()) } fn contains(&self, pid: &Pid) -> bool { self.0.contains_key(pid) } fn new_child_process(&mut self, pid: Pid, pname: Pname) { self.0.insert(pid, Pinfo { pname, max_locked: 0 }); } fn update(&mut self, pid: Pid, kbs_locked: u64) { if let Some(pinfo) = self.0.get_mut(&pid) { if kbs_locked > pinfo.max_locked { pinfo.max_locked = kbs_locked; } } } fn table(&self) -> String { let col1_heading = "Process Name"; let col2_heading = "Max Locked Memory (kb)"; let col1_heading_len = col1_heading.chars().count(); let col2_heading_len = col2_heading.chars().count(); let min_col2_start = col1_heading_len + COLUMN_BUFFER; let col2_start = self.0 .values() .fold(min_col2_start, |longest, pinfo| { match pinfo.pname.chars().count() + COLUMN_BUFFER { n_chars if n_chars > longest => n_chars, _ => longest, } }); let heading_whitespace: String = (0..col2_start - col1_heading_len) .map(|_| ' ') .collect(); let heading = format!( "{}{}{}", col1_heading, heading_whitespace, col2_heading, ); let top_border = format!( "{}{}{}", (0..col1_heading_len).map(|_| '=').collect::<String>(), heading_whitespace, (0..col2_heading_len).map(|_| '=').collect::<String>(), ); let mut stdout = format!("\n{}\n{}\n", heading, 
top_border); for Pinfo { pname, max_locked } in self.0.values() { let pname_len = pname.chars().count(); let whitespace: String = (0..col2_start - pname_len) .map(|_| ' ') .collect(); let line = format!("{}{}{}\n", pname, whitespace, max_locked); stdout.push_str(&line); } let table_width = col2_start + col2_heading_len; let bottom_border: String = (0..table_width).map(|_| '=').collect(); stdout.push_str(&bottom_border); stdout } } #[derive(Debug)] enum Limit { Kb(u64), Unlimited, } impl Display for Limit { fn fmt(&self, f: &mut Formatter) -> fmt::Result { match self { Limit::Kb(kbs) => write!(f, "{}", kbs), _ => write!(f, "unlimited"), } } } impl FromStr for Limit { type Err = (); fn from_str(s: &str) -> Result<Self, Self::Err> { if s == "unlimited" { Ok(Limit::Unlimited) } else { let n_bytes: u64 = s.parse::<u64>().map_err(|_| ())?; Ok(Limit::Kb(n_bytes / 1024)) } } } #[derive(Debug)] struct MlockLimit { soft: Limit, hard: Limit, } fn run_prlimit() -> MlockLimit { let output = Command::new("prlimit") .args(&["--memlock", "--output=SOFT,HARD", "--noheadings"]) .output() .map(|output| String::from_utf8(output.stdout).unwrap()) .unwrap_or_else(|e| panic!("Subprocess failed: `ulimit`: {:?}", e)); let split: Vec<&str> = output.split_whitespace().collect(); let soft = Limit::from_str(split[0]).unwrap(); let hard = Limit::from_str(split[1]).unwrap(); MlockLimit { soft, hard } } fn run_ps(cargo_test_pid: Pid) -> Vec<(Pid, Pname)> { let mut ps = vec![]; let ppid = cargo_test_pid.to_string(); let output = Command::new("ps") .args(&["-f", "--ppid", &ppid]) .output() .map(|output| String::from_utf8(output.stdout).unwrap()) .expect("Subprocess failed: `ps`"); for line in output.trim().lines().skip(1) { let split: Vec<&str> = line.split_whitespace().collect(); let pid: Pid = split[1].parse().unwrap(); let pname: Pname = split[7] .split_whitespace() .nth(0) .unwrap() .split('/') .last() .unwrap() .to_string(); if !IGNORE_CHILD_PROCS.contains(&pname.as_ref()) { ps.push((pid, 
pname)); } } ps } // Launches a thread that continuously calls `ps`, updates the shared // `child_pids` vector, and inserts the child processes' pids and names // into the measurements database. fn launch_ps_thread( cargo_test_pid: Arc<Mutex<Option<Pid>>>, child_pids: Arc<Mutex<Vec<Pid>>>, db: Arc<Mutex<Database>>, done: Arc<AtomicBool>, ) -> JoinHandle<()> { thread::spawn(move || { let cargo_test_pid = loop { if let Some(pid) = *cargo_test_pid.lock().unwrap() { break pid; } }; while !done.load(Ordering::Relaxed) { let ps = run_ps(cargo_test_pid); *child_pids.lock().unwrap() = ps.iter().map(|(pid, _pname)| *pid).collect(); let mut db = db.lock().unwrap(); for (pid, pname) in ps { if !db.contains(&pid) { db.new_child_process(pid, pname); } } thread::sleep(Duration::from_millis(100)); } }) } // Launches a thread that continuously reads each child processes' // "status" file, parses each file to get the ammount memory locked by that // child process, then updates the database with the locked memory // information. fn launch_measurements_thread(
cargo_test_pid: Arc<Mutex<Option<Pid>>>, child_pids: Arc<Mutex<Vec<Pid>>>, db: Arc<Mutex<Database>>, done: Arc<AtomicBool>, ) -> JoinHandle<()> { thread::spawn(move || { while cargo_test_pid.lock().unwrap().is_none() { thread::sleep(Duration::from_millis(1)); } while !done.load(Ordering::Relaxed) { for child_pid in child_pids.lock().unwrap().iter() { if let Some(kbs_locked) = parse_status_file(*child_pid) { db.lock().unwrap().update(*child_pid, kbs_locked); } } thread::sleep(Duration::from_millis(1)); } }) } // Reads a processes' "status" file; parsing it for the ammount of memory // currently locked by the process. fn parse_status_file(pid: Pid) -> Option<u64> { let path = format!("/proc/{}/status", pid); let file = fs::read_to_string(path).ok()?; for line in file.lines() { if line.starts_with("VmLck") { match line.trim().split_whitespace().nth(1) { Some(s) => return s.parse().ok(), _ => return None, }; } } None } fn main() { println!("CURRENT CWD => {:?}", env::current_dir()); println!("CURRENT EXE => {:?}", env::current_exe()); // Initialize the values that will be shared between threads. let cargo_test_pid: Arc<Mutex<Option<Pid>>> = Arc::new(Mutex::new(None)); let child_pids: Arc<Mutex<Vec<Pid>>> = Arc::new(Mutex::new(vec![])); let db = Arc::new(Mutex::new(Database::new())); let done = Arc::new(AtomicBool::new(false)); // Start the worker threads. let ps_thread = launch_ps_thread( cargo_test_pid.clone(), child_pids.clone(), db.clone(), done.clone() ); let file_reader_thread = launch_measurements_thread( cargo_test_pid.clone(), child_pids.clone(), db.clone(), done.clone() ); // Get the system's locked memory limit. let mlock_limit = run_prlimit(); println!("\nMlock Monitor for `cargo test`"); println!("==============================="); println!("Locked memory limit (soft, kb): {}", mlock_limit.soft); println!("Lock memory limit (hard, kb): {}", mlock_limit.hard); print!("\nRunning `cargo test` ... "); // Run `cargo test`. 
let cwd = env::current_dir().unwrap(); let mut cargo_test_args = vec![ "test".to_string(), format!("--manifest-path={}/Cargo.toml", cwd.to_str().unwrap()), ]; cargo_test_args.extend(env::args().skip(1)); /* let mut cargo_test_cmd = Command::new("cargo") .args(&cargo_test_args) .envs(env::vars()) .stdout(Stdio::piped()) .stderr(Stdio::piped()); println!("Running `cargo test`: {:?}", cargo_test_cmd); let cargo_test_output = cargo_test_cmd .spawn() .and_then(|child| { *cargo_test_pid.lock().unwrap() = Some(child.id()); child.wait_with_output() }) .unwrap(); */ let cargo_test_output = Command::new("cargo") .args(&cargo_test_args) .envs(env::vars()) .stdout(Stdio::piped()) .stderr(Stdio::piped()) .spawn() .and_then(|child| { *cargo_test_pid.lock().unwrap() = Some(child.id()); child.wait_with_output() }) .unwrap(); // Once `cargo test` has finished, stop the worker the threads and // print the measurement results. println!("done!"); done.store(true, Ordering::Relaxed); let _ = ps_thread.join(); let _ = file_reader_thread.join(); println!("{}", db.lock().unwrap().table()); println!("\nOutput `cargo test`"); println!("===================="); println!("{}", String::from_utf8_lossy(&cargo_test_output.stdout)); } #[cfg(test)] mod tests { use std::mem::size_of_val; use std::thread; use std::time::Duration; use memsec::mlock; #[test] fn test_mlock() { println!("TEST TEST TEST"); let buf: [u64; 600] = [555; 600]; let ptr = (&buf).as_ptr() as *mut u8; unsafe { mlock(ptr, size_of_val(&buf)); } thread::sleep(Duration::from_secs(2)); assert!(true); } }
random_line_split
dep_cache.rs
//! There are 2 sources of facts for the resolver: //! //! - The `Registry` tells us for a `Dependency` what versions are available to fulfil it. //! - The `Summary` tells us for a version (and features) what dependencies need to be fulfilled for it to be activated. //! //! These constitute immutable facts, the soled ground truth that all other inference depends on. //! Theoretically this could all be enumerated ahead of time, but we want to be lazy and only //! look up things we need to. The compromise is to cache the results as they are computed. //! //! This module impl that cache in all the gory details use crate::core::resolver::context::Context; use crate::core::resolver::errors::describe_path_in_context; use crate::core::resolver::types::{ConflictReason, DepInfo, FeaturesSet}; use crate::core::resolver::{ ActivateError, ActivateResult, CliFeatures, RequestedFeatures, ResolveOpts, VersionOrdering, VersionPreferences, }; use crate::core::{ Dependency, FeatureValue, PackageId, PackageIdSpec, QueryKind, Registry, Summary, }; use crate::util::errors::CargoResult; use crate::util::interning::InternedString; use crate::util::PartialVersion; use anyhow::Context as _; use std::collections::{BTreeSet, HashMap, HashSet}; use std::rc::Rc; use std::task::Poll; use tracing::debug; pub struct RegistryQueryer<'a> { pub registry: &'a mut (dyn Registry + 'a), replacements: &'a [(PackageIdSpec, Dependency)], version_prefs: &'a VersionPreferences, /// If set the list of dependency candidates will be sorted by minimal /// versions first. That allows `cargo update -Z minimal-versions` which will /// specify minimum dependency versions to be used. 
minimal_versions: bool, max_rust_version: Option<PartialVersion>, /// a cache of `Candidate`s that fulfil a `Dependency` (and whether `first_minimal_version`) registry_cache: HashMap<(Dependency, bool), Poll<Rc<Vec<Summary>>>>, /// a cache of `Dependency`s that are required for a `Summary` /// /// HACK: `first_minimal_version` is not kept in the cache key is it is 1:1 with /// `parent.is_none()` (the first element of the cache key) as it doesn't change through /// execution. summary_cache: HashMap< (Option<PackageId>, Summary, ResolveOpts), (Rc<(HashSet<InternedString>, Rc<Vec<DepInfo>>)>, bool), >, /// all the cases we ended up using a supplied replacement used_replacements: HashMap<PackageId, Summary>, } impl<'a> RegistryQueryer<'a> { pub fn new( registry: &'a mut dyn Registry, replacements: &'a [(PackageIdSpec, Dependency)], version_prefs: &'a VersionPreferences, minimal_versions: bool, max_rust_version: Option<PartialVersion>, ) -> Self { RegistryQueryer { registry, replacements, version_prefs, minimal_versions, max_rust_version, registry_cache: HashMap::new(), summary_cache: HashMap::new(), used_replacements: HashMap::new(), } } pub fn reset_pending(&mut self) -> bool { let mut all_ready = true; self.registry_cache.retain(|_, r| { if !r.is_ready() { all_ready = false; } r.is_ready() }); self.summary_cache.retain(|_, (_, r)| { if !*r { all_ready = false; } *r }); all_ready } pub fn used_replacement_for(&self, p: PackageId) -> Option<(PackageId, PackageId)> { self.used_replacements.get(&p).map(|r| (p, r.package_id())) } pub fn replacement_summary(&self, p: PackageId) -> Option<&Summary> { self.used_replacements.get(&p) } /// Queries the `registry` to return a list of candidates for `dep`. /// /// This method is the location where overrides are taken into account. If /// any candidates are returned which match an override then the override is /// applied by performing a second query for what the override should /// return. 
pub fn query( &mut self, dep: &Dependency, first_minimal_version: bool, ) -> Poll<CargoResult<Rc<Vec<Summary>>>> { let registry_cache_key = (dep.clone(), first_minimal_version); if let Some(out) = self.registry_cache.get(&registry_cache_key).cloned() { return out.map(Result::Ok); } let mut ret = Vec::new(); let ready = self.registry.query(dep, QueryKind::Exact, &mut |s| { if self.max_rust_version.is_none() || s.rust_version() <= self.max_rust_version { ret.push(s); } })?; if ready.is_pending() { self.registry_cache .insert((dep.clone(), first_minimal_version), Poll::Pending); return Poll::Pending; } for summary in ret.iter() { let mut potential_matches = self .replacements .iter() .filter(|&&(ref spec, _)| spec.matches(summary.package_id())); let &(ref spec, ref dep) = match potential_matches.next() { None => continue, Some(replacement) => replacement, }; debug!( "found an override for {} {}", dep.package_name(), dep.version_req() ); let mut summaries = match self.registry.query_vec(dep, QueryKind::Exact)? { Poll::Ready(s) => s.into_iter(), Poll::Pending => { self.registry_cache .insert((dep.clone(), first_minimal_version), Poll::Pending); return Poll::Pending; } }; let s = summaries.next().ok_or_else(|| { anyhow::format_err!( "no matching package for override `{}` found\n\ location searched: {}\n\ version required: {}", spec, dep.source_id(), dep.version_req() ) })?; let summaries = summaries.collect::<Vec<_>>(); if !summaries.is_empty() { let bullets = summaries .iter() .map(|s| format!(" * {}", s.package_id())) .collect::<Vec<_>>(); return Poll::Ready(Err(anyhow::anyhow!( "the replacement specification `{}` matched \ multiple packages:\n * {}\n{}", spec, s.package_id(), bullets.join("\n") ))); } // The dependency should be hard-coded to have the same name and an // exact version requirement, so both of these assertions should // never fail. 
assert_eq!(s.version(), summary.version()); assert_eq!(s.name(), summary.name()); let replace = if s.source_id() == summary.source_id() { debug!("Preventing\n{:?}\nfrom replacing\n{:?}", summary, s); None } else { Some(s) }; let matched_spec = spec.clone(); // Make sure no duplicates if let Some(&(ref spec, _)) = potential_matches.next() { return Poll::Ready(Err(anyhow::anyhow!( "overlapping replacement specifications found:\n\n \ * {}\n * {}\n\nboth specifications match: {}", matched_spec, spec, summary.package_id() ))); } for dep in summary.dependencies() { debug!("\t{} => {}", dep.package_name(), dep.version_req()); } if let Some(r) = replace { self.used_replacements.insert(summary.package_id(), r); } } // When we attempt versions for a package we'll want to do so in a sorted fashion to pick // the "best candidates" first. VersionPreferences implements this notion. let ordering = if first_minimal_version || self.minimal_versions { VersionOrdering::MinimumVersionsFirst } else { VersionOrdering::MaximumVersionsFirst }; let first_version = first_minimal_version; self.version_prefs .sort_summaries(&mut ret, ordering, first_version); let out = Poll::Ready(Rc::new(ret)); self.registry_cache.insert(registry_cache_key, out.clone()); out.map(Result::Ok) } /// Find out what dependencies will be added by activating `candidate`, /// with features described in `opts`. Then look up in the `registry` /// the candidates that will fulfil each of these dependencies, as it is the /// next obvious question. pub fn build_deps( &mut self, cx: &Context, parent: Option<PackageId>, candidate: &Summary, opts: &ResolveOpts, first_minimal_version: bool, ) -> ActivateResult<Rc<(HashSet<InternedString>, Rc<Vec<DepInfo>>)>> { // if we have calculated a result before, then we can just return it, // as it is a "pure" query of its arguments. 
if let Some(out) = self .summary_cache .get(&(parent, candidate.clone(), opts.clone())) { return Ok(out.0.clone()); } // First, figure out our set of dependencies based on the requested set // of features. This also calculates what features we're going to enable // for our own dependencies. let (used_features, deps) = resolve_features(parent, candidate, opts)?; // Next, transform all dependencies into a list of possible candidates // which can satisfy that dependency. let mut all_ready = true; let mut deps = deps .into_iter() .filter_map( |(dep, features)| match self.query(&dep, first_minimal_version) { Poll::Ready(Ok(candidates)) => Some(Ok((dep, candidates, features))), Poll::Pending => { all_ready = false; // we can ignore Pending deps, resolve will be repeatedly called // until there are none to ignore None } Poll::Ready(Err(e)) => Some(Err(e).with_context(|| { format!( "failed to get `{}` as a dependency of {}", dep.package_name(), describe_path_in_context(cx, &candidate.package_id()), ) })), }, ) .collect::<CargoResult<Vec<DepInfo>>>()?; // Attempt to resolve dependencies with fewer candidates before trying // dependencies with more candidates. This way if the dependency with // only one candidate can't be resolved we don't have to do a bunch of // work before we figure that out. deps.sort_by_key(|&(_, ref a, _)| a.len()); let out = Rc::new((used_features, Rc::new(deps))); // If we succeed we add the result to the cache so we can use it again next time. // We don't cache the failure cases as they don't impl Clone. self.summary_cache.insert( (parent, candidate.clone(), opts.clone()), (out.clone(), all_ready), ); Ok(out) } } /// Returns the features we ended up using and /// all dependencies and the features we want from each of them. pub fn resolve_features<'b>( parent: Option<PackageId>, s: &'b Summary, opts: &'b ResolveOpts, ) -> ActivateResult<(HashSet<InternedString>, Vec<(Dependency, FeaturesSet)>)> { // First, filter by dev-dependencies. 
let deps = s.dependencies(); let deps = deps.iter().filter(|d| d.is_transitive() || opts.dev_deps); let reqs = build_requirements(parent, s, opts)?; let mut ret = Vec::new(); let default_dep = BTreeSet::new(); let mut valid_dep_names = HashSet::new(); // Next, collect all actually enabled dependencies and their features. for dep in deps { // Skip optional dependencies, but not those enabled through a // feature if dep.is_optional() && !reqs.deps.contains_key(&dep.name_in_toml()) { continue; } valid_dep_names.insert(dep.name_in_toml()); // So we want this dependency. Move the features we want from // `feature_deps` to `ret` and register ourselves as using this // name. let mut base = reqs .deps .get(&dep.name_in_toml()) .unwrap_or(&default_dep) .clone(); base.extend(dep.features().iter()); ret.push((dep.clone(), Rc::new(base))); } // This is a special case for command-line `--features // dep_name/feat_name` where `dep_name` does not exist. All other // validation is done either in `build_requirements` or // `build_feature_map`. if parent.is_none() { for dep_name in reqs.deps.keys() { if !valid_dep_names.contains(dep_name) { let e = RequirementError::MissingDependency(*dep_name); return Err(e.into_activate_error(parent, s)); } } } Ok((reqs.into_features(), ret)) } /// Takes requested features for a single package from the input `ResolveOpts` and /// recurses to find all requested features, dependencies and requested /// dependency features in a `Requirements` object, returning it to the resolver. 
fn build_requirements<'a, 'b: 'a>( parent: Option<PackageId>, s: &'a Summary, opts: &'b ResolveOpts, ) -> ActivateResult<Requirements<'a>> { let mut reqs = Requirements::new(s); let handle_default = |uses_default_features, reqs: &mut Requirements<'_>| { if uses_default_features && s.features().contains_key("default") { if let Err(e) = reqs.require_feature(InternedString::new("default")) { return Err(e.into_activate_error(parent, s)); } } Ok(()) }; match &opts.features { RequestedFeatures::CliFeatures(CliFeatures { features, all_features, uses_default_features, }) => { if *all_features { for key in s.features().keys() { if let Err(e) = reqs.require_feature(*key) { return Err(e.into_activate_error(parent, s)); } } } for fv in features.iter() { if let Err(e) = reqs.require_value(fv) { return Err(e.into_activate_error(parent, s)); } } handle_default(*uses_default_features, &mut reqs)?; } RequestedFeatures::DepFeatures { features, uses_default_features, } => { for feature in features.iter() { if let Err(e) = reqs.require_feature(*feature) { return Err(e.into_activate_error(parent, s)); } } handle_default(*uses_default_features, &mut reqs)?; } } Ok(reqs) } /// Set of feature and dependency requirements for a package. #[derive(Debug)] struct Requirements<'a> { summary: &'a Summary, /// The deps map is a mapping of dependency name to list of features enabled. /// /// The resolver will activate all of these dependencies, with the given /// features enabled. deps: HashMap<InternedString, BTreeSet<InternedString>>, /// The set of features enabled on this package which is later used when /// compiling to instruct the code what features were enabled. features: HashSet<InternedString>, } /// An error for a requirement. /// /// This will later be converted to an `ActivateError` depending on whether or /// not this is a dependency or a root package. enum RequirementError { /// The package does not have the requested feature. 
MissingFeature(InternedString), /// The package does not have the requested dependency. MissingDependency(InternedString), /// A feature has a direct cycle to itself. /// /// Note that cycles through multiple features are allowed (but perhaps /// they shouldn't be?). Cycle(InternedString), }
impl Requirements<'_> { fn new(summary: &Summary) -> Requirements<'_> { Requirements { summary, deps: HashMap::new(), features: HashSet::new(), } } fn into_features(self) -> HashSet<InternedString> { self.features } fn require_dep_feature( &mut self, package: InternedString, feat: InternedString, weak: bool, ) -> Result<(), RequirementError> { // If `package` is indeed an optional dependency then we activate the // feature named `package`, but otherwise if `package` is a required // dependency then there's no feature associated with it. if !weak && self .summary .dependencies() .iter() .any(|dep| dep.name_in_toml() == package && dep.is_optional()) { // This optional dependency may not have an implicit feature of // the same name if the `dep:` syntax is used to avoid creating // that implicit feature. if self.summary.features().contains_key(&package) { self.require_feature(package)?; } } self.deps.entry(package).or_default().insert(feat); Ok(()) } fn require_dependency(&mut self, pkg: InternedString) { self.deps.entry(pkg).or_default(); } fn require_feature(&mut self, feat: InternedString) -> Result<(), RequirementError> { if !self.features.insert(feat) { // Already seen this feature. return Ok(()); } let fvs = match self.summary.features().get(&feat) { Some(fvs) => fvs, None => return Err(RequirementError::MissingFeature(feat)), }; for fv in fvs { if let FeatureValue::Feature(dep_feat) = fv { if *dep_feat == feat { return Err(RequirementError::Cycle(feat)); } } self.require_value(fv)?; } Ok(()) } fn require_value(&mut self, fv: &FeatureValue) -> Result<(), RequirementError> { match fv { FeatureValue::Feature(feat) => self.require_feature(*feat)?, FeatureValue::Dep { dep_name } => self.require_dependency(*dep_name), FeatureValue::DepFeature { dep_name, dep_feature, // Weak features are always activated in the dependency // resolver. They will be narrowed inside the new feature // resolver. 
weak, } => self.require_dep_feature(*dep_name, *dep_feature, *weak)?, }; Ok(()) } } impl RequirementError { fn into_activate_error(self, parent: Option<PackageId>, summary: &Summary) -> ActivateError { match self { RequirementError::MissingFeature(feat) => { let deps: Vec<_> = summary .dependencies() .iter() .filter(|dep| dep.name_in_toml() == feat) .collect(); if deps.is_empty() { return match parent { None => ActivateError::Fatal(anyhow::format_err!( "Package `{}` does not have the feature `{}`", summary.package_id(), feat )), Some(p) => ActivateError::Conflict( p, ConflictReason::MissingFeatures(feat.to_string()), ), }; } if deps.iter().any(|dep| dep.is_optional()) { match parent { None => ActivateError::Fatal(anyhow::format_err!( "Package `{}` does not have feature `{}`. It has an optional dependency \ with that name, but that dependency uses the \"dep:\" \ syntax in the features table, so it does not have an implicit feature with that name.", summary.package_id(), feat )), Some(p) => ActivateError::Conflict( p, ConflictReason::NonImplicitDependencyAsFeature(feat), ), } } else { match parent { None => ActivateError::Fatal(anyhow::format_err!( "Package `{}` does not have feature `{}`. It has a required dependency \ with that name, but only optional dependencies can be used as features.", summary.package_id(), feat )), Some(p) => ActivateError::Conflict( p, ConflictReason::RequiredDependencyAsFeature(feat), ), } } } RequirementError::MissingDependency(dep_name) => { match parent { None => ActivateError::Fatal(anyhow::format_err!( "package `{}` does not have a dependency named `{}`", summary.package_id(), dep_name )), // This code path currently isn't used, since `foo/bar` // and `dep:` syntax is not allowed in a dependency. 
Some(p) => ActivateError::Conflict( p, ConflictReason::MissingFeatures(dep_name.to_string()), ), } } RequirementError::Cycle(feat) => ActivateError::Fatal(anyhow::format_err!( "cyclic feature dependency: feature `{}` depends on itself", feat )), } } }
random_line_split
dep_cache.rs
//! There are 2 sources of facts for the resolver: //! //! - The `Registry` tells us for a `Dependency` what versions are available to fulfil it. //! - The `Summary` tells us for a version (and features) what dependencies need to be fulfilled for it to be activated. //! //! These constitute immutable facts, the soled ground truth that all other inference depends on. //! Theoretically this could all be enumerated ahead of time, but we want to be lazy and only //! look up things we need to. The compromise is to cache the results as they are computed. //! //! This module impl that cache in all the gory details use crate::core::resolver::context::Context; use crate::core::resolver::errors::describe_path_in_context; use crate::core::resolver::types::{ConflictReason, DepInfo, FeaturesSet}; use crate::core::resolver::{ ActivateError, ActivateResult, CliFeatures, RequestedFeatures, ResolveOpts, VersionOrdering, VersionPreferences, }; use crate::core::{ Dependency, FeatureValue, PackageId, PackageIdSpec, QueryKind, Registry, Summary, }; use crate::util::errors::CargoResult; use crate::util::interning::InternedString; use crate::util::PartialVersion; use anyhow::Context as _; use std::collections::{BTreeSet, HashMap, HashSet}; use std::rc::Rc; use std::task::Poll; use tracing::debug; pub struct RegistryQueryer<'a> { pub registry: &'a mut (dyn Registry + 'a), replacements: &'a [(PackageIdSpec, Dependency)], version_prefs: &'a VersionPreferences, /// If set the list of dependency candidates will be sorted by minimal /// versions first. That allows `cargo update -Z minimal-versions` which will /// specify minimum dependency versions to be used. 
minimal_versions: bool, max_rust_version: Option<PartialVersion>, /// a cache of `Candidate`s that fulfil a `Dependency` (and whether `first_minimal_version`) registry_cache: HashMap<(Dependency, bool), Poll<Rc<Vec<Summary>>>>, /// a cache of `Dependency`s that are required for a `Summary` /// /// HACK: `first_minimal_version` is not kept in the cache key is it is 1:1 with /// `parent.is_none()` (the first element of the cache key) as it doesn't change through /// execution. summary_cache: HashMap< (Option<PackageId>, Summary, ResolveOpts), (Rc<(HashSet<InternedString>, Rc<Vec<DepInfo>>)>, bool), >, /// all the cases we ended up using a supplied replacement used_replacements: HashMap<PackageId, Summary>, } impl<'a> RegistryQueryer<'a> { pub fn new( registry: &'a mut dyn Registry, replacements: &'a [(PackageIdSpec, Dependency)], version_prefs: &'a VersionPreferences, minimal_versions: bool, max_rust_version: Option<PartialVersion>, ) -> Self { RegistryQueryer { registry, replacements, version_prefs, minimal_versions, max_rust_version, registry_cache: HashMap::new(), summary_cache: HashMap::new(), used_replacements: HashMap::new(), } } pub fn reset_pending(&mut self) -> bool { let mut all_ready = true; self.registry_cache.retain(|_, r| { if !r.is_ready() { all_ready = false; } r.is_ready() }); self.summary_cache.retain(|_, (_, r)| { if !*r { all_ready = false; } *r }); all_ready } pub fn used_replacement_for(&self, p: PackageId) -> Option<(PackageId, PackageId)> { self.used_replacements.get(&p).map(|r| (p, r.package_id())) } pub fn replacement_summary(&self, p: PackageId) -> Option<&Summary> { self.used_replacements.get(&p) } /// Queries the `registry` to return a list of candidates for `dep`. /// /// This method is the location where overrides are taken into account. If /// any candidates are returned which match an override then the override is /// applied by performing a second query for what the override should /// return. 
pub fn query( &mut self, dep: &Dependency, first_minimal_version: bool, ) -> Poll<CargoResult<Rc<Vec<Summary>>>> { let registry_cache_key = (dep.clone(), first_minimal_version); if let Some(out) = self.registry_cache.get(&registry_cache_key).cloned() { return out.map(Result::Ok); } let mut ret = Vec::new(); let ready = self.registry.query(dep, QueryKind::Exact, &mut |s| { if self.max_rust_version.is_none() || s.rust_version() <= self.max_rust_version { ret.push(s); } })?; if ready.is_pending() { self.registry_cache .insert((dep.clone(), first_minimal_version), Poll::Pending); return Poll::Pending; } for summary in ret.iter() { let mut potential_matches = self .replacements .iter() .filter(|&&(ref spec, _)| spec.matches(summary.package_id())); let &(ref spec, ref dep) = match potential_matches.next() { None => continue, Some(replacement) => replacement, }; debug!( "found an override for {} {}", dep.package_name(), dep.version_req() ); let mut summaries = match self.registry.query_vec(dep, QueryKind::Exact)? { Poll::Ready(s) => s.into_iter(), Poll::Pending => { self.registry_cache .insert((dep.clone(), first_minimal_version), Poll::Pending); return Poll::Pending; } }; let s = summaries.next().ok_or_else(|| { anyhow::format_err!( "no matching package for override `{}` found\n\ location searched: {}\n\ version required: {}", spec, dep.source_id(), dep.version_req() ) })?; let summaries = summaries.collect::<Vec<_>>(); if !summaries.is_empty() { let bullets = summaries .iter() .map(|s| format!(" * {}", s.package_id())) .collect::<Vec<_>>(); return Poll::Ready(Err(anyhow::anyhow!( "the replacement specification `{}` matched \ multiple packages:\n * {}\n{}", spec, s.package_id(), bullets.join("\n") ))); } // The dependency should be hard-coded to have the same name and an // exact version requirement, so both of these assertions should // never fail. 
assert_eq!(s.version(), summary.version()); assert_eq!(s.name(), summary.name()); let replace = if s.source_id() == summary.source_id() { debug!("Preventing\n{:?}\nfrom replacing\n{:?}", summary, s); None } else { Some(s) }; let matched_spec = spec.clone(); // Make sure no duplicates if let Some(&(ref spec, _)) = potential_matches.next() { return Poll::Ready(Err(anyhow::anyhow!( "overlapping replacement specifications found:\n\n \ * {}\n * {}\n\nboth specifications match: {}", matched_spec, spec, summary.package_id() ))); } for dep in summary.dependencies() { debug!("\t{} => {}", dep.package_name(), dep.version_req()); } if let Some(r) = replace { self.used_replacements.insert(summary.package_id(), r); } } // When we attempt versions for a package we'll want to do so in a sorted fashion to pick // the "best candidates" first. VersionPreferences implements this notion. let ordering = if first_minimal_version || self.minimal_versions { VersionOrdering::MinimumVersionsFirst } else { VersionOrdering::MaximumVersionsFirst }; let first_version = first_minimal_version; self.version_prefs .sort_summaries(&mut ret, ordering, first_version); let out = Poll::Ready(Rc::new(ret)); self.registry_cache.insert(registry_cache_key, out.clone()); out.map(Result::Ok) } /// Find out what dependencies will be added by activating `candidate`, /// with features described in `opts`. Then look up in the `registry` /// the candidates that will fulfil each of these dependencies, as it is the /// next obvious question. pub fn build_deps( &mut self, cx: &Context, parent: Option<PackageId>, candidate: &Summary, opts: &ResolveOpts, first_minimal_version: bool, ) -> ActivateResult<Rc<(HashSet<InternedString>, Rc<Vec<DepInfo>>)>> { // if we have calculated a result before, then we can just return it, // as it is a "pure" query of its arguments. 
if let Some(out) = self .summary_cache .get(&(parent, candidate.clone(), opts.clone())) { return Ok(out.0.clone()); } // First, figure out our set of dependencies based on the requested set // of features. This also calculates what features we're going to enable // for our own dependencies. let (used_features, deps) = resolve_features(parent, candidate, opts)?; // Next, transform all dependencies into a list of possible candidates // which can satisfy that dependency. let mut all_ready = true; let mut deps = deps .into_iter() .filter_map( |(dep, features)| match self.query(&dep, first_minimal_version) { Poll::Ready(Ok(candidates)) => Some(Ok((dep, candidates, features))), Poll::Pending => { all_ready = false; // we can ignore Pending deps, resolve will be repeatedly called // until there are none to ignore None } Poll::Ready(Err(e)) => Some(Err(e).with_context(|| { format!( "failed to get `{}` as a dependency of {}", dep.package_name(), describe_path_in_context(cx, &candidate.package_id()), ) })), }, ) .collect::<CargoResult<Vec<DepInfo>>>()?; // Attempt to resolve dependencies with fewer candidates before trying // dependencies with more candidates. This way if the dependency with // only one candidate can't be resolved we don't have to do a bunch of // work before we figure that out. deps.sort_by_key(|&(_, ref a, _)| a.len()); let out = Rc::new((used_features, Rc::new(deps))); // If we succeed we add the result to the cache so we can use it again next time. // We don't cache the failure cases as they don't impl Clone. self.summary_cache.insert( (parent, candidate.clone(), opts.clone()), (out.clone(), all_ready), ); Ok(out) } } /// Returns the features we ended up using and /// all dependencies and the features we want from each of them. pub fn resolve_features<'b>( parent: Option<PackageId>, s: &'b Summary, opts: &'b ResolveOpts, ) -> ActivateResult<(HashSet<InternedString>, Vec<(Dependency, FeaturesSet)>)> { // First, filter by dev-dependencies. 
let deps = s.dependencies(); let deps = deps.iter().filter(|d| d.is_transitive() || opts.dev_deps); let reqs = build_requirements(parent, s, opts)?; let mut ret = Vec::new(); let default_dep = BTreeSet::new(); let mut valid_dep_names = HashSet::new(); // Next, collect all actually enabled dependencies and their features. for dep in deps { // Skip optional dependencies, but not those enabled through a // feature if dep.is_optional() && !reqs.deps.contains_key(&dep.name_in_toml()) { continue; } valid_dep_names.insert(dep.name_in_toml()); // So we want this dependency. Move the features we want from // `feature_deps` to `ret` and register ourselves as using this // name. let mut base = reqs .deps .get(&dep.name_in_toml()) .unwrap_or(&default_dep) .clone(); base.extend(dep.features().iter()); ret.push((dep.clone(), Rc::new(base))); } // This is a special case for command-line `--features // dep_name/feat_name` where `dep_name` does not exist. All other // validation is done either in `build_requirements` or // `build_feature_map`. if parent.is_none() { for dep_name in reqs.deps.keys() { if !valid_dep_names.contains(dep_name) { let e = RequirementError::MissingDependency(*dep_name); return Err(e.into_activate_error(parent, s)); } } } Ok((reqs.into_features(), ret)) } /// Takes requested features for a single package from the input `ResolveOpts` and /// recurses to find all requested features, dependencies and requested /// dependency features in a `Requirements` object, returning it to the resolver. 
fn build_requirements<'a, 'b: 'a>( parent: Option<PackageId>, s: &'a Summary, opts: &'b ResolveOpts, ) -> ActivateResult<Requirements<'a>> { let mut reqs = Requirements::new(s); let handle_default = |uses_default_features, reqs: &mut Requirements<'_>| { if uses_default_features && s.features().contains_key("default") { if let Err(e) = reqs.require_feature(InternedString::new("default")) { return Err(e.into_activate_error(parent, s)); } } Ok(()) }; match &opts.features { RequestedFeatures::CliFeatures(CliFeatures { features, all_features, uses_default_features, }) => { if *all_features { for key in s.features().keys() { if let Err(e) = reqs.require_feature(*key) { return Err(e.into_activate_error(parent, s)); } } } for fv in features.iter() { if let Err(e) = reqs.require_value(fv) { return Err(e.into_activate_error(parent, s)); } } handle_default(*uses_default_features, &mut reqs)?; } RequestedFeatures::DepFeatures { features, uses_default_features, } => { for feature in features.iter() { if let Err(e) = reqs.require_feature(*feature) { return Err(e.into_activate_error(parent, s)); } } handle_default(*uses_default_features, &mut reqs)?; } } Ok(reqs) } /// Set of feature and dependency requirements for a package. #[derive(Debug)] struct Requirements<'a> { summary: &'a Summary, /// The deps map is a mapping of dependency name to list of features enabled. /// /// The resolver will activate all of these dependencies, with the given /// features enabled. deps: HashMap<InternedString, BTreeSet<InternedString>>, /// The set of features enabled on this package which is later used when /// compiling to instruct the code what features were enabled. features: HashSet<InternedString>, } /// An error for a requirement. /// /// This will later be converted to an `ActivateError` depending on whether or /// not this is a dependency or a root package. enum RequirementError { /// The package does not have the requested feature. 
MissingFeature(InternedString), /// The package does not have the requested dependency. MissingDependency(InternedString), /// A feature has a direct cycle to itself. /// /// Note that cycles through multiple features are allowed (but perhaps /// they shouldn't be?). Cycle(InternedString), } impl Requirements<'_> { fn new(summary: &Summary) -> Requirements<'_> { Requirements { summary, deps: HashMap::new(), features: HashSet::new(), } } fn into_features(self) -> HashSet<InternedString> { self.features } fn require_dep_feature( &mut self, package: InternedString, feat: InternedString, weak: bool, ) -> Result<(), RequirementError> { // If `package` is indeed an optional dependency then we activate the // feature named `package`, but otherwise if `package` is a required // dependency then there's no feature associated with it. if !weak && self .summary .dependencies() .iter() .any(|dep| dep.name_in_toml() == package && dep.is_optional()) { // This optional dependency may not have an implicit feature of // the same name if the `dep:` syntax is used to avoid creating // that implicit feature. if self.summary.features().contains_key(&package) { self.require_feature(package)?; } } self.deps.entry(package).or_default().insert(feat); Ok(()) } fn require_dependency(&mut self, pkg: InternedString) { self.deps.entry(pkg).or_default(); } fn require_feature(&mut self, feat: InternedString) -> Result<(), RequirementError> { if !self.features.insert(feat) { // Already seen this feature. 
return Ok(()); } let fvs = match self.summary.features().get(&feat) { Some(fvs) => fvs, None => return Err(RequirementError::MissingFeature(feat)), }; for fv in fvs { if let FeatureValue::Feature(dep_feat) = fv { if *dep_feat == feat { return Err(RequirementError::Cycle(feat)); } } self.require_value(fv)?; } Ok(()) } fn require_value(&mut self, fv: &FeatureValue) -> Result<(), RequirementError> { match fv { FeatureValue::Feature(feat) => self.require_feature(*feat)?, FeatureValue::Dep { dep_name } => self.require_dependency(*dep_name), FeatureValue::DepFeature { dep_name, dep_feature, // Weak features are always activated in the dependency // resolver. They will be narrowed inside the new feature // resolver. weak, } => self.require_dep_feature(*dep_name, *dep_feature, *weak)?, }; Ok(()) } } impl RequirementError { fn into_activate_error(self, parent: Option<PackageId>, summary: &Summary) -> ActivateError
}
{ match self { RequirementError::MissingFeature(feat) => { let deps: Vec<_> = summary .dependencies() .iter() .filter(|dep| dep.name_in_toml() == feat) .collect(); if deps.is_empty() { return match parent { None => ActivateError::Fatal(anyhow::format_err!( "Package `{}` does not have the feature `{}`", summary.package_id(), feat )), Some(p) => ActivateError::Conflict( p, ConflictReason::MissingFeatures(feat.to_string()), ), }; } if deps.iter().any(|dep| dep.is_optional()) { match parent { None => ActivateError::Fatal(anyhow::format_err!( "Package `{}` does not have feature `{}`. It has an optional dependency \ with that name, but that dependency uses the \"dep:\" \ syntax in the features table, so it does not have an implicit feature with that name.", summary.package_id(), feat )), Some(p) => ActivateError::Conflict( p, ConflictReason::NonImplicitDependencyAsFeature(feat), ), } } else { match parent { None => ActivateError::Fatal(anyhow::format_err!( "Package `{}` does not have feature `{}`. It has a required dependency \ with that name, but only optional dependencies can be used as features.", summary.package_id(), feat )), Some(p) => ActivateError::Conflict( p, ConflictReason::RequiredDependencyAsFeature(feat), ), } } } RequirementError::MissingDependency(dep_name) => { match parent { None => ActivateError::Fatal(anyhow::format_err!( "package `{}` does not have a dependency named `{}`", summary.package_id(), dep_name )), // This code path currently isn't used, since `foo/bar` // and `dep:` syntax is not allowed in a dependency. Some(p) => ActivateError::Conflict( p, ConflictReason::MissingFeatures(dep_name.to_string()), ), } } RequirementError::Cycle(feat) => ActivateError::Fatal(anyhow::format_err!( "cyclic feature dependency: feature `{}` depends on itself", feat )), } }
identifier_body
dep_cache.rs
//! There are 2 sources of facts for the resolver: //! //! - The `Registry` tells us for a `Dependency` what versions are available to fulfil it. //! - The `Summary` tells us for a version (and features) what dependencies need to be fulfilled for it to be activated. //! //! These constitute immutable facts, the soled ground truth that all other inference depends on. //! Theoretically this could all be enumerated ahead of time, but we want to be lazy and only //! look up things we need to. The compromise is to cache the results as they are computed. //! //! This module impl that cache in all the gory details use crate::core::resolver::context::Context; use crate::core::resolver::errors::describe_path_in_context; use crate::core::resolver::types::{ConflictReason, DepInfo, FeaturesSet}; use crate::core::resolver::{ ActivateError, ActivateResult, CliFeatures, RequestedFeatures, ResolveOpts, VersionOrdering, VersionPreferences, }; use crate::core::{ Dependency, FeatureValue, PackageId, PackageIdSpec, QueryKind, Registry, Summary, }; use crate::util::errors::CargoResult; use crate::util::interning::InternedString; use crate::util::PartialVersion; use anyhow::Context as _; use std::collections::{BTreeSet, HashMap, HashSet}; use std::rc::Rc; use std::task::Poll; use tracing::debug; pub struct RegistryQueryer<'a> { pub registry: &'a mut (dyn Registry + 'a), replacements: &'a [(PackageIdSpec, Dependency)], version_prefs: &'a VersionPreferences, /// If set the list of dependency candidates will be sorted by minimal /// versions first. That allows `cargo update -Z minimal-versions` which will /// specify minimum dependency versions to be used. 
minimal_versions: bool, max_rust_version: Option<PartialVersion>, /// a cache of `Candidate`s that fulfil a `Dependency` (and whether `first_minimal_version`) registry_cache: HashMap<(Dependency, bool), Poll<Rc<Vec<Summary>>>>, /// a cache of `Dependency`s that are required for a `Summary` /// /// HACK: `first_minimal_version` is not kept in the cache key is it is 1:1 with /// `parent.is_none()` (the first element of the cache key) as it doesn't change through /// execution. summary_cache: HashMap< (Option<PackageId>, Summary, ResolveOpts), (Rc<(HashSet<InternedString>, Rc<Vec<DepInfo>>)>, bool), >, /// all the cases we ended up using a supplied replacement used_replacements: HashMap<PackageId, Summary>, } impl<'a> RegistryQueryer<'a> { pub fn new( registry: &'a mut dyn Registry, replacements: &'a [(PackageIdSpec, Dependency)], version_prefs: &'a VersionPreferences, minimal_versions: bool, max_rust_version: Option<PartialVersion>, ) -> Self { RegistryQueryer { registry, replacements, version_prefs, minimal_versions, max_rust_version, registry_cache: HashMap::new(), summary_cache: HashMap::new(), used_replacements: HashMap::new(), } } pub fn reset_pending(&mut self) -> bool { let mut all_ready = true; self.registry_cache.retain(|_, r| { if !r.is_ready() { all_ready = false; } r.is_ready() }); self.summary_cache.retain(|_, (_, r)| { if !*r { all_ready = false; } *r }); all_ready } pub fn used_replacement_for(&self, p: PackageId) -> Option<(PackageId, PackageId)> { self.used_replacements.get(&p).map(|r| (p, r.package_id())) } pub fn replacement_summary(&self, p: PackageId) -> Option<&Summary> { self.used_replacements.get(&p) } /// Queries the `registry` to return a list of candidates for `dep`. /// /// This method is the location where overrides are taken into account. If /// any candidates are returned which match an override then the override is /// applied by performing a second query for what the override should /// return. 
pub fn query( &mut self, dep: &Dependency, first_minimal_version: bool, ) -> Poll<CargoResult<Rc<Vec<Summary>>>> { let registry_cache_key = (dep.clone(), first_minimal_version); if let Some(out) = self.registry_cache.get(&registry_cache_key).cloned() { return out.map(Result::Ok); } let mut ret = Vec::new(); let ready = self.registry.query(dep, QueryKind::Exact, &mut |s| { if self.max_rust_version.is_none() || s.rust_version() <= self.max_rust_version { ret.push(s); } })?; if ready.is_pending() { self.registry_cache .insert((dep.clone(), first_minimal_version), Poll::Pending); return Poll::Pending; } for summary in ret.iter() { let mut potential_matches = self .replacements .iter() .filter(|&&(ref spec, _)| spec.matches(summary.package_id())); let &(ref spec, ref dep) = match potential_matches.next() { None => continue, Some(replacement) => replacement, }; debug!( "found an override for {} {}", dep.package_name(), dep.version_req() ); let mut summaries = match self.registry.query_vec(dep, QueryKind::Exact)? { Poll::Ready(s) => s.into_iter(), Poll::Pending => { self.registry_cache .insert((dep.clone(), first_minimal_version), Poll::Pending); return Poll::Pending; } }; let s = summaries.next().ok_or_else(|| { anyhow::format_err!( "no matching package for override `{}` found\n\ location searched: {}\n\ version required: {}", spec, dep.source_id(), dep.version_req() ) })?; let summaries = summaries.collect::<Vec<_>>(); if !summaries.is_empty() { let bullets = summaries .iter() .map(|s| format!(" * {}", s.package_id())) .collect::<Vec<_>>(); return Poll::Ready(Err(anyhow::anyhow!( "the replacement specification `{}` matched \ multiple packages:\n * {}\n{}", spec, s.package_id(), bullets.join("\n") ))); } // The dependency should be hard-coded to have the same name and an // exact version requirement, so both of these assertions should // never fail. 
assert_eq!(s.version(), summary.version()); assert_eq!(s.name(), summary.name()); let replace = if s.source_id() == summary.source_id() { debug!("Preventing\n{:?}\nfrom replacing\n{:?}", summary, s); None } else { Some(s) }; let matched_spec = spec.clone(); // Make sure no duplicates if let Some(&(ref spec, _)) = potential_matches.next() { return Poll::Ready(Err(anyhow::anyhow!( "overlapping replacement specifications found:\n\n \ * {}\n * {}\n\nboth specifications match: {}", matched_spec, spec, summary.package_id() ))); } for dep in summary.dependencies() { debug!("\t{} => {}", dep.package_name(), dep.version_req()); } if let Some(r) = replace { self.used_replacements.insert(summary.package_id(), r); } } // When we attempt versions for a package we'll want to do so in a sorted fashion to pick // the "best candidates" first. VersionPreferences implements this notion. let ordering = if first_minimal_version || self.minimal_versions { VersionOrdering::MinimumVersionsFirst } else { VersionOrdering::MaximumVersionsFirst }; let first_version = first_minimal_version; self.version_prefs .sort_summaries(&mut ret, ordering, first_version); let out = Poll::Ready(Rc::new(ret)); self.registry_cache.insert(registry_cache_key, out.clone()); out.map(Result::Ok) } /// Find out what dependencies will be added by activating `candidate`, /// with features described in `opts`. Then look up in the `registry` /// the candidates that will fulfil each of these dependencies, as it is the /// next obvious question. pub fn build_deps( &mut self, cx: &Context, parent: Option<PackageId>, candidate: &Summary, opts: &ResolveOpts, first_minimal_version: bool, ) -> ActivateResult<Rc<(HashSet<InternedString>, Rc<Vec<DepInfo>>)>> { // if we have calculated a result before, then we can just return it, // as it is a "pure" query of its arguments. 
if let Some(out) = self .summary_cache .get(&(parent, candidate.clone(), opts.clone())) { return Ok(out.0.clone()); } // First, figure out our set of dependencies based on the requested set // of features. This also calculates what features we're going to enable // for our own dependencies. let (used_features, deps) = resolve_features(parent, candidate, opts)?; // Next, transform all dependencies into a list of possible candidates // which can satisfy that dependency. let mut all_ready = true; let mut deps = deps .into_iter() .filter_map( |(dep, features)| match self.query(&dep, first_minimal_version) { Poll::Ready(Ok(candidates)) => Some(Ok((dep, candidates, features))), Poll::Pending => { all_ready = false; // we can ignore Pending deps, resolve will be repeatedly called // until there are none to ignore None } Poll::Ready(Err(e)) => Some(Err(e).with_context(|| { format!( "failed to get `{}` as a dependency of {}", dep.package_name(), describe_path_in_context(cx, &candidate.package_id()), ) })), }, ) .collect::<CargoResult<Vec<DepInfo>>>()?; // Attempt to resolve dependencies with fewer candidates before trying // dependencies with more candidates. This way if the dependency with // only one candidate can't be resolved we don't have to do a bunch of // work before we figure that out. deps.sort_by_key(|&(_, ref a, _)| a.len()); let out = Rc::new((used_features, Rc::new(deps))); // If we succeed we add the result to the cache so we can use it again next time. // We don't cache the failure cases as they don't impl Clone. self.summary_cache.insert( (parent, candidate.clone(), opts.clone()), (out.clone(), all_ready), ); Ok(out) } } /// Returns the features we ended up using and /// all dependencies and the features we want from each of them. pub fn resolve_features<'b>( parent: Option<PackageId>, s: &'b Summary, opts: &'b ResolveOpts, ) -> ActivateResult<(HashSet<InternedString>, Vec<(Dependency, FeaturesSet)>)> { // First, filter by dev-dependencies. 
let deps = s.dependencies(); let deps = deps.iter().filter(|d| d.is_transitive() || opts.dev_deps); let reqs = build_requirements(parent, s, opts)?; let mut ret = Vec::new(); let default_dep = BTreeSet::new(); let mut valid_dep_names = HashSet::new(); // Next, collect all actually enabled dependencies and their features. for dep in deps { // Skip optional dependencies, but not those enabled through a // feature if dep.is_optional() && !reqs.deps.contains_key(&dep.name_in_toml()) { continue; } valid_dep_names.insert(dep.name_in_toml()); // So we want this dependency. Move the features we want from // `feature_deps` to `ret` and register ourselves as using this // name. let mut base = reqs .deps .get(&dep.name_in_toml()) .unwrap_or(&default_dep) .clone(); base.extend(dep.features().iter()); ret.push((dep.clone(), Rc::new(base))); } // This is a special case for command-line `--features // dep_name/feat_name` where `dep_name` does not exist. All other // validation is done either in `build_requirements` or // `build_feature_map`. if parent.is_none() { for dep_name in reqs.deps.keys() { if !valid_dep_names.contains(dep_name) { let e = RequirementError::MissingDependency(*dep_name); return Err(e.into_activate_error(parent, s)); } } } Ok((reqs.into_features(), ret)) } /// Takes requested features for a single package from the input `ResolveOpts` and /// recurses to find all requested features, dependencies and requested /// dependency features in a `Requirements` object, returning it to the resolver. 
fn build_requirements<'a, 'b: 'a>( parent: Option<PackageId>, s: &'a Summary, opts: &'b ResolveOpts, ) -> ActivateResult<Requirements<'a>> { let mut reqs = Requirements::new(s); let handle_default = |uses_default_features, reqs: &mut Requirements<'_>| { if uses_default_features && s.features().contains_key("default") { if let Err(e) = reqs.require_feature(InternedString::new("default")) { return Err(e.into_activate_error(parent, s)); } } Ok(()) }; match &opts.features { RequestedFeatures::CliFeatures(CliFeatures { features, all_features, uses_default_features, }) => { if *all_features { for key in s.features().keys() { if let Err(e) = reqs.require_feature(*key) { return Err(e.into_activate_error(parent, s)); } } } for fv in features.iter() { if let Err(e) = reqs.require_value(fv) { return Err(e.into_activate_error(parent, s)); } } handle_default(*uses_default_features, &mut reqs)?; } RequestedFeatures::DepFeatures { features, uses_default_features, } => { for feature in features.iter() { if let Err(e) = reqs.require_feature(*feature) { return Err(e.into_activate_error(parent, s)); } } handle_default(*uses_default_features, &mut reqs)?; } } Ok(reqs) } /// Set of feature and dependency requirements for a package. #[derive(Debug)] struct Requirements<'a> { summary: &'a Summary, /// The deps map is a mapping of dependency name to list of features enabled. /// /// The resolver will activate all of these dependencies, with the given /// features enabled. deps: HashMap<InternedString, BTreeSet<InternedString>>, /// The set of features enabled on this package which is later used when /// compiling to instruct the code what features were enabled. features: HashSet<InternedString>, } /// An error for a requirement. /// /// This will later be converted to an `ActivateError` depending on whether or /// not this is a dependency or a root package. enum RequirementError { /// The package does not have the requested feature. 
MissingFeature(InternedString), /// The package does not have the requested dependency. MissingDependency(InternedString), /// A feature has a direct cycle to itself. /// /// Note that cycles through multiple features are allowed (but perhaps /// they shouldn't be?). Cycle(InternedString), } impl Requirements<'_> { fn new(summary: &Summary) -> Requirements<'_> { Requirements { summary, deps: HashMap::new(), features: HashSet::new(), } } fn
(self) -> HashSet<InternedString> { self.features } fn require_dep_feature( &mut self, package: InternedString, feat: InternedString, weak: bool, ) -> Result<(), RequirementError> { // If `package` is indeed an optional dependency then we activate the // feature named `package`, but otherwise if `package` is a required // dependency then there's no feature associated with it. if !weak && self .summary .dependencies() .iter() .any(|dep| dep.name_in_toml() == package && dep.is_optional()) { // This optional dependency may not have an implicit feature of // the same name if the `dep:` syntax is used to avoid creating // that implicit feature. if self.summary.features().contains_key(&package) { self.require_feature(package)?; } } self.deps.entry(package).or_default().insert(feat); Ok(()) } fn require_dependency(&mut self, pkg: InternedString) { self.deps.entry(pkg).or_default(); } fn require_feature(&mut self, feat: InternedString) -> Result<(), RequirementError> { if !self.features.insert(feat) { // Already seen this feature. return Ok(()); } let fvs = match self.summary.features().get(&feat) { Some(fvs) => fvs, None => return Err(RequirementError::MissingFeature(feat)), }; for fv in fvs { if let FeatureValue::Feature(dep_feat) = fv { if *dep_feat == feat { return Err(RequirementError::Cycle(feat)); } } self.require_value(fv)?; } Ok(()) } fn require_value(&mut self, fv: &FeatureValue) -> Result<(), RequirementError> { match fv { FeatureValue::Feature(feat) => self.require_feature(*feat)?, FeatureValue::Dep { dep_name } => self.require_dependency(*dep_name), FeatureValue::DepFeature { dep_name, dep_feature, // Weak features are always activated in the dependency // resolver. They will be narrowed inside the new feature // resolver. 
weak, } => self.require_dep_feature(*dep_name, *dep_feature, *weak)?, }; Ok(()) } } impl RequirementError { fn into_activate_error(self, parent: Option<PackageId>, summary: &Summary) -> ActivateError { match self { RequirementError::MissingFeature(feat) => { let deps: Vec<_> = summary .dependencies() .iter() .filter(|dep| dep.name_in_toml() == feat) .collect(); if deps.is_empty() { return match parent { None => ActivateError::Fatal(anyhow::format_err!( "Package `{}` does not have the feature `{}`", summary.package_id(), feat )), Some(p) => ActivateError::Conflict( p, ConflictReason::MissingFeatures(feat.to_string()), ), }; } if deps.iter().any(|dep| dep.is_optional()) { match parent { None => ActivateError::Fatal(anyhow::format_err!( "Package `{}` does not have feature `{}`. It has an optional dependency \ with that name, but that dependency uses the \"dep:\" \ syntax in the features table, so it does not have an implicit feature with that name.", summary.package_id(), feat )), Some(p) => ActivateError::Conflict( p, ConflictReason::NonImplicitDependencyAsFeature(feat), ), } } else { match parent { None => ActivateError::Fatal(anyhow::format_err!( "Package `{}` does not have feature `{}`. It has a required dependency \ with that name, but only optional dependencies can be used as features.", summary.package_id(), feat )), Some(p) => ActivateError::Conflict( p, ConflictReason::RequiredDependencyAsFeature(feat), ), } } } RequirementError::MissingDependency(dep_name) => { match parent { None => ActivateError::Fatal(anyhow::format_err!( "package `{}` does not have a dependency named `{}`", summary.package_id(), dep_name )), // This code path currently isn't used, since `foo/bar` // and `dep:` syntax is not allowed in a dependency. 
Some(p) => ActivateError::Conflict( p, ConflictReason::MissingFeatures(dep_name.to_string()), ), } } RequirementError::Cycle(feat) => ActivateError::Fatal(anyhow::format_err!( "cyclic feature dependency: feature `{}` depends on itself", feat )), } } }
into_features
identifier_name
main.py
import bz2 import json import os from pyspark.context import SparkContext from pyspark.accumulators import AccumulatorParam import numpy as np from scipy import spatial import pandas as pd import re import operator import csv CACHE_DIR = "D:\TwitterDatastream\PYTHONCACHE_SMALL" EDU_DATA = 'merged.csv' TRAIN_FEAT_CSV = 'testFeat.csv' TRAIN_LABS_CSV = 'testLabs.csv' TRAIN_FEAT_LABS_CSV = 'testFeatLabs.csv' FEATURE_NAMES_CSV = 'featureNames.csv' sc = SparkContext('local', 'test') # location_data = pd.read_csv('new_merged.csv') class WordsSetAccumulatorParam(AccumulatorParam): def zero(self, v): return set() def
(self, acc1, acc2): return acc1.union(acc2) # An accumulator used to build the word vocabulary class WordsDictAccumulatorParam(AccumulatorParam): def zero(self, v): return dict() def addInPlace(self, acc1, acc2): for key in acc2.keys(): try: acc1[key] += acc2[key] except: acc1[key] = acc2[key] return acc1 # An accumulator used to build the word vocabulary # vocabulary = sc.accumulator(set(), WordsSetAccumulatorParam()) vocabulary = sc.accumulator(dict(), WordsDictAccumulatorParam()) # load Education census data location_data = pd.read_csv(EDU_DATA) area_dict = dict(zip(location_data['city'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist())) county_dict = dict(zip(location_data['county'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist())) coord_dict = {tuple(x[:2]):x[2] for x in location_data[['lat', 'lng', 'county']].values} # create a KD tree of known county center locations to be used to map a tweet coordinate to a county latlon = list() for index, row in location_data.iterrows(): latlon.append([location_data['lat'][index], location_data['lng'][index]]) latlon = np.array(latlon) latlonKDT = spatial.KDTree(latlon) # function to map place, location or coordinate data from a tweet to a FIPS code of the county and the education # level distribution of that county def mapToCounty(place, location, coordinates): # coordr_dict = {tuple(x[:2]):x[2] for x in location_data[['lat_r', 'lng_r', 'county']].values} if place: place = (place.split(",")[0]).lower() # country = (place.split(",")[1]).lower() try: if area_dict[place]: return area_dict[place] except: None if location: location = (location.split(",")[0]).lower() try: if area_dict[location]: return area_dict[location] except: None if coordinates: closestLoc = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9)[1] try: closest = latlon[closestLoc] except: return None # closest = 
spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9) # if closest[0] != float('inf') and latlon[closest[1]][0] != 0. and latlon[closest[1]][1] != 0.: # print(coordinates, closest, latlon[closest[1]]) # return closest[0], closest[1] if coord_dict[closest[0], closest[1]]: county_k = coord_dict[(closest[0], closest[1])] return county_dict[county_k] return None # Load Tweets from each file (.bz2 or .json) def load_bz2_json(filename): if '.bz2' in filename: with bz2.open(filename, 'rt') as f: lines = str(f.read()).split('\n') else: with open(filename) as f: lines = str(f.readlines()).split('\\n') num_lines = len(lines) tweets = [] for line in lines: try: if line == "": num_lines -= 1 continue tweets.append(json.loads(line)) except: continue # print(filename, len(tweets)) return tweets # strip each tweet object and keep only whats necessary in a dictonary def load_tweet(tweet, tweets_saved): try: # tweet_id = tweet['id'] tweet_text = tweet['text'] tweet_user_id = tweet['user']['id'] tweet_user_location = tweet['user']['location'] tweet_user_lang = tweet['user']['lang'] try: tweet_coordinates = tweet['coordinates']['coordinates'] except: tweet_coordinates = None try: tweet_place = tweet['place']['full_name'] except: tweet_place = None map_to_county = mapToCounty(tweet_place, tweet_user_location, tweet_coordinates) if map_to_county: tweet_county = int(map_to_county[0]) tweet_education_level = tuple(map_to_county[1:]) else: tweet_county = None tweet_education_level = None # created_at = tweet['created_at'] except KeyError: return {}, tweets_saved data = {'tweet_text': tweet_text, # 'tweet_id': tweet_id, 'tweet_user_id': tweet_user_id, # 'tweet_user_location': tweet_user_location, 'tweet_user_lang': tweet_user_lang, # 'tweet_place': tweet_place, # 'tweet_coordinates': tweet_coordinates, 'tweet_county': tweet_county, 'tweet_education_level': tweet_education_level} # 'date_loaded': datetime.datetime.now(), # 'tweet_json': json.dumps(tweet)} tweets_saved += 1 
return data, tweets_saved wordPattern = re.compile(r"\b[A-Za-z_.,!\"']+\b", re.IGNORECASE) httpPattern = re.compile(r"^RT |@\S+|http\S+", re.IGNORECASE) # Function that uses regular expressions to remove unwanted characters, URLs, etc. and split tweet_text # into meaningful words def parseTweetText(tweet): text = tweet['tweet_text'] text = httpPattern.sub(r"", text) words = wordPattern.findall(text) tweet['tweet_text'] = words #list(zip(words, [1]*len(words))) # print(tweet) return tweet # function to combine word lists and count frequency of each word locally def combineWordLists(x ,y): global vocabulary if isinstance(x, dict): wordDict = x xny = y else: wordDict = dict() xny = x + y for w in xny: # vocabulary +=[w] vocabulary += {w: 1} try: wordDict[w] += 1 except: wordDict[w] = 1 return wordDict # function to add words to the vocabulary and count frequency of each word globally def genVocabulary(x): global vocabulary arr = x[1] if isinstance(arr, dict): return x else: wordDict = dict() for w in arr: vocabulary += {w: 1} try: wordDict[w] += 1 except: wordDict[w] = 1 x = (x[0],wordDict) return x # read tweets from each file and parse them into dictionaries with only relevant data def handle_file(filename): tweets = load_bz2_json(filename) tweet_dicts = [] tweets_saved = 0 for tweet in tweets: tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved) if tweet_dict: tweet_dicts.append(tweet_dict) return tweet_dicts # filter only tweets that have text, land, education and are written in english def filterTweets(tweet): # location = tweet['tweet_user_location'] # coordinates = tweet['tweet_place'] # place = tweet['tweet_coordinates'] text = tweet['tweet_text'] lang = tweet['tweet_user_lang'] education = tweet['tweet_education_level'] county = tweet['tweet_county'] # if location or coordinates or place: ret = True # else: return False if not text or text == []: return False if lang != 'en': return False if education is None or county is None: return False return True 
# store all data into CSV files def storeResults(traindata, vocab): columnIdx = {vocab[voc][0]: voc for voc in range(len(vocab))} with open(TRAIN_FEAT_CSV, 'wt') as trainFeatFile, open(TRAIN_LABS_CSV, 'wt') as trainLabsFile, open(TRAIN_FEAT_LABS_CSV, 'wt') as trainFeatLabsFile: trainFeatwriter = csv.writer(trainFeatFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') trainLabswriter = csv.writer(trainLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') trainFeatLabswriter = csv.writer(trainFeatLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') for row in traindata: edu = row[0][1] featDict = row[1] feats = np.zeros(len(columnIdx)) for key in featDict: try: feats[columnIdx[key]] = featDict[key] except: continue trainFeatwriter.writerow(feats.tolist()) trainLabswriter.writerow(list(edu)) combList = list(edu) + feats.tolist() trainFeatLabswriter.writerow(combList) # main function with all the Spark code def main(): fileNames = sc.parallelize([]) # generate a list of all files in the data directory for root, dirs, files in os.walk(CACHE_DIR): subFileNames = sc.parallelize(files).map(lambda file: os.path.join(root, file)) fileNames = sc.union([fileNames, subFileNames]) # load all tweets and filter tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda tweet: filterTweets(tweet)) # clean, parse and filter tweets and map each to county and education level wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda tweet: filterTweets(tweet)) # set county and education level as the key for each tweet and keep only the text as value countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'], tweet['tweet_education_level']), tweet['tweet_text'])) # aggregate tweets based on county level and generate vocabulary countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)).map(lambda z: genVocabulary(z)) tempRes = 
countyEduRdd.collect() # print(tempRes) print(len(tempRes)) vocabRDD = sc.parallelize(vocabulary.value.items()) # filter out words that only occur once in the entire dataset (mainly noise) vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False) # print("vocabulary = ", sorted(vocabulary.value.items(), key=operator.itemgetter(1))) vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True) # print("vocabulary = ", vocab) print("vocabulary size = ", len(vocab)) storeResults(tempRes, vocab) if __name__ == "__main__": main()
addInPlace
identifier_name
main.py
import bz2 import json import os from pyspark.context import SparkContext from pyspark.accumulators import AccumulatorParam import numpy as np from scipy import spatial import pandas as pd import re import operator import csv CACHE_DIR = "D:\TwitterDatastream\PYTHONCACHE_SMALL" EDU_DATA = 'merged.csv' TRAIN_FEAT_CSV = 'testFeat.csv' TRAIN_LABS_CSV = 'testLabs.csv' TRAIN_FEAT_LABS_CSV = 'testFeatLabs.csv' FEATURE_NAMES_CSV = 'featureNames.csv' sc = SparkContext('local', 'test') # location_data = pd.read_csv('new_merged.csv') class WordsSetAccumulatorParam(AccumulatorParam): def zero(self, v): return set() def addInPlace(self, acc1, acc2): return acc1.union(acc2) # An accumulator used to build the word vocabulary class WordsDictAccumulatorParam(AccumulatorParam): def zero(self, v): return dict() def addInPlace(self, acc1, acc2): for key in acc2.keys(): try: acc1[key] += acc2[key] except: acc1[key] = acc2[key] return acc1 # An accumulator used to build the word vocabulary # vocabulary = sc.accumulator(set(), WordsSetAccumulatorParam()) vocabulary = sc.accumulator(dict(), WordsDictAccumulatorParam()) # load Education census data location_data = pd.read_csv(EDU_DATA) area_dict = dict(zip(location_data['city'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist())) county_dict = dict(zip(location_data['county'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist())) coord_dict = {tuple(x[:2]):x[2] for x in location_data[['lat', 'lng', 'county']].values} # create a KD tree of known county center locations to be used to map a tweet coordinate to a county latlon = list() for index, row in location_data.iterrows(): latlon.append([location_data['lat'][index], location_data['lng'][index]]) latlon = np.array(latlon) latlonKDT = spatial.KDTree(latlon) # function to map place, location or coordinate data from a tweet to a FIPS code of the county and the education # level distribution of that county 
def mapToCounty(place, location, coordinates): # coordr_dict = {tuple(x[:2]):x[2] for x in location_data[['lat_r', 'lng_r', 'county']].values} if place: place = (place.split(",")[0]).lower() # country = (place.split(",")[1]).lower() try: if area_dict[place]: return area_dict[place] except: None if location: location = (location.split(",")[0]).lower() try: if area_dict[location]:
except: None if coordinates: closestLoc = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9)[1] try: closest = latlon[closestLoc] except: return None # closest = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9) # if closest[0] != float('inf') and latlon[closest[1]][0] != 0. and latlon[closest[1]][1] != 0.: # print(coordinates, closest, latlon[closest[1]]) # return closest[0], closest[1] if coord_dict[closest[0], closest[1]]: county_k = coord_dict[(closest[0], closest[1])] return county_dict[county_k] return None # Load Tweets from each file (.bz2 or .json) def load_bz2_json(filename): if '.bz2' in filename: with bz2.open(filename, 'rt') as f: lines = str(f.read()).split('\n') else: with open(filename) as f: lines = str(f.readlines()).split('\\n') num_lines = len(lines) tweets = [] for line in lines: try: if line == "": num_lines -= 1 continue tweets.append(json.loads(line)) except: continue # print(filename, len(tweets)) return tweets # strip each tweet object and keep only whats necessary in a dictonary def load_tweet(tweet, tweets_saved): try: # tweet_id = tweet['id'] tweet_text = tweet['text'] tweet_user_id = tweet['user']['id'] tweet_user_location = tweet['user']['location'] tweet_user_lang = tweet['user']['lang'] try: tweet_coordinates = tweet['coordinates']['coordinates'] except: tweet_coordinates = None try: tweet_place = tweet['place']['full_name'] except: tweet_place = None map_to_county = mapToCounty(tweet_place, tweet_user_location, tweet_coordinates) if map_to_county: tweet_county = int(map_to_county[0]) tweet_education_level = tuple(map_to_county[1:]) else: tweet_county = None tweet_education_level = None # created_at = tweet['created_at'] except KeyError: return {}, tweets_saved data = {'tweet_text': tweet_text, # 'tweet_id': tweet_id, 'tweet_user_id': tweet_user_id, # 'tweet_user_location': tweet_user_location, 'tweet_user_lang': tweet_user_lang, # 'tweet_place': tweet_place, # 'tweet_coordinates': 
tweet_coordinates, 'tweet_county': tweet_county, 'tweet_education_level': tweet_education_level} # 'date_loaded': datetime.datetime.now(), # 'tweet_json': json.dumps(tweet)} tweets_saved += 1 return data, tweets_saved wordPattern = re.compile(r"\b[A-Za-z_.,!\"']+\b", re.IGNORECASE) httpPattern = re.compile(r"^RT |@\S+|http\S+", re.IGNORECASE) # Function that uses regular expressions to remove unwanted characters, URLs, etc. and split tweet_text # into meaningful words def parseTweetText(tweet): text = tweet['tweet_text'] text = httpPattern.sub(r"", text) words = wordPattern.findall(text) tweet['tweet_text'] = words #list(zip(words, [1]*len(words))) # print(tweet) return tweet # function to combine word lists and count frequency of each word locally def combineWordLists(x ,y): global vocabulary if isinstance(x, dict): wordDict = x xny = y else: wordDict = dict() xny = x + y for w in xny: # vocabulary +=[w] vocabulary += {w: 1} try: wordDict[w] += 1 except: wordDict[w] = 1 return wordDict # function to add words to the vocabulary and count frequency of each word globally def genVocabulary(x): global vocabulary arr = x[1] if isinstance(arr, dict): return x else: wordDict = dict() for w in arr: vocabulary += {w: 1} try: wordDict[w] += 1 except: wordDict[w] = 1 x = (x[0],wordDict) return x # read tweets from each file and parse them into dictionaries with only relevant data def handle_file(filename): tweets = load_bz2_json(filename) tweet_dicts = [] tweets_saved = 0 for tweet in tweets: tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved) if tweet_dict: tweet_dicts.append(tweet_dict) return tweet_dicts # filter only tweets that have text, land, education and are written in english def filterTweets(tweet): # location = tweet['tweet_user_location'] # coordinates = tweet['tweet_place'] # place = tweet['tweet_coordinates'] text = tweet['tweet_text'] lang = tweet['tweet_user_lang'] education = tweet['tweet_education_level'] county = tweet['tweet_county'] # if location 
or coordinates or place: ret = True # else: return False if not text or text == []: return False if lang != 'en': return False if education is None or county is None: return False return True # store all data into CSV files def storeResults(traindata, vocab): columnIdx = {vocab[voc][0]: voc for voc in range(len(vocab))} with open(TRAIN_FEAT_CSV, 'wt') as trainFeatFile, open(TRAIN_LABS_CSV, 'wt') as trainLabsFile, open(TRAIN_FEAT_LABS_CSV, 'wt') as trainFeatLabsFile: trainFeatwriter = csv.writer(trainFeatFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') trainLabswriter = csv.writer(trainLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') trainFeatLabswriter = csv.writer(trainFeatLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') for row in traindata: edu = row[0][1] featDict = row[1] feats = np.zeros(len(columnIdx)) for key in featDict: try: feats[columnIdx[key]] = featDict[key] except: continue trainFeatwriter.writerow(feats.tolist()) trainLabswriter.writerow(list(edu)) combList = list(edu) + feats.tolist() trainFeatLabswriter.writerow(combList) # main function with all the Spark code def main(): fileNames = sc.parallelize([]) # generate a list of all files in the data directory for root, dirs, files in os.walk(CACHE_DIR): subFileNames = sc.parallelize(files).map(lambda file: os.path.join(root, file)) fileNames = sc.union([fileNames, subFileNames]) # load all tweets and filter tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda tweet: filterTweets(tweet)) # clean, parse and filter tweets and map each to county and education level wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda tweet: filterTweets(tweet)) # set county and education level as the key for each tweet and keep only the text as value countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'], tweet['tweet_education_level']), 
tweet['tweet_text'])) # aggregate tweets based on county level and generate vocabulary countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)).map(lambda z: genVocabulary(z)) tempRes = countyEduRdd.collect() # print(tempRes) print(len(tempRes)) vocabRDD = sc.parallelize(vocabulary.value.items()) # filter out words that only occur once in the entire dataset (mainly noise) vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False) # print("vocabulary = ", sorted(vocabulary.value.items(), key=operator.itemgetter(1))) vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True) # print("vocabulary = ", vocab) print("vocabulary size = ", len(vocab)) storeResults(tempRes, vocab) if __name__ == "__main__": main()
return area_dict[location]
conditional_block
main.py
import bz2 import json import os from pyspark.context import SparkContext from pyspark.accumulators import AccumulatorParam import numpy as np from scipy import spatial import pandas as pd import re import operator import csv CACHE_DIR = "D:\TwitterDatastream\PYTHONCACHE_SMALL" EDU_DATA = 'merged.csv' TRAIN_FEAT_CSV = 'testFeat.csv' TRAIN_LABS_CSV = 'testLabs.csv' TRAIN_FEAT_LABS_CSV = 'testFeatLabs.csv' FEATURE_NAMES_CSV = 'featureNames.csv' sc = SparkContext('local', 'test') # location_data = pd.read_csv('new_merged.csv') class WordsSetAccumulatorParam(AccumulatorParam): def zero(self, v): return set() def addInPlace(self, acc1, acc2): return acc1.union(acc2) # An accumulator used to build the word vocabulary class WordsDictAccumulatorParam(AccumulatorParam): def zero(self, v): return dict() def addInPlace(self, acc1, acc2): for key in acc2.keys(): try: acc1[key] += acc2[key] except: acc1[key] = acc2[key] return acc1 # An accumulator used to build the word vocabulary # vocabulary = sc.accumulator(set(), WordsSetAccumulatorParam()) vocabulary = sc.accumulator(dict(), WordsDictAccumulatorParam()) # load Education census data location_data = pd.read_csv(EDU_DATA) area_dict = dict(zip(location_data['city'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist())) county_dict = dict(zip(location_data['county'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist())) coord_dict = {tuple(x[:2]):x[2] for x in location_data[['lat', 'lng', 'county']].values} # create a KD tree of known county center locations to be used to map a tweet coordinate to a county latlon = list() for index, row in location_data.iterrows(): latlon.append([location_data['lat'][index], location_data['lng'][index]]) latlon = np.array(latlon) latlonKDT = spatial.KDTree(latlon) # function to map place, location or coordinate data from a tweet to a FIPS code of the county and the education # level distribution of that county 
def mapToCounty(place, location, coordinates): # coordr_dict = {tuple(x[:2]):x[2] for x in location_data[['lat_r', 'lng_r', 'county']].values} if place: place = (place.split(",")[0]).lower() # country = (place.split(",")[1]).lower() try: if area_dict[place]: return area_dict[place] except: None if location: location = (location.split(",")[0]).lower() try: if area_dict[location]: return area_dict[location] except: None if coordinates: closestLoc = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9)[1] try: closest = latlon[closestLoc] except: return None # closest = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9) # if closest[0] != float('inf') and latlon[closest[1]][0] != 0. and latlon[closest[1]][1] != 0.: # print(coordinates, closest, latlon[closest[1]]) # return closest[0], closest[1] if coord_dict[closest[0], closest[1]]: county_k = coord_dict[(closest[0], closest[1])] return county_dict[county_k] return None # Load Tweets from each file (.bz2 or .json) def load_bz2_json(filename): if '.bz2' in filename: with bz2.open(filename, 'rt') as f: lines = str(f.read()).split('\n') else: with open(filename) as f: lines = str(f.readlines()).split('\\n') num_lines = len(lines) tweets = [] for line in lines: try: if line == "": num_lines -= 1 continue tweets.append(json.loads(line)) except: continue # print(filename, len(tweets)) return tweets # strip each tweet object and keep only whats necessary in a dictonary def load_tweet(tweet, tweets_saved): try: # tweet_id = tweet['id'] tweet_text = tweet['text'] tweet_user_id = tweet['user']['id'] tweet_user_location = tweet['user']['location'] tweet_user_lang = tweet['user']['lang'] try: tweet_coordinates = tweet['coordinates']['coordinates'] except: tweet_coordinates = None try: tweet_place = tweet['place']['full_name'] except: tweet_place = None map_to_county = mapToCounty(tweet_place, tweet_user_location, tweet_coordinates) if map_to_county: tweet_county = int(map_to_county[0]) 
tweet_education_level = tuple(map_to_county[1:]) else: tweet_county = None tweet_education_level = None # created_at = tweet['created_at'] except KeyError: return {}, tweets_saved data = {'tweet_text': tweet_text, # 'tweet_id': tweet_id, 'tweet_user_id': tweet_user_id, # 'tweet_user_location': tweet_user_location, 'tweet_user_lang': tweet_user_lang, # 'tweet_place': tweet_place, # 'tweet_coordinates': tweet_coordinates, 'tweet_county': tweet_county, 'tweet_education_level': tweet_education_level} # 'date_loaded': datetime.datetime.now(), # 'tweet_json': json.dumps(tweet)} tweets_saved += 1 return data, tweets_saved wordPattern = re.compile(r"\b[A-Za-z_.,!\"']+\b", re.IGNORECASE) httpPattern = re.compile(r"^RT |@\S+|http\S+", re.IGNORECASE) # Function that uses regular expressions to remove unwanted characters, URLs, etc. and split tweet_text # into meaningful words def parseTweetText(tweet): text = tweet['tweet_text'] text = httpPattern.sub(r"", text) words = wordPattern.findall(text) tweet['tweet_text'] = words #list(zip(words, [1]*len(words))) # print(tweet) return tweet # function to combine word lists and count frequency of each word locally def combineWordLists(x ,y): global vocabulary if isinstance(x, dict): wordDict = x xny = y else: wordDict = dict() xny = x + y
for w in xny: # vocabulary +=[w] vocabulary += {w: 1} try: wordDict[w] += 1 except: wordDict[w] = 1 return wordDict # function to add words to the vocabulary and count frequency of each word globally def genVocabulary(x): global vocabulary arr = x[1] if isinstance(arr, dict): return x else: wordDict = dict() for w in arr: vocabulary += {w: 1} try: wordDict[w] += 1 except: wordDict[w] = 1 x = (x[0],wordDict) return x # read tweets from each file and parse them into dictionaries with only relevant data def handle_file(filename): tweets = load_bz2_json(filename) tweet_dicts = [] tweets_saved = 0 for tweet in tweets: tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved) if tweet_dict: tweet_dicts.append(tweet_dict) return tweet_dicts # filter only tweets that have text, land, education and are written in english def filterTweets(tweet): # location = tweet['tweet_user_location'] # coordinates = tweet['tweet_place'] # place = tweet['tweet_coordinates'] text = tweet['tweet_text'] lang = tweet['tweet_user_lang'] education = tweet['tweet_education_level'] county = tweet['tweet_county'] # if location or coordinates or place: ret = True # else: return False if not text or text == []: return False if lang != 'en': return False if education is None or county is None: return False return True # store all data into CSV files def storeResults(traindata, vocab): columnIdx = {vocab[voc][0]: voc for voc in range(len(vocab))} with open(TRAIN_FEAT_CSV, 'wt') as trainFeatFile, open(TRAIN_LABS_CSV, 'wt') as trainLabsFile, open(TRAIN_FEAT_LABS_CSV, 'wt') as trainFeatLabsFile: trainFeatwriter = csv.writer(trainFeatFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') trainLabswriter = csv.writer(trainLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') trainFeatLabswriter = csv.writer(trainFeatLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') for row in traindata: edu = row[0][1] 
featDict = row[1] feats = np.zeros(len(columnIdx)) for key in featDict: try: feats[columnIdx[key]] = featDict[key] except: continue trainFeatwriter.writerow(feats.tolist()) trainLabswriter.writerow(list(edu)) combList = list(edu) + feats.tolist() trainFeatLabswriter.writerow(combList) # main function with all the Spark code def main(): fileNames = sc.parallelize([]) # generate a list of all files in the data directory for root, dirs, files in os.walk(CACHE_DIR): subFileNames = sc.parallelize(files).map(lambda file: os.path.join(root, file)) fileNames = sc.union([fileNames, subFileNames]) # load all tweets and filter tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda tweet: filterTweets(tweet)) # clean, parse and filter tweets and map each to county and education level wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda tweet: filterTweets(tweet)) # set county and education level as the key for each tweet and keep only the text as value countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'], tweet['tweet_education_level']), tweet['tweet_text'])) # aggregate tweets based on county level and generate vocabulary countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)).map(lambda z: genVocabulary(z)) tempRes = countyEduRdd.collect() # print(tempRes) print(len(tempRes)) vocabRDD = sc.parallelize(vocabulary.value.items()) # filter out words that only occur once in the entire dataset (mainly noise) vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False) # print("vocabulary = ", sorted(vocabulary.value.items(), key=operator.itemgetter(1))) vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True) # print("vocabulary = ", vocab) print("vocabulary size = ", len(vocab)) storeResults(tempRes, vocab) if __name__ == "__main__": main()
random_line_split
main.py
import bz2 import json import os from pyspark.context import SparkContext from pyspark.accumulators import AccumulatorParam import numpy as np from scipy import spatial import pandas as pd import re import operator import csv CACHE_DIR = "D:\TwitterDatastream\PYTHONCACHE_SMALL" EDU_DATA = 'merged.csv' TRAIN_FEAT_CSV = 'testFeat.csv' TRAIN_LABS_CSV = 'testLabs.csv' TRAIN_FEAT_LABS_CSV = 'testFeatLabs.csv' FEATURE_NAMES_CSV = 'featureNames.csv' sc = SparkContext('local', 'test') # location_data = pd.read_csv('new_merged.csv') class WordsSetAccumulatorParam(AccumulatorParam): def zero(self, v): return set() def addInPlace(self, acc1, acc2): return acc1.union(acc2) # An accumulator used to build the word vocabulary class WordsDictAccumulatorParam(AccumulatorParam): def zero(self, v): return dict() def addInPlace(self, acc1, acc2): for key in acc2.keys(): try: acc1[key] += acc2[key] except: acc1[key] = acc2[key] return acc1 # An accumulator used to build the word vocabulary # vocabulary = sc.accumulator(set(), WordsSetAccumulatorParam()) vocabulary = sc.accumulator(dict(), WordsDictAccumulatorParam()) # load Education census data location_data = pd.read_csv(EDU_DATA) area_dict = dict(zip(location_data['city'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist())) county_dict = dict(zip(location_data['county'], location_data[['fips', 'without_hsd','with_hsd', 'somecollege', 'bachelors']].values.tolist())) coord_dict = {tuple(x[:2]):x[2] for x in location_data[['lat', 'lng', 'county']].values} # create a KD tree of known county center locations to be used to map a tweet coordinate to a county latlon = list() for index, row in location_data.iterrows(): latlon.append([location_data['lat'][index], location_data['lng'][index]]) latlon = np.array(latlon) latlonKDT = spatial.KDTree(latlon) # function to map place, location or coordinate data from a tweet to a FIPS code of the county and the education # level distribution of that county 
def mapToCounty(place, location, coordinates): # coordr_dict = {tuple(x[:2]):x[2] for x in location_data[['lat_r', 'lng_r', 'county']].values} if place: place = (place.split(",")[0]).lower() # country = (place.split(",")[1]).lower() try: if area_dict[place]: return area_dict[place] except: None if location: location = (location.split(",")[0]).lower() try: if area_dict[location]: return area_dict[location] except: None if coordinates: closestLoc = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9)[1] try: closest = latlon[closestLoc] except: return None # closest = spatial.KDTree(latlon).query(coordinates, k=1, distance_upper_bound=9) # if closest[0] != float('inf') and latlon[closest[1]][0] != 0. and latlon[closest[1]][1] != 0.: # print(coordinates, closest, latlon[closest[1]]) # return closest[0], closest[1] if coord_dict[closest[0], closest[1]]: county_k = coord_dict[(closest[0], closest[1])] return county_dict[county_k] return None # Load Tweets from each file (.bz2 or .json) def load_bz2_json(filename): if '.bz2' in filename: with bz2.open(filename, 'rt') as f: lines = str(f.read()).split('\n') else: with open(filename) as f: lines = str(f.readlines()).split('\\n') num_lines = len(lines) tweets = [] for line in lines: try: if line == "": num_lines -= 1 continue tweets.append(json.loads(line)) except: continue # print(filename, len(tweets)) return tweets # strip each tweet object and keep only whats necessary in a dictonary def load_tweet(tweet, tweets_saved): try: # tweet_id = tweet['id'] tweet_text = tweet['text'] tweet_user_id = tweet['user']['id'] tweet_user_location = tweet['user']['location'] tweet_user_lang = tweet['user']['lang'] try: tweet_coordinates = tweet['coordinates']['coordinates'] except: tweet_coordinates = None try: tweet_place = tweet['place']['full_name'] except: tweet_place = None map_to_county = mapToCounty(tweet_place, tweet_user_location, tweet_coordinates) if map_to_county: tweet_county = int(map_to_county[0]) 
tweet_education_level = tuple(map_to_county[1:]) else: tweet_county = None tweet_education_level = None # created_at = tweet['created_at'] except KeyError: return {}, tweets_saved data = {'tweet_text': tweet_text, # 'tweet_id': tweet_id, 'tweet_user_id': tweet_user_id, # 'tweet_user_location': tweet_user_location, 'tweet_user_lang': tweet_user_lang, # 'tweet_place': tweet_place, # 'tweet_coordinates': tweet_coordinates, 'tweet_county': tweet_county, 'tweet_education_level': tweet_education_level} # 'date_loaded': datetime.datetime.now(), # 'tweet_json': json.dumps(tweet)} tweets_saved += 1 return data, tweets_saved wordPattern = re.compile(r"\b[A-Za-z_.,!\"']+\b", re.IGNORECASE) httpPattern = re.compile(r"^RT |@\S+|http\S+", re.IGNORECASE) # Function that uses regular expressions to remove unwanted characters, URLs, etc. and split tweet_text # into meaningful words def parseTweetText(tweet): text = tweet['tweet_text'] text = httpPattern.sub(r"", text) words = wordPattern.findall(text) tweet['tweet_text'] = words #list(zip(words, [1]*len(words))) # print(tweet) return tweet # function to combine word lists and count frequency of each word locally def combineWordLists(x ,y):
# function to add words to the vocabulary and count frequency of each word globally def genVocabulary(x): global vocabulary arr = x[1] if isinstance(arr, dict): return x else: wordDict = dict() for w in arr: vocabulary += {w: 1} try: wordDict[w] += 1 except: wordDict[w] = 1 x = (x[0],wordDict) return x # read tweets from each file and parse them into dictionaries with only relevant data def handle_file(filename): tweets = load_bz2_json(filename) tweet_dicts = [] tweets_saved = 0 for tweet in tweets: tweet_dict, tweets_saved = load_tweet(tweet, tweets_saved) if tweet_dict: tweet_dicts.append(tweet_dict) return tweet_dicts # filter only tweets that have text, land, education and are written in english def filterTweets(tweet): # location = tweet['tweet_user_location'] # coordinates = tweet['tweet_place'] # place = tweet['tweet_coordinates'] text = tweet['tweet_text'] lang = tweet['tweet_user_lang'] education = tweet['tweet_education_level'] county = tweet['tweet_county'] # if location or coordinates or place: ret = True # else: return False if not text or text == []: return False if lang != 'en': return False if education is None or county is None: return False return True # store all data into CSV files def storeResults(traindata, vocab): columnIdx = {vocab[voc][0]: voc for voc in range(len(vocab))} with open(TRAIN_FEAT_CSV, 'wt') as trainFeatFile, open(TRAIN_LABS_CSV, 'wt') as trainLabsFile, open(TRAIN_FEAT_LABS_CSV, 'wt') as trainFeatLabsFile: trainFeatwriter = csv.writer(trainFeatFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') trainLabswriter = csv.writer(trainLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') trainFeatLabswriter = csv.writer(trainFeatLabsFile, delimiter=',',quotechar='|', quoting=csv.QUOTE_MINIMAL, lineterminator='\n') for row in traindata: edu = row[0][1] featDict = row[1] feats = np.zeros(len(columnIdx)) for key in featDict: try: feats[columnIdx[key]] = featDict[key] except: 
continue trainFeatwriter.writerow(feats.tolist()) trainLabswriter.writerow(list(edu)) combList = list(edu) + feats.tolist() trainFeatLabswriter.writerow(combList) # main function with all the Spark code def main(): fileNames = sc.parallelize([]) # generate a list of all files in the data directory for root, dirs, files in os.walk(CACHE_DIR): subFileNames = sc.parallelize(files).map(lambda file: os.path.join(root, file)) fileNames = sc.union([fileNames, subFileNames]) # load all tweets and filter tweetsRdd = fileNames.flatMap(lambda file: handle_file(file)).filter(lambda tweet: filterTweets(tweet)) # clean, parse and filter tweets and map each to county and education level wordsRdd = tweetsRdd.map(lambda tweet: parseTweetText(tweet)).filter(lambda tweet: filterTweets(tweet)) # set county and education level as the key for each tweet and keep only the text as value countyEduRdd = wordsRdd.map(lambda tweet: ((tweet['tweet_county'], tweet['tweet_education_level']), tweet['tweet_text'])) # aggregate tweets based on county level and generate vocabulary countyEduRdd = countyEduRdd.reduceByKey(lambda x, y: combineWordLists(x, y)).map(lambda z: genVocabulary(z)) tempRes = countyEduRdd.collect() # print(tempRes) print(len(tempRes)) vocabRDD = sc.parallelize(vocabulary.value.items()) # filter out words that only occur once in the entire dataset (mainly noise) vocabRDD = vocabRDD.filter(lambda voc: True if voc[1] > 1 else False) # print("vocabulary = ", sorted(vocabulary.value.items(), key=operator.itemgetter(1))) vocab = sorted(vocabRDD.collect(), key=operator.itemgetter(1), reverse=True) # print("vocabulary = ", vocab) print("vocabulary size = ", len(vocab)) storeResults(tempRes, vocab) if __name__ == "__main__": main()
global vocabulary if isinstance(x, dict): wordDict = x xny = y else: wordDict = dict() xny = x + y for w in xny: # vocabulary +=[w] vocabulary += {w: 1} try: wordDict[w] += 1 except: wordDict[w] = 1 return wordDict
identifier_body
conference.ts
import moment from 'moment' import { orderBy } from '../components/utils/arraySort' import SponsorData from '../config/sponsors' import { Conference as IConference, ImportantDate, TicketNumberWhileVoting, TicketsProvider } from './types' import venue from './venue' const name = 'DDD Sydney' const tagLine = `${name} is an inclusive non-profit conference for the Developer, Data and Design community` const hideDate = false const isSoldOut = false const date = moment('2019-09-21T08:00') const endDate = date.clone().add(12, 'h') const currentInstance = parseInt(date.format('YYYY'), 10) const firstInstance = 2016 const registrationOpenFrom = moment('2019-06-10T08:00:00') const registrationOpenUntil = hideDate ? null : date .clone() .add(-1, 'd') .startOf('day') .add(17, 'h') const presentationSubmissionsOpenFrom = moment('2019-06-10T08:00:00') const presentationSubmissionsOpenUntil = moment('2019-07-14T23:59:59') const votingOpenFrom = moment('2019-07-15T04:00:00') const votingOpenUntil = moment('2019-07-28T23:59:59') const agendaPublishedFrom = moment('2019-08-04T20:00:00') const feedbackOpenFrom = date.clone() const feedbackOpenUntil = endDate const importantDates: ImportantDate[] = [ { Date: presentationSubmissionsOpenFrom, Description: 'Call for presentations open', Type: 'content', }, { Date: presentationSubmissionsOpenUntil, Description: 'Call for presentations close', Type: 'content', }, { Date: registrationOpenFrom, Description: 'Ticket sales open', Type: 'tickets', }, { Description: 'Voting open', Date: votingOpenFrom, Type: 'voting' }, { Description: 'Voting close', Date: votingOpenUntil, Type: 'voting' }, { Date: agendaPublishedFrom, Description: 'Agenda published', Type: 'agenda', }, ] if (registrationOpenUntil !== null && !isSoldOut) { importantDates.push({ Date: registrationOpenUntil, Description: 'Ticket sales close', Type: 'tickets', }) } if (!hideDate) { importantDates.push({ Date: date, Description: 'Conference day', Type: 'conference', }) } const 
Conference: IConference = { AgendaPublishedFrom: agendaPublishedFrom, AnonymousReportFormUrl: '', AnonymousVoting: true, ContactEmail: 'team@dddsydney.com.au', ChildcarePrice: '', Date: date, DoorsOpenTime: '8:10am', EmergencyContactName: 'Aaron Powell', EmergencyContactPhoneNumber: '0439 878 200', EndDate: endDate, FeedbackOpenFrom: feedbackOpenFrom, FeedbackOpenUntil: feedbackOpenUntil, FinishTime: '5:10pm', Goal: 'We are dedicated to making the technology industry in Australia more inclusive: giving minority groups a voice in the future of digital.', GoogleAnalyticsId: 'UA-98480529-1', Handbook: null, HashTag: 'dddsydney', Instance: currentInstance.toString(), IsSoldOut: isSoldOut, MediaOfficerName: '', MentoringEmail: 'team@dddsydney.com.au', MaxVotes: 5, MinVotes: 1, getSubmissionsUrl: 'https://dddsydneyapi.azurewebsites.net/v2/sessions/2019', submitVoteUrl: 'https://dddsydneyapi.azurewebsites.net/v2/vote/2019', // getSubmissionsUrl: 'http://192.168.0.180:7071/v2/sessions/2019', // submitVoteUrl: 'http://192.168.0.180:7071/v2/vote/2019', Name: name, Organiser: { Name: 'SydDev Inc.', ShirtColour: 'black', Url: '', }, PreferentialVoting: false, PreviousInstance: (currentInstance - 1).toString(), PreviousInstances: [...Array(currentInstance - firstInstance).keys()].map((_, i) => (firstInstance + i).toString()), PreviouslySubmittedTopics: 'Agile, building great teams, UI design, software testing, virtual reality, open source software, bots, IoT, machine learning, automated deployments, mobile development, architecture, microservices, APIs, actors, JavaScript, authentication, React, UWP, HTTP protocol, Git, Docker and pointers', SellingPoints: ['One day', 'Fully catered', 'Inclusive atmosphere', 'Interesting presentations', 'Awesome people'], SessionizeEditUrl: 'https://sessionize.com/app/speaker/', SessionizeUrl: 'https://sessionize.com/ddd-sydney-2019/', SiteDescription: `${tagLine}.`, SponsorshipEmail: 'sponsors@dddsydney.com.au', TagLine: tagLine, 
TicketNumberWhileVoting: TicketNumberWhileVoting.Optional, TicketPrice: '~$60', TicketsProviderAccountId: 'ddd-sydney', TicketsProviderEventId: 'ddd-sydney-2019', TicketsProviderFinancialAssistanceCode: '', TicketsProviderId: TicketsProvider.Tito, PresentationSubmissionsOpenFrom: presentationSubmissionsOpenFrom, PresentationSubmissionsOpenUntil: presentationSubmissionsOpenUntil, RegistrationOpenFrom: registrationOpenFrom, RegistrationOpenUntil: registrationOpenUntil, VotingOpenFrom: votingOpenFrom, VotingOpenUntil: votingOpenUntil, HideAfterpartyVenue: venue === null || venue.Afterparty === null, HideDate: hideDate, HideSponsors: false, HideSponsorshipUpsell: false, HideVenue: venue === null, Venue: venue, Socials: { Blog: 'https://blog.dddsydney.com.au', Email: 'team@dddsydney.com.au', Facebook: '', Flickr: '', GitHub: 'dddsydney', MailingList: 'https://mailchi.mp/a016f18cb8a7/ddd-sydney-subscribers', Twitter: { Id: '', Name: 'dddsydney', }, Youtube: '', }, ImageStrip: [ { Url: '/static/images/strip/1.jpg', Alternate: 'Kris Howard delivering her 2017 locknote' }, { Url: '/static/images/strip/2.jpg', Alternate: 'Our 2017 speakers' }, { Url: '/static/images/strip/3.jpg', Alternate: 'Visting the readify booth' }, { Url: '/static/images/strip/4.jpg', Alternate: 'Early morning registration' }, { Url: '/static/images/strip/5.jpg', Alternate: 'Donna Edwards speaking at DDD 2017' }, ], ImportantContacts: { CentreAgainstSexualAssault: { Details: '1800 806 292', }, EmergencyMedical: { Details: '', MapUrl: '', }, NonEmergencyMedical: { Details: 'Broadway General Practice, Broadway Shopping Centre, M105, Level 1, Bay Street, Broadway NSW 2007', MapUrl: 'https://goo.gl/maps/epN85RpHomF2', }, Police: { Details: '', MapUrl: '', }, }, ImportantDates: orderBy(importantDates, i => i.Date),
Sponsors: SponsorData, Keynotes: [ { SessionAbstract: `Having different ideas, opinions, interests can be quite lonely and lead to thinking about where you can fit especially in the fast-paced tech industry. Coming to the industry, not by a traditional path, being the only one in the room to have a unique opinion can be very intimidating. Being unique can put us in a position where we think that we don't belong here. After 5 years in tech being different brought me to this state - a software engineer, a founder of the Australia wide diversity community Muses Code JS, a co-organizer of the international organization Women Who Code, Google Developer Expert in Web technologies and international speaker. Being different is great!`, SessionId: 'keynote', Presenters: [ { Bio: `Tanya Butenko is passionate about free education and diversity and doing all she can to support and promote women in IT. Teacher, translator, manager previously, nowadays software engineer and IT community activist. After finishing GeneralAssembly 4 years ago on web-development immersive 3-month course Tanya steps into IT world and started her journey as a web developer. Today she is working in Hireup as a software engineer, co-organizer of Women Who Code Sydney chapter, Muses Code JS (previously known as NodeGirls) Founder, CEO. Also Google Developer Expert in Web Technologies and international speaker. 
She is much into JavaScript and tech communities and that is where you can meet her in person across Australia.`, firstName: 'Tanya', lastName: 'Butenko', photo: '/static/images/2019/tanya-keynote.JPG', tagline: `#js and #nodejs developer, founder of @MusesCodeJS, free coding events enthusiast, @WWCSyd co-org, International speaker, #mentor @GoogleDevExpert`, twitter: 'ButenkoMe', url: 'https://musescodejs.org/', }, ], Tags: ['keynote'], SessionTitle: 'It is OK to be Different', RecommendedAudience: '', SessionLength: '', TrackType: 'keynote', Year: '2019', }, { SessionId: 'locknote', SessionAbstract: `Have you ever felt like a fraud? Like you did not deserve the opportunities in front of you? Have you ever thought "why me, I should not be here"? Alternatively, have you seen a talented colleague not recognising their own achievements? A colleague hesitant to take on a new role? This is imposter syndrome; I encounter it constantly and fight against it in myself and others. I want you to unite with me in this battle; for yourself, your colleagues and friends. Join me as I share my experiences with imposter syndrome, how I have fought against it and how you can join the fight.`, SessionTitle: 'Battles of an imposter', Tags: ['locknote'], RecommendedAudience: '', SessionLength: '', TrackType: 'locknote', Year: '2019', Presenters: [ { Bio: `Melissa Houghton is a Senior Developer at Readify. She has a passion for technology, learning and giving back to the community. She is an organiser for DDD Perth and has previously been involved in other NFP organisations such as CoderDojo and Rotary Youth Leadership Camps. 
Originally from California, Melissa loves to travel, drink wine and learn new things.`, firstName: 'Melissa', lastName: 'Houghton', photo: '/static/images/2019/melissa-locknote.jpg', tagline: 'Senior Software Developer @Readify | Organiser @DDDPerth conference | Traveller | Wine lover 🍷', twitter: 'meliss_houghton', url: '', }, ], }, ], ConferenceFeedbackLink: '', SessionFeedbackLink: '', } export default Conference
random_line_split
conference.ts
import moment from 'moment' import { orderBy } from '../components/utils/arraySort' import SponsorData from '../config/sponsors' import { Conference as IConference, ImportantDate, TicketNumberWhileVoting, TicketsProvider } from './types' import venue from './venue' const name = 'DDD Sydney' const tagLine = `${name} is an inclusive non-profit conference for the Developer, Data and Design community` const hideDate = false const isSoldOut = false const date = moment('2019-09-21T08:00') const endDate = date.clone().add(12, 'h') const currentInstance = parseInt(date.format('YYYY'), 10) const firstInstance = 2016 const registrationOpenFrom = moment('2019-06-10T08:00:00') const registrationOpenUntil = hideDate ? null : date .clone() .add(-1, 'd') .startOf('day') .add(17, 'h') const presentationSubmissionsOpenFrom = moment('2019-06-10T08:00:00') const presentationSubmissionsOpenUntil = moment('2019-07-14T23:59:59') const votingOpenFrom = moment('2019-07-15T04:00:00') const votingOpenUntil = moment('2019-07-28T23:59:59') const agendaPublishedFrom = moment('2019-08-04T20:00:00') const feedbackOpenFrom = date.clone() const feedbackOpenUntil = endDate const importantDates: ImportantDate[] = [ { Date: presentationSubmissionsOpenFrom, Description: 'Call for presentations open', Type: 'content', }, { Date: presentationSubmissionsOpenUntil, Description: 'Call for presentations close', Type: 'content', }, { Date: registrationOpenFrom, Description: 'Ticket sales open', Type: 'tickets', }, { Description: 'Voting open', Date: votingOpenFrom, Type: 'voting' }, { Description: 'Voting close', Date: votingOpenUntil, Type: 'voting' }, { Date: agendaPublishedFrom, Description: 'Agenda published', Type: 'agenda', }, ] if (registrationOpenUntil !== null && !isSoldOut) { importantDates.push({ Date: registrationOpenUntil, Description: 'Ticket sales close', Type: 'tickets', }) } if (!hideDate)
const Conference: IConference = { AgendaPublishedFrom: agendaPublishedFrom, AnonymousReportFormUrl: '', AnonymousVoting: true, ContactEmail: 'team@dddsydney.com.au', ChildcarePrice: '', Date: date, DoorsOpenTime: '8:10am', EmergencyContactName: 'Aaron Powell', EmergencyContactPhoneNumber: '0439 878 200', EndDate: endDate, FeedbackOpenFrom: feedbackOpenFrom, FeedbackOpenUntil: feedbackOpenUntil, FinishTime: '5:10pm', Goal: 'We are dedicated to making the technology industry in Australia more inclusive: giving minority groups a voice in the future of digital.', GoogleAnalyticsId: 'UA-98480529-1', Handbook: null, HashTag: 'dddsydney', Instance: currentInstance.toString(), IsSoldOut: isSoldOut, MediaOfficerName: '', MentoringEmail: 'team@dddsydney.com.au', MaxVotes: 5, MinVotes: 1, getSubmissionsUrl: 'https://dddsydneyapi.azurewebsites.net/v2/sessions/2019', submitVoteUrl: 'https://dddsydneyapi.azurewebsites.net/v2/vote/2019', // getSubmissionsUrl: 'http://192.168.0.180:7071/v2/sessions/2019', // submitVoteUrl: 'http://192.168.0.180:7071/v2/vote/2019', Name: name, Organiser: { Name: 'SydDev Inc.', ShirtColour: 'black', Url: '', }, PreferentialVoting: false, PreviousInstance: (currentInstance - 1).toString(), PreviousInstances: [...Array(currentInstance - firstInstance).keys()].map((_, i) => (firstInstance + i).toString()), PreviouslySubmittedTopics: 'Agile, building great teams, UI design, software testing, virtual reality, open source software, bots, IoT, machine learning, automated deployments, mobile development, architecture, microservices, APIs, actors, JavaScript, authentication, React, UWP, HTTP protocol, Git, Docker and pointers', SellingPoints: ['One day', 'Fully catered', 'Inclusive atmosphere', 'Interesting presentations', 'Awesome people'], SessionizeEditUrl: 'https://sessionize.com/app/speaker/', SessionizeUrl: 'https://sessionize.com/ddd-sydney-2019/', SiteDescription: `${tagLine}.`, SponsorshipEmail: 'sponsors@dddsydney.com.au', TagLine: tagLine, 
TicketNumberWhileVoting: TicketNumberWhileVoting.Optional, TicketPrice: '~$60', TicketsProviderAccountId: 'ddd-sydney', TicketsProviderEventId: 'ddd-sydney-2019', TicketsProviderFinancialAssistanceCode: '', TicketsProviderId: TicketsProvider.Tito, PresentationSubmissionsOpenFrom: presentationSubmissionsOpenFrom, PresentationSubmissionsOpenUntil: presentationSubmissionsOpenUntil, RegistrationOpenFrom: registrationOpenFrom, RegistrationOpenUntil: registrationOpenUntil, VotingOpenFrom: votingOpenFrom, VotingOpenUntil: votingOpenUntil, HideAfterpartyVenue: venue === null || venue.Afterparty === null, HideDate: hideDate, HideSponsors: false, HideSponsorshipUpsell: false, HideVenue: venue === null, Venue: venue, Socials: { Blog: 'https://blog.dddsydney.com.au', Email: 'team@dddsydney.com.au', Facebook: '', Flickr: '', GitHub: 'dddsydney', MailingList: 'https://mailchi.mp/a016f18cb8a7/ddd-sydney-subscribers', Twitter: { Id: '', Name: 'dddsydney', }, Youtube: '', }, ImageStrip: [ { Url: '/static/images/strip/1.jpg', Alternate: 'Kris Howard delivering her 2017 locknote' }, { Url: '/static/images/strip/2.jpg', Alternate: 'Our 2017 speakers' }, { Url: '/static/images/strip/3.jpg', Alternate: 'Visting the readify booth' }, { Url: '/static/images/strip/4.jpg', Alternate: 'Early morning registration' }, { Url: '/static/images/strip/5.jpg', Alternate: 'Donna Edwards speaking at DDD 2017' }, ], ImportantContacts: { CentreAgainstSexualAssault: { Details: '1800 806 292', }, EmergencyMedical: { Details: '', MapUrl: '', }, NonEmergencyMedical: { Details: 'Broadway General Practice, Broadway Shopping Centre, M105, Level 1, Bay Street, Broadway NSW 2007', MapUrl: 'https://goo.gl/maps/epN85RpHomF2', }, Police: { Details: '', MapUrl: '', }, }, ImportantDates: orderBy(importantDates, i => i.Date), Sponsors: SponsorData, Keynotes: [ { SessionAbstract: `Having different ideas, opinions, interests can be quite lonely and lead to thinking about where you can fit especially in the fast-paced 
tech industry. Coming to the industry, not by a traditional path, being the only one in the room to have a unique opinion can be very intimidating. Being unique can put us in a position where we think that we don't belong here. After 5 years in tech being different brought me to this state - a software engineer, a founder of the Australia wide diversity community Muses Code JS, a co-organizer of the international organization Women Who Code, Google Developer Expert in Web technologies and international speaker. Being different is great!`, SessionId: 'keynote', Presenters: [ { Bio: `Tanya Butenko is passionate about free education and diversity and doing all she can to support and promote women in IT. Teacher, translator, manager previously, nowadays software engineer and IT community activist. After finishing GeneralAssembly 4 years ago on web-development immersive 3-month course Tanya steps into IT world and started her journey as a web developer. Today she is working in Hireup as a software engineer, co-organizer of Women Who Code Sydney chapter, Muses Code JS (previously known as NodeGirls) Founder, CEO. Also Google Developer Expert in Web Technologies and international speaker. She is much into JavaScript and tech communities and that is where you can meet her in person across Australia.`, firstName: 'Tanya', lastName: 'Butenko', photo: '/static/images/2019/tanya-keynote.JPG', tagline: `#js and #nodejs developer, founder of @MusesCodeJS, free coding events enthusiast, @WWCSyd co-org, International speaker, #mentor @GoogleDevExpert`, twitter: 'ButenkoMe', url: 'https://musescodejs.org/', }, ], Tags: ['keynote'], SessionTitle: 'It is OK to be Different', RecommendedAudience: '', SessionLength: '', TrackType: 'keynote', Year: '2019', }, { SessionId: 'locknote', SessionAbstract: `Have you ever felt like a fraud? Like you did not deserve the opportunities in front of you? Have you ever thought "why me, I should not be here"? 
Alternatively, have you seen a talented colleague not recognising their own achievements? A colleague hesitant to take on a new role? This is imposter syndrome; I encounter it constantly and fight against it in myself and others. I want you to unite with me in this battle; for yourself, your colleagues and friends. Join me as I share my experiences with imposter syndrome, how I have fought against it and how you can join the fight.`, SessionTitle: 'Battles of an imposter', Tags: ['locknote'], RecommendedAudience: '', SessionLength: '', TrackType: 'locknote', Year: '2019', Presenters: [ { Bio: `Melissa Houghton is a Senior Developer at Readify. She has a passion for technology, learning and giving back to the community. She is an organiser for DDD Perth and has previously been involved in other NFP organisations such as CoderDojo and Rotary Youth Leadership Camps. Originally from California, Melissa loves to travel, drink wine and learn new things.`, firstName: 'Melissa', lastName: 'Houghton', photo: '/static/images/2019/melissa-locknote.jpg', tagline: 'Senior Software Developer @Readify | Organiser @DDDPerth conference | Traveller | Wine lover 🍷', twitter: 'meliss_houghton', url: '', }, ], }, ], ConferenceFeedbackLink: '', SessionFeedbackLink: '', } export default Conference
{ importantDates.push({ Date: date, Description: 'Conference day', Type: 'conference', }) }
conditional_block
plugin.go
package main import ( "context" "encoding/json" "fmt" "path" "regexp" "strconv" "strings" "time" "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/backend/datasource" "github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt" "github.com/grafana/grafana-plugin-sdk-go/backend/log" "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/hashicorp/consul/api" ) func newDatasource() datasource.ServeOpts { im := datasource.NewInstanceManager(newDataSourceInstance) ds := &ConsulDataSource{ im: im, } return datasource.ServeOpts{ QueryDataHandler: ds, CheckHealthHandler: ds, } } type ConsulDataSource struct { im instancemgmt.InstanceManager } // QueryData handles multiple queries and returns multiple responses. // req contains the queries []DataQuery (where each query contains RefID as a unique identifier). // The QueryDataResponse contains a map of RefID to the response for each query, and each response // contains Frames ([]*Frame). func (td *ConsulDataSource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { log.DefaultLogger.Debug("QueryData", "request", req) consul, err := td.getConsulClient(req.PluginContext) if err != nil { return nil, err } queries, err := parseQueries(req) if err != nil { return nil, err } if len(queries) == 0 { return nil, fmt.Errorf("no queries found in request") } return query(ctx, consul, queries), nil } func (td *ConsulDataSource) getConsulClient(pluginCtx backend.PluginContext) (*api.Client, error) { instance, err := td.im.Get(pluginCtx) if err != nil { return nil, fmt.Errorf("could not get plugin instance: %v", err) } instanceSettings, ok := instance.(*instanceSettings) if !ok { return nil, fmt.Errorf("could not get plugin instance") } return instanceSettings.consul, nil } type queryModel struct { Format string `json:"format"` Target string `json:"target"` Type string `json:"type"` Columns string `json:"columns"` Error error } func 
parseQueries(req *backend.QueryDataRequest) (map[string]queryModel, error) { log.DefaultLogger.Debug("parseQueries", "queries", req.Queries) queries := map[string]queryModel{} for _, rawQuery := range req.Queries { var q queryModel if err := json.Unmarshal(rawQuery.JSON, &q); err != nil { queries[rawQuery.RefID] = queryModel{Error: fmt.Errorf("error parsing query %s: %v", rawQuery.JSON, err)} continue } queries[rawQuery.RefID] = q } return queries, nil } func query(ctx context.Context, consul *api.Client, queries map[string]queryModel) *backend.QueryDataResponse { log.DefaultLogger.Debug("query", "queries", queries) response := backend.NewQueryDataResponse() for refID, query := range queries { if query.Error != nil { response.Responses[refID] = backend.DataResponse{Error: query.Error} continue } switch query.Format { case "", "timeseries": response.Responses[refID] = queryTimeSeries(ctx, consul, query) case "table": response.Responses[refID] = queryTable(ctx, consul, query) default: response.Responses[refID] = backend.DataResponse{Error: fmt.Errorf("unknown format %s", query.Format)} } } return response } func queryTimeSeries(ctx context.Context, consul *api.Client, query queryModel) backend.DataResponse { log.DefaultLogger.Debug("queryTimeSeries", "query", query) if query.Format == "" { log.DefaultLogger.Debug("format is empty. defaulting to time series") query.Format = "timeseries" } if query.Type == "" { log.DefaultLogger.Debug("type is empty. 
defaulting to get value") query.Type = "get" } // clean target q := strings.Replace(query.Target, "\\.", ".", -1) switch query.Type { case "get": return handleGet(ctx, consul, q) case "keys": return handleKeys(ctx, consul, q) case "tags": return handleTags(ctx, consul, q, false) case "tagsrec": return handleTags(ctx, consul, q, true) } return backend.DataResponse{Error: fmt.Errorf("unknown query type: %s", query.Type)} } func handleGet(ctx context.Context, consul *api.Client, target string) backend.DataResponse { log.DefaultLogger.Debug("handleGet", "target", target) if strings.HasSuffix(target, "/") { target = target[:len(target)-1] } var kvs []*api.KVPair kv, _, err := consul.KV().Get(target, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul get %s: %v", target, err)} } if kv != nil { kvs = append(kvs, kv) } return generateDataResponseFromKV(kvs) } func handleKeys(ctx context.Context, consul *api.Client, target string) backend.DataResponse { log.DefaultLogger.Debug("handleKeys", "target", target) if !strings.HasSuffix(target, "/") { target = target + "/" } keys, _, err := consul.KV().Keys(target, "/", (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul keys %s: %v", target, err)} } return generateDataResponseFromKeys(keys) } func handleTags(ctx context.Context, consul *api.Client, target string, recursive bool) backend.DataResponse { log.DefaultLogger.Debug("handleTags", "target", target) if !strings.HasSuffix(target, "/") { target = target + "/" } separator := "/" if recursive { separator = "" } keys, _, err := consul.KV().Keys(target, separator, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul keys %s: %v", target, err)} } var tagKVs []*api.KVPair for _, key := range keys { tagKV, _, err := 
consul.KV().Get(key, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul get %s: %v", key, err)} } if tagKV != nil
} return generateDataResponseWithTags(target, tagKVs) } func generateDataResponseFromKV(kvs []*api.KVPair) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseFromKV", "kv", kvs) response := backend.DataResponse{} for _, kv := range kvs { floatValue, err := strconv.ParseFloat(string(kv.Value), 64) if err != nil { return backend.DataResponse{Error: err} } now := time.Now() value := []float64{floatValue} log.DefaultLogger.Debug("appending data frame to response", "name", kv.Key, "time", now, "value", value) response.Frames = append(response.Frames, data.NewFrame(kv.Key, data.NewField("time", nil, []time.Time{now}), data.NewField("values", nil, value), )) } return response } func generateDataResponseFromKeys(keys []string) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseFromKeys", "keys", keys) response := backend.DataResponse{} for _, key := range keys { now := time.Now() value := []float64{1} log.DefaultLogger.Debug("appending data frame to response", "name", key, "time", now, "value", value) response.Frames = append(response.Frames, data.NewFrame(key, data.NewField("time", nil, []time.Time{now}), data.NewField("values", nil, value), )) } return response } func generateDataResponseWithTags(target string, tagKVs []*api.KVPair) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseWithTags", "tags", tagKVs) response := backend.DataResponse{} tags := data.Labels{} for _, tagKV := range tagKVs { tagName := strings.TrimPrefix(tagKV.Key, target) tagName = strings.Replace(tagName, "/", ".", -1) tags[tagName] = string(tagKV.Value) } now := time.Now() value := []float64{1} log.DefaultLogger.Debug("appending data frame to response", "name", target, "time", now, "value", value, "tags", tags) response.Frames = append(response.Frames, data.NewFrame(target, data.NewField("time", nil, []time.Time{now}), data.NewField("values", tags, value), )) return response } func queryTable(ctx context.Context, consul *api.Client, query 
queryModel) backend.DataResponse { log.DefaultLogger.Debug("queryTable", "query", query) defer func() { if err := recover(); err != nil { log.DefaultLogger.Error("Recovered in queryTable", "err", err) } }() // Compile targetRegex target := strings.Replace(query.Target, "*", ".*", -1) targetRegex, err := regexp.Compile(target) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error compiling regex %s: %v", target, err)} } // Calculate Prefix to execute consul.KV().Keys() on firstStar := strings.Index(query.Target, "*") prefix := query.Target if firstStar > 0 { prefix = query.Target[:firstStar] } // Get keys with prefix log.DefaultLogger.Debug("queryTable: get keys below prefix", "prefix", prefix) keys, _, err := consul.KV().Keys(prefix, "", &api.QueryOptions{}) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error gettings keys %s from consul: %v", prefix, err)} } // Filter keys that match the targetRegex // One matchingKey will be one line in the table var matchingKeys []string for _, key := range keys { if targetRegex.Match([]byte(key)) { matchingKeys = append(matchingKeys, key) } } columns := strings.Split(query.Columns, ",") fields := []*data.Field{} for rowIdx, key := range matchingKeys { for colIdx, col := range columns { // calculate key for column value colKey := calculateColumnKey(key, col) // get field from Consul field, value := getColumnValueForKey(ctx, consul, colKey) // If it's the first row ,append it to the fields array if rowIdx == 0 { log.DefaultLogger.Debug("queryTable: appending first row field", "value", value, "rowIdx", rowIdx, "colIdx", colIdx) fields = append(fields, field) continue } // Else, append it to the field of the current column log.DefaultLogger.Debug("queryTable: appending value to field", "value", value, "rowIdx", rowIdx, "colIdx", colIdx) fields[colIdx].Append(value) } } return backend.DataResponse{Frames: []*data.Frame{data.NewFrame("table", fields...)}} } func getColumnValueForKey(ctx 
context.Context, consul *api.Client, colKey string) (*data.Field, interface{}) { log.DefaultLogger.Debug("getColumnValueForKey", "key", colKey) kv, _, err := consul.KV().Get(colKey, (&api.QueryOptions{}).WithContext(ctx)) if err != nil || kv == nil { return data.NewField(path.Base(colKey), nil, []string{"Not Found"}), "Not Found" } // try to parse int intValue, err := strconv.ParseInt(string(kv.Value), 10, 64) if err != nil { return data.NewField(path.Base(colKey), nil, []string{string(kv.Value)}), string(kv.Value) } return data.NewField(path.Base(colKey), nil, []int64{intValue}), intValue } func calculateColumnKey(key string, col string) string { for strings.HasPrefix(col, "../") { lastSlash := strings.LastIndex(key, "/") key = key[:lastSlash] col = strings.TrimPrefix(col, "../") } return path.Join(key, col) } // CheckHealth handles health checks sent from Grafana to the plugin. // The main use case for these health checks is the test button on the // datasource configuration page which allows users to verify that // a datasource is working as expected. 
func (td *ConsulDataSource) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { log.DefaultLogger.Debug("CheckHealth", "request", req) consul, err := td.getConsulClient(req.PluginContext) if err != nil { return nil, err } if _, err := consul.Status().Leader(); err != nil { return &backend.CheckHealthResult{ Status: backend.HealthStatusError, Message: fmt.Sprintf("Consul health check failed: %v", err), }, nil } return &backend.CheckHealthResult{ Status: backend.HealthStatusOk, Message: "Consul data source is working", }, nil } type instanceSettings struct { consul *api.Client } type jsonData struct { ConsulAddr string } func newDataSourceInstance(setting backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) { jData := jsonData{} if err := json.Unmarshal(setting.JSONData, &jData); err != nil { return nil, fmt.Errorf("error decoding jsonData: %v", err) } if jData.ConsulAddr == "" { log.DefaultLogger.Error("newDataSourceInstance", "ConsulAddr", jData.ConsulAddr, "err", "consulAddr should not be empty") return nil, fmt.Errorf("consulAddr should not be empty") } conf := api.DefaultConfig() conf.Address = jData.ConsulAddr conf.Token = setting.DecryptedSecureJSONData["consulToken"] conf.TLSConfig.InsecureSkipVerify = true client, err := api.NewClient(conf) if err != nil { return nil, fmt.Errorf("error creating consul client: %v", err) } return &instanceSettings{ consul: client, }, nil } func (s *instanceSettings) Dispose() { }
{ tagKVs = append(tagKVs, tagKV) }
conditional_block
plugin.go
package main import ( "context" "encoding/json" "fmt" "path" "regexp" "strconv" "strings" "time" "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/backend/datasource" "github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt" "github.com/grafana/grafana-plugin-sdk-go/backend/log" "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/hashicorp/consul/api" ) func newDatasource() datasource.ServeOpts { im := datasource.NewInstanceManager(newDataSourceInstance) ds := &ConsulDataSource{ im: im, } return datasource.ServeOpts{ QueryDataHandler: ds, CheckHealthHandler: ds, } } type ConsulDataSource struct { im instancemgmt.InstanceManager } // QueryData handles multiple queries and returns multiple responses. // req contains the queries []DataQuery (where each query contains RefID as a unique identifier). // The QueryDataResponse contains a map of RefID to the response for each query, and each response // contains Frames ([]*Frame). func (td *ConsulDataSource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { log.DefaultLogger.Debug("QueryData", "request", req) consul, err := td.getConsulClient(req.PluginContext) if err != nil { return nil, err } queries, err := parseQueries(req) if err != nil { return nil, err } if len(queries) == 0 { return nil, fmt.Errorf("no queries found in request") } return query(ctx, consul, queries), nil } func (td *ConsulDataSource) getConsulClient(pluginCtx backend.PluginContext) (*api.Client, error) { instance, err := td.im.Get(pluginCtx) if err != nil { return nil, fmt.Errorf("could not get plugin instance: %v", err) } instanceSettings, ok := instance.(*instanceSettings) if !ok { return nil, fmt.Errorf("could not get plugin instance") } return instanceSettings.consul, nil } type queryModel struct { Format string `json:"format"` Target string `json:"target"` Type string `json:"type"` Columns string `json:"columns"` Error error } func 
parseQueries(req *backend.QueryDataRequest) (map[string]queryModel, error) { log.DefaultLogger.Debug("parseQueries", "queries", req.Queries) queries := map[string]queryModel{} for _, rawQuery := range req.Queries { var q queryModel if err := json.Unmarshal(rawQuery.JSON, &q); err != nil { queries[rawQuery.RefID] = queryModel{Error: fmt.Errorf("error parsing query %s: %v", rawQuery.JSON, err)} continue } queries[rawQuery.RefID] = q } return queries, nil } func query(ctx context.Context, consul *api.Client, queries map[string]queryModel) *backend.QueryDataResponse { log.DefaultLogger.Debug("query", "queries", queries) response := backend.NewQueryDataResponse() for refID, query := range queries { if query.Error != nil { response.Responses[refID] = backend.DataResponse{Error: query.Error} continue } switch query.Format { case "", "timeseries": response.Responses[refID] = queryTimeSeries(ctx, consul, query) case "table": response.Responses[refID] = queryTable(ctx, consul, query) default: response.Responses[refID] = backend.DataResponse{Error: fmt.Errorf("unknown format %s", query.Format)} } } return response } func
(ctx context.Context, consul *api.Client, query queryModel) backend.DataResponse { log.DefaultLogger.Debug("queryTimeSeries", "query", query) if query.Format == "" { log.DefaultLogger.Debug("format is empty. defaulting to time series") query.Format = "timeseries" } if query.Type == "" { log.DefaultLogger.Debug("type is empty. defaulting to get value") query.Type = "get" } // clean target q := strings.Replace(query.Target, "\\.", ".", -1) switch query.Type { case "get": return handleGet(ctx, consul, q) case "keys": return handleKeys(ctx, consul, q) case "tags": return handleTags(ctx, consul, q, false) case "tagsrec": return handleTags(ctx, consul, q, true) } return backend.DataResponse{Error: fmt.Errorf("unknown query type: %s", query.Type)} } func handleGet(ctx context.Context, consul *api.Client, target string) backend.DataResponse { log.DefaultLogger.Debug("handleGet", "target", target) if strings.HasSuffix(target, "/") { target = target[:len(target)-1] } var kvs []*api.KVPair kv, _, err := consul.KV().Get(target, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul get %s: %v", target, err)} } if kv != nil { kvs = append(kvs, kv) } return generateDataResponseFromKV(kvs) } func handleKeys(ctx context.Context, consul *api.Client, target string) backend.DataResponse { log.DefaultLogger.Debug("handleKeys", "target", target) if !strings.HasSuffix(target, "/") { target = target + "/" } keys, _, err := consul.KV().Keys(target, "/", (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul keys %s: %v", target, err)} } return generateDataResponseFromKeys(keys) } func handleTags(ctx context.Context, consul *api.Client, target string, recursive bool) backend.DataResponse { log.DefaultLogger.Debug("handleTags", "target", target) if !strings.HasSuffix(target, "/") { target = target + "/" } separator := "/" if 
recursive { separator = "" } keys, _, err := consul.KV().Keys(target, separator, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul keys %s: %v", target, err)} } var tagKVs []*api.KVPair for _, key := range keys { tagKV, _, err := consul.KV().Get(key, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul get %s: %v", key, err)} } if tagKV != nil { tagKVs = append(tagKVs, tagKV) } } return generateDataResponseWithTags(target, tagKVs) } func generateDataResponseFromKV(kvs []*api.KVPair) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseFromKV", "kv", kvs) response := backend.DataResponse{} for _, kv := range kvs { floatValue, err := strconv.ParseFloat(string(kv.Value), 64) if err != nil { return backend.DataResponse{Error: err} } now := time.Now() value := []float64{floatValue} log.DefaultLogger.Debug("appending data frame to response", "name", kv.Key, "time", now, "value", value) response.Frames = append(response.Frames, data.NewFrame(kv.Key, data.NewField("time", nil, []time.Time{now}), data.NewField("values", nil, value), )) } return response } func generateDataResponseFromKeys(keys []string) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseFromKeys", "keys", keys) response := backend.DataResponse{} for _, key := range keys { now := time.Now() value := []float64{1} log.DefaultLogger.Debug("appending data frame to response", "name", key, "time", now, "value", value) response.Frames = append(response.Frames, data.NewFrame(key, data.NewField("time", nil, []time.Time{now}), data.NewField("values", nil, value), )) } return response } func generateDataResponseWithTags(target string, tagKVs []*api.KVPair) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseWithTags", "tags", tagKVs) response := backend.DataResponse{} tags := data.Labels{} for _, 
tagKV := range tagKVs { tagName := strings.TrimPrefix(tagKV.Key, target) tagName = strings.Replace(tagName, "/", ".", -1) tags[tagName] = string(tagKV.Value) } now := time.Now() value := []float64{1} log.DefaultLogger.Debug("appending data frame to response", "name", target, "time", now, "value", value, "tags", tags) response.Frames = append(response.Frames, data.NewFrame(target, data.NewField("time", nil, []time.Time{now}), data.NewField("values", tags, value), )) return response } func queryTable(ctx context.Context, consul *api.Client, query queryModel) backend.DataResponse { log.DefaultLogger.Debug("queryTable", "query", query) defer func() { if err := recover(); err != nil { log.DefaultLogger.Error("Recovered in queryTable", "err", err) } }() // Compile targetRegex target := strings.Replace(query.Target, "*", ".*", -1) targetRegex, err := regexp.Compile(target) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error compiling regex %s: %v", target, err)} } // Calculate Prefix to execute consul.KV().Keys() on firstStar := strings.Index(query.Target, "*") prefix := query.Target if firstStar > 0 { prefix = query.Target[:firstStar] } // Get keys with prefix log.DefaultLogger.Debug("queryTable: get keys below prefix", "prefix", prefix) keys, _, err := consul.KV().Keys(prefix, "", &api.QueryOptions{}) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error gettings keys %s from consul: %v", prefix, err)} } // Filter keys that match the targetRegex // One matchingKey will be one line in the table var matchingKeys []string for _, key := range keys { if targetRegex.Match([]byte(key)) { matchingKeys = append(matchingKeys, key) } } columns := strings.Split(query.Columns, ",") fields := []*data.Field{} for rowIdx, key := range matchingKeys { for colIdx, col := range columns { // calculate key for column value colKey := calculateColumnKey(key, col) // get field from Consul field, value := getColumnValueForKey(ctx, consul, colKey) // If it's the 
first row ,append it to the fields array if rowIdx == 0 { log.DefaultLogger.Debug("queryTable: appending first row field", "value", value, "rowIdx", rowIdx, "colIdx", colIdx) fields = append(fields, field) continue } // Else, append it to the field of the current column log.DefaultLogger.Debug("queryTable: appending value to field", "value", value, "rowIdx", rowIdx, "colIdx", colIdx) fields[colIdx].Append(value) } } return backend.DataResponse{Frames: []*data.Frame{data.NewFrame("table", fields...)}} } func getColumnValueForKey(ctx context.Context, consul *api.Client, colKey string) (*data.Field, interface{}) { log.DefaultLogger.Debug("getColumnValueForKey", "key", colKey) kv, _, err := consul.KV().Get(colKey, (&api.QueryOptions{}).WithContext(ctx)) if err != nil || kv == nil { return data.NewField(path.Base(colKey), nil, []string{"Not Found"}), "Not Found" } // try to parse int intValue, err := strconv.ParseInt(string(kv.Value), 10, 64) if err != nil { return data.NewField(path.Base(colKey), nil, []string{string(kv.Value)}), string(kv.Value) } return data.NewField(path.Base(colKey), nil, []int64{intValue}), intValue } func calculateColumnKey(key string, col string) string { for strings.HasPrefix(col, "../") { lastSlash := strings.LastIndex(key, "/") key = key[:lastSlash] col = strings.TrimPrefix(col, "../") } return path.Join(key, col) } // CheckHealth handles health checks sent from Grafana to the plugin. // The main use case for these health checks is the test button on the // datasource configuration page which allows users to verify that // a datasource is working as expected. 
func (td *ConsulDataSource) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { log.DefaultLogger.Debug("CheckHealth", "request", req) consul, err := td.getConsulClient(req.PluginContext) if err != nil { return nil, err } if _, err := consul.Status().Leader(); err != nil { return &backend.CheckHealthResult{ Status: backend.HealthStatusError, Message: fmt.Sprintf("Consul health check failed: %v", err), }, nil } return &backend.CheckHealthResult{ Status: backend.HealthStatusOk, Message: "Consul data source is working", }, nil } type instanceSettings struct { consul *api.Client } type jsonData struct { ConsulAddr string } func newDataSourceInstance(setting backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) { jData := jsonData{} if err := json.Unmarshal(setting.JSONData, &jData); err != nil { return nil, fmt.Errorf("error decoding jsonData: %v", err) } if jData.ConsulAddr == "" { log.DefaultLogger.Error("newDataSourceInstance", "ConsulAddr", jData.ConsulAddr, "err", "consulAddr should not be empty") return nil, fmt.Errorf("consulAddr should not be empty") } conf := api.DefaultConfig() conf.Address = jData.ConsulAddr conf.Token = setting.DecryptedSecureJSONData["consulToken"] conf.TLSConfig.InsecureSkipVerify = true client, err := api.NewClient(conf) if err != nil { return nil, fmt.Errorf("error creating consul client: %v", err) } return &instanceSettings{ consul: client, }, nil } func (s *instanceSettings) Dispose() { }
queryTimeSeries
identifier_name
plugin.go
package main import ( "context" "encoding/json" "fmt" "path" "regexp" "strconv" "strings" "time" "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/backend/datasource" "github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt" "github.com/grafana/grafana-plugin-sdk-go/backend/log" "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/hashicorp/consul/api" ) func newDatasource() datasource.ServeOpts { im := datasource.NewInstanceManager(newDataSourceInstance) ds := &ConsulDataSource{ im: im, } return datasource.ServeOpts{ QueryDataHandler: ds, CheckHealthHandler: ds, } } type ConsulDataSource struct { im instancemgmt.InstanceManager } // QueryData handles multiple queries and returns multiple responses. // req contains the queries []DataQuery (where each query contains RefID as a unique identifier). // The QueryDataResponse contains a map of RefID to the response for each query, and each response // contains Frames ([]*Frame). func (td *ConsulDataSource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { log.DefaultLogger.Debug("QueryData", "request", req) consul, err := td.getConsulClient(req.PluginContext) if err != nil { return nil, err } queries, err := parseQueries(req) if err != nil { return nil, err } if len(queries) == 0 { return nil, fmt.Errorf("no queries found in request") } return query(ctx, consul, queries), nil } func (td *ConsulDataSource) getConsulClient(pluginCtx backend.PluginContext) (*api.Client, error) { instance, err := td.im.Get(pluginCtx) if err != nil { return nil, fmt.Errorf("could not get plugin instance: %v", err) } instanceSettings, ok := instance.(*instanceSettings) if !ok { return nil, fmt.Errorf("could not get plugin instance") } return instanceSettings.consul, nil } type queryModel struct { Format string `json:"format"` Target string `json:"target"` Type string `json:"type"` Columns string `json:"columns"` Error error } func 
parseQueries(req *backend.QueryDataRequest) (map[string]queryModel, error) { log.DefaultLogger.Debug("parseQueries", "queries", req.Queries) queries := map[string]queryModel{} for _, rawQuery := range req.Queries { var q queryModel if err := json.Unmarshal(rawQuery.JSON, &q); err != nil { queries[rawQuery.RefID] = queryModel{Error: fmt.Errorf("error parsing query %s: %v", rawQuery.JSON, err)} continue } queries[rawQuery.RefID] = q } return queries, nil } func query(ctx context.Context, consul *api.Client, queries map[string]queryModel) *backend.QueryDataResponse { log.DefaultLogger.Debug("query", "queries", queries) response := backend.NewQueryDataResponse() for refID, query := range queries { if query.Error != nil { response.Responses[refID] = backend.DataResponse{Error: query.Error} continue } switch query.Format { case "", "timeseries": response.Responses[refID] = queryTimeSeries(ctx, consul, query) case "table": response.Responses[refID] = queryTable(ctx, consul, query) default: response.Responses[refID] = backend.DataResponse{Error: fmt.Errorf("unknown format %s", query.Format)} } } return response } func queryTimeSeries(ctx context.Context, consul *api.Client, query queryModel) backend.DataResponse { log.DefaultLogger.Debug("queryTimeSeries", "query", query) if query.Format == "" { log.DefaultLogger.Debug("format is empty. defaulting to time series") query.Format = "timeseries" } if query.Type == "" { log.DefaultLogger.Debug("type is empty. 
defaulting to get value") query.Type = "get" } // clean target q := strings.Replace(query.Target, "\\.", ".", -1) switch query.Type { case "get": return handleGet(ctx, consul, q) case "keys": return handleKeys(ctx, consul, q) case "tags": return handleTags(ctx, consul, q, false) case "tagsrec": return handleTags(ctx, consul, q, true) } return backend.DataResponse{Error: fmt.Errorf("unknown query type: %s", query.Type)} } func handleGet(ctx context.Context, consul *api.Client, target string) backend.DataResponse { log.DefaultLogger.Debug("handleGet", "target", target) if strings.HasSuffix(target, "/") { target = target[:len(target)-1] } var kvs []*api.KVPair kv, _, err := consul.KV().Get(target, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul get %s: %v", target, err)} } if kv != nil { kvs = append(kvs, kv) } return generateDataResponseFromKV(kvs) } func handleKeys(ctx context.Context, consul *api.Client, target string) backend.DataResponse { log.DefaultLogger.Debug("handleKeys", "target", target) if !strings.HasSuffix(target, "/") { target = target + "/" } keys, _, err := consul.KV().Keys(target, "/", (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul keys %s: %v", target, err)} } return generateDataResponseFromKeys(keys) } func handleTags(ctx context.Context, consul *api.Client, target string, recursive bool) backend.DataResponse { log.DefaultLogger.Debug("handleTags", "target", target) if !strings.HasSuffix(target, "/") { target = target + "/" } separator := "/" if recursive { separator = "" } keys, _, err := consul.KV().Keys(target, separator, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul keys %s: %v", target, err)} } var tagKVs []*api.KVPair for _, key := range keys { tagKV, _, err := 
consul.KV().Get(key, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul get %s: %v", key, err)} } if tagKV != nil { tagKVs = append(tagKVs, tagKV) } } return generateDataResponseWithTags(target, tagKVs) } func generateDataResponseFromKV(kvs []*api.KVPair) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseFromKV", "kv", kvs) response := backend.DataResponse{} for _, kv := range kvs { floatValue, err := strconv.ParseFloat(string(kv.Value), 64) if err != nil { return backend.DataResponse{Error: err} } now := time.Now() value := []float64{floatValue} log.DefaultLogger.Debug("appending data frame to response", "name", kv.Key, "time", now, "value", value) response.Frames = append(response.Frames, data.NewFrame(kv.Key, data.NewField("time", nil, []time.Time{now}), data.NewField("values", nil, value), )) } return response } func generateDataResponseFromKeys(keys []string) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseFromKeys", "keys", keys) response := backend.DataResponse{} for _, key := range keys { now := time.Now() value := []float64{1} log.DefaultLogger.Debug("appending data frame to response", "name", key, "time", now, "value", value) response.Frames = append(response.Frames, data.NewFrame(key, data.NewField("time", nil, []time.Time{now}), data.NewField("values", nil, value), )) } return response } func generateDataResponseWithTags(target string, tagKVs []*api.KVPair) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseWithTags", "tags", tagKVs) response := backend.DataResponse{} tags := data.Labels{} for _, tagKV := range tagKVs { tagName := strings.TrimPrefix(tagKV.Key, target) tagName = strings.Replace(tagName, "/", ".", -1) tags[tagName] = string(tagKV.Value) } now := time.Now() value := []float64{1} log.DefaultLogger.Debug("appending data frame to response", "name", target, "time", now, "value", value, "tags", tags) 
response.Frames = append(response.Frames, data.NewFrame(target, data.NewField("time", nil, []time.Time{now}), data.NewField("values", tags, value), )) return response } func queryTable(ctx context.Context, consul *api.Client, query queryModel) backend.DataResponse { log.DefaultLogger.Debug("queryTable", "query", query) defer func() { if err := recover(); err != nil { log.DefaultLogger.Error("Recovered in queryTable", "err", err) } }() // Compile targetRegex target := strings.Replace(query.Target, "*", ".*", -1)
if err != nil { return backend.DataResponse{Error: fmt.Errorf("error compiling regex %s: %v", target, err)} } // Calculate Prefix to execute consul.KV().Keys() on firstStar := strings.Index(query.Target, "*") prefix := query.Target if firstStar > 0 { prefix = query.Target[:firstStar] } // Get keys with prefix log.DefaultLogger.Debug("queryTable: get keys below prefix", "prefix", prefix) keys, _, err := consul.KV().Keys(prefix, "", &api.QueryOptions{}) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error gettings keys %s from consul: %v", prefix, err)} } // Filter keys that match the targetRegex // One matchingKey will be one line in the table var matchingKeys []string for _, key := range keys { if targetRegex.Match([]byte(key)) { matchingKeys = append(matchingKeys, key) } } columns := strings.Split(query.Columns, ",") fields := []*data.Field{} for rowIdx, key := range matchingKeys { for colIdx, col := range columns { // calculate key for column value colKey := calculateColumnKey(key, col) // get field from Consul field, value := getColumnValueForKey(ctx, consul, colKey) // If it's the first row ,append it to the fields array if rowIdx == 0 { log.DefaultLogger.Debug("queryTable: appending first row field", "value", value, "rowIdx", rowIdx, "colIdx", colIdx) fields = append(fields, field) continue } // Else, append it to the field of the current column log.DefaultLogger.Debug("queryTable: appending value to field", "value", value, "rowIdx", rowIdx, "colIdx", colIdx) fields[colIdx].Append(value) } } return backend.DataResponse{Frames: []*data.Frame{data.NewFrame("table", fields...)}} } func getColumnValueForKey(ctx context.Context, consul *api.Client, colKey string) (*data.Field, interface{}) { log.DefaultLogger.Debug("getColumnValueForKey", "key", colKey) kv, _, err := consul.KV().Get(colKey, (&api.QueryOptions{}).WithContext(ctx)) if err != nil || kv == nil { return data.NewField(path.Base(colKey), nil, []string{"Not Found"}), "Not Found" } // try to 
parse int intValue, err := strconv.ParseInt(string(kv.Value), 10, 64) if err != nil { return data.NewField(path.Base(colKey), nil, []string{string(kv.Value)}), string(kv.Value) } return data.NewField(path.Base(colKey), nil, []int64{intValue}), intValue } func calculateColumnKey(key string, col string) string { for strings.HasPrefix(col, "../") { lastSlash := strings.LastIndex(key, "/") key = key[:lastSlash] col = strings.TrimPrefix(col, "../") } return path.Join(key, col) } // CheckHealth handles health checks sent from Grafana to the plugin. // The main use case for these health checks is the test button on the // datasource configuration page which allows users to verify that // a datasource is working as expected. func (td *ConsulDataSource) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { log.DefaultLogger.Debug("CheckHealth", "request", req) consul, err := td.getConsulClient(req.PluginContext) if err != nil { return nil, err } if _, err := consul.Status().Leader(); err != nil { return &backend.CheckHealthResult{ Status: backend.HealthStatusError, Message: fmt.Sprintf("Consul health check failed: %v", err), }, nil } return &backend.CheckHealthResult{ Status: backend.HealthStatusOk, Message: "Consul data source is working", }, nil } type instanceSettings struct { consul *api.Client } type jsonData struct { ConsulAddr string } func newDataSourceInstance(setting backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) { jData := jsonData{} if err := json.Unmarshal(setting.JSONData, &jData); err != nil { return nil, fmt.Errorf("error decoding jsonData: %v", err) } if jData.ConsulAddr == "" { log.DefaultLogger.Error("newDataSourceInstance", "ConsulAddr", jData.ConsulAddr, "err", "consulAddr should not be empty") return nil, fmt.Errorf("consulAddr should not be empty") } conf := api.DefaultConfig() conf.Address = jData.ConsulAddr conf.Token = setting.DecryptedSecureJSONData["consulToken"] 
conf.TLSConfig.InsecureSkipVerify = true client, err := api.NewClient(conf) if err != nil { return nil, fmt.Errorf("error creating consul client: %v", err) } return &instanceSettings{ consul: client, }, nil } func (s *instanceSettings) Dispose() { }
targetRegex, err := regexp.Compile(target)
random_line_split
plugin.go
package main import ( "context" "encoding/json" "fmt" "path" "regexp" "strconv" "strings" "time" "github.com/grafana/grafana-plugin-sdk-go/backend" "github.com/grafana/grafana-plugin-sdk-go/backend/datasource" "github.com/grafana/grafana-plugin-sdk-go/backend/instancemgmt" "github.com/grafana/grafana-plugin-sdk-go/backend/log" "github.com/grafana/grafana-plugin-sdk-go/data" "github.com/hashicorp/consul/api" ) func newDatasource() datasource.ServeOpts { im := datasource.NewInstanceManager(newDataSourceInstance) ds := &ConsulDataSource{ im: im, } return datasource.ServeOpts{ QueryDataHandler: ds, CheckHealthHandler: ds, } } type ConsulDataSource struct { im instancemgmt.InstanceManager } // QueryData handles multiple queries and returns multiple responses. // req contains the queries []DataQuery (where each query contains RefID as a unique identifier). // The QueryDataResponse contains a map of RefID to the response for each query, and each response // contains Frames ([]*Frame). func (td *ConsulDataSource) QueryData(ctx context.Context, req *backend.QueryDataRequest) (*backend.QueryDataResponse, error) { log.DefaultLogger.Debug("QueryData", "request", req) consul, err := td.getConsulClient(req.PluginContext) if err != nil { return nil, err } queries, err := parseQueries(req) if err != nil { return nil, err } if len(queries) == 0 { return nil, fmt.Errorf("no queries found in request") } return query(ctx, consul, queries), nil } func (td *ConsulDataSource) getConsulClient(pluginCtx backend.PluginContext) (*api.Client, error) { instance, err := td.im.Get(pluginCtx) if err != nil { return nil, fmt.Errorf("could not get plugin instance: %v", err) } instanceSettings, ok := instance.(*instanceSettings) if !ok { return nil, fmt.Errorf("could not get plugin instance") } return instanceSettings.consul, nil } type queryModel struct { Format string `json:"format"` Target string `json:"target"` Type string `json:"type"` Columns string `json:"columns"` Error error } func 
parseQueries(req *backend.QueryDataRequest) (map[string]queryModel, error) { log.DefaultLogger.Debug("parseQueries", "queries", req.Queries) queries := map[string]queryModel{} for _, rawQuery := range req.Queries { var q queryModel if err := json.Unmarshal(rawQuery.JSON, &q); err != nil { queries[rawQuery.RefID] = queryModel{Error: fmt.Errorf("error parsing query %s: %v", rawQuery.JSON, err)} continue } queries[rawQuery.RefID] = q } return queries, nil } func query(ctx context.Context, consul *api.Client, queries map[string]queryModel) *backend.QueryDataResponse { log.DefaultLogger.Debug("query", "queries", queries) response := backend.NewQueryDataResponse() for refID, query := range queries { if query.Error != nil { response.Responses[refID] = backend.DataResponse{Error: query.Error} continue } switch query.Format { case "", "timeseries": response.Responses[refID] = queryTimeSeries(ctx, consul, query) case "table": response.Responses[refID] = queryTable(ctx, consul, query) default: response.Responses[refID] = backend.DataResponse{Error: fmt.Errorf("unknown format %s", query.Format)} } } return response } func queryTimeSeries(ctx context.Context, consul *api.Client, query queryModel) backend.DataResponse { log.DefaultLogger.Debug("queryTimeSeries", "query", query) if query.Format == "" { log.DefaultLogger.Debug("format is empty. defaulting to time series") query.Format = "timeseries" } if query.Type == "" { log.DefaultLogger.Debug("type is empty. defaulting to get value") query.Type = "get" } // clean target q := strings.Replace(query.Target, "\\.", ".", -1) switch query.Type { case "get": return handleGet(ctx, consul, q) case "keys": return handleKeys(ctx, consul, q) case "tags": return handleTags(ctx, consul, q, false) case "tagsrec": return handleTags(ctx, consul, q, true) } return backend.DataResponse{Error: fmt.Errorf("unknown query type: %s", query.Type)} } func handleGet(ctx context.Context, consul *api.Client, target string) backend.DataResponse
func handleKeys(ctx context.Context, consul *api.Client, target string) backend.DataResponse { log.DefaultLogger.Debug("handleKeys", "target", target) if !strings.HasSuffix(target, "/") { target = target + "/" } keys, _, err := consul.KV().Keys(target, "/", (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul keys %s: %v", target, err)} } return generateDataResponseFromKeys(keys) } func handleTags(ctx context.Context, consul *api.Client, target string, recursive bool) backend.DataResponse { log.DefaultLogger.Debug("handleTags", "target", target) if !strings.HasSuffix(target, "/") { target = target + "/" } separator := "/" if recursive { separator = "" } keys, _, err := consul.KV().Keys(target, separator, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul keys %s: %v", target, err)} } var tagKVs []*api.KVPair for _, key := range keys { tagKV, _, err := consul.KV().Get(key, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul get %s: %v", key, err)} } if tagKV != nil { tagKVs = append(tagKVs, tagKV) } } return generateDataResponseWithTags(target, tagKVs) } func generateDataResponseFromKV(kvs []*api.KVPair) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseFromKV", "kv", kvs) response := backend.DataResponse{} for _, kv := range kvs { floatValue, err := strconv.ParseFloat(string(kv.Value), 64) if err != nil { return backend.DataResponse{Error: err} } now := time.Now() value := []float64{floatValue} log.DefaultLogger.Debug("appending data frame to response", "name", kv.Key, "time", now, "value", value) response.Frames = append(response.Frames, data.NewFrame(kv.Key, data.NewField("time", nil, []time.Time{now}), data.NewField("values", nil, value), )) } return response } func 
generateDataResponseFromKeys(keys []string) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseFromKeys", "keys", keys) response := backend.DataResponse{} for _, key := range keys { now := time.Now() value := []float64{1} log.DefaultLogger.Debug("appending data frame to response", "name", key, "time", now, "value", value) response.Frames = append(response.Frames, data.NewFrame(key, data.NewField("time", nil, []time.Time{now}), data.NewField("values", nil, value), )) } return response } func generateDataResponseWithTags(target string, tagKVs []*api.KVPair) backend.DataResponse { log.DefaultLogger.Debug("generateDataResponseWithTags", "tags", tagKVs) response := backend.DataResponse{} tags := data.Labels{} for _, tagKV := range tagKVs { tagName := strings.TrimPrefix(tagKV.Key, target) tagName = strings.Replace(tagName, "/", ".", -1) tags[tagName] = string(tagKV.Value) } now := time.Now() value := []float64{1} log.DefaultLogger.Debug("appending data frame to response", "name", target, "time", now, "value", value, "tags", tags) response.Frames = append(response.Frames, data.NewFrame(target, data.NewField("time", nil, []time.Time{now}), data.NewField("values", tags, value), )) return response } func queryTable(ctx context.Context, consul *api.Client, query queryModel) backend.DataResponse { log.DefaultLogger.Debug("queryTable", "query", query) defer func() { if err := recover(); err != nil { log.DefaultLogger.Error("Recovered in queryTable", "err", err) } }() // Compile targetRegex target := strings.Replace(query.Target, "*", ".*", -1) targetRegex, err := regexp.Compile(target) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error compiling regex %s: %v", target, err)} } // Calculate Prefix to execute consul.KV().Keys() on firstStar := strings.Index(query.Target, "*") prefix := query.Target if firstStar > 0 { prefix = query.Target[:firstStar] } // Get keys with prefix log.DefaultLogger.Debug("queryTable: get keys below prefix", "prefix", 
prefix) keys, _, err := consul.KV().Keys(prefix, "", &api.QueryOptions{}) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error gettings keys %s from consul: %v", prefix, err)} } // Filter keys that match the targetRegex // One matchingKey will be one line in the table var matchingKeys []string for _, key := range keys { if targetRegex.Match([]byte(key)) { matchingKeys = append(matchingKeys, key) } } columns := strings.Split(query.Columns, ",") fields := []*data.Field{} for rowIdx, key := range matchingKeys { for colIdx, col := range columns { // calculate key for column value colKey := calculateColumnKey(key, col) // get field from Consul field, value := getColumnValueForKey(ctx, consul, colKey) // If it's the first row ,append it to the fields array if rowIdx == 0 { log.DefaultLogger.Debug("queryTable: appending first row field", "value", value, "rowIdx", rowIdx, "colIdx", colIdx) fields = append(fields, field) continue } // Else, append it to the field of the current column log.DefaultLogger.Debug("queryTable: appending value to field", "value", value, "rowIdx", rowIdx, "colIdx", colIdx) fields[colIdx].Append(value) } } return backend.DataResponse{Frames: []*data.Frame{data.NewFrame("table", fields...)}} } func getColumnValueForKey(ctx context.Context, consul *api.Client, colKey string) (*data.Field, interface{}) { log.DefaultLogger.Debug("getColumnValueForKey", "key", colKey) kv, _, err := consul.KV().Get(colKey, (&api.QueryOptions{}).WithContext(ctx)) if err != nil || kv == nil { return data.NewField(path.Base(colKey), nil, []string{"Not Found"}), "Not Found" } // try to parse int intValue, err := strconv.ParseInt(string(kv.Value), 10, 64) if err != nil { return data.NewField(path.Base(colKey), nil, []string{string(kv.Value)}), string(kv.Value) } return data.NewField(path.Base(colKey), nil, []int64{intValue}), intValue } func calculateColumnKey(key string, col string) string { for strings.HasPrefix(col, "../") { lastSlash := 
strings.LastIndex(key, "/") key = key[:lastSlash] col = strings.TrimPrefix(col, "../") } return path.Join(key, col) } // CheckHealth handles health checks sent from Grafana to the plugin. // The main use case for these health checks is the test button on the // datasource configuration page which allows users to verify that // a datasource is working as expected. func (td *ConsulDataSource) CheckHealth(ctx context.Context, req *backend.CheckHealthRequest) (*backend.CheckHealthResult, error) { log.DefaultLogger.Debug("CheckHealth", "request", req) consul, err := td.getConsulClient(req.PluginContext) if err != nil { return nil, err } if _, err := consul.Status().Leader(); err != nil { return &backend.CheckHealthResult{ Status: backend.HealthStatusError, Message: fmt.Sprintf("Consul health check failed: %v", err), }, nil } return &backend.CheckHealthResult{ Status: backend.HealthStatusOk, Message: "Consul data source is working", }, nil } type instanceSettings struct { consul *api.Client } type jsonData struct { ConsulAddr string } func newDataSourceInstance(setting backend.DataSourceInstanceSettings) (instancemgmt.Instance, error) { jData := jsonData{} if err := json.Unmarshal(setting.JSONData, &jData); err != nil { return nil, fmt.Errorf("error decoding jsonData: %v", err) } if jData.ConsulAddr == "" { log.DefaultLogger.Error("newDataSourceInstance", "ConsulAddr", jData.ConsulAddr, "err", "consulAddr should not be empty") return nil, fmt.Errorf("consulAddr should not be empty") } conf := api.DefaultConfig() conf.Address = jData.ConsulAddr conf.Token = setting.DecryptedSecureJSONData["consulToken"] conf.TLSConfig.InsecureSkipVerify = true client, err := api.NewClient(conf) if err != nil { return nil, fmt.Errorf("error creating consul client: %v", err) } return &instanceSettings{ consul: client, }, nil } func (s *instanceSettings) Dispose() { }
{ log.DefaultLogger.Debug("handleGet", "target", target) if strings.HasSuffix(target, "/") { target = target[:len(target)-1] } var kvs []*api.KVPair kv, _, err := consul.KV().Get(target, (&api.QueryOptions{RequireConsistent: true}).WithContext(ctx)) if err != nil { return backend.DataResponse{Error: fmt.Errorf("error consul get %s: %v", target, err)} } if kv != nil { kvs = append(kvs, kv) } return generateDataResponseFromKV(kvs) }
identifier_body
util.py
__author__ = 'siyuqiu' import numpy as np from scipy.stats import norm from tweetsManager import textManager from random import shuffle from sklearn.feature_extraction.text import TfidfVectorizer from sklearn import svm from sklearn.linear_model import SGDClassifier from sklearn import neighbors from sklearn import cross_validation import string import re from math import * from collections import Counter import operator import wordnetutil from sklearn.cluster import KMeans from collections import defaultdict import warnings warnings.simplefilter("error") class statis: def __init__(self, arr):
def setArray(self,arr): self.array = np.array(arr) def appendArray(self,num): self.plain_arr.append(num) def getPlainArr(self): return self.plain_arr def setFromPlainArr(self): self.array = np.array(self.plain_arr) def getavg(self): try: return np.mean(self.array) except: return 0 def getstd(self): try: return np.std(self.array) except: return 0 def getmin(self): try: return np.min(self.array) except: return 0 def getmax(self): try: return np.max(self.array) except: return 0 def getreport(self): f ={'avg':self.getavg, 'std':self.getstd, 'max':self.getmax, 'min':self.getmin} ret = "" for k, v in f.items(): ret += k+": "+ str(v())+'\n' return ret def getvalue(self,x, mean, std): return norm.pdf(x, mean, std) class dataprepare: def __init__(self): self.tweetmanager = textManager() self.punctuation = list(string.punctuation) def cleantext(self, fname): ff = open(fname.split('.')[0]+'_cleaned.txt','w') with open(fname) as f: for l in f.readlines(): tokens = self.tweetmanager.tokenizefromstring(l) for t in tokens: try: ff.write(t.encode('utf-8')+" ") except: pass ff.write('\n') f.close() ff.close() return ff.name.__str__() def labeldata(self,f1,f2): ls = [(l[:-1],1) for l in open(f1,'r').readlines()] + [(l[:-1],0) for l in open(f2,'r').readlines()] shuffle(ls) f = open('train.txt','w') for l in ls: f.write(l[0]+'\t'+str(l[1])+'\n') f.close() def avgch(self,ws): total = reduce(lambda x,y: x+len(y), ws,0) return round(total/(len(ws)+1e-10),2) def genfeature(self, ls_x): ''' a. Shallow features 1. number of words in the sentence (normalize) 2. average number of characters in the words 3. percentage of stop words 4. 
minimum, maximum and average inverse document frequency :param ls_x: sencences X without label :return: ''' vectorizer = TfidfVectorizer(stop_words='english',smooth_idf=True, sublinear_tf=False, use_idf=True) tfidf = vectorizer.fit_transform(ls_x) array = tfidf.toarray() X = [] append = X.append maxtoken = 0 for idx,l in enumerate(ls_x): ws = l.split() maxtoken = max(len(ws),maxtoken) try: stops = round(reduce(lambda x,y: x+1 if y in self.tweetmanager.stop else x, ws,0)/(len(ws)+1e-10),2) except: pass append([len(ws),self.avgch(ws), stops, min(array[idx]), max(array[idx]), sum(array[idx])/len(array[idx])]) return [[round(x[0]*1.0/maxtoken,2)] + x[1:] for x in X] def crossvalidation(self, rawX, Y): trainF = self.genfeature(rawX) X_train, X_test, y_train, y_test = cross_validation.train_test_split(trainF, Y, test_size=0.4, random_state=0) clf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train) print 'svc linear', clf.score(X_test, y_test),clf.coef_ clf = SGDClassifier(loss="hinge", penalty="l2").fit(X_train,y_train) print 'SGDC hinge/l2',clf.score(X_test,y_test),clf.coef_ clf = neighbors.KNeighborsClassifier(5 , weights='uniform').fit(X_train,y_train) print 'KNN 5/uniform',clf.score(X_test,y_test) def genParaphrase(self, fname): tweet = {} ret = [] with open(fname) as f: for l in f.readlines(): nl = ''.join(ch for ch in l if ch not in self.punctuation) if len(nl.strip()) == 0: sorted_x = dict(sorted(tweet.items(), key=operator.itemgetter(1))) ret.append([k for k,v in sorted_x.items() if v > 1]) tweet.clear() continue try: tweet[nl] += 1 except: tweet[nl] = 1 return ret def genParaterm(self,fname): terms = {} result = [] with open(fname) as f: for l in f.readlines(): if len(l.strip()) == 0: sorted_x = dict(sorted(terms.items(), key=operator.itemgetter(1))) result.append([k for k,v in sorted_x.items() if v > 1]) terms.clear() continue ret = self.tweetmanager.tokenizefromstring(l) for v, w in zip(ret[:-1], ret[1:]): try: terms[v+" "+w] += 1 except: terms[v+" "+w] = 1 
return result class sentenceSimilarity: def __init__(self): self.WORD = re.compile(r'\w+') def excatWordscore(self, text1, text2): vector1 = self.text_to_vector(text1) vector2 = self.text_to_vector(text2) return self.get_cosine(vector1, vector2) def groupExcatWordscore(self, candi): scores = defaultdict(list) l = len(candi) ret = [] total = [] for i in xrange(l): for j in xrange(i+1, l): t = self.excatWordscore(candi[i], candi[j]) scores[i].append(t) total.append(t) scores[j].append(t) # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') stat = statis(total) avg = stat.getavg() std = stat.getstd() lower = avg-std upper = avg+std for k,v in scores.items(): cur = statis(v) cur_avg = cur.getavg() if cur_avg > upper or cur_avg < lower: continue ret.append(candi[k]) return ret def get_cosine(self,vec1, vec2): intersection = set(vec1.keys()) & set(vec2.keys()) numerator = sum([vec1[x] * vec2[x] for x in intersection]) sum1 = sum([vec1[x]**2 for x in vec1.keys()]) sum2 = sum([vec2[x]**2 for x in vec2.keys()]) denominator = sqrt(sum1) * sqrt(sum2) if not denominator: return 0.0 else: return float(numerator) / denominator def text_to_vector(self,text): words = self.WORD.findall(text) return Counter(words) def buildEmbedding(self): self.w2v = {} with open('files/glove.twitter.27B.50d.txt') as f: for line in f: pts = line.split() self.w2v[pts[0]] = [float(x) for x in pts[1:]] f.close() def sentenceEmbedding(self, line): token = line.split() count = 0 ret = [0 for _ in xrange(len(self.w2v[self.w2v.keys()[0]]))] for t in token: if self.w2v.has_key(t): ret = map(operator.add, ret, self.w2v[t]) count += 1 if count == 0: return ret else: return [x/count for x in ret] def square_rooted(self,x): return round(sqrt(sum([a*a for a in x])),3) def similarity(self,x,y): numerator = sum(a*b for a,b in zip(x,y)) denominator = self.square_rooted(x)*self.square_rooted(y)+1e-10 return 
round(numerator/float(denominator),3) def embeddingScore(self, candi): scores = {} embed = {} ret = [] total = [] for idx,c in enumerate(candi): embed[idx] = self.sentenceEmbedding(c) l = len(candi) for i in xrange(l): try: scores[i] += 0 except: scores[i] = 0 for j in xrange(i+1, l): t = self.similarity(embed[i], embed[j]) # assert t is not None scores[i] += t total.append(t) try: scores[j] += t except: scores[j] = t try: scores[i] /= (l-1) except: pass # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') # stat = statis(total) # std = stat.getstd() # avg = stat.getavg() try: threshold = sum(total)/len(total) except: threshold = 0 print 'embedding',threshold for k,v in scores.items(): if v > threshold: ret.append(candi[k]) return ret def wordNetScore(self,candi): scores = {} l = len(candi) ret = [] total = [] for i in xrange(l): try: scores[i] += 0 except: scores[i] = 0 for j in xrange(i+1, l): c1 = re.sub(r'[^\w\s]+','',candi[i]) c2 = re.sub(r'[^\w\s]+','',candi[j]) t = wordnetutil.similarity(c1,c2,True) total.append(t) scores[i] += t try: scores[j] += t except: scores[j] = t try: scores[i] /= (l-1) except: pass # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') try: threshold = sum(total)/len(total) except: threshold = 0 print 'wordnet',threshold for k,v in scores.items(): if v > threshold: ret.append(candi[k]) return ret def extracAllword(self,candi): words = set() for s in candi: ws = self.WORD.findall(s) for w in ws: words.add(w) return list(words) def KnnClassify(self,candi): words = self.extracAllword(candi) word_dict = {w:idx for idx, w in enumerate(words)} x = [[0 for _ in xrange(len(words))] for _ in xrange(len(candi))] if len(x) < 3: return candi for id, s in enumerate(candi): tmp = self.text_to_vector(s) for k,v in tmp.items(): 
x[id][word_dict[k]] = float(v) km = KMeans(n_clusters=3) km.fit(x) samples = {} X_new = km.transform(x) # try: # X_new = km.transform(x) # except: # print 'mooo' for idx, l in enumerate(km.labels_): try: samples[l][idx] = X_new[idx][l] except: samples[l] ={} samples[l][idx] = X_new[idx][l] ret = [] for k, v in samples.items(): sortedv = sorted(v.items(), key=operator.itemgetter(1), reverse=True) for it in sortedv: ret.append(candi[it[0]]) return ret
if arr: self.array = np.array(arr) self.plain_arr = []
identifier_body
util.py
__author__ = 'siyuqiu' import numpy as np from scipy.stats import norm from tweetsManager import textManager from random import shuffle from sklearn.feature_extraction.text import TfidfVectorizer from sklearn import svm from sklearn.linear_model import SGDClassifier from sklearn import neighbors from sklearn import cross_validation import string import re from math import * from collections import Counter import operator import wordnetutil from sklearn.cluster import KMeans from collections import defaultdict import warnings warnings.simplefilter("error") class statis: def __init__(self, arr): if arr: self.array = np.array(arr) self.plain_arr = [] def setArray(self,arr): self.array = np.array(arr) def appendArray(self,num): self.plain_arr.append(num) def getPlainArr(self): return self.plain_arr def setFromPlainArr(self): self.array = np.array(self.plain_arr) def getavg(self): try: return np.mean(self.array) except: return 0 def getstd(self): try: return np.std(self.array) except: return 0 def getmin(self): try: return np.min(self.array) except: return 0 def getmax(self): try: return np.max(self.array) except: return 0 def getreport(self): f ={'avg':self.getavg, 'std':self.getstd, 'max':self.getmax, 'min':self.getmin} ret = "" for k, v in f.items(): ret += k+": "+ str(v())+'\n' return ret def getvalue(self,x, mean, std): return norm.pdf(x, mean, std) class dataprepare: def __init__(self): self.tweetmanager = textManager() self.punctuation = list(string.punctuation) def cleantext(self, fname): ff = open(fname.split('.')[0]+'_cleaned.txt','w') with open(fname) as f: for l in f.readlines(): tokens = self.tweetmanager.tokenizefromstring(l) for t in tokens: try: ff.write(t.encode('utf-8')+" ") except: pass ff.write('\n') f.close() ff.close() return ff.name.__str__() def labeldata(self,f1,f2): ls = [(l[:-1],1) for l in open(f1,'r').readlines()] + [(l[:-1],0) for l in open(f2,'r').readlines()] shuffle(ls) f = open('train.txt','w') for l in ls: 
f.write(l[0]+'\t'+str(l[1])+'\n') f.close() def avgch(self,ws): total = reduce(lambda x,y: x+len(y), ws,0) return round(total/(len(ws)+1e-10),2) def genfeature(self, ls_x): ''' a. Shallow features 1. number of words in the sentence (normalize) 2. average number of characters in the words 3. percentage of stop words 4. minimum, maximum and average inverse document frequency :param ls_x: sencences X without label :return: ''' vectorizer = TfidfVectorizer(stop_words='english',smooth_idf=True, sublinear_tf=False, use_idf=True) tfidf = vectorizer.fit_transform(ls_x) array = tfidf.toarray() X = [] append = X.append maxtoken = 0 for idx,l in enumerate(ls_x): ws = l.split() maxtoken = max(len(ws),maxtoken) try: stops = round(reduce(lambda x,y: x+1 if y in self.tweetmanager.stop else x, ws,0)/(len(ws)+1e-10),2) except: pass append([len(ws),self.avgch(ws), stops, min(array[idx]), max(array[idx]), sum(array[idx])/len(array[idx])]) return [[round(x[0]*1.0/maxtoken,2)] + x[1:] for x in X] def crossvalidation(self, rawX, Y): trainF = self.genfeature(rawX) X_train, X_test, y_train, y_test = cross_validation.train_test_split(trainF, Y, test_size=0.4, random_state=0) clf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train) print 'svc linear', clf.score(X_test, y_test),clf.coef_ clf = SGDClassifier(loss="hinge", penalty="l2").fit(X_train,y_train) print 'SGDC hinge/l2',clf.score(X_test,y_test),clf.coef_ clf = neighbors.KNeighborsClassifier(5 , weights='uniform').fit(X_train,y_train) print 'KNN 5/uniform',clf.score(X_test,y_test) def genParaphrase(self, fname): tweet = {} ret = [] with open(fname) as f: for l in f.readlines(): nl = ''.join(ch for ch in l if ch not in self.punctuation) if len(nl.strip()) == 0: sorted_x = dict(sorted(tweet.items(), key=operator.itemgetter(1))) ret.append([k for k,v in sorted_x.items() if v > 1]) tweet.clear() continue try: tweet[nl] += 1 except: tweet[nl] = 1 return ret def
(self,fname): terms = {} result = [] with open(fname) as f: for l in f.readlines(): if len(l.strip()) == 0: sorted_x = dict(sorted(terms.items(), key=operator.itemgetter(1))) result.append([k for k,v in sorted_x.items() if v > 1]) terms.clear() continue ret = self.tweetmanager.tokenizefromstring(l) for v, w in zip(ret[:-1], ret[1:]): try: terms[v+" "+w] += 1 except: terms[v+" "+w] = 1 return result class sentenceSimilarity: def __init__(self): self.WORD = re.compile(r'\w+') def excatWordscore(self, text1, text2): vector1 = self.text_to_vector(text1) vector2 = self.text_to_vector(text2) return self.get_cosine(vector1, vector2) def groupExcatWordscore(self, candi): scores = defaultdict(list) l = len(candi) ret = [] total = [] for i in xrange(l): for j in xrange(i+1, l): t = self.excatWordscore(candi[i], candi[j]) scores[i].append(t) total.append(t) scores[j].append(t) # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') stat = statis(total) avg = stat.getavg() std = stat.getstd() lower = avg-std upper = avg+std for k,v in scores.items(): cur = statis(v) cur_avg = cur.getavg() if cur_avg > upper or cur_avg < lower: continue ret.append(candi[k]) return ret def get_cosine(self,vec1, vec2): intersection = set(vec1.keys()) & set(vec2.keys()) numerator = sum([vec1[x] * vec2[x] for x in intersection]) sum1 = sum([vec1[x]**2 for x in vec1.keys()]) sum2 = sum([vec2[x]**2 for x in vec2.keys()]) denominator = sqrt(sum1) * sqrt(sum2) if not denominator: return 0.0 else: return float(numerator) / denominator def text_to_vector(self,text): words = self.WORD.findall(text) return Counter(words) def buildEmbedding(self): self.w2v = {} with open('files/glove.twitter.27B.50d.txt') as f: for line in f: pts = line.split() self.w2v[pts[0]] = [float(x) for x in pts[1:]] f.close() def sentenceEmbedding(self, line): token = line.split() count = 0 ret = [0 for _ in 
xrange(len(self.w2v[self.w2v.keys()[0]]))] for t in token: if self.w2v.has_key(t): ret = map(operator.add, ret, self.w2v[t]) count += 1 if count == 0: return ret else: return [x/count for x in ret] def square_rooted(self,x): return round(sqrt(sum([a*a for a in x])),3) def similarity(self,x,y): numerator = sum(a*b for a,b in zip(x,y)) denominator = self.square_rooted(x)*self.square_rooted(y)+1e-10 return round(numerator/float(denominator),3) def embeddingScore(self, candi): scores = {} embed = {} ret = [] total = [] for idx,c in enumerate(candi): embed[idx] = self.sentenceEmbedding(c) l = len(candi) for i in xrange(l): try: scores[i] += 0 except: scores[i] = 0 for j in xrange(i+1, l): t = self.similarity(embed[i], embed[j]) # assert t is not None scores[i] += t total.append(t) try: scores[j] += t except: scores[j] = t try: scores[i] /= (l-1) except: pass # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') # stat = statis(total) # std = stat.getstd() # avg = stat.getavg() try: threshold = sum(total)/len(total) except: threshold = 0 print 'embedding',threshold for k,v in scores.items(): if v > threshold: ret.append(candi[k]) return ret def wordNetScore(self,candi): scores = {} l = len(candi) ret = [] total = [] for i in xrange(l): try: scores[i] += 0 except: scores[i] = 0 for j in xrange(i+1, l): c1 = re.sub(r'[^\w\s]+','',candi[i]) c2 = re.sub(r'[^\w\s]+','',candi[j]) t = wordnetutil.similarity(c1,c2,True) total.append(t) scores[i] += t try: scores[j] += t except: scores[j] = t try: scores[i] /= (l-1) except: pass # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') try: threshold = sum(total)/len(total) except: threshold = 0 print 'wordnet',threshold for k,v in scores.items(): if v > threshold: ret.append(candi[k]) return ret def extracAllword(self,candi): 
words = set() for s in candi: ws = self.WORD.findall(s) for w in ws: words.add(w) return list(words) def KnnClassify(self,candi): words = self.extracAllword(candi) word_dict = {w:idx for idx, w in enumerate(words)} x = [[0 for _ in xrange(len(words))] for _ in xrange(len(candi))] if len(x) < 3: return candi for id, s in enumerate(candi): tmp = self.text_to_vector(s) for k,v in tmp.items(): x[id][word_dict[k]] = float(v) km = KMeans(n_clusters=3) km.fit(x) samples = {} X_new = km.transform(x) # try: # X_new = km.transform(x) # except: # print 'mooo' for idx, l in enumerate(km.labels_): try: samples[l][idx] = X_new[idx][l] except: samples[l] ={} samples[l][idx] = X_new[idx][l] ret = [] for k, v in samples.items(): sortedv = sorted(v.items(), key=operator.itemgetter(1), reverse=True) for it in sortedv: ret.append(candi[it[0]]) return ret
genParaterm
identifier_name
util.py
__author__ = 'siyuqiu'
from tweetsManager import textManager from random import shuffle from sklearn.feature_extraction.text import TfidfVectorizer from sklearn import svm from sklearn.linear_model import SGDClassifier from sklearn import neighbors from sklearn import cross_validation import string import re from math import * from collections import Counter import operator import wordnetutil from sklearn.cluster import KMeans from collections import defaultdict import warnings warnings.simplefilter("error") class statis: def __init__(self, arr): if arr: self.array = np.array(arr) self.plain_arr = [] def setArray(self,arr): self.array = np.array(arr) def appendArray(self,num): self.plain_arr.append(num) def getPlainArr(self): return self.plain_arr def setFromPlainArr(self): self.array = np.array(self.plain_arr) def getavg(self): try: return np.mean(self.array) except: return 0 def getstd(self): try: return np.std(self.array) except: return 0 def getmin(self): try: return np.min(self.array) except: return 0 def getmax(self): try: return np.max(self.array) except: return 0 def getreport(self): f ={'avg':self.getavg, 'std':self.getstd, 'max':self.getmax, 'min':self.getmin} ret = "" for k, v in f.items(): ret += k+": "+ str(v())+'\n' return ret def getvalue(self,x, mean, std): return norm.pdf(x, mean, std) class dataprepare: def __init__(self): self.tweetmanager = textManager() self.punctuation = list(string.punctuation) def cleantext(self, fname): ff = open(fname.split('.')[0]+'_cleaned.txt','w') with open(fname) as f: for l in f.readlines(): tokens = self.tweetmanager.tokenizefromstring(l) for t in tokens: try: ff.write(t.encode('utf-8')+" ") except: pass ff.write('\n') f.close() ff.close() return ff.name.__str__() def labeldata(self,f1,f2): ls = [(l[:-1],1) for l in open(f1,'r').readlines()] + [(l[:-1],0) for l in open(f2,'r').readlines()] shuffle(ls) f = open('train.txt','w') for l in ls: f.write(l[0]+'\t'+str(l[1])+'\n') f.close() def avgch(self,ws): total = reduce(lambda x,y: x+len(y), 
ws,0) return round(total/(len(ws)+1e-10),2) def genfeature(self, ls_x): ''' a. Shallow features 1. number of words in the sentence (normalize) 2. average number of characters in the words 3. percentage of stop words 4. minimum, maximum and average inverse document frequency :param ls_x: sencences X without label :return: ''' vectorizer = TfidfVectorizer(stop_words='english',smooth_idf=True, sublinear_tf=False, use_idf=True) tfidf = vectorizer.fit_transform(ls_x) array = tfidf.toarray() X = [] append = X.append maxtoken = 0 for idx,l in enumerate(ls_x): ws = l.split() maxtoken = max(len(ws),maxtoken) try: stops = round(reduce(lambda x,y: x+1 if y in self.tweetmanager.stop else x, ws,0)/(len(ws)+1e-10),2) except: pass append([len(ws),self.avgch(ws), stops, min(array[idx]), max(array[idx]), sum(array[idx])/len(array[idx])]) return [[round(x[0]*1.0/maxtoken,2)] + x[1:] for x in X] def crossvalidation(self, rawX, Y): trainF = self.genfeature(rawX) X_train, X_test, y_train, y_test = cross_validation.train_test_split(trainF, Y, test_size=0.4, random_state=0) clf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train) print 'svc linear', clf.score(X_test, y_test),clf.coef_ clf = SGDClassifier(loss="hinge", penalty="l2").fit(X_train,y_train) print 'SGDC hinge/l2',clf.score(X_test,y_test),clf.coef_ clf = neighbors.KNeighborsClassifier(5 , weights='uniform').fit(X_train,y_train) print 'KNN 5/uniform',clf.score(X_test,y_test) def genParaphrase(self, fname): tweet = {} ret = [] with open(fname) as f: for l in f.readlines(): nl = ''.join(ch for ch in l if ch not in self.punctuation) if len(nl.strip()) == 0: sorted_x = dict(sorted(tweet.items(), key=operator.itemgetter(1))) ret.append([k for k,v in sorted_x.items() if v > 1]) tweet.clear() continue try: tweet[nl] += 1 except: tweet[nl] = 1 return ret def genParaterm(self,fname): terms = {} result = [] with open(fname) as f: for l in f.readlines(): if len(l.strip()) == 0: sorted_x = dict(sorted(terms.items(), 
key=operator.itemgetter(1))) result.append([k for k,v in sorted_x.items() if v > 1]) terms.clear() continue ret = self.tweetmanager.tokenizefromstring(l) for v, w in zip(ret[:-1], ret[1:]): try: terms[v+" "+w] += 1 except: terms[v+" "+w] = 1 return result class sentenceSimilarity: def __init__(self): self.WORD = re.compile(r'\w+') def excatWordscore(self, text1, text2): vector1 = self.text_to_vector(text1) vector2 = self.text_to_vector(text2) return self.get_cosine(vector1, vector2) def groupExcatWordscore(self, candi): scores = defaultdict(list) l = len(candi) ret = [] total = [] for i in xrange(l): for j in xrange(i+1, l): t = self.excatWordscore(candi[i], candi[j]) scores[i].append(t) total.append(t) scores[j].append(t) # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') stat = statis(total) avg = stat.getavg() std = stat.getstd() lower = avg-std upper = avg+std for k,v in scores.items(): cur = statis(v) cur_avg = cur.getavg() if cur_avg > upper or cur_avg < lower: continue ret.append(candi[k]) return ret def get_cosine(self,vec1, vec2): intersection = set(vec1.keys()) & set(vec2.keys()) numerator = sum([vec1[x] * vec2[x] for x in intersection]) sum1 = sum([vec1[x]**2 for x in vec1.keys()]) sum2 = sum([vec2[x]**2 for x in vec2.keys()]) denominator = sqrt(sum1) * sqrt(sum2) if not denominator: return 0.0 else: return float(numerator) / denominator def text_to_vector(self,text): words = self.WORD.findall(text) return Counter(words) def buildEmbedding(self): self.w2v = {} with open('files/glove.twitter.27B.50d.txt') as f: for line in f: pts = line.split() self.w2v[pts[0]] = [float(x) for x in pts[1:]] f.close() def sentenceEmbedding(self, line): token = line.split() count = 0 ret = [0 for _ in xrange(len(self.w2v[self.w2v.keys()[0]]))] for t in token: if self.w2v.has_key(t): ret = map(operator.add, ret, self.w2v[t]) count += 1 if count == 0: return ret else: 
return [x/count for x in ret] def square_rooted(self,x): return round(sqrt(sum([a*a for a in x])),3) def similarity(self,x,y): numerator = sum(a*b for a,b in zip(x,y)) denominator = self.square_rooted(x)*self.square_rooted(y)+1e-10 return round(numerator/float(denominator),3) def embeddingScore(self, candi): scores = {} embed = {} ret = [] total = [] for idx,c in enumerate(candi): embed[idx] = self.sentenceEmbedding(c) l = len(candi) for i in xrange(l): try: scores[i] += 0 except: scores[i] = 0 for j in xrange(i+1, l): t = self.similarity(embed[i], embed[j]) # assert t is not None scores[i] += t total.append(t) try: scores[j] += t except: scores[j] = t try: scores[i] /= (l-1) except: pass # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') # stat = statis(total) # std = stat.getstd() # avg = stat.getavg() try: threshold = sum(total)/len(total) except: threshold = 0 print 'embedding',threshold for k,v in scores.items(): if v > threshold: ret.append(candi[k]) return ret def wordNetScore(self,candi): scores = {} l = len(candi) ret = [] total = [] for i in xrange(l): try: scores[i] += 0 except: scores[i] = 0 for j in xrange(i+1, l): c1 = re.sub(r'[^\w\s]+','',candi[i]) c2 = re.sub(r'[^\w\s]+','',candi[j]) t = wordnetutil.similarity(c1,c2,True) total.append(t) scores[i] += t try: scores[j] += t except: scores[j] = t try: scores[i] /= (l-1) except: pass # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') try: threshold = sum(total)/len(total) except: threshold = 0 print 'wordnet',threshold for k,v in scores.items(): if v > threshold: ret.append(candi[k]) return ret def extracAllword(self,candi): words = set() for s in candi: ws = self.WORD.findall(s) for w in ws: words.add(w) return list(words) def KnnClassify(self,candi): words = self.extracAllword(candi) 
word_dict = {w:idx for idx, w in enumerate(words)} x = [[0 for _ in xrange(len(words))] for _ in xrange(len(candi))] if len(x) < 3: return candi for id, s in enumerate(candi): tmp = self.text_to_vector(s) for k,v in tmp.items(): x[id][word_dict[k]] = float(v) km = KMeans(n_clusters=3) km.fit(x) samples = {} X_new = km.transform(x) # try: # X_new = km.transform(x) # except: # print 'mooo' for idx, l in enumerate(km.labels_): try: samples[l][idx] = X_new[idx][l] except: samples[l] ={} samples[l][idx] = X_new[idx][l] ret = [] for k, v in samples.items(): sortedv = sorted(v.items(), key=operator.itemgetter(1), reverse=True) for it in sortedv: ret.append(candi[it[0]]) return ret
import numpy as np from scipy.stats import norm
random_line_split
util.py
__author__ = 'siyuqiu' import numpy as np from scipy.stats import norm from tweetsManager import textManager from random import shuffle from sklearn.feature_extraction.text import TfidfVectorizer from sklearn import svm from sklearn.linear_model import SGDClassifier from sklearn import neighbors from sklearn import cross_validation import string import re from math import * from collections import Counter import operator import wordnetutil from sklearn.cluster import KMeans from collections import defaultdict import warnings warnings.simplefilter("error") class statis: def __init__(self, arr): if arr: self.array = np.array(arr) self.plain_arr = [] def setArray(self,arr): self.array = np.array(arr) def appendArray(self,num): self.plain_arr.append(num) def getPlainArr(self): return self.plain_arr def setFromPlainArr(self): self.array = np.array(self.plain_arr) def getavg(self): try: return np.mean(self.array) except: return 0 def getstd(self): try: return np.std(self.array) except: return 0 def getmin(self): try: return np.min(self.array) except: return 0 def getmax(self): try: return np.max(self.array) except: return 0 def getreport(self): f ={'avg':self.getavg, 'std':self.getstd, 'max':self.getmax, 'min':self.getmin} ret = "" for k, v in f.items(): ret += k+": "+ str(v())+'\n' return ret def getvalue(self,x, mean, std): return norm.pdf(x, mean, std) class dataprepare: def __init__(self): self.tweetmanager = textManager() self.punctuation = list(string.punctuation) def cleantext(self, fname): ff = open(fname.split('.')[0]+'_cleaned.txt','w') with open(fname) as f: for l in f.readlines(): tokens = self.tweetmanager.tokenizefromstring(l) for t in tokens: try: ff.write(t.encode('utf-8')+" ") except: pass ff.write('\n') f.close() ff.close() return ff.name.__str__() def labeldata(self,f1,f2): ls = [(l[:-1],1) for l in open(f1,'r').readlines()] + [(l[:-1],0) for l in open(f2,'r').readlines()] shuffle(ls) f = open('train.txt','w') for l in ls: 
f.write(l[0]+'\t'+str(l[1])+'\n') f.close() def avgch(self,ws): total = reduce(lambda x,y: x+len(y), ws,0) return round(total/(len(ws)+1e-10),2) def genfeature(self, ls_x): ''' a. Shallow features 1. number of words in the sentence (normalize) 2. average number of characters in the words 3. percentage of stop words 4. minimum, maximum and average inverse document frequency :param ls_x: sencences X without label :return: ''' vectorizer = TfidfVectorizer(stop_words='english',smooth_idf=True, sublinear_tf=False, use_idf=True) tfidf = vectorizer.fit_transform(ls_x) array = tfidf.toarray() X = [] append = X.append maxtoken = 0 for idx,l in enumerate(ls_x): ws = l.split() maxtoken = max(len(ws),maxtoken) try: stops = round(reduce(lambda x,y: x+1 if y in self.tweetmanager.stop else x, ws,0)/(len(ws)+1e-10),2) except: pass append([len(ws),self.avgch(ws), stops, min(array[idx]), max(array[idx]), sum(array[idx])/len(array[idx])]) return [[round(x[0]*1.0/maxtoken,2)] + x[1:] for x in X] def crossvalidation(self, rawX, Y): trainF = self.genfeature(rawX) X_train, X_test, y_train, y_test = cross_validation.train_test_split(trainF, Y, test_size=0.4, random_state=0) clf = svm.SVC(kernel='linear', C=1).fit(X_train, y_train) print 'svc linear', clf.score(X_test, y_test),clf.coef_ clf = SGDClassifier(loss="hinge", penalty="l2").fit(X_train,y_train) print 'SGDC hinge/l2',clf.score(X_test,y_test),clf.coef_ clf = neighbors.KNeighborsClassifier(5 , weights='uniform').fit(X_train,y_train) print 'KNN 5/uniform',clf.score(X_test,y_test) def genParaphrase(self, fname): tweet = {} ret = [] with open(fname) as f: for l in f.readlines(): nl = ''.join(ch for ch in l if ch not in self.punctuation) if len(nl.strip()) == 0: sorted_x = dict(sorted(tweet.items(), key=operator.itemgetter(1))) ret.append([k for k,v in sorted_x.items() if v > 1]) tweet.clear() continue try: tweet[nl] += 1 except: tweet[nl] = 1 return ret def genParaterm(self,fname): terms = {} result = [] with open(fname) as f: for l in 
f.readlines(): if len(l.strip()) == 0: sorted_x = dict(sorted(terms.items(), key=operator.itemgetter(1))) result.append([k for k,v in sorted_x.items() if v > 1]) terms.clear() continue ret = self.tweetmanager.tokenizefromstring(l) for v, w in zip(ret[:-1], ret[1:]):
return result class sentenceSimilarity: def __init__(self): self.WORD = re.compile(r'\w+') def excatWordscore(self, text1, text2): vector1 = self.text_to_vector(text1) vector2 = self.text_to_vector(text2) return self.get_cosine(vector1, vector2) def groupExcatWordscore(self, candi): scores = defaultdict(list) l = len(candi) ret = [] total = [] for i in xrange(l): for j in xrange(i+1, l): t = self.excatWordscore(candi[i], candi[j]) scores[i].append(t) total.append(t) scores[j].append(t) # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') stat = statis(total) avg = stat.getavg() std = stat.getstd() lower = avg-std upper = avg+std for k,v in scores.items(): cur = statis(v) cur_avg = cur.getavg() if cur_avg > upper or cur_avg < lower: continue ret.append(candi[k]) return ret def get_cosine(self,vec1, vec2): intersection = set(vec1.keys()) & set(vec2.keys()) numerator = sum([vec1[x] * vec2[x] for x in intersection]) sum1 = sum([vec1[x]**2 for x in vec1.keys()]) sum2 = sum([vec2[x]**2 for x in vec2.keys()]) denominator = sqrt(sum1) * sqrt(sum2) if not denominator: return 0.0 else: return float(numerator) / denominator def text_to_vector(self,text): words = self.WORD.findall(text) return Counter(words) def buildEmbedding(self): self.w2v = {} with open('files/glove.twitter.27B.50d.txt') as f: for line in f: pts = line.split() self.w2v[pts[0]] = [float(x) for x in pts[1:]] f.close() def sentenceEmbedding(self, line): token = line.split() count = 0 ret = [0 for _ in xrange(len(self.w2v[self.w2v.keys()[0]]))] for t in token: if self.w2v.has_key(t): ret = map(operator.add, ret, self.w2v[t]) count += 1 if count == 0: return ret else: return [x/count for x in ret] def square_rooted(self,x): return round(sqrt(sum([a*a for a in x])),3) def similarity(self,x,y): numerator = sum(a*b for a,b in zip(x,y)) denominator = self.square_rooted(x)*self.square_rooted(y)+1e-10 return 
round(numerator/float(denominator),3) def embeddingScore(self, candi): scores = {} embed = {} ret = [] total = [] for idx,c in enumerate(candi): embed[idx] = self.sentenceEmbedding(c) l = len(candi) for i in xrange(l): try: scores[i] += 0 except: scores[i] = 0 for j in xrange(i+1, l): t = self.similarity(embed[i], embed[j]) # assert t is not None scores[i] += t total.append(t) try: scores[j] += t except: scores[j] = t try: scores[i] /= (l-1) except: pass # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') # stat = statis(total) # std = stat.getstd() # avg = stat.getavg() try: threshold = sum(total)/len(total) except: threshold = 0 print 'embedding',threshold for k,v in scores.items(): if v > threshold: ret.append(candi[k]) return ret def wordNetScore(self,candi): scores = {} l = len(candi) ret = [] total = [] for i in xrange(l): try: scores[i] += 0 except: scores[i] = 0 for j in xrange(i+1, l): c1 = re.sub(r'[^\w\s]+','',candi[i]) c2 = re.sub(r'[^\w\s]+','',candi[j]) t = wordnetutil.similarity(c1,c2,True) total.append(t) scores[i] += t try: scores[j] += t except: scores[j] = t try: scores[i] /= (l-1) except: pass # sorted_s = sorted(scores.items(), key=operator.itemgetter(1), reverse=True) # for k in sorted_s[:l/2+1]: # fout.write(candi[k[0]]+'\n') # fout.write('\n') try: threshold = sum(total)/len(total) except: threshold = 0 print 'wordnet',threshold for k,v in scores.items(): if v > threshold: ret.append(candi[k]) return ret def extracAllword(self,candi): words = set() for s in candi: ws = self.WORD.findall(s) for w in ws: words.add(w) return list(words) def KnnClassify(self,candi): words = self.extracAllword(candi) word_dict = {w:idx for idx, w in enumerate(words)} x = [[0 for _ in xrange(len(words))] for _ in xrange(len(candi))] if len(x) < 3: return candi for id, s in enumerate(candi): tmp = self.text_to_vector(s) for k,v in tmp.items(): 
x[id][word_dict[k]] = float(v) km = KMeans(n_clusters=3) km.fit(x) samples = {} X_new = km.transform(x) # try: # X_new = km.transform(x) # except: # print 'mooo' for idx, l in enumerate(km.labels_): try: samples[l][idx] = X_new[idx][l] except: samples[l] ={} samples[l][idx] = X_new[idx][l] ret = [] for k, v in samples.items(): sortedv = sorted(v.items(), key=operator.itemgetter(1), reverse=True) for it in sortedv: ret.append(candi[it[0]]) return ret
try: terms[v+" "+w] += 1 except: terms[v+" "+w] = 1
conditional_block
app.py
import json import os import time from io import StringIO import requests from flask import Flask, request from flask_httpauth import HTTPBasicAuth from flask_restful import Resource, Api from lxml import etree from pyace import ACERecord, ACEAdminConnection, ACEAdminConnectionError, subdirs_file_content_to_dict, \ hash_dict_values, create_dir_if_not_exists from werkzeug.security import check_password_hash, generate_password_hash project_types = ('applications', 'services', 'rest-apis') app = Flask(__name__) api = Api(app) auth = HTTPBasicAuth() mount_path = os.environ.get('EOD20_UNITTESTAPI_MOUNT_PATH') data_dir = os.path.join(mount_path, "data") user_dir = os.path.join(mount_path, "users") ace_config_dir = os.path.join(mount_path, "ace-config") ace_config = subdirs_file_content_to_dict(ace_config_dir, split_by_line=False) ace_conn = ACEAdminConnection(host=ace_config["host"], admin_port=int(ace_config["port"]), admin_https=False, user=ace_config["user"], pw=ace_config["pw"]) user_auth = subdirs_file_content_to_dict(user_dir, split_by_line=False) hash_dict_values(user_auth) def invalid_project_tye_msg(proj_type): """API return string and HTTP Bad Request (400) issued when project type is not valid. 
:param proj_type: the first path parameter of the Query resource that specifies the ACE project type (applications, services, rest-apis) :type proj_type: string :returns: a standard JSON error message""" return {"error": f"Project type {proj_type} is not valid, please use one of the following: " f"{', '.join(project_types)}"}, 400 def api_response(project_type, result): """"Wrapper to return the desired api response only if the specified project type is valid :param project_type: the first path parameter (string) of the Query resource that specifies the ACE project type (applications, services, rest-apis) :type project_type: string :param result: result to return if project_type is valid :type result: any :returns: result if the project_type is valid, returns a standard error otherwise (see invalid_project_type_msg)""" if project_type not in project_types: return invalid_project_tye_msg(project_type) return result def is_valid_query(query): """Function to check for XPath validity. Tries to create an etree ETXPath instance from the query. If this fails, the XPathSyntaxError is excepted to return a False. Returns True otherwise :param query: XPath query :type query: string :returns: True/False""" try: etree.ETXPath(query) return True except etree.XPathSyntaxError: return False def process_queries(req, save_dir, replace_allowed): """" First checks if the request is a simple dictionary with string keys and string values. 
If so, the queries in the request message are saved to disk :param req: the API request object :type req: flask request object :param save_dir: the directory string ('project_type/project/flow/node/terminal') to which the queries will be saved :type save_dir: str :param replace_allowed: indication whether to PUT (true) or POST (false) :type replace_allowed: boolean""" create_dir_if_not_exists(save_dir) req_obj = json.loads(req.data) result_dict = dict() if not all(map(lambda x: all(map(lambda y: isinstance(y, str), x)), req_obj.items())): return {"message": f"Not all query names or values are strings"}, 400 for query_name, query_value in req_obj.items(): exists = os.path.exists(os.path.join(save_dir, query_name)) if not is_valid_query(query_value): result_dict[query_name] = "invalid XPath expression" elif not replace_allowed and exists: result_dict[query_name] = "not created: already exists" elif replace_allowed and not exists: result_dict[query_name] = "not replaced: does not exist" else: with open(os.path.join(save_dir, query_name), 'w') as f: f.write(query_value) result_dict[query_name] = "replaced" if replace_allowed else "created" if len(os.listdir(save_dir)) == 0: os.rmdir(save_dir) return result_dict, 200 @auth.verify_password def verify_password(username, password): """Default basic auth verification method""" if username in user_auth and check_password_hash(user_auth.get(username), password): return username class Query(Resource): @auth.login_required def get(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all queries on disk queried by the parameters :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: 
key-value dictionary. Keys: query names. Values: XPath query""" return api_response(project_type, (subdirs_file_content_to_dict(os.path.join(data_dir, project_type, project, msgflow, node, terminal), split_by_line=False, subdict_by_path=True), 200)) @auth.login_required def
(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all submitted queries and their processing status :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: key-value dictionary. Keys: query names. Values: processing status (see process_queries function)""" return api_response(project_type, process_queries(request, os.path.join(data_dir, project_type, project, msgflow, node, terminal), False)) @auth.login_required def put(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all submitted queries and their processing status :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: key-value dictionary. Keys: query names. Values: processing status (see process_queries function)""" return api_response(project_type, process_queries(request, os.path.join(data_dir, project_type, project, msgflow, node, terminal), True)) def query_dict_for_record(record, touched_queries): """Returns a query result dictionary, given an ACE Record instance and a dictionary of the XPath queries that need to be executed on this record. 
:param record: instance of ACERecord :type record: ACERecord :param touched_queries: key-value dictionary (key: query name, value: XPath expression) that need to be executed on the record :type touched_queries: dictionary :returns: a dictionary with the same keys as touched_queries, and has a subdict as value containing 'query' (XPath expression) and 'result' (query result)""" result = dict() if len(touched_queries) > 0: parsed_record = etree.parse(StringIO(record.test_data_xml())) result.update(dict((q_name, {'query': q_value, 'result': list(x.text for x in etree.ETXPath(q_value)(parsed_record))}) for q_name, q_value in touched_queries.items())) return result def perform_queries(records, project_type, project, msgflow): """Returns list for each ACERecord supplied, each specifying from-to which node+terminal between which this record was obtained, together with a query result dictionary. :param records: list of ACERecord instances :type records: list(ACERecord) :param project_type: choice (application/services/rest-apis) :type project_type: string :param project: name of the ACE project :type project: string :param msgflow: name of the ACE message flow in the ACE project :type msgflow: string :returns: a list, with each element containing: - the from-to node+terminal info - a dictionary of queries executed on the record""" all_queries = subdirs_file_content_to_dict(os.path.join(data_dir, project_type, project, msgflow), split_by_line=False, subdict_by_path=True) result = list({'from': {'node': record.source_node, 'terminal': record.source_terminal}, 'to': {'node': record.target_node, 'terminal': record.target_terminal}, 'queries': query_dict_for_record(record, all_queries.get(record.source_node, dict()).get( record.source_terminal, dict()) | all_queries.get(record.target_node, dict()).get( record.target_terminal, dict()))} for record in records) return result class Exerciser(Resource): @auth.login_required def post(self, project_type, project, msgflow, node): 
"""Endpoint to exercise a message. The message is injected into the flow. For this recording and injection must be temporarily enabled on the flow. Test data is obtained, after which instances of ACERecord are created and sorted on flowSequenceNumber. For each record, an object is created with the from-to node+terminal info and also the results of the queries matching either the source or target node+terminal :param project_type: choice of either applications/services/rest-apis :type project_type: string :param project: name of the ACE project :type project: string :param msgflow: name of the message flow in the ACE project :type msgflow: string :param node: input node of the message flow the message needs to be injected in :type msgflow: string :returns: list of dictionaries, one per ACERecord, with the exercise results""" result = dict() try: ace_conn.start_recording(project_type, project, msgflow) ace_conn.start_injection(project_type, project, msgflow) ace_conn.inject(project_type, project, msgflow, node, request.data) ace_conn.stop_injection(project_type, project, msgflow) ace_conn.stop_recording(project_type, project, msgflow) test_payload = sorted(filter(lambda x: x.application == project and x.message_flow == msgflow, map(lambda x: ACERecord(x), ace_conn.get_recorded_test_data())), key=lambda x: x.flow_sequence_number) ace_conn.delete_recorded_test_data() result = perform_queries(test_payload, project_type, project, msgflow) except ACEAdminConnectionError as e: result = e, 500 except Exception: result = {"error": "An error not related to ACE connections occurred."}, 500 finally: return result, 200 api.add_resource(Query, '/queries/<string:project_type>/<string:project>/<string:msgflow>/<string:node>/<string:terminal>') api.add_resource(Exerciser, '/exercise/<string:project_type>/<string:project>/<string:msgflow>/<string:node>') app.run(host='0.0.0.0', port=8082, debug=True)
post
identifier_name
app.py
import json import os import time from io import StringIO import requests from flask import Flask, request from flask_httpauth import HTTPBasicAuth from flask_restful import Resource, Api from lxml import etree from pyace import ACERecord, ACEAdminConnection, ACEAdminConnectionError, subdirs_file_content_to_dict, \ hash_dict_values, create_dir_if_not_exists from werkzeug.security import check_password_hash, generate_password_hash project_types = ('applications', 'services', 'rest-apis') app = Flask(__name__) api = Api(app) auth = HTTPBasicAuth() mount_path = os.environ.get('EOD20_UNITTESTAPI_MOUNT_PATH') data_dir = os.path.join(mount_path, "data") user_dir = os.path.join(mount_path, "users") ace_config_dir = os.path.join(mount_path, "ace-config") ace_config = subdirs_file_content_to_dict(ace_config_dir, split_by_line=False) ace_conn = ACEAdminConnection(host=ace_config["host"], admin_port=int(ace_config["port"]), admin_https=False, user=ace_config["user"], pw=ace_config["pw"]) user_auth = subdirs_file_content_to_dict(user_dir, split_by_line=False) hash_dict_values(user_auth) def invalid_project_tye_msg(proj_type): """API return string and HTTP Bad Request (400) issued when project type is not valid. 
:param proj_type: the first path parameter of the Query resource that specifies the ACE project type (applications, services, rest-apis) :type proj_type: string :returns: a standard JSON error message""" return {"error": f"Project type {proj_type} is not valid, please use one of the following: " f"{', '.join(project_types)}"}, 400 def api_response(project_type, result): """"Wrapper to return the desired api response only if the specified project type is valid :param project_type: the first path parameter (string) of the Query resource that specifies the ACE project type (applications, services, rest-apis) :type project_type: string :param result: result to return if project_type is valid :type result: any :returns: result if the project_type is valid, returns a standard error otherwise (see invalid_project_type_msg)""" if project_type not in project_types: return invalid_project_tye_msg(project_type) return result def is_valid_query(query): """Function to check for XPath validity. Tries to create an etree ETXPath instance from the query. If this fails, the XPathSyntaxError is excepted to return a False. Returns True otherwise :param query: XPath query :type query: string :returns: True/False""" try: etree.ETXPath(query) return True except etree.XPathSyntaxError: return False def process_queries(req, save_dir, replace_allowed): """" First checks if the request is a simple dictionary with string keys and string values. 
If so, the queries in the request message are saved to disk :param req: the API request object :type req: flask request object :param save_dir: the directory string ('project_type/project/flow/node/terminal') to which the queries will be saved :type save_dir: str :param replace_allowed: indication whether to PUT (true) or POST (false) :type replace_allowed: boolean""" create_dir_if_not_exists(save_dir) req_obj = json.loads(req.data) result_dict = dict() if not all(map(lambda x: all(map(lambda y: isinstance(y, str), x)), req_obj.items())): return {"message": f"Not all query names or values are strings"}, 400 for query_name, query_value in req_obj.items(): exists = os.path.exists(os.path.join(save_dir, query_name)) if not is_valid_query(query_value): result_dict[query_name] = "invalid XPath expression" elif not replace_allowed and exists: result_dict[query_name] = "not created: already exists" elif replace_allowed and not exists: result_dict[query_name] = "not replaced: does not exist" else: with open(os.path.join(save_dir, query_name), 'w') as f: f.write(query_value) result_dict[query_name] = "replaced" if replace_allowed else "created" if len(os.listdir(save_dir)) == 0: os.rmdir(save_dir) return result_dict, 200 @auth.verify_password def verify_password(username, password): """Default basic auth verification method""" if username in user_auth and check_password_hash(user_auth.get(username), password): return username class Query(Resource):
def query_dict_for_record(record, touched_queries): """Returns a query result dictionary, given an ACE Record instance and a dictionary of the XPath queries that need to be executed on this record. :param record: instance of ACERecord :type record: ACERecord :param touched_queries: key-value dictionary (key: query name, value: XPath expression) that need to be executed on the record :type touched_queries: dictionary :returns: a dictionary with the same keys as touched_queries, and has a subdict as value containing 'query' (XPath expression) and 'result' (query result)""" result = dict() if len(touched_queries) > 0: parsed_record = etree.parse(StringIO(record.test_data_xml())) result.update(dict((q_name, {'query': q_value, 'result': list(x.text for x in etree.ETXPath(q_value)(parsed_record))}) for q_name, q_value in touched_queries.items())) return result def perform_queries(records, project_type, project, msgflow): """Returns list for each ACERecord supplied, each specifying from-to which node+terminal between which this record was obtained, together with a query result dictionary. 
:param records: list of ACERecord instances :type records: list(ACERecord) :param project_type: choice (application/services/rest-apis) :type project_type: string :param project: name of the ACE project :type project: string :param msgflow: name of the ACE message flow in the ACE project :type msgflow: string :returns: a list, with each element containing: - the from-to node+terminal info - a dictionary of queries executed on the record""" all_queries = subdirs_file_content_to_dict(os.path.join(data_dir, project_type, project, msgflow), split_by_line=False, subdict_by_path=True) result = list({'from': {'node': record.source_node, 'terminal': record.source_terminal}, 'to': {'node': record.target_node, 'terminal': record.target_terminal}, 'queries': query_dict_for_record(record, all_queries.get(record.source_node, dict()).get( record.source_terminal, dict()) | all_queries.get(record.target_node, dict()).get( record.target_terminal, dict()))} for record in records) return result class Exerciser(Resource): @auth.login_required def post(self, project_type, project, msgflow, node): """Endpoint to exercise a message. The message is injected into the flow. For this recording and injection must be temporarily enabled on the flow. Test data is obtained, after which instances of ACERecord are created and sorted on flowSequenceNumber. 
For each record, an object is created with the from-to node+terminal info and also the results of the queries matching either the source or target node+terminal :param project_type: choice of either applications/services/rest-apis :type project_type: string :param project: name of the ACE project :type project: string :param msgflow: name of the message flow in the ACE project :type msgflow: string :param node: input node of the message flow the message needs to be injected in :type msgflow: string :returns: list of dictionaries, one per ACERecord, with the exercise results""" result = dict() try: ace_conn.start_recording(project_type, project, msgflow) ace_conn.start_injection(project_type, project, msgflow) ace_conn.inject(project_type, project, msgflow, node, request.data) ace_conn.stop_injection(project_type, project, msgflow) ace_conn.stop_recording(project_type, project, msgflow) test_payload = sorted(filter(lambda x: x.application == project and x.message_flow == msgflow, map(lambda x: ACERecord(x), ace_conn.get_recorded_test_data())), key=lambda x: x.flow_sequence_number) ace_conn.delete_recorded_test_data() result = perform_queries(test_payload, project_type, project, msgflow) except ACEAdminConnectionError as e: result = e, 500 except Exception: result = {"error": "An error not related to ACE connections occurred."}, 500 finally: return result, 200 api.add_resource(Query, '/queries/<string:project_type>/<string:project>/<string:msgflow>/<string:node>/<string:terminal>') api.add_resource(Exerciser, '/exercise/<string:project_type>/<string:project>/<string:msgflow>/<string:node>') app.run(host='0.0.0.0', port=8082, debug=True)
@auth.login_required def get(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all queries on disk queried by the parameters :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: key-value dictionary. Keys: query names. Values: XPath query""" return api_response(project_type, (subdirs_file_content_to_dict(os.path.join(data_dir, project_type, project, msgflow, node, terminal), split_by_line=False, subdict_by_path=True), 200)) @auth.login_required def post(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all submitted queries and their processing status :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: key-value dictionary. Keys: query names. 
Values: processing status (see process_queries function)""" return api_response(project_type, process_queries(request, os.path.join(data_dir, project_type, project, msgflow, node, terminal), False)) @auth.login_required def put(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all submitted queries and their processing status :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: key-value dictionary. Keys: query names. Values: processing status (see process_queries function)""" return api_response(project_type, process_queries(request, os.path.join(data_dir, project_type, project, msgflow, node, terminal), True))
identifier_body
app.py
import json import os import time from io import StringIO import requests from flask import Flask, request from flask_httpauth import HTTPBasicAuth from flask_restful import Resource, Api from lxml import etree from pyace import ACERecord, ACEAdminConnection, ACEAdminConnectionError, subdirs_file_content_to_dict, \ hash_dict_values, create_dir_if_not_exists from werkzeug.security import check_password_hash, generate_password_hash project_types = ('applications', 'services', 'rest-apis') app = Flask(__name__) api = Api(app) auth = HTTPBasicAuth() mount_path = os.environ.get('EOD20_UNITTESTAPI_MOUNT_PATH') data_dir = os.path.join(mount_path, "data") user_dir = os.path.join(mount_path, "users") ace_config_dir = os.path.join(mount_path, "ace-config") ace_config = subdirs_file_content_to_dict(ace_config_dir, split_by_line=False) ace_conn = ACEAdminConnection(host=ace_config["host"], admin_port=int(ace_config["port"]), admin_https=False, user=ace_config["user"], pw=ace_config["pw"]) user_auth = subdirs_file_content_to_dict(user_dir, split_by_line=False) hash_dict_values(user_auth) def invalid_project_tye_msg(proj_type): """API return string and HTTP Bad Request (400) issued when project type is not valid. 
:param proj_type: the first path parameter of the Query resource that specifies the ACE project type (applications, services, rest-apis) :type proj_type: string :returns: a standard JSON error message""" return {"error": f"Project type {proj_type} is not valid, please use one of the following: " f"{', '.join(project_types)}"}, 400 def api_response(project_type, result): """"Wrapper to return the desired api response only if the specified project type is valid :param project_type: the first path parameter (string) of the Query resource that specifies the ACE project type (applications, services, rest-apis) :type project_type: string :param result: result to return if project_type is valid :type result: any :returns: result if the project_type is valid, returns a standard error otherwise (see invalid_project_type_msg)""" if project_type not in project_types: return invalid_project_tye_msg(project_type) return result def is_valid_query(query): """Function to check for XPath validity. Tries to create an etree ETXPath instance from the query. If this fails, the XPathSyntaxError is excepted to return a False. Returns True otherwise :param query: XPath query :type query: string :returns: True/False""" try: etree.ETXPath(query) return True except etree.XPathSyntaxError: return False def process_queries(req, save_dir, replace_allowed): """" First checks if the request is a simple dictionary with string keys and string values. 
If so, the queries in the request message are saved to disk :param req: the API request object :type req: flask request object :param save_dir: the directory string ('project_type/project/flow/node/terminal') to which the queries will be saved :type save_dir: str :param replace_allowed: indication whether to PUT (true) or POST (false) :type replace_allowed: boolean""" create_dir_if_not_exists(save_dir) req_obj = json.loads(req.data) result_dict = dict() if not all(map(lambda x: all(map(lambda y: isinstance(y, str), x)), req_obj.items())): return {"message": f"Not all query names or values are strings"}, 400 for query_name, query_value in req_obj.items(): exists = os.path.exists(os.path.join(save_dir, query_name)) if not is_valid_query(query_value): result_dict[query_name] = "invalid XPath expression" elif not replace_allowed and exists: result_dict[query_name] = "not created: already exists" elif replace_allowed and not exists: result_dict[query_name] = "not replaced: does not exist" else: with open(os.path.join(save_dir, query_name), 'w') as f: f.write(query_value) result_dict[query_name] = "replaced" if replace_allowed else "created" if len(os.listdir(save_dir)) == 0: os.rmdir(save_dir) return result_dict, 200 @auth.verify_password def verify_password(username, password): """Default basic auth verification method""" if username in user_auth and check_password_hash(user_auth.get(username), password): return username class Query(Resource): @auth.login_required def get(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all queries on disk queried by the parameters :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: 
key-value dictionary. Keys: query names. Values: XPath query""" return api_response(project_type, (subdirs_file_content_to_dict(os.path.join(data_dir, project_type, project, msgflow, node, terminal), split_by_line=False, subdict_by_path=True), 200)) @auth.login_required def post(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all submitted queries and their processing status :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: key-value dictionary. Keys: query names. Values: processing status (see process_queries function)""" return api_response(project_type, process_queries(request, os.path.join(data_dir, project_type, project, msgflow, node, terminal), False)) @auth.login_required def put(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all submitted queries and their processing status :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: key-value dictionary. Keys: query names. 
Values: processing status (see process_queries function)""" return api_response(project_type, process_queries(request, os.path.join(data_dir, project_type, project, msgflow, node, terminal), True)) def query_dict_for_record(record, touched_queries): """Returns a query result dictionary, given an ACE Record instance and a dictionary of the XPath queries that need to be executed on this record. :param record: instance of ACERecord :type record: ACERecord :param touched_queries: key-value dictionary (key: query name, value: XPath expression) that need to be executed on the record :type touched_queries: dictionary :returns: a dictionary with the same keys as touched_queries, and has a subdict as value containing 'query' (XPath expression) and 'result' (query result)""" result = dict() if len(touched_queries) > 0: parsed_record = etree.parse(StringIO(record.test_data_xml())) result.update(dict((q_name, {'query': q_value, 'result': list(x.text for x in etree.ETXPath(q_value)(parsed_record))}) for q_name, q_value in touched_queries.items())) return result def perform_queries(records, project_type, project, msgflow): """Returns list for each ACERecord supplied, each specifying from-to which node+terminal between which this
record was obtained, together with a query result dictionary. :param records: list of ACERecord instances :type records: list(ACERecord) :param project_type: choice (application/services/rest-apis) :type project_type: string :param project: name of the ACE project :type project: string :param msgflow: name of the ACE message flow in the ACE project :type msgflow: string :returns: a list, with each element containing: - the from-to node+terminal info - a dictionary of queries executed on the record""" all_queries = subdirs_file_content_to_dict(os.path.join(data_dir, project_type, project, msgflow), split_by_line=False, subdict_by_path=True) result = list({'from': {'node': record.source_node, 'terminal': record.source_terminal}, 'to': {'node': record.target_node, 'terminal': record.target_terminal}, 'queries': query_dict_for_record(record, all_queries.get(record.source_node, dict()).get( record.source_terminal, dict()) | all_queries.get(record.target_node, dict()).get( record.target_terminal, dict()))} for record in records) return result class Exerciser(Resource): @auth.login_required def post(self, project_type, project, msgflow, node): """Endpoint to exercise a message. The message is injected into the flow. For this recording and injection must be temporarily enabled on the flow. Test data is obtained, after which instances of ACERecord are created and sorted on flowSequenceNumber. 
For each record, an object is created with the from-to node+terminal info and also the results of the queries matching either the source or target node+terminal :param project_type: choice of either applications/services/rest-apis :type project_type: string :param project: name of the ACE project :type project: string :param msgflow: name of the message flow in the ACE project :type msgflow: string :param node: input node of the message flow the message needs to be injected in :type msgflow: string :returns: list of dictionaries, one per ACERecord, with the exercise results""" result = dict() try: ace_conn.start_recording(project_type, project, msgflow) ace_conn.start_injection(project_type, project, msgflow) ace_conn.inject(project_type, project, msgflow, node, request.data) ace_conn.stop_injection(project_type, project, msgflow) ace_conn.stop_recording(project_type, project, msgflow) test_payload = sorted(filter(lambda x: x.application == project and x.message_flow == msgflow, map(lambda x: ACERecord(x), ace_conn.get_recorded_test_data())), key=lambda x: x.flow_sequence_number) ace_conn.delete_recorded_test_data() result = perform_queries(test_payload, project_type, project, msgflow) except ACEAdminConnectionError as e: result = e, 500 except Exception: result = {"error": "An error not related to ACE connections occurred."}, 500 finally: return result, 200 api.add_resource(Query, '/queries/<string:project_type>/<string:project>/<string:msgflow>/<string:node>/<string:terminal>') api.add_resource(Exerciser, '/exercise/<string:project_type>/<string:project>/<string:msgflow>/<string:node>') app.run(host='0.0.0.0', port=8082, debug=True)
random_line_split
app.py
import json import os import time from io import StringIO import requests from flask import Flask, request from flask_httpauth import HTTPBasicAuth from flask_restful import Resource, Api from lxml import etree from pyace import ACERecord, ACEAdminConnection, ACEAdminConnectionError, subdirs_file_content_to_dict, \ hash_dict_values, create_dir_if_not_exists from werkzeug.security import check_password_hash, generate_password_hash project_types = ('applications', 'services', 'rest-apis') app = Flask(__name__) api = Api(app) auth = HTTPBasicAuth() mount_path = os.environ.get('EOD20_UNITTESTAPI_MOUNT_PATH') data_dir = os.path.join(mount_path, "data") user_dir = os.path.join(mount_path, "users") ace_config_dir = os.path.join(mount_path, "ace-config") ace_config = subdirs_file_content_to_dict(ace_config_dir, split_by_line=False) ace_conn = ACEAdminConnection(host=ace_config["host"], admin_port=int(ace_config["port"]), admin_https=False, user=ace_config["user"], pw=ace_config["pw"]) user_auth = subdirs_file_content_to_dict(user_dir, split_by_line=False) hash_dict_values(user_auth) def invalid_project_tye_msg(proj_type): """API return string and HTTP Bad Request (400) issued when project type is not valid. 
:param proj_type: the first path parameter of the Query resource that specifies the ACE project type (applications, services, rest-apis) :type proj_type: string :returns: a standard JSON error message""" return {"error": f"Project type {proj_type} is not valid, please use one of the following: " f"{', '.join(project_types)}"}, 400 def api_response(project_type, result): """"Wrapper to return the desired api response only if the specified project type is valid :param project_type: the first path parameter (string) of the Query resource that specifies the ACE project type (applications, services, rest-apis) :type project_type: string :param result: result to return if project_type is valid :type result: any :returns: result if the project_type is valid, returns a standard error otherwise (see invalid_project_type_msg)""" if project_type not in project_types: return invalid_project_tye_msg(project_type) return result def is_valid_query(query): """Function to check for XPath validity. Tries to create an etree ETXPath instance from the query. If this fails, the XPathSyntaxError is excepted to return a False. Returns True otherwise :param query: XPath query :type query: string :returns: True/False""" try: etree.ETXPath(query) return True except etree.XPathSyntaxError: return False def process_queries(req, save_dir, replace_allowed): """" First checks if the request is a simple dictionary with string keys and string values. 
If so, the queries in the request message are saved to disk :param req: the API request object :type req: flask request object :param save_dir: the directory string ('project_type/project/flow/node/terminal') to which the queries will be saved :type save_dir: str :param replace_allowed: indication whether to PUT (true) or POST (false) :type replace_allowed: boolean""" create_dir_if_not_exists(save_dir) req_obj = json.loads(req.data) result_dict = dict() if not all(map(lambda x: all(map(lambda y: isinstance(y, str), x)), req_obj.items())): return {"message": f"Not all query names or values are strings"}, 400 for query_name, query_value in req_obj.items(): exists = os.path.exists(os.path.join(save_dir, query_name)) if not is_valid_query(query_value): result_dict[query_name] = "invalid XPath expression" elif not replace_allowed and exists: result_dict[query_name] = "not created: already exists" elif replace_allowed and not exists: result_dict[query_name] = "not replaced: does not exist" else: with open(os.path.join(save_dir, query_name), 'w') as f: f.write(query_value) result_dict[query_name] = "replaced" if replace_allowed else "created" if len(os.listdir(save_dir)) == 0: os.rmdir(save_dir) return result_dict, 200 @auth.verify_password def verify_password(username, password): """Default basic auth verification method""" if username in user_auth and check_password_hash(user_auth.get(username), password): return username class Query(Resource): @auth.login_required def get(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all queries on disk queried by the parameters :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: 
key-value dictionary. Keys: query names. Values: XPath query""" return api_response(project_type, (subdirs_file_content_to_dict(os.path.join(data_dir, project_type, project, msgflow, node, terminal), split_by_line=False, subdict_by_path=True), 200)) @auth.login_required def post(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all submitted queries and their processing status :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: key-value dictionary. Keys: query names. Values: processing status (see process_queries function)""" return api_response(project_type, process_queries(request, os.path.join(data_dir, project_type, project, msgflow, node, terminal), False)) @auth.login_required def put(self, project_type, project, msgflow, node, terminal): """Returns a dictionary with all submitted queries and their processing status :param project_type: choice between applications/services/rest-apis :type project_type: string :param project: name of the ACE project: Project1 :type project: string :param msgflow: brokerschema'd messageflow name: brokerchema.Flow1 :type msgflow: string :param node: node in the message flow :type node: string :param terminal: node terminal :type terminal: string :returns: key-value dictionary. Keys: query names. 
Values: processing status (see process_queries function)""" return api_response(project_type, process_queries(request, os.path.join(data_dir, project_type, project, msgflow, node, terminal), True)) def query_dict_for_record(record, touched_queries): """Returns a query result dictionary, given an ACE Record instance and a dictionary of the XPath queries that need to be executed on this record. :param record: instance of ACERecord :type record: ACERecord :param touched_queries: key-value dictionary (key: query name, value: XPath expression) that need to be executed on the record :type touched_queries: dictionary :returns: a dictionary with the same keys as touched_queries, and has a subdict as value containing 'query' (XPath expression) and 'result' (query result)""" result = dict() if len(touched_queries) > 0:
return result def perform_queries(records, project_type, project, msgflow): """Returns list for each ACERecord supplied, each specifying from-to which node+terminal between which this record was obtained, together with a query result dictionary. :param records: list of ACERecord instances :type records: list(ACERecord) :param project_type: choice (application/services/rest-apis) :type project_type: string :param project: name of the ACE project :type project: string :param msgflow: name of the ACE message flow in the ACE project :type msgflow: string :returns: a list, with each element containing: - the from-to node+terminal info - a dictionary of queries executed on the record""" all_queries = subdirs_file_content_to_dict(os.path.join(data_dir, project_type, project, msgflow), split_by_line=False, subdict_by_path=True) result = list({'from': {'node': record.source_node, 'terminal': record.source_terminal}, 'to': {'node': record.target_node, 'terminal': record.target_terminal}, 'queries': query_dict_for_record(record, all_queries.get(record.source_node, dict()).get( record.source_terminal, dict()) | all_queries.get(record.target_node, dict()).get( record.target_terminal, dict()))} for record in records) return result class Exerciser(Resource): @auth.login_required def post(self, project_type, project, msgflow, node): """Endpoint to exercise a message. The message is injected into the flow. For this recording and injection must be temporarily enabled on the flow. Test data is obtained, after which instances of ACERecord are created and sorted on flowSequenceNumber. 
For each record, an object is created with the from-to node+terminal info and also the results of the queries matching either the source or target node+terminal :param project_type: choice of either applications/services/rest-apis :type project_type: string :param project: name of the ACE project :type project: string :param msgflow: name of the message flow in the ACE project :type msgflow: string :param node: input node of the message flow the message needs to be injected in :type msgflow: string :returns: list of dictionaries, one per ACERecord, with the exercise results""" result = dict() try: ace_conn.start_recording(project_type, project, msgflow) ace_conn.start_injection(project_type, project, msgflow) ace_conn.inject(project_type, project, msgflow, node, request.data) ace_conn.stop_injection(project_type, project, msgflow) ace_conn.stop_recording(project_type, project, msgflow) test_payload = sorted(filter(lambda x: x.application == project and x.message_flow == msgflow, map(lambda x: ACERecord(x), ace_conn.get_recorded_test_data())), key=lambda x: x.flow_sequence_number) ace_conn.delete_recorded_test_data() result = perform_queries(test_payload, project_type, project, msgflow) except ACEAdminConnectionError as e: result = e, 500 except Exception: result = {"error": "An error not related to ACE connections occurred."}, 500 finally: return result, 200 api.add_resource(Query, '/queries/<string:project_type>/<string:project>/<string:msgflow>/<string:node>/<string:terminal>') api.add_resource(Exerciser, '/exercise/<string:project_type>/<string:project>/<string:msgflow>/<string:node>') app.run(host='0.0.0.0', port=8082, debug=True)
parsed_record = etree.parse(StringIO(record.test_data_xml())) result.update(dict((q_name, {'query': q_value, 'result': list(x.text for x in etree.ETXPath(q_value)(parsed_record))}) for q_name, q_value in touched_queries.items()))
conditional_block
operands.go
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package metamorphic import ( "fmt" "math/rand" "strconv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/hlc" ) type operandType int const ( operandTransaction operandType = iota operandReadWriter operandMVCCKey operandPastTS operandNextTS operandValue operandIterator operandFloat ) const ( maxValueSize = 16 ) // operandGenerator represents an object to generate instances of operands // that can be passed to an operation as an argument. For simplicity, // we create operandGenerators for each type of argument, even primitive ones // like MVCCKeys and values. All state about open objects (iterators, // transactions, writers, etc) during generation should be stored in an // operandGenerator. // // operandGenerators are strictly for generation-time use only; all info about // execution time objects is stored directly in the metaTestRunner. type operandGenerator interface { // get retrieves an instance of this operand. Depending on operand type (eg. // keys), it could also generate and return a new type of an instance. An // operand is represented as a serializable string, that can be converted into // a concrete instance type during execution by calling a get<concrete type>() // or parse() method on the concrete operand generator. get() string // getNew retrieves a new instance of this type of operand. Called when an // opener operation (with isOpener = true) needs an ID to store its output. 
getNew() string // opener returns the name of an operation generator (defined in // operations.go) that always creates a new instance of this object. Called by // the test runner when an operation requires one instance of this // operand to exist, and count() == 0. opener() string // count returns the number of live objects being managed by this generator. // If 0, the opener() operation can be called when necessary. count() int // closeAll closes all managed operands. Used when the test exits, or when a // restart operation executes. closeAll() } func generateBytes(rng *rand.Rand, min int, max int) []byte { // For better readability, stick to lowercase alphabet characters. iterations := min + rng.Intn(max-min) result := make([]byte, 0, iterations) for i := 0; i < iterations; i++ { result = append(result, byte(rng.Float64()*float64('z'-'a')+'a')) } return result } type keyGenerator struct { liveKeys []storage.MVCCKey rng *rand.Rand tsGenerator *tsGenerator } var _ operandGenerator = &keyGenerator{} func (k *keyGenerator) opener() string { return "" } func (k *keyGenerator) count() int { // Always return a nonzero value so opener() is never called directly. return len(k.liveKeys) + 1 } func (k *keyGenerator) open() storage.MVCCKey { var key storage.MVCCKey key.Key = generateBytes(k.rng, 8, maxValueSize) key.Timestamp = k.tsGenerator.lastTS k.liveKeys = append(k.liveKeys, key) return key } func (k *keyGenerator) toString(key storage.MVCCKey) string { return fmt.Sprintf("%s/%d", key.Key, key.Timestamp.WallTime) } func (k *keyGenerator) get() string { // 15% chance of returning a new key even if some exist. if len(k.liveKeys) == 0 || k.rng.Float64() < 0.30 { return k.toString(k.open()) } return k.toString(k.liveKeys[k.rng.Intn(len(k.liveKeys))]) } func (k *keyGenerator) getNew() string { return k.get() } func (k *keyGenerator) closeAll() { // No-op. 
} func (k *keyGenerator) parse(input string) storage.MVCCKey { var key storage.MVCCKey key.Key = make([]byte, 0, maxValueSize) _, err := fmt.Sscanf(input, "%q/%d", &key.Key, &key.Timestamp.WallTime) if err != nil { panic(err) } return key } type valueGenerator struct { rng *rand.Rand } var _ operandGenerator = &valueGenerator{} func (v *valueGenerator) opener() string { return "" } func (v *valueGenerator) count() int { return 1 } func (v *valueGenerator) get() string { return v.toString(generateBytes(v.rng, 4, maxValueSize)) } func (v *valueGenerator) getNew() string { return v.get() } func (v *valueGenerator) closeAll() { // No-op. } func (v *valueGenerator) toString(value []byte) string { return fmt.Sprintf("%s", value) } func (v *valueGenerator) parse(input string) []byte { return []byte(input) } type txnID string type txnGenerator struct { rng *rand.Rand testRunner *metaTestRunner tsGenerator *tsGenerator liveTxns []txnID txnIDMap map[txnID]*roachpb.Transaction openBatches map[txnID]map[readWriterID]struct{} // Counts "generated" transactions - i.e. how many txn_open()s have been // inserted so far. Could stay 0 in check mode. txnGenCounter uint64 } var _ operandGenerator = &txnGenerator{} func (t *txnGenerator) opener() string { return "txn_open" } func (t *txnGenerator) count() int { return len(t.txnIDMap) } func (t *txnGenerator) get() string { if len(t.liveTxns) == 0 { panic("no open txns") } return string(t.liveTxns[t.rng.Intn(len(t.liveTxns))]) } // getNew returns a transaction ID, and saves this transaction as a "live" // transaction for generation purposes. Called only during generation, and // must be matched with a generateClose call. func (t *txnGenerator) getNew() string { t.txnGenCounter++ id := txnID(fmt.Sprintf("t%d", t.txnGenCounter)) // Increment the timestamp. t.tsGenerator.generate() // This ensures count() is correct as of generation time. 
t.txnIDMap[id] = nil t.liveTxns = append(t.liveTxns, id) return string(id) } // generateClose is called when a transaction closing operation is generated. func (t *txnGenerator) generateClose(id txnID) { delete(t.openBatches, id) delete(t.txnIDMap, id) for i := range t.liveTxns { if t.liveTxns[i] == id { t.liveTxns[i] = t.liveTxns[len(t.liveTxns)-1] t.liveTxns = t.liveTxns[:len(t.liveTxns)-1] break } } } func (t *txnGenerator) clearBatch(batch readWriterID) { for _, batches := range t.openBatches { delete(batches, batch) } } func (t *txnGenerator) trackWriteOnBatch(w readWriterID, txn txnID) { if w == "engine" { return } openBatches, ok := t.openBatches[txn] if !ok { t.openBatches[txn] = make(map[readWriterID]struct{}) openBatches = t.openBatches[txn] } openBatches[w] = struct{}{} } func (t *txnGenerator) closeAll() { t.liveTxns = nil t.txnIDMap = make(map[txnID]*roachpb.Transaction) t.openBatches = make(map[txnID]map[readWriterID]struct{}) } type pastTSGenerator struct { rng *rand.Rand tsGenerator *tsGenerator } var _ operandGenerator = &pastTSGenerator{} func (t *pastTSGenerator) opener() string { return "" } func (t *pastTSGenerator) count() int
func (t *pastTSGenerator) closeAll() { // No-op. } func (t *pastTSGenerator) toString(ts hlc.Timestamp) string { return fmt.Sprintf("%d", ts.WallTime) } func (t *pastTSGenerator) parse(input string) hlc.Timestamp { var ts hlc.Timestamp wallTime, err := strconv.ParseInt(input, 10, 0) if err != nil { panic(err) } ts.WallTime = wallTime return ts } func (t *pastTSGenerator) get() string { return t.toString(t.tsGenerator.randomPastTimestamp(t.rng)) } func (t *pastTSGenerator) getNew() string { return t.get() } // Similar to pastTSGenerator, except it always increments the "current" timestamp // and returns the newest one. type nextTSGenerator struct { pastTSGenerator } func (t *nextTSGenerator) get() string { return t.toString(t.tsGenerator.generate()) } func (t *nextTSGenerator) getNew() string { return t.get() } type readWriterID string type readWriterGenerator struct { rng *rand.Rand m *metaTestRunner liveBatches []readWriterID batchIDMap map[readWriterID]storage.Batch batchGenCounter uint64 } var _ operandGenerator = &readWriterGenerator{} func (w *readWriterGenerator) get() string { // 25% chance of returning the engine, even if there are live batches. if len(w.liveBatches) == 0 || w.rng.Float64() < 0.25 { return "engine" } return string(w.liveBatches[w.rng.Intn(len(w.liveBatches))]) } // getNew is called during generation to generate a batch ID. func (w *readWriterGenerator) getNew() string { w.batchGenCounter++ id := readWriterID(fmt.Sprintf("batch%d", w.batchGenCounter)) w.batchIDMap[id] = nil w.liveBatches = append(w.liveBatches, id) return string(id) } func (w *readWriterGenerator) opener() string { return "batch_open" } // generateClose is called during generation when an operation that closes a // readWriter is generated. 
func (w *readWriterGenerator) generateClose(id readWriterID) { if id == "engine" { return } delete(w.batchIDMap, id) for i, batch := range w.liveBatches { if batch == id { w.liveBatches[i] = w.liveBatches[len(w.liveBatches)-1] w.liveBatches = w.liveBatches[:len(w.liveBatches)-1] break } } w.m.txnGenerator.clearBatch(id) } func (w *readWriterGenerator) count() int { return len(w.batchIDMap) + 1 } func (w *readWriterGenerator) closeAll() { for _, batch := range w.batchIDMap { if batch != nil { batch.Close() } } w.liveBatches = w.liveBatches[:0] w.batchIDMap = make(map[readWriterID]storage.Batch) } type iteratorID string type iteratorInfo struct { id iteratorID iter storage.MVCCIterator lowerBound roachpb.Key isBatchIter bool } type iteratorGenerator struct { rng *rand.Rand readerToIter map[readWriterID][]iteratorID iterInfo map[iteratorID]iteratorInfo liveIters []iteratorID iterGenCounter uint64 } var _ operandGenerator = &iteratorGenerator{} func (i *iteratorGenerator) get() string { if len(i.liveIters) == 0 { panic("no open iterators") } return string(i.liveIters[i.rng.Intn(len(i.liveIters))]) } func (i *iteratorGenerator) getNew() string { i.iterGenCounter++ id := fmt.Sprintf("iter%d", i.iterGenCounter) return id } // generateOpen is called during generation to generate an iterator ID for the // specified readWriter. func (i *iteratorGenerator) generateOpen(rwID readWriterID, id iteratorID) { i.iterInfo[id] = iteratorInfo{ id: id, lowerBound: nil, isBatchIter: rwID != "engine", } i.readerToIter[rwID] = append(i.readerToIter[rwID], id) i.liveIters = append(i.liveIters, id) } // generateClose is called during generation when an operation that closes an // iterator is generated. 
func (i *iteratorGenerator) generateClose(id iteratorID) { delete(i.iterInfo, id) // Clear iter from readerToIter for reader, iters := range i.readerToIter { for j, id2 := range iters { if id == id2 { // Delete iters[j] iters[j] = iters[len(iters)-1] i.readerToIter[reader] = iters[:len(iters)-1] break } } } // Clear iter from liveIters for j, iter := range i.liveIters { if id == iter { i.liveIters[j] = i.liveIters[len(i.liveIters)-1] i.liveIters = i.liveIters[:len(i.liveIters)-1] break } } } func (i *iteratorGenerator) opener() string { return "iterator_open" } func (i *iteratorGenerator) count() int { return len(i.iterInfo) } func (i *iteratorGenerator) closeAll() { i.liveIters = nil i.iterInfo = make(map[iteratorID]iteratorInfo) i.readerToIter = make(map[readWriterID][]iteratorID) } type floatGenerator struct { rng *rand.Rand } func (f *floatGenerator) get() string { return fmt.Sprintf("%.4f", f.rng.Float32()) } func (f *floatGenerator) getNew() string { return f.get() } func (f *floatGenerator) parse(input string) float32 { var result float32 if _, err := fmt.Sscanf(input, "%f", &result); err != nil { panic(err) } return result } func (f *floatGenerator) opener() string { // Not applicable, because count() is always nonzero. return "" } func (f *floatGenerator) count() int { return 1 } func (f *floatGenerator) closeAll() { // No-op. }
{ // Always return a non-zero count so opener() is never called. return int(t.tsGenerator.lastTS.WallTime) + 1 }
identifier_body
operands.go
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package metamorphic import ( "fmt" "math/rand" "strconv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/hlc" ) type operandType int const ( operandTransaction operandType = iota operandReadWriter operandMVCCKey operandPastTS operandNextTS operandValue operandIterator operandFloat ) const ( maxValueSize = 16 ) // operandGenerator represents an object to generate instances of operands // that can be passed to an operation as an argument. For simplicity, // we create operandGenerators for each type of argument, even primitive ones // like MVCCKeys and values. All state about open objects (iterators, // transactions, writers, etc) during generation should be stored in an // operandGenerator. // // operandGenerators are strictly for generation-time use only; all info about // execution time objects is stored directly in the metaTestRunner. type operandGenerator interface { // get retrieves an instance of this operand. Depending on operand type (eg. // keys), it could also generate and return a new type of an instance. An // operand is represented as a serializable string, that can be converted into // a concrete instance type during execution by calling a get<concrete type>() // or parse() method on the concrete operand generator. get() string // getNew retrieves a new instance of this type of operand. Called when an // opener operation (with isOpener = true) needs an ID to store its output. 
getNew() string // opener returns the name of an operation generator (defined in // operations.go) that always creates a new instance of this object. Called by // the test runner when an operation requires one instance of this // operand to exist, and count() == 0. opener() string // count returns the number of live objects being managed by this generator. // If 0, the opener() operation can be called when necessary. count() int // closeAll closes all managed operands. Used when the test exits, or when a // restart operation executes. closeAll() } func generateBytes(rng *rand.Rand, min int, max int) []byte { // For better readability, stick to lowercase alphabet characters. iterations := min + rng.Intn(max-min) result := make([]byte, 0, iterations) for i := 0; i < iterations; i++ { result = append(result, byte(rng.Float64()*float64('z'-'a')+'a')) } return result } type keyGenerator struct { liveKeys []storage.MVCCKey rng *rand.Rand tsGenerator *tsGenerator } var _ operandGenerator = &keyGenerator{} func (k *keyGenerator) opener() string { return "" } func (k *keyGenerator) count() int { // Always return a nonzero value so opener() is never called directly. return len(k.liveKeys) + 1 } func (k *keyGenerator) open() storage.MVCCKey { var key storage.MVCCKey key.Key = generateBytes(k.rng, 8, maxValueSize) key.Timestamp = k.tsGenerator.lastTS k.liveKeys = append(k.liveKeys, key) return key } func (k *keyGenerator) toString(key storage.MVCCKey) string { return fmt.Sprintf("%s/%d", key.Key, key.Timestamp.WallTime) } func (k *keyGenerator) get() string { // 15% chance of returning a new key even if some exist. if len(k.liveKeys) == 0 || k.rng.Float64() < 0.30 { return k.toString(k.open()) } return k.toString(k.liveKeys[k.rng.Intn(len(k.liveKeys))]) } func (k *keyGenerator) getNew() string { return k.get() } func (k *keyGenerator) closeAll() { // No-op. 
} func (k *keyGenerator) parse(input string) storage.MVCCKey { var key storage.MVCCKey key.Key = make([]byte, 0, maxValueSize) _, err := fmt.Sscanf(input, "%q/%d", &key.Key, &key.Timestamp.WallTime) if err != nil { panic(err) } return key } type valueGenerator struct { rng *rand.Rand } var _ operandGenerator = &valueGenerator{} func (v *valueGenerator) opener() string { return "" } func (v *valueGenerator) count() int { return 1 } func (v *valueGenerator) get() string { return v.toString(generateBytes(v.rng, 4, maxValueSize)) } func (v *valueGenerator) getNew() string { return v.get() } func (v *valueGenerator) closeAll() { // No-op. } func (v *valueGenerator) toString(value []byte) string { return fmt.Sprintf("%s", value) }
} type txnID string type txnGenerator struct { rng *rand.Rand testRunner *metaTestRunner tsGenerator *tsGenerator liveTxns []txnID txnIDMap map[txnID]*roachpb.Transaction openBatches map[txnID]map[readWriterID]struct{} // Counts "generated" transactions - i.e. how many txn_open()s have been // inserted so far. Could stay 0 in check mode. txnGenCounter uint64 } var _ operandGenerator = &txnGenerator{} func (t *txnGenerator) opener() string { return "txn_open" } func (t *txnGenerator) count() int { return len(t.txnIDMap) } func (t *txnGenerator) get() string { if len(t.liveTxns) == 0 { panic("no open txns") } return string(t.liveTxns[t.rng.Intn(len(t.liveTxns))]) } // getNew returns a transaction ID, and saves this transaction as a "live" // transaction for generation purposes. Called only during generation, and // must be matched with a generateClose call. func (t *txnGenerator) getNew() string { t.txnGenCounter++ id := txnID(fmt.Sprintf("t%d", t.txnGenCounter)) // Increment the timestamp. t.tsGenerator.generate() // This ensures count() is correct as of generation time. t.txnIDMap[id] = nil t.liveTxns = append(t.liveTxns, id) return string(id) } // generateClose is called when a transaction closing operation is generated. 
func (t *txnGenerator) generateClose(id txnID) { delete(t.openBatches, id) delete(t.txnIDMap, id) for i := range t.liveTxns { if t.liveTxns[i] == id { t.liveTxns[i] = t.liveTxns[len(t.liveTxns)-1] t.liveTxns = t.liveTxns[:len(t.liveTxns)-1] break } } } func (t *txnGenerator) clearBatch(batch readWriterID) { for _, batches := range t.openBatches { delete(batches, batch) } } func (t *txnGenerator) trackWriteOnBatch(w readWriterID, txn txnID) { if w == "engine" { return } openBatches, ok := t.openBatches[txn] if !ok { t.openBatches[txn] = make(map[readWriterID]struct{}) openBatches = t.openBatches[txn] } openBatches[w] = struct{}{} } func (t *txnGenerator) closeAll() { t.liveTxns = nil t.txnIDMap = make(map[txnID]*roachpb.Transaction) t.openBatches = make(map[txnID]map[readWriterID]struct{}) } type pastTSGenerator struct { rng *rand.Rand tsGenerator *tsGenerator } var _ operandGenerator = &pastTSGenerator{} func (t *pastTSGenerator) opener() string { return "" } func (t *pastTSGenerator) count() int { // Always return a non-zero count so opener() is never called. return int(t.tsGenerator.lastTS.WallTime) + 1 } func (t *pastTSGenerator) closeAll() { // No-op. } func (t *pastTSGenerator) toString(ts hlc.Timestamp) string { return fmt.Sprintf("%d", ts.WallTime) } func (t *pastTSGenerator) parse(input string) hlc.Timestamp { var ts hlc.Timestamp wallTime, err := strconv.ParseInt(input, 10, 0) if err != nil { panic(err) } ts.WallTime = wallTime return ts } func (t *pastTSGenerator) get() string { return t.toString(t.tsGenerator.randomPastTimestamp(t.rng)) } func (t *pastTSGenerator) getNew() string { return t.get() } // Similar to pastTSGenerator, except it always increments the "current" timestamp // and returns the newest one. 
type nextTSGenerator struct { pastTSGenerator } func (t *nextTSGenerator) get() string { return t.toString(t.tsGenerator.generate()) } func (t *nextTSGenerator) getNew() string { return t.get() } type readWriterID string type readWriterGenerator struct { rng *rand.Rand m *metaTestRunner liveBatches []readWriterID batchIDMap map[readWriterID]storage.Batch batchGenCounter uint64 } var _ operandGenerator = &readWriterGenerator{} func (w *readWriterGenerator) get() string { // 25% chance of returning the engine, even if there are live batches. if len(w.liveBatches) == 0 || w.rng.Float64() < 0.25 { return "engine" } return string(w.liveBatches[w.rng.Intn(len(w.liveBatches))]) } // getNew is called during generation to generate a batch ID. func (w *readWriterGenerator) getNew() string { w.batchGenCounter++ id := readWriterID(fmt.Sprintf("batch%d", w.batchGenCounter)) w.batchIDMap[id] = nil w.liveBatches = append(w.liveBatches, id) return string(id) } func (w *readWriterGenerator) opener() string { return "batch_open" } // generateClose is called during generation when an operation that closes a // readWriter is generated. 
func (w *readWriterGenerator) generateClose(id readWriterID) { if id == "engine" { return } delete(w.batchIDMap, id) for i, batch := range w.liveBatches { if batch == id { w.liveBatches[i] = w.liveBatches[len(w.liveBatches)-1] w.liveBatches = w.liveBatches[:len(w.liveBatches)-1] break } } w.m.txnGenerator.clearBatch(id) } func (w *readWriterGenerator) count() int { return len(w.batchIDMap) + 1 } func (w *readWriterGenerator) closeAll() { for _, batch := range w.batchIDMap { if batch != nil { batch.Close() } } w.liveBatches = w.liveBatches[:0] w.batchIDMap = make(map[readWriterID]storage.Batch) } type iteratorID string type iteratorInfo struct { id iteratorID iter storage.MVCCIterator lowerBound roachpb.Key isBatchIter bool } type iteratorGenerator struct { rng *rand.Rand readerToIter map[readWriterID][]iteratorID iterInfo map[iteratorID]iteratorInfo liveIters []iteratorID iterGenCounter uint64 } var _ operandGenerator = &iteratorGenerator{} func (i *iteratorGenerator) get() string { if len(i.liveIters) == 0 { panic("no open iterators") } return string(i.liveIters[i.rng.Intn(len(i.liveIters))]) } func (i *iteratorGenerator) getNew() string { i.iterGenCounter++ id := fmt.Sprintf("iter%d", i.iterGenCounter) return id } // generateOpen is called during generation to generate an iterator ID for the // specified readWriter. func (i *iteratorGenerator) generateOpen(rwID readWriterID, id iteratorID) { i.iterInfo[id] = iteratorInfo{ id: id, lowerBound: nil, isBatchIter: rwID != "engine", } i.readerToIter[rwID] = append(i.readerToIter[rwID], id) i.liveIters = append(i.liveIters, id) } // generateClose is called during generation when an operation that closes an // iterator is generated. 
func (i *iteratorGenerator) generateClose(id iteratorID) { delete(i.iterInfo, id) // Clear iter from readerToIter for reader, iters := range i.readerToIter { for j, id2 := range iters { if id == id2 { // Delete iters[j] iters[j] = iters[len(iters)-1] i.readerToIter[reader] = iters[:len(iters)-1] break } } } // Clear iter from liveIters for j, iter := range i.liveIters { if id == iter { i.liveIters[j] = i.liveIters[len(i.liveIters)-1] i.liveIters = i.liveIters[:len(i.liveIters)-1] break } } } func (i *iteratorGenerator) opener() string { return "iterator_open" } func (i *iteratorGenerator) count() int { return len(i.iterInfo) } func (i *iteratorGenerator) closeAll() { i.liveIters = nil i.iterInfo = make(map[iteratorID]iteratorInfo) i.readerToIter = make(map[readWriterID][]iteratorID) } type floatGenerator struct { rng *rand.Rand } func (f *floatGenerator) get() string { return fmt.Sprintf("%.4f", f.rng.Float32()) } func (f *floatGenerator) getNew() string { return f.get() } func (f *floatGenerator) parse(input string) float32 { var result float32 if _, err := fmt.Sscanf(input, "%f", &result); err != nil { panic(err) } return result } func (f *floatGenerator) opener() string { // Not applicable, because count() is always nonzero. return "" } func (f *floatGenerator) count() int { return 1 } func (f *floatGenerator) closeAll() { // No-op. }
func (v *valueGenerator) parse(input string) []byte { return []byte(input)
random_line_split
operands.go
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package metamorphic import ( "fmt" "math/rand" "strconv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/hlc" ) type operandType int const ( operandTransaction operandType = iota operandReadWriter operandMVCCKey operandPastTS operandNextTS operandValue operandIterator operandFloat ) const ( maxValueSize = 16 ) // operandGenerator represents an object to generate instances of operands // that can be passed to an operation as an argument. For simplicity, // we create operandGenerators for each type of argument, even primitive ones // like MVCCKeys and values. All state about open objects (iterators, // transactions, writers, etc) during generation should be stored in an // operandGenerator. // // operandGenerators are strictly for generation-time use only; all info about // execution time objects is stored directly in the metaTestRunner. type operandGenerator interface { // get retrieves an instance of this operand. Depending on operand type (eg. // keys), it could also generate and return a new type of an instance. An // operand is represented as a serializable string, that can be converted into // a concrete instance type during execution by calling a get<concrete type>() // or parse() method on the concrete operand generator. get() string // getNew retrieves a new instance of this type of operand. Called when an // opener operation (with isOpener = true) needs an ID to store its output. 
getNew() string // opener returns the name of an operation generator (defined in // operations.go) that always creates a new instance of this object. Called by // the test runner when an operation requires one instance of this // operand to exist, and count() == 0. opener() string // count returns the number of live objects being managed by this generator. // If 0, the opener() operation can be called when necessary. count() int // closeAll closes all managed operands. Used when the test exits, or when a // restart operation executes. closeAll() } func generateBytes(rng *rand.Rand, min int, max int) []byte { // For better readability, stick to lowercase alphabet characters. iterations := min + rng.Intn(max-min) result := make([]byte, 0, iterations) for i := 0; i < iterations; i++ { result = append(result, byte(rng.Float64()*float64('z'-'a')+'a')) } return result } type keyGenerator struct { liveKeys []storage.MVCCKey rng *rand.Rand tsGenerator *tsGenerator } var _ operandGenerator = &keyGenerator{} func (k *keyGenerator) opener() string { return "" } func (k *keyGenerator) count() int { // Always return a nonzero value so opener() is never called directly. return len(k.liveKeys) + 1 } func (k *keyGenerator) open() storage.MVCCKey { var key storage.MVCCKey key.Key = generateBytes(k.rng, 8, maxValueSize) key.Timestamp = k.tsGenerator.lastTS k.liveKeys = append(k.liveKeys, key) return key } func (k *keyGenerator) toString(key storage.MVCCKey) string { return fmt.Sprintf("%s/%d", key.Key, key.Timestamp.WallTime) } func (k *keyGenerator) get() string { // 15% chance of returning a new key even if some exist. if len(k.liveKeys) == 0 || k.rng.Float64() < 0.30 { return k.toString(k.open()) } return k.toString(k.liveKeys[k.rng.Intn(len(k.liveKeys))]) } func (k *keyGenerator) getNew() string { return k.get() } func (k *keyGenerator) closeAll() { // No-op. 
} func (k *keyGenerator) parse(input string) storage.MVCCKey { var key storage.MVCCKey key.Key = make([]byte, 0, maxValueSize) _, err := fmt.Sscanf(input, "%q/%d", &key.Key, &key.Timestamp.WallTime) if err != nil { panic(err) } return key } type valueGenerator struct { rng *rand.Rand } var _ operandGenerator = &valueGenerator{} func (v *valueGenerator) opener() string { return "" } func (v *valueGenerator) count() int { return 1 } func (v *valueGenerator) get() string { return v.toString(generateBytes(v.rng, 4, maxValueSize)) } func (v *valueGenerator) getNew() string { return v.get() } func (v *valueGenerator) closeAll() { // No-op. } func (v *valueGenerator) toString(value []byte) string { return fmt.Sprintf("%s", value) } func (v *valueGenerator) parse(input string) []byte { return []byte(input) } type txnID string type txnGenerator struct { rng *rand.Rand testRunner *metaTestRunner tsGenerator *tsGenerator liveTxns []txnID txnIDMap map[txnID]*roachpb.Transaction openBatches map[txnID]map[readWriterID]struct{} // Counts "generated" transactions - i.e. how many txn_open()s have been // inserted so far. Could stay 0 in check mode. txnGenCounter uint64 } var _ operandGenerator = &txnGenerator{} func (t *txnGenerator) opener() string { return "txn_open" } func (t *txnGenerator) count() int { return len(t.txnIDMap) } func (t *txnGenerator) get() string { if len(t.liveTxns) == 0 { panic("no open txns") } return string(t.liveTxns[t.rng.Intn(len(t.liveTxns))]) } // getNew returns a transaction ID, and saves this transaction as a "live" // transaction for generation purposes. Called only during generation, and // must be matched with a generateClose call. func (t *txnGenerator) getNew() string { t.txnGenCounter++ id := txnID(fmt.Sprintf("t%d", t.txnGenCounter)) // Increment the timestamp. t.tsGenerator.generate() // This ensures count() is correct as of generation time. 
t.txnIDMap[id] = nil t.liveTxns = append(t.liveTxns, id) return string(id) } // generateClose is called when a transaction closing operation is generated. func (t *txnGenerator) generateClose(id txnID) { delete(t.openBatches, id) delete(t.txnIDMap, id) for i := range t.liveTxns { if t.liveTxns[i] == id { t.liveTxns[i] = t.liveTxns[len(t.liveTxns)-1] t.liveTxns = t.liveTxns[:len(t.liveTxns)-1] break } } } func (t *txnGenerator) clearBatch(batch readWriterID) { for _, batches := range t.openBatches { delete(batches, batch) } } func (t *txnGenerator) trackWriteOnBatch(w readWriterID, txn txnID) { if w == "engine" { return } openBatches, ok := t.openBatches[txn] if !ok
openBatches[w] = struct{}{} } func (t *txnGenerator) closeAll() { t.liveTxns = nil t.txnIDMap = make(map[txnID]*roachpb.Transaction) t.openBatches = make(map[txnID]map[readWriterID]struct{}) } type pastTSGenerator struct { rng *rand.Rand tsGenerator *tsGenerator } var _ operandGenerator = &pastTSGenerator{} func (t *pastTSGenerator) opener() string { return "" } func (t *pastTSGenerator) count() int { // Always return a non-zero count so opener() is never called. return int(t.tsGenerator.lastTS.WallTime) + 1 } func (t *pastTSGenerator) closeAll() { // No-op. } func (t *pastTSGenerator) toString(ts hlc.Timestamp) string { return fmt.Sprintf("%d", ts.WallTime) } func (t *pastTSGenerator) parse(input string) hlc.Timestamp { var ts hlc.Timestamp wallTime, err := strconv.ParseInt(input, 10, 0) if err != nil { panic(err) } ts.WallTime = wallTime return ts } func (t *pastTSGenerator) get() string { return t.toString(t.tsGenerator.randomPastTimestamp(t.rng)) } func (t *pastTSGenerator) getNew() string { return t.get() } // Similar to pastTSGenerator, except it always increments the "current" timestamp // and returns the newest one. type nextTSGenerator struct { pastTSGenerator } func (t *nextTSGenerator) get() string { return t.toString(t.tsGenerator.generate()) } func (t *nextTSGenerator) getNew() string { return t.get() } type readWriterID string type readWriterGenerator struct { rng *rand.Rand m *metaTestRunner liveBatches []readWriterID batchIDMap map[readWriterID]storage.Batch batchGenCounter uint64 } var _ operandGenerator = &readWriterGenerator{} func (w *readWriterGenerator) get() string { // 25% chance of returning the engine, even if there are live batches. if len(w.liveBatches) == 0 || w.rng.Float64() < 0.25 { return "engine" } return string(w.liveBatches[w.rng.Intn(len(w.liveBatches))]) } // getNew is called during generation to generate a batch ID. 
func (w *readWriterGenerator) getNew() string { w.batchGenCounter++ id := readWriterID(fmt.Sprintf("batch%d", w.batchGenCounter)) w.batchIDMap[id] = nil w.liveBatches = append(w.liveBatches, id) return string(id) } func (w *readWriterGenerator) opener() string { return "batch_open" } // generateClose is called during generation when an operation that closes a // readWriter is generated. func (w *readWriterGenerator) generateClose(id readWriterID) { if id == "engine" { return } delete(w.batchIDMap, id) for i, batch := range w.liveBatches { if batch == id { w.liveBatches[i] = w.liveBatches[len(w.liveBatches)-1] w.liveBatches = w.liveBatches[:len(w.liveBatches)-1] break } } w.m.txnGenerator.clearBatch(id) } func (w *readWriterGenerator) count() int { return len(w.batchIDMap) + 1 } func (w *readWriterGenerator) closeAll() { for _, batch := range w.batchIDMap { if batch != nil { batch.Close() } } w.liveBatches = w.liveBatches[:0] w.batchIDMap = make(map[readWriterID]storage.Batch) } type iteratorID string type iteratorInfo struct { id iteratorID iter storage.MVCCIterator lowerBound roachpb.Key isBatchIter bool } type iteratorGenerator struct { rng *rand.Rand readerToIter map[readWriterID][]iteratorID iterInfo map[iteratorID]iteratorInfo liveIters []iteratorID iterGenCounter uint64 } var _ operandGenerator = &iteratorGenerator{} func (i *iteratorGenerator) get() string { if len(i.liveIters) == 0 { panic("no open iterators") } return string(i.liveIters[i.rng.Intn(len(i.liveIters))]) } func (i *iteratorGenerator) getNew() string { i.iterGenCounter++ id := fmt.Sprintf("iter%d", i.iterGenCounter) return id } // generateOpen is called during generation to generate an iterator ID for the // specified readWriter. 
func (i *iteratorGenerator) generateOpen(rwID readWriterID, id iteratorID) { i.iterInfo[id] = iteratorInfo{ id: id, lowerBound: nil, isBatchIter: rwID != "engine", } i.readerToIter[rwID] = append(i.readerToIter[rwID], id) i.liveIters = append(i.liveIters, id) } // generateClose is called during generation when an operation that closes an // iterator is generated. func (i *iteratorGenerator) generateClose(id iteratorID) { delete(i.iterInfo, id) // Clear iter from readerToIter for reader, iters := range i.readerToIter { for j, id2 := range iters { if id == id2 { // Delete iters[j] iters[j] = iters[len(iters)-1] i.readerToIter[reader] = iters[:len(iters)-1] break } } } // Clear iter from liveIters for j, iter := range i.liveIters { if id == iter { i.liveIters[j] = i.liveIters[len(i.liveIters)-1] i.liveIters = i.liveIters[:len(i.liveIters)-1] break } } } func (i *iteratorGenerator) opener() string { return "iterator_open" } func (i *iteratorGenerator) count() int { return len(i.iterInfo) } func (i *iteratorGenerator) closeAll() { i.liveIters = nil i.iterInfo = make(map[iteratorID]iteratorInfo) i.readerToIter = make(map[readWriterID][]iteratorID) } type floatGenerator struct { rng *rand.Rand } func (f *floatGenerator) get() string { return fmt.Sprintf("%.4f", f.rng.Float32()) } func (f *floatGenerator) getNew() string { return f.get() } func (f *floatGenerator) parse(input string) float32 { var result float32 if _, err := fmt.Sscanf(input, "%f", &result); err != nil { panic(err) } return result } func (f *floatGenerator) opener() string { // Not applicable, because count() is always nonzero. return "" } func (f *floatGenerator) count() int { return 1 } func (f *floatGenerator) closeAll() { // No-op. }
{ t.openBatches[txn] = make(map[readWriterID]struct{}) openBatches = t.openBatches[txn] }
conditional_block
operands.go
// Copyright 2020 The Cockroach Authors. // // Use of this software is governed by the Business Source License // included in the file licenses/BSL.txt. // // As of the Change Date specified in that file, in accordance with // the Business Source License, use of this software will be governed // by the Apache License, Version 2.0, included in the file // licenses/APL.txt. package metamorphic import ( "fmt" "math/rand" "strconv" "github.com/cockroachdb/cockroach/pkg/roachpb" "github.com/cockroachdb/cockroach/pkg/storage" "github.com/cockroachdb/cockroach/pkg/util/hlc" ) type operandType int const ( operandTransaction operandType = iota operandReadWriter operandMVCCKey operandPastTS operandNextTS operandValue operandIterator operandFloat ) const ( maxValueSize = 16 ) // operandGenerator represents an object to generate instances of operands // that can be passed to an operation as an argument. For simplicity, // we create operandGenerators for each type of argument, even primitive ones // like MVCCKeys and values. All state about open objects (iterators, // transactions, writers, etc) during generation should be stored in an // operandGenerator. // // operandGenerators are strictly for generation-time use only; all info about // execution time objects is stored directly in the metaTestRunner. type operandGenerator interface { // get retrieves an instance of this operand. Depending on operand type (eg. // keys), it could also generate and return a new type of an instance. An // operand is represented as a serializable string, that can be converted into // a concrete instance type during execution by calling a get<concrete type>() // or parse() method on the concrete operand generator. get() string // getNew retrieves a new instance of this type of operand. Called when an // opener operation (with isOpener = true) needs an ID to store its output. 
getNew() string // opener returns the name of an operation generator (defined in // operations.go) that always creates a new instance of this object. Called by // the test runner when an operation requires one instance of this // operand to exist, and count() == 0. opener() string // count returns the number of live objects being managed by this generator. // If 0, the opener() operation can be called when necessary. count() int // closeAll closes all managed operands. Used when the test exits, or when a // restart operation executes. closeAll() } func generateBytes(rng *rand.Rand, min int, max int) []byte { // For better readability, stick to lowercase alphabet characters. iterations := min + rng.Intn(max-min) result := make([]byte, 0, iterations) for i := 0; i < iterations; i++ { result = append(result, byte(rng.Float64()*float64('z'-'a')+'a')) } return result } type keyGenerator struct { liveKeys []storage.MVCCKey rng *rand.Rand tsGenerator *tsGenerator } var _ operandGenerator = &keyGenerator{} func (k *keyGenerator) opener() string { return "" } func (k *keyGenerator) count() int { // Always return a nonzero value so opener() is never called directly. return len(k.liveKeys) + 1 } func (k *keyGenerator) open() storage.MVCCKey { var key storage.MVCCKey key.Key = generateBytes(k.rng, 8, maxValueSize) key.Timestamp = k.tsGenerator.lastTS k.liveKeys = append(k.liveKeys, key) return key } func (k *keyGenerator) toString(key storage.MVCCKey) string { return fmt.Sprintf("%s/%d", key.Key, key.Timestamp.WallTime) } func (k *keyGenerator) get() string { // 15% chance of returning a new key even if some exist. if len(k.liveKeys) == 0 || k.rng.Float64() < 0.30 { return k.toString(k.open()) } return k.toString(k.liveKeys[k.rng.Intn(len(k.liveKeys))]) } func (k *keyGenerator) getNew() string { return k.get() } func (k *keyGenerator) closeAll() { // No-op. 
} func (k *keyGenerator) parse(input string) storage.MVCCKey { var key storage.MVCCKey key.Key = make([]byte, 0, maxValueSize) _, err := fmt.Sscanf(input, "%q/%d", &key.Key, &key.Timestamp.WallTime) if err != nil { panic(err) } return key } type valueGenerator struct { rng *rand.Rand } var _ operandGenerator = &valueGenerator{} func (v *valueGenerator) opener() string { return "" } func (v *valueGenerator) count() int { return 1 } func (v *valueGenerator) get() string { return v.toString(generateBytes(v.rng, 4, maxValueSize)) } func (v *valueGenerator) getNew() string { return v.get() } func (v *valueGenerator) closeAll() { // No-op. } func (v *valueGenerator) toString(value []byte) string { return fmt.Sprintf("%s", value) } func (v *valueGenerator) parse(input string) []byte { return []byte(input) } type txnID string type txnGenerator struct { rng *rand.Rand testRunner *metaTestRunner tsGenerator *tsGenerator liveTxns []txnID txnIDMap map[txnID]*roachpb.Transaction openBatches map[txnID]map[readWriterID]struct{} // Counts "generated" transactions - i.e. how many txn_open()s have been // inserted so far. Could stay 0 in check mode. txnGenCounter uint64 } var _ operandGenerator = &txnGenerator{} func (t *txnGenerator) opener() string { return "txn_open" } func (t *txnGenerator) count() int { return len(t.txnIDMap) } func (t *txnGenerator) get() string { if len(t.liveTxns) == 0 { panic("no open txns") } return string(t.liveTxns[t.rng.Intn(len(t.liveTxns))]) } // getNew returns a transaction ID, and saves this transaction as a "live" // transaction for generation purposes. Called only during generation, and // must be matched with a generateClose call. func (t *txnGenerator) getNew() string { t.txnGenCounter++ id := txnID(fmt.Sprintf("t%d", t.txnGenCounter)) // Increment the timestamp. t.tsGenerator.generate() // This ensures count() is correct as of generation time. 
t.txnIDMap[id] = nil t.liveTxns = append(t.liveTxns, id) return string(id) } // generateClose is called when a transaction closing operation is generated. func (t *txnGenerator) generateClose(id txnID) { delete(t.openBatches, id) delete(t.txnIDMap, id) for i := range t.liveTxns { if t.liveTxns[i] == id { t.liveTxns[i] = t.liveTxns[len(t.liveTxns)-1] t.liveTxns = t.liveTxns[:len(t.liveTxns)-1] break } } } func (t *txnGenerator) clearBatch(batch readWriterID) { for _, batches := range t.openBatches { delete(batches, batch) } } func (t *txnGenerator) trackWriteOnBatch(w readWriterID, txn txnID) { if w == "engine" { return } openBatches, ok := t.openBatches[txn] if !ok { t.openBatches[txn] = make(map[readWriterID]struct{}) openBatches = t.openBatches[txn] } openBatches[w] = struct{}{} } func (t *txnGenerator) closeAll() { t.liveTxns = nil t.txnIDMap = make(map[txnID]*roachpb.Transaction) t.openBatches = make(map[txnID]map[readWriterID]struct{}) } type pastTSGenerator struct { rng *rand.Rand tsGenerator *tsGenerator } var _ operandGenerator = &pastTSGenerator{} func (t *pastTSGenerator) opener() string { return "" } func (t *pastTSGenerator) count() int { // Always return a non-zero count so opener() is never called. return int(t.tsGenerator.lastTS.WallTime) + 1 } func (t *pastTSGenerator) closeAll() { // No-op. } func (t *pastTSGenerator) toString(ts hlc.Timestamp) string { return fmt.Sprintf("%d", ts.WallTime) } func (t *pastTSGenerator) parse(input string) hlc.Timestamp { var ts hlc.Timestamp wallTime, err := strconv.ParseInt(input, 10, 0) if err != nil { panic(err) } ts.WallTime = wallTime return ts } func (t *pastTSGenerator)
() string { return t.toString(t.tsGenerator.randomPastTimestamp(t.rng)) } func (t *pastTSGenerator) getNew() string { return t.get() } // Similar to pastTSGenerator, except it always increments the "current" timestamp // and returns the newest one. type nextTSGenerator struct { pastTSGenerator } func (t *nextTSGenerator) get() string { return t.toString(t.tsGenerator.generate()) } func (t *nextTSGenerator) getNew() string { return t.get() } type readWriterID string type readWriterGenerator struct { rng *rand.Rand m *metaTestRunner liveBatches []readWriterID batchIDMap map[readWriterID]storage.Batch batchGenCounter uint64 } var _ operandGenerator = &readWriterGenerator{} func (w *readWriterGenerator) get() string { // 25% chance of returning the engine, even if there are live batches. if len(w.liveBatches) == 0 || w.rng.Float64() < 0.25 { return "engine" } return string(w.liveBatches[w.rng.Intn(len(w.liveBatches))]) } // getNew is called during generation to generate a batch ID. func (w *readWriterGenerator) getNew() string { w.batchGenCounter++ id := readWriterID(fmt.Sprintf("batch%d", w.batchGenCounter)) w.batchIDMap[id] = nil w.liveBatches = append(w.liveBatches, id) return string(id) } func (w *readWriterGenerator) opener() string { return "batch_open" } // generateClose is called during generation when an operation that closes a // readWriter is generated. 
func (w *readWriterGenerator) generateClose(id readWriterID) { if id == "engine" { return } delete(w.batchIDMap, id) for i, batch := range w.liveBatches { if batch == id { w.liveBatches[i] = w.liveBatches[len(w.liveBatches)-1] w.liveBatches = w.liveBatches[:len(w.liveBatches)-1] break } } w.m.txnGenerator.clearBatch(id) } func (w *readWriterGenerator) count() int { return len(w.batchIDMap) + 1 } func (w *readWriterGenerator) closeAll() { for _, batch := range w.batchIDMap { if batch != nil { batch.Close() } } w.liveBatches = w.liveBatches[:0] w.batchIDMap = make(map[readWriterID]storage.Batch) } type iteratorID string type iteratorInfo struct { id iteratorID iter storage.MVCCIterator lowerBound roachpb.Key isBatchIter bool } type iteratorGenerator struct { rng *rand.Rand readerToIter map[readWriterID][]iteratorID iterInfo map[iteratorID]iteratorInfo liveIters []iteratorID iterGenCounter uint64 } var _ operandGenerator = &iteratorGenerator{} func (i *iteratorGenerator) get() string { if len(i.liveIters) == 0 { panic("no open iterators") } return string(i.liveIters[i.rng.Intn(len(i.liveIters))]) } func (i *iteratorGenerator) getNew() string { i.iterGenCounter++ id := fmt.Sprintf("iter%d", i.iterGenCounter) return id } // generateOpen is called during generation to generate an iterator ID for the // specified readWriter. func (i *iteratorGenerator) generateOpen(rwID readWriterID, id iteratorID) { i.iterInfo[id] = iteratorInfo{ id: id, lowerBound: nil, isBatchIter: rwID != "engine", } i.readerToIter[rwID] = append(i.readerToIter[rwID], id) i.liveIters = append(i.liveIters, id) } // generateClose is called during generation when an operation that closes an // iterator is generated. 
func (i *iteratorGenerator) generateClose(id iteratorID) { delete(i.iterInfo, id) // Clear iter from readerToIter for reader, iters := range i.readerToIter { for j, id2 := range iters { if id == id2 { // Delete iters[j] iters[j] = iters[len(iters)-1] i.readerToIter[reader] = iters[:len(iters)-1] break } } } // Clear iter from liveIters for j, iter := range i.liveIters { if id == iter { i.liveIters[j] = i.liveIters[len(i.liveIters)-1] i.liveIters = i.liveIters[:len(i.liveIters)-1] break } } } func (i *iteratorGenerator) opener() string { return "iterator_open" } func (i *iteratorGenerator) count() int { return len(i.iterInfo) } func (i *iteratorGenerator) closeAll() { i.liveIters = nil i.iterInfo = make(map[iteratorID]iteratorInfo) i.readerToIter = make(map[readWriterID][]iteratorID) } type floatGenerator struct { rng *rand.Rand } func (f *floatGenerator) get() string { return fmt.Sprintf("%.4f", f.rng.Float32()) } func (f *floatGenerator) getNew() string { return f.get() } func (f *floatGenerator) parse(input string) float32 { var result float32 if _, err := fmt.Sscanf(input, "%f", &result); err != nil { panic(err) } return result } func (f *floatGenerator) opener() string { // Not applicable, because count() is always nonzero. return "" } func (f *floatGenerator) count() int { return 1 } func (f *floatGenerator) closeAll() { // No-op. }
get
identifier_name
benchmark.rs
#![feature(duration_as_u128)] //! Executes all mjtests in the /exec/big folder. use compiler_cli::optimization_arg; use compiler_shared::timing::{AsciiDisp, CompilerMeasurements, SingleMeasurement}; use humantime::format_duration; use optimization; use regex::Regex; use runner_integration_tests::{compiler_call, Backend, CompilerCall, CompilerPhase}; use stats::OnlineStats; use std::{ collections::HashMap, ffi::OsStr, fmt, fs::{self, File, OpenOptions}, io::{self, BufReader}, path::PathBuf, process::Command, time::{Duration, SystemTime}, }; use structopt::StructOpt; fn test_folder() -> PathBuf { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../mjtest-rs/tests") } #[derive(Debug, Clone)] struct BigTest { minijava: PathBuf, stdin: Option<PathBuf>, } fn big_tests() -> Vec<BigTest> { let dirpath = test_folder().join("exec/big"); log::info!("test directory is {}", dirpath.display()); let dirlisting = fs::read_dir(dirpath).unwrap(); let mut big_tests = vec![]; for entry in dirlisting { let path = entry.unwrap().path(); if path.extension() == Some(OsStr::new("java")) { let test = BigTest { stdin: { let mut stdin = path.clone(); let stem = path.file_stem().unwrap(); // remove extension stdin.pop(); stdin.push(stem); // set new extension // TODO: support multiple input files stdin.set_extension("0.inputc"); log::debug!("looking for stdin file at {}", stdin.display()); if stdin.is_file() { Some(stdin) } else { None } }, minijava: path, }; log::debug!("Found test: {:?}", test); big_tests.push(test); } } big_tests } fn profile_compiler( test: &BigTest, optimizations: optimization::Level, backend: Backend, ) -> Option<(PathBuf, CompilerMeasurements)> { let outpath = test.minijava.with_extension("benchmark.out"); let mut cmd = compiler_call( CompilerCall::RawCompiler(CompilerPhase::Binary { backend, // TODO: use temp dir, don't trash output: outpath.clone(), assembly: None, optimizations, }), &test.minijava, ); let measurement_path = "measurement.json"; cmd.env("MEASURE_JSON", 
measurement_path); // TODO: run and benchmark the binary //if let Some(stdin_path) = test.stdin { //cmd.stdin(Stdio::piped()); //let mut stdin_reader = File::open(&stdin_path).expect("failed to open stdin // file"); io::copy(&mut stdin_reader, stdin) //.expect("failed to write to stdin of binary"); //} log::debug!("calling compiler as: {:?}", cmd); match cmd.status() { Ok(status) if status.success() => (), Ok(status) => { log::error!("compiler failed with non-zero exit code: {:?}", status); return None; } Err(msg) => { log::error!("compiler crash {:?}", msg); return None; } } let stats_file = File::open(measurement_path).unwrap(); let stats_reader = BufReader::new(stats_file); let profile = serde_json::from_reader(stats_reader).unwrap(); log::debug!("Stats:\n{}", AsciiDisp(&profile)); Some((outpath, profile)) } #[derive(StructOpt)] #[structopt(name = "benchmark")] /// Small utility to benchmark each step of the compiler pipeline pub struct Opts { /// Number of invokations per test file #[structopt(short = "s", long = "samples", default_value = "2")] samples: usize, /// Only test filenames matching the given regex are benchmarked #[structopt(short = "o", long = "only", default_value = "")] filter: Regex, /// Optimization level that should be applied #[structopt(long = "--optimization", short = "-O", default_value = "aggressive")] opt_level: optimization_arg::Arg, #[structopt(long = "--backend", short = "-b")] backend: Backend, } #[derive(serde_derive::Serialize, serde_derive::Deserialize)] struct ReferenceBenchmark { mean: f64, timestamp: SystemTime, } #[derive(serde_derive::Serialize, serde_derive::Deserialize)] struct ReferenceFormat { // TODO: in contrast to the other code in this file this does // not support multiple benchmarks with identical names measurements: HashMap<String, ReferenceBenchmark>, } impl ReferenceFormat { fn new() -> Self { Self { measurements: HashMap::new(), } } } fn main() { env_logger::init(); let opts = Opts::from_args(); for big_test in 
&big_tests() { if !opts.filter.is_match(&big_test.minijava.to_string_lossy()) { log::info!("skipping {}", big_test.minijava.display()); continue; } let mut bench = Benchmark::new(big_test.minijava.clone()); let mut out = None; for _ in 0..opts.samples { if let Some((outbinary, timings)) = profile_compiler(big_test, opts.opt_level.clone().into(), opts.backend) { bench.add(&timings); out = Some(outbinary); } } let title = format!( "BENCHMARK {}", big_test.minijava.file_stem().unwrap().to_string_lossy() ); bench.load_reference_from_disk(); println!("{}\n{}\n", title, "=".repeat(title.len())); println!("{}\n", bench); bench.write_to_disk(); if let (Ok(cmd_str), Some(binary_path)) = ( if big_test.stdin.is_some() { std::env::var("COMPILED_PROGRAM_BENCHMARK_WITH_STDIN") } else { std::env::var("COMPILED_PROGRAM_BENCHMARK") }, out, ) { let cmd_str = cmd_str.replace("BINARY_PATH", binary_path.as_path().to_str().unwrap()); let cmd_str = if let Some(stdin_file) = &big_test.stdin { cmd_str.replace( "INPUT_PATH", &stdin_file.as_path().to_str().unwrap().to_owned(), ) } else { cmd_str }; let pieces = shell_words::split(&cmd_str).expect("invalid program benchmark command"); let (prog, args) = pieces.split_at(1); let mut cmd = Command::new(&prog[0]); cmd.args(args); log::debug!("Benchmarking generated binary using: {:?}", cmd); match cmd.status() { Ok(status) if status.success() => {} Ok(status) => { log::error!( "binary benchmark failed with non-zero exit code: {:?}", status ); } Err(msg) => { log::error!("binary benchmark crash {:?}", msg); } } } } } #[derive(Debug, Clone)] pub struct BenchmarkEntry { label: String, indent: usize, stats: OnlineStats, } pub struct Benchmark { file: PathBuf, measurements: Vec<BenchmarkEntry>, reference: Option<ReferenceFormat>, } impl Benchmark { pub fn new(file: PathBuf) -> Self { Self { measurements: Vec::new(), reference: None, file, } } pub fn add(&mut self, measurements: &[SingleMeasurement]) { if self.measurements.is_empty() { for measurement 
in measurements { self.measurements.push(BenchmarkEntry { label: measurement.label.clone(), indent: measurement.indent, stats: OnlineStats::from_slice(&[measurement.duration.as_millis()]), }); } return; } if !self.is_compatible(measurements) { panic!("measurements incomplete"); } for (i, item) in measurements.iter().enumerate() { self.measurements[i].stats.add(item.duration.as_millis()); } } fn is_compatible(&self, measurements: &[SingleMeasurement]) -> bool { if measurements.len() != self.measurements.len() { return false; } for (this, other) in self.measurements.iter().zip(measurements.iter()) { if this.label != other.label || this.indent != other.indent { return false; } } true } fn filename(&self) -> PathBuf { self.file.with_extension("benchmark.json") } fn write_to_disk(&self) { let mut diskformat = ReferenceFormat::new(); let now = SystemTime::now(); for measurement in self.measurements.iter() { diskformat.measurements.insert( measurement.label.clone(), ReferenceBenchmark { mean: measurement.stats.mean(), timestamp: now, }, ); } match OpenOptions::new() .write(true) .truncate(true) .create(true) .open(self.filename()) { Ok(outfile) =>
Err(msg) => { log::debug!( "could not open file for reference benchmark of {}: {:?}", self.file.display(), msg ); } } } fn load_reference_from_disk(&mut self) { if let Ok(res) = self._load_reference() { self.reference = Some(res); } else { log::debug!( "could not find reference benchmark for {}", self.file.display() ); } } fn _load_reference(&mut self) -> io::Result<ReferenceFormat> { let file = File::open(self.filename())?; let reader = BufReader::new(file); // Read the JSON contents of the file as an instance of `User`. let u = serde_json::from_reader(reader)?; Ok(u) } } impl fmt::Display for Benchmark { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let min_label_width = 50; let now = SystemTime::now(); for timing in &self.measurements { let indent = " ".repeat(timing.indent); let (change, timestamp) = if let Some(previous_invocation) = &self.reference { if let Some(previous_result) = previous_invocation.measurements.get(&timing.label) { let change = (timing.stats.mean() / previous_result.mean * 100.0) - 100.0; let reference_date = now.duration_since(previous_result.timestamp).unwrap(); // remove some precession let reference_date = Duration::from_secs(reference_date.as_secs()); ( format!("{: >+8.3}%", change), format_duration(reference_date).to_string(), ) } else { ("n/a".to_string(), "".to_string()) } } else { ("".to_string(), "".to_string()) }; writeln!( f, "{nesting}{label: <label_width$} \ {ms: >ms_width$.5} +/- {stddev: >stddev_width$.5}ms \ {samples} samples \ {change} {timestamp} ago", label = timing.label, ms = timing.stats.mean(), stddev = timing.stats.stddev(), samples = timing.stats.len(), nesting = indent, label_width = min_label_width - indent.len(), ms_width = 20, stddev_width = 10, change = change, timestamp = timestamp )?; } Ok(()) } }
{ if let Err(msg) = serde_json::ser::to_writer(&outfile, &diskformat) { log::debug!( "could not write file for reference benchmark of {}: {:?}", self.file.display(), msg ); } }
conditional_block
benchmark.rs
#![feature(duration_as_u128)] //! Executes all mjtests in the /exec/big folder. use compiler_cli::optimization_arg; use compiler_shared::timing::{AsciiDisp, CompilerMeasurements, SingleMeasurement}; use humantime::format_duration; use optimization; use regex::Regex; use runner_integration_tests::{compiler_call, Backend, CompilerCall, CompilerPhase}; use stats::OnlineStats; use std::{ collections::HashMap, ffi::OsStr, fmt, fs::{self, File, OpenOptions}, io::{self, BufReader}, path::PathBuf, process::Command, time::{Duration, SystemTime}, }; use structopt::StructOpt; fn test_folder() -> PathBuf { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../mjtest-rs/tests") } #[derive(Debug, Clone)] struct BigTest { minijava: PathBuf, stdin: Option<PathBuf>, } fn big_tests() -> Vec<BigTest> { let dirpath = test_folder().join("exec/big"); log::info!("test directory is {}", dirpath.display()); let dirlisting = fs::read_dir(dirpath).unwrap(); let mut big_tests = vec![]; for entry in dirlisting { let path = entry.unwrap().path(); if path.extension() == Some(OsStr::new("java")) { let test = BigTest { stdin: { let mut stdin = path.clone(); let stem = path.file_stem().unwrap(); // remove extension stdin.pop(); stdin.push(stem); // set new extension // TODO: support multiple input files stdin.set_extension("0.inputc"); log::debug!("looking for stdin file at {}", stdin.display()); if stdin.is_file() { Some(stdin) } else { None } }, minijava: path, }; log::debug!("Found test: {:?}", test); big_tests.push(test); } } big_tests } fn profile_compiler( test: &BigTest, optimizations: optimization::Level, backend: Backend, ) -> Option<(PathBuf, CompilerMeasurements)> { let outpath = test.minijava.with_extension("benchmark.out"); let mut cmd = compiler_call( CompilerCall::RawCompiler(CompilerPhase::Binary { backend, // TODO: use temp dir, don't trash output: outpath.clone(), assembly: None, optimizations, }), &test.minijava, ); let measurement_path = "measurement.json"; cmd.env("MEASURE_JSON", 
measurement_path); // TODO: run and benchmark the binary //if let Some(stdin_path) = test.stdin { //cmd.stdin(Stdio::piped()); //let mut stdin_reader = File::open(&stdin_path).expect("failed to open stdin // file"); io::copy(&mut stdin_reader, stdin) //.expect("failed to write to stdin of binary"); //} log::debug!("calling compiler as: {:?}", cmd); match cmd.status() { Ok(status) if status.success() => (), Ok(status) => { log::error!("compiler failed with non-zero exit code: {:?}", status); return None; } Err(msg) => { log::error!("compiler crash {:?}", msg); return None; } } let stats_file = File::open(measurement_path).unwrap(); let stats_reader = BufReader::new(stats_file); let profile = serde_json::from_reader(stats_reader).unwrap(); log::debug!("Stats:\n{}", AsciiDisp(&profile)); Some((outpath, profile)) } #[derive(StructOpt)] #[structopt(name = "benchmark")] /// Small utility to benchmark each step of the compiler pipeline pub struct Opts { /// Number of invokations per test file #[structopt(short = "s", long = "samples", default_value = "2")] samples: usize, /// Only test filenames matching the given regex are benchmarked #[structopt(short = "o", long = "only", default_value = "")] filter: Regex, /// Optimization level that should be applied #[structopt(long = "--optimization", short = "-O", default_value = "aggressive")] opt_level: optimization_arg::Arg, #[structopt(long = "--backend", short = "-b")] backend: Backend, } #[derive(serde_derive::Serialize, serde_derive::Deserialize)] struct ReferenceBenchmark { mean: f64, timestamp: SystemTime, } #[derive(serde_derive::Serialize, serde_derive::Deserialize)] struct ReferenceFormat { // TODO: in contrast to the other code in this file this does // not support multiple benchmarks with identical names measurements: HashMap<String, ReferenceBenchmark>, } impl ReferenceFormat { fn new() -> Self
} fn main() { env_logger::init(); let opts = Opts::from_args(); for big_test in &big_tests() { if !opts.filter.is_match(&big_test.minijava.to_string_lossy()) { log::info!("skipping {}", big_test.minijava.display()); continue; } let mut bench = Benchmark::new(big_test.minijava.clone()); let mut out = None; for _ in 0..opts.samples { if let Some((outbinary, timings)) = profile_compiler(big_test, opts.opt_level.clone().into(), opts.backend) { bench.add(&timings); out = Some(outbinary); } } let title = format!( "BENCHMARK {}", big_test.minijava.file_stem().unwrap().to_string_lossy() ); bench.load_reference_from_disk(); println!("{}\n{}\n", title, "=".repeat(title.len())); println!("{}\n", bench); bench.write_to_disk(); if let (Ok(cmd_str), Some(binary_path)) = ( if big_test.stdin.is_some() { std::env::var("COMPILED_PROGRAM_BENCHMARK_WITH_STDIN") } else { std::env::var("COMPILED_PROGRAM_BENCHMARK") }, out, ) { let cmd_str = cmd_str.replace("BINARY_PATH", binary_path.as_path().to_str().unwrap()); let cmd_str = if let Some(stdin_file) = &big_test.stdin { cmd_str.replace( "INPUT_PATH", &stdin_file.as_path().to_str().unwrap().to_owned(), ) } else { cmd_str }; let pieces = shell_words::split(&cmd_str).expect("invalid program benchmark command"); let (prog, args) = pieces.split_at(1); let mut cmd = Command::new(&prog[0]); cmd.args(args); log::debug!("Benchmarking generated binary using: {:?}", cmd); match cmd.status() { Ok(status) if status.success() => {} Ok(status) => { log::error!( "binary benchmark failed with non-zero exit code: {:?}", status ); } Err(msg) => { log::error!("binary benchmark crash {:?}", msg); } } } } } #[derive(Debug, Clone)] pub struct BenchmarkEntry { label: String, indent: usize, stats: OnlineStats, } pub struct Benchmark { file: PathBuf, measurements: Vec<BenchmarkEntry>, reference: Option<ReferenceFormat>, } impl Benchmark { pub fn new(file: PathBuf) -> Self { Self { measurements: Vec::new(), reference: None, file, } } pub fn add(&mut self, 
measurements: &[SingleMeasurement]) { if self.measurements.is_empty() { for measurement in measurements { self.measurements.push(BenchmarkEntry { label: measurement.label.clone(), indent: measurement.indent, stats: OnlineStats::from_slice(&[measurement.duration.as_millis()]), }); } return; } if !self.is_compatible(measurements) { panic!("measurements incomplete"); } for (i, item) in measurements.iter().enumerate() { self.measurements[i].stats.add(item.duration.as_millis()); } } fn is_compatible(&self, measurements: &[SingleMeasurement]) -> bool { if measurements.len() != self.measurements.len() { return false; } for (this, other) in self.measurements.iter().zip(measurements.iter()) { if this.label != other.label || this.indent != other.indent { return false; } } true } fn filename(&self) -> PathBuf { self.file.with_extension("benchmark.json") } fn write_to_disk(&self) { let mut diskformat = ReferenceFormat::new(); let now = SystemTime::now(); for measurement in self.measurements.iter() { diskformat.measurements.insert( measurement.label.clone(), ReferenceBenchmark { mean: measurement.stats.mean(), timestamp: now, }, ); } match OpenOptions::new() .write(true) .truncate(true) .create(true) .open(self.filename()) { Ok(outfile) => { if let Err(msg) = serde_json::ser::to_writer(&outfile, &diskformat) { log::debug!( "could not write file for reference benchmark of {}: {:?}", self.file.display(), msg ); } } Err(msg) => { log::debug!( "could not open file for reference benchmark of {}: {:?}", self.file.display(), msg ); } } } fn load_reference_from_disk(&mut self) { if let Ok(res) = self._load_reference() { self.reference = Some(res); } else { log::debug!( "could not find reference benchmark for {}", self.file.display() ); } } fn _load_reference(&mut self) -> io::Result<ReferenceFormat> { let file = File::open(self.filename())?; let reader = BufReader::new(file); // Read the JSON contents of the file as an instance of `User`. 
let u = serde_json::from_reader(reader)?; Ok(u) } } impl fmt::Display for Benchmark { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let min_label_width = 50; let now = SystemTime::now(); for timing in &self.measurements { let indent = " ".repeat(timing.indent); let (change, timestamp) = if let Some(previous_invocation) = &self.reference { if let Some(previous_result) = previous_invocation.measurements.get(&timing.label) { let change = (timing.stats.mean() / previous_result.mean * 100.0) - 100.0; let reference_date = now.duration_since(previous_result.timestamp).unwrap(); // remove some precession let reference_date = Duration::from_secs(reference_date.as_secs()); ( format!("{: >+8.3}%", change), format_duration(reference_date).to_string(), ) } else { ("n/a".to_string(), "".to_string()) } } else { ("".to_string(), "".to_string()) }; writeln!( f, "{nesting}{label: <label_width$} \ {ms: >ms_width$.5} +/- {stddev: >stddev_width$.5}ms \ {samples} samples \ {change} {timestamp} ago", label = timing.label, ms = timing.stats.mean(), stddev = timing.stats.stddev(), samples = timing.stats.len(), nesting = indent, label_width = min_label_width - indent.len(), ms_width = 20, stddev_width = 10, change = change, timestamp = timestamp )?; } Ok(()) } }
{ Self { measurements: HashMap::new(), } }
identifier_body
benchmark.rs
#![feature(duration_as_u128)] //! Executes all mjtests in the /exec/big folder. use compiler_cli::optimization_arg; use compiler_shared::timing::{AsciiDisp, CompilerMeasurements, SingleMeasurement}; use humantime::format_duration; use optimization; use regex::Regex; use runner_integration_tests::{compiler_call, Backend, CompilerCall, CompilerPhase}; use stats::OnlineStats; use std::{ collections::HashMap, ffi::OsStr, fmt, fs::{self, File, OpenOptions}, io::{self, BufReader}, path::PathBuf, process::Command, time::{Duration, SystemTime}, }; use structopt::StructOpt; fn test_folder() -> PathBuf { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../mjtest-rs/tests") } #[derive(Debug, Clone)] struct BigTest { minijava: PathBuf, stdin: Option<PathBuf>, } fn big_tests() -> Vec<BigTest> { let dirpath = test_folder().join("exec/big"); log::info!("test directory is {}", dirpath.display()); let dirlisting = fs::read_dir(dirpath).unwrap(); let mut big_tests = vec![]; for entry in dirlisting { let path = entry.unwrap().path(); if path.extension() == Some(OsStr::new("java")) { let test = BigTest { stdin: { let mut stdin = path.clone(); let stem = path.file_stem().unwrap(); // remove extension stdin.pop(); stdin.push(stem); // set new extension // TODO: support multiple input files stdin.set_extension("0.inputc"); log::debug!("looking for stdin file at {}", stdin.display()); if stdin.is_file() { Some(stdin) } else { None } }, minijava: path, }; log::debug!("Found test: {:?}", test); big_tests.push(test); } } big_tests } fn profile_compiler( test: &BigTest, optimizations: optimization::Level, backend: Backend, ) -> Option<(PathBuf, CompilerMeasurements)> { let outpath = test.minijava.with_extension("benchmark.out"); let mut cmd = compiler_call( CompilerCall::RawCompiler(CompilerPhase::Binary { backend, // TODO: use temp dir, don't trash output: outpath.clone(), assembly: None, optimizations, }), &test.minijava, ); let measurement_path = "measurement.json"; cmd.env("MEASURE_JSON", 
measurement_path); // TODO: run and benchmark the binary //if let Some(stdin_path) = test.stdin { //cmd.stdin(Stdio::piped()); //let mut stdin_reader = File::open(&stdin_path).expect("failed to open stdin // file"); io::copy(&mut stdin_reader, stdin) //.expect("failed to write to stdin of binary"); //} log::debug!("calling compiler as: {:?}", cmd); match cmd.status() { Ok(status) if status.success() => (), Ok(status) => { log::error!("compiler failed with non-zero exit code: {:?}", status); return None; } Err(msg) => { log::error!("compiler crash {:?}", msg); return None; } } let stats_file = File::open(measurement_path).unwrap(); let stats_reader = BufReader::new(stats_file); let profile = serde_json::from_reader(stats_reader).unwrap(); log::debug!("Stats:\n{}", AsciiDisp(&profile)); Some((outpath, profile)) } #[derive(StructOpt)] #[structopt(name = "benchmark")] /// Small utility to benchmark each step of the compiler pipeline pub struct Opts { /// Number of invokations per test file #[structopt(short = "s", long = "samples", default_value = "2")] samples: usize, /// Only test filenames matching the given regex are benchmarked #[structopt(short = "o", long = "only", default_value = "")] filter: Regex, /// Optimization level that should be applied #[structopt(long = "--optimization", short = "-O", default_value = "aggressive")] opt_level: optimization_arg::Arg, #[structopt(long = "--backend", short = "-b")] backend: Backend, } #[derive(serde_derive::Serialize, serde_derive::Deserialize)] struct ReferenceBenchmark { mean: f64, timestamp: SystemTime, } #[derive(serde_derive::Serialize, serde_derive::Deserialize)] struct ReferenceFormat { // TODO: in contrast to the other code in this file this does // not support multiple benchmarks with identical names measurements: HashMap<String, ReferenceBenchmark>, } impl ReferenceFormat { fn new() -> Self { Self { measurements: HashMap::new(), } } } fn main() { env_logger::init(); let opts = Opts::from_args(); for big_test in 
&big_tests() { if !opts.filter.is_match(&big_test.minijava.to_string_lossy()) { log::info!("skipping {}", big_test.minijava.display()); continue; } let mut bench = Benchmark::new(big_test.minijava.clone()); let mut out = None; for _ in 0..opts.samples { if let Some((outbinary, timings)) = profile_compiler(big_test, opts.opt_level.clone().into(), opts.backend) { bench.add(&timings); out = Some(outbinary); } } let title = format!( "BENCHMARK {}", big_test.minijava.file_stem().unwrap().to_string_lossy() ); bench.load_reference_from_disk(); println!("{}\n{}\n", title, "=".repeat(title.len())); println!("{}\n", bench); bench.write_to_disk(); if let (Ok(cmd_str), Some(binary_path)) = ( if big_test.stdin.is_some() { std::env::var("COMPILED_PROGRAM_BENCHMARK_WITH_STDIN") } else { std::env::var("COMPILED_PROGRAM_BENCHMARK") }, out, ) { let cmd_str = cmd_str.replace("BINARY_PATH", binary_path.as_path().to_str().unwrap()); let cmd_str = if let Some(stdin_file) = &big_test.stdin { cmd_str.replace( "INPUT_PATH", &stdin_file.as_path().to_str().unwrap().to_owned(), ) } else { cmd_str }; let pieces = shell_words::split(&cmd_str).expect("invalid program benchmark command"); let (prog, args) = pieces.split_at(1); let mut cmd = Command::new(&prog[0]); cmd.args(args); log::debug!("Benchmarking generated binary using: {:?}", cmd); match cmd.status() { Ok(status) if status.success() => {} Ok(status) => { log::error!( "binary benchmark failed with non-zero exit code: {:?}", status ); } Err(msg) => { log::error!("binary benchmark crash {:?}", msg); } } } } } #[derive(Debug, Clone)] pub struct BenchmarkEntry { label: String, indent: usize, stats: OnlineStats, } pub struct Benchmark { file: PathBuf, measurements: Vec<BenchmarkEntry>, reference: Option<ReferenceFormat>, } impl Benchmark { pub fn
(file: PathBuf) -> Self { Self { measurements: Vec::new(), reference: None, file, } } pub fn add(&mut self, measurements: &[SingleMeasurement]) { if self.measurements.is_empty() { for measurement in measurements { self.measurements.push(BenchmarkEntry { label: measurement.label.clone(), indent: measurement.indent, stats: OnlineStats::from_slice(&[measurement.duration.as_millis()]), }); } return; } if !self.is_compatible(measurements) { panic!("measurements incomplete"); } for (i, item) in measurements.iter().enumerate() { self.measurements[i].stats.add(item.duration.as_millis()); } } fn is_compatible(&self, measurements: &[SingleMeasurement]) -> bool { if measurements.len() != self.measurements.len() { return false; } for (this, other) in self.measurements.iter().zip(measurements.iter()) { if this.label != other.label || this.indent != other.indent { return false; } } true } fn filename(&self) -> PathBuf { self.file.with_extension("benchmark.json") } fn write_to_disk(&self) { let mut diskformat = ReferenceFormat::new(); let now = SystemTime::now(); for measurement in self.measurements.iter() { diskformat.measurements.insert( measurement.label.clone(), ReferenceBenchmark { mean: measurement.stats.mean(), timestamp: now, }, ); } match OpenOptions::new() .write(true) .truncate(true) .create(true) .open(self.filename()) { Ok(outfile) => { if let Err(msg) = serde_json::ser::to_writer(&outfile, &diskformat) { log::debug!( "could not write file for reference benchmark of {}: {:?}", self.file.display(), msg ); } } Err(msg) => { log::debug!( "could not open file for reference benchmark of {}: {:?}", self.file.display(), msg ); } } } fn load_reference_from_disk(&mut self) { if let Ok(res) = self._load_reference() { self.reference = Some(res); } else { log::debug!( "could not find reference benchmark for {}", self.file.display() ); } } fn _load_reference(&mut self) -> io::Result<ReferenceFormat> { let file = File::open(self.filename())?; let reader = BufReader::new(file); // 
Read the JSON contents of the file as an instance of `User`. let u = serde_json::from_reader(reader)?; Ok(u) } } impl fmt::Display for Benchmark { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let min_label_width = 50; let now = SystemTime::now(); for timing in &self.measurements { let indent = " ".repeat(timing.indent); let (change, timestamp) = if let Some(previous_invocation) = &self.reference { if let Some(previous_result) = previous_invocation.measurements.get(&timing.label) { let change = (timing.stats.mean() / previous_result.mean * 100.0) - 100.0; let reference_date = now.duration_since(previous_result.timestamp).unwrap(); // remove some precession let reference_date = Duration::from_secs(reference_date.as_secs()); ( format!("{: >+8.3}%", change), format_duration(reference_date).to_string(), ) } else { ("n/a".to_string(), "".to_string()) } } else { ("".to_string(), "".to_string()) }; writeln!( f, "{nesting}{label: <label_width$} \ {ms: >ms_width$.5} +/- {stddev: >stddev_width$.5}ms \ {samples} samples \ {change} {timestamp} ago", label = timing.label, ms = timing.stats.mean(), stddev = timing.stats.stddev(), samples = timing.stats.len(), nesting = indent, label_width = min_label_width - indent.len(), ms_width = 20, stddev_width = 10, change = change, timestamp = timestamp )?; } Ok(()) } }
new
identifier_name
benchmark.rs
#![feature(duration_as_u128)] //! Executes all mjtests in the /exec/big folder. use compiler_cli::optimization_arg; use compiler_shared::timing::{AsciiDisp, CompilerMeasurements, SingleMeasurement}; use humantime::format_duration; use optimization; use regex::Regex; use runner_integration_tests::{compiler_call, Backend, CompilerCall, CompilerPhase}; use stats::OnlineStats; use std::{ collections::HashMap, ffi::OsStr, fmt, fs::{self, File, OpenOptions}, io::{self, BufReader}, path::PathBuf, process::Command, time::{Duration, SystemTime}, }; use structopt::StructOpt; fn test_folder() -> PathBuf { PathBuf::from(env!("CARGO_MANIFEST_DIR")).join("../mjtest-rs/tests") } #[derive(Debug, Clone)] struct BigTest { minijava: PathBuf, stdin: Option<PathBuf>, } fn big_tests() -> Vec<BigTest> { let dirpath = test_folder().join("exec/big"); log::info!("test directory is {}", dirpath.display()); let dirlisting = fs::read_dir(dirpath).unwrap(); let mut big_tests = vec![]; for entry in dirlisting { let path = entry.unwrap().path(); if path.extension() == Some(OsStr::new("java")) { let test = BigTest { stdin: { let mut stdin = path.clone(); let stem = path.file_stem().unwrap(); // remove extension stdin.pop(); stdin.push(stem); // set new extension // TODO: support multiple input files stdin.set_extension("0.inputc"); log::debug!("looking for stdin file at {}", stdin.display()); if stdin.is_file() { Some(stdin) } else { None } }, minijava: path, }; log::debug!("Found test: {:?}", test); big_tests.push(test); } } big_tests } fn profile_compiler( test: &BigTest, optimizations: optimization::Level, backend: Backend, ) -> Option<(PathBuf, CompilerMeasurements)> { let outpath = test.minijava.with_extension("benchmark.out"); let mut cmd = compiler_call( CompilerCall::RawCompiler(CompilerPhase::Binary { backend, // TODO: use temp dir, don't trash output: outpath.clone(), assembly: None, optimizations, }), &test.minijava, ); let measurement_path = "measurement.json"; cmd.env("MEASURE_JSON", 
measurement_path); // TODO: run and benchmark the binary //if let Some(stdin_path) = test.stdin { //cmd.stdin(Stdio::piped()); //let mut stdin_reader = File::open(&stdin_path).expect("failed to open stdin // file"); io::copy(&mut stdin_reader, stdin) //.expect("failed to write to stdin of binary"); //} log::debug!("calling compiler as: {:?}", cmd); match cmd.status() { Ok(status) if status.success() => (), Ok(status) => { log::error!("compiler failed with non-zero exit code: {:?}", status); return None; } Err(msg) => { log::error!("compiler crash {:?}", msg); return None; } } let stats_file = File::open(measurement_path).unwrap(); let stats_reader = BufReader::new(stats_file); let profile = serde_json::from_reader(stats_reader).unwrap(); log::debug!("Stats:\n{}", AsciiDisp(&profile)); Some((outpath, profile)) } #[derive(StructOpt)] #[structopt(name = "benchmark")] /// Small utility to benchmark each step of the compiler pipeline pub struct Opts { /// Number of invokations per test file #[structopt(short = "s", long = "samples", default_value = "2")] samples: usize, /// Only test filenames matching the given regex are benchmarked #[structopt(short = "o", long = "only", default_value = "")] filter: Regex, /// Optimization level that should be applied #[structopt(long = "--optimization", short = "-O", default_value = "aggressive")] opt_level: optimization_arg::Arg, #[structopt(long = "--backend", short = "-b")] backend: Backend, } #[derive(serde_derive::Serialize, serde_derive::Deserialize)] struct ReferenceBenchmark { mean: f64, timestamp: SystemTime, } #[derive(serde_derive::Serialize, serde_derive::Deserialize)] struct ReferenceFormat { // TODO: in contrast to the other code in this file this does // not support multiple benchmarks with identical names measurements: HashMap<String, ReferenceBenchmark>, } impl ReferenceFormat { fn new() -> Self { Self { measurements: HashMap::new(),
fn main() { env_logger::init(); let opts = Opts::from_args(); for big_test in &big_tests() { if !opts.filter.is_match(&big_test.minijava.to_string_lossy()) { log::info!("skipping {}", big_test.minijava.display()); continue; } let mut bench = Benchmark::new(big_test.minijava.clone()); let mut out = None; for _ in 0..opts.samples { if let Some((outbinary, timings)) = profile_compiler(big_test, opts.opt_level.clone().into(), opts.backend) { bench.add(&timings); out = Some(outbinary); } } let title = format!( "BENCHMARK {}", big_test.minijava.file_stem().unwrap().to_string_lossy() ); bench.load_reference_from_disk(); println!("{}\n{}\n", title, "=".repeat(title.len())); println!("{}\n", bench); bench.write_to_disk(); if let (Ok(cmd_str), Some(binary_path)) = ( if big_test.stdin.is_some() { std::env::var("COMPILED_PROGRAM_BENCHMARK_WITH_STDIN") } else { std::env::var("COMPILED_PROGRAM_BENCHMARK") }, out, ) { let cmd_str = cmd_str.replace("BINARY_PATH", binary_path.as_path().to_str().unwrap()); let cmd_str = if let Some(stdin_file) = &big_test.stdin { cmd_str.replace( "INPUT_PATH", &stdin_file.as_path().to_str().unwrap().to_owned(), ) } else { cmd_str }; let pieces = shell_words::split(&cmd_str).expect("invalid program benchmark command"); let (prog, args) = pieces.split_at(1); let mut cmd = Command::new(&prog[0]); cmd.args(args); log::debug!("Benchmarking generated binary using: {:?}", cmd); match cmd.status() { Ok(status) if status.success() => {} Ok(status) => { log::error!( "binary benchmark failed with non-zero exit code: {:?}", status ); } Err(msg) => { log::error!("binary benchmark crash {:?}", msg); } } } } } #[derive(Debug, Clone)] pub struct BenchmarkEntry { label: String, indent: usize, stats: OnlineStats, } pub struct Benchmark { file: PathBuf, measurements: Vec<BenchmarkEntry>, reference: Option<ReferenceFormat>, } impl Benchmark { pub fn new(file: PathBuf) -> Self { Self { measurements: Vec::new(), reference: None, file, } } pub fn add(&mut self, 
measurements: &[SingleMeasurement]) { if self.measurements.is_empty() { for measurement in measurements { self.measurements.push(BenchmarkEntry { label: measurement.label.clone(), indent: measurement.indent, stats: OnlineStats::from_slice(&[measurement.duration.as_millis()]), }); } return; } if !self.is_compatible(measurements) { panic!("measurements incomplete"); } for (i, item) in measurements.iter().enumerate() { self.measurements[i].stats.add(item.duration.as_millis()); } } fn is_compatible(&self, measurements: &[SingleMeasurement]) -> bool { if measurements.len() != self.measurements.len() { return false; } for (this, other) in self.measurements.iter().zip(measurements.iter()) { if this.label != other.label || this.indent != other.indent { return false; } } true } fn filename(&self) -> PathBuf { self.file.with_extension("benchmark.json") } fn write_to_disk(&self) { let mut diskformat = ReferenceFormat::new(); let now = SystemTime::now(); for measurement in self.measurements.iter() { diskformat.measurements.insert( measurement.label.clone(), ReferenceBenchmark { mean: measurement.stats.mean(), timestamp: now, }, ); } match OpenOptions::new() .write(true) .truncate(true) .create(true) .open(self.filename()) { Ok(outfile) => { if let Err(msg) = serde_json::ser::to_writer(&outfile, &diskformat) { log::debug!( "could not write file for reference benchmark of {}: {:?}", self.file.display(), msg ); } } Err(msg) => { log::debug!( "could not open file for reference benchmark of {}: {:?}", self.file.display(), msg ); } } } fn load_reference_from_disk(&mut self) { if let Ok(res) = self._load_reference() { self.reference = Some(res); } else { log::debug!( "could not find reference benchmark for {}", self.file.display() ); } } fn _load_reference(&mut self) -> io::Result<ReferenceFormat> { let file = File::open(self.filename())?; let reader = BufReader::new(file); // Read the JSON contents of the file as an instance of `User`. 
let u = serde_json::from_reader(reader)?; Ok(u) } } impl fmt::Display for Benchmark { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { let min_label_width = 50; let now = SystemTime::now(); for timing in &self.measurements { let indent = " ".repeat(timing.indent); let (change, timestamp) = if let Some(previous_invocation) = &self.reference { if let Some(previous_result) = previous_invocation.measurements.get(&timing.label) { let change = (timing.stats.mean() / previous_result.mean * 100.0) - 100.0; let reference_date = now.duration_since(previous_result.timestamp).unwrap(); // remove some precession let reference_date = Duration::from_secs(reference_date.as_secs()); ( format!("{: >+8.3}%", change), format_duration(reference_date).to_string(), ) } else { ("n/a".to_string(), "".to_string()) } } else { ("".to_string(), "".to_string()) }; writeln!( f, "{nesting}{label: <label_width$} \ {ms: >ms_width$.5} +/- {stddev: >stddev_width$.5}ms \ {samples} samples \ {change} {timestamp} ago", label = timing.label, ms = timing.stats.mean(), stddev = timing.stats.stddev(), samples = timing.stats.len(), nesting = indent, label_width = min_label_width - indent.len(), ms_width = 20, stddev_width = 10, change = change, timestamp = timestamp )?; } Ok(()) } }
} } }
random_line_split
utils.py
import os import sys import json import time import asyncio import asyncpg import logging import requests from lxml import html from redis import StrictRedis from datetime import datetime from selenium.webdriver import Firefox from selenium.webdriver.firefox.options import Options logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', filename='worker.log', datefmt='%d-%b-%y %H:%M:%S') API_HOST = os.environ.get('API_HOST') API_PORT = os.environ.get('API_PORT') REDIS_HOST = os.environ.get('REDIS_HOST') REDIS_PORT = os.environ.get('REDIS_PORT') POSTGRES_HOST = os.environ.get('POSTGRES_HOST') POSTGRES_PORT = os.environ.get('POSTGRES_PORT') POSTGRES_USER = os.environ.get('POSTGRES_USER') POSTGRES_PASSWORD = os.environ.get('POSTGRES_PASSWORD') POSTGRES_DB = os.environ.get('POSTGRES_DB') postgres_details = { 'host': POSTGRES_HOST, 'user': POSTGRES_USER, 'password': POSTGRES_PASSWORD, 'database': POSTGRES_DB, 'port': POSTGRES_PORT } async def get_failed_data(timeout, ip): try: cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) conn = await asyncpg.connect(**postgres_details) failed_items = await conn.fetch( '''SELECT * FROM parts_data WHERE part_num_analyzed = 'failed' LIMIT 300;''' ) # Nothing left to scrape if not failed_items: print('Scraping Complete.') logging.info('Scraping is successfully completed...') sys.exit(0) for failed in failed_items: failed_urls = dict(failed) # Make a new url list to scrape failed_url_data = { 'ID': failed_urls.get('url_list_id'), 'part_url': failed_urls.get('url'), 'part_name': failed_urls.get('part_num') } cache.rpush('failed_urls', json.dumps(failed_url_data)) except Exception: raise async def store_data(data_list): sql_insert = '''INSERT INTO parts_data ( url_list_id, url, part_num, part_num_analyzed, details, specs, datasheet_url, issued_time, issued_to, completed_time ) VALUES ($1, $2, $3, $4, $5::jsonb, $6::jsonb, $7, $8, $9, $10);''' sql_update = '''UPDATE parts_data SET part_num_analyzed = $1, details = 
$2::jsonb, specs = $3::jsonb, datasheet_url = $4, issued_time = $5, issued_to = $6, completed_time = $7 WHERE url = $8;''' try: async with asyncpg.create_pool(**postgres_details) as pool: for data in data_list: async with pool.acquire() as conn: # Functions for making a custom JSONB codec # to autmatically serialize and de-serialize # into JSONB as you pull and push into the db. def _encoder(value): return b'\x01' + json.dumps(value).encode('utf-8') def _decoder(value): return json.loads(value[1:].decode('utf-8')) await conn.set_type_codec( 'jsonb', encoder=_encoder, decoder=_decoder, schema='pg_catalog', format='binary' ) issued_time = datetime.now() completed_time = data.get('completed_time') time_complete = datetime.now() if completed_time else None row = await conn.fetchrow( 'SELECT * FROM parts_data WHERE url = $1', data.get('url') ) if row is not None: record_to_dict = dict(row) part_num_analyzed = record_to_dict['part_num_analyzed'] if part_num_analyzed == 'success': continue if row: await conn.execute( sql_update, data.get('part_num_analyzed'), data.get('details'), data.get('specs'), data.get('datasheet_url'), issued_time, data.get('issued_to'), time_complete, data.get('url') ) else: await conn.execute( sql_insert, data.get('url_list_id'), data.get('url'), data.get('part_num'), data.get('part_num_analyzed'), data.get('details'), data.get('specs'), data.get('datasheet_url'), issued_time, data.get('issued_to'), time_complete, ) _clear_cache() print('Successfully stored data in postgres...') logging.info( 'Successfully stored data in postgres and cleared cache...') except Exception: raise def scrape_data(urls_data, timeout, ip): print('Scraping...') opts = Options() opts.headless = True browser = Firefox(options=opts) try: cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) for url_info in urls_data: url = url_info.get('part_url') url_list_id = url_info.get('ID') part_number = url_info.get('part_name') time_now = time.time() data = { 'url_list_id': 
url_list_id, 'url': url, 'part_num': part_number, 'part_num_analyzed': 'failed', 'details': None, 'specs': None, 'datasheet_url': None, 'issued_time': time_now, 'issued_to': ip, 'completed_time': None } try: browser.get(url) source = browser.page_source tree = html.fromstring(source.encode()) partpage = tree.xpath('//span[@class="part-number"]') if not partpage: raise Exception('Landed not on the part page') hrefs = tree.xpath('//@href') for item in hrefs: if item.startswith('/pdf'): data['datasheet_url'] = item spec_keys = tree.xpath( '//table[@class="specs"]//td[1]//text()') spec_values = tree.xpath( '//table[@class="specs"]//td[2]//text()') detail_keys = tree.xpath('//div/b/text()') detail_values = tree.xpath( '//*[@id="part-details"]//div/text()') details = _make_details(detail_keys, detail_values) specs = _make_specs(spec_keys, spec_values) data['details'] = details data['specs'] = specs data['part_num_analyzed'] = 'success' data['completed_time'] = time_now # Cache the data in redis cache.rpush('pages', json.dumps(data)) time.sleep(timeout) except Exception as error: logging.error(f'{error}.', exc_info=True) print(f'ERROR: {error}. Check master.log for tracestack.') # Cache the failed data in redis cache.rpush('pages', json.dumps(data)) time.sleep(timeout) # Returns the data from the Redis Cache # and deserializes it back to normal structure. scraped_data = cache.lrange('pages', 0, -1) scraped_data = _deserialize(scraped_data) return scraped_data except Exception: raise finally: if browser: browser.quit() # For assurance that there are no # mem leaks from the browser os.system('killall firefox-esr') def get_ip(): try: get_ip = requests.get(f'http://{API_HOST}:{API_PORT}/api/get_ip') ip = get_ip.json()['ip'] if ip.startswith('127.0.0.1'): ip = requests.get('https://ident.me/').text return ip except Exception: raise def get_url_list(): urls = requests.get(f'http://{API_HOST}:{API_PORT}/api/urls') url_list = urls.json()
return url_list, last_item def _make_details(detail_keys, detail_values): details = {} for detail_key in detail_keys: details[detail_key[:-1]] = '' clean_values = _clean(detail_values) for index, clean_value in enumerate(clean_values): details[detail_keys[index][:-1]] = clean_value return details def _make_specs(spec_keys, spec_values): specs = {} clean_keys = _clean(spec_keys) clean_values = _clean(spec_values) for index, clean_value in enumerate(clean_values): specs[clean_keys[index]] = clean_value return specs def _clean(values): clean_items = [] for value in values: value = value.replace('\n', '') value = value.replace('\t', '') if value and not value.lower().startswith('show'): clean_items.append(value.strip()) return clean_items def _deserialize(data): deserialized_data = [] for item in data: deserialized_data.append(json.loads(item)) return deserialized_data def _clear_cache(): cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) cache.flushall() print('Cache cleared...')
last_item = url_list[len(url_list) - 1]
random_line_split
utils.py
import os import sys import json import time import asyncio import asyncpg import logging import requests from lxml import html from redis import StrictRedis from datetime import datetime from selenium.webdriver import Firefox from selenium.webdriver.firefox.options import Options logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', filename='worker.log', datefmt='%d-%b-%y %H:%M:%S') API_HOST = os.environ.get('API_HOST') API_PORT = os.environ.get('API_PORT') REDIS_HOST = os.environ.get('REDIS_HOST') REDIS_PORT = os.environ.get('REDIS_PORT') POSTGRES_HOST = os.environ.get('POSTGRES_HOST') POSTGRES_PORT = os.environ.get('POSTGRES_PORT') POSTGRES_USER = os.environ.get('POSTGRES_USER') POSTGRES_PASSWORD = os.environ.get('POSTGRES_PASSWORD') POSTGRES_DB = os.environ.get('POSTGRES_DB') postgres_details = { 'host': POSTGRES_HOST, 'user': POSTGRES_USER, 'password': POSTGRES_PASSWORD, 'database': POSTGRES_DB, 'port': POSTGRES_PORT } async def get_failed_data(timeout, ip): try: cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) conn = await asyncpg.connect(**postgres_details) failed_items = await conn.fetch( '''SELECT * FROM parts_data WHERE part_num_analyzed = 'failed' LIMIT 300;''' ) # Nothing left to scrape if not failed_items: print('Scraping Complete.') logging.info('Scraping is successfully completed...') sys.exit(0) for failed in failed_items: failed_urls = dict(failed) # Make a new url list to scrape failed_url_data = { 'ID': failed_urls.get('url_list_id'), 'part_url': failed_urls.get('url'), 'part_name': failed_urls.get('part_num') } cache.rpush('failed_urls', json.dumps(failed_url_data)) except Exception: raise async def store_data(data_list): sql_insert = '''INSERT INTO parts_data ( url_list_id, url, part_num, part_num_analyzed, details, specs, datasheet_url, issued_time, issued_to, completed_time ) VALUES ($1, $2, $3, $4, $5::jsonb, $6::jsonb, $7, $8, $9, $10);''' sql_update = '''UPDATE parts_data SET part_num_analyzed = $1, details = 
$2::jsonb, specs = $3::jsonb, datasheet_url = $4, issued_time = $5, issued_to = $6, completed_time = $7 WHERE url = $8;''' try: async with asyncpg.create_pool(**postgres_details) as pool: for data in data_list: async with pool.acquire() as conn: # Functions for making a custom JSONB codec # to autmatically serialize and de-serialize # into JSONB as you pull and push into the db. def _encoder(value): return b'\x01' + json.dumps(value).encode('utf-8') def _decoder(value): return json.loads(value[1:].decode('utf-8')) await conn.set_type_codec( 'jsonb', encoder=_encoder, decoder=_decoder, schema='pg_catalog', format='binary' ) issued_time = datetime.now() completed_time = data.get('completed_time') time_complete = datetime.now() if completed_time else None row = await conn.fetchrow( 'SELECT * FROM parts_data WHERE url = $1', data.get('url') ) if row is not None: record_to_dict = dict(row) part_num_analyzed = record_to_dict['part_num_analyzed'] if part_num_analyzed == 'success': continue if row: await conn.execute( sql_update, data.get('part_num_analyzed'), data.get('details'), data.get('specs'), data.get('datasheet_url'), issued_time, data.get('issued_to'), time_complete, data.get('url') ) else: await conn.execute( sql_insert, data.get('url_list_id'), data.get('url'), data.get('part_num'), data.get('part_num_analyzed'), data.get('details'), data.get('specs'), data.get('datasheet_url'), issued_time, data.get('issued_to'), time_complete, ) _clear_cache() print('Successfully stored data in postgres...') logging.info( 'Successfully stored data in postgres and cleared cache...') except Exception: raise def scrape_data(urls_data, timeout, ip): print('Scraping...') opts = Options() opts.headless = True browser = Firefox(options=opts) try: cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) for url_info in urls_data: url = url_info.get('part_url') url_list_id = url_info.get('ID') part_number = url_info.get('part_name') time_now = time.time() data = { 'url_list_id': 
url_list_id, 'url': url, 'part_num': part_number, 'part_num_analyzed': 'failed', 'details': None, 'specs': None, 'datasheet_url': None, 'issued_time': time_now, 'issued_to': ip, 'completed_time': None } try: browser.get(url) source = browser.page_source tree = html.fromstring(source.encode()) partpage = tree.xpath('//span[@class="part-number"]') if not partpage: raise Exception('Landed not on the part page') hrefs = tree.xpath('//@href') for item in hrefs: if item.startswith('/pdf'): data['datasheet_url'] = item spec_keys = tree.xpath( '//table[@class="specs"]//td[1]//text()') spec_values = tree.xpath( '//table[@class="specs"]//td[2]//text()') detail_keys = tree.xpath('//div/b/text()') detail_values = tree.xpath( '//*[@id="part-details"]//div/text()') details = _make_details(detail_keys, detail_values) specs = _make_specs(spec_keys, spec_values) data['details'] = details data['specs'] = specs data['part_num_analyzed'] = 'success' data['completed_time'] = time_now # Cache the data in redis cache.rpush('pages', json.dumps(data)) time.sleep(timeout) except Exception as error: logging.error(f'{error}.', exc_info=True) print(f'ERROR: {error}. Check master.log for tracestack.') # Cache the failed data in redis cache.rpush('pages', json.dumps(data)) time.sleep(timeout) # Returns the data from the Redis Cache # and deserializes it back to normal structure. scraped_data = cache.lrange('pages', 0, -1) scraped_data = _deserialize(scraped_data) return scraped_data except Exception: raise finally: if browser: browser.quit() # For assurance that there are no # mem leaks from the browser os.system('killall firefox-esr') def get_ip(): try: get_ip = requests.get(f'http://{API_HOST}:{API_PORT}/api/get_ip') ip = get_ip.json()['ip'] if ip.startswith('127.0.0.1'): ip = requests.get('https://ident.me/').text return ip except Exception: raise def
(): urls = requests.get(f'http://{API_HOST}:{API_PORT}/api/urls') url_list = urls.json() last_item = url_list[len(url_list) - 1] return url_list, last_item def _make_details(detail_keys, detail_values): details = {} for detail_key in detail_keys: details[detail_key[:-1]] = '' clean_values = _clean(detail_values) for index, clean_value in enumerate(clean_values): details[detail_keys[index][:-1]] = clean_value return details def _make_specs(spec_keys, spec_values): specs = {} clean_keys = _clean(spec_keys) clean_values = _clean(spec_values) for index, clean_value in enumerate(clean_values): specs[clean_keys[index]] = clean_value return specs def _clean(values): clean_items = [] for value in values: value = value.replace('\n', '') value = value.replace('\t', '') if value and not value.lower().startswith('show'): clean_items.append(value.strip()) return clean_items def _deserialize(data): deserialized_data = [] for item in data: deserialized_data.append(json.loads(item)) return deserialized_data def _clear_cache(): cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) cache.flushall() print('Cache cleared...')
get_url_list
identifier_name
utils.py
import os import sys import json import time import asyncio import asyncpg import logging import requests from lxml import html from redis import StrictRedis from datetime import datetime from selenium.webdriver import Firefox from selenium.webdriver.firefox.options import Options logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', filename='worker.log', datefmt='%d-%b-%y %H:%M:%S') API_HOST = os.environ.get('API_HOST') API_PORT = os.environ.get('API_PORT') REDIS_HOST = os.environ.get('REDIS_HOST') REDIS_PORT = os.environ.get('REDIS_PORT') POSTGRES_HOST = os.environ.get('POSTGRES_HOST') POSTGRES_PORT = os.environ.get('POSTGRES_PORT') POSTGRES_USER = os.environ.get('POSTGRES_USER') POSTGRES_PASSWORD = os.environ.get('POSTGRES_PASSWORD') POSTGRES_DB = os.environ.get('POSTGRES_DB') postgres_details = { 'host': POSTGRES_HOST, 'user': POSTGRES_USER, 'password': POSTGRES_PASSWORD, 'database': POSTGRES_DB, 'port': POSTGRES_PORT } async def get_failed_data(timeout, ip): try: cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) conn = await asyncpg.connect(**postgres_details) failed_items = await conn.fetch( '''SELECT * FROM parts_data WHERE part_num_analyzed = 'failed' LIMIT 300;''' ) # Nothing left to scrape if not failed_items: print('Scraping Complete.') logging.info('Scraping is successfully completed...') sys.exit(0) for failed in failed_items: failed_urls = dict(failed) # Make a new url list to scrape failed_url_data = { 'ID': failed_urls.get('url_list_id'), 'part_url': failed_urls.get('url'), 'part_name': failed_urls.get('part_num') } cache.rpush('failed_urls', json.dumps(failed_url_data)) except Exception: raise async def store_data(data_list): sql_insert = '''INSERT INTO parts_data ( url_list_id, url, part_num, part_num_analyzed, details, specs, datasheet_url, issued_time, issued_to, completed_time ) VALUES ($1, $2, $3, $4, $5::jsonb, $6::jsonb, $7, $8, $9, $10);''' sql_update = '''UPDATE parts_data SET part_num_analyzed = $1, details = 
$2::jsonb, specs = $3::jsonb, datasheet_url = $4, issued_time = $5, issued_to = $6, completed_time = $7 WHERE url = $8;''' try: async with asyncpg.create_pool(**postgres_details) as pool: for data in data_list: async with pool.acquire() as conn: # Functions for making a custom JSONB codec # to autmatically serialize and de-serialize # into JSONB as you pull and push into the db. def _encoder(value): return b'\x01' + json.dumps(value).encode('utf-8') def _decoder(value): return json.loads(value[1:].decode('utf-8')) await conn.set_type_codec( 'jsonb', encoder=_encoder, decoder=_decoder, schema='pg_catalog', format='binary' ) issued_time = datetime.now() completed_time = data.get('completed_time') time_complete = datetime.now() if completed_time else None row = await conn.fetchrow( 'SELECT * FROM parts_data WHERE url = $1', data.get('url') ) if row is not None: record_to_dict = dict(row) part_num_analyzed = record_to_dict['part_num_analyzed'] if part_num_analyzed == 'success': continue if row: await conn.execute( sql_update, data.get('part_num_analyzed'), data.get('details'), data.get('specs'), data.get('datasheet_url'), issued_time, data.get('issued_to'), time_complete, data.get('url') ) else: await conn.execute( sql_insert, data.get('url_list_id'), data.get('url'), data.get('part_num'), data.get('part_num_analyzed'), data.get('details'), data.get('specs'), data.get('datasheet_url'), issued_time, data.get('issued_to'), time_complete, ) _clear_cache() print('Successfully stored data in postgres...') logging.info( 'Successfully stored data in postgres and cleared cache...') except Exception: raise def scrape_data(urls_data, timeout, ip): print('Scraping...') opts = Options() opts.headless = True browser = Firefox(options=opts) try: cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) for url_info in urls_data: url = url_info.get('part_url') url_list_id = url_info.get('ID') part_number = url_info.get('part_name') time_now = time.time() data = { 'url_list_id': 
url_list_id, 'url': url, 'part_num': part_number, 'part_num_analyzed': 'failed', 'details': None, 'specs': None, 'datasheet_url': None, 'issued_time': time_now, 'issued_to': ip, 'completed_time': None } try: browser.get(url) source = browser.page_source tree = html.fromstring(source.encode()) partpage = tree.xpath('//span[@class="part-number"]') if not partpage: raise Exception('Landed not on the part page') hrefs = tree.xpath('//@href') for item in hrefs: if item.startswith('/pdf'): data['datasheet_url'] = item spec_keys = tree.xpath( '//table[@class="specs"]//td[1]//text()') spec_values = tree.xpath( '//table[@class="specs"]//td[2]//text()') detail_keys = tree.xpath('//div/b/text()') detail_values = tree.xpath( '//*[@id="part-details"]//div/text()') details = _make_details(detail_keys, detail_values) specs = _make_specs(spec_keys, spec_values) data['details'] = details data['specs'] = specs data['part_num_analyzed'] = 'success' data['completed_time'] = time_now # Cache the data in redis cache.rpush('pages', json.dumps(data)) time.sleep(timeout) except Exception as error: logging.error(f'{error}.', exc_info=True) print(f'ERROR: {error}. Check master.log for tracestack.') # Cache the failed data in redis cache.rpush('pages', json.dumps(data)) time.sleep(timeout) # Returns the data from the Redis Cache # and deserializes it back to normal structure. scraped_data = cache.lrange('pages', 0, -1) scraped_data = _deserialize(scraped_data) return scraped_data except Exception: raise finally: if browser: browser.quit() # For assurance that there are no # mem leaks from the browser os.system('killall firefox-esr') def get_ip(): try: get_ip = requests.get(f'http://{API_HOST}:{API_PORT}/api/get_ip') ip = get_ip.json()['ip'] if ip.startswith('127.0.0.1'): ip = requests.get('https://ident.me/').text return ip except Exception: raise def get_url_list():
def _make_details(detail_keys, detail_values): details = {} for detail_key in detail_keys: details[detail_key[:-1]] = '' clean_values = _clean(detail_values) for index, clean_value in enumerate(clean_values): details[detail_keys[index][:-1]] = clean_value return details def _make_specs(spec_keys, spec_values): specs = {} clean_keys = _clean(spec_keys) clean_values = _clean(spec_values) for index, clean_value in enumerate(clean_values): specs[clean_keys[index]] = clean_value return specs def _clean(values): clean_items = [] for value in values: value = value.replace('\n', '') value = value.replace('\t', '') if value and not value.lower().startswith('show'): clean_items.append(value.strip()) return clean_items def _deserialize(data): deserialized_data = [] for item in data: deserialized_data.append(json.loads(item)) return deserialized_data def _clear_cache(): cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) cache.flushall() print('Cache cleared...')
urls = requests.get(f'http://{API_HOST}:{API_PORT}/api/urls') url_list = urls.json() last_item = url_list[len(url_list) - 1] return url_list, last_item
identifier_body
utils.py
import os import sys import json import time import asyncio import asyncpg import logging import requests from lxml import html from redis import StrictRedis from datetime import datetime from selenium.webdriver import Firefox from selenium.webdriver.firefox.options import Options logging.basicConfig(format='%(asctime)s - %(levelname)s - %(message)s', filename='worker.log', datefmt='%d-%b-%y %H:%M:%S') API_HOST = os.environ.get('API_HOST') API_PORT = os.environ.get('API_PORT') REDIS_HOST = os.environ.get('REDIS_HOST') REDIS_PORT = os.environ.get('REDIS_PORT') POSTGRES_HOST = os.environ.get('POSTGRES_HOST') POSTGRES_PORT = os.environ.get('POSTGRES_PORT') POSTGRES_USER = os.environ.get('POSTGRES_USER') POSTGRES_PASSWORD = os.environ.get('POSTGRES_PASSWORD') POSTGRES_DB = os.environ.get('POSTGRES_DB') postgres_details = { 'host': POSTGRES_HOST, 'user': POSTGRES_USER, 'password': POSTGRES_PASSWORD, 'database': POSTGRES_DB, 'port': POSTGRES_PORT } async def get_failed_data(timeout, ip): try: cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) conn = await asyncpg.connect(**postgres_details) failed_items = await conn.fetch( '''SELECT * FROM parts_data WHERE part_num_analyzed = 'failed' LIMIT 300;''' ) # Nothing left to scrape if not failed_items: print('Scraping Complete.') logging.info('Scraping is successfully completed...') sys.exit(0) for failed in failed_items: failed_urls = dict(failed) # Make a new url list to scrape failed_url_data = { 'ID': failed_urls.get('url_list_id'), 'part_url': failed_urls.get('url'), 'part_name': failed_urls.get('part_num') } cache.rpush('failed_urls', json.dumps(failed_url_data)) except Exception: raise async def store_data(data_list): sql_insert = '''INSERT INTO parts_data ( url_list_id, url, part_num, part_num_analyzed, details, specs, datasheet_url, issued_time, issued_to, completed_time ) VALUES ($1, $2, $3, $4, $5::jsonb, $6::jsonb, $7, $8, $9, $10);''' sql_update = '''UPDATE parts_data SET part_num_analyzed = $1, details = 
$2::jsonb, specs = $3::jsonb, datasheet_url = $4, issued_time = $5, issued_to = $6, completed_time = $7 WHERE url = $8;''' try: async with asyncpg.create_pool(**postgres_details) as pool: for data in data_list: async with pool.acquire() as conn: # Functions for making a custom JSONB codec # to autmatically serialize and de-serialize # into JSONB as you pull and push into the db. def _encoder(value): return b'\x01' + json.dumps(value).encode('utf-8') def _decoder(value): return json.loads(value[1:].decode('utf-8')) await conn.set_type_codec( 'jsonb', encoder=_encoder, decoder=_decoder, schema='pg_catalog', format='binary' ) issued_time = datetime.now() completed_time = data.get('completed_time') time_complete = datetime.now() if completed_time else None row = await conn.fetchrow( 'SELECT * FROM parts_data WHERE url = $1', data.get('url') ) if row is not None: record_to_dict = dict(row) part_num_analyzed = record_to_dict['part_num_analyzed'] if part_num_analyzed == 'success': continue if row: await conn.execute( sql_update, data.get('part_num_analyzed'), data.get('details'), data.get('specs'), data.get('datasheet_url'), issued_time, data.get('issued_to'), time_complete, data.get('url') ) else: await conn.execute( sql_insert, data.get('url_list_id'), data.get('url'), data.get('part_num'), data.get('part_num_analyzed'), data.get('details'), data.get('specs'), data.get('datasheet_url'), issued_time, data.get('issued_to'), time_complete, ) _clear_cache() print('Successfully stored data in postgres...') logging.info( 'Successfully stored data in postgres and cleared cache...') except Exception: raise def scrape_data(urls_data, timeout, ip): print('Scraping...') opts = Options() opts.headless = True browser = Firefox(options=opts) try: cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) for url_info in urls_data: url = url_info.get('part_url') url_list_id = url_info.get('ID') part_number = url_info.get('part_name') time_now = time.time() data = { 'url_list_id': 
url_list_id, 'url': url, 'part_num': part_number, 'part_num_analyzed': 'failed', 'details': None, 'specs': None, 'datasheet_url': None, 'issued_time': time_now, 'issued_to': ip, 'completed_time': None } try: browser.get(url) source = browser.page_source tree = html.fromstring(source.encode()) partpage = tree.xpath('//span[@class="part-number"]') if not partpage: raise Exception('Landed not on the part page') hrefs = tree.xpath('//@href') for item in hrefs: if item.startswith('/pdf'): data['datasheet_url'] = item spec_keys = tree.xpath( '//table[@class="specs"]//td[1]//text()') spec_values = tree.xpath( '//table[@class="specs"]//td[2]//text()') detail_keys = tree.xpath('//div/b/text()') detail_values = tree.xpath( '//*[@id="part-details"]//div/text()') details = _make_details(detail_keys, detail_values) specs = _make_specs(spec_keys, spec_values) data['details'] = details data['specs'] = specs data['part_num_analyzed'] = 'success' data['completed_time'] = time_now # Cache the data in redis cache.rpush('pages', json.dumps(data)) time.sleep(timeout) except Exception as error: logging.error(f'{error}.', exc_info=True) print(f'ERROR: {error}. Check master.log for tracestack.') # Cache the failed data in redis cache.rpush('pages', json.dumps(data)) time.sleep(timeout) # Returns the data from the Redis Cache # and deserializes it back to normal structure. 
scraped_data = cache.lrange('pages', 0, -1) scraped_data = _deserialize(scraped_data) return scraped_data except Exception: raise finally: if browser: browser.quit() # For assurance that there are no # mem leaks from the browser os.system('killall firefox-esr') def get_ip(): try: get_ip = requests.get(f'http://{API_HOST}:{API_PORT}/api/get_ip') ip = get_ip.json()['ip'] if ip.startswith('127.0.0.1'): ip = requests.get('https://ident.me/').text return ip except Exception: raise def get_url_list(): urls = requests.get(f'http://{API_HOST}:{API_PORT}/api/urls') url_list = urls.json() last_item = url_list[len(url_list) - 1] return url_list, last_item def _make_details(detail_keys, detail_values): details = {} for detail_key in detail_keys: details[detail_key[:-1]] = '' clean_values = _clean(detail_values) for index, clean_value in enumerate(clean_values): details[detail_keys[index][:-1]] = clean_value return details def _make_specs(spec_keys, spec_values): specs = {} clean_keys = _clean(spec_keys) clean_values = _clean(spec_values) for index, clean_value in enumerate(clean_values): specs[clean_keys[index]] = clean_value return specs def _clean(values): clean_items = [] for value in values:
return clean_items def _deserialize(data): deserialized_data = [] for item in data: deserialized_data.append(json.loads(item)) return deserialized_data def _clear_cache(): cache = StrictRedis(host=REDIS_HOST, port=REDIS_PORT) cache.flushall() print('Cache cleared...')
value = value.replace('\n', '') value = value.replace('\t', '') if value and not value.lower().startswith('show'): clean_items.append(value.strip())
conditional_block
data_logger.py
#!/usr/bin/env python3 # # Adapted from the pynetworktables json_logger example program # # While this is designed to work with the robot.py example in this directory, # because the transport uses NetworkTables you can use it with a robot program # written in any FRC language. # # The expected NT interface is as follows: # # - /robot/autospeed : This program sends this to the robot. In autonomous mode, # the robot should attempt to drive at this speed # # - /robot/telemetry : The robot sends this. It is a number array that contains: # - time, battery, autospeed, # lmotor_volts, rmotor_volts, # l_encoder_count, r_encoder_count, # l_encoder_velocity, r_encoder_velocity # # Change the following constant if your robot wheels are slipping during the # the fast test, or if the robot is not moving ROBOT_FAST_SPEED = 0.5 from networktables import NetworkTables, __version__ as ntversion from networktables.util import ntproperty # Older versions of pynetworktables (and ntcore) had bugs related to flush() if tuple(map(int, ntversion.split(".")[:3])) < (2018, 1, 2): print("Requires pynetworktables >= 2018.1.3, %s is installed" % ntversion) exit(1) import json import queue import time import threading from data_analyzer import analyze_data, AUTOSPEED_COL, ENCODER_P_COL import logging logger = logging.getLogger("logger") # FMSControlData bitfields ENABLED_FIELD = 1 << 0 AUTO_FIELD = 1 << 1 TEST_FIELD = 1 << 2 EMERGENCY_STOP_FIELD = 1 << 3 FMS_ATTACHED_FIELD = 1 << 4 DS_ATTACHED_FIELD = 1 << 5 def translate_control_word(value): value = int(value) if value & ENABLED_FIELD == 0: return "disabled" if value & AUTO_FIELD: return "auto" if value & TEST_FIELD: return "test" else: return "teleop" class DataLogger: # Change this key to whatever NT key you want to log log_key = "/robot/telemetry" matchNumber = ntproperty("/FMSInfo/MatchNumber", 0, writeDefault=False) eventName = ntproperty("/FMSInfo/EventName", "unknown", writeDefault=False) autospeed = ntproperty("/robot/autospeed", 0, 
writeDefault=True) def __init__(self): self.queue = queue.Queue() self.mode = "disabled" self.data = [] self.lock = threading.Condition() # Tells the listener to not store data self.discard_data = True # Last telemetry data received from the robot self.last_data = (0,) * 20 def connectionListener(self, connected, info): # set our robot to 'disabled' if the connection drops so that we can # guarantee the data gets written to disk if not connected: self.valueChanged("/FMSInfo/FMSControlData", 0, False) self.queue.put("connected" if connected else "disconnected") def valueChanged(self, key, value, isNew): if key == "/FMSInfo/FMSControlData": mode = translate_control_word(value) with self.lock: last = self.mode self.mode = mode data = self.data self.data = [] self.lock.notifyAll() logger.info("Robot mode: %s -> %s", last, mode) # This example only stores on auto -> disabled transition. Change it # to whatever it is that you need for logging if last == "auto": logger.info("%d items received", len(data)) # Don't block the NT thread -- trite the data to the queue so # it can be processed elsewhere self.queue.put(data) elif key == self.log_key: self.last_data = value if not self.discard_data: with self.lock: self.data.append(value) dlen = len(self.data) if dlen and dlen % 100 == 0: logger.info( "Received %d datapoints (last commanded speed: %.2f)", dlen, value[AUTOSPEED_COL], ) def get_nowait(self, timeout=None): try: return self.queue.get(block=False, timeout=timeout) except queue.Empty: return queue.Empty def wait_for_stationary(self): # Wait for the velocity to be 0 for at least one second logger.info("Waiting for robot to stop moving for at least 1 second...") first_stationary_time = time.monotonic() last_encoder = 0 while True: # check the queue in case we switched out of auto mode
def ramp_voltage_in_auto(self, initial_speed, ramp): logger.info( "Activating arm at %.1f%%, adding %.3f per 50ms", initial_speed, ramp ) self.discard_data = False self.autospeed = initial_speed NetworkTables.flush() try: while True: # check the queue in case we switched out of auto mode qdata = self.get_nowait() if qdata != queue.Empty: return qdata time.sleep(0.050) self.autospeed = self.autospeed + ramp NetworkTables.flush() finally: self.discard_data = True self.autospeed = 0 def run(self): # Initialize networktables team = "" while team == "": team = input("Enter team number or 'sim': ") if team == "sim": NetworkTables.initialize(server="localhost") else: NetworkTables.startClientTeam(int(team)) # Use listeners to receive the data NetworkTables.addConnectionListener( self.connectionListener, immediateNotify=True ) NetworkTables.addEntryListener(self.valueChanged) # Wait for a connection notification, then continue on the path print("Waiting for NT connection..") while True: if self.queue.get() == "connected": break print("Connected!") print() autonomous = [ # name, initial speed, ramp ("slow-forward", 0, 0.001), ("slow-backward", 0, -0.001), ("fast-forward", abs(ROBOT_FAST_SPEED), 0), ("fast-backward", -abs(ROBOT_FAST_SPEED), 0), ] stored_data = {} # # Wait for the user to cycle through the 4 autonomus modes # for i, (name, initial_speed, ramp) in enumerate(autonomous): # Initialize the robot commanded speed to 0 self.autospeed = 0 self.discard_data = True print() print("Autonomous %d/%d: %s" % (i + 1, len(autonomous), name)) print() print("Please enable the robot in autonomous mode.") print() print( "WARNING: It will not automatically stop moving, so disable the robot" ) print("before the arm hits something!") print("") # Wait for robot to signal that it entered autonomous mode with self.lock: self.lock.wait_for(lambda: self.mode == "auto") data = self.wait_for_stationary() if data is not None: if data in ("connected", "disconnected"): print( "ERROR: NT 
disconnected, results won't be reliable. Giving up." ) return else: print("Robot exited autonomous mode before data could be sent?") break # Ramp the voltage at the specified rate data = self.ramp_voltage_in_auto(initial_speed, ramp) if data in ("connected", "disconnected"): print("ERROR: NT disconnected, results won't be reliable. Giving up.") return # output sanity check if len(data) < 3: print( "WARNING: There wasn't a lot of data received during that last run" ) else: distance = data[-1][ENCODER_P_COL] - data[0][ENCODER_P_COL] print() print("The robot arm reported traveling the following distance:") print() print("Left: %.3f degrees" % distance) print() print( "If that doesn't seem quite right... you should change the encoder calibration" ) print("in the robot program or fix your encoders!") stored_data[name] = data # In case the user decides to re-enable autonomous again.. self.autospeed = 0 # # We have data! Do something with it now # # Write it to disk first, in case the processing fails for some reason # -> Using JSON for simplicity, maybe add csv at a later date now = time.strftime("%Y%m%d-%H%M-%S") fname = "%s-data.json" % now print() print("Data collection complete! saving to %s..." % fname) with open(fname, "w") as fp: json.dump(stored_data, fp, indent=4, separators=(",", ": ")) analyze_data(stored_data) if __name__ == "__main__": log_datefmt = "%H:%M:%S" log_format = "%(asctime)s:%(msecs)03d %(levelname)-8s: %(name)-20s: %(message)s" logging.basicConfig(level=logging.INFO, datefmt=log_datefmt, format=log_format) dl = DataLogger() dl.run()
qdata = self.get_nowait() if qdata != queue.Empty: return qdata now = time.monotonic() # check the encoder position values, are they stationary? last_data = self.last_data try: encoder = last_data[ENCODER_P_COL] except IndexError: print(self.last_data) raise if ( abs(last_encoder - encoder) > 0.01 ): first_stationary_time = now elif now - first_stationary_time > 1: logger.info("Robot has waited long enough, beginning test") return last_encoder = encoder
conditional_block
data_logger.py
#!/usr/bin/env python3 # # Adapted from the pynetworktables json_logger example program # # While this is designed to work with the robot.py example in this directory, # because the transport uses NetworkTables you can use it with a robot program # written in any FRC language. # # The expected NT interface is as follows: # # - /robot/autospeed : This program sends this to the robot. In autonomous mode, # the robot should attempt to drive at this speed # # - /robot/telemetry : The robot sends this. It is a number array that contains: # - time, battery, autospeed, # lmotor_volts, rmotor_volts, # l_encoder_count, r_encoder_count, # l_encoder_velocity, r_encoder_velocity # # Change the following constant if your robot wheels are slipping during the # the fast test, or if the robot is not moving ROBOT_FAST_SPEED = 0.5 from networktables import NetworkTables, __version__ as ntversion from networktables.util import ntproperty # Older versions of pynetworktables (and ntcore) had bugs related to flush() if tuple(map(int, ntversion.split(".")[:3])) < (2018, 1, 2): print("Requires pynetworktables >= 2018.1.3, %s is installed" % ntversion) exit(1) import json import queue import time import threading from data_analyzer import analyze_data, AUTOSPEED_COL, ENCODER_P_COL import logging logger = logging.getLogger("logger") # FMSControlData bitfields ENABLED_FIELD = 1 << 0 AUTO_FIELD = 1 << 1 TEST_FIELD = 1 << 2 EMERGENCY_STOP_FIELD = 1 << 3 FMS_ATTACHED_FIELD = 1 << 4 DS_ATTACHED_FIELD = 1 << 5 def translate_control_word(value): value = int(value) if value & ENABLED_FIELD == 0: return "disabled" if value & AUTO_FIELD: return "auto" if value & TEST_FIELD: return "test" else: return "teleop" class DataLogger: # Change this key to whatever NT key you want to log
if __name__ == "__main__": log_datefmt = "%H:%M:%S" log_format = "%(asctime)s:%(msecs)03d %(levelname)-8s: %(name)-20s: %(message)s" logging.basicConfig(level=logging.INFO, datefmt=log_datefmt, format=log_format) dl = DataLogger() dl.run()
log_key = "/robot/telemetry" matchNumber = ntproperty("/FMSInfo/MatchNumber", 0, writeDefault=False) eventName = ntproperty("/FMSInfo/EventName", "unknown", writeDefault=False) autospeed = ntproperty("/robot/autospeed", 0, writeDefault=True) def __init__(self): self.queue = queue.Queue() self.mode = "disabled" self.data = [] self.lock = threading.Condition() # Tells the listener to not store data self.discard_data = True # Last telemetry data received from the robot self.last_data = (0,) * 20 def connectionListener(self, connected, info): # set our robot to 'disabled' if the connection drops so that we can # guarantee the data gets written to disk if not connected: self.valueChanged("/FMSInfo/FMSControlData", 0, False) self.queue.put("connected" if connected else "disconnected") def valueChanged(self, key, value, isNew): if key == "/FMSInfo/FMSControlData": mode = translate_control_word(value) with self.lock: last = self.mode self.mode = mode data = self.data self.data = [] self.lock.notifyAll() logger.info("Robot mode: %s -> %s", last, mode) # This example only stores on auto -> disabled transition. 
Change it # to whatever it is that you need for logging if last == "auto": logger.info("%d items received", len(data)) # Don't block the NT thread -- trite the data to the queue so # it can be processed elsewhere self.queue.put(data) elif key == self.log_key: self.last_data = value if not self.discard_data: with self.lock: self.data.append(value) dlen = len(self.data) if dlen and dlen % 100 == 0: logger.info( "Received %d datapoints (last commanded speed: %.2f)", dlen, value[AUTOSPEED_COL], ) def get_nowait(self, timeout=None): try: return self.queue.get(block=False, timeout=timeout) except queue.Empty: return queue.Empty def wait_for_stationary(self): # Wait for the velocity to be 0 for at least one second logger.info("Waiting for robot to stop moving for at least 1 second...") first_stationary_time = time.monotonic() last_encoder = 0 while True: # check the queue in case we switched out of auto mode qdata = self.get_nowait() if qdata != queue.Empty: return qdata now = time.monotonic() # check the encoder position values, are they stationary? 
last_data = self.last_data try: encoder = last_data[ENCODER_P_COL] except IndexError: print(self.last_data) raise if ( abs(last_encoder - encoder) > 0.01 ): first_stationary_time = now elif now - first_stationary_time > 1: logger.info("Robot has waited long enough, beginning test") return last_encoder = encoder def ramp_voltage_in_auto(self, initial_speed, ramp): logger.info( "Activating arm at %.1f%%, adding %.3f per 50ms", initial_speed, ramp ) self.discard_data = False self.autospeed = initial_speed NetworkTables.flush() try: while True: # check the queue in case we switched out of auto mode qdata = self.get_nowait() if qdata != queue.Empty: return qdata time.sleep(0.050) self.autospeed = self.autospeed + ramp NetworkTables.flush() finally: self.discard_data = True self.autospeed = 0 def run(self): # Initialize networktables team = "" while team == "": team = input("Enter team number or 'sim': ") if team == "sim": NetworkTables.initialize(server="localhost") else: NetworkTables.startClientTeam(int(team)) # Use listeners to receive the data NetworkTables.addConnectionListener( self.connectionListener, immediateNotify=True ) NetworkTables.addEntryListener(self.valueChanged) # Wait for a connection notification, then continue on the path print("Waiting for NT connection..") while True: if self.queue.get() == "connected": break print("Connected!") print() autonomous = [ # name, initial speed, ramp ("slow-forward", 0, 0.001), ("slow-backward", 0, -0.001), ("fast-forward", abs(ROBOT_FAST_SPEED), 0), ("fast-backward", -abs(ROBOT_FAST_SPEED), 0), ] stored_data = {} # # Wait for the user to cycle through the 4 autonomus modes # for i, (name, initial_speed, ramp) in enumerate(autonomous): # Initialize the robot commanded speed to 0 self.autospeed = 0 self.discard_data = True print() print("Autonomous %d/%d: %s" % (i + 1, len(autonomous), name)) print() print("Please enable the robot in autonomous mode.") print() print( "WARNING: It will not automatically stop moving, so 
disable the robot" ) print("before the arm hits something!") print("") # Wait for robot to signal that it entered autonomous mode with self.lock: self.lock.wait_for(lambda: self.mode == "auto") data = self.wait_for_stationary() if data is not None: if data in ("connected", "disconnected"): print( "ERROR: NT disconnected, results won't be reliable. Giving up." ) return else: print("Robot exited autonomous mode before data could be sent?") break # Ramp the voltage at the specified rate data = self.ramp_voltage_in_auto(initial_speed, ramp) if data in ("connected", "disconnected"): print("ERROR: NT disconnected, results won't be reliable. Giving up.") return # output sanity check if len(data) < 3: print( "WARNING: There wasn't a lot of data received during that last run" ) else: distance = data[-1][ENCODER_P_COL] - data[0][ENCODER_P_COL] print() print("The robot arm reported traveling the following distance:") print() print("Left: %.3f degrees" % distance) print() print( "If that doesn't seem quite right... you should change the encoder calibration" ) print("in the robot program or fix your encoders!") stored_data[name] = data # In case the user decides to re-enable autonomous again.. self.autospeed = 0 # # We have data! Do something with it now # # Write it to disk first, in case the processing fails for some reason # -> Using JSON for simplicity, maybe add csv at a later date now = time.strftime("%Y%m%d-%H%M-%S") fname = "%s-data.json" % now print() print("Data collection complete! saving to %s..." % fname) with open(fname, "w") as fp: json.dump(stored_data, fp, indent=4, separators=(",", ": ")) analyze_data(stored_data)
identifier_body
data_logger.py
#!/usr/bin/env python3 # # Adapted from the pynetworktables json_logger example program # # While this is designed to work with the robot.py example in this directory, # because the transport uses NetworkTables you can use it with a robot program # written in any FRC language. # # The expected NT interface is as follows: # # - /robot/autospeed : This program sends this to the robot. In autonomous mode, # the robot should attempt to drive at this speed # # - /robot/telemetry : The robot sends this. It is a number array that contains: # - time, battery, autospeed, # lmotor_volts, rmotor_volts,
# Change the following constant if your robot wheels are slipping during the # the fast test, or if the robot is not moving ROBOT_FAST_SPEED = 0.5 from networktables import NetworkTables, __version__ as ntversion from networktables.util import ntproperty # Older versions of pynetworktables (and ntcore) had bugs related to flush() if tuple(map(int, ntversion.split(".")[:3])) < (2018, 1, 2): print("Requires pynetworktables >= 2018.1.3, %s is installed" % ntversion) exit(1) import json import queue import time import threading from data_analyzer import analyze_data, AUTOSPEED_COL, ENCODER_P_COL import logging logger = logging.getLogger("logger") # FMSControlData bitfields ENABLED_FIELD = 1 << 0 AUTO_FIELD = 1 << 1 TEST_FIELD = 1 << 2 EMERGENCY_STOP_FIELD = 1 << 3 FMS_ATTACHED_FIELD = 1 << 4 DS_ATTACHED_FIELD = 1 << 5 def translate_control_word(value): value = int(value) if value & ENABLED_FIELD == 0: return "disabled" if value & AUTO_FIELD: return "auto" if value & TEST_FIELD: return "test" else: return "teleop" class DataLogger: # Change this key to whatever NT key you want to log log_key = "/robot/telemetry" matchNumber = ntproperty("/FMSInfo/MatchNumber", 0, writeDefault=False) eventName = ntproperty("/FMSInfo/EventName", "unknown", writeDefault=False) autospeed = ntproperty("/robot/autospeed", 0, writeDefault=True) def __init__(self): self.queue = queue.Queue() self.mode = "disabled" self.data = [] self.lock = threading.Condition() # Tells the listener to not store data self.discard_data = True # Last telemetry data received from the robot self.last_data = (0,) * 20 def connectionListener(self, connected, info): # set our robot to 'disabled' if the connection drops so that we can # guarantee the data gets written to disk if not connected: self.valueChanged("/FMSInfo/FMSControlData", 0, False) self.queue.put("connected" if connected else "disconnected") def valueChanged(self, key, value, isNew): if key == "/FMSInfo/FMSControlData": mode = 
translate_control_word(value) with self.lock: last = self.mode self.mode = mode data = self.data self.data = [] self.lock.notifyAll() logger.info("Robot mode: %s -> %s", last, mode) # This example only stores on auto -> disabled transition. Change it # to whatever it is that you need for logging if last == "auto": logger.info("%d items received", len(data)) # Don't block the NT thread -- trite the data to the queue so # it can be processed elsewhere self.queue.put(data) elif key == self.log_key: self.last_data = value if not self.discard_data: with self.lock: self.data.append(value) dlen = len(self.data) if dlen and dlen % 100 == 0: logger.info( "Received %d datapoints (last commanded speed: %.2f)", dlen, value[AUTOSPEED_COL], ) def get_nowait(self, timeout=None): try: return self.queue.get(block=False, timeout=timeout) except queue.Empty: return queue.Empty def wait_for_stationary(self): # Wait for the velocity to be 0 for at least one second logger.info("Waiting for robot to stop moving for at least 1 second...") first_stationary_time = time.monotonic() last_encoder = 0 while True: # check the queue in case we switched out of auto mode qdata = self.get_nowait() if qdata != queue.Empty: return qdata now = time.monotonic() # check the encoder position values, are they stationary? 
last_data = self.last_data try: encoder = last_data[ENCODER_P_COL] except IndexError: print(self.last_data) raise if ( abs(last_encoder - encoder) > 0.01 ): first_stationary_time = now elif now - first_stationary_time > 1: logger.info("Robot has waited long enough, beginning test") return last_encoder = encoder def ramp_voltage_in_auto(self, initial_speed, ramp): logger.info( "Activating arm at %.1f%%, adding %.3f per 50ms", initial_speed, ramp ) self.discard_data = False self.autospeed = initial_speed NetworkTables.flush() try: while True: # check the queue in case we switched out of auto mode qdata = self.get_nowait() if qdata != queue.Empty: return qdata time.sleep(0.050) self.autospeed = self.autospeed + ramp NetworkTables.flush() finally: self.discard_data = True self.autospeed = 0 def run(self): # Initialize networktables team = "" while team == "": team = input("Enter team number or 'sim': ") if team == "sim": NetworkTables.initialize(server="localhost") else: NetworkTables.startClientTeam(int(team)) # Use listeners to receive the data NetworkTables.addConnectionListener( self.connectionListener, immediateNotify=True ) NetworkTables.addEntryListener(self.valueChanged) # Wait for a connection notification, then continue on the path print("Waiting for NT connection..") while True: if self.queue.get() == "connected": break print("Connected!") print() autonomous = [ # name, initial speed, ramp ("slow-forward", 0, 0.001), ("slow-backward", 0, -0.001), ("fast-forward", abs(ROBOT_FAST_SPEED), 0), ("fast-backward", -abs(ROBOT_FAST_SPEED), 0), ] stored_data = {} # # Wait for the user to cycle through the 4 autonomus modes # for i, (name, initial_speed, ramp) in enumerate(autonomous): # Initialize the robot commanded speed to 0 self.autospeed = 0 self.discard_data = True print() print("Autonomous %d/%d: %s" % (i + 1, len(autonomous), name)) print() print("Please enable the robot in autonomous mode.") print() print( "WARNING: It will not automatically stop moving, so 
disable the robot" ) print("before the arm hits something!") print("") # Wait for robot to signal that it entered autonomous mode with self.lock: self.lock.wait_for(lambda: self.mode == "auto") data = self.wait_for_stationary() if data is not None: if data in ("connected", "disconnected"): print( "ERROR: NT disconnected, results won't be reliable. Giving up." ) return else: print("Robot exited autonomous mode before data could be sent?") break # Ramp the voltage at the specified rate data = self.ramp_voltage_in_auto(initial_speed, ramp) if data in ("connected", "disconnected"): print("ERROR: NT disconnected, results won't be reliable. Giving up.") return # output sanity check if len(data) < 3: print( "WARNING: There wasn't a lot of data received during that last run" ) else: distance = data[-1][ENCODER_P_COL] - data[0][ENCODER_P_COL] print() print("The robot arm reported traveling the following distance:") print() print("Left: %.3f degrees" % distance) print() print( "If that doesn't seem quite right... you should change the encoder calibration" ) print("in the robot program or fix your encoders!") stored_data[name] = data # In case the user decides to re-enable autonomous again.. self.autospeed = 0 # # We have data! Do something with it now # # Write it to disk first, in case the processing fails for some reason # -> Using JSON for simplicity, maybe add csv at a later date now = time.strftime("%Y%m%d-%H%M-%S") fname = "%s-data.json" % now print() print("Data collection complete! saving to %s..." % fname) with open(fname, "w") as fp: json.dump(stored_data, fp, indent=4, separators=(",", ": ")) analyze_data(stored_data) if __name__ == "__main__": log_datefmt = "%H:%M:%S" log_format = "%(asctime)s:%(msecs)03d %(levelname)-8s: %(name)-20s: %(message)s" logging.basicConfig(level=logging.INFO, datefmt=log_datefmt, format=log_format) dl = DataLogger() dl.run()
# l_encoder_count, r_encoder_count, # l_encoder_velocity, r_encoder_velocity #
random_line_split
data_logger.py
#!/usr/bin/env python3 # # Adapted from the pynetworktables json_logger example program # # While this is designed to work with the robot.py example in this directory, # because the transport uses NetworkTables you can use it with a robot program # written in any FRC language. # # The expected NT interface is as follows: # # - /robot/autospeed : This program sends this to the robot. In autonomous mode, # the robot should attempt to drive at this speed # # - /robot/telemetry : The robot sends this. It is a number array that contains: # - time, battery, autospeed, # lmotor_volts, rmotor_volts, # l_encoder_count, r_encoder_count, # l_encoder_velocity, r_encoder_velocity # # Change the following constant if your robot wheels are slipping during the # the fast test, or if the robot is not moving ROBOT_FAST_SPEED = 0.5 from networktables import NetworkTables, __version__ as ntversion from networktables.util import ntproperty # Older versions of pynetworktables (and ntcore) had bugs related to flush() if tuple(map(int, ntversion.split(".")[:3])) < (2018, 1, 2): print("Requires pynetworktables >= 2018.1.3, %s is installed" % ntversion) exit(1) import json import queue import time import threading from data_analyzer import analyze_data, AUTOSPEED_COL, ENCODER_P_COL import logging logger = logging.getLogger("logger") # FMSControlData bitfields ENABLED_FIELD = 1 << 0 AUTO_FIELD = 1 << 1 TEST_FIELD = 1 << 2 EMERGENCY_STOP_FIELD = 1 << 3 FMS_ATTACHED_FIELD = 1 << 4 DS_ATTACHED_FIELD = 1 << 5 def translate_control_word(value): value = int(value) if value & ENABLED_FIELD == 0: return "disabled" if value & AUTO_FIELD: return "auto" if value & TEST_FIELD: return "test" else: return "teleop" class DataLogger: # Change this key to whatever NT key you want to log log_key = "/robot/telemetry" matchNumber = ntproperty("/FMSInfo/MatchNumber", 0, writeDefault=False) eventName = ntproperty("/FMSInfo/EventName", "unknown", writeDefault=False) autospeed = ntproperty("/robot/autospeed", 0, 
writeDefault=True) def __init__(self): self.queue = queue.Queue() self.mode = "disabled" self.data = [] self.lock = threading.Condition() # Tells the listener to not store data self.discard_data = True # Last telemetry data received from the robot self.last_data = (0,) * 20 def connectionListener(self, connected, info): # set our robot to 'disabled' if the connection drops so that we can # guarantee the data gets written to disk if not connected: self.valueChanged("/FMSInfo/FMSControlData", 0, False) self.queue.put("connected" if connected else "disconnected") def valueChanged(self, key, value, isNew): if key == "/FMSInfo/FMSControlData": mode = translate_control_word(value) with self.lock: last = self.mode self.mode = mode data = self.data self.data = [] self.lock.notifyAll() logger.info("Robot mode: %s -> %s", last, mode) # This example only stores on auto -> disabled transition. Change it # to whatever it is that you need for logging if last == "auto": logger.info("%d items received", len(data)) # Don't block the NT thread -- trite the data to the queue so # it can be processed elsewhere self.queue.put(data) elif key == self.log_key: self.last_data = value if not self.discard_data: with self.lock: self.data.append(value) dlen = len(self.data) if dlen and dlen % 100 == 0: logger.info( "Received %d datapoints (last commanded speed: %.2f)", dlen, value[AUTOSPEED_COL], ) def get_nowait(self, timeout=None): try: return self.queue.get(block=False, timeout=timeout) except queue.Empty: return queue.Empty def wait_for_stationary(self): # Wait for the velocity to be 0 for at least one second logger.info("Waiting for robot to stop moving for at least 1 second...") first_stationary_time = time.monotonic() last_encoder = 0 while True: # check the queue in case we switched out of auto mode qdata = self.get_nowait() if qdata != queue.Empty: return qdata now = time.monotonic() # check the encoder position values, are they stationary? 
last_data = self.last_data try: encoder = last_data[ENCODER_P_COL] except IndexError: print(self.last_data) raise if ( abs(last_encoder - encoder) > 0.01 ): first_stationary_time = now elif now - first_stationary_time > 1: logger.info("Robot has waited long enough, beginning test") return last_encoder = encoder def
(self, initial_speed, ramp): logger.info( "Activating arm at %.1f%%, adding %.3f per 50ms", initial_speed, ramp ) self.discard_data = False self.autospeed = initial_speed NetworkTables.flush() try: while True: # check the queue in case we switched out of auto mode qdata = self.get_nowait() if qdata != queue.Empty: return qdata time.sleep(0.050) self.autospeed = self.autospeed + ramp NetworkTables.flush() finally: self.discard_data = True self.autospeed = 0 def run(self): # Initialize networktables team = "" while team == "": team = input("Enter team number or 'sim': ") if team == "sim": NetworkTables.initialize(server="localhost") else: NetworkTables.startClientTeam(int(team)) # Use listeners to receive the data NetworkTables.addConnectionListener( self.connectionListener, immediateNotify=True ) NetworkTables.addEntryListener(self.valueChanged) # Wait for a connection notification, then continue on the path print("Waiting for NT connection..") while True: if self.queue.get() == "connected": break print("Connected!") print() autonomous = [ # name, initial speed, ramp ("slow-forward", 0, 0.001), ("slow-backward", 0, -0.001), ("fast-forward", abs(ROBOT_FAST_SPEED), 0), ("fast-backward", -abs(ROBOT_FAST_SPEED), 0), ] stored_data = {} # # Wait for the user to cycle through the 4 autonomus modes # for i, (name, initial_speed, ramp) in enumerate(autonomous): # Initialize the robot commanded speed to 0 self.autospeed = 0 self.discard_data = True print() print("Autonomous %d/%d: %s" % (i + 1, len(autonomous), name)) print() print("Please enable the robot in autonomous mode.") print() print( "WARNING: It will not automatically stop moving, so disable the robot" ) print("before the arm hits something!") print("") # Wait for robot to signal that it entered autonomous mode with self.lock: self.lock.wait_for(lambda: self.mode == "auto") data = self.wait_for_stationary() if data is not None: if data in ("connected", "disconnected"): print( "ERROR: NT disconnected, results won't 
be reliable. Giving up." ) return else: print("Robot exited autonomous mode before data could be sent?") break # Ramp the voltage at the specified rate data = self.ramp_voltage_in_auto(initial_speed, ramp) if data in ("connected", "disconnected"): print("ERROR: NT disconnected, results won't be reliable. Giving up.") return # output sanity check if len(data) < 3: print( "WARNING: There wasn't a lot of data received during that last run" ) else: distance = data[-1][ENCODER_P_COL] - data[0][ENCODER_P_COL] print() print("The robot arm reported traveling the following distance:") print() print("Left: %.3f degrees" % distance) print() print( "If that doesn't seem quite right... you should change the encoder calibration" ) print("in the robot program or fix your encoders!") stored_data[name] = data # In case the user decides to re-enable autonomous again.. self.autospeed = 0 # # We have data! Do something with it now # # Write it to disk first, in case the processing fails for some reason # -> Using JSON for simplicity, maybe add csv at a later date now = time.strftime("%Y%m%d-%H%M-%S") fname = "%s-data.json" % now print() print("Data collection complete! saving to %s..." % fname) with open(fname, "w") as fp: json.dump(stored_data, fp, indent=4, separators=(",", ": ")) analyze_data(stored_data) if __name__ == "__main__": log_datefmt = "%H:%M:%S" log_format = "%(asctime)s:%(msecs)03d %(levelname)-8s: %(name)-20s: %(message)s" logging.basicConfig(level=logging.INFO, datefmt=log_datefmt, format=log_format) dl = DataLogger() dl.run()
ramp_voltage_in_auto
identifier_name
SpatialGP.py
import os import time import numpy as np import collections import scipy import scipy.sparse import pyublas import hashlib from sigvisa.gpr import munge, kernels, evaluate, learn, distributions, plot from sigvisa.gpr.gp import GaussianProcess from sigvisa.gpr.util import marshal_fn, unmarshal_fn from sigvisa.models.spatial_regression.baseline_models import ParamModel from sigvisa.source.event import Event from sigvisa.utils.cover_tree import VectorTree, MatrixTree start_params_dad_log = {"coda_decay": [.022, .0187, 1.00, .14, .1], "amp_transfer": [1.1, 3.4, 9.5, 0.1, .31], "peak_offset": [2.7, 3.4, 2, .7, 0.1] } start_params_lld = {"coda_decay": [.022, .0187, 50.00, 1.0], "amp_transfer": [1.1, 3.4, 100.00, 1.0], "peak_offset": [2.7, 3.4, 50.00, 1.0] } start_params_composite = {"coda_decay": [.022, .01, 1.0, .01, 100.0, .01, 3.0, .01, 100.0], "amp_transfer": [1.1, 3.0, 5.0, 3.0, 100.0, 3.0, 3.0, 3.0, 100.0], "peak_offset": [2.7, 3.0, 5.0, 3.0, 100.0, 3.0, 3.0, 3.0, 100.0], } start_params = {"dad_log": start_params_dad_log, "lld": start_params_lld, "composite": start_params_composite } X_LON, X_LAT, X_DEPTH, X_DIST, X_AZI = range(5) def dist_azi_depth_distfn_log(lldda1, lldda2, params): import sigvisa.utils.geog as geog import numpy as np azi_scale = params[0] depth_scale = params[1] dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1) avg_dist = (lldda1[3] + lldda2[3]) / 2 azi = geog.degdiff(lldda1[4], lldda2[4]) * np.log(avg_dist) depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1) r = np.sqrt(dist ** 2 + (azi_scale * azi) ** 2 + (depth_scale * depth) ** 2) return r def dist_azi_depth_distfn_deriv_log(i, lldda1, lldda2, params): import numpy as np import sigvisa.utils.geog as geog azi_scale = params[0] depth_scale = params[1] dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1) avg_dist = (lldda1[3] + lldda2[3]) / 2 azi = geog.degdiff(lldda1[4], lldda2[4]) * np.log(avg_dist + 1) depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1) r = np.sqrt(dist ** 2 + 
(azi_scale * azi) ** 2 + (depth_scale * depth) ** 2) if i == 0: # deriv wrt azi_scale deriv = azi_scale * azi ** 2 / r if r != 0 else 0 elif i == 1: # deriv wrt depth_scale deriv = depth_scale * depth ** 2 / r if r != 0 else 0 else: raise Exception("unknown parameter number %d" % i) return deriv def lon_lat_depth_distfn(lldda1, lldda2, params=None): import sigvisa.utils.geog as geog import numpy as np ll = geog.dist_km(tuple(lldda1[0:2]), tuple(lldda2[0:2])) depth = ( lldda1[2] - lldda2[2] ) * params[0] r = np.sqrt(ll ** 2 + depth ** 2) return r def lon_lat_depth_distfn_deriv(i, lldda1, lldda2, params=None): import sigvisa.utils.geog as geog import numpy as np assert (i == 0) ll = geog.dist_km(tuple(lldda1[0:2]), tuple(lldda2[0:2])) depth = ( lldda1[2] - lldda2[2] ) * params[0] r = np.sqrt(ll ** 2 + depth ** 2) return ( params[0] * ( lldda1[2] - lldda2[2] )**2 ) / r if r != 0 else 0.0 def logdist_diff_distfn(lldda1, lldda2, params=None): import numpy as np dist = np.log(lldda1[3] + 1) - np.log(lldda2[3] + 1) return dist def azi_diff_distfn(lldda1, lldda2, params=None): import sigvisa.utils.geog as geog import numpy as np azi = np.abs ( geog.degdiff(lldda1[4], lldda2[4]) ) return azi def logdepth_diff_distfn(lldda1, lldda2, params=None): import numpy as np depth = np.log(lldda1[2] + 1) - np.log(lldda2[2] + 1) return depth X_LON, X_LAT, X_DEPTH, X_DIST, X_AZI = range(5) def spatial_kernel_from_str(kernel_str, target=None, params=None): params = params if params is not None else start_params[kernel_str][target] priors = [None,] * len(params) # TODO: use real priors if kernel_str == "dad_log": k = kernels.setup_kernel(name='distfn', params = params, extra=[dist_azi_depth_distfn_log, dist_azi_depth_distfn_deriv_log], ) elif kernel_str == "lld": noise_kernel = kernels.DiagonalKernel(params=params[0:1], priors = priors[0:1]) local_kernel = kernels.DistFNKernel(params=params[1:4], priors=priors[1:4], distfn = lon_lat_depth_distfn, deriv=lon_lat_depth_distfn_deriv) k = 
        noise_kernel + local_kernel  # NOTE(review): fragment — the enclosing kernel-construction function begins above this chunk
    elif kernel_str == "composite":
        # assume we are passed the following params/priors:
        # 0 : sigma2_n -- noise variance
        # 1 : sigma2_f_dist -- function variance wrt dist_diff
        # 2 : w_dist -- length scale for dist_diff
        # 3 : sigma2_f_azi -- function variance wrt azi_diff
        # 4 : w_azi -- length scale for azi_diff
        # 5 : sigma2_f_depth -- function variance wrt depth_diff
        # 6 : w_depth -- length scale for depth_diff
        # 7 : sigma2_f_local -- function variance wrt local_dist
        # 8 : w_local -- length scale for local_dist
        noise_kernel = kernels.DiagonalKernel(params=params[0:1], priors = priors[0:1])
        distdiff_kernel = kernels.DistFNKernel(params=params[1:3], priors=priors[1:3],
                                               distfn = logdist_diff_distfn, deriv=None)
        azidiff_kernel = kernels.DistFNKernel(params=params[3:5], priors=priors[3:5],
                                              distfn = azi_diff_distfn, deriv=None)
        depthdiff_kernel = kernels.DistFNKernel(params=params[5:7], priors=priors[5:7],
                                                distfn = logdepth_diff_distfn, deriv=None)
        local_kernel = kernels.DistFNKernel(params=params[7:10], priors=priors[7:10],
                                            distfn = lon_lat_depth_distfn,
                                            deriv=lon_lat_depth_distfn_deriv)
        # sum kernel: noise + the four distance-feature components
        k = noise_kernel + distdiff_kernel + azidiff_kernel + depthdiff_kernel + local_kernel
    return k

# dead code, disabled by wrapping in a string literal
"""
def spatial_kernel_from_str(target=None, params=None):
    params = params if params is not None else start_params_lld[target]
    return params
"""

class SpatialGP(GaussianProcess, ParamModel):
    # Gaussian-process regression model with a parametric (basis-function)
    # mean, following the "explicit basis functions" treatment in section 2.7
    # of Rasmussen & Williams.  Inputs are passed to VectorTree/MatrixTree with
    # metric string "lld" -- presumably lon/lat/depth coordinates (the
    # sort_events comment below refers to lon/lat); confirm against VectorTree.

    def init_hyperparams(self, hyperparams):
        # Unpack the 4-vector of hyperparams into the fields the kernel uses:
        # noise variance (diagonal), signal variance (SE weight-fn param), and
        # the two distance-fn length scales.
        # NOTE(review): np.float is deprecated/removed in modern NumPy -- would
        # need float or np.float64 if this code is ever ported to Python 3.
        (noise_var, signal_var, ll_scale, d_scale) = hyperparams
        self.noise_var = noise_var
        self.dfn_params = np.array((ll_scale, d_scale), dtype=np.float)
        self.wfn_params = np.array((signal_var,), copy=True, dtype=np.float)

    def build_kernel_matrix(self, X, hyperparams):
        # Build the training covariance K = k_SE(X, X) + noise_var * I,
        # plus a small diagonal jitter for numerical stability.
        self.init_hyperparams(hyperparams)
        # a one-row tree is enough here; we only need its kernel_matrix method
        vt = VectorTree(X[0:1,:], 1, "lld", self.dfn_params)
        K = vt.kernel_matrix(X, X, "se", self.wfn_params, False) + self.noise_var * np.eye(len(X), dtype=np.float64)
        K += np.eye(K.shape[0], dtype=np.float64) * 1e-8 # try to avoid losing
                                                         # positive-definiteness
                                                         # to numeric issues
        return K

    def invert_kernel_matrix(self, K):
        # Returns (alpha, L, Kinv) where L is the lower Cholesky factor of K,
        # alpha = K^-1 y, and Kinv is the explicit inverse.  Failures are
        # re-raised for the caller (see spatialgp_nll_ngrad) to handle.
        L = None
        alpha = None
        try:
            L = scipy.linalg.cholesky(K, lower=True)
            alpha = scipy.linalg.cho_solve((L, True), self.y)
            Kinv = scipy.linalg.inv(K)
        except np.linalg.linalg.LinAlgError:
            #u,v = np.linalg.eig(K)
            #print K, u
            #import pdb; pdb.set_trace()
            raise
        except ValueError:
            raise
        return alpha, L, Kinv

    def build_parametric_model(self, alpha, Kinv_sp, H, b, B):
        # Posterior over the basis-function coefficients, given prior mean b
        # and prior covariance B.
        # notation follows section 2.7 of Rasmussen and Williams
        Binv = scipy.linalg.inv(B)
        tmp = np.dot(H, alpha) + np.dot(Binv, b) # H * K^-1 * y + B^-1 * b
        HKinv = H * Kinv_sp                      # sparse matmul: H * K^-1
        M_inv = Binv + np.dot(HKinv, H.T) # here M = (inv(B) +
                                          # H*K^-1*H.T)^-1 is the
                                          # posterior covariance
                                          # matrix on the params.
        c = scipy.linalg.cholesky(M_inv, lower=True) # c = sqrt(inv(B) + H*K^-1*H.T)
        beta_bar = scipy.linalg.cho_solve((c, True), tmp)  # posterior mean of params
        invc = scipy.linalg.inv(c)
        return c, beta_bar, invc, HKinv

    def sparsify(self, M):
        # Zero out entries below self.sparse_threshold in magnitude and store
        # the result as CSR.
        return scipy.sparse.csr_matrix(M * (np.abs(M) > self.sparse_threshold))

    def sort_events(self, X, y):
        # Sort the rows of X (and y, kept aligned) by the first column.
        # NOTE(review): the key uses only x[0] (first coordinate), though the
        # caller's comment says "lon/lat" -- confirm this ordering is intended.
        combined = np.hstack([X, np.reshape(y, (-1, 1))])
        combined_sorted = np.array(sorted(combined, key = lambda x: x[0]), dtype=float)
        X_sorted = np.array(combined_sorted[:, :-1], copy=True, dtype=float)
        y_sorted = combined_sorted[:, -1].flatten()
        return X_sorted, y_sorted

    def __init__(self, X=None, y=None,
                 fname=None,
                 basisfns=None,
                 hyperparams=None,
                 param_mean=None, param_cov=None,
                 compute_ll=False,
                 compute_grad=False,
                 sparse_threshold=1e-20,
                 sta = None,
                 sort_events=True):
        # Either load a previously-trained model from fname, or train a new
        # one from (X, y): build and invert the kernel matrix, fit the
        # parametric mean, precompute the tree structures used for fast
        # prediction, and optionally compute the marginal likelihood and its
        # gradient wrt the hyperparams.

        try:
            ParamModel.__init__(self, sta=sta)
        except KeyError:
            pass

        if fname is not None:
            self.load_trained_model(fname)
        else:
            if sort_events:
                X, y = self.sort_events(X, y) # arrange events by
                                              # lon/lat, as a
                                              # heuristic to expose
                                              # block structure in the
                                              # kernel matrix

            self.hyperparams = np.array(hyperparams)
            self.sparse_threshold = sparse_threshold
            self.X = X
            self.n = X.shape[0]
            self.basisfns = basisfns
            mu, self.y, H = self.setup_mean("parametric", X, y)

            # train model
            #t0 = time.time()
            K = self.build_kernel_matrix(self.X, hyperparams)
            #t1 = time.time()
            self.alpha, L, Kinv = self.invert_kernel_matrix(K)
            # symmetric-to-triangular trick: double the lower triangle and
            # subtract the diagonal so that summing over the nonzeros of the
            # triangle reproduces full-matrix quadratic forms
            Kinv_tri = 2 * np.tril(Kinv, k=0) - np.diag(np.diag(Kinv))
            #t2 = time.time()
            self.Kinv_sp = self.sparsify(Kinv)
            self.Kinv_sp_tri = self.sparsify(Kinv_tri)
            #t3 = time.time()
            self.c,self.beta_bar, self.invc, self.HKinv = self.build_parametric_model(self.alpha,
                                                                                      self.Kinv_sp,
                                                                                      H,
                                                                                      b=param_mean,
                                                                                      B=param_cov)
            #t4 = time.time()
            # residual after subtracting the fitted parametric mean
            r = self.y - np.dot(H.T, self.beta_bar)
            self.alpha_r = scipy.linalg.cho_solve((L, True), r)
            #t5 = time.time()
            self.build_point_tree(HKinv = self.HKinv, Kinv = Kinv_tri,
                                  Kinv_sp=self.Kinv_sp_tri, alpha_r = self.alpha_r)
            #t6 = time.time()
            # precompute training set log likelihood, so we don't need
            # to keep L around.
            z = np.dot(H.T, param_mean) - self.y
            B = param_cov
            if compute_ll:
                self._compute_marginal_likelihood(L=L, z=z, B=B, H=H, K=K, Kinv_sp=self.Kinv_sp_tri)
            else:
                self.ll = -np.inf
            #t7 = time.time()
            if compute_grad:
                self.ll_grad = self._log_likelihood_gradient(z=z, K=K, H=H, B=B, Kinv=Kinv)
                # NOTE(review): debugging dumps to the working directory; these
                # run on every gradient computation -- confirm they should stay.
                np.save('spatialK.npy', K)
                np.save('spatialKinv.npy', Kinv)
            #t8 = time.time()

            """
            print t1-t0
            print t2-t1
            print t3-t2
            print t4-t3
            print t5-t4
            print t6-t5
            print t7-t6
            print t8-t7
            """

    def build_point_tree(self, HKinv, Kinv, Kinv_sp, alpha_r):
        # Build the tree accelerators used at prediction time:
        #   predict_tree  -- weighted sums against alpha_r (posterior mean)
        #   cov_tree      -- one vector per basis function (rows of H*K^-1)
        #   double_tree   -- quadratic forms against the (triangular) K^-1
        self.predict_tree = VectorTree(self.X, 1, "lld", self.dfn_params)
        self.predict_tree.set_v(0, alpha_r.astype(np.float))

        d = len(self.basisfns)
        self.cov_tree = VectorTree(self.X, d, "lld", self.dfn_params)
        HKinv = HKinv.astype(np.float)
        for i in range(d):
            self.cov_tree.set_v(i, HKinv[i, :])

        nzr, nzc = Kinv_sp.nonzero()
        self.double_tree = MatrixTree(self.X, nzr, nzc, "lld", self.dfn_params)
        kkk = np.matrix(Kinv, copy=True, dtype=np.float64)
        self.double_tree.set_m(kkk)

    def predict(self, cond, eps=1e-8):
        # Posterior mean at the query points: tree-accelerated GP part plus
        # the parametric mean H.T * beta_bar.  A single query point is
        # unwrapped to a scalar.
        X1 = self.standardize_input_array(cond).astype(np.float)
        gp_pred = np.array([self.predict_tree.weighted_sum(0, np.reshape(x, (1,-1)), eps, "se", self.wfn_params) for x in X1])

        H = self.get_data_features(X1)
        mean_pred = np.reshape(np.dot(H.T, self.beta_bar), gp_pred.shape)
        gp_pred += mean_pred

        if len(gp_pred) == 1:
            gp_pred = gp_pred[0]

        return gp_pred

    def kernel(self, X1, X2, identical=False):
        # Prior covariance between two point sets; when X1 is X2
        # (identical=True), add the observation-noise variance on the diagonal.
        K = self.predict_tree.kernel_matrix(X1, X2, "se", self.wfn_params, False)
        if identical:
            K += self.noise_var * np.eye(K.shape[0])
        return K

    def covariance(self, cond, include_obs=False, parametric_only=False, pad=1e-8):
        """
        Compute the posterior covariance matrix at a set of points given by the rows of X1.

        Default is to compute the covariance of f, the latent function values. If obs_covar
        is True, we instead compute the covariance of y, the observed values.

        By default, we add a tiny bit of padding to the diagonal to counteract any potential
        loss of positive definiteness from numerical issues. Setting pad=0 disables this.

        """
        X1 = self.standardize_input_array(cond)
        m = X1.shape[0]

        # NOTE(review): relies on self.get_query_K having set self.query_R
        # as a side effect -- verify against the base class.
        Kstar = self.get_query_K(X1)
        if not parametric_only:
            tmp = self.Kinv_sp_tri * Kstar
            qf = np.dot(Kstar.T, tmp)           # Kstar.T * K^-1 * Kstar
            k = self.kernel(X1,X1, identical=include_obs)
            gp_cov = k - qf
        else:
            gp_cov = np.zeros((m,m))

        # uncertainty contributed by the parametric mean (R&W eqn 2.42)
        R = self.query_R
        tmp = np.dot(self.invc, R)
        mean_cov = np.dot(tmp.T, tmp)
        gp_cov += mean_cov

        gp_cov += pad * np.eye(gp_cov.shape[0])
        return gp_cov

    def covariance_double_tree(self, cond, include_obs=False, parametric_only=False, pad=1e-8, eps=1e-8):
        # Same posterior covariance as covariance(), but computed via the
        # MatrixTree/VectorTree accelerators; timing of the nonparametric and
        # parametric parts is recorded in self.nptime / self.ptime.
        X1 = self.standardize_input_array(cond)
        m = X1.shape[0]
        d = len(self.basisfns)

        t0 = time.time()
        if not parametric_only:
            k = self.kernel(X1, X1, identical=include_obs)
            qf = self.double_tree.quadratic_form(X1, eps, "se", self.wfn_params)
            gp_cov = k - qf
        else:
            gp_cov = np.zeros((m,m))
        t1 = time.time()

        # parametric-mean contribution, evaluated per basis fn / query point
        H = np.array([[f(x) for x in X1] for f in self.basisfns], dtype=np.float64)
        HKinvKstar = np.zeros((d, m))
        for i in range(d):
            for j in range(m):
                HKinvKstar[i,j] = self.cov_tree.weighted_sum(i, X1[j:j+1,:], eps, "se", self.wfn_params)
        R = H - HKinvKstar
        v = np.dot(self.invc, R)
        mc = np.dot(v.T, v)
        gp_cov += mc
        t2 = time.time()
        self.nptime = (t1-t0)
        self.ptime = (t2-t1)

        gp_cov += pad * np.eye(m)
        return gp_cov

    def variance(self, cond, **kwargs):
        # Thin wrapper over the base-class variance; unwraps a single result.
        X1 = self.standardize_input_array(cond)
        result = GaussianProcess.variance(self, X1, **kwargs)
        if len(result) == 1:
            result = result[0]
        return result

    def sample(self, cond):
        # Draw from the posterior at the query points; unwraps a single result.
        X1 = self.standardize_input_array(cond)
        result = GaussianProcess.sample(self, X1)
        if len(result) == 1:
            result = result[0]
        return result

    def log_p(self, x, cond):
        # Posterior log-likelihood of observed value(s) x at the query points.
        # NOTE(review): collections.Iterable is Python 2 / <3.10 only
        # (collections.abc.Iterable in Python 3).
        X1 = self.standardize_input_array(cond)
        x = x if isinstance(x, collections.Iterable) else (x,)
        result = GaussianProcess.posterior_log_likelihood(self, X1, x)
        return result

    def pack_npz(self):
        # Collect the trained-model state into a dict for np.savez.  Several
        # values are deliberately stored as 1-tuples (trailing commas); the
        # matching unpack_npz indexes them with [0].
        d = dict()
        d['c'] = self.c
        d['beta_bar'] = self.beta_bar
        d['invc'] = self.invc
        d['HKinv'] = self.HKinv
        d['basisfns'] = np.array([marshal_fn(f) for f in self.basisfns], dtype=object)
        d['X'] = self.X,
        d['y'] =self.y,
        d['alpha'] =self.alpha,
        d['hyperparams'] = self.hyperparams
        d['Kinv_sp_tri'] =self.Kinv_sp_tri,
        #d['Kinv_sp'] =self.Kinv_sp,
        d['sparse_threshold'] =self.sparse_threshold,
        d['ll'] =self.ll,
        d['alpha_r'] = self.alpha_r
        return d

    def save_trained_model(self, filename):
        """
        Serialize the model to a file.
        """
        d = self.pack_npz()
        with open(filename, 'wb') as f:
            np.savez(f, base_str=super(SpatialGP, self).__repr_base_params__(), **d)

    def unpack_npz(self, npzfile):
        # Inverse of pack_npz: restore model state from a loaded npz archive.
        # The [0] indexing unwraps the 1-tuples created by pack_npz.
        self.X = npzfile['X'][0]
        self.y = npzfile['y'][0]
        self.alpha = npzfile['alpha'][0]
        self.hyperparams = npzfile['hyperparams']
        self.init_hyperparams(self.hyperparams)
        self.Kinv_sp_tri = npzfile['Kinv_sp_tri'][0]
        #self.Kinv_sp = npzfile['Kinv_sp'][0]
        self.sparse_threshold = npzfile['sparse_threshold'][0]
        self.ll = npzfile['ll'][0]
        self.basisfns = npzfile['basisfns']
        self.basisfns = [unmarshal_fn(code) for code in self.basisfns]
        self.beta_bar = npzfile['beta_bar']
        self.c = npzfile['c']
        self.invc = npzfile['invc']
        self.HKinv = npzfile['HKinv']
        self.alpha_r = npzfile['alpha_r']

    def load_trained_model(self, filename):
        # Deserialize state saved by save_trained_model and rebuild the
        # in-memory tree structures (which are not serialized).
        npzfile = np.load(filename)
        self.unpack_npz(npzfile)
        super(SpatialGP, self).__unrepr_base_params__(str(npzfile['base_str']))
        del npzfile.f
        npzfile.close()
        self.n = self.X.shape[0]
        self.build_point_tree(HKinv=self.HKinv, Kinv=self.Kinv_sp_tri.todense(),
                              Kinv_sp=self.Kinv_sp_tri, alpha_r=self.alpha_r)

    def _compute_marginal_likelihood(self, L, z, B, H, K, Kinv_sp):
        # Training-set marginal log likelihood under the parametric-mean GP;
        # stores the result in self.ll.

        # here we follow eqn 2.43 in R&W
        #
        # let z = H.T*b - y, then we want
        # .5 * z.T * (K + H.T * B * H)^-1 * z
        # minus some other stuff (dealt with below).
        # by the matrix inv lemma,
        # (K + H.T * B * H)^-1
        # = Kinv - Kinv*H.T*(Binv + H*Kinv*H.T)^-1*H*Kinv
        # = Kinv - Kinv*H.T* invc.T * invc *H*Kinv
        # = Kinv - (invc * HKinv)^T (invc * HKinv)
        #
        # so we have z.T * Kinv * z - z.T * (other thing) * z
        # i.e.: term1 - term2
        # in the notation of the code.
        tmp1 = Kinv_sp * z
        term1 = np.dot(z.T, tmp1)

        tmp2 = np.dot(self.HKinv, z)
        tmp3 = np.dot(self.invc, tmp2)
        term2 = np.dot(tmp3.T, tmp3)

        # following eqn 2.43 in R&W, we want to compute
        # log det(K + H.T * B * H). using the matrix inversion
        # lemma, we instead compute
        # log det(K) + log det(B) + log det(B^-1 + H*K^-1*H.T)

        # to compute log(det(K)), we use the trick that the
        # determinant of a symmetric pos. def. matrix is the
        # product of squares of the diagonal elements of the
        # Cholesky factor
        ld2_K = np.log(np.diag(L)).sum()
        ld2 = np.log(np.diag(self.c)).sum() # det( B^-1 - H * K^-1 * H.T )
        ld_B = np.log(np.linalg.det(B))

        # eqn 2.43 in R&W, using the matrix inv lemma
        self.ll = -.5 * (term1 - term2 + self.n * np.log(2*np.pi) + ld_B) - ld2_K - ld2

    def _log_likelihood_gradient(self, z, K, H, B, Kinv):
        """
        Gradient of the training set log likelihood with respect to the kernel hyperparams.
        """
        # nparams matches the 4-vector unpacked in init_hyperparams; i == 0 is
        # the noise variance (dK/d(noise_var) = I), the rest are delegated to
        # the tree's kernel_deriv_wrt_i.
        nparams = 4
        grad = np.zeros((nparams,))

        #t0 = time.time()
        tmp = np.dot(self.invc, self.HKinv)
        #t1 = time.time()
        K_HBH_inv = Kinv - np.dot(tmp.T, tmp)   # (K + H.T*B*H)^-1 via matrix inv lemma
        #t2 = time.time()
        alpha_z = np.dot(K_HBH_inv, z)
        #t3 = time.time()
        #print "gradient: %f %f %f" % (t1-t0, t2-t1, t3-t2)

        for i in range(nparams):
            tA = time.time()
            if (i == 0):
                dKdi = np.eye(self.n)
            else:
                dKdi = self.predict_tree.kernel_deriv_wrt_i(self.X, self.X, "se", self.wfn_params, i-1)
            dlldi = .5 * np.dot(alpha_z.T, np.dot(dKdi, alpha_z))
            tB = time.time()
            # here we use the fact:
            # trace(AB) = sum_{ij} A_ij * B_ij
            dlldi -= .5 * np.sum(np.sum(K_HBH_inv.T * dKdi))
            grad[i] = dlldi
            tC = time.time()
            # per-hyperparameter timing printout (Python 2 print statement)
            print " %d: %f %f" % (i, tB-tA, tC-tB)

        return grad

def spatialgp_nll_ngrad(**kwargs):
    """
    Get both the negative log-likelihood and its gradient
    simultaneously (more efficient than doing it separately since we
    only create one new GP object, which only constructs the kernel
    matrix once, etc.).
    """
    try:
        # print "optimizing params", kernel_params
        gp = SpatialGP(compute_ll=True, compute_grad=True, **kwargs)
        nll = -1 * gp.ll
        ngrad = -1 * gp.ll_grad
    except np.linalg.linalg.LinAlgError as e:
        # degenerate kernel matrix for these hyperparams: report -inf likelihood
        # rather than aborting the optimizer
        print "warning: lin alg error (%s) in likelihood computation, returning likelihood -inf" % str(e)
        nll = np.float("inf")
        ngrad = None
    except ValueError as e:
        print "warning: value error (%s) in likelihood computation, returning likelihood -inf" % str(e)
        nll = np.float("inf")
        ngrad = None

    return nll, ngrad
random_line_split