id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
1738434 | from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.metrics import accuracy_score, confusion_matrix,precision_recall_fscore_support, classification_report
class Combinator(BaseEstimator, TransformerMixin):
    """ Static A posteriori Combinator of predictions.

    Combines the already-computed predictions of several base models into a
    single ensemble prediction by (weighted) hard voting.

    Args:
        - scheme: String flag. Can be one of the following:
            - 'majority': Simple Hard Majority Voting
            - 'weights': Weighted Voting, with weights passed by user
              in the weights arg
            - 'accuracy': Weights are calculated according to prediction
              accuracy over the meta train set
            - 'optimal': The optimal weights are found, this is done by
              optimizing over the classification error
        - weights: list or numpy array containing as many weights as the
          models in the ensemble
    Returns:
        - The ensemble Model. Needs to be fitted for the encoding part
    """
    def __init__(self, scheme='majority', weights=None):
        from sklearn.preprocessing import LabelEncoder, OneHotEncoder
        import numpy
        self.scheme = scheme
        self.weights = weights
        self.transformation = self.voting
        self.num_labels = 0
        self.num_models = 0
        # Encoders mapping raw labels -> integer ids -> one-hot rows.
        self.lab = LabelEncoder()
        self.ohe = OneHotEncoder()
        if self.scheme == 'majority':
            # Equal vote for every model; explicit weights are discarded.
            self.weights = None
        else:
            if self.scheme == 'weights':
                # NOTE(review): numpy.array is a function, not a type, so this
                # membership test effectively only matches numpy.ndarray.
                if type(self.weights) in (numpy.array, numpy.ndarray):
                    pass  # It is from the optimization part
                else:
                    if not(self.weights):
                        print "Need weights for this scheme!"
                    self.weights = weights
                    weights_string = " %.2f |" * len(self.weights) % tuple(self.weights)
                    # print "Using given weights: | %s" % weights_string
            else:
                # 'accuracy' / 'optimal': weights are derived later in fit().
                pass

    def fit(self, X, y, weights=None):
        """Fit the label/one-hot encoders on (X, y) and, for the 'accuracy'
        and 'optimal' schemes, derive the voting weights."""
        if y is None:
            raise ValueError('We need y labels to supervise-fit!')
        X_tr, y_tr = self.fit_encoders(X, y)
        if not(self.scheme in ['majority', 'weights']):
            self.find_weights(X, y, X_tr, y_tr)
            weights_string = " %.2f |" * len(self.weights) % tuple(self.weights)
            # print "Using found weights: | %s" % weights_string
        return self

    def transform(self, X):
        """Encode the base-model predictions X, run the voting scheme, and
        return the decoded ensemble prediction (one label per sample)."""
        import numpy
        # NOTE(review): `type(X[0]) is numpy.array` is always False (numpy.array
        # is a function); the len() fallback is what actually runs for lists.
        if type(X[0]) is numpy.array:
            N_samples = X[0].shape[0]
        else:
            N_samples = len(X[0])
        X = self.lab.transform(X)
        X = self.ohe.transform(X.reshape(-1, 1)).todense().reshape(N_samples, -1)
        prediction = self.transformation(X)
        prediction = self.lab.inverse_transform(prediction.argmax(axis=1))
        return prediction

    def predict(self, X):
        """Alias of transform() so the ensemble exposes the estimator API."""
        return self.transform(X)

    def score(self, X, y_true):
        """Mean accuracy of the combined prediction against y_true."""
        from sklearn.metrics import accuracy_score
        y_pred = self.predict(X)
        return accuracy_score(y_true, y_pred, normalize=True)

    def fit_encoders(self, X, y):
        """Fit the label and one-hot encoders on y, encode both X and y, and
        record num_models from the encoded width. Returns encoded (X, y)."""
        import numpy
        self.num_labels = len(set(y))
        N_samples = len(y)
        # NOTE(review): numpy.array is a function, so this test is always
        # False and the else branch always runs.
        if type(X) is numpy.array:
            y = y.reshape(-1, 1)
        else:
            y = numpy.array(y).reshape(-1, 1)
        y = self.lab.fit_transform(y).reshape(-1, 1)
        y = self.ohe.fit_transform(y).todense()
        X = self.lab.transform(X)
        X = self.ohe.transform(X.T.reshape(-1, 1)).todense().reshape(N_samples, -1)
        # Each model contributes num_labels one-hot columns per sample.
        self.num_models = int(X.shape[1] / self.num_labels)
        return X, y

    def voting(self, X):
        """Weighted hard voting over the one-hot encoded predictions X
        (one row per sample, num_models * num_labels columns).

        Returns a one-hot (N_samples, num_labels) matrix of winners.
        """
        import numpy
        predictions = numpy.zeros([X.shape[0], self.num_labels])
        if type(self.weights) in (numpy.array, numpy.ndarray):
            pass
        else:
            if not(self.weights):
                # Majority scheme: every model gets an equal vote.
                self.weights = [1 for i in xrange(self.num_models)]
        for i in xrange(X.shape[0]):
            # One (1, num_labels) slice per model for this sample.
            subarrays = numpy.split(X[i, :], self.num_models, axis=1)
            votes = numpy.zeros([1, self.num_labels])
            for model_index, subar in enumerate(subarrays):
                votes = numpy.vstack((votes, subar*self.weights[model_index]))
            pred_ = votes.sum(axis=0).argmax()
            pred_ = self.ohe.transform(pred_).todense()
            predictions[i, :] = pred_
        return predictions

    def find_weights(self, X, y, X_tr, y_tr):
        """Derive self.weights: per-model correct-prediction counts for the
        'accuracy' scheme, or SLSQP-optimized weights (minimizing 1-accuracy)
        for the 'optimal' scheme."""
        import numpy
        weights = [0 for i in xrange(self.num_models)]
        if self.scheme == 'accuracy':
            for i in xrange(X_tr.shape[0]):
                subarrays = numpy.split(X_tr[i, :], self.num_models, axis=1)
                for model_index, subar in enumerate(subarrays):
                    if (subar == y_tr[i, :]).all():
                        weights[model_index] += 1
            self.weights = weights
        if self.scheme == 'optimal':
            from scipy.optimize import minimize
            w = [1 for i in xrange(self.num_models)]
            # Weights constrained non-negative, unbounded above.
            bnds = tuple([(0, None) for i in xrange(self.num_models)])
            a = minimize(self.f, w, args=(Combinator, X, y), method='SLSQP', bounds=bnds)
            self.weights = list(a.x)
        return

    def f(self, w, Combinator, x, y):
        """Objective for the 'optimal' scheme: classification error of a
        fresh weighted Combinator built with candidate weights w."""
        gg = Combinator(scheme='weights', weights=w)
        gg.fit(x, y)
        score = 1 - gg.score(x, y)
        return score
# Example Usage
# def f(w, Combinator, x, y):
# gg = Combinator(scheme='weights', weights=w)
# gg.fit(x, y)
# score = 1- gg.score(x, y)
# #print 'Weights'
# #print w
# #print 'Score: ' + str(score)
# return score
# w = [1 for i in xrange(6)]
# bnds = tuple([(0, None) for i in xrange(6)])
# a = minimize(f, w, args=(Combinator, predictions_meta, y_meta), method='SLSQP', bounds=bnds)
class SubSpaceEnsemble4_2(BaseEstimator, TransformerMixin):
    """ Best model base on the prediction of the k-nearest, according to each model, neighbor.
    Implementing fitting with random weight searching for better results.

    Weight vector layout (w): [proba_weight, neigh_acc_weight,
    acc_threshold, k] -- TODO confirm against callers.
    """
    def __init__(self, models, cv_scores, k=6, weights=[0.6, 0.2, 0.3, 6], N_rand=8, rand_split=0.6):
        # NOTE(review): mutable default for `weights` is shared across
        # instances that rely on the default.
        if (not models) or (not cv_scores):
            raise AttributeError('Models expexts a dictonary of models \
containg the predictions of y_true for each classifier.\
cv_score expects a list len(models.keys()) with the\
cross validation scores of each model')
        else:
            self.models = models
            self.cv_scores = cv_scores
            self.k = k
            # Stable integer index -> model name mapping.
            self.ind2names = {}
            self.weights = weights
            self.N_rand = N_rand
            self.rand_split = rand_split
            for i, name in enumerate(models.keys()):
                self.ind2names[i] = name
            self.predictions = []
            self.true = []
            self.trees = []
            self.representations = []

    def fit(self, X_cv, y_true=None, weights=None):
        """Cache each model's CV predictions and a BallTree over its feature
        representation, then random-search the decision weights in two
        phases (coarse, then refined around the best candidate)."""
        from sklearn.neighbors import BallTree
        from sklearn.metrics import accuracy_score
        import random
        import time
        if y_true is None:
            raise ValueError('we need y labels to supervise-fit!')
        else:
            t0 = time.time()
            predictions = []
            for name, model in self.models.iteritems():
                predictions.append(model.predict(X_cv))
                # steps[1][1] is assumed to be the feature transformer of a
                # sklearn Pipeline -- TODO confirm against callers.
                transf = model.steps[1][1].transform(X_cv)
                if hasattr(transf, "toarray"):
                    self.representations.append(transf.toarray())
                else:
                    self.representations.append(transf)
                self.trees.append(BallTree(self.representations[-1], leaf_size=20))
            self.predictions = predictions
            self.true = y_true
            # Phase 1: coarse random search over rand_split * N_rand candidates.
            N_rand1 = int(self.rand_split * self.N_rand)
            poss_w = []
            acc_ = []
            pred = []
            for i in xrange(N_rand1):
                tmp_w = [0.6, 0.2, 0.3, 6]
                tmp_w[0] = round(random.random(), 3)
                tmp_w[1] = round(1 - tmp_w[0], 3)
                tmp_w[2] = round(random.uniform(0.2, 0.8), 3)
                poss_w.append(tmp_w)
                pred = self.find_weights(X_cv, tmp_w)
                acc = accuracy_score(self.true, pred)
                acc_.append(acc)
            print('First search took: %0.3f seconds') % (time.time() - t0)
            # Phase 2: refine around the best phase-1 candidate.
            tmp_w = poss_w[acc_.index(max(acc_))]
            poss_w = []
            acc_ = []
            for i in xrange(self.N_rand -N_rand1):
                # NOTE(review): tmp_w2 aliases tmp_w (no copy), so all
                # appended candidates are the same mutated list; also the
                # third bound uses tmp_w[1] where tmp_w[2] looks intended --
                # confirm before relying on this search.
                tmp_w2 = tmp_w
                tmp_w2[0] = round(random.uniform(tmp_w[0] - 0.1, tmp_w[0] + 0.1), 3)
                tmp_w2[1] = round(1 - tmp_w2[0], 3)
                tmp_w2[2] = round(random.uniform(tmp_w[2] - 0.1, tmp_w[1] + 0.1), 3)
                poss_w.append(tmp_w2)
                pred = self.find_weights(X_cv, tmp_w2)
                acc = accuracy_score(self.true, pred)
                acc_.append(acc)
            self.weights = poss_w[acc_.index(max(acc_))]
            self.k = self.weights[3]
            print 'Accuracy obtained in CV-data: %0.3f' % (100 * acc_[acc_.index(max(acc_))])
            print self.weights
            print('Fit took: %0.3f seconds') % (time.time() - t0)
        return self

    def find_weights(self, X_cv, w):
        """Predict every CV sample under candidate weight vector w."""
        y_pred = []
        for x in X_cv:
            y_pred.append(self.expert_fit_decision(x, w))
        return y_pred

    def expert_fit_decision(self, x_sample, w):
        """Choose one model's prediction for x_sample using candidate
        weights w (same logic as expert_decision, parameterized by w)."""
        from sklearn.metrics import accuracy_score
        possible_experts = []
        sample_predictions = []
        acc = []
        possible_experts_sc = []
        for model_i in xrange(len(self.models.values())):
            temp_trans = self.models[self.ind2names[model_i]].steps[1][1].transform([x_sample])
            if hasattr(temp_trans, 'toarray'):
                temp_trans = temp_trans.toarray()
            # w[3] neighbors of the sample in this model's feature space.
            _, model_neig = self.trees[model_i].query(temp_trans, w[3])
            model_neig_pred = []
            neigh_true = []
            for model_n_i in model_neig[0].tolist():
                model_neig_pred.append(self.predictions[model_i][model_n_i])
                neigh_true.append(self.true[model_n_i])
            # Local accuracy of this model in the sample's neighborhood.
            acc.append(accuracy_score(neigh_true, model_neig_pred, normalize=True))
            predicted = self.models[self.ind2names[model_i]].predict([x_sample])[0]
            proba = max(self.models[self.ind2names[model_i]].predict_proba([x_sample])[0])
            if acc[-1] > w[2]:
                # Model qualifies as an expert; its score blends local
                # accuracy and its own prediction confidence.
                possible_experts.append(model_i)
                possible_experts_sc.append(w[1]*acc[-1]+w[0]*proba)
                sample_predictions.append(predicted)
        if possible_experts:
            return sample_predictions[possible_experts_sc.index(max(possible_experts_sc))]
        else:
            # No expert passed the threshold: fall back to the model with
            # the best local accuracy.
            return self.models[self.ind2names[(acc.index(max(acc)))]].predict([x_sample])[0]

    def predict(self, X):
        """Per-sample expert selection using the fitted weights."""
        y_pred = []
        for i, x in enumerate(X):
            y_pred.append(self.expert_decision(x))
        return y_pred

    def score(self, X, y, sample_weight=None):
        """Mean accuracy of predict(X) against y."""
        from sklearn.metrics import accuracy_score
        return accuracy_score(y, self.predict(X), normalize=True)

    def expert_decision(self, x_sample):
        """Choose one model's prediction for x_sample using the fitted
        self.weights / self.k (inference-time twin of expert_fit_decision)."""
        from sklearn.metrics import accuracy_score
        possible_experts = []
        sample_predictions = []
        acc = []
        possible_experts_sc = []
        for model_i in xrange(len(self.models.values())):
            temp_trans = self.models[self.ind2names[model_i]].steps[1][1].transform([x_sample])
            if hasattr(temp_trans, 'toarray'):
                temp_trans = temp_trans.toarray()
            _, model_neig = self.trees[model_i].query(temp_trans, self.k)
            model_neig_pred = []
            neigh_true = []
            for model_n_i in model_neig[0].tolist():
                model_neig_pred.append(self.predictions[model_i][model_n_i])
                neigh_true.append(self.true[model_n_i])
            acc.append(accuracy_score(neigh_true, model_neig_pred, normalize=True))
            predicted = self.models[self.ind2names[model_i]].predict([x_sample])[0]
            proba = max(self.models[self.ind2names[model_i]].predict_proba([x_sample])[0])
            if acc[-1] > self.weights[2]:
                possible_experts.append(model_i)
                possible_experts_sc.append(self.weights[1] * acc[-1] + self.weights[0] * proba)
                sample_predictions.append(predicted)
        if possible_experts:
            return sample_predictions[possible_experts_sc.index(max(possible_experts_sc))]
        else:
            return self.models[self.ind2names[(acc.index(max(acc)))]].predict([x_sample])[0]
class Neigbors_DS(BaseEstimator, TransformerMixin):
    """ Best model base on the predictions of the k-nearest neighbors. Many different schemes.
    Also, implements a common neighborhoud instead a per transformation one.
    Args:
        - scheme: String flag. Can be one of the following:
            - 'LCA': Local Class Accuracy
            - 'OLA': Overall Local Accuracy
            - 'KNE': K-Neighbors Elimination. Start from k and shrink
              until some model classifies the whole neighborhood correctly
            - 'KNU': K-Neighbors Union style voting over qualifying models
        - common_neigh: if True, one shared neighborhood (bag-of-words
          counts) is used instead of a per-model feature space
    Returns:
        - The ensemble Model. Needs to be fitted for the encoding part
    """
    def __init__(self, models, models_tr, k= 5, scheme='LCA', common_neigh=False):
        if (not models) or (not models_tr):
            raise AttributeError('Models expexts a dictonary of models \
containg the predictions of y_true for each classifier.\
cv_score expects a list len(models.keys()) with the\
cross validation scores of each model')
        else:
            self.models = models
            self.models_tr = models_tr
            self.k = k
            # Stable integer index -> model name mapping.
            self.ind2names = {}
            for i, name in enumerate(models.keys()):
                self.ind2names[i] = name
            self.predictions = {}
            self.true = []
            self.trees = {}
            self.scheme = scheme
            self.common_neigh = common_neigh
            if common_neigh:
                # Shared neighborhood: plain word-count vectors over the
                # raw text samples.
                from sklearn.feature_extraction.text import CountVectorizer
                self.counter = CountVectorizer()
                parameters = {
                    'input': 'content',
                    'encoding': 'utf-8',
                    'decode_error': 'ignore',
                    'analyzer': 'word',
                    'stop_words': 'english',
                    'max_df': 1.0,
                    'min_df': 1,
                    'max_features': None
                }
                self.counter.set_params(**parameters)
                self.gt_tree = None
            else:
                self.counter = None
            # Dispatch: scheme name -> per-sample prediction routine
            # (unknown schemes silently fall back to OLA).
            if self.scheme == 'LCA':
                self.predictor = self.predict_lca
            elif self.scheme == 'KNE':
                self.predictor = self.predict_kne
            elif self.scheme == 'OLA':
                self.predictor = self.predict_ola
            elif self.scheme == 'KNU':
                self.predictor = self.predict_knu
            else:
                self.predictor = self.predict_ola

    def fit(self, X_cv, y_true=None, weights=None):
        """Cache per-model predictions over X_cv and build the neighbor
        search trees (one shared tree or one per model).

        NOTE(review): does not `return self`, which breaks sklearn-style
        chaining -- confirm callers do not rely on the return value.
        """
        from sklearn.neighbors import BallTree
        from sklearn.metrics import accuracy_score
        import random
        import time
        if y_true is None:
            raise ValueError('we need y labels to supervise-fit!')
        else:
            t0 = time.time()
            predictions = []
            for name, model in self.models.iteritems():
                if self.common_neigh:
                    # One shared neighborhood from raw-text counts
                    # (refitted redundantly on every loop iteration).
                    X_tr = self.counter.fit_transform(X_cv)
                    self.gt_tree = BallTree(X_tr.toarray(), leaf_size=20)
                else:
                    # Per-model neighborhood in that model's feature space.
                    X_tr = self.models_tr[name].transform(X_cv)
                    if hasattr(X_tr, "toarray"):
                        self.trees[name] = BallTree(X_tr.toarray(), leaf_size=20)
                    else:
                        self.trees[name] = BallTree(X_tr, leaf_size=20)
                self.predictions[name] = model.predict(X_cv)
            self.true = y_true
            print 'Fitting time %0.2f' % (time.time() - t0)

    def predict(self, X):
        """Apply the scheme-selected predictor to every sample."""
        y_pred = []
        for i, x in enumerate(X):
            y_pred.append(self.predictor(x))
        return y_pred

    def score(self, X, y, sample_weight=None):
        """Mean accuracy of predict(X) against y."""
        from sklearn.metrics import accuracy_score
        return accuracy_score(y, self.predict(X), normalize=True)

    def predict_lca(self, sample):
        """Local Class Accuracy: when models disagree, trust the model whose
        neighbors *of its own predicted class* were best classified."""
        preds = []
        for name, model in self.models.iteritems():
            preds.append(model.predict([sample])[0])
        if len(set(preds))==1:
            # Unanimous decision; no need to rank models.
            return preds[0]
        else:
            lca = [0 for pred in preds]
            model_ind = 0
            for name, model in self.models.iteritems():
                sample_trans = self.models_tr[name].transform([sample])
                step = 50
                found_k_class_n = self.k
                neigh_indexes = []
                # Grow the query size until k neighbors belonging to the
                # predicted class have been collected.
                # NOTE(review): if fewer than k such neighbors exist this
                # loop can spin without terminating -- verify on real data.
                while found_k_class_n>=0:
                    if self.common_neigh:
                        _, model_neig = self.gt_tree.query(self.counter.transform([sample]).toarray(), step)
                    else:
                        if hasattr(sample_trans, "toarray"):
                            _, model_neig = self.trees[name].query(sample_trans.toarray(), step)
                        else:
                            _, model_neig = self.trees[name].query(sample_trans, step)
                    for model_n_i in model_neig[0].tolist():
                        if name == 'lsi':
                            if self.true[model_n_i] != '35-49':
                                pass
                        if preds[model_ind] == self.true[model_n_i]:
                            neigh_indexes.append(model_n_i)
                            found_k_class_n -= 1
                    step *= 2
                    if step >= len(self.predictions[name]):
                        step = len(self.predictions[name])-1
                neigh_indexes = neigh_indexes[:self.k]
                model_neig_pred = []
                neigh_true = []
                for model_n_i in neigh_indexes:
                    model_neig_pred.append(self.predictions[name][model_n_i])
                    neigh_true.append(self.true[model_n_i])
                lca[model_ind] = accuracy_score(neigh_true, model_neig_pred, normalize=True)
                model_ind += 1
            return preds[lca.index(max(lca))]

    def predict_ola(self, sample):
        """Overall Local Accuracy: when models disagree, trust the model
        with the best accuracy over the sample's k nearest neighbors."""
        preds = []
        for name, model in self.models.iteritems():
            preds.append(model.predict([sample])[0])
        if len(set(preds))==1:
            return preds[0]
        else:
            ola = [0 for pred in preds]
            model_ind = 0
            for name, model in self.models.iteritems():
                if self.common_neigh:
                    _, model_neig = self.gt_tree.query(self.counter.transform([sample]).toarray(), self.k)
                else:
                    sample_trans = self.models_tr[name].transform([sample])
                    if hasattr(sample_trans, "toarray"):
                        _, model_neig = self.trees[name].query(sample_trans.toarray(), self.k)
                    else:
                        _, model_neig = self.trees[name].query(sample_trans, self.k)
                model_neig_pred = []
                neigh_true = []
                for model_n_i in model_neig[0].tolist():
                    model_neig_pred.append(self.predictions[name][model_n_i])
                    neigh_true.append(self.true[model_n_i])
                ola[model_ind] = accuracy_score(neigh_true, model_neig_pred, normalize=True)
                model_ind += 1
            return preds[ola.index(max(ola))]

    def predict_kne(self, sample):
        """K-Neighbors Elimination: shrink k until some model classifies
        the whole k-neighborhood correctly; fall back to OLA scores taken
        at the original k when no such model exists."""
        preds = []
        for name, model in self.models.iteritems():
            preds.append(model.predict([sample])[0])
        if len(set(preds))==1:
            return preds[0]
        else:
            k = self.k
            possible_experts = []
            neigh_radius = []
            ola_scores = []
            while k>0 :
                model_ind = 0
                for name, model in self.models.iteritems():
                    if self.common_neigh:
                        _, model_neig = self.gt_tree.query(self.counter.transform([sample]).toarray(), k)
                    else:
                        sample_trans = self.models_tr[name].transform([sample])
                        if hasattr(sample_trans, "toarray"):
                            _, model_neig = self.trees[name].query(sample_trans.toarray(), k)
                        else:
                            _, model_neig = self.trees[name].query(sample_trans, k)
                    model_neig_pred = []
                    neigh_true = []
                    for model_n_i in model_neig[0].tolist():
                        model_neig_pred.append(self.predictions[name][model_n_i])
                        neigh_true.append(self.true[model_n_i])
                    if k == self.k:
                        # OLA fallback scores are only computed on the
                        # first (largest-k) pass.
                        ola_scores.append(accuracy_score(neigh_true, model_neig_pred, normalize=True))
                    if neigh_true == model_neig_pred:
                        possible_experts.append(preds[model_ind])
                        neigh_radius.append(k)
                    model_ind += 1
                if not(possible_experts):
                    k -= 1
                else:
                    break
            if not(possible_experts):
                return preds[ola_scores.index(max(ola_scores))]
            else:
                return possible_experts[0]

    def predict_knu(self, sample):
        """K-Neighbors Union variant: models whose nearest neighbor (last
        of the k returned) is correctly predicted vote; majority wins, with
        an OLA fallback when no model qualifies."""
        preds = []
        for name, model in self.models.iteritems():
            preds.append(model.predict([sample])[0])
        if len(set(preds))==1:
            return preds[0]
        else:
            possible_experts = []
            neigh_radius = []
            ola_scores = []
            model_ind = 0
            for name, model in self.models.iteritems():
                if self.common_neigh:
                    _, model_neig = self.gt_tree.query(self.counter.transform([sample]).toarray(), self.k)
                else:
                    sample_trans = self.models_tr[name].transform([sample])
                    if hasattr(sample_trans, "toarray"):
                        _, model_neig = self.trees[name].query(sample_trans.toarray(), self.k)
                    else:
                        _, model_neig = self.trees[name].query(sample_trans, self.k)
                model_neig_pred = []
                neigh_true = []
                for model_n_i in model_neig[0].tolist():
                    model_neig_pred.append(self.predictions[name][model_n_i])
                    neigh_true.append(self.true[model_n_i])
                    if model_neig_pred[-1] == neigh_true[-1]:
                        possible_experts.append(preds[model_ind])
                ola_scores.append(accuracy_score(neigh_true, model_neig_pred, normalize=True))
                model_ind += 1
            if not(possible_experts):
                return preds[ola_scores.index(max(ola_scores))]
            else:
                return most_common(possible_experts)
def most_common(lst):
    """Return the element of *lst* with the highest occurrence count.

    Ties are broken arbitrarily, matching the original set-based lookup.
    """
    tally = {}
    for item in lst:
        tally[item] = tally.get(item, 0) + 1
    return max(tally, key=tally.get)
| StarcoderdataPython |
33599 | from numpy import average, number
from textblob import TextBlob
class ScaleUtilities:
    """Scores text sentiment with TextBlob and keeps a running average.

    The running totals are class-level so every scored string contributes
    to one shared average.
    """
    # Running totals shared across all calls.
    average = 0
    number = 0

    def __init__(self, string, number):
        self.string = string

    @staticmethod
    def get_subjectivity_of(string):
        """Score *string* (polarity scaled to [-5, 5]) and fold it into
        the running totals. Returns the scaled polarity.

        Bug fix: the original mutated bare local names ``number`` and
        ``average`` (UnboundLocalError); the counters now update the
        class-level state they were meant to track.
        """
        polarity = TextBlob(string).sentiment.polarity * 5
        ScaleUtilities.number += 1
        ScaleUtilities.average += polarity
        return polarity

    @staticmethod
    def average_opinion():
        """Return the mean polarity of all strings scored so far.

        Exits the process when nothing has been scored yet (preserves the
        original guard behavior).
        """
        if ScaleUtilities.number == 0:
            print("You idiot")
            exit(1)
        return ScaleUtilities.average / ScaleUtilities.number
| StarcoderdataPython |
4813781 | #coding:utf-8
import tensorflow as tf
from model import ShowAndTell
import keras
import os
import keras.backend as K
import numpy as np
# Inference only: put Keras in test mode (disables dropout/BN training paths).
K.set_learning_phase(0)
FLAGS = tf.app.flags.FLAGS
tf.app.flags.DEFINE_string("weight_path", "./keras_weight/weights_full.h5", "Weights data path")
tf.app.flags.DEFINE_string("image_path", None, "Test image path to be tested with")
tf.app.flags.DEFINE_integer("max_sentence_length", 20, "Max length of sentence to be predicted")
# Snapshot flag values at import time for use inside main().
weight_path = FLAGS.weight_path
image_path = FLAGS.image_path
max_length_of_sentence = FLAGS.max_sentence_length
def main(unused_argv):
    """Generate a caption for FLAGS.image_path with a ShowAndTell model.

    Greedy decoding: the image encoder produces the initial LSTM state,
    then the previous word id and LSTM state are fed back in until the
    end token is emitted or max_sentence_length is reached.
    """
    if not os.path.isfile(image_path):
        print('Image file not found at: '+image_path)
        return
    model = ShowAndTell()
    model.config.mode = 'inference'
    model.build_model()
    # Load weights if exists
    if os.path.isfile(weight_path):
        model.inception_part_model.load_weights(weight_path, by_name=True)
        model.lstm_part_model.load_weights(weight_path, by_name=True)
    else:
        print('Weight file not found')
        return
    sess = K.get_session()
    with tf.gfile.FastGFile(image_path, "rb") as f:
        origin_img_val = model.process_image(f.read())
    test_image = tf.expand_dims(origin_img_val, 0)
    # Image encoder outputs the initial LSTM (h, c) state pair.
    next_h_state = K.eval(model.inception_part_model([test_image])[0])
    next_c_state = K.eval(model.inception_part_model([test_image])[1])
    words = [model.vocab.start_id]
    for i in range(max_length_of_sentence):
        # One decode step: previous word id + state -> logits + new state.
        X, next_h_state, next_c_state = sess.run(model.lstm_part_model.outputs, feed_dict={"input_1:0": [[words[-1]]], "lstm_h_state:0": next_h_state, "lstm_c_state:0": next_c_state})
        words.append(np.argmax(X))
        if words[-1] == model.vocab.end_id:
            break
    print([model.vocab.id_to_word(word) for i, word in enumerate(words)])


if __name__ == '__main__':
    # Parses command-line flags, then calls main(sys.argv).
    tf.app.run()
| StarcoderdataPython |
80295 |
from rest_framework import serializers
from rest_framework_gis import serializers
from rest_framework.serializers import CharField, IntegerField, BooleanField
from passenger_census_api.models import PassengerCensus, AnnualRouteRidership, OrCensusBlockPolygons, WaCensusBlockPolygons, AnnualCensusBlockRidership, CensusBlockChange, RouteChange, AnnualRouteDifferencesForScatter
class AnnualCensusBlockRidershipSerializer(serializers.GeoFeatureModelSerializer):
    """GeoJSON serializer for annual ridership aggregated per census block."""
    class Meta:
        model = AnnualCensusBlockRidership
        fields = '__all__'
        geo_field = 'geom_polygon_4326'
class AnnualRouteDifferencesSerializer(serializers.ModelSerializer):
    """Flat serializer for year-over-year route differences (scatter plots)."""
    class Meta:
        model = AnnualRouteDifferencesForScatter
        fields = '__all__'
class AnnualRouteRidershipSerializer(serializers.ModelSerializer):
    """Flat serializer for annual ridership per route."""
    class Meta:
        model = AnnualRouteRidership
        fields = '__all__'
class CensusBlockChangeSerializer(serializers.GeoFeatureModelSerializer):
    """GeoJSON serializer for census-block change polygons."""
    class Meta:
        model = CensusBlockChange
        fields = '__all__'
        geo_field = 'geom_polygon_4326'
class RouteChangeSerializer(serializers.GeoFeatureModelSerializer):
    """GeoJSON serializer for route-change linestrings."""
    class Meta:
        model = RouteChange
        fields = '__all__'
        geo_field = 'geom_linestring'
class OrCensusBlockPolygonsSerializer(serializers.GeoFeatureModelSerializer):
    """GeoJSON serializer for Oregon census-block polygons."""
    class Meta:
        model = OrCensusBlockPolygons
        geo_field = 'geom_polygon_4326'
        # NOTE(review): GeoFeatureModelSerializer reads Meta.id_field;
        # a bare `id` attribute here is likely ignored -- confirm.
        id = 'census_block'
        fields = '__all__'
class WaCensusBlockPolygonsSerializer(serializers.GeoFeatureModelSerializer):
    """GeoJSON serializer for Washington census-block polygons."""
    class Meta:
        model = WaCensusBlockPolygons
        geo_field = 'geom_polygon_4326'
        # NOTE(review): GeoFeatureModelSerializer reads Meta.id_field;
        # a bare `id` attribute here is likely ignored -- confirm.
        id = 'census_block'
        fields = '__all__'
class PassengerCensusSerializer(serializers.GeoFeatureModelSerializer):
    """GeoJSON serializer for individual passenger-census records."""
    class Meta:
        model = PassengerCensus
        geo_field = 'geom_4326'
        # NOTE(review): GeoFeatureModelSerializer reads Meta.id_field;
        # a bare `id` attribute here is likely ignored -- confirm.
        id = 'id'
        fields = '__all__'
class PassengerCensusAnnualSerializer(serializers.ModelSerializer):
    """Serializer for annual ridership aggregates over PassengerCensus rows.

    The aggregate fields below are not model columns; they are expected to
    be supplied as queryset annotations by the view.

    Bug fix: the original declared these fields *inside* ``class Meta``,
    where DRF never sees them, and ``Meta`` had no ``fields`` list (which
    newer DRF versions reject). The declarations now live on the serializer
    and are listed explicitly in ``Meta.fields``.
    """
    year = IntegerField()
    weekday_sum_ons = IntegerField()
    weekday_sum_offs = IntegerField()
    saturday_sum_ons = IntegerField()
    saturday_sum_offs = IntegerField()
    num_of_yearly_census = IntegerField()
    sunday_census = BooleanField()
    saturday_census = BooleanField()
    sunday_sum_ons = IntegerField()
    sunday_sum_offs = IntegerField()
    annual_sum_ons = IntegerField()
    annual_sum_offs = IntegerField()

    class Meta:
        model = PassengerCensus
        # Expose exactly the annotated aggregates.
        fields = ['year', 'weekday_sum_ons', 'weekday_sum_offs',
                  'saturday_sum_ons', 'saturday_sum_offs',
                  'num_of_yearly_census', 'sunday_census', 'saturday_census',
                  'sunday_sum_ons', 'sunday_sum_offs',
                  'annual_sum_ons', 'annual_sum_offs']
class PassengerCensusInfoSerializer(serializers.ModelSerializer):
    """Compact serializer exposing census date/service key plus an
    annotated ``total_routes`` count.

    Bug fix: the original declared ``total_routes`` *inside* ``class Meta``,
    where DRF never sees it, and omitted it from ``fields``. The field now
    lives on the serializer and is listed explicitly.
    """
    # Expected as a queryset annotation (e.g. Count of distinct routes).
    total_routes = IntegerField()

    class Meta:
        model = PassengerCensus
        fields = ['summary_begin_date', 'service_key', 'total_routes']
| StarcoderdataPython |
3229871 | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Fri May 3 14:28:57 2019
@author: logancross
"""
from mvpa2.suite import *
from pymvpaw import *
import matplotlib.pyplot as plt
from mvpa2.measures.searchlight import sphere_searchlight
import mvpa_utils_pav
import sys
your_path = '/Users/logancross/Documents/EvaPavlovian/'
# add utils to path
sys.path.insert(0, your_path+'mvpa')
# svm (classification) or rsa result
analysis_prefix = 'svm'
subjs_in_group = ['01','07']
# number of permutations done at the first level
num_perms=100
###################################################
# load a sample subject to get the voxel information
subj_temp = subjs_in_group[0]
sample_standard_img = your_path+'mvpa/analyses/sub-'+subj_temp+'/'+analysis_prefix+'_cs+_cs-_standard'
fds_standard = fmri_dataset(samples=sample_standard_img, targets=0, chunks=0, mask=sample_standard_img)
# number of voxels in standard template
num_voxs = fds_standard.shape[1]
###################################################
# PERMUTATION TESTS ENTERED AT THE SECOND LEVEL
# PROCEDURE PERFORMED AS DESCRIBED HERE https://www.sciencedirect.com/science/article/pii/S1053811912009810?via%3Dihub#bb0035
# MORE INFORMATION ON PYMVPA PROCEDURE HERE http://www.pymvpa.org/generated/mvpa2.algorithms.group_clusterthr.GroupClusterThreshold.html#r3
# slightly untested - needs to be tested with full dataset of subjects
# loop through the subjects, and concatenate to map a big matrix of accuracies,
# permutation accuracies, and a different chunk label for every subject
acc_map_all_subjs = np.zeros([len(subjs_in_group),num_voxs])
perms_all_subjs = np.zeros([num_perms*len(subjs_in_group),num_voxs])
chunks = np.zeros(num_perms*len(subjs_in_group))
subj_count = -1
for subj in subjs_in_group:
    subj_count+=1
    vector_file = '/Users/logancross/Documents/EvaPavlovian/mvpa/permutation_tests/sub-'+subj+'/sample_svm_cs+_cs-'
    temp_results = h5load(vector_file)
    acc_map_all_subjs[subj_count,:] = temp_results
    perm_file = vector_file+'_nulldist.hdf5'
    temp_perms = h5load(perm_file).samples
    # reshape and transpose to get num perms x num voxels
    temp_perms_reshape = temp_perms.reshape(num_voxs, num_perms).T
    # NOTE(review): the slice width is hard-coded to 100 rather than using
    # num_perms -- breaks silently if num_perms is changed. Confirm intent.
    perms_all_subjs[(subj_count*100):((1+subj_count)*100),:] = temp_perms_reshape
    chunks[(subj_count*100):((1+subj_count)*100)] = subj_count*np.ones(num_perms)
# create a pymvpa dataset that concatenates the accuracy maps for every subject
mean_map = fds_standard.copy(deep=False, sa=[], fa=['voxel_indices'], a=['voxel_dim','mapper','voxel_eldim'])
mean_map.samples = acc_map_all_subjs
# create a pymvpa dataset that concatenates the permutation maps for every subject
perms = fds_standard.copy(deep=False, sa=[], fa=['voxel_indices'], a=['voxel_dim','mapper','voxel_eldim'])
perms.samples = perms_all_subjs
perms.sa.set_length_check(len(chunks))
# chunks tell pymvpa which samples belong to which subject
perms.sa['chunks'] = chunks.astype(int)
fwe_rate = .05
n_bootstrap = 10000
save_file = your_path+'mvpa/analyses/group/'+analysis_prefix+'_cs+_cs-_slClassPerm.hdf5'
group_result = Perm_GroupClusterThreshold(mean_map, perms, NN = 1, feature_thresh_prob = .005, n_bootstrap = n_bootstrap, fwe_rate = fwe_rate, h5 = 1, h5out = save_file)
129349 | # Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class PyFilelock(PythonPackage):
    """A platform-independent file lock for Python.

    This package contains a single module, which implements a platform
    independent file lock in Python, which provides a simple way of
    inter-process communication"""

    homepage = "https://github.com/benediktschmitt/py-filelock"
    pypi = "filelock/filelock-3.0.4.tar.gz"

    version('3.4.0', sha256='93d512b32a23baf4cac44ffd72ccf70732aeff7b8050fcaf6d3ec406d954baf4')
    version('3.0.12', sha256='18d82244ee114f543149c66a6e0c14e9c4f8a1044b5cdaadd0f82159d6a6ff59')
    version('3.0.4', sha256='011327d4ed939693a5b28c0fdf2fd9bda1f68614c1d6d0643a89382ce9843a71')
    version('3.0.3', sha256='7d8a86350736aa0efea0730e6a7f774195cbb1c2d61134c15f6be576399e87ff')
    version('3.0.0', sha256='b3ad481724adfb2280773edd95ce501e497e88fa4489c6e41e637ab3fd9a456c')
    version('2.0.13', sha256='d05079e7d7cae7576e192749d3461999ca6b0843d35b0f79f1fa956b0f6fc7d8')
    version('2.0.12', sha256='eb4314a9a032707a914b037433ce866d4ed363fce8605d45f0c9d2cd6ac52f98')
    version('2.0.11', sha256='e9e370efe86c30b19a2c8c36dd9fcce8e5ce294ef4ed6ac86664b666eaf852ca')
    version('2.0.10', sha256='c73bf706d8a0c5722de0b745495fed9cda0e46c0eabb44eb18ee3f00520fa85f')
    version('2.0.9', sha256='0f91dce339c9f25d6f2e0733a17e4f9a47b139dffda52619a0e61e013e5c6782')
    version('2.0.8', sha256='7e48e4906de3c9a5d64d8f235eb3ae1050dfefa63fd65eaf318cc915c935212b')

    # Python version requirements tighten with newer filelock releases.
    depends_on('python@3.6:', when='@3.3:', type=('build', 'run'))
    depends_on('python@2.7:2,3.5:', when='@3.1:', type=('build', 'run'))
    depends_on('py-setuptools', type=('build', 'run'))
67832 | from tests.utils import WriteTests
class WriteTh(WriteTests):
    # Shared base for <th>-tag rendering tests; all subclasses load their
    # templates and expected workbooks from this fixture directory.
    fixture_dir = "tests/fixtures/templates/tags/th"
class TestColspanMergesCells(WriteTh):
    # Renders the colspan template and compares against the expected xlsx.
    template_file = "colspan_merges.html.jinja2"
    expected_result_file = "colspan_merges.xlsx"
class TestRowspanMergesCells(WriteTh):
    # Renders the rowspan template and compares against the expected xlsx.
    template_file = "rowspan_merges.html.jinja2"
    expected_result_file = "rowspan_merges.xlsx"
class TestRowspanContainedRow(WriteTh):
    # Rowspan spanning a row that itself contains cells.
    template_file = "rowspan_contained_row.html.jinja2"
    expected_result_file = "rowspan_contained_row.xlsx"
| StarcoderdataPython |
104006 | from django.urls import path
from . import views
from django.conf.urls import url
urlpatterns = [
    # NOTE(review): this regex matches the site root and is listed before
    # the path('') route below, so views.index is unreachable -- confirm
    # which root view is intended and remove the other.
    url(r'^$', views.home, name='home'),
    path('', views.index, name='index'),
    path('compute/', views.ocr_view, name='ocr'),
    # NOTE(review): url() is deprecated (removed in Django 4; use
    # re_path()/path()); also this pattern is unanchored at the start.
    url('uploads/form/$', views.model_form_upload, name='model_form_upload'),
]
| StarcoderdataPython |
1730542 | <reponame>renmengye/tfplus
import numpy as np
import os
import time
from tfplus.utils import cmd_args, logger, listener, OptionBase, Factory
from tfplus.utils import plotter
cmd_args.add('save_ckpt', 'bool', False)
_factory = None
def get_factory():
    """Return the process-wide runner Factory, creating it on first use."""
    global _factory
    if _factory is not None:
        return _factory
    _factory = Factory()
    return _factory
def create(_clsname, **kwargs):
    """Instantiate a registered runner class by name through the factory."""
    return get_factory().create(_clsname, **kwargs)
def create_from_main(_clsname, **kwargs):
    """Instantiate a registered runner by name via the factory's
    ``create_from_main`` (presumably reading options from the command
    line -- confirm against Factory)."""
    return get_factory().create_from_main(_clsname, **kwargs)
class EmptyRunner(OptionBase):
    """No-op base runner defining the runner interface.

    ``interval``/``offset`` presumably control how often (in steps) the
    experiment loop invokes the runner -- confirm against the driver.
    All setters are chainable (return ``self``).
    """
    def __init__(self):
        super(EmptyRunner, self).__init__()
        self._name = 'default'
        self._interval = 1
        self._offset = 0
        self._experiment = None
        pass
    def run_step(self):
        """Execute one step. No-op in the base class."""
        pass
    def finalize(self):
        """Clean up at the end of the experiment. No-op here."""
        pass
    def get_name(self):
        return self._name
    def set_name(self, value):
        """Set the runner name (chainable)."""
        self._name = value
        return self
    @property
    def name(self):
        return self._name
    def get_interval(self):
        return self._interval
    def set_interval(self, value):
        """Set the step interval (chainable)."""
        self._interval = value
        return self
    @property
    def interval(self):
        return self._interval
    def get_offset(self):
        return self._offset
    def set_offset(self, value):
        """Set the step offset (chainable)."""
        self._offset = value
        return self
    @property
    def offset(self):
        return self._offset
    def get_experiment(self):
        return self._experiment
    def set_experiment(self, value):
        """Attach the owning experiment object (chainable)."""
        self._experiment = value
        return self
    @property
    def experiment(self):
        return self._experiment
class SessionRunner(EmptyRunner):
    """Runner bound to a TensorFlow session and a model object."""
    def __init__(self):
        super(SessionRunner, self).__init__()
        self._sess = None
        self._model = None
        pass
    def get_session(self):
        return self._sess
    def set_session(self, value):
        """Bind the TF session (chainable)."""
        self._sess = value
        return self
    def get_model(self):
        return self._model
    def set_model(self, value):
        """Bind the model (chainable)."""
        self._model = value
        return self
    @property
    def session(self):
        return self._sess
    @property
    def model(self):
        # Read-only property: assign through set_model(), not attribute set.
        return self._model
class SaverRunner(SessionRunner):
    """Runner that checkpoints the model, gated by the --save_ckpt flag."""
    def __init__(self):
        super(SaverRunner, self).__init__()
        self._log = logger.get()
        self._step = 0
        self.register_option('save_ckpt')
        pass
    def get_save_ckpt(self):
        return self.get_option('save_ckpt')
    def set_save_ckpt(self, value):
        return self.set_option('save_ckpt', value)
    @property
    def step(self):
        # Last global step observed from the model (0 if model has none).
        return self._step
    def run_step(self):
        """Save a checkpoint at the model's current step (if enabled)."""
        if self.model.has_var('step'):
            step = self.get_session().run(self.model.get_var('step'))
            step = int(step)
            self._step = int(step)
        else:
            step = 0
        if self.get_option('save_ckpt'):
            self._log.info('Saving checkpoint')
            self.model.save(self.get_session(), step=step)
            pass
        else:
            self._log.warning(
                'Saving is turned off. Use --save_ckpt flag to save.')
            pass
        pass
    pass
get_factory().register('saver', SaverRunner)
class RestorerRunner(SessionRunner):
    """Runner that restores model weights from a checkpoint folder."""
    def __init__(self, folder=None):
        """:param folder: path of the checkpoint folder to restore from."""
        super(RestorerRunner, self).__init__()
        self._log = logger.get()
        self.folder = folder
    def set_folder(self, value):
        """Set the checkpoint folder (chainable)."""
        self.folder = value
        return self
    def run_step(self):
        """Restore the bound model's weights from ``self.folder``."""
        if self.model.has_var('step'):
            step = self.get_session().run(self.model.get_var('step'))
            step = int(step)
        self._log.info('Restoring checkpoint')
        # BUG FIX: the original assigned ``self.model = None`` here and then
        # called a method on it.  ``model`` is a setter-less property on
        # SessionRunner, so the assignment itself raised AttributeError; and
        # even if it had worked, calling restore_weights_from on None would
        # crash.  Restore on the actual model instead.
        self.model.restore_weights_from(self.get_session(), self.folder)
get_factory().register('restorer', RestorerRunner)
class BasicRunner(SessionRunner):
    """Runner that feeds one batch per step, fetches the registered
    outputs, and dispatches the results to the attached listeners.
    """
    def __init__(self):
        super(BasicRunner, self).__init__()
        self._step = 0
        self._data_provider = None
        self._phase_train = True
        self._outputs = []
        self._current_batch = {}
        self._log = logger.get()
        # Identity by default; replaced via set_preprocessor().
        self._preprocessor = lambda x: x
        self._listeners = []
        pass
    @property
    def listeners(self):
        return self._listeners
    def write_log(self, results):
        """Forward ``results`` to every listener and stage each one."""
        for listener in self.listeners:
            listener.listen(results)
            listener.stage()
        pass
    def add_listener(self, listener):
        """Attach an already-constructed listener (chainable)."""
        self.listeners.append(listener)
        return self
    def add_csv_listener(self, name, var_name, label=None):
        """Attach a CSV-file listener for ``var_name`` (chainable)."""
        self.listeners.append(listener.get_factory().create(
            'csv', name=name, var_name=var_name, label=label))
        # if var_name not in self.outputs:
        #     self.outputs.append(var_name)
        return self
    def add_plot_listener(self, name, mapping):
        """Attach a plotter listener with a var-name mapping (chainable)."""
        self.listeners.append(listener.AdapterListener(
            mapping=mapping,
            listener=plotter.get(name)))
        # for var_name in mapping.iterkeys():
        #     if var_name not in self.outputs:
        #         self.outputs.append(var_name)
        return self
    def add_cmd_listener(self, name, var_name):
        """Attach a command-line listener for ``var_name`` (chainable)."""
        self.listeners.append(listener.get_factory().create(
            'cmd', name=name, var_name=var_name))
        # if var_name not in self.outputs:
        #     self.outputs.append(var_name)
        return self
    def finalize(self):
        """Finalize listeners and the batch iterator."""
        for listener in self.listeners:
            listener.finalize()
        self._iter.finalize()
        pass
    @property
    def log(self):
        return self._log
    @property
    def phase_train(self):
        return self._phase_train
    def get_phase_train(self):
        return self._phase_train
    def set_phase_train(self, value):
        """Set the train/eval phase flag fed to the model (chainable)."""
        self._phase_train = value
        return self
    @property
    def iter(self):
        return self._iter
    def get_iter(self):
        return self._iter
    def set_iter(self, value):
        """Set the batch iterator (chainable)."""
        self._iter = value
        return self
    @property
    def outputs(self):
        return self._outputs
    def get_outputs(self):
        return self._outputs
    def set_outputs(self, value):
        """Replace the list of output variable names (chainable)."""
        self._outputs = value
        return self
    def add_output(self, var_name):
        """Register one output variable name to fetch (chainable)."""
        self._outputs.append(var_name)
        return self
    @property
    def preprocessor(self):
        return self._preprocessor
    def set_preprocessor(self, value):
        """Set the batch preprocessor callable (chainable)."""
        self._preprocessor = value
        return self
    @property
    def step(self):
        return self._step
    @property
    def current_batch(self):
        return self._current_batch
    def _run_step(self, inp):
        """Train step"""
        self._current_batch = inp
        # NOTE(review): bat_sz_total and results are dead locals here, and
        # feed_dict is computed but discarded -- run_model() rebuilds it,
        # so the preprocessor effectively runs twice per step (harmless
        # only if it is side-effect free).  Confirm and simplify.
        bat_sz_total = 0
        results = {}
        feed_dict = self.get_feed_dict(inp)
        start_time = time.time()
        r = self.run_model(inp)
        # Wall-clock step time in milliseconds.
        step_time = (time.time() - start_time) * 1000
        if 'step_time' in self.outputs:
            r['step_time'] = step_time
        if 'step' in r:
            self._step = int(r['step'])
        return r
    def get_feed_dict(self, inp):
        """Build the session feed dict from a (preprocessed) batch."""
        inp = self._preprocessor(inp)
        feed_dict = {}
        if self.model.has_var('phase_train'):
            feed_dict[self.model.get_input_var(
                'phase_train')] = self.phase_train
        # Only feed batch keys that the model declares as input vars.
        for key in inp.iterkeys():
            if self.model.has_input_var(key):
                feed_dict[self.model.get_input_var(key)] = inp[key]
            pass
        return feed_dict
    def run_model(self, inp):
        """Run the session over the registered outputs for one batch."""
        feed_dict = self.get_feed_dict(inp)
        symbol_list = []
        output_list = []
        for r in self.outputs:
            # 'step_time' is measured on the host, not fetched from the graph.
            if r != 'step_time':
                symbol_list.append(self.model.get_var(r))
                output_list.append(r)
        if self.model.has_var('step'):
            symbol_list.append(self.model.get_var('step'))
            output_list.append('step')
        results = self.session.run(symbol_list, feed_dict=feed_dict)
        results_dict = {}
        for rr, name in zip(results, output_list):
            results_dict[name] = rr
        return results_dict
    def run_step(self):
        """Fetch one batch, run the model, log and return the results."""
        inp = self.iter.next()
        if len(self.outputs) > 0:
            results = self._run_step(inp)
        else:
            results = {}
        # Add identity mappings to ease things up.
        for key in inp.iterkeys():
            if key not in results:
                results[key] = inp[key]
        self.write_log(results)
        return results
    pass
get_factory().register('basic', BasicRunner)
class EvalRunner(BasicRunner):
    """A runner that runs a full cycle of the iterator."""
    def __init__(self):
        super(EvalRunner, self).__init__()
        self._cycle = False
    @property
    def cycle(self):
        return self._cycle
    def get_cycle(self):
        return self._cycle
    def set_cycle(self, value):
        """If True, reset the iterator after a pass instead of raising
        StopIteration (chainable)."""
        self._cycle = value
        return self
    def run_step(self):
        """Consume the entire iterator, logging results per batch."""
        for inp in self.iter:
            if len(self.outputs) > 0:
                results = self._run_step(inp)
            else:
                results = {}
            # Add identity mappings to ease things up.
            for key in inp.iterkeys():
                if key not in results:
                    results[key] = inp[key]
            for listener in self.listeners:
                listener.listen(results)
            # NOTE(review): stage() fires once per batch here (inside the
            # loop), unlike BasicRunner -- confirm it isn't meant to run
            # once per full pass.
            for listener in self.listeners:
                listener.stage()
        if self.cycle:
            self.iter.reset()
        else:
            raise StopIteration
get_factory().register('eval', EvalRunner)
class AverageRunner(BasicRunner):
    """Runner that reports batch-size-weighted averages of the outputs
    over up to ``num_batch`` mini-batches per call.
    """
    def __init__(self):
        super(AverageRunner, self).__init__()
        self._num_batch = 1
    @property
    def num_batch(self):
        return self._num_batch
    def get_num_batch(self):
        return self._num_batch
    def set_num_batch(self, value):
        """Set the number of batches averaged per run_step (chainable)."""
        self._num_batch = value
        return self
    def run_step(self):
        """Run up to ``num_batch`` batches and log per-example averages.

        Raises StopIteration when the underlying iterator is exhausted.
        """
        bat_sz_total = 0
        results = {}
        # Initialize values.
        if len(self.outputs) == 0:
            self.log.warning(
                'Empty outputs list for runner "{}"'.format(self.name))
        for key in self.outputs:
            results[key] = 0.0
        stop_flag = False
        # Run each batch.
        for bb in xrange(self.num_batch):
            try:
                inp = self.iter.next()
            except StopIteration:
                stop_flag = True
                break
            _results = self._run_step(inp)
            # Weight each output by this mini-batch's size so the final
            # division yields a true per-example average.
            bat_sz = inp[inp.keys()[0]].shape[0]
            bat_sz_total += bat_sz
            for key in _results.iterkeys():
                if _results[key] is not None:
                    if key not in results:
                        results[key] = 0.0
                    results[key] += _results[key] * bat_sz
        # BUG FIX: guard against ZeroDivisionError when the iterator was
        # already exhausted and no batch ran at all.
        if bat_sz_total == 0:
            raise StopIteration
        # Average out all batches.
        for key in results.iterkeys():
            results[key] = results[key] / bat_sz_total
        # Do not average steps.
        results['step'] = self.step
        self.write_log(results)
        if stop_flag:
            raise StopIteration
get_factory().register('average', AverageRunner)
class AccumulateRunner(BasicRunner):
    """Runner that concatenates (rather than averages) per-batch outputs
    into numpy arrays over up to ``num_batch`` batches.
    """
    def __init__(self):
        super(AccumulateRunner, self).__init__()
        self._num_batch = 1
        self._input_identity = []
        pass
    @property
    def input_identity(self):
        # Input keys copied verbatim from each batch into the results.
        return self._input_identity
    @property
    def num_batch(self):
        return self._num_batch
    def get_num_batch(self):
        return self._num_batch
    def set_num_batch(self, value):
        """Set the number of batches to accumulate (chainable)."""
        self._num_batch = value
        return self
    def add_input_identity(self, name):
        """Register an input key to pass through unchanged (chainable)."""
        self._input_identity.append(name)
        return self
    def run_step(self):
        """Accumulate outputs of up to ``num_batch`` batches and log them.

        Raises StopIteration when the underlying iterator is exhausted.
        """
        bat_sz_total = 0
        results = {}
        # Initialize values.
        if len(self.outputs) == 0:
            self.log.warning(
                'Empty outputs list for runner "{}"'.format(self.name))
        for key in self.outputs:
            results[key] = []
            pass
        for key in self.input_identity:
            results[key] = []
            pass
        results['step_time'] = []
        stop_flag = False
        # Run each batch.
        for bb in xrange(self.num_batch):
            try:
                # inp = self.data_provider.get_batch()
                inp = self.iter.next()
            except StopIteration:
                stop_flag = True
                break
            _results = self._run_step(inp)
            for key in _results.iterkeys():
                if _results[key] is not None:
                    if key not in results:
                        results[key] = []
                    results[key].append(_results[key])
                pass
            for key in self.input_identity:
                results[key].append(inp[key])
                pass
        # Concatenate all batches.
        for key in results.iterkeys():
            results[key] = np.array(results[key])
            # print key, results[key].shape
            pass
        # Do not average steps.
        results['step'] = self.step
        self.write_log(results)
        if stop_flag:
            raise StopIteration
        pass
    pass
get_factory().register('accumulate', AccumulateRunner)
| StarcoderdataPython |
1609349 | #!/usr/bin/env python
import rospy
from robotiq_common.AdvancedController import AdvancedController
from robotiq_2f_gripper_control.msg import Robotiq2FGripper_robot_input, Robotiq2FGripper_robot_output
class TwoFingerGripperController(AdvancedController):
    """ROS controller bridging AdvancedController to the Robotiq
    2-finger gripper's input/output message types."""
    def __init__(self):
        super(TwoFingerGripperController, self).__init__(['gripper'], 'Robotiq2FGripperRobotInput', Robotiq2FGripper_robot_input, 'Robotiq2FGripperRobotOutput', Robotiq2FGripper_robot_output)
    def msg_from_list(self, lst):
        """Build an output command from a 3-element list.

        rACT=1/rGTO=1 are always set; lst fills rPR, rSP, rFR in order
        (presumably position request / speed / force per Robotiq register
        naming -- confirm against the gripper manual).
        """
        cmd = Robotiq2FGripper_robot_output(rACT=1, rGTO=1)
        cmd.rPR, cmd.rSP, cmd.rFR = lst
        return cmd
    def list_from_msg(self, msg):
        """Return a one-element list holding the (gPO, gPR, gCU) tuple
        from an input status message."""
        return [(msg.gPO, msg.gPR, msg.gCU)]
if __name__ == '__main__':
rospy.init_node('Robotiq2FGripperAdvancedController')
gc = TwoFingerGripperController()
rospy.spin()
| StarcoderdataPython |
3321299 | <gh_stars>0
"""
This module to define neural network
"""
import json
import os
import sys
import argparse
import ConfigParser
import paddle
import paddle.fluid as fluid
def db_lstm(data_reader, word, postag, p_word, conf_dict):
    """
    Neural network structure definition: stacked bidirectional LSTM.

    :param data_reader: dataset object exposing get_dict_size() for the
        word-embedding, POS-tag and label dictionaries.
    :param word: input layer of word ids.
    :param postag: input layer of POS-tag ids.
    :param p_word: input layer of word ids (presumably the predicate
        words -- confirm against the caller).
    :param conf_dict: config dict (hidden_dim, depth, word_dim,
        postag_dim, emb_name, word_emb_fixed, is_local, is_sparse).
    :return: per-label scores over the 'so_label_dict' label set.
    """
    hidden_dim = conf_dict['hidden_dim']
    depth = conf_dict['depth']
    label_dict_len = data_reader.get_dict_size('so_label_dict')
    # word_emb_fixed arrives as the string "True"/"False" from the config.
    word_emb_fixed = True if conf_dict['word_emb_fixed'] == "True" else False
    emb_distributed = not conf_dict['is_local']
    # NOTE(review): this normalizes conf_dict['is_sparse'] in place, but the
    # value is never read below -- the embeddings use emb_distributed for
    # both is_distributed and is_sparse.  Confirm which was intended.
    conf_dict['is_sparse'] = bool(conf_dict['is_sparse'])
    # 3 features
    # word and p_word share one embedding table (same ParamAttr name);
    # trainable is disabled when the embedding is fixed.
    word_param = fluid.ParamAttr(name=conf_dict['emb_name'],
                                 trainable=(not word_emb_fixed))
    word_embedding = fluid.layers.embedding(
        input=word,
        size=[data_reader.get_dict_size('wordemb_dict'),
              conf_dict['word_dim']],
        dtype='float32',
        is_distributed=emb_distributed,
        is_sparse=emb_distributed,
        param_attr=word_param)
    postag_embedding = fluid.layers.embedding(
        input=postag,
        size=[data_reader.get_dict_size('postag_dict'),
              conf_dict['postag_dim']],
        dtype='float32',
        is_distributed=emb_distributed,
        is_sparse=emb_distributed)
    p_embedding = fluid.layers.embedding(
        input=p_word,
        size=[data_reader.get_dict_size('wordemb_dict'),
              conf_dict['word_dim']],
        dtype='float32',
        is_distributed=emb_distributed,
        is_sparse=emb_distributed,
        param_attr=word_param)
    # embedding
    emb_layers = [word_embedding, postag_embedding, p_embedding]
    # input hidden
    hidden_0_layers = [
        fluid.layers.fc(input=emb, size=hidden_dim, act='tanh')
        for emb in emb_layers
    ]
    hidden_0 = fluid.layers.sums(input=hidden_0_layers)
    lstm_0 = fluid.layers.dynamic_lstm(
        input=hidden_0,
        size=hidden_dim,
        candidate_activation='relu',
        gate_activation='sigmoid',
        cell_activation='sigmoid')
    # stack L-LSTM and R-LSTM with direct edges
    input_tmp = [hidden_0, lstm_0]
    for i in range(1, depth):
        mix_hidden = fluid.layers.sums(input=[
            fluid.layers.fc(input=input_tmp[0], size=hidden_dim, act='tanh'),
            fluid.layers.fc(input=input_tmp[1], size=hidden_dim, act='tanh')
        ])
        # Alternate direction per layer: odd layers run reversed.
        lstm = fluid.layers.dynamic_lstm(
            input=mix_hidden,
            size=hidden_dim,
            candidate_activation='relu',
            gate_activation='sigmoid',
            cell_activation='sigmoid',
            is_reverse=((i % 2) == 1))
        input_tmp = [mix_hidden, lstm]
    # output
    feature_out = fluid.layers.sums(input=[
        fluid.layers.fc(input=input_tmp[0], size=label_dict_len, act='tanh'),
        fluid.layers.fc(input=input_tmp[1], size=label_dict_len, act='tanh')
    ])
    return feature_out
1738328 | """
mfgmg module. Contains the ModflowGmg class. Note that the user can access
the ModflowGmg class as `flopy.modflow.ModflowGmg`.
Additional information for this MODFLOW package can be found at the `Online
MODFLOW Guide
<http://water.usgs.gov/ogw/modflow/MODFLOW-2005-Guide/gmg.htm>`_.
"""
import sys
from ..pakbase import Package
class ModflowGmg(Package):
"""
MODFLOW GMG Package Class.
Parameters
----------
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to which
this package will be added.
mxiter : int
maximum number of outer iterations. (default is 50)
iiter : int
maximum number of inner iterations. (default is 30)
iadamp : int
is a flag that controls adaptive damping. The possible values
of iadamp are.
If iadamp = 0, then the value assigned to DAMP is used as a constant
damping parameter.
If iadamp = 1, the value of damp is used for the first nonlinear
iteration. The damping parameter is adaptively varied on the basis
of the head change, using Cooley's method as described in Mehl
and Hill (2001), for subsequent iterations.
If iadamp = 2, the relative reduced residual damping method documented
in Mehl and Hill (2001) and modified by Banta (2006) is used.
When iadamp is specified as 2 and the value specified for DAMP is less
than 0.5, the closure criterion for the inner iterations (drclose) is
assigned simply as rclose. When damp is between 0.5 and 1.0, inclusive,
or when iadamp is specified as 0 or 1, drclose is calculated according
to equation 20 on p. 9 of Wilson and Naff (2004).
hclose : float
is the head change criterion for convergence. (default is 1e-5).
rclose : float
is the residual criterion for convergence. (default is 1e-5)
relax : float
is a relaxation parameter for the ILU preconditioned conjugate
gradient method. The relax parameter can be used to improve the
spectral condition number of the ILU preconditioned system. The value
of relax should be approximately one. However, the relaxation parameter
can cause the factorization to break down. If this happens, then the
gmg solver will report an assembly error and a value smaller than one
for relax should be tried. This item is read only if isc = 4.
ioutgmg : int
is a flag that controls the output of the gmg solver. The
possible values of ioutgmg are.
If ioutgmg = 0, then only the solver inputs are printed.
If ioutgmg = 1, then for each linear solve, the number of pcg
iterations, the value of the damping parameter, the l2norm of
the residual, and the maxnorm of the head change and its location
(column, row, layer) are printed. At the end of a time/stress period,
the total number of gmg calls, pcg iterations, and a running total
of pcg iterations for all time/stress periods are printed.
If ioutgmg = 2, then the convergence history of the pcg iteration is
printed, showing the l2norm of the residual and the convergence factor
for each iteration.
ioutgmg = 3 is the same as ioutgmg = 1 except output is sent to the
terminal instead of the modflow list output file.
ioutgmg = 4 is the same as ioutgmg = 2 except output is sent to the
terminal instead of the modflow list output file.
(default is 0)
iunitmhc : int
is a flag and a unit number, which controls output of maximum
head change values. If iunitmhc = 0, maximum head change values
are not written to an output file. If iunitmhc > 0, maximum head
change values are written to unit iunitmhc. Unit iunitmhc should
be listed in the Name file with 'DATA' as the file type. If
iunitmhc < 0 or is not present, iunitmhc defaults to 0.
(default is 0)
ism : int
is a flag that controls the type of smoother used in the multigrid
preconditioner. If ism = 0, then ilu(0) smoothing is implemented in
the multigrid preconditioner; this smoothing requires an additional
ector on each multigrid level to store the pivots in the ilu
factorization. If ism = 1, then symmetric gaussseidel (sgs) smoothing
is implemented in the multigrid preconditioner. No additional storage
is required if ism = 1; users may want to use this option if available
memory is exceeded or nearly exceeded when using ism = 0. Using sgs
smoothing is not as robust as ilu smoothing; additional iterations are
likely to be required in reducing the residuals. In extreme cases, the
solver may fail to converge as the residuals cannot be reduced
sufficiently. (default is 0)
isc : int
is a flag that controls semicoarsening in the multigrid
preconditioner. If isc = 0, then the rows, columns and layers are
all coarsened. If isc = 1, then the rows and columns are coarsened,
but the layers are not. If isc = 2, then the columns and layers are
coarsened, but the rows are not. If isc = 3, then the rows and layers
are coarsened, but the columns are not. If isc = 4, then there is no
coarsening. Typically, the value of isc should be 0 or 1. In the case
that there are large vertical variations in the hydraulic
conductivities, then a value of 1 should be used. If no coarsening is
implemented (isc = 4), then the gmg solver is comparable to the pcg2
ilu(0) solver described in Hill (1990) and uses the least amount of
memory. (default is 0)
damp : float
is the value of the damping parameter. For linear problems, a value
of 1.0 should be used. For nonlinear problems, a value less than 1.0
but greater than 0.0 may be necessary to achieve convergence. A typical
value for nonlinear problems is 0.5. Damping also helps control the
convergence criterion of the linear solve to alleviate excessive pcg
iterations. (default 1.)
dup : float
is the maximum damping value that should be applied at any iteration
when the solver is not oscillating; it is dimensionless. An appropriate
value for dup will be problem-dependent. For moderately nonlinear
problems, reasonable values for dup would be in the range 0.5 to 1.0.
For a highly nonlinear problem, a reasonable value for dup could be as
small as 0.1. When the solver is oscillating, a damping value as large
as 2.0 x DUP may be applied. (default is 0.75)
dlow : float
is the minimum damping value to be generated by the adaptive-damping
procedure; it is dimensionless. An appropriate value for dlow will be
problem-dependent and will be smaller than the value specified for dup.
For a highly nonlinear problem, an appropriate value for dlow might be
as small as 0.001. Note that the value specified for the variable,
chglimit, could result in application of a damping value smaller than
dlow. (default is 0.01)
chglimit : float
is the maximum allowed head change at any cell between outer
iterations; it has units of length. The effect of chglimit is to
determine a damping value that, when applied to all elements of the
head-change vector, will produce an absolute maximum head change equal
to chglimit. (default is 1.0)
extension : list string
Filename extension (default is 'gmg')
unitnumber : int
File unit number (default is None).
filenames : str or list of str
Filenames to use for the package and the output files. If
filenames=None the package name will be created using the model name
and package extension and the gmg output name will be created using
the model name and .cbc extension (for example, modflowtest.gmg.out),
if iunitmhc is a number greater than zero. If a single string is passed
the package will be set to the string and gmg output names will be
created using the model name and .gmg.out extension, if iunitmhc is a
number greater than zero. To define the names for all package files
(input and output) the length of the list of strings should be 2.
Default is None.
Returns
-------
None
Attributes
----------
Methods
-------
See Also
--------
Notes
-----
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> gmg = flopy.modflow.ModflowGmg(m)
"""
def __init__(self, model, mxiter=50, iiter=30, iadamp=0,
hclose=1e-5, rclose=1e-5, relax=1.0, ioutgmg=0,
iunitmhc=None, ism=0, isc=0, damp=1.0, dup=0.75,
dlow=0.01, chglimit=1.0, extension='gmg',
unitnumber=None, filenames=None):
"""
Package constructor.
"""
# set default unit number of one is not specified
if unitnumber is None:
unitnumber = ModflowGmg.defaultunit()
# set filenames
if filenames is None:
filenames = [None, None]
elif isinstance(filenames, str):
filenames = [filenames, None]
elif isinstance(filenames, list):
if len(filenames) < 2:
filenames.append(None)
# update external file information with gmg output, if necessary
if iunitmhc is not None:
fname = filenames[1]
model.add_output_file(iunitmhc, fname=fname, extension='gmg.out',
binflag=False,
package=ModflowGmg.ftype())
else:
iunitmhc = 0
# Fill namefile items
name = [ModflowGmg.ftype()]
units = [unitnumber]
extra = ['']
# set package name
fname = [filenames[0]]
# Call ancestor's init to set self.parent, extension, name and unit number
Package.__init__(self, model, extension=extension, name=name,
unit_number=units, extra=extra, filenames=fname)
# check if a valid model version has been specified
if model.version == 'mfusg':
err = 'Error: cannot use {} package with model version {}'.format(
self.name, model.version)
raise Exception(err)
self.heading = '# {} package for '.format(self.name[0]) + \
' {}, '.format(model.version_types[model.version]) + \
'generated by Flopy.'
self.url = 'gmg.htm'
self.mxiter = mxiter
self.iiter = iiter
self.iadamp = iadamp
self.hclose = hclose
self.rclose = rclose
self.relax = relax
self.ism = ism
self.isc = isc
self.dup = dup
self.dlow = dlow
self.chglimit = chglimit
self.damp = damp
self.ioutgmg = ioutgmg
self.iunitmhc = iunitmhc
self.parent.add_package(self)
def write_file(self):
"""
Write the package file.
Returns
-------
None
"""
f_gmg = open(self.fn_path, 'w')
f_gmg.write('%s\n' % self.heading)
# dataset 0
f_gmg.write('{} {} {} {}\n' \
.format(self.rclose, self.iiter, self.hclose, self.mxiter))
# dataset 1
f_gmg.write('{} {} {} {}\n' \
.format(self.damp, self.iadamp, self.ioutgmg,
self.iunitmhc))
# dataset 2
f_gmg.write('{} {} '.format(self.ism, self.isc))
if self.iadamp == 2:
f_gmg.write('{} {} {}' \
.format(self.dup, self.dlow, self.chglimit))
f_gmg.write('\n')
# dataset 3
f_gmg.write('{}\n'.format(self.relax))
f_gmg.close()
@staticmethod
def load(f, model, ext_unit_dict=None):
"""
Load an existing package.
Parameters
----------
f : filename or file handle
File to load.
model : model object
The model object (of type :class:`flopy.modflow.mf.Modflow`) to
which this package will be added.
ext_unit_dict : dictionary, optional
If the arrays in the file are specified using EXTERNAL,
or older style array control records, then `f` should be a file
handle. In this case ext_unit_dict is required, which can be
constructed using the function
:class:`flopy.utils.mfreadnam.parsenamefile`.
Returns
-------
gmg : ModflowGmg object
Examples
--------
>>> import flopy
>>> m = flopy.modflow.Modflow()
>>> gmg = flopy.modflow.ModflowGmg.load('test.gmg', m)
"""
if model.verbose:
sys.stdout.write('loading gmg package file...\n')
openfile = not hasattr(f, 'read')
if openfile:
filename = f
f = open(filename, 'r')
# dataset 0 -- header
while True:
line = f.readline()
if line[0] != '#':
break
# dataset 0
t = line.strip().split()
rclose = float(t[0])
iiter = int(t[1])
hclose = float(t[2])
mxiter = int(t[3])
# dataset 1
line = f.readline()
t = line.strip().split()
damp = float(t[0])
iadamp = int(t[1])
ioutgmg = int(t[2])
try:
iunitmhc = int(t[3])
except:
iunitmhc = 0
# dataset 2
line = f.readline()
t = line.strip().split()
ism = int(t[0])
isc = int(t[1])
dup, dlow, chglimit = 0.75, 0.01, 1.0
if iadamp == 2:
dup = float(t[2])
dlow = float(t[3])
chglimit = float(t[4])
# dataset 3
line = f.readline()
t = line.strip().split()
relax = float(t[0])
if openfile:
f.close()
# determine specified unit number
unitnumber = None
filenames = [None, None]
if ext_unit_dict is not None:
unitnumber, filenames[0] = \
model.get_ext_dict_attr(ext_unit_dict,
filetype=ModflowGmg.ftype())
if iunitmhc > 0:
iu, filenames[1] = \
model.get_ext_dict_attr(ext_unit_dict, unit=iunitmhc)
model.add_pop_key_list(iunitmhc)
# create the gmg object
gmg = ModflowGmg(model, mxiter=mxiter, iiter=iiter, iadamp=iadamp,
hclose=hclose, rclose=rclose, relax=relax,
ioutgmg=ioutgmg, iunitmhc=iunitmhc,
ism=ism, isc=isc, damp=damp,
dup=dup, dlow=dlow, chglimit=chglimit,
unitnumber=unitnumber)
return gmg
@staticmethod
def ftype():
return 'GMG'
@staticmethod
def defaultunit():
return 27
| StarcoderdataPython |
1750497 | <reponame>meryusha/seeds_faster<filename>maskrcnn_benchmark/data/datasets/evaluation/seed/seed_predict.py
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved.
import cv2
import torch
from torchvision import transforms as T
from maskrcnn_benchmark.modeling.detector import build_detection_model
from maskrcnn_benchmark.utils.checkpoint import DetectronCheckpointer
from maskrcnn_benchmark.structures.image_list import to_image_list
from maskrcnn_benchmark import layers as L
from maskrcnn_benchmark.utils import cv2_util
import numpy as np
class SeedPredict(object):
CLASSES_STRAT1 = (
"__background__ ",
"germinated",
"non-germinated"
# "seed",
# "radical"
)
CLASSES_STRAT2 = (
"__background__ ",
# "germinated",
# "non-germinated"
"seed",
"radical"
)
def __init__(
self,
cfg,
confidence_threshold=0.7,
min_image_size=800,
):
self.cfg = cfg.clone()
self.model = build_detection_model(cfg)
self.model.eval()
self.device = torch.device(cfg.MODEL.DEVICE)
self.model.to(self.device)
self.min_image_size = min_image_size
save_dir = cfg.OUTPUT_DIR
checkpointer = DetectronCheckpointer(cfg, self.model, save_dir=save_dir)
_ = checkpointer.load(cfg.MODEL.WEIGHT)
self.transforms = self.build_transform()
self.cpu_device = torch.device("cpu")
self.confidence_threshold = confidence_threshold
def build_transform(self):
"""
Creates a basic transformation that was used to train the models
"""
cfg = self.cfg
# we are loading images with OpenCV, so we don't need to convert them
# to BGR, they are already! So all we need to do is to normalize
# by 255 if we want to convert to BGR255 format, or flip the channels
# if we want it to be in RGB in [0-1] range.
if cfg.INPUT.TO_BGR255:
to_bgr_transform = T.Lambda(lambda x: x * 255)
else:
to_bgr_transform = T.Lambda(lambda x: x[[2, 1, 0]])
normalize_transform = T.Normalize(
mean=cfg.INPUT.PIXEL_MEAN, std=cfg.INPUT.PIXEL_STD
)
transform = T.Compose(
[
T.ToPILImage(),
T.Resize(self.min_image_size),
T.ToTensor(),
to_bgr_transform,
normalize_transform,
]
)
return transform
def run_on_opencv_image(self, image_bytes):
"""
Arguments:
image_path (String): path to an image
image_bytes
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
# image = cv2.imread(image_path)
image = cv2.imdecode(np.frombuffer(image_bytes, np.uint8), -1)
cv2.imwrite('image.jpg', image)
predictions = self.compute_prediction(image)
# image_copy = image.copy()
# boxes = predictions.bbox.numpy()
# print( predictions.get_field("scores"))
#Checking
# for box in boxes:
# minX = box[0]
# minY = box[1]
# maxX = box[2]
# maxY = box[3]
# cv2.rectangle(image_copy, ( minX, minY), (maxX, maxY), 0, 4)
# cv2.imwrite('image.jpg', image_copy)
# top_predictions = self.select_top_predictions(predictions)
# predictions = []
# result = image.copy()
return predictions
def compute_prediction(self, original_image):
"""
Arguments:
original_image (np.ndarray): an image as returned by OpenCV
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
# apply pre-processing to image
image = self.transforms(original_image)
# convert to an ImageList, padded so that it is divisible by
# cfg.DATALOADER.SIZE_DIVISIBILITY
image_list = to_image_list(image, self.cfg.DATALOADER.SIZE_DIVISIBILITY)
image_list = image_list.to(self.device)
# compute predictions
print("ALL fine here 1")
with torch.no_grad():
predictions = self.model(image_list)
print("ALL fine here 2")
predictions = [o.to(self.cpu_device) for o in predictions]
# always single image is passed at a time
prediction = predictions[0]
# reshape prediction (a BoxList) into the original image size
height, width = original_image.shape[:-1]
prediction = prediction.resize((width, height))
return prediction
def select_top_predictions(self, predictions):
"""
Select only predictions which have a `score` > self.confidence_threshold,
and returns the predictions in descending order of score
Arguments:
predictions (BoxList): the result of the computation by the model.
It should contain the field `scores`.
Returns:
prediction (BoxList): the detected objects. Additional information
of the detection properties can be found in the fields of
the BoxList via `prediction.fields()`
"""
scores = predictions.get_field("scores")
keep = torch.nonzero(scores > self.confidence_threshold).squeeze(1)
predictions = predictions[keep]
scores = predictions.get_field("scores")
_, idx = scores.sort(0, descending=True)
return predictions[idx]
def overlay_boxes(self, image, predictions):
"""
Adds the predicted boxes on top of the image
Arguments:
image (np.ndarray): an image as returned by OpenCV
predictions (BoxList): the result of the computation by the model.
It should contain the field `labels`.
"""
labels = predictions.get_field("labels")
boxes = predictions.bbox
colors = self.compute_colors_for_labels(labels).tolist()
for box, color in zip(boxes, colors):
box = box.to(torch.int64)
top_left, bottom_right = box[:2].tolist(), box[2:].tolist()
image = cv2.rectangle(
image, tuple(top_left), tuple(bottom_right), tuple(color), 1
)
return image
    def map_class_id_to_class_name(self, class_id):
        # Translate a numeric detection label into its human-readable name via
        # the ``CLASSES_STRAT2`` lookup table defined on this class.
        # NOTE(review): assumes ``class_id`` is a valid index into that list;
        # an out-of-range id raises IndexError — confirm against model output.
        return self.CLASSES_STRAT2[class_id]
| StarcoderdataPython |
3329925 | <reponame>Glignos/invenio-iiif<gh_stars>0
# -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2018 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""IIIF image previewer."""
from __future__ import absolute_import, print_function
from copy import deepcopy
from flask import Blueprint, current_app, render_template
from .utils import ui_iiif_image_url
# File extensions (without the leading dot) this previewer can handle;
# kept in sync with the extension check in ``can_preview`` below.
previewable_extensions = ['jpg', 'jpeg', 'png', 'tif', 'tiff']
# Blueprint registered by Invenio so the preview template can be rendered.
blueprint = Blueprint(
    'invenio_iiif',
    __name__,
    template_folder='templates',
)
"""Blueprint to allow loading of templates."""
def can_preview(file):
    """Determine if the given file can be previewed.

    :param file: A previewable file object exposing ``has_extensions``.
    :returns: True if the file's extension is one of the supported image
        extensions.
    """
    # Derive the accepted extensions from the module-level
    # ``previewable_extensions`` list instead of duplicating it here, so the
    # two can never drift apart.
    supported_extensions = tuple('.' + ext for ext in previewable_extensions)
    return file.has_extensions(*supported_extensions)
def preview(file):
    """Render the configured IIIF preview template for ``file``.

    The image format defaults to PNG for PNG files (to preserve
    transparency) and JPEG for everything else, unless the app config
    already pins one.
    """
    iiif_params = deepcopy(current_app.config['IIIF_PREVIEWER_PARAMS'])
    if 'image_format' not in iiif_params:
        iiif_params['image_format'] = (
            'png' if file.has_extensions('.png') else 'jpg'
        )
    image_url = ui_iiif_image_url(file.file, **iiif_params)
    return render_template(
        current_app.config['IIIF_PREVIEW_TEMPLATE'],
        file=file,
        file_url=image_url,
    )
| StarcoderdataPython |
148726 | import importlib.util
import logging
import os
import re
import signal
import sys
class FrameworkError(Exception):
    """Error raised by the framework-integration layer of the benchmark."""
def load_module(name, path):
    """Import the Python file at ``path`` as a module registered under ``name``.

    The module is inserted into ``sys.modules`` *before* execution so that
    code inside it can self-reference during exec.
    """
    module_spec = importlib.util.spec_from_file_location(name, path)
    loaded = importlib.util.module_from_spec(module_spec)
    sys.modules[name] = loaded
    module_spec.loader.exec_module(loaded)
    return loaded
# If AMLB_PATH points at a benchmark checkout, load the utils package straight
# from that directory; otherwise fall back to the installed ``amlb`` package.
amlb_path = os.environ.get("AMLB_PATH")
if amlb_path:
    utils = load_module("amlb.utils", os.path.join(amlb_path, "utils", "__init__.py"))
else:
    import amlb.utils as utils
def setup_logger():
    """Route INFO-level (and above) log records from the root logger to stdout."""
    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setLevel(logging.INFO)
    logging.basicConfig(handlers=[stdout_handler])
    logging.getLogger().setLevel(logging.INFO)
# Configure logging once at import time; ``log`` is this module's logger.
setup_logger()
log = logging.getLogger(__name__)
def result(output_file=None,
           predictions=None, truth=None,
           probabilities=None, probabilities_labels=None,
           target_is_encoded=False,
           error_message=None,
           models_count=None,
           training_duration=None,
           predict_duration=None,
           **others):
    """Bundle benchmark result fields into a plain dict.

    Any extra keyword arguments are collected under the ``others`` key.
    """
    # Explicit dict construction (equivalent to the parameters' locals()).
    return dict(
        output_file=output_file,
        predictions=predictions,
        truth=truth,
        probabilities=probabilities,
        probabilities_labels=probabilities_labels,
        target_is_encoded=target_is_encoded,
        error_message=error_message,
        models_count=models_count,
        training_duration=training_duration,
        predict_duration=predict_duration,
        others=others,
    )
def output_subdir(name, config):
    """Return ``<config.output_dir>/<name>/<config.name>/<fold>``, creating it if missing."""
    subdir_path = os.path.join(config.output_dir, name, config.name, str(config.fold))
    utils.touch(subdir_path, as_dir=True)
    return subdir_path
def save_metadata(config, **kwargs):
    """Write the config's attributes (overlaid with ``kwargs``) as pretty JSON
    to ``config.output_metadata_file``."""
    metadata = dict(config.__dict__, **kwargs)
    utils.json_dump(metadata, config.output_metadata_file, style='pretty')
# Dataset attribute names ("X", "y", "data", optionally suffixed like
# "X_train") whose values travel between processes as .npy file paths.
data_keys = re.compile("^(X|y|data)(_.+)?$")
def call_run(run_fn):
    """Subprocess entry point: read job params from stdin, run the framework,
    persist results.

    ``run_fn(dataset, config)`` is expected to return the dict produced by
    :func:`result`.  Array-valued fields (predictions/truth/probabilities)
    are saved as ``.npy`` files and replaced by their paths before the
    result is JSON-dumped to ``config.result_file``.
    """
    import numpy as np
    params = utils.Namespace.from_dict(utils.json_loads(sys.stdin.read()))
    def load_data(name, path, **ignored):
        # Dataset attributes matching ``data_keys`` arrive as .npy file
        # paths; load them back into arrays.
        if isinstance(path, str) and data_keys.match(name):
            return name, np.load(path, allow_pickle=True)
        return name, path
    log.info("Params passed to subprocess:\n%s", params)
    ds = utils.Namespace.walk(params.dataset, load_data)
    config = params.config
    config.framework_params = utils.Namespace.dict(config.framework_params)
    try:
        # Escalating interruption chain: soft TimeoutError first, then
        # progressively harder signals if the job refuses to stop within
        # the retry window.
        with utils.InterruptTimeout(config.job_timeout_seconds,
                                    interruptions=[
                                        dict(sig=TimeoutError),
                                        dict(sig=signal.SIGTERM),
                                        dict(sig=signal.SIGQUIT),
                                        dict(sig=signal.SIGKILL),
                                        dict(interrupt='process', sig=signal.SIGKILL)
                                    ],
                                    wait_retry_secs=10):
            result = run_fn(ds, config)
            res = dict(result)
            for name in ['predictions', 'truth', 'probabilities']:
                arr = result[name]
                if arr is not None:
                    # Replace in-memory arrays with on-disk .npy paths.
                    res[name] = os.path.join(config.result_dir, '.'.join([name, 'npy']))
                    np.save(res[name], arr, allow_pickle=True)
    except BaseException as e:
        log.exception(e)
        res = dict(
            error_message=str(e),
            models_count=0
        )
    finally:
        # ensure there's no subprocess left
        utils.kill_proc_tree(include_parent=False, timeout=5)
    utils.json_dump(res, config.result_file, style='compact')
| StarcoderdataPython |
3301913 | <reponame>jordyvanraalte/piqcer-client-python
import requests
from requests.auth import HTTPBasicAuth
from ..resources.resource import Resource
class Customers(Resource):
    """REST resource for the ``customers`` endpoint, including the nested
    ``addresses`` sub-resource."""

    def __init__(self):
        super().__init__("customers")

    def get_customer_addresses(self, id):
        """Fetch all addresses of customer ``id``."""
        # BUGFIX: a "/" was missing before "addresses", producing URLs such
        # as ".../customers/1addresses" (compare delete_customer_address).
        return requests.get(self.config.base_url + self.path + "/" + str(id) + "/addresses", verify=True,
                            auth=HTTPBasicAuth(self.config.api_key, ''))

    def post_customer_address(self, id, addresses):
        """Create address(es) for customer ``id``."""
        # BUGFIX: same missing "/" separator as above.
        return requests.post(self.config.base_url + self.path + "/" + str(id) + "/addresses", data=addresses,
                             verify=True,
                             auth=HTTPBasicAuth(self.config.api_key, ''))

    def put_customer_address(self, id, address_id, address):
        """Update address ``address_id`` of customer ``id``."""
        # BUGFIX: same missing "/" separator as above.
        return requests.put(self.config.base_url + self.path + "/" + str(id) + "/addresses/" + str(address_id),
                            data=address,
                            verify=True,
                            auth=HTTPBasicAuth(self.config.api_key, ''))

    def delete_customer_address(self, id, address_id):
        """Delete address ``address_id`` of customer ``id``."""
        # str() added so an integer address_id does not raise TypeError,
        # consistent with the other methods.
        return requests.delete(self.config.base_url + self.path + "/" + str(id) + "/addresses/" + str(address_id),
                               verify=True,
                               auth=HTTPBasicAuth(self.config.api_key, ''))

    def delete(self, id):
        """Deleting a customer is not supported by the API."""
        # Fixed copy/paste error: the message referred to a "product".
        raise NotImplementedError("Not possible to delete a customer")
| StarcoderdataPython |
32090 | # /bin/env python
# coding: utf-8
from __future__ import print_function
import sys
import argparse
import logging
import os
import math
import cv2
import numpy as np
class GenerateSyntheticData:
import PythonMagick as Magick
def __init__(self, logger=None):
if logger == None:
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
self.logger = logging.getLogger()
else:
self.logger = logger
    @staticmethod
    def appendArgumentParser(argparser):
        """Register every distortion option on ``argparser``.

        Options fall into three groups: geometric transforms, color
        adjustments, and filters/effects; the ``--auto*`` flags enable
        random sampling of these options per image/frame.
        """
        # Geometric transforms.
        argparser.add_argument('--shift-x', type=int, help='')
        argparser.add_argument('--shift-y', type=int, help='')
        argparser.add_argument('--skew-x', type=float, help='')
        argparser.add_argument('--skew-y', type=float, help='')
        argparser.add_argument('--rotate', type=float, help='rotates image clock- or counterclock-wise (angle in degrees)')
        argparser.add_argument('--horizontal_flip', action='store_true', help='horizontally flips image')
        argparser.add_argument('--zoom', type=str, help='resize image; argument given in percentage')
        # Color adjustments.
        argparser.add_argument('--contrast', type=int, help='default=0; 0~infinity (integer times contract is applided to image)')
        argparser.add_argument('--brightness', type=float, help='default=100')
        argparser.add_argument('--saturation', type=float, help='default=100')
        argparser.add_argument('--hue', type=float, help='default=100')
        # Filters and effects.
        argparser.add_argument('--blur', action='store_true', help='')
        argparser.add_argument('--blur_radius', type=float, default=10, help='')
        argparser.add_argument('--blur_sigma', type=float, default=1, help='')
        argparser.add_argument('--gaussianBlur', action='store_true', help='')
        argparser.add_argument('--gaussianBlur_width', type=float, default=5, help='')
        argparser.add_argument('--gaussianBlur_sigma', type=float, default=1, help='')
        argparser.add_argument('--despeckle', action='store_true', help='')
        argparser.add_argument('--enhance', action='store_true', help='')
        argparser.add_argument('--equalize', action='store_true', help='')
        argparser.add_argument('--gamma', type=float, help='0 ~ 2; 1 is default')
        argparser.add_argument('--implode', type=float, help='Implode factor 0~1; 0 (nothing) to 1 (full); 0.0 ~ 0.5 recommended.')
        argparser.add_argument('--negate', action='store_true', help='')
        argparser.add_argument('--normalize', action='store_true', help='')
        argparser.add_argument('--quantize', action='store_true', help='')
        argparser.add_argument('--reduceNoise', type=int, help='default=1')
        argparser.add_argument('--shade', action='store_true', help='')
        argparser.add_argument('--shade_azimuth', type=float, default=50, help='')
        argparser.add_argument('--shade_elevation', type=float, default=50, help='')
        argparser.add_argument('--sharpen', action='store_true', help='')
        argparser.add_argument('--sharpen_radius', type=float, default=1, help='')
        argparser.add_argument('--sharpen_sigma', type=float, default=0.5, help='')
        argparser.add_argument('--swirl', type=float, help='degree; default=10')
        argparser.add_argument('--wave', action='store_true', help='')
        argparser.add_argument('--wave_amplitude', type=float, default=5, help='')
        argparser.add_argument('--wave_wavelength', type=float, default=100, help='')
        # Random ("auto") mode: options are sampled per image/frame.
        argparser.add_argument('--auto', action='store_true', help='')
        argparser.add_argument('--auto_ops', type=str, default='', help='')
        argparser.add_argument('--auto_rotate_min', type=float, default=0, help='')
        argparser.add_argument('--auto_rotate_max', type=float, default=0, help='')
        argparser.add_argument('--auto_zoom_min', type=float, default=0, help='')
        argparser.add_argument('--auto_zoom_max', type=float, default=0, help='')
    def generateRandomOptions(self, cmdArg):
        """Sample a random distortion configuration.

        Builds an ``argparse.Namespace`` shaped like the CLI options, with
        values drawn from hand-tuned normal/uniform distributions.  When
        ``cmdArg.auto_ops`` is a non-empty comma-separated list, only the
        named operations are sampled; otherwise (almost) all are.
        The inline range comments (e.g. ``-10 ~ +10``) describe the
        approximate effective range of each sampled value.
        """
        def _generateRandomOptionsShift(args):
            args.shift_x = int(np.abs(np.random.normal(0, 3))) # -10 ~ +10
            args.shift_y = int(np.abs(np.random.normal(0, 1))) # -3 ~ +3
        def _generateRandomOptionsSkew(args):
            args.skew_x = int(np.random.normal(0, 3)) # -10 ~ +10
            args.skew_y = int(np.random.normal(0, 3)) # -10 ~ +10
        def _generateRandomOptionsRotate(args):
            # Explicit min/max bounds take precedence over the default spread.
            if cmdArg.auto_rotate_min != cmdArg.auto_rotate_max:
                args.rotate = int(np.random.uniform(cmdArg.auto_rotate_min, cmdArg.auto_rotate_max))
            else:
                args.rotate = int(np.random.normal(0, 3)) # -10 ~ +10
        def _generateRandomOptionsZoom(args):
            # Zoom is expressed as a percentage string, e.g. "105%".
            if cmdArg.auto_zoom_min != cmdArg.auto_zoom_max:
                args.zoom = str(int(np.random.uniform(cmdArg.auto_zoom_min, cmdArg.auto_zoom_max))) + '%'
            else:
                args.zoom = str(int(np.random.normal(100, 3))) + '%' # 90% ~ 110%
        def _generateRandomOptionsContrast(args):
            args.contrast = int(np.abs(np.random.normal(0, 1))) # 0 ~ +3
        def _generateRandomOptionsBrightness(args):
            args.brightness = np.random.normal(100, 5) # 85 ~ 115
        def _generateRandomOptionsSaturation(args):
            args.saturation = np.random.normal(100, 5) # 85 ~ 115
        def _generateRandomOptionsHue(args):
            args.hue = np.random.normal(100, 5) # 85 ~ 115
        def _generateRandomOptionsBlur(args):
            # 10% chance of blurring at all; then a coin flip picks between
            # plain blur and gaussian blur.
            if np.random.binomial(1,0.1): # do blur
                if np.random.binomial(1,0.5):
                    args.blur = True
                else:
                    args.gaussianBlur = True
                if args.blur:
                    args.blur_radius = np.abs(np.random.normal(0, 3)) # 0 ~ 10
                    args.blur_sigma = np.abs(np.random.normal(0, 0.7)) # 0 ~ 2
                if args.gaussianBlur:
                    args.gaussianBlur_width = np.abs(np.random.normal(0, 3)) # 0 ~ 10
                    args.gaussianBlur_sigma = np.abs(np.random.normal(0, 0.7)) # 0 ~ 2
        def _generateRandomOptionsHorizontalFlip(args):
            args.horizontal_flip = (np.random.binomial(1,0.1) > 0)
        def _generateRandomOptionsDespeckle(args):
            args.despeckle = (np.random.binomial(1,0.5) > 0)
        def _generateRandomOptionsEnhance(args):
            args.enhance = (np.random.binomial(1,0.5) > 0)
        def _generateRandomOptionsEqualize(args):
            args.equalize = (np.random.binomial(1,0.1) == 1)
        def _generateRandomOptionsNegate(args):
            args.negate = (np.random.binomial(1,0.1) == 1)
        def _generateRandomOptionsNormalize(args):
            args.normalize = (np.random.binomial(1,0.1) > 0)
        def _generateRandomOptionsQuantize(args):
            args.quantize = (np.random.binomial(1,0.1) > 0)
        def _generateRandomOptionsGamma(args):
            args.gamma = np.abs(np.random.normal(1, 0.03)) # 0 ~ 2
        def _generateRandomOptionsImplode(args):
            args.implode = 0
            if np.random.binomial(1,0.5) > 0:
                args.implode = np.random.normal(0, 0.15) # -0.5 ~ 0.5
        def _generateRandomOptionsReduceNoise(args):
            args.reduceNoise = int(np.abs(np.random.normal(0, 0.7))) # 0 ~ 2
        def _generateRandomOptionsShade(args):
            args.shade = (np.random.binomial(1,0.1) > 0)
            if args.shade:
                args.shade_azimuth = np.random.normal(50, 17) # 0 ~ 100
                args.shade_elevation = np.random.normal(50, 17) # 0 ~ 100
        def _generateRandomOptionsSharpen(args):
            args.sharpen = (np.random.binomial(1,0.1) > 0)
            if args.sharpen:
                args.sharpen_radius = np.abs(np.random.normal(0, 0.7)) # 0 ~ 2
                args.sharpen_sigma = np.abs(np.random.normal(0, 0.3)) # 0 ~ 1
        def _generateRandomOptionsSwirl(args):
            args.swirl = np.random.normal(0, 5) # -15 ~ +15
        def _generateRandomOptionsWave(args):
            args.wave = (np.random.binomial(1,0.3) > 0)
            if args.wave:
                args.wave_amplitude = np.abs(np.random.normal(5, 0.3)) # 0 ~ 10
                args.wave_wavelength = np.abs(np.random.normal(100, 10)) # 0 ~ 200
        # Start with every option disabled (None), then enable a subset below.
        args = argparse.Namespace()
        args.shift_x = args.shift_y = None
        args.skew_x = args.skew_y = None
        args.rotate = args.zoom = None
        args.contrast = args.brightness = args.saturation = args.hue = None
        args.blur = args.gaussianBlur = None
        args.horizontal_flip = None
        args.despeckle = args.enhance = args.reduceNoise = None
        args.equalize = args.negate = args.normalize = args.quantize = args.gamma = None
        args.shade = None
        args.sharpen = None
        args.implode = args.swirl = args.wave = None
        if len(cmdArg.auto_ops)>0:
            # Sample only the operations explicitly requested by name.
            for op in cmdArg.auto_ops.split(","):
                if op == 'shift': _generateRandomOptionsShift(args)
                elif op == 'skew': _generateRandomOptionsSkew(args)
                elif op == 'rotate': _generateRandomOptionsRotate(args)
                elif op == 'zoom': _generateRandomOptionsZoom(args)
                elif op == 'contrast': _generateRandomOptionsContrast(args)
                elif op == 'brightness': _generateRandomOptionsBrightness(args)
                elif op == 'saturation': _generateRandomOptionsSaturation(args)
                elif op == 'hue': _generateRandomOptionsHue(args)
                elif op == 'blur': _generateRandomOptionsBlur(args)
                elif op == 'horizontal_flip': _generateRandomOptionsHorizontalFlip(args)
                elif op == 'despeckle': _generateRandomOptionsDespeckle(args)
                elif op == 'enhance': _generateRandomOptionsEnhance(args)
                elif op == 'equalize': _generateRandomOptionsEqualize(args)
                elif op == 'negate': _generateRandomOptionsNegate(args)
                elif op == 'normalize': _generateRandomOptionsNormalize(args)
                elif op == 'quantize': _generateRandomOptionsQuantize(args)
                elif op == 'gamma': _generateRandomOptionsGamma(args)
                elif op == 'implode': _generateRandomOptionsImplode(args)
                elif op == 'reduceNoise': _generateRandomOptionsReduceNoise(args)
                elif op == 'shade': _generateRandomOptionsShade(args)
                elif op == 'sharpen': _generateRandomOptionsSharpen(args)
                elif op == 'swirl': _generateRandomOptionsSwirl(args)
                elif op == 'wave': _generateRandomOptionsWave(args)
                else:
                    self.logger.error('Unknown Operation Name ' + op)
        else: # apply all operations
            # NOTE: flip/equalize/negate/wave stay deliberately disabled here.
            _generateRandomOptionsShift(args)
            _generateRandomOptionsSkew(args)
            _generateRandomOptionsRotate(args)
            _generateRandomOptionsZoom(args)
            _generateRandomOptionsContrast(args)
            _generateRandomOptionsBrightness(args)
            _generateRandomOptionsSaturation(args)
            _generateRandomOptionsHue(args)
            _generateRandomOptionsBlur(args)
            #_generateRandomOptionsHorizontalFlip(args)
            _generateRandomOptionsDespeckle(args)
            _generateRandomOptionsEnhance(args)
            #_generateRandomOptionsEqualize(args)
            #_generateRandomOptionsNegate(args)
            _generateRandomOptionsNormalize(args)
            _generateRandomOptionsQuantize(args)
            _generateRandomOptionsGamma(args)
            _generateRandomOptionsImplode(args)
            _generateRandomOptionsReduceNoise(args)
            _generateRandomOptionsShade(args)
            _generateRandomOptionsSharpen(args)
            _generateRandomOptionsSwirl(args)
            #_generateRandomOptionsWave(args)
        self.logger.debug('Randomly generated options: ')
        for key in vars(args):
            self.logger.debug(' -- %s: %s' % (key, getattr(args, key)))
        self.logger.debug('')
        return args
    def isVideo(self, inputF):
        """Return True when ``inputF``'s name ends with a known video extension.

        Purely name-based (case-sensitive); the file content is not inspected.
        """
        video_file_extensions = (
            '.264', '.3g2', '.3gp', '.3gp2', '.3gpp', '.3gpp2', '.3mm', '.3p2', '.60d', '.787', '.89', '.aaf', '.aec', '.aep', '.aepx',
            '.aet', '.aetx', '.ajp', '.ale', '.am', '.amc', '.amv', '.amx', '.anim', '.aqt', '.arcut', '.arf', '.asf', '.asx', '.avb',
            '.avc', '.avd', '.avi', '.avp', '.avs', '.avs', '.avv', '.axm', '.bdm', '.bdmv', '.bdt2', '.bdt3', '.bik', '.bin', '.bix',
            '.bmk', '.bnp', '.box', '.bs4', '.bsf', '.bvr', '.byu', '.camproj', '.camrec', '.camv', '.ced', '.cel', '.cine', '.cip',
            '.clpi', '.cmmp', '.cmmtpl', '.cmproj', '.cmrec', '.cpi', '.cst', '.cvc', '.cx3', '.d2v', '.d3v', '.dat', '.dav', '.dce',
            '.dck', '.dcr', '.dcr', '.ddat', '.dif', '.dir', '.divx', '.dlx', '.dmb', '.dmsd', '.dmsd3d', '.dmsm', '.dmsm3d', '.dmss',
            '.dmx', '.dnc', '.dpa', '.dpg', '.dream', '.dsy', '.dv', '.dv-avi', '.dv4', '.dvdmedia', '.dvr', '.dvr-ms', '.dvx', '.dxr',
            '.dzm', '.dzp', '.dzt', '.edl', '.evo', '.eye', '.ezt', '.f4p', '.f4v', '.fbr', '.fbr', '.fbz', '.fcp', '.fcproject',
            '.ffd', '.flc', '.flh', '.fli', '.flv', '.flx', '.gfp', '.gl', '.gom', '.grasp', '.gts', '.gvi', '.gvp', '.h264', '.hdmov',
            '.hkm', '.ifo', '.imovieproj', '.imovieproject', '.ircp', '.irf', '.ism', '.ismc', '.ismv', '.iva', '.ivf', '.ivr', '.ivs',
            '.izz', '.izzy', '.jss', '.jts', '.jtv', '.k3g', '.kmv', '.ktn', '.lrec', '.lsf', '.lsx', '.m15', '.m1pg', '.m1v', '.m21',
            '.m21', '.m2a', '.m2p', '.m2t', '.m2ts', '.m2v', '.m4e', '.m4u', '.m4v', '.m75', '.mani', '.meta', '.mgv', '.mj2', '.mjp',
            '.mjpg', '.mk3d', '.mkv', '.mmv', '.mnv', '.mob', '.mod', '.modd', '.moff', '.moi', '.moov', '.mov', '.movie', '.mp21',
            '.mp21', '.mp2v', '.mp4', '.mp4v', '.mpe', '.mpeg', '.mpeg1', '.mpeg4', '.mpf', '.mpg', '.mpg2', '.mpgindex', '.mpl',
            '.mpl', '.mpls', '.mpsub', '.mpv', '.mpv2', '.mqv', '.msdvd', '.mse', '.msh', '.mswmm', '.mts', '.mtv', '.mvb', '.mvc',
            '.mvd', '.mve', '.mvex', '.mvp', '.mvp', '.mvy', '.mxf', '.mxv', '.mys', '.ncor', '.nsv', '.nut', '.nuv', '.nvc', '.ogm',
            '.ogv', '.ogx', '.osp', '.otrkey', '.pac', '.par', '.pds', '.pgi', '.photoshow', '.piv', '.pjs', '.playlist', '.plproj',
            '.pmf', '.pmv', '.pns', '.ppj', '.prel', '.pro', '.prproj', '.prtl', '.psb', '.psh', '.pssd', '.pva', '.pvr', '.pxv',
            '.qt', '.qtch', '.qtindex', '.qtl', '.qtm', '.qtz', '.r3d', '.rcd', '.rcproject', '.rdb', '.rec', '.rm', '.rmd', '.rmd',
            '.rmp', '.rms', '.rmv', '.rmvb', '.roq', '.rp', '.rsx', '.rts', '.rts', '.rum', '.rv', '.rvid', '.rvl', '.sbk', '.sbt',
            '.scc', '.scm', '.scm', '.scn', '.screenflow', '.sec', '.sedprj', '.seq', '.sfd', '.sfvidcap', '.siv', '.smi', '.smi',
            '.smil', '.smk', '.sml', '.smv', '.spl', '.sqz', '.srt', '.ssf', '.ssm', '.stl', '.str', '.stx', '.svi', '.swf', '.swi',
            '.swt', '.tda3mt', '.tdx', '.thp', '.tivo', '.tix', '.tod', '.tp', '.tp0', '.tpd', '.tpr', '.trp', '.ts', '.tsp', '.ttxt',
            '.tvs', '.usf', '.usm', '.vc1', '.vcpf', '.vcr', '.vcv', '.vdo', '.vdr', '.vdx', '.veg', '.vem', '.vep', '.vf', '.vft',
            '.vfw', '.vfz', '.vgz', '.vid', '.video', '.viewlet', '.viv', '.vivo', '.vlab', '.vob', '.vp3', '.vp6', '.vp7', '.vpj',
            '.vro', '.vs4', '.vse', '.vsp', '.w32', '.wcp', '.webm', '.wlmp', '.wm', '.wmd', '.wmmp', '.wmv', '.wmx', '.wot', '.wp3',
            '.wpl', '.wtv', '.wve', '.wvx', '.xej', '.xel', '.xesc', '.xfl', '.xlmv', '.xmv', '.xvid', '.y4m', '.yog', '.yuv', '.zeg',
            '.zm1', '.zm2', '.zm3', '.zmv')
        if inputF.endswith((video_file_extensions)):
            return True
        return False
def getFPS(self, vF):
video = cv2.VideoCapture(vF);
major_ver, _, _ = (cv2.__version__).split('.')
if int(major_ver) < 3 :
fps = video.get(cv2.cv.CV_CAP_PROP_FPS)
else :
fps = video.get(cv2.CAP_PROP_FPS)
video.release()
return fps
def splitFromVideo(self, inputF, outputFPrefix):
retVal = []
vid = cv2.VideoCapture(inputF)
idx = 0
while(True):
ret, frame = vid.read()
if not ret:
break
name = outputFPrefix + '_frame' + str(idx) + '.png'
cv2.imwrite(name, frame)
retVal.append(name)
idx += 1
return retVal
def mergeIntoVideo(self, inFs, outputF, FPS):
frame = cv2.imread(inFs[0])
height, width, _ = frame.shape
video = cv2.VideoWriter(outputF, cv2.VideoWriter_fourcc(*'mp4v'), FPS, (width, height))
for inF in inFs:
video.write(cv2.imread(inF))
video.release()
def generate(self, inputF, outputF, args):
if args.auto:
auto_options = self.generateRandomOptions(args)
logger.info('Random options: ' + str(auto_options))
if self.isVideo(inputF):
FPS = self.getFPS(inputF)
inputFs = self.splitFromVideo(inputF, outputF+'_input')
outputFs = []
for idx in range(0, len(inputFs)):
iF = inputFs[idx]
oF = outputF + '_output_frame' + str(idx) + '.png'
if args.auto:
self._generate(iF, oF, auto_options)
else:
self._generate(iF, oF, args)
outputFs.append(oF)
self.mergeIntoVideo(outputFs, outputF, FPS)
for f in inputFs:
os.remove(f)
for f in outputFs:
os.remove(f)
return True
else:
if args.auto:
return self._generate(inputF, outputF, auto_options)
else:
return self._generate(inputF, outputF, args)
def _generate(self, inputF, outputF, args):
inputImage = self.Magick.Image(inputF)
input_width = inputImage.size().width()
input_height = inputImage.size().height()
self.logger.debug('Input width and height: %d x %d' % (input_width, input_height))
# make image ready to be modified
inputImage.modifyImage()
inputImage.backgroundColor(self.Magick.Color('black'))
if args.shift_x != None:
inputImage.roll(args.shift_x, 0)
if args.shift_y != None:
inputImage.roll(0, args.shift_y)
if args.skew_x != None and args.skew_y != None:
inputImage.shear(args.skew_x, args.skew_y)
elif args.skew_x != None:
inputImage.shear(args.skew_x, 0)
if args.skew_y != None:
inputImage.shear(0, args.skew_y)
if args.rotate != None:
inputImage.rotate(args.rotate)
inputImage.crop(self.Magick.Geometry(input_width, input_height, 0, 0))
if args.horizontal_flip:
inputImage.flop()
if args.zoom != None:
inputImage.sample(self.Magick.Geometry(args.zoom))
if int(args.zoom.strip()[0:-1]) >= 100:
inputImage.crop(self.Magick.Geometry(input_width,
input_height,
int((inputImage.size().width() - input_width) / 2),
int((inputImage.size().height() - input_height) / 2)))
else:
# PythonMagick is missing extent() API
# inputImage.exent(Magick.Geometry(input_width, input_height), Magick.GravityType.CenterGravity)
smallWidth = inputImage.size().width()
smallHeight = inputImage.size().height()
inputImage.size(self.Magick.Geometry(input_width, input_height))
inputImage.draw(self.Magick.DrawableRectangle(smallWidth, smallHeight, input_width, input_height))
inputImage.draw(self.Magick.DrawableRectangle(smallWidth, 0, input_width, smallHeight))
inputImage.draw(self.Magick.DrawableRectangle(0, smallHeight, smallWidth, input_height))
inputImage.roll(int((input_width - smallWidth) / 2), int((input_height - smallHeight) / 2))
if args.contrast != None:
for _ in range(0, args.contrast):
inputImage.contrast(args.contrast)
if args.brightness != None or args.saturation != None or args.hue != None:
if args.brightness is None:
args.brightness = 100
if args.saturation is None:
args.saturation = 100
if args.hue is None:
args.hue = 100
inputImage.modulate(args.brightness, args.saturation, args.hue)
if args.blur:
inputImage.blur(args.blur_radius, args.blur_sigma)
if args.gaussianBlur:
inputImage.gaussianBlur(args.gaussianBlur_width, args.gaussianBlur_sigma)
if args.despeckle:
inputImage.despeckle()
if args.enhance:
inputImage.enhance()
if args.equalize:
inputImage.equalize()
if args.gamma != None:
inputImage.gamma(args.gamma)
if args.implode != None:
inputImage.implode(args.implode)
if args.negate:
inputImage.negate()
if args.normalize:
inputImage.normalize()
if args.quantize:
inputImage.quantize()
if args.reduceNoise != None:
inputImage.reduceNoise(args.reduceNoise)
if args.shade:
inputImage.shade(args.shade_azimuth, args.shade_elevation)
if args.sharpen:
inputImage.sharpen(args.sharpen_radius, args.sharpen_sigma)
if args.swirl != None:
inputImage.swirl(args.swirl)
if args.wave:
inputImage.wave(args.wave_amplitude, args.wave_wavelength)
inputImage.crop(self.Magick.Geometry(input_width,
input_height,
int(math.fabs((inputImage.size().width() - input_width) / 2)),
int(math.fabs((inputImage.size().height() - input_height) / 2))))
inputImage.write(outputF)
self.logger.debug('Output width and height: %d x %d' % (inputImage.size().width(), inputImage.size().height()))
return True
if __name__ == "__main__":
    # CLI entry point: parse the I/O options plus all distortion options
    # contributed by GenerateSyntheticData, then run a single generation.
    argparser = argparse.ArgumentParser()
    argparser.add_argument('-l', '--log-level', default='INFO', help="log-level (INFO|WARN|DEBUG|FATAL|ERROR)")
    argparser.add_argument('-i', '--input', required=True, help='Input image file name')
    argparser.add_argument('-o', '--output', required=True, help='Output image file name')
    argparser.add_argument('-w', '--overwrite', action='store_true', help='If set, will overwrite the existing output file')
    GenerateSyntheticData.appendArgumentParser(argparser)
    args = argparser.parse_args()
    logging.basicConfig(stream=sys.stdout, level=args.log_level)
    logger = logging.getLogger("DragonFly-ASL-GSD")
    logger.debug('CLI arguments')
    for key in vars(args):
        logger.debug(' -- %s: %s' % (key, getattr(args, key)))
    logger.debug('')
    # check input file exists
    if not os.path.isfile(args.input):
        logger.error('Input file %s does not exist: ' % args.input)
        sys.exit(1)
    # check if output file exists
    if os.path.isfile(args.output) and not args.overwrite:
        # Python 2 compatibility: prefer raw_input when it exists.
        try: input = raw_input
        except NameError: pass
        yn = input('Do you wish to overwrite %s? (y/n) ' % args.output)
        if yn != 'y' and yn != 'Y':
            logger.error('Output file %s will not be overwritten.' % args.output)
            sys.exit(1)
    GSD = GenerateSyntheticData(logger=logger)
    status = GSD.generate(args.input, args.output, args)
    logger.debug('Generation status: %r' % status)
| StarcoderdataPython |
38236 | <reponame>project-scifi/scifiweb
from django.conf.urls import include
from django.conf.urls import url
from django.shortcuts import redirect
from django.shortcuts import reverse
import scifiweb.about.urls
import scifiweb.news.urls
from scifiweb.home import home
from scifiweb.robots import robots_dot_txt
# Root URL routing table; the /info/about/ entries are permanent (301)
# redirects that keep pre-restructure links working.
urlpatterns = [
    url(r'^$', home, name='home'),
    url(r'^robots\.txt$', robots_dot_txt, name='robots.txt'),
    url(r'^about/', include(scifiweb.about.urls.urlpatterns)),
    url(r'^news/', include(scifiweb.news.urls.urlpatterns)),
    # Legacy redirects to /about/
    url(r'^info/about/$', lambda _: redirect(reverse('about'), permanent=True)),
    url(r'^info/about/contact$', lambda _: redirect(reverse('about/contact'), permanent=True)),
    url(r'^info/about/team$', lambda _: redirect(reverse('about/team'), permanent=True)),
]
| StarcoderdataPython |
3231971 | # Copyright 2020 DeepMind Technologies Limited.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Level generator for Mujoban based on levels from follwing dataset.
https://github.com/deepmind/boxoban-levels/
"""
import glob
import os
import zipfile
import numpy as np
import requests
# Upstream archive containing all Boxoban level sets.
BOXOBAN_URL = "https://github.com/deepmind/boxoban-levels/archive/master.zip"
def boxoban_level_generator(levels_set="unfiltered", data_split="valid"):
  """Yields uniformly random levels from the requested set/split, forever."""
  env = Boxoban(levels_set=levels_set, data_split=data_split)
  while True:
    # ``np.random.randint`` excludes the upper bound, so the previous
    # ``num_levels - 1`` made the last level unreachable; use ``num_levels``
    # to sample uniformly over all valid indices [0, num_levels).
    index = np.random.randint(0, env.num_levels)
    yield env.levels[index]
class Boxoban(object):
  """Loader for Boxoban levels.

  On first use the level archive is downloaded from GitHub, parsed, and
  cached as a ``.npz`` file; later instantiations read the cache directly.
  """

  def __init__(self,
               levels_set="unfiltered",
               data_split="valid"):
    self._levels_set = levels_set
    self._data_split = data_split
    self._levels = []
    cache_file_name = "{}_{}.npz".format(self._levels_set, self._data_split)
    data_file_path_local = os.path.join(os.path.dirname(__file__),
                                        "boxoban_cache", cache_file_name)
    data_file_path_global = os.path.join("/tmp/boxoban_cache", cache_file_name)
    # Prefer the package-local cache, then the machine-wide one, and only
    # download/parse when neither exists.
    if os.path.exists(data_file_path_local):
      self.levels = np.load(data_file_path_local)["levels"]
    elif os.path.exists(data_file_path_global):
      self.levels = np.load(data_file_path_global)["levels"]
    else:
      self.levels = self.get_data()
    self.num_levels = len(self.levels)

  def get_data(self):
    """Downloads, caches and parses the level data.

    Returns:
      A numpy array of level strings, one entry per level.
    """
    # Fall back to /tmp when the package directory is not writable.
    try:
      cache_path = os.path.join(os.path.dirname(__file__), "boxoban_cache")
      os.makedirs(cache_path, exist_ok=True)
    except PermissionError:
      cache_path = "/tmp/boxoban_cache"
      os.makedirs(cache_path, exist_ok=True)
    # Get the zip file.
    zip_file_path = os.path.join(cache_path, "master.zip")
    if not os.path.exists(zip_file_path):
      # Stream the archive to disk; the context managers close both the
      # HTTP response and the file handle even on error (the handle was
      # previously closed manually and leaked on failure).
      with requests.get(BOXOBAN_URL, stream=True) as response:
        with open(zip_file_path, "wb") as handle:
          for chunk in response.iter_content(chunk_size=512):
            if chunk:
              handle.write(chunk)
    with zipfile.ZipFile(zip_file_path, "r") as zipref:
      zipref.extractall(cache_path)
    # Convert the raw .txt level files to a single .npz cache.
    path = os.path.join(cache_path, "boxoban-levels-master",
                        self._levels_set,
                        self._data_split)
    files = glob.glob(path + "/*.txt")
    contents = []
    for file_name in files:
      # ``with`` closes each file; the previous list comprehension of bare
      # ``open(f).read()`` calls leaked one handle per file.
      with open(file_name, "r") as level_file:
        contents.append(level_file.read())
    levels = "".join(contents)
    levels = levels.split("\n;")
    levels = ["\n".join(item.split("\n")[1:]) for item in levels]
    levels = np.asarray(levels)
    data_file_path = os.path.join(
        cache_path, "{}_{}.npz".format(self._levels_set, self._data_split))
    np.savez(data_file_path, levels=levels)
    return levels
| StarcoderdataPython |
178117 | <reponame>adnanqidwai/school_algorithms<gh_stars>1-10
from math import sqrt as _sqrt
from ._if_not_valid_raise import (_if_not_int_or_float_raise,
_if_not_positive_raise)
def pythag_leg(hy, a):
    """Compute the missing leg of a right-angled triangle.

    Given the hypotenuse ``hy`` and one leg ``a``, returns
    ``sqrt(hy**2 - a**2)``.

    Parameters
    ----------
    hy: int or float
        The hypotenuse; must be positive.
    a: int or float
        The known leg; must be positive and not greater than ``hy``.

    Returns
    -------
    Float
        The length of the other leg.

    Raises
    ------
    ValueError
        If either argument is not a positive int/float, or if ``a``
        exceeds ``hy``.

    Examples
    --------
    >>> school_algorithms.pythag_leg(7, 2)
    6.708203932499369
    """
    # Validation order matters: type check, positivity check, then the
    # geometric constraint.
    _if_not_int_or_float_raise(hy, a)
    _if_not_positive_raise(hy, a)
    if a > hy:
        raise ValueError("a cannot be greater than hy")
    squared_leg = hy**2 - a**2
    return _sqrt(squared_leg)
def pythag_hypot(a, b):
    """Compute the hypotenuse of a right-angled triangle.

    Given the two legs ``a`` and ``b``, returns ``sqrt(a**2 + b**2)``.

    Parameters
    ----------
    a: int or float
        One leg; must be positive.
    b: int or float
        The other leg; must be positive.

    Returns
    -------
    Float
        The length of the hypotenuse.

    Raises
    ------
    ValueError
        If either argument is not a positive int or float.

    Examples
    --------
    >>> school_algorithms.pythag_hypot(4, 2)
    4.47213595499958
    """
    _if_not_int_or_float_raise(a, b)
    _if_not_positive_raise(a, b)
    squared_hypot = a**2 + b**2
    return _sqrt(squared_hypot)
| StarcoderdataPython |
4839570 | <reponame>adeebabdulsalam/py-stellar-base
from typing import Union
from ..call_builder.base_call_builder import BaseCallBuilder
from ..client.base_async_client import BaseAsyncClient
from ..client.base_sync_client import BaseSyncClient
class TransactionsCallBuilder(BaseCallBuilder):
    """Call builder for Horizon's transaction endpoints.

    Do not instantiate this class directly; obtain one via
    :func:`stellar_sdk.server.Server.transactions`.

    See `All Transactions <https://www.stellar.org/developers/horizon/reference/endpoints/transactions-all.html>`_

    :param horizon_url: Horizon server URL.
    :param client: The client instance used to send request.
    """

    def __init__(
        self, horizon_url: str, client: Union[BaseAsyncClient, BaseSyncClient]
    ) -> None:
        super().__init__(horizon_url, client)
        self.endpoint: str = "transactions"

    def transaction(self, transaction_hash: str) -> "TransactionsCallBuilder":
        """Target a single transaction, identified by its hash.

        See `Transaction Details <https://www.stellar.org/developers/horizon/reference/endpoints/transactions-single.html>`_

        :param transaction_hash: transaction hash
        :return: current TransactionsCallBuilder instance
        """
        self.endpoint = "transactions/" + transaction_hash
        return self

    def for_account(self, account_id: str) -> "TransactionsCallBuilder":
        """Target every transaction that affected the given account.

        See `Transactions for Account <https://www.stellar.org/developers/horizon/reference/endpoints/transactions-for-account.html>`_

        :param account_id: account id
        :return: current TransactionsCallBuilder instance
        """
        self.endpoint = "accounts/" + account_id + "/transactions"
        return self

    def for_ledger(self, sequence: Union[str, int]) -> "TransactionsCallBuilder":
        """Target every transaction contained in the given ledger.

        See `Transactions for Ledger <https://www.stellar.org/developers/horizon/reference/endpoints/transactions-for-ledger.html>`_

        :param sequence: ledger sequence
        :return: current TransactionsCallBuilder instance
        """
        self.endpoint = "ledgers/" + str(sequence) + "/transactions"
        return self

    def include_failed(self, include_failed: bool) -> "TransactionsCallBuilder":
        """Choose whether failed transactions are included; by default only
        successful transactions are returned.

        :param include_failed: Set to `True` to include failed transactions.
        :return: current TransactionsCallBuilder instance
        """
        self._add_query_param("include_failed", include_failed)
        return self
| StarcoderdataPython |
1694041 | # https://www.globaletraining.com/
# Simple Multiple Inheritance
# ParentClass1 <--- ChildClassLevel1 <--- ChildClassLevel2
class ParentClass1:
    """Root of the demo inheritance chain; holds a message and its id."""
    def __init__(self, message, message_id):
        print("ParentClass1 __init__")
        self.message, self.message_id = message, message_id
    def click_happy(self):
        # Overridden at both child levels; this runs only for ParentClass1
        # instances (or via an explicit super call).
        print("Class: ParentClass1, Method: click_happy, {}:{}".format(self.message_id, self.message))
    def click_happy_pc1_1(self):
        # Unique to the parent; inherited unchanged by both children.
        print("Class: ParentClass1, Method: click_happy_pc1_1, {}:{}".format(self.message_id, self.message))
class ChildClassLevel1(ParentClass1):
    """First-level child: overrides click_happy and chains to the parent."""
    def __init__(self, message, message_id):
        print("ChildClassLevel1 __init__")
        self.message, self.message_id = message, message_id
        # The parent initialiser also runs (prints its banner and re-assigns
        # the same two attributes).
        super().__init__(self.message, self.message_id)
    def click_happy(self):
        print("Class: ChildClassLevel1, Method: click_happy, {}:{}".format(self.message_id, self.message))
    def click_happy_cc1_1(self):
        print("Class: ChildClassLevel1, Method: click_happy_cc1_1, {}:{}".format(self.message_id, self.message))
class ChildClassLevel2(ChildClassLevel1):
    """Second-level child: overrides click_happy again and chains upward."""
    def __init__(self, message, message_id):
        print("ChildClassLevel2 __init__")
        self.message, self.message_id = message, message_id
        # Chains through ChildClassLevel1.__init__ up to ParentClass1.
        super().__init__(self.message, self.message_id)
    def click_happy(self):
        print("Class: ChildClassLevel2, Method: click_happy, {}:{}".format(self.message_id, self.message))
    def click_happy_cc2_1(self):
        print("Class: ChildClassLevel2, Method: click_happy_cc2_1, {}:{}".format(self.message_id, self.message))
def main():
    """Exercise each class in the chain to illustrate method resolution."""
    # ChildClassLevel2: sees its own methods plus everything inherited.
    print("ChildClassLevel2 object".center(40, "-"))
    level2 = ChildClassLevel2("Checking", 501)
    level2.click_happy()
    level2.click_happy_cc2_1()
    level2.click_happy_cc1_1()
    level2.click_happy_pc1_1()
    print("")
    # ChildClassLevel1: ChildClassLevel2-only methods are not reachable here.
    print("ChildClassLevel1 object".center(40, "-"))
    level1 = ChildClassLevel1("Checking", 502)
    level1.click_happy()
    level1.click_happy_cc1_1()
    level1.click_happy_pc1_1()
    print("")
    # ParentClass1: only the parent's own methods are available.
    print("ParentClass1 object".center(40, "-"))
    parent = ParentClass1("Checking", 503)
    parent.click_happy()
    parent.click_happy_pc1_1()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4802817 | import pickle
from os import path as osp
from typing import List
from multiworld.envs.pygame import PickAndPlaceEnv
from rlkit.envs.pygame.pnp_util import sample_pnp_sets
from rlkit.misc import asset_loader
from rlkit.launchers.config import LOCAL_LOG_DIR
from rlkit.torch.sets import set
def create_sets(
    env,
    renderer,
    saved_filename=None,
    save_to_filename=None,
    example_paths_and_math_set_classes=None,
    **kwargs
) -> List[set.Set]:
    """Build (or load) the list of Set objects, optionally persisting them.

    Resolution order: a previously-saved file wins, then explicit example
    specs, and finally sampling from a PickAndPlace environment.
    """
    if saved_filename is not None:
        result = asset_loader.load_local_or_remote_file(saved_filename)
    elif example_paths_and_math_set_classes is not None:
        # Each entry is itself a kwargs dict for
        # create_set_object_from_examples.
        result = [
            create_set_object_from_examples(**spec)
            for spec in example_paths_and_math_set_classes
        ]
    elif isinstance(env, PickAndPlaceEnv):
        result = sample_pnp_sets(env, renderer, **kwargs)
    else:
        raise NotImplementedError()
    if save_to_filename:
        save(result, save_to_filename)
    return result
def create_set_object_from_examples(
    examples_path,
    math_set_class,
    math_set_class_kwargs=None,
):
    """Load example goals from *examples_path* and wrap them in a Set.

    The loaded dict's desired-goal entries are aliased under the
    ``example_image`` / ``example_state`` keys the Set machinery expects.
    """
    kwargs = {} if math_set_class_kwargs is None else math_set_class_kwargs
    examples = asset_loader.load_local_or_remote_file(examples_path).item()
    examples['example_image'] = examples['image_desired_goal']
    examples['example_state'] = examples['state_desired_goal']
    description = math_set_class(**kwargs)
    return set.Set(description, examples)
def create_debug_set(example_dict):
    """Wrap *example_dict* in a Set carrying the debug description."""
    return set.Set(set.DebugSet(), example_dict)
def get_absolute_path(relative_path):
    """Return *relative_path* anchored under the local log directory."""
    absolute = osp.join(LOCAL_LOG_DIR, relative_path)
    return absolute
def load(relative_path):
    """Unpickle and return the object stored at *relative_path*.

    :param relative_path: path relative to ``LOCAL_LOG_DIR``.
    :returns: the unpickled object.
    """
    path = get_absolute_path(relative_path)
    print("loading data from", path)
    # Context manager closes the handle even if unpickling raises
    # (the original `pickle.load(open(path, "rb"))` leaked the file object).
    with open(path, "rb") as f:
        return pickle.load(f)
def save(data, relative_path):
    """Pickle *data* to *relative_path* (resolved under ``LOCAL_LOG_DIR``).

    :param data: any picklable object.
    :param relative_path: path relative to ``LOCAL_LOG_DIR``.
    """
    path = get_absolute_path(relative_path)
    # Context manager guarantees the file is flushed and closed even on
    # error (the original left the handle open).
    with open(path, "wb") as f:
        pickle.dump(data, f)
1669967 | # n = nums.length
# time = O(n)
# space = O(1)
# done time = 15m
class Solution:
    def findLengthOfLCIS(self, nums: List[int]) -> int:
        """Length of the longest strictly increasing contiguous run.

        O(n) time, O(1) extra space; returns 0 for an empty list.
        """
        if not nums:
            return 0
        best = run = 1
        for prev, cur in zip(nums, nums[1:]):
            # Extend the current run on a strict increase, else restart it.
            run = run + 1 if prev < cur else 1
            best = max(best, run)
        return best
| StarcoderdataPython |
1654841 | <reponame>mgielda/hwt<gh_stars>100-1000
def internal(fn):
    """Marker decorator with no runtime effect.

    Returns *fn* unchanged; it exists purely to document that the decorated
    object is an implementation detail and not part of the public API.
    """
    return fn
1667256 | <filename>examples/formal_project/sampleproject/api/views.py
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
import logging
from sampleproject.base import MyBaseHandler
from tornado.web import HTTPError
class ApiHandler(MyBaseHandler):
    """Base handler for JSON API endpoints.

    Maps uncaught HTTPError to a JSON error body via EXCEPTION_HANDLERS,
    which the base handler consumes.
    """
    EXCEPTION_HANDLERS = {
        HTTPError: '_handle_http_error'
    }
    def _handle_http_error(self, e):
        # Serialise tornado HTTP errors like any other API error.
        self.json_error(e.status_code, e)
    def json_error(self, code, error=None):
        """Write a JSON error body and set the HTTP status.

        :param code: HTTP status code to report.
        :param error: optional detail -- an Exception, a str, or None.
        :raises ValueError: if *error* is any other type.
        """
        msg = {'code': code}
        if error is None:
            # No detail supplied: respond with the status code alone.
            # (Previously the default None fell through to the else branch
            # and raised ValueError, so json_error(code) was unusable.)
            pass
        elif isinstance(error, Exception):
            msg['error'] = str(error)
            logging.info('Get error to write: %s - %s' %
                         (error.__class__.__name__, error))
        elif isinstance(error, str):
            msg['error'] = error
        else:
            raise ValueError('error object should be either Exception or str')
        self.set_status(code)
        self.write_json(msg, code=code)
class SourceHandler(ApiHandler):
    """Serves the raw source of a file under the application root as JSON."""
    def get(self, name):
        # `name` comes straight from the URL pattern, so it is untrusted and
        # may contain "../" segments. Resolve it and make sure the result is
        # still inside the application root (path-traversal defence).
        root = os.path.abspath(self.app.root_path)
        path = os.path.abspath(os.path.join(root, name))
        if not path.startswith(root + os.sep):
            raise HTTPError(403, 'Forbidden')
        if not os.path.exists(path) or os.path.isdir(path):
            raise HTTPError(404, 'File not found')
        # Context manager closes the handle (the original leaked it).
        with open(path, 'r') as f:
            source = f.read()
        try:
            # Python 2 file: str.decode verifies the bytes are valid UTF-8.
            source.decode('utf8')
        except Exception as e:
            raise HTTPError(403, 'Not a valid utf-8 text file, %s' % e)
        d = {
            'name': name,
            'source': source
        }
        self.write_json(d)
class SettingsHandler(ApiHandler):
    """GET handler that dumps the application's settings dict as JSON."""
    def get(self):
        settings = self.app.settings
        self.write_json(settings)
# URL routing table consumed by the application setup: (pattern, handler).
handlers = [
    ('/source/(.*)', SourceHandler),
    ('/settings.json', SettingsHandler),
]
| StarcoderdataPython |
3301909 | <reponame>qfwysw/oft
import time
import torch
from torchvision.transforms.functional import to_tensor
from argparse import ArgumentParser
import matplotlib.pyplot as plt
from oft import KittiObjectDataset, OftNet, ObjectEncoder, visualize_objects
def parse_args():
    """Parse command-line arguments for OFT inference.

    :returns: populated :class:`argparse.Namespace`.
    """
    parser = ArgumentParser()
    # NOTE: argparse keeps dashes in *positional* argument names, so the
    # original 'model-path' was stored under that literal key and
    # ``args.model_path`` in main() raised AttributeError. Declare it with
    # an underscore instead.
    parser.add_argument('model_path', type=str,
                        help='path to checkpoint file containing trained model')
    parser.add_argument('-g', '--gpu', type=int, default=0,
                        help='gpu to use for inference (-1 for cpu)')
    # Data options
    parser.add_argument('--root', type=str, default='data/kitti',
                        help='root directory of the KITTI dataset')
    parser.add_argument('--grid-size', type=float, nargs=2, default=(80., 80.),
                        help='width and depth of validation grid, in meters')
    parser.add_argument('--yoffset', type=float, default=1.74,
                        help='vertical offset of the grid from the camera axis')
    parser.add_argument('--nms-thresh', type=float, default=0.2,
                        help='minimum score for a positive detection')
    # Model options
    parser.add_argument('--grid-height', type=float, default=4.,
                        help='size of grid cells, in meters')
    parser.add_argument('-r', '--grid-res', type=float, default=0.5,
                        help='size of grid cells, in meters')
    parser.add_argument('--frontend', type=str, default='resnet18',
                        choices=['resnet18', 'resnet34'],
                        help='name of frontend ResNet architecture')
    parser.add_argument('--topdown', type=int, default=8,
                        help='number of residual blocks in topdown network')
    return parser.parse_args()
def main():
    """Run OFT detection over the KITTI validation split and plot results.

    Side effects only: loads the checkpoint, runs inference image by image
    and draws detections vs. ground truth with matplotlib.
    """
    # Parse command line arguments
    args = parse_args()
    # Load validation dataset to visualise
    dataset = KittiObjectDataset(
        args.root, 'val', args.grid_size, args.grid_res, args.yoffset)
    # Build model
    model = OftNet(num_classes=1, frontend=args.frontend,
                   topdown_layers=args.topdown, grid_res=args.grid_res,
                   grid_height=args.grid_height)
    if args.gpu >= 0:
        torch.cuda.set_device(args.gpu)
        model.cuda()
    # Load checkpoint. NOTE(review): parse_args originally declared the
    # positional as 'model-path'; args.model_path only works if that is
    # declared with an underscore — keep the two in sync.
    ckpt = torch.load(args.model_path)
    model.load_state_dict(ckpt['model'])
    # Create encoder (turns raw network outputs into detections)
    encoder = ObjectEncoder(nms_thresh=args.nms_thresh)
    # Set up plots: top axes for detections, bottom for ground truth
    _, (ax1, ax2) = plt.subplots(nrows=2)
    plt.ion()
    # Iterate over validation images
    for _, image, calib, objects, grid in dataset:
        # Move tensors to gpu
        image = to_tensor(image)
        if args.gpu >= 0:
            image, calib, grid = image.cuda(), calib.cuda(), grid.cuda()
        # Run model forwards (batch dimension added via [None])
        pred_encoded = model(image[None], calib[None], grid[None])
        # Decode predictions back on the CPU
        pred_encoded = [t[0].cpu() for t in pred_encoded]
        detections = encoder.decode(*pred_encoded, grid.cpu())
        # Visualize predictions against ground truth
        visualize_objects(image, calib, detections, ax=ax1)
        ax1.set_title('Detections')
        visualize_objects(image, calib, objects, ax=ax2)
        ax2.set_title('Ground truth')
        plt.draw()
        plt.pause(0.01)
        time.sleep(0.5)


if __name__ == '__main__':
    main()
1630380 | <gh_stars>1-10
import numpy as np
import pandas as pd
from MLFeatureSelection import sequence_selection
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import KFold
"""
MLFeatureSelection筛选特征
https://github.com/duxuhao/Feature-Selection
"""
# Build the feature selector: sequential add/remove search with random
# restarts, no cross-term feature generation.
sf = sequence_selection.Select(Sequence=True, Random=True, Cross=False)
df = pd.read_csv('data/face/df_input.csv')
try:
    # Clean the frame: drop index/file columns, make the label integer, and
    # replace non-finite values so sklearn does not reject the input.
    df.drop(['Unnamed: 0', 'file'], axis=1, inplace=True)
    df['Rating'] = df['Rating'].round().astype(int)
    df = df.replace([np.nan, np.inf, -np.inf], 0)
    df.dropna(axis=0, inplace=True)
    df.fillna(0, inplace=True)
except Exception as e:
    # Best-effort: if cleaning fails, fall through with df = None
    # (ImportDF below will then receive None).
    print(e)
    df = None
sf.ImportDF(df, label='Rating') # import dataframe and label
def lossfunction(y_pred, y_test):
    """Accuracy score: fraction of positions where prediction equals truth.

    Used by the selector as the quantity to maximise ('ascend').
    """
    return np.mean(y_pred == y_test)
def validation(X, y, features, clf, lossfunction):
    """5-fold cross-validated score of *clf* on the given feature subset.

    Returns (mean score across folds, the fitted clf from the last fold).
    NOTE(review): rows are selected with reindex on positional KFold
    indices, which is only correct if X and y carry a default RangeIndex —
    confirm against the caller.
    """
    totaltest = []
    kf = KFold(5)
    for train_index, test_index in kf.split(X):
        # print(train_index)
        # print(test_index)
        X_train, X_test = X.reindex(train_index)[features], X.reindex(test_index)[features]
        y_train, y_test = y.reindex(train_index), y.reindex(test_index)
        # clf.fit(X_train, y_train, eval_set = [(X_train, y_train), (X_test, y_test)], eval_metric='logloss', verbose=False,early_stopping_rounds=50)
        # TODO: investigate ValueError: Input contains NaN, infinity or a value too large for dtype('float64').
        clf.fit(X_train, y_train)
        # lossfunction here is accuracy, so argument order does not matter.
        totaltest.append(lossfunction(y_test, clf.predict(X_test)))
    return np.mean(totaltest), clf
#notusable = ['file']
notusable = []
# Start the search from every column currently in the frame.
initialfeatures = list(df.columns)
sf.ImportLossFunction(lossfunction, direction='ascend')
# import loss function handle and optimize direction, 'ascend' for AUC, ACC, 'descend' for logloss etc.
sf.InitialNonTrainableFeatures(notusable) # those features that is not trainable in the dataframe, user_id, string, etc
sf.InitialFeatures(initialfeatures) # initial initialfeatures as list
sf.GenerateCol() # generate features for selection
sf.SetFeatureEachRound(50, False)
# set number of feature each round, and set how the features are selected from all features (True: sample selection, False: select chunk by chunk)
sf.clf = LogisticRegression() # set the selected algorithm, can be any algorithm
# Results are appended to the log file; the best score appears at the end.
sf.SetLogFile('logs/record.log') # log file
sf.run(validation)
# run with validation function, validate is the function handle of the validation function, return best features combination
3206344 | import torch
import torch.nn as nn
import torch.nn.functional as F
class SSIM(nn.Module):
    """Layer to compute the SSIM loss between a pair of images.

    Inputs are channels-last ([B, H, W, C]); local statistics are taken
    over 3x3 windows via average pooling. The returned map is a per-pixel
    dissimilarity in [0, 1], masked and channels-last again.
    """
    def __init__(self):
        super(SSIM, self).__init__()
        # 3x3 / stride-1 average pools approximate local means/variances.
        self.mu_x_pool = nn.AvgPool2d(3, 1)
        self.mu_y_pool = nn.AvgPool2d(3, 1)
        self.sig_x_pool = nn.AvgPool2d(3, 1)
        self.sig_y_pool = nn.AvgPool2d(3, 1)
        self.sig_xy_pool = nn.AvgPool2d(3, 1)
        self.mask_pool = nn.AvgPool2d(3, 1)
        # Stabilising constants of the SSIM formula (for inputs in [0, 1]).
        self.C1 = 0.01 ** 2
        self.C2 = 0.03 ** 2
    def forward(self, x, y, mask):
        x = x.permute(0, 3, 1, 2) # [B, H, W, C] --> [B, C, H, W]
        y = y.permute(0, 3, 1, 2)
        mask = mask.permute(0, 3, 1, 2)
        mu_x = self.mu_x_pool(x)
        mu_y = self.mu_y_pool(y)
        # Variances/covariance via E[x^2] - E[x]^2 over the same windows.
        sigma_x = self.sig_x_pool(x ** 2) - mu_x ** 2
        sigma_y = self.sig_y_pool(y ** 2) - mu_y ** 2
        sigma_xy = self.sig_xy_pool(x * y) - mu_x * mu_y
        SSIM_n = (2 * mu_x * mu_y + self.C1) * (2 * sigma_xy + self.C2)
        SSIM_d = (mu_x ** 2 + mu_y ** 2 + self.C1) * (sigma_x + sigma_y + self.C2)
        SSIM_mask = self.mask_pool(mask)
        # (1 - SSIM) / 2 maps similarity [-1, 1] to dissimilarity [0, 1].
        output = SSIM_mask * torch.clamp((1 - SSIM_n / SSIM_d) / 2, 0, 1)
        return output.permute(0, 2, 3, 1) # [B, C, H, W] --> [B, H, W, C]
def gradient_x(img):
    """Difference along axis 2 (width), computed as prev - next.

    Output shrinks by one along that axis. Note the sign convention is the
    opposite of gradient() below.
    """
    left = img[:, :, :-1, :]
    right = img[:, :, 1:, :]
    return left - right
def gradient_y(img):
    """Difference along axis 1 (height), computed as prev - next.

    Output shrinks by one along that axis; sign convention matches
    gradient_x (and is opposite to gradient() below).
    """
    top = img[:, :-1, :, :]
    bottom = img[:, 1:, :, :]
    return top - bottom
def gradient(pred):
    """Return (d/dx, d/dy) finite differences of *pred* as next - prev.

    dx differs along axis 2, dy along axis 1; each output shrinks by one
    on its axis. Sign convention is the opposite of gradient_x/gradient_y.
    """
    dx = pred[:, :, 1:, :] - pred[:, :, :-1, :]
    dy = pred[:, 1:, :, :] - pred[:, :-1, :, :]
    return dx, dy
def depth_smoothness(depth, img, lambda_wt=1):
    """Edge-aware depth smoothness: penalise depth gradients, downweighted
    where the image itself has strong gradients (likely real edges).

    The channel mean on axis 3 implies channels-last inputs.
    """
    d_dx = gradient_x(depth)
    d_dy = gradient_y(depth)
    i_dx = gradient_x(img)
    i_dy = gradient_y(img)
    # exp(-lambda * mean |image gradient|): weight -> 0 at image edges.
    w_x = torch.exp(-(lambda_wt * torch.mean(torch.abs(i_dx), 3, keepdim=True)))
    w_y = torch.exp(-(lambda_wt * torch.mean(torch.abs(i_dy), 3, keepdim=True)))
    return torch.mean(torch.abs(d_dx * w_x)) + torch.mean(torch.abs(d_dy * w_y))
def compute_reconstr_loss(warped, ref, mask, simple=True):
    """Masked photometric reconstruction loss between warped and reference.

    With simple=True this is just a smooth-L1 on the masked images;
    otherwise it blends photometric and gradient smooth-L1 terms 50/50.
    """
    masked_warped = warped * mask
    masked_ref = ref * mask
    if simple:
        return F.smooth_l1_loss(masked_warped, masked_ref, reduction='mean')
    alpha = 0.5
    ref_dx, ref_dy = gradient(masked_ref)
    warped_dx, warped_dy = gradient(masked_warped)
    photo_term = F.smooth_l1_loss(masked_warped, masked_ref, reduction='mean')
    grad_term = (F.smooth_l1_loss(warped_dx, ref_dx, reduction='mean')
                 + F.smooth_l1_loss(warped_dy, ref_dy, reduction='mean'))
    return (1 - alpha) * photo_term + alpha * grad_term
4836104 | import pandas as pd
import time
import os
import numpy as np
import matplotlib.pyplot as plt
from collections import defaultdict
pd.set_option('mode.chained_assignment', None)
# preprocessing of heterogeneous nodes and edges
def preprocess_data(v_sample, e_sample, core_targets, ext_targets, core_testing):
    """Preprocess heterogeneous graph nodes and edges into per-type frames.

    Merges case-ID / testing-flag CSV data into the node frame, adds degree
    features, converts categorical flags to numeric weights, one-hot encodes
    selected columns and drops free-text columns.

    :param v_sample: node dataframe (indexed by node_id, with a 'Label' column).
    :param e_sample: edge dataframe ('from_id', 'to_id', 'Label').
    :param core_targets: CSV frame mapping node_id -> core CaseID (train).
    :param ext_targets: CSV frame mapping node_id -> extended CaseID (train).
    :param core_testing: CSV frame mapping node_id -> core CaseID (test).
    :returns: (v_sets, e_sets) dicts of per-Label dataframes.

    NOTE(review): the row-wise chained assignments below rely on
    pd.set_option('mode.chained_assignment', None) at module level and are
    O(n) lookups per row — slow on large graphs.
    """
    t0 = time.time()
    print("PREPROCESSING DATA STARTED")
    v_data = v_sample
    #? 0: Replace CoreCaseID, ExtCaseID and testingFlag with CSV data (notebook: "Correct CVS data")
    v_data_new = v_data.drop(['CoreCaseGraphID', 'ExtendedCaseGraphID', 'testingFlag'], axis=1)
    core_targets_new = core_targets.rename(columns={'CaseID': 'CoreCaseGraphID'})
    core_targets_new.index.name = 'node_id'
    ext_targets_new = ext_targets.rename(columns={'CaseID': 'ExtendedCaseGraphID'})
    ext_targets_new.index.name = 'node_id'
    core_testing_new = core_testing.rename(columns={'CaseID': 'CoreCaseGraphID'})
    core_testing_new.index.name = 'node_id'
    v_data_new = pd.merge(v_data_new, core_targets_new, left_index=True, right_index=True, how='left')
    v_data_new = pd.merge(v_data_new, ext_targets_new, left_index=True, right_index=True, how='left')
    # For some reason some nodes have 2 different core case IDs... filtering to just take the first
    for ind, row in v_data.iterrows():
        v_data_new['CoreCaseGraphID'][ind] = core_testing.loc[core_testing.index == ind].CaseID.values[0] if len(core_testing.loc[core_testing.index == ind].CaseID.values) != 0 else row['CoreCaseGraphID']
    # Rebuild the testing flag: 0 = has a training target, 1 = in the test
    # set, NaN = neither.
    tsf = pd.DataFrame(v_data_new.index)
    tsf = tsf.set_index('node_id')
    tsf['testingFlag'] = np.NaN
    for ind, row in tsf.iterrows():
        tsf['testingFlag'][ind] = 0 if len(core_targets.loc[core_targets.index == ind]) != 0 or len(ext_targets.loc[ext_targets.index == ind]) != 0 else row.testingFlag
    for ind, row in tsf.iterrows():
        tsf['testingFlag'][ind] = 1 if len(core_testing.loc[core_testing.index == ind]) != 0 else row.testingFlag
    v_data_new = pd.merge(v_data_new, tsf, left_index=True, right_index=True, how='left')
    v_data_new = v_data_new[~v_data_new.index.duplicated(keep='first')]
    v_sample = v_data_new
    #? 0.1: Add Extra Features: Node Degree (notebook: Node Degree feature)
    source_data = e_sample.groupby('from_id').count().to_id
    source_data = pd.DataFrame(source_data)
    source_data = source_data.rename(columns={'to_id': 'source_degree'})
    source_data = source_data.rename_axis('node_id')
    target_data = e_sample.groupby('to_id').count().from_id
    target_data = pd.DataFrame(target_data)
    target_data = target_data.rename(columns={'from_id': 'target_degree'})
    target_data = target_data.rename_axis('node_id')
    v_sample = pd.merge(v_sample, source_data, left_index=True, right_index=True, how='left')
    v_sample = pd.merge(v_sample, target_data, left_index=True, right_index=True, how='left')
    # Nodes with no edges get degree 0 rather than NaN.
    v_sample['source_degree'] = v_sample['source_degree'].fillna(0)
    v_sample['target_degree'] = v_sample['target_degree'].fillna(0)
    #? 1: missing core/ext case ID
    # Solution: set to 0 if NaN
    v_sample.CoreCaseGraphID = v_sample.CoreCaseGraphID.fillna(0)
    v_sample.ExtendedCaseGraphID = v_sample.ExtendedCaseGraphID.fillna(0)
    #? 2: Create dataframes for each node type (Account, Customer, Derived entity, External entity, Address)
    # and each edge type (has account, has address, is similar, money transfer)
    v_sets = defaultdict()
    for v_type in list(pd.Categorical(v_sample.Label).categories):
        v_sets[v_type] = v_sample[v_sample.Label == v_type]
        # Drop the label itself and any column that is all-null for this type.
        v_sets[v_type] = v_sets[v_type].drop(['Label']+list(v_sets[v_type].columns[v_sets[v_type].isnull().all()]), axis=1)
        v_sets[v_type].testingFlag = v_sets[v_type].testingFlag.fillna(-1)
    e_sets = defaultdict()
    for e_type in list(pd.Categorical(e_sample.Label).categories):
        e_sets[e_type] = e_sample[e_sample.Label == e_type]
        e_sets[e_type] = e_sets[e_type].drop(['Label']+list(e_sets[e_type].columns[e_sets[e_type].isnull().all()]), axis=1)
        e_sets[e_type] = e_sets[e_type].rename(columns={'from_id':'source', 'to_id':'target'})
    #? 3: Logical conversion of categorical features
    # Revenue Size Flag: low, mid_low, medium, mid_high, high -> 0,1
    conversion = {'low':0.1, 'mid_low':0.3, 'medium':0.6, 'mid_high':0.8, 'high':1}
    for i in v_sets:
        if 'Revenue Size Flag' in list(v_sets[i].columns):
            v_sets[i]['Revenue Size Flag']=v_sets[i]['Revenue Size Flag'].map(conversion)
    # Income Size Flag: low, medium, high -> 0,1
    conversion = {'low':0.1, 'medium':0.5, 'high':1}
    for i in v_sets:
        if 'Income Size Flag' in list(v_sets[i].columns):
            v_sets[i]['Income Size Flag']=v_sets[i]['Income Size Flag'].map(conversion)
    # Similarity Strength: weak, medium, strong -> 0,1
    conversion = {'weak':0.1, 'medium':0.5, 'strong':1}
    for i in e_sets:
        if 'Similarity Strength' in list(e_sets[i].columns):
            e_sets[i]['Similarity Strength']= e_sets[i]['Similarity Strength'].map(conversion)
            e_sets[i] = e_sets[i].rename(columns={'Similarity Strength':'weight'})
    # Amount Flag: small, medium, large -> 0,1 -> treated as weights
    conversion = {'small':0.1, 'medium':0.5, 'large':1}
    for i in e_sets:
        if 'Amount Flag' in list(e_sets[i].columns):
            e_sets[i]['Amount Flag']=e_sets[i]['Amount Flag'].map(conversion)
            e_sets[i] = e_sets[i].rename(columns={'Amount Flag':'weight'})
    #? 4: One-hot encoding
    # One-hot encoding of Person or Organisation
    for i in v_sets:
        if 'Person or Organisation' in list(v_sets[i].columns):
            v_sets[i] = pd.get_dummies(v_sets[i], columns=['Person or Organisation'])
    # one-hot encoding of CoreCaseGraphID
    for i in v_sets:
        if 'CoreCaseGraphID' in list(v_sets[i].columns):
            v_sets[i] = pd.get_dummies(v_sets[i], columns=['CoreCaseGraphID'])
    #? 5: String features
    # Attempt 1: remove them
    for i in v_sets:
        if 'Account ID String' in list(v_sets[i].columns):
            v_sets[i] = v_sets[i].drop('Account ID String', axis=1)
        if 'Address' in list(v_sets[i].columns):
            v_sets[i] = v_sets[i].drop('Address', axis=1)
        if 'Name' in list(v_sets[i].columns):
            v_sets[i] = v_sets[i].drop('Name', axis=1)
    #? 6: Additional Features
    '''
    Note: isReadable flag:
    it basically tells me if the name has been protected by encryption and is thus unreadable.
    I finally decided not to use this feature.
    (view "notebooks/isReadable field" for more details)
    '''
    # # Adding 'Fraudolent' flag:
    # for set in v_sets:
    #     v_sets[set]['Fraudolent'] = np.where(
    #         np.logical_or(v_sets[set]['CoreCaseGraphID'] != 0.0, v_sets[set]['ExtendedCaseGraphID'] != 0.0), '1', '0')
    t1 = time.time()
    print(f"PREPROCESSING DATA COMPLETED: {(t1-t0):.2f} s")
    return v_sets, e_sets
3274207 | <filename>Chapter04/ordereddict_keys.py<gh_stars>10-100
>>> d.keys()
odict_keys(['a', 'c', 'd', 'e', 'b'])
| StarcoderdataPython |
42362 | #!/usr/bin/env python3
from dev_aberto import hello
from babel.dates import format_datetime
from datetime import datetime
import gettext
gettext.install('hello', localedir='locale')
if __name__ == '__main__':
    # hello() presumably returns an ISO-8601 timestamp and a name — confirm
    # against dev_aberto.hello.
    date, name = hello()
    # Render the timestamp in the current locale via Babel.
    date = format_datetime(datetime.strptime(date, '%Y-%m-%dT%H:%M:%SZ'))
    # _ is installed into builtins by gettext.install('hello', ...) above;
    # the Portuguese strings are the gettext message ids.
    print(_('Ultimo commit feito em: '), date, _(' por '), name)
1775519 | <reponame>nathan4690/Simple-Chess<gh_stars>0
import chess
import pygame
from os.path import dirname
from typing import Iterable
pygame.init()
infoObject = pygame.display.Info()
# Piece sprites live next to this script under Resources/.
RESOURCESPATH = (dirname(__file__)) + "/Resources/"
# 8x8 board of 60-pixel squares.
SCREENWIDTH = 60*8
SCREENHEIGHT = 60*8
# Image names double as piece codes: colour prefix + python-chess symbol
# (uppercase for white, lowercase for black).
IMAGESNAME = ["wK","wQ","wR","wB","wN","wP","bk","bq","br","bb","bn","bp"]
images = []
for i in IMAGESNAME:
    images.append(pygame.image.load(RESOURCESPATH +i+".png"))
screen = pygame.display.set_mode((SCREENWIDTH,SCREENHEIGHT))
defaultFont = pygame.font.SysFont("Calibri",int(100/1500*SCREENWIDTH))
chessboard = chess.Board()
pygame.display.set_caption("Chess")
class Cover:
    """Translucent rectangular overlay blitted onto a target surface."""
    def __init__(self, master, color, top=0, left=0, width=SCREENWIDTH, height=SCREENHEIGHT) -> None:
        self.master = master
        self.color = color
        self.top, self.left = top, left
        self.width, self.height = width, height
        # Destination rect is built once and reused by draw().
        self.rect = pygame.Rect(left, top, width, height)
    def draw(self):
        """Render the (possibly alpha-blended) cover onto the master surface."""
        overlay = pygame.Surface(pygame.Rect(self.rect).size, pygame.SRCALPHA)
        pygame.draw.rect(overlay, self.color, overlay.get_rect())
        self.master.blit(overlay, self.rect)
class PygameText():
    """Renders (possibly multi-line) centred text onto a surface."""
    def __init__(self, master, font=defaultFont) -> None:
        self.master = master
        self.font = font
    def draw(self, text: str, pos=None, color=(255, 0, 0)):
        """Draw *text* centred on the screen, one rendered line at a time.

        NOTE(review): the *pos* argument is accepted but immediately
        overwritten inside the loop, so callers cannot actually position the
        text — confirm whether that is intended.
        """
        cnt = len(text.splitlines())
        for line in text.splitlines():
            img = self.font.render(line, True, color)
            # Centre this line on the screen.
            pos = [SCREENWIDTH/2-img.get_width()/2, SCREENHEIGHT/2 - (img.get_height()/2)]
            if cnt >= 1:
                # Shift earlier lines upwards. NOTE(review): precedence makes
                # this height + 10 per remaining line; if half a line-height
                # plus padding was intended it should be
                # (img.get_height() + 20) / 2.
                pos[1] -= ((img.get_height())+20/2)*(cnt-1)
            self.master.blit(img, pos)
            # (Removed per-frame debug print of cnt/pos.)
            cnt -= 1
def flipy(r, c):
    """Convert a screen (row, col) pair into a python-chess square index.

    Rank 8 is drawn at the top of the window, so the row axis is mirrored;
    the column maps directly to the file.
    """
    rank = 8 - r - 1
    return chess.square(c, rank)
def drawBoard(selected: Iterable[int] = None, mrange: Iterable[Iterable[int]] = None):
    """Draw the 8x8 board, highlighting the selected square and legal targets.

    *selected* is a (row, col) pair; *mrange* is a list of (row, col) pairs.
    """
    palette = [pygame.Color("white"), pygame.Color("grey")]
    for row in range(8):
        for col in range(8):
            pygame.draw.rect(screen, palette[(row + col) % 2],
                             pygame.Rect(col * 60, row * 60, 60, 60))
    if selected:
        row, col = selected
        pygame.draw.rect(screen, pygame.Color(110, 155, 255),
                         pygame.Rect(col * 60, row * 60, 60, 60))
    if mrange:
        for row, col in mrange:
            pygame.draw.rect(screen, pygame.Color(255, 214, 110),
                             pygame.Rect(col * 60, row * 60, 60, 60))
def drawPieces(board: chess.Board):
    """Blit every piece of *board* onto the global screen surface.

    The board is vertically flipped first so that rank 8 renders at the top.
    """
    mboard = board.transform(chess.flip_vertical)
    # print(mboard.piece_map().items())
    for sq, piece in list(mboard.piece_map().items()):
        # Square name like "e4" -> (file index, rank index).
        ssym = chess.square_name(sq)
        r,c = [chess.FILE_NAMES.index(ssym[0]),int(ssym[1])-1]
        # Sprite key: colour prefix + symbol, matching IMAGESNAME entries
        # ("wK" for white, "bk" for black).
        psym = piece.symbol()
        psym = ("w" if psym.isupper() else "b") + psym
        # print(psym,r,c)
        screen.blit(images[IMAGESNAME.index(psym)],(r*60,c*60))
# print(type(sq), type(piece))
# print(chess.square_name(flipy(1,2)))
# --- Main game loop state -------------------------------------------------
done = False
click = True            # True: next click selects a piece; False: a target
selected = []           # (row, col) of the currently selected square
availmoves = []         # legal Move objects from the selected square
mrange = []             # (row, col) targets matching availmoves, by index
txt = PygameText(screen)
cvr = Cover(screen, (0, 0, 0, 200))
while not done:
    drawBoard(selected, mrange)
    drawPieces(chessboard)
    if chessboard.is_game_over():
        # Dim the board and announce the result + termination reason.
        cvr.draw()
        res = chessboard.outcome().result()
        reas = chessboard.outcome().termination.name
        if res == "1-0":
            txt.draw(f"White wins-{reas}")
        elif res == "0-1":
            txt.draw(f"Black wins-{reas}")
        else:
            txt.draw(f"Draw-{reas}")
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        if event.type == pygame.MOUSEBUTTONDOWN and not chessboard.is_game_over():
            # Mouse (x, y) -> board (row, col).
            aselected = (pygame.mouse.get_pos()[1] // 60,pygame.mouse.get_pos()[0] // 60,)
            if click:
                # First click: select a square and collect its legal moves.
                availmoves = []
                mrange = []
                squ = flipy(*aselected)
                print(chess.square_name(squ))
                piec = chessboard.piece_at(squ)
                print(piec)
                selected = aselected[:]
                for move in chessboard.generate_legal_moves():
                    if move.from_square == squ:
                        ssym = chess.square_name(move.to_square)
                        r,c = [chess.FILE_NAMES.index(ssym[0]),8-int(ssym[1])]
                        mrange.append((c,r))
                        availmoves.append(move)
                click = not click
            else:
                # Second click: play the move if the target is legal.
                print(aselected,mrange)
                if tuple(aselected) in mrange:
                    chessboard.push(availmoves[mrange.index(aselected)])
                    print(chessboard.is_game_over())
                click = not click
                selected = []
                availmoves = []
                mrange = []
    pygame.display.flip()
1726923 | <filename>interfaces/result.py
import torch
from typing import Dict, Any
from dataclasses import dataclass
class Result:
    """Base interface for model results.

    NOTE(review): these are bare class-level annotations (there is no
    ``@dataclass`` on this base, unlike the subclasses), so no attributes
    are created here — subclasses supply them. Confirm this is intentional.
    """
    # Raw model outputs; tensor layout is defined by each subclass.
    outputs: torch.Tensor
    # Loss for the batch — presumably scalar; verify against callers.
    loss: torch.Tensor
    def plot(self) -> Dict[str, Any]:
        """Return a name -> plottable-object mapping; empty by default."""
        return {}
@dataclass
class RecurrentResult(Result):
    """Result whose outputs carry the batch on dimension 1."""
    outputs: torch.Tensor
    loss: torch.Tensor
    @property
    def batch_size(self) -> int:
        # Dimension 1 of the outputs tensor is treated as the batch axis.
        return self.outputs.size(1)
@dataclass
class FeedforwardResult(Result):
    """Result whose outputs carry the batch on dimension 0."""
    outputs: torch.Tensor
    loss: torch.Tensor
    @property
    def batch_size(self) -> int:
        # Dimension 0 of the outputs tensor is treated as the batch axis.
        return self.outputs.size(0)
| StarcoderdataPython |
1691395 | <filename>sevent/coroutines/loop.py<gh_stars>10-100
# -*- coding: utf-8 -*-
# 2020/5/8
# create by: snower
import types
import greenlet
from ..utils import get_logger
def warp_coroutine(BaseIOLoop):
    """Return an IOLoop subclass that accepts coroutines as callbacks.

    Coroutine objects and ``async def`` callables are driven inside a
    greenlet so that the loop's synchronous handler queue can schedule them;
    plain callables pass straight through.
    """
    class IOLoop(BaseIOLoop):
        def call_async(self, callback, *args, **kwargs):
            # Case 1: an already-created coroutine object — drive it with
            # send(None) until StopIteration inside a child greenlet.
            if isinstance(callback, types.CoroutineType):
                def run_coroutine_fuc(*args, **kwargs):
                    def run_coroutine():
                        try:
                            callback.send(None)
                            while True:
                                callback.send(None)
                        except StopIteration:
                            return
                        except Exception as e:
                            get_logger().exception("loop callback error:%s", e)
                    child_gr = greenlet.greenlet(run_coroutine)
                    return child_gr.switch()
                return self._handlers.append((run_coroutine_fuc, args, kwargs))
            # Case 2: a plain function (CO_COROUTINE flag 0x80 not set) —
            # schedule it directly.
            if callback.__code__.co_flags & 0x80 == 0:
                return self._handlers.append((callback, args, kwargs))
            # Case 3: an async def callable — create the coroutine lazily
            # when the handler fires, then drive it as in case 1.
            def run_async_fuc(*args, **kwargs):
                def run_async():
                    try:
                        g = callback(*args, **kwargs)
                        g.send(None)
                        while True:
                            g.send(None)
                    except StopIteration:
                        return
                    except Exception as e:
                        get_logger().exception("loop callback error:%s", e)
                child_gr = greenlet.greenlet(run_async)
                return child_gr.switch()
            return self._handlers.append((run_async_fuc, args, kwargs))
        # Alias: `go` is the same scheduler.
        go = call_async
        async def sleep(self, seconds):
            # Suspend the current greenlet; a loop timeout resumes it.
            child_gr = greenlet.getcurrent()
            main = child_gr.parent
            assert main is not None, "must be running in async func"
            self.add_timeout(seconds, child_gr.switch)
            return main.switch()
        def run(self, callback, *args, **kwargs):
            # Run a single callback (coroutine, plain, or async callable) to
            # completion, stopping the loop when it finishes.
            if isinstance(callback, types.CoroutineType):
                async def do_coroutine_run():
                    try:
                        await callback
                    finally:
                        self.stop()
                self.call_async(do_coroutine_run)
                return self.start()
            if callback.__code__.co_flags & 0x80 == 0:
                def do_run():
                    try:
                        callback(*args, **kwargs)
                    finally:
                        self.stop()
                self.add_async(do_run)
                return self.start()
            async def do_async_run():
                try:
                    await callback(*args, **kwargs)
                finally:
                    self.stop()
            self.call_async(do_async_run)
            return self.start()
    return IOLoop
| StarcoderdataPython |
62343 | <reponame>philippschw/flightstats_API
# -*- coding: utf-8 -*-
#pylint:disable=too-many-lines
"""
This database should really be a database...
Also - needs updating.
Also - needs to move to Eva.
"""
from __future__ import unicode_literals, division, print_function
AIRPORTS_ICAO_TO_IATA = {
"YMOR" : "MRZ",
"USRN" : "NFG",
"USRO" : "NOJ",
"USRK" : "KGP",
"USRR" : "SGC",
"CYVV" : "YVV",
"CYVT" : "YVT",
"CYVR" : "YVR",
"MGGT" : "GUA",
"CYVP" : "YVP",
"CYVQ" : "YVQ",
"RKJY" : "RSU",
"CYVZ" : "YVZ",
"CYVG" : "YVG",
"CYVB" : "YVB",
"CYVC" : "YVC",
"KVQQ" : "NZC",
"RKJK" : "KUV",
"CYVO" : "YVO",
"MSSS" : "",
"CYVM" : "YVM",
"PHMU" : "MUE",
"VIJO" : "JDH",
"VIJN" : "",
"VIJU" : "IXJ",
"VIJP" : "JAI",
"VIJR" : "JSA",
"KADM" : "ADM",
"FXMU" : "",
"MBSY" : "SLX",
"FXMM" : "MSU",
"MBSC" : "XSC",
"YMOG" : "MMG",
"KADS" : "ADS",
"KADW" : "ADW",
"VENP" : "",
"KP08" : "P08",
"YTDR" : "TDR",
"OSAP" : "ALP",
"BIRK" : "RKV",
"BIRL" : "MVA",
"DNSO" : "SKO",
"PHMK" : "MKK",
"VNTR" : "TMI",
"VLOS" : "ODY",
"VNTJ" : "TPJ",
"LPSJ" : "SJZ",
"RKJU" : "",
"MYBG" : "",
"MYBC" : "CCZ",
"KOZA" : "OZA",
"LPST" : "",
"MYBS" : "BIM",
"LEOC" : "",
"SMDA" : "DRJ",
"STPR" : "",
"NTKN" : "NIU",
"SMDU" : "",
"ZUXC" : "XIC",
"KTOP" : "TOP",
"RKJB" : "MWX",
"KTOA" : "TOA",
"KTOC" : "TOC",
"KTOL" : "TOL",
"TDCF" : "DCF",
"ULOO" : "PKV",
"ULOL" : "VLU",
"EGTO" : "RCS",
"RKJJ" : "KWJ",
"NTAT" : "TUB",
"NTAV" : "RVV",
"NTAR" : "RUR",
"YBUD" : "BDB",
"NTAA" : "PPT",
"RKJM" : "MPK",
"NTAM" : "RMT",
"YBUU" : "",
"KGON" : "GON",
"LKUL" : "",
"NZRC" : "SZS",
"NZRO" : "ROT",
"FCBB" : "BZV",
"NZRU" : "",
"CNT3" : "YOG",
"VYNP" : "",
"KNYG" : "NYG",
"HEAL" : "DBB",
"YPXM" : "XCH",
"HEAR" : "AAC",
"BGEM" : "JEG",
"HEAX" : "ALY",
"VABB" : "BOM",
"VABM" : "IXG",
"VABO" : "BDQ",
"VABI" : "PAB",
"VABJ" : "BHJ",
"MRLM" : "LIO",
"LHPP" : "PEV",
"KF70" : "RBK",
"VABV" : "BHU",
"VABP" : "BHO",
"MRLC" : "",
"MRLB" : "LIR",
"HAMT" : "MTF",
"KPSM" : "PSM",
"EGQS" : "LMO",
"ENKR" : "KKN",
"ENDI" : "",
"EGQL" : "ADX",
"KPSP" : "PSP",
"LGHI" : "JKH",
"ENKJ" : "",
"HAMA" : "MKS",
"KPSX" : "PSX",
"HAMK" : "MQX",
"ENKB" : "KSU",
"VLSN" : "",
"VLSK" : "ZVK",
"TRTC" : "",
"VLSV" : "VNA",
"K6D9" : "ECA",
"RCBS" : "KNH",
"KCKB" : "CKB",
"KHUL" : "HUL",
"OEWJ" : "EJH",
"KCKF" : "CKF",
"KHUF" : "HUF",
"OEWD" : "EWD",
"KHUA" : "HUA",
"KCKV" : "CKV",
"KHUT" : "HUT",
"DGSI" : "KMS",
"DGSN" : "NYI",
"VMMC" : "MFM",
"ORBI" : "BGW",
"ORBM" : "OSB",
"ORBD" : "",
"KSSI" : "SSI",
"KSSC" : "SSC",
"KJEF" : "JEF",
"KJES" : "JES",
"GGCF" : "",
"CKL3" : "WNN",
"FXMN" : "",
"EDPW" : "",
"SLCR" : "",
"SLCB" : "CBB",
"SLCC" : "",
"LOIH" : "HOJ",
"EDPA" : "",
"NGUK" : "AAK",
"SLCH" : "",
"SASQ" : "",
"PAYA" : "YAK",
"SLCO" : "CIJ",
"HSYE" : "",
"YMPC" : "",
"KMMV" : "MMV",
"KMMU" : "MMU",
"NZOU" : "OAM",
"NZOH" : "",
"KMMI" : "MMI",
"KMMH" : "MMH",
"CNK4" : "YPD",
"MMCS" : "CJS",
"EVVA" : "VTS",
"SBLP" : "LAZ",
"KCEW" : "CEW",
"DNKA" : "KAD",
"MBNC" : "NCA",
"SOCA" : "CAY",
"SPAB" : "",
"DNKN" : "KAN",
"SPAY" : "",
"SPAS" : "",
"KPNS" : "PNS",
"KEGE" : "EGE",
"TRTO" : "",
"VEUK" : "",
"KPNC" : "PNC",
"YLRE" : "LRE",
"YLRD" : "LHG",
"KPNE" : "PNE",
"KPNM" : "PNM",
"SKPP" : "PPN",
"OAKN" : "KDH",
"OAKB" : "KBL",
"SKGZ" : "GLJ",
"SKGY" : "",
"KLKP" : "LKP",
"SKGO" : "CRC",
"CZBA" : "",
"OAKS" : "KHT",
"BIIS" : "IFJ",
"SKGI" : "",
"MMCE" : "CME",
"MMCD" : "",
"CZJN" : "ZJN",
"SBDN" : "PPB",
"YNTN" : "NTN",
"VNT1" : "",
"EFVA" : "VAA",
"FACO" : "",
"KEGT" : "EGT",
"TJFA" : "FAJ",
"N55" : "UIT",
"SKPD" : "PDA",
"MMCN" : "CEN",
"EFVR" : "VRK",
"SKPE" : "PEI",
"FACV" : "",
"FACT" : "CPT",
"HKKG" : "",
"RJNK" : "KMQ",
"RJNH" : "",
"RJNO" : "OKI",
"KDTS" : "DTS",
"SEII" : "",
"RJNA" : "NKM",
"RJNF" : "",
"RJNG" : "",
"RJNY" : "",
"KDTA" : "DTA",
"HKKL" : "ILU",
"RJNS" : "FSZ",
"RJNW" : "NTQ",
"RJNT" : "TOY",
"VDKH" : "",
"VDKT" : "KTI",
"PLCH" : "CXI",
"NFKD" : "KDV",
"KGPT" : "GPT",
"KGPZ" : "GPZ",
"UMBB" : "BQT",
"SWKO" : "CIZ",
"WMPR" : "RDN",
"YMML" : "MEL",
"YMMB" : "MBW",
"WMPA" : "PKG",
"CYXX" : "YXX",
"CYXY" : "YXY",
"CYXZ" : "YXZ",
"OPOK" : "",
"CYXT" : "YXT",
"CYXU" : "YXU",
"CYXP" : "YXP",
"CYXR" : "YXR",
"CYXS" : "YXS",
"CYXL" : "YXL",
"CYXN" : "YXN",
"CYXH" : "YXH",
"CYXJ" : "YXJ",
"CYXK" : "YXK",
"CYXD" : "YXD",
"CYXE" : "YXE",
"OPOR" : "ORW",
"CYXC" : "YXC",
"MZBZ" : "BZE",
"LFAG" : "",
"VILD" : "LUH",
"VILK" : "LKO",
"VILH" : "IXL",
"ZPLJ" : "LJG",
"LFAI" : "",
"ZPLX" : "LUM",
"PFYU" : "FYU",
"KAFW" : "AFW",
"PSC" : "PSC",
"UHMW" : "",
"UASS" : "PLX",
"GABS" : "BKO",
"UASP" : "PWQ",
"UHMM" : "GDX",
"UHMO" : "KVM",
"UASK" : "UKK",
"VOMD" : "IXM",
"KFRG" : "",
"UHMD" : "PVS",
"GABG" : "",
"UHMA" : "DYR",
"VYTO" : "",
"VYTL" : "THL",
"VYTD" : "SNW",
"VEHK" : "",
"MMLP" : "LAP",
"ZLIC" : "INC",
"KCPR" : "CPR",
"VNVT" : "BIR",
"GREN" : "",
"KOXR" : "OXR",
"LPMT" : "",
"LPMR" : "",
"KOXC" : "OXC",
"EYSB" : "HLJ",
"LPMA" : "FNC",
"OYHD" : "HOD",
"LEMO" : "OZP",
"LEMH" : "MAH",
"RODN" : "DNA",
"LEMG" : "AGP",
"LEMD" : "MAD",
"RODE" : "",
"YRTI" : "",
"WA44" : "",
"ZUZH" : "PZI",
"KTMB" : "TMB",
"KTMA" : "TMA",
"LB14" : "HKV",
"ULAL" : "LDG",
"ULAM" : "NNM",
"ULAH" : "",
"ULAE" : "",
"ULAA" : "ARH",
"ZWTN" : "HTN",
"ZWTC" : "TCG",
"ULAS" : "CSH",
"YBKT" : "BUC",
"NTGQ" : "PUK",
"NTGP" : "PKP",
"NTGV" : "MVT",
"NTGU" : "AXR",
"NTGT" : "TKP",
"NTGY" : "ZTA",
"NTGC" : "TIH",
"NTGB" : "",
"NTGA" : "AAA",
"NTGF" : "FAV",
"NTGE" : "REA",
"NTGD" : "APK",
"NTGK" : "KKR",
"NTGJ" : "GMR",
"NTGI" : "XMH",
"USII" : "IJK",
"NTGM" : "MKP",
"NZTK" : "KTF",
"NZTL" : "",
"SWTP" : "IRZ",
"NZTG" : "TRG",
"EHSE" : "",
"EHSB" : "UTC",
"NZTU" : "TIU",
"KSOW" : "SOW",
"VVKP" : "",
"CNV8" : "",
"HHAS" : "ASM",
"KISM" : "ISM",
"KISO" : "ISO",
"KISN" : "ISN",
"SVHG" : "",
"KISW" : "ISW",
"KISP" : "ISP",
"HEOW" : "GSQ",
"EPRU" : "CZW",
"VADN" : "NMB",
"VADS" : "",
"KWSD" : "WSD",
"MHTJ" : "",
"MHTG" : "TGU",
"MHTE" : "TEA",
"KPQI" : "PQI",
"AYHG" : "HEO",
"LSTS" : "",
"YSHR" : "JHQ",
"AYNZ" : "LAE",
"EGSF" : "",
"WSSS" : "SIN",
"TTCP" : "TAB",
"EGSC" : "CBG",
"EGSM" : "",
"CAP5" : "",
"EGSH" : "NWI",
"EGSU" : "QFO",
"SYIB" : "IMB",
"EGSS" : "STN",
"WSSL" : "XSP",
"EGSY" : "",
"EGSX" : "",
"KCIC" : "CIC",
"KCID" : "CID",
"EIMN" : "IIA",
"KCIU" : "CIU",
"MMLC" : "LZC",
"YWDH" : "WNR",
"FZFK" : "GMA",
"KFZI" : "FZI",
"KSQL" : "SQL",
"NCMG" : "MGS",
"OIMC" : "",
"OIMB" : "XBJ",
"OIMN" : "BJB",
"OIMM" : "MHD",
"OIMJ" : "",
"NCMH" : "MHX",
"FZVA" : "LJA",
"KDOV" : "DOV",
"OIMT" : "",
"OIMS" : "AFZ",
"FZVS" : "PFR",
"EDRY" : "ZQC",
"EDRZ" : "",
"EDRT" : "ZQF",
"EDRV" : "",
"EDRN" : "",
"EDRI" : "",
"EDRK" : "ZNV",
"OOSA" : "SLL",
"EDRF" : "",
"EDRA" : "",
"EDRB" : "",
"XLLV" : "",
"KL52" : "L52",
"RKSI" : "ICN",
"RKSM" : "SSN",
"KMOB" : "MOB",
"RKSG" : "",
"NZNV" : "IVC",
"KANP" : "ANP",
"RKSS" : "GMP",
"RKSW" : "",
"FMCZ" : "DZA",
"FMCV" : "AJN",
"GOGS" : "CSK",
"YSHK" : "MJK",
"FMCN" : "YVA",
"FMCH" : "HAH",
"FMCI" : "NWA",
"GOGG" : "ZIG",
"DNIM" : "QOW",
"DNIL" : "ILR",
"DNIB" : "IBA",
"ZSCN" : "KHN",
"ZSCG" : "CZX",
"KPLN" : "PLN",
"BIKR" : "SAK",
"LRCV" : "CRA",
"OAUZ" : "UND",
"BIKF" : "KEF",
"SKEJ" : "EJA",
"LRCS" : "CSB",
"HTZA" : "ZNZ",
"YNRV" : "RVT",
"EHVB" : "LID",
"WBSB" : "BWN",
"SERO" : "",
"LXGB" : "GIB",
"MMIA" : "CLQ",
"SBFI" : "IGU",
"MMIM" : "ISJ",
"SBFL" : "FLN",
"MMIO" : "SLW",
"SBFN" : "FEN",
"TAPA" : "ANU",
"FAEA" : "ELL",
"SBFU" : "",
"MMIT" : "",
"HLZA" : "",
"EFTP" : "TMP",
"EFTS" : "",
"SBFZ" : "FOR",
"EFTU" : "TKU",
"MKJS" : "MBJ",
"MKJP" : "KIN",
"FAEO" : "",
"YROM" : "RMA",
"FWLK" : "LIX",
"SNBR" : "BRA",
"KDRT" : "DRT",
"FNGI" : "VPE",
"KDRI" : "DRI",
"KDRO" : "DRO",
"KDRM" : "DRM",
"KA39" : "A39",
"FVBM" : "",
"YUCX" : "",
"ZPDQ" : "DIG",
"KGVL" : "GVL",
"VEGT" : "GAU",
"UTTT" : "TAS",
"KGVQ" : "GVQ",
"GFBO" : "KBS",
"GFBN" : "BTE",
"KGVT" : "GVT",
"OMAL" : "AAN",
"EKTD" : "",
"EKTS" : "TED",
"OPMJ" : "MJD",
"CYZX" : "YZX",
"OPMI" : "",
"CYZR" : "YZR",
"CYZP" : "YZP",
"OPMA" : "",
"CYZV" : "YZV",
"CYZW" : "YZW",
"CYZT" : "YZT",
"CYZU" : "YZU",
"CYZH" : "YZH",
"OPMR" : "",
"CYZF" : "YZF",
"CYZG" : "YZG",
"CYZD" : "YZD",
"CYZE" : "YZE",
"LTBE" : "BTZ",
"SUSO" : "STY",
"LDLO" : "LSZ",
"SCTN" : "WCH",
"ZPBS" : "BSD",
"KLHM" : "LHM",
"KIDL" : "IDL",
"KLHX" : "LHX",
"KAHN" : "AHN",
"UHOO" : "OHO",
"KLHW" : "",
"VOSM" : "",
"KM94" : "M94",
"VEJS" : "IXW",
"VEJT" : "JRH",
"VEJH" : "",
"FVWN" : "WKM",
"GSUM" : "",
"K5B2" : "5B2",
"VTCN" : "NNT",
"KYUM" : "YUM",
"SADF" : "",
"HSOB" : "EBD",
"SADJ" : "",
"SADL" : "LPG",
"SADM" : "",
"HMCY" : "",
"SADP" : "",
"EGJJ" : "JER",
"VNPK" : "PKR",
"VNPL" : "PPL",
"VTCH" : "HGN",
"ENRK" : "",
"LPOV" : "",
"YCUN" : "",
"ATWN" : "",
"LEST" : "SCQ",
"LESU" : "LEU",
"LESA" : "SLM",
"LESB" : "",
"LESL" : "",
"LESO" : "EAS",
"LESJ" : "",
"GUKU" : "KSI",
"KTKI" : "TKI",
"KGST" : "XEG",
"KGSP" : "GSP",
"GCTS" : "TFS",
"YBIE" : "BEU",
"LFSX" : "",
"LFSR" : "RHE",
"LFSQ" : "BOR",
"LFSP" : "",
"LFST" : "SXB",
"LFSI" : "",
"LFSH" : "",
"LFSO" : "",
"LFSN" : "ENC",
"LFSM" : "",
"LFSL" : "BVE",
"LFSC" : "",
"LFSB" : "MLH",
"LFSG" : "EPL",
"LFSF" : "MZM",
"LFSD" : "DIJ",
"HEMK" : "HMB",
"HEMM" : "MUH",
"HEMA" : "RMF",
"UKCW" : "VSG",
"SVJC" : "LSP",
"UKCM" : "MPW",
"UKCC" : "DOK",
"SVJM" : "",
"SPZA" : "NZA",
"SPZO" : "CUZ",
"HEAT" : "ATZ",
"YELD" : "ELC",
"HAAX" : "AXU",
"KPWK" : "PWK",
"KPWM" : "PWM",
"HAAL" : "",
"HAAM" : "AMH",
"HAAB" : "ADD",
"KPWT" : "PWT",
"EGUB" : "",
"ENOL" : "OLA",
"EGUN" : "MHZ",
"EGUL" : "",
"EGUW" : "",
"ENOV" : "HOV",
"EGUY" : "",
"KNUC" : "",
"KHIB" : "HIB",
"KHIF" : "HIF",
"OEKK" : "HBT",
"KNUW" : "NUW",
"KNUQ" : "NUQ",
"KCWI" : "CWI",
"PSKA" : "KIA",
"FSSB" : "BDI",
"FSSC" : "",
"ZJSY" : "SYX",
"CYQB" : "YQB",
"KNLC" : "NLC",
"NWWD" : "KNQ",
"NWWE" : "ILP",
"KDMA" : "DMA",
"NWWA" : "TGJ",
"NWWC" : "BMY",
"NWWL" : "LIF",
"NWWM" : "GEA",
"MDPC" : "PUJ",
"XUDS" : "",
"NWWK" : "KOC",
"NWWU" : "TOU",
"NWWV" : "UVE",
"NWWW" : "NOU",
"EBFS" : "",
"NWWR" : "MEE",
"GGOV" : "OXB",
"MDPP" : "POP",
"EDTY" : "",
"EDTK" : "",
"EDTN" : "",
"EDTM" : "",
"EDTL" : "LHA",
"EDTG" : "",
"EDTF" : "",
"EDTD" : "ZQL",
"GFYE" : "WYE",
"UEVD" : "",
"LHOY" : "",
"MGQC" : "AQB",
"PTKK" : "TKK",
"ZSAM" : "XMN",
"YMTG" : "MGB",
"ZSAQ" : "AQG",
"YMTI" : "ONG",
"KMIC" : "MIC",
"KMIB" : "MIB",
"KMIA" : "MIA",
"RCLG" : "TXG",
"NIUE" : "IUE",
"EBCV" : "",
"LGPL" : "JTY",
"SPEP" : "",
"SPEQ" : "",
"SPEO" : "CHM",
"LGPZ" : "PVK",
"SOGS" : "",
"FYTM" : "TSB",
"SKCZ" : "CZU",
"SKCU" : "CAQ",
"BIES" : "BIE",
"SKCO" : "TCO",
"SKCL" : "CLO",
"SKCC" : "CUC",
"SKCA" : "CPB",
"BIEG" : "EGS",
"KEVV" : "EVV",
"KEVW" : "EVW",
"SKCD" : "COG",
"FZMA" : "BKY",
"MMGM" : "GYM",
"MMGL" : "GDL",
"MKTP" : "KTP",
"FAGY" : "",
"FAGT" : "",
"FAGR" : "",
"MPEJ" : "ONX",
"FAGM" : "QRA",
"HLTD" : "LTD",
"FAGG" : "GRJ",
"SBHT" : "ATM",
"MMGR" : "GUB",
"NGTS" : "TSU",
"NGTR" : "AIS",
"LIRZ" : "PEG",
"LIRS" : "GRS",
"LIRQ" : "FLR",
"LIRP" : "PSA",
"LIRV" : "",
"LIRU" : "",
"SGPJ" : "PJC",
"LIRJ" : "EBA",
"LIRI" : "QSR",
"KAEX" : "AEX",
"LIRN" : "NAP",
"LIRM" : "",
"LIRL" : "QLT",
"KDPA" : "DPA",
"LIRA" : "CIA",
"LIRG" : "",
"LIRF" : "FCO",
"LIRE" : "",
"CFJ2" : "",
"KA50" : "A50",
"DIMN" : "MJC",
"FQPB" : "POL",
"KGTU" : "GTU",
"KGTR" : "GTR",
"KGTF" : "GTF",
"KGTB" : "GTB",
"NGTB" : "AEA",
"EKVJ" : "STA",
"KCKC" : "GRM",
"EKVH" : "",
"LKLN" : "",
"EKVG" : "FAE",
"EKVD" : "",
"OPKC" : "KHI",
"OPKD" : "HDD",
"OPKH" : "KDD",
"VIPT" : "PGH",
"VIPL" : "",
"VIPK" : "IXP",
"YSWG" : "WGA",
"KLVS" : "LVS",
"KAJR" : "",
"KLVM" : "LVM",
"KUNI" : "",
"KLVK" : "LVK",
"NSFA" : "APW",
"NSFI" : "FGI",
"NSFQ" : "FTI",
"VYXG" : "",
"YYND" : "YUE",
"LEUT" : "LES",
"VLHS" : "OUI",
"RCYU" : "HUN",
"SVC" : "SVC",
"FNLK" : "LBZ",
"VNRC" : "RHP",
"VAKP" : "KLH",
"VNRT" : "RUM",
"KDUC" : "DUC",
"LJSG" : "",
"SMBN" : "ABN",
"PWAK" : "AWK",
"MUKW" : "",
"CLTN" : "",
"SLZI" : "",
"SSCK" : "CCI",
"YBOU" : "BQL",
"YBOI" : "GIC",
"YBOK" : "OKY",
"ZBXH" : "XIL",
"LKSZ" : "LKS",
"MRFI" : "",
"LFQA" : "",
"LFQC" : "",
"LFQB" : "QYR",
"LFQE" : "",
"KK83" : "K83",
"LFQG" : "NVS",
"LFQF" : "",
"LFQI" : "",
"LFQJ" : "",
"LFQM" : "",
"LFQQ" : "LIL",
"LFQP" : "",
"LFQT" : "",
"LFQW" : "",
"LFQV" : "",
"UAAA" : "ALA",
"KIWA" : "AZA",
"KTIK" : "TIK",
"KTIW" : "TIW",
"KIWS" : "IWS",
"EHKD" : "DHR",
"ZGHA" : "CSX",
"KPUW" : "PUW",
"KPUB" : "PUB",
"KPUC" : "PUC",
"MHPL" : "PEU",
"ENML" : "MOL",
"EGWC" : "",
"HAGM" : "GMB",
"HAGN" : "GDQ",
"HAGO" : "GDE",
"HAGR" : "GOR",
"HTAR" : "ARK",
"EGWU" : "NHT",
"ENMS" : "MJF",
"KHOP" : "HOP",
"KHOT" : "HOT",
"KHOU" : "HOU",
"KHON" : "HON",
"KHOB" : "HOB",
"FAZR" : "",
"KJKA" : "JKA",
"OIIK" : "",
"OIII" : "THR",
"NCAI" : "AIT",
"OIIM" : "",
"OIIC" : "",
"OIIA" : "",
"SKLM" : "",
"OIIG" : "",
"OIIE" : "IKA",
"OIID" : "",
"KDKX" : "DKX",
"FZJH" : "IRP",
"NCAT" : "AIU",
"EPBY" : "BZG",
"VDPP" : "PNH",
"EPBA" : "",
"EPBC" : "",
"PACV" : "CDV",
"VTST" : "TST",
"EDVE" : "BWE",
"VTSR" : "",
"PACR" : "IRC",
"KBQK" : "BQK",
"EDVI" : "",
"EDVH" : "",
"EDVK" : "KSF",
"EDVM" : "",
"PACY" : "CYT",
"PACZ" : "CZF",
"PACD" : "CDB",
"VTSF" : "NST",
"VTSE" : "CJM",
"EDVR" : "",
"VTSC" : "NAW",
"VTSB" : "URT",
"VTSA" : "",
"PACL" : "Z84",
"VTSN" : "",
"VTSM" : "USM",
"VTSK" : "PAN",
"PACI" : "CIK",
"VTSH" : "",
"ZSOF" : "HFE",
"KMKL" : "MKL",
"KMKO" : "MKO",
"KMKC" : "MKC",
"KMKE" : "MKE",
"KMKG" : "MKG",
"KMKY" : "MRK",
"USSK" : "",
"LTCT" : "IGD",
"BUCH" : "BUH",
"LGRP" : "RHO",
"BUCA" : "",
"LGRX" : "GPA",
"DXXX" : "LFW",
"LGRD" : "",
"LOIJ" : "",
"ZYYJ" : "YNJ",
"BIGR" : "GRY",
"BIGJ" : "GJR",
"KEPC" : "",
"KEPM" : "EPM",
"GQNA" : "IEO",
"GQNF" : "KFA",
"GQND" : "TIY",
"GQNK" : "KED",
"GQNI" : "EMN",
"GQNN" : "NKC",
"TPEI" : "",
"CCK4" : "YFX",
"GQNS" : "SEY",
"FPPR" : "PCP",
"EDAN" : "",
"SBJU" : "JDO",
"SBJV" : "JOI",
"SBJP" : "JPA",
"MMES" : "ESE",
"EFHV" : "HYV",
"EFHM" : "",
"SBJF" : "JDF",
"SBJC" : "",
"EFHE" : "HEN",
"EFHF" : "HEM",
"EFHA" : "KEV",
"DBBB" : "COO",
"KSNY" : "SNY",
"SSJA" : "JCB",
"PHNY" : "LNY",
"YGEL" : "GET",
"SKAC" : "ACR",
"SKAD" : "ACD",
"DIKO" : "HGO",
"FGSL" : "SSG",
"ZWCM" : "IQM",
"SKAS" : "PUU",
"SKAR" : "AXM",
"PHNL" : "HNL",
"EDAK" : "",
"KRVS" : "RVS",
"PLIN" : "",
"UNKY" : "KYZ",
"KALB" : "ALB",
"UHKK" : "KXK",
"KFXE" : "FXE",
"KKLS" : "KLS",
"KALI" : "ALI",
"UHKD" : "",
"LKNA" : "",
"KALM" : "ALM",
"KALO" : "ALO",
"EKPB" : "",
"KALS" : "ALS",
"KALW" : "ALW",
"SWMK" : "",
"UAUR" : "AYK",
"UAUU" : "KSN",
"KVYS" : "VYS",
"DRZD" : "",
"LIPC" : "",
"LIPB" : "BZO",
"LIPE" : "BLQ",
"DRZA" : "AJY",
"LIPI" : "",
"LIPH" : "TSF",
"LIPK" : "FRL",
"LIPL" : "",
"LIPO" : "VBS",
"LIPN" : "",
"LIPQ" : "TRS",
"FNCP" : "KNP",
"LIPS" : "",
"LIPR" : "RMI",
"LIPU" : "QPA",
"LIPT" : "VIC",
"DRZR" : "ZND",
"LIPV" : "",
"LIPY" : "AOI",
"LIPX" : "VRN",
"LIPZ" : "VCE",
"GVSV" : "VXE",
"VIRB" : "",
"ESGT" : "THN",
"ESGP" : "GSE",
"KLTS" : "LTS",
"ESGR" : "KVB",
"ESGL" : "LDK",
"ESGK" : "",
"ESGJ" : "JKG",
"ESGG" : "GOT",
"KORL" : "ORL",
"KORH" : "ORH",
"KORD" : "ORD",
"KORF" : "ORF",
"FVSV" : "",
"FVSH" : "",
"LRIA" : "IAS",
"LCEN" : "ECN",
"LGKM" : "",
"KORS" : "ESD",
"BGSF" : "SFJ",
"ZLSN" : "SIA",
"YLEC" : "",
"RCWA" : "WOT",
"LLMZ" : "",
"YBMC" : "MCY",
"YBMA" : "ISA",
"YBMK" : "MKY",
"LLMG" : "",
"SBAX" : "AAX",
"KPYP" : "PYP",
"SNKE" : "CMP",
"GCXO" : "TFN",
"LDSP" : "SPU",
"KCBM" : "CBM",
"KCBE" : "CBE",
"MRDK" : "DRK",
"EHYB" : "",
"MM57" : "SCX",
"KFPR" : "FRP",
"MM52" : "",
"LGIO" : "IOA",
"LGIK" : "JIK",
"ZGNN" : "NNG",
"LGIR" : "HER",
"DAOV" : "MUW",
"BGMQ" : "JSU",
"ZYDD" : "DDG",
"YDBY" : "DRB",
"YWBR" : "",
"YWBS" : "SYU",
"MHRO" : "RTB",
"UEEA" : "ADH",
"EGKR" : "KRH",
"AYHK" : "HKN",
"ENST" : "SSJ",
"DAAT" : "TMR",
"MNBL" : "BEF",
"DAAS" : "QSF",
"DAAP" : "VVZ",
"DAAQ" : "",
"DAAZ" : "",
"DAAY" : "",
"ENSD" : "SDN",
"DAAG" : "ALG",
"DAAD" : "",
"DAAE" : "BJA",
"DAAB" : "",
"MNBZ" : "BZA",
"ENSB" : "LYR",
"KEMP" : "KEX",
"DAAN" : "",
"ENSM" : "",
"ENSN" : "SKE",
"ENSO" : "SRP",
"ENSH" : "SVJ",
"DAAK" : "QFD",
"ENSK" : "SKN",
"VTSS" : "HDY",
"KNQX" : "NQX",
"KNQA" : "NQA",
"CBW4" : "YBO",
"KNQI" : "NQI",
"KCSG" : "CSG",
"KHMN" : "HMN",
"SBQV" : "VDC",
"DAOB" : "TID",
"EGKB" : "BQH",
"KHMZ" : "",
"TVSC" : "CIW",
"WAWT" : "",
"WAWW" : "KDI",
"WAWS" : "",
"TVSM" : "MQS",
"TVSV" : "SVD",
"TVSU" : "UNI",
"WAWM" : "",
"WAWJ" : "MJU",
"OIKQ" : "",
"OIKP" : "",
"OIKR" : "RJN",
"EBBL" : "",
"OIKY" : "",
"EBBE" : "",
"EBBX" : "",
"EBBR" : "BRU",
"OIKK" : "KER",
"OIKJ" : "",
"OIKM" : "BXR",
"KDIK" : "DIK",
"FQIA" : "",
"FQIN" : "INH",
"UERP" : "PYJ",
"EDXN" : "",
"UERR" : "MJZ",
"PAAQ" : "PAQ",
"UERT" : "",
"EDXI" : "",
"EDXH" : "HGL",
"EDXF" : "FLF",
"EDXB" : "HEI",
"SEQE" : "",
"UERA" : "",
"LYVR" : "",
"LRBS" : "BBU",
"EDXW" : "GWT",
"PAAK" : "AKB",
"UERL" : "ULK",
"EDXR" : "",
"SEQU" : "UIO",
"EDXP" : "",
"EKMB" : "",
"KS40" : "S40",
"EKMS" : "",
"OPTH" : "BDN",
"EEEI" : "",
"OPTU" : "TUK",
"KMUI" : "MUI",
"YSNW" : "NOA",
"HUKB" : "",
"HUKD" : "",
"YSNF" : "NLK",
"SUDU" : "",
"HUKT" : "",
"YORG" : "OAG",
"ZHXF" : "XFN",
"LGTS" : "SKG",
"LGTP" : "",
"LGTT" : "",
"DNSU" : "QRW",
"LGTG" : "",
"MUFL" : "",
"LGTL" : "",
"GOTK" : "KGG",
"FVHA" : "HRE",
"TJNR" : "",
"YLZI" : "LZR",
"KERY" : "ERY",
"KERV" : "ERV",
"LHPR" : "QGY",
"KYNG" : "YNG",
"KERI" : "ERI",
"LHPA" : "",
"VGSY" : "ZYL",
"VGSD" : "SPD",
"SBLS" : "",
"MMCP" : "CPE",
"MMCV" : "CVM",
"MMCU" : "CUU",
"MMCZ" : "CZM",
"MMCY" : "CYW",
"SKGP" : "GPI",
"EFNU" : "",
"MMCC" : "",
"CZBD" : "ILF",
"CZBF" : "ZBF",
"MMCG" : "",
"SBLE" : "LEC",
"CZBB" : "YDT",
"CZBM" : "ZBM",
"SBLJ" : "",
"MMCH" : "",
"SBLO" : "LDB",
"SBLN" : "LIP",
"MMCM" : "CTM",
"MMCL" : "CUL",
"STCB" : "",
"DCBS" : "",
"HKMY" : "OYL",
"NGMA" : "MNK",
"CYAQ" : "XKS",
"OASA" : "AZ3",
"OASD" : "",
"OASG" : "",
"FQTT" : "TET",
"OASH" : "",
"CLY1" : "",
"SKOC" : "OCV",
"ZWAT" : "AAT",
"ZWAK" : "AKU",
"BIAR" : "AEY",
"SKOT" : "OTU",
"WIDD" : "BTH",
"WIDN" : "TNJ",
"WIDS" : "SIQ",
"KRHI" : "RHI",
"LKHK" : "",
"KAND" : "AND",
"KANE" : "",
"KANB" : "ANB",
"KFZG" : "FZG",
"EKRN" : "RNN",
"UAKD" : "DZN",
"EKRK" : "RKE",
"REGE" : "RGB",
"UAKK" : "KGF",
"LKHV" : "",
"SWCA" : "CAF",
"OKBK" : "KWI",
"FNMO" : "MSZ",
"FNME" : "SPP",
"FNMA" : "MEG",
"HETB" : "TCP",
"ZPDL" : "DLU",
"LDRG" : "",
"LDRI" : "RJK",
"HETR" : "ELT",
"KLRU" : "LRU",
"KLRO" : "LRO",
"VA1P" : "DIU",
"KLRF" : "LRF",
"KLRD" : "LRD",
"GUFH" : "FAA",
"KOPF" : "OPF",
"SJDB" : "BYO",
"LPEV" : "",
"KDGL" : "DGL",
"VLLB" : "LPQ",
"VLLN" : "LXG",
"BKX" : "BKX",
"SAZR" : "RSA",
"SAZS" : "BRC",
"SAZP" : "",
"SAZV" : "VLG",
"SAZW" : "",
"SAZT" : "TDL",
"SAZY" : "CPC",
"DTNZ" : "NBE",
"MGQZ" : "AAZ",
"SAZB" : "BHI",
"SAZC" : "",
"SAZF" : "",
"SAZG" : "",
"SAZH" : "",
"SAZI" : "",
"SAZN" : "NQN",
"SAZO" : "NEC",
"SAZL" : "SST",
"SAZM" : "MDQ",
"RJSM" : "MSJ",
"YCKN" : "CTN",
"KW55" : "LKE",
"LKKV" : "KLV",
"WALG" : "TJS",
"YBCK" : "BKQ",
"WALL" : "BPN",
"YBCG" : "OOL",
"WALJ" : "DTD",
"WALK" : "BEJ",
"WALT" : "",
"WALR" : "TRK",
"WALS" : "SRI",
"YBCV" : "CTL",
"YBCS" : "CNS",
"FBFT" : "FRW",
"YPLM" : "LEA",
"LLOV" : "VDA",
"ZUBD" : "BPX",
"XLLN" : "",
"SLVR" : "VVI",
"SLVM" : "",
"ZLQY" : "IQN",
"XS46" : "S46",
"HSDN" : "DOG",
"HSDZ" : "",
"VHHH" : "HKG",
"MRBC" : "BCL",
"MRBA" : "",
"YHUG" : "HGD",
"VVCS" : "VCS",
"VVCR" : "CXR",
"VVCT" : "VCA",
"VVCI" : "HPH",
"VVCM" : "CAH",
"VOYK" : "YLK",
"VVCA" : "VCL",
"YOOM" : "MOO",
"KTEB" : "TEB",
"KIKV" : "IKV",
"KIKR" : "IKR",
"KTEX" : "TEX",
"KIKK" : "IKK",
"KBUU" : "BUU",
"LGKC" : "KIT",
"LGKA" : "KSO",
"LGKF" : "EFL",
"BGSS" : "JHS",
"LGKJ" : "KZS",
"LGKO" : "KGS",
"LGKL" : "KLX",
"YPBR" : "BME",
"LGKR" : "CFU",
"LGKS" : "KSJ",
"LGKP" : "AOK",
"LGKV" : "KVA",
"BGSC" : "OBY",
"LGKZ" : "KZI",
"LGKY" : "JKL",
"YWLM" : "NTL",
"%u04" : "%u0",
"YWLG" : "WGE",
"KPYM" : "PYM",
"YWLU" : "WUN",
"SESA" : "SNC",
"KEOK" : "EOK",
"HTMW" : "MWZ",
"HTMU" : "MUZ",
"HTMT" : "MYW",
"DAOR" : "CBH",
"DAOS" : "",
"DAOL" : "TAF",
"DAON" : "TLM",
"DAOO" : "ORN",
"DAOI" : "QAS",
"EGKK" : "LGW",
"HKSB" : "UAS",
"DAOE" : "",
"DAOF" : "TIN",
"EGKA" : "ESH",
"HTMA" : "MFA",
"PAGM" : "GAM",
"KBUF" : "BUF",
"KDTW" : "DTW",
"KNSI" : "",
"KNSE" : "NSE",
"OEAO" : "ULH",
"SBSC" : "STU",
"OEAH" : "HOF",
"SBSJ" : "SJK",
"SBSN" : "STM",
"OEAB" : "AHB",
"SBSL" : "SLZ",
"SBSM" : "RIA",
"SBSR" : "SJP",
"SBSP" : "CGH",
"SBSV" : "SSA",
"SBST" : "SSZ",
"LOSM" : "",
"UTAA" : "ASB",
"UTAM" : "MYP",
"UTAK" : "KRW",
"UTAT" : "TAZ",
"UTAV" : "",
"RJSS" : "SDJ",
"RJSR" : "ONJ",
"RJST" : "",
"RJSY" : "SYO",
"RJSA" : "AOJ",
"RJSC" : "GAJ",
"RJSD" : "",
"RJSF" : "FKS",
"RJSI" : "HNA",
"RJSH" : "",
"RJSK" : "AXT",
"KSYR" : "SYR",
"MUOC" : "CCC",
"RJSN" : "KIJ",
"YABA" : "ALH",
"KIEV" : "KIP",
"PAGY" : "SGY",
"KBUR" : "BUR",
"SEST" : "SCY",
"KBUY" : "BUY",
"PAGS" : "GST",
"PAGT" : "NME",
"PAGH" : "SHG",
"PAGK" : "GKN",
"PAGL" : "GLV",
"SESD" : "",
"PAGN" : "AGN",
"FMNT" : "TTS",
"PAGA" : "GAL",
"LYTV" : "TIV",
"SESM" : "PTZ",
"PAGG" : "KWK",
"EKOD" : "ODE",
"FMNP" : "WMP",
"NGFU" : "FUN",
"LTCS" : "GNY",
"LTCR" : "MQM",
"OPRK" : "RYK",
"LTCM" : "SIC",
"OPRN" : "ISB",
"LTCL" : "",
"LTCO" : "AJI",
"KMWM" : "MWM",
"KMWL" : "MWL",
"LTCN" : "KCM",
"KMWH" : "MWH",
"OPRS" : "",
"KMWA" : "MWA",
"KMWC" : "MWC",
"LTCH" : "SFQ",
"FZNA" : "GOM",
"SSUM" : "UMU",
"LTCJ" : "BAL",
"LTCE" : "ERZ",
"FMND" : "ZWA",
"LTCG" : "TZX",
"LTCA" : "EZS",
"LTCC" : "DIY",
"EFLA" : "",
"HLRA" : "",
"EFLP" : "LPP",
"CABO" : "",
"OSPR" : "PMS",
"ANYN" : "INU",
"LADG" : "",
"YIVO" : "",
"MMAS" : "AGU",
"SBNT" : "NAT",
"MPCE" : "CTD",
"SBNM" : "GEL",
"MMAN" : "NTR",
"MMAA" : "ACA",
"OPRT" : "RAZ",
"MPCH" : "CHX",
"SBNF" : "NVT",
"YGAY" : "GAH",
"YGAT" : "",
"SKMU" : "MVP",
"SKMR" : "MTR",
"SKMZ" : "MZL",
"SKMD" : "EOH",
"SKMG" : "MGN",
"LBBG" : "BOJ",
"FQVL" : "VNX",
"HKKR" : "KEY",
"ZBAA" : "PEK",
"EDXO" : "PSH",
"KFDW" : "FDW",
"KLPC" : "LPC",
"UHWW" : "VVO",
"ESCF" : "",
"KUDD" : "UDD",
"ESCK" : "",
"FJDG" : "",
"ESCM" : "",
"ESCL" : "SOO",
"KFDY" : "FDY",
"KAPF" : "APF",
"KAPG" : "APG",
"UAII" : "CIT",
"KAPA" : "APA",
"KAPC" : "APC",
"UHWK" : "",
"KAPN" : "APN",
"LFFE" : "",
"LFFN" : "",
"LFFH" : "",
"LFFI" : "",
"LFFQ" : "",
"GA46" : "",
"LPGR" : "GRW",
"OYBN" : "BHN",
"ORSU" : "ISU",
"OYBA" : "",
"TTPP" : "POS",
"SCNT" : "PNT",
"CYBR" : "YBR",
"CYBQ" : "XTL",
"CYBV" : "YBV",
"CYBW" : "YBW",
"CYBT" : "YBT",
"CYBX" : "YBX",
"CYBB" : "YBB",
"CYBC" : "YBC",
"HOLM" : "",
"CYBF" : "YBY",
"CYBG" : "YBG",
"CYBD" : "QBC",
"CYBE" : "YBE",
"CYBK" : "YBK",
"CYBL" : "YBL",
"YCMU" : "CMA",
"RCSQ" : "",
"RCSS" : "TSA",
"KCDN" : "CDN",
"KMDQ" : "",
"YBAM" : "ABM",
"YBAF" : "",
"YBAR" : "BCI",
"YBAS" : "ASP",
"YBAU" : "BDD",
"ZUDX" : "DAX",
"RORK" : "KTD",
"RORA" : "AGJ",
"RORY" : "RNJ",
"ZUDC" : "DCY",
"RORT" : "",
"RORS" : "SHI",
"PHOG" : "OGG",
"EDAD" : "",
"EDAE" : "",
"HSFS" : "ELF",
"KBHB" : "BHB",
"EDAB" : "BBJ",
"EDAC" : "AOC",
"EDAL" : "",
"EDAM" : "",
"KBHM" : "BHM",
"EDAH" : "HDF",
"EDAI" : "",
"EDAJ" : "",
"SLTJ" : "TJA",
"EDAU" : "",
"EDAV" : "",
"EDAW" : "",
"UEMY" : "",
"EDAQ" : "",
"EDAS" : "",
"UEMU" : "UMS",
"UEMT" : "",
"SABE" : "AEP",
"EDAX" : "",
"EDAY" : "",
"EDAZ" : "",
"ZGBH" : "BHY",
"FUA1" : "",
"ZPLC" : "LNJ",
"EGED" : "EOI",
"EHEH" : "EIN",
"KTCS" : "TCS",
"SVCO" : "",
"YSSY" : "SYD",
"KIMT" : "IMT",
"KTCC" : "TCC",
"KIMM" : "IMM",
"KTCL" : "TCL",
"KTCM" : "TCM",
"SVBI" : "BNS",
"BGQQ" : "NAQ",
"LGML" : "MLO",
"SVBL" : "",
"SVBC" : "BLA",
"K1RL" : "1RL",
"LGMG" : "",
"SPRM" : "",
"YPDN" : "DRW",
"BUZZ" : "",
"SVBS" : "MYC",
"LGMT" : "MJT",
"CAJ4" : "YAA",
"AYDU" : "DAU",
"PAMH" : "MHM",
"EGMK" : "LYM",
"EGMH" : "MSE",
"SCSE" : "LSC",
"EGMC" : "SEN",
"EGMD" : "LYX",
"XKSO" : "",
"MMZH" : "ZIH",
"YPEC" : "BEO",
"SBUL" : "UDI",
"MMZM" : "ZMM",
"FAPY" : "",
"MMZO" : "ZLO",
"SVCR" : "CZE",
"MMZC" : "ZCL",
"FAPS" : "",
"SBUF" : "PAV",
"SBUG" : "URG",
"FAPN" : "NTY",
"FAPM" : "PZB",
"FAPJ" : "",
"FAPI" : "PTG",
"FAPH" : "PHW",
"MMZP" : "",
"SBUR" : "UBA",
"PAEG" : "EAA",
"PAED" : "EDF",
"PAEE" : "EEK",
"VTUD" : "UTH",
"PAEN" : "ENA",
"UHMP" : "PWE",
"WASS" : "SOQ",
"WASR" : "MKW",
"VTUL" : "LOE",
"VTUO" : "BFV",
"PAEI" : "EIL",
"VTUQ" : "NAK",
"WASK" : "KNG",
"VTUR" : "",
"VTUU" : "UBP",
"VTUW" : "KOP",
"VTUV" : "ROI",
"UHMS" : "",
"WASF" : "FKQ",
"KSGU" : "SGU",
"KSGR" : "SGR",
"PAMB" : "KMO",
"KDET" : "DET",
"KRYY" : "RYY",
"KSGF" : "SGF",
"KJQF" : "",
"KDEN" : "DEN",
"3N0" : "NDK",
"KDEC" : "DEC",
"KSGJ" : "UST",
"KDED" : "",
"KSGH" : "SGH",
"WAML" : "PLW",
"FQMA" : "MPM",
"HKUK" : "UKA",
"FQMD" : "",
"KFRI" : "FRI",
"SKVV" : "VVC",
"SKVP" : "VUP",
"FQMP" : "MZB",
"KCCO" : "CCO",
"FQMR" : "",
"UASB" : "",
"CNY3" : "",
"KBWI" : "BWI",
"KBWG" : "BWG",
"SOOA" : "MPY",
"SOOG" : "",
"SOOM" : "LDX",
"SOOS" : "XAU",
"USCC" : "CEK",
"USCM" : "MQF",
"OPPS" : "PEW",
"KMQS" : "CTH",
"KMQT" : "MQT",
"OPPC" : "PAJ",
"OPPG" : "PJG",
"OPPI" : "PSI",
"OIGG" : "RAS",
"YSBK" : "BWU",
"KCCR" : "CCR",
"MUBY" : "BYM",
"EDXQ" : "",
"DATM" : "BMW",
"MUBA" : "BCA",
"ESMK" : "KID",
"HLLT" : "TIP",
"TJRV" : "RVR",
"KOKC" : "OKC",
"HLLQ" : "LAQ",
"HLLS" : "SEB",
"OABT" : "BST",
"HLLB" : "BEN",
"YTWB" : "TWB",
"HLLM" : "MJI",
"EGET" : "LWK",
"LZSL" : "SLD",
"HRYR" : "KGL",
"FDFM" : "",
"HRYG" : "GYI",
"YPPH" : "PER",
"EGEC" : "CAL",
"MYSM" : "ZSA",
"HKMK" : "",
"CZFM" : "ZFM",
"CZFN" : "ZFN",
"CZFA" : "ZFA",
"EGEG" : "",
"MPMG" : "PAC",
"CZFD" : "ZFD",
"EGEF" : "FIE",
"UOOO" : "NSK",
"HKMO" : "MBA",
"TKPK" : "SKB",
"TKPN" : "NEV",
"YREN" : "RMK",
"CCE4" : "YBI",
"K2I3" : "",
"SYAN" : "NAI",
"YRED" : "",
"EGEN" : "NRL",
"SYAP" : "",
"SDZY" : "IZA",
"ZBCZ" : "CIH",
"ZBCF" : "CIF",
"ESMO" : "OSK",
"ESML" : "JLD",
"KARV" : "ARV",
"KART" : "ART",
"KFFZ" : "FFZ",
"FLSW" : "SLI",
"KFFT" : "FFT",
"ESMA" : "",
"FLSO" : "KIW",
"KFFO" : "FFO",
"KARA" : "ARA",
"UAOO" : "KZO",
"ESMX" : "VXO",
"ESMV" : "",
"KFOE" : "FOE",
"ESMS" : "MMX",
"ESMQ" : "KLR",
"ESMP" : "",
"LFDB" : "",
"VOBI" : "BEP",
"LFDA" : "",
"VOBL" : "BLR",
"LFDJ" : "",
"LFDH" : "",
"LFDI" : "",
"LFDN" : "RCO",
"SWGN" : "AUX",
"LFDM" : "",
"VOBZ" : "VGA",
"VOBR" : "",
"HEPS" : "PSD",
"TVSB" : "BQU",
"LFOH" : "LEH",
"LDVA" : "",
"UAFM" : "FRU",
"KHBG" : "HBG",
"LPAR" : "",
"OEBA" : "ABT",
"YCOE" : "CUQ",
"LPAV" : "",
"ZMKD" : "HVD",
"LPAZ" : "SMA",
"YCOM" : "OOM",
"YCOO" : "CDA",
"KOTH" : "OTH",
"ORMM" : "BSR",
"SCLL" : "",
"FYGF" : "GFY",
"EGYM" : "KNF",
"KNHK" : "NHK",
"CYDP" : "YDP",
"CYDQ" : "YDQ",
"KJOT" : "JOT",
"CYDL" : "YDL",
"CYDN" : "YDN",
"CYDA" : "YDA",
"CYDB" : "YDB",
"CYDC" : "YDC",
"CYDF" : "YDF",
"RCQC" : "MZG",
"SBRP" : "RAO",
"RCQS" : "",
"FBJW" : "JWA",
"EYSA" : "SQQ",
"EPWR" : "WRO",
"EPWA" : "WAW",
"EDCB" : "",
"EDCA" : "",
"KBJI" : "BJI",
"EDCG" : "GTI",
"EDCD" : "",
"EDCK" : "",
"SLRI" : "RIB",
"EDCO" : "",
"KBJC" : "BJC",
"EDCM" : "",
"EDCS" : "",
"SLRQ" : "RBQ",
"SLRY" : "REY",
"EDCY" : "",
"VAPO" : "PNQ",
"ZSTX" : "TXN",
"CNZ3" : "XCM",
"EDHP" : "",
"SPPY" : "CHH",
"VAPR" : "PBD",
"EHGR" : "",
"EHGG" : "GRQ",
"LFYD" : "",
"KTAN" : "TAN",
"HHMS" : "MSW",
"EDHC" : "",
"KIOW" : "IOW",
"KJYL" : "JYL",
"FARG" : "",
"FARB" : "RCB",
"YWHA" : "WYA",
"FARS" : "",
"UNSS" : "SWT",
"YWHI" : "WSY",
"K6J4" : "6J4",
"KEKY" : "EKY",
"AYBK" : "BUA",
"KEKO" : "EKO",
"KEKN" : "EKN",
"EDVP" : "",
"VGJR" : "JSR",
"KWAL" : "WAL",
"KHGR" : "HGR",
"MMHO" : "HMO",
"FCOO" : "FTX",
"LOWG" : "GRZ",
"PAKN" : "AKN",
"PAKO" : "IKO",
"PAKH" : "AKK",
"PAKI" : "KPN",
"PAKK" : "KKA",
"LOWL" : "LNZ",
"LOWK" : "KLU",
"LOWI" : "INN",
"LOWW" : "VIE",
"PAKY" : "KYK",
"PAKT" : "KTN",
"PAKU" : "UUK",
"PAKV" : "KAL",
"PAKW" : "KLW",
"PAKP" : "AKP",
"SBGV" : "GVR",
"KJST" : "JST",
"KSEZ" : "SDX",
"NURN" : "NUR",
"KSES" : "SES",
"KSEM" : "SEM",
"KSEE" : "SEE",
"KSEF" : "SEF",
"KSEA" : "SEA",
"KUDG" : "UDG",
"EGOE" : "",
"EGOD" : "",
"UOII" : "IAA",
"UNCC" : "",
"SKTL" : "TLU",
"SKTM" : "TME",
"HTIR" : "IRI",
"EGOS" : "",
"EGOW" : "",
"EGOV" : "HLY",
"FABO" : "",
"SCGZ" : "",
"LYPR" : "PRN",
"KLPR" : "LPR",
"LYPG" : "TGD",
"USMU" : "NUX",
"PFNO" : "ORV",
"EKKA" : "KRP",
"USMM" : "NYM",
"NGBR" : "BEZ",
"EKKL" : "",
"KVBG" : "VBG",
"ZWTP" : "TLQ",
"KDCA" : "DCA",
"EBLG" : "LGG",
"LIEA" : "AHO",
"OIAG" : "",
"FZBO" : "FDU",
"LIED" : "DCI",
"LIEE" : "CAG",
"OIAJ" : "",
"OIAI" : "QMJ",
"OIAH" : "",
"LIEO" : "OLB",
"OIAM" : "MRX",
"LIER" : "FNU",
"OIAW" : "AWZ",
"LIET" : "TTB",
"FZBT" : "KRZ",
"YSDU" : "DBO",
"VODG" : "",
"TJPS" : "PSE",
"UNNT" : "OVB",
"HLNF" : "",
"UNNE" : "",
"FVFA" : "VFA",
"KMSL" : "MSL",
"KMSO" : "MSO",
"KMSN" : "MSN",
"KMSP" : "MSP",
"KMSS" : "MSS",
"KMSY" : "MSY",
"WBGR" : "MYY",
"WBGS" : "SBW",
"WBGQ" : "BKM",
"WBGW" : "LWY",
"WBGZ" : "BBN",
"WBGB" : "BTU",
"WBGC" : "BLG",
"WBGF" : "LGL",
"WBGG" : "KCH",
"WBGJ" : "LMN",
"WBGK" : "MKM",
"WBGI" : "ODN",
"WBGL" : "LKH",
"WBGM" : "MUR",
"NTTO" : "HOI",
"NTTM" : "MOZ",
"KRND" : "RND",
"NTTH" : "HUH",
"YIFL" : "IFL",
"NTTG" : "RGI",
"NTTE" : "TTI",
"NTTB" : "BOB",
"KRNO" : "RNO",
"KGBD" : "GBN",
"AGRM" : "RBV",
"KRNT" : "RNT",
"LJLJ" : "LJU",
"AGRC" : "RIN",
"NTTP" : "MAU",
"SMWA" : "AGI",
"SYCJ" : "GEO",
"ROKJ" : "UEO",
"KJXN" : "JXN",
"ROKR" : "KJP",
"GCGM" : "GMZ",
"ULPW" : "",
"SKIP" : "IPI",
"PGRC" : "",
"SKIB" : "IBE",
"ULPB" : "PES",
"ZMUB" : "ULN",
"WIBD" : "DUM",
"WIBB" : "PKU",
"KATY" : "ATY",
"ESOW" : "VST",
"YHLC" : "HCQ",
"ZMUG" : "ULO",
"KATT" : "ATT",
"KATW" : "ATW",
"ESOE" : "ORB",
"KATL" : "ATL",
"ESOH" : "HFS",
"ESOK" : "KSD",
"LFBH" : "LRH",
"LFBI" : "PIS",
"LFBK" : "",
"LFBL" : "LIG",
"LFBM" : "",
"LFBN" : "NIT",
"LFBO" : "TLS",
"LFBA" : "AGF",
"LFBC" : "",
"LFBD" : "BOD",
"LFBE" : "EGC",
"LFBF" : "",
"LFBG" : "CNG",
"LFBX" : "PGX",
"LFBZ" : "BIQ",
"VVTH" : "TBB",
"UHSS" : "UUS",
"LFBP" : "PUF",
"LFBR" : "",
"LFBT" : "LDE",
"LFBU" : "ANG",
"MTCH" : "CAP",
"DRRT" : "THZ",
"MTCA" : "",
"FNKU" : "SVP",
"SNVS" : "BVS",
"DRRM" : "MFQ",
"DRRN" : "NIM",
"GA04" : "",
"SCRT" : "",
"SNVB" : "VAL",
"UKRR" : "CEJ",
"ZMMN" : "MXV",
"KMYR" : "MYR",
"LPCS" : "",
"LPCR" : "CVU",
"LPCV" : "",
"LPCO" : "",
"YCAR" : "CVQ",
"UGSS" : "SUI",
"SCJO" : "ZOS",
"PJON" : "JON",
"VYBP" : "",
"VYBR" : "NYU",
"VYBM" : "BMO",
"UGSB" : "BUS",
"EGLL" : "LHR",
"VYBG" : "",
"PAQH" : "KWN",
"MMTP" : "TAP",
"KNJK" : "NJK",
"KCFD" : "CFD",
"CYFD" : "YFD",
"CYFE" : "",
"CYFB" : "YFB",
"CYFC" : "YFC",
"CYFA" : "YFA",
"CYFO" : "YFO",
"KHZL" : "HZL",
"CYFJ" : "YTM",
"CYFH" : "YFH",
"CYFT" : "YMN",
"CYFR" : "YFR",
"CYFS" : "YFS",
"EIBN" : "BYT",
"WAJW" : "WMX",
"WAJR" : "WAR",
"WAJS" : "SEH",
"WAJB" : "BUI",
"YBEE" : "",
"WAJJ" : "DJJ",
"LLES" : "",
"GMFO" : "OUD",
"GMFM" : "MEK",
"GMFK" : "ERH",
"GMFI" : "",
"GMFF" : "FEZ",
"LLEY" : "",
"GMFB" : "",
"PKWA" : "KWA",
"LLEK" : "",
"GMFU" : "",
"MDAB" : "EPS",
"CEB3" : "YCK",
"KBLI" : "BLI",
"KBLH" : "BLH",
"SLPO" : "POI",
"SLPA" : "",
"LRTZ" : "",
"SLPS" : "PSZ",
"SLPR" : "PUR",
"KBLV" : "BLV",
"CND4" : "ND4",
"VARP" : "RPR",
"BGUM" : "UMD",
"BGUK" : "JUV",
"BGUQ" : "JQA",
"VARK" : "RAJ",
"EHAL" : "",
"EHAM" : "AMS",
"UWSB" : "BWO",
"UWSS" : "RTW",
"LGAX" : "",
"KIAH" : "IAH",
"LGAT" : "",
"LGAV" : "ATH",
"KIAD" : "IAD",
"KIAB" : "IAB",
"LGAL" : "AXD",
"LSMU" : "",
"LGAD" : "PYR",
"LGAG" : "AGQ",
"FATF" : "",
"FATP" : "",
"FATT" : "",
"FATZ" : "LTA",
"NVSQ" : "ZGU",
"NVSP" : "NUS",
"NVSS" : "SON",
"NVSR" : "RCL",
"NVSU" : "ULB",
"NVST" : "TGH",
"NVSW" : "WLH",
"NVSV" : "VLS",
"NVSX" : "SWJ",
"NVSZ" : "OLZ",
"KPCZ" : "PCZ",
"NVSA" : "MTV",
"NVSC" : "SLH",
"NVSE" : "EAE",
"NVSD" : "TOH",
"NVSG" : "LOD",
"NVSF" : "CCV",
"NVSI" : "PBJ",
"NVSH" : "SSR",
"NVSM" : "LNB",
"NVSL" : "LPM",
"NVSO" : "LNE",
"NVSN" : "MWF",
"YVRD" : "VCD",
"OEGS" : "ELQ",
"OEGT" : "URY",
"MMVR" : "VER",
"SBYS" : "QPS",
"OEGN" : "GIZ",
"KHEF" : "MNZ",
"YNAR" : "NRA",
"MMVA" : "VSA",
"NZGT" : "MON",
"PAIK" : "IAN",
"PAII" : "EGX",
"PAIN" : "MCL",
"PAIL" : "ILI",
"PAIM" : "UTO",
"NFFN" : "NAN",
"PAIG" : "IGG",
"PAIW" : "WAA",
"KSCK" : "SCK",
"KSCH" : "SCH",
"SKRH" : "RCH",
"HTKJ" : "JRO",
"HKIS" : "",
"HTKA" : "TKQ",
"EGAE" : "LDY",
"EGAC" : "BHD",
"EGAB" : "ENK",
"EGAA" : "BFS",
"SKRG" : "MDE",
"OAFZ" : "FBD",
"EGAR" : "",
"KLGA" : "LGA",
"KLGB" : "LGB",
"KLGC" : "LGC",
"MUNG" : "GER",
"KLGU" : "LGU",
"EKEL" : "",
"GAMB" : "MZI",
"CLAA" : "",
"EKEB" : "EBJ",
"KGDW" : "GDW",
"KVLD" : "VLD",
"OICI" : "IIL",
"LICJ" : "PMO",
"FNPA" : "PBN",
"OICC" : "KSH",
"LICC" : "CTA",
"LICD" : "LMP",
"LICG" : "PNL",
"LICZ" : "NSY",
"SULO" : "",
"HUSO" : "SRT",
"LICP" : "",
"KDAW" : "DAW",
"LICR" : "REG",
"LICT" : "TPS",
"VIAG" : "AGR",
"VIAL" : "IXD",
"OLKA" : "",
"VIAR" : "ATQ",
"7FA1" : "",
"UNLL" : "",
"OIFH" : "",
"CTU5" : "ZLT",
"ETHN" : "",
"ETHM" : "",
"ETHL" : "",
"ETHF" : "",
"ETHE" : "",
"ETHC" : "ZCN",
"ETHB" : "",
"ETHS" : "",
"ETHR" : "",
"WBAK" : "",
"ZBOW" : "BAV",
"LEBG" : "RGS",
"LEBB" : "BIO",
"LEBA" : "ODB",
"LEBL" : "BCN",
"CCA6" : "YWM",
"LEBZ" : "BJZ",
"FIMP" : "MRU",
"FIMR" : "RRG",
"ZLDH" : "DNH",
"YPKG" : "KGI",
"DRZF" : "",
"ZWYN" : "YIN",
"DIAP" : "ABJ",
"VAID" : "IDR",
"KLZU" : "LZU",
"ESIB" : "",
"ESIA" : "",
"RPMZ" : "ZAM",
"UACK" : "KOV",
"RPMP" : "PAG",
"RPMS" : "SUG",
"RPMR" : "",
"UACC" : "TSE",
"RPMW" : "TDG",
"RPMH" : "CGM",
"VVVH" : "VII",
"RPMJ" : "JOL",
"RPML" : "CGY",
"RPMO" : "OZC",
"RPMN" : "SGS",
"RPMC" : "CBO",
"RPMB" : "GES",
"RPME" : "BXU",
"RPMD" : "DVO",
"RPMG" : "DPL",
"UACP" : "PPK",
"KHKY" : "HKY",
"KTZR" : "TZR",
"SNPD" : "POJ",
"UKLT" : "TNL",
"UKLU" : "UDJ",
"UKLR" : "RWN",
"LDZA" : "ZAG",
"LDZD" : "ZAD",
"UKLC" : "UKC",
"UKLL" : "LWO",
"UKLN" : "CWC",
"UKLH" : "HMJ",
"UKLI" : "IFO",
"LDZU" : "",
"GLH" : "GLH",
"YCCT" : "CNC",
"MYRP" : "",
"YCCY" : "CNJ",
"YCCA" : "CCL",
"MHAH" : "AHS",
"EIME" : "",
"VYDW" : "TVY",
"FYKT" : "KMP",
"OJHF" : "",
"FYKM" : "MPA",
"KDLS" : "",
"KNDZ" : "NDZ",
"SNCW" : "",
"KCDC" : "CDC",
"CYHN" : "YHN",
"SARI" : "IGR",
"CYHH" : "YNS",
"CYHI" : "YHI",
"SARL" : "AOL",
"SARM" : "",
"CYHD" : "YHD",
"SARC" : "CNQ",
"CYHF" : "YHF",
"SARF" : "FMA",
"CYHA" : "YQC",
"CYHB" : "YHB",
"SARE" : "RES",
"KCDR" : "CDR",
"KCDS" : "CDS",
"CYHY" : "YHY",
"CYHZ" : "YHZ",
"CYHU" : "YHU",
"SARP" : "PSS",
"CYHR" : "YHR",
"CYHS" : "",
"VTBD" : "DMK",
"TLPC" : "SLU",
"TLPL" : "UVF",
"HSLI" : "",
"WADW" : "WGP",
"WADS" : "SWQ",
"WADL" : "LOP",
"VTBU" : "UTP",
"VTBW" : "",
"VTBS" : "BKK",
"WADD" : "DPS",
"WADA" : "AMI",
"WADB" : "BMU",
"MDCZ" : "COZ",
"MDCY" : "AZS",
"MDCR" : "",
"ROTM" : "",
"NCRG" : "RAR",
"TNCM" : "SXM",
"ZLYS" : "YUS",
"TNCC" : "CUR",
"YAYE" : "AYQ",
"ZLYL" : "UYN",
"ZLYA" : "ENY",
"TNCA" : "AUA",
"EPSN" : "",
"EDGS" : "",
"EPSD" : "",
"EDGZ" : "",
"EPSC" : "SZZ",
"OONR" : "OMM",
"KDSM" : "DSM",
"KBNA" : "BNA",
"EDGE" : "",
"TNCE" : "EUX",
"KJVL" : "JVL",
"OBBS" : "",
"KBNO" : "",
"ZGDY" : "DYG",
"YPJT" : "JAD",
"SWFX" : "SXO",
"SPTN" : "TCQ",
"CNF4" : "NF4",
"ZSPD" : "PVG",
"SPTU" : "PEM",
"MRSR" : "PLD",
"NZDN" : "DUD",
"MRST" : "",
"UEMM" : "",
"LFLL" : "LYS",
"UEMO" : "",
"ZUJZ" : "JZH",
"UWUU" : "UFA",
"LFLC" : "CFE",
"UEMH" : "",
"LTAW" : "",
"KICT" : "ICT",
"LTAU" : "ASR",
"LTAT" : "MLX",
"LTAS" : "ONQ",
"LTAR" : "VAS",
"LTAP" : "MZH",
"LTAZ" : "NAV",
"LTAY" : "DNZ",
"LTAG" : "",
"LTAF" : "ADA",
"LTAE" : "",
"LTAD" : "ANK",
"LTAC" : "ESB",
"LTAB" : "",
"LTAO" : "",
"LTAN" : "KYA",
"LTAL" : "KFS",
"LTAK" : "",
"LTAJ" : "GZT",
"LTAI" : "AYT",
"LTAH" : "AFY",
"UEMA" : "",
"FAVB" : "VRU",
"FAVG" : "VIR",
"FAVR" : "",
"FAVV" : "",
"KVAY" : "VAY",
"MMSD" : "SJD",
"FC08" : "",
"KPAE" : "PAE",
"KPAH" : "PAH",
"KPAO" : "PAO",
"ANNA" : "",
"KPAM" : "PAM",
"SLTR" : "TDD",
"HUAR" : "RUA",
"MMSL" : "",
"KCYS" : "CYS",
"MMTJ" : "TIJ",
"MMTN" : "TSL",
"MMTO" : "TLC",
"MMTM" : "TAM",
"MMTB" : "",
"MMTC" : "TRC",
"MMTA" : "TXA",
"MMTG" : "TGZ",
"PAOR" : "ORT",
"PAOT" : "OTZ",
"PAOU" : "NLG",
"NCMK" : "MUK",
"PAOB" : "OBU",
"PAOH" : "HNH",
"PTYA" : "YAP",
"PAOM" : "OME",
"PAOO" : "OOK",
"EYPA" : "PLQ",
"NCMR" : "MOI",
"CCZ2" : "YRG",
"SKPV" : "PVA",
"HASO" : "ASO",
"SKPS" : "PSO",
"HKKT" : "KTL",
"EGCD" : "",
"FQCH" : "VPY",
"KEGV" : "EGV",
"SKPB" : "",
"SKPC" : "PCR",
"EGCC" : "MAN",
"HKKE" : "",
"FQCB" : "FXO",
"HKKI" : "KIS",
"EGCK" : "",
"SKPI" : "",
"KLEB" : "LEB",
"MULM" : "LCL",
"NGNU" : "NIG",
"KLEW" : "LEW",
"DNYO" : "YOL",
"KLEX" : "LEX",
"RPVI" : "ILO",
"RPVJ" : "MBT",
"RPVK" : "KLO",
"RPVM" : "CEB",
"RPVO" : "OMC",
"RPVA" : "TAC",
"RPVB" : "BCD",
"RPVC" : "CYP",
"RPVD" : "DGT",
"EKGH" : "",
"RPVF" : "CRM",
"RPVG" : "",
"RPVP" : "PPS",
"RPVR" : "RXS",
"RPVS" : "SJI",
"RPVT" : "TAG",
"RPVU" : "TBH",
"RPVV" : "USU",
"RPVW" : "BPR",
"KSAA" : "SAA",
"KSAC" : "SAC",
"KSAD" : "SAD",
"KSAF" : "SAF",
"FZFP" : "",
"KSAN" : "SAN",
"KSAT" : "SAT",
"KSAW" : "",
"KSAV" : "SAV",
"FZFD" : "BDT",
"UCFL" : "ИКУ",
"VICG" : "IXC",
"LOKN" : "",
"VICX" : "",
"KSLK" : "SLK",
"KOMA" : "OMA",
"KOMN" : "OMN",
"OMAD" : "AZI",
"OMAA" : "AUH",
"OMAM" : "",
"VEGY" : "GAY",
"OMAJ" : "",
"OMAH" : "",
"OMAS" : "",
"OMAR" : "",
"VEGK" : "GOP",
"WI1A" : "",
"FVBU" : "BUQ",
"OMAZ" : "",
"OPZB" : "PZH",
"KVNC" : "",
"KVNW" : "VNW",
"FA54" : "",
"KVNY" : "VNY",
"DHGU" : "DHG",
"UITT" : "UKX",
"VNCG" : "BDP",
"VNCJ" : "HRJ",
"KRBD" : "RBD",
"KGFK" : "GFK",
"KGFL" : "GFL",
"NTHE" : "AHE",
"KRBM" : "RBM",
"KRBL" : "",
"SMSI" : "",
"SMSK" : "",
"SMSM" : "",
"FBSK" : "GBE",
"FBSW" : "SWX",
"FBSP" : "PKW",
"SYGO" : "OGL",
"LQTZ" : "",
"YLLE" : "",
"TBPB" : "BGI",
"FVKB" : "KAB",
"VAKE" : "IXY",
"VAKJ" : "HJR",
"ESKV" : "",
"VAKS" : "IXK",
"ESKK" : "KSK",
"ESKM" : "MXX",
"ESKN" : "NYO",
"ESKB" : "",
"CN-0" : "",
"KFLO" : "FLO",
"UAAT" : "TDK",
"KFLL" : "FLL",
"KFLG" : "FLG",
"KFLD" : "FLD",
"LKBE" : "",
"LFNH" : "",
"KFLV" : "FLV",
"LKBU" : "",
"UAAH" : "",
"LFNB" : "MEN",
"LGMK" : "JMK",
"SVBM" : "BRM",
"KTXK" : "TXK",
"KIXD" : "JCI",
"SNRU" : "CAU",
"SPRU" : "TRU",
"ZMAT" : "LTI",
"ZMAH" : "AVK",
"FRID" : "",
"SCFX" : "",
"SCFT" : "",
"OJJR" : "",
"ORKK" : "KIK",
"SCFM" : "",
"MNSI" : "SIU",
"SCFA" : "ANF",
"MNSC" : "",
"VVPB" : "",
"VVPC" : "UIH",
"KMOD" : "MOD",
"VVPK" : "PXU",
"VVPQ" : "",
"VVPR" : "PHA",
"WMBT" : "TOD",
"SATR" : "",
"CYJN" : "YJN",
"SATU" : "",
"ACRT" : "",
"CYJM" : "YJM",
"KNFL" : "NFL",
"RKSO" : "OSN",
"RCKU" : "CYI",
"RCKW" : "HCN",
"RCKH" : "KHH",
"KNFW" : "",
"PALJ" : "PTA",
"SATD" : "",
"SATG" : "",
"CYJQ" : "ZEL",
"CYJT" : "YJT",
"SATO" : "",
"UEEE" : "YKS",
"PARC" : "ARC",
"HSNN" : "UYL",
"KQA" : "KQA",
"PARS" : "RSH",
"PARY" : "RBY",
"KMOT" : "MOT",
"MWCR" : "GCM",
"MWCL" : "LYB",
"SFAL" : "PSY",
"MWCB" : "CYB",
"EGXD" : "",
"EGXE" : "",
"EGXG" : "",
"EGXC" : "QCY",
"EGXH" : "BEQ",
"EGXJ" : "",
"FYMO" : "OKU",
"EGXT" : "",
"EGXU" : "",
"EGXW" : "WTN",
"EGXP" : "",
"BOST" : "",
"EGXZ" : "",
"OTBD" : "",
"OTHH" : "DOH",
"EDIU" : "HDB",
"CYLW" : "YLW",
"SLLP" : "LPB",
"EPMB" : "",
"EPML" : "",
"EPMO" : "WMI",
"EPMI" : "",
"SPJR" : "CJA",
"LGEL" : "",
"DNBE" : "BNI",
"YPLC" : "PLO",
"SPJL" : "JUL",
"SPJJ" : "",
"SPJI" : "JJI",
"PHSF" : "BSF",
"SAVJ" : "IGB",
"HADM" : "DBM",
"ZULS" : "LXA",
"OEPJ" : "",
"ZULZ" : "LZO",
"PHSM" : "",
"NCPY" : "PYE",
"OEPF" : "",
"UWWW" : "KBY",
"YSHT" : "SHT",
"SAVE" : "EQS",
"FMNX" : "WMA",
"FMNZ" : "",
"LTCU" : "BGG",
"CYLL" : "YLL",
"FMNW" : "WAI",
"FMNV" : "VOH",
"FMNQ" : "BPY",
"LTCP" : "ADF",
"FMNS" : "SVB",
"FMNR" : "WMN",
"FMNM" : "MJN",
"FMNL" : "HVA",
"FMNO" : "DWB",
"FMNN" : "NOS",
"LTCI" : "VAN",
"FMNH" : "ANM",
"LTCK" : "MSR",
"FMNJ" : "IVA",
"FMNE" : "AMB",
"LTCD" : "ERC",
"FMNG" : "WPB",
"LTCF" : "KSY",
"FMNA" : "DIE",
"FMNC" : "WMR",
"FAHV" : "",
"FAHS" : "HDS",
"FAHR" : "",
"LUKK" : "KIV",
"HZLT" : "",
"GRND" : "",
"FAHA" : "",
"MMRX" : "REX",
"ZYHB" : "HRB",
"ZYHE" : "HEK",
"KPGV" : "PGV",
"KPGA" : "PGA",
"KPGD" : "PGD",
"KMDT" : "MDT",
"KMDW" : "MDW",
"BAMB" : "BAM",
"EHMZ" : "",
"CZSW" : "ZSW",
"CZSJ" : "ZSJ",
"CZSN" : "XSI",
"ANDR" : "",
"PAMY" : "MYU",
"PAMR" : "MRI",
"PAMO" : "MOU",
"PAML" : "MLY",
"PAMM" : "MTM",
"UTKN" : "NMA",
"PAMK" : "SMK",
"AGKG" : "KGE",
"UTKA" : "AZN",
"UTKF" : "FEG",
"PAMC" : "MCG",
"UB13" : "",
"EGES" : "NDY",
"EGER" : "SOY",
"HKMZ" : "",
"EGEP" : "PPW",
"EGEW" : "WRY",
"DAUT" : "TMX",
"DAUU" : "OGX",
"DAUZ" : "IAM",
"EGEY" : "CSA",
"DAUB" : "BSK",
"DAUA" : "AZR",
"HKML" : "MYD",
"DAUG" : "GHA",
"KEAU" : "EAU",
"KEAT" : "EAT",
"OABN" : "BIN",
"DAUK" : "TGR",
"DAUH" : "HME",
"DAUI" : "INZ",
"EGEO" : "OBN",
"DAUO" : "ELU",
"DAUL" : "LOO",
"VDBG" : "BBM",
"KLCH" : "LCH",
"KLCK" : "LCK",
"PMDY" : "MDY",
"ESPA" : "LLA",
"ESPE" : "",
"DXNG" : "LRL",
"LSTA" : "",
"KLCQ" : "LCQ",
"USKK" : "KVX",
"EKAH" : "AAR",
"CYPW" : "YPW",
"YJAB" : "JAB",
"KJYO" : "JYO",
"RJER" : "RIS",
"RJEC" : "AKJ",
"RJEB" : "MBE",
"RJEO" : "OIR",
"KSOP" : "SOP",
"GVBA" : "BVC",
"VNNG" : "KEP",
"SBUA" : "",
"LPTN" : "",
"KOCF" : "OCF",
"FAPP" : "",
"VEAZ" : "AJL",
"VEAT" : "IXA",
"PGRO" : "ROP",
"EFSE" : "",
"VEAN" : "",
"FYOG" : "OMD",
"YMDG" : "DGE",
"KMYV" : "MYV",
"SBUP" : "",
"FAPE" : "PLZ",
"KMYL" : "MYL",
"1AZ0" : "",
"VNMG" : "MEY",
"VNMA" : "NGX",
"KRDR" : "RDR",
"KRDU" : "RDU",
"WIHH" : "HLP",
"2XS8" : "",
"WIHL" : "CXP",
"WIHP" : "PCB",
"KRDG" : "RDG",
"KRDD" : "RDD",
"KRDM" : "RDM",
"SMMT" : "",
"LEFM" : "",
"VQPR" : "PBH",
"ROMY" : "MMY",
"KGDV" : "GDV",
"VTUI" : "SNO",
"ROMD" : "MMD",
"SMMO" : "",
"PAEL" : "ELV",
"PAEM" : "EMK",
"KNID" : "NID",
"LZKZ" : "KSC",
"SDBK" : "QCJ",
"PAEH" : "EHM",
"CLA4" : "",
"VTUN" : "",
"RPUW" : "MRQ",
"EKBI" : "BLL",
"CYCN" : "YCN",
"EDNX" : "",
"SKEB" : "EBG",
"WASO" : "BXB",
"YPSH" : "PEA",
"KFNT" : "FNT",
"LFLX" : "CHR",
"LFLY" : "LYN",
"YHBA" : "HVB",
"KAZO" : "AZO",
"MRSG" : "",
"LFLS" : "GNB",
"LFLP" : "NCY",
"LELN" : "LEN",
"LFLV" : "VHY",
"LFLW" : "AUR",
"LFLT" : "MCU",
"LFLU" : "VAF",
"LFLJ" : "CVF",
"LFLH" : "XCD",
"LFLI" : "QNJ",
"LFLN" : "",
"LFLO" : "RNE",
"MRSV" : "",
"LFLM" : "QNX",
"LFLB" : "CMF",
"KFNL" : "FNL",
"LFLA" : "AUF",
"LFLD" : "BOU",
"LFLE" : "",
"YGKL" : "GKL",
"COA1" : "",
"KTVL" : "TVL",
"KTVI" : "TVI",
"KTVF" : "TVF",
"KTVC" : "TVC",
"KIZG" : "IZG",
"UKHP" : "PLV",
"UKHH" : "HRK",
"PNMP" : "",
"FSFA" : "",
"EIDW" : "DUB",
"ZMCD" : "COQ",
"YDMG" : "DMD",
"EBNM" : "",
"CBFD" : "",
"SCDA" : "IQQ",
"ORER" : "EBL",
"VVRG" : "VKG",
"CYLS" : "YLS",
"CYLT" : "YLT",
"CYLU" : "XGR",
"SAVY" : "PMY",
"SAVV" : "VDM",
"SAVT" : "REL",
"SAVR" : "ARR",
"SAVS" : "SGV",
"OEPS" : "",
"SAVN" : "OES",
"CYLA" : "YPJ",
"SFL" : "Cape Verde",
"CYLC" : "YLC",
"CYLD" : "YLD",
"SAVH" : "LHS",
"CYLH" : "YLH",
"CYLJ" : "YLJ",
"CYLK" : "YSG",
"SAVB" : "EHL",
"SAVC" : "CRD",
"OEPC" : "",
"PAPM" : "PTU",
"KBBG" : "BKG",
"PAPN" : "PIP",
"PAPH" : "PTH",
"PAPE" : "KPV",
"PAPG" : "PSG",
"PAPC" : "KPC",
"PAPB" : "PBV",
"SKVG" : "VGZ",
"HSPN" : "PZU",
"PAPR" : "PPC",
"UNBG" : "RGK",
"UNBB" : "BAX",
"FYOO" : "OKF",
"VYHL" : "",
"VYHH" : "HEH",
"ENBO" : "BOO",
"ENBN" : "BNN",
"ENBM" : "",
"FYOA" : "OND",
"ENBV" : "BVG",
"ENBS" : "BJF",
"ENBR" : "BGO",
"TAPH" : "BBQ",
"VLXK" : "XKH",
"UOTT" : "",
"EDKZ" : "",
"FAEL" : "ELS",
"EDKV" : "",
"EDKN" : "",
"UNBI" : "",
"EDKL" : "",
"YAUR" : "AUU",
"EPOK" : "QYD",
"EDKA" : "AAH",
"SVTM" : "",
"SPHY" : "ANS",
"SPHZ" : "ATA",
"SVTC" : "TUV",
"SPHO" : "AYP",
"SPHI" : "CIX",
"ZUNZ" : "LZY",
"LFGF" : "",
"LFGC" : "",
"ZUNC" : "NAO",
"OEJB" : "",
"KIGQ" : "IGQ",
"OEJF" : "",
"MMPN" : "UPN",
"MMPG" : "PDS",
"KRNM" : "RNM",
"MMPE" : "PPE",
"MMPB" : "PBC",
"CZUM" : "ZUM",
"FAJS" : "JNB",
"MMPA" : "PAZ",
"MMPR" : "PVR",
"KCVN" : "CVN",
"WMKM" : "MKZ",
"ZYJM" : "JMU",
"ZYJX" : "JXA",
"ZYJZ" : "JNZ",
"YDPO" : "DPO",
"ETSB" : "",
"NZHN" : "HLZ",
"ETSA" : "",
"ETSF" : "FEL",
"KMFD" : "MFD",
"KMFE" : "MFE",
"ETSH" : "",
"ETSI" : "",
"ETSN" : "",
"GLRB" : "ROB",
"ETSL" : "",
"KMFR" : "MFR",
"VOCP" : "CDP",
"ETSP" : "",
"NZHS" : "",
"CAD4" : "YZZ",
"VGBR" : "BZL",
"KE63" : "E63",
"SEGZ" : "",
"SEGU" : "GYE",
"SEGS" : "GPS",
"GFKE" : "KEN",
"NYPA" : "",
"KCDK" : "CDK",
"KHAO" : "",
"EGGP" : "LPL",
"EGGW" : "LTN",
"KPEQ" : "PEQ",
"KECP" : "ECP",
"EGGD" : "BRS",
"HKOK" : "",
"GOSS" : "XLS",
"KLAW" : "LAW",
"KLAR" : "LAR",
"KLAS" : "LAS",
"KLAX" : "LAX",
"KLAA" : "LAA",
"KLAN" : "LAN",
"MUHG" : "HOG",
"KLAL" : "LAL",
"KLAM" : "LAM",
"GOSM" : "MAX",
"MUHA" : "HAV",
"EKCH" : "CPH",
"USUU" : "KRO",
"UUDL" : "IAR",
"KFUL" : "FUL",
"UUDG" : "",
"UUDD" : "DME",
"GAKY" : "KYS",
"LIMZ" : "CUF",
"OIYY" : "AZD",
"KSMQ" : "",
"LIMS" : "QPZ",
"LIMP" : "PMF",
"KSMX" : "SMX",
"LIMJ" : "GOA",
"KSMD" : "SMD",
"KSMF" : "SMF",
"LIMN" : "",
"LIML" : "LIN",
"LIMB" : "",
"LIMC" : "MXP",
"RJGG" : "NGO",
"LIMA" : "",
"LIMF" : "TRN",
"LIMG" : "ALL",
"LIME" : "BGY",
"VIGR" : "GWL",
"VIGG" : "DHM",
"LFRM" : "LME",
"LPVZ" : "",
"LPVR" : "VRL",
"MYIG" : "IGA",
"LKVO" : "",
"HLFL" : "",
"NULL" : "MYF",
"KMQI" : "MQI",
"KOAK" : "OAK",
"KOAJ" : "OAJ",
"KOAR" : "OAR",
"DSA" : "United Kingdom",
"SLSU" : "SRE",
"ETNP" : "",
"ETNS" : "",
"ETNU" : "",
"VVDN" : "DAD",
"ETNW" : "",
"MMPC" : "PCA",
"ETNH" : "",
"ETNJ" : "",
"ETNL" : "RLG",
"OPFA" : "LYP",
"ETNN" : "",
"ETND" : "",
"ETNG" : "GKE",
"CYQG" : "YQG",
"CYQF" : "YQF",
"CYQD" : "YQD",
"PPIZ" : "PIZ",
"CYQA" : "YQA",
"CYQN" : "YQN",
"PHHN" : "HNM",
"CYQL" : "YQL",
"MGBN" : "",
"PHHI" : "HHI",
"CYQI" : "YQI",
"CYQH" : "YQH",
"CYQW" : "YQW",
"CYQV" : "YQV",
"CYQU" : "YQU",
"CYQT" : "YQT",
"CYQR" : "YQR",
"CYQQ" : "YQQ",
"CYQZ" : "YQZ",
"CYQY" : "YQY",
"CYQX" : "YQX",
"ZWSH" : "KHG",
"YBRK" : "ROK",
"ZBUL" : "HLH",
"KRFD" : "RFD",
"ZBUH" : "WUA",
"MMPS" : "PXM",
"LEDA" : "ILD",
"KGJT" : "GJT",
"YLHR" : "IRG",
"VECC" : "CCU",
"VECO" : "COH",
"YLHI" : "LDH",
"LZIB" : "BTS",
"HEGR" : "EGR",
"OSLK" : "LTK",
"ZGSZ" : "SZX",
"BGBW" : "UAK",
"ZGSD" : "ZUH",
"KFHR" : "FRD",
"KFHU" : "FHU",
"MRQP" : "XQP",
"SYKA" : "KAI",
"SYKM" : "KAR",
"ZUUU" : "CTU",
"KTTD" : "TTD",
"URRR" : "ROV",
"KTTN" : "TTN",
"URRT" : "",
"YBKE" : "BRK",
"RCGM" : "",
"FSDR" : "DES",
"RCGI" : "GNI",
"DGLE" : "TML",
"DGLW" : "",
"ETSE" : "",
"NSTU" : "PPG",
"KMFI" : "MFI",
"LFJL" : "ETZ",
"VOHY" : "HYD",
"LFJR" : "ANE",
"CYZY" : "YZY",
"PHUP" : "UPP",
"KCNY" : "CNY",
"OERR" : "RAE",
"KNBC" : "BFT",
"KHRT" : "HRT",
"KNBG" : "NBG",
"KHRO" : "HRO",
"OERK" : "RUH",
"CYNL" : "YNL",
"KHRL" : "HRL",
"CYNJ" : "",
"KCNI" : "",
"OERM" : "",
"OERB" : "",
"CYND" : "YND",
"CYNE" : "YNE",
"OERF" : "RAH",
"CYNC" : "YNC",
"CYNA" : "YNA",
"EDMA" : "AGB",
"EDMB" : "",
"EDME" : "",
"KBDE" : "BDE",
"PAVL" : "KVL",
"PAVC" : "KVC",
"OPMF" : "MFG",
"PAVA" : "VAK",
"EDML" : "",
"KBDN" : "",
"PAVE" : "VEE",
"KBDL" : "BDL",
"KBDR" : "BDR",
"EDMS" : "",
"EDMT" : "",
"EDMV" : "",
"KOKB" : "OKB",
"WABI" : "NBX",
"WABB" : "BIK",
"WABP" : "TIM",
"SCBA" : "BBA",
"PRTV" : "",
"HAJM" : "JIM",
"TRPG" : "MNI",
"MNWP" : "WSP",
"SCBQ" : "",
"KZUN" : "ZUN",
"EPIR" : "",
"OPMT" : "MUX",
"SVVP" : "VDP",
"SVVG" : "VIG",
"SVVA" : "VLN",
"SPNC" : "HUU",
"SVVL" : "VLV",
"ZSZS" : "HSN",
"KSVN" : "SVN",
"KSVH" : "SVH",
"UWKS" : "CSY",
"UWKJ" : "JOK",
"UWKE" : "NBC",
"UWKD" : "KZN",
"UWKB" : "UUA",
"SBAT" : "AFL",
"SBAU" : "ARU",
"FALY" : "LAY",
"SBAQ" : "AQA",
"SBAR" : "AJU",
"SBAS" : "",
"FALW" : "",
"CZGR" : "ZGR",
"SBAF" : "",
"MMNG" : "NOG",
"EFSI" : "SJY",
"FALM" : "",
"MMNL" : "NLD",
"SBAM" : "",
"FALA" : "HLA",
"EFSA" : "SVL",
"ZYTX" : "SHE",
"LFML" : "MRS",
"ZYTL" : "DLC",
"ETUO" : "GUT",
"ENHF" : "HFT",
"ENHA" : "HMR",
"WSPT" : "",
"NFNR" : "RTA",
"NFNS" : "SVU",
"SEAT" : "",
"NFNK" : "LKB",
"NFNO" : "KXF",
"NFNL" : "LBS",
"NFNM" : "TVU",
"NFNB" : "LEV",
"NFNA" : "SUV",
"NFNG" : "NGI",
"AGGE" : "BAS",
"AGGF" : "FRE",
"AGGA" : "AKS",
"VRMV" : "VAM",
"AGGC" : "CHY",
"AGGM" : "MUA",
"AGGL" : "SCZ",
"AGGO" : "MNY",
"AGGN" : "GZO",
"AGGI" : "MBU",
"AGGH" : "HIR",
"AGGK" : "IRA",
"AGGU" : "RUS",
"AGGT" : "NNB",
"AGGV" : "VAO",
"VRMG" : "GAN",
"AGGS" : "EGM",
"AGGR" : "RNL",
"VRMK" : "KDO",
"VRMH" : "HAQ",
"AGGY" : "XYA",
"VRMM" : "MLE",
"KPKB" : "PKB",
"VI22" : "",
"HKAM" : "ASV",
"HTSN" : "SEU",
"HTSY" : "SHY",
"KLHV" : "LHV",
"LBSZ" : "",
"ZHHH" : "WUH",
"SAWR" : "GGS",
"KLOZ" : "LOZ",
"KLOT" : "LOT",
"KLOU" : "LOU",
"LSPV" : "",
"NY94" : "",
"KLOM" : "BBX",
"LSPO" : "",
"LSPN" : "",
"LSPM" : "",
"LSPA" : "",
"UUBP" : "BZK",
"KAAF" : "AAF",
"KFWA" : "FWA",
"UUBW" : "",
"PFTO" : "6K8",
"KFWL" : "RWL",
"UUBA" : "",
"UUBB" : "BKA",
"UUBD" : "KMW",
"KAAP" : "AAP",
"UUBI" : "IWA",
"UUBK" : "RYB",
"RPXE" : "MPH",
"KSKY" : "SKY",
"RJAW" : "IWO",
"KSKA" : "SKA",
"RJAH" : "IBR",
"KSKF" : "SKF",
"RJAM" : "",
"RJAA" : "NRT",
"RJAF" : "MMJ",
"FNXA" : "XGN",
"SAWB" : "",
"SAWE" : "RGA",
"MARO" : "MOS",
"MARK" : "OAL",
"KDYS" : "DYS",
"LCPH" : "PFO",
"LPPD" : "PDL",
"OYSN" : "SAH",
"LPPI" : "PIX",
"LPPM" : "",
"OYSY" : "GXF",
"LPPR" : "OPO",
"LPPS" : "PXO",
"LPPT" : "LIS",
"OYSQ" : "SCT",
"EDVC" : "",
"YMHB" : "HBA",
"YQLP" : "ULP",
"OPDI" : "DSK",
"KECG" : "ECG",
"LLBS" : "BEV",
"OPDG" : "DEA",
"OPDB" : "DBA",
"CYSM" : "YSM",
"CYSN" : "YCM",
"UUYW" : "VKT",
"SAMM" : "LGS",
"CYSJ" : "YSJ",
"CYSD" : "YSD",
"PHJH" : "JHM",
"CYSF" : "YSF",
"CYSC" : "YSC",
"CYSB" : "YSB",
"PHJR" : "",
"CYSY" : "YSY",
"CYSU" : "YSU",
"SAMR" : "AFA",
"CYSP" : "YSP",
"CYSR" : "YSR",
"WITT" : "BTJ",
"DIYO" : "ASK",
"WITB" : "SBG",
"WITL" : "",
"WITM" : "LSW",
"FOGQ" : "OKN",
"FOGR" : "LBQ",
"0WI8" : "",
"YBPN" : "PPP",
"YBPI" : "BMP",
"LEJR" : "XRY",
"RAI" : "<NAME>",
"ROAH" : "OKA",
"LLRM" : "",
"FOGM" : "MJL",
"FOGO" : "OYE",
"LLRD" : "",
"FOGK" : "KOU",
"VEMN" : "MOH",
"VEMZ" : "",
"VEMR" : "DMU",
"SDNY" : "QNV",
"SDNS" : "",
"SVMD" : "MRD",
"SVMG" : "PMV",
"IFP" : "IFP",
"SVMC" : "MAR",
"UKDD" : "DNK",
"UKDE" : "OZH",
"SVMI" : "CCS",
"SVMT" : "MUN",
"VAAH" : "AMD",
"VAAK" : "AKD",
"UKDR" : "KWG",
"MROC" : "SJO",
"FASZ" : "SZK",
"SYMD" : "MHA",
"FASX" : "",
"ZUWX" : "WXN",
"KTRK" : "TKF",
"KTRI" : "TRI",
"SBTL" : "",
"KTRM" : "TRM",
"FASS" : "SIS",
"FKKD" : "DLA",
"OETB" : "TUU",
"OETF" : "TIF",
"FKKC" : "TKC",
"OETH" : "",
"FKKM" : "FOM",
"FKKN" : "NGE",
"KHPN" : "HPN",
"OETN" : "",
"FKKU" : "BFX",
"OETR" : "TUI",
"FKKR" : "GOU",
"FKKY" : "YAO",
"ORAT" : "",
"MNPC" : "PUZ",
"ORAA" : "",
"LFHO" : "OBS",
"EHTE" : "",
"LFHM" : "MVV",
"VVNT" : "NHA",
"EHTL" : "",
"VVNB" : "HAN",
"FASB" : "SBU",
"LFHY" : "XMU",
"LFHV" : "XVF",
"SWST" : "STZ",
"LFHS" : "XBK",
"LFHP" : "LPY",
"LFHQ" : "",
"KCLS" : "CLS",
"KCLW" : "CLW",
"KCLT" : "CLT",
"RJTL" : "",
"FASD" : "",
"KCLL" : "CLL",
"KCLM" : "CLM",
"KCLE" : "CLE",
"PATQ" : "ATK",
"KDNL" : "DNL",
"KBFM" : "BFM",
"ENAT" : "ALF",
"EDOI" : "",
"RJTH" : "HAC",
"EPKM" : "",
"KBFD" : "BFD",
"KBFF" : "BFF",
"EPKT" : "KTW",
"PATE" : "TLA",
"EPKS" : "",
"PATG" : "TOG",
"EDOV" : "ZSN",
"KBFP" : "BFP",
"PATK" : "TKA",
"KBFT" : "BFT",
"EDOP" : "SZW",
"RJTT" : "HND",
"VYLS" : "LSH",
"ENFL" : "FRO",
"ENFB" : "FBU",
"VYLY" : "",
"ENFG" : "VDB",
"VYLK" : "LIW",
"FYSM" : "SWP",
"UT25" : "",
"HSTR" : "",
"SVPR" : "PZO",
"MUSN" : "",
"SVPT" : "",
"MUSC" : "SNU",
"MUSA" : "",
"SVPC" : "PBL",
"SVPA" : "PYH",
"MUSS" : "",
"SVPM" : "",
"SPLO" : "",
"SPLP" : "",
"ZSXZ" : "XUZ",
"OEPA" : "AQI",
"KSTC" : "STC",
"KSTE" : "STE",
"KSTJ" : "STJ",
"KSTK" : "STK",
"KSTL" : "STL",
"KSTS" : "STS",
"FZQM" : "KWZ",
"VTUJ" : "",
"FZQA" : "FBM",
"WPEC" : "",
"KCWA" : "CWA",
"LLIB" : "RPN",
"SBCR" : "CMG",
"SBCP" : "CAW",
"SBCV" : "CRQ",
"SBCT" : "CWB",
"SBCZ" : "CZS",
"WBTM" : "TGC",
"SBCY" : "CGB",
"SBCB" : "CFB",
"SBCC" : "",
"SBCA" : "CAC",
"SBCF" : "CNF",
"SBCG" : "CGR",
"SBCD" : "CFC",
"SBCJ" : "CKS",
"SBCH" : "XAP",
"SBCI" : "CLN",
"SBCN" : "CLV",
"SBCO" : "",
"MMLM" : "LMM",
"TJCG" : "VQS",
"NONE" : "TSS",
"SYTC" : "",
"UNTT" : "TOF",
"FANS" : "NLP",
"TJCP" : "CPX",
"FANY" : "",
"SBIL" : "IOS",
"KMBS" : "MBS",
"NZLX" : "ALR",
"EDBT" : "",
"KGAD" : "GAD",
"SWUY" : "",
"LIMW" : "AOT",
"KMBL" : "MBL",
"KSME" : "SME",
"NIAG" : "",
"KE25" : "E25",
"EBTN" : "",
"EGNB" : "",
"KSMO" : "SMO",
"KSMN" : "SMN",
"AGAF" : "AFT",
"07FA" : "OCA",
"SECM" : "",
"SECO" : "OCC",
"UMGG" : "GME",
"ESMG" : "",
"SECU" : "CUE",
"AGAT" : "ATD",
"UMGM" : "",
"AYVN" : "VAI",
"KPIM" : "PIM",
"KPIB" : "PIB",
"KPIA" : "PIA",
"KPIE" : "PIE",
"LWSK" : "SKP",
"OICK" : "KHD",
"KPIR" : "PIR",
"KPIT" : "PIT",
"KARB" : "ARB",
"CSU2" : "YKU",
"EFKI" : "KAJ",
"OAHR" : "HEA",
"ZHNY" : "NNY",
"UAOL" : "",
"KLMT" : "LMT",
"ESMT" : "HAD",
"KACK" : "ACK",
"KACJ" : "ACJ",
"KFFA" : "FFA",
"KACY" : "ACY",
"EGNX" : "EMA",
"KACT" : "ACT",
"GAGO" : "GAQ",
"KACV" : "ACV",
"MTPX" : "PAX",
"VCBI" : "CMB",
"MTPP" : "PAP",
"KSIK" : "SIK",
"RJCC" : "CTS",
"RJCB" : "OBO",
"KDWH" : "DWH",
"SSER" : "ERM",
"RJCH" : "HKD",
"RJCK" : "KUH",
"RJCJ" : "SPK",
"RJCM" : "MMB",
"KDWA" : "DWA",
"RJCO" : "OKD",
"RJCN" : "SHB",
"VIKO" : "KTU",
"RJCW" : "WKJ",
"VIKA" : "KNU",
"SWGI" : "GRP",
"EFYL" : "",
"VNKL" : "",
"WBKW" : "TWU",
"WBKT" : "KUD",
"WBKS" : "SDK",
"WBKL" : "LBU",
"WBKM" : "TMG",
"WBKK" : "BKI",
"WBKD" : "LDU",
"VNKT" : "KTM",
"KBTM" : "BTM",
"MPSA" : "",
"KOEB" : "OEB",
"MYMM" : "MYG",
"LCRA" : "AKT",
"NSMA" : "MXS",
"HLBG" : "",
"OPBW" : "BHV",
"KVVS" : "",
"YMND" : "MTL",
"OPBN" : "BNP",
"OPBG" : "",
"PHLI" : "LIH",
"SAOR" : "",
"SAOU" : "LUQ",
"CYUL" : "YUL",
"CYUB" : "YUB",
"SAOC" : "RCU",
"CYUX" : "YUX",
"SAOD" : "VDR",
"SAOM" : "",
"PHLU" : "LUP",
"CYUT" : "YUT",
"ZWWW" : "URC",
"KGNT" : "GNT",
"KGNV" : "GNV",
"YTEM" : "TEM",
"K4U9" : "4U9",
"YTEF" : "TEF",
"VYSW" : "AKY",
"VYST" : "",
"LZMC" : "",
"ZLJQ" : "JGN",
"ULLI" : "LED",
"MMCB" : "CVJ",
"BISI" : "SIJ",
"BISF" : "",
"UKFK" : "KHC",
"NBWK" : "",
"BGFH" : "JFR",
"UKFB" : "UKS",
"UKFF" : "SIP",
"ZGWZ" : "WUZ",
"SMKA" : "",
"SMKE" : "",
"LEHC" : "HSK",
"LRBG" : "",
"GUFA" : "FIG",
"SYOR" : "ORJ",
"ZUYB" : "YBP",
"KTPL" : "TPL",
"KTPA" : "TPA",
"KIPP" : "",
"KIPL" : "IPL",
"NZWO" : "",
"HEBL" : "ABS",
"HEBA" : "HBE",
"CNU8" : "NU8",
"OOKB" : "KHS",
"KHVN" : "HVN",
"KHVR" : "HVR",
"EINC" : "",
"EDJA" : "FMM",
"RCCM" : "CMJ",
"EINN" : "SNN",
"HSSW" : "WHF",
"YWYM" : "",
"YWYY" : "BWT",
"YDKI" : "DKI",
"LFVP" : "FSP",
"VOTX" : "",
"EHVK" : "",
"FLMF" : "MFU",
"NZQN" : "ZQN",
"VOTV" : "TRV",
"VOTP" : "TIR",
"VOTR" : "TRZ",
"VOTJ" : "",
"LFVM" : "MQC",
"EDQT" : "",
"KBXS" : "BXS",
"FZAM" : "MAT",
"EDQD" : "BYU",
"EDQE" : "",
"EDQF" : "",
"EDQM" : "HOQ",
"OOTH" : "TTH",
"KBXK" : "BXK",
"FZAJ" : "BOA",
"KDDC" : "DDC",
"ENDU" : "BDU",
"EGPT" : "PSL",
"EGPU" : "TRE",
"VYNS" : "",
"EGPW" : "UNT",
"VYNT" : "NYT",
"EGPR" : "BRR",
"EGPL" : "BEB",
"EGPM" : "SDZ",
"EGPN" : "DND",
"EGPO" : "SYY",
"EGPH" : "EDI",
"EGPI" : "ILY",
"EGPK" : "PIK",
"EGPD" : "ABZ",
"EGPE" : "INV",
"EGPF" : "GLA",
"EGPA" : "KOI",
"EGPB" : "LSI",
"EGPC" : "WIC",
"CYQM" : "YQM",
"CYQK" : "YQK",
"SVRS" : "LRV",
"SPBC" : "LHC",
"ZSFZ" : "FOC",
"ZSFY" : "FUG",
"DNJO" : "JOS",
"SPBR" : "",
"UHMH" : "",
"UJAP" : "UJE",
"KSRQ" : "SRQ",
"KSRR" : "SRR",
"KJBR" : "JBR",
"CNH2" : "",
"UHMI" : "",
"FZSA" : "KMN",
"UWOD" : "",
"UWOH" : "",
"UWOO" : "REN",
"UWOR" : "OSW",
"KWST" : "WST",
"SBES" : "",
"SBEK" : "",
"CZKE" : "ZKE",
"MMJA" : "JAL",
"SBEG" : "MAO",
"TJAB" : "ARE",
"KMLT" : "MLT",
"KMLU" : "MLU",
"NZNR" : "NPE",
"NZNS" : "NSN",
"NZNP" : "NPL",
"KMLS" : "MLS",
"KMLI" : "MLI",
"KMLJ" : "MLJ",
"KMLD" : "MLD",
"KMLB" : "MLB",
"KMLC" : "MLC",
"UIAA" : "HTA",
"KEUF" : "EUF",
"UIAR" : "",
"SDSC" : "QSC",
"SKBU" : "BUN",
"CCP4" : "YHA",
"LYBE" : "BEG",
"SEMO" : "",
"UTSS" : "SKD",
"KGSB" : "GSB",
"UTST" : "TMJ",
"SEMH" : "MCH",
"KGSO" : "GSO",
"SEMC" : "XMS",
"SEMA" : "",
"UTSB" : "BHK",
"UTSA" : "NVI",
"SEMX" : "",
"PTSA" : "KSA",
"SEMT" : "MEC",
"UTSN" : "AFS",
"UTSL" : "KSQ",
"CYMG" : "YMG",
"AYTK" : "RAB",
"YLST" : "LER",
"AYTB" : "TBG",
"AYTA" : "TIZ",
"KEYW" : "EYW",
"KPOB" : "POB",
"KPOC" : "POC",
"KPOE" : "POE",
"KPOF" : "POF",
"OAJL" : "JAA",
"LBWN" : "VAR",
"BING" : "",
"BINF" : "NOR",
"SKFL" : "FLA",
"HKEL" : "EDL",
"HKEM" : "",
"YSRI" : "RCM",
"YSRN" : "SRN",
"ZHLY" : "LYA",
"KFST" : "FST",
"NGTU" : "BBG",
"UARR" : "URA",
"EKYT" : "AAL",
"NGTM" : "TMN",
"KFSD" : "FSD",
"UHBB" : "BQS",
"KLKV" : "",
"NGTE" : "TBF",
"KFSM" : "FSM",
"NGTA" : "TRW",
"KFSI" : "FSI",
"GAA1" : "",
"KDUJ" : "DUJ",
"FNDU" : "DUE",
"MMEP" : "TPQ",
"MTRL" : "",
"BRKM" : "",
"KDUG" : "DUG",
"BRKV" : "",
"DFOO" : "BOY",
"SUPE" : "MDO",
"EISG" : "SXL",
"MYCI" : "CRI",
"MYCB" : "CAT",
"MYCA" : "ATC",
"LPLA" : "TER",
"KFCM" : "FCM",
"KVPS" : "VPS",
"GOOK" : "KLC",
"YMLT" : "LST",
"SWJI" : "JPR",
"USSS" : "SVX",
"KVPC" : "VPC",
"CYWP" : "YWP",
"MSLP" : "SAL",
"CYWY" : "YWY",
"SAAV" : "SFN",
"KUMP" : "UMP",
"SAAR" : "ROS",
"SAAP" : "PRA",
"CYWA" : "YWA",
"PHNG" : "NGF",
"SAAJ" : "",
"CYWG" : "YWG",
"SAAG" : "GHU",
"CYWH" : "YWH",
"CYWK" : "YWK",
"CYWJ" : "YWJ",
"SAAC" : "COC",
"CYWL" : "YWL",
"WIPP" : "PLM",
"WIPQ" : "PDO",
"WIPR" : "RGT",
"WIPT" : "PDG",
"WIPK" : "PGK",
"WIPL" : "BKS",
"WIPA" : "DJB",
"YBTI" : "BRT",
"YBTH" : "BHS",
"YBTL" : "TSV",
"ZBSH" : "SHP",
"ZBSJ" : "SJW",
"KGLS" : "GLS",
"ZBSN" : "TVS",
"KGLD" : "GLD",
"YBTR" : "BLT",
"FVTL" : "GWE",
"YTGM" : "XTG",
"VEIM" : "IMF",
"ZLHZ" : "HZG",
"SVIE" : "",
"VHSK" : "",
"VHST" : "",
"KTNT" : "TNT",
"KTNX" : "TNX",
"KIRK" : "IRK",
"KCHO" : "CHO",
"KHTL" : "HTL",
"KCHD" : "",
"KCHA" : "CHA",
"OEHL" : "HAS",
"KHTS" : "HTS",
"KCHS" : "CHS",
"KHII" : "HII",
"KHIO" : "HIO",
"VORY" : "RJA",
"LFTW" : "FNI",
"LFTU" : "FRJ",
"LFTZ" : "LTT",
"EDVU" : "",
"LKTB" : "BRQ",
"LFTF" : "",
"LFTH" : "TLN",
"NZSP" : "",
"FLCP" : "CIP",
"KNXX" : "NXX",
"KNXP" : "NXP",
"LTBY" : "AOE",
"EPGO" : "",
"EPGD" : "GDN",
"EDSB" : "FKB",
"KBZN" : "BZN",
"EPGR" : "",
"SLBJ" : "BJO",
"KPRC" : "PRC",
"HALA" : "AWA",
"MHUT" : "UII",
"HALL" : "LLI",
"ENJA" : "ZXB",
"FYWE" : "ERS",
"FYWB" : "WVB",
"HTDA" : "DAR",
"HTDO" : "DOD",
"FYWV" : "WDH",
"VLPS" : "PKZ",
"VLPV" : "",
"YAMB" : "",
"MLIP" : "MIJ",
"MBMC" : "MDS",
"ZSDY" : "DOY",
"KSPW" : "SPW",
"GMTT" : "TNG",
"KSPS" : "SPS",
"KSPZ" : "SPZ",
"KSPF" : "SPF",
"KSPG" : "SPG",
"KSPB" : "SPB",
"CJX7" : "YAB",
"GMTN" : "TTU",
"KSPI" : "SPI",
"HBBA" : "BJM",
"FZUA" : "KGA",
"FZUK" : "TSH",
"CNJ4" : "",
"VTPI" : "",
"CZMT" : "ZMT",
"CZML" : "ZML",
"EDCL" : "",
"CZMD" : "MSA",
"MMHC" : "TCN",
"FABR" : "",
"SBGO" : "GYN",
"SBGL" : "GIG",
"SBGM" : "",
"EDCP" : "PEF",
"FABE" : "BIY",
"SBGW" : "",
"SBGU" : "GPB",
"SBGR" : "GRU",
"SBGP" : "",
"FABB" : "",
"FABM" : "",
"FABL" : "BFN",
"EFUT" : "QVY",
"KMNM" : "MNM",
"RKTN" : "TAE",
"RKTH" : "KPO",
"VGZR" : "DAC",
"RKTY" : "YEC",
"EBFN" : "",
"RKTU" : "CJJ",
"KXNA" : "XNA",
"FAMM" : "MBD",
"ZWHM" : "HMI",
"KRSW" : "RSW",
"KRST" : "RST",
"NFTV" : "VAV",
"NFTP" : "NTT",
"LRTC" : "TCE",
"LRTM" : "TGM",
"NFTE" : "EUA",
"NFTF" : "TBU",
"LRTR" : "TSR",
"NFTL" : "HPA",
"KGQQ" : "GQQ",
"KPMP" : "PMP",
"LRT2" : "CIO",
"KPMD" : "PMD",
"KPMB" : "PMB",
"VETZ" : "TEZ",
"BIHN" : "HFN",
"HKGX" : "",
"HKGT" : "",
"OATN" : "TII",
"BIHU" : "HZK",
"HKGA" : "GAS",
"YSTW" : "TMW",
"SURV" : "RVY",
"KAGS" : "AGS",
"MBPV" : "PLS",
"KAGC" : "AGC",
"HDAM" : "JIB",
"KLIT" : "LIT",
"RJOT" : "TAK",
"RJOW" : "IWJ",
"RJOS" : "TKS",
"RJOR" : "TTJ",
"TNCS" : "SAB",
"RJOY" : "",
"RJOZ" : "",
"RJOE" : "",
"RJOF" : "",
"RJOA" : "HIJ",
"RJOC" : "IZO",
"RJOB" : "OKJ",
"RJOM" : "MYJ",
"TNCB" : "BON",
"RJOO" : "ITM",
"RJOI" : "",
"RJOH" : "YGJ",
"RJOK" : "KCZ",
"NGAB" : "ABF",
"SACO" : "COR",
"YHML" : "HLT",
"SACT" : "",
"MYAT" : "TCB",
"EYPP" : "PNV",
"MYAP" : "AXP",
"MYAS" : "",
"MYAF" : "ASD",
"MYAB" : "",
"MYAM" : "MHH",
"MYAN" : "SAQ",
"MYAK" : "COX",
"KP52" : "P52",
"SWHT" : "HUW",
"KVRB" : "VRB",
"CYYZ" : "YYZ",
"CYYY" : "YYY",
"OPNH" : "WNS",
"CYYW" : "YYW",
"CYYU" : "YYU",
"CYYT" : "YYT",
"CYYR" : "YYR",
"CYYQ" : "YYQ",
"CYYN" : "YYN",
"CYYL" : "YYL",
"CYYJ" : "YYJ",
"CYYH" : "YYH",
"CYYG" : "YYG",
"CYYF" : "YYF",
"CYYE" : "YYE",
"CYYD" : "YYD",
"CYYC" : "YYC",
"CYYB" : "YYB",
"DISP" : "SPY",
"FTTY" : "FYT",
"KMPT" : "",
"FTTD" : "MQQ",
"FTTC" : "AEH",
"FTTA" : "SRH",
"FTTN" : "AMC",
"FTTJ" : "NDJ",
"OTBH" : "IUD",
"YIVL" : "IVR",
"EAR" : "EAR",
"VEKU" : "IXS",
"VEKR" : "IXH",
"OSDZ" : "DEZ",
"PHBG" : "",
"OSDI" : "DAM",
"PGUA" : "UAM",
"KITH" : "ITH",
"BGJN" : "JAV",
"SSFB" : "FBE",
"MGSJ" : "",
"BGJH" : "JJU",
"FNCA" : "CAB",
"OIZI" : "",
"UKBB" : "KBP",
"VAGN" : "",
"VAGO" : "GOI",
"ZGKL" : "KWL",
"LELL" : "QSA",
"LELO" : "RJL",
"LLHS" : "",
"LELC" : "MJV",
"LLHA" : "HFA",
"YXCM" : "DEE",
"LILY" : "",
"FLKE" : "",
"KTLH" : "TLH",
"DTTR" : "EBM",
"KCVS" : "CVS",
"DTTX" : "SFA",
"DTTZ" : "TOE",
"KNZJ" : "NZJ",
"KCVX" : "CVX",
"DGTK" : "TKD",
"DTTA" : "TUN",
"DTTB" : "",
"KCVG" : "CVG",
"DTTD" : "",
"DTTF" : "GAF",
"DTTG" : "GAE",
"KNZY" : "NZY",
"DTTI" : "",
"DTTJ" : "DJE",
"KCVO" : "CVO",
"OEJN" : "JED",
"FAYP" : "",
"NZUK" : "",
"LFRZ" : "SNR",
"GLMR" : "MLW",
"MRIA" : "PBP",
"LFRQ" : "UIP",
"LFRS" : "NTE",
"LFRT" : "SBK",
"LFRU" : "MXN",
"LFRV" : "VNE",
"LFRH" : "LRT",
"LFRI" : "EDM",
"LFRJ" : "",
"LFRK" : "CFR",
"LFRL" : "",
"EHRD" : "RTM",
"LFRN" : "RNS",
"LFRO" : "LAI",
"LKVM" : "",
"LFRB" : "BES",
"LFRC" : "CER",
"LFRD" : "DNR",
"LFRE" : "",
"LFRF" : "GFR",
"LFRG" : "DOL",
"VVDL" : "DLI",
"VOPC" : "PNY",
"VOPB" : "IXZ",
"SBYA" : "",
"CAQ3" : "CXH",
"VVDB" : "DIN",
"PSEA" : "",
"EDUS" : "",
"EDUW" : "",
"SETE" : "",
"SETA" : "",
"SETM" : "LOH",
"SETN" : "ESM",
"SETI" : "TPN",
"SETU" : "TUA",
"SETR" : "TPC",
"KMIV" : "MIV",
"HABE" : "BEI",
"HABD" : "BJR",
"HABC" : "BCO",
"ENHD" : "HAU",
"EGTB" : "",
"EGTC" : "",
"VLVT" : "VTE",
"EGTE" : "EXT",
"EGTF" : "",
"EGTG" : "FZO",
"EGTK" : "OXF",
"ENHK" : "HAA",
"DAFH" : "HRM",
"EGTR" : "",
"ENHV" : "HVG",
"KCNW" : "CNW",
"YSII" : "SBR",
"OERT" : "",
"CYIK" : "YIK",
"18AZ" : "",
"CPV7" : "YHP",
"PFCB" : "NCN",
"MZ10" : "SPR",
"KCNM" : "CNM",
"UUYY" : "SCW",
"UUYX" : "",
"CPV8" : "KEW",
"CYNM" : "YNM",
"PACE" : "CEM",
"KJFK" : "JFK",
"FHAW" : "",
"RJTE" : "",
"OING" : "GBT",
"OINE" : "",
"FZWA" : "MJM",
"KDNN" : "DNN",
"RJTO" : "OIM",
"OINN" : "NSH",
"RJTJ" : "",
"RJTK" : "",
"OINR" : "RZR",
"KDNV" : "DNV",
"RJTQ" : "MYE",
"RJTR" : "",
"OINZ" : "SRY",
"SSZR" : "SRA",
"RJTY" : "OKO",
"EHLE" : "",
"KWWD" : "WWD",
"EFKT" : "KTT",
"EFKU" : "KUO",
"MKKJ" : "POT",
"SBIH" : "",
"EFKS" : "KAO",
"SBIC" : "",
"EFKE" : "KEM",
"UNAA" : "ABA",
"EFKA" : "KAU",
"SBIZ" : "IMP",
"SBIT" : "",
"EFKM" : "",
"SBIP" : "IPN",
"FADN" : "DUR",
"EFKJ" : "",
"EFKK" : "KOK",
"EDMN" : "",
"PAVD" : "VDZ",
"KMHK" : "MHK",
"KMHR" : "MHR",
"KMHT" : "MHT",
"KMHV" : "MHV",
"FQQL" : "UEL",
"KBDU" : "WBU",
"ZWNL" : "NLT",
"LGPA" : "PAS",
"KGWO" : "GWO",
"LYNI" : "INI",
"EEKU" : "",
"NFVB" : "VBV",
"SEIB" : "",
"Q30" : "MJB",
"KRUT" : "RUT",
"EEKA" : "KDL",
"EEKE" : "URE",
"AYPY" : "POM",
"YKII" : "KNS",
"VEVZ" : "VTZ",
"HCMA" : "ALU",
"KEUG" : "EUG",
"SKBQ" : "BAQ",
"SKBS" : "BSC",
"LHKE" : "",
"LZZI" : "ILZ",
"SKBO" : "BOG",
"SKBC" : "",
"CCLN" : "",
"SKBG" : "BGA",
"CYRI" : "YRI",
"CYRM" : "YRM",
"KLWT" : "LWT",
"KAIZ" : "AIZ",
"KLWS" : "LWS",
"LSXB" : "",
"ESDF" : "RNB",
"KAIK" : "AIK",
"KLWA" : "LWA",
"KLWB" : "LWB",
"KLWC" : "LWC",
"KLWM" : "LWM",
"KAIA" : "AIA",
"FWKI" : "LLW",
"CYRT" : "YRT",
"FWKA" : "KGJ",
"YRNG" : "RAM",
"FWKG" : "",
"OISY" : "YES",
"EBZR" : "",
"SNMU" : "MVS",
"OISS" : "SYZ",
"OISR" : "",
"OISJ" : "",
"OISL" : "LRR",
"KDQH" : "DQH",
"EBZH" : "",
"OISF" : "",
"GQPP" : "NDB",
"GQPT" : "",
"EIWF" : "WAT",
"GQPA" : "ATR",
"SAEZ" : "EZE",
"EIWT" : "",
"EYVP" : "",
"LPHR" : "HOR",
"EYVI" : "VNO",
"MYGF" : "FPO",
"ESSZ" : "",
"KW04" : "W04",
"SWNA" : "NVP",
"LKMH" : "",
"HAJJ" : "JIJ",
"LKMT" : "OSR",
"MUNI" : "MUQ",
"WMSA" : "SZB",
"YFDF" : "KFE",
"PHBK" : "BKH",
"OPLA" : "LHE",
"VCRI" : "HRI",
"OPLH" : "",
"SWSI" : "OPS",
"ULBC" : "CEE",
"KUTA" : "UTM",
"HSAT" : "ATB",
"HSAW" : "",
"YBHI" : "BHQ",
"PAKF" : "KFP",
"WA77" : "",
"VYYY" : "RGN",
"KNKX" : "NKX",
"VYYE" : "XYE",
"ZLTS" : "THQ",
"SKCG" : "CTG",
"LOWS" : "SZG",
"SVEZ" : "",
"HELX" : "LXR",
"GBYD" : "BJL",
"HELC" : "",
"SVED" : "",
"LERS" : "REU",
"UUEE" : "SVO",
"LOWZ" : "",
"SPYL" : "TYL",
"YEML" : "EMD",
"FOOL" : "LBV",
"FOON" : "MVB",
"LERT" : "",
"FOOH" : "OMB",
"FOOK" : "MKU",
"FOOG" : "POG",
"FOOB" : "BMM",
"SMAF" : "",
"FOOT" : "TCH",
"LERL" : "CQM",
"ROYN" : "OGN",
"LERI" : "",
"GULB" : "LEK",
"FOOY" : "MYB",
"CHIR" : "",
"KNTD" : "NTD",
"KHHR" : "HHR",
"KCTB" : "CTB",
"KHHH" : "HHH",
"KCTJ" : "CTJ",
"KNTU" : "NTU",
"UA30" : "",
"LKPR" : "PRG",
"NZWS" : "WSZ",
"NZWR" : "WRE",
"NZWU" : "WAG",
"NZWT" : "WTZ",
"MRGF" : "GLF",
"GLCP" : "CPA",
"GRY1" : "",
"NZWB" : "BHE",
"NZWD" : "",
"NZWG" : "",
"LKPD" : "PED",
"MRGT" : "",
"NZWK" : "WHK",
"LKPO" : "PRV",
"LKPM" : "",
"NZWN" : "WLG",
"LFPG" : "CDG",
"LFPE" : "",
"LFPB" : "LBG",
"LFPC" : "CSF",
"LFPN" : "TNF",
"LFPO" : "ORY",
"LFPM" : "",
"LFPK" : "",
"LFPV" : "",
"LFPT" : "POX",
"LFPZ" : "",
"TGPY" : "GND",
"KFAY" : "FAY",
"VDSV" : "KOS",
"VDST" : "",
"EFJY" : "JYV",
"HKWJ" : "WJR",
"EDWF" : "",
"LMMG" : "GZM",
"EDWE" : "EME",
"EDWB" : "BRV",
"EDWC" : "",
"EDWO" : "",
"LMML" : "MLA",
"EDWM" : "",
"EDWJ" : "JUI",
"EDWK" : "",
"EDWH" : "",
"EDWI" : "WVN",
"EDWV" : "",
"EDWR" : "BMK",
"EDWS" : "NOE",
"KAST" : "AST",
"KASW" : "",
"EDWY" : "NRD",
"AYMO" : "MAS",
"UHPP" : "PKC",
"AYMH" : "HGU",
"KPVC" : "PVC",
"AYMD" : "MAG",
"KPVD" : "PVD",
"EFJO" : "JOE",
"BRU1" : "",
"AYMS" : "MIS",
"AYMR" : "MXH",
"EGVN" : "BZZ",
"EGVO" : "ODH",
"ENNA" : "LKL",
"ENNK" : "NVK",
"ENNO" : "NTB",
"EGCF" : "",
"ENNM" : "OSY",
"FAGC" : "GCJ",
"LFKX" : "MFX",
"KASE" : "ASE",
"LGSK" : "JSI",
"KUGN" : "UGN",
"LSGG" : "GVA",
"PFAK" : "AKI",
"LGSM" : "SMI",
"UUWW" : "VKO",
"LSGK" : "",
"LGSA" : "CHQ",
"LSGN" : "QNC",
"LSGL" : "",
"LSGR" : "",
"LSGS" : "SIR",
"LSGP" : "",
"LGSY" : "SKU",
"KFAF" : "FAF",
"LGSR" : "JTR",
"LGSP" : "",
"LSGY" : "",
"LGSV" : "",
"LGST" : "JSH",
"WATM" : "ARD",
"WATO" : "LBJ",
"KJHW" : "JHW",
"WATE" : "ENE",
"WATG" : "RTG",
"WATC" : "MOF",
"SWDM" : "DMT",
"WATT" : "KOE",
"OIHH" : "HDM",
"KDLL" : "DLL",
"MDSI" : "",
"MDSN" : "",
"KDLH" : "DLH",
"KDLF" : "DLF",
"EBEN" : "EBE",
"MDSD" : "SDQ",
"FZIA" : "FKI",
"OIHR" : "",
"MDST" : "STI",
"RSAU" : "",
"EFIV" : "IVL",
"MMDO" : "DGO",
"EFIT" : "",
"MMDA" : "CUA",
"SBKG" : "CPV",
"CZAC" : "ZAC",
"EFIK" : "",
"SBKP" : "VCP",
"FAFB" : "",
"EFIM" : "",
"USDD" : "SLY",
"YMUI" : "MYI",
"RKPS" : "HIN",
"RKPU" : "USN",
"OPWN" : "",
"FPST" : "TMS",
"RKPC" : "CJU",
"UIKK" : "",
"RKPD" : "",
"RKPK" : "PUS",
"YJLC" : "JCK",
"ZKPY" : "FNJ",
"FQSG" : "",
"KGUP" : "GUP",
"KGUS" : "GUS",
"KRWI" : "RWI",
"ZBDT" : "DAT",
"KGUC" : "GUC",
"SGPI" : "",
"GFGK" : "GBK",
"ZBDS" : "DSN",
"VEPH" : "",
"KNBU" : "NBU",
"FVMV" : "MVZ",
"FVMU" : "",
"FVMT" : "",
"VEPU" : "",
"VEPT" : "PAT",
"UDLS" : "",
"YKKG" : "KFG",
"KYKN" : "YKN",
"KEWR" : "EWR",
"KEWB" : "EWB",
"KEWN" : "EWN",
"KEWK" : "EWK",
"VISR" : "SXR",
"FNBG" : "BUG",
"VISP" : "",
"FNBC" : "SSY",
"VIST" : "TNI",
"NZWP" : "",
"LDOC" : "",
"LIQS" : "SAY",
"LDOS" : "OSI",
"VISM" : "SLV",
"ESFR" : "",
"LSZP" : "",
"LSZS" : "SMV",
"LSZR" : "ACH",
"LSZT" : "",
"KUOS" : "",
"LSZH" : "ZRH",
"LSZK" : "",
"LSZJ" : "",
"KLUF" : "LUF",
"LSZL" : "ZJI",
"LSZA" : "LUG",
"KLUK" : "LUK",
"LSZC" : "BXO",
"LSZB" : "BRN",
"LSZG" : "",
"LSZF" : "",
"UMOO" : "MVQ",
"KBMI" : "BMI",
"SNOX" : "ORX",
"YMNE" : "WME",
"RJKI" : "KKX",
"RJKN" : "TKN",
"RJKA" : "ASJ",
"RJKB" : "",
"PHDH" : "HDH",
"NZWF" : "WKA",
"DTKA" : "TBJ",
"CZWL" : "ZWL",
"VNSB" : "SYH",
"CZWH" : "XLB",
"VNSI" : "SIF",
"MRGP" : "",
"VNSK" : "SKH",
"MYER" : "RSD",
"MYEH" : "ELH",
"MYEM" : "GHB",
"VNST" : "IMK",
"MYEN" : "",
"MYEG" : "",
"MYEF" : "GGT",
"UHHK" : "",
"UATG" : "GUW",
"UHHH" : "KHV",
"SWLB" : "LBR",
"SWLC" : "RVD",
"KFYV" : "FYV",
"KAKR" : "AKC",
"UATT" : "AKX",
"FALI" : "",
"OPJA" : "",
"EFSO" : "SOT",
"SYLT" : "LTM",
"OPJI" : "JIW",
"SBAA" : "CDJ",
"GECT" : "JCU",
"OIAA" : "ABD",
"FZBI" : "NIO",
"ULDD" : "AMV",
"SBAN" : "",
"KBOI" : "BOI",
"YBNS" : "BSJ",
"OIAD" : "",
"ZBYC" : "YCU",
"YBNA" : "BNK",
"ZBYN" : "TYN",
"FZBA" : "INO",
"MHLC" : "LCE",
"NFO" : "Tonga",
"WA19" : "",
"BLF" : "BLF",
"YOLD" : "OLP",
"SVGU" : "GUQ",
"EVLA" : "LPX",
"SVGI" : "GUI",
"SVGD" : "",
"BGNN" : "JNN",
"ZGOW" : "SWA",
"BGNS" : "JNS",
"YPAG" : "PUG",
"SWOB" : "FBA",
"YPAM" : "PMK",
"GMMI" : "ESU",
"GMMH" : "VIL",
"GMMN" : "CMN",
"GMML" : "EUN",
"GMMB" : "",
"GMMA" : "SMW",
"LEPP" : "PNA",
"GMME" : "RBA",
"LJPZ" : "POW",
"GMMZ" : "OZZ",
"GMMY" : "NNA",
"GMMX" : "RAK",
"SMCO" : "TOT",
"GMMW" : "NDR",
"ZUAL" : "NGQ",
"VDSR" : "REP",
"KCRP" : "CRP",
"KCRQ" : "CLD",
"KCRW" : "CRW",
"KHND" : "HSH",
"OENG" : "EAM",
"TQPF" : "AXA",
"FSPP" : "PRI",
"EDWG" : "AGE",
"MREC" : "",
"LKRO" : "",
"OG39" : "",
"EDWL" : "LGO",
"VTPP" : "PHS",
"PABT" : "BTT",
"PABR" : "BRW",
"VTPT" : "TKT",
"VTPY" : "",
"VTPB" : "PHY",
"PABE" : "BET",
"PABA" : "BTI",
"LOLW" : "",
"PABM" : "BMX",
"KBPT" : "BPT",
"VTPN" : "",
"VTPO" : "THS",
"VTPL" : "",
"VTPM" : "MAQ",
"AYKK" : "KRI",
"AYKI" : "UNG",
"AYKM" : "KMA",
"EDWQ" : "",
"KPTB" : "PTB",
"KPTK" : "PTK",
"AYKV" : "KVG",
"ENLI" : "FAN",
"SCVM" : "",
"ENLK" : "LKN",
"HTBU" : "BKZ",
"UDSG" : "LWN",
"SCVD" : "ZAL",
"KELM" : "ELM",
"KELO" : "LYU",
"MNCI" : "RNI",
"EGHD" : "PLH",
"KELP" : "ELP",
"EGHA" : "",
"EGHC" : "LEQ",
"DABS" : "TEE",
"EGHN" : "",
"EGHH" : "BOH",
"KELY" : "ELY",
"EGHJ" : "BBP",
"DABT" : "BLJ",
"ZPZT" : "ZAT",
"AYMN" : "MDU",
"DENX" : "",
"MBGT" : "GDT",
"GATS" : "",
"GATB" : "TOM",
"BPAP" : "",
"EBCI" : "CRL",
"KSZL" : "SZL",
"LGNX" : "JNX",
"FZKA" : "BUX",
"UWGG" : "GOJ",
"UIIB" : "",
"UIII" : "IKT",
"UIIR" : "",
"VGTJ" : "",
"SBMQ" : "MCP",
"SBMS" : "",
"SBMT" : "",
"SBMW" : "MVF",
"SBMY" : "MNX",
"EFOU" : "OUL",
"SBMA" : "MAB",
"SBMC" : "MQH",
"SBMD" : "",
"SBME" : "MEA",
"SBMG" : "MGF",
"SBMK" : "MOC",
"SBML" : "MII",
"SBMN" : "",
"SBMO" : "MCZ",
"ZSNJ" : "NKG",
"MNMG" : "MGA",
"ZSNB" : "NGB",
"EKLS" : "",
"KUNV" : "SCE",
"ETAR" : "RMS",
"KVEL" : "VEL",
"ETAD" : "SPM",
"KMTC" : "MTC",
"KMTN" : "MTN",
"KMTH" : "MTH",
"KMTJ" : "MTJ",
"SEAM" : "ATF",
"FGRS" : "",
"FQUG" : "",
"SKNV" : "NVA",
"EGVA" : "FFD",
"SKNQ" : "NQU",
"KRIU" : "RIU",
"KRIV" : "RIV",
"KRIW" : "RIW",
"KRIR" : "RIR",
"KRID" : "RID",
"KRIF" : "RIF",
"KRIC" : "RIC",
"KRIL" : "RIL",
"TJIG" : "SIG",
"UNEE" : "KEJ",
"VERC" : "IXR",
"VERK" : "RRK",
"YKMB" : "KRB",
"IRUF" : "IRU",
"UTOD" : "",
"SNIG" : "",
"FNLB" : "",
"FNLU" : "LAD",
"KHQU" : "HQU",
"KLSV" : "LSV",
"KUIN" : "UIN",
"KLSE" : "LSE",
"KLSF" : "LSF",
"UMMS" : "MSQ",
"UMMG" : "GNA",
"UMMB" : "",
"UMMM" : "MHP",
"KCOE" : "COE",
"VRMO" : "GKK",
"YGFN" : "GFN",
"K4A7" : "4A7",
"MTJE" : "JEE",
"MTJA" : "",
"DTMB" : "MIR",
"MGPB" : "PBR",
"LGLE" : "LRS",
"KAMA" : "AMA",
"PFAL" : "AET",
"FEFF" : "BGF",
"SWBR" : "RBB",
"HDOB" : "OBC",
"FEFT" : "BBT",
"LGSO" : "JSY",
"MPOA" : "PUE",
"LSGE" : "",
"UKKV" : "ZTR",
"DAFI" : "QDJ",
"KOSH" : "OSH",
"OJAQ" : "AQJ",
"KOSC" : "OSC",
"OJAI" : "AMM",
"OJAM" : "ADJ",
"OMSJ" : "SHJ",
"KOSU" : "OSU",
"TIST" : "STT",
"TISX" : "STX",
"YYMI" : "XMY",
"KIJD" : "IJD",
"SVAC" : "AGV",
"NTTR" : "RFP",
"LGHL" : "PKH",
"SVAN" : "AAO",
"YPCC" : "CCK",
"SVAT" : "",
"YPCE" : "",
"LEVX" : "VGO",
"LEVS" : "",
"LEVT" : "VIT",
"YBLT" : "",
"LQBK" : "BNX",
"LEVD" : "VLL",
"MDHE" : "HEX",
"WKBR" : "",
"LLNV" : "",
"ZUCK" : "CKG",
"SBFT" : "",
"KNPA" : "NPA",
"ENGM" : "OSL",
"SBPF" : "PFB",
"SBPC" : "POO",
"SBPB" : "",
"SBPA" : "POA",
"SBPN" : "PNB",
"SBPL" : "PNZ",
"SBPK" : "PET",
"SBPJ" : "PMW",
"KHLN" : "HLN",
"SBPV" : "PVH",
"KHLR" : "HLR",
"SBPS" : "BPS",
"SBPP" : "PMG",
"SIAM" : "MSI",
"MRCR" : "RIK",
"MRCV" : "TNO",
"KCZG" : "CZG",
"MRCA" : "CSC",
"MRCC" : "OTR",
"MRCH" : "",
"ZGMX" : "MXZ",
"VVBM" : "BMV",
"VNRK" : "RUK",
"UESS" : "CYX",
"UESU" : "ZKP",
"UEST" : "IKS",
"LRCL" : "CLJ",
"LRCK" : "CND",
"KBRD" : "BRD",
"SERB" : "",
"TLDO" : "",
"UESK" : "",
"KBRL" : "BRL",
"KBRO" : "BRO",
"UESO" : "CKH",
"YWCA" : "WIO",
"UUBS" : "KLF",
"ENRS" : "RET",
"KEND" : "END",
"SCTB" : "",
"SCTC" : "ZCO",
"HADC" : "DSE",
"KY72" : "",
"HADD" : "DEM",
"ENRY" : "RYG",
"SCTE" : "PMC",
"EGJB" : "GCI",
"HTLM" : "LKY",
"EGJA" : "ACI",
"KENV" : "ENV",
"HTLI" : "LDI",
"ENRA" : "MQN",
"ENRO" : "RRS",
"ENRM" : "RVK",
"HADR" : "DIR",
"UODD" : "DKS",
"HADT" : "DBT",
"ENRI" : "",
"YSOL" : "SLJ",
"MUGT" : "GAO",
"ZHYC" : "YIH",
"MUGM" : "",
"PFEL" : "ELI",
"WAPP" : "AMQ",
"ZABR" : "",
"WAPL" : "LUV",
"EBAW" : "ANR",
"KDHT" : "DHT",
"KJLN" : "JLN",
"PKMA" : "ENT",
"PKMJ" : "MAJ",
"KDHN" : "DHN",
"EIKN" : "NOC",
"HRZA" : "KME",
"VGRJ" : "RJH",
"KWYS" : "WYS",
"SBOI" : "",
"CZEM" : "ZEM",
"ZSLQ" : "HYN",
"DNPO" : "PHC",
"ZSLY" : "LYI",
"HDTJ" : "TDJ",
"ZSLG" : "LYG",
"OPSK" : "SKZ",
"MPDA" : "DAV",
"OPSN" : "SYW",
"YMYB" : "MBH",
"KMVY" : "MVY",
"OPSD" : "KDU",
"LVGZ" : "GZA",
"OPSS" : "SDT",
"OPST" : "SKT",
"OPSU" : "SUL",
"KMVL" : "MVL",
"RJAN" : "",
"SKUL" : "ULQ",
"SKLC" : "",
"SKLG" : "LQM",
"LWOH" : "OHD",
"SKLP" : "LPD",
"SKLT" : "LET",
"Q51" : "KIO",
"KRKS" : "RKS",
"KRKP" : "RKP",
"KRKD" : "RKD",
"UNKL" : "KJA",
"UNKM" : "",
"EFME" : "",
"UNKI" : "",
"EFMA" : "MHQ",
"EFMI" : "MIK",
"SKUC" : "AUC",
"YLEV" : "LEL",
"KESN" : "ESN",
"KESC" : "ESC",
"YLEO" : "LNO",
"FB57" : "",
"YKOW" : "KWM",
"KESF" : "ESF",
"HESH" : "SSH",
"HESN" : "ASW",
"HESC" : "SKV",
"FNNG" : "GXG",
"SNKV" : "",
"LDSB" : "BWK",
"SWEI" : "ERN",
"KGYR" : "",
"UMKK" : "KGD",
"SMZO" : "ORG",
"KGYY" : "GYY",
"CYAW" : "YAW",
"CYAV" : "YAV",
"CYAT" : "YAT",
"CYAS" : "YKG",
"MGRT" : "",
"CYAZ" : "YAZ",
"CYAY" : "YAY",
"CYAX" : "",
"CYAG" : "YAG",
"KNOP" : "ONP",
"CYAC" : "YAC",
"KNOW" : "NOW",
"CYAM" : "YAM",
"VVTS" : "SGN",
"DGAA" : "ACC",
"RCTP" : "TPE",
"YHID" : "HID",
"KAOH" : "AOH",
"LKKT" : "",
"LKKU" : "",
"KAOO" : "AOO",
"EKSP" : "SKS",
"EKSS" : "",
"EKSV" : "",
"LKKB" : "",
"EKSN" : "CNL",
"EKSB" : "SGD",
"SLUY" : "UYU",
"EPZG" : "IEG",
"K23M" : "23M",
"KBID" : "BID",
"KBIF" : "BIF",
"RHVI" : "",
"KBIL" : "BIL",
"UELL" : "CNN",
"KBIV" : "BIV",
"KBIS" : "BIS",
"HSGN" : "EGN",
"RKPE" : "",
"KBIX" : "BIX",
"KOQU" : "OQU",
"LPFL" : "FLW",
"OYAT" : "AXK",
"OYAA" : "ADE",
"LPFR" : "FAO",
"KOQN" : "OQN",
"UIKB" : "ODO",
"CILS" : "",
"YYKI" : "OKR",
"KILM" : "ILM",
"KILN" : "ILN",
"KILG" : "ILG",
"SVCN" : "CAJ",
"KOGS" : "OGS",
"SVCL" : "",
"SVCJ" : "",
"SPST" : "TPP",
"SVCD" : "",
"SVCB" : "CBL",
"SPSO" : "PIO",
"SVCZ" : "",
"YPEA" : "",
"SVCU" : "CUM",
"YPED" : "",
"SVCS" : "",
"SVCP" : "CUP",
"WAMG" : "GTO",
"YBBN" : "BNE",
"ZJHK" : "HAK",
"GMAZ" : "",
"WAMM" : "MDC",
"GMAT" : "TTA",
"WAMH" : "NAH",
"WAMW" : "LUW",
"WAMT" : "TTE",
"WAMR" : "OTI",
"WAMP" : "PSJ",
"GMAA" : "AGA",
"LETO" : "TOJ",
"MDJB" : "JBQ",
"PKPS" : "",
"SUMU" : "MVD",
"FBGZ" : "GNZ",
"MDJS" : "",
"AYCH" : "CMU",
"OEBH" : "BHH",
"SBRJ" : "SDU",
"FAQT" : "UTW",
"SBRG" : "RIG",
"SBRF" : "REC",
"SBRB" : "RBR",
"KWBW" : "WBW",
"OEBQ" : "",
"KHBR" : "HBR",
"ZSWF" : "WEF",
"KBGE" : "BGE",
"ZGCD" : "CGD",
"ZSWH" : "WEH",
"MRAN" : "FON",
"MRAO" : "TTQ",
"ZSWX" : "WUX",
"ZSWY" : "WUS",
"ZSWZ" : "WNZ",
"FCOU" : "OUE",
"RCMQ" : "RMQ",
"NZHK" : "HKK",
"SLGY" : "GYA",
"HKRE" : "",
"KBTR" : "BTR",
"KBTV" : "BTV",
"PAFS" : "NIB",
"PAFR" : "FRN",
"SEKK" : "",
"PAFM" : "ABL",
"PAFB" : "FBK",
"PAFA" : "FAI",
"YMHU" : "MCV",
"PAFE" : "AFE",
"LRAR" : "ARW",
"RCMT" : "MFK",
"AYGR" : "PNP",
"TUPJ" : "EIS",
"AYGN" : "GUR",
"TUPW" : "VIJ",
"VVGL" : "",
"ZMUL" : "ULG",
"AYGA" : "GKA",
"NVVB" : "AWD",
"NVVA" : "AUY",
"NVVF" : "FTA",
"CYSK" : "YSK",
"NVVD" : "DLY",
"NVVI" : "IPA",
"EGLW" : "",
"EGLJ" : "",
"EGLK" : "BBS",
"NVVV" : "VLI",
"EGLM" : "",
"SCRG" : "",
"EBKT" : "QKT",
"EGLC" : "LCY",
"SCRM" : "",
"EGLF" : "FAB",
"FZOA" : "KND",
"FZOK" : "KGN",
"SMST" : "SMZ",
"SAME" : "MDZ",
"K52A" : "52A",
"XUMW" : "",
"HTND" : "DUU",
"PFKW" : "KWT",
"RPVE" : "",
"PFKU" : "KYU",
"PFKT" : "KTS",
"LSMP" : "",
"PFKO" : "KOT",
"LSMM" : "",
"LSMI" : "ZIN",
"LSMD" : "",
"LSME" : "",
"LSMF" : "",
"LSMA" : "",
"PFKA" : "KUK",
"WARR" : "SUB",
"WARS" : "SRG",
"WARQ" : "SOC",
"KS39" : "",
"WARJ" : "JOG",
"WARI" : "",
"KS30" : "S30",
"WARA" : "MLG",
"KDFW" : "DFW",
"OIFS" : "CQD",
"OIFP" : "",
"KSFZ" : "SFZ",
"EBOS" : "OST",
"OIFK" : "",
"KSFF" : "SFF",
"NGTO" : "NON",
"KSFB" : "SFB",
"OIFM" : "",
"KSFO" : "SFO",
"OIFE" : "",
"VNDG" : "DNP",
"VNDH" : "DHI",
"VNDP" : "DOP",
"CZGI" : "ZGI",
"MYRD" : "",
"MPBO" : "BOC",
"KYKM" : "YKM",
"ZSJU" : "JUZ",
"EKHG" : "",
"ZSJA" : "KNC",
"ZSJD" : "JDZ",
"ZSJJ" : "JIU",
"ZSJN" : "TNA",
"OPQT" : "UET",
"OPQS" : "",
"KVAD" : "VAD",
"ETEU" : "GHF",
"KMPV" : "MPV",
"KMPI" : "MPI",
"ETEJ" : "",
"LHSA" : "",
"BIBA" : "",
"LHSM" : "SOB",
"BIBD" : "BIU",
"LHSN" : "",
"KRMY" : "RMY",
"EHHO" : "",
"ZBBB" : "NAY",
"KRME" : "RME",
"KRMG" : "RMG",
"UNII" : "EIE",
"KOJC" : "OJC",
"UNIB" : "",
"TJMZ" : "MAZ",
"UNIP" : "",
"UNIW" : "",
"UNIT" : "",
"YYTA" : "",
"CCD4" : "YSO",
"FAPG" : "",
"FNHU" : "NOV",
"URMT" : "STW",
"KGHG" : "GHG",
"URMN" : "NAL",
"URMO" : "OGZ",
"URML" : "MCX",
"URMM" : "MRV",
"URMG" : "GRV",
"UKWW" : "VIN",
"UMII" : "VTB",
"EECL" : "",
"FWCC" : "",
"FWCL" : "BLZ",
"FWCM" : "CMK",
"CYCY" : "YCY",
"CYCZ" : "YZS",
"CYCT" : "YCT",
"CYCW" : "YCW",
"YFLI" : "FLS",
"CYCS" : "YCS",
"CYCR" : "YCR",
"CYCL" : "YCL",
"CYCO" : "YCO",
"MGTK" : "FRS",
"CYCH" : "YCH",
"KNIP" : "NIP",
"CYCD" : "YCD",
"CYCG" : "YCG",
"CYCA" : "YRF",
"CYCC" : "YCC",
"CYCB" : "YCB",
"HAGB" : "GOB",
"OKAS" : "",
"VOCI" : "COK",
"VOCL" : "CCJ",
"SWFE" : "LVR",
"LFGA" : "CMR",
"LFGO" : "",
"KJRA" : "JRA",
"VOCB" : "CJB",
"LFGK" : "",
"WMKP" : "PEN",
"VOCX" : "",
"WMKL" : "LGK",
"WMKN" : "TGG",
"WMKI" : "IPH",
"WMKK" : "KUL",
"WMKJ" : "JHB",
"WMKE" : "KTE",
"WMKD" : "KUA",
"WMKF" : "",
"WMKA" : "AOR",
"WMKC" : "KBR",
"WMKB" : "",
"ZLLL" : "ZGC",
"MNLN" : "",
"SLSA" : "",
"SLSB" : "SRJ",
"KBKF" : "BKF",
"KBKD" : "BKD",
"TDPD" : "DOM",
"KBKL" : "BKL",
"KLUA" : "",
"KBKW" : "BKW",
"GEIR" : "",
"YCNM" : "CNB",
"YCNK" : "CES",
"KXFL" : "XFL",
"KOWB" : "OWB",
"KOWD" : "OWD",
"VYAS" : "",
"ENMR" : "MEH",
"OJMF" : "OMF",
"VYAN" : "",
"TREU" : "TLG",
"ZUGU" : "GYS",
"KTBN" : "TBN",
"ZUGY" : "KWE",
"KIND" : "IND",
"LGLM" : "LXS",
"KINL" : "INL",
"KINJ" : "INJ",
"KINK" : "INK",
"SPQT" : "IQT",
"SPQU" : "AQP",
"KINW" : "INW",
"KINT" : "INT",
"KINS" : "INS",
"LGLR" : "LRA",
"WAOO" : "BDJ",
"WAON" : "TJG",
"WAOI" : "PKN",
"WAOC" : "",
"WAOP" : "PKY",
"WAOS" : "SMQ",
"PKRO" : "",
"LEZL" : "SVQ",
"LEZG" : "ZAZ",
"LLET" : "ETH",
"UWPP" : "PEZ",
"LLBG" : "TLV",
"UWPS" : "SKX",
"MDAT" : "",
"MDLR" : "LRM",
"CYST" : "YST",
"YPWR" : "UMR",
"OEMA" : "MED",
"MOA1" : "",
"SBTK" : "",
"OEDR" : "DHA",
"OEDW" : "DWD",
"SBTC" : "UNA",
"SBTB" : "TMT",
"SBTF" : "TFF",
"SBTE" : "THE",
"SBTD" : "TOW",
"FASK" : "",
"FASI" : "",
"OEDF" : "DMM",
"VAAU" : "IXU",
"SBTS" : "",
"FASC" : "",
"LUBL" : "",
"SBTU" : "TUR",
"SBTT" : "TBT",
"YPGV" : "GOV",
"EHDR" : "",
"EHDP" : "",
"PABL" : "BKC",
"EHDL" : "",
"LFXA" : "",
"EGNE" : "",
"UOHH" : "HTG",
"EGNC" : "CAX",
"FQLC" : "VXC",
"EGNO" : "",
"EGNL" : "BWF",
"EGNM" : "LBA",
"EGNJ" : "HUY",
"EGNH" : "BLK",
"EGNV" : "MME",
"EGNT" : "NCL",
"EGNR" : "CEG",
"EGNS" : "IOM",
"FQLU" : "",
"KBVY" : "BVY",
"KBVU" : "BLD",
"UUMU" : "CKL",
"ZYCY" : "CHG",
"ZYCC" : "CGQ",
"YWOL" : "WOL",
"ZYCH" : "CNI",
"UDYZ" : "EVN",
"ENVR" : "VRY",
"UDYE" : "",
"ENVA" : "TRD",
"SCPE" : "",
"SCPC" : "ZPC",
"ENVD" : "VDS",
"FNUG" : "UGO",
"FNUE" : "LUO",
"BOCH" : "BOX",
"FNUB" : "SDD",
"FZAI" : "",
"YBHM" : "HTI",
"FZAG" : "MNB",
"GVMA" : "MMO",
"SUAA" : "",
"FZAB" : "NLO",
"FZAA" : "FIH",
"MUCX" : "",
"YSCB" : "CBR",
"YSCO" : "NSO",
"YSCN" : "CDU",
"YSCH" : "CFS",
"MUCU" : "SCU",
"CKQ3" : "YNO",
"MUCM" : "CMW",
"MUCL" : "CYO",
"MUCC" : "",
"MUCA" : "AVI",
"MUCF" : "CFG",
"PADE" : "DRG",
"PADK" : "ADK",
"PADM" : "MLL",
"PADL" : "DLG",
"PADQ" : "ADQ",
"PADU" : "DUT",
"PADY" : "KKH",
"KSDY" : "SDY",
"KSDC" : "SDC",
"KSDF" : "SDF",
"KSDL" : "ZSY",
"KSDM" : "SDM",
"KMRB" : "MRB",
"SYMB" : "USI",
"KMRN" : "MRN",
"KMRY" : "MRY",
"MPLP" : "PLP",
"MYPI" : "PID",
"MPLD" : "",
"UUOT" : "TBW",
"UUOS" : "",
"KDKK" : "DKK",
"UUOB" : "EGO",
"UUOO" : "VOZ",
"ZSHC" : "HGH",
"UUOL" : "LPK",
"UUOK" : "URS",
"7FL4" : "",
"KVCV" : "VCV",
"KVCT" : "VCT",
"MYES" : "",
"GCFV" : "FUE",
"DIDL" : "DJO",
"ZBLA" : "HLD",
"PAWI" : "AIN",
"HLMB" : "",
"PGUM" : "GUM",
"TJSJ" : "SJU",
"UNOO" : "OMS",
"YYRM" : "",
"K2H0" : "2H0",
"LZPP" : "PZY",
"OAZJ" : "ZAJ",
"YKSC" : "KGC",
"SGIB" : "",
"URKA" : "AAQ",
"URKG" : "",
"URKK" : "KRR",
"HCMR" : "GLK",
"UATE" : "SCO",
"KROC" : "ROC",
"HCMV" : "BUO",
"KROA" : "ROA",
"KROW" : "ROW",
"HCMF" : "BSA",
"HCMH" : "HGA",
"HCMI" : "BBO",
"HCMK" : "KMU",
"HCMM" : "MGQ",
"BLBO" : "",
"OLBA" : "BEY",
"YGLI" : "GLI",
"YGLG" : "GEX",
"YGLA" : "GLT",
"YGLB" : "GUL",
"KNKT" : "NKT",
"CYER" : "YER",
"UHHT" : "",
"CYEV" : "YEV",
"CYEU" : "YEU",
"CYET" : "YET",
"CYEK" : "YEK",
"CYEN" : "YEN",
"CYEM" : "YEM",
"CYEG" : "YEG",
"CYEE" : "YEE",
"RCPO" : "",
"OEYN" : "YNB",
"FKKL" : "MVR",
"ESNJ" : "",
"ESNK" : "KRF",
"ESNH" : "HUV",
"ESNN" : "SDL",
"ESNO" : "OER",
"ESNL" : "LYC",
"ESNM" : "",
"ESNC" : "",
"KFAR" : "FAR",
"ESNA" : "",
"KFAT" : "FAT",
"ESNG" : "GEV",
"ESND" : "EVG",
"ESNZ" : "OSD",
"ESNX" : "AJR",
"UHPO" : "",
"KASH" : "ASH",
"ESNR" : "",
"ESNS" : "SFT",
"ESNP" : "",
"ESNQ" : "KRN",
"ESNV" : "VHM",
"ESNT" : "",
"ESNU" : "UME",
"FCPL" : "DIS",
"VOAT" : "AGX",
"RPNS" : "",
"LFEH" : "",
"CTK6" : "ZKG",
"LFED" : "",
"VTSP" : "HKT",
"FKKV" : "BPC",
"LFEC" : "",
"LFEY" : "IDY",
"LFEW" : "",
"FCPP" : "PNR",
"LFES" : "",
"ZLZW" : "ZHY",
"EDBM" : "",
"VTSG" : "KBV",
"EDBH" : "BBH",
"EDBK" : "",
"EDBJ" : "",
"EDBG" : "",
"KBMC" : "BMC",
"EDVS" : "",
"EDBC" : "CSO",
"KBMG" : "BMG",
"KBMQ" : "BMQ",
"HSKA" : "KSL",
"KBMT" : "BMT",
"KAVQ" : "AVW",
"EDBR" : "",
"EDVW" : "",
"YWTN" : "WIN",
"LPBR" : "",
"EDVY" : "",
"LPBJ" : "",
"PACM" : "SCM",
"LPBG" : "BGC",
"PACH" : "CHU",
"VYCI" : "",
"VIHR" : "",
"FMSL" : "OVA",
"FMSM" : "MNJ",
"FMSJ" : "MJA",
"FMSK" : "WVK",
"FMSF" : "WFI",
"FMSG" : "RVA",
"FMSD" : "FTU",
"KDXR" : "DXR",
"FMST" : "TLE",
"FMSR" : "MXM",
"CNE3" : "XBE",
"FBKR" : "KHW",
"FBKE" : "BBK",
"KIAG" : "IAG",
"LEXJ" : "SDR",
"VGIS" : "IRD",
"VVNS" : "SQH",
"SLYA" : "BYC",
"FKYS" : "NSI",
"SBVT" : "VIX",
"YNBR" : "NAA",
"SBVG" : "VAG",
"KHFD" : "HFD",
"SBVH" : "BVH",
"KCZL" : "",
"ZSSH" : "HIA",
"ZSSL" : "",
"VASL" : "SSE",
"YPID" : "",
"ZGGG" : "CAN",
"ZSSS" : "SHA",
"VASU" : "STV",
"EHTW" : "ENS",
"NZAR" : "AMZ",
"NZAP" : "TUO",
"NZAA" : "AKL",
"FQNP" : "APL",
"ENTC" : "TOS",
"SKUI" : "UIB",
"ENTO" : "TRF",
"FQNC" : "MNC",
"HI07" : "WKL",
"EHTX" : "",
"FAUL" : "ULD",
"FAUP" : "UTN",
"FAUT" : "UTT",
"ZYMD" : "",
"ANAS" : "",
"KEDW" : "EDW",
"UHPK" : "",
"OIBL" : "BDH",
"OIBK" : "KIH",
"OIBH" : "",
"OIBI" : "",
"OIBB" : "BUZ",
"KDBN" : "DBN",
"OIBA" : "",
"SUCA" : "CYR",
"KDBQ" : "DBQ",
"FZCA" : "KKW",
"EBKW" : "KNO",
"OIBV" : "",
"OIBS" : "",
"OIBP" : "PGU",
"OIBQ" : "",
"ZHCC" : "CGO",
"CRDB" : "",
"MHLM" : "SAP",
"UTDK" : "TJU",
"PAJN" : "JNU",
"UTDL" : "LBD",
"UTDD" : "DYU",
"KPCW" : "PCW",
"PAJZ" : "KGK",
"UTDT" : "",
"KSBY" : "SBY",
"KSBP" : "SBP",
"NLWW" : "WLS",
"KSBS" : "SBS",
"KSBM" : "SBM",
"KSBN" : "SBN",
"KSBO" : "SBO",
"KSBA" : "SBA",
"KSBD" : "SBD",
"KJRB" : "JRB",
"KBFI" : "BFI",
"ETIN" : "",
"KEET" : "EET",
"ETIH" : "",
"ETID" : "ZNF",
"ETIC" : "",
"EDOJ" : "",
"KBFL" : "BFL",
"EPKO" : "OSZ",
"USNR" : "RAT",
"EPKK" : "KRK",
"DNZA" : "ZAR",
"UUMO" : "",
"USNN" : "NJC",
"RPSD" : "RZP",
"PATC" : "TNC",
"YMCO" : "XMC",
"ZLGM" : "GOQ",
"LBGO" : "GOZ",
"OAZI" : "",
"ZLGY" : "GYU",
"WICT" : "TKG",
"FA08" : "",
"DIBK" : "BYK",
"PATL" : "TLJ",
"WICC" : "BDO",
"WICB" : "",
"WICD" : "CBN",
"WICM" : "",
"PGWT" : "TIQ",
"KONO" : "",
"SANT" : "TUC",
"HLON" : "",
"KONT" : "ONT",
"CJT2" : "",
"VEZO" : "",
"YKUB" : "KUG",
"KGCK" : "GCK",
"HHSB" : "ASA",
"KRAL" : "RAL",
"KRAC" : "RAC",
"LJMB" : "MBX",
"KGCC" : "GCC",
"CYTQ" : "YTQ",
"KGCN" : "GCN",
"KRAP" : "RAP",
"CYTR" : "YTR",
"LECD" : "",
"SMPA" : "OEM",
"LECO" : "LCG",
"YOLW" : "ONS",
"SMPT" : "",
"STLS" : "",
"KPVU" : "PVU",
"HAHM" : "QHR",
"KNEW" : "NEW",
"KNEL" : "NEL",
"KCGF" : "CGF",
"KHYA" : "HYA",
"CYGE" : "YGE",
"CYGH" : "YGH",
"CYGK" : "YGK",
"CYGM" : "YGM",
"CYGL" : "YGL",
"KCGI" : "CGI",
"CYGQ" : "YGQ",
"CYGP" : "YGP",
"CYGR" : "YGR",
"CYGT" : "YGT",
"EICA" : "NNR",
"CYGV" : "YGV",
"RCNN" : "TNN",
"EICM" : "GWY",
"CYGZ" : "YGZ",
"EICK" : "ORK",
"KCGZ" : "CGZ",
"KCGX" : "CGX",
"YHOT" : "MHU",
"KAUS" : "AUS",
"ZGZJ" : "ZHA",
"ZGZH" : "LZH",
"KUAB" : "UAB",
"KAUO" : "AUO",
"YHOO" : "HOK",
"KAUG" : "AUG",
"KAUF" : "KFX",
"LFCK" : "DCM",
"LFCI" : "LBI",
"LFCH" : "XAC",
"LFCM" : "",
"KFCA" : "FCA",
"LFCC" : "",
"RPLU" : "LBX",
"LFCG" : "",
"RPLP" : "LGP",
"LFCD" : "",
"LFCZ" : "",
"LFCY" : "RYN",
"KFCS" : "FCS",
"RPLI" : "LAO",
"LFCR" : "RDZ",
"LFCQ" : "",
"LFCW" : "",
"RPLC" : "CRK",
"LFCU" : "",
"ZLXY" : "XIY",
"WSAG" : "",
"ZLXN" : "XNN",
"HAHU" : "HUE",
"ZLXH" : "GXH",
"WSAT" : "",
"WSAP" : "QPG",
"EDDK" : "CGN",
"YPAD" : "ADL",
"EDDI" : "THF",
"EDDH" : "HAM",
"EDDN" : "NUE",
"EDDM" : "MUC",
"EDDL" : "DUS",
"EDDC" : "DRS",
"EDDB" : "BER",
"EPPO" : "POZ",
"EDDF" : "FRA",
"EDDE" : "ERF",
"EPPR" : "",
"EDDS" : "STR",
"EDDR" : "SCN",
"EDDP" : "LEJ",
"EDDW" : "BRE",
"EDDV" : "HAJ",
"GEML" : "MLN",
"YWVA" : "",
"YCBB" : "COJ",
"YCBA" : "CAZ",
"NFFO" : "PTF",
"ORNI" : "NJF",
"YCBP" : "CPD",
"LCLK" : "LCA",
"USPP" : "PEE",
"UGTB" : "TBS",
"VYEL" : "ELA",
"GUXD" : "KNN",
"KUVA" : "UVA",
"FMMT" : "TMM",
"FMMU" : "WTA",
"FMMV" : "MOQ",
"FMMR" : "TVA",
"FMMS" : "SMS",
"FMMX" : "WTS",
"FMMZ" : "WAM",
"FMME" : "ATJ",
"FMMG" : "WAQ",
"FMML" : "BMD",
"FMMN" : "ZVA",
"FMMO" : "MXT",
"FMMI" : "TNR",
"FMMK" : "JVA",
"HSMK" : "RBX",
"YBDV" : "BVI",
"CYGB" : "YGB",
"KCGC" : "CGC",
"WAKK" : "MKQ",
"YBDG" : "",
"FBCL" : "",
"FBCO" : "",
"TFFR" : "PTP",
"TFFS" : "LSS",
"TFFM" : "GBJ",
"TFFJ" : "SBH",
"TFFF" : "FDF",
"TFFG" : "SFG",
"FBMN" : "MUB",
"TFFB" : "BBR",
"TFFC" : "SFC",
"CYGO" : "YGO",
"KWDR" : "WDR",
"KHYS" : "HYS",
"LEPA" : "PMI",
"MUSL" : "",
"KHDI" : "HDI",
"KCXO" : "CXO",
"KCXL" : "CXL",
"KHDO" : "HDO",
"KHDN" : "HDN",
"CYGW" : "YGW",
"YPKA" : "KTA",
"ZSQZ" : "JJN",
"BGTL" : "THU",
"DNGU" : "QUS",
"VAUD" : "UDR",
"CYGX" : "YGX",
"YPKS" : "PKE",
"SPUR" : "PIU",
"YPKU" : "KNX",
"EFHK" : "HEL",
"NZCI" : "CHT",
"NZCH" : "CHC",
"EGBJ" : "GLO",
"SKSJ" : "SJE",
"EGBN" : "NQT",
"EGBO" : "",
"SKSM" : "SMR",
"LHDC" : "DEB",
"EGBB" : "BHX",
"SKSA" : "RVE",
"EGBG" : "",
"EGBE" : "CVT",
"HTTG" : "TGT",
"HKHB" : "",
"HTTB" : "TBO",
"HKHO" : "HOA",
"ENZV" : "SVG",
"EGBP" : "",
"SKSP" : "ADZ",
"EGBW" : "",
"EGBT" : "",
"EFHN" : "",
"ETNT" : "",
"FAWM" : "WEL",
"FAWK" : "",
"FAWI" : "",
"YWKS" : "",
"FAWB" : "PRY",
"ZZ04" : "",
"ZZ05" : "",
"ZZ06" : "",
"ZZ07" : "RGO",
"BKKX" : "",
"WRBK" : "KBU",
"KEFT" : "",
"KPBX" : "PBX",
"KPBG" : "PBG",
"KPBF" : "PBF",
"KEFH" : "1H2",
"YBLN" : "BQB",
"KEFD" : "EFD",
"KPBH" : "",
"LIBN" : "LCC",
"LIBC" : "CRV",
"ZPPP" : "KMG",
"LIBG" : "TAR",
"LIBF" : "FOG",
"LIBD" : "BRI",
"FZEA" : "MDK",
"HBRG" : "",
"LIBR" : "BDS",
"LIBP" : "PSR",
"LIBV" : "",
"GVAC" : "SID",
"YSGE" : "SGO",
"ULMK" : "KVK",
"PAHU" : "HUS",
"PAHV" : "HKB",
"LOXK" : "",
"PAHP" : "HPB",
"PAHY" : "HYG",
"PAHX" : "SHX",
"ELLX" : "LUX",
"LOXZ" : "",
"PAHC" : "HCR",
"PAHL" : "HSL",
"PAHO" : "HOM",
"PAHN" : "HNS",
"LFCL" : "",
"EDMO" : "OBF",
"LSML" : "",
"VNBW" : "BWA",
"VNBR" : "BJU",
"OIKB" : "BND",
"VNBP" : "BHR",
"RPLO" : "CYU",
"S31" : "LPS",
"RPLL" : "MNL",
"VNBG" : "BJH",
"KCTY" : "CTY",
"VNBJ" : "BHP",
"ESUD" : "SQO",
"ESUK" : "",
"KLFI" : "LFI",
"ESUT" : "HMV",
"ESUP" : "PJA",
"USHB" : "NBB",
"YMAY" : "ABX",
"USHH" : "HMA",
"RPLB" : "SFS",
"YMAV" : "AVV",
"KSTP" : "",
"USHN" : "NYA",
"KVOK" : "VOK",
"USHS" : "OVS",
"USHU" : "URJ",
"CTT5" : "",
"YMAA" : "UBB",
"LZTN" : "",
"LZTT" : "TAT",
"OSKL" : "KAC",
"ESTA" : "AGH",
"WIMS" : "",
"WIMM" : "MES",
"WIMG" : "",
"WIME" : "",
"WIMB" : "GNS",
"KOLV" : "OLV",
"KOLS" : "OLS",
"OYTZ" : "TAI",
"EFET" : "ENF",
"EFEU" : "",
"MPHO" : "HOW",
"KOLF" : "OLF",
"KOLN" : "KOX",
"KOLM" : "OLM",
"YTRE" : "TRO",
"CJV7" : "SUR",
"VEDB" : "DBD",
"KTYR" : "TYR",
"KTYS" : "TYS",
"NK39" : "",
"URWW" : "VOG",
"SGME" : "",
"MADC" : "",
"SNSW" : "SFK",
"URWA" : "ASF",
"URWI" : "ESL",
"UBBY" : "ZTU",
"ZBHD" : "HDG",
"NTKR" : "TKX",
"KBOW" : "BOW",
"ZBHH" : "HET",
"UBBQ" : "GBB",
"KRCA" : "RCA",
"KRCZ" : "RCZ",
"KGAI" : "GAI",
"UBBN" : "NAJ",
"UBBL" : "LLK",
"UBBB" : "GYD",
"LJCE" : "",
"UBBG" : "KVD",
"KBOS" : "BOS",
"LEAB" : "",
"SLOR" : "",
"LEAL" : "ALC",
"LEAM" : "LEI",
"LEAS" : "OVD",
"SMRP" : "",
"KNGZ" : "NGZ",
"EDDG" : "FMO",
"KNGP" : "NGP",
"YNWN" : "ZNE",
"KNGU" : "NGU",
"WMAP" : "",
"CYIO" : "YIO",
"SAST" : "TTG",
"RCLY" : "KYD",
"MMLT" : "LTO",
"CYIF" : "YIF",
"KCEF" : "CEF",
"CYIB" : "YIB",
"KCEC" : "CEC",
"K62S" : "",
"SASA" : "SLA",
"SASC" : "CFX",
"KCEZ" : "CEZ",
"KCEU" : "CEU",
"CYIV" : "YIV",
"SASO" : "ORA",
"SBCX" : "CXJ",
"SASJ" : "JUJ",
"VTBL" : "",
"VAHB" : "HBX",
"KLYH" : "LYH",
"WHIT" : "",
"LFAQ" : "",
"KFMN" : "FMN",
"VOMY" : "MYQ",
"KFMH" : "FMH",
"LFAT" : "LTQ",
"LFAV" : "",
"LFAY" : "",
"KFME" : "FME",
"LFAC" : "CQF",
"KFMY" : "FMY",
"VOML" : "IXE",
"VOMM" : "MAA",
"LKCV" : "",
"EDDT" : "TXL",
"LKCS" : "",
"LFAO" : "",
"HYFG" : "KLK",
"KDAL" : "DAL",
"EDFQ" : "",
"EDFW" : "",
"EDFV" : "",
"EPRA" : "QXR",
"EDFZ" : "",
"MMLO" : "BJX",
"EPRZ" : "RZE",
"EDFE" : "",
"LICA" : "SUF",
"KBAD" : "BAD",
"EDFH" : "HHN",
"KBAF" : "BAF",
"SBCM" : "CCM",
"EDFM" : "MHG",
"LICB" : "CIY",
"KBAB" : "BAB",
"YCDU" : "CED",
"KDAB" : "DAB",
"FANC" : "NCS",
"FSAL" : "",
"FSAS" : "",
"SULS" : "PDP",
"SCIP" : "IPC",
"SCIR" : "",
"ZPSM" : "SYM",
"SCIC" : "",
"SCIE" : "CCP",
"ESNF" : "",
"ZUMY" : "MIG",
"KDAY" : "DAY",
"CEM3" : "YLE",
"MDBH" : "BRX",
"WPLN" : "",
"OICS" : "SDG",
"LTBR" : "YEI",
"LTBS" : "DLM",
"LTBP" : "",
"LTBQ" : "",
"LTBV" : "BXN",
"SWMW" : "MBZ",
"LTBT" : "",
"LTBU" : "TEQ",
"LTBZ" : "KZR",
"LTBX" : "",
"ARIA" : "",
"KIDA" : "IDA",
"LTBA" : "IST",
"LTBF" : "BZI",
"LTBG" : "BDM",
"BBUF" : "",
"LTBJ" : "ADB",
"LTBK" : "",
"LTBH" : "CKZ",
"LTBI" : "ESK",
"LTBN" : "",
"LTBO" : "USQ",
"LTBL" : "IGL",
"LTBM" : "",
"PASD" : "SDP",
"VTCC" : "CNX",
"PASA" : "SVA",
"PASC" : "SCC",
"PASL" : "SLQ",
"PASM" : "KSM",
"PASN" : "SNP",
"VTCL" : "LPT",
"PASH" : "SHH",
"PASI" : "SIT",
"VTCI" : "PYY",
"PASK" : "WLK",
"PASV" : "SVW",
"VTCT" : "CEI",
"VTCP" : "PRH",
"PASY" : "SYA",
"FBOR" : "ORP",
"EHBD" : "",
"EHBK" : "MST",
"VGEG" : "CGP",
"MMML" : "MXL",
"CZPB" : "ZPB",
"SNCL" : "",
"YPMP" : "EDR",
"YPMQ" : "PQQ",
"DNEN" : "ENU",
"LGBL" : "VOL",
"YPMH" : "PXH",
"EGDR" : "",
"SKQU" : "",
"EGDX" : "",
"EGDY" : "YEO",
"EGDA" : "",
"EGDC" : "",
"HKJK" : "NBO",
"EGDG" : "NQY",
"EGDL" : "LYE",
"EGDM" : "",
"FQBR" : "BEW",
"MMUN" : "CUN",
"WRLP" : "LPU",
"ZUKD" : "KGT",
"WRLF" : "NNX",
"VTPH" : "HHQ",
"SEPS" : "",
"SEPV" : "PVO",
"RJDT" : "TSJ",
"LIPA" : "AVB",
"KSNA" : "SNA",
"FNSA" : "VHC",
"EBWE" : "",
"RJDA" : "AXJ",
"RJDB" : "IKI",
"RJDC" : "UBJ",
"FZGA" : "LIQ",
"GVSN" : "SNE",
"FNSO" : "SZA",
"HUPA" : "PAF",
"VIBL" : "",
"VIBN" : "VNS",
"VIBK" : "",
"DFFD" : "OUA",
"VIBY" : "",
"VIBT" : "",
"VIBW" : "",
"VIBR" : "KUU",
"PANR" : "FNR",
"PANW" : "KNW",
"PANV" : "ANV",
"PANU" : "NUL",
"PANT" : "ANN",
"CTB6" : "ZTB",
"PANC" : "ANC",
"YBWX" : "BWB",
"LRIV" : "",
"PANI" : "ANI",
"PANO" : "NNL",
"FNCT" : "CBT",
"ETMW" : "",
"K55S" : "55S",
"LDPL" : "PUY",
"KVIS" : "VIS",
"KMXF" : "MXF",
"SNDC" : "RDC",
"ETMN" : "",
"SAHC" : "",
"UIUN" : "",
"UIUU" : "UUD",
"SAHR" : "",
"VNLK" : "LUA",
"VNLD" : "LDN",
"SAHZ" : "APZ",
"KELD" : "ELD",
"KLDJ" : "LDJ",
"GOTB" : "BXE",
"MUMZ" : "MZO",
"NGMK" : "MZK",
"NGMN" : "MTK",
"MUMO" : "MOA",
"LFIF" : "",
"GOTT" : "TUD",
"PFSH" : "SKK",
"UUIC" : "",
"DABC" : "CZL",
"DABB" : "AAE",
"YMGB" : "MGT",
"YMGD" : "MNG",
"GCHI" : "VDE",
"SDCO" : "SOD",
"SDCG" : "OLC",
"ULWW" : "VGD",
"ULWU" : "VUS",
"MPJE" : "JQE",
"EGHL" : "QLA",
"KOBE" : "OBE",
"UGKO" : "KUT",
"HLKF" : "AKF",
"PGSN" : "SPN",
"EGHI" : "SOU",
"FVCZ" : "BFO",
"FVCP" : "",
"EGHK" : "PZE",
"KTWF" : "TWF",
"SGCO" : "",
"AGAR" : "RNA",
"HEYD" : "",
"KIYK" : "IYK",
"UKOO" : "ODS",
"UKON" : "NLV",
"UKOH" : "KHE",
"WIOD" : "TJQ",
"WIOG" : "",
"WIOM" : "MWK",
"WIOO" : "PNK",
"WION" : "NTX",
"KGGW" : "GGW",
"WIOK" : "KTG",
"WIOP" : "PSU",
"WIOS" : "SQG",
"KREI" : "REI",
"KGGG" : "GGG",
"KGGE" : "GGE",
"LQSA" : "SJJ",
"FBTL" : "TLD",
"LEGA" : "",
"LEGE" : "GRO",
"SMLA" : "",
"LEGR" : "GRX",
"LEGT" : "",
"STHT" : "",
"FLKY" : "ZKB",
"YFTZ" : "FIZ",
"KPIH" : "PIH",
"OMRK" : "RKT",
"CYKQ" : "YKQ",
"CYKY" : "YKY",
"CYKX" : "YKX",
"CYKZ" : "YKZ",
"CYKD" : "LAK",
"CYKG" : "YWB",
"CYKF" : "YKF",
"CYKA" : "YKA",
"CYKL" : "YKL",
"CYKO" : "AKV",
"VAJM" : "JGA",
"YPPF" : "",
"YPPD" : "PHE",
"VAJB" : "JLR",
"KFOK" : "FOK",
"LFOZ" : "",
"LFOV" : "LVA",
"LFOU" : "CET",
"LFOT" : "TUF",
"KFOD" : "FOD",
"LFOP" : "URO",
"LFOK" : "XCR",
"LFOJ" : "ORE",
"LFOI" : "",
"MRTR" : "TMU",
"LFOE" : "",
"LFOD" : "",
"LFOC" : "",
"LFOB" : "BVA",
"LFOA" : "",
"UAFO" : "OSS",
"EGYE" : "",
"EGYD" : "",
"ENCN" : "KRS",
"LWSB" : "",
"EGYR" : "",
"EGYP" : "MPN",
"FYLZ" : "LUD",
"PITT" : "",
"KBCT" : "BCT",
"KZPH" : "ZPH",
"EDHN" : "EUM",
"EDHM" : "",
"EDHL" : "LBC",
"EDHK" : "KEL",
"EPLB" : "LUZ",
"EDHI" : "XFW",
"EDHF" : "",
"EDHE" : "",
"EPLL" : "LCJ",
"EPLK" : "",
"EDHB" : "",
"KBCE" : "BCE",
"MHBL" : "BHG",
"MNRT" : "RFS",
"SCGE" : "LSQ",
"OOMS" : "MCT",
"DAUE" : "ELG",
"LTDA" : "HTY",
"CKD9" : "",
"PAQC" : "AQC",
"UEBB" : "",
"LOAG" : "",
"OOMA" : "MSH",
"LOAN" : "",
"LOAV" : "",
"PAQT" : "NUI",
"ROIG" : "ISG",
"KCXY" : "CXY",
"NZGB" : "GBZ",
"KMEM" : "MEM",
"KMEI" : "MEI",
"NZGS" : "GIS",
"KMER" : "MER",
"EHLW" : "LWR",
"VGCB" : "CXB",
"KWHP" : "WHP",
"CNC3" : "",
"DNCA" : "CBQ",
"MUVT" : "VTU",
"MUVR" : "VRA",
"YPOD" : "PTJ",
"SPIM" : "LIM",
"HKLU" : "LAU",
"HKLK" : "LKG",
"OACC" : "CCN",
"HKLO" : "LOK",
"OBBI" : "BAH",
"EGFH" : "SWS",
"EGFF" : "CWL",
"EGFE" : "",
"HTPE" : "PMA",
"CAC8" : "ZNA",
"K6S0" : "6S0",
"EIDL" : "CFN",
"FAKR" : "",
"FAKS" : "",
"MMSF" : "",
"FAKU" : "",
"FAKZ" : "KLZ",
"FZKJ" : "",
"CZRJ" : "ZRJ",
"MMSP" : "SLP",
"FAKD" : "",
"SIMK" : "FRC",
"FAKN" : "MQP",
"FAKM" : "KIM",
"ZSQD" : "TAO",
"KPFN" : "PFN",
"RJFR" : "KKJ",
"RJFS" : "HSG",
"KSLN" : "SLN",
"RJFT" : "KMJ",
"RJFU" : "NGS",
"RJFZ" : "",
"KSLC" : "SLC",
"RJFY" : "",
"KSLE" : "SLE",
"RJFC" : "KUM",
"RJFA" : "",
"RJFF" : "FUK",
"RJFG" : "TNE",
"RJFE" : "FUJ",
"RJFK" : "KOJ",
"LIND" : "LND",
"RJFN" : "",
"RJFO" : "OIT",
"EBUL" : "",
"RJFM" : "KMI",
"VIDN" : "DED",
"VIDD" : "",
"VIDP" : "DEL",
"KFBR" : "FBR",
"LROD" : "OMR",
"PALU" : "LUR",
"EETU" : "TAY",
"PALH" : "LHD",
"LROP" : "OTP",
"EETN" : "TLL",
"GFHA" : "HGS",
"NFCI" : "ICI",
"PALB" : "KLN",
"PALG" : "KLG",
"OPGT" : "GIL",
"YMED" : "",
"YMEK" : "MKR",
"ETOU" : "",
"ETOR" : "",
"YMEN" : "MEB",
"OPGD" : "GWD",
"YMES" : "",
"YMER" : "MIM",
"KMZJ" : "MZJ",
"ETOI" : "",
"CYPE" : "YPE",
"CYPG" : "YPG",
"CYPA" : "YPA",
"K57C" : "57C",
"CYPC" : "YPC",
"CYPL" : "YPL",
"CYPM" : "YPM",
"CYPN" : "YPN",
"CYPO" : "YPO",
"CYPH" : "YPH",
"CYPI" : "YPI",
"CYPK" : "",
"PPHO" : "PHO",
"CYPQ" : "YPQ",
"CYPR" : "YPR",
"CYPX" : "YPX",
"CYPY" : "YPY",
"ESQO" : "",
"KLBL" : "LBL",
"KLBB" : "LBB",
"KLBF" : "LBF",
"KLBE" : "LBE",
"ZHES" : "ENH",
"KLBX" : "",
"KUZA" : "RKH",
"MMBT" : "HUX",
"KLBT" : "LBT",
"RPUQ" : "",
"RPUS" : "SFE",
"RPUR" : "",
"RPUT" : "TUG",
"USTR" : "TJM",
"RPUV" : "VRC",
"RPUY" : "CYZ",
"RPUZ" : "",
"RPUB" : "BAG",
"NGKT" : "KUC",
"RPUD" : "",
"RPUG" : "",
"RPUF" : "",
"RPUH" : "",
"RPUM" : "",
"RPUL" : "",
"RPUO" : "BSO",
"RPUN" : "WNP",
"YLIS" : "LSY",
"ZLAK" : "AKA",
"ZLAN" : "LHW",
"BITN" : "THO",
"SDAM" : "CPQ",
"SDAI" : "",
"EYKS" : "",
"MPTO" : "PTY",
"KANQ" : "ANQ",
"EYKA" : "KUN",
"SJTC" : "JTC",
"OMDM" : "NHD",
"OMDB" : "DXB",
"PCIS" : "CIS",
"YTNK" : "TCA",
"FNZG" : "",
"YTNG" : "THG",
"OMDW" : "DWC",
"SNGI" : "GNM",
"ZURK" : "RKZ",
"KTUS" : "TUS",
"KTUP" : "TUP",
"YGTE" : "GTE",
"KTUL" : "TUL",
"YGTH" : "GFF",
"SGAS" : "ASU",
"URSS" : "AER",
"SGAY" : "",
"SKSV" : "SVI",
"HEGN" : "HRG",
"RJCT" : "",
"ALX_" : "ALX",
"WIII" : "CGK",
"TEA1" : "",
"ZBTL" : "TGO",
"ZBTJ" : "TSN",
"KGED" : "GED",
"KGEG" : "GEG",
"KGEO" : "",
"KGEU" : "GEU",
"KAVO" : "AVO",
"SMNI" : "ICK",
"KAVL" : "AVL",
"LFMD" : "CEQ",
"LFMA" : "QXB",
"LFMC" : "",
"VONS" : "",
"LFMO" : "",
"LFMN" : "NCE",
"LFMI" : "",
"LFMH" : "EBU",
"LFMK" : "CCF",
"YFRT" : "",
"LFMU" : "BZR",
"LFMT" : "MPL",
"LFMV" : "AVN",
"LFMQ" : "CTT",
"LFMP" : "PGF",
"LFMS" : "",
"LFMY" : "",
"LFMZ" : "",
"KCAR" : "CAR",
"CYMW" : "YMW",
"CYMU" : "YUD",
"CYMT" : "YMT",
"KHST" : "HST",
"SAWP" : "PMQ",
"KHSV" : "HSV",
"CYMX" : "YMX",
"SAWU" : "RZA",
"KNCA" : "",
"SAWH" : "USH",
"CYMA" : "YMA",
"SAWJ" : "ULA",
"KCAE" : "CAE",
"SAWA" : "ING",
"CYMJ" : "YMJ",
"KCAK" : "CAK",
"CYMH" : "YMH",
"CYMO" : "YMO",
"SAWD" : "PUD",
"CYMM" : "YMM",
"PHTO" : "ITO",
"KAVP" : "AVP",
"BGCO" : "CNP",
"BGCH" : "JCH",
"VHXX" : "",
"VNKD" : "",
"WRLB" : "LBW",
"KFIT" : "FIT",
"KAVX" : "AVX",
"UADD" : "DMB",
"INGS" : "IGS",
"HAKD" : "ABK",
"VYKI" : "KHM",
"VYKL" : "KMV",
"SCEL" : "SCL",
"ENAN" : "ANX",
"VYKG" : "KET",
"ENAL" : "AES",
"ENAS" : "",
"SCES" : "ESR",
"VYKP" : "KYP",
"VYKT" : "KAW",
"YARG" : "GYL",
"YARM" : "ARM",
"CAX6" : "",
"LFGJ" : "DLE",
"ZMBN" : "UGA",
"ZMBH" : "BYN",
"KMQY" : "",
"LFGW" : "",
"EGTN" : "",
"CKB6" : "YAX",
"HSSS" : "KRT",
"KBEC" : "BEC",
"KBED" : "BED",
"PAWM" : "WMO",
"PAWN" : "WTK",
"PAWB" : "WBQ",
"PAWG" : "WRG",
"HSSJ" : "JUB",
"PAWR" : "",
"PAWT" : "K03",
"HSSM" : "MAK",
"WAAA" : "UPG",
"KMGM" : "MGM",
"KMGJ" : "MGJ",
"KMGE" : "MGE",
"KMGC" : "MGC",
"KMGY" : "MGY",
"KMGW" : "MGW",
"NZIR" : "",
"KBYS" : "BYS",
"GMTA" : "AHU",
"CNM5" : "KIF",
"LTFG" : "GZP",
"LTFD" : "EDO",
"LTFE" : "BJV",
"LTFB" : "",
"LTFC" : "ISE",
"MUTD" : "TND",
"LTFA" : "",
"LTFJ" : "SAW",
"LTFH" : "SZF",
"DNAA" : "ABV",
"SPOL" : "",
"EGHT" : "TSO",
"DNAK" : "AKR",
"GOOY" : "DKR",
"5A8" : "WKK",
"LFGP" : "",
"KPBI" : "PBI",
"WURZ" : "WZB",
"YESP" : "EPR",
"LHBP" : "BUD",
"YLTN" : "LVO",
"KPDK" : "PDK",
"OAMS" : "MZR",
"OAMN" : "MMZ",
"KPDT" : "PDT",
"KPDX" : "PDX",
"YPBO" : "PBO",
"CAE5" : "YWS",
"HKNI" : "NYE",
"HKNW" : "WIL",
"HKNV" : "",
"HKNY" : "NYK",
"LIBA" : "",
"MKBS" : "OCJ",
"FAMS" : "MEZ",
"FAMU" : "",
"CZTM" : "ZTM",
"FAMI" : "",
"FAMJ" : "",
"LSGC" : "",
"FAMN" : "",
"FAMB" : "",
"FAMD" : "AAM",
"MMQT" : "QRO",
"FAMG" : "MGH",
"BAIN" : "",
"CCW3" : "",
"N36" : "WTE",
"KSJT" : "SJT",
"KJZP" : "JZP",
"EBSP" : "",
"EBST" : "",
"EBSU" : "",
"KSJC" : "SJC",
"EBSL" : "",
"OIZB" : "",
"OIZC" : "ZBR",
"CYUY" : "YUY",
"OIZH" : "ZAH",
"FZEN" : "BSU",
"OIZS" : "",
"ZPJH" : "",
"LDDU" : "DBV",
"NFMO" : "MFJ",
"LRMS" : "",
"NFMA" : "MNF",
"LATI" : "TIA",
"SAOL" : "",
"EERU" : "",
"CYRB" : "YRB",
"RKNN" : "KAG",
"CYRA" : "YRA",
"YMRB" : "MOV",
"CYRJ" : "YRJ",
"RKND" : "SHO",
"PHIK" : "",
"CYRL" : "YRL",
"MGCB" : "CBV",
"CYRS" : "YRS",
"CYRQ" : "YRQ",
"CYRV" : "YRV",
"RKNY" : "YNY",
"RKNW" : "WJU",
"FDMS" : "MTS",
"ESSP" : "NRK",
"ESSU" : "",
"ESST" : "TYF",
"ESSV" : "VBY",
"PFWS" : "WSN",
"SCHA" : "CPO",
"ESSA" : "ARN",
"ESSB" : "BMA",
"ESSE" : "ESX",
"ESSD" : "BLE",
"ESSF" : "HLF",
"AUGS" : "AUB",
"ESSK" : "GVX",
"ESSL" : "LPI",
"YMRT" : "",
"KFTK" : "FTK",
"KFTW" : "FTW",
"UUEM" : "KLD",
"KFTY" : "FTY",
"VEBD" : "IXB",
"VEBI" : "SHL",
"VEBK" : "",
"NZMK" : "MZP",
"VEBS" : "BBI",
"FGBT" : "BSG",
"GCLA" : "SPC",
"BIVM" : "VEY",
"ULKK" : "KSZ",
"BIVO" : "VPN",
"SDOW" : "OIA",
"GCLP" : "LPA",
"MYNN" : "NAS",
"OYRN" : "RIY",
"HLGT" : "GHT",
"KOFF" : "OFF",
"HLGD" : "SRX",
"HLGL" : "",
"HLGN" : "TOB",
"OMFJ" : "FJR",
"SYLD" : "",
"ZUTC" : "TCZ",
"YRRB" : "RPB",
"GUCY" : "CKY",
"ZUTR" : "TEN",
"LOXT" : "",
"LBSF" : "SOF",
"ARPT" : "",
"CYHM" : "YHM",
"CYHO" : "YHO",
"NTMD" : "NHV",
"DAAV" : "GJL",
"CYHK" : "YHK",
"NTMN" : "AUQ",
"NTMP" : "UAP",
"NTMU" : "UAH",
"KGKY" : "GKY",
"KHXD" : "HXD",
"LFKC" : "CLY",
"LFKB" : "BIA",
"LFKF" : "FSC",
"SWRD" : "ROO",
"LFKJ" : "AJA",
"LFKO" : "",
"LFKS" : "SOZ",
"SAWS" : "JSM",
"LFKT" : "",
"MUPR" : "",
"CYHC" : "YHC",
"CYOP" : "YOP",
"KCOU" : "COU",
"KCOT" : "COT",
"KCOS" : "COS",
"CYOW" : "YOW",
"CAV3" : "",
"CYOH" : "YOH",
"CYOJ" : "YOJ",
"CYOO" : "YOO",
"CYOA" : "YOA",
"KCOF" : "COF",
"CYOC" : "YOC",
"KCOD" : "COD",
"CYOD" : "YOD",
"KCDW" : "CDW",
"VANP" : "NAG",
"VANR" : "ISK",
"UKKE" : "CKC",
"YPTN" : "KTR",
"UKKG" : "KGO",
"UKKK" : "IEV",
"UKKM" : "GML",
"BGAM" : "AGM",
"BGAP" : "LLU",
"VAND" : "NDC",
"SARS" : "",
"IGM" : "IGM",
"MRPJ" : "PJM",
"MRPM" : "PMZ",
"ENSR" : "SOJ",
"ENSS" : "VAW",
"MRPD" : "",
"KFKL" : "FKL",
"SVSJ" : "",
"FLND" : "NLA",
"MRPV" : "SYQ",
"SCCC" : "CCH",
"VYMS" : "MOG",
"SCCF" : "CJC",
"VYMW" : "MWQ",
"VYMY" : "",
"SCCI" : "PUQ",
"SCCH" : "",
"SCCL" : "",
"BSNT" : "",
"VYMD" : "MDL",
"VYME" : "MGZ",
"SCCY" : "GXQ",
"VYMK" : "MYT",
"VYMM" : "MNU",
"VYMO" : "",
"LGZA" : "ZTH",
"WADT" : "TMC",
"ENSG" : "SOG",
"CHB3" : "",
"ENSA" : "",
"RCFN" : "TTT",
"VTBO" : "TDX",
"OESB" : "",
"RCFG" : "LZN",
"OESL" : "SLF",
"OESH" : "SHW",
"OESK" : "AJF",
"EIKY" : "KIR",
"DNMA" : "MIU",
"VTBK" : "",
"YDNI" : "NLF",
"DAAJ" : "DJG",
"ZMDN" : "",
"MNBR" : "",
"ZMDZ" : "DLZ",
"KSWF" : "SWF",
"DNMN" : "MXJ",
"KFFC" : "FFC",
"KJAC" : "JAC",
"KJAN" : "JAN",
"UWLW" : "ULY",
"UWLL" : "ULV",
"DNMK" : "MDI",
"KJAX" : "JAX",
"HUGU" : "ULU",
"EDLB" : "",
"EDLA" : "ZCA",
"KBGM" : "BGM",
"EDLF" : "",
"EDLE" : "ESS",
"EDLD" : "",
"EDLI" : "",
"EDLH" : "",
"EDLO" : "",
"EDLN" : "MGL",
"EDLM" : "",
"EDLS" : "",
"EDLP" : "PAD",
"EDLW" : "DTM",
"EDLV" : "NRN",
"EDLT" : "",
"PAUN" : "UNK",
"PABI" : "BIG",
"KBGR" : "BGR",
"PAUK" : "AUK",
"NZKT" : "KAT",
"EHHV" : "",
"KMAF" : "MAF",
"KMAE" : "MAE",
"20GA" : "",
"NZKI" : "KBZ",
"NZKK" : "KKE",
"UIBB" : "BTK",
"UIBS" : "UIK",
"FMEE" : "RUN",
"EVRA" : "RIX",
"EDKO" : "",
"FMEP" : "ZSE",
"ZSYN" : "YNZ",
"SPME" : "TBP",
"ZSYC" : "YIC",
"ZSYA" : "YTY",
"SAWC" : "FTE",
"SPMS" : "YMS",
"SPMQ" : "",
"ZSYW" : "YIW",
"ZSYT" : "YNT",
"HATP" : "TIE",
"KPJC" : "",
"YKBR" : "KAX",
"YLVT" : "",
"LBPD" : "PDV",
"EFRY" : "",
"KLFK" : "LFK",
"MMOX" : "OAX",
"EFRO" : "RVN",
"EFRN" : "",
"EFRH" : "",
"FAOB" : "",
"SAWG" : "RGL",
"FAOH" : "DUH",
"KLFT" : "LFT",
"TJBQ" : "BQN",
"VLDZ" : "",
"VCCG" : "GOY",
"VCCA" : "",
"VCCB" : "",
"VCCC" : "RML",
"KSHV" : "SHV",
"KSHR" : "SHR",
"VCCJ" : "JAF",
"VCCK" : "KCT",
"VCCT" : "TRR",
"VCCW" : "WRZ",
"VCCS" : "GIU",
"MTQE" : "",
"KSHD" : "SHD",
"VTUK" : "KKC",
"OITL" : "ADU",
"OITM" : "ACP",
"RJBD" : "SHM",
"RJBE" : "UKB",
"RJBB" : "KIX",
"SSDO" : "DOU",
"OITK" : "",
"RJBK" : "",
"RJBH" : "HIW",
"RJBT" : "TJH",
"SNDT" : "DTI",
"OITZ" : "",
"KNMM" : "NMM",
"OITT" : "TBZ",
"OITP" : "PFQ",
"OITR" : "OMH",
"LRSV" : "SCV",
"LRSB" : "SBZ",
"GFLL" : "FNA",
"LRSM" : "SUJ",
"EPSK" : "OSP",
"UTNN" : "NCU",
"NLWF" : "FUT",
"66CA" : "",
"EEPU" : "EPU",
"UTNU" : "UGC",
"PTPN" : "PNI",
"OPCH" : "CJL",
"YMIA" : "MQL",
"CYTH" : "YTH",
"PHKO" : "KOA",
"SANU" : "UAQ",
"CYTL" : "YTL",
"USHQ" : "EYK",
"CYTA" : "YTA",
"CYTE" : "YTE",
"CYTF" : "YTF",
"CYTZ" : "YTZ",
"SANE" : "SDE",
"SANB" : "",
"SANC" : "CTC",
"SANO" : "",
"SANL" : "IRJ",
"CYTS" : "YTS",
"SANH" : "RHD",
"SANI" : "",
"MMMV" : "LOV",
"KLNS" : "LNS",
"KLNR" : "LNR",
"KLNN" : "LNN",
"KLNK" : "LNK",
"SBBR" : "BSB",
"KLNA" : "LNA",
"KABE" : "ABE",
"KFVE" : "WFK",
"KABI" : "ABI",
"KABR" : "ABR",
"KABQ" : "ABQ",
"KABY" : "ABY",
"VYPA" : "",
"VYPN" : "BSX",
"SBBE" : "BEL",
"VYPU" : "PKK",
"VYPT" : "PBU",
"VELR" : "IXI",
"VYPY" : "",
"GCRR" : "ACE",
"MKNG" : "NEG",
"BIPA" : "PFJ",
"KJMS" : "JMS",
"ULMM" : "MMK",
"VNJL" : "JUM",
"VNJS" : "JMO",
"VNJP" : "",
"MYLD" : "LGI",
"SJHG" : "CFO",
"KIPT" : "IPT",
"MYLS" : "SML",
"LLSD" : "SDV",
"SMJP" : "PBM",
"FWUU" : "ZZU",
"SGES" : "AGT",
"HECW" : "",
"HECA" : "CAI",
"03N" : "UTK",
"YBWP" : "WEI",
"KGIF" : "GIF",
"LEIB" : "IBZ",
"LQMO" : "OMO",
"SWPI" : "PIN",
"NZPM" : "PMR",
"NZPN" : "PCN",
"EHWO" : "WOE",
"LFIG" : "",
"FLLI" : "LVI",
"NZPP" : "PPQ",
"FLLS" : "LUN",
"SWBC" : "BAZ",
"BGGH" : "GOH",
"SVLF" : "LFR",
"FUSS" : "FUS",
"BGGN" : "JGO",
"SVLO" : "",
"EDXM" : "",
"UERS" : "",
"FUSN" : "FUX",
"MRNS" : "NOB",
"LRBC" : "BCM",
"LRBM" : "BAY",
"YHYN" : "HIS",
"ENEV" : "EVE",
"FYRU" : "NDU",
"SCAR" : "ARI",
"KHWD" : "HWD",
"EIIR" : "INQ",
"KCMI" : "CMI",
"KCMH" : "CMH",
"KHWO" : "HWO",
"XRRC" : "",
"RCDC" : "PIF",
"EIIM" : "IOR",
"FSIA" : "SEZ",
"RCDI" : "",
"KCMX" : "CMX",
"YDHD" : "",
"PAAL" : "PML",
"UERO" : "",
"KSUN" : "SUN",
"LEVC" : "VLC",
"KSUA" : "SUA",
"KSUX" : "SUX",
"KSUU" : "SUU",
"FZRF" : "FMI",
"KSUS" : "SUS",
"HUEN" : "EBB",
"WPDB" : "",
"WPDL" : "DIL",
"EDNM" : "",
"EDNL" : "",
"LTAV" : "",
"NZPG" : "",
"EDNY" : "FDH",
"LOGG" : "",
"LOGO" : "",
"FTLD" : "",
"EDNR" : "",
"KBYH" : "BYH",
"KDVT" : "DVT",
"HSWW" : "WUU",
"NZMS" : "MRO",
"KMCW" : "MCW",
"YTLP" : "",
"NZMC" : "GTN",
"KMCC" : "MCC",
"KMCE" : "MCE",
"NZMF" : "MFN",
"KMCF" : "MCF",
"KMCI" : "MCI",
"KMCK" : "MCK",
"NZMO" : "TEU",
"YMRY" : "MYA",
"KMCO" : "MCO",
"KMCN" : "MCN",
"FLMG" : "",
"SVSZ" : "STB",
"SVST" : "SOM",
"SVSR" : "SFD",
"SVSP" : "SFH",
"SVSO" : "STD",
"MUPB" : "",
"SVSE" : "",
"SVSB" : "",
"SVSA" : "SVZ",
"SPCL" : "PCL",
"SPCH" : "",
"DNMM" : "LOS",
"TFFA" : "DSD",
"ZSGZ" : "KOW",
"TXKF" : "BDA",
"LHNY" : "",
"AYWK" : "WWK",
"AYWD" : "WBM",
"KPHX" : "PHX",
"KPHD" : "PHD",
"KPHF" : "PHF",
"KPHL" : "PHL",
"KPHN" : "PHN",
"KPHK" : "",
"HKBU" : "",
"HKBR" : "",
"RJTA" : "",
"HKBA" : "",
"SKYP" : "EYP",
"OAIX" : "BPM",
"MMMY" : "MTY",
"MMMX" : "MEX",
"MMMZ" : "MZT",
"SBBU" : "BAU",
"MMMT" : "MTT",
"SBBW" : "",
"SBBV" : "BVB",
"SBBQ" : "",
"SBBP" : "BJP",
"KYIP" : "YIP",
"MMMM" : "MLM",
"KMUO" : "MUO",
"SBBI" : "BFH",
"SBBH" : "PLU",
"KWRB" : "WRB",
"KWRL" : "WRL",
"MMMD" : "MID",
"SBBG" : "BGX",
"MMMA" : "MAM",
"KWRI" : "WRI",
"FAAL" : "ADY",
"FAAN" : "",
"SELJ" : "",
"ZYQQ" : "NDG",
"EFPO" : "POR",
"FAAG" : "AGZ",
"EFPI" : "",
"FAAB" : "ALJ",
"EFPU" : "",
"EFPY" : "",
"KGRI" : "GRI",
"KDVO" : "",
"KDVL" : "DVL",
"MHNJ" : "GJA",
"SNFX" : "SXX",
"07MT" : "",
"UNWW" : "NOZ",
"SSBL" : "BNU",
"ZWKC" : "KCA",
"ZWKL" : "KRL",
"ZWKM" : "KRY",
"ZWKN" : "KJI",
"KTMK" : "",
"KGRF" : "GRF",
"KGRB" : "GRB",
"OYGD" : "AAY",
"SELA" : "LGQ",
"KGRK" : "GRK",
"PTRO" : "ROR",
"VI73" : "",
"KGRR" : "GRR",
"SELT" : "",
"AGBT" : "BPF",
"PACK" : "CYF",
}
# Reverse mapping (IATA -> ICAO).  ICAO codes with no IATA equivalent are
# stored as empty strings in AIRPORTS_ICAO_TO_IATA and are skipped here.
# BUG FIX: `.iteritems()` exists only on Python 2 dicts; `.items()` works on
# both Python 2 and Python 3.
AIRPORTS_IATA_TO_ICAO = {iata: icao for icao, iata in AIRPORTS_ICAO_TO_IATA.items() if iata}

if __name__ == '__main__':
    # Smoke test: look up one known IATA code.
    print(AIRPORTS_IATA_TO_ICAO["HEO"])
| StarcoderdataPython |
1715564 | <filename>utils/utils_data.py<gh_stars>0
import logging
import os
import torch
logger = logging.getLogger(__name__)
class InputExample(object):
    """A single training/test example for classification."""

    def __init__(self, guid, sentence, label):
        """Constructs a InputExample.

        Args:
            guid: Unique id for the example (e.g. "train-1").
            sentence: str. The raw text of the example; may keep the trailing
                newline from the data file.
            label: The class label for the sentence, or -1 when the example
                is unlabeled (e.g. test data).
        """
        self.guid = guid
        self.sentence = sentence
        self.label = label
class InputFeatures(object):
    """A single set of features of data (one model input window)."""

    def __init__(self, guid, input_ids, input_mask, segment_ids, label_ids):
        # guid: id of the source InputExample -- several feature windows can
        # share one guid when a long example is split by a sliding window.
        self.guid = guid
        # Token ids, attention mask and segment ids, each padded to the fixed
        # maximum sequence length.
        self.input_ids = input_ids
        self.input_mask = input_mask
        self.segment_ids = segment_ids
        # List containing a single label id, or None for unlabeled examples.
        self.label_ids = label_ids
def read_examples_from_file(data_dir, mode):
    """Read `<mode>.txt` from `data_dir` into a list of InputExample.

    Each line is either "label<TAB>sentence" or a bare sentence; bare
    sentences get label -1 (unlabeled).  The sentence keeps whatever trailing
    newline the file line had.
    """
    path = os.path.join(data_dir, "{}.txt".format(mode))
    examples = []
    with open(path, encoding="utf-8") as handle:
        # guids are 1-based and count every line of the file.
        for index, line in enumerate(handle, start=1):
            parts = line.split('\t', 1)
            if len(parts) == 1:
                label, sentence = -1, parts[0]
            else:
                label, sentence = parts[0], parts[1]
            examples.append(
                InputExample(
                    guid="{}-{}".format(mode, index),
                    sentence=sentence,
                    label=label,
                )
            )
    return examples
def convert_examples_to_features_spare(
    examples,
    label_list,
    max_seq_length,
    tokenizer
):
    """Convert `InputExample`s to `InputFeatures` using the tokenizer's own
    padding/truncation (`encode_plus`); no sliding window is applied.

    Args:
        examples: list of InputExample.
        label_list: list of all label values; list position defines label id.
        max_seq_length: fixed sequence length to pad/truncate to.
        tokenizer: HuggingFace-style tokenizer providing `encode_plus`.

    Returns:
        list of InputFeatures, one per example.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for example in examples:
        inputs = tokenizer.encode_plus(
            example.sentence,
            padding='max_length',
            truncation=True,
            max_length=max_seq_length,
            return_tensors="pt",
        )
        input_ids = inputs['input_ids'][0].numpy()
        input_mask = inputs['attention_mask'][0].numpy()
        segment_ids = inputs['token_type_ids'][0].numpy()
        # -1 marks unlabeled (test) examples; keep label_ids as None for them.
        if example.label == -1:
            label_ids = None
        else:
            label_ids = [label_map[example.label]]
        features.append(
            InputFeatures(
                # BUG FIX: guid is a required positional argument of
                # InputFeatures; omitting it raised TypeError at runtime.
                guid=example.guid,
                input_ids=input_ids,
                input_mask=input_mask,
                segment_ids=segment_ids,
                label_ids=label_ids,
            )
        )
    return features
def convert_examples_to_features(
    examples,
    label_list,
    max_seq_length,
    tokenizer,
    cls_token_at_end=False,
    cls_token="[CLS]",
    cls_token_segment_id=1,
    sep_token="[SEP]",
    sep_token_extra=False,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    sequence_a_segment_id=0,
    mask_padding_with_zero=True,
    window = 50
):
    """Convert `InputExample`s to `InputFeatures`, splitting long texts into
    overlapping token-level windows.

    `cls_token_at_end` defines the location of the CLS token:
        - False (default, BERT/XLM pattern): [CLS] + A + [SEP] + B + [SEP]
        - True (XLNet/GPT pattern): A + [SEP] + B + [SEP] + [CLS]
    `cls_token_segment_id` is the segment id of the CLS token (0 for BERT,
    2 for XLNet).  `window` is the stride, in tokens, between consecutive
    windows of one long example; all windows of an example share its guid
    and label ids.

    NOTE(review): for long texts, tokens past the start of the last emitted
    window can be dropped (the `while start + span < len(tokens)` loop never
    emits a final short window) -- presumably acceptable for classification,
    confirm if full coverage is required.
    """
    label_map = {label: i for i, label in enumerate(label_list)}

    features = []
    for (ex_index, example) in enumerate(examples):
        # if ex_index % 10000 == 0:
        #     logger.info("Writing example %d of %d", ex_index, len(examples))
        # valid_mask = []
        guid = example.guid
        tokens = tokenizer.tokenize(example.sentence)
        # -1 marks unlabeled (test) examples.
        if example.label == -1:
            label_ids = None
        else:
            label_ids = [label_map[example.label]]
        # Account for [CLS] and [SEP] with "- 2" and with "- 3" for RoBERTa.
        special_tokens_count = 3 if sep_token_extra else 2
        # sliding window at token level (deal with long text)
        if len(tokens) > max_seq_length - special_tokens_count:
            start = 0
            span = max_seq_length - special_tokens_count
            while start + span < len(tokens):
                tokens_now = tokens[start:start+span]
                input_ids, input_mask, segment_ids = padding_mask(tokens_now, max_seq_length, tokenizer, cls_token_at_end,
                                                                  cls_token,
                                                                  cls_token_segment_id, sep_token, sep_token_extra,
                                                                  pad_on_left, pad_token,
                                                                  pad_token_segment_id, sequence_a_segment_id,
                                                                  mask_padding_with_zero)
                features.append(
                    InputFeatures(guid=guid,
                                  input_ids=input_ids,
                                  input_mask=input_mask,
                                  segment_ids=segment_ids,
                                  label_ids=label_ids)
                )
                # Overlapping windows: advance by the stride, not the span.
                start += window
        else:
            # Short text: a single window covers the whole token sequence.
            input_ids, input_mask, segment_ids = padding_mask(tokens, max_seq_length, tokenizer, cls_token_at_end,
                                                              cls_token,
                                                              cls_token_segment_id, sep_token, sep_token_extra,
                                                              pad_on_left, pad_token,
                                                              pad_token_segment_id, sequence_a_segment_id,
                                                              mask_padding_with_zero)
            features.append(
                InputFeatures(guid=guid,
                              input_ids=input_ids,
                              input_mask=input_mask,
                              segment_ids=segment_ids,
                              label_ids=label_ids)
            )
    return features
def padding_mask(
    tokens,
    max_seq_length,
    tokenizer,
    cls_token_at_end=False,
    cls_token="[CLS]",
    cls_token_segment_id=1,
    sep_token="[SEP]",
    sep_token_extra=False,
    pad_on_left=False,
    pad_token=0,
    pad_token_segment_id=0,
    sequence_a_segment_id=0,
    mask_padding_with_zero=True,):
    """Add special tokens, convert tokens to ids, and pad to max_seq_length.

    Args:
        tokens: list of token strings (without special tokens); the list must
            fit within max_seq_length minus the special-token count.
        max_seq_length: fixed output length for all three returned lists.
        tokenizer: object providing `convert_tokens_to_ids`.
        (Remaining keyword arguments configure the CLS/SEP placement and the
        padding scheme; defaults match the BERT convention.)

    Returns:
        (input_ids, input_mask, segment_ids) -- three lists, each exactly
        max_seq_length long.
    """
    special_tokens_count = 3 if sep_token_extra else 2
    assert len(tokens) <= max_seq_length - special_tokens_count
    # BUG FIX: work on a copy -- the original `tokens += [sep_token]` extended
    # the caller's list in place.
    tokens = list(tokens) + [sep_token]
    if sep_token_extra:
        # roberta uses an extra separator b/w pairs of sentences
        tokens += [sep_token]
    segment_ids = [sequence_a_segment_id] * len(tokens)
    if cls_token_at_end:
        # XLNet/GPT pattern: A + [SEP] (+ [SEP]) + [CLS]
        tokens += [cls_token]
        segment_ids += [cls_token_segment_id]
    else:
        # BERT/XLM pattern: [CLS] + A + [SEP] (+ [SEP])
        tokens = [cls_token] + tokens
        segment_ids = [cls_token_segment_id] + segment_ids
    input_ids = tokenizer.convert_tokens_to_ids(tokens)
    # The mask has 1 for real tokens and 0 for padding tokens (flipped when
    # mask_padding_with_zero is False). Only real tokens are attended to.
    input_mask = [1 if mask_padding_with_zero else 0] * len(input_ids)
    # Zero-pad up to the sequence length.
    padding_length = max_seq_length - len(input_ids)
    if pad_on_left:
        input_ids = ([pad_token] * padding_length) + input_ids
        input_mask = ([0 if mask_padding_with_zero else 1] * padding_length) + input_mask
        segment_ids = ([pad_token_segment_id] * padding_length) + segment_ids
    else:
        input_ids += [pad_token] * padding_length
        input_mask += [0 if mask_padding_with_zero else 1] * padding_length
        segment_ids += [pad_token_segment_id] * padding_length
    assert len(input_ids) == max_seq_length
    assert len(input_mask) == max_seq_length
    assert len(segment_ids) == max_seq_length
    return input_ids, input_mask, segment_ids
def collate_fn(batch):
    """Stack per-sample tensor tuples into batch tensors and trim every
    sequence dimension to the longest real length in the batch.

    `batch` is a list of equal-arity tensor tuples; the second entry of each
    tuple must be the attention mask, whose row sums give the real sequence
    lengths.  Returns a tuple of batch tensors.
    """
    stacked = tuple(torch.stack(column) for column in zip(*batch))
    # Longest unpadded sequence in this batch, from the attention mask.
    longest = torch.sum(stacked[1], dim=-1, keepdim=False).max().item()
    trimmed = []
    for tensor in stacked:
        # Only tensors with a sequence dimension get trimmed; scalars/labels
        # pass through untouched.
        trimmed.append(tensor[:, :longest] if tensor.dim() >= 2 else tensor)
    return tuple(trimmed)
def get_labels(path):
    """Read one label per line from `path`; return None when no path is given."""
    if not path:
        return None
    with open(path, "r") as handle:
        return handle.read().splitlines()
| StarcoderdataPython |
3335765 | <reponame>ElectronicBabylonianLiterature/dictionary
from typing import Mapping, Type
from marshmallow import Schema, fields, post_load
from marshmallow_oneofschema import OneOfSchema
from ebl.bibliography.application.reference_schema import ReferenceSchema
from ebl.schemas import NameEnum
from ebl.transliteration.application.token_schemas import OneOfTokenSchema
from ebl.transliteration.domain.language import Language
from ebl.transliteration.domain.markup import (
BibliographyPart,
EmphasisPart,
LanguagePart,
StringPart,
)
class StringPartSchema(Schema):
    """(De)serializes a StringPart -- plain markup text."""

    text = fields.String(required=True)

    @post_load
    def make_part(self, data, **kwargs) -> StringPart:
        # Rebuild the domain object once the payload has been validated.
        return StringPart(data["text"])
class EmphasisPartSchema(Schema):
    """(De)serializes an EmphasisPart -- emphasized markup text."""

    text = fields.String(required=True)

    @post_load
    def make_part(self, data, **kwargs) -> EmphasisPart:
        # Rebuild the domain object once the payload has been validated.
        return EmphasisPart(data["text"])
class LanguagePartSchema(Schema):
    """(De)serializes a LanguagePart -- transliteration tokens in a language."""

    language = NameEnum(Language, required=True)
    # load_default=None keeps payloads without a "tokens" key loadable.
    tokens = fields.Nested(OneOfTokenSchema, many=True, load_default=None)

    @post_load
    def make_part(self, data, **kwargs) -> LanguagePart:
        return LanguagePart.of_transliteration(data["language"], data["tokens"])
class BibliographyPartSchema(Schema):
    """(De)serializes a BibliographyPart wrapping a bibliography Reference."""

    reference = fields.Nested(ReferenceSchema, required=True)

    @post_load
    def make_part(self, data, **kwargs) -> BibliographyPart:
        return BibliographyPart(data["reference"])
class OneOfNoteLinePartSchema(OneOfSchema):
    """Polymorphic part schema: the "type" field of the payload selects which
    concrete part schema (de)serializes it."""

    type_field = "type"
    type_schemas: Mapping[str, Type[Schema]] = {
        "StringPart": StringPartSchema,
        "EmphasisPart": EmphasisPartSchema,
        "LanguagePart": LanguagePartSchema,
        "BibliographyPart": BibliographyPartSchema,
    }
| StarcoderdataPython |
1642754 | #!/usr/bin/env python
# coding: utf-8
# In[50]:
# 6. naloga
# Source:
# https://towardsdatascience.com/building-a-k-nearest-neighbors-k-nn-model-with-scikit-learn-51209555453a
# https://medium.com/@svanillasun/how-to-deal-with-cross-validation-based-on-knn-algorithm-compute-auc-based-on-naive-bayes-ff4b8284cff4
# In[2]:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

# Load the dataset and keep only the first 30 rows for this exercise.
dataAll = pd.read_csv("data/reg/181.csv")
data = dataAll.head(30)
# Bare expressions below are leftovers from the notebook export; in a plain
# script they evaluate to a value that is simply discarded.
data
data.shape
# In[41]:
# The triple-quoted string below is commented-out alternative code kept from
# the notebook; it is a no-op string literal here.
"""
# we separate X and y values in 2 tables
X = data.drop(columns=['Y'])
y = y = data['Y']
X
y
"""
# Select the five feature columns and the target column.
X = data[['X1', 'X2', 'X3', 'X4', 'X5']]
y = data['Y']
# In[1]:
from sklearn.model_selection import cross_val_score
# BUG FIX: KNeighborsClassifier was used below but never imported (NameError).
from sklearn.neighbors import KNeighborsClassifier
import numpy as np
# Create a new KNN model with k=3 neighbours.
knn_cv = KNeighborsClassifier(n_neighbors=3)
# Evaluate the model with 5-fold cross-validation.
cv_scores = cross_val_score(knn_cv, X, y, cv=5)
# Print each fold's accuracy and their mean.
print(cv_scores)
print('cv_scores mean:{}'.format(np.mean(cv_scores)))
| StarcoderdataPython |
1683127 | <gh_stars>0
import math
import os
import sys
import numpy as np
import torch
from ising_model import (
data,
l0_l2constrained_ise,
l0_l2constrained_logreg,
l1_constrained_logreg,
l1_ise,
l1_logreg,
metrics,
)
# Parameters (taken from the command line).
current_id = sys.argv[1]       # unique run id; also names the output file
dataset_root = sys.argv[2]     # root directory for results
N = int(sys.argv[3])           # number of samples
P = int(sys.argv[4])           # number of variables (spins)
rho = float(sys.argv[5])       # threshold parameter passed to recover_graph
type_matrix = sys.argv[6]      # coupling-matrix generation scheme
VALID_TYPES = (
    'periodic_graph_uniform_sign',
    'periodic_graph_random_sign',
    'random_graph_uniform_sign',
    'random_graph_random_sign',
    'random_graph_uniform_sign_uniform_val',
    'random_graph_random_sign_uniform_val',
)
# Validate explicitly instead of `assert`, which is stripped under `python -O`.
if type_matrix not in VALID_TYPES:
    raise ValueError(
        'unknown type_matrix {!r}; expected one of {}'.format(type_matrix, VALID_TYPES)
    )

# Folder for results.  exist_ok=True already makes creation idempotent, so no
# separate os.path.exists check is needed.
string = type_matrix + '_N_{}_P_{}_rho_{}'.format(N, P, rho)
folder = os.path.join(dataset_root, string)
os.makedirs(folder, exist_ok=True)
savetofile = os.path.join(dataset_root, string, current_id + '.npz')
# Data generation: seeded by the run id so each run is reproducible.
if P <= 16:
    W, data_train, data_val = data.generate_train_val_data(
        N, P, rho, type_matrix, seed=int(current_id)
    )
else:
    # For larger P fall back to the Gibbs-sampling generator (full
    # enumeration over 2**P states is presumably intractable beyond P=16 --
    # confirm in ising_model.data).
    W, data_train, data_val = data.generate_train_val_data_gibbs(
        N, P, rho, type_matrix, seed=int(current_id)
    )
# Support sizes (sparsity levels K) tried by the L0-L2 constrained methods.
Ks = [7, 6, 5, 4, 3, 2, 1]
#############################
############# LR ############
#############################
# L1 LR with scikit, no validation
L1LogReg = l1_logreg.L1_LogReg(data_train, data_val)
L1LogReg.estimate_W(validate=False, use_scikit=True)
W_L1_LR = L1LogReg.W
# Symmetrize the estimate in place: W <- (W + W.T) / 2.
W_L1_LR += W_L1_LR.T
W_L1_LR /= 2
n_vertex_L1_LR_scikit, recovered_L1_LR_scikit = metrics.recover_graph(W, W_L1_LR, rho)
# L1 LR with scikit, validation: performs better in practice
L1LogReg = l1_logreg.L1_LogReg(data_train, data_val)
L1LogReg.estimate_W(validate=True, use_scikit=True)
W_L1_LR_val = L1LogReg.W
W_L1_LR_val += W_L1_LR_val.T
W_L1_LR_val /= 2
n_vertex_L1_LR_scikit_val, recovered_L1_LR_scikit_val = metrics.recover_graph(
    W, W_L1_LR_val, rho
)
#############################
###### LR Constrained #######
#############################
# L1 constrained LR with scikit, validation
L1ConstrainedLogReg = l1_constrained_logreg.L1Constrained_LogReg(data_train, data_val)
L1ConstrainedLogReg.estimate_W(validate=True)
W_L1_constrained_LR = L1ConstrainedLogReg.W
# Symmetrize the estimate in place: W <- (W + W.T) / 2.
W_L1_constrained_LR += W_L1_constrained_LR.T
W_L1_constrained_LR /= 2
n_vertex_L1_constrained_LR, recovered_L1_constrained_LR = metrics.recover_graph(
    W, W_L1_constrained_LR, rho
)
#############################
######### L0-L2 LR ##########
#############################
# L0-L2 LR with warm-start
vals_L0L2_LR = []
bics_L0L2_LR = []
n_vertices_L0L2_LR = []
recovered_L0L2_LR = []
Ws_L0L2_LR = []
# Warm-start from the validated L1 solution, then reuse each solution as the
# starting point for the next (smaller) support size K.
W_init = W_L1_LR_val.copy()
for K in Ks:
    L0L2ConstrainedLogReg = l0_l2constrained_logreg.L0L2Constrained_LogReg(
        data_train, data_val, W_init=W_init
    )
    # The continuation heuristic allows us to not tune the regularization parameter
    L0L2ConstrainedLogReg.estimate_W(validate=False, K=K)
    W_L0L2_LR = L0L2ConstrainedLogReg.W
    W_L0L2_LR += W_L0L2_LR.T
    W_L0L2_LR /= 2
    n_vertices_K, recovered_K = metrics.recover_graph(W, W_L0L2_LR, 0)  # rho is unused for L0-L2 LR
    val = L0L2ConstrainedLogReg.best_val_lik
    sparsity = len(np.where(W_L0L2_LR != 0)[0])
    # BIC-style score: 2*N*val + (#nonzeros)*log(N).  `val` is presumably the
    # validation negative log-likelihood -- confirm in l0_l2constrained_logreg.
    bic = 2 * N * val + sparsity * math.log(N)
    vals_L0L2_LR.append(val)
    bics_L0L2_LR.append(bic)
    n_vertices_L0L2_LR.append(n_vertices_K)
    recovered_L0L2_LR.append(recovered_K)
    Ws_L0L2_LR.append(W_L0L2_LR)
    W_init = W_L0L2_LR.copy()
#############################
############# ISE ###########
#############################
# ISE, no validation
L1ISE = l1_ise.L1_ISE(data_train, data_val)
L1ISE.estimate_W(validate=False)
W_L1_ISE = L1ISE.W
# Symmetrize the estimate in place: W <- (W + W.T) / 2.
W_L1_ISE += W_L1_ISE.T
W_L1_ISE /= 2
n_vertex_L1_ISE, recovered_L1_ISE = metrics.recover_graph(W, W_L1_ISE, rho)
# ISE, validation: performs better in practice
L1ISE = l1_ise.L1_ISE(data_train, data_val)
L1ISE.estimate_W(validate=True)
W_L1_ISE_val = L1ISE.W
W_L1_ISE_val += W_L1_ISE_val.T
W_L1_ISE_val /= 2
n_vertex_L1_ISE_val, recovered_L1_ISE_val = metrics.recover_graph(W, W_L1_ISE_val, rho)
#############################
######### L0-L2 ISE #########
#############################
# L0-L2 ISE, warm-start (mirrors the L0-L2 LR loop above, with the validated
# L1 ISE solution as the initial point).
W_init = W_L1_ISE_val.copy()
vals_L0L2_ISE = []
bics_L0L2_ISE = []
n_vertices_L0L2_ISE = []
recovered_L0L2_ISE = []
Ws_L0L2_ISE = []
for K in Ks:
    L0L2ISE = l0_l2constrained_ise.L0L2Constrained_ISE(
        data_train, data_val, W_init=W_init
    )
    # The continuation heuristic allows us to not tune the regularization parameter
    L0L2ISE.estimate_W(validate=False, K=K)
    W_L0L2_ISE = L0L2ISE.W
    W_L0L2_ISE += W_L0L2_ISE.T
    W_L0L2_ISE /= 2
    n_vertices_K, recovered_K = metrics.recover_graph(W, W_L0L2_ISE, 0)  # rho is unused for L0-L2 ISE
    val = L0L2ISE.best_val_lik
    sparsity = len(np.where(W_L0L2_ISE != 0)[0])
    # BIC-style score, as in the L0-L2 LR loop above.
    bic = 2 * N * val + sparsity * math.log(N)
    vals_L0L2_ISE.append(val)
    bics_L0L2_ISE.append(bic)
    n_vertices_L0L2_ISE.append(n_vertices_K)
    recovered_L0L2_ISE.append(recovered_K)
    Ws_L0L2_ISE.append(W_L0L2_ISE)
    W_init = W_L0L2_ISE.copy()
# Save every estimate and metric for this run into one compressed .npz
# archive named after the run id.
np.savez_compressed(
    savetofile,
    Ks=Ks,
    W_gt=W,
    # L1-LR
    n_vertex_L1_LR_scikit=n_vertex_L1_LR_scikit,
    recovered_L1_LR_scikit=recovered_L1_LR_scikit,
    W_L1_LR=W_L1_LR,
    n_vertex_L1_LR_scikit_val=n_vertex_L1_LR_scikit_val,
    recovered_L1_LR_scikit_val=recovered_L1_LR_scikit_val,
    W_L1_LR_val=W_L1_LR_val,
    # L1-constrained LR
    W_L1_constrained_LR=W_L1_constrained_LR,
    n_vertex_L1_constrained_LR=n_vertex_L1_constrained_LR,
    recovered_L1_constrained_LR=recovered_L1_constrained_LR,
    # L0-L2 LR
    vals_L0L2_LR=vals_L0L2_LR,
    bics_L0L2_LR=bics_L0L2_LR,
    n_vertices_L0L2_LR=n_vertices_L0L2_LR,
    recovered_L0L2_LR=recovered_L0L2_LR,
    Ws_L0L2_LR=Ws_L0L2_LR,
    # L1-ISE
    n_vertex_L1_ISE=n_vertex_L1_ISE,
    recovered_L1_ISE=recovered_L1_ISE,
    W_L1_ISE=W_L1_ISE,
    n_vertex_L1_ISE_val=n_vertex_L1_ISE_val,
    recovered_L1_ISE_val=recovered_L1_ISE_val,
    W_L1_ISE_val=W_L1_ISE_val,
    # L0-L2 ISE
    vals_L0L2_ISE=vals_L0L2_ISE,
    bics_L0L2_ISE=bics_L0L2_ISE,
    n_vertices_L0L2_ISE=n_vertices_L0L2_ISE,
    recovered_L0L2_ISE=recovered_L0L2_ISE,
    Ws_L0L2_ISE=Ws_L0L2_ISE,
)
| StarcoderdataPython |
3299374 | # Copyright 2020 NXP Semiconductors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''
Queue of events and logic around it.
The queue class also maintains current absolute time.
'''
from bisect import bisect_right
class TimeQueue:
    """Event queue ordered by absolute time.

    Conceptually the queue holds (absolute time, element) pairs, stored as
    two parallel lists (``timequeue`` and ``elementqueue``) that always have
    the same length and are sorted by time.  The queue also tracks the
    current absolute time, which advances whenever an element is popped.
    """

    def __init__(self):
        self.timequeue = []
        self.elementqueue = []
        self.time = 0

    def add_element(self, time_delta, element):
        """Schedule ``element`` at ``time_delta`` after the current time."""
        when = self.time + time_delta
        position = bisect_right(self.timequeue, when)
        self.timequeue.insert(position, when)
        self.elementqueue.insert(position, element)

    def get0(self):
        """Peek at the earliest (time, element) pair without removing it."""
        if not self.timequeue:
            # Empty queue: report a virtual None element at infinite time so
            # callers can compare times without special-casing emptiness.
            return float("inf"), (None, None)
        return self.timequeue[0], self.elementqueue[0]

    def pop(self):
        """Remove and return the earliest pair, advancing the current time."""
        when = self.timequeue.pop(0)
        element = self.elementqueue.pop(0)
        self.time = when
        return when, element

    def delete(self, cond):
        """Drop every queued element for which ``cond(element)`` is true."""
        kept = [
            (when, element)
            for when, element in zip(self.timequeue, self.elementqueue)
            if not cond(element)
        ]
        self.timequeue = [when for when, _ in kept]
        self.elementqueue = [element for _, element in kept]

    def __len__(self):
        """Number of queued elements."""
        return len(self.timequeue)
| StarcoderdataPython |
1716617 | from .dsl import *
| StarcoderdataPython |
4826825 | <filename>tests/test_0341-parquet-reader-writer.py
# BSD 3-Clause License; see https://github.com/scikit-hep/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import os
import pytest
import numpy
import awkward1
# Skip every test in this module when pyarrow.parquet is not installed.
pyarrow_parquet = pytest.importorskip("pyarrow.parquet")
def test_write_read(tmp_path):
    """Round-trip plain and partitioned arrays through Parquet files, reading
    them back both eagerly and lazily."""
    array1 = awkward1.Array([[1, 2, 3], [], [4, 5], [], [], [6, 7, 8, 9]])
    # Same content split into partitions of (at most) two outer entries.
    array2 = awkward1.repartition(array1, 2)
    array3 = awkward1.Array(
        [
            {"x": 1, "y": 1.1},
            {"x": 2, "y": 2.2},
            {"x": 3, "y": 3.3},
            {"x": 4, "y": 4.4},
            {"x": 5, "y": 5.5},
            {"x": 6, "y": 6.6},
            {"x": 7, "y": 7.7},
            {"x": 8, "y": 8.8},
            {"x": 9, "y": 9.9},
        ]
    )
    array4 = awkward1.repartition(array3, 2)
    awkward1.to_parquet(array1, os.path.join(tmp_path, "array1.parquet"))
    awkward1.to_parquet(array2, os.path.join(tmp_path, "array2.parquet"))
    awkward1.to_parquet(array3, os.path.join(tmp_path, "array3.parquet"))
    awkward1.to_parquet(array4, os.path.join(tmp_path, "array4.parquet"))
    # Eager reads must reproduce the original contents.
    assert awkward1.to_list(
        awkward1.from_parquet(os.path.join(tmp_path, "array1.parquet"))
    ) == awkward1.to_list(array1)
    assert awkward1.to_list(
        awkward1.from_parquet(os.path.join(tmp_path, "array2.parquet"))
    ) == awkward1.to_list(array2)
    assert awkward1.to_list(
        awkward1.from_parquet(os.path.join(tmp_path, "array3.parquet"))
    ) == awkward1.to_list(array3)
    assert awkward1.to_list(
        awkward1.from_parquet(os.path.join(tmp_path, "array4.parquet"))
    ) == awkward1.to_list(array4)
    # Lazy (deferred) reads must give the same contents as well.
    assert awkward1.to_list(
        awkward1.from_parquet(os.path.join(tmp_path, "array1.parquet"), lazy=True)
    ) == awkward1.to_list(array1)
    assert awkward1.to_list(
        awkward1.from_parquet(os.path.join(tmp_path, "array2.parquet"), lazy=True)
    ) == awkward1.to_list(array2)
    assert awkward1.to_list(
        awkward1.from_parquet(os.path.join(tmp_path, "array3.parquet"), lazy=True)
    ) == awkward1.to_list(array3)
    assert awkward1.to_list(
        awkward1.from_parquet(os.path.join(tmp_path, "array4.parquet"), lazy=True)
    ) == awkward1.to_list(array4)
def test_explode(tmp_path):
    """explode_records=True writes lists-of-records as one list column per
    field, so reading back yields records of lists."""
    array3 = awkward1.Array(
        [
            [{"x": 1, "y": 1.1}, {"x": 2, "y": 2.2}, {"x": 3, "y": 3.3}],
            [],
            [{"x": 4, "y": 4.4}, {"x": 5, "y": 5.5}],
            [],
            [],
            [
                {"x": 6, "y": 6.6},
                {"x": 7, "y": 7.7},
                {"x": 8, "y": 8.8},
                {"x": 9, "y": 9.9},
            ],
        ]
    )
    array4 = awkward1.repartition(array3, 2)
    awkward1.to_parquet(array3, os.path.join(tmp_path, "array3.parquet"), explode_records=True)
    awkward1.to_parquet(array4, os.path.join(tmp_path, "array4.parquet"), explode_records=True)
    # Both the plain and the partitioned writes read back "exploded".
    assert awkward1.from_parquet(os.path.join(tmp_path, "array3.parquet")) == [
        {"x": [1, 2, 3], "y": [1.1, 2.2, 3.3]},
        {"x": [], "y": []},
        {"x": [4, 5], "y": [4.4, 5.5]},
        {"x": [], "y": []},
        {"x": [], "y": []},
        {"x": [6, 7, 8, 9], "y": [6.6, 7.7, 8.8, 9.9]},
    ]
    assert awkward1.from_parquet(os.path.join(tmp_path, "array4.parquet")) == [
        {"x": [1, 2, 3], "y": [1.1, 2.2, 3.3]},
        {"x": [], "y": []},
        {"x": [4, 5], "y": [4.4, 5.5]},
        {"x": [], "y": []},
        {"x": [], "y": []},
        {"x": [6, 7, 8, 9], "y": [6.6, 7.7, 8.8, 9.9]},
    ]
def test_oamap_samples():
    """Read pre-generated sample Parquet files (from the oamap project) and
    check their contents against the expected Python structures."""
    assert awkward1.to_list(
        awkward1.from_parquet("tests/samples/list-depths-simple.parquet")
    ) == [
        {"list0": 1, "list1": [1]},
        {"list0": 2, "list1": [1, 2]},
        {"list0": 3, "list1": [1, 2, 3]},
        {"list0": 4, "list1": [1, 2, 3, 4]},
        {"list0": 5, "list1": [1, 2, 3, 4, 5]},
    ]
    assert awkward1.to_list(
        awkward1.from_parquet("tests/samples/nullable-record-primitives.parquet")
    ) == [
        {
            "u1": None,
            "u4": 1,
            "u8": None,
            "f4": 1.100000023841858,
            "f8": None,
            "raw": b"one",
            "utf8": "one",
        },
        {
            "u1": 1,
            "u4": None,
            "u8": 2,
            "f4": 2.200000047683716,
            "f8": None,
            "raw": None,
            "utf8": None,
        },
        {
            "u1": None,
            "u4": None,
            "u8": 3,
            "f4": None,
            "f8": None,
            "raw": b"three",
            "utf8": None,
        },
        {
            "u1": 0,
            "u4": None,
            "u8": 4,
            "f4": None,
            "f8": 4.4,
            "raw": None,
            "utf8": None,
        },
        {
            "u1": None,
            "u4": 5,
            "u8": None,
            "f4": None,
            "f8": 5.5,
            "raw": None,
            "utf8": "five",
        },
    ]
    assert awkward1.to_list(
        awkward1.from_parquet("tests/samples/nullable-record-primitives-simple.parquet")
    ) == [
        {"u4": None, "u8": 1},
        {"u4": None, "u8": 2},
        {"u4": None, "u8": 3},
        {"u4": None, "u8": 4},
        {"u4": None, "u8": 5},
    ]
    assert awkward1.to_list(
        awkward1.from_parquet("tests/samples/record-primitives.parquet")
    ) == [
        {
            "u1": 0,
            "u4": 1,
            "u8": 1,
            "f4": 1.100000023841858,
            "f8": 1.1,
            "raw": b"one",
            "utf8": "one",
        },
        {
            "u1": 1,
            "u4": 2,
            "u8": 2,
            "f4": 2.200000047683716,
            "f8": 2.2,
            "raw": b"two",
            "utf8": "two",
        },
        {
            "u1": 1,
            "u4": 3,
            "u8": 3,
            "f4": 3.299999952316284,
            "f8": 3.3,
            "raw": b"three",
            "utf8": "three",
        },
        {
            "u1": 0,
            "u4": 4,
            "u8": 4,
            "f4": 4.400000095367432,
            "f8": 4.4,
            "raw": b"four",
            "utf8": "four",
        },
        {
            "u1": 0,
            "u4": 5,
            "u8": 5,
            "f4": 5.5,
            "f8": 5.5,
            "raw": b"five",
            "utf8": "five",
        },
    ]
    # Remaining sample files are read (historically) without checking values:
    # awkward1.to_list(awkward1.from_parquet("tests/samples/list-depths.parquet"))
    # awkward1.to_list(awkward1.from_parquet("tests/samples/list-depths-records-list.parquet"))
    # awkward1.to_list(awkward1.from_parquet("tests/samples/list-depths-records.parquet"))
    # awkward1.to_list(awkward1.from_parquet("tests/samples/list-depths-strings.parquet"))
    # awkward1.to_list(awkward1.from_parquet("tests/samples/list-lengths.parquet"))
    # awkward1.to_list(awkward1.from_parquet("tests/samples/nonnullable-depths.parquet"))
    # awkward1.to_list(awkward1.from_parquet("tests/samples/nullable-depths.parquet"))
    # awkward1.to_list(awkward1.from_parquet("tests/samples/nullable-list-depths.parquet"))
    # awkward1.to_list(awkward1.from_parquet("tests/samples/nullable-list-depths-records-list.parquet"))
    # awkward1.to_list(awkward1.from_parquet("tests/samples/nullable-list-depths-records.parquet"))
    # awkward1.to_list(awkward1.from_parquet("tests/samples/nullable-list-depths-strings.parquet"))
    # Arrow 2.0.0 broke this (or was it broken before?)
    # assert awkward1.to_list(
    #     awkward1.from_parquet("tests/samples/nullable-levels.parquet")
    # ) == [
    #     {"whatever": {"r0": {"r1": {"r2": {"r3": 1}}}}},
    #     {"whatever": {"r0": {"r1": {"r2": {"r3": None}}}}},
    #     {"whatever": {"r0": {"r1": {"r2": None}}}},
    #     {"whatever": {"r0": {"r1": None}}},
    #     {"whatever": {"r0": {"r1": None}}},
    #     {"whatever": {"r0": {"r1": None}}},
    #     {"whatever": {"r0": {"r1": {"r2": None}}}},
    #     {"whatever": {"r0": {"r1": {"r2": {"r3": None}}}}},
    #     {"whatever": {"r0": {"r1": {"r2": {"r3": 1}}}}},
    # ]
| StarcoderdataPython |
6025 | from django.contrib import admin
#from .models import *
from . import models
# Register your models here.
# Expose ClimbModel in the Django admin interface.
admin.site.register(models.ClimbModel)
| StarcoderdataPython |
1692512 | <reponame>T3kton/contractor_plugins<filename>contractor_plugins/AWS/migrations/0001_initial.py<gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
def load_foundation_blueprints( app, schema_editor ):
  """
  Seed the database with the generic AWS EC2 blueprint data.

  Creates the 'generic-awsec2-micro' FoundationBluePrint, attaches its
  'create' and 'destroy' scripts, and links the new foundation blueprint to
  the pre-existing 'generic-linux' StructureBluePrint.  Models are resolved
  through ``app.get_model`` so the historical (migration-time) model state
  is used, as required inside a RunPython operation.
  """
  FoundationBluePrint = app.get_model( 'BluePrint', 'FoundationBluePrint' )
  StructureBluePrint = app.get_model( 'BluePrint', 'StructureBluePrint' )
  Script = app.get_model( 'BluePrint', 'Script' )
  BluePrintScript = app.get_model( 'BluePrint', 'BluePrintScript' )

  # Foundation blueprint describing a t2.micro instance with one interface.
  fbp = FoundationBluePrint( name='generic-awsec2-micro', description='Generic AWS EC2 t2.micro Instance' )
  fbp.config_values = { 'awsec2_type': 't2.micro' }
  fbp.template = {}
  fbp.foundation_type_list = [ 'AWSEC2' ]
  fbp.physical_interface_names = [ 'eth0' ]
  fbp.full_clean()
  fbp.save()

  # 'create' script: boots the instance and records its id, MACs and IPs.
  s = Script( name='create-generic-awsec2', description='Create AWS EC2 Instance' )
  s.script = """# Create Generic AWS EC2 Instance
begin( description="Instance Creation" )
instance = aws.create()
foundation.awsec2_instance_id = instance[ 'instance_id' ]
foundation.set_interface_macs( interface_list=instance[ 'interface_list' ] )
foundation.set_ip_addresses( ip_address_map=instance[ 'ip_address_map' ] )
end
"""
  s.full_clean()
  s.save()
  BluePrintScript( blueprint=fbp, script=s, name='create' ).save()

  # 'destroy' script: powers the instance off and releases it.
  # (Fixed typo in the stored script comment: "Destory" -> "Destroy".)
  s = Script( name='destroy-generic-awsec2', description='Destroy AWS EC2 Instance' )
  s.script = """# Destroy Generic AWS EC2 Instance
begin( description="Instance Destruction" )
foundation.power_off()
foundation.destroy()
foundation.awsec2_instance_id = None
end
"""
  s.full_clean()
  s.save()
  BluePrintScript( blueprint=fbp, script=s, name='destroy' ).save()

  # Allow generic Linux structures to be built on this foundation type.
  sbp = StructureBluePrint.objects.get( name='generic-linux' )
  sbp.foundation_blueprint_list.add( fbp )
class Migration(migrations.Migration):
  # Initial migration for the AWS plugin: creates the AWSEC2Foundation model
  # (a Building.Foundation subclass storing the EC2 instance id) and then
  # seeds the generic EC2 blueprints via load_foundation_blueprints.

  dependencies = [
    ('Building', '0002_initial2'),
  ]

  operations = [
    migrations.CreateModel(
      name='AWSEC2Foundation',
      fields=[
        ('foundation_ptr', models.OneToOneField(auto_created=True, primary_key=True, parent_link=True, serialize=False, to='Building.Foundation')),
        # max_length=19 presumably sized for modern EC2 instance ids -- confirm.
        ('awsec2_instance_id', models.CharField(max_length=19, blank=True, null=True)),
      ],
      bases=('Building.foundation',),
    ),
    migrations.RunPython( load_foundation_blueprints ),
  ]
| StarcoderdataPython |
107834 | <gh_stars>1-10
# ## Importing modules
# Figure 3: Importing needed resources
import cocotb
from cocotb.triggers import FallingEdge
import random
# ### The tinyalu_utils module
# All testbenches use tinyalu_utils, so store it in a central
# place and add its path to the sys path so we can import it
from pathlib import Path
parent_path = Path("..").resolve()
import sys # noqa: E402
sys.path.insert(0, str(parent_path))
from tinyalu_utils import Ops, alu_prediction, logger, get_int # noqa: E402
# ## Setting up the cocotb TinyALU test
# Figure 7: The start of the TinyALU. Reset the DUT
@cocotb.test()
async def alu_test(dut):
    """Exercise every TinyALU opcode once and check the results.

    On each falling clock edge the testbench either issues a new command
    (random operands, next opcode), waits for completion, or checks the DUT
    result against alu_prediction.  Functional coverage of opcodes is
    tracked in a set; the final assert relays pass/fail to cocotb.
    """
    passed = True
    cvg = set()  # functional coverage
    # Reset the DUT for one clock cycle.
    await FallingEdge(dut.clk)
    dut.reset_n.value = 0
    dut.start.value = 0
    await FallingEdge(dut.clk)
    dut.reset_n.value = 1
    # ### Sending commands
    # Figure 8: Creating one transaction for each operation
    cmd_count = 1
    op_list = list(Ops)
    num_ops = len(op_list)
    while cmd_count <= num_ops:
        await FallingEdge(dut.clk)
        st = get_int(dut.start)
        dn = get_int(dut.done)
        # ### Sending a command and waiting for it to complete
        # Figure 9: Creating a TinyALU command
        if st == 0 and dn == 0:
            aa = random.randint(0, 255)
            bb = random.randint(0, 255)
            op = op_list.pop(0)
            cvg.add(op)
            dut.A.value = aa
            dut.B.value = bb
            dut.op.value = op
            dut.start.value = 1
        # Figure 10: Asserting that a failure state never happens
        if st == 0 and dn == 1:
            raise AssertionError("DUT Error: done set to 1 without start")
        # Figure 11: If we are in an operation, continue
        if st == 1 and dn == 0:
            continue
        # ### Checking the result
        # Figure 12: The operation is complete
        if st == 1 and dn == 1:
            dut.start.value = 0
            cmd_count += 1
            result = get_int(dut.result)
            # Figure 13: Checking results against the prediction
            pr = alu_prediction(aa, bb, op)
            if result == pr:
                logger.info(
                    f"PASSED: {aa:2x} {op.name} {bb:2x} = {result:04x}")
            else:
                logger.error(
                    f"FAILED: {aa:2x} {op.name} {bb:2x} ="
                    f" {result:04x} - predicted {pr:04x}")
                passed = False
    # ### Finishing the test
    # Figure 14: Checking functional coverage using a set
    if len(set(Ops) - cvg) > 0:
        logger.error(f"Functional coverage error. Missed: {set(Ops)-cvg}")
        passed = False
    else:
        logger.info("Covered all operations")
    # Figure 15: This assertion relays pass/fail to cocotb
    assert passed
| StarcoderdataPython |
3306922 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import local_evaluation_for_loop
from datetime import datetime
from tf import train_tensorflow_for_loop
if __name__ == '__main__':
    # Train with TensorFlow, then score the model with the local evaluation.
    elapsed_time, gin_bindings = train_tensorflow_for_loop.train_tf_main()
    final_scores = local_evaluation_for_loop.eval_main()
    # Sample values kept for offline debugging of the report formatting:
    # gin_bindings = [
    #     "dataset.name = '{}'".format('aaa'),
    #     "model.model = @beta_tc_vae()",
    #     "beta_tc_vae.beta = 4"
    # ]
    # final_scores = {'dci': 0.123797058589091, 'factor_vae_metric': 0.2824, 'sap_score': 0.002599999999999942,
    #                 'mig': 0.012375536130820422, 'irs': 0.623302371088628}
    # Timestamped report file so repeated runs never overwrite each other.
    result_path = './results/final_score_tf_' + datetime.now().strftime("%Y%m%d_%H%M%S") + '.txt'
    # NOTE(review): 'w+' opens read/write; plain 'w' would suffice here.
    with open(result_path, 'w+') as fw:
        fw.write('Final Score:\n\n')
        fw.write('dci: ' + str(final_scores['dci']) + '\n')
        fw.write('factor_vae_metric: ' + str(final_scores['factor_vae_metric']) + '\n')
        fw.write('sap_score: ' + str(final_scores['sap_score']) + '\n')
        fw.write('mig: ' + str(final_scores['mig']) + '\n')
        fw.write('irs: ' + str(final_scores['irs']) + '\n\n')
        fw.write('elapsed time: ' + str(elapsed_time) + '\n\n')
        fw.write('gin_bindings: \n\n')
        for gin_b in gin_bindings:
            fw.write(gin_b + '\n')
        fw.write('\n')
        fw.write('\n##################################################\n\n')
        # Append the gin config used for this run, for reproducibility.
        with open('./tf/model.gin', 'r') as fr:
            for line in fr.readlines():
                fw.write(line)
| StarcoderdataPython |
1799253 | import numpy as np
from ray import tune
# Experiment definition for ray.tune: stop condition, per-trial resources and
# the hyper-parameter search space (grid over dataset variants x label budgets).
exp_args = {
    'stop': {'is_finished': True},
    'resume': False,
    'verbose': True,
    'checkpoint_freq': 0,
    'checkpoint_at_end': False,
    'num_samples': 1,
    'resources_per_trial': {'cpu': 2, 'gpu': 1},
    'config': {
        # Synthetic dataset variant (4-character code; semantics defined by SynDataset).
        'SynDataset.dataset_choice': tune.grid_search(
            ['0000','0100','0110','0001','0101','0010',
             '0111','1000','1011','1100','1110','1111']),
        # Number of labelled examples in the semi-supervised setting.
        'BaseDataset.semisupervised': tune.grid_search([100, 250, 1000, 4000, 20000,
                                                        50000, 100000, 200000]),
        # Smaller label budgets get a shorter LR milestone schedule.
        'scheduler.milestones': tune.sample_from(lambda spec: [5,200] if spec.config['BaseDataset.semisupervised'] < 20000 else [5,600]),
        # Pretrained AMDIM weights matched to the chosen dataset variant.
        'Wrapper.pretrained': tune.sample_from(lambda spec: 'pretrained-amdim/' + spec.config['SynDataset.dataset_choice']),
    }
}
| StarcoderdataPython |
116800 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Mar 30 08:29:21 2019
@author: hhouse
"""
import pandas as pd
# Load datasets as pandas dataframes
df_nyc = pd.read_csv("stage3_format_nyc.csv")
df_25pct = pd.read_csv("stage3_format_nyc_25pct.csv")
# Drop columns that are not needed for the label comparison.
del df_nyc['confidence']
del df_nyc['mid']
del df_25pct['confidence']
del df_25pct['mid']
print(df_nyc.columns)
print(df_25pct.columns)
# Keep only the rows whose label does NOT appear in the other dataset,
# i.e. the labels exclusive to each source.
df_unique_nyc = df_nyc[~df_nyc.label.isin(df_25pct.label)]
print(df_unique_nyc)
df_unique_25pct = df_25pct[~df_25pct.label.isin(df_nyc.label)]
print(df_unique_25pct)
print("NYC unique labels: " + str(df_unique_nyc['label'].nunique()))
print("NYC 25pct unique labels: " + str(df_unique_25pct['label'].nunique()))
# Saving out new dataframes to CSV
df_unique_nyc.to_csv('stage4_unique_nyc.csv')
df_unique_25pct.to_csv('stage4_unique_nyc_25pct.csv')
4803031 | from django.core.management import setup_environ
import os
import sys
sys.path.append(os.path.dirname(os.path.join('..','wp',__file__)))
import settings
setup_environ(settings)
#==================================#
from arp.models import ConservationFeature as F
from django.template.defaultfilters import slugify
def output(level,val,crumbs, target=0.5, penalty=0.5):
    """Print one tree node: its label plus the target/penalty slider markup.

    level   -- nesting depth, used only to indent the emitted HTML
    val     -- text shown for this node
    crumbs  -- list of ancestor labels; slugified and joined to build ids
    target  -- initial 'Proportion of Total Value' slider position
    penalty -- initial 'Importance Weighting' slider position
    """
    # Element ids encode the full path so every node's sliders are unique.
    # (Note: 'id' shadows the builtin, kept for byte-compatibility.)
    id = '---'.join([slugify(x) for x in crumbs])
    print " "*level, "<span>", val,'</span>'
    print " "*level, '''<span class="sliders">
<table>
<tr>
<td>Proportion of Total Value</td>
<td><input type="text" class="slidervalue targetvalue" id="target---%(id)s" value="%(target)s"/></td>
<td><div class="slider" id="slider_target---%(id)s"></div></td>
</tr>
<tr>
<td>Importance Weighting</td>
<td><input type="text" class="slidervalue penaltyvalue" id="penalty---%(id)s" value="%(penalty)s"/></td>
<td><div class="slider" id="slider_penalty---%(id)s"></div></td>
</tr>
</table>
</span>''' % {'id': id, 'target': target, 'penalty': penalty}
def header():
    """Print the HTML head (jQuery/treeview/slider scripts, inline CSS) and
    open the focal-species form; footer() closes what is opened here."""
    print """<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml">
<head>
<meta http-equiv="content-type" content="text/html; charset=iso-8859-1"/>
<title>Focal Species</title>
<!--
<script type="text/javascript" src="http://ajax.googleapis.com/ajax/libs/jquery/1.2.6/jquery.min.js"></script>
-->
<script src="http://ajax.googleapis.com/ajax/libs/jquery/1.5/jquery.min.js" type="text/javascript"></script>
<script src="http://ajax.googleapis.com/ajax/libs/jqueryui/1.8.11/jquery-ui.min.js" type="text/javascript"></script>
<script type="text/javascript" src="http://jqueryui.com/ui/jquery.ui.slider.js"></script>
<script src="./treeview/jquery.treeview.js" type="text/javascript"></script>
<script type="text/javascript">
$(document).ready(function(){
$("#focalspecies").treeview({
collapsed: true
});
var params_update = function() {
var html = "";
$('#params_out').html(html);
var targets = {};
var penalties = {};
$('.targetvalue:visible').each( function(index) {
var xid = $(this).attr("id");
var id = "#" + xid;
xid = xid.replace(/^target---/,''); // Remove preceding identifier
xid = xid.replace(/---$/,''); // Remove trailing ---
targets[xid] = parseFloat($(id).val());
});
$('.penaltyvalue:visible').each( function(index) {
var xid = $(this).attr("id");
var id = "#" + xid;
xid = xid.replace(/^penalty---/,''); // Remove preceding identifier
xid = xid.replace(/---$/,''); // Remove trailing ---
penalties[xid] = parseFloat($(id).val());
});
html += JSON.stringify(targets);
html += JSON.stringify(penalties);
$('#params_out').text(html);
$.post('http://wp.hestia.ecotrust.org/arp/test_params/',
{'input_targets': JSON.stringify(targets), 'input_penalties': JSON.stringify(penalties)},
function(data) {
console.log(data);
$('#server_out').text(JSON.stringify(data));
},
'json'
);
};
$('.slidervalue').each( function(index) {
var id = $(this).attr("id");
var slider_id = "#slider_" + id;
id = "#" + id;
$(slider_id).slider({
range: 'min',
min : 0,
max : 1,
step : 0.01,
change : function(event, ui) {
$(id).val($(this).slider('value'));
},
slide : function(event, ui) {
$(id).val($(this).slider('value'));
}
});
$(slider_id).slider('value', $(id).val());
$(id).change( function(){
$(slider_id).slider("value", $(id).val());
});
});
$('#refresh_params').click(function(event){
params_update();
event.preventDefault();
});
});
</script>
<link rel="stylesheet" href="./treeview/jquery.treeview.css" />
<!-- TODO remove this when in panel -->
<link rel="stylesheet" href="./treeview/jquery-widgets.css" />
<style type="text/css">
#params { float: right; padding: 10px; border: 1px black solid; margin: 10px; width:500px; }
li.collapsable > span.sliders { display: none; }
li.expandable > span.sliders { color: black; display: inline; }
td { padding-right:6px; font-size: 80%; }
.slider { margin-bottom: 1px; }
.slidervalue { font-size: 80% }
.ui-slider .ui-slider-handle { position: absolute; z-index: 2; width: 0.7em; height: 0.7em;
cursor: default; border: 1px solid #b3b3b3/*{borderColorDefault}*/;
background: #d6d6d6/*{bgColorDefault}*/; font-weight: normal/*{fwDefault}*/; color: #555555/*{fcDefault}*/; }
.ui-slider-horizontal { height: .4em; border: 1px solid #888888/*{borderColorContent}*/;
background: #eeeeee/*{bgColorContent}*/; color: #222222/*{fcContent}*/; }
.ui-slider-horizontal .ui-slider-range { top: 0; height: 100%; }
.ui-slider-horizontal .ui-slider-range-min { left: 0; background:#9a9a9a}
.slider { width: 150px; }
.slidervalue { width: 45px ! important;}
</style>
</head>
<body>
<div id="params">
<span> Parameters to be submitted </span> <a href="#" id="refresh_params">[Refresh]</a>
<div id="params_out"></div>
<hr/>
<span> Server response </span>
<div id="server_out"></div>
</div>
<h3>Set Proptions and Weights for Focal Fish Species</h3>
<form action="postit.html" id="focalspecies_form">
<span>Focal Fish Species</span>
"""
def footer():
    """Close the form and the HTML document opened by header()."""
    print """ <input type="submit"/></form></body></html>"""
def main():
    """Emit the nested <ul> tree of conservation features (up to 4 levels).

    Walks the distinct level1..level4 values of ConservationFeature (F),
    printing a node (label + sliders via output()) for each value and
    opening a child <ul> only where children exist at the next level.

    NOTE(review): the level-2 <ul> is opened unconditionally but closed only
    when L2 is non-empty, which emits unbalanced markup for leaf level-1
    nodes -- confirm whether this is intended.
    """
    L1 = F.objects.values_list('level1',flat=True).distinct()
    print "<ul id='focalspecies'>"
    for val1 in L1:
        print " <li>"
        output(1,val1,[val1])
        L2 = F.objects.filter(level1=val1).values_list('level2',flat=True).distinct().exclude(level2=None)
        print " <ul>"
        for val2 in L2:
            print " <li>"
            output(2,val2,[val1,val2])
            L3 = F.objects.filter(level2=val2).values_list('level3',flat=True).distinct().exclude(level3=None)
            if len(L3) > 0: print " <ul>"
            for val3 in L3:
                print " <li>"
                output(3,val3,[val1,val2,val3])
                L4 = F.objects.filter(level3=val3).values_list('level4',flat=True).distinct().exclude(level4=None)
                if len(L4) > 0: print " <ul>"
                for val4 in L4:
                    print " <li>"
                    output(4,val4,[val1,val2,val3,val4])
                    print " </li>"
                if len(L4) > 0: print " </ul>"
                print " </li>"
            if len(L3) > 0: print " </ul>"
            print " </li>"
        if len(L2) > 0: print " </ul>"
        print " </li>"
    print "</ul>"
if __name__ == '__main__':
    # Emit the complete HTML page: head/scripts, species tree, then footer.
    header()
    main()
    footer()
| StarcoderdataPython |
1676479 | <filename>dipy/io/gradients.py<gh_stars>1-10
from __future__ import division, print_function, absolute_import
from os.path import splitext
from ..utils.six import string_types
import numpy as np
def read_bvals_bvecs(fbvals, fbvecs):
    """
    Read b-values and b-vectors from the disk

    Parameters
    ----------
    fbvals : str
       path of file with b-values, or None if you don't want to read bvals

    fbvecs : str
       path of file with b-vectors, or None if you don't want to read bvecs

    Returns
    -------
    bvals : array, (N,) or None
    bvecs : array, (N, 3) or None

    Notes
    -----
    Files can be either '.bvals'/'.bvecs' or '.txt' or '.npy' (containing
    arrays stored with the appropriate values).
    """
    # Loop over the provided inputs, reading each one in turn and adding them
    # to this list:
    vals = []
    for this_fname in [fbvals, fbvecs]:
        # If the input was None, we don't read anything and move on:
        if this_fname is None:
            vals.append(None)
        else:
            if isinstance(this_fname, string_types):
                base, ext = splitext(this_fname)
                # Text formats (or no extension) go through loadtxt;
                # .npy arrays are loaded directly.
                if ext in ['.bvals', '.bval', '.bvecs', '.bvec', '.txt', '']:
                    vals.append(np.squeeze(np.loadtxt(this_fname)))
                elif ext == '.npy':
                    vals.append(np.squeeze(np.load(this_fname)))
                else:
                    e_s = "File type %s is not recognized"%ext
                    raise ValueError(e_s)
            else:
                raise ValueError('String with full path to file is required')

    # Once out of the loop, unpack them:
    bvals, bvecs = vals[0], vals[1]

    # If bvecs is None, you can just return now w/o making more checks:
    if bvecs is None:
        return bvals, bvecs

    # Normalize orientation to (N, 3): files may store the three vector
    # components as rows or as columns.
    if bvecs.shape[1] > bvecs.shape[0]:
        bvecs = bvecs.T
    if min(bvecs.shape) != 3:
        raise IOError('bvec file should have three rows')
    if bvecs.ndim != 2:
        raise IOError('bvec file should be saved as a two dimensional array')

    # If bvals is None, you don't need to check that they have the same shape:
    if bvals is None:
        return bvals, bvecs

    if max(bvals.shape) != max(bvecs.shape):
        raise IOError('b-values and b-vectors shapes do not correspond')

    return bvals, bvecs
| StarcoderdataPython |
1645489 | <filename>apps/dbcache/migrations/0002_auto_20200226_1841.py
# Generated by Django 3.0.3 on 2020-02-26 22:41
from django.db import migrations, models
class Migration(migrations.Migration):
    # Adds an index on 'expires' -- presumably so cache-expiry queries over
    # the djangocache table avoid a full table scan (confirm with the app).

    dependencies = [
        ("dbcache", "0001_initial"),
    ]

    operations = [
        migrations.AddIndex(
            model_name="djangocache",
            index=models.Index(fields=["expires"], name="django-cache_expires"),
        ),
    ]
| StarcoderdataPython |
1679691 | from GitMarco.tf import utils, metrics, basic
import numpy as np
from GitMarco.tf.losses import chamfer_distance, euclidian_dist_loss
from GitMarco.tf.optimization import OptiLoss, GradientOptimizer
from GitMarco.tf.pointnet import Pointnet, PointnetAe
from GitMarco.tf.utils import limit_memory, random_dataset
import pandas as pd
from GitMarco.tf.basic import basic_dense_model
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
def test_limit_memory():
    """Smoke test: limiting TF GPU memory growth should not raise."""
    limit_memory()
def test_random_dataset():
    """Smoke test: the default random dataset generator should not raise."""
    utils.random_dataset()
def test_r_squared():
    """Smoke test: r_squared accepts two same-length random vectors."""
    y = np.random.rand(100)
    predictions = np.random.rand(100)
    metrics.r_squared(y, predictions)
def test_basic_dense_model():
    """Build and summarize a small dense model with a non-default optimizer."""
    model = basic.basic_dense_model(input_shape=(10,),
                                    output_shape=1,
                                    optimizer='adadelta')
    model.summary()
def test_chamfer_loss():
    """Chamfer distance should evaluate on two random point-cloud batches."""
    x = utils.random_dataset(shape=(32, 1024, 3))
    y = utils.random_dataset(shape=(32, 1024, 3))
    chamfer_distance(x, y)
def test_pointnet():
    """End-to-end smoke test: build a Pointnet regressor and evaluate it."""
    data = random_dataset(shape=(32, 4096, 3))
    test_data = random_dataset(shape=(32, 4096, 3))
    field = random_dataset(shape=(32, 4096, 2))
    field_test = random_dataset(shape=(32, 4096, 2))
    model = Pointnet(n_points=4096,)
    # create_model() returns the underlying keras model; rebind to it.
    model = model.create_model()
    # model.model_2_image()
    model.summary()
    model.compile(loss='mse', optimizer='adam')
    model.evaluate(data, field)
def test_pointnet_ae():
    """Smoke test for the point-cloud autoencoder with global/local outputs."""
    data = random_dataset(shape=(32, 400, 3))
    global_v = random_dataset(shape=(32, 1))
    global_v_2 = random_dataset(shape=(32, 1))
    local_v = random_dataset(shape=(32, 400, 3))
    # NOTE: 'dfferent_out_for_globals' is the (misspelled) project kwarg name.
    model = PointnetAe(n_geometry_points=data.shape[1],
                       n_global_variables=2,
                       n_local_variables=3,
                       type_decoder='cnn',
                       n_cnn_dec_layer=4,
                       dfferent_out_for_globals=True,
                       cnn_dec_filters=[64, 32, 32, 16]
                       )
    model = model.create_model()
    model.summary()
    model.compile(loss=[chamfer_distance, ['mse', 'mse'], 'mse'], optimizer='adam')
    # Run on CPU so the test does not require a GPU.
    with tf.device('CPU:0'):
        model.evaluate(data, [data, [global_v, global_v_2], local_v])
def test_euclidean_distance():
    """Euclidean distance loss should evaluate with correction enabled."""
    x = utils.random_dataset(shape=(32, 1024, 3))
    y = utils.random_dataset(shape=(32, 1024, 3))
    euclidian_dist_loss(x, y, correction=True)
class Loss(OptiLoss):
    """Optimization objective: the model's first scalar output for a sample."""

    def __init__(self, params=None):
        super(Loss, self).__init__(params)

    def __call__(self, sample):
        # self.model is provided by OptiLoss; take output row 0, element 0.
        return self.model(sample)[0][0]
def test_gradient_optimizer():
    """Exercise the full GradientOptimizer workflow twice (10 then 100 iters)."""
    # Run on CPU so the test does not require a GPU.
    with tf.device('CPU:0'):
        df = pd.DataFrame(random_dataset(shape=(32, 4)).numpy())
        df.columns = ['x1', 'x2', 'y1', 'y2']
        model = basic_dense_model(input_shape=(2,), output_shape=2)
        model.compile(optimizer='Adam')
        model.fit(random_dataset(shape=(32, 2)), random_dataset(shape=(32, 2)), epochs=1)
        optimizer = GradientOptimizer(
            model,
            df,
            StandardScaler(),
            Loss(),
            n_features=2,
            n_labels=2,
            iterations=10
        )
        optimizer.run()
        optimizer.history()
        optimizer.get_best_sample()
        optimizer.get_results()
        optimizer.compare_bounds()
        # Reset and re-run with more iterations to exercise state reuse.
        optimizer.reset()
        optimizer.iterations = 100
        optimizer.run()
        optimizer.history()
        optimizer.get_best_sample()
        optimizer.get_results()
| StarcoderdataPython |
3254607 | <filename>Predict.py
import kashgari
from configparser import ConfigParser
def predict(model, text):
    """Tag a sentence with the model and return the tag-sequence list.

    Spaces are replaced by commas before tagging (matching the training
    format), and the sentence is fed to the model as a list of single
    characters.
    """
    normalized = text.replace(" ", ",")
    chars = list(normalized)
    return model.predict([chars])
if __name__ == '__main__':
    # Resolve the saved-model directory from config.ini ([global] model_dir).
    cf = ConfigParser()
    cf.read('./config.ini')
    model_path = cf.get('global', 'model_dir')
    model = kashgari.utils.load_model(model_path)
    # Interactive loop: tag one sentence per prompt until interrupted.
    while True:
        text = input('Sentence: ')
        tag_list = predict(model, text)
        print(' '.join(tag_list[0]))
| StarcoderdataPython |
1648734 | <filename>EJC/simulation_so.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 1 16:47:17 2018
@author: esteban
"""
import numpy as np
import solver as sol
from scipy.special import gamma
import matplotlib.pyplot as plt
import matplotlib as mpl
label_size = 14
mpl.rcParams['xtick.labelsize'] = label_size
mpl.rcParams['font.size'] = label_size
mpl.rcParams['agg.path.chunksize'] = 10000
def invdW1(x, q):
    # Inverse-derivative formula, variant 1 (exponential weight).
    # sol.odd_pow keeps the sign of x for the fractional power 1-q.
    return 1/q*np.exp(np.abs(x)**q)*sol.odd_pow(x, 1-q)
def proddW1(x, q):
    """Product-of-derivatives formula, variant 1 (depends on |x| only)."""
    ax = np.abs(x)
    axq = ax ** q
    # (exp(axq)/q)**2 == (1/q)**2 * exp(2*axq)
    return (np.exp(axq) / q) ** 2 * (q * axq + 1 - q) * ax ** (1 - 2 * q)
def invdW2(x, q):
    # Inverse-derivative formula, variant 2 (polynomial weight).
    return np.pi/(2*q)*(sol.odd_pow(x, 1+q) + sol.odd_pow(x, 1-q))
def proddW2(x, q):
    """Product-of-derivatives formula, variant 2 (depends on |x| only)."""
    ax = np.abs(x)
    axq = ax ** q
    c = np.pi / (2 * q)
    # axq**2 == |x|**(2*q)
    return c * c * ((1 + q) * axq ** 2 + 1 - q) * (ax + ax ** (1 - 2 * q))
def invdW3(x, q, a):
    # Inverse-derivative formula, variant 3 (extra shape parameter a).
    return 1/(a*q)*(np.abs(x)**q+a)**2*sol.odd_pow(x, 1-q)
def proddW3(x, q, a):
    """Product-of-derivatives formula, variant 3 (shape parameter a)."""
    ax = np.abs(x)
    axq = ax ** q
    shifted = axq + a
    scale = 1.0 / (a * q)
    return scale * scale * shifted ** 3 * (2 * axq + (1 - q) * shifted) * ax ** (1 - 2 * q)
def invdW4(x, q, a):
    # Inverse-derivative formula, variant 4 (gamma-scaled exponential).
    return gamma(a)/(q)*np.exp(np.abs(x)**q)*sol.odd_pow(x, 1-a*q)
def proddW4(x, q, a):
    """Product-of-derivatives formula, variant 4 (gamma-scaled exponential)."""
    ax = np.abs(x)
    axq = ax ** q
    pref = gamma(a) / q
    return pref * pref * np.exp(2 * axq) * (q * axq + 1 - a * q) * ax ** (1 - 2 * a * q)
def invdW(i, x, q, a):
    """Evaluate the i-th inverse-derivative formula.

    Variants 1-2 take only (x, q); variants 3-4 also use the shape
    parameter a.  Any other i returns None (implicitly), exactly like
    the original if/elif chain.
    """
    pair_funcs = {1: invdW1, 2: invdW2}
    triple_funcs = {3: invdW3, 4: invdW4}
    if i in pair_funcs:
        return pair_funcs[i](x, q)
    if i in triple_funcs:
        return triple_funcs[i](x, q, a)
def proddW(i, x, q, a):
    """Evaluate the i-th product-of-derivatives formula.

    Variants 1-2 take only (x, q); variants 3-4 also use the shape
    parameter a.  Any other i returns None (implicitly), exactly like
    the original if/elif chain.
    """
    pair_funcs = {1: proddW1, 2: proddW2}
    triple_funcs = {3: proddW3, 4: proddW4}
    if i in pair_funcs:
        return pair_funcs[i](x, q)
    if i in triple_funcs:
        return triple_funcs[i](x, q, a)
def system(t, x):
    """Closed-loop dynamics xdot = f(t, x) for the simulated plant.

    The parameters (g, mc, m, l) and the f/g expressions look like a
    cart-pole style model tracking a sinusoidal reference -- confirm with
    the accompanying paper.  The controller combines the invdW/proddW
    weight-function terms (variant i=4) in a sliding-mode construction.
    """
    # System parameters
    g, mc, m, l = 9.8, 1, 0.1, 0.5
    # Controller parameters
    r1, r2, r3 = 0.91, 0.09, 2
    q1, q2, a1, a2 = 0.5, 0.5, 1, 1.1
    i = 4
    # Reference
    r = np.sin(0.5*np.pi*t)
    dr = 0.5*np.pi*np.cos(0.5*np.pi*t)
    d2r = -(0.5*np.pi)**2*r
    # State variables
    x1, x2 = x[0], x[1]
    # Error variables
    e1, e2 = x1-r, x2-dr
    # Disturbance (matched; enters the second state equation)
    Delta = np.sin(10*x1)+np.cos(x2)
    # Sliding variable
    s = e2 + sol.odd_pow(sol.odd_pow(e2,2)+
                         2/(r1**2)*sol.odd_pow(invdW(i,e1,q1,a1),2),0.5)
    # Controller
    us = -1/r2*invdW(i,s,q2,a2)-r3*np.sign(invdW(i,s,q2,a2))-2/(r1**2)*proddW(i,e1,q1,a1)*np.sign(invdW(i,s,q2,a2))
    f = (g*np.sin(x1)-m*l*x2**2*np.cos(x1)*np.sin(x1)/(mc+m))/(l*(4/3-m*np.cos(x1)**2/(mc+m)))-d2r
    # NOTE(review): 'g' is rebound here to the input gain, shadowing gravity;
    # any later use of gravity in this scope would silently get the gain.
    g = (np.cos(x1)/(mc+m))/(l*(4/3-m*np.cos(x1)**2/(mc+m)))
    u = (-f+us)/g
    return np.array([x2, f+g*u+Delta])
# Simulation parameters (start/final time, step); i is re-set below before use.
t0, tf, h, i = 0, 1.2, 1e-6, 0
# Simulation (sol.ode1 -- presumably first-order/Euler; confirm in solver)
t, x = sol.ode1(system, np.array([-1, 0]), t0, tf, h)
# States
x1, x2 = x
# Reference
r = np.sin(0.5*np.pi*t)
dr = 0.5*np.pi*np.cos(0.5*np.pi*t)
d2r = -(0.5*np.pi)**2*r
# Error variables
e1, e2 = x1-r, x2-dr
# Controller reconstruction for plotting/metrics.
# NOTE(review): the constants below duplicate those inside system();
# keep the two blocks in sync.
# System parameters
g, mc, m, l = 9.8, 1, 0.1, 0.5
# Controller parameters
r1, r2, r3 = 0.91, 0.09, 2
q1, q2, a1, a2 = 0.5, 0.5, 1, 1.1
i = 4
s = e2 + sol.odd_pow(sol.odd_pow(e2,2)+2/(r1**2)*sol.odd_pow(invdW(i,e1,q1,a1),2),0.5)
us = -1/r2*invdW(i,s,q2,a2)-r3*np.sign(invdW(i,s,q2,a2))-2/(r1**2)*proddW(i,e1,q1,a1)*np.sign(invdW(i,s,q2,a2))
f = (g*np.sin(x1)-m*l*x2**2*np.cos(x1)*np.sin(x1)/(mc+m))/(l*(4/3-m*np.cos(x1)**2/(mc+m)))-d2r
# 'g' is rebound to the input gain here (shadows gravity from above).
g = (np.cos(x1)/(mc+m))/(l*(4/3-m*np.cos(x1)**2/(mc+m)))
u = (-f+us)/g
## Trajectories
#plt.figure(num=1)
#plt.plot(t, r, color=0.5*np.ones(3), lw=3, label='$r(t)$')
#plt.plot(t, dr, color=0.7*np.ones(3), lw=3, label='$\dot{r}(t)$')
#plt.plot(t, x1, '--', color=0*np.ones(3), lw=2, label='$x_1(t)$')
#plt.plot(t, x2, '--', color=0.3*np.ones(3), lw=2, label='$x_2(t)$')
#plt.ylim(-2,5)
#plt.xlim(0, 1.2)
#plt.xlabel('$t$', fontsize = 14)
#plt.legend(loc=9)
#plt.text(1, -1.3, '$T_c=1$')
#plt.axvline(x = 1, ymin = -1, ymax = 2, linestyle='dashed', color = 0.6*np.ones(3))
#plt.grid()
#plt.savefig('figures/trajW'+str(i)+'.eps', bbox_inches='tight', format='eps', dpi=1500)
#
## Error
#plt.figure(num=2)
#plt.plot(t, e1, '--', color=0*np.ones(3), lw=2, label='$e_1(t)$')
#plt.plot(t, e2, '--', color=0.3*np.ones(3), lw=2, label='$e_2(t)$')
#plt.ylim(-2,5)
#plt.xlim(0, 1.2)
#plt.xlabel('$t$', fontsize = 14)
#plt.legend(loc=9)
#plt.text(1, -1.3, '$T_c=1$')
#plt.axvline(x = 1, ymin = -1, ymax = 2, linestyle='dashed', color = 0.6*np.ones(3))
#plt.grid()
#plt.savefig('figures/errorW'+str(i)+'.eps', bbox_inches='tight', format='eps', dpi=1500)
#
## Control
#plt.figure(num=3)
#plt.plot(t, u, '--', color=0*np.ones(3))
#plt.ylim(-100,100)
#plt.xlim(0, 1.2)
#plt.xlabel('$t$', fontsize = 14)
#plt.ylabel('$u$', fontsize = 14)
#plt.grid()
#plt.savefig('figures/controllerW'+str(i)+'.eps', bbox_inches='tight', format='eps', dpi=1500)
# Riemann-sum approximation of the integral of |u| over the run
# (presumably a control-effort metric -- 'IAE of u').
IAEu = np.abs(u).sum()*h
1675306 | <reponame>TeamSprinkle/Sprinkle_Server
"""
DAO.py
Sprinkle
Created by LeeKW on 2021/02/18.
"""
from pymongo import MongoClient
class DAO():
    # Thin data-access wrapper holding a single handle to the 'sprinkle'
    # MongoDB database.
    def __init__(self):
        # Database Init
        # SECURITY NOTE(review): credentials are hard-coded in the URI;
        # move them to configuration/environment before shipping.
        self.conn = MongoClient("mongodb://sprinkle:bitbitr35@localhost:27017/sprinkle").sprinkle

    def getConn(self):
        # Accessor for the shared database handle.
        return self.conn
162273 | """
Writing Plugins
---------------
nose supports plugins for test collection, selection, observation and
reporting. There are two basic rules for plugins:
* Plugin classes should subclass :class:`nose.plugins.Plugin`.
* Plugins may implement any of the methods described in the class
:doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that
this class is for documentary purposes only; plugins may not subclass
IPluginInterface.
Registering
===========
.. Note::
Important note: the following applies only to the default
plugin manager. Other plugin managers may use different means to
locate and load plugins.
For nose to find a plugin, it must be part of a package that uses
setuptools_, and the plugin must be included in the entry points defined
in the setup.py for the package:
.. code-block:: python
setup(name='Some plugin',
# ...
entry_points = {
'nose.plugins.0.10': [
'someplugin = someplugin:SomePlugin'
]
},
# ...
)
Once the package is installed with install or develop, nose will be able
to load the plugin.
.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
Registering a plugin without setuptools
=======================================
It is currently possible to register a plugin programmatically by
creating a custom nose runner like this :
.. code-block:: python
import nose
from yourplugin import YourPlugin
if __name__ == '__main__':
nose.main(addplugins=[YourPlugin()])
Defining options
================
All plugins must implement the methods ``options(self, parser, env)``
and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin
that want the standard options should call the superclass methods.
nose uses optparse.OptionParser from the standard library to parse
arguments. A plugin's ``options()`` method receives a parser
instance. It's good form for a plugin to use that instance only to add
additional arguments that take only long arguments (--like-this). Most
of nose's built-in arguments get their default value from an environment
variable.
A plugin's ``configure()`` method receives the parsed ``OptionParser`` options
object, as well as the current config object. Plugins should configure their
behavior based on the user-selected settings, and may raise exceptions
if the configured behavior is nonsensical.
Logging
=======
nose uses the logging classes from the standard library. To enable users
to view debug messages easily, plugins should use ``logging.getLogger()`` to
acquire a logger in the ``nose.plugins`` namespace.
Recipes
=======
* Writing a plugin that monitors or controls test result output
Implement any or all of ``addError``, ``addFailure``, etc., to monitor test
results. If you also want to monitor output, implement
``setOutputStream`` and keep a reference to the output stream. If you
want to prevent the builtin ``TextTestResult`` output, implement
``setOutputSteam`` and *return a dummy stream*. The default output will go
to the dummy stream, while you send your desired output to the real stream.
Example: `examples/html_plugin/htmlplug.py`_
* Writing a plugin that handles exceptions
Subclass :doc:`ErrorClassPlugin <errorclasses>`.
Examples: :doc:`nose.plugins.deprecated <deprecated>`,
:doc:`nose.plugins.skip <skip>`
* Writing a plugin that adds detail to error reports
Implement ``formatError`` and/or ``formatFailture``. The error tuple
you return (error class, error message, traceback) will replace the
original error tuple.
Examples: :doc:`nose.plugins.capture <capture>`,
:doc:`nose.plugins.failuredetail <failuredetail>`
* Writing a plugin that loads tests from files other than python modules
Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``,
return True for files that you want to examine for tests. In
``loadTestsFromFile``, for those files, return an iterable
containing TestCases (or yield them as you find them;
``loadTestsFromFile`` may also be a generator).
Example: :doc:`nose.plugins.doctests <doctests>`
* Writing a plugin that prints a report
Implement ``begin`` if you need to perform setup before testing
begins. Implement ``report`` and output your report to the provided stream.
Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>`
* Writing a plugin that selects or rejects tests
Implement any or all ``want*`` methods. Return False to reject the test
candidate, True to accept it -- which means that the test candidate
will pass through the rest of the system, so you must be prepared to
load tests from it if tests can't be loaded by the core loader or
another plugin -- and None if you don't care.
Examples: :doc:`nose.plugins.attrib <attrib>`,
:doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>`
More Examples
=============
See any builtin plugin or example plugin in the examples_ directory in
the nose source distribution. There is a list of third-party plugins
`on jottit`_.
.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py
.. _examples: http://python-nose.googlecode.com/svn/trunk/examples
.. _on jottit: http://nose-plugins.jottit.com/
"""
from nose.plugins.base import Plugin
from nose.plugins.manager import *
from nose.plugins.plugintest import PluginTester
if __name__ == '__main__':
import doctest
doctest.testmod()
| StarcoderdataPython |
153291 | from tinder_py.tinder.xttp import Http
class Entity:
    """
    Base class for all Tinder entities.

    Stores the shared HTTP client and the entity id, which may appear in
    the source payload under either the "_id" or the "id" key ("_id" wins).
    """

    __slots__ = ["http", "id"]

    def __init__(self, entity: dict, http: Http):
        self.http = http
        for key in ("_id", "id"):
            if key in entity:
                self.id: str = entity[key]
                break
        else:
            # Neither id key is present: the payload is not a Tinder entity.
            raise TypeError("Not an entity!")

    def __str__(self):
        return "Tinder Entity({})".format(self.id)
| StarcoderdataPython |
4833911 | <reponame>kif/freesas
# -*- coding: utf-8 -*-
#
# Project: freesas
# https://github.com/kif/freesas
#
# Copyright (C) 2017 European Synchrotron Radiation Facility, Grenoble, France
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
__authors__ = ["<NAME>"]
__license__ = "MIT"
__date__ = "16/07/2021"
import unittest
import pathlib
import re
import logging
from subprocess import run, Popen, PIPE, STDOUT
from os import linesep
import PyPDF2
from freesas.test.utilstests import get_datafile
# Module-level logger for this end-to-end test module.
logger = logging.getLogger(__name__)
# Maps a human-readable description (used to name the generated label tests)
# to a regex expected to occur in the SVG output of the `freesas` CLI.
# Numeric character classes (e.g. [23]\.[0-9][0-9]) tolerate run-to-run
# variation in the fitted values.
expectedTexts = {
    "Label of scatter plot X-axis": r"\$q\$ \(nm\$\^\{-1\}\$\)",
    "Label of scatter plot Y-axis": r"\$I\(q\)\$ \(log scale\)",
    "Smallest Tick of scatter plot Y axis": r"\$\\mathdefault\{10\^\{-2\}\}\$",
    "Largest Tick of scatter plot Y axis": r"\$\\mathdefault\{10\^\{2\}\}\$",
    "Scattering plot caption": r"Scattering curve",
    "Experimental data legend": r"Experimental data",  # used twice, but we might just ignore that
    "Guinier region legend": r"Guinier region: \$R_g=\$[23]\.[0-9][0-9] nm, \$I_0=\$6[0-9]\.[0-9][0-9]",
    "BIFT fit legend": r"BIFT extraplolated: \$D_\{max\}=\$9\.[0-9][0-9] nm",
    "Label of Guinier plot X-axis": r"\$q\^2\$ \(nm\$\^\{-2\}\$\)",
    "Label of Guinier plot Y-axis": r"ln\[\$I\(q\)\$]",
    "Guinier - qRgmin": r"\$\(qR_\{g\}\)_\{min\}\$=0\.[0-9]",
    "Guinier - qRgmax": r"\$\(qR_\{g\}\)_\{max\}\$=1\.[0123]",
    "Guinier region label": r"Guinier region",
    "Guinier plot caption": r"Guinier plot: \$R_\{g\}=\$[23]\.[0-9][0-9] nm \$I_\{0\}=\$6[0-9]\.[0-9][0-9]",
    "Guinier fit equation": r"ln\[\$I\(q\)\$\] = 4\.12 -3\.01 \* \$q\^2\$",
    "Guinier fit data label": r"Experimental curve",
    "Label of Kratky plot X-Axis": r"\$qR_\{g\}\$",
    "Label of Kratky plot Y-Axis": r"\$\(qR_\{g\}\)\^2 I/I_\{0\}\$",
    "Kratky plot caption": r"Dimensionless Kratky plot",
    "Label of distribution plot X-axis": r"\$r\$ \(nm\)",
    "Label of distribution plot Y-axis": r"\$p\(r\)\$",
    "Distribution plot caption": r"Pair distribution function",
    # "BIFT chi": r"BIFT: χ\$_\{r\}\^\{2\}=\$1\.[0-9][0-9]",
    "BIFT Dmax": r"\$D_\{max\}=\$[1]?[09].[0-9][0-9] nm",
    "BIFT Rg": r"\$R_\{g\}=\$[23]\.[0-9][0-9] nm",
    "BIFT I0": r"\$I_\{0\}=\$6[0-9]\.[0-9][0-9]",
}
class TestFreeSAS(unittest.TestCase):
    """End-to-end tests for the `freesas` command-line tool.

    Runs the installed `freesas` executable as a subprocess against bundled
    test data, then inspects the SVG/PDF artifacts it produces.
    """

    # Artifacts are written to (and removed from) the current working directory.
    cwd = pathlib.Path.cwd()
    TEST_IMAGE_NAME = pathlib.Path(cwd, "freesas.svg")
    TEST_PDF_NAME = pathlib.Path(cwd, "freesas.pdf")
    test_location = pathlib.Path(__file__)
    test_data_location = pathlib.Path(test_location.parent, "e2etest_data")
    bsa_filename = pathlib.Path(get_datafile("bsa_005_sub.dat"))
    sas_curve2_filename = pathlib.Path(get_datafile("SASDF52.dat"))
    # SVG text captured by test_save_image and consumed by the label tests.
    image_text = None

    @classmethod
    def setUpClass(cls):
        super(TestFreeSAS, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(TestFreeSAS, cls).tearDownClass()
        # Drop the cached SVG text so no state leaks between test classes.
        cls.image_text = None

    def __init__(self, testName, **extra_kwargs):
        # extra_kwargs carries per-test parameters (regex/description for
        # test_label); see suite() for how instances are parameterized.
        super(TestFreeSAS, self).__init__(testName)
        self.extra_arg = extra_kwargs

    def test_save_image(self):
        """
        Test whether freeSAS finishes without errors
        if there is an -o argument.
        It also uses the output as input for label tests.
        """
        # Make sure the result file does not exist for a meaningful assert
        try:
            self.TEST_IMAGE_NAME.unlink()
        except FileNotFoundError:
            pass
        run_freesas = run(
            [
                "freesas",
                str(self.bsa_filename),
                "-o",
                str(self.TEST_IMAGE_NAME),
            ],
            stdout=PIPE,
            stderr=STDOUT,
            check=True,
        )
        self.assertEqual(
            run_freesas.returncode, 0, msg="freesas completed well"
        )
        self.assertTrue(self.TEST_IMAGE_NAME.exists(), msg="Found output file")
        # Cache the SVG text on the class so test_label instances can use it.
        with open(self.TEST_IMAGE_NAME) as file:
            self.__class__.image_text = file.read()
        # Clean up the artifact regardless of assertion outcome so far.
        try:
            self.TEST_IMAGE_NAME.unlink()
        except FileNotFoundError:
            pass

    def test_display_image(self):
        """
        Test whether freeSAS for one dataset finishes without errors
        if there no -o argument.
        """
        run_freesas = Popen(
            ["freesas", str(self.bsa_filename)],
            universal_newlines=True,
            stdout=PIPE,
            stderr=PIPE,
            stdin=PIPE,
        )
        # Send a newline on stdin to answer the "Press enter to quit" prompt.
        stdout, _ = run_freesas.communicate(linesep, timeout=40)
        self.assertEqual(
            run_freesas.returncode, 0, msg="freesas completed well"
        )
        self.assertEqual(
            stdout, "Press enter to quit", msg="freesas requested enter"
        )

    def test_label(self):
        """
        Test for the presence of labels in the svg.
        Requires two extra kwargs:
        regex: The regex expression to search for
        description: The description of what the label represents
        """
        # Relies on test_save_image having populated cls.image_text first
        # (suite() orders the tests accordingly).
        text_regex: str = self.extra_arg["regex"]
        text_description: str = self.extra_arg["description"]
        pattern = re.compile(text_regex)
        self.assertIsNotNone(
            pattern.search(self.image_text),
            msg="Could not find text for {} in image".format(text_description),
        )

    def test_multi_file_pdf(self):
        """
        Check that correct PDF is created when processing several files with the -o option.
        """
        # Make sure the result file does not exist for a meaningful assert
        try:
            self.TEST_PDF_NAME.unlink()
        except FileNotFoundError:
            pass
        run_freesas = run(
            [
                "freesas",
                str(self.bsa_filename),
                str(self.sas_curve2_filename),
                "-o",
                str(self.TEST_PDF_NAME),
            ],
            stdout=PIPE,
            stderr=STDOUT,
            check=True,
        )
        self.assertEqual(
            run_freesas.returncode, 0, msg="freesas completed well"
        )
        self.assertTrue(self.TEST_PDF_NAME.exists(), msg="Found output file")
        with open(self.TEST_PDF_NAME, "rb") as file:
            output_pdf = PyPDF2.PdfFileReader(file)
            self.assertEqual(
                output_pdf.numPages, 2, msg="correct number of pages in pdf"
            )
            page_1_text = output_pdf.getPage(0).extractText()
            page_2_text = output_pdf.getPage(1).extractText()
            # XOR: each input filename must appear on exactly one page.
            self.assertTrue(
                (str(self.bsa_filename) in page_1_text)
                ^ (str(self.bsa_filename) in page_2_text),
                msg=str(self.bsa_filename) + " found on one of the pages",
            )
            self.assertTrue(
                (str(self.sas_curve2_filename) in page_1_text)
                ^ (str(self.sas_curve2_filename) in page_2_text),
                msg=str(self.sas_curve2_filename) + " found on one of the pages",
            )
        # Clean up
        try:
            self.TEST_PDF_NAME.unlink()
        except FileNotFoundError:
            pass
def suite():
    """Build the test suite.

    Order matters: the two CLI smoke tests run first (test_save_image caches
    the SVG text), then one label test per entry of expectedTexts, then the
    multi-file PDF test.
    """
    tests = unittest.TestSuite()
    tests.addTest(TestFreeSAS("test_display_image"))
    tests.addTest(TestFreeSAS("test_save_image"))
    for description, regex in expectedTexts.items():
        tests.addTest(
            TestFreeSAS("test_label", regex=regex, description=description)
        )
    tests.addTest(TestFreeSAS("test_multi_file_pdf"))
    return tests
# Run the curated suite (not unittest discovery) when executed directly.
if __name__ == "__main__":
    runner = unittest.TextTestRunner()
    runner.run(suite())
| StarcoderdataPython |
25703 | <reponame>Rory-Sullivan/yrlocationforecast
"""An example of accessing individual forecast variables."""
from metno_locationforecast import Place, Forecast
# Identifying user agent required by the met.no API terms of service.
USER_AGENT = "metno_locationforecast/1.0 https://github.com/Rory-Sullivan/yrlocationforecast"
# Place(name, latitude, longitude, altitude_in_metres).
new_york = Place("New York", 40.7, -74.0, 10)
new_york_forecast = Forecast(new_york, USER_AGENT, "complete")
# NOTE: update() performs a network request to the met.no API.
new_york_forecast.update()
# Access a particular interval.
first_interval = new_york_forecast.data.intervals[0]
print(first_interval)
# Access the interval's duration attribute.
print(f"Duration: {first_interval.duration}")
print()  # Blank line
# Access a particular variable from the interval.
rain = first_interval.variables["precipitation_amount"]
print(rain)
# Access the variables value and unit attributes.
print(f"Rain value: {rain.value}")
print(f"Rain units: {rain.units}")
# Get a full list of variables available in the interval.
print(first_interval.variables.keys())
| StarcoderdataPython |
1795563 | <gh_stars>10-100
import unittest
from PyStacks.PyStacks.template import templateCF
class TestTemplate(unittest.TestCase):
    """Tests for templateCF's rendering of S3 resources into CloudFormation."""

    def test_templateCF_S3(self):
        """An S3 bucket + bucket-policy spec renders to the expected CF dict."""
        resources = {
            's3': {
                'S3Bucket': {
                    'name': 'stuff.holder',
                    'accesscontrol': 'PublicRead',
                    'versioning': True,
                    'tags': {
                        'Name': 'Api'
                    },
                    'notices': {
                        'lamda': [{
                            'event': 's3:ObjectCreated:*',
                            'function': 'somelambdaarn'
                        }]
                    }
                }
            },
            's3_policies': {
                'S3BucketPolicies': {
                    'policy': '"what": "on earth"'
                }
            }
        }
        expected = {
            'S3BucketPolicies': {
                'Type': 'AWS::S3::BucketPolicy',
                'Properties': {
                    'what': 'on earth'
                }
            },
            'S3Bucket': {
                'Type': 'AWS::S3::Bucket',
                'Properties': {
                    'AccessControl': 'PublicRead',
                    'NotificationConfiguration': {
                        'LambdaConfigurations': [
                            {
                                'Event': 's3:ObjectCreated:*',
                                'Function': 'somelambdaarn'
                            }
                        ]
                    },
                    'VersioningConfiguration': {
                        'Status': 'Enabled'
                    },
                    'BucketName': 'stuff.holder',
                    'Tags': [
                        {
                            'Key': 'Name',
                            'Value': 'Api'
                        }
                    ]
                }
            }
        }
        # BUG FIX: this was `self.maxDiff = 'None'` (the string), a truthy
        # value that makes unittest's diff truncation compare a message
        # length against a str and raise TypeError on long diffs. The real
        # None disables truncation, which is what was intended.
        self.maxDiff = None
        actual = templateCF(resources, 'resources')
        self.assertDictEqual(actual, expected)
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
# Interactive registration loop: counts adults (>18), men, and women under 20.
# NOTE(review): '\033[M' looks like it was meant to be '\033[m' (ANSI reset) —
# confirm; as written it emits an unusual escape sequence.
print('\033[1mCADASTRO DE PESSOAS\033[M')
# Counters: mais = people over 18, hom = men, mulh = women under 20.
mais = hom = mulh = 0
while True:
    print('-' * 50)
    idade = int(input('Idade pessoa: '))
    # Keep asking until the first character of the reply is M or F.
    # NOTE(review): an empty reply raises IndexError on [0] — presumably
    # acceptable for this exercise; verify if robustness is needed.
    sexo = ' '
    while sexo not in 'MF':
        sexo = str(input('Sexo da pessoa [M/F]: ')).strip().upper()[0]
    print('-' * 50)
    if idade > 18:
        mais += 1
    if sexo == 'M':
        hom += 1
    if idade < 20 and sexo == 'F':
        mulh += 1
    # Ask whether to continue; 'N' ends the registration loop.
    resp = ' '
    while resp not in 'SN':
        resp = str(input('Quer continuar? [S/N]: ')).strip().upper()[0]
    if resp == 'N':
        break
print('-' * 50)
print(f'''Muito Obrigado, no total foram cadastradas {mais} com
mais de 18 anos {hom} homens e {mulh} mulheres com menos de 20 anos.''')
| StarcoderdataPython |
def main():
    """Interactive restaurant setup: greet the owner, then collect and echo
    three menu names (prompts are in Korean, unchanged)."""
    print("안녕하세요 사장님!")
    owner_name = input("사장님의 이름을 알려주세요:")
    print(owner_name + " 사장님 앞으로 멋진 식당을 만들어봐요")
    menus = [
        input("첫번째 메뉴 이름을 정해주세요: "),
        input("두번째 메뉴 이름을 정해주세요: "),
        input("세번째 메뉴 이름을 정해주세요: "),
    ]
    print("이 식당에서는 아래와 같은 요리를 먹을수 있습니다.")
    for menu in menus:
        print(menu)
# Run the interactive setup when executed as a script.
if __name__ == '__main__':
    main()
1634059 | # -*- coding: utf-8 -*-
"""
@Module SARibbonPannelOptionButton
@Author ROOT
@brief Pannel右下角的操作按钮
此按钮和一个action关联,使用SARibbonPannel.addOptionAction 函数用于生成此按钮,正常来说
用户并不需要直接操作此类,仅仅用于样式设计
如果一定要重载此按钮,可以通过重载 SARibbonElementCreateDelegate
的 SARibbonElementCreateDelegate.createRibbonPannelOptionButton来实现新的OptionButton
"""
import PySARibbon.resource_rc
from PyQt5.QtGui import QIcon
from PyQt5.QtCore import QSize
from PyQt5.QtWidgets import QToolButton, QAction
class SARibbonPannelOptionButton(QToolButton):
    """Small flat tool button placed at a pannel's bottom-right corner."""

    def __init__(self, parent=None):
        super().__init__(parent)
        self.setAutoRaise(True)
        self.setCheckable(False)
        # Fixed 16x16 footprint with a matching icon size.
        self.setFixedSize(QSize(16, 16))
        self.setIconSize(QSize(16, 16))
        self.setIcon(QIcon(':/icon/resource/ribbonPannelOptionButton.png'))

    def connectAction(self, act: QAction):
        """Wire this button's click to the given action.

        NOTE(review): this connects `clicked` to `act.toggled` (a signal,
        emitted as a slot) rather than `act.trigger` — confirm this is
        intentional.
        """
        self.clicked.connect(act.toggled)
| StarcoderdataPython |
39143 | <gh_stars>100-1000
import FWCore.ParameterSet.Config as cms
# reco hit production
from RecoPPS.Local.ctppsDiamondRecHits_cfi import ctppsDiamondRecHits
# local track fitting
from RecoPPS.Local.ctppsDiamondLocalTracks_cfi import ctppsDiamondLocalTracks
# Diamond-detector local reconstruction: rec-hit production followed by
# local track fitting, wrapped in a Task and exposed as a Sequence.
ctppsDiamondLocalReconstructionTask = cms.Task(
    ctppsDiamondRecHits,
    ctppsDiamondLocalTracks
)
ctppsDiamondLocalReconstruction = cms.Sequence(ctppsDiamondLocalReconstructionTask)
| StarcoderdataPython |
3270764 | # 1,-2,3,-4,5 ESE 100 TAKK PRINT KRE
# Print 1..100 with every even number negated: 1, -2, 3, -4, ..., -100.
for num in range(1, 101):
    print(-num if num % 2 == 0 else num)
1691517 | <filename>postal_address/tests/__init__.py
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013-2018 Scaleway and Contributors. All Rights Reserved.
# <NAME> <<EMAIL>>
#
# Licensed under the BSD 2-Clause License (the "License"); you may not use this
# file except in compliance with the License. You may obtain a copy of the
# License at http://opensource.org/licenses/BSD-2-Clause
| StarcoderdataPython |
4830501 | <reponame>julianapereira99/SIB
import itertools
# Y is reserved to idenfify dependent variables
import numpy as np
import pandas as pd
# Alphabet for generated labels. NOTE: 'Y' is deliberately absent — it is
# reserved to identify dependent variables (see the module comment above).
ALPHA = 'ABCDEFGHIJKLMNOPQRSTUVWXZ'
# Public API of this utility module.
__all__ = ['label_gen', 'euclidian_distance', 'manhattan_distance', 'train_test_split', 'sig', 'add_intersect', 'l1_distance', 'l2_distance', 'minibatch', 'to_categorical']
def label_gen(n):
    """Generate a list of n distinct, Excel-style column labels.

    Labels are drawn from ALPHA: first all 1-character strings, then all
    2-character combinations, and so on.
    """
    def _labels():
        # Infinite stream: A, B, ..., then AA, AB, ... (lengths grow forever).
        size = 1
        while True:
            for combo in itertools.product(ALPHA, repeat=size):
                yield "".join(combo)
            size += 1

    return list(itertools.islice(_labels(), n))
def l1_distance(x, y):
    """Manhattan (L1) distance from a point x to each row of y.

    x.shape == (n,) and y.shape == (m, n); returns an array of shape (m,).
    """
    # numpy is already imported at module level as `np`; the previous
    # redundant function-local `import numpy` was removed.
    return np.absolute(x - y).sum(axis=1)
def l2_distance(x, y):
    """Squared euclidean (L2^2) distance from a point x to each row of y.

    NOTE: unlike `euclidian_distance`, no square root is taken — the
    previous docstring wrongly claimed this was the euclidean distance.
    The computation is kept as-is for backward compatibility.

    x.shape == (n,) and y.shape == (m, n); returns an array of shape (m,).
    """
    return ((x - y) ** 2).sum(axis=1)
def euclidian_distance(x, y):
    """Euclidean distance from a point x (shape (n,)) to each row of y (shape (m, n))."""
    squared = ((x - y) ** 2).sum(axis=1)
    return np.sqrt(squared)
def manhattan_distance(x, y):
    """Manhattan distance from a point x (shape (n,)) to each row of y (shape (m, n))."""
    return np.abs(x - y).sum(axis=1)
def train_test_split(dataset, split=0.8):
    """Randomly split a Dataset into train/test parts.

    split is the fraction of samples assigned to the training set.
    """
    from si.data import Dataset
    n_samples = dataset.X.shape[0]
    indices = np.arange(n_samples)
    cut = int(split * n_samples)
    np.random.shuffle(indices)
    train_idx, test_idx = indices[:cut], indices[cut:]
    train = Dataset(dataset.X[train_idx], dataset.Y[train_idx], dataset._xnames, dataset._yname)
    test = Dataset(dataset.X[test_idx], dataset.Y[test_idx], dataset._xnames, dataset._yname)
    return train, test
def sig(z):
    """Elementwise logistic sigmoid: 1 / (1 + e^-z)."""
    exp_neg = np.exp(-z)
    return 1 / (1 + exp_neg)
def add_intersect(X):
    """Prepend a column of ones (intercept term) to the design matrix X."""
    ones = np.ones((X.shape[0], 1))
    return np.hstack((ones, X))
def to_categorical(y, num_classes=None, dtype='float32'):
    """One-hot encode an array of integer class labels.

    A trailing singleton dimension (e.g. shape (n, 1)) is collapsed before
    encoding. When num_classes is falsy it is inferred as max(y) + 1. The
    result has shape y.shape + (num_classes,).
    """
    labels = np.array(y, dtype='int')
    shape = labels.shape
    # Collapse a trailing singleton dimension, e.g. (n, 1) -> (n,).
    if shape and len(shape) > 1 and shape[-1] == 1:
        shape = tuple(shape[:-1])
    flat = labels.ravel()
    if not num_classes:
        num_classes = np.max(flat) + 1
    count = flat.shape[0]
    one_hot = np.zeros((count, num_classes), dtype=dtype)
    one_hot[np.arange(count), flat] = 1
    return np.reshape(one_hot, shape + (num_classes,))
def minibatch(X, batchsize=256, shuffle=True):
    """Yield index batches over the rows of X.

    Returns a 1-tuple whose single element is a generator of index arrays
    (the trailing comma in the original API is preserved — callers unpack
    the tuple).
    """
    n_samples = X.shape[0]
    order = np.arange(n_samples)
    n_batches = int(np.ceil(n_samples / batchsize))
    if shuffle:
        np.random.shuffle(order)

    def _batches():
        for b in range(n_batches):
            yield order[b * batchsize: (b + 1) * batchsize]

    return (_batches(),)
165122 | <reponame>ondiiik/meteoink
def localtime(sec):
    """Convert an embedded-epoch timestamp to a local struct_time.

    The device counts seconds from an epoch near 2000-01-01 (offset
    946677600 — presumably chosen for MicroPython's 2000 epoch; confirm
    the exact timezone handling), while CPython counts from 1970-01-01.
    """
    import time
    EMBEDDED_EPOCH_OFFSET = 946677600
    return time.localtime(EMBEDDED_EPOCH_OFFSET + sec)
def sleep_ms(ms):
    """Block for ms milliseconds (shim for MicroPython's utime.sleep_ms)."""
    import time
    time.sleep(ms * 0.001)
def ticks_ms():
    """Current wall-clock time in whole milliseconds (utime.ticks_ms shim)."""
    import time
    # round() on a float already returns an int in Python 3.
    return round(time.time() * 1000)
3308266 | <gh_stars>0
from torch.nn.utils import spectral_norm
import torch.nn as nn
import torch
class Flatten(nn.Module):
    """Flattens its input to shape (batch_size, -1)."""

    @staticmethod
    def forward(x: torch.Tensor):
        """Return x viewed as (batch_size, -1).

        Uses .view (no copy); the input must therefore be contiguous,
        matching the original behavior.
        """
        batch_size = x.size(0)
        return x.view(batch_size, -1)
class Reshape(nn.Module):
    """Reshapes its input to (batch_size, *shape)."""

    def __init__(self, shape: tuple = (1, 28, 28)):
        """Store the target shape (excluding the batch dimension).

        Parameters
        ----------
        shape: 3-tuple (channels, height, width) to reshape each sample to.
        """
        super(Reshape, self).__init__()
        self.shape = shape

    def forward(self, x: torch.Tensor):
        """Return x viewed as (batch_size, channels, height, width)."""
        channels, height, width = self.shape
        return x.view(x.size(0), channels, height, width)
class Conv2d(nn.Module):
    """
    Wrapper around torch.nn.Conv2d adding optional spectral normalization
    (often used in GAN layers) and the CoordConv approach (two extra input
    channels carrying normalized pixel coordinates).
    """

    def __init__(self, in_channels, out_channels, size, stride, padding,
                 spec_norm=False, coord_conv=False):
        """
        Initializes the Conv2d layer.

        Parameters
        ----------
        in_channels: Number of input channels for the convolutional layer.
        out_channels: Number of output channels for the convolutional layer.
        size: Kernel size of the convolutional layer.
        stride: Stride used when sliding over the input feature map.
        padding: Padding applied to the input feature map.
        spec_norm: Whether to wrap the layer in spectral normalization.
        coord_conv: Whether to apply the CoordConv approach.
        """
        super(Conv2d, self).__init__()
        self.coord_conv = coord_conv
        # CoordConv concatenates two coordinate channels to the input.
        if coord_conv:
            in_channels += 2
        conv = nn.Conv2d(in_channels, out_channels, size, stride=stride, padding=padding)
        self.conv = spectral_norm(conv) if spec_norm else conv

    def forward(self, x):
        """
        Apply the convolution, prepending coordinate channels to the input
        when coord_conv is enabled.

        Parameters
        ----------
        x: Input feature map of shape (batch, channels, height, width).

        Returns
        -------
        out: Convolved output feature map.
        """
        if self.coord_conv:
            x = torch.cat((x, self._coord_channels(x)), 1)
        return self.conv(x)

    @staticmethod
    def _coord_channels(x):
        """Build the two CoordConv channels: row indices divided by height
        and column indices divided by width.

        BUG FIX: the channels are created with x's dtype and device.
        Previously they were built on CPU as float32 and moved with
        .cuda(), which landed on the *default* GPU (wrong when x lives on
        another GPU) and failed to concatenate with half-precision inputs.
        """
        batch, _, height, width = x.size()
        rows = torch.arange(height, dtype=x.dtype, device=x.device) / height
        cols = torch.arange(width, dtype=x.dtype, device=x.device) / width
        coord = torch.empty(batch, 2, height, width, dtype=x.dtype, device=x.device)
        coord[:, 0, :, :] = rows.view(height, 1)
        coord[:, 1, :, :] = cols.view(1, width)
        return coord
class ConvTranspose2d(nn.Module):
    """
    Wrapper around torch.nn.ConvTranspose2d adding optional spectral
    normalization and the CoordConv approach (two extra input channels
    carrying normalized pixel coordinates).
    """

    def __init__(self, in_channels, out_channels, size, stride, padding,
                 spec_norm=False, coord_conv=False):
        """
        Initializes the ConvTranspose2d layer.

        Parameters
        ----------
        in_channels: Number of input channels for the convolutional layer.
        out_channels: Number of output channels for the convolutional layer.
        size: Kernel size of the convolutional layer.
        stride: Stride used when sliding over the input feature map.
        padding: Padding applied to the input feature map.
        spec_norm: Whether to wrap the layer in spectral normalization.
        coord_conv: Whether to apply the CoordConv approach.
        """
        super(ConvTranspose2d, self).__init__()
        self.coord_conv = coord_conv
        # CoordConv concatenates two coordinate channels to the input.
        if coord_conv:
            in_channels += 2
        conv_trans = nn.ConvTranspose2d(in_channels, out_channels, size,
                                        stride=stride, padding=padding)
        self.conv_trans = spectral_norm(conv_trans) if spec_norm else conv_trans

    def forward(self, x):
        """
        Apply the transposed convolution, prepending coordinate channels to
        the input when coord_conv is enabled.

        Parameters
        ----------
        x: Input feature map of shape (batch, channels, height, width).

        Returns
        -------
        out: Output generated by the transposed convolution.
        """
        if self.coord_conv:
            x = torch.cat((x, self._coord_channels(x)), 1)
        return self.conv_trans(x)

    @staticmethod
    def _coord_channels(x):
        """Build the two CoordConv channels: row indices divided by height
        and column indices divided by width.

        BUG FIX: the channels are created with x's dtype and device
        (mirrors the fix in Conv2d). Previously they were built on CPU as
        float32 and moved with .cuda(), which landed on the default GPU
        and failed for half-precision inputs.
        """
        batch, _, height, width = x.size()
        rows = torch.arange(height, dtype=x.dtype, device=x.device) / height
        cols = torch.arange(width, dtype=x.dtype, device=x.device) / width
        coord = torch.empty(batch, 2, height, width, dtype=x.dtype, device=x.device)
        coord[:, 0, :, :] = rows.view(height, 1)
        coord[:, 1, :, :] = cols.view(1, width)
        return coord
| StarcoderdataPython |
3369693 | import argparse
import os
import json
import fnmatch
import re
import yaml
import glob
import shutil
# Source (legacy) and destination folders for the generated docs YAML.
LEGACY_SOURCE_FOLDER = "legacy/docs-ref-autogen"
TARGET_SOURCE_FOLDER = "docs-ref-autogen"
# Absolute path of the directory containing this script.
root_dir = os.path.abspath(os.path.join(os.path.abspath(__file__), ".."))
def check_against_targeted_namespaces(test_line, namespace_patterns_list):
    """Return True if test_line matches any of the given regex patterns
    (anchored at the start, per re.match)."""
    # Generator instead of a materialized list so any() short-circuits
    # on the first match.
    return any(re.match(pattern, test_line) for pattern in namespace_patterns_list)
if __name__ == "__main__":
# parse packages.json
with open(os.path.join(root_dir, "temporary-src-based-yml", 'migration.json'), 'r') as f:
text = f.read().rstrip("\n")
json = json.loads(text)
migrating_namespaces = json["migrating_namespaces"]
migrating_namespaces_regexs = [fnmatch.translate(namespace) for namespace in migrating_namespaces]
# get the yml from legacy
with open(os.path.join(root_dir, LEGACY_SOURCE_FOLDER, 'toc.yml'), "r") as legacy_toc:
legacy_toc = yaml.safe_load(legacy_toc)
toc_items = []
files_for_move = []
# filter that toc
for index, top_level_toc_item in enumerate(legacy_toc):
if check_against_targeted_namespaces(top_level_toc_item['uid'], migrating_namespaces_regexs):
toc_items.append(top_level_toc_item)
files_for_move += glob.glob(os.path.join(root_dir, LEGACY_SOURCE_FOLDER, top_level_toc_item['uid']+"*"))
appended_content = yaml.dump(toc_items, default_flow_style=False)
# write the toc
with open(os.path.join(root_dir, TARGET_SOURCE_FOLDER, "toc.yml"), "a", encoding="utf-8") as stable_toc:
stable_toc.write(appended_content)
for file_name in files_for_move:
shutil.copy(file_name, os.path.join(root_dir, TARGET_SOURCE_FOLDER))
| StarcoderdataPython |
3390291 | <gh_stars>0
# Read three integers and report whether they mix parities:
# "YES" when at least one is even AND at least one is odd, else "NO".
a = int(input())
b = int(input())
c = int(input())
parities = {a % 2, b % 2, c % 2}
print("YES" if len(parities) == 2 else "NO")
| StarcoderdataPython |
3380528 | <reponame>gridsum/IML-predictor-
from .error import ErrorCode
class PredictSuccessResponse:
    """Payload returned for a successful memory-limit prediction."""

    def __init__(self, memory_limit=None, error_code=ErrorCode.SUCCESS):
        # `mem` is the predicted memory limit; defaults to SUCCESS error code.
        self.mem = memory_limit
        self.error_code = error_code

    def get_response(self):
        """Return the response as a dict (the live instance __dict__)."""
        return self.__dict__
class ModelBuildResponse:
    """Payload describing the outcome of a model-build request."""

    def __init__(self, error_code, message):
        self.error_code = error_code
        self.message = message

    def get_response(self):
        """Return the response as a dict (the live instance __dict__)."""
        return vars(self)
class ModelStatusResponse:
    """Payload reporting a model's current status (code + human-readable string)."""

    def __init__(self, status_code, status_str):
        self.status_code = status_code
        self.status_str = status_str

    def get_response(self):
        """Return the response as a dict (the live instance __dict__)."""
        return vars(self)
| StarcoderdataPython |
1681517 | <reponame>illuin-tech/opyoid
from typing import Any, Type, TypeVar, Union, cast
from .named import Named
# Type variable for the value being injected.
InjectedT = TypeVar("InjectedT", bound=Any)
# Sentinel distinguishing "no value supplied" from an explicit None.
EMPTY = object()
def get_class_full_name(klass: Union[Type, str]) -> str:
    """Return a human-readable, fully qualified name for a class.

    Strings pass through unchanged; Named subclasses are rendered as
    "<original>#<name>"; typing generics use their repr; builtins are
    shown without the module prefix.
    """
    if isinstance(klass, str):
        return klass
    if isinstance(klass, type) and issubclass(klass, Named):
        original = cast(Type[Named], klass).original_type
        return f"{get_class_full_name(original)}#{klass.name}"
    if hasattr(klass, "__origin__"):
        # Parametrized typing construct (e.g. List[int]).
        return repr(klass)
    module = getattr(klass, "__module__", None)
    _missing = object()
    name = getattr(klass, "__name__", _missing)
    if name is _missing:
        name = repr(klass)
    # Builtins (same module as str) and module-less objects: bare name.
    if module is None or module == str.__module__:
        return name
    return f"{module}.{name}"
| StarcoderdataPython |
4826265 | <filename>leetcode/graphs/all_paths_source_target.py
# link: https://leetcode.com/problems/all-paths-from-source-to-target/
class Solution(object):
    """LeetCode 797: All Paths From Source to Target (DAG path enumeration)."""

    def allPathsSourceTarget(self, graph):
        """
        :type graph: List[List[int]]
        :rtype: List[List[int]]
        """
        last = len(graph) - 1
        paths = []

        def dfs(node, trail):
            # Depth-first backtracking from `node`, extending `trail` in place.
            if node == last:
                paths.append(list(trail))
                return
            for nxt in graph[node]:
                trail.append(nxt)
                dfs(nxt, trail)
                trail.pop()

        dfs(0, [0])
        return paths
| StarcoderdataPython |
1749090 | # coding: utf-8
import numpy as np
import tensorflow as tf
from tensorflow.contrib.rnn import RNNCell
from tensorflow.python.ops import rnn_cell_impl
#from tensorflow.contrib.data.python.util import nest
from tensorflow.contrib.framework import nest
from tensorflow.contrib.seq2seq.python.ops.attention_wrapper import _bahdanau_score, _BaseAttentionMechanism, BahdanauAttention, \
AttentionWrapperState, AttentionMechanism, _BaseMonotonicAttentionMechanism,_maybe_mask_score,_prepare_memory,_monotonic_probability_fn
from tensorflow.python.layers.core import Dense
from .modules import prenet
import functools
_zero_state_tensors = rnn_cell_impl._zero_state_tensors
class AttentionWrapper(RNNCell):
"""Wraps another `RNNCell` with attention.
"""
def __init__(self,
cell,
attention_mechanism,
is_manual_attention, # 추가된 argument
manual_alignments, # 추가된 argument
attention_layer_size=None,
alignment_history=False,
cell_input_fn=None,
output_attention=True,
initial_cell_state=None,
name=None):
"""Construct the `AttentionWrapper`.
**NOTE** If you are using the `BeamSearchDecoder` with a cell wrapped in
`AttentionWrapper`, then you must ensure that:
- The encoder output has been tiled to `beam_width` via
@{tf.contrib.seq2seq.tile_batch} (NOT `tf.tile`).
- The `batch_size` argument passed to the `zero_state` method of this
wrapper is equal to `true_batch_size * beam_width`.
- The initial state created with `zero_state` above contains a
`cell_state` value containing properly tiled final state from the
encoder.
An example:
```
tiled_encoder_outputs = tf.contrib.seq2seq.tile_batch(
encoder_outputs, multiplier=beam_width)
tiled_encoder_final_state = tf.conrib.seq2seq.tile_batch(
encoder_final_state, multiplier=beam_width)
tiled_sequence_length = tf.contrib.seq2seq.tile_batch(
sequence_length, multiplier=beam_width)
attention_mechanism = MyFavoriteAttentionMechanism(
num_units=attention_depth,
memory=tiled_inputs,
memory_sequence_length=tiled_sequence_length)
attention_cell = AttentionWrapper(cell, attention_mechanism, ...)
decoder_initial_state = attention_cell.zero_state(
dtype, batch_size=true_batch_size * beam_width)
decoder_initial_state = decoder_initial_state.clone(
cell_state=tiled_encoder_final_state)
```
Args:
cell: An instance of `RNNCell`.
attention_mechanism: A list of `AttentionMechanism` instances or a single
instance.
attention_layer_size: A list of Python integers or a single Python
integer, the depth of the attention (output) layer(s). If None
(default), use the context as attention at each time step. Otherwise,
feed the context and cell output into the attention layer to generate
attention at each time step. If attention_mechanism is a list,
attention_layer_size must be a list of the same length.
alignment_history: Python boolean, whether to store alignment history
from all time steps in the final output state (currently stored as a
time major `TensorArray` on which you must call `stack()`).
cell_input_fn: (optional) A `callable`. The default is:
`lambda inputs, attention: tf.concat([inputs, attention], -1)`.
output_attention: Python bool. If `True` (default), the output at each
time step is the attention value. This is the behavior of Luong-style
attention mechanisms. If `False`, the output at each time step is
the output of `cell`. This is the behavior of Bhadanau-style
attention mechanisms. In both cases, the `attention` tensor is
propagated to the next time step via the state and is used there.
This flag only controls whether the attention mechanism is propagated
up to the next cell in an RNN stack or to the top RNN output.
initial_cell_state: The initial state value to use for the cell when
the user calls `zero_state()`. Note that if this value is provided
now, and the user uses a `batch_size` argument of `zero_state` which
does not match the batch size of `initial_cell_state`, proper
behavior is not guaranteed.
name: Name to use when creating ops.
Raises:
TypeError: `attention_layer_size` is not None and (`attention_mechanism`
is a list but `attention_layer_size` is not; or vice versa).
ValueError: if `attention_layer_size` is not None, `attention_mechanism`
is a list, and its length does not match that of `attention_layer_size`.
"""
super(AttentionWrapper, self).__init__(name=name)
self.is_manual_attention = is_manual_attention
self.manual_alignments = manual_alignments
rnn_cell_impl.assert_like_rnncell("cell", cell)
if isinstance(attention_mechanism, (list, tuple)):
self._is_multi = True
attention_mechanisms = attention_mechanism
for attention_mechanism in attention_mechanisms:
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must contain only instances of "
"AttentionMechanism, saw type: %s"
% type(attention_mechanism).__name__)
else:
self._is_multi = False
if not isinstance(attention_mechanism, AttentionMechanism):
raise TypeError(
"attention_mechanism must be an AttentionMechanism or list of "
"multiple AttentionMechanism instances, saw type: %s"
% type(attention_mechanism).__name__)
attention_mechanisms = (attention_mechanism,)
if cell_input_fn is None:
cell_input_fn = (
lambda inputs, attention: tf.concat([inputs, attention], -1))
else:
if not callable(cell_input_fn):
raise TypeError(
"cell_input_fn must be callable, saw type: %s"
% type(cell_input_fn).__name__)
if attention_layer_size is not None:
attention_layer_sizes = tuple(
attention_layer_size
if isinstance(attention_layer_size, (list, tuple))
else (attention_layer_size,))
if len(attention_layer_sizes) != len(attention_mechanisms):
raise ValueError(
"If provided, attention_layer_size must contain exactly one "
"integer per attention_mechanism, saw: %d vs %d"
% (len(attention_layer_sizes), len(attention_mechanisms)))
self._attention_layers = tuple(
layers_core.Dense(
attention_layer_size,
name="attention_layer",
use_bias=False,
dtype=attention_mechanisms[i].dtype)
for i, attention_layer_size in enumerate(attention_layer_sizes))
self._attention_layer_size = sum(attention_layer_sizes)
else:
self._attention_layers = None
self._attention_layer_size = sum(
attention_mechanism.values.get_shape()[-1].value
for attention_mechanism in attention_mechanisms)
self._cell = cell
self._attention_mechanisms = attention_mechanisms
self._cell_input_fn = cell_input_fn
self._output_attention = output_attention
self._alignment_history = alignment_history
with tf.name_scope(name, "AttentionWrapperInit"):
if initial_cell_state is None:
self._initial_cell_state = None
else:
final_state_tensor = nest.flatten(initial_cell_state)[-1]
state_batch_size = (
final_state_tensor.shape[0].value
or tf.shape(final_state_tensor)[0])
error_message = (
"When constructing AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and initial_cell_state. Are you using "
"the BeamSearchDecoder? You may need to tile your initial state "
"via the tf.contrib.seq2seq.tile_batch function with argument "
"multiple=beam_width.")
with tf.control_dependencies(
self._batch_size_checks(state_batch_size, error_message)):
self._initial_cell_state = nest.map_structure(
lambda s: tf.identity(s, name="check_initial_cell_state"),
initial_cell_state)
def _batch_size_checks(self, batch_size, error_message):
return [tf.assert_equal(batch_size,
attention_mechanism.batch_size,
message=error_message)
for attention_mechanism in self._attention_mechanisms]
def _item_or_tuple(self, seq):
"""Returns `seq` as tuple or the singular element.
Which is returned is determined by how the AttentionMechanism(s) were passed
to the constructor.
Args:
seq: A non-empty sequence of items or generator.
Returns:
Either the values in the sequence as a tuple if AttentionMechanism(s)
were passed to the constructor as a sequence or the singular element.
"""
t = tuple(seq)
if self._is_multi:
return t
else:
return t[0]
@property
def output_size(self):
if self._output_attention:
return self._attention_layer_size
else:
return self._cell.output_size
@property
def state_size(self):
"""The `state_size` property of `AttentionWrapper`.
Returns:
An `AttentionWrapperState` tuple containing shapes used by this object.
"""
return AttentionWrapperState(
cell_state=self._cell.state_size,
time=tf.TensorShape([]),
attention=self._attention_layer_size,
alignments=self._item_or_tuple(
a.alignments_size for a in self._attention_mechanisms),
attention_state=self._item_or_tuple(
a.state_size for a in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
a.alignments_size if self._alignment_history else ()
for a in self._attention_mechanisms)) # sometimes a TensorArray
def zero_state(self, batch_size, dtype):
"""Return an initial (zero) state tuple for this `AttentionWrapper`.
**NOTE** Please see the initializer documentation for details of how
to call `zero_state` if using an `AttentionWrapper` with a
`BeamSearchDecoder`.
Args:
batch_size: `0D` integer tensor: the batch size.
dtype: The internal state data type.
Returns:
An `AttentionWrapperState` tuple containing zeroed out tensors and,
possibly, empty `TensorArray` objects.
Raises:
ValueError: (or, possibly at runtime, InvalidArgument), if
`batch_size` does not match the output size of the encoder passed
to the wrapper object at initialization time.
"""
with tf.name_scope(type(self).__name__ + "ZeroState", values=[batch_size]):
if self._initial_cell_state is not None:
cell_state = self._initial_cell_state
else:
cell_state = self._cell.zero_state(batch_size, dtype)
error_message = (
"When calling zero_state of AttentionWrapper %s: " % self._base_name +
"Non-matching batch sizes between the memory "
"(encoder output) and the requested batch size. Are you using "
"the BeamSearchDecoder? If so, make sure your encoder output has "
"been tiled to beam_width via tf.contrib.seq2seq.tile_batch, and "
"the batch_size= argument passed to zero_state is "
"batch_size * beam_width.")
with tf.control_dependencies(
self._batch_size_checks(batch_size, error_message)):
cell_state = nest.map_structure(
lambda s: tf.identity(s, name="checked_cell_state"),
cell_state)
initial_alignments = [
attention_mechanism.initial_alignments(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms]
return AttentionWrapperState(
cell_state=cell_state,
time=tf.zeros([], dtype=tf.int32),
attention=_zero_state_tensors(self._attention_layer_size, batch_size,
dtype),
alignments=self._item_or_tuple(initial_alignments),
attention_state=self._item_or_tuple(
attention_mechanism.initial_state(batch_size, dtype)
for attention_mechanism in self._attention_mechanisms),
alignment_history=self._item_or_tuple(
tf.TensorArray(
dtype,
size=0,
dynamic_size=True,
element_shape=alignment.shape)
if self._alignment_history else ()
for alignment in initial_alignments))
    def call(self, inputs, state):
        """Perform a step of attention-wrapped RNN.

        - Step 1: Mix the `inputs` and previous step's `attention` output via
          `cell_input_fn`.
        - Step 2: Call the wrapped `cell` with this input and its previous state.
        - Step 3: Score the cell's output with `attention_mechanism`.
        - Step 4: Calculate the alignments by passing the score through the
          `normalizer`.
        - Step 5: Calculate the context vector as the inner product between the
          alignments and the attention_mechanism's values (memory).
        - Step 6: Calculate the attention output by concatenating the cell output
          and context through the attention layer (a linear layer with
          `attention_layer_size` outputs).

        Args:
          inputs: (Possibly nested tuple of) Tensor, the input at this time step.
          state: An instance of `AttentionWrapperState` containing
            tensors from the previous time step.

        Returns:
          A tuple `(attention_or_cell_output, next_state)`, where:
          - `attention_or_cell_output` depending on `output_attention`.
          - `next_state` is an instance of `AttentionWrapperState`
            containing the state calculated at this time step.

        Raises:
          TypeError: If `state` is not an instance of `AttentionWrapperState`.
        """
        if not isinstance(state, AttentionWrapperState):
            raise TypeError("Expected state to be instance of AttentionWrapperState. "
                            "Received type %s instead." % type(state))
        # Step 1: Calculate the true inputs to the cell based on the
        # previous attention value (cell_input_fn defaults to concatenation).
        cell_inputs = self._cell_input_fn(inputs, state.attention)
        cell_state = state.cell_state
        # Step 2: one step of the wrapped cell.
        cell_output, next_cell_state = self._cell(cell_inputs, cell_state)
        # Static batch size when known, otherwise the dynamic one.
        cell_batch_size = (
            cell_output.shape[0].value or tf.shape(cell_output)[0])
        error_message = (
            "When applying AttentionWrapper %s: " % self.name +
            "Non-matching batch sizes between the memory "
            "(encoder output) and the query (decoder output). Are you using "
            "the BeamSearchDecoder? You may need to tile your memory input via "
            "the tf.contrib.seq2seq.tile_batch function with argument "
            "multiple=beam_width.")
        # Runtime assertion that the decoder batch matches the memory batch.
        with tf.control_dependencies(
                self._batch_size_checks(cell_batch_size, error_message)):
            cell_output = tf.identity(
                cell_output, name="checked_cell_output")
        # Normalize the single-mechanism case to lists so one code path
        # serves both single and multi attention.
        if self._is_multi:
            previous_attention_state = state.attention_state
            previous_alignment_history = state.alignment_history
        else:
            previous_attention_state = [state.attention_state]
            previous_alignment_history = [state.alignment_history]
        all_alignments = []
        all_attentions = []
        all_attention_states = []
        maybe_all_histories = []
        # Steps 3-6, once per attention mechanism.
        for i, attention_mechanism in enumerate(self._attention_mechanisms):
            attention, alignments, next_attention_state = _compute_attention(
                attention_mechanism, cell_output, previous_attention_state[i],
                self._attention_layers[i] if self._attention_layers else None,
                self.is_manual_attention, self.manual_alignments, state.time)
            # Record this step's alignments when history tracking is on.
            alignment_history = previous_alignment_history[i].write(
                state.time, alignments) if self._alignment_history else ()
            all_attention_states.append(next_attention_state)
            all_alignments.append(alignments)
            all_attentions.append(attention)
            maybe_all_histories.append(alignment_history)
        # Concatenate all mechanisms' attention vectors along features.
        attention = tf.concat(all_attentions, 1)
        next_state = AttentionWrapperState(
            time=state.time + 1,
            cell_state=next_cell_state,
            attention=attention,
            attention_state=self._item_or_tuple(all_attention_states),
            alignments=self._item_or_tuple(all_alignments),
            alignment_history=self._item_or_tuple(maybe_all_histories))
        if self._output_attention:
            return attention, next_state
        else:
            return cell_output, next_state
def _compute_attention(attention_mechanism, cell_output, previous_alignments, attention_layer, is_manual_attention, manual_alignments, time):
    """Compute attention for one decoder step with one attention mechanism.

    Args:
      attention_mechanism: the AttentionMechanism used to score the query.
      cell_output: decoder cell output for this step (the query).
      previous_alignments: attention state carried from the previous step.
      attention_layer: optional layer applied to [cell_output; context];
        when None the raw context vector is returned as the attention.
      is_manual_attention: boolean tensor; when true the externally supplied
        `manual_alignments` replace the computed alignments.
      manual_alignments: externally forced alignments, indexed as
        manual_alignments[:, time, :].
      time: current decoder time step, used to slice manual_alignments.

    Returns:
      (attention, alignments, next_attention_state)
    """
    # Score the query against the memory; passes (query, state).
    computed_alignments, next_attention_state = attention_mechanism(cell_output, state=previous_alignments)
    batch_size, max_time = tf.shape(computed_alignments)[0], tf.shape(computed_alignments)[1]
    # Only this part differs from tensorflow 1.3: optionally override the
    # computed alignments with the user-provided (manual) ones.
    alignments = tf.cond(is_manual_attention, lambda: manual_alignments[:, time, :], lambda: computed_alignments,)
    #alignments = tf.one_hot(tf.zeros((batch_size,), dtype=tf.int32), max_time, dtype=tf.float32)
    # Reshape from [batch_size, memory_time] to [batch_size, 1, memory_time]
    expanded_alignments = tf.expand_dims(alignments, 1)
    # Context is the inner product of alignments and values along the
    # memory time dimension.
    #   alignments shape is [batch_size, 1, memory_time]
    #   attention_mechanism.values shape is [batch_size, memory_time, memory_size]
    # the batched matmul is over memory_time, so the output shape is
    # [batch_size, 1, memory_size]; we then squeeze out the singleton dim.
    context = tf.matmul(expanded_alignments, attention_mechanism.values)
    context = tf.squeeze(context, [1])
    if attention_layer is not None:
        attention = attention_layer(tf.concat([cell_output, context], 1))
    else:
        attention = context
    return attention, alignments, next_attention_state
class DecoderPrenetWrapper(RNNCell):
    '''Runs RNN inputs through a prenet before sending them to the cell.'''
    # Simply applies a prenet to the input first (optionally concatenating an
    # extra embedding, e.g. a speaker embedding) before delegating to the cell.
    def __init__(self, cell, embed_to_concat, is_training, prenet_sizes, dropout_prob):
        # cell: the wrapped RNNCell.
        # embed_to_concat: optional embedding concatenated onto the prenet
        #   output; None disables the concatenation.
        # is_training: controls prenet dropout.
        # prenet_sizes: layer sizes of the prenet.
        # dropout_prob: dropout rate used inside the prenet.
        super(DecoderPrenetWrapper, self).__init__()
        self._is_training = is_training
        self._cell = cell
        self._embed_to_concat = embed_to_concat
        self.prenet_sizes = prenet_sizes
        self.dropout_prob = dropout_prob

    @property
    def state_size(self):
        # Same state as the wrapped cell; this wrapper keeps no state of its own.
        return self._cell.state_size

    @property
    def output_size(self):
        return self._cell.output_size

    def call(self, inputs, state):
        # Apply the prenet, then (optionally) append the extra embedding.
        prenet_out = prenet(inputs, self._is_training, self.prenet_sizes, self.dropout_prob, scope='decoder_prenet')
        if self._embed_to_concat is not None:
            concat_out = tf.concat([prenet_out, self._embed_to_concat], axis=-1, name='speaker_concat')
            return self._cell(concat_out, state)
        else:
            return self._cell(prenet_out, state)

    def zero_state(self, batch_size, dtype):
        return self._cell.zero_state(batch_size, dtype)
class ConcatOutputAndAttentionWrapper(RNNCell):
    '''Concatenates RNN cell output with the attention context vector.

    This is expected to wrap a cell wrapped with an AttentionWrapper constructed with
    attention_layer_size=None and output_attention=False. Such a cell's state will include an
    "attention" field that is the context vector.
    '''
    def __init__(self, cell, embed_to_concat):
        # cell: AttentionWrapper-wrapped cell whose state exposes .attention.
        # embed_to_concat: optional extra embedding appended to every output.
        super(ConcatOutputAndAttentionWrapper, self).__init__()
        self._cell = cell
        self._embed_to_concat = embed_to_concat

    @property
    def state_size(self):
        return self._cell.state_size

    @property
    def output_size(self):
        # Output grows by the size of the attention context vector.
        return self._cell.output_size + self._cell.state_size.attention

    def call(self, inputs, state):
        output, res_state = self._cell(inputs, state)
        if self._embed_to_concat is not None:
            # [cell output | attention context | extra embedding]
            tensors = [output, res_state.attention, self._embed_to_concat,]
            return tf.concat(tensors, axis=-1), res_state
        else:
            return tf.concat([output, res_state.attention], axis=-1), res_state

    def zero_state(self, batch_size, dtype):
        return self._cell.zero_state(batch_size, dtype)
class BahdanauMonotonicAttention_hccho(_BaseMonotonicAttentionMechanism):
    """Monotonic attention mechanism with Bahdanau-style energy function.

    This type of attention enforces a monotonic constraint on the attention
    distributions; that is once the model attends to a given point in the memory
    it can't attend to any prior points at subsequence output timesteps. It
    achieves this by using the _monotonic_probability_fn instead of softmax to
    construct its attention distributions. Since the attention scores are passed
    through a sigmoid, a learnable scalar bias parameter is applied after the
    score function and before the sigmoid. Otherwise, it is equivalent to
    BahdanauAttention. This approach is proposed in

    Colin Raffel, Minh-Thang Luong, Peter J. Liu, Ron J. Weiss, Douglas Eck,
    "Online and Linear-Time Attention by Enforcing Monotonic Alignments."
    ICML 2017. https://arxiv.org/abs/1704.00784
    """
    def __init__(self,
                 num_units,
                 memory,
                 memory_sequence_length=None,
                 normalize=False,
                 score_mask_value=None,
                 sigmoid_noise=0.,
                 sigmoid_noise_seed=None,
                 score_bias_init=0.,
                 mode="parallel",
                 dtype=None,
                 name="BahdanauMonotonicAttentionHccho"):
        """Construct the Attention mechanism.

        Args:
          num_units: The depth of the query mechanism.
          memory: The memory to query; usually the output of an RNN encoder. This
            tensor should be shaped `[batch_size, max_time, ...]`.
          memory_sequence_length (optional): Sequence lengths for the batch entries
            in memory. If provided, the memory tensor rows are masked with zeros
            for values past the respective sequence lengths.
          normalize: Python boolean. Whether to normalize the energy term.
          score_mask_value: (optional): The mask value for score before passing into
            `probability_fn`. The default is -inf. Only used if
            `memory_sequence_length` is not None.
          sigmoid_noise: Standard deviation of pre-sigmoid noise. See the docstring
            for `_monotonic_probability_fn` for more information.
          sigmoid_noise_seed: (optional) Random seed for pre-sigmoid noise.
          score_bias_init: Initial value for score bias scalar. It's recommended to
            initialize this to a negative value when the length of the memory is
            large.
          mode: How to compute the attention distribution. Must be one of
            'recursive', 'parallel', or 'hard'. See the docstring for
            `tf.contrib.seq2seq.monotonic_attention` for more information.
          dtype: The data type for the query and memory layers of the attention
            mechanism.
          name: Name to use when creating ops.
        """
        # Set up the monotonic probability fn with supplied parameters
        if dtype is None:
            dtype = tf.float32
        wrapped_probability_fn = functools.partial(
            _monotonic_probability_fn, sigmoid_noise=sigmoid_noise, mode=mode,
            seed=sigmoid_noise_seed)
        super(BahdanauMonotonicAttention_hccho, self).__init__(
            query_layer=Dense(num_units, name="query_layer", use_bias=False, dtype=dtype),
            memory_layer=Dense(num_units, name="memory_layer", use_bias=False, dtype=dtype),
            memory=memory,
            probability_fn=wrapped_probability_fn,
            memory_sequence_length=memory_sequence_length,
            score_mask_value=score_mask_value,
            name=name)
        self._num_units = num_units
        self._normalize = normalize
        self._name = name
        self._score_bias_init = score_bias_init

    def __call__(self, query, state):
        """Score the query based on the keys and values.

        Args:
          query: Tensor of dtype matching `self.values` and shape
            `[batch_size, query_depth]`.
          state: Tensor of dtype matching `self.values` and shape
            `[batch_size, alignments_size]`
            (`alignments_size` is memory's `max_time`).

        Returns:
          alignments: Tensor of dtype matching `self.values` and shape
            `[batch_size, alignments_size]` (`alignments_size` is memory's
            `max_time`).
        """
        with tf.variable_scope(None, "bahdanau_monotonic_hccho_attention", [query]):
            processed_query = self.query_layer(query) if self.query_layer else query
            # self._keys is the (layer-processed) memory.
            score = _bahdanau_score(processed_query, self._keys, self._normalize)
            # Learnable scalar bias applied to the raw scores (pre-sigmoid).
            score_bias = tf.get_variable("attention_score_bias", dtype=processed_query.dtype, initializer=self._score_bias_init)
            #alignments_bias = tf.get_variable("alignments_bias", shape = state.get_shape()[-1],dtype=processed_query.dtype, initializer=tf.zeros_initializer())  # hccho
            # hccho: a single learnable bias added to the alignments below.
            alignments_bias = tf.get_variable("alignments_bias", shape = (1),dtype=processed_query.dtype, initializer=tf.zeros_initializer())
            score += score_bias
            # In plain BahdanauAttention _probability_fn would be softmax; here
            # it is the wrapped monotonic probability fn set up in __init__.
            alignments = self._probability_fn(score, state)
            # State used for the next alignment computation
            # (= AttentionWrapperState.attention_state).
            next_state = alignments
            # hccho: the biased, clipped, renormalized alignments are used
            # directly in the attention computation.
            alignments = tf.nn.relu(alignments+alignments_bias)
            # hccho modification: renormalize so weights sum to 1
            # (epsilon guards against division by zero).
            alignments = alignments/(tf.reduce_sum(alignments,axis=-1,keepdims=True) + 1.0e-12 )
            return alignments, next_state
class LocationSensitiveAttention(BahdanauAttention):
    """Implements Bahdanau-style (cumulative) scoring function.

    Usually referred to as "hybrid" attention (content-based + location-based).
    Extends the additive attention described in:
      D. Bahdanau, K. Cho, and Y. Bengio, "Neural machine translation by
      jointly learning to align and translate," in Proceedings of ICLR, 2015.
    to use previous alignments as additional location features.

    This attention is described in:
      J. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Bengio,
      "Attention-based models for speech recognition," in Advances in Neural
      Information Processing Systems, 2015, pp. 577-585.
    """
    def __init__(self,
                 num_units,
                 memory,
                 memory_sequence_length=None,
                 smoothing=False,
                 cumulate_weights=True,
                 name='LocationSensitiveAttention'):
        """Construct the Attention mechanism.

        Args:
          num_units: The depth of the query mechanism.
          memory: The memory to query; usually the output of an RNN encoder. This
            tensor should be shaped `[batch_size, max_time, ...]`.
          memory_sequence_length (optional): Sequence lengths for the batch entries
            in memory. If provided, the memory tensor rows are masked with zeros
            for values past the respective sequence lengths. Only relevant if mask_encoder = True.
          smoothing (optional): Boolean. Determines which normalization function to use.
            Default normalization function (probablity_fn) is softmax. If smoothing is
            enabled, we replace softmax with:
                    a_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j}))
            Introduced in:
              J. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Bengio,
              "Attention-based models for speech recognition," in Advances in
              Neural Information Processing Systems, 2015, pp. 577-585.
            This is mainly used if the model wants to attend to multiple inputs parts
            at the same decoding step. We probably won't be using it since multiple sound
            frames may depend from the same character, probably not the way around.
            Note:
              We still keep it implemented in case we want to test it. They used it in the
              paper in the context of speech recognition, where one phoneme may depend on
              multiple subsequent sound frames.
          name: Name to use when creating ops.
        """
        #Create normalization function
        #Setting it to None defaults in using softmax
        normalization_function = _smoothing_normalization if (smoothing == True) else None
        super(LocationSensitiveAttention, self).__init__(
            num_units=num_units,
            memory=memory,
            memory_sequence_length=memory_sequence_length,
            probability_fn=normalization_function,
            name=name)
        # 1-D conv over the previous alignments produces the location features.
        self.location_convolution = tf.layers.Conv1D(filters=32,
            kernel_size=(31, ), padding='same', use_bias=True,
            bias_initializer=tf.zeros_initializer(), name='location_features_convolution')
        # Projects the conv features into the attention dimension.
        self.location_layer = tf.layers.Dense(units=num_units, use_bias=False,
            dtype=tf.float32, name='location_features_layer')
        self._cumulate = cumulate_weights

    def __call__(self, query, state):
        """Score the query based on the keys and values.

        Args:
          query: Tensor of dtype matching `self.values` and shape
            `[batch_size, query_depth]`.
          state (previous alignments): Tensor of dtype matching `self.values` and shape
            `[batch_size, alignments_size]`
            (`alignments_size` is memory's `max_time`).

        Returns:
          alignments: Tensor of dtype matching `self.values` and shape
            `[batch_size, alignments_size]` (`alignments_size` is memory's
            `max_time`).
        """
        previous_alignments = state
        with tf.variable_scope(None, "Location_Sensitive_Attention", [query]):
            # processed_query shape [batch_size, query_depth] -> [batch_size, attention_dim]
            processed_query = self.query_layer(query) if self.query_layer else query
            # -> [batch_size, 1, attention_dim]
            # (could be done inside _location_sensitive_score, as
            # _bahdanau_score does, but is done here instead)
            processed_query = tf.expand_dims(processed_query, 1)
            # processed_location_features shape [batch_size, max_time, attention dimension]
            # [batch_size, max_time] -> [batch_size, max_time, 1]
            expanded_alignments = tf.expand_dims(previous_alignments, axis=2)
            # location features [batch_size, max_time, filters] (filters fixed at 32)
            f = self.location_convolution(expanded_alignments)
            # Projected location features [batch_size, max_time, attention_dim]
            processed_location_features = self.location_layer(f)
            # energy shape [batch_size, max_time]
            energy = _location_sensitive_score(processed_query, processed_location_features, self.keys)
            # alignments shape = energy shape = [batch_size, max_time]
            # _probability_fn is _smoothing_normalization when smoothing=True,
            # otherwise the BahdanauAttention default (softmax).
            alignments = self._probability_fn(energy, previous_alignments)
            # Cumulate alignments: the next state accumulates attention mass
            # over time when cumulate_weights is enabled.
            if self._cumulate:
                next_state = alignments + previous_alignments
            else:
                next_state = alignments
            return alignments, next_state
def _location_sensitive_score(W_query, W_fil, W_keys):
    """Implements Bahdanau-style (cumulative) scoring function.

    This attention is described in:
      J. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Bengio,
      "Attention-based models for speech recognition," in Advances in Neural
      Information Processing Systems, 2015, pp. 577-585.

    #############################################################################
              hybrid attention (content-based + location-based)
                            f = F * alpha_{i-1}
      energy = dot(v_a, tanh(W_keys(h_enc) + W_query(h_dec) + W_fil(f) + b_a))
    #############################################################################

    Args:
      W_query: Tensor, shape '[batch_size, 1, attention_dim]' to compare to location features.
      W_fil: processed previous alignments into location features, shape '[batch_size, max_time, attention_dim]'
      W_keys: Tensor, shape '[batch_size, max_time, attention_dim]', typically the encoder outputs.

    Returns:
      A '[batch_size, max_time]' attention score (energy)
    """
    # Get the number of hidden units from the trailing dimension of keys
    dtype = W_query.dtype
    num_units = W_keys.shape[-1].value or tf.shape(W_keys)[-1]
    v_a = tf.get_variable(
        'attention_variable', shape=[num_units], dtype=dtype,
        initializer=tf.contrib.layers.xavier_initializer())
    b_a = tf.get_variable(
        'attention_bias', shape=[num_units], dtype=dtype,
        initializer=tf.zeros_initializer())
    # Additive energy reduced over the attention dimension (axis 2);
    # broadcasting adds the [batch, 1, dim] query to the [batch, time, dim] keys.
    return tf.reduce_sum(v_a * tf.tanh(W_keys + W_query + W_fil + b_a), [2])
def _smoothing_normalization(e):
    """Applies a smoothing normalization function instead of softmax.

    Introduced in:
      J. Chorowski, D. Bahdanau, D. Serdyuk, K. Cho, and Y. Bengio,
      "Attention-based models for speech recognition," in Advances in Neural
      Information Processing Systems, 2015, pp. 577-585.

    ############################################################################
                        Smoothing normalization function
                a_{i, j} = sigmoid(e_{i, j}) / sum_j(sigmoid(e_{i, j}))
    ############################################################################

    Args:
      e: matrix [batch_size, max_time(memory_time)]: expected to be energy (score)
        values of an attention mechanism

    Returns:
      matrix [batch_size, max_time]: [0, 1] normalized alignments with possible
        attendance to multiple memory time steps.
    """
    # Compute the sigmoid once and reuse it; the original built two separate
    # sigmoid ops (numerator and denominator) for the same input.
    sig = tf.nn.sigmoid(e)
    return sig / tf.reduce_sum(sig, axis=-1, keepdims=True)
class GmmAttention(AttentionMechanism):
    """Mixture-of-Gaussians attention over memory positions.

    Each step predicts (alpha, beta, kappa) for `num_mixtures` Gaussians; the
    kappa means only ever increase (kappa += exp(kappa_hat)), so attention
    moves monotonically forward over the memory.
    """
    def __init__(self,
                 num_mixtures,
                 memory,
                 memory_sequence_length=None,
                 check_inner_dims_defined=True,
                 score_mask_value=None,
                 name='GmmAttention'):
        self.dtype = memory.dtype
        self.num_mixtures = num_mixtures
        # One dense layer produces all 3 * num_mixtures mixture parameters.
        self.query_layer = tf.layers.Dense(3 * num_mixtures, name='gmm_query_layer', use_bias=True, dtype=self.dtype)

        with tf.name_scope(name, 'GmmAttentionMechanismInit'):
            if score_mask_value is None:
                score_mask_value = 0.
            # Masks scores past each entry's sequence length.
            self._maybe_mask_score = functools.partial(
                _maybe_mask_score,
                memory_sequence_length=memory_sequence_length,
                score_mask_value=score_mask_value)
            self._value = _prepare_memory(
                memory, memory_sequence_length, check_inner_dims_defined)
            # Static sizes when known, dynamic otherwise.
            self._batch_size = (
                self._value.shape[0].value or tf.shape(self._value)[0])
            self._alignments_size = (
                self._value.shape[1].value or tf.shape(self._value)[1])

    @property
    def values(self):
        return self._value

    @property
    def batch_size(self):
        return self._batch_size

    @property
    def alignments_size(self):
        return self._alignments_size

    @property
    def state_size(self):
        # The carried state is the kappa vector, one value per mixture.
        return self.num_mixtures

    def initial_alignments(self, batch_size, dtype):
        max_time = self._alignments_size
        return _zero_state_tensors(max_time, batch_size, dtype)

    def initial_state(self, batch_size, dtype):
        state_size_ = self.state_size
        return _zero_state_tensors(state_size_, batch_size, dtype)

    def __call__(self, query, state):
        with tf.variable_scope("GmmAttention"):
            previous_kappa = state

            # query (dec_rnn_size=256), params (num_mixtures(256)*3)
            params = self.query_layer(query)
            alpha_hat, beta_hat, kappa_hat = tf.split(params, num_or_size_splits=3, axis=1)

            # [batch_size, num_mixtures, 1]
            alpha = tf.expand_dims(tf.exp(alpha_hat), axis=2)
            # softmax makes the alpha value more stable.
            # alpha = tf.expand_dims(tf.nn.softmax(alpha_hat, axis=1), axis=2)
            beta = tf.expand_dims(tf.exp(beta_hat), axis=2)
            # kappa accumulates, so the window only moves forward.
            kappa = tf.expand_dims(previous_kappa + tf.exp(kappa_hat), axis=2)

            # [1, 1, max_input_steps]: memory-position index grid [[[0,1,2,...]]]
            mu = tf.reshape(tf.cast(tf.range(self.alignments_size), dtype=tf.float32), shape=[1, 1, self.alignments_size])

            # [batch_size, max_input_steps]: sum of Gaussian bumps per position.
            phi = tf.reduce_sum(alpha * tf.exp(-beta * (kappa - mu) ** 2.), axis=1)

            alignments = self._maybe_mask_score(phi)
            state = tf.squeeze(kappa, axis=2)

        return alignments, state
| StarcoderdataPython |
4823724 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-05-15 04:16
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Adds the FieldConditional model: a per-form rule that fires an action
    # (send an email or redirect) when a submitted field matches a given value.

    dependencies = [
        ('email_notifications', '0003_auto_20180206_1733'),
    ]

    operations = [
        migrations.CreateModel(
            name='FieldConditional',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # Which submitted field/value pair triggers the rule.
                ('field_name', models.CharField(max_length=255, verbose_name='field')),
                ('field_value', models.CharField(max_length=255, verbose_name='field value')),
                # What happens on a match: 'email' or 'redirect', plus its target.
                ('action_type', models.CharField(choices=[('email', 'Email to'), ('redirect', 'Redirect to')], max_length=20, verbose_name='action')),
                ('action_value', models.CharField(max_length=255, verbose_name='to name')),
                # Owning form plugin; rules are deleted with their form.
                ('form', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='conditionals', to='email_notifications.EmailNotificationFormPlugin')),
            ],
            options={
                'verbose_name': 'conditional',
                'verbose_name_plural': 'conditionals',
            },
        ),
    ]
| StarcoderdataPython |
4984 | <reponame>fintelia/habitationi
#!/usr/bin/python
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from BaseHTTPServer import BaseHTTPRequestHandler,HTTPServer
from urlparse import urlparse, parse_qs
from jinja2 import Template
import sqlite3
import urllib
def get_caps(options):
    """Return (far, lot_area) zoning caps per district, adjusted by *options*.

    far maps zone code -> maximum floor-area ratio; lot_area maps zone
    code -> minimum lot square footage per dwelling unit.  Options may
    override either table with a single value ('far_explicit' /
    'lot_explicit'), scale it ('far_factor' divides nothing, 'lot_factor'
    divides lot minimums, 'far_factor' multiplies FAR), or drop it
    entirely ('no_far' / 'no_lot').
    """
    far_groups = {
        0.5: ['A-1', 'A-2', 'B', 'SD-2'],
        0.6: ['C', 'SD-9', 'SD-10F', 'SD-10H'],
        0.75: ['C-1', 'BA-3', 'IB-2', 'O-1'],
        1.0: ['BA-1', 'SD-12'],
        1.25: ['C-1A', 'SD-5'],
        1.5: ['IA-1', 'IA', 'O-2A', 'SD-4A', 'SD-13'],
        1.75: ['C-2', 'C-2B', 'BA', 'BA-2', 'SD-8'],
        2.0: ['BC', 'O-2'],
        2.5: ['C-2A'],
        3.0: ['C-3', 'C-3A', 'C-3B', 'BB', 'BB-2', 'BC-1', 'IB-1',
              'O-3', 'O-3A', 'SD-1', 'SD-6', 'SD-7'],
        4.0: ['IA-2', 'IB'],
    }
    far = {}
    for ratio, zones in far_groups.items():
        for zone in zones:
            far[zone] = ratio
    # Districts with a ratio shared by no other zone.
    far['BB-1'] = 3.25
    far['SD-11'] = 1.7
    far['SD-15'] = 3.5

    # Zones with a unique minimum, then grouped minimums merged on top.
    lot_area = {
        'A-1': 6000,
        'A-2': 4500,
        'C-1A': 1000,
        'BC': 500,
        'BC-1': 450,
        'IA-1': 700,
        'SD-8': 650,
        'SD-14': 800,
    }
    lot_groups = {
        1200: ['IB-2', 'BA-1'],
        2500: ['B', 'SD-2', 'SD-3'],
        1800: ['C', 'SD-10F', 'SD-10H', 'SD-9'],
        1500: ['C-1', 'BA-3'],
        600: ['C-2', 'C-2B', 'O-2', 'BA', 'BA-2', 'SD-4', 'SD-4A',
              'SD-5', 'SD-11', 'SD-13'],
        300: ['C-2A', 'C-3', 'C-3A', 'C-3B', 'BB', 'BB-1', 'BB-2',
              'SD-1', 'SD-6', 'SD-7'],
    }
    for sqft, zones in lot_groups.items():
        for zone in zones:
            lot_area[zone] = sqft

    # Apply the scenario adjustments: explicit override wins over scaling.
    for zone in lot_area:
        if options and 'lot_explicit' in options:
            lot_area[zone] = options['lot_explicit']
        elif options and 'lot_factor' in options:
            lot_area[zone] = int(lot_area[zone] / float(options['lot_factor']))
    if 'no_lot' in options:
        lot_area = {}
    for zone in far:
        if options and 'far_explicit' in options:
            far[zone] = options['far_explicit']
        elif options and 'far_factor' in options:
            far[zone] = far[zone] * float(options['far_factor'])
    if 'no_far' in options:
        far = {}
    return far, lot_area
def table(options):
    """Render HTML <tr> rows of FAR and lot-area caps for the residence zones."""
    far, lot_area = get_caps(options)
    residence_zones = ['A-1', 'A-2', 'B', 'C', 'C-1', 'C-1A', 'C-2',
                       'C-2A', 'C-2B', 'C-3', 'C-3A', 'C-3B']
    rows = [
        "<tr><td>%s</td><td>%s</td><td>%s</td></tr>" % (zone, far.get(zone, ""), lot_area.get(zone, ""))
        for zone in residence_zones
    ]
    return "\n".join(rows)
def unit_cap(row, options=None):
    """Return the maximum number of dwelling units allowed on one lot.

    row: dict describing a lot; reads row['zone'] and row['gis_lot_size'].
    options: scenario flags understood by get_caps, plus 'no_a' (lift the
      single-family limit in A-1/A-2) and 'no_b' (lift the 2-unit limit in B).

    Returns -1 for zones this model does not cover (non-residential, CRDD),
    otherwise the unit cap implied by FAR and the lot-area minimums.
    """
    if not options:
        options = {}
    far, lot_area = get_caps(options)
    zone = row['zone']
    # Only Residence A/B/C-style districts are modeled; CRDD (which also
    # starts with "C") is explicitly excluded.
    if (not zone.startswith("C") and not zone in ("A-1", "A-2", "B")) or zone == "CRDD":
        return -1
    # A-1/A-2 are single-family districts unless 'no_a' lifts that limit.
    if zone in ['A-1', 'A-2'] and not 'no_a' in options:
        return 1
    area = float(row.get('gis_lot_size',0) or 0)
    # Units allowed by the per-unit lot-area minimum (at least 1); treated
    # as effectively unlimited when no minimum applies or size is unknown.
    if zone in lot_area and area:
        m = max(area/(lot_area[zone]), 1)
    else:
        m = 100000
    # Units allowed by FAR, at ~800 sq ft of building per unit.
    # NOTE(review): 800 is a hardcoded unit-size assumption - confirm.
    max_building = area * far[zone] * 1
    if max(int(max_building/800), 1) < m:
        m = max(int(max_building/800), 1)
    # B districts are two-family unless 'no_b' lifts that limit.
    if zone == "B" and not 'no_b' in options:
        m = min(m, 2)
    return m
def dict_factory(cursor, row):
    """sqlite3 row factory: map a result row to {column_name: value}."""
    return {column[0]: row[idx] for idx, column in enumerate(cursor.description)}
def compute_count(options = None):
    """Sum the allowed-unit cap over every lot in the local prop.db.

    Opens the SQLite database ``prop.db``, applies unit_cap() to each row
    of the ``lots`` table, and returns the total unit count.  Lots whose
    zone the model does not cover (unit_cap returns -1) are skipped.
    """
    conn = sqlite3.connect("prop.db")
    # Close the connection even on error (the original leaked it).
    try:
        if options is None:
            options = {}
        c = conn.cursor()
        c.row_factory = dict_factory
        m = 0
        for row in c.execute("SELECT * FROM lots"):
            t = unit_cap(row, options=options)
            if t == -1:
                continue
            m += int(t)
        return m
    finally:
        conn.close()
def describe(options):
    """Summarize the zoning modifications encoded in *options* in English.

    Returns a comma-separated list of change descriptions, or "" when the
    options leave current zoning unchanged (scale factors of 1.0 are no-ops).
    """
    described = []
    # Lot-minimum changes: elimination beats an explicit override, which
    # beats a scale factor.
    if 'no_lot' in options:
        described.append("eliminate lot size/unit minimums")
    elif 'lot_explicit' in options:
        described.append("set all lot size/unit minimums to %s" % options['lot_explicit'])
    elif 'lot_factor' in options and options['lot_factor'] != 1.0:
        described.append('decrease lot size minimums by a factor of %s' % options['lot_factor'])
    # Per-district unit-count rules.
    if 'no_a' in options:
        described.append('eliminate single family zoning in A-1 and A-2 zones')
    if 'no_b' in options:
        described.append('eliminate two-family zoning limits in B zones')
    # FAR changes mirror the lot-minimum precedence.
    if 'far_explicit' in options:
        described.append("set all FAR maximums to %s" % options['far_explicit'])
    elif 'far_factor' in options and options['far_factor'] != 1.0:
        described.append('increase FAR maximums by a factor of %s' % options['far_factor'])
    # ", ".join of an empty list is already "".
    return ", ".join(described)
def serve(options):
    """Render the unit-count report page for one zoning scenario.

    Reads the Jinja template from unit_template.html, computes the total
    unit count under *options*, and returns the rendered HTML string.
    """
    # Context manager so the template file handle is closed
    # (the original left the open() result unclosed).
    with open("unit_template.html") as template_file:
        template = Template(template_file.read())
    unit_count = int(compute_count(options))
    data = {}
    data['changes'] = describe(options)
    data['unit_count'] = unit_count
    # NOTE(review): 37453 appears to be the baseline unit count under
    # current zoning - confirm and consider naming it as a constant.
    data['increase'] = unit_count - 37453
    data['table'] = table(options)
    data['options'] = options
    return template.render(**data)
PORT_NUMBER = 8080  # TCP port the report server listens on

class myHandler(BaseHTTPRequestHandler):
    """Parses zoning options from the query string and serves the report page."""

    def do_GET(self):
        # Always respond 200 with HTML; the scenario lives in the query string.
        self.send_response(200)
        self.send_header('Content-type','text/html')
        self.end_headers()
        # Send the html message
        form = parse_qs(urlparse(self.path).query)
        options = {}
        # Numeric scale factors default to 1.0 (no change).
        for name in ('far_factor', 'lot_factor'):
            options[name] = float(form[name][0]) if name in form else 1.0
        # Explicit overrides apply only when a non-empty value was submitted.
        if form.get('far_explicit'):
            options['far_explicit'] = float(form['far_explicit'][0])
        if form.get('lot_explicit'):
            options['lot_explicit'] = int(form['lot_explicit'][0])
        # Checkbox-style flags map to the option keys get_caps understands.
        flag_map = {'lot': 'no_lot', 'singlefamily': 'no_a', 'twofamily': 'no_b'}
        for query_key, option_key in flag_map.items():
            if query_key in form:
                options[option_key] = True
        self.wfile.write(serve(options))
        return
def run():
    """Start the HTTP server and block until Ctrl-C (Python 2 style)."""
    try:
        #Create a web server and define the handler to manage the
        #incoming request
        server = HTTPServer(('', PORT_NUMBER), myHandler)
        print 'Started httpserver on port ' , PORT_NUMBER
        #Wait forever for incoming http requests
        server.serve_forever()
    except KeyboardInterrupt:
        # Release the listening socket on shutdown.
        print '^C received, shutting down the web server'
        server.socket.close()
# Script entry point.
# NOTE(review): run() returns None, so this also prints "None" after the
# server shuts down; a plain `run()` call was probably intended - confirm.
if __name__ == "__main__":
    print run()
| StarcoderdataPython |
1643049 | """
LibreASR source code
"""
| StarcoderdataPython |
1761117 | '''
ABC company is interested to computerize the salary payments of
their employees
DA: 80% Basic pay
HRA: 30% Basic Pay
PF: 12% of Basic Pay
Input: Basic Pay
Process: Salary=DA+HRA+Basic Pay-PF
Output: Salary
'''
basicpay=float(input("Enter Basic pay: "))
#salary is basicpay+DA+HRA-PF
salary=basicpay+basicpay*0.80+basicpay*0.30-basicpay*0.12
print("Net Salary:",format(salary,'.2f'))
| StarcoderdataPython |
1662365 | import argparse
import random
def main():
    """Choose which routers of a square mesh host CPUs and print the layout.

    Always attaches the two corners of the first row and the entire last
    row, then fills the remaining CPU slots by seeded random choice from
    the first-row interior and the middle rows.  Prints the attached
    router list and the complementary "off cores" set as "{a,b,c}".
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--num-cpus', '-c', default=32, type=int,
                        help='number of routers, default is 32')
    parser.add_argument('--mesh-rows', '-d', default=8, type=int,
                        help='number of rows in mesh network, default is 8')
    options = parser.parse_args()

    num_cpus = options.num_cpus
    num_rows = options.mesh_rows
    num_columns = num_rows  # square mesh
    num_routers = num_rows * num_columns

    # Always-attached routers: the first row's two corners, then the last row.
    active_routers = [0, num_rows - 1]
    for i in range(num_rows):
        active_routers.append(num_routers - i - 1)

    # Fixed seed so the "random" placement is reproducible across runs.
    random.seed(99)

    # Candidates: interior of the first row plus all the middle rows.
    remaining_cores = list(range(1, num_rows - 1))
    remaining_cores.extend(range(num_rows, num_routers - num_rows))

    # Fill the remaining CPU slots (total minus the 2 corners and last row).
    for _ in range(num_cpus - 2 - num_rows):
        r = random.choice(remaining_cores)
        active_routers.append(r)
        remaining_cores.remove(r)
    active_routers.sort()

    # join() handles the empty case gracefully; the original indexed
    # remaining_cores[0] and crashed when every candidate was consumed,
    # and built the string with quadratic += concatenation.
    off_cores = '{' + ','.join(str(core) for core in remaining_cores) + '}'

    print('attached routers : {}'.format(active_routers))
    print('off cores: {}'.format(off_cores))
# Script entry point.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
3250332 | <filename>StatsBot/test.py
import discord
import json
import pandas as pd
import numpy as np
import pandasql as ps
from datapuller import DataPuller
from nba_search import *
# Ad-hoc test driver for the NBA stats query pipeline: parses one hardcoded
# Chinese-language question, extracts (season, team, player, stat, level),
# builds a SQL query via DataPuller, and prints the assembled reply.

# Bot credentials/config (expected to contain JSON).
with open("account.info", encoding="utf-8") as f:
    accountDICT = json.loads(f.read())

# Slots to be filled from the parsed user message.
info_fields = {'season': None,
               'team': None,
               'player': None,
               'stat': None,
               'level': None}

# Stat code -> Chinese surface forms used in user messages.
statDICT = {'pts':['得分'],
            'reb':['籃板', '搶籃板'], 'ast':['助攻'],
            'blk':['阻攻', '火鍋', '蓋火鍋','搧帽', '蓋帽'],
            'stl':['抄截']}

# Sample query (season + player + single-game-high rebounds); the two
# commented lines are alternative team-level test inputs.
msg = '2018賽季 <NAME> 單場最高籃板'
#msg = '2020球季魔術隊單場籃板最多是誰'
#msg = '2019公牛隊平均得分最多的球員'
print(msg)

df = pd.read_csv('./media/data.csv')

# Strip the player name and season from the message, filling info_fields.
msg, info_fields = get_playername(msg, info_fields)
msg, info_fields = get_season(msg, info_fields)
print(msg, 'msg')

# Parse the remainder (team / stat / level) with Loki.
filterLIST = []
resultDICT = runLoki([msg], filterLIST)

if len(resultDICT['team']) > 0:
    info_fields['team'] = resultDICT['team'][0]
    team = resultDICT['team'][0]
else:
    team = ''

print(resultDICT)
info_fields['stat'] = resultDICT['stat'][0]
info_fields['level'] = resultDICT['level']
print(info_fields, "here")

# Choose the SQL template: player average, player single game, or team best.
if info_fields['player'] and info_fields['level'] == 'avg':
    qry = DataPuller.get_player_agg_stats(info_fields['stat'],
                                          info_fields['player'],
                                          info_fields['season'])
elif info_fields['player'] and info_fields['level'] == 'single':
    qry = DataPuller.get_player_single_game_stats(info_fields['stat'],
                                                  info_fields['player'],
                                                  info_fields['season'])
elif info_fields['team']:
    qry = DataPuller.get_team_best_player_stats(info_fields['stat'],
                                                info_fields['team'],
                                                info_fields['season'])
else:
    raise ValueError('invalid input')

# Run the generated SQL against the in-memory dataframe.
result = ps.sqldf(qry, locals())
print(result)

aa = statDICT[info_fields['stat']][0]
print(aa)
bb = result['target_stat'].iloc[0]
print(bb)
print(info_fields)

# Assemble the human-readable reply: season, team, player, level, stat, value.
reply_message = str(info_fields['season']) + ' ' + team + ' ' + result['player_name'] + ' ' + info_fields['level'] + ' ' + statDICT[info_fields['stat']][0] + ' ' + str(round(result['target_stat'].iloc[0], 1))
print(reply_message)
| StarcoderdataPython |
36681 | <gh_stars>0
#!/usr/bin/env python
import rospy
import threading
from ca_msgs.msg import Bumper
from geometry_msgs.msg import Twist, Vector3
class StateMachine(object):
    """Queues and executes motion goals (velocity commands) for the robot."""

    def __init__(self):
        self.pub = rospy.Publisher("/cmd_vel", Twist, queue_size=10)
        # FIFO queue of pending motion goals.
        self.goal_queue = []

    def rotate(self, ang_vel):
        self.move(0., ang_vel)

    def rotate_left(self, ang_vel):
        self.rotate(ang_vel)

    def rotate_right(self, ang_vel):
        self.rotate(-ang_vel)

    def set_goal(self, data):
        """Translate a bumper event into one or more queued motion goals.

        Each goal is a dict with a bound-method `goal`, a `velocity`, and a
        `duration` in seconds.
        """
        # NOTE(review): when both bumpers are pressed, the left-bumper branch
        # below also fires (it is `if`, not `elif`) -- confirm this is intended.
        if data.is_left_pressed and data.is_right_pressed:
            self.goal_queue.append({'goal': self.move_backward, 'velocity': 0.1, 'duration': 3.})
        if data.is_left_pressed:
            self.goal_queue.append({'goal': self.move_backward, 'velocity': 0.1, 'duration': 1.5})
            self.goal_queue.append({'goal': self.rotate_right, 'velocity': 0.3, 'duration': 2.})
        elif data.is_right_pressed:
            self.goal_queue.append({'goal': self.move_backward, 'velocity': 0.1, 'duration': 1.5})
            self.goal_queue.append({'goal': self.rotate_left, 'velocity': 0.3, 'duration': 2.})
        else:
            self.goal_queue.append({'goal': self.move_straight, 'velocity': 0.2, 'duration': 0.})

    def stop(self):
        self.move(0., 0.)

    def close(self):
        """Halt the robot and discard any pending goals."""
        self.stop()
        self.goal_queue = []

    def move(self, lin_vel, ang_vel):
        """Publish a single Twist message with the given velocities."""
        msg = Twist()
        msg.linear.x = lin_vel
        msg.angular.z = ang_vel
        self.pub.publish(msg)

    def move_straight(self, lin_vel):
        self.move(lin_vel, 0.)

    def move_backward(self, lin_vel):
        self.move_straight(-lin_vel)

    def run(self):
        """Execute the next queued goal for its duration, or cruise straight."""
        if len(self.goal_queue) > 0:
            # BUG FIX: pop(0) executes goals in FIFO order. The original
            # pop() took the most recently queued goal first, so multi-step
            # avoidance sequences ran in reverse (rotate before backing up).
            goal = self.goal_queue.pop(0)
            end_time = rospy.Time.now().secs + goal.get('duration')
            while end_time > rospy.Time.now().secs:
                goal.get('goal')(goal.get('velocity'))
        else:
            # Move straight
            self.move_straight(0.2)
class RankingController():
    """ROS node wiring bumper events into the StateMachine's goal queue."""
    def __init__(self):
        rospy.init_node("ranking_controller", log_level=rospy.INFO)
        # Bumper events arrive asynchronously via this subscriber.
        self.sub = rospy.Subscriber("bumper", Bumper, self.callback)
        self.state_machine = StateMachine()
        self.rate = rospy.Rate(10) # Hz
        # Clean up the state machine when ROS shuts this node down.
        rospy.on_shutdown(self.stop)
        # Goal execution runs on a background thread; spin() blocks the main
        # thread to service subscriber callbacks.
        threading.Thread(name="ranking_controller", target=self.run).start()
        rospy.spin()
    def callback(self, data):
        """Bumper topic callback: queue avoidance goals for the event."""
        rospy.logdebug("{} {}".format(data.is_left_pressed, data.is_right_pressed))
        self.state_machine.set_goal(data)
    def stop(self):
        """Shutdown hook: stop the robot and clear pending goals."""
        rospy.loginfo("Thread stopped.")
        self.state_machine.close()
    def run(self):
        """Background loop: execute queued goals at the node rate."""
        rospy.loginfo("Thread started.")
        while not rospy.is_shutdown():
            self.state_machine.run()
            self.rate.sleep()
if __name__ == "__main__":
rc = RankingController()
| StarcoderdataPython |
17361 | <reponame>Very1Fake/monitor<gh_stars>0
from typing import Dict
_codes: Dict[int, str] = {
# Debug (1xxxx)
# System (100xx)
10000: 'Test debug',
# Pipe (103xx)
10301: 'Reindexing parser',
# Resolver (109xx)
10901: 'Executing catalog',
10902: 'Executing target',
10903: 'Catalog executed',
10904: 'Target executed',
# SubProvider (113xx)
11301: 'Common exception while sending request',
# Information (2xxxx)
# System (200xx)
20000: 'Test information',
20001: 'Thread started',
20002: 'Thread paused',
20003: 'Thread resumed',
20004: 'Thread closing',
20005: 'Thread closed',
# Core (201xx)
20101: 'Production mode enabled',
20102: 'Signal Interrupt',
20103: 'Turning off',
20104: 'Saving success hashes started',
20105: 'Saving success hashes complete',
20106: 'Offline',
# ThreadManager (202xx)
20201: 'Pipe initialized',
20202: 'Pipe started',
20203: 'Worker initialized',
20204: 'Worker started',
20205: 'CatalogWorker initialized',
20206: 'CatalogWorker started',
# Pipe (203xx)
20301: 'Reindexing parsers started',
20302: 'Reindexing parsers complete',
20303: 'Parser reindexing complete',
# ScriptManager (205xx)
20501: 'Script loaded',
20502: 'Script unloaded',
20503: 'Script reloaded',
20504: 'Loading all indexed scripts',
20505: 'Loading all indexed scripts complete',
20506: 'Unloading all scripts',
20507: 'Unloading all scripts complete',
20508: 'Reloading all scripts',
20509: 'Reloading all scripts complete',
# ScriptIndex (206xx)
20601: 'Config loaded',
20602: 'Config dumped',
20603: 'Config does not loaded (must be dict)',
20604: 'Skipping script (config not detected)',
20605: 'Skipping script (bad config)',
20606: 'Skipping script (script incompatible with core)',
20607: 'Skipping script (script in blacklist)',
20608: 'Skipping script (script with this name is already indexed)',
20609: 'N script(s) indexed',
20610: 'Skipping config (script not in whitelist)',
# EventHandler (207xx)
20701: 'Starting loop',
20702: 'Loop started',
20703: 'Stopping loop',
20704: 'Loop stopped',
# Logger (208xx)
20801: 'Log level changed',
20802: 'Log mode changed',
20803: 'Time changed to UTC',
20804: 'Time changed to local',
# Resolver (209xx)
20901: 'Successful target execution',
20902: 'Catalog updated',
# Commands (211xx)
21101: 'Command executing',
21102: 'Command executed',
21103: 'Command execute',
# Provider (212xx)
21201: 'Proxies dumped',
21202: 'Checking proxy',
21203: 'Checking proxy (OK)',
# Keywords (215xx)
21501: 'Dumping keywords(started)',
21502: 'Dumping keywords(complete)',
21503: 'Clearing keywords(started)',
21504: 'Clearing keywords(complete)',
21505: 'Syncing keywords(started)',
21506: 'Syncing keywords(complete)',
21507: 'Loading keywords(started)',
21508: 'Loading keywords(complete)',
# Warning (3xxxx)
# System (300xx)
30000: 'Test warning',
# ThreadManager (302xx)
30201: 'Pipe was stopped',
30202: 'Worker was stopped',
30203: 'CatalogWorker was stopped',
30204: 'Lock forced released',
# Pipe (303xx)
30301: 'Parser reindexing failed',
30302: 'Catalog lost while sending (queue full)',
30303: 'Target lost while sending (queue full)',
# ScriptManager (305xx)
30501: 'Module not loaded',
30502: 'Nothing to import in script',
30503: 'Script cannot be unloaded (_unload)',
30504: 'Script cannot be unloaded (_reload)',
30505: 'Script not indexed but still loaded',
30506: 'Script already loaded',
30507: 'Max errors for script reached, unloading',
# EventHandler (307xx)
30701: 'Loop already started',
30702: 'Loop already stopped',
# Logger (308xx)
30801: 'Meaningless level change (changing to the same value)',
30802: 'Meaningless mode change (changing to the same value)',
30803: 'Meaningless time change (changing to the same value)',
# Resolver (309xx)
30901: 'Catalog lost while retrieving (script not loaded)',
30902: 'Catalog lost while retrieving (script has no Parser)',
30903: 'Target lost while retrieving (script not loaded)',
30904: 'Target lost while retrieving (script has no Parser)',
30905: 'Catalog lost while executing (script unloaded)',
30906: 'Catalog lost while executing (script has no parser)',
30907: 'Catalog lost while executing (bad result)',
30908: 'Target lost while executing (script unloaded)',
30909: 'Target lost while executing (script has no parser)',
30910: 'Target lost while executing (bad result)',
30911: 'Smart catalog expired',
30912: 'Smart target expired',
# Provider (312xx)
31201: 'Proxy added',
31202: 'Proxy removed',
31203: 'Proxies list changed',
31204: 'Proxies statistics reset',
31205: 'Proxies list cleared',
# Keywords (315xx)
31501: 'Keywords file not found',
31511: 'Absolute keyword not loaded (TypeError)',
31512: 'Absolute keyword not loaded (UniquenessError)',
31521: 'Positive keyword not loaded (TypeError)',
31522: 'Positive keyword not loaded (UniquenessError)',
31531: 'Negative keyword not loaded (TypeError)',
31532: 'Negative keyword not loaded (UniquenessError)',
# Error (4xxxx)
# System (400xx)
40000: 'Unknown error',
# ThreadManager (402xx)
40201: 'Pipe was unexpectedly stopped',
40202: 'Worker was unexpectedly stopped',
40203: 'CatalogWorker was unexpectedly stopped',
# Pipe (403xx)
40301: 'Wrong catalog received from script',
# Worker (404xx)
40401: 'Unknown status received while executing',
40402: 'Parser execution failed',
40403: 'Target lost in pipeline (script unloaded)',
# ScriptsManager (405xx)
40501: 'Can\'t load script (ImportError)',
40502: 'Can\'t load script (script not indexed)',
40503: 'Can\'t unload script (script isn\'t loaded)',
40504: 'Can\'t reload script (script isn\'t loaded)',
40505: 'Script cannot be reloaded (folder not found)',
40506: 'Script cannot be reloaded (script not in index)',
# EventHandler (407xx)
40701: 'Event execution failed',
# Logger (408xx)
40801: 'Can\'t change level (possible values (0, 1, 2, 3, 4, 5))',
40802: 'Can\'t change mode (possible values (0, 1, 2, 3))',
# Resolver (409xx)
40901: 'Unknown index type (while inserting)',
40902: 'Unknown target type (while inserting)',
40903: 'Catalog execution failed',
40904: 'Target execution failed',
# Provider (412xx)
41201: 'Bad proxy',
41202: 'Checking proxy (FAILED)',
# SubProvider (413xx)
41301: 'Severe exception while sending request',
# Keywords (415xx)
41501: 'Loading keywords (Failed)',
# Fatal (5xxxx)
# System (500xx)
50000: 'Test fatal',
# Core (501xx)
50101: 'ThreadManager unexpectedly has turned off',
# ThreadManager (502xx)
50201: 'Exception raised, emergency stop initiated',
# Pipe (503xx)
50301: 'Unexpectedly has turned off',
# Worker (504xx)
50401: 'Unexpectedly has turned off',
# CatalogWorker (510xx)
51001: 'Unexpectedly has turned off',
# RemoteThread (514xx)
51401: 'Unknown fatal error'
}
class CodeError(Exception):
    """Raised when an event code is malformed or unknown."""
    pass


class Code:
    """An event/log code with an optional free-form message.

    ``code`` is a five-digit identifier; ``title`` is its canonical
    description looked up from the module-level ``_codes`` table.
    """

    __slots__ = ('code', 'title', 'message')

    code: int
    title: str
    message: str

    def __init__(self, code: int, message: str = ''):
        if not (isinstance(code, int) and len(str(code)) == 5):
            raise CodeError('Code must be int in range (10000 - 65535)')
        if code not in _codes:
            raise CodeError('Code does not exist')
        self.code = code
        self.title = _codes[code]
        self.message = message

    def __str__(self) -> str:
        return self.format()

    def __repr__(self) -> str:
        return f'Code({self.code}, {self.title})'

    def format(self, mode: int = 1) -> str:
        """Render the code.

        mode 1 -> 'Cxxxxx: message' (or 'Cxxxxx' when no message),
        mode 2 -> 'Cxxxxx title[: message]', anything else -> 'Cxxxxx'.
        """
        if mode == 2:
            suffix = f': {self.message}' if self.message else ''
            return f'C{self.code} {self.title}' + suffix
        if mode == 1 and self.message:
            return f'C{self.code}: {self.message}'
        return f'C{self.code}'
| StarcoderdataPython |
59328 | import string
from .network import g2p_network
# Grapheme vocabulary: special tokens (padding, unknown, end-of-word)
# followed by the lowercase Latin alphabet.
graphemes = ["<pad>", "<unk>", "</s>"] + list(string.ascii_lowercase)
grapheme_to_index = {x: i for i, x in enumerate(graphemes)}
# Fallback index used for characters outside the vocabulary.
UNKNOWN_GRAPHEME = grapheme_to_index["<unk>"]
# Marker appended to every word before prediction.
END_OF_WORD = grapheme_to_index["</s>"]
phonemes = [
"<pad>",
"<unk>",
"<s>",
"</s>",
"AA0",
"AA1",
"AA2",
"AE0",
"AE1",
"AE2",
"AH0",
"AH1",
"AH2",
"AO0",
"AO1",
"AO2",
"AW0",
"AW1",
"AW2",
"AY0",
"AY1",
"AY2",
"B",
"CH",
"D",
"DH",
"EH0",
"EH1",
"EH2",
"ER0",
"ER1",
"ER2",
"EY0",
"EY1",
"EY2",
"F",
"G",
"HH",
"IH0",
"IH1",
"IH2",
"IY0",
"IY1",
"IY2",
"JH",
"K",
"L",
"M",
"N",
"NG",
"OW0",
"OW1",
"OW2",
"OY0",
"OY1",
"OY2",
"P",
"R",
"S",
"SH",
"T",
"TH",
"UH0",
"UH1",
"UH2",
"UW",
"UW0",
"UW1",
"UW2",
"V",
"W",
"Y",
"Z",
"ZH",
]
def predict(graphemes):
    """Map a grapheme sequence to ARPAbet phoneme symbols via the g2p network.

    Characters outside the vocabulary map to the <unk> index; an end-of-word
    marker is appended before the network is run.
    """
    encoded = [grapheme_to_index.get(g, UNKNOWN_GRAPHEME) for g in graphemes]
    encoded.append(END_OF_WORD)
    return [phonemes[idx] for idx in g2p_network.predict(encoded)]
| StarcoderdataPython |
#!/usr/local/bin/python3
# Minimal smoke-test script: prints a greeting and exits.
print ("hello world")
| StarcoderdataPython |
1774358 | <filename>project1/src/util/modifiers.py
# -*- coding: utf-8 -*-
import itertools
import numpy as np
import implementations as impl
import costs
def batch_iter(y, tx, batch_size, num_batches=1, shuffle=True):
    """Yield up to `num_batches` minibatches of matching (y, tx) slices.

    When `shuffle` is True the dataset is permuted once up front, so the
    minibatches do not reflect the original row ordering. Batches whose start
    index falls past the end of the data are skipped.

    Example of use:
        for minibatch_y, minibatch_tx in batch_iter(y, tx, 32):
            <DO-SOMETHING>
    """
    n_samples = len(y)
    if shuffle:
        order = np.random.permutation(np.arange(n_samples))
        y_shuffled, tx_shuffled = y[order], tx[order]
    else:
        y_shuffled, tx_shuffled = y, tx
    for batch in range(num_batches):
        start = batch * batch_size
        stop = min(start + batch_size, n_samples)
        if start != stop:
            yield y_shuffled[start:stop], tx_shuffled[start:stop]
def build_poly(tx, degree, do_add_bias=True, odd_only=False):
    """Polynomial basis expansion of each feature.

    For every feature, appends the columns x**1 .. x**degree (only odd powers
    when ``odd_only`` is True). Optionally prepends a column of ones as bias.

    Bug fix: the output used to be allocated as ``degree * D`` columns
    regardless of ``odd_only``, which left trailing all-zero columns when only
    odd powers were generated; the expansion is now sized by the actual number
    of powers produced.
    """
    n_samples, n_features = tx.shape
    powers = range(1, degree + 1, 2 if odd_only else 1)
    columns = [np.power(tx[:, feat], p) for feat in range(n_features) for p in powers]
    expanded = np.column_stack(columns)
    if do_add_bias:
        expanded = np.concatenate((np.ones((n_samples, 1)), expanded), axis=1)
    return expanded
def split_data_rand(y, tx, ratio, seed=1):
    """Randomly split (y, tx) into train/test parts.

    `ratio` is the fraction of rows assigned to the first (train) part; the
    split is deterministic for a given `seed`.
    Returns (y_train, tx_train, y_test, tx_test).
    """
    np.random.seed(seed)
    n_rows = len(y)
    shuffled = np.random.permutation(n_rows)
    cut = int(np.floor(ratio * n_rows))
    train_idx, test_idx = shuffled[:cut], shuffled[cut:]
    return y[train_idx], tx[train_idx], y[test_idx], tx[test_idx]
def k_fold_random_split(y, tx, k, seed=1):
    """Split (y, tx) into k random, disjoint folds.

    The first k-1 folds hold floor(len(y)/k) rows; the last fold absorbs the
    remainder. Deterministic for a given `seed`.
    Returns (list_of_y_folds, list_of_tx_folds).
    """
    np.random.seed(seed)
    shuffled = np.random.permutation(len(y))
    fold_size = int(np.floor(len(y) / k))
    subsets_y, subsets_tx = [], []
    for fold in range(k):
        start = fold * fold_size
        stop = start + fold_size if fold < k - 1 else len(y)
        idx = shuffled[start:stop]
        subsets_y.append(y[idx])
        subsets_tx.append(tx[idx])
    return subsets_y, subsets_tx
def eliminate_minus_999(tx):
    """Impute -999 sentinel entries with the per-column median of valid values.

    Operates in place on ``tx`` and also returns it.
    """
    for col in range(tx.shape[1]):
        column = tx[:, col]
        missing = column == -999.0
        column[missing] = np.median(column[~missing])
    return tx
def standardize(tx):
    """Standardize every feature column except column 0.

    Column 0 is skipped (presumably a bias column -- confirm with callers).
    Mean and standard deviation are computed over entries != -999 only, and
    sentinel entries are set to 0.0 (the post-standardization mean).
    Operates in place and returns ``tx``.
    """
    for col in range(1, tx.shape[1]):
        column = tx[:, col]
        missing = column == -999.0
        mu = np.mean(column[~missing])
        sigma = np.std(column[~missing])
        scaled = (column - mu) / sigma
        scaled[missing] = 0.0
        tx[:, col] = scaled
    return tx
def build_poly_of_feature(x_i, degree):
    """Return a matrix whose columns are x_i**1 .. x_i**degree."""
    expanded = np.empty((x_i.shape[0], degree))
    for idx, power in enumerate(range(1, degree + 1)):
        expanded[:, idx] = x_i ** power
    return expanded


def augment_feature(tx, i, degree):
    """Return a copy of ``tx`` where column i is replaced by its polynomial
    expansion up to ``degree``; all other columns are kept in order.
    """
    left = tx[:, :i]
    middle = build_poly_of_feature(tx[:, i], degree)
    right = tx[:, (i+1):]
    return np.concatenate((left, middle, right), axis=1)


def build_poly_with_degrees(tx, degrees, do_add_bias=True):
    """Expand feature i to the powers 1..degrees[i], for every feature.

    The expanded blocks appear in feature order; a column of ones is
    prepended when ``do_add_bias`` is True.
    """
    blocks = [build_poly_of_feature(tx[:, i], degrees[i]) for i in range(tx.shape[1])]
    result = np.concatenate(blocks, axis=1)
    if do_add_bias:
        result = np.concatenate((np.ones((tx.shape[0], 1)), result), axis=1)
    return result
def predict_labels(weights, data, is_logistic=False):
    """Generate class predictions from weights and a data matrix.

    Logistic models are thresholded at 0.5 after applying the sigmoid and
    labeled {0, 1}; linear models are thresholded at 0 and labeled {-1, 1}.
    """
    scores = data.dot(weights)
    if is_logistic:
        scores = costs.sigmoid(scores)
        cutoff, low, high = 0.5, 0, 1
    else:
        cutoff, low, high = 0, -1, 1
    scores[np.where(scores <= cutoff)] = low
    scores[np.where(scores > cutoff)] = high
    return scores
def drop_minus_999_features(tx):
    """Return a copy of ``tx`` without any column containing a -999 sentinel."""
    has_sentinel = np.any(tx == -999, axis=0)
    return tx[:, ~has_sentinel]
def augment_with_binary(tx):
    """Add a binary column for every group of columns containing -999 values.

    Each appended column is 1 where the group's first feature is valid and 0
    where it is -999, so a model can account for missingness explicitly.
    """
    _, D = tx.shape
    # construct groups of columns that have the -999 values in the same places
    # NOTE(review): columns are grouped by their *count* of -999 entries; this
    # assumes that equal counts imply an identical missingness pattern (the
    # binary column is derived from the group's first column only) -- confirm.
    groups = sorted([(tx[np.where(tx[:, i] == -999)].shape[0], i) for i in range(D)],
                    key=lambda x: x[0])
    # drop fully-valid columns (count 0); the list is sorted so they lead
    groups = itertools.dropwhile(lambda x: x[0] == 0, groups)
    groups = [(group, [k[1] for k in keys])
              for group, keys in itertools.groupby(groups, key=lambda x: x[0])]
    # groups is a list of pairs: first element is -999 count, second element is a list of columns
    # create binary columns for each group, concatenate with tx and return
    binary_cols = np.array([tx[:, cols[0]] != -999 for _, cols in groups]).T.astype(int)
    return np.concatenate((tx, binary_cols), axis=1)
| StarcoderdataPython |
29892 | """
Freyr - A Free stock API
"""
import random
import requests.utils
header = [
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.14; rv:83.0) Gecko/20100101 Firefox/83.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:82.0) Gecko/20100101 Firefox/82.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:83.0) Gecko/20100101 Firefox/83.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.15; rv:84.0) Gecko/20100101 Firefox/84.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10.16; rv:83.0) Gecko/20100101 Firefox/83.0",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/13.1.2 Safari/605.1.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.1 Safari/605.1.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.1 Safari/605.1.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.2 Safari/605.1.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0 Safari/605.1.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15 (KHTML, like Gecko) Version/14.0.1 Safari/605.1.15",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"Mozilla/5.0 (Macintosh; Intel Mac OS X 11_1_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/76.0.3809.100 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.121 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.183 Safari/537.36 OPR/72.0.3815.320",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.193 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 Edg/86.0.622.69",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 OPR/72.0.3815.400",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.75 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.101 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36 Edg/87.0.664.41",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36 Edg/87.0.664.47",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36 Edg/87.0.664.52",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36 Edg/87.0.664.55",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.57",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36 Edg/87.0.664.60",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:78.0) Gecko/20100101 Firefox/78.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:82.0) Gecko/20100101 Firefox/82.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:83.0) Gecko/20100101 Firefox/83.0",
"Mozilla/5.0 (Windows NT 10.0; Win64; x64; rv:84.0) Gecko/20100101 Firefox/84.0",
"Mozilla/5.0 (Windows NT 10.0; rv:78.0) Gecko/20100101 Firefox/78.0",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36 OPR/72.0.3815.400",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.1; Win64; x64; rv:83.0) Gecko/20100101 Firefox/83.0",
"Mozilla/5.0 (Windows NT 6.3; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"Mozilla/5.0 (Windows NT 6.3; Win64; x64; rv:83.0) Gecko/20100101 Firefox/83.0",
"Mozilla/5.0 (X11; Fedora; Linux x86_64; rv:83.0) Gecko/20100101 Firefox/83.0",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.92 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.111 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/86.0.4240.198 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.66 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.67 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36",
"Mozilla/5.0 (X11; Linux x86_64; rv:78.0) Gecko/20100101 Firefox/78.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:82.0) Gecko/20100101 Firefox/82.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:83.0) Gecko/20100101 Firefox/83.0",
"Mozilla/5.0 (X11; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:82.0) Gecko/20100101 Firefox/82.0",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:83.0) Gecko/20100101 Firefox/83.0",
"Mozilla/5.0 (X11; Ubuntu; Linux x86_64; rv:84.0) Gecko/20100101 Firefox/84.0",
]
def default_user_agent():
    """Monkey-patch requests' default User-Agent to pick from ``header``.

    After this call, every invocation of requests.utils.default_user_agent()
    returns a freshly chosen random user-agent string from the module-level
    ``header`` list. Returns the patched callable.
    """
    def _random_agent():
        return random.choice(header)

    requests.utils.default_user_agent = _random_agent
    return requests.utils.default_user_agent
| StarcoderdataPython |
1735414 | """
@author: yuhao.he
@contact: <<EMAIL>>
@version: 0.0.1
@file: models.py
@time: 2021/11/2 21:02
"""
from tortoise import fields
from src.utils.admin import ManyToOneModel, AbstractModel, AdminMixin, AdminMeta
class Command(AbstractModel, ManyToOneModel, AdminMixin):
    """A device command: maps a trigger ``pattern`` (the AI answer input)
    to a ``command`` string (the AI answer output) for the owning device.
    """

    class Admin(AdminMeta):
        # Admin-panel presentation metadata.
        label = "命令"
        fields = {"command": "命令", "pattern": "出发方式", "device": "所属设备"}
        order = 2.1
        icon = "fas fa-terminal"

    command = fields.CharField(max_length=32, description="命令 注册到ai中心 即AI answer 接口的输出")
    pattern = fields.CharField(max_length=128, description="用于唤醒命令,即AI answer 接口的输入")
    device = fields.ForeignKeyField("models.Device", related_name="commands", db_constraint=False)

    def __repr__(self):
        return f"<{self.pattern}: {self.command}>"

    def __str__(self):
        return f"<{self.pattern}: {self.command}>"

    async def render(self):
        """Serialize this command to a plain dict."""
        return {"command": self.command, "pattern": self.pattern, "id": self.id}

    @classmethod
    async def parse(cls, data: dict):
        """Create a new Command, or update the row whose id is in ``data``.

        ``data`` is mutated: the ``id`` key, when present, is popped before
        the remaining fields are applied.
        """
        if 'id' in data:
            pk = data.pop('id')
            obj = await cls.get(id=pk)
            # BUG FIX: iterating a dict yields keys only, so the original
            # ``for k, v in data`` raised at runtime; iterate .items().
            for k, v in data.items():
                setattr(obj, k, v)
        else:
            # NOTE(review): awaiting the constructor is unusual for an ORM
            # model -- confirm AbstractModel instances are awaitable here.
            obj = await cls(**data)
        return obj
class Device(AbstractModel, AdminMixin):
    """A controllable hardware device identified by name, location, and GPIO pin."""
    class Admin(AdminMeta):
        # Admin-panel presentation metadata.
        label = "设备"
        fields = {"name": "设备名", "location": "位置", "pin": "PIN脚", "commands": "命令"}
        icon = "fas fa-terminal"
        order = 2.0
    name = fields.CharField(max_length=32, description="设备名称")
    location = fields.CharField(max_length=32, description="设备位置")
    pin = fields.CharField(max_length=16, description="设备gpio pin脚号")
    def __str__(self):
        return f'{self.name} In:{self.location}'
    def __repr__(self):
        return str(self)
# @signals.post_save(Command)
# async def after_del(obj, **kwargs):
# print("after save command ", obj, kwargs)
# raise Exception()
| StarcoderdataPython |
### 'FileImport'
# `import module` binds the module object; attributes are reached via its name.
import pretty
pretty.arrowify(james="RITS", matt="CMIC")
### 'ModuleVariable'
# Module-level variables can be rebound from outside the module.
pretty.arrow="=>"
pretty.arrowify(beauty=True, cause="consequence")
### 'ImportFrom'
import math
math.sin(math.pi)
# `from ... import name` copies the name into the current namespace.
from math import sin
sin(math.pi)
# A star import pulls in every public name (discouraged outside demos).
from math import *
sin(pi)
### 'ImportAlias'
# `as` renames the imported module locally.
import math as m
m.cos(0)
mypi=3
# Aliasing the imported name avoids clobbering the local `mypi`.
from math import pi as realpi
sin(mypi)
sin(realpi)
### 'FolderModules'
# Packages (folders) import like modules; submodules need explicit import.
import module1
print module1.hello
import module1.module2
print module1.module2.hello
### 'RelativeImport'
import module1.module3
module1.module3.hello
3327828 | <filename>examples/example_l2_routing.py
#! /usr/bin/env python
# -*- coding: utf-8 -*-
from scapy.all import *
# DST_MAC = "00:11:22:33:44:55"
# Layer-2 addresses used for every crafted frame.
DST_MAC = "00:16:3e:0c:53:93"
SRC_MAC = "00:00:70:5b:c7:34"
# IPv4 endpoints (also used as the inner header of the IPv6 tunnel).
DST_IPV4 = "192.168.127.12"
SRC_IPV4 = "10.0.0.1"
# Native IPv6 endpoints.
DST_IPV6 = "2001:db8:100::1"
SRC_IPV6 = "2001:db8:100::2"
# Outer IPv6 endpoints of the IPv4-in-IPv6 tunnel (see make_tun6).
DST_TUN6 = "fdf8:f53e:61e4::18"
SRC_TUN6 = "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b"
# VLAN id (not referenced by the senders below).
VLAN_VID = 10
# Network interfaces that each test packet is sent from.
IFACES = ["enp131s0f0", "enp131s0f1", "enp132s0f0", "enp132s0f1"]
class Counter:
    """Shared sequence-number generator used to tag packet payloads."""
    CNT = 0

    @classmethod
    def get(cls):
        """Return the next sequence number (1, 2, 3, ...)."""
        next_value = cls.CNT + 1
        cls.CNT = next_value
        return next_value
def make_pkt(dst_ip, data):
    """Build an Ether/IPv4/UDP packet carrying ``data`` as its payload."""
    udp = UDP(sport=5051, dport=5050) / data
    pkt = Ether(dst=DST_MAC, src=SRC_MAC)/IP(dst=dst_ip, src=SRC_IPV4)/udp
    pkt.show2()  # dump the fully-assembled packet for debugging
    return pkt
def make_pkt6(dst_ip, data):
    """Build an Ether/IPv6/UDP packet carrying ``data`` as its payload."""
    udp = UDP(sport=5051, dport=5050) / data
    pkt = Ether(dst=DST_MAC, src=SRC_MAC)/IPv6(dst=dst_ip, src=SRC_IPV6)/udp
    pkt.show2()  # dump the fully-assembled packet for debugging
    return pkt
def make_tun6(dst_ip, data):
    """Build an IPv4-in-IPv6 tunneled packet: outer IPv6 to the tunnel
    endpoints, inner IPv4/UDP to ``dst_ip`` with ``data`` as payload."""
    udp = UDP(sport=5051, dport=5050) / data
    ip4 = IP(dst=dst_ip, src=SRC_IPV4)/udp
    pkt = Ether(dst=DST_MAC, src=SRC_MAC)/IPv6(dst=DST_TUN6, src=SRC_TUN6)/ip4
    pkt.show2()  # dump the fully-assembled packet for debugging
    return pkt
def send_ipv4(dst_ip):
    """Send one IPv4 test packet from each configured interface; the payload
    tags the interface name and a global sequence number."""
    for iface in IFACES:
        pkt = make_pkt(dst_ip, "{0}/{1}".format(iface, Counter.get()))
        sendp(pkt, iface=iface)
def send_ipv6(dst_ip):
    """Send one IPv6 test packet from each configured interface."""
    for iface in IFACES:
        pkt = make_pkt6(dst_ip, "{0}/{1}".format(iface, Counter.get()))
        sendp(pkt, iface=iface)
def send_tun6(dst_ip):
    """Send one IPv4-in-IPv6 tunneled test packet from each interface."""
    for iface in IFACES:
        pkt = make_tun6(dst_ip, "{0}/{1}".format(iface, Counter.get()))
        sendp(pkt, iface=iface)
def _main():
    """Send one plain IPv4 packet and two tunneled packets per interface."""
    send_ipv4("172.16.17.32")
    #send_ipv4("172.16.58.3")
    #send_ipv4("172.16.58.3")
    send_tun6("172.16.58.3")
    send_tun6("172.16.58.3")
    # send_ipv6()
# send_ipv6()
if __name__ == "__main__":
_main()
| StarcoderdataPython |
1647986 | """Resamples a GeoTIFF file to make a KML and a PNG browse image for ASF"""
import argparse
import logging
import os
import sys
from osgeo import gdal
from hyp3lib.resample_geotiff import resample_geotiff
def makeAsfBrowse(geotiff: str, base_name: str, use_nn=False, width: int = 2048):
    """
    Make a KML and PNG browse image for ASF

    Args:
        geotiff: name of GeoTIFF file
        base_name: base name of output files
        use_nn: Use GDAL's GRIORA_NearestNeighbour interpolation instead of
            GRIORA_Cubic to resample the GeoTIFF
        width: browse image width (capped at the GeoTIFF's own width)

    Returns:
        browse_width: the width of the created browse image
    """
    dataset = gdal.Open(geotiff)
    tiff_width = dataset.RasterXSize
    dataset = None  # dropping the reference releases the GDAL dataset
    browse_width = min(width, tiff_width)
    if tiff_width < width:
        logging.warning(f'Requested image dimension of {width} exceeds GeoTIFF width {tiff_width}.'
                        f' Using GeoTIFF width')
    resample_geotiff(geotiff, browse_width, 'KML', f'{base_name}.kmz', use_nn)
    resample_geotiff(geotiff, browse_width, 'PNG', f'{base_name}.png', use_nn)
    return browse_width
def main():
    """Main entrypoint: parse CLI args, configure logging, build the browse."""
    parser = argparse.ArgumentParser(
        prog=os.path.basename(__file__),
        description=__doc__,
        formatter_class=argparse.ArgumentDefaultsHelpFormatter,
    )
    parser.add_argument('geotiff', help='name of GeoTIFF file to resample')
    parser.add_argument('basename', help='base name of output files')
    parser.add_argument('-n', '--nearest-neighbor', action='store_true',
                        help="use GDAL's GRIORA_NearestNeighbour interpolation instead"
                             " of GRIORA_Cubic to resample the GeoTIFF")
    parser.add_argument('-w', '--width', default=2048,
                        help='browse image width')
    args = parser.parse_args()
    # Route INFO-and-below to stdout, WARNING-and-above to stderr.
    out = logging.StreamHandler(stream=sys.stdout)
    out.addFilter(lambda record: record.levelno <= logging.INFO)
    err = logging.StreamHandler()
    err.setLevel(logging.WARNING)
    logging.basicConfig(format='%(message)s', level=logging.INFO, handlers=(out, err))
    # Validate inputs before doing any work.
    if not os.path.exists(args.geotiff):
        parser.error(f'GeoTIFF file {args.geotiff} does not exist!')
    # basename must not carry an extension; extensions are appended per format.
    if os.path.splitext(args.basename)[-1]:
        parser.error(f'Output file {args.basename} has an extension!')
    makeAsfBrowse(
        args.geotiff, args.basename, use_nn=args.nearest_neighbor, width=args.width
    )
| StarcoderdataPython |
4821317 | <reponame>mikedingjan/ecs-deplojo
import copy
import json
import operator
import os.path
import typing
from string import Template
class TaskDefinition:
    """A TaskDefinition exists out of a set of containers.

    Wraps the raw ECS task-definition JSON (kept in ``self._data``); the
    properties below are thin typed accessors over that dict.
    """
    def __init__(self, data):
        # data: the parsed task-definition template (a plain dict).
        self._data = data
    @classmethod
    def load(cls, fh) -> "TaskDefinition":
        """Alternate constructor: read the task definition JSON from an open file."""
        data = json.load(fh)
        return cls(data)
    def as_dict(self):
        """Output the TaskDefinition in a boto3 compatible format.
        See the boto3 documentation on `ECS.Client.register_task_definition`.

        Internally environment/secrets are stored as plain dicts (see
        set_environment/set_secrets); boto3 expects sorted lists of
        {"name": ..., "value"/"valueFrom": ...} mappings, so convert here.
        """
        # Deep copy so the conversion never mutates the stored template.
        result = copy.deepcopy(self._data)
        for container in result["containerDefinitions"]:
            container["environment"] = sorted(
                [
                    {"name": k, "value": str(v)}
                    for k, v in container.get("environment", {}).items()
                ],
                key=operator.itemgetter("name"),
            )
            container["secrets"] = sorted(
                [
                    {"name": k, "valueFrom": str(v)}
                    for k, v in container.get("secrets", {}).items()
                ],
                key=operator.itemgetter("name"),
            )
        return result
    def apply_variables(self, variables: typing.Dict[str, str]):
        """Interpolate all the variables used in the task definition"""
        # Only the "image" field is templated (e.g. "repo/app:${version}").
        for container in self.container_definitions:
            container["image"] = Template(container["image"]).substitute(variables)
    def apply_overrides(self, overrides):
        """Apply overrides for all containers within this task definition.

        Overrides are keyed by container name; list values extend the existing
        list, dict values update the existing dict, anything else replaces.
        """
        for container in self.container_definitions:
            container_overrides = overrides.get(container["name"], {})
            for key, value in container_overrides.items():
                if key in container and isinstance(container[key], list):
                    container[key].extend(value)
                elif key in container and isinstance(container[key], dict):
                    container[key].update(value)
                else:
                    container[key] = value
    def set_environment(self, env: typing.Dict[str, str]):
        """Set the environment variable mapping on every container.

        NOTE(review): all containers end up sharing the same dict object;
        as_dict() deep-copies before serializing, so this looks intentional —
        confirm no caller mutates one container's environment in place.
        """
        for container in self.container_definitions:
            container["environment"] = env
    def set_secrets(self, secrets: typing.Dict[str, str]):
        """Interpolate all the secrets used in the task definition.
        Secrets will be fetched from the AWS Parameter store and injected in the
        environment variables during container startup
        """
        for container in self.container_definitions:
            container["secrets"] = secrets
    def __str__(self):
        # Fall back to a placeholder for definitions not yet registered in ECS.
        if self._data.get("name"):
            return self._data.get("name")
        return "(unregistered)"
    def __eq__(self, other: object):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable (Python sets __hash__ = None implicitly); confirm nothing
        # stores TaskDefinition objects in sets or as dict keys.
        if not isinstance(other, TaskDefinition):
            return False
        return self._data == other._data
    def __repr__(self):
        return json.dumps(self._data)
    # --- thin typed accessors over the underlying task-definition dict ---
    @property
    def tags(self) -> typing.List[typing.Dict[str, str]]:
        return self._data.get("tags")
    @tags.setter
    def tags(self, value: typing.List[typing.Dict[str, str]]):
        self._data["tags"] = value
    @property
    def family(self) -> str:
        return self._data.get("family")
    @family.setter
    def family(self, value: str):
        self._data["family"] = value
    @property
    def revision(self) -> int:
        return self._data.get("revision")
    @revision.setter
    def revision(self, value: int):
        self._data["revision"] = value
    @property
    def name(self) -> str:
        return self._data.get("name")
    @name.setter
    def name(self, value: str):
        self._data["name"] = value
    @property
    def task_role_arn(self) -> str:
        return self._data.get("taskRoleArn")
    @task_role_arn.setter
    def task_role_arn(self, value: str):
        self._data["taskRoleArn"] = value
    @property
    def execution_role_arn(self) -> str:
        return self._data.get("executionRoleArn")
    @execution_role_arn.setter
    def execution_role_arn(self, value: str):
        self._data["executionRoleArn"] = value
    @property
    def arn(self) -> str:
        return self._data.get("arn")
    @arn.setter
    def arn(self, value: str):
        self._data["arn"] = value
    @property
    def container_definitions(self):
        return self._data.get("containerDefinitions")
    @container_definitions.setter
    def container_definitions(self, value):
        self._data["containerDefinitions"] = value
    @property
    def network_mode(self) -> typing.Optional[str]:
        # None means ECS default networking (see generate_task_definition).
        return self._data.get("networkMode", None)
    @network_mode.setter
    def network_mode(self, value):
        self._data["networkMode"] = value
def generate_task_definitions(
    config, template_vars, base_path, output_path=None
) -> typing.Dict[str, TaskDefinition]:
    """Build a TaskDefinition for every entry in ``config["task_definitions"]``.

    :parameter config: the parsed yaml config contents
    :parameter template_vars: key/value dict with template replacements
    :parameter base_path: the base path (location of the config file)
    :parameter output_path: optional directory to write the definitions to
    :rtype dict: mapping of task name -> TaskDefinition
    """
    result = {}
    for task_name, task_info in config["task_definitions"].items():
        # Start from a private copy of the global environment so per-task
        # additions never leak back into the shared config dict.
        environment = copy.deepcopy(config.get("environment", {}))

        # Merge in the task's environment group, if one is configured.
        group_name = task_info.get("environment_group")
        if group_name:
            environment.update(config["environment_groups"][group_name])

        definition = generate_task_definition(
            filename=task_info["template"],
            environment=environment,
            template_vars=template_vars,
            overrides=task_info.get("overrides", {}),
            name=task_name,
            base_path=base_path,
            task_role_arn=task_info.get("task_role_arn"),
            secrets=config.get("secrets", {}),
            execution_role_arn=task_info.get("execution_role_arn"),
        )
        if output_path:
            write_task_definition(task_name, definition, output_path)
        result[task_name] = definition
    return result
def generate_task_definition(
    filename: str,
    environment: typing.Dict[str, str],
    template_vars,
    overrides,
    name,
    base_path=None,
    task_role_arn=None,
    secrets: typing.Optional[typing.Dict[str, str]] = None,
    execution_role_arn=None,
) -> TaskDefinition:
    """Generate a single task definition from a JSON template.

    :parameter filename: path of the JSON task-definition template
    :parameter environment: environment variables injected into every container
    :parameter template_vars: replacements applied to container image names
    :parameter overrides: per-container overrides (lists extend, dicts update)
    :parameter name: the task family name
    :parameter base_path: optional directory that *filename* is relative to
    :parameter task_role_arn: optional IAM role ARN for the task itself
    :parameter secrets: optional AWS Parameter Store secrets for the containers
    :parameter execution_role_arn: optional IAM role ARN used to start the task
    :rtype TaskDefinition:
    """
    # Fix: the former mutable default ({}) was shared across calls; use a
    # None sentinel instead (behavior-compatible, the value is only read).
    if secrets is None:
        secrets = {}
    if base_path:
        filename = os.path.join(base_path, filename)
    with open(filename, "r") as fh:
        task_definition = TaskDefinition.load(fh)

    task_definition.family = name
    if task_role_arn:
        task_definition.task_role_arn = task_role_arn
    if execution_role_arn:
        task_definition.execution_role_arn = execution_role_arn

    # If no hostname is specified for the container we set it ourselves to
    # `{family}-{container-name}-{num}`
    # Skip this when network_mode == awsvpc, not supported by AWS.
    if task_definition.network_mode not in ["awsvpc"]:
        num_containers = len(task_definition.container_definitions)
        for container in task_definition.container_definitions:
            hostname = task_definition.family
            if num_containers > 1:
                hostname += "-%s" % container["name"].replace("_", "-")
            container.setdefault("hostname", hostname)

    task_definition.set_environment(environment)
    if secrets:
        task_definition.set_secrets(secrets)
    task_definition.apply_variables(template_vars)
    task_definition.apply_overrides(overrides)
    task_definition.tags = [{"key": "createdBy", "value": "ecs-deplojo"}]
    return task_definition
def write_task_definition(name: str, definition: TaskDefinition, output_path) -> None:
    """Serialize *definition* as pretty-printed JSON to ``<output_path>/<name>.json``."""
    target = os.path.join(output_path, name + ".json")
    with open(target, "w") as outfile:
        outfile.write(json.dumps(definition.as_dict(), indent=4))
| StarcoderdataPython |
1640503 | <reponame>mikecokina/pypex
import numpy as np
from pypex.base import shape
from pypex.poly2d.intersection import linter
from pypex.base.conf import ROUND_PRECISION
class Line(shape.Shape2D):
    """A 2D line segment defined by the two points in ``self.hull``."""
    # Status strings (from linter.intersection) that count as an intersection
    # or an overlap, respectively.
    __intersect__ = ['INTERSECT']
    __overlapping__ = ['OVERLAP']
    def __str__(self):
        return "Line: [{}]".format(", ".join([str(v) for v in self.hull]))
    def __repr__(self):
        return "Line: [{}]".format(", ".join([str(v) for v in self.hull]))
    def intersects(self, line, _full=False, in_touch=False, round_tol=ROUND_PRECISION):
        """
        Figure out whether two line are in intersection or not
        :param round_tol: int; consider as same up to 'round_tol' decimal numbers
        :param in_touch: bool
        :param line: pypex.poly2d.line.Line
        :param _full: bool; define whether return full output or not
        :return: bool or tuple
        """
        # fixme: return dual type is probably not a good idea
        intersection = linter.intersection(self.hull[0], self.hull[1], line.hull[0], line.hull[1],
                                           in_touch=in_touch, round_tol=round_tol)
        if _full:
            return intersection
        # NOTE(review): `in "INTERSECT"` is a substring test, so any substring
        # (e.g. "SECT") would also match; presumably `== "INTERSECT"` or
        # membership in __intersect__ was intended — confirm the possible
        # status values returned by linter.intersection before changing.
        return intersection[1] and (intersection[4] in "INTERSECT")
    def full_intersects(self, line, in_touch=False, round_tol=ROUND_PRECISION):
        """
        Figure out whether two line are in intersection or not.
        Method is here to avoid repeating evaluation of if condition statement in case of _full=True in intersects()
        method in case when is necessary to find intersection of huge amount of lines in loop
        :param round_tol: int; consider as same up to 'round_tol' decimal numbers
        :param in_touch: bool
        :param line: pypex.poly2d.line.Line
        :return: tuple
        """
        return linter.intersection(self.hull[0], self.hull[1], line.hull[0], line.hull[1],
                                   in_touch=in_touch, round_tol=round_tol)
    def intersection(self, line, in_touch=False, round_tol=ROUND_PRECISION):
        """
        Find intersection point of two lines if exists.
        :param round_tol: int
        :param in_touch: bool
        :param line: pypex.poly2d.line.Line
        :return: pypex.poly2d.point.Point or None
        """
        intersection = self.full_intersects(line, in_touch=in_touch, round_tol=round_tol)
        # Same substring-test caveat as in intersects() above.
        intersect = intersection[1] and (intersection[4] in "INTERSECT")
        if not intersect:
            return
        return intersection[2]
    def to_array(self):
        """
        Get points of line in numpy array.
        :return: numpy.array;
        """
        return np.array([point.to_array() for point in self.to_Points()])
    def sort_clockwise(self, *args, **kwargs):
        # A segment's two points need no ordering; return the hull unchanged.
        return self.hull
    def direction_vector(self):
        """
        Get direction vector.
        :return: numpy.array;
        """
        return self.hull[1] - self.hull[0]
    def parametrized(self):
        """
        Return callable parametrization of given line as function.

        The returned function maps t -> (x, y) with t=0 at hull[0] and t=1 at
        hull[1].
        """
        def _parametrized(t):
            v = self.direction_vector()
            return self.hull[0][0] + (t * v[0]), self.hull[0][1] + (t * v[1])
        return _parametrized
    def angle(self, other: shape.Shape2D, degrees=False):
        """
        Return angle between vectors defined by self `Line` and other 'Line'.

        Computed via the arccos of the dot product of the unit direction
        vectors, so the result lies in [0, pi] radians (or [0, 180] degrees).
        """
        vector_self = self.hull[0] - self.hull[1]
        vector_other = other.hull[0] - other.hull[1]
        unit_vector_self = vector_self / np.linalg.norm(vector_self)
        unit_vector_other = vector_other / np.linalg.norm(vector_other)
        dot_product = np.dot(unit_vector_self, unit_vector_other)
        angle = np.arccos(dot_product)
        if degrees:
            angle = np.degrees(angle)
        return angle
| StarcoderdataPython |
181557 | from typing import Tuple
import numpy as np
import torch
import torch.nn as nn
from rlcycle.common.abstract.action_selector import ActionSelector
from rlcycle.common.utils.common_utils import np2tensor
class SACActionSelector(ActionSelector):
    """Action selector for a SAC policy.

    NOTE(review): the original docstring said "(vanilla) DDPG policy" while the
    class name says SAC; the tanh-squashed sample below matches SAC — confirm.

    Attributes:
        action_dim (int): size of action space dimension
        action_min (np.ndarray): lower bound for continuous actions
        action_max (np.ndarray): upper bound for continuous actions
    """
    def __init__(self, action_dim: int, action_range: list, use_cuda: bool):
        ActionSelector.__init__(self, use_cuda)
        self.action_dim = action_dim
        # action_range is [lower_bounds, upper_bounds].
        self.action_min = np.array(action_range[0])
        self.action_max = np.array(action_range[1])
    def __call__(
        self, policy: nn.Module, state: np.ndarray
    ) -> np.ndarray:
        """Generate an action for *state* by sampling from *policy*.

        Returns a flat numpy array (the tanh-squashed sample); the original
        ``Tuple[torch.Tensor, ...]`` return annotation did not match.
        """
        # Promote a single state vector to a batch of one.
        if state.ndim == 1:
            state = state.reshape(1, -1)
        # Only z is used here; mu/sigma/log_pi are part of policy.sample's API.
        mu, sigma, z, log_pi = policy.sample(np2tensor(state, self.use_cuda))
        action = torch.tanh(z)
        action_np = action.cpu().detach().view(-1).numpy()
        return action_np
    def rescale_action(self, action: np.ndarray) -> np.ndarray:
        """Rescale actions to fit continuous action spaces.

        Maps an action from [-1, 1] linearly into [action_min, action_max].
        """
        action_rescaled = (
            action * (self.action_max - self.action_min) / 2.0
            + (self.action_max + self.action_min) / 2.0
        )
        return action_rescaled
| StarcoderdataPython |
197945 | <gh_stars>100-1000
# -*- coding: utf-8 -*-
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the MIT License.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# MIT License for more details.
"""MeanLoss for data."""
from vega.modules.module import Module
from vega.common import ClassType, ClassFactory
@ClassFactory.register(ClassType.LOSS)
class MeanLoss(Module):
    """MeanLoss Loss for data.

    Registered in the vega ClassFactory under ClassType.LOSS.
    """
    def __init__(self):
        super(MeanLoss, self).__init__()
    def call(self, inputs, targets):
        """Compute loss, mean() to average on multi-gpu."""
        # `targets` is intentionally unused: the loss is simply mean(inputs).
        return inputs.mean()
| StarcoderdataPython |
1739022 | <filename>arborlife/eventloop.py
import enum
import logging
import sys
from datetime import datetime, timedelta
from arborlife import config, observer
from collections import namedtuple
logger = logging.getLogger(__name__)

# Simulation time advances in fixed one-hour steps (see EventLoop._epochs).
ONE_HOUR = timedelta(hours=1)

# A single simulation tick: which Event fired and the datetime it fired at.
Epoch = namedtuple("Epoch", "event dtime",)
class Event(enum.Enum):
    """Time milestones emitted by the event loop."""
    EOH = enum.auto()  # end of hour
    EOD = enum.auto()  # end of day
    EOW = enum.auto()  # end of week
    EOM = enum.auto()  # end of month
    EOY = enum.auto()  # end of year
class Event2(enum.Enum):
    """Split init/exec variants of the time milestones.

    NOTE(review): not referenced anywhere in this module's visible code —
    presumably a planned replacement for Event; confirm before relying on it.
    """
    EOH_INIT = enum.auto()
    EOH_EXEC = enum.auto()
    EOD_INIT = enum.auto()
    EOD_EXEC = enum.auto()
    EOW_INIT = enum.auto()
    EOW_EXEC = enum.auto()
    EOM_INIT = enum.auto()
    EOM_EXEC = enum.auto()
    EOY_INIT = enum.auto()
    EOY_EXEC = enum.auto()
class EventLoop(observer.Subject):
    """Regulates the passage of time within the simulation.
    A subclass of Subject, EventLoop manages the passage of time within the
    simulation. As time events occur, EventLoop notifies the Observers that
    have attached themselves to it about those events.
    """
    def __init__(self):
        super().__init__()
        cfg = config.get_cfg("eventloop")
        try:
            # Both dates come from config as ISO-8601 strings.
            self.start_dtime = datetime.fromisoformat(cfg["start_date"])
            self.finish_dtime = datetime.fromisoformat(cfg["finish_date"])
        except ValueError as error:
            # A malformed date makes the whole simulation meaningless; abort
            # with the parse error as the exit message.
            sys.exit(error)
        else:
            logger.debug(f"start_date: {self.start_dtime}")
            logger.debug(f"finish_date: {self.finish_dtime}")

    def run(self):
        """Execute event loop for simulation.
        Until the finish date is reached, increment the current time by
        an hour and notify all Observers regarding the important time-based
        state changes.
        """
        for epoch in self._epochs(self.start_dtime, self.finish_dtime):
            # Setting state triggers calls to observers
            self.subject_state = epoch

    def _epochs(self, current_dtime, finish_dtime):
        """A generator producing zero or more time events depending on the dtime.
        Picks apart the dtime and yields one event for each milestone associated
        with that particular datetime object.
        """
        # Fix: honor the finish_dtime argument. The loop previously compared
        # against self.finish_dtime and silently ignored the parameter, which
        # made the signature misleading (run() passes the same value, so the
        # observable behavior of run() is unchanged).
        while current_dtime < finish_dtime:
            current_dtime += ONE_HOUR
            # By definition an hour just elapsed
            yield Epoch(event=Event.EOH, dtime=current_dtime)
            # End of day if it just turned midnight
            if current_dtime.hour == 0:
                yield Epoch(event=Event.EOD, dtime=current_dtime)
                # End of week if it just became Monday
                if current_dtime.weekday() == 0:
                    yield Epoch(event=Event.EOW, dtime=current_dtime)
                # End of month if it just became day 1 of the new month
                if current_dtime.day == 1:
                    yield Epoch(event=Event.EOM, dtime=current_dtime)
                    # End of year if it just became January 1st
                    if current_dtime.month == 1:
                        yield Epoch(event=Event.EOY, dtime=current_dtime)
| StarcoderdataPython |
3240543 | <reponame>felixdittrich92/DeepLearning-tensorflow-keras
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from tensorflow.keras.models import *
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import *
def f(x):
    """Quadratic ground-truth function (x^2 + x + 10) for the regression demo."""
    return x**2 + x + 10
# 1000 evenly spaced sample points in [-10, 10], shaped (1000, 1) for Keras.
x = np.linspace(start=-10.0, stop=10.0, num=1000).reshape(-1, 1)
# Ground-truth targets computed from the quadratic above.
y = f(x)
def relu(x):
    """Scalar rectified linear unit: x for positive inputs, otherwise 0."""
    return x if x > 0 else 0
# Tiny MLP: 1 input -> 12 hidden units (ReLU) -> 1 linear output.
model = Sequential()
model.add(Dense(12))  # input to hidden
model.add(Activation("relu"))  # ReLU on the hidden layer
model.add(Dense(1))  # hidden to output
model.compile(optimizer=Adam(lr=5e-2), loss="mse")
model.fit(x, y, epochs=20)
# Pull the trained weights out of the network.
W, b = model.layers[0].get_weights()  # Dense 1
W2, b2 = model.layers[2].get_weights()  # Dense 2
# "Unroll" into flat vectors.
W = W.flatten()
W2 = W2.flatten()
b = b.flatten()
b2 = b2.flatten()
print(W.shape, b.shape)
print(W2.shape, b2.shape)
# Visualize the approximation using only the first i hidden units,
# for i in [1, 2, ..., 12]: manually recompute hidden activations,
# ReLU, and the output layer's weighted sum.
for i in range(1, len(W)+1):
    y_hidden = np.array([W[:i] * xi + b[:i] for xi in x])
    y_relu = np.array([[relu(yhi) for yhi in yh] for yh in y_hidden])
    y_output = np.array([np.dot(W2[:i], yri) + b2 for yri in y_relu])
    fig, (ax1, ax2) = plt.subplots(ncols=2, figsize=(16,8))
    # NOTE(review): plt.title/grid apply to the current axes (ax2 after
    # subplots), not the whole figure — confirm this is intended.
    plt.title("Num weights: " + str(i))
    plt.grid(True)
    ax1.plot(x, y, color="blue")  # ground truth
    ax1.plot(x.flatten(), y_output.flatten(), color="red")  # truncated network
    ax2.plot(x, y_relu.T[-1])  # activation of the i-th hidden unit
    plt.show()
    plt.close()
plt.close() | StarcoderdataPython |
4809932 | <gh_stars>1-10
from test import TestProtocolCase, bad_client_wrong_broadcast, bad_client_output_vector
import random
import time
class TestProtocol(TestProtocolCase):
    """Integration tests: honest clients must agree on the transaction even
    when one client misbehaves."""
    def test_001_cheat_in_sending_different_keys(self):
        """One cheater broadcasts wrong keys; all honest clients must still
        finish and produce identical raw transactions."""
        good_threads = self.make_clients_threads(with_print = True, number_of_clients = self.number_of_players - 1)
        bad_thread = self.make_bad_client(bad_client_wrong_broadcast, with_print = True)
        protocolThreads = good_threads + [bad_thread]
        # Shuffle so the cheater's position in the startup order is random.
        random.shuffle(protocolThreads)
        self.start_protocols(protocolThreads)
        done = False
        # Busy-wait until every honest client reports protocol completion.
        while not done:
            completes = [self.is_protocol_complete(p) for p in good_threads]
            done = all(completes)
        self.stop_protocols(protocolThreads)
        # All honest participants must have built the same transaction.
        tx = good_threads[0].protocol.tx.raw
        for pThread in good_threads[1:]:
            self.assertEqual(tx, pThread.protocol.tx.raw)
    def test_002_cheat_in_sending_different_outputs(self):
        """One cheater (started late, after a 1s delay) sends a bad output
        vector; the honest clients must still converge on one transaction."""
        protocolThreads = self.make_clients_threads(with_print = True, number_of_clients = self.number_of_players - 1)
        bad_thread = self.make_bad_client(bad_client_output_vector, with_print = True)
        self.start_protocols(protocolThreads)
        protocolThreads.append(bad_thread)
        # Let the honest clients get going before the cheater joins.
        time.sleep(1)
        bad_thread.start()
        done = False
        # Busy-wait on the honest clients only (the cheater is last in the list).
        while not done:
            completes = [self.is_protocol_complete(p) for p in protocolThreads[:-1]]
            done = all(completes)
        self.stop_protocols(protocolThreads)
        time.sleep(1)
        tx = protocolThreads[0].protocol.tx.raw
        for pThread in protocolThreads[:-1]:
            print(pThread.protocol.me)
            self.assertEqual(tx, pThread.protocol.tx.raw)
| StarcoderdataPython |
3359184 | import sys
sys.path.append('../gen-py')
from media_service import TextService
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
import random
import string
def main():
    """Connect to the TextService over framed binary Thrift and upload one
    random 128-character text blob with a random 64-bit request id."""
    # Plain TCP socket to the TextService host.
    sock = TSocket.TSocket("ath-8.ece.cornell.edu", 9090)
    # Buffering is critical. Raw sockets are very slow
    transport = TTransport.TFramedTransport(sock)
    # Wrap in a protocol
    proto = TBinaryProtocol.TBinaryProtocol(transport)
    # Create a client to use the protocol encoder
    client = TextService.Client(proto)

    # Connect!
    transport.open()
    for _ in range(1, 2):
        # Shift the unsigned 64-bit value into signed i64 range for Thrift.
        request_id = random.getrandbits(64) - 2**63
        payload = ''.join(random.choices(string.ascii_lowercase + string.digits, k=128))
        client.UploadText(request_id, payload)
    transport.close()
if __name__ == '__main__':
    try:
        main()
    except Thrift.TException as tx:
        # Report Thrift-level failures (e.g. connection errors) instead of a
        # traceback. NOTE(review): relies on TException exposing .message —
        # the Apache Thrift Python runtime sets it, but plain print(tx) would
        # be safer; confirm.
        print('%s' % tx.message)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.