file_name large_stringlengths 4 140 | prefix large_stringlengths 0 39k | suffix large_stringlengths 0 36.1k | middle large_stringlengths 0 29.4k | fim_type large_stringclasses 4
values |
|---|---|---|---|---|
Administration.SentEmailsForm.ts | namespace PatientManagement.Administration {
export interface SentEmailsForm {
ToEmail: PatientManagement.LKCodeDescr;
Subject: Serenity.StringEditor;
Body: Serenity.HtmlContentEditor;
EmailSignature: Serenity.HtmlContentEditor;
}
export class Se | xtends Serenity.PrefixedContext {
static formKey = 'Administration.SentEmails';
private static init: boolean;
constructor(prefix: string) {
super(prefix);
if (!SentEmailsForm.init) {
SentEmailsForm.init = true;
var s = Serenity;
var w0 = PatientManagement.LKCodeDescr;
var w1 = s.StringEditor;
var w2 = s.HtmlContentEditor;
Q.initFormType(SentEmailsForm, [
'ToEmail', w0,
'Subject', w1,
'Body', w2,
'EmailSignature', w2
]);
}
}
}
}
| ntEmailsForm e | identifier_name |
TestController.py | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, threading, subprocess, getopt, signal
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
def removeTrustSettings():
serverCert = os.path.join(path[0], "certs", "server.pem")
if os.system("security verify-cert -c " + serverCert + " >& /dev/null") == 0:
sys.stdout.write("removing trust settings for the HTTP server certificate... ")
sys.stdout.flush()
if os.system("security remove-trusted-cert " + serverCert) != 0:
print("\nerror: couldn't remove trust settings for the HTTP server certificate")
else:
print("ok")
else:
print("trust settings already removed")
#
# On OS X, provide an option to allow removing the trust settings
#
if TestUtil.isDarwin():
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["clean"])
if ("--clean", "") in opts:
removeTrustSettings()
sys.exit(0)
except getopt.GetoptError:
pass
version = "3.6.0"
jar = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..",
"java/test/controller/build/libs/testController-%(version)s.jar" % {"version": version})
javaHome = os.environ.get("JAVA_HOME", "")
javaCmd = '%s' % os.path.join(javaHome, "bin", "java") if javaHome else "java"
command = [javaCmd, "-jar", jar]
if len(sys.argv) > 1:
command += sys.argv[1:]
p = subprocess.Popen(command, shell = False, stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = subprocess.STDOUT, bufsize = 0)
def signal_handler(signal, frame):
if p:
p.terminate()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if TestUtil.isDarwin():
#
# On OS X, we set the trust settings on the certificate to prevent
# the Web browsers from prompting the user about the unstrusted
# certificate. Some browsers such as Chrome don't provide the
# option to set this trust settings.
#
serverCert = os.path.join(TestUtil.toplevel, "certs", "server.pem")
if os.system("security verify-cert -c " + serverCert + " >& /dev/null") != 0:
sys.stdout.write("adding trust settings for the HTTP server certificate... ")
sys.stdout.flush()
if os.system("security add-trusted-cert -r trustAsRoot " + serverCert) != 0:
print("error: couldn't add trust settings for the HTTP server certificate")
print("ok") |
while(True):
c = p.stdout.read(1)
if not c: break
if c == '\r': continue
# Depending on Python version and platform, the value c could be a
# string or a bytes object.
if type(c) != str:
c = c.decode()
sys.stdout.write(c)
sys.stdout.flush() | print("run " + sys.argv[0] + " --clean to remove the trust setting") | random_line_split |
TestController.py | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, threading, subprocess, getopt, signal
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
def removeTrustSettings():
serverCert = os.path.join(path[0], "certs", "server.pem")
if os.system("security verify-cert -c " + serverCert + " >& /dev/null") == 0:
sys.stdout.write("removing trust settings for the HTTP server certificate... ")
sys.stdout.flush()
if os.system("security remove-trusted-cert " + serverCert) != 0:
print("\nerror: couldn't remove trust settings for the HTTP server certificate")
else:
print("ok")
else:
print("trust settings already removed")
#
# On OS X, provide an option to allow removing the trust settings
#
if TestUtil.isDarwin():
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["clean"])
if ("--clean", "") in opts:
removeTrustSettings()
sys.exit(0)
except getopt.GetoptError:
pass
version = "3.6.0"
jar = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..",
"java/test/controller/build/libs/testController-%(version)s.jar" % {"version": version})
javaHome = os.environ.get("JAVA_HOME", "")
javaCmd = '%s' % os.path.join(javaHome, "bin", "java") if javaHome else "java"
command = [javaCmd, "-jar", jar]
if len(sys.argv) > 1:
command += sys.argv[1:]
p = subprocess.Popen(command, shell = False, stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = subprocess.STDOUT, bufsize = 0)
def signal_handler(signal, frame):
|
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if TestUtil.isDarwin():
#
# On OS X, we set the trust settings on the certificate to prevent
# the Web browsers from prompting the user about the unstrusted
# certificate. Some browsers such as Chrome don't provide the
# option to set this trust settings.
#
serverCert = os.path.join(TestUtil.toplevel, "certs", "server.pem")
if os.system("security verify-cert -c " + serverCert + " >& /dev/null") != 0:
sys.stdout.write("adding trust settings for the HTTP server certificate... ")
sys.stdout.flush()
if os.system("security add-trusted-cert -r trustAsRoot " + serverCert) != 0:
print("error: couldn't add trust settings for the HTTP server certificate")
print("ok")
print("run " + sys.argv[0] + " --clean to remove the trust setting")
while(True):
c = p.stdout.read(1)
if not c: break
if c == '\r': continue
# Depending on Python version and platform, the value c could be a
# string or a bytes object.
if type(c) != str:
c = c.decode()
sys.stdout.write(c)
sys.stdout.flush()
| if p:
p.terminate()
sys.exit(0) | identifier_body |
TestController.py | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, threading, subprocess, getopt, signal
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
def removeTrustSettings():
serverCert = os.path.join(path[0], "certs", "server.pem")
if os.system("security verify-cert -c " + serverCert + " >& /dev/null") == 0:
sys.stdout.write("removing trust settings for the HTTP server certificate... ")
sys.stdout.flush()
if os.system("security remove-trusted-cert " + serverCert) != 0:
print("\nerror: couldn't remove trust settings for the HTTP server certificate")
else:
print("ok")
else:
print("trust settings already removed")
#
# On OS X, provide an option to allow removing the trust settings
#
if TestUtil.isDarwin():
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["clean"])
if ("--clean", "") in opts:
removeTrustSettings()
sys.exit(0)
except getopt.GetoptError:
pass
version = "3.6.0"
jar = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..",
"java/test/controller/build/libs/testController-%(version)s.jar" % {"version": version})
javaHome = os.environ.get("JAVA_HOME", "")
javaCmd = '%s' % os.path.join(javaHome, "bin", "java") if javaHome else "java"
command = [javaCmd, "-jar", jar]
if len(sys.argv) > 1:
command += sys.argv[1:]
p = subprocess.Popen(command, shell = False, stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = subprocess.STDOUT, bufsize = 0)
def | (signal, frame):
if p:
p.terminate()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if TestUtil.isDarwin():
#
# On OS X, we set the trust settings on the certificate to prevent
# the Web browsers from prompting the user about the unstrusted
# certificate. Some browsers such as Chrome don't provide the
# option to set this trust settings.
#
serverCert = os.path.join(TestUtil.toplevel, "certs", "server.pem")
if os.system("security verify-cert -c " + serverCert + " >& /dev/null") != 0:
sys.stdout.write("adding trust settings for the HTTP server certificate... ")
sys.stdout.flush()
if os.system("security add-trusted-cert -r trustAsRoot " + serverCert) != 0:
print("error: couldn't add trust settings for the HTTP server certificate")
print("ok")
print("run " + sys.argv[0] + " --clean to remove the trust setting")
while(True):
c = p.stdout.read(1)
if not c: break
if c == '\r': continue
# Depending on Python version and platform, the value c could be a
# string or a bytes object.
if type(c) != str:
c = c.decode()
sys.stdout.write(c)
sys.stdout.flush()
| signal_handler | identifier_name |
TestController.py | #!/usr/bin/env python
# **********************************************************************
#
# Copyright (c) 2003-2015 ZeroC, Inc. All rights reserved.
#
# This copy of Ice is licensed to you under the terms described in the
# ICE_LICENSE file included in this distribution.
#
# **********************************************************************
import os, sys, threading, subprocess, getopt, signal
path = [ ".", "..", "../..", "../../..", "../../../.." ]
head = os.path.dirname(sys.argv[0])
if len(head) > 0:
path = [os.path.join(head, p) for p in path]
path = [os.path.abspath(p) for p in path if os.path.exists(os.path.join(p, "scripts", "TestUtil.py")) ]
if len(path) == 0:
raise RuntimeError("can't find toplevel directory!")
sys.path.append(os.path.join(path[0], "scripts"))
import TestUtil
def removeTrustSettings():
serverCert = os.path.join(path[0], "certs", "server.pem")
if os.system("security verify-cert -c " + serverCert + " >& /dev/null") == 0:
sys.stdout.write("removing trust settings for the HTTP server certificate... ")
sys.stdout.flush()
if os.system("security remove-trusted-cert " + serverCert) != 0:
print("\nerror: couldn't remove trust settings for the HTTP server certificate")
else:
print("ok")
else:
print("trust settings already removed")
#
# On OS X, provide an option to allow removing the trust settings
#
if TestUtil.isDarwin():
try:
opts, args = getopt.getopt(sys.argv[1:], "", ["clean"])
if ("--clean", "") in opts:
removeTrustSettings()
sys.exit(0)
except getopt.GetoptError:
pass
version = "3.6.0"
jar = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..",
"java/test/controller/build/libs/testController-%(version)s.jar" % {"version": version})
javaHome = os.environ.get("JAVA_HOME", "")
javaCmd = '%s' % os.path.join(javaHome, "bin", "java") if javaHome else "java"
command = [javaCmd, "-jar", jar]
if len(sys.argv) > 1:
|
p = subprocess.Popen(command, shell = False, stdin = subprocess.PIPE, stdout = subprocess.PIPE,
stderr = subprocess.STDOUT, bufsize = 0)
def signal_handler(signal, frame):
if p:
p.terminate()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
if TestUtil.isDarwin():
#
# On OS X, we set the trust settings on the certificate to prevent
# the Web browsers from prompting the user about the unstrusted
# certificate. Some browsers such as Chrome don't provide the
# option to set this trust settings.
#
serverCert = os.path.join(TestUtil.toplevel, "certs", "server.pem")
if os.system("security verify-cert -c " + serverCert + " >& /dev/null") != 0:
sys.stdout.write("adding trust settings for the HTTP server certificate... ")
sys.stdout.flush()
if os.system("security add-trusted-cert -r trustAsRoot " + serverCert) != 0:
print("error: couldn't add trust settings for the HTTP server certificate")
print("ok")
print("run " + sys.argv[0] + " --clean to remove the trust setting")
while(True):
c = p.stdout.read(1)
if not c: break
if c == '\r': continue
# Depending on Python version and platform, the value c could be a
# string or a bytes object.
if type(c) != str:
c = c.decode()
sys.stdout.write(c)
sys.stdout.flush()
| command += sys.argv[1:] | conditional_block |
classif_and_ktst.py | """Classification-based test and kernel two-sample test.
Author: Sandro Vega-Pons, Emanuele Olivetti.
"""
import os
import numpy as np
from sklearn.metrics import pairwise_distances, confusion_matrix
from sklearn.metrics import pairwise_kernels
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, KFold, cross_val_score
from sklearn.grid_search import GridSearchCV
from kernel_two_sample_test import MMD2u, compute_null_distribution
from kernel_two_sample_test import compute_null_distribution_given_permutations
import matplotlib.pylab as plt
from joblib import Parallel, delayed
def compute_rbf_kernel_matrix(X):
"""Compute the RBF kernel matrix with sigma2 as the median pairwise
distance.
"""
sigma2 = np.median(pairwise_distances(X, metric='euclidean'))**2
K = pairwise_kernels(X, X, metric='rbf', gamma=1.0/sigma2, n_jobs=-1)
return K
def balanced_accuracy_scoring(clf, X, y):
"""Scoring function that computes the balanced accuracy to be used
internally in the cross-validation procedure.
"""
y_pred = clf.predict(X)
conf_mat = confusion_matrix(y, y_pred)
bal_acc = 0.
for i in range(len(conf_mat)):
bal_acc += (float(conf_mat[i, i])) / np.sum(conf_mat[i])
bal_acc /= len(conf_mat)
return bal_acc
def compute_svm_cv(K, y, C=100.0, n_folds=5,
scoring=balanced_accuracy_scoring):
"""Compute cross-validated score of SVM with given precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds)
clf = SVC(C=C, kernel='precomputed', class_weight='auto')
scores = cross_val_score(clf, K, y,
scoring=scoring, cv=cv)
return scores.mean()
def compute_svm_subjects(K, y, n_folds=5):
"""
"""
cv = KFold(len(K)/2, n_folds)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
train_ids = np.concatenate((train, len(K)/2+train))
test_ids = np.concatenate((test, len(K)/2+test))
clf = SVC(kernel='precomputed')
clf.fit(K[train_ids, :][:, train_ids], y[train_ids])
scores[i] = clf.score(K[test_ids, :][:, train_ids], y[test_ids])
return scores.mean()
def permutation_subjects(y):
"""Permute class labels of Contextual Disorder dataset.
"""
y_perm = np.random.randint(0, 2, len(y)/2)
y_perm = np.concatenate((y_perm, np.logical_not(y_perm).astype(int)))
return y_perm
def permutation_subjects_ktst(y):
"""Permute class labels of Contextual Disorder dataset for KTST.
"""
yp = np.random.randint(0, 2, len(y)/2)
yp = np.concatenate((yp, np.logical_not(yp).astype(int)))
y_perm = np.arange(len(y))
for i in range(len(y)/2):
if yp[i] == 1:
y_perm[i] = len(y)/2+i
y_perm[len(y)/2+i] = i
return y_perm
def compute_svm_score_nestedCV(K, y, n_folds,
scoring=balanced_accuracy_scoring,
random_state=None,
param_grid=[{'C': np.logspace(-5, 5, 25)}]):
"""Compute cross-validated score of SVM using precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
random_state=random_state)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
cvclf = SVC(kernel='precomputed')
y_train = y[train]
cvcv = StratifiedKFold(y_train, n_folds=n_folds,
shuffle=True,
random_state=random_state)
clf = GridSearchCV(cvclf, param_grid=param_grid, scoring=scoring,
cv=cvcv, n_jobs=1)
clf.fit(K[train, :][:, train], y_train)
# print clf.best_params_
scores[i] = clf.score(K[test, :][:, train], y[test])
return scores.mean()
def apply_svm(K, y, n_folds=5, iterations=10000, subjects=False, verbose=True,
random_state=None):
"""
Compute the balanced accuracy, its null distribution and the p-value.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
cv: Number of folds in the stratified cross-validation
verbose: bool
Verbosity
Returns:
-------
acc: float
Average balanced accuracy.
acc_null: array
Null distribution of the balanced accuracy.
p_value: float
p-value
"""
# Computing the accuracy
param_grid = [{'C': np.logspace(-5, 5, 20)}]
if subjects:
acc = compute_svm_subjects(K, y, n_folds)
else:
acc = compute_svm_score_nestedCV(K, y, n_folds, param_grid=param_grid,
random_state=random_state)
if verbose:
print("Mean balanced accuracy = %s" % (acc))
print("Computing the null-distribution.")
# Computing the null-distribution
# acc_null = np.zeros(iterations)
# for i in range(iterations):
# if verbose and (i % 1000) == 0:
# print(i),
# stdout.flush()
# y_perm = np.random.permutation(y)
# acc_null[i] = compute_svm_score_nestedCV(K, y_perm, n_folds,
# param_grid=param_grid)
# if verbose:
# print ''
# Computing the null-distribution
if subjects:
yis = [permutation_subjects(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_subjects)(K, yis[i], n_folds) for i in range(iterations))
else:
yis = [np.random.permutation(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, yis[i], n_folds, scoring=balanced_accuracy_scoring, param_grid=param_grid) for i in range(iterations))
# acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_cv)(K, yis[i], C=100., n_folds=n_folds) for i in range(iterations))
p_value = max(1.0 / iterations, (acc_null > acc).sum()
/ float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return acc, acc_null, p_value
def apply_ktst(K, y, iterations=10000, subjects=False, verbose=True):
"""
Compute MMD^2_u, its null distribution and the p-value of the
kernel two-sample test.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
verbose: bool
Verbosity
Returns:
-------
mmd2u: float
MMD^2_u value.
acc_null: array
Null distribution of the MMD^2_u
p_value: float
p-value
"""
assert len(np.unique(y)) == 2, 'KTST only works on binary problems'
# Assuming that the first m rows of the kernel matrix are from one
# class and the other n rows from the second class.
m = len(y[y == 0])
n = len(y[y == 1])
mmd2u = MMD2u(K, m, n)
if verbose:
print("MMD^2_u = %s" % mmd2u)
print("Computing the null distribution.")
if subjects:
perms = [permutation_subjects_ktst(y) for i in range(iterations)]
mmd2u_null = compute_null_distribution_given_permutations(K, m, n,
perms,
iterations)
else:
|
p_value = max(1.0/iterations, (mmd2u_null > mmd2u).sum()
/ float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return mmd2u, mmd2u_null, p_value
def plot_null_distribution(stats, stats_null, p_value, data_name='',
stats_name='$MMD^2_u$', save_figure=True):
"""Plot the observed value for the test statistic, its null
distribution and p-value.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
prob, bins, patches = plt.hist(stats_null, bins=50, normed=True)
ax.plot(stats, prob.max()/30, 'w*', markersize=15,
markeredgecolor='k', markeredgewidth=2,
label="%s = %s" % (stats_name, stats))
ax.annotate('p-value: %s' % (p_value),
xy=(float(stats), prob.max()/9.), xycoords='data',
xytext=(-105, 30), textcoords='offset points',
bbox=dict(boxstyle="round", fc="1."),
arrowprops={"arrowstyle": "->",
"connectionstyle": "angle,angleA=0,angleB=90,rad=10"},
)
plt.xlabel(stats_name)
plt.ylabel('p(%s)' % stats_name)
plt.legend(numpoints=1)
plt.title('Data: %s' % data_name)
if save_figure:
save_dir = 'figures'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
stn = 'ktst' if stats_name == '$MMD^2_u$' else 'clf'
fig_name = os.path.join(save_dir, '%s_%s.pdf' % (data_name, stn))
fig.savefig(fig_name)
| mmd2u_null = compute_null_distribution(K, m, n, iterations,
verbose=verbose) | conditional_block |
classif_and_ktst.py | """Classification-based test and kernel two-sample test.
Author: Sandro Vega-Pons, Emanuele Olivetti.
"""
import os
import numpy as np
from sklearn.metrics import pairwise_distances, confusion_matrix
from sklearn.metrics import pairwise_kernels
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, KFold, cross_val_score
from sklearn.grid_search import GridSearchCV
from kernel_two_sample_test import MMD2u, compute_null_distribution
from kernel_two_sample_test import compute_null_distribution_given_permutations
import matplotlib.pylab as plt
from joblib import Parallel, delayed
def compute_rbf_kernel_matrix(X):
"""Compute the RBF kernel matrix with sigma2 as the median pairwise
distance.
"""
sigma2 = np.median(pairwise_distances(X, metric='euclidean'))**2
K = pairwise_kernels(X, X, metric='rbf', gamma=1.0/sigma2, n_jobs=-1)
return K
def balanced_accuracy_scoring(clf, X, y):
|
def compute_svm_cv(K, y, C=100.0, n_folds=5,
scoring=balanced_accuracy_scoring):
"""Compute cross-validated score of SVM with given precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds)
clf = SVC(C=C, kernel='precomputed', class_weight='auto')
scores = cross_val_score(clf, K, y,
scoring=scoring, cv=cv)
return scores.mean()
def compute_svm_subjects(K, y, n_folds=5):
"""
"""
cv = KFold(len(K)/2, n_folds)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
train_ids = np.concatenate((train, len(K)/2+train))
test_ids = np.concatenate((test, len(K)/2+test))
clf = SVC(kernel='precomputed')
clf.fit(K[train_ids, :][:, train_ids], y[train_ids])
scores[i] = clf.score(K[test_ids, :][:, train_ids], y[test_ids])
return scores.mean()
def permutation_subjects(y):
"""Permute class labels of Contextual Disorder dataset.
"""
y_perm = np.random.randint(0, 2, len(y)/2)
y_perm = np.concatenate((y_perm, np.logical_not(y_perm).astype(int)))
return y_perm
def permutation_subjects_ktst(y):
"""Permute class labels of Contextual Disorder dataset for KTST.
"""
yp = np.random.randint(0, 2, len(y)/2)
yp = np.concatenate((yp, np.logical_not(yp).astype(int)))
y_perm = np.arange(len(y))
for i in range(len(y)/2):
if yp[i] == 1:
y_perm[i] = len(y)/2+i
y_perm[len(y)/2+i] = i
return y_perm
def compute_svm_score_nestedCV(K, y, n_folds,
scoring=balanced_accuracy_scoring,
random_state=None,
param_grid=[{'C': np.logspace(-5, 5, 25)}]):
"""Compute cross-validated score of SVM using precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
random_state=random_state)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
cvclf = SVC(kernel='precomputed')
y_train = y[train]
cvcv = StratifiedKFold(y_train, n_folds=n_folds,
shuffle=True,
random_state=random_state)
clf = GridSearchCV(cvclf, param_grid=param_grid, scoring=scoring,
cv=cvcv, n_jobs=1)
clf.fit(K[train, :][:, train], y_train)
# print clf.best_params_
scores[i] = clf.score(K[test, :][:, train], y[test])
return scores.mean()
def apply_svm(K, y, n_folds=5, iterations=10000, subjects=False, verbose=True,
random_state=None):
"""
Compute the balanced accuracy, its null distribution and the p-value.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
cv: Number of folds in the stratified cross-validation
verbose: bool
Verbosity
Returns:
-------
acc: float
Average balanced accuracy.
acc_null: array
Null distribution of the balanced accuracy.
p_value: float
p-value
"""
# Computing the accuracy
param_grid = [{'C': np.logspace(-5, 5, 20)}]
if subjects:
acc = compute_svm_subjects(K, y, n_folds)
else:
acc = compute_svm_score_nestedCV(K, y, n_folds, param_grid=param_grid,
random_state=random_state)
if verbose:
print("Mean balanced accuracy = %s" % (acc))
print("Computing the null-distribution.")
# Computing the null-distribution
# acc_null = np.zeros(iterations)
# for i in range(iterations):
# if verbose and (i % 1000) == 0:
# print(i),
# stdout.flush()
# y_perm = np.random.permutation(y)
# acc_null[i] = compute_svm_score_nestedCV(K, y_perm, n_folds,
# param_grid=param_grid)
# if verbose:
# print ''
# Computing the null-distribution
if subjects:
yis = [permutation_subjects(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_subjects)(K, yis[i], n_folds) for i in range(iterations))
else:
yis = [np.random.permutation(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, yis[i], n_folds, scoring=balanced_accuracy_scoring, param_grid=param_grid) for i in range(iterations))
# acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_cv)(K, yis[i], C=100., n_folds=n_folds) for i in range(iterations))
p_value = max(1.0 / iterations, (acc_null > acc).sum()
/ float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return acc, acc_null, p_value
def apply_ktst(K, y, iterations=10000, subjects=False, verbose=True):
"""
Compute MMD^2_u, its null distribution and the p-value of the
kernel two-sample test.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
verbose: bool
Verbosity
Returns:
-------
mmd2u: float
MMD^2_u value.
acc_null: array
Null distribution of the MMD^2_u
p_value: float
p-value
"""
assert len(np.unique(y)) == 2, 'KTST only works on binary problems'
# Assuming that the first m rows of the kernel matrix are from one
# class and the other n rows from the second class.
m = len(y[y == 0])
n = len(y[y == 1])
mmd2u = MMD2u(K, m, n)
if verbose:
print("MMD^2_u = %s" % mmd2u)
print("Computing the null distribution.")
if subjects:
perms = [permutation_subjects_ktst(y) for i in range(iterations)]
mmd2u_null = compute_null_distribution_given_permutations(K, m, n,
perms,
iterations)
else:
mmd2u_null = compute_null_distribution(K, m, n, iterations,
verbose=verbose)
p_value = max(1.0/iterations, (mmd2u_null > mmd2u).sum()
/ float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return mmd2u, mmd2u_null, p_value
def plot_null_distribution(stats, stats_null, p_value, data_name='',
stats_name='$MMD^2_u$', save_figure=True):
"""Plot the observed value for the test statistic, its null
distribution and p-value.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
prob, bins, patches = plt.hist(stats_null, bins=50, normed=True)
ax.plot(stats, prob.max()/30, 'w*', markersize=15,
markeredgecolor='k', markeredgewidth=2,
label="%s = %s" % (stats_name, stats))
ax.annotate('p-value: %s' % (p_value),
xy=(float(stats), prob.max()/9.), xycoords='data',
xytext=(-105, 30), textcoords='offset points',
bbox=dict(boxstyle="round", fc="1."),
arrowprops={"arrowstyle": "->",
"connectionstyle": "angle,angleA=0,angleB=90,rad=10"},
)
plt.xlabel(stats_name)
plt.ylabel('p(%s)' % stats_name)
plt.legend(numpoints=1)
plt.title('Data: %s' % data_name)
if save_figure:
save_dir = 'figures'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
stn = 'ktst' if stats_name == '$MMD^2_u$' else 'clf'
fig_name = os.path.join(save_dir, '%s_%s.pdf' % (data_name, stn))
fig.savefig(fig_name)
| """Scoring function that computes the balanced accuracy to be used
internally in the cross-validation procedure.
"""
y_pred = clf.predict(X)
conf_mat = confusion_matrix(y, y_pred)
bal_acc = 0.
for i in range(len(conf_mat)):
bal_acc += (float(conf_mat[i, i])) / np.sum(conf_mat[i])
bal_acc /= len(conf_mat)
return bal_acc | identifier_body |
classif_and_ktst.py | """Classification-based test and kernel two-sample test.
Author: Sandro Vega-Pons, Emanuele Olivetti.
"""
import os
import numpy as np
from sklearn.metrics import pairwise_distances, confusion_matrix
from sklearn.metrics import pairwise_kernels
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, KFold, cross_val_score
from sklearn.grid_search import GridSearchCV
from kernel_two_sample_test import MMD2u, compute_null_distribution
from kernel_two_sample_test import compute_null_distribution_given_permutations
import matplotlib.pylab as plt
from joblib import Parallel, delayed
def compute_rbf_kernel_matrix(X):
"""Compute the RBF kernel matrix with sigma2 as the median pairwise
distance.
"""
sigma2 = np.median(pairwise_distances(X, metric='euclidean'))**2
K = pairwise_kernels(X, X, metric='rbf', gamma=1.0/sigma2, n_jobs=-1)
return K
def balanced_accuracy_scoring(clf, X, y):
"""Scoring function that computes the balanced accuracy to be used
internally in the cross-validation procedure.
"""
y_pred = clf.predict(X)
conf_mat = confusion_matrix(y, y_pred)
bal_acc = 0.
for i in range(len(conf_mat)):
bal_acc += (float(conf_mat[i, i])) / np.sum(conf_mat[i])
bal_acc /= len(conf_mat)
return bal_acc
def compute_svm_cv(K, y, C=100.0, n_folds=5,
scoring=balanced_accuracy_scoring):
"""Compute cross-validated score of SVM with given precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds)
clf = SVC(C=C, kernel='precomputed', class_weight='auto')
scores = cross_val_score(clf, K, y,
scoring=scoring, cv=cv)
return scores.mean()
def compute_svm_subjects(K, y, n_folds=5):
"""
"""
cv = KFold(len(K)/2, n_folds)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
train_ids = np.concatenate((train, len(K)/2+train))
test_ids = np.concatenate((test, len(K)/2+test))
clf = SVC(kernel='precomputed')
clf.fit(K[train_ids, :][:, train_ids], y[train_ids])
scores[i] = clf.score(K[test_ids, :][:, train_ids], y[test_ids])
return scores.mean()
def permutation_subjects(y):
"""Permute class labels of Contextual Disorder dataset.
"""
y_perm = np.random.randint(0, 2, len(y)/2)
y_perm = np.concatenate((y_perm, np.logical_not(y_perm).astype(int)))
return y_perm
def permutation_subjects_ktst(y):
"""Permute class labels of Contextual Disorder dataset for KTST.
"""
yp = np.random.randint(0, 2, len(y)/2)
yp = np.concatenate((yp, np.logical_not(yp).astype(int)))
y_perm = np.arange(len(y))
for i in range(len(y)/2):
if yp[i] == 1:
y_perm[i] = len(y)/2+i
y_perm[len(y)/2+i] = i
return y_perm
def | (K, y, n_folds,
scoring=balanced_accuracy_scoring,
random_state=None,
param_grid=[{'C': np.logspace(-5, 5, 25)}]):
"""Compute cross-validated score of SVM using precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
random_state=random_state)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
cvclf = SVC(kernel='precomputed')
y_train = y[train]
cvcv = StratifiedKFold(y_train, n_folds=n_folds,
shuffle=True,
random_state=random_state)
clf = GridSearchCV(cvclf, param_grid=param_grid, scoring=scoring,
cv=cvcv, n_jobs=1)
clf.fit(K[train, :][:, train], y_train)
# print clf.best_params_
scores[i] = clf.score(K[test, :][:, train], y[test])
return scores.mean()
def apply_svm(K, y, n_folds=5, iterations=10000, subjects=False, verbose=True,
random_state=None):
"""
Compute the balanced accuracy, its null distribution and the p-value.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
cv: Number of folds in the stratified cross-validation
verbose: bool
Verbosity
Returns:
-------
acc: float
Average balanced accuracy.
acc_null: array
Null distribution of the balanced accuracy.
p_value: float
p-value
"""
# Computing the accuracy
param_grid = [{'C': np.logspace(-5, 5, 20)}]
if subjects:
acc = compute_svm_subjects(K, y, n_folds)
else:
acc = compute_svm_score_nestedCV(K, y, n_folds, param_grid=param_grid,
random_state=random_state)
if verbose:
print("Mean balanced accuracy = %s" % (acc))
print("Computing the null-distribution.")
# Computing the null-distribution
# acc_null = np.zeros(iterations)
# for i in range(iterations):
# if verbose and (i % 1000) == 0:
# print(i),
# stdout.flush()
# y_perm = np.random.permutation(y)
# acc_null[i] = compute_svm_score_nestedCV(K, y_perm, n_folds,
# param_grid=param_grid)
# if verbose:
# print ''
# Computing the null-distribution
if subjects:
yis = [permutation_subjects(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_subjects)(K, yis[i], n_folds) for i in range(iterations))
else:
yis = [np.random.permutation(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, yis[i], n_folds, scoring=balanced_accuracy_scoring, param_grid=param_grid) for i in range(iterations))
# acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_cv)(K, yis[i], C=100., n_folds=n_folds) for i in range(iterations))
p_value = max(1.0 / iterations, (acc_null > acc).sum()
/ float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return acc, acc_null, p_value
def apply_ktst(K, y, iterations=10000, subjects=False, verbose=True):
"""
Compute MMD^2_u, its null distribution and the p-value of the
kernel two-sample test.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
verbose: bool
Verbosity
Returns:
-------
mmd2u: float
MMD^2_u value.
acc_null: array
Null distribution of the MMD^2_u
p_value: float
p-value
"""
assert len(np.unique(y)) == 2, 'KTST only works on binary problems'
# Assuming that the first m rows of the kernel matrix are from one
# class and the other n rows from the second class.
m = len(y[y == 0])
n = len(y[y == 1])
mmd2u = MMD2u(K, m, n)
if verbose:
print("MMD^2_u = %s" % mmd2u)
print("Computing the null distribution.")
if subjects:
perms = [permutation_subjects_ktst(y) for i in range(iterations)]
mmd2u_null = compute_null_distribution_given_permutations(K, m, n,
perms,
iterations)
else:
mmd2u_null = compute_null_distribution(K, m, n, iterations,
verbose=verbose)
p_value = max(1.0/iterations, (mmd2u_null > mmd2u).sum()
/ float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return mmd2u, mmd2u_null, p_value
def plot_null_distribution(stats, stats_null, p_value, data_name='',
stats_name='$MMD^2_u$', save_figure=True):
"""Plot the observed value for the test statistic, its null
distribution and p-value.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
prob, bins, patches = plt.hist(stats_null, bins=50, normed=True)
ax.plot(stats, prob.max()/30, 'w*', markersize=15,
markeredgecolor='k', markeredgewidth=2,
label="%s = %s" % (stats_name, stats))
ax.annotate('p-value: %s' % (p_value),
xy=(float(stats), prob.max()/9.), xycoords='data',
xytext=(-105, 30), textcoords='offset points',
bbox=dict(boxstyle="round", fc="1."),
arrowprops={"arrowstyle": "->",
"connectionstyle": "angle,angleA=0,angleB=90,rad=10"},
)
plt.xlabel(stats_name)
plt.ylabel('p(%s)' % stats_name)
plt.legend(numpoints=1)
plt.title('Data: %s' % data_name)
if save_figure:
save_dir = 'figures'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
stn = 'ktst' if stats_name == '$MMD^2_u$' else 'clf'
fig_name = os.path.join(save_dir, '%s_%s.pdf' % (data_name, stn))
fig.savefig(fig_name)
| compute_svm_score_nestedCV | identifier_name |
classif_and_ktst.py | """Classification-based test and kernel two-sample test.
Author: Sandro Vega-Pons, Emanuele Olivetti.
"""
import os
import numpy as np
from sklearn.metrics import pairwise_distances, confusion_matrix
from sklearn.metrics import pairwise_kernels
from sklearn.svm import SVC
from sklearn.cross_validation import StratifiedKFold, KFold, cross_val_score
from sklearn.grid_search import GridSearchCV
from kernel_two_sample_test import MMD2u, compute_null_distribution
from kernel_two_sample_test import compute_null_distribution_given_permutations
import matplotlib.pylab as plt
from joblib import Parallel, delayed
def compute_rbf_kernel_matrix(X):
"""Compute the RBF kernel matrix with sigma2 as the median pairwise
distance.
"""
sigma2 = np.median(pairwise_distances(X, metric='euclidean'))**2
K = pairwise_kernels(X, X, metric='rbf', gamma=1.0/sigma2, n_jobs=-1)
return K
def balanced_accuracy_scoring(clf, X, y):
"""Scoring function that computes the balanced accuracy to be used
internally in the cross-validation procedure.
"""
y_pred = clf.predict(X)
conf_mat = confusion_matrix(y, y_pred)
bal_acc = 0.
for i in range(len(conf_mat)):
bal_acc += (float(conf_mat[i, i])) / np.sum(conf_mat[i])
bal_acc /= len(conf_mat)
return bal_acc
def compute_svm_cv(K, y, C=100.0, n_folds=5,
scoring=balanced_accuracy_scoring):
"""Compute cross-validated score of SVM with given precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds)
clf = SVC(C=C, kernel='precomputed', class_weight='auto')
scores = cross_val_score(clf, K, y,
scoring=scoring, cv=cv)
return scores.mean()
def compute_svm_subjects(K, y, n_folds=5):
"""
"""
cv = KFold(len(K)/2, n_folds)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
train_ids = np.concatenate((train, len(K)/2+train))
test_ids = np.concatenate((test, len(K)/2+test))
clf = SVC(kernel='precomputed')
clf.fit(K[train_ids, :][:, train_ids], y[train_ids])
scores[i] = clf.score(K[test_ids, :][:, train_ids], y[test_ids])
return scores.mean()
def permutation_subjects(y):
"""Permute class labels of Contextual Disorder dataset.
"""
y_perm = np.random.randint(0, 2, len(y)/2)
y_perm = np.concatenate((y_perm, np.logical_not(y_perm).astype(int)))
return y_perm
def permutation_subjects_ktst(y):
"""Permute class labels of Contextual Disorder dataset for KTST.
"""
yp = np.random.randint(0, 2, len(y)/2)
yp = np.concatenate((yp, np.logical_not(yp).astype(int)))
y_perm = np.arange(len(y))
for i in range(len(y)/2):
if yp[i] == 1:
y_perm[i] = len(y)/2+i
y_perm[len(y)/2+i] = i
return y_perm
def compute_svm_score_nestedCV(K, y, n_folds,
scoring=balanced_accuracy_scoring,
random_state=None,
param_grid=[{'C': np.logspace(-5, 5, 25)}]):
"""Compute cross-validated score of SVM using precomputed kernel.
"""
cv = StratifiedKFold(y, n_folds=n_folds, shuffle=True,
random_state=random_state)
scores = np.zeros(n_folds)
for i, (train, test) in enumerate(cv):
cvclf = SVC(kernel='precomputed')
y_train = y[train]
cvcv = StratifiedKFold(y_train, n_folds=n_folds,
shuffle=True,
random_state=random_state)
clf = GridSearchCV(cvclf, param_grid=param_grid, scoring=scoring,
cv=cvcv, n_jobs=1)
clf.fit(K[train, :][:, train], y_train)
# print clf.best_params_
scores[i] = clf.score(K[test, :][:, train], y[test]) | random_state=None):
"""
Compute the balanced accuracy, its null distribution and the p-value.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
cv: Number of folds in the stratified cross-validation
verbose: bool
Verbosity
Returns:
-------
acc: float
Average balanced accuracy.
acc_null: array
Null distribution of the balanced accuracy.
p_value: float
p-value
"""
# Computing the accuracy
param_grid = [{'C': np.logspace(-5, 5, 20)}]
if subjects:
acc = compute_svm_subjects(K, y, n_folds)
else:
acc = compute_svm_score_nestedCV(K, y, n_folds, param_grid=param_grid,
random_state=random_state)
if verbose:
print("Mean balanced accuracy = %s" % (acc))
print("Computing the null-distribution.")
# Computing the null-distribution
# acc_null = np.zeros(iterations)
# for i in range(iterations):
# if verbose and (i % 1000) == 0:
# print(i),
# stdout.flush()
# y_perm = np.random.permutation(y)
# acc_null[i] = compute_svm_score_nestedCV(K, y_perm, n_folds,
# param_grid=param_grid)
# if verbose:
# print ''
# Computing the null-distribution
if subjects:
yis = [permutation_subjects(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_subjects)(K, yis[i], n_folds) for i in range(iterations))
else:
yis = [np.random.permutation(y) for i in range(iterations)]
acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_score_nestedCV)(K, yis[i], n_folds, scoring=balanced_accuracy_scoring, param_grid=param_grid) for i in range(iterations))
# acc_null = Parallel(n_jobs=-1)(delayed(compute_svm_cv)(K, yis[i], C=100., n_folds=n_folds) for i in range(iterations))
p_value = max(1.0 / iterations, (acc_null > acc).sum()
/ float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return acc, acc_null, p_value
def apply_ktst(K, y, iterations=10000, subjects=False, verbose=True):
"""
Compute MMD^2_u, its null distribution and the p-value of the
kernel two-sample test.
Parameters:
----------
K: array-like
Kernel matrix
y: array_like
class labels
verbose: bool
Verbosity
Returns:
-------
mmd2u: float
MMD^2_u value.
acc_null: array
Null distribution of the MMD^2_u
p_value: float
p-value
"""
assert len(np.unique(y)) == 2, 'KTST only works on binary problems'
# Assuming that the first m rows of the kernel matrix are from one
# class and the other n rows from the second class.
m = len(y[y == 0])
n = len(y[y == 1])
mmd2u = MMD2u(K, m, n)
if verbose:
print("MMD^2_u = %s" % mmd2u)
print("Computing the null distribution.")
if subjects:
perms = [permutation_subjects_ktst(y) for i in range(iterations)]
mmd2u_null = compute_null_distribution_given_permutations(K, m, n,
perms,
iterations)
else:
mmd2u_null = compute_null_distribution(K, m, n, iterations,
verbose=verbose)
p_value = max(1.0/iterations, (mmd2u_null > mmd2u).sum()
/ float(iterations))
if verbose:
print("p-value ~= %s \t (resolution : %s)" % (p_value, 1.0/iterations))
return mmd2u, mmd2u_null, p_value
def plot_null_distribution(stats, stats_null, p_value, data_name='',
stats_name='$MMD^2_u$', save_figure=True):
"""Plot the observed value for the test statistic, its null
distribution and p-value.
"""
fig = plt.figure()
ax = fig.add_subplot(111)
prob, bins, patches = plt.hist(stats_null, bins=50, normed=True)
ax.plot(stats, prob.max()/30, 'w*', markersize=15,
markeredgecolor='k', markeredgewidth=2,
label="%s = %s" % (stats_name, stats))
ax.annotate('p-value: %s' % (p_value),
xy=(float(stats), prob.max()/9.), xycoords='data',
xytext=(-105, 30), textcoords='offset points',
bbox=dict(boxstyle="round", fc="1."),
arrowprops={"arrowstyle": "->",
"connectionstyle": "angle,angleA=0,angleB=90,rad=10"},
)
plt.xlabel(stats_name)
plt.ylabel('p(%s)' % stats_name)
plt.legend(numpoints=1)
plt.title('Data: %s' % data_name)
if save_figure:
save_dir = 'figures'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
stn = 'ktst' if stats_name == '$MMD^2_u$' else 'clf'
fig_name = os.path.join(save_dir, '%s_%s.pdf' % (data_name, stn))
fig.savefig(fig_name) |
return scores.mean()
def apply_svm(K, y, n_folds=5, iterations=10000, subjects=False, verbose=True, | random_line_split |
git.rs | use std::collections::HashMap;
use std::env;
use crate::config;
use crate::errors::*;
use crate::ConfigScope;
pub struct Repo {
repo: git2::Repository,
}
impl Repo {
pub fn new() -> Result<Self> {
let repo = env::current_dir()
.chain_err(|| "")
.and_then(|current_dir| git2::Repository::discover(current_dir).chain_err(|| ""))?;
Ok(Repo { repo })
}
pub fn config(&self) -> Result<Config> {
self.repo
.config()
.map(|config| Config { config })
.chain_err(|| "")
}
pub fn auto_include(&self, filename: &str) -> Result<()> {
let include_path = format!("../{}", filename);
let workdir = match self.repo.workdir() {
Some(dir) => dir,
_ => {
return Ok(());
}
};
let mut path_buf = workdir.to_path_buf();
path_buf.push(filename);
if !path_buf.exists() {
return Ok(());
}
let include_paths = self.include_paths()?;
if include_paths.contains(&include_path) {
return Ok(());
}
let mut config = self.local_config()?;
config
.set_multivar("include.path", "^$", &include_path) | fn include_paths(&self) -> Result<Vec<String>> {
let config = self.local_config()?;
let include_paths: Vec<String> = config
.entries(Some("include.path"))
.chain_err(|| "")?
.into_iter()
.map(|entry| {
entry
.chain_err(|| "")
.and_then(|entry| entry.value().map(String::from).ok_or_else(|| "".into()))
})
.collect::<Result<_>>()?;
Ok(include_paths)
}
fn local_config(&self) -> Result<git2::Config> {
let config = self.repo.config().chain_err(|| "")?;
config.open_level(git2::ConfigLevel::Local).chain_err(|| "")
}
}
pub struct Config {
config: git2::Config,
}
impl Config {
pub fn new(scope: ConfigScope) -> Result<Self> {
let config = match scope {
ConfigScope::Local => git2::Config::open_default(),
ConfigScope::Global => git2::Config::open_default().and_then(|mut r| r.open_global()),
};
config.map(|config| Config { config }).chain_err(|| "")
}
}
impl config::Config for Config {
fn get(&self, name: &str) -> Result<String> {
self.config
.get_string(name)
.chain_err(|| format!("error getting git config for '{}'", name))
}
fn get_all(&self, glob: &str) -> Result<HashMap<String, String>> {
let mut result = HashMap::new();
let entries = self
.config
.entries(Some(glob))
.chain_err(|| "error getting git config entries")?;
for entry in &entries {
let entry = entry.chain_err(|| "error getting git config entry")?;
if let (Some(name), Some(value)) = (entry.name(), entry.value()) {
result.insert(name.into(), value.into());
}
}
Ok(result)
}
fn add(&mut self, name: &str, value: &str) -> Result<()> {
self.config
.set_multivar(name, "^$", value)
.chain_err(|| format!("error adding git config '{}': '{}'", name, value))
}
fn set(&mut self, name: &str, value: &str) -> Result<()> {
self.config
.set_str(name, value)
.chain_err(|| format!("error setting git config '{}': '{}'", name, value))
}
fn clear(&mut self, name: &str) -> Result<()> {
self.config
.remove(name)
.chain_err(|| format!("error removing git config '{}'", name))
}
} | .and(Ok(()))
.chain_err(|| "")
}
| random_line_split |
git.rs | use std::collections::HashMap;
use std::env;
use crate::config;
use crate::errors::*;
use crate::ConfigScope;
pub struct Repo {
repo: git2::Repository,
}
impl Repo {
pub fn new() -> Result<Self> {
let repo = env::current_dir()
.chain_err(|| "")
.and_then(|current_dir| git2::Repository::discover(current_dir).chain_err(|| ""))?;
Ok(Repo { repo })
}
pub fn config(&self) -> Result<Config> {
self.repo
.config()
.map(|config| Config { config })
.chain_err(|| "")
}
pub fn auto_include(&self, filename: &str) -> Result<()> {
let include_path = format!("../{}", filename);
let workdir = match self.repo.workdir() {
Some(dir) => dir,
_ => {
return Ok(());
}
};
let mut path_buf = workdir.to_path_buf();
path_buf.push(filename);
if !path_buf.exists() {
return Ok(());
}
let include_paths = self.include_paths()?;
if include_paths.contains(&include_path) {
return Ok(());
}
let mut config = self.local_config()?;
config
.set_multivar("include.path", "^$", &include_path)
.and(Ok(()))
.chain_err(|| "")
}
fn include_paths(&self) -> Result<Vec<String>> {
let config = self.local_config()?;
let include_paths: Vec<String> = config
.entries(Some("include.path"))
.chain_err(|| "")?
.into_iter()
.map(|entry| {
entry
.chain_err(|| "")
.and_then(|entry| entry.value().map(String::from).ok_or_else(|| "".into()))
})
.collect::<Result<_>>()?;
Ok(include_paths)
}
fn local_config(&self) -> Result<git2::Config> {
let config = self.repo.config().chain_err(|| "")?;
config.open_level(git2::ConfigLevel::Local).chain_err(|| "")
}
}
pub struct Config {
config: git2::Config,
}
impl Config {
pub fn new(scope: ConfigScope) -> Result<Self> {
let config = match scope {
ConfigScope::Local => git2::Config::open_default(),
ConfigScope::Global => git2::Config::open_default().and_then(|mut r| r.open_global()),
};
config.map(|config| Config { config }).chain_err(|| "")
}
}
impl config::Config for Config {
fn get(&self, name: &str) -> Result<String> {
self.config
.get_string(name)
.chain_err(|| format!("error getting git config for '{}'", name))
}
fn get_all(&self, glob: &str) -> Result<HashMap<String, String>> {
let mut result = HashMap::new();
let entries = self
.config
.entries(Some(glob))
.chain_err(|| "error getting git config entries")?;
for entry in &entries {
let entry = entry.chain_err(|| "error getting git config entry")?;
if let (Some(name), Some(value)) = (entry.name(), entry.value()) {
result.insert(name.into(), value.into());
}
}
Ok(result)
}
fn | (&mut self, name: &str, value: &str) -> Result<()> {
self.config
.set_multivar(name, "^$", value)
.chain_err(|| format!("error adding git config '{}': '{}'", name, value))
}
fn set(&mut self, name: &str, value: &str) -> Result<()> {
self.config
.set_str(name, value)
.chain_err(|| format!("error setting git config '{}': '{}'", name, value))
}
fn clear(&mut self, name: &str) -> Result<()> {
self.config
.remove(name)
.chain_err(|| format!("error removing git config '{}'", name))
}
}
| add | identifier_name |
git.rs | use std::collections::HashMap;
use std::env;
use crate::config;
use crate::errors::*;
use crate::ConfigScope;
pub struct Repo {
repo: git2::Repository,
}
impl Repo {
pub fn new() -> Result<Self> {
let repo = env::current_dir()
.chain_err(|| "")
.and_then(|current_dir| git2::Repository::discover(current_dir).chain_err(|| ""))?;
Ok(Repo { repo })
}
pub fn config(&self) -> Result<Config> {
self.repo
.config()
.map(|config| Config { config })
.chain_err(|| "")
}
pub fn auto_include(&self, filename: &str) -> Result<()> {
let include_path = format!("../{}", filename);
let workdir = match self.repo.workdir() {
Some(dir) => dir,
_ => {
return Ok(());
}
};
let mut path_buf = workdir.to_path_buf();
path_buf.push(filename);
if !path_buf.exists() |
let include_paths = self.include_paths()?;
if include_paths.contains(&include_path) {
return Ok(());
}
let mut config = self.local_config()?;
config
.set_multivar("include.path", "^$", &include_path)
.and(Ok(()))
.chain_err(|| "")
}
fn include_paths(&self) -> Result<Vec<String>> {
let config = self.local_config()?;
let include_paths: Vec<String> = config
.entries(Some("include.path"))
.chain_err(|| "")?
.into_iter()
.map(|entry| {
entry
.chain_err(|| "")
.and_then(|entry| entry.value().map(String::from).ok_or_else(|| "".into()))
})
.collect::<Result<_>>()?;
Ok(include_paths)
}
fn local_config(&self) -> Result<git2::Config> {
let config = self.repo.config().chain_err(|| "")?;
config.open_level(git2::ConfigLevel::Local).chain_err(|| "")
}
}
pub struct Config {
config: git2::Config,
}
impl Config {
pub fn new(scope: ConfigScope) -> Result<Self> {
let config = match scope {
ConfigScope::Local => git2::Config::open_default(),
ConfigScope::Global => git2::Config::open_default().and_then(|mut r| r.open_global()),
};
config.map(|config| Config { config }).chain_err(|| "")
}
}
impl config::Config for Config {
fn get(&self, name: &str) -> Result<String> {
self.config
.get_string(name)
.chain_err(|| format!("error getting git config for '{}'", name))
}
fn get_all(&self, glob: &str) -> Result<HashMap<String, String>> {
let mut result = HashMap::new();
let entries = self
.config
.entries(Some(glob))
.chain_err(|| "error getting git config entries")?;
for entry in &entries {
let entry = entry.chain_err(|| "error getting git config entry")?;
if let (Some(name), Some(value)) = (entry.name(), entry.value()) {
result.insert(name.into(), value.into());
}
}
Ok(result)
}
fn add(&mut self, name: &str, value: &str) -> Result<()> {
self.config
.set_multivar(name, "^$", value)
.chain_err(|| format!("error adding git config '{}': '{}'", name, value))
}
fn set(&mut self, name: &str, value: &str) -> Result<()> {
self.config
.set_str(name, value)
.chain_err(|| format!("error setting git config '{}': '{}'", name, value))
}
fn clear(&mut self, name: &str) -> Result<()> {
self.config
.remove(name)
.chain_err(|| format!("error removing git config '{}'", name))
}
}
| {
return Ok(());
} | conditional_block |
git.rs | use std::collections::HashMap;
use std::env;
use crate::config;
use crate::errors::*;
use crate::ConfigScope;
pub struct Repo {
repo: git2::Repository,
}
impl Repo {
pub fn new() -> Result<Self> {
let repo = env::current_dir()
.chain_err(|| "")
.and_then(|current_dir| git2::Repository::discover(current_dir).chain_err(|| ""))?;
Ok(Repo { repo })
}
pub fn config(&self) -> Result<Config> {
self.repo
.config()
.map(|config| Config { config })
.chain_err(|| "")
}
pub fn auto_include(&self, filename: &str) -> Result<()> {
let include_path = format!("../{}", filename);
let workdir = match self.repo.workdir() {
Some(dir) => dir,
_ => {
return Ok(());
}
};
let mut path_buf = workdir.to_path_buf();
path_buf.push(filename);
if !path_buf.exists() {
return Ok(());
}
let include_paths = self.include_paths()?;
if include_paths.contains(&include_path) {
return Ok(());
}
let mut config = self.local_config()?;
config
.set_multivar("include.path", "^$", &include_path)
.and(Ok(()))
.chain_err(|| "")
}
fn include_paths(&self) -> Result<Vec<String>> {
let config = self.local_config()?;
let include_paths: Vec<String> = config
.entries(Some("include.path"))
.chain_err(|| "")?
.into_iter()
.map(|entry| {
entry
.chain_err(|| "")
.and_then(|entry| entry.value().map(String::from).ok_or_else(|| "".into()))
})
.collect::<Result<_>>()?;
Ok(include_paths)
}
fn local_config(&self) -> Result<git2::Config> {
let config = self.repo.config().chain_err(|| "")?;
config.open_level(git2::ConfigLevel::Local).chain_err(|| "")
}
}
pub struct Config {
config: git2::Config,
}
impl Config {
pub fn new(scope: ConfigScope) -> Result<Self> {
let config = match scope {
ConfigScope::Local => git2::Config::open_default(),
ConfigScope::Global => git2::Config::open_default().and_then(|mut r| r.open_global()),
};
config.map(|config| Config { config }).chain_err(|| "")
}
}
impl config::Config for Config {
fn get(&self, name: &str) -> Result<String> {
self.config
.get_string(name)
.chain_err(|| format!("error getting git config for '{}'", name))
}
fn get_all(&self, glob: &str) -> Result<HashMap<String, String>> |
fn add(&mut self, name: &str, value: &str) -> Result<()> {
self.config
.set_multivar(name, "^$", value)
.chain_err(|| format!("error adding git config '{}': '{}'", name, value))
}
fn set(&mut self, name: &str, value: &str) -> Result<()> {
self.config
.set_str(name, value)
.chain_err(|| format!("error setting git config '{}': '{}'", name, value))
}
fn clear(&mut self, name: &str) -> Result<()> {
self.config
.remove(name)
.chain_err(|| format!("error removing git config '{}'", name))
}
}
| {
let mut result = HashMap::new();
let entries = self
.config
.entries(Some(glob))
.chain_err(|| "error getting git config entries")?;
for entry in &entries {
let entry = entry.chain_err(|| "error getting git config entry")?;
if let (Some(name), Some(value)) = (entry.name(), entry.value()) {
result.insert(name.into(), value.into());
}
}
Ok(result)
} | identifier_body |
home_languages_persian.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of |
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Languages_Persian():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
languages=["Persian"])) | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details. | random_line_split |
home_languages_persian.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class La | :
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
menu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
languages=["Persian"])) | nguages_Persian() | identifier_name |
home_languages_persian.py | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
This file is part of XBMC Mega Pack Addon.
Copyright (C) 2014 Wolverine (xbmcmegapack@gmail.com)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License along
with this program. If not, see http://www.gnu.org/licenses/gpl-3.0.html
"""
class Languages_Persian():
'''Class that manages this specific menu context.'''
def open(self, plugin, menu):
me | nu.add_xplugins(plugin.get_xplugins(dictionaries=["Channels",
"Events", "Live", "Movies", "Sports", "TVShows"],
languages=["Persian"])) | identifier_body | |
sermons_to_dynalist.py | #!/usr/bin/env python3
#
################################################################################
# Important note:
# Because of the highly individual nature of the ODT files to be processed, each
# user will have to adjust this script according to their needs. It will fail
# out of the box unless you happen to format your source files the same way I
# do. Areas that I think you will most likely have to modify are commented with
# three leading hashes, like so: ###.
################################################################################
import argparse
import os
import re
import string
import sys
html_mode_available = True
try:
from bs4 import BeautifulSoup
except ImportError:
html_mode_available = False
# List types that may be set in HTML by LibreOffice. The type may also be unspecified.
html_types = set(('I','A','1','i','a', 'disc', 'circle', 'square'))
### These lists represent the sequence of outline levels in your original
### document, as they get converted to plain text by LibreOffice. You'll need to
### adjust it to match your usage. Make sure the order is correct (outermost
### first) and the lists are long enough to cover every possible list in your
### original. Note that if you have any levels which repeat symbols, invoking
### this script with --type=text will produce incorrect output.
###
### This function is only in use with --type=text in effect.
def list_symbols() -> list:
'''These symbols are used when converting from plain text, not HTML.'''
symbols = [['I','II','III','IV','V','VI','VII','VIII','IX','X']]
symbols.append(list(string.ascii_uppercase)) # A, B, C, ..., Z
symbols.append([str(i) for i in range(1, 21)]) # 1, 2, 3, ..., 20
symbols.append(list(string.ascii_lowercase)) # a, b, c, ..., z
symbols.append(['i','ii','iii','iv','v','vi','vii','viii','ix','x'])
symbols.append(['α','β','γ','δ','ε'])
return symbols
def format_item(text: str, depth: int) -> str:
'''Add tabs to the beginning of each item.'''
assert isinstance(depth, int) and depth >= 0
return '\t' * depth + text
def format_text_line(line: str, symbols: list) -> str:
'''Process a single item, when --type == "text"'''
depth = 0
parts = line.partition('.')
if parts[1] == '.':
for s in symbols:
if parts[0] in s:
break
else:
depth += 1
return format_item(parts[2].strip(), depth)
# Only called when --type == 'html'
def format_html(soup):
'''Formats the converted-to-html source for Dynalist'''
output = []
level = -1
def iterate_children(children, level):
'''Iterates over HTML list tags and figures out what is what.'''
level += 1
for child in children:
if child.name == 'p':
output.append({'level': level, 'item': child})
elif child.name == 'div':
iterate_children(child, level)
#elif child.name == 'div' and child.attrs.get('type') not in html_types:
# iterate_children(child, level-1)
def parse_items(item):
'''Parses each item and converts it to a string formatted for Dynalist'''
def fix_str(st):
'''Removes extraneous newlines and whitespace'''
st = st.replace('\n', ' ')
return re.sub(r'\s+', ' ', st)
def get_footnote(a):
href = a['href']
for parent in a.parents:
if parent.parent is None:
footnote_arr = [i for i in parent.select(href)[0].next_siblings]
footnote = []
for fn in footnote_arr:
footnote.append(str(fn))
footnote = ''.join(footnote)
break
return parse_items({'level': 0, 'item': '#fixme #footnote ' + str(footnote)})
item_level = item['level']
item = item['item']
accumulator = ['\t' * item_level]
footnotes = []
if hasattr(item, 'contents'):
for i in item.contents:
if hasattr(i, 'select') and i.select('a') and 'footnote' in i.a.get('href'):
footnotes.append(get_footnote(i.a))
# Search also for footnotes in the next sibling. If it's several
# siblings later, it won't be found, currently.
elif hasattr(i, 'nextSibling') and hasattr(i.nextSibling, 'name') and i.nextSibling.name == 'a' and i.nextSibling.get('href') and 'footnote' in i.nextSibling.get('href'):
footnotes.append(get_footnote(i.nextSibling))
if isinstance(i, str):
accumulator.append(fix_str(i))
elif i.name == 'i' and len(i.contents) == 1:
accumulator.append('__' + fix_str(i.contents[0]) + '__')
elif i.name == 'b' and len(i.contents) == 1:
accumulator.append('**' + fix_str(i.contents[0]) + '**')
elif (i.name == 'span' or i.name=='a' or i.name=='sup') and i.strings is not None:
accumulator.append(fix_str(''.join(list(i.strings))))
else:
print(i)
exit("ERROR: I don't know what to do with this.")
else:
item = item.replace('<i>', '__')
item = item.replace('</i>', '__')
item = item.replace('<b>', '**')
item = item.replace('</b>', '**')
item = re.sub(r'<[^>]{2,100}>', '', item)
accumulator.append(item)
o = [''.join(accumulator)]
for f in footnotes:
o.append('\n' + '\t' * item_level + f)
return ''.join(o)
for outer_list in soup.contents:
if outer_list.name != 'div' or outer_list.attrs.get('type') not in html_types:
continue
iterate_children(outer_list, level)
break
for i in range(len(output)):
tmp = parse_items(output[i])
# The below is an ugly hack to get rid of extra spaces at the beginning
# of a line. For some reason, re.sub was just corrupting things.
if re.match(r'^([\t]*)[ ]+(\S+)', tmp):
start = end = 0
for j in range(len(tmp)):
if start == 0 and tmp[j] != '\t':
start = j
elif start > 0 and tmp[j] != ' ':
end = j
break
output[i] = str(tmp[:start] + tmp[end:])
else:
output[i] = tmp
return output
def parse_args() -> argparse.Namespace:
'''Handles command-line arguments'''
parser = argparse.ArgumentParser(description='''Converts LibreOffice sermon outlines to a format suitable for pasting into Dynalist.
Prior to running this script, the sermon should already be exported as the specified type.''')
infile = 'File to read for input. Default: stdin'
outfile = 'File to write for output. Default: stdout'
force = 'Overwrite the output file if it already exists'
type_ = 'The type of the input file. Default: text. Other options: html.'
add = parser.add_argument
add('-i', '--infile', metavar='FILE', default=None, help=infile)
add('-o', '--outfile', metavar='FILE', default=None, help=outfile) |
def main():
args = parse_args()
if args.infile:
if not os.path.isfile(args.infile):
exit('ERROR: Input file doesn\'t exist: {}'.format(args.infile))
with open(args.infile) as f:
infile = f.read()
else:
with open(sys.stdin) as f:
infile = f.read()
if args.outfile and os.path.exists(args.outfile):
if not os.path.isfile(args.outfile):
exit("ERROR: Output file isn't a regular file")
if not args.force:
exit("ERROR: Can't overwrite existing output file. Use --force to override.")
output = []
if args.type == 'text':
symbols = list_symbols()
output = [format_text_line(i.strip(), symbols) for i in infile.split('\n') if len(i.strip()) > 0]
elif args.type == 'html':
if html_mode_available:
soup = BeautifulSoup(infile, 'html.parser')
output = format_html(soup)
else:
exit("ERROR: Please install Beautiful Soup 4 (Ubuntu package python-bs4) to use HTML mode.")
else:
exit("ERROR: Invalid input file type")
if args.outfile:
outfile = open(args.outfile, 'w')
else:
outfile = sys.stdout
outfile.write('\n'.join([i for i in output if len(i) > 0]))
outfile.close()
if __name__ == '__main__':
main() | add('-f', '--force', action='store_true', default=False, help=force)
add('-t', '--type', default='text', help=type_)
return parser.parse_args() | random_line_split |
sermons_to_dynalist.py | #!/usr/bin/env python3
#
################################################################################
# Important note:
# Because of the highly individual nature of the ODT files to be processed, each
# user will have to adjust this script according to their needs. It will fail
# out of the box unless you happen to format your source files the same way I
# do. Areas that I think you will most likely have to modify are commented with
# three leading hashes, like so: ###.
################################################################################
import argparse
import os
import re
import string
import sys
html_mode_available = True
try:
from bs4 import BeautifulSoup
except ImportError:
html_mode_available = False
# List types that may be set in HTML by LibreOffice. The type may also be unspecified.
html_types = set(('I','A','1','i','a', 'disc', 'circle', 'square'))
### These lists represent the sequence of outline levels in your original
### document, as they get converted to plain text by LibreOffice. You'll need to
### adjust it to match your usage. Make sure the order is correct (outermost
### first) and the lists are long enough to cover every possible list in your
### original. Note that if you have any levels which repeat symbols, invoking
### this script with --type=text will produce incorrect output.
###
### This function is only in use with --type=text in effect.
def list_symbols() -> list:
'''These symbols are used when converting from plain text, not HTML.'''
symbols = [['I','II','III','IV','V','VI','VII','VIII','IX','X']]
symbols.append(list(string.ascii_uppercase)) # A, B, C, ..., Z
symbols.append([str(i) for i in range(1, 21)]) # 1, 2, 3, ..., 20
symbols.append(list(string.ascii_lowercase)) # a, b, c, ..., z
symbols.append(['i','ii','iii','iv','v','vi','vii','viii','ix','x'])
symbols.append(['α','β','γ','δ','ε'])
return symbols
def format_item(text: str, depth: int) -> str:
'''Add tabs to the beginning of each item.'''
assert isinstance(depth, int) and depth >= 0
return '\t' * depth + text
def format_text_line(line: str, symbols: list) -> str:
'''Process a single item, when --type == "text"'''
depth = 0
parts = line.partition('.')
if parts[1] == '.':
for s in symbols:
if parts[0] in s:
break
else:
depth += 1
return format_item(parts[2].strip(), depth)
# Only called when --type == 'html'
def format_html(soup):
'''Formats the converted-to-html source for Dynalist'''
output = []
level = -1
def iterate_children(children, level):
'''Iterates over HTML list tags and figures out what is what.'''
level += 1
for child in children:
if child.name == 'p':
output.append({'level': level, 'item': child})
elif child.name == 'div':
iterate_children(child, level)
#elif child.name == 'div' and child.attrs.get('type') not in html_types:
# iterate_children(child, level-1)
def parse_items(item):
'''Parses each item and converts it to a string formatted for Dynalist'''
def fix_str(st):
'''Removes extraneous newlines and whitespace'''
st = st.replace('\n', ' ')
return re.sub(r'\s+', ' ', st)
def get_footnote(a):
href = a['href']
for parent in a.parents:
if parent.parent is None:
footnote_arr = [i for i in parent.select(href)[0].next_siblings]
footnote = []
for fn in footnote_arr:
footnote.append(str(fn))
footnote = ''.join(footnote)
break
return parse_items({'level': 0, 'item': '#fixme #footnote ' + str(footnote)})
item_level = item['level']
item = item['item']
accumulator = ['\t' * item_level]
footnotes = []
if hasattr(item, 'contents'):
for i in item.contents:
if hasattr(i, 'select') and i.select('a') and 'footnote' in i.a.get('href'):
footnotes.append(get_footnote(i.a))
# Search also for footnotes in the next sibling. If it's several
# siblings later, it won't be found, currently.
elif hasattr(i, 'nextSibling') and hasattr(i.nextSibling, 'name') and i.nextSibling.name == 'a' and i.nextSibling.get('href') and 'footnote' in i.nextSibling.get('href'):
footnotes.append(get_footnote(i.nextSibling))
if isinstance(i, str):
accumulator.append(fix_str(i))
elif i.name == 'i' and len(i.contents) == 1:
accumulator.append('__' + fix_str(i.contents[0]) + '__')
elif i.name == 'b' and len(i.contents) == 1:
accumulator.append('**' + fix_str(i.contents[0]) + '**')
elif (i.name == 'span' or i.name=='a' or i.name=='sup') and i.strings is not None:
accumulator.append(fix_str(''.join(list(i.strings))))
else:
print(i)
exit("ERROR: I don't know what to do with this.")
else:
item = item.replace('<i>', '__')
item = item.replace('</i>', '__')
item = item.replace('<b>', '**')
item = item.replace('</b>', '**')
item = re.sub(r'<[^>]{2,100}>', '', item)
accumulator.append(item)
o = [''.join(accumulator)]
for f in footnotes:
o.append('\n' + '\t' * item_level + f)
return ''.join(o)
for outer_list in soup.contents:
if outer_list.name != 'div' or outer_list.attrs.get('type') not in html_types:
continue
iterate_children(outer_list, level)
break
for i in range(len(output)):
tmp = parse_items(output[i])
# The below is an ugly hack to get rid of extra spaces at the beginning
# of a line. For some reason, re.sub was just corrupting things.
if re.match(r'^([\t]*)[ ]+(\S+)', tmp):
start = end = 0
for j in range(len(tmp)):
if start == 0 and tmp[j] != '\t':
start = j
elif start > 0 and tmp[j] != ' ':
end = j
break
output[i] = str(tmp[:start] + tmp[end:])
else:
output[i] = tmp
return output
def parse_args() -> argparse.Namespace:
'''Handles command-line arguments'''
parser = argparse.ArgumentParser(description='''Converts LibreOffice sermon outlines to a format suitable for pasting into Dynalist.
Prior to running this script, the sermon should already be exported as the specified type.''')
infile = 'File to read for input. Default: stdin'
outfile = 'File to write for output. Default: stdout'
force = 'Overwrite the output file if it already exists'
type_ = 'The type of the input file. Default: text. Other options: html.'
add = parser.add_argument
add('-i', '--infile', metavar='FILE', default=None, help=infile)
add('-o', '--outfile', metavar='FILE', default=None, help=outfile)
add('-f', '--force', action='store_true', default=False, help=force)
add('-t', '--type', default='text', help=type_)
return parser.parse_args()
def main():
args | __name__ == '__main__':
main()
| = parse_args()
if args.infile:
if not os.path.isfile(args.infile):
exit('ERROR: Input file doesn\'t exist: {}'.format(args.infile))
with open(args.infile) as f:
infile = f.read()
else:
with open(sys.stdin) as f:
infile = f.read()
if args.outfile and os.path.exists(args.outfile):
if not os.path.isfile(args.outfile):
exit("ERROR: Output file isn't a regular file")
if not args.force:
exit("ERROR: Can't overwrite existing output file. Use --force to override.")
output = []
if args.type == 'text':
symbols = list_symbols()
output = [format_text_line(i.strip(), symbols) for i in infile.split('\n') if len(i.strip()) > 0]
elif args.type == 'html':
if html_mode_available:
soup = BeautifulSoup(infile, 'html.parser')
output = format_html(soup)
else:
exit("ERROR: Please install Beautiful Soup 4 (Ubuntu package python-bs4) to use HTML mode.")
else:
exit("ERROR: Invalid input file type")
if args.outfile:
outfile = open(args.outfile, 'w')
else:
outfile = sys.stdout
outfile.write('\n'.join([i for i in output if len(i) > 0]))
outfile.close()
if | identifier_body |
sermons_to_dynalist.py | #!/usr/bin/env python3
#
################################################################################
# Important note:
# Because of the highly individual nature of the ODT files to be processed, each
# user will have to adjust this script according to their needs. It will fail
# out of the box unless you happen to format your source files the same way I
# do. Areas that I think you will most likely have to modify are commented with
# three leading hashes, like so: ###.
################################################################################
import argparse
import os
import re
import string
import sys
html_mode_available = True
try:
from bs4 import BeautifulSoup
except ImportError:
html_mode_available = False
# List types that may be set in HTML by LibreOffice. The type may also be unspecified.
html_types = set(('I','A','1','i','a', 'disc', 'circle', 'square'))
### These lists represent the sequence of outline levels in your original
### document, as they get converted to plain text by LibreOffice. You'll need to
### adjust it to match your usage. Make sure the order is correct (outermost
### first) and the lists are long enough to cover every possible list in your
### original. Note that if you have any levels which repeat symbols, invoking
### this script with --type=text will produce incorrect output.
###
### This function is only in use with --type=text in effect.
def list_symbols() -> list:
'''These symbols are used when converting from plain text, not HTML.'''
symbols = [['I','II','III','IV','V','VI','VII','VIII','IX','X']]
symbols.append(list(string.ascii_uppercase)) # A, B, C, ..., Z
symbols.append([str(i) for i in range(1, 21)]) # 1, 2, 3, ..., 20
symbols.append(list(string.ascii_lowercase)) # a, b, c, ..., z
symbols.append(['i','ii','iii','iv','v','vi','vii','viii','ix','x'])
symbols.append(['α','β','γ','δ','ε'])
return symbols
def format_item(text: str, depth: int) -> str:
'''Add tabs to the beginning of each item.'''
assert isinstance(depth, int) and depth >= 0
return '\t' * depth + text
def format_text_line(line: str, symbols: list) -> str:
'''Process a single item, when --type == "text"'''
depth = 0
parts = line.partition('.')
if parts[1] == '.':
for s in symbols:
if parts[0] in s:
break
else:
depth += 1
return format_item(parts[2].strip(), depth)
# Only called when --type == 'html'
def format_html(soup):
'''Formats the converted-to-html source for Dynalist'''
output = []
level = -1
def iterate_children(children, level):
'''Iterates over HTML list tags and figures out what is what.'''
level += 1
for child in children:
if child.name == 'p':
output.append({'level': level, 'item': child})
elif child.name == 'div':
iterate_children(child, level)
#elif child.name == 'div' and child.attrs.get('type') not in html_types:
# iterate_children(child, level-1)
def parse_items(item):
'''Parses each item and converts it to a string formatted for Dynalist'''
def fix_str(st):
'''Removes extraneous newlines and whitespace'''
st = st.replace('\n', ' ')
return re.sub(r'\s+', ' ', st)
def get_footnote(a):
href = a['href']
for parent in a.parents:
if parent.parent is None:
footnote_arr = [i for i in parent.select(href)[0].next_siblings]
footnote = []
for fn in footnote_arr:
footnote.append(str(fn))
footnote = ''.join(footnote)
break
return parse_items({'level': 0, 'item': '#fixme #footnote ' + str(footnote)})
item_level = item['level']
item = item['item']
accumulator = ['\t' * item_level]
footnotes = []
if hasattr(item, 'contents'):
for i in item.contents:
if hasattr(i, 'select') and i.select('a') and 'footnote' in i.a.get('href'):
footnotes.append(get_footnote(i.a))
# Search also for footnotes in the next sibling. If it's several
# siblings later, it won't be found, currently.
elif hasattr(i, 'nextSibling') and hasattr(i.nextSibling, 'name') and i.nextSibling.name == 'a' and i.nextSibling.get('href') and 'footnote' in i.nextSibling.get('href'):
footnotes.append(get_footnote(i.nextSibling))
if isinstance(i, str):
accumulator.append(fix_str(i))
elif i.name == 'i' and len(i.contents) == 1:
accumulator.append('__' + fix_str(i.contents[0]) + '__')
elif i.name == 'b' and len(i.contents) == 1:
accumulator.append('**' + fix_str(i.contents[0]) + '**')
elif (i.name == 'span' or i.name=='a' or i.name=='sup') and i.strings is not None:
accumulator.append(fix_str(''.join(list(i.strings))))
else:
print(i)
exit("ERROR: I don't know what to do with this.")
else:
item | o = [''.join(accumulator)]
for f in footnotes:
o.append('\n' + '\t' * item_level + f)
return ''.join(o)
for outer_list in soup.contents:
if outer_list.name != 'div' or outer_list.attrs.get('type') not in html_types:
continue
iterate_children(outer_list, level)
break
for i in range(len(output)):
tmp = parse_items(output[i])
# The below is an ugly hack to get rid of extra spaces at the beginning
# of a line. For some reason, re.sub was just corrupting things.
if re.match(r'^([\t]*)[ ]+(\S+)', tmp):
start = end = 0
for j in range(len(tmp)):
if start == 0 and tmp[j] != '\t':
start = j
elif start > 0 and tmp[j] != ' ':
end = j
break
output[i] = str(tmp[:start] + tmp[end:])
else:
output[i] = tmp
return output
def parse_args() -> argparse.Namespace:
'''Handles command-line arguments'''
parser = argparse.ArgumentParser(description='''Converts LibreOffice sermon outlines to a format suitable for pasting into Dynalist.
Prior to running this script, the sermon should already be exported as the specified type.''')
infile = 'File to read for input. Default: stdin'
outfile = 'File to write for output. Default: stdout'
force = 'Overwrite the output file if it already exists'
type_ = 'The type of the input file. Default: text. Other options: html.'
add = parser.add_argument
add('-i', '--infile', metavar='FILE', default=None, help=infile)
add('-o', '--outfile', metavar='FILE', default=None, help=outfile)
add('-f', '--force', action='store_true', default=False, help=force)
add('-t', '--type', default='text', help=type_)
return parser.parse_args()
def main():
args = parse_args()
if args.infile:
if not os.path.isfile(args.infile):
exit('ERROR: Input file doesn\'t exist: {}'.format(args.infile))
with open(args.infile) as f:
infile = f.read()
else:
with open(sys.stdin) as f:
infile = f.read()
if args.outfile and os.path.exists(args.outfile):
if not os.path.isfile(args.outfile):
exit("ERROR: Output file isn't a regular file")
if not args.force:
exit("ERROR: Can't overwrite existing output file. Use --force to override.")
output = []
if args.type == 'text':
symbols = list_symbols()
output = [format_text_line(i.strip(), symbols) for i in infile.split('\n') if len(i.strip()) > 0]
elif args.type == 'html':
if html_mode_available:
soup = BeautifulSoup(infile, 'html.parser')
output = format_html(soup)
else:
exit("ERROR: Please install Beautiful Soup 4 (Ubuntu package python-bs4) to use HTML mode.")
else:
exit("ERROR: Invalid input file type")
if args.outfile:
outfile = open(args.outfile, 'w')
else:
outfile = sys.stdout
outfile.write('\n'.join([i for i in output if len(i) > 0]))
outfile.close()
if __name__ == '__main__':
main()
| = item.replace('<i>', '__')
item = item.replace('</i>', '__')
item = item.replace('<b>', '**')
item = item.replace('</b>', '**')
item = re.sub(r'<[^>]{2,100}>', '', item)
accumulator.append(item)
| conditional_block |
sermons_to_dynalist.py | #!/usr/bin/env python3
#
################################################################################
# Important note:
# Because of the highly individual nature of the ODT files to be processed, each
# user will have to adjust this script according to their needs. It will fail
# out of the box unless you happen to format your source files the same way I
# do. Areas that I think you will most likely have to modify are commented with
# three leading hashes, like so: ###.
################################################################################
import argparse
import os
import re
import string
import sys
html_mode_available = True
try:
from bs4 import BeautifulSoup
except ImportError:
html_mode_available = False
# List types that may be set in HTML by LibreOffice. The type may also be unspecified.
html_types = set(('I','A','1','i','a', 'disc', 'circle', 'square'))
### These lists represent the sequence of outline levels in your original
### document, as they get converted to plain text by LibreOffice. You'll need to
### adjust it to match your usage. Make sure the order is correct (outermost
### first) and the lists are long enough to cover every possible list in your
### original. Note that if you have any levels which repeat symbols, invoking
### this script with --type=text will produce incorrect output.
###
### This function is only in use with --type=text in effect.
def list_symbols() -> list:
'''These symbols are used when converting from plain text, not HTML.'''
symbols = [['I','II','III','IV','V','VI','VII','VIII','IX','X']]
symbols.append(list(string.ascii_uppercase)) # A, B, C, ..., Z
symbols.append([str(i) for i in range(1, 21)]) # 1, 2, 3, ..., 20
symbols.append(list(string.ascii_lowercase)) # a, b, c, ..., z
symbols.append(['i','ii','iii','iv','v','vi','vii','viii','ix','x'])
symbols.append(['α','β','γ','δ','ε'])
return symbols
def format_item(text: str, depth: int) -> str:
'''Add tabs to the beginning of each item.'''
assert isinstance(depth, int) and depth >= 0
return '\t' * depth + text
def format_text_line(line: str, symbols: list) -> str:
'''Process a single item, when --type == "text"'''
depth = 0
parts = line.partition('.')
if parts[1] == '.':
for s in symbols:
if parts[0] in s:
break
else:
depth += 1
return format_item(parts[2].strip(), depth)
# Only called when --type == 'html'
def format_html(soup):
'''Formats the converted-to-html source for Dynalist'''
output = []
level = -1
def iterate_children(children, level):
'''Iterates over HTML list tags and figures out what is what.'''
level += 1
for child in children:
if child.name == 'p':
output.append({'level': level, 'item': child})
elif child.name == 'div':
iterate_children(child, level)
#elif child.name == 'div' and child.attrs.get('type') not in html_types:
# iterate_children(child, level-1)
def parse_items(item):
'''Parses each item and converts it to a string formatted for Dynalist'''
def fix_str(st):
'''Removes extraneous newlines and whitespace'''
st = st.replace('\n', ' ')
return re.sub(r'\s+', ' ', st)
def get_footnote(a):
href = a['href']
for parent in a.parents:
if parent.parent is None:
footnote_arr = [i for i in parent.select(href)[0].next_siblings]
footnote = []
for fn in footnote_arr:
footnote.append(str(fn))
footnote = ''.join(footnote)
break
return parse_items({'level': 0, 'item': '#fixme #footnote ' + str(footnote)})
item_level = item['level']
item = item['item']
accumulator = ['\t' * item_level]
footnotes = []
if hasattr(item, 'contents'):
for i in item.contents:
if hasattr(i, 'select') and i.select('a') and 'footnote' in i.a.get('href'):
footnotes.append(get_footnote(i.a))
# Search also for footnotes in the next sibling. If it's several
# siblings later, it won't be found, currently.
elif hasattr(i, 'nextSibling') and hasattr(i.nextSibling, 'name') and i.nextSibling.name == 'a' and i.nextSibling.get('href') and 'footnote' in i.nextSibling.get('href'):
footnotes.append(get_footnote(i.nextSibling))
if isinstance(i, str):
accumulator.append(fix_str(i))
elif i.name == 'i' and len(i.contents) == 1:
accumulator.append('__' + fix_str(i.contents[0]) + '__')
elif i.name == 'b' and len(i.contents) == 1:
accumulator.append('**' + fix_str(i.contents[0]) + '**')
elif (i.name == 'span' or i.name=='a' or i.name=='sup') and i.strings is not None:
accumulator.append(fix_str(''.join(list(i.strings))))
else:
print(i)
exit("ERROR: I don't know what to do with this.")
else:
item = item.replace('<i>', '__')
item = item.replace('</i>', '__')
item = item.replace('<b>', '**')
item = item.replace('</b>', '**')
item = re.sub(r'<[^>]{2,100}>', '', item)
accumulator.append(item)
o = [''.join(accumulator)]
for f in footnotes:
o.append('\n' + '\t' * item_level + f)
return ''.join(o)
for outer_list in soup.contents:
if outer_list.name != 'div' or outer_list.attrs.get('type') not in html_types:
continue
iterate_children(outer_list, level)
break
for i in range(len(output)):
tmp = parse_items(output[i])
# The below is an ugly hack to get rid of extra spaces at the beginning
# of a line. For some reason, re.sub was just corrupting things.
if re.match(r'^([\t]*)[ ]+(\S+)', tmp):
start = end = 0
for j in range(len(tmp)):
if start == 0 and tmp[j] != '\t':
start = j
elif start > 0 and tmp[j] != ' ':
end = j
break
output[i] = str(tmp[:start] + tmp[end:])
else:
output[i] = tmp
return output
def parse_args() -> argparse.Namespace:
'''Handles command-line arguments'''
parser = argparse.ArgumentParser(description='''Converts LibreOffice sermon outlines to a format suitable for pasting into Dynalist.
Prior to running this script, the sermon should already be exported as the specified type.''')
infile = 'File to read for input. Default: stdin'
outfile = 'File to write for output. Default: stdout'
force = 'Overwrite the output file if it already exists'
type_ = 'The type of the input file. Default: text. Other options: html.'
add = parser.add_argument
add('-i', '--infile', metavar='FILE', default=None, help=infile)
add('-o', '--outfile', metavar='FILE', default=None, help=outfile)
add('-f', '--force', action='store_true', default=False, help=force)
add('-t', '--type', default='text', help=type_)
return parser.parse_args()
def main( | args = parse_args()
if args.infile:
if not os.path.isfile(args.infile):
exit('ERROR: Input file doesn\'t exist: {}'.format(args.infile))
with open(args.infile) as f:
infile = f.read()
else:
with open(sys.stdin) as f:
infile = f.read()
if args.outfile and os.path.exists(args.outfile):
if not os.path.isfile(args.outfile):
exit("ERROR: Output file isn't a regular file")
if not args.force:
exit("ERROR: Can't overwrite existing output file. Use --force to override.")
output = []
if args.type == 'text':
symbols = list_symbols()
output = [format_text_line(i.strip(), symbols) for i in infile.split('\n') if len(i.strip()) > 0]
elif args.type == 'html':
if html_mode_available:
soup = BeautifulSoup(infile, 'html.parser')
output = format_html(soup)
else:
exit("ERROR: Please install Beautiful Soup 4 (Ubuntu package python-bs4) to use HTML mode.")
else:
exit("ERROR: Invalid input file type")
if args.outfile:
outfile = open(args.outfile, 'w')
else:
outfile = sys.stdout
outfile.write('\n'.join([i for i in output if len(i) > 0]))
outfile.close()
if __name__ == '__main__':
main()
| ):
| identifier_name |
tutorial_quanconv_cifar10.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
- 1. This model has 1,068,298 paramters and quantization compression strategy(weight:8 bits, active: 8 bits here, you can change the setting),
after 705 epoches' training with GPU, test accurcy of 84.0% was found.
- 2. For simplified CNN layers see "Convolutional layer (Simplified)"
in read the docs website.
- 3. Data augmentation without TFRecord see `tutorial_image_preprocess.py` !!
Links
-------
.. paper:https://arxiv.org/abs/1712.05877
Note
------
The optimizers between official code and this code are different.
Description
-----------
The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness.
.. Randomly distort the image contrast.
Speed Up
--------
Reading images from disk and distorting them can use a non-trivial amount
of processing time. To prevent these operations from slowing down training,
we run them inside 16 separate threads which continuously fill a TensorFlow queue.
"""
import multiprocessing
import time
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import (Dense, Flatten, Input, MaxPool2d, QuanConv2dWithBN, QuanDense)
from tensorlayer.models import Model
tl.logging.set_verbosity(tl.logging.DEBUG)
# Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
# prepare cifar10 data
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
def model(input_shape, n_classes, bitW, bitA):
in_net = Input(shape=input_shape, name='input')
net = QuanConv2dWithBN(64, (5, 5), (1, 1), act='relu', padding='SAME', bitW=bitW, bitA=bitA, name='qcnnbn1')(in_net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
net = QuanConv2dWithBN(64, (5, 5), (1, 1), padding='SAME', act='relu', bitW=bitW, bitA=bitA, name='qcnnbn2')(net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
net = Flatten(name='flatten')(net)
net = QuanDense(384, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd1relu')(net)
net = QuanDense(192, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd2relu')(net)
net = Dense(n_classes, act=None, name='output')(net)
net = Model(inputs=in_net, outputs=net, name='dorefanet')
return net
# training settings
bitW = 8
bitA = 8
net = model([None, 24, 24, 3], n_classes=10, bitW=bitW, bitA=bitA)
batch_size = 128
n_epoch = 50000
learning_rate = 0.0001
print_freq = 5
n_step_epoch = int(len(y_train) / batch_size)
n_step = n_epoch * n_step_epoch
shuffle_buffer_size = 128
optimizer = tf.optimizers.Adam(learning_rate)
cost = tl.cost.cross_entropy
def generator_train():
inputs = X_train
targets = y_train
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target
def generator_test():
inputs = X_test
targets = y_test
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target
def _map_fn_train(img, target):
# 1. Randomly crop a [height, width] section of the image.
img = tf.image.random_crop(img, [24, 24, 3])
# 2. Randomly flip the image horizontally.
img = tf.image.random_flip_left_right(img)
# 3. Randomly change brightness.
img = tf.image.random_brightness(img, max_delta=63)
# 4. Randomly change contrast.
img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
# 5. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
target = tf.reshape(target, ())
return img, target
def _map_fn_test(img, target):
# 1. Crop the central [height, width] of the image.
img = tf.image.resize_with_pad(img, 24, 24)
# 2. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
img = tf.reshape(img, (24, 24, 3))
target = tf.reshape(target, ())
return img, target
def | (network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
with tf.GradientTape() as tape:
y_pred = network(X_batch)
_loss = cost(y_pred, y_batch)
grad = tape.gradient(_loss, network.trainable_weights)
train_op.apply_gradients(zip(grad, network.trainable_weights))
if acc is not None:
_acc = acc(y_pred, y_batch)
return _loss, _acc
else:
return _loss, None
def accuracy(_logits, y_batch):
return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
# dataset API and augmentation
train_ds = tf.data.Dataset.from_generator(
generator_train, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# train_ds = train_ds.repeat(n_epoch)
train_ds = train_ds.shuffle(shuffle_buffer_size)
train_ds = train_ds.prefetch(buffer_size=4096)
train_ds = train_ds.batch(batch_size)
train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
# value = train_ds.make_one_shot_iterator().get_next()
test_ds = tf.data.Dataset.from_generator(
generator_test, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# test_ds = test_ds.shuffle(shuffle_buffer_size)
# test_ds = test_ds.repeat(n_epoch)
test_ds = test_ds.prefetch(buffer_size=4096)
test_ds = test_ds.batch(batch_size)
test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count())
# value_test = test_ds.make_one_shot_iterator().get_next()
for epoch in range(n_epoch):
start_time = time.time()
train_loss, train_acc, n_iter = 0, 0, 0
net.train()
for X_batch, y_batch in train_ds:
_loss, acc = _train_step(net, X_batch, y_batch, cost=cost, train_op=optimizer, acc=accuracy)
train_loss += _loss
train_acc += acc
n_iter += 1
# use training and evaluation sets to evaluate the model every print_freq epoch
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
print(" train loss: {}".format(train_loss / n_iter))
print(" train acc: {}".format(train_acc / n_iter))
net.eval()
val_loss, val_acc, n_val_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch) # is_train=False, disable dropout
val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_val_iter += 1
print(" val loss: {}".format(val_loss / n_val_iter))
print(" val acc: {}".format(val_acc / n_val_iter))
# use testing data to evaluate the model
net.eval()
test_loss, test_acc, n_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch)
test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_iter += 1
print(" test loss: {}".format(test_loss / n_iter))
print(" test acc: {}".format(test_acc / n_iter))
| _train_step | identifier_name |
tutorial_quanconv_cifar10.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
- 1. This model has 1,068,298 paramters and quantization compression strategy(weight:8 bits, active: 8 bits here, you can change the setting),
after 705 epoches' training with GPU, test accurcy of 84.0% was found.
- 2. For simplified CNN layers see "Convolutional layer (Simplified)"
in read the docs website.
- 3. Data augmentation without TFRecord see `tutorial_image_preprocess.py` !!
Links
-------
.. paper:https://arxiv.org/abs/1712.05877
Note
------
The optimizers between official code and this code are different.
Description
-----------
The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness.
.. Randomly distort the image contrast.
Speed Up
--------
Reading images from disk and distorting them can use a non-trivial amount
of processing time. To prevent these operations from slowing down training,
we run them inside 16 separate threads which continuously fill a TensorFlow queue.
"""
import multiprocessing
import time
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import (Dense, Flatten, Input, MaxPool2d, QuanConv2dWithBN, QuanDense)
from tensorlayer.models import Model
tl.logging.set_verbosity(tl.logging.DEBUG)
# Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
# prepare cifar10 data
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
def model(input_shape, n_classes, bitW, bitA):
in_net = Input(shape=input_shape, name='input')
net = QuanConv2dWithBN(64, (5, 5), (1, 1), act='relu', padding='SAME', bitW=bitW, bitA=bitA, name='qcnnbn1')(in_net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
net = QuanConv2dWithBN(64, (5, 5), (1, 1), padding='SAME', act='relu', bitW=bitW, bitA=bitA, name='qcnnbn2')(net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
net = Flatten(name='flatten')(net)
net = QuanDense(384, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd1relu')(net)
net = QuanDense(192, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd2relu')(net)
net = Dense(n_classes, act=None, name='output')(net)
net = Model(inputs=in_net, outputs=net, name='dorefanet')
return net
# training settings
bitW = 8
bitA = 8
net = model([None, 24, 24, 3], n_classes=10, bitW=bitW, bitA=bitA)
batch_size = 128
n_epoch = 50000
learning_rate = 0.0001
print_freq = 5
n_step_epoch = int(len(y_train) / batch_size)
n_step = n_epoch * n_step_epoch
shuffle_buffer_size = 128
optimizer = tf.optimizers.Adam(learning_rate)
cost = tl.cost.cross_entropy
def generator_train():
inputs = X_train
targets = y_train
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target
def generator_test():
inputs = X_test
targets = y_test
if len(inputs) != len(targets):
|
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target
def _map_fn_train(img, target):
# 1. Randomly crop a [height, width] section of the image.
img = tf.image.random_crop(img, [24, 24, 3])
# 2. Randomly flip the image horizontally.
img = tf.image.random_flip_left_right(img)
# 3. Randomly change brightness.
img = tf.image.random_brightness(img, max_delta=63)
# 4. Randomly change contrast.
img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
# 5. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
target = tf.reshape(target, ())
return img, target
def _map_fn_test(img, target):
# 1. Crop the central [height, width] of the image.
img = tf.image.resize_with_pad(img, 24, 24)
# 2. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
img = tf.reshape(img, (24, 24, 3))
target = tf.reshape(target, ())
return img, target
def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
with tf.GradientTape() as tape:
y_pred = network(X_batch)
_loss = cost(y_pred, y_batch)
grad = tape.gradient(_loss, network.trainable_weights)
train_op.apply_gradients(zip(grad, network.trainable_weights))
if acc is not None:
_acc = acc(y_pred, y_batch)
return _loss, _acc
else:
return _loss, None
def accuracy(_logits, y_batch):
return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
# dataset API and augmentation
train_ds = tf.data.Dataset.from_generator(
generator_train, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# train_ds = train_ds.repeat(n_epoch)
train_ds = train_ds.shuffle(shuffle_buffer_size)
train_ds = train_ds.prefetch(buffer_size=4096)
train_ds = train_ds.batch(batch_size)
train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
# value = train_ds.make_one_shot_iterator().get_next()
test_ds = tf.data.Dataset.from_generator(
generator_test, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# test_ds = test_ds.shuffle(shuffle_buffer_size)
# test_ds = test_ds.repeat(n_epoch)
test_ds = test_ds.prefetch(buffer_size=4096)
test_ds = test_ds.batch(batch_size)
test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count())
# value_test = test_ds.make_one_shot_iterator().get_next()
for epoch in range(n_epoch):
start_time = time.time()
train_loss, train_acc, n_iter = 0, 0, 0
net.train()
for X_batch, y_batch in train_ds:
_loss, acc = _train_step(net, X_batch, y_batch, cost=cost, train_op=optimizer, acc=accuracy)
train_loss += _loss
train_acc += acc
n_iter += 1
# use training and evaluation sets to evaluate the model every print_freq epoch
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
print(" train loss: {}".format(train_loss / n_iter))
print(" train acc: {}".format(train_acc / n_iter))
net.eval()
val_loss, val_acc, n_val_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch) # is_train=False, disable dropout
val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_val_iter += 1
print(" val loss: {}".format(val_loss / n_val_iter))
print(" val acc: {}".format(val_acc / n_val_iter))
# use testing data to evaluate the model
net.eval()
test_loss, test_acc, n_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch)
test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_iter += 1
print(" test loss: {}".format(test_loss / n_iter))
print(" test acc: {}".format(test_acc / n_iter))
| raise AssertionError("The length of inputs and targets should be equal") | conditional_block |
tutorial_quanconv_cifar10.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
- 1. This model has 1,068,298 paramters and quantization compression strategy(weight:8 bits, active: 8 bits here, you can change the setting),
after 705 epoches' training with GPU, test accurcy of 84.0% was found.
- 2. For simplified CNN layers see "Convolutional layer (Simplified)"
in read the docs website.
- 3. Data augmentation without TFRecord see `tutorial_image_preprocess.py` !!
Links
-------
.. paper:https://arxiv.org/abs/1712.05877
Note
------
The optimizers between official code and this code are different.
Description
-----------
The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness.
.. Randomly distort the image contrast.
Speed Up
--------
Reading images from disk and distorting them can use a non-trivial amount
of processing time. To prevent these operations from slowing down training,
we run them inside 16 separate threads which continuously fill a TensorFlow queue.
"""
import multiprocessing
import time
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import (Dense, Flatten, Input, MaxPool2d, QuanConv2dWithBN, QuanDense)
from tensorlayer.models import Model
tl.logging.set_verbosity(tl.logging.DEBUG)
# Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
# prepare cifar10 data
X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
def model(input_shape, n_classes, bitW, bitA):
in_net = Input(shape=input_shape, name='input')
net = QuanConv2dWithBN(64, (5, 5), (1, 1), act='relu', padding='SAME', bitW=bitW, bitA=bitA, name='qcnnbn1')(in_net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
net = QuanConv2dWithBN(64, (5, 5), (1, 1), padding='SAME', act='relu', bitW=bitW, bitA=bitA, name='qcnnbn2')(net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
net = Flatten(name='flatten')(net)
net = QuanDense(384, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd1relu')(net)
net = QuanDense(192, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd2relu')(net)
net = Dense(n_classes, act=None, name='output')(net)
net = Model(inputs=in_net, outputs=net, name='dorefanet')
return net
# training settings
bitW = 8
bitA = 8
net = model([None, 24, 24, 3], n_classes=10, bitW=bitW, bitA=bitA)
batch_size = 128
n_epoch = 50000
learning_rate = 0.0001
print_freq = 5
n_step_epoch = int(len(y_train) / batch_size)
n_step = n_epoch * n_step_epoch
shuffle_buffer_size = 128
optimizer = tf.optimizers.Adam(learning_rate)
cost = tl.cost.cross_entropy
def generator_train():
inputs = X_train
targets = y_train
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target
def generator_test():
|
def _map_fn_train(img, target):
# 1. Randomly crop a [height, width] section of the image.
img = tf.image.random_crop(img, [24, 24, 3])
# 2. Randomly flip the image horizontally.
img = tf.image.random_flip_left_right(img)
# 3. Randomly change brightness.
img = tf.image.random_brightness(img, max_delta=63)
# 4. Randomly change contrast.
img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
# 5. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
target = tf.reshape(target, ())
return img, target
def _map_fn_test(img, target):
# 1. Crop the central [height, width] of the image.
img = tf.image.resize_with_pad(img, 24, 24)
# 2. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
img = tf.reshape(img, (24, 24, 3))
target = tf.reshape(target, ())
return img, target
def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
with tf.GradientTape() as tape:
y_pred = network(X_batch)
_loss = cost(y_pred, y_batch)
grad = tape.gradient(_loss, network.trainable_weights)
train_op.apply_gradients(zip(grad, network.trainable_weights))
if acc is not None:
_acc = acc(y_pred, y_batch)
return _loss, _acc
else:
return _loss, None
def accuracy(_logits, y_batch):
return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
# dataset API and augmentation
train_ds = tf.data.Dataset.from_generator(
generator_train, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# train_ds = train_ds.repeat(n_epoch)
train_ds = train_ds.shuffle(shuffle_buffer_size)
train_ds = train_ds.prefetch(buffer_size=4096)
train_ds = train_ds.batch(batch_size)
train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
# value = train_ds.make_one_shot_iterator().get_next()
test_ds = tf.data.Dataset.from_generator(
generator_test, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# test_ds = test_ds.shuffle(shuffle_buffer_size)
# test_ds = test_ds.repeat(n_epoch)
test_ds = test_ds.prefetch(buffer_size=4096)
test_ds = test_ds.batch(batch_size)
test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count())
# value_test = test_ds.make_one_shot_iterator().get_next()
for epoch in range(n_epoch):
start_time = time.time()
train_loss, train_acc, n_iter = 0, 0, 0
net.train()
for X_batch, y_batch in train_ds:
_loss, acc = _train_step(net, X_batch, y_batch, cost=cost, train_op=optimizer, acc=accuracy)
train_loss += _loss
train_acc += acc
n_iter += 1
# use training and evaluation sets to evaluate the model every print_freq epoch
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
print(" train loss: {}".format(train_loss / n_iter))
print(" train acc: {}".format(train_acc / n_iter))
net.eval()
val_loss, val_acc, n_val_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch) # is_train=False, disable dropout
val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_val_iter += 1
print(" val loss: {}".format(val_loss / n_val_iter))
print(" val acc: {}".format(val_acc / n_val_iter))
# use testing data to evaluate the model
net.eval()
test_loss, test_acc, n_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch)
test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_iter += 1
print(" test loss: {}".format(test_loss / n_iter))
print(" test acc: {}".format(test_acc / n_iter))
| inputs = X_test
targets = y_test
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target | identifier_body |
tutorial_quanconv_cifar10.py | #! /usr/bin/python
# -*- coding: utf-8 -*-
"""
- 1. This model has 1,068,298 paramters and quantization compression strategy(weight:8 bits, active: 8 bits here, you can change the setting),
after 705 epoches' training with GPU, test accurcy of 84.0% was found.
- 2. For simplified CNN layers see "Convolutional layer (Simplified)"
in read the docs website.
- 3. Data augmentation without TFRecord see `tutorial_image_preprocess.py` !!
Links
-------
.. paper:https://arxiv.org/abs/1712.05877
Note
------
The optimizers between official code and this code are different.
Description
-----------
The images are processed as follows:
.. They are cropped to 24 x 24 pixels, centrally for evaluation or randomly for training.
.. They are approximately whitened to make the model insensitive to dynamic range.
For training, we additionally apply a series of random distortions to
artificially increase the data set size:
.. Randomly flip the image from left to right.
.. Randomly distort the image brightness.
.. Randomly distort the image contrast.
Speed Up
--------
Reading images from disk and distorting them can use a non-trivial amount
of processing time. To prevent these operations from slowing down training,
we run them inside 16 separate threads which continuously fill a TensorFlow queue.
"""
import multiprocessing
import time
import numpy as np
import tensorflow as tf
import tensorlayer as tl
from tensorlayer.layers import (Dense, Flatten, Input, MaxPool2d, QuanConv2dWithBN, QuanDense)
from tensorlayer.models import Model
tl.logging.set_verbosity(tl.logging.DEBUG)
| X_train, y_train, X_test, y_test = tl.files.load_cifar10_dataset(shape=(-1, 32, 32, 3), plotable=False)
def model(input_shape, n_classes, bitW, bitA):
in_net = Input(shape=input_shape, name='input')
net = QuanConv2dWithBN(64, (5, 5), (1, 1), act='relu', padding='SAME', bitW=bitW, bitA=bitA, name='qcnnbn1')(in_net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool1')(net)
net = QuanConv2dWithBN(64, (5, 5), (1, 1), padding='SAME', act='relu', bitW=bitW, bitA=bitA, name='qcnnbn2')(net)
net = MaxPool2d((3, 3), (2, 2), padding='SAME', name='pool2')(net)
net = Flatten(name='flatten')(net)
net = QuanDense(384, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd1relu')(net)
net = QuanDense(192, act=tf.nn.relu, bitW=bitW, bitA=bitA, name='qd2relu')(net)
net = Dense(n_classes, act=None, name='output')(net)
net = Model(inputs=in_net, outputs=net, name='dorefanet')
return net
# training settings
bitW = 8
bitA = 8
net = model([None, 24, 24, 3], n_classes=10, bitW=bitW, bitA=bitA)
batch_size = 128
n_epoch = 50000
learning_rate = 0.0001
print_freq = 5
n_step_epoch = int(len(y_train) / batch_size)
n_step = n_epoch * n_step_epoch
shuffle_buffer_size = 128
optimizer = tf.optimizers.Adam(learning_rate)
cost = tl.cost.cross_entropy
def generator_train():
inputs = X_train
targets = y_train
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target
def generator_test():
inputs = X_test
targets = y_test
if len(inputs) != len(targets):
raise AssertionError("The length of inputs and targets should be equal")
for _input, _target in zip(inputs, targets):
# yield _input.encode('utf-8'), _target.encode('utf-8')
yield _input, _target
def _map_fn_train(img, target):
# 1. Randomly crop a [height, width] section of the image.
img = tf.image.random_crop(img, [24, 24, 3])
# 2. Randomly flip the image horizontally.
img = tf.image.random_flip_left_right(img)
# 3. Randomly change brightness.
img = tf.image.random_brightness(img, max_delta=63)
# 4. Randomly change contrast.
img = tf.image.random_contrast(img, lower=0.2, upper=1.8)
# 5. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
target = tf.reshape(target, ())
return img, target
def _map_fn_test(img, target):
# 1. Crop the central [height, width] of the image.
img = tf.image.resize_with_pad(img, 24, 24)
# 2. Subtract off the mean and divide by the variance of the pixels.
img = tf.image.per_image_standardization(img)
img = tf.reshape(img, (24, 24, 3))
target = tf.reshape(target, ())
return img, target
def _train_step(network, X_batch, y_batch, cost, train_op=tf.optimizers.Adam(learning_rate=0.0001), acc=None):
with tf.GradientTape() as tape:
y_pred = network(X_batch)
_loss = cost(y_pred, y_batch)
grad = tape.gradient(_loss, network.trainable_weights)
train_op.apply_gradients(zip(grad, network.trainable_weights))
if acc is not None:
_acc = acc(y_pred, y_batch)
return _loss, _acc
else:
return _loss, None
def accuracy(_logits, y_batch):
return np.mean(np.equal(np.argmax(_logits, 1), y_batch))
# dataset API and augmentation
train_ds = tf.data.Dataset.from_generator(
generator_train, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# train_ds = train_ds.repeat(n_epoch)
train_ds = train_ds.shuffle(shuffle_buffer_size)
train_ds = train_ds.prefetch(buffer_size=4096)
train_ds = train_ds.batch(batch_size)
train_ds = train_ds.map(_map_fn_train, num_parallel_calls=multiprocessing.cpu_count())
# value = train_ds.make_one_shot_iterator().get_next()
test_ds = tf.data.Dataset.from_generator(
generator_test, output_types=(tf.float32, tf.int32)
) # , output_shapes=((24, 24, 3), (1)))
# test_ds = test_ds.shuffle(shuffle_buffer_size)
# test_ds = test_ds.repeat(n_epoch)
test_ds = test_ds.prefetch(buffer_size=4096)
test_ds = test_ds.batch(batch_size)
test_ds = test_ds.map(_map_fn_test, num_parallel_calls=multiprocessing.cpu_count())
# value_test = test_ds.make_one_shot_iterator().get_next()
for epoch in range(n_epoch):
start_time = time.time()
train_loss, train_acc, n_iter = 0, 0, 0
net.train()
for X_batch, y_batch in train_ds:
_loss, acc = _train_step(net, X_batch, y_batch, cost=cost, train_op=optimizer, acc=accuracy)
train_loss += _loss
train_acc += acc
n_iter += 1
# use training and evaluation sets to evaluate the model every print_freq epoch
if epoch + 1 == 1 or (epoch + 1) % print_freq == 0:
print("Epoch {} of {} took {}".format(epoch + 1, n_epoch, time.time() - start_time))
print(" train loss: {}".format(train_loss / n_iter))
print(" train acc: {}".format(train_acc / n_iter))
net.eval()
val_loss, val_acc, n_val_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch) # is_train=False, disable dropout
val_loss += tl.cost.cross_entropy(_logits, y_batch, name='eval_loss')
val_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_val_iter += 1
print(" val loss: {}".format(val_loss / n_val_iter))
print(" val acc: {}".format(val_acc / n_val_iter))
# use testing data to evaluate the model
net.eval()
test_loss, test_acc, n_iter = 0, 0, 0
for X_batch, y_batch in test_ds:
_logits = net(X_batch)
test_loss += tl.cost.cross_entropy(_logits, y_batch, name='test_loss')
test_acc += np.mean(np.equal(np.argmax(_logits, 1), y_batch))
n_iter += 1
print(" test loss: {}".format(test_loss / n_iter))
print(" test acc: {}".format(test_acc / n_iter)) | # Download data, and convert to TFRecord format, see ```tutorial_tfrecord.py```
# prepare cifar10 data | random_line_split |
poloniexFactories.ts | /***************************************************************************************************************************
* @license *
* Copyright 2017 Coinbase, Inc. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on *
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the *
* License for the specific language governing permissions and limitations under the License. *
***************************************************************************************************************************/
import { Logger } from '../utils/Logger';
import { PoloniexFeed, PoloniexFeedConfig } from '../exchanges/poloniex/PoloniexFeed';
import { gdaxToPolo, getAllProductInfo, POLONIEX_WS_FEED, PoloniexProducts } from '../exchanges/poloniex/PoloniexCommon';
import { ExchangeFeedConfig, getFeed } from '../exchanges/ExchangeFeed';
import { ExchangeAuthConfig } from '../exchanges/AuthConfig';
import CCXTExchangeWrapper from '../exchanges/ccxt';
let publicAPIInstance: CCXTExchangeWrapper;
/**
* A convenience function that returns a GDAXExchangeAPI instance for accessing REST methods conveniently. If API
* key details are found in the GDAX_KEY etc. envars, they will be used
*/
export function DefaultAPI(logger: Logger): CCXTExchangeWrapper {
if (!publicAPIInstance) {
publicAPIInstance = CCXTExchangeWrapper.createExchange('poloniex', { key: process.env.POLONIEX_KEY, secret: process.env.POLONIEX_SECRET }, logger);
}
return publicAPIInstance;
}
/**
* Convenience function to connect to and subscribe to the given channels
* @param options {object} Any options from GDAXConfig will be accepted
* @param products {string[]} An array of products to subscribe to
*/
export function getSubscribedFeeds(options: any, products: string[]): Promise<PoloniexFeed> {
return getAllProductInfo(false, options.logger).then((info: PoloniexProducts) => {
const config: PoloniexFeedConfig = {
wsUrl: options.wsUrl || POLONIEX_WS_FEED,
auth: options.auth,
logger: options.logger,
tickerChannel: !!options.tickerChannel
};
const feed = getFeed<PoloniexFeed, ExchangeFeedConfig>(PoloniexFeed, config);
if (!feed.isConnected()) {
feed.reconnect(0);
feed.on('websocket-open', () => {
subscribeToAll(products, feed, info);
});
} else {
subscribeToAll(products, feed, info);
}
return Promise.resolve(feed);
});
}
function subscribeToAll(products: string[], feed: PoloniexFeed, info: PoloniexProducts) {
products.forEach((product: string) => {
const id: number = getChannelId(product, info);
if (id > 0) {
feed.subscribe(id);
}
});
}
function getChannelId(product: string, info: PoloniexProducts): number {
let result: number;
for (const id in info) {
const symbol = info[id].id;
const found = (symbol === product) || (symbol === gdaxToPolo(product));
result = found ? info[id].sourceData.id : -1;
if (found) {
break;
}
}
return result;
}
/**
* This is a straightforward wrapper around getSubscribedFeeds using the Factory pattern with the most commonly used
* defaults. For customised feeds, use getSubscribedFeeds instead.
*
* It is assumed that your API keys are stored in the GDAX_KEY, GDAX_SECRET and GDAX_PASSPHRASE envars
*/
export function | (logger: Logger, products: string[], auth?: ExchangeAuthConfig): Promise<PoloniexFeed> {
// auth = auth || {
// key: process.env.POLONIEX_KEY,
// secret: process.env.POLONIEX_SECRET,
// };
auth = null; // Polo doesn't provide auth feeds yet
return getSubscribedFeeds({ auth: auth, logger: logger, tickerChannel: true }, products);
}
| FeedFactory | identifier_name |
poloniexFactories.ts | /***************************************************************************************************************************
* @license *
* Copyright 2017 Coinbase, Inc. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on *
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the *
* License for the specific language governing permissions and limitations under the License. *
***************************************************************************************************************************/
import { Logger } from '../utils/Logger';
import { PoloniexFeed, PoloniexFeedConfig } from '../exchanges/poloniex/PoloniexFeed';
import { gdaxToPolo, getAllProductInfo, POLONIEX_WS_FEED, PoloniexProducts } from '../exchanges/poloniex/PoloniexCommon';
import { ExchangeFeedConfig, getFeed } from '../exchanges/ExchangeFeed';
import { ExchangeAuthConfig } from '../exchanges/AuthConfig';
import CCXTExchangeWrapper from '../exchanges/ccxt';
let publicAPIInstance: CCXTExchangeWrapper;
/**
* A convenience function that returns a GDAXExchangeAPI instance for accessing REST methods conveniently. If API
* key details are found in the GDAX_KEY etc. envars, they will be used
*/
export function DefaultAPI(logger: Logger): CCXTExchangeWrapper {
if (!publicAPIInstance) {
publicAPIInstance = CCXTExchangeWrapper.createExchange('poloniex', { key: process.env.POLONIEX_KEY, secret: process.env.POLONIEX_SECRET }, logger);
}
return publicAPIInstance;
}
/**
* Convenience function to connect to and subscribe to the given channels
* @param options {object} Any options from GDAXConfig will be accepted
* @param products {string[]} An array of products to subscribe to
*/
export function getSubscribedFeeds(options: any, products: string[]): Promise<PoloniexFeed> |
function subscribeToAll(products: string[], feed: PoloniexFeed, info: PoloniexProducts) {
products.forEach((product: string) => {
const id: number = getChannelId(product, info);
if (id > 0) {
feed.subscribe(id);
}
});
}
function getChannelId(product: string, info: PoloniexProducts): number {
let result: number;
for (const id in info) {
const symbol = info[id].id;
const found = (symbol === product) || (symbol === gdaxToPolo(product));
result = found ? info[id].sourceData.id : -1;
if (found) {
break;
}
}
return result;
}
/**
* This is a straightforward wrapper around getSubscribedFeeds using the Factory pattern with the most commonly used
* defaults. For customised feeds, use getSubscribedFeeds instead.
*
* It is assumed that your API keys are stored in the GDAX_KEY, GDAX_SECRET and GDAX_PASSPHRASE envars
*/
export function FeedFactory(logger: Logger, products: string[], auth?: ExchangeAuthConfig): Promise<PoloniexFeed> {
// auth = auth || {
// key: process.env.POLONIEX_KEY,
// secret: process.env.POLONIEX_SECRET,
// };
auth = null; // Polo doesn't provide auth feeds yet
return getSubscribedFeeds({ auth: auth, logger: logger, tickerChannel: true }, products);
}
| {
return getAllProductInfo(false, options.logger).then((info: PoloniexProducts) => {
const config: PoloniexFeedConfig = {
wsUrl: options.wsUrl || POLONIEX_WS_FEED,
auth: options.auth,
logger: options.logger,
tickerChannel: !!options.tickerChannel
};
const feed = getFeed<PoloniexFeed, ExchangeFeedConfig>(PoloniexFeed, config);
if (!feed.isConnected()) {
feed.reconnect(0);
feed.on('websocket-open', () => {
subscribeToAll(products, feed, info);
});
} else {
subscribeToAll(products, feed, info);
}
return Promise.resolve(feed);
});
} | identifier_body |
poloniexFactories.ts | /***************************************************************************************************************************
* @license *
* Copyright 2017 Coinbase, Inc. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on *
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the *
* License for the specific language governing permissions and limitations under the License. *
***************************************************************************************************************************/
import { Logger } from '../utils/Logger';
import { PoloniexFeed, PoloniexFeedConfig } from '../exchanges/poloniex/PoloniexFeed';
import { gdaxToPolo, getAllProductInfo, POLONIEX_WS_FEED, PoloniexProducts } from '../exchanges/poloniex/PoloniexCommon';
import { ExchangeFeedConfig, getFeed } from '../exchanges/ExchangeFeed';
import { ExchangeAuthConfig } from '../exchanges/AuthConfig';
import CCXTExchangeWrapper from '../exchanges/ccxt';
|
/**
* A convenience function that returns a GDAXExchangeAPI instance for accessing REST methods conveniently. If API
* key details are found in the GDAX_KEY etc. envars, they will be used
*/
export function DefaultAPI(logger: Logger): CCXTExchangeWrapper {
if (!publicAPIInstance) {
publicAPIInstance = CCXTExchangeWrapper.createExchange('poloniex', { key: process.env.POLONIEX_KEY, secret: process.env.POLONIEX_SECRET }, logger);
}
return publicAPIInstance;
}
/**
* Convenience function to connect to and subscribe to the given channels
* @param options {object} Any options from GDAXConfig will be accepted
* @param products {string[]} An array of products to subscribe to
*/
export function getSubscribedFeeds(options: any, products: string[]): Promise<PoloniexFeed> {
return getAllProductInfo(false, options.logger).then((info: PoloniexProducts) => {
const config: PoloniexFeedConfig = {
wsUrl: options.wsUrl || POLONIEX_WS_FEED,
auth: options.auth,
logger: options.logger,
tickerChannel: !!options.tickerChannel
};
const feed = getFeed<PoloniexFeed, ExchangeFeedConfig>(PoloniexFeed, config);
if (!feed.isConnected()) {
feed.reconnect(0);
feed.on('websocket-open', () => {
subscribeToAll(products, feed, info);
});
} else {
subscribeToAll(products, feed, info);
}
return Promise.resolve(feed);
});
}
function subscribeToAll(products: string[], feed: PoloniexFeed, info: PoloniexProducts) {
products.forEach((product: string) => {
const id: number = getChannelId(product, info);
if (id > 0) {
feed.subscribe(id);
}
});
}
function getChannelId(product: string, info: PoloniexProducts): number {
let result: number;
for (const id in info) {
const symbol = info[id].id;
const found = (symbol === product) || (symbol === gdaxToPolo(product));
result = found ? info[id].sourceData.id : -1;
if (found) {
break;
}
}
return result;
}
/**
* This is a straightforward wrapper around getSubscribedFeeds using the Factory pattern with the most commonly used
* defaults. For customised feeds, use getSubscribedFeeds instead.
*
* It is assumed that your API keys are stored in the GDAX_KEY, GDAX_SECRET and GDAX_PASSPHRASE envars
*/
export function FeedFactory(logger: Logger, products: string[], auth?: ExchangeAuthConfig): Promise<PoloniexFeed> {
// auth = auth || {
// key: process.env.POLONIEX_KEY,
// secret: process.env.POLONIEX_SECRET,
// };
auth = null; // Polo doesn't provide auth feeds yet
return getSubscribedFeeds({ auth: auth, logger: logger, tickerChannel: true }, products);
} | let publicAPIInstance: CCXTExchangeWrapper; | random_line_split |
poloniexFactories.ts | /***************************************************************************************************************************
* @license *
* Copyright 2017 Coinbase, Inc. *
* *
* Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance *
* with the License. You may obtain a copy of the License at *
* *
* http://www.apache.org/licenses/LICENSE-2.0 *
* *
* Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on *
* an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the *
* License for the specific language governing permissions and limitations under the License. *
***************************************************************************************************************************/
import { Logger } from '../utils/Logger';
import { PoloniexFeed, PoloniexFeedConfig } from '../exchanges/poloniex/PoloniexFeed';
import { gdaxToPolo, getAllProductInfo, POLONIEX_WS_FEED, PoloniexProducts } from '../exchanges/poloniex/PoloniexCommon';
import { ExchangeFeedConfig, getFeed } from '../exchanges/ExchangeFeed';
import { ExchangeAuthConfig } from '../exchanges/AuthConfig';
import CCXTExchangeWrapper from '../exchanges/ccxt';
let publicAPIInstance: CCXTExchangeWrapper;
/**
* A convenience function that returns a GDAXExchangeAPI instance for accessing REST methods conveniently. If API
* key details are found in the GDAX_KEY etc. envars, they will be used
*/
export function DefaultAPI(logger: Logger): CCXTExchangeWrapper {
if (!publicAPIInstance) {
publicAPIInstance = CCXTExchangeWrapper.createExchange('poloniex', { key: process.env.POLONIEX_KEY, secret: process.env.POLONIEX_SECRET }, logger);
}
return publicAPIInstance;
}
/**
* Convenience function to connect to and subscribe to the given channels
* @param options {object} Any options from GDAXConfig will be accepted
* @param products {string[]} An array of products to subscribe to
*/
export function getSubscribedFeeds(options: any, products: string[]): Promise<PoloniexFeed> {
return getAllProductInfo(false, options.logger).then((info: PoloniexProducts) => {
const config: PoloniexFeedConfig = {
wsUrl: options.wsUrl || POLONIEX_WS_FEED,
auth: options.auth,
logger: options.logger,
tickerChannel: !!options.tickerChannel
};
const feed = getFeed<PoloniexFeed, ExchangeFeedConfig>(PoloniexFeed, config);
if (!feed.isConnected()) {
feed.reconnect(0);
feed.on('websocket-open', () => {
subscribeToAll(products, feed, info);
});
} else |
return Promise.resolve(feed);
});
}
function subscribeToAll(products: string[], feed: PoloniexFeed, info: PoloniexProducts) {
products.forEach((product: string) => {
const id: number = getChannelId(product, info);
if (id > 0) {
feed.subscribe(id);
}
});
}
function getChannelId(product: string, info: PoloniexProducts): number {
let result: number;
for (const id in info) {
const symbol = info[id].id;
const found = (symbol === product) || (symbol === gdaxToPolo(product));
result = found ? info[id].sourceData.id : -1;
if (found) {
break;
}
}
return result;
}
/**
* This is a straightforward wrapper around getSubscribedFeeds using the Factory pattern with the most commonly used
* defaults. For customised feeds, use getSubscribedFeeds instead.
*
* It is assumed that your API keys are stored in the GDAX_KEY, GDAX_SECRET and GDAX_PASSPHRASE envars
*/
export function FeedFactory(logger: Logger, products: string[], auth?: ExchangeAuthConfig): Promise<PoloniexFeed> {
// auth = auth || {
// key: process.env.POLONIEX_KEY,
// secret: process.env.POLONIEX_SECRET,
// };
auth = null; // Polo doesn't provide auth feeds yet
return getSubscribedFeeds({ auth: auth, logger: logger, tickerChannel: true }, products);
}
| {
subscribeToAll(products, feed, info);
} | conditional_block |
controller.py | # Copyright 2011 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unohelper
from com.sun.star.frame import XController, XTitle, XDispatchProvider
from com.sun.star.lang import XServiceInfo
from com.sun.star.task import XStatusIndicatorSupplier
class MRIUIController(unohelper.Base,
XController, XTitle, XDispatchProvider,
XStatusIndicatorSupplier, XServiceInfo):
""" Provides controller which connects between frame and model. """
IMPLE_NAME = "mytools.mri.UIController"
def __init__(self,frame, model):
self.frame = frame
self.model = model
self.ui = None
def set_ui(self, ui):
self.ui = ui
def get_imple_name(self):
return self.ui.pages.get_imple_name()
# XTitle
def getTitle(self):
return self.frame.getTitle()
def setTitle(self, title):
self.frame.setTitle(title)
def dispose(self):
self.frame = None
self.model = None
def addEventListener(self, xListener):
pass
def removeEventListener(self, aListener):
pass
# XController
def attachFrame(self, frame):
self.frame = frame
def attachModel(self, model):
|
def suspend(self, Suspend):
return True
def getViewData(self):
""" Returns current instance inspected. """
return self.ui.main.current.target
def restoreViewData(self, Data):
pass
def getModel(self):
return self.model
def getFrame(self):
return self.frame
def getStatusIndicator(self):
pass
# XDispatchProvider
def queryDispatch(self, url, name, flags):
pass
def queryDispatches(self, requests):
pass
# XServiceInfo
def getImplementationName(self):
return self.IMPLE_NAME
def supportsService(self, name):
return name == self.IMPLE_NAME
def getSupportedServiceNames(self):
return self.IMPLE_NAME,
| self.model = model | identifier_body |
controller.py | # Copyright 2011 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unohelper
from com.sun.star.frame import XController, XTitle, XDispatchProvider
from com.sun.star.lang import XServiceInfo
from com.sun.star.task import XStatusIndicatorSupplier
class MRIUIController(unohelper.Base,
XController, XTitle, XDispatchProvider,
XStatusIndicatorSupplier, XServiceInfo):
""" Provides controller which connects between frame and model. """
IMPLE_NAME = "mytools.mri.UIController"
def __init__(self,frame, model):
self.frame = frame
self.model = model
self.ui = None
def set_ui(self, ui):
self.ui = ui
def get_imple_name(self):
return self.ui.pages.get_imple_name()
# XTitle
def getTitle(self):
return self.frame.getTitle()
def setTitle(self, title):
self.frame.setTitle(title)
def dispose(self):
self.frame = None
self.model = None
def addEventListener(self, xListener):
pass
def removeEventListener(self, aListener):
pass
# XController
def attachFrame(self, frame):
self.frame = frame
def attachModel(self, model):
self.model = model
def suspend(self, Suspend):
return True
def getViewData(self):
""" Returns current instance inspected. """
return self.ui.main.current.target
def restoreViewData(self, Data):
pass
def getModel(self):
return self.model
def getFrame(self):
return self.frame
def getStatusIndicator(self):
pass
# XDispatchProvider
def queryDispatch(self, url, name, flags):
pass
def queryDispatches(self, requests):
pass
# XServiceInfo
def getImplementationName(self):
return self.IMPLE_NAME
def | (self, name):
return name == self.IMPLE_NAME
def getSupportedServiceNames(self):
return self.IMPLE_NAME,
| supportsService | identifier_name |
controller.py | # Copyright 2011 Tsutomu Uchino
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unohelper
from com.sun.star.frame import XController, XTitle, XDispatchProvider | class MRIUIController(unohelper.Base,
XController, XTitle, XDispatchProvider,
XStatusIndicatorSupplier, XServiceInfo):
""" Provides controller which connects between frame and model. """
IMPLE_NAME = "mytools.mri.UIController"
def __init__(self,frame, model):
self.frame = frame
self.model = model
self.ui = None
def set_ui(self, ui):
self.ui = ui
def get_imple_name(self):
return self.ui.pages.get_imple_name()
# XTitle
def getTitle(self):
return self.frame.getTitle()
def setTitle(self, title):
self.frame.setTitle(title)
def dispose(self):
self.frame = None
self.model = None
def addEventListener(self, xListener):
pass
def removeEventListener(self, aListener):
pass
# XController
def attachFrame(self, frame):
self.frame = frame
def attachModel(self, model):
self.model = model
def suspend(self, Suspend):
return True
def getViewData(self):
""" Returns current instance inspected. """
return self.ui.main.current.target
def restoreViewData(self, Data):
pass
def getModel(self):
return self.model
def getFrame(self):
return self.frame
def getStatusIndicator(self):
pass
# XDispatchProvider
def queryDispatch(self, url, name, flags):
pass
def queryDispatches(self, requests):
pass
# XServiceInfo
def getImplementationName(self):
return self.IMPLE_NAME
def supportsService(self, name):
return name == self.IMPLE_NAME
def getSupportedServiceNames(self):
return self.IMPLE_NAME, | from com.sun.star.lang import XServiceInfo
from com.sun.star.task import XStatusIndicatorSupplier
| random_line_split |
boolean_mask.ts | /**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {Tensor} from '../tensor';
import {convertToTensor} from '../tensor_util_env';
import {TensorLike} from '../types';
import * as util from '../util';
import {whereAsync} from './logical_ops';
import {gather} from './segment_ops';
/**
* Apply boolean mask to tensor.
*
* ```js
* const tensor = tf.tensor2d([1, 2, 3, 4, 5, 6], [3, 2]);
* const mask = tf.tensor1d([1, 0, 1], 'bool');
* const result = await tf.booleanMaskAsync(tensor, mask);
* result.print();
* ```
*
* @param tensor N-D tensor.
* @param mask K-D boolean tensor, K <= N and K must be known statically.
* @param axis A 0-D int Tensor representing the axis in tensor to mask from.
* By default, axis is 0 which will mask from the first dimension.
* Otherwise K + axis <= N.
*/
/** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
async function | (
tensor: Tensor|TensorLike, mask: Tensor|TensorLike,
axis?: number): Promise<Tensor> {
const $tensor = convertToTensor(tensor, 'tensor', 'boolMask');
const $mask = convertToTensor(mask, 'mask', 'boolMask', 'bool');
const axisFrom = axis == null ? 0 : axis;
const maskDim = $mask.rank;
const tensorShape = $tensor.shape;
util.assert(maskDim > 0, () => 'mask cannot be scalar');
util.assertShapesMatch(
tensorShape.slice(axisFrom, axisFrom + maskDim), $mask.shape,
`mask's shape must match the first K dimensions of tensor's shape,`);
let leadingSize = 1;
for (let i = axisFrom; i < axisFrom + maskDim; i++) {
leadingSize *= tensorShape[i];
}
const targetTensorShape =
tensorShape.slice(0, axisFrom)
.concat([leadingSize], tensorShape.slice(axisFrom + maskDim));
const reshapedTensor = $tensor.reshape(targetTensorShape);
const reshapedMask = $mask.reshape([-1]);
const positivePositions = await whereAsync(reshapedMask);
const indices = positivePositions.squeeze([1]);
const res = gather(reshapedTensor, indices, axisFrom);
// Ensure no memory leak.
if (tensor !== $tensor) {
$tensor.dispose();
}
if (mask !== $mask) {
$mask.dispose();
}
indices.dispose();
reshapedTensor.dispose();
reshapedMask.dispose();
positivePositions.dispose();
return res;
}
export const booleanMaskAsync = booleanMaskAsync_;
| booleanMaskAsync_ | identifier_name |
boolean_mask.ts | /**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {Tensor} from '../tensor';
import {convertToTensor} from '../tensor_util_env';
import {TensorLike} from '../types';
import * as util from '../util';
import {whereAsync} from './logical_ops';
import {gather} from './segment_ops';
/**
* Apply boolean mask to tensor.
*
* ```js
* const tensor = tf.tensor2d([1, 2, 3, 4, 5, 6], [3, 2]);
* const mask = tf.tensor1d([1, 0, 1], 'bool');
* const result = await tf.booleanMaskAsync(tensor, mask);
* result.print();
* ```
*
* @param tensor N-D tensor.
* @param mask K-D boolean tensor, K <= N and K must be known statically.
* @param axis A 0-D int Tensor representing the axis in tensor to mask from.
* By default, axis is 0 which will mask from the first dimension.
* Otherwise K + axis <= N.
*/
/** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
async function booleanMaskAsync_(
tensor: Tensor|TensorLike, mask: Tensor|TensorLike,
axis?: number): Promise<Tensor> {
const $tensor = convertToTensor(tensor, 'tensor', 'boolMask');
const $mask = convertToTensor(mask, 'mask', 'boolMask', 'bool');
const axisFrom = axis == null ? 0 : axis;
const maskDim = $mask.rank;
const tensorShape = $tensor.shape;
util.assert(maskDim > 0, () => 'mask cannot be scalar');
util.assertShapesMatch(
tensorShape.slice(axisFrom, axisFrom + maskDim), $mask.shape,
`mask's shape must match the first K dimensions of tensor's shape,`);
let leadingSize = 1;
for (let i = axisFrom; i < axisFrom + maskDim; i++) {
leadingSize *= tensorShape[i];
}
const targetTensorShape =
tensorShape.slice(0, axisFrom)
.concat([leadingSize], tensorShape.slice(axisFrom + maskDim));
const reshapedTensor = $tensor.reshape(targetTensorShape);
const reshapedMask = $mask.reshape([-1]);
const positivePositions = await whereAsync(reshapedMask);
const indices = positivePositions.squeeze([1]);
const res = gather(reshapedTensor, indices, axisFrom);
// Ensure no memory leak.
if (tensor !== $tensor) |
if (mask !== $mask) {
$mask.dispose();
}
indices.dispose();
reshapedTensor.dispose();
reshapedMask.dispose();
positivePositions.dispose();
return res;
}
export const booleanMaskAsync = booleanMaskAsync_;
| {
$tensor.dispose();
} | conditional_block |
boolean_mask.ts | /**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {Tensor} from '../tensor';
import {convertToTensor} from '../tensor_util_env';
import {TensorLike} from '../types';
import * as util from '../util';
import {whereAsync} from './logical_ops';
import {gather} from './segment_ops';
/**
* Apply boolean mask to tensor.
*
* ```js
* const tensor = tf.tensor2d([1, 2, 3, 4, 5, 6], [3, 2]);
* const mask = tf.tensor1d([1, 0, 1], 'bool');
* const result = await tf.booleanMaskAsync(tensor, mask);
* result.print();
* ```
*
* @param tensor N-D tensor.
* @param mask K-D boolean tensor, K <= N and K must be known statically.
* @param axis A 0-D int Tensor representing the axis in tensor to mask from.
* By default, axis is 0 which will mask from the first dimension.
* Otherwise K + axis <= N.
*/
/** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
async function booleanMaskAsync_(
tensor: Tensor|TensorLike, mask: Tensor|TensorLike,
axis?: number): Promise<Tensor> {
const $tensor = convertToTensor(tensor, 'tensor', 'boolMask');
const $mask = convertToTensor(mask, 'mask', 'boolMask', 'bool');
const axisFrom = axis == null ? 0 : axis;
const maskDim = $mask.rank;
const tensorShape = $tensor.shape;
util.assert(maskDim > 0, () => 'mask cannot be scalar'); | `mask's shape must match the first K dimensions of tensor's shape,`);
let leadingSize = 1;
for (let i = axisFrom; i < axisFrom + maskDim; i++) {
leadingSize *= tensorShape[i];
}
const targetTensorShape =
tensorShape.slice(0, axisFrom)
.concat([leadingSize], tensorShape.slice(axisFrom + maskDim));
const reshapedTensor = $tensor.reshape(targetTensorShape);
const reshapedMask = $mask.reshape([-1]);
const positivePositions = await whereAsync(reshapedMask);
const indices = positivePositions.squeeze([1]);
const res = gather(reshapedTensor, indices, axisFrom);
// Ensure no memory leak.
if (tensor !== $tensor) {
$tensor.dispose();
}
if (mask !== $mask) {
$mask.dispose();
}
indices.dispose();
reshapedTensor.dispose();
reshapedMask.dispose();
positivePositions.dispose();
return res;
}
export const booleanMaskAsync = booleanMaskAsync_; | util.assertShapesMatch(
tensorShape.slice(axisFrom, axisFrom + maskDim), $mask.shape, | random_line_split |
boolean_mask.ts | /**
* @license
* Copyright 2018 Google Inc. All Rights Reserved.
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================================
*/
import {Tensor} from '../tensor';
import {convertToTensor} from '../tensor_util_env';
import {TensorLike} from '../types';
import * as util from '../util';
import {whereAsync} from './logical_ops';
import {gather} from './segment_ops';
/**
* Apply boolean mask to tensor.
*
* ```js
* const tensor = tf.tensor2d([1, 2, 3, 4, 5, 6], [3, 2]);
* const mask = tf.tensor1d([1, 0, 1], 'bool');
* const result = await tf.booleanMaskAsync(tensor, mask);
* result.print();
* ```
*
* @param tensor N-D tensor.
* @param mask K-D boolean tensor, K <= N and K must be known statically.
* @param axis A 0-D int Tensor representing the axis in tensor to mask from.
* By default, axis is 0 which will mask from the first dimension.
* Otherwise K + axis <= N.
*/
/** @doc {heading: 'Tensors', subheading: 'Slicing and Joining'} */
async function booleanMaskAsync_(
tensor: Tensor|TensorLike, mask: Tensor|TensorLike,
axis?: number): Promise<Tensor> |
export const booleanMaskAsync = booleanMaskAsync_;
| {
const $tensor = convertToTensor(tensor, 'tensor', 'boolMask');
const $mask = convertToTensor(mask, 'mask', 'boolMask', 'bool');
const axisFrom = axis == null ? 0 : axis;
const maskDim = $mask.rank;
const tensorShape = $tensor.shape;
util.assert(maskDim > 0, () => 'mask cannot be scalar');
util.assertShapesMatch(
tensorShape.slice(axisFrom, axisFrom + maskDim), $mask.shape,
`mask's shape must match the first K dimensions of tensor's shape,`);
let leadingSize = 1;
for (let i = axisFrom; i < axisFrom + maskDim; i++) {
leadingSize *= tensorShape[i];
}
const targetTensorShape =
tensorShape.slice(0, axisFrom)
.concat([leadingSize], tensorShape.slice(axisFrom + maskDim));
const reshapedTensor = $tensor.reshape(targetTensorShape);
const reshapedMask = $mask.reshape([-1]);
const positivePositions = await whereAsync(reshapedMask);
const indices = positivePositions.squeeze([1]);
const res = gather(reshapedTensor, indices, axisFrom);
// Ensure no memory leak.
if (tensor !== $tensor) {
$tensor.dispose();
}
if (mask !== $mask) {
$mask.dispose();
}
indices.dispose();
reshapedTensor.dispose();
reshapedMask.dispose();
positivePositions.dispose();
return res;
} | identifier_body |
dashboard.module.ts |
import { GooglechartComponent } from './googlechart/googlechart.component';
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { FormsModule } from '@angular/forms';
import { AppTranslationModule } from '../../app.translation.module';
import { NgaModule } from '../../theme/nga.module';
import { Dashboard } from './dashboard.component';
import { routing } from './dashboard.routing';
import { PopularApp } from './popularApp';
import { PieChart } from './pieChart';
import { TrafficChart } from './trafficChart';
import { UsersMap } from './usersMap';
import { LineChart } from './lineChart';
import { Feed } from './feed';
import { Todo } from './todo';
import { Calendar } from './calendar';
import { CalendarService } from './calendar/calendar.service';
import { FeedService } from './feed/feed.service';
import { LineChartService } from './lineChart/lineChart.service';
import { PieChartService } from './pieChart/pieChart.service';
import { TodoService } from './todo/todo.service';
import { TrafficChartService } from './trafficChart/trafficChart.service';
import { UsersMapService } from './usersMap/usersMap.service';
import { GaugechartComponent } from './gaugechart/gaugechart.component';
import { TabelaCidadesComponent } from './tabela-cidades/tabela-cidades.component';
import { DashBoardService } from './../../_services/dashboard.service';
import { SimpleTimer } from 'ng2-simple-timer';
@NgModule({
imports: [
CommonModule,
FormsModule,
AppTranslationModule,
NgaModule,
routing,
],
declarations: [
PopularApp,
PieChart,
TrafficChart,
UsersMap,
LineChart,
Feed,
Todo,
Calendar,
Dashboard,
GooglechartComponent,
GaugechartComponent,
TabelaCidadesComponent,
],
providers: [
CalendarService,
FeedService,
LineChartService,
PieChartService,
TodoService,
TrafficChartService,
UsersMapService,
DashBoardService,
SimpleTimer,
],
})
export class | {}
| DashboardModule | identifier_name |
dashboard.module.ts | import { GooglechartComponent } from './googlechart/googlechart.component';
import { NgModule } from '@angular/core';
import { CommonModule } from '@angular/common';
import { FormsModule } from '@angular/forms';
import { AppTranslationModule } from '../../app.translation.module';
import { NgaModule } from '../../theme/nga.module';
import { Dashboard } from './dashboard.component';
import { routing } from './dashboard.routing';
import { PopularApp } from './popularApp';
import { PieChart } from './pieChart';
import { TrafficChart } from './trafficChart';
import { UsersMap } from './usersMap';
import { LineChart } from './lineChart';
import { Feed } from './feed';
import { Todo } from './todo';
import { Calendar } from './calendar';
import { CalendarService } from './calendar/calendar.service';
import { FeedService } from './feed/feed.service';
import { LineChartService } from './lineChart/lineChart.service';
import { PieChartService } from './pieChart/pieChart.service';
import { TodoService } from './todo/todo.service';
import { TrafficChartService } from './trafficChart/trafficChart.service';
import { UsersMapService } from './usersMap/usersMap.service';
import { GaugechartComponent } from './gaugechart/gaugechart.component';
import { TabelaCidadesComponent } from './tabela-cidades/tabela-cidades.component';
import { DashBoardService } from './../../_services/dashboard.service'; | CommonModule,
FormsModule,
AppTranslationModule,
NgaModule,
routing,
],
declarations: [
PopularApp,
PieChart,
TrafficChart,
UsersMap,
LineChart,
Feed,
Todo,
Calendar,
Dashboard,
GooglechartComponent,
GaugechartComponent,
TabelaCidadesComponent,
],
providers: [
CalendarService,
FeedService,
LineChartService,
PieChartService,
TodoService,
TrafficChartService,
UsersMapService,
DashBoardService,
SimpleTimer,
],
})
export class DashboardModule {} | import { SimpleTimer } from 'ng2-simple-timer';
@NgModule({
imports: [ | random_line_split |
partialeq_ne_impl.rs | use clippy_utils::diagnostics::span_lint_hir;
use clippy_utils::is_automatically_derived;
use if_chain::if_chain;
use rustc_hir::{Impl, Item, ItemKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::sym;
declare_clippy_lint! {
/// ### What it does
/// Checks for manual re-implementations of `PartialEq::ne`.
///
/// ### Why is this bad?
/// `PartialEq::ne` is required to always return the
/// negated result of `PartialEq::eq`, which is exactly what the default
/// implementation does. Therefore, there should never be any need to
/// re-implement it.
///
/// ### Example
/// ```rust
/// struct Foo;
///
/// impl PartialEq for Foo {
/// fn eq(&self, other: &Foo) -> bool { true }
/// fn ne(&self, other: &Foo) -> bool { !(self == other) }
/// } |
declare_lint_pass!(PartialEqNeImpl => [PARTIALEQ_NE_IMPL]);
impl<'tcx> LateLintPass<'tcx> for PartialEqNeImpl {
fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'_>) {
if_chain! {
if let ItemKind::Impl(Impl { of_trait: Some(ref trait_ref), items: impl_items, .. }) = item.kind;
let attrs = cx.tcx.hir().attrs(item.hir_id());
if !is_automatically_derived(attrs);
if let Some(eq_trait) = cx.tcx.lang_items().eq_trait();
if trait_ref.path.res.def_id() == eq_trait;
then {
for impl_item in impl_items {
if impl_item.ident.name == sym::ne {
span_lint_hir(
cx,
PARTIALEQ_NE_IMPL,
impl_item.id.hir_id(),
impl_item.span,
"re-implementing `PartialEq::ne` is unnecessary",
);
}
}
}
};
}
} | /// ```
pub PARTIALEQ_NE_IMPL,
complexity,
"re-implementing `PartialEq::ne`"
} | random_line_split |
partialeq_ne_impl.rs | use clippy_utils::diagnostics::span_lint_hir;
use clippy_utils::is_automatically_derived;
use if_chain::if_chain;
use rustc_hir::{Impl, Item, ItemKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::sym;
declare_clippy_lint! {
/// ### What it does
/// Checks for manual re-implementations of `PartialEq::ne`.
///
/// ### Why is this bad?
/// `PartialEq::ne` is required to always return the
/// negated result of `PartialEq::eq`, which is exactly what the default
/// implementation does. Therefore, there should never be any need to
/// re-implement it.
///
/// ### Example
/// ```rust
/// struct Foo;
///
/// impl PartialEq for Foo {
/// fn eq(&self, other: &Foo) -> bool { true }
/// fn ne(&self, other: &Foo) -> bool { !(self == other) }
/// }
/// ```
pub PARTIALEQ_NE_IMPL,
complexity,
"re-implementing `PartialEq::ne`"
}
declare_lint_pass!(PartialEqNeImpl => [PARTIALEQ_NE_IMPL]);
impl<'tcx> LateLintPass<'tcx> for PartialEqNeImpl {
fn check_item(&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'_>) |
}
| {
if_chain! {
if let ItemKind::Impl(Impl { of_trait: Some(ref trait_ref), items: impl_items, .. }) = item.kind;
let attrs = cx.tcx.hir().attrs(item.hir_id());
if !is_automatically_derived(attrs);
if let Some(eq_trait) = cx.tcx.lang_items().eq_trait();
if trait_ref.path.res.def_id() == eq_trait;
then {
for impl_item in impl_items {
if impl_item.ident.name == sym::ne {
span_lint_hir(
cx,
PARTIALEQ_NE_IMPL,
impl_item.id.hir_id(),
impl_item.span,
"re-implementing `PartialEq::ne` is unnecessary",
);
}
}
}
};
} | identifier_body |
partialeq_ne_impl.rs | use clippy_utils::diagnostics::span_lint_hir;
use clippy_utils::is_automatically_derived;
use if_chain::if_chain;
use rustc_hir::{Impl, Item, ItemKind};
use rustc_lint::{LateContext, LateLintPass};
use rustc_session::{declare_lint_pass, declare_tool_lint};
use rustc_span::sym;
declare_clippy_lint! {
/// ### What it does
/// Checks for manual re-implementations of `PartialEq::ne`.
///
/// ### Why is this bad?
/// `PartialEq::ne` is required to always return the
/// negated result of `PartialEq::eq`, which is exactly what the default
/// implementation does. Therefore, there should never be any need to
/// re-implement it.
///
/// ### Example
/// ```rust
/// struct Foo;
///
/// impl PartialEq for Foo {
/// fn eq(&self, other: &Foo) -> bool { true }
/// fn ne(&self, other: &Foo) -> bool { !(self == other) }
/// }
/// ```
pub PARTIALEQ_NE_IMPL,
complexity,
"re-implementing `PartialEq::ne`"
}
declare_lint_pass!(PartialEqNeImpl => [PARTIALEQ_NE_IMPL]);
impl<'tcx> LateLintPass<'tcx> for PartialEqNeImpl {
fn | (&mut self, cx: &LateContext<'tcx>, item: &'tcx Item<'_>) {
if_chain! {
if let ItemKind::Impl(Impl { of_trait: Some(ref trait_ref), items: impl_items, .. }) = item.kind;
let attrs = cx.tcx.hir().attrs(item.hir_id());
if !is_automatically_derived(attrs);
if let Some(eq_trait) = cx.tcx.lang_items().eq_trait();
if trait_ref.path.res.def_id() == eq_trait;
then {
for impl_item in impl_items {
if impl_item.ident.name == sym::ne {
span_lint_hir(
cx,
PARTIALEQ_NE_IMPL,
impl_item.id.hir_id(),
impl_item.span,
"re-implementing `PartialEq::ne` is unnecessary",
);
}
}
}
};
}
}
| check_item | identifier_name |
lib.rs | // Copyright 2014 The Rust Project Developers. See the COPYRIGHT | // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
//! This crate provides a native implementation of regular expressions that is
//! heavily based on RE2 both in syntax and in implementation. Notably,
//! backreferences and arbitrary lookahead/lookbehind assertions are not
//! provided. In return, regular expression searching provided by this package
//! has excellent worst case performance. The specific syntax supported is
//! documented further down.
//!
//! This crate's documentation provides some simple examples, describes Unicode
//! support and exhaustively lists the supported syntax. For more specific
//! details on the API, please see the documentation for the `Regex` type.
//!
//! # First example: find a date
//!
//! General use of regular expressions in this package involves compiling an
//! expression and then using it to search, split or replace text. For example,
//! to confirm that some text resembles a date:
//!
//! ```rust
//! use regex::Regex;
//! let re = match Regex::new(r"^\d{4}-\d{2}-\d{2}$") {
//! Ok(re) => re,
//! Err(err) => fail!("{}", err),
//! };
//! assert_eq!(re.is_match("2014-01-01"), true);
//! ```
//!
//! Notice the use of the `^` and `$` anchors. In this crate, every expression
//! is executed with an implicit `.*?` at the beginning and end, which allows
//! it to match anywhere in the text. Anchors can be used to ensure that the
//! full text matches an expression.
//!
//! This example also demonstrates the utility of raw strings in Rust, which
//! are just like regular strings except they are prefixed with an `r` and do
//! not process any escape sequences. For example, `"\\d"` is the same
//! expression as `r"\d"`.
//!
//! # The `regex!` macro
//!
//! Rust's compile time meta-programming facilities provide a way to write a
//! `regex!` macro which compiles regular expressions *when your program
//! compiles*. Said differently, if you only use `regex!` to build regular
//! expressions in your program, then your program cannot compile with an
//! invalid regular expression. Moreover, the `regex!` macro compiles the
//! given expression to native Rust code, which makes it much faster for
//! searching text.
//!
//! Since `regex!` provides compiled regular expressions that are both safer
//! and faster to use, you should use them whenever possible. The only
//! requirement for using them is that you have a string literal corresponding
//! to your expression. Otherwise, it is indistinguishable from an expression
//! compiled at runtime with `Regex::new`.
//!
//! To use the `regex!` macro, you must enable the `phase` feature and import
//! the `regex_macros` crate as a syntax extension:
//!
//! ```rust
//! #![feature(phase)]
//! #[phase(syntax)]
//! extern crate regex_macros;
//! extern crate regex;
//!
//! fn main() {
//! let re = regex!(r"^\d{4}-\d{2}-\d{2}$");
//! assert_eq!(re.is_match("2014-01-01"), true);
//! }
//! ```
//!
//! There are a few things worth mentioning about using the `regex!` macro.
//! Firstly, the `regex!` macro *only* accepts string *literals*.
//! Secondly, the `regex` crate *must* be linked with the name `regex` since
//! the generated code depends on finding symbols in the `regex` crate.
//!
//! The only downside of using the `regex!` macro is that it can increase the
//! size of your program's binary since it generates specialized Rust code.
//! The extra size probably won't be significant for a small number of
//! expressions, but 100+ calls to `regex!` will probably result in a
//! noticeably bigger binary.
//!
//! # Example: iterating over capture groups
//!
//! This crate provides convenient iterators for matching an expression
//! repeatedly against a search string to find successive non-overlapping
//! matches. For example, to find all dates in a string and be able to access
//! them by their component pieces:
//!
//! ```rust
//! # #![feature(phase)]
//! # extern crate regex; #[phase(syntax)] extern crate regex_macros;
//! # fn main() {
//! let re = regex!(r"(\d{4})-(\d{2})-(\d{2})");
//! let text = "2012-03-14, 2013-01-01 and 2014-07-05";
//! for cap in re.captures_iter(text) {
//! println!("Month: {} Day: {} Year: {}", cap.at(2), cap.at(3), cap.at(1));
//! }
//! // Output:
//! // Month: 03 Day: 14 Year: 2012
//! // Month: 01 Day: 01 Year: 2013
//! // Month: 07 Day: 05 Year: 2014
//! # }
//! ```
//!
//! Notice that the year is in the capture group indexed at `1`. This is
//! because the *entire match* is stored in the capture group at index `0`.
//!
//! # Example: replacement with named capture groups
//!
//! Building on the previous example, perhaps we'd like to rearrange the date
//! formats. This can be done with text replacement. But to make the code
//! clearer, we can *name* our capture groups and use those names as variables
//! in our replacement text:
//!
//! ```rust
//! # #![feature(phase)]
//! # extern crate regex; #[phase(syntax)] extern crate regex_macros;
//! # fn main() {
//! let re = regex!(r"(?P<y>\d{4})-(?P<m>\d{2})-(?P<d>\d{2})");
//! let before = "2012-03-14, 2013-01-01 and 2014-07-05";
//! let after = re.replace_all(before, "$m/$d/$y");
//! assert_eq!(after.as_slice(), "03/14/2012, 01/01/2013 and 07/05/2014");
//! # }
//! ```
//!
//! The `replace` methods are actually polymorphic in the replacement, which
//! provides more flexibility than is seen here. (See the documentation for
//! `Regex::replace` for more details.)
//!
//! # Pay for what you use
//!
//! With respect to searching text with a regular expression, there are three
//! questions that can be asked:
//!
//! 1. Does the text match this expression?
//! 2. If so, where does it match?
//! 3. Where are the submatches?
//!
//! Generally speaking, this crate could provide a function to answer only #3,
//! which would subsume #1 and #2 automatically. However, it can be
//! significantly more expensive to compute the location of submatches, so it's
//! best not to do it if you don't need to.
//!
//! Therefore, only use what you need. For example, don't use `find` if you
//! only need to test if an expression matches a string. (Use `is_match`
//! instead.)
//!
//! # Unicode
//!
//! This implementation executes regular expressions **only** on sequences of
//! UTF8 codepoints while exposing match locations as byte indices.
//!
//! Currently, only naive case folding is supported. Namely, when matching
//! case insensitively, the characters are first converted to their uppercase
//! forms and then compared.
//!
//! Regular expressions themselves are also **only** interpreted as a sequence
//! of UTF8 codepoints. This means you can embed Unicode characters directly
//! into your expression:
//!
//! ```rust
//! # #![feature(phase)]
//! # extern crate regex; #[phase(syntax)] extern crate regex_macros;
//! # fn main() {
//! let re = regex!(r"(?i)Δ+");
//! assert_eq!(re.find("ΔδΔ"), Some((0, 6)));
//! # }
//! ```
//!
//! Finally, Unicode general categories and scripts are available as character
//! classes. For example, you can match a sequence of numerals, Greek or
//! Cherokee letters:
//!
//! ```rust
//! # #![feature(phase)]
//! # extern crate regex; #[phase(syntax)] extern crate regex_macros;
//! # fn main() {
//! let re = regex!(r"[\pN\p{Greek}\p{Cherokee}]+");
//! assert_eq!(re.find("abcΔᎠβⅠᏴγδⅡxyz"), Some((3, 23)));
//! # }
//! ```
//!
//! # Syntax
//!
//! The syntax supported in this crate is almost in an exact correspondence
//! with the syntax supported by RE2.
//!
//! ## Matching one character
//!
//! <pre class="rust">
//! . any character except new line (includes new line with s flag)
//! [xyz] A character class matching either x, y or z.
//! [^xyz] A character class matching any character except x, y and z.
//! [a-z] A character class matching any character in range a-z.
//! \d Perl character class ([0-9])
//! \D Negated Perl character class ([^0-9])
//! [:alpha:] ASCII character class ([A-Za-z])
//! [:^alpha:] Negated ASCII character class ([^A-Za-z])
//! \pN One letter name Unicode character class
//! \p{Greek} Unicode character class (general category or script)
//! \PN Negated one letter name Unicode character class
//! \P{Greek} negated Unicode character class (general category or script)
//! </pre>
//!
//! Any named character class may appear inside a bracketed `[...]` character
//! class. For example, `[\p{Greek}\pN]` matches any Greek or numeral
//! character.
//!
//! ## Composites
//!
//! <pre class="rust">
//! xy concatenation (x followed by y)
//! x|y alternation (x or y, prefer x)
//! </pre>
//!
//! ## Repetitions
//!
//! <pre class="rust">
//! x* zero or more of x (greedy)
//! x+ one or more of x (greedy)
//! x? zero or one of x (greedy)
//! x*? zero or more of x (ungreedy)
//! x+? one or more of x (ungreedy)
//! x?? zero or one of x (ungreedy)
//! x{n,m} at least n and at most x (greedy)
//! x{n,} at least n x (greedy)
//! x{n} exactly n x
//! x{n,m}? at least n and at most x (ungreedy)
//! x{n,}? at least n x (ungreedy)
//! x{n}? exactly n x
//! </pre>
//!
//! ## Empty matches
//!
//! <pre class="rust">
//! ^ the beginning of text (or start-of-line with multi-line mode)
//! $ the end of text (or end-of-line with multi-line mode)
//! \A only the beginning of text (even with multi-line mode enabled)
//! \z only the end of text (even with multi-line mode enabled)
//! \b a Unicode word boundary (\w on one side and \W, \A, or \z on other)
//! \B not a Unicode word boundary
//! </pre>
//!
//! ## Grouping and flags
//!
//! <pre class="rust">
//! (exp) numbered capture group (indexed by opening parenthesis)
//! (?P<name>exp) named (also numbered) capture group (allowed chars: [_0-9a-zA-Z])
//! (?:exp) non-capturing group
//! (?flags) set flags within current group
//! (?flags:exp) set flags for exp (non-capturing)
//! </pre>
//!
//! Flags are each a single character. For example, `(?x)` sets the flag `x`
//! and `(?-x)` clears the flag `x`. Multiple flags can be set or cleared at
//! the same time: `(?xy)` sets both the `x` and `y` flags and `(?x-y)` sets
//! the `x` flag and clears the `y` flag.
//!
//! All flags are by default disabled. They are:
//!
//! <pre class="rust">
//! i case insensitive
//! m multi-line mode: ^ and $ match begin/end of line
//! s allow . to match \n
//! U swap the meaning of x* and x*?
//! </pre>
//!
//! Here's an example that matches case insensitively for only part of the
//! expression:
//!
//! ```rust
//! # #![feature(phase)]
//! # extern crate regex; #[phase(syntax)] extern crate regex_macros;
//! # fn main() {
//! let re = regex!(r"(?i)a+(?-i)b+");
//! let cap = re.captures("AaAaAbbBBBb").unwrap();
//! assert_eq!(cap.at(0), "AaAaAbb");
//! # }
//! ```
//!
//! Notice that the `a+` matches either `a` or `A`, but the `b+` only matches
//! `b`.
//!
//! ## Escape sequences
//!
//! <pre class="rust">
//! \* literal *, works for any punctuation character: \.+*?()|[]{}^$
//! \a bell (\x07)
//! \f form feed (\x0C)
//! \t horizontal tab
//! \n new line
//! \r carriage return
//! \v vertical tab (\x0B)
//! \123 octal character code (up to three digits)
//! \x7F hex character code (exactly two digits)
//! \x{10FFFF} any hex character code corresponding to a valid UTF8 codepoint
//! </pre>
//!
//! ## Perl character classes (Unicode friendly)
//!
//! <pre class="rust">
//! \d digit ([0-9] + \p{Nd})
//! \D not digit
//! \s whitespace ([\t\n\f\r ] + \p{Z})
//! \S not whitespace
//! \w word character ([0-9A-Za-z_] + \p{L})
//! \W not word character
//! </pre>
//!
//! ## ASCII character classes
//!
//! <pre class="rust">
//! [:alnum:] alphanumeric ([0-9A-Za-z])
//! [:alpha:] alphabetic ([A-Za-z])
//! [:ascii:] ASCII ([\x00-\x7F])
//! [:blank:] blank ([\t ])
//! [:cntrl:] control ([\x00-\x1F\x7F])
//! [:digit:] digits ([0-9])
//! [:graph:] graphical ([!-~])
//! [:lower:] lower case ([a-z])
//! [:print:] printable ([ -~])
//! [:punct:] punctuation ([!-/:-@[-`{-~])
//! [:space:] whitespace ([\t\n\v\f\r ])
//! [:upper:] upper case ([A-Z])
//! [:word:] word characters ([0-9A-Za-z_])
//! [:xdigit:] hex digit ([0-9A-Fa-f])
//! </pre>
//!
//! # Untrusted input
//!
//! There are two factors to consider here: untrusted regular expressions and
//! untrusted search text.
//!
//! Currently, there are no counter-measures in place to prevent a malicious
//! user from writing an expression that may use a lot of resources. One such
//! example is to repeat counted repetitions: `((a{100}){100}){100}` will try
//! to repeat the `a` instruction `100^3` times. Essentially, this means it's
//! very easy for an attacker to exhaust your system's memory if they are
//! allowed to execute arbitrary regular expressions. A possible solution to
//! this is to impose a hard limit on the size of a compiled expression, but it
//! does not yet exist.
//!
//! The story is a bit better with untrusted search text, since this crate's
//! implementation provides `O(nm)` search where `n` is the number of
//! characters in the search text and `m` is the number of instructions in a
//! compiled expression.
#![crate_id = "regex#0.11-pre"]
#![crate_type = "rlib"]
#![crate_type = "dylib"]
#![experimental]
#![license = "MIT/ASL2"]
#![doc(html_logo_url = "http://www.rust-lang.org/logos/rust-logo-128x128-blk-v2.png",
html_favicon_url = "http://www.rust-lang.org/favicon.ico",
html_root_url = "http://static.rust-lang.org/doc/master")]
#![feature(macro_rules, phase)]
#![deny(missing_doc)]
extern crate collections;
#[cfg(test)]
extern crate stdtest = "test";
#[cfg(test)]
extern crate rand;
// During tests, this links with the `regex` crate so that the `regex!` macro
// can be tested.
#[cfg(test)]
extern crate regex;
pub use parse::Error;
pub use re::{Regex, Captures, SubCaptures, SubCapturesPos};
pub use re::{FindCaptures, FindMatches};
pub use re::{Replacer, NoExpand, RegexSplits, RegexSplitsN};
pub use re::{quote, is_match};
mod compile;
mod parse;
mod re;
mod vm;
// FIXME(#13725) windows needs fixing.
#[cfg(test, not(windows))]
mod test;
/// The `program` module exists to support the `regex!` macro. Do not use.
#[doc(hidden)]
pub mod native {
// Exporting this stuff is bad form, but it's necessary for two reasons.
// Firstly, the `regex!` syntax extension is in a different crate and
// requires access to the representation of a regex (particularly the
// instruction set) in order to compile to native Rust. This could be
// mitigated if `regex!` was defined in the same crate, but this has
// undesirable consequences (such as requiring a dependency on
// `libsyntax`).
//
// Secondly, the code generated generated by `regex!` must *also* be able
// to access various functions in this crate to reduce code duplication
// and to provide a value with precisely the same `Regex` type in this
// crate. This, AFAIK, is impossible to mitigate.
//
// On the bright side, `rustdoc` lets us hide this from the public API
// documentation.
pub use compile::{
Program,
OneChar, CharClass, Any, Save, Jump, Split,
Match, EmptyBegin, EmptyEnd, EmptyWordBoundary,
};
pub use parse::{
FLAG_EMPTY, FLAG_NOCASE, FLAG_MULTI, FLAG_DOTNL,
FLAG_SWAP_GREED, FLAG_NEGATED,
};
pub use re::{Dynamic, Native};
pub use vm::{
MatchKind, Exists, Location, Submatches,
StepState, StepMatchEarlyReturn, StepMatch, StepContinue,
CharReader, find_prefix,
};
} | // file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
// | random_line_split |
ex6_2.py | import numpy as np
import pyCloudy as pc
import matplotlib.pyplot as plt
from pyneb.utils.physics import IP
# TODO: Add comments
"""
Pregunta 1
"""
def alpha_B(Te):
"""
Recomb. coefficient, case B
"""
T4 = Te/1e4
return 2.6e-13/T4
def U_mean_def(QH0, Ne, Rstr):
"""
\int_0^{Rstr}{U.dV} / \int_0^{Rstr}{dV}
Return the mean over Stromgren volume of U
"""
return 3.* QH0 / (4. * np.pi * pc.CST.CLIGHT * Ne * Rstr**2)
def QH0_def(Rstr, Ne, ff, Te = 1e4):
"""
Volume of
"""
return 4. / 3. * np.pi * Rstr**3 * Ne**2 * ff * alpha_B(Te)
def Rstr(QH0, Ne, ff, Te = 1e4):
return (3. * QH0 / (4. * np.pi * ff * alpha_B(Te) * Ne**2))**(1./3.)
def U_mean(QH0, Ne, ff, Te = 1e4):
return (Ne * QH0 * ff**2 * 3 / (4. * np.pi) * alpha_B(Te)**2)**(1./3.) / pc.CST.CLIGHT
def QH0(U_mean, Ne, ff, Te = 1e4):
return U_mean**3 * pc.CST.CLIGHT**3 * 4. * np.pi / 3. / (Ne * ff**2 * alpha_B(Te)**2)
# --------------------------------------
"""
Pregunta 3
"""
def make_model(name, models_dir='./', SED='BB', qH=None, SED_params=None, n_zones = None, iterate=1):
pc.log_.level=3
abund_AGSS09 = {'He' : 10.93, 'C' : 8.43, 'N' : 7.83, 'O' : 8.69, 'Ne' : 7.93, 'Mg' : 7.6,
'S' : 7.12, 'Ar' : 6.40, 'Fe' : 7.5, 'Cl' : 5.5, 'Si' : 7.51}
for elem in abund_AGSS09:
abund_AGSS09[elem] -= 12
if elem != 'He':
abund_AGSS09[elem] -= 0.3
options = ('no molecules',
'no level2 lines',
'no fine opacities',
'atom h-like levels small',
'atom he-like levels small',
'COSMIC RAY BACKGROUND',
'element limit off -8',
)
c_input = pc.CloudyInput('{0}/{1}'.format(models_dir, name))
if SED == 'BB':
c_input.set_BB(Teff = SED_params, lumi_unit = 'q(H)', lumi_value = qH)
else:
c_input.set_star(SED = SED, SED_params = SED_params, lumi_unit = 'q(H)', lumi_value=qH)
# Defining the density. You may also use set_dlaw(parameters) if you have a density law defined in dense_fabden.cpp.
c_input.set_cste_density(2, ff = 1.)
# Defining the inner radius. A second parameter would be the outer radius (matter-bounded nebula).
c_input.set_radius(r_in = np.log10(pc.CST.PC/10))
c_input.set_abund(ab_dict = abund_AGSS09, nograins = True)
c_input.set_other(options)
c_input.set_iterate(iterate) # (0) for no iteration, () for one iteration, (N) for N iterations.
c_input.set_sphere() # () or (True) : sphere, or (False): open geometry.
c_input.set_distance(dist=1., unit='kpc', linear=True) # unit can be 'kpc', 'Mpc', 'parsecs', 'cm'. If linear=False, the distance is in log.
if n_zones is not None:
c_input.set_stop('zones {0}'.format(n_zones))
c_input.print_input()
c_input.run_cloudy()
def | (name, models_dir = './', style='-', fig_num = 1):
pc.log_.level=3
M = pc.CloudyModel('{0}/{1}'.format(models_dir, name), read_emis = False)
X = M.radius/1e19
colors = ['r', 'g', 'b', 'y', 'm', 'c']
plt.figure(fig_num)
plt.subplot(3, 3, 1)
plt.plot(X, M.get_ionic('H', 0), label='H0', linestyle=style, c= colors[0])
plt.plot(X, M.get_ionic('H', 1), label='H+', linestyle=style, c= colors[1])
plt.plot(X, M.get_ionic('He', 0), label='He0', linestyle=style, c= colors[2])
plt.plot(X, M.get_ionic('He', 1), label='He+', linestyle=style, c= colors[3])
plt.plot(X, M.get_ionic('He', 2), label='He++', linestyle=style, c= colors[4])
if style== '-':
plt.legend()
plt.title(name)
for i_plot, elem in enumerate(['N', 'O', 'Ne', 'S', 'Ar']):
plt.subplot(3, 3, i_plot + 2)
for i in np.arange(4):
plt.plot(X, M.get_ionic(elem, i), linestyle=style, c=colors[i])
plt.text(np.max(X)/2, 0.9, elem)
if i_plot == 0:
plt.title(M.date_model)
plt.subplot(3, 3, 7)
plt.plot(X, M.ne, label=r'N$_e$', linestyle=style, c='blue')
plt.plot(X, M.nH, label='N$_H$', linestyle=style, c='red')
if style== '-':
plt.legend(loc=3)
plt.xlabel(r'R [10$^{19}$cm]')
plt.subplot(3, 3, 8)
plt.plot(X, M.te, label=r'T$_e$', linestyle=style, c='blue')
if style== '-':
plt.legend(loc=3)
plt.subplot(3, 3, 9)
plt.plot(X, M.log_U, label='log U', c='blue')
if style== '-':
plt.legend()
def search_T(name, models_dir = './', SED = 'BB'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
if SED == 'BB':
T = np.array([float(pc.sextract(M.out['Blackbody'], 'dy ', '*')) for M in Ms])
elif SED == 'WM':
T = np.array([float(pc.sextract(M.out['table star'], 'mod" ', '4.0')) for M in Ms])
QH0 = np.array([M.Q0 for M in Ms])
QHe0 = np.array([M.Q[1::].sum() for M in Ms])
plt.plot(T/1e3, QHe0/QH0)
plt.xlabel('T [kK]')
plt.ylabel('QHe0/QH0')
def print_Xi(name, models_dir = './'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
names = [M.model_name_s for M in Ms]
print(names)
print('H0/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('H', 0),
Ms[1].get_ab_ion_vol('H', 0),
Ms[2].get_ab_ion_vol('H', 0)))
print('H1/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('H', 1),
Ms[1].get_ab_ion_vol('H', 1),
Ms[2].get_ab_ion_vol('H', 1)))
print('He0/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 0),
Ms[1].get_ab_ion_vol('He', 0),
Ms[2].get_ab_ion_vol('He', 0)))
print('He1/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 1),
Ms[1].get_ab_ion_vol('He', 1),
Ms[2].get_ab_ion_vol('He', 1)))
print('He2/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 2),
Ms[1].get_ab_ion_vol('He', 2),
Ms[2].get_ab_ion_vol('He', 2)))
for elem in ['N', 'O', 'Ne', 'S', 'Ar']:
for i in np.arange(4):
print('{0:2s}{1}/H: {2:.2e} {3:.2e} {4:.2e}'.format(elem, i, Ms[0].get_ab_ion_vol(elem, i),
Ms[1].get_ab_ion_vol(elem, i),
Ms[2].get_ab_ion_vol(elem, i)))
def plot_SED(name, models_dir = './', unit='Jy'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
plt.figure()
plt.subplot(2, 1, 1)
for M in Ms:
plt.plot(M.get_cont_x(unit = 'eV'), np.log10(M.get_cont_y(unit = 'esHz')), label=M.model_name_s)
plt.xlim((10., 60))
plt.ylim((18, 24))
plt.ylabel('log [erg.s-1.Hz-1]')
plt.legend(loc=3)
plt.subplot(2, 1, 2)
for M in Ms:
plt.plot(M.get_cont_x(unit = 'eV'), np.log10(M.get_cont_y(unit = 'Q')), label=M.model_name_s)
plt.xlim((10., 60))
plt.ylim((42., 50))
plt.xlabel('E [eV]')
plt.ylabel('QH0(E)')
# TODO: avoid overlap
for ip in IP:
plt.plot([IP[ip], IP[ip]], [49, 50])
plt.text(IP[ip], 48, ip)
| plot_model | identifier_name |
ex6_2.py | import numpy as np
import pyCloudy as pc
import matplotlib.pyplot as plt
from pyneb.utils.physics import IP
# TODO: Add comments
"""
Pregunta 1
"""
def alpha_B(Te):
"""
Recomb. coefficient, case B
"""
T4 = Te/1e4
return 2.6e-13/T4
def U_mean_def(QH0, Ne, Rstr):
"""
\int_0^{Rstr}{U.dV} / \int_0^{Rstr}{dV}
Return the mean over Stromgren volume of U
"""
return 3.* QH0 / (4. * np.pi * pc.CST.CLIGHT * Ne * Rstr**2)
def QH0_def(Rstr, Ne, ff, Te = 1e4):
"""
Volume of
"""
return 4. / 3. * np.pi * Rstr**3 * Ne**2 * ff * alpha_B(Te)
def Rstr(QH0, Ne, ff, Te = 1e4):
return (3. * QH0 / (4. * np.pi * ff * alpha_B(Te) * Ne**2))**(1./3.)
def U_mean(QH0, Ne, ff, Te = 1e4):
return (Ne * QH0 * ff**2 * 3 / (4. * np.pi) * alpha_B(Te)**2)**(1./3.) / pc.CST.CLIGHT
def QH0(U_mean, Ne, ff, Te = 1e4):
return U_mean**3 * pc.CST.CLIGHT**3 * 4. * np.pi / 3. / (Ne * ff**2 * alpha_B(Te)**2)
# --------------------------------------
"""
Pregunta 3
"""
def make_model(name, models_dir='./', SED='BB', qH=None, SED_params=None, n_zones = None, iterate=1):
pc.log_.level=3
abund_AGSS09 = {'He' : 10.93, 'C' : 8.43, 'N' : 7.83, 'O' : 8.69, 'Ne' : 7.93, 'Mg' : 7.6,
'S' : 7.12, 'Ar' : 6.40, 'Fe' : 7.5, 'Cl' : 5.5, 'Si' : 7.51}
for elem in abund_AGSS09:
abund_AGSS09[elem] -= 12
if elem != 'He':
abund_AGSS09[elem] -= 0.3
options = ('no molecules',
'no level2 lines',
'no fine opacities',
'atom h-like levels small',
'atom he-like levels small',
'COSMIC RAY BACKGROUND',
'element limit off -8',
)
c_input = pc.CloudyInput('{0}/{1}'.format(models_dir, name))
if SED == 'BB':
c_input.set_BB(Teff = SED_params, lumi_unit = 'q(H)', lumi_value = qH)
else:
c_input.set_star(SED = SED, SED_params = SED_params, lumi_unit = 'q(H)', lumi_value=qH)
# Defining the density. You may also use set_dlaw(parameters) if you have a density law defined in dense_fabden.cpp.
c_input.set_cste_density(2, ff = 1.)
# Defining the inner radius. A second parameter would be the outer radius (matter-bounded nebula).
c_input.set_radius(r_in = np.log10(pc.CST.PC/10))
c_input.set_abund(ab_dict = abund_AGSS09, nograins = True)
c_input.set_other(options)
c_input.set_iterate(iterate) # (0) for no iteration, () for one iteration, (N) for N iterations.
c_input.set_sphere() # () or (True) : sphere, or (False): open geometry.
c_input.set_distance(dist=1., unit='kpc', linear=True) # unit can be 'kpc', 'Mpc', 'parsecs', 'cm'. If linear=False, the distance is in log.
if n_zones is not None:
c_input.set_stop('zones {0}'.format(n_zones))
c_input.print_input()
c_input.run_cloudy()
def plot_model(name, models_dir = './', style='-', fig_num = 1):
pc.log_.level=3
M = pc.CloudyModel('{0}/{1}'.format(models_dir, name), read_emis = False)
X = M.radius/1e19
colors = ['r', 'g', 'b', 'y', 'm', 'c']
plt.figure(fig_num)
plt.subplot(3, 3, 1)
plt.plot(X, M.get_ionic('H', 0), label='H0', linestyle=style, c= colors[0])
plt.plot(X, M.get_ionic('H', 1), label='H+', linestyle=style, c= colors[1])
plt.plot(X, M.get_ionic('He', 0), label='He0', linestyle=style, c= colors[2])
plt.plot(X, M.get_ionic('He', 1), label='He+', linestyle=style, c= colors[3])
plt.plot(X, M.get_ionic('He', 2), label='He++', linestyle=style, c= colors[4])
if style== '-':
plt.legend()
plt.title(name)
for i_plot, elem in enumerate(['N', 'O', 'Ne', 'S', 'Ar']):
plt.subplot(3, 3, i_plot + 2)
for i in np.arange(4):
plt.plot(X, M.get_ionic(elem, i), linestyle=style, c=colors[i])
plt.text(np.max(X)/2, 0.9, elem)
if i_plot == 0:
plt.title(M.date_model)
plt.subplot(3, 3, 7)
plt.plot(X, M.ne, label=r'N$_e$', linestyle=style, c='blue')
plt.plot(X, M.nH, label='N$_H$', linestyle=style, c='red')
if style== '-':
plt.legend(loc=3)
plt.xlabel(r'R [10$^{19}$cm]')
plt.subplot(3, 3, 8)
plt.plot(X, M.te, label=r'T$_e$', linestyle=style, c='blue')
if style== '-':
plt.legend(loc=3)
plt.subplot(3, 3, 9)
plt.plot(X, M.log_U, label='log U', c='blue')
if style== '-':
plt.legend()
def search_T(name, models_dir = './', SED = 'BB'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
if SED == 'BB':
T = np.array([float(pc.sextract(M.out['Blackbody'], 'dy ', '*')) for M in Ms])
elif SED == 'WM':
T = np.array([float(pc.sextract(M.out['table star'], 'mod" ', '4.0')) for M in Ms])
QH0 = np.array([M.Q0 for M in Ms])
QHe0 = np.array([M.Q[1::].sum() for M in Ms])
plt.plot(T/1e3, QHe0/QH0)
plt.xlabel('T [kK]')
plt.ylabel('QHe0/QH0')
def print_Xi(name, models_dir = './'):
|
def plot_SED(name, models_dir = './', unit='Jy'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
plt.figure()
plt.subplot(2, 1, 1)
for M in Ms:
plt.plot(M.get_cont_x(unit = 'eV'), np.log10(M.get_cont_y(unit = 'esHz')), label=M.model_name_s)
plt.xlim((10., 60))
plt.ylim((18, 24))
plt.ylabel('log [erg.s-1.Hz-1]')
plt.legend(loc=3)
plt.subplot(2, 1, 2)
for M in Ms:
plt.plot(M.get_cont_x(unit = 'eV'), np.log10(M.get_cont_y(unit = 'Q')), label=M.model_name_s)
plt.xlim((10., 60))
plt.ylim((42., 50))
plt.xlabel('E [eV]')
plt.ylabel('QH0(E)')
# TODO: avoid overlap
for ip in IP:
plt.plot([IP[ip], IP[ip]], [49, 50])
plt.text(IP[ip], 48, ip)
| Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
names = [M.model_name_s for M in Ms]
print(names)
print('H0/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('H', 0),
Ms[1].get_ab_ion_vol('H', 0),
Ms[2].get_ab_ion_vol('H', 0)))
print('H1/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('H', 1),
Ms[1].get_ab_ion_vol('H', 1),
Ms[2].get_ab_ion_vol('H', 1)))
print('He0/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 0),
Ms[1].get_ab_ion_vol('He', 0),
Ms[2].get_ab_ion_vol('He', 0)))
print('He1/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 1),
Ms[1].get_ab_ion_vol('He', 1),
Ms[2].get_ab_ion_vol('He', 1)))
print('He2/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 2),
Ms[1].get_ab_ion_vol('He', 2),
Ms[2].get_ab_ion_vol('He', 2)))
for elem in ['N', 'O', 'Ne', 'S', 'Ar']:
for i in np.arange(4):
print('{0:2s}{1}/H: {2:.2e} {3:.2e} {4:.2e}'.format(elem, i, Ms[0].get_ab_ion_vol(elem, i),
Ms[1].get_ab_ion_vol(elem, i),
Ms[2].get_ab_ion_vol(elem, i))) | identifier_body |
ex6_2.py | import numpy as np
import pyCloudy as pc
import matplotlib.pyplot as plt
from pyneb.utils.physics import IP
# TODO: Add comments
"""
Pregunta 1
"""
def alpha_B(Te):
"""
Recomb. coefficient, case B
"""
T4 = Te/1e4
return 2.6e-13/T4
def U_mean_def(QH0, Ne, Rstr):
"""
\int_0^{Rstr}{U.dV} / \int_0^{Rstr}{dV}
Return the mean over Stromgren volume of U
"""
return 3.* QH0 / (4. * np.pi * pc.CST.CLIGHT * Ne * Rstr**2)
def QH0_def(Rstr, Ne, ff, Te = 1e4):
"""
Volume of
"""
return 4. / 3. * np.pi * Rstr**3 * Ne**2 * ff * alpha_B(Te)
def Rstr(QH0, Ne, ff, Te = 1e4):
return (3. * QH0 / (4. * np.pi * ff * alpha_B(Te) * Ne**2))**(1./3.)
def U_mean(QH0, Ne, ff, Te = 1e4):
return (Ne * QH0 * ff**2 * 3 / (4. * np.pi) * alpha_B(Te)**2)**(1./3.) / pc.CST.CLIGHT
def QH0(U_mean, Ne, ff, Te = 1e4):
return U_mean**3 * pc.CST.CLIGHT**3 * 4. * np.pi / 3. / (Ne * ff**2 * alpha_B(Te)**2)
# --------------------------------------
"""
Pregunta 3
"""
def make_model(name, models_dir='./', SED='BB', qH=None, SED_params=None, n_zones = None, iterate=1):
pc.log_.level=3
abund_AGSS09 = {'He' : 10.93, 'C' : 8.43, 'N' : 7.83, 'O' : 8.69, 'Ne' : 7.93, 'Mg' : 7.6,
'S' : 7.12, 'Ar' : 6.40, 'Fe' : 7.5, 'Cl' : 5.5, 'Si' : 7.51}
for elem in abund_AGSS09:
abund_AGSS09[elem] -= 12
if elem != 'He':
abund_AGSS09[elem] -= 0.3
options = ('no molecules',
'no level2 lines',
'no fine opacities',
'atom h-like levels small',
'atom he-like levels small', | 'element limit off -8',
)
c_input = pc.CloudyInput('{0}/{1}'.format(models_dir, name))
if SED == 'BB':
c_input.set_BB(Teff = SED_params, lumi_unit = 'q(H)', lumi_value = qH)
else:
c_input.set_star(SED = SED, SED_params = SED_params, lumi_unit = 'q(H)', lumi_value=qH)
# Defining the density. You may also use set_dlaw(parameters) if you have a density law defined in dense_fabden.cpp.
c_input.set_cste_density(2, ff = 1.)
# Defining the inner radius. A second parameter would be the outer radius (matter-bounded nebula).
c_input.set_radius(r_in = np.log10(pc.CST.PC/10))
c_input.set_abund(ab_dict = abund_AGSS09, nograins = True)
c_input.set_other(options)
c_input.set_iterate(iterate) # (0) for no iteration, () for one iteration, (N) for N iterations.
c_input.set_sphere() # () or (True) : sphere, or (False): open geometry.
c_input.set_distance(dist=1., unit='kpc', linear=True) # unit can be 'kpc', 'Mpc', 'parsecs', 'cm'. If linear=False, the distance is in log.
if n_zones is not None:
c_input.set_stop('zones {0}'.format(n_zones))
c_input.print_input()
c_input.run_cloudy()
def plot_model(name, models_dir = './', style='-', fig_num = 1):
pc.log_.level=3
M = pc.CloudyModel('{0}/{1}'.format(models_dir, name), read_emis = False)
X = M.radius/1e19
colors = ['r', 'g', 'b', 'y', 'm', 'c']
plt.figure(fig_num)
plt.subplot(3, 3, 1)
plt.plot(X, M.get_ionic('H', 0), label='H0', linestyle=style, c= colors[0])
plt.plot(X, M.get_ionic('H', 1), label='H+', linestyle=style, c= colors[1])
plt.plot(X, M.get_ionic('He', 0), label='He0', linestyle=style, c= colors[2])
plt.plot(X, M.get_ionic('He', 1), label='He+', linestyle=style, c= colors[3])
plt.plot(X, M.get_ionic('He', 2), label='He++', linestyle=style, c= colors[4])
if style== '-':
plt.legend()
plt.title(name)
for i_plot, elem in enumerate(['N', 'O', 'Ne', 'S', 'Ar']):
plt.subplot(3, 3, i_plot + 2)
for i in np.arange(4):
plt.plot(X, M.get_ionic(elem, i), linestyle=style, c=colors[i])
plt.text(np.max(X)/2, 0.9, elem)
if i_plot == 0:
plt.title(M.date_model)
plt.subplot(3, 3, 7)
plt.plot(X, M.ne, label=r'N$_e$', linestyle=style, c='blue')
plt.plot(X, M.nH, label='N$_H$', linestyle=style, c='red')
if style== '-':
plt.legend(loc=3)
plt.xlabel(r'R [10$^{19}$cm]')
plt.subplot(3, 3, 8)
plt.plot(X, M.te, label=r'T$_e$', linestyle=style, c='blue')
if style== '-':
plt.legend(loc=3)
plt.subplot(3, 3, 9)
plt.plot(X, M.log_U, label='log U', c='blue')
if style== '-':
plt.legend()
def search_T(name, models_dir = './', SED = 'BB'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
if SED == 'BB':
T = np.array([float(pc.sextract(M.out['Blackbody'], 'dy ', '*')) for M in Ms])
elif SED == 'WM':
T = np.array([float(pc.sextract(M.out['table star'], 'mod" ', '4.0')) for M in Ms])
QH0 = np.array([M.Q0 for M in Ms])
QHe0 = np.array([M.Q[1::].sum() for M in Ms])
plt.plot(T/1e3, QHe0/QH0)
plt.xlabel('T [kK]')
plt.ylabel('QHe0/QH0')
def print_Xi(name, models_dir = './'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
names = [M.model_name_s for M in Ms]
print(names)
print('H0/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('H', 0),
Ms[1].get_ab_ion_vol('H', 0),
Ms[2].get_ab_ion_vol('H', 0)))
print('H1/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('H', 1),
Ms[1].get_ab_ion_vol('H', 1),
Ms[2].get_ab_ion_vol('H', 1)))
print('He0/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 0),
Ms[1].get_ab_ion_vol('He', 0),
Ms[2].get_ab_ion_vol('He', 0)))
print('He1/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 1),
Ms[1].get_ab_ion_vol('He', 1),
Ms[2].get_ab_ion_vol('He', 1)))
print('He2/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 2),
Ms[1].get_ab_ion_vol('He', 2),
Ms[2].get_ab_ion_vol('He', 2)))
for elem in ['N', 'O', 'Ne', 'S', 'Ar']:
for i in np.arange(4):
print('{0:2s}{1}/H: {2:.2e} {3:.2e} {4:.2e}'.format(elem, i, Ms[0].get_ab_ion_vol(elem, i),
Ms[1].get_ab_ion_vol(elem, i),
Ms[2].get_ab_ion_vol(elem, i)))
def plot_SED(name, models_dir = './', unit='Jy'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
plt.figure()
plt.subplot(2, 1, 1)
for M in Ms:
plt.plot(M.get_cont_x(unit = 'eV'), np.log10(M.get_cont_y(unit = 'esHz')), label=M.model_name_s)
plt.xlim((10., 60))
plt.ylim((18, 24))
plt.ylabel('log [erg.s-1.Hz-1]')
plt.legend(loc=3)
plt.subplot(2, 1, 2)
for M in Ms:
plt.plot(M.get_cont_x(unit = 'eV'), np.log10(M.get_cont_y(unit = 'Q')), label=M.model_name_s)
plt.xlim((10., 60))
plt.ylim((42., 50))
plt.xlabel('E [eV]')
plt.ylabel('QH0(E)')
# TODO: avoid overlap
for ip in IP:
plt.plot([IP[ip], IP[ip]], [49, 50])
plt.text(IP[ip], 48, ip) | 'COSMIC RAY BACKGROUND', | random_line_split |
ex6_2.py | import numpy as np
import pyCloudy as pc
import matplotlib.pyplot as plt
from pyneb.utils.physics import IP
# TODO: Add comments
"""
Pregunta 1
"""
def alpha_B(Te):
"""
Recomb. coefficient, case B
"""
T4 = Te/1e4
return 2.6e-13/T4
def U_mean_def(QH0, Ne, Rstr):
"""
\int_0^{Rstr}{U.dV} / \int_0^{Rstr}{dV}
Return the mean over Stromgren volume of U
"""
return 3.* QH0 / (4. * np.pi * pc.CST.CLIGHT * Ne * Rstr**2)
def QH0_def(Rstr, Ne, ff, Te = 1e4):
"""
Volume of
"""
return 4. / 3. * np.pi * Rstr**3 * Ne**2 * ff * alpha_B(Te)
def Rstr(QH0, Ne, ff, Te = 1e4):
return (3. * QH0 / (4. * np.pi * ff * alpha_B(Te) * Ne**2))**(1./3.)
def U_mean(QH0, Ne, ff, Te = 1e4):
return (Ne * QH0 * ff**2 * 3 / (4. * np.pi) * alpha_B(Te)**2)**(1./3.) / pc.CST.CLIGHT
def QH0(U_mean, Ne, ff, Te = 1e4):
return U_mean**3 * pc.CST.CLIGHT**3 * 4. * np.pi / 3. / (Ne * ff**2 * alpha_B(Te)**2)
# --------------------------------------
"""
Pregunta 3
"""
def make_model(name, models_dir='./', SED='BB', qH=None, SED_params=None, n_zones = None, iterate=1):
pc.log_.level=3
abund_AGSS09 = {'He' : 10.93, 'C' : 8.43, 'N' : 7.83, 'O' : 8.69, 'Ne' : 7.93, 'Mg' : 7.6,
'S' : 7.12, 'Ar' : 6.40, 'Fe' : 7.5, 'Cl' : 5.5, 'Si' : 7.51}
for elem in abund_AGSS09:
abund_AGSS09[elem] -= 12
if elem != 'He':
abund_AGSS09[elem] -= 0.3
options = ('no molecules',
'no level2 lines',
'no fine opacities',
'atom h-like levels small',
'atom he-like levels small',
'COSMIC RAY BACKGROUND',
'element limit off -8',
)
c_input = pc.CloudyInput('{0}/{1}'.format(models_dir, name))
if SED == 'BB':
c_input.set_BB(Teff = SED_params, lumi_unit = 'q(H)', lumi_value = qH)
else:
c_input.set_star(SED = SED, SED_params = SED_params, lumi_unit = 'q(H)', lumi_value=qH)
# Defining the density. You may also use set_dlaw(parameters) if you have a density law defined in dense_fabden.cpp.
c_input.set_cste_density(2, ff = 1.)
# Defining the inner radius. A second parameter would be the outer radius (matter-bounded nebula).
c_input.set_radius(r_in = np.log10(pc.CST.PC/10))
c_input.set_abund(ab_dict = abund_AGSS09, nograins = True)
c_input.set_other(options)
c_input.set_iterate(iterate) # (0) for no iteration, () for one iteration, (N) for N iterations.
c_input.set_sphere() # () or (True) : sphere, or (False): open geometry.
c_input.set_distance(dist=1., unit='kpc', linear=True) # unit can be 'kpc', 'Mpc', 'parsecs', 'cm'. If linear=False, the distance is in log.
if n_zones is not None:
|
c_input.print_input()
c_input.run_cloudy()
def plot_model(name, models_dir = './', style='-', fig_num = 1):
pc.log_.level=3
M = pc.CloudyModel('{0}/{1}'.format(models_dir, name), read_emis = False)
X = M.radius/1e19
colors = ['r', 'g', 'b', 'y', 'm', 'c']
plt.figure(fig_num)
plt.subplot(3, 3, 1)
plt.plot(X, M.get_ionic('H', 0), label='H0', linestyle=style, c= colors[0])
plt.plot(X, M.get_ionic('H', 1), label='H+', linestyle=style, c= colors[1])
plt.plot(X, M.get_ionic('He', 0), label='He0', linestyle=style, c= colors[2])
plt.plot(X, M.get_ionic('He', 1), label='He+', linestyle=style, c= colors[3])
plt.plot(X, M.get_ionic('He', 2), label='He++', linestyle=style, c= colors[4])
if style== '-':
plt.legend()
plt.title(name)
for i_plot, elem in enumerate(['N', 'O', 'Ne', 'S', 'Ar']):
plt.subplot(3, 3, i_plot + 2)
for i in np.arange(4):
plt.plot(X, M.get_ionic(elem, i), linestyle=style, c=colors[i])
plt.text(np.max(X)/2, 0.9, elem)
if i_plot == 0:
plt.title(M.date_model)
plt.subplot(3, 3, 7)
plt.plot(X, M.ne, label=r'N$_e$', linestyle=style, c='blue')
plt.plot(X, M.nH, label='N$_H$', linestyle=style, c='red')
if style== '-':
plt.legend(loc=3)
plt.xlabel(r'R [10$^{19}$cm]')
plt.subplot(3, 3, 8)
plt.plot(X, M.te, label=r'T$_e$', linestyle=style, c='blue')
if style== '-':
plt.legend(loc=3)
plt.subplot(3, 3, 9)
plt.plot(X, M.log_U, label='log U', c='blue')
if style== '-':
plt.legend()
def search_T(name, models_dir = './', SED = 'BB'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
if SED == 'BB':
T = np.array([float(pc.sextract(M.out['Blackbody'], 'dy ', '*')) for M in Ms])
elif SED == 'WM':
T = np.array([float(pc.sextract(M.out['table star'], 'mod" ', '4.0')) for M in Ms])
QH0 = np.array([M.Q0 for M in Ms])
QHe0 = np.array([M.Q[1::].sum() for M in Ms])
plt.plot(T/1e3, QHe0/QH0)
plt.xlabel('T [kK]')
plt.ylabel('QHe0/QH0')
def print_Xi(name, models_dir = './'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
names = [M.model_name_s for M in Ms]
print(names)
print('H0/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('H', 0),
Ms[1].get_ab_ion_vol('H', 0),
Ms[2].get_ab_ion_vol('H', 0)))
print('H1/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('H', 1),
Ms[1].get_ab_ion_vol('H', 1),
Ms[2].get_ab_ion_vol('H', 1)))
print('He0/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 0),
Ms[1].get_ab_ion_vol('He', 0),
Ms[2].get_ab_ion_vol('He', 0)))
print('He1/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 1),
Ms[1].get_ab_ion_vol('He', 1),
Ms[2].get_ab_ion_vol('He', 1)))
print('He2/H: {0:.2e} {1:.2e} {2:.2e}'.format(Ms[0].get_ab_ion_vol('He', 2),
Ms[1].get_ab_ion_vol('He', 2),
Ms[2].get_ab_ion_vol('He', 2)))
for elem in ['N', 'O', 'Ne', 'S', 'Ar']:
for i in np.arange(4):
print('{0:2s}{1}/H: {2:.2e} {3:.2e} {4:.2e}'.format(elem, i, Ms[0].get_ab_ion_vol(elem, i),
Ms[1].get_ab_ion_vol(elem, i),
Ms[2].get_ab_ion_vol(elem, i)))
def plot_SED(name, models_dir = './', unit='Jy'):
Ms = pc.load_models('{0}/{1}'.format(models_dir, name), read_emis = False)
plt.figure()
plt.subplot(2, 1, 1)
for M in Ms:
plt.plot(M.get_cont_x(unit = 'eV'), np.log10(M.get_cont_y(unit = 'esHz')), label=M.model_name_s)
plt.xlim((10., 60))
plt.ylim((18, 24))
plt.ylabel('log [erg.s-1.Hz-1]')
plt.legend(loc=3)
plt.subplot(2, 1, 2)
for M in Ms:
plt.plot(M.get_cont_x(unit = 'eV'), np.log10(M.get_cont_y(unit = 'Q')), label=M.model_name_s)
plt.xlim((10., 60))
plt.ylim((42., 50))
plt.xlabel('E [eV]')
plt.ylabel('QH0(E)')
# TODO: avoid overlap
for ip in IP:
plt.plot([IP[ip], IP[ip]], [49, 50])
plt.text(IP[ip], 48, ip)
| c_input.set_stop('zones {0}'.format(n_zones)) | conditional_block |
installed_packages.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Listing installed packages
use rustc::metadata::filesearch::rust_path;
use path_util::*;
use std::os;
use std::io;
use std::io::fs;
pub fn list_installed_packages(f: |&CrateId| -> bool) -> bool {
let workspaces = rust_path();
for p in workspaces.iter() {
let binfiles = {
let _guard = io::ignore_io_error();
fs::readdir(&p.join("bin"))
};
for exec in binfiles.iter() {
// FIXME (#9639): This needs to handle non-utf8 paths
match exec.filestem_str() {
None => (),
Some(exec_path) => {
if !f(&CrateId::new(exec_path)) {
return false;
}
}
}
}
let libfiles = {
let _guard = io::ignore_io_error();
fs::readdir(&p.join("lib"))
};
for lib in libfiles.iter() {
debug!("Full name: {}", lib.display());
match has_library(lib) {
Some(basename) => {
let parent = p.join("lib");
debug!("parent = {}, child = {}",
parent.display(), lib.display());
let rel_p = lib.path_relative_from(&parent).unwrap();
debug!("Rel: {}", rel_p.display());
let rel_path = rel_p.join(basename);
rel_path.display().with_str(|s| {
debug!("Rel name: {}", s);
f(&CrateId::new(s));
});
}
None => ()
}
};
}
true
}
pub fn | (p: &Path) -> Option<~str> {
let files = {
let _guard = io::ignore_io_error();
fs::readdir(p)
};
for path in files.iter() {
if path.extension_str() == Some(os::consts::DLL_EXTENSION) {
let stuff : &str = path.filestem_str().expect("has_library: weird path");
let mut stuff2 = stuff.split_str("-");
let stuff3: ~[&str] = stuff2.collect();
// argh
let chars_to_drop = os::consts::DLL_PREFIX.len();
return Some(stuff3[0].slice(chars_to_drop, stuff3[0].len()).to_owned());
}
}
None
}
pub fn package_is_installed(p: &CrateId) -> bool {
let mut is_installed = false;
list_installed_packages(|installed| {
if installed == p {
is_installed = true;
false
} else {
true
}
});
is_installed
}
| has_library | identifier_name |
installed_packages.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Listing installed packages
use rustc::metadata::filesearch::rust_path;
use path_util::*;
use std::os;
use std::io;
use std::io::fs; | let workspaces = rust_path();
for p in workspaces.iter() {
let binfiles = {
let _guard = io::ignore_io_error();
fs::readdir(&p.join("bin"))
};
for exec in binfiles.iter() {
// FIXME (#9639): This needs to handle non-utf8 paths
match exec.filestem_str() {
None => (),
Some(exec_path) => {
if !f(&CrateId::new(exec_path)) {
return false;
}
}
}
}
let libfiles = {
let _guard = io::ignore_io_error();
fs::readdir(&p.join("lib"))
};
for lib in libfiles.iter() {
debug!("Full name: {}", lib.display());
match has_library(lib) {
Some(basename) => {
let parent = p.join("lib");
debug!("parent = {}, child = {}",
parent.display(), lib.display());
let rel_p = lib.path_relative_from(&parent).unwrap();
debug!("Rel: {}", rel_p.display());
let rel_path = rel_p.join(basename);
rel_path.display().with_str(|s| {
debug!("Rel name: {}", s);
f(&CrateId::new(s));
});
}
None => ()
}
};
}
true
}
pub fn has_library(p: &Path) -> Option<~str> {
let files = {
let _guard = io::ignore_io_error();
fs::readdir(p)
};
for path in files.iter() {
if path.extension_str() == Some(os::consts::DLL_EXTENSION) {
let stuff : &str = path.filestem_str().expect("has_library: weird path");
let mut stuff2 = stuff.split_str("-");
let stuff3: ~[&str] = stuff2.collect();
// argh
let chars_to_drop = os::consts::DLL_PREFIX.len();
return Some(stuff3[0].slice(chars_to_drop, stuff3[0].len()).to_owned());
}
}
None
}
pub fn package_is_installed(p: &CrateId) -> bool {
let mut is_installed = false;
list_installed_packages(|installed| {
if installed == p {
is_installed = true;
false
} else {
true
}
});
is_installed
} |
pub fn list_installed_packages(f: |&CrateId| -> bool) -> bool { | random_line_split |
installed_packages.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Listing installed packages
use rustc::metadata::filesearch::rust_path;
use path_util::*;
use std::os;
use std::io;
use std::io::fs;
pub fn list_installed_packages(f: |&CrateId| -> bool) -> bool {
let workspaces = rust_path();
for p in workspaces.iter() {
let binfiles = {
let _guard = io::ignore_io_error();
fs::readdir(&p.join("bin"))
};
for exec in binfiles.iter() {
// FIXME (#9639): This needs to handle non-utf8 paths
match exec.filestem_str() {
None => (),
Some(exec_path) => |
}
}
let libfiles = {
let _guard = io::ignore_io_error();
fs::readdir(&p.join("lib"))
};
for lib in libfiles.iter() {
debug!("Full name: {}", lib.display());
match has_library(lib) {
Some(basename) => {
let parent = p.join("lib");
debug!("parent = {}, child = {}",
parent.display(), lib.display());
let rel_p = lib.path_relative_from(&parent).unwrap();
debug!("Rel: {}", rel_p.display());
let rel_path = rel_p.join(basename);
rel_path.display().with_str(|s| {
debug!("Rel name: {}", s);
f(&CrateId::new(s));
});
}
None => ()
}
};
}
true
}
pub fn has_library(p: &Path) -> Option<~str> {
let files = {
let _guard = io::ignore_io_error();
fs::readdir(p)
};
for path in files.iter() {
if path.extension_str() == Some(os::consts::DLL_EXTENSION) {
let stuff : &str = path.filestem_str().expect("has_library: weird path");
let mut stuff2 = stuff.split_str("-");
let stuff3: ~[&str] = stuff2.collect();
// argh
let chars_to_drop = os::consts::DLL_PREFIX.len();
return Some(stuff3[0].slice(chars_to_drop, stuff3[0].len()).to_owned());
}
}
None
}
pub fn package_is_installed(p: &CrateId) -> bool {
let mut is_installed = false;
list_installed_packages(|installed| {
if installed == p {
is_installed = true;
false
} else {
true
}
});
is_installed
}
| {
if !f(&CrateId::new(exec_path)) {
return false;
}
} | conditional_block |
installed_packages.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Listing installed packages
use rustc::metadata::filesearch::rust_path;
use path_util::*;
use std::os;
use std::io;
use std::io::fs;
pub fn list_installed_packages(f: |&CrateId| -> bool) -> bool {
let workspaces = rust_path();
for p in workspaces.iter() {
let binfiles = {
let _guard = io::ignore_io_error();
fs::readdir(&p.join("bin"))
};
for exec in binfiles.iter() {
// FIXME (#9639): This needs to handle non-utf8 paths
match exec.filestem_str() {
None => (),
Some(exec_path) => {
if !f(&CrateId::new(exec_path)) {
return false;
}
}
}
}
let libfiles = {
let _guard = io::ignore_io_error();
fs::readdir(&p.join("lib"))
};
for lib in libfiles.iter() {
debug!("Full name: {}", lib.display());
match has_library(lib) {
Some(basename) => {
let parent = p.join("lib");
debug!("parent = {}, child = {}",
parent.display(), lib.display());
let rel_p = lib.path_relative_from(&parent).unwrap();
debug!("Rel: {}", rel_p.display());
let rel_path = rel_p.join(basename);
rel_path.display().with_str(|s| {
debug!("Rel name: {}", s);
f(&CrateId::new(s));
});
}
None => ()
}
};
}
true
}
pub fn has_library(p: &Path) -> Option<~str> {
let files = {
let _guard = io::ignore_io_error();
fs::readdir(p)
};
for path in files.iter() {
if path.extension_str() == Some(os::consts::DLL_EXTENSION) {
let stuff : &str = path.filestem_str().expect("has_library: weird path");
let mut stuff2 = stuff.split_str("-");
let stuff3: ~[&str] = stuff2.collect();
// argh
let chars_to_drop = os::consts::DLL_PREFIX.len();
return Some(stuff3[0].slice(chars_to_drop, stuff3[0].len()).to_owned());
}
}
None
}
pub fn package_is_installed(p: &CrateId) -> bool | {
let mut is_installed = false;
list_installed_packages(|installed| {
if installed == p {
is_installed = true;
false
} else {
true
}
});
is_installed
} | identifier_body | |
index.js | /*!
* content-type
* Copyright(c) 2015 Douglas Christopher Wilson
* MIT Licensed
*/
'use strict'
/**
* RegExp to match *( ";" parameter ) in RFC 7231 sec 3.1.1.1
*
* parameter = token "=" ( token / quoted-string )
* token = 1*tchar
* tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
* / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
* / DIGIT / ALPHA
* ; any VCHAR, except delimiters
* quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE
* qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text
* obs-text = %x80-FF
* quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
*/
var paramRegExp = /; *([!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+) *= *("(?:[\u000b\u0020\u0021\u0023-\u005b\u005d-\u007e\u0080-\u00ff]|\\[\u000b\u0020-\u00ff])*"|[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+) */g
var textRegExp = /^[\u000b\u0020-\u007e\u0080-\u00ff]+$/
var tokenRegExp = /^[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+$/
/**
* RegExp to match quoted-pair in RFC 7230 sec 3.2.6
*
* quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
* obs-text = %x80-FF
*/
var qescRegExp = /\\([\u000b\u0020-\u00ff])/g
/**
* RegExp to match chars that must be quoted-pair in RFC 7230 sec 3.2.6
*/
var quoteRegExp = /([\\"])/g
/**
* RegExp to match type in RFC 6838
*
* media-type = type "/" subtype
* type = token
* subtype = token
*/
var typeRegExp = /^[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+\/[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+$/
/**
* Module exports.
* @public
*/
exports.format = format
exports.parse = parse
/**
* Format object to media type.
*
* @param {object} obj
* @return {string}
* @public
*/
function format(obj) {
if (!obj || typeof obj !== 'object') {
throw new TypeError('argument obj is required')
}
var parameters = obj.parameters
var type = obj.type
if (!type || !typeRegExp.test(type)) {
throw new TypeError('invalid type')
}
var string = type
// append parameters
if (parameters && typeof parameters === 'object') {
var param
var params = Object.keys(parameters).sort()
for (var i = 0; i < params.length; i++) {
param = params[i]
if (!tokenRegExp.test(param)) {
throw new TypeError('invalid parameter name')
}
string += '; ' + param + '=' + qstring(parameters[param])
}
}
return string
}
/**
* Parse media type to object.
*
* @param {string|object} string
* @return {Object}
* @public
*/
function | (string) {
if (!string) {
throw new TypeError('argument string is required')
}
if (typeof string === 'object') {
// support req/res-like objects as argument
string = getcontenttype(string)
if (typeof string !== 'string') {
throw new TypeError('content-type header is missing from object');
}
}
if (typeof string !== 'string') {
throw new TypeError('argument string is required to be a string')
}
var index = string.indexOf(';')
var type = index !== -1
? string.substr(0, index).trim()
: string.trim()
if (!typeRegExp.test(type)) {
throw new TypeError('invalid media type')
}
var key
var match
var obj = new ContentType(type.toLowerCase())
var value
paramRegExp.lastIndex = index
while (match = paramRegExp.exec(string)) {
if (match.index !== index) {
throw new TypeError('invalid parameter format')
}
index += match[0].length
key = match[1].toLowerCase()
value = match[2]
if (value[0] === '"') {
// remove quotes and escapes
value = value
.substr(1, value.length - 2)
.replace(qescRegExp, '$1')
}
obj.parameters[key] = value
}
if (index !== -1 && index !== string.length) {
throw new TypeError('invalid parameter format')
}
return obj
}
/**
* Get content-type from req/res objects.
*
* @param {object}
* @return {Object}
* @private
*/
function getcontenttype(obj) {
if (typeof obj.getHeader === 'function') {
// res-like
return obj.getHeader('content-type')
}
if (typeof obj.headers === 'object') {
// req-like
return obj.headers && obj.headers['content-type']
}
}
/**
* Quote a string if necessary.
*
* @param {string} val
* @return {string}
* @private
*/
function qstring(val) {
var str = String(val)
// no need to quote tokens
if (tokenRegExp.test(str)) {
return str
}
if (str.length > 0 && !textRegExp.test(str)) {
throw new TypeError('invalid parameter value')
}
return '"' + str.replace(quoteRegExp, '\\$1') + '"'
}
/**
* Class to represent a content type.
* @private
*/
function ContentType(type) {
this.parameters = Object.create(null)
this.type = type
}
| parse | identifier_name |
index.js | /*!
* content-type
* Copyright(c) 2015 Douglas Christopher Wilson
* MIT Licensed
*/
'use strict'
/**
* RegExp to match *( ";" parameter ) in RFC 7231 sec 3.1.1.1
*
* parameter = token "=" ( token / quoted-string )
* token = 1*tchar
* tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
* / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
* / DIGIT / ALPHA
* ; any VCHAR, except delimiters
* quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE
* qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text
* obs-text = %x80-FF
* quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
*/
var paramRegExp = /; *([!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+) *= *("(?:[\u000b\u0020\u0021\u0023-\u005b\u005d-\u007e\u0080-\u00ff]|\\[\u000b\u0020-\u00ff])*"|[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+) */g
var textRegExp = /^[\u000b\u0020-\u007e\u0080-\u00ff]+$/
var tokenRegExp = /^[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+$/
/**
* RegExp to match quoted-pair in RFC 7230 sec 3.2.6
*
* quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
* obs-text = %x80-FF
*/
var qescRegExp = /\\([\u000b\u0020-\u00ff])/g
/**
* RegExp to match chars that must be quoted-pair in RFC 7230 sec 3.2.6
*/
var quoteRegExp = /([\\"])/g
/**
* RegExp to match type in RFC 6838
*
* media-type = type "/" subtype
* type = token
* subtype = token
*/
var typeRegExp = /^[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+\/[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+$/
/**
* Module exports.
* @public
*/
exports.format = format
exports.parse = parse
/**
* Format object to media type.
*
* @param {object} obj
* @return {string}
* @public
*/
function format(obj) {
if (!obj || typeof obj !== 'object') {
throw new TypeError('argument obj is required')
}
var parameters = obj.parameters
var type = obj.type
if (!type || !typeRegExp.test(type)) {
throw new TypeError('invalid type')
}
var string = type
// append parameters
if (parameters && typeof parameters === 'object') {
var param
var params = Object.keys(parameters).sort()
for (var i = 0; i < params.length; i++) {
param = params[i]
if (!tokenRegExp.test(param)) {
throw new TypeError('invalid parameter name')
}
string += '; ' + param + '=' + qstring(parameters[param])
}
}
return string
}
/**
* Parse media type to object.
*
* @param {string|object} string
* @return {Object}
* @public
*/
function parse(string) {
if (!string) {
throw new TypeError('argument string is required')
}
if (typeof string === 'object') {
// support req/res-like objects as argument
string = getcontenttype(string)
if (typeof string !== 'string') {
throw new TypeError('content-type header is missing from object');
}
}
if (typeof string !== 'string') {
throw new TypeError('argument string is required to be a string')
}
var index = string.indexOf(';')
var type = index !== -1
? string.substr(0, index).trim()
: string.trim()
if (!typeRegExp.test(type)) {
throw new TypeError('invalid media type')
}
var key
var match
var obj = new ContentType(type.toLowerCase())
var value
paramRegExp.lastIndex = index
while (match = paramRegExp.exec(string)) {
if (match.index !== index) {
throw new TypeError('invalid parameter format')
}
index += match[0].length
key = match[1].toLowerCase()
value = match[2]
if (value[0] === '"') {
// remove quotes and escapes
value = value
.substr(1, value.length - 2)
.replace(qescRegExp, '$1')
}
obj.parameters[key] = value
}
if (index !== -1 && index !== string.length) {
throw new TypeError('invalid parameter format')
}
return obj
}
/**
* Get content-type from req/res objects.
*
* @param {object}
* @return {Object}
* @private
*/
function getcontenttype(obj) |
/**
* Quote a string if necessary.
*
* @param {string} val
* @return {string}
* @private
*/
function qstring(val) {
var str = String(val)
// no need to quote tokens
if (tokenRegExp.test(str)) {
return str
}
if (str.length > 0 && !textRegExp.test(str)) {
throw new TypeError('invalid parameter value')
}
return '"' + str.replace(quoteRegExp, '\\$1') + '"'
}
/**
* Class to represent a content type.
* @private
*/
function ContentType(type) {
this.parameters = Object.create(null)
this.type = type
}
| {
if (typeof obj.getHeader === 'function') {
// res-like
return obj.getHeader('content-type')
}
if (typeof obj.headers === 'object') {
// req-like
return obj.headers && obj.headers['content-type']
}
} | identifier_body |
index.js | /*!
* content-type
* Copyright(c) 2015 Douglas Christopher Wilson
* MIT Licensed
*/
'use strict'
/**
* RegExp to match *( ";" parameter ) in RFC 7231 sec 3.1.1.1
*
* parameter = token "=" ( token / quoted-string )
* token = 1*tchar
* tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
* / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
* / DIGIT / ALPHA
* ; any VCHAR, except delimiters
* quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE
* qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text
* obs-text = %x80-FF
* quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
*/
var paramRegExp = /; *([!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+) *= *("(?:[\u000b\u0020\u0021\u0023-\u005b\u005d-\u007e\u0080-\u00ff]|\\[\u000b\u0020-\u00ff])*"|[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+) */g
var textRegExp = /^[\u000b\u0020-\u007e\u0080-\u00ff]+$/
var tokenRegExp = /^[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+$/
/**
* RegExp to match quoted-pair in RFC 7230 sec 3.2.6
*
* quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
* obs-text = %x80-FF
*/
var qescRegExp = /\\([\u000b\u0020-\u00ff])/g
/**
* RegExp to match chars that must be quoted-pair in RFC 7230 sec 3.2.6
*/
var quoteRegExp = /([\\"])/g
/**
* RegExp to match type in RFC 6838
*
* media-type = type "/" subtype
* type = token
* subtype = token
*/
var typeRegExp = /^[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+\/[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+$/
/**
* Module exports.
* @public
*/
exports.format = format
exports.parse = parse
/**
* Format object to media type.
*
* @param {object} obj
* @return {string}
* @public
*/
function format(obj) {
if (!obj || typeof obj !== 'object') {
throw new TypeError('argument obj is required')
}
var parameters = obj.parameters
var type = obj.type
if (!type || !typeRegExp.test(type)) {
throw new TypeError('invalid type')
}
var string = type
// append parameters
if (parameters && typeof parameters === 'object') {
var param
var params = Object.keys(parameters).sort()
for (var i = 0; i < params.length; i++) {
param = params[i]
if (!tokenRegExp.test(param)) {
throw new TypeError('invalid parameter name')
}
string += '; ' + param + '=' + qstring(parameters[param])
}
}
return string
}
/**
* Parse media type to object.
*
* @param {string|object} string
* @return {Object}
* @public
*/
function parse(string) {
if (!string) {
throw new TypeError('argument string is required')
}
if (typeof string === 'object') |
if (typeof string !== 'string') {
throw new TypeError('argument string is required to be a string')
}
var index = string.indexOf(';')
var type = index !== -1
? string.substr(0, index).trim()
: string.trim()
if (!typeRegExp.test(type)) {
throw new TypeError('invalid media type')
}
var key
var match
var obj = new ContentType(type.toLowerCase())
var value
paramRegExp.lastIndex = index
while (match = paramRegExp.exec(string)) {
if (match.index !== index) {
throw new TypeError('invalid parameter format')
}
index += match[0].length
key = match[1].toLowerCase()
value = match[2]
if (value[0] === '"') {
// remove quotes and escapes
value = value
.substr(1, value.length - 2)
.replace(qescRegExp, '$1')
}
obj.parameters[key] = value
}
if (index !== -1 && index !== string.length) {
throw new TypeError('invalid parameter format')
}
return obj
}
/**
* Get content-type from req/res objects.
*
* @param {object}
* @return {Object}
* @private
*/
function getcontenttype(obj) {
if (typeof obj.getHeader === 'function') {
// res-like
return obj.getHeader('content-type')
}
if (typeof obj.headers === 'object') {
// req-like
return obj.headers && obj.headers['content-type']
}
}
/**
* Quote a string if necessary.
*
* @param {string} val
* @return {string}
* @private
*/
function qstring(val) {
var str = String(val)
// no need to quote tokens
if (tokenRegExp.test(str)) {
return str
}
if (str.length > 0 && !textRegExp.test(str)) {
throw new TypeError('invalid parameter value')
}
return '"' + str.replace(quoteRegExp, '\\$1') + '"'
}
/**
* Class to represent a content type.
* @private
*/
function ContentType(type) {
this.parameters = Object.create(null)
this.type = type
}
| {
// support req/res-like objects as argument
string = getcontenttype(string)
if (typeof string !== 'string') {
throw new TypeError('content-type header is missing from object');
}
} | conditional_block |
index.js | /*!
* content-type
* Copyright(c) 2015 Douglas Christopher Wilson
* MIT Licensed
*/
'use strict'
/**
* RegExp to match *( ";" parameter ) in RFC 7231 sec 3.1.1.1
*
* parameter = token "=" ( token / quoted-string )
* token = 1*tchar
* tchar = "!" / "#" / "$" / "%" / "&" / "'" / "*"
* / "+" / "-" / "." / "^" / "_" / "`" / "|" / "~"
* / DIGIT / ALPHA
* ; any VCHAR, except delimiters
* quoted-string = DQUOTE *( qdtext / quoted-pair ) DQUOTE
* qdtext = HTAB / SP / %x21 / %x23-5B / %x5D-7E / obs-text
* obs-text = %x80-FF
* quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
*/
var paramRegExp = /; *([!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+) *= *("(?:[\u000b\u0020\u0021\u0023-\u005b\u005d-\u007e\u0080-\u00ff]|\\[\u000b\u0020-\u00ff])*"|[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+) */g
var textRegExp = /^[\u000b\u0020-\u007e\u0080-\u00ff]+$/
var tokenRegExp = /^[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+$/
/**
* RegExp to match quoted-pair in RFC 7230 sec 3.2.6
*
* quoted-pair = "\" ( HTAB / SP / VCHAR / obs-text )
* obs-text = %x80-FF
*/ | var qescRegExp = /\\([\u000b\u0020-\u00ff])/g
/**
* RegExp to match chars that must be quoted-pair in RFC 7230 sec 3.2.6
*/
var quoteRegExp = /([\\"])/g
/**
* RegExp to match type in RFC 6838
*
* media-type = type "/" subtype
* type = token
* subtype = token
*/
var typeRegExp = /^[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+\/[!#$%&'\*\+\-\.\^_`\|~0-9A-Za-z]+$/
/**
* Module exports.
* @public
*/
exports.format = format
exports.parse = parse
/**
* Format object to media type.
*
* @param {object} obj
* @return {string}
* @public
*/
function format(obj) {
if (!obj || typeof obj !== 'object') {
throw new TypeError('argument obj is required')
}
var parameters = obj.parameters
var type = obj.type
if (!type || !typeRegExp.test(type)) {
throw new TypeError('invalid type')
}
var string = type
// append parameters
if (parameters && typeof parameters === 'object') {
var param
var params = Object.keys(parameters).sort()
for (var i = 0; i < params.length; i++) {
param = params[i]
if (!tokenRegExp.test(param)) {
throw new TypeError('invalid parameter name')
}
string += '; ' + param + '=' + qstring(parameters[param])
}
}
return string
}
/**
* Parse media type to object.
*
* @param {string|object} string
* @return {Object}
* @public
*/
function parse(string) {
if (!string) {
throw new TypeError('argument string is required')
}
if (typeof string === 'object') {
// support req/res-like objects as argument
string = getcontenttype(string)
if (typeof string !== 'string') {
throw new TypeError('content-type header is missing from object');
}
}
if (typeof string !== 'string') {
throw new TypeError('argument string is required to be a string')
}
var index = string.indexOf(';')
var type = index !== -1
? string.substr(0, index).trim()
: string.trim()
if (!typeRegExp.test(type)) {
throw new TypeError('invalid media type')
}
var key
var match
var obj = new ContentType(type.toLowerCase())
var value
paramRegExp.lastIndex = index
while (match = paramRegExp.exec(string)) {
if (match.index !== index) {
throw new TypeError('invalid parameter format')
}
index += match[0].length
key = match[1].toLowerCase()
value = match[2]
if (value[0] === '"') {
// remove quotes and escapes
value = value
.substr(1, value.length - 2)
.replace(qescRegExp, '$1')
}
obj.parameters[key] = value
}
if (index !== -1 && index !== string.length) {
throw new TypeError('invalid parameter format')
}
return obj
}
/**
* Get content-type from req/res objects.
*
* @param {object}
* @return {Object}
* @private
*/
function getcontenttype(obj) {
if (typeof obj.getHeader === 'function') {
// res-like
return obj.getHeader('content-type')
}
if (typeof obj.headers === 'object') {
// req-like
return obj.headers && obj.headers['content-type']
}
}
/**
* Quote a string if necessary.
*
* @param {string} val
* @return {string}
* @private
*/
function qstring(val) {
var str = String(val)
// no need to quote tokens
if (tokenRegExp.test(str)) {
return str
}
if (str.length > 0 && !textRegExp.test(str)) {
throw new TypeError('invalid parameter value')
}
return '"' + str.replace(quoteRegExp, '\\$1') + '"'
}
/**
* Class to represent a content type.
* @private
*/
function ContentType(type) {
this.parameters = Object.create(null)
this.type = type
} | random_line_split | |
mod.rs | mod boss;
mod player;
use Item;
use num::integer::Integer;
pub use self::boss::Boss;
pub use self::player::Player;
pub fn player(weapon: &Item, armor: &Item, rings: Vec<&Item>) -> Player {
Player::new(weapon, armor, rings)
}
pub fn boss(hit_points: u32, damage: u32, armor: u32) -> Boss {
Boss::new(hit_points, damage, armor)
}
pub trait Character {
fn hit_points(&self) -> u32;
fn damage(&self) -> u32;
fn armor(&self) -> u32;
fn beats(&self, other: &Character) -> bool {
let win_after = turns_to_beat(self.damage(), other.hit_points(), other.armor());
let lose_after = turns_to_beat(other.damage(), self.hit_points(), self.armor());
win_after <= lose_after
}
}
fn turns_to_beat(damage: u32, hit_points: u32, armor: u32) -> u32 {
let effective_damage = if damage > armor { damage - armor } else | ;
let (div, rem) = hit_points.div_rem(&effective_damage);
if rem == 0 { div } else { div + 1 }
}
| { 1 } | conditional_block |
mod.rs | mod boss;
mod player;
use Item;
use num::integer::Integer;
pub use self::boss::Boss;
pub use self::player::Player;
pub fn player(weapon: &Item, armor: &Item, rings: Vec<&Item>) -> Player {
Player::new(weapon, armor, rings)
}
pub fn boss(hit_points: u32, damage: u32, armor: u32) -> Boss {
Boss::new(hit_points, damage, armor)
}
pub trait Character {
fn hit_points(&self) -> u32;
fn damage(&self) -> u32;
fn armor(&self) -> u32;
fn beats(&self, other: &Character) -> bool {
let win_after = turns_to_beat(self.damage(), other.hit_points(), other.armor());
let lose_after = turns_to_beat(other.damage(), self.hit_points(), self.armor());
win_after <= lose_after
}
}
fn | (damage: u32, hit_points: u32, armor: u32) -> u32 {
let effective_damage = if damage > armor { damage - armor } else { 1 };
let (div, rem) = hit_points.div_rem(&effective_damage);
if rem == 0 { div } else { div + 1 }
}
| turns_to_beat | identifier_name |
mod.rs | mod boss;
mod player;
use Item;
use num::integer::Integer;
pub use self::boss::Boss;
pub use self::player::Player;
pub fn player(weapon: &Item, armor: &Item, rings: Vec<&Item>) -> Player |
pub fn boss(hit_points: u32, damage: u32, armor: u32) -> Boss {
Boss::new(hit_points, damage, armor)
}
pub trait Character {
fn hit_points(&self) -> u32;
fn damage(&self) -> u32;
fn armor(&self) -> u32;
fn beats(&self, other: &Character) -> bool {
let win_after = turns_to_beat(self.damage(), other.hit_points(), other.armor());
let lose_after = turns_to_beat(other.damage(), self.hit_points(), self.armor());
win_after <= lose_after
}
}
fn turns_to_beat(damage: u32, hit_points: u32, armor: u32) -> u32 {
let effective_damage = if damage > armor { damage - armor } else { 1 };
let (div, rem) = hit_points.div_rem(&effective_damage);
if rem == 0 { div } else { div + 1 }
}
| {
Player::new(weapon, armor, rings)
} | identifier_body |
mod.rs | mod boss;
mod player;
use Item;
use num::integer::Integer;
pub use self::boss::Boss;
pub use self::player::Player;
pub fn player(weapon: &Item, armor: &Item, rings: Vec<&Item>) -> Player {
Player::new(weapon, armor, rings)
}
pub fn boss(hit_points: u32, damage: u32, armor: u32) -> Boss {
Boss::new(hit_points, damage, armor)
}
pub trait Character { | fn damage(&self) -> u32;
fn armor(&self) -> u32;
fn beats(&self, other: &Character) -> bool {
let win_after = turns_to_beat(self.damage(), other.hit_points(), other.armor());
let lose_after = turns_to_beat(other.damage(), self.hit_points(), self.armor());
win_after <= lose_after
}
}
fn turns_to_beat(damage: u32, hit_points: u32, armor: u32) -> u32 {
let effective_damage = if damage > armor { damage - armor } else { 1 };
let (div, rem) = hit_points.div_rem(&effective_damage);
if rem == 0 { div } else { div + 1 }
} | fn hit_points(&self) -> u32; | random_line_split |
delete.js | var accessToken="i7LM4k7JcSKs4ucCpxpgNPcs3i1kRbNKyUE8aPGKZzZWASagz9uZiuLgmgDgBJzY";
$(window).load(function() {
$('#pseudo_submit').click(function() {
NProgress.start();
$.ajax({
type: "POST",
url: "../db-app/signdown.php",
dataType: 'text',
data: {
password: $("#basic_pass").val()
},
complete: function(xhr, statusText){
NProgress.done();
},
success: function (res) {
console.log(res);
if(res.indexOf("ERR")==-1) {
delete_schedule(res);
}
else {
new PNotify({
title: 'Error :(',
text: "Something went wrong",
type: 'error',
styling: 'bootstrap3'
});
}
}
});
});
});
function | (id) {
NProgress.start();
var del_url = "https://api.whenhub.com/api/schedules/"+id+"?access_token="+accessToken;
$.ajax({
type: "DELETE",
url: del_url,
complete: function(xhr, statusText){
NProgress.done();
console.log(xhr.status+" "+statusText);
},
success: function (data) {
console.log(data);
window.location.href = "account_deleted.php";
},
error: function(xhr, statusText, err){
console.log(xhr);
console.log(statusText);
console.log(err);
}
});
} | delete_schedule | identifier_name |
delete.js | var accessToken="i7LM4k7JcSKs4ucCpxpgNPcs3i1kRbNKyUE8aPGKZzZWASagz9uZiuLgmgDgBJzY";
$(window).load(function() {
$('#pseudo_submit').click(function() {
NProgress.start();
$.ajax({
type: "POST",
url: "../db-app/signdown.php",
dataType: 'text',
data: {
password: $("#basic_pass").val()
},
complete: function(xhr, statusText){
NProgress.done();
},
success: function (res) {
console.log(res);
if(res.indexOf("ERR")==-1) {
delete_schedule(res);
}
else {
new PNotify({
title: 'Error :(',
text: "Something went wrong",
type: 'error',
styling: 'bootstrap3'
});
}
}
});
});
});
function delete_schedule(id) | {
NProgress.start();
var del_url = "https://api.whenhub.com/api/schedules/"+id+"?access_token="+accessToken;
$.ajax({
type: "DELETE",
url: del_url,
complete: function(xhr, statusText){
NProgress.done();
console.log(xhr.status+" "+statusText);
},
success: function (data) {
console.log(data);
window.location.href = "account_deleted.php";
},
error: function(xhr, statusText, err){
console.log(xhr);
console.log(statusText);
console.log(err);
}
});
} | identifier_body | |
delete.js | var accessToken="i7LM4k7JcSKs4ucCpxpgNPcs3i1kRbNKyUE8aPGKZzZWASagz9uZiuLgmgDgBJzY";
$(window).load(function() {
$('#pseudo_submit').click(function() {
NProgress.start();
$.ajax({
type: "POST",
url: "../db-app/signdown.php",
dataType: 'text',
data: {
password: $("#basic_pass").val()
},
complete: function(xhr, statusText){
NProgress.done();
},
success: function (res) {
console.log(res);
if(res.indexOf("ERR")==-1) {
delete_schedule(res);
}
else {
new PNotify({
title: 'Error :(',
text: "Something went wrong",
type: 'error',
styling: 'bootstrap3'
});
}
}
});
});
});
function delete_schedule(id) {
NProgress.start();
var del_url = "https://api.whenhub.com/api/schedules/"+id+"?access_token="+accessToken; |
$.ajax({
type: "DELETE",
url: del_url,
complete: function(xhr, statusText){
NProgress.done();
console.log(xhr.status+" "+statusText);
},
success: function (data) {
console.log(data);
window.location.href = "account_deleted.php";
},
error: function(xhr, statusText, err){
console.log(xhr);
console.log(statusText);
console.log(err);
}
});
} | random_line_split | |
delete.js | var accessToken="i7LM4k7JcSKs4ucCpxpgNPcs3i1kRbNKyUE8aPGKZzZWASagz9uZiuLgmgDgBJzY";
$(window).load(function() {
$('#pseudo_submit').click(function() {
NProgress.start();
$.ajax({
type: "POST",
url: "../db-app/signdown.php",
dataType: 'text',
data: {
password: $("#basic_pass").val()
},
complete: function(xhr, statusText){
NProgress.done();
},
success: function (res) {
console.log(res);
if(res.indexOf("ERR")==-1) {
delete_schedule(res);
}
else |
}
});
});
});
function delete_schedule(id) {
NProgress.start();
var del_url = "https://api.whenhub.com/api/schedules/"+id+"?access_token="+accessToken;
$.ajax({
type: "DELETE",
url: del_url,
complete: function(xhr, statusText){
NProgress.done();
console.log(xhr.status+" "+statusText);
},
success: function (data) {
console.log(data);
window.location.href = "account_deleted.php";
},
error: function(xhr, statusText, err){
console.log(xhr);
console.log(statusText);
console.log(err);
}
});
} | {
new PNotify({
title: 'Error :(',
text: "Something went wrong",
type: 'error',
styling: 'bootstrap3'
});
} | conditional_block |
iterable_differs.d.ts | import { ChangeDetectorRef } from '../change_detector_ref';
import { Provider } from 'angular2/src/core/di';
export interface IterableDiffer {
diff(object: Object): any;
onDestroy(): any;
}
/**
* Provides a factory for {@link IterableDiffer}.
*/
export interface IterableDifferFactory {
supports(objects: Object): boolean;
create(cdRef: ChangeDetectorRef): IterableDiffer;
}
/**
* A repository of different iterable diffing strategies used by NgFor, NgClass, and others.
*/
export declare class | {
factories: IterableDifferFactory[];
constructor(factories: IterableDifferFactory[]);
static create(factories: IterableDifferFactory[], parent?: IterableDiffers): IterableDiffers;
/**
* Takes an array of {@link IterableDifferFactory} and returns a provider used to extend the
* inherited {@link IterableDiffers} instance with the provided factories and return a new
* {@link IterableDiffers} instance.
*
* The following example shows how to extend an existing list of factories,
* which will only be applied to the injector for this component and its children.
* This step is all that's required to make a new {@link IterableDiffer} available.
*
* ### Example
*
* ```
* @Component({
* viewProviders: [
* IterableDiffers.extend([new ImmutableListDiffer()])
* ]
* })
* ```
*/
static extend(factories: IterableDifferFactory[]): Provider;
find(iterable: Object): IterableDifferFactory;
}
| IterableDiffers | identifier_name |
iterable_differs.d.ts | import { ChangeDetectorRef } from '../change_detector_ref';
import { Provider } from 'angular2/src/core/di';
export interface IterableDiffer {
diff(object: Object): any;
onDestroy(): any;
}
/**
* Provides a factory for {@link IterableDiffer}.
*/
export interface IterableDifferFactory {
supports(objects: Object): boolean;
create(cdRef: ChangeDetectorRef): IterableDiffer;
}
/**
| constructor(factories: IterableDifferFactory[]);
static create(factories: IterableDifferFactory[], parent?: IterableDiffers): IterableDiffers;
/**
* Takes an array of {@link IterableDifferFactory} and returns a provider used to extend the
* inherited {@link IterableDiffers} instance with the provided factories and return a new
* {@link IterableDiffers} instance.
*
* The following example shows how to extend an existing list of factories,
* which will only be applied to the injector for this component and its children.
* This step is all that's required to make a new {@link IterableDiffer} available.
*
* ### Example
*
* ```
* @Component({
* viewProviders: [
* IterableDiffers.extend([new ImmutableListDiffer()])
* ]
* })
* ```
*/
static extend(factories: IterableDifferFactory[]): Provider;
find(iterable: Object): IterableDifferFactory;
} | * A repository of different iterable diffing strategies used by NgFor, NgClass, and others.
*/
export declare class IterableDiffers {
factories: IterableDifferFactory[];
| random_line_split |
parallel.rs | // Benchmark from https://github.com/lschmierer/ecs_bench
#![feature(test)]
extern crate test;
use calx_ecs::{build_ecs, Entity};
use serde::{Deserialize, Serialize};
use test::Bencher;
pub const N: usize = 10000;
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct R {
pub x: f32,
}
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct W1 {
pub x: f32,
}
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct W2 {
pub x: f32,
}
build_ecs! {
r: R,
w1: W1,
w2: W2,
}
fn build() -> Ecs {
let mut ecs = Ecs::new();
for _ in 0..N {
let e = ecs.make();
ecs.r.insert(e, R { x: 0.0 });
ecs.w1.insert(e, W1 { x: 0.0 });
ecs.w2.insert(e, W2 { x: 0.0 });
}
ecs
}
#[bench]
fn bench_build(b: &mut Bencher) |
#[bench]
fn bench_update(b: &mut Bencher) {
let mut ecs = build();
b.iter(|| {
let es: Vec<Entity> = ecs.r.ent_iter().cloned().collect();
for &e in &es {
let rx = ecs.r[e].x;
ecs.w1.get_mut(e).map(|w1| w1.x = rx);
}
for &e in &es {
let rx = ecs.r[e].x;
ecs.w2.get_mut(e).map(|w2| w2.x = rx);
}
});
}
| { b.iter(build); } | identifier_body |
parallel.rs | // Benchmark from https://github.com/lschmierer/ecs_bench
#![feature(test)]
extern crate test;
use calx_ecs::{build_ecs, Entity};
use serde::{Deserialize, Serialize};
use test::Bencher;
| pub const N: usize = 10000;
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct R {
pub x: f32,
}
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct W1 {
pub x: f32,
}
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct W2 {
pub x: f32,
}
build_ecs! {
r: R,
w1: W1,
w2: W2,
}
fn build() -> Ecs {
let mut ecs = Ecs::new();
for _ in 0..N {
let e = ecs.make();
ecs.r.insert(e, R { x: 0.0 });
ecs.w1.insert(e, W1 { x: 0.0 });
ecs.w2.insert(e, W2 { x: 0.0 });
}
ecs
}
#[bench]
fn bench_build(b: &mut Bencher) { b.iter(build); }
#[bench]
fn bench_update(b: &mut Bencher) {
let mut ecs = build();
b.iter(|| {
let es: Vec<Entity> = ecs.r.ent_iter().cloned().collect();
for &e in &es {
let rx = ecs.r[e].x;
ecs.w1.get_mut(e).map(|w1| w1.x = rx);
}
for &e in &es {
let rx = ecs.r[e].x;
ecs.w2.get_mut(e).map(|w2| w2.x = rx);
}
});
} | random_line_split | |
parallel.rs | // Benchmark from https://github.com/lschmierer/ecs_bench
#![feature(test)]
extern crate test;
use calx_ecs::{build_ecs, Entity};
use serde::{Deserialize, Serialize};
use test::Bencher;
pub const N: usize = 10000;
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct R {
pub x: f32,
}
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct W1 {
pub x: f32,
}
#[derive(Copy, Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct W2 {
pub x: f32,
}
build_ecs! {
r: R,
w1: W1,
w2: W2,
}
fn build() -> Ecs {
let mut ecs = Ecs::new();
for _ in 0..N {
let e = ecs.make();
ecs.r.insert(e, R { x: 0.0 });
ecs.w1.insert(e, W1 { x: 0.0 });
ecs.w2.insert(e, W2 { x: 0.0 });
}
ecs
}
#[bench]
fn bench_build(b: &mut Bencher) { b.iter(build); }
#[bench]
fn | (b: &mut Bencher) {
let mut ecs = build();
b.iter(|| {
let es: Vec<Entity> = ecs.r.ent_iter().cloned().collect();
for &e in &es {
let rx = ecs.r[e].x;
ecs.w1.get_mut(e).map(|w1| w1.x = rx);
}
for &e in &es {
let rx = ecs.r[e].x;
ecs.w2.get_mut(e).map(|w2| w2.x = rx);
}
});
}
| bench_update | identifier_name |
mod.rs | #[macro_use]
pub mod macros;
/// Constants like memory locations
pub mod consts;
/// Debugging support
pub mod debug;
/// Devices
pub mod device;
/// Global descriptor table
pub mod gdt;
/// Graphical debug
#[cfg(feature = "graphical_debug")]
mod graphical_debug;
/// Interrupt instructions
#[macro_use]
pub mod interrupt;
/// Interrupt descriptor table
pub mod idt;
| pub mod ipi;
/// Paging
pub mod paging;
/// Page table isolation
pub mod pti;
pub mod rmm;
/// Initialization and start function
pub mod start;
/// Stop function
pub mod stop;
pub use ::rmm::X8664Arch as CurrentRmmArch;
// Flags
pub mod flags {
pub const SHIFT_SINGLESTEP: usize = 8;
pub const FLAG_SINGLESTEP: usize = 1 << SHIFT_SINGLESTEP;
pub const FLAG_INTERRUPTS: usize = 1 << 9;
} | /// Inter-processor interrupts | random_line_split |
moves-based-on-type-no-recursive-stack-closure.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests correct kind-checking of the reason stack closures without the :Copy
// bound must be noncopyable. For details see
// http://smallcultfollowing.com/babysteps/blog/2013/04/30/the-case-of-the-recurring-closure/
extern crate debug;
struct R<'a> {
// This struct is needed to create the
// otherwise infinite type of a fn that
// accepts itself as argument:
c: |&mut R, bool|: 'a | let mut x = Some("hello".to_string());
conspirator(|f, writer| {
if writer {
x = None;
} else {
match x {
Some(ref msg) => {
(f.c)(f, true);
//~^ ERROR: cannot borrow `*f` as mutable because
println!("{:?}", msg);
},
None => fail!("oops"),
}
}
})
}
fn conspirator(f: |&mut R, bool|) {
let mut r = R {c: f};
f(&mut r, false) //~ ERROR use of moved value
}
fn main() { innocent_looking_victim() } | }
fn innocent_looking_victim() { | random_line_split |
moves-based-on-type-no-recursive-stack-closure.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests correct kind-checking of the reason stack closures without the :Copy
// bound must be noncopyable. For details see
// http://smallcultfollowing.com/babysteps/blog/2013/04/30/the-case-of-the-recurring-closure/
extern crate debug;
struct R<'a> {
// This struct is needed to create the
// otherwise infinite type of a fn that
// accepts itself as argument:
c: |&mut R, bool|: 'a
}
fn innocent_looking_victim() |
fn conspirator(f: |&mut R, bool|) {
let mut r = R {c: f};
f(&mut r, false) //~ ERROR use of moved value
}
fn main() { innocent_looking_victim() }
| {
let mut x = Some("hello".to_string());
conspirator(|f, writer| {
if writer {
x = None;
} else {
match x {
Some(ref msg) => {
(f.c)(f, true);
//~^ ERROR: cannot borrow `*f` as mutable because
println!("{:?}", msg);
},
None => fail!("oops"),
}
}
})
} | identifier_body |
moves-based-on-type-no-recursive-stack-closure.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests correct kind-checking of the reason stack closures without the :Copy
// bound must be noncopyable. For details see
// http://smallcultfollowing.com/babysteps/blog/2013/04/30/the-case-of-the-recurring-closure/
extern crate debug;
struct R<'a> {
// This struct is needed to create the
// otherwise infinite type of a fn that
// accepts itself as argument:
c: |&mut R, bool|: 'a
}
fn innocent_looking_victim() {
let mut x = Some("hello".to_string());
conspirator(|f, writer| {
if writer {
x = None;
} else |
})
}
fn conspirator(f: |&mut R, bool|) {
let mut r = R {c: f};
f(&mut r, false) //~ ERROR use of moved value
}
fn main() { innocent_looking_victim() }
| {
match x {
Some(ref msg) => {
(f.c)(f, true);
//~^ ERROR: cannot borrow `*f` as mutable because
println!("{:?}", msg);
},
None => fail!("oops"),
}
} | conditional_block |
moves-based-on-type-no-recursive-stack-closure.rs | // Copyright 2013 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// Tests correct kind-checking of the reason stack closures without the :Copy
// bound must be noncopyable. For details see
// http://smallcultfollowing.com/babysteps/blog/2013/04/30/the-case-of-the-recurring-closure/
extern crate debug;
struct R<'a> {
// This struct is needed to create the
// otherwise infinite type of a fn that
// accepts itself as argument:
c: |&mut R, bool|: 'a
}
fn | () {
let mut x = Some("hello".to_string());
conspirator(|f, writer| {
if writer {
x = None;
} else {
match x {
Some(ref msg) => {
(f.c)(f, true);
//~^ ERROR: cannot borrow `*f` as mutable because
println!("{:?}", msg);
},
None => fail!("oops"),
}
}
})
}
fn conspirator(f: |&mut R, bool|) {
let mut r = R {c: f};
f(&mut r, false) //~ ERROR use of moved value
}
fn main() { innocent_looking_victim() }
| innocent_looking_victim | identifier_name |
query-generator.js | 'use strict';
/* jshint -W110 */
var Utils = require('../../utils')
, util = require('util')
, DataTypes = require('../../data-types')
, AbstractQueryGenerator = require('../abstract/query-generator')
, primaryKeys = {}
, semver = require('semver')
, _ = require('lodash');
var QueryGenerator = {
options: {},
dialect: 'postgres',
setSearchPath: function(searchPath) {
var query = 'SET search_path to <%= searchPath%>;';
return Utils._.template(query)({searchPath: searchPath});
},
createSchema: function(schema) {
var query = 'CREATE SCHEMA <%= schema%>;';
return Utils._.template(query)({schema: schema});
},
dropSchema: function(schema) {
var query = 'DROP SCHEMA IF EXISTS <%= schema%> CASCADE;';
return Utils._.template(query)({schema: schema});
},
showSchemasQuery: function() {
return "SELECT schema_name FROM information_schema.schemata WHERE schema_name <> 'information_schema' AND schema_name != 'public' AND schema_name !~ E'^pg_';";
},
versionQuery: function() {
return 'SHOW SERVER_VERSION';
},
createTableQuery: function(tableName, attributes, options) {
var self = this;
options = Utils._.extend({
}, options || {});
primaryKeys[tableName] = [];
var databaseVersion = Utils._.get(self, 'sequelize.options.databaseVersion', 0);
//Postgres 9.0 does not support CREATE TABLE IF NOT EXISTS, 9.1 and above do
var query = 'CREATE TABLE ' +
( (databaseVersion === 0 || semver.gte(databaseVersion, '9.1.0')) ? 'IF NOT EXISTS ' : '') +
'<%= table %> (<%= attributes%>)<%= comments %>'
, comments = ''
, attrStr = []
, i;
if (options.comment && Utils._.isString(options.comment)) {
comments += '; COMMENT ON TABLE <%= table %> IS ' + this.escape(options.comment);
}
for (var attr in attributes) {
if ((i = attributes[attr].indexOf('COMMENT')) !== -1) {
// Move comment to a separate query
comments += '; ' + attributes[attr].substring(i);
attributes[attr] = attributes[attr].substring(0, i);
}
var dataType = this.pgDataTypeMapping(tableName, attr, attributes[attr]);
attrStr.push(this.quoteIdentifier(attr) + ' ' + dataType);
}
var values = {
table: this.quoteTable(tableName),
attributes: attrStr.join(', '),
comments: Utils._.template(comments)({ table: this.quoteTable(tableName)})
};
if (!!options.uniqueKeys) {
Utils._.each(options.uniqueKeys, function(columns) {
if (!columns.singleField) { // If it's a single field its handled in column def, not as an index
values.attributes += ', UNIQUE (' + columns.fields.map(function(f) { return self.quoteIdentifiers(f); }).join(', ') + ')';
}
});
}
var pks = primaryKeys[tableName].map(function(pk) {
return this.quoteIdentifier(pk);
}.bind(this)).join(',');
if (pks.length > 0) |
return Utils._.template(query)(values).trim() + ';';
},
dropTableQuery: function(tableName, options) {
options = options || {};
var query = 'DROP TABLE IF EXISTS <%= table %><%= cascade %>;';
return Utils._.template(query)({
table: this.quoteTable(tableName),
cascade: options.cascade ? ' CASCADE' : ''
});
},
showTablesQuery: function() {
return "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type LIKE '%TABLE' AND table_name != 'spatial_ref_sys';";
},
describeTableQuery: function(tableName, schema) {
if (!schema) {
schema = 'public';
}
var query = 'SELECT tc.constraint_type as "Constraint", c.column_name as "Field", c.column_default as "Default", c.is_nullable as "Null", ' +
"CASE WHEN c.udt_name = 'hstore' " +
'THEN c.udt_name ELSE c.data_type END as "Type", (SELECT array_agg(e.enumlabel) ' +
'FROM pg_catalog.pg_type t JOIN pg_catalog.pg_enum e ON t.oid=e.enumtypid WHERE t.typname=c.udt_name) AS "special" ' +
'FROM information_schema.columns c ' +
'LEFT JOIN information_schema.key_column_usage cu ON c.table_name = cu.table_name AND cu.column_name = c.column_name ' +
'LEFT JOIN information_schema.table_constraints tc ON c.table_name = tc.table_name AND cu.column_name = c.column_name AND tc.constraint_type = \'PRIMARY KEY\' ' +
' WHERE c.table_name = <%= table %> AND c.table_schema = <%= schema %> ';
return Utils._.template(query)({
table: this.escape(tableName),
schema: this.escape(schema)
});
},
// A recursive parser for nested where conditions
parseConditionObject: function(_conditions, path) {
var self = this;
path = path || [];
return Utils._.reduce(_conditions, function (r, v, k) { // result, key, value
if (Utils._.isObject(v)) {
r = r.concat(self.parseConditionObject(v, path.concat(k))); // Recursively parse objects
} else {
r.push({ path: path.concat(k), value: v });
}
return r;
}, []);
},
handleSequelizeMethod: function (smth, tableName, factory, options, prepend) {
if (smth instanceof Utils.json) {
// Parse nested object
if (smth.conditions) {
var conditions = _.map(this.parseConditionObject(smth.conditions), function generateSql(condition) {
return util.format("%s#>>'{%s}' = '%s'",
_.first(condition.path),
_.tail(condition.path).join(','),
condition.value);
});
return conditions.join(' and ');
} else if (smth.path) {
var str;
// Allow specifying conditions using the postgres json syntax
if (_.some(['->', '->>', '#>'], _.partial(_.includes, smth.path))) {
str = smth.path;
} else {
// Also support json dot notation
var path = smth.path.split('.');
str = util.format("%s#>>'{%s}'",
_.first(path),
_.tail(path).join(','));
}
if (smth.value) {
str += util.format(' = %s', this.escape(smth.value));
}
return str;
}
} else {
return AbstractQueryGenerator.handleSequelizeMethod.call(this, smth, tableName, factory, options, prepend);
}
},
addColumnQuery: function(table, key, dataType) {
var query = 'ALTER TABLE <%= table %> ADD COLUMN <%= attribute %>;'
, dbDataType = this.attributeToSQL(dataType, {context: 'addColumn'})
, attribute;
if (dataType.type && dataType.type instanceof DataTypes.ENUM || dataType instanceof DataTypes.ENUM) {
query = this.pgEnum(table, key, dataType) + query;
}
attribute = Utils._.template('<%= key %> <%= definition %>')({
key: this.quoteIdentifier(key),
definition: this.pgDataTypeMapping(table, key, dbDataType)
});
return Utils._.template(query)({
table: this.quoteTable(table),
attribute: attribute
});
},
removeColumnQuery: function(tableName, attributeName) {
var query = 'ALTER TABLE <%= tableName %> DROP COLUMN <%= attributeName %>;';
return Utils._.template(query)({
tableName: this.quoteTable(tableName),
attributeName: this.quoteIdentifier(attributeName)
});
},
changeColumnQuery: function(tableName, attributes) {
var query = 'ALTER TABLE <%= tableName %> ALTER COLUMN <%= query %>;'
, sql = [];
for (var attributeName in attributes) {
var definition = this.pgDataTypeMapping(tableName, attributeName, attributes[attributeName]);
var attrSql = '';
if (definition.indexOf('NOT NULL') > 0) {
attrSql += Utils._.template(query)({
tableName: this.quoteTable(tableName),
query: this.quoteIdentifier(attributeName) + ' SET NOT NULL'
});
definition = definition.replace('NOT NULL', '').trim();
} else if (!definition.match(/REFERENCES/)) {
attrSql += Utils._.template(query)({
tableName: this.quoteTable(tableName),
query: this.quoteIdentifier(attributeName) + ' DROP NOT NULL'
});
}
if (definition.indexOf('DEFAULT') > 0) {
attrSql += Utils._.template(query)({
tableName: this.quoteTable(tableName),
query: this.quoteIdentifier(attributeName) + ' SET DEFAULT ' + definition.match(/DEFAULT ([^;]+)/)[1]
});
definition = definition.replace(/(DEFAULT[^;]+)/, '').trim();
} else if (!definition.match(/REFERENCES/)) {
attrSql += Utils._.template(query)({
tableName: this.quoteTable(tableName),
query: this.quoteIdentifier(attributeName) + ' DROP DEFAULT'
});
}
if (attributes[attributeName].match(/^ENUM\(/)) {
query = this.pgEnum(tableName, attributeName, attributes[attributeName]) + query;
definition = definition.replace(/^ENUM\(.+\)/, this.quoteIdentifier('enum_' + tableName + '_' + attributeName));
definition += ' USING (' + this.quoteIdentifier(attributeName) + '::' + this.quoteIdentifier(definition) + ')';
}
if (definition.match(/UNIQUE;*$/)) {
definition = definition.replace(/UNIQUE;*$/, '');
attrSql += Utils._.template(query.replace('ALTER COLUMN', ''))({
tableName: this.quoteTable(tableName),
query: 'ADD CONSTRAINT ' + this.quoteIdentifier(attributeName + '_unique_idx') + ' UNIQUE (' + this.quoteIdentifier(attributeName) + ')'
});
}
if (definition.match(/REFERENCES/)) {
definition = definition.replace(/.+?(?=REFERENCES)/,'');
attrSql += Utils._.template(query.replace('ALTER COLUMN', ''))({
tableName: this.quoteTable(tableName),
query: 'ADD CONSTRAINT ' + this.quoteIdentifier(attributeName + '_foreign_idx') + ' FOREIGN KEY (' + this.quoteIdentifier(attributeName) + ') ' + definition
});
} else {
attrSql += Utils._.template(query)({
tableName: this.quoteTable(tableName),
query: this.quoteIdentifier(attributeName) + ' TYPE ' + definition
});
}
sql.push(attrSql);
}
return sql.join('');
},
renameColumnQuery: function(tableName, attrBefore, attributes) {
var query = 'ALTER TABLE <%= tableName %> RENAME COLUMN <%= attributes %>;';
var attrString = [];
for (var attributeName in attributes) {
attrString.push(Utils._.template('<%= before %> TO <%= after %>')({
before: this.quoteIdentifier(attrBefore),
after: this.quoteIdentifier(attributeName)
}));
}
return Utils._.template(query)({
tableName: this.quoteTable(tableName),
attributes: attrString.join(', ')
});
},
fn: function(fnName, tableName, body, returns, language) {
fnName = fnName || 'testfunc';
language = language || 'plpgsql';
returns = returns || 'SETOF ' + this.quoteTable(tableName);
var query = 'CREATE OR REPLACE FUNCTION pg_temp.<%= fnName %>() RETURNS <%= returns %> AS $func$ BEGIN <%= body %> END; $func$ LANGUAGE <%= language %>; SELECT * FROM pg_temp.<%= fnName %>();';
return Utils._.template(query)({
fnName: fnName,
returns: returns,
language: language,
body: body
});
},
exceptionFn: function(fnName, tableName, main, then, when, returns, language) {
when = when || 'unique_violation';
var body = '<%= main %> EXCEPTION WHEN <%= when %> THEN <%= then %>;';
body = Utils._.template(body)({
main: main,
when: when,
then: then
});
return this.fn(fnName, tableName, body, returns, language);
},
// http://www.maori.geek.nz/post/postgres_upsert_update_or_insert_in_ger_using_knex_js
upsertQuery: function (tableName, insertValues, updateValues, where, rawAttributes, options) {
var insert = this.insertQuery(tableName, insertValues, rawAttributes, options);
var update = this.updateQuery(tableName, updateValues, where, options, rawAttributes);
// The numbers here are selected to match the number of affected rows returned by MySQL
return this.exceptionFn(
'sequelize_upsert',
tableName,
insert + ' RETURN 1;',
update + '; RETURN 2',
'unique_violation',
'integer'
);
},
// Build a DELETE statement. Postgres has no native DELETE ... LIMIT, so a
// limited delete (default limit: 1) is emulated via a primary-key subquery.
// options.truncate === true short-circuits to TRUNCATE (optionally CASCADE).
deleteQuery: function(tableName, where, options, model) {
var query;
options = options || {};
tableName = Utils.removeTicks(this.quoteTable(tableName), '"');
if (options.truncate === true) {
// TRUNCATE ignores where/limit entirely.
query = 'TRUNCATE ' + QueryGenerator.quoteIdentifier(tableName);
if (options.cascade) {
query += ' CASCADE';
}
return query;
}
if (Utils._.isUndefined(options.limit)) {
// NOTE: an undefined limit defaults to 1; pass limit: null/0 for unbounded.
options.limit = 1;
}
// Lazily populate the module-level primary-key cache from the model.
primaryKeys[tableName] = primaryKeys[tableName] || [];
if (!!model && primaryKeys[tableName].length < 1) {
primaryKeys[tableName] = Utils._.map(Object.keys(model.primaryKeys), function(k){
return model.primaryKeys[k].field;
});
}
if (options.limit) {
query = 'DELETE FROM <%= table %> WHERE <%= primaryKeys %> IN (SELECT <%= primaryKeysSelection %> FROM <%= table %><%= where %><%= limit %>)';
} else {
query = 'DELETE FROM <%= table %><%= where %>';
}
// Quoted PK list; composite keys are wrapped in parentheses below.
var pks;
if (primaryKeys[tableName] && primaryKeys[tableName].length > 0) {
pks = primaryKeys[tableName].map(function(pk) {
return this.quoteIdentifier(pk);
}.bind(this)).join(',');
} else {
// No known PK: assume a conventional 'id' column.
pks = this.quoteIdentifier('id');
}
var replacements = {
table: this.quoteIdentifiers(tableName),
where: this.getWhereConditions(where, null, model, options),
limit: !!options.limit ? ' LIMIT ' + this.escape(options.limit) : '',
primaryKeys: primaryKeys[tableName].length > 1 ? '(' + pks + ')' : pks,
primaryKeysSelection: pks
};
if (replacements.where) {
replacements.where = ' WHERE ' + replacements.where;
}
return Utils._.template(query)(replacements);
},
// Build a pg_catalog query listing every index on `tableName`, including
// primary/unique flags, column positions/names, and the full index definition.
// `tableName` may be a string or a { tableName, schema } object.
showIndexesQuery: function(tableName) {
var schemaJoin = '', schemaWhere = '';
if (!Utils._.isString(tableName)) {
// Schema-qualified lookup: join pg_namespace and filter by schema name.
schemaJoin = ', pg_namespace s';
schemaWhere = Utils._.template(" AND s.oid = t.relnamespace AND s.nspname = '<%= schemaName %>'")({schemaName: tableName.schema});
tableName = tableName.tableName;
}
// This is ARCANE!
var query = 'SELECT i.relname AS name, ix.indisprimary AS primary, ix.indisunique AS unique, ix.indkey AS indkey, ' +
'array_agg(a.attnum) as column_indexes, array_agg(a.attname) AS column_names, pg_get_indexdef(ix.indexrelid) ' +
'AS definition FROM pg_class t, pg_class i, pg_index ix, pg_attribute a<%= schemaJoin%> ' +
'WHERE t.oid = ix.indrelid AND i.oid = ix.indexrelid AND a.attrelid = t.oid AND ' +
"t.relkind = 'r' and t.relname = '<%= tableName %>'<%= schemaWhere%> " +
'GROUP BY i.relname, ix.indexrelid, ix.indisprimary, ix.indisunique, ix.indkey ORDER BY i.relname;';
return Utils._.template(query)({tableName: tableName, schemaJoin: schemaJoin, schemaWhere: schemaWhere});
},
removeIndexQuery: function(tableName, indexNameOrAttributes) {
var sql = 'DROP INDEX IF EXISTS <%= indexName %>'
, indexName = indexNameOrAttributes;
if (typeof indexName !== 'string') {
indexName = Utils.inflection.underscore(tableName + '_' + indexNameOrAttributes.join('_'));
}
return Utils._.template(sql)({
tableName: this.quoteIdentifiers(tableName),
indexName: this.quoteIdentifiers(indexName)
});
},
addLimitAndOffset: function(options) {
var fragment = '';
if (options.limit) fragment += ' LIMIT ' + this.escape(options.limit);
if (options.offset) fragment += ' OFFSET ' + this.escape(options.offset);
return fragment;
},
// Render one attribute definition into its column-DDL fragment
// (type, NOT NULL, SERIAL, DEFAULT, UNIQUE, PRIMARY KEY, REFERENCES ...).
// A bare type is normalized to { type } first. Clause order matters here.
attributeToSQL: function(attribute) {
if (!Utils._.isPlainObject(attribute)) {
attribute = {
type: attribute
};
}
var template = '<%= type %>'
, replacements = {};
if (attribute.type instanceof DataTypes.ENUM) {
// ENUM values may live on the type or directly on the attribute.
if (attribute.type.values && !attribute.values) attribute.values = attribute.type.values;
if (Array.isArray(attribute.values) && (attribute.values.length > 0)) {
replacements.type = 'ENUM(' + Utils._.map(attribute.values, function(value) {
return this.escape(value);
}.bind(this)).join(', ') + ')';
} else {
throw new Error("Values for ENUM haven't been defined.");
}
}
if (!replacements.type) {
replacements.type = attribute.type;
}
if (attribute.hasOwnProperty('allowNull') && (!attribute.allowNull)) {
template += ' NOT NULL';
}
if (attribute.autoIncrement) {
// dataTypeMapping later rewrites '<int type> SERIAL' into (BIG)SERIAL.
template += ' SERIAL';
}
if (Utils.defaultValueSchemable(attribute.defaultValue)) {
template += ' DEFAULT <%= defaultValue %>';
replacements.defaultValue = this.escape(attribute.defaultValue, attribute);
}
if (attribute.unique === true) {
template += ' UNIQUE';
}
if (attribute.primaryKey) {
template += ' PRIMARY KEY';
}
if (attribute.references) {
// Inline foreign-key clause with optional ON DELETE / ON UPDATE / DEFERRABLE.
attribute = Utils.formatReferences(attribute);
template += ' REFERENCES <%= referencesTable %> (<%= referencesKey %>)';
replacements.referencesTable = this.quoteTable(attribute.references.model);
if (attribute.references.key) {
replacements.referencesKey = this.quoteIdentifiers(attribute.references.key);
} else {
replacements.referencesKey = this.quoteIdentifier('id');
}
if (attribute.onDelete) {
template += ' ON DELETE <%= onDeleteAction %>';
replacements.onDeleteAction = attribute.onDelete.toUpperCase();
}
if (attribute.onUpdate) {
template += ' ON UPDATE <%= onUpdateAction %>';
replacements.onUpdateAction = attribute.onUpdate.toUpperCase();
}
if (attribute.references.deferrable) {
template += ' <%= deferrable %>';
replacements.deferrable = attribute.references.deferrable.toString(this);
}
}
return Utils._.template(template)(replacements);
},
// Render a deferred-constraints statement; the Deferrable instance produces
// its own SQL via toString(this).
deferConstraintsQuery: function (options) {
return options.deferrable.toString(this);
},
setConstraintQuery: function (columns, type) {
var columnFragment = 'ALL';
if (columns) {
columnFragment = columns.map(function (column) {
return this.quoteIdentifier(column);
}.bind(this)).join(', ');
}
return 'SET CONSTRAINTS ' + columnFragment + ' ' + type;
},
// SET CONSTRAINTS ... DEFERRED (all constraints when `columns` is falsy).
setDeferredQuery: function (columns) {
return this.setConstraintQuery(columns, 'DEFERRED');
},
// SET CONSTRAINTS ... IMMEDIATE (all constraints when `columns` is falsy).
setImmediateQuery: function (columns) {
return this.setConstraintQuery(columns, 'IMMEDIATE');
},
attributesToSQL: function(attributes, options) {
var result = {}
, key
, attribute;
for (key in attributes) {
attribute = attributes[key];
result[attribute.field || key] = this.attributeToSQL(attribute, options);
}
return result;
},
findAutoIncrementField: function(factory) {
var fields = [];
for (var name in factory.attributes) {
var definition = factory.attributes[name];
if (definition && definition.autoIncrement) {
fields.push(name);
}
}
return fields;
},
// Assemble a CREATE [CONSTRAINT] TRIGGER statement from the decoded timing,
// expanded event list, optional options, and the procedure call.
createTrigger: function(tableName, triggerName, eventType, fireOnSpec, functionName, functionParams, optionsArray) {
var sql = [
'CREATE <%= constraintVal %>TRIGGER <%= triggerName %>'
, '<%= eventType %> <%= eventSpec %>'
, 'ON <%= tableName %>'
, '<%= optionsSpec %>'
, 'EXECUTE PROCEDURE <%= functionName %>(<%= paramList %>);'
].join('\n\t')<
return Utils._.template(sql)({
constraintVal: this.triggerEventTypeIsConstraint(eventType),
triggerName: triggerName,
eventType: this.decodeTriggerEventType(eventType),
eventSpec: this.expandTriggerEventSpec(fireOnSpec),
tableName: tableName,
optionsSpec: this.expandOptions(optionsArray),
functionName: functionName,
paramList: this.expandFunctionParamList(functionParams)
});
},
dropTrigger: function(tableName, triggerName) {
var sql = 'DROP TRIGGER <%= triggerName %> ON <%= tableName %> RESTRICT;';
return Utils._.template(sql)({
triggerName: triggerName,
tableName: tableName
});
},
renameTrigger: function(tableName, oldTriggerName, newTriggerName) {
var sql = 'ALTER TRIGGER <%= oldTriggerName %> ON <%= tableName %> RENAME TO <%= newTriggerName%>;';
return Utils._.template(sql)({
tableName: tableName,
oldTriggerName: oldTriggerName,
newTriggerName: newTriggerName
});
},
// Assemble a CREATE FUNCTION statement with a plpgsql-style $func$ body.
// Body lines are indented one tab to match the template's layout.
createFunction: function(functionName, params, returnType, language, body, options) {
var sql = ['CREATE FUNCTION <%= functionName %>(<%= paramList %>)'
, 'RETURNS <%= returnType %> AS $func$'
, 'BEGIN'
, '\t<%= body %>'
, 'END;'
, "$func$ language '<%= language %>'<%= options %>;"
].join('\n');
return Utils._.template(sql)({
functionName: functionName,
paramList: this.expandFunctionParamList(params),
returnType: returnType,
// NOTE(review): String.replace without /g only indents after the FIRST
// newline in a multi-line body — confirm intended.
body: body.replace('\n', '\n\t'),
language: language,
options: this.expandOptions(options)
});
},
dropFunction: function(functionName, params) {
// RESTRICT is (currently, as of 9.2) default but we'll be explicit
var sql = 'DROP FUNCTION <%= functionName %>(<%= paramList %>) RESTRICT;';
return Utils._.template(sql)({
functionName: functionName,
paramList: this.expandFunctionParamList(params)
});
},
renameFunction: function(oldFunctionName, params, newFunctionName) {
var sql = 'ALTER FUNCTION <%= oldFunctionName %>(<%= paramList %>) RENAME TO <%= newFunctionName %>;';
return Utils._.template(sql)({
oldFunctionName: oldFunctionName,
paramList: this.expandFunctionParamList(params),
newFunctionName: newFunctionName
});
},
// Build a connection URI 'protocol://user:password@host[:port]/db[?ssl=...]'
// from a sequelize connection config; port and ssl are optional.
databaseConnectionUri: function(config) {
var template = '<%= protocol %>://<%= user %>:<%= password %>@<%= host %><% if(port) { %>:<%= port %><% } %>/<%= database %><% if(ssl) { %>?ssl=<%= ssl %><% } %>';
return Utils._.template(template)({
user: config.username,
password: config.password,
database: config.database,
host: config.host,
port: config.port,
protocol: config.protocol,
ssl: config.ssl
});
},
// SQL-escape a value, strip the resulting single quotes, then double-quote it
// as an identifier.
pgEscapeAndQuote: function(val) {
return this.quoteIdentifier(Utils.removeTicks(this.escape(val), "'"));
},
expandFunctionParamList: function expandFunctionParamList(params) {
if (Utils._.isUndefined(params) || !Utils._.isArray(params)) {
throw new Error('expandFunctionParamList: function parameters array required, including an empty one for no arguments');
}
var paramList = Utils._.each(params, function expandParam(curParam) {
var paramDef = [];
if (Utils._.has(curParam, 'type')) {
if (Utils._.has(curParam, 'direction')) { paramDef.push(curParam.direction); }
if (Utils._.has(curParam, 'name')) { paramDef.push(curParam.name); }
paramDef.push(curParam.type);
} else {
throw new Error('createFunction called with a parameter with no type');
}
return paramDef.join(' ');
});
return paramList.join(', ');
},
expandOptions: function expandOptions(options) {
return Utils._.isUndefined(options) || Utils._.isEmpty(options) ?
'' : '\n\t' + options.join('\n\t');
},
decodeTriggerEventType: function decodeTriggerEventType(eventSpecifier) {
var EVENT_DECODER = {
'after': 'AFTER',
'before': 'BEFORE',
'instead_of': 'INSTEAD OF',
'after_constraint': 'AFTER'
};
if (!Utils._.has(EVENT_DECODER, eventSpecifier)) {
throw new Error('Invalid trigger event specified: ' + eventSpecifier);
}
return EVENT_DECODER[eventSpecifier];
},
triggerEventTypeIsConstraint: function triggerEventTypeIsConstraint(eventSpecifier) {
return eventSpecifier === 'after_constrain' ? 'CONSTRAINT ' : '';
},
// Expand a { insert: true, update: [cols], ... } spec into the SQL event list
// 'INSERT OR UPDATE OF a, b OR ...'. Throws on empty specs or unknown keys.
expandTriggerEventSpec: function expandTriggerEventSpec(fireOnSpec) {
if (Utils._.isEmpty(fireOnSpec)) {
throw new Error('no table change events specified to trigger on');
}
return Utils._.map(fireOnSpec, function parseTriggerEventSpec(fireValue, fireKey) {
var EVENT_MAP = {
'insert': 'INSERT',
'update': 'UPDATE',
'delete': 'DELETE',
'truncate': 'TRUNCATE'
};
if (!Utils._.has(EVENT_MAP, fireKey)) {
throw new Error('parseTriggerEventSpec: undefined trigger event ' + fireKey);
}
var eventSpec = EVENT_MAP[fireKey];
if (eventSpec === 'UPDATE') {
// Only UPDATE supports a column list ('UPDATE OF col1, col2').
if (Utils._.isArray(fireValue) && fireValue.length > 0) {
eventSpec += ' OF ' + fireValue.join(', ');
}
}
return eventSpec;
}).join(' OR ');
},
// Conventional name of the enum type backing tableName.attr:
// "enum_<table>_<attr>", optionally schema-qualified.
pgEnumName: function (tableName, attr, options) {
options = options || {};
var tableDetails = this.extractTableDetails(tableName, options)
, enumName = '"enum_' + tableDetails.tableName + '_' + attr + '"';
// pgListEnums requires the enum name only, without the schema
if (options.schema !== false && tableDetails.schema) {
enumName = this.quoteIdentifier(tableDetails.schema) + tableDetails.delimiter + enumName;
}
return enumName;
},
// Query pg_type/pg_enum for enum types in the table's schema; when both
// tableName and attrName are given, filter to that one enum type.
pgListEnums: function(tableName, attrName, options) {
var enumName = ''
, tableDetails = this.extractTableDetails(tableName, options);
if (tableDetails.tableName && attrName) {
// Swap identifier quotes for string quotes: typname is compared as a value.
enumName = ' AND t.typname=' + this.pgEnumName(tableDetails.tableName, attrName, { schema: false }).replace(/"/g, "'");
}
var query = 'SELECT t.typname enum_name, array_agg(e.enumlabel ORDER BY enumsortorder) enum_value FROM pg_type t ' +
'JOIN pg_enum e ON t.oid = e.enumtypid ' +
'JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ' +
"WHERE n.nspname = '" + tableDetails.schema + "'" + enumName + ' GROUP BY 1';
return query;
},
// CREATE TYPE <enum name> AS ENUM(...). Values come from dataType.values or,
// failing that, are parsed out of the dataType's string form.
// options.force === true prepends a DROP TYPE IF EXISTS.
pgEnum: function(tableName, attr, dataType, options) {
var enumName = this.pgEnumName(tableName, attr, options)
, values;
if (dataType.values) {
values = "ENUM('" + dataType.values.join("', '") + "')";
} else {
values = dataType.toString().match(/^ENUM\(.+\)/)[0];
}
var sql = 'CREATE TYPE ' + enumName + ' AS ' + values + ';';
if (!!options && options.force === true) {
sql = this.pgEnumDrop(tableName, attr) + sql;
}
return sql;
},
// ALTER TYPE ... ADD VALUE [IF NOT EXISTS] <value> [BEFORE|AFTER <anchor>].
// IF NOT EXISTS requires Postgres >= 9.3.
pgEnumAdd: function(tableName, attr, value, options) {
var enumName = this.pgEnumName(tableName, attr)
, sql = 'ALTER TYPE ' + enumName + ' ADD VALUE ';
if (semver.gte(this.sequelize.options.databaseVersion, '9.3.0')) {
sql += 'IF NOT EXISTS ';
}
sql += this.escape(value);
if (!!options.before) {
sql += ' BEFORE ' + this.escape(options.before);
} else if (!!options.after) {
sql += ' AFTER ' + this.escape(options.after);
}
return sql;
},
pgEnumDrop: function(tableName, attr, enumName) {
enumName = enumName || this.pgEnumName(tableName, attr);
return 'DROP TYPE IF EXISTS ' + enumName + '; ';
},
fromArray: function(text) {
text = text.replace(/^{/, '').replace(/}$/, '');
var matches = text.match(/("(?:\\.|[^"\\\\])*"|[^,]*)(?:\s*,\s*|\s*$)/ig);
if (matches.length < 1) {
return [];
}
matches = matches.map(function(m) {
return m.replace(/",$/, '').replace(/,$/, '').replace(/(^"|"$)/, '');
});
return matches.slice(0, -1);
},
padInt: function(i) {
return (i < 10) ? '0' + i.toString() : i.toString();
},
// Backwards-compatible alias for dataTypeMapping.
pgDataTypeMapping: function(tableName, attr, dataType) {
return this.dataTypeMapping(tableName, attr, dataType);
},
// Post-process a rendered column type for Postgres: record PRIMARY KEY into
// the module-level cache, turn INTEGER/BIGINT + SERIAL into (BIG)SERIAL, and
// replace an inline ENUM(...) with its named enum type.
dataTypeMapping: function(tableName, attr, dataType) {
if (Utils._.includes(dataType, 'PRIMARY KEY')) {
// NOTE(review): assumes primaryKeys[tableName] was initialized by a prior
// createTableQuery/deleteQuery call — confirm against callers.
primaryKeys[tableName].push(attr);
dataType = dataType.replace(/PRIMARY KEY/, '');
}
if (Utils._.includes(dataType, 'SERIAL')) {
if (Utils._.includes(dataType, 'BIGINT')) {
dataType = dataType.replace(/SERIAL/, 'BIGSERIAL');
dataType = dataType.replace(/BIGINT/, '');
} else {
dataType = dataType.replace(/INTEGER/, '');
}
// SERIAL implies NOT NULL in Postgres, so drop the redundant clause.
dataType = dataType.replace(/NOT NULL/, '');
}
if (dataType.match(/^ENUM\(/)) {
dataType = dataType.replace(/^ENUM\(.+\)/, this.pgEnumName(tableName, attr));
}
return dataType;
},
// Double-quote an identifier, unless quoting is globally disabled (and not
// forced), in which case any existing quotes are stripped. '*' passes through.
quoteIdentifier: function(identifier, force) {
if (identifier === '*') return identifier;
if (!force && this.options && this.options.quoteIdentifiers === false) { // default is `true`
// In Postgres, if tables or attributes are created double-quoted,
// they are also case sensitive. If they contain any uppercase
// characters, they must always be double-quoted. This makes it
// impossible to write queries in portable SQL if tables are created in
// this way. Hence, we strip quotes if we don't want case sensitivity.
return Utils.removeTicks(identifier, '"');
} else {
return Utils.addTicks(identifier, '"');
}
},
/**
* Generates an SQL query that returns all foreign keys of a table.
*
* @param {String} tableName The name of the table.
* @param {String} schemaName The name of the schema.
* @return {String} The generated sql query.
*/
getForeignKeysQuery: function(tableName, schemaName) {
// NOTE(review): schemaName is accepted but never used — the catalog lookup is
// by relname only, so same-named tables in other schemas may match; verify.
return 'SELECT conname as constraint_name, pg_catalog.pg_get_constraintdef(r.oid, true) as condef FROM pg_catalog.pg_constraint r ' +
"WHERE r.conrelid = (SELECT oid FROM pg_class WHERE relname = '" + tableName + "' LIMIT 1) AND r.contype = 'f' ORDER BY 1;";
},
/**
* Generates an SQL query that removes a foreign key from a table.
*
* @param {String} tableName The name of the table.
* @param {String} foreignKey The name of the foreign key constraint.
* @return {String} The generated sql query.
*/
dropForeignKeyQuery: function(tableName, foreignKey) {
return 'ALTER TABLE ' + this.quoteTable(tableName) + ' DROP CONSTRAINT ' + this.quoteIdentifier(foreignKey) + ';';
},
// Emit SET AUTOCOMMIT only when meaningful: never inside a nested transaction,
// and never on Postgres >= 9.4 where the setting is unsupported/removed.
setAutocommitQuery: function(value, options) {
if (options.parent) {
return;
}
// POSTGRES does not support setting AUTOCOMMIT = OFF as of 9.4.0
// Additionally it does not support AUTOCOMMIT at all starting at v9.5
// The assumption is that it won't be returning in future versions either
// If you are on a Pg version that is not semver compliant e.g. '9.5.0beta2', which fails due to the 'beta' qualification, then you need to pass
// the database version as "9.5.0" explicitly through the options param passed when creating the Sequelize instance under the key "databaseVersion"
// otherwise Pg version "9.4.0" is assumed by default as per Sequelize 3.14.2.
// For Pg versions that are semver compliant, this is auto-detected upon the first connection.
if (!value || semver.gte(this.sequelize.options.databaseVersion, '9.4.0')) {
return;
}
return AbstractQueryGenerator.setAutocommitQuery.call(this, value, options);
}
};
module.exports = Utils._.extend(Utils._.clone(AbstractQueryGenerator), QueryGenerator);
| {
values.attributes += ', PRIMARY KEY (' + pks + ')';
} | conditional_block |
query-generator.js | 'use strict';
/* jshint -W110 */
var Utils = require('../../utils')
, util = require('util')
, DataTypes = require('../../data-types')
, AbstractQueryGenerator = require('../abstract/query-generator')
, primaryKeys = {}
, semver = require('semver')
, _ = require('lodash');
var QueryGenerator = {
options: {},
dialect: 'postgres',
setSearchPath: function(searchPath) {
var query = 'SET search_path to <%= searchPath%>;';
return Utils._.template(query)({searchPath: searchPath});
},
createSchema: function(schema) {
var query = 'CREATE SCHEMA <%= schema%>;';
return Utils._.template(query)({schema: schema});
},
dropSchema: function(schema) {
var query = 'DROP SCHEMA IF EXISTS <%= schema%> CASCADE;';
return Utils._.template(query)({schema: schema});
},
// List user schemas, excluding information_schema, public and pg_* internals.
showSchemasQuery: function() {
return "SELECT schema_name FROM information_schema.schemata WHERE schema_name <> 'information_schema' AND schema_name != 'public' AND schema_name !~ E'^pg_';";
},
// Query for the server version string.
versionQuery: function() {
return 'SHOW SERVER_VERSION';
},
// Build a CREATE TABLE statement. Column COMMENTs are split into trailing
// COMMENT ON statements; unique keys and a composite PRIMARY KEY clause are
// appended; uses IF NOT EXISTS on Postgres >= 9.1 (or when version unknown).
createTableQuery: function(tableName, attributes, options) {
var self = this;
options = Utils._.extend({
}, options || {});
// Reset the module-level primary-key cache for this table; pgDataTypeMapping
// fills it while rendering columns.
primaryKeys[tableName] = [];
var databaseVersion = Utils._.get(self, 'sequelize.options.databaseVersion', 0);
//Postgres 9.0 does not support CREATE TABLE IF NOT EXISTS, 9.1 and above do
var query = 'CREATE TABLE ' +
( (databaseVersion === 0 || semver.gte(databaseVersion, '9.1.0')) ? 'IF NOT EXISTS ' : '') +
'<%= table %> (<%= attributes%>)<%= comments %>'
, comments = ''
, attrStr = []
, i;
if (options.comment && Utils._.isString(options.comment)) {
comments += '; COMMENT ON TABLE <%= table %> IS ' + this.escape(options.comment);
}
for (var attr in attributes) {
if ((i = attributes[attr].indexOf('COMMENT')) !== -1) {
// Move comment to a separate query
comments += '; ' + attributes[attr].substring(i);
attributes[attr] = attributes[attr].substring(0, i);
}
var dataType = this.pgDataTypeMapping(tableName, attr, attributes[attr]);
attrStr.push(this.quoteIdentifier(attr) + ' ' + dataType);
}
var values = {
table: this.quoteTable(tableName),
attributes: attrStr.join(', '),
comments: Utils._.template(comments)({ table: this.quoteTable(tableName)})
};
if (!!options.uniqueKeys) {
Utils._.each(options.uniqueKeys, function(columns) {
if (!columns.singleField) { // If it's a single field its handled in column def, not as an index
values.attributes += ', UNIQUE (' + columns.fields.map(function(f) { return self.quoteIdentifiers(f); }).join(', ') + ')';
}
});
}
var pks = primaryKeys[tableName].map(function(pk) {
return this.quoteIdentifier(pk);
}.bind(this)).join(',');
if (pks.length > 0) {
values.attributes += ', PRIMARY KEY (' + pks + ')';
}
return Utils._.template(query)(values).trim() + ';';
},
dropTableQuery: function(tableName, options) {
options = options || {};
var query = 'DROP TABLE IF EXISTS <%= table %><%= cascade %>;';
return Utils._.template(query)({
table: this.quoteTable(tableName),
cascade: options.cascade ? ' CASCADE' : ''
});
},
// List public tables, excluding PostGIS's spatial_ref_sys bookkeeping table.
showTablesQuery: function() {
return "SELECT table_name FROM information_schema.tables WHERE table_schema = 'public' AND table_type LIKE '%TABLE' AND table_name != 'spatial_ref_sys';";
},
// Describe a table's columns (constraint, name, default, nullability, type,
// and enum labels for enum-typed columns); schema defaults to 'public'.
describeTableQuery: function(tableName, schema) {
if (!schema) {
schema = 'public';
}
var query = 'SELECT tc.constraint_type as "Constraint", c.column_name as "Field", c.column_default as "Default", c.is_nullable as "Null", ' +
"CASE WHEN c.udt_name = 'hstore' " +
'THEN c.udt_name ELSE c.data_type END as "Type", (SELECT array_agg(e.enumlabel) ' +
'FROM pg_catalog.pg_type t JOIN pg_catalog.pg_enum e ON t.oid=e.enumtypid WHERE t.typname=c.udt_name) AS "special" ' +
'FROM information_schema.columns c ' +
'LEFT JOIN information_schema.key_column_usage cu ON c.table_name = cu.table_name AND cu.column_name = c.column_name ' +
'LEFT JOIN information_schema.table_constraints tc ON c.table_name = tc.table_name AND cu.column_name = c.column_name AND tc.constraint_type = \'PRIMARY KEY\' ' +
' WHERE c.table_name = <%= table %> AND c.table_schema = <%= schema %> ';
return Utils._.template(query)({
table: this.escape(tableName),
schema: this.escape(schema)
});
},
// A recursive parser for nested where conditions
parseConditionObject: function(_conditions, path) {
var self = this;
path = path || [];
return Utils._.reduce(_conditions, function (r, v, k) { // result, key, value
if (Utils._.isObject(v)) {
r = r.concat(self.parseConditionObject(v, path.concat(k))); // Recursively parse objects
} else {
r.push({ path: path.concat(k), value: v });
}
return r;
}, []);
},
// Render Utils.json conditions/paths using Postgres JSON operators; everything
// else falls through to the abstract generator's handling.
handleSequelizeMethod: function (smth, tableName, factory, options, prepend) {
if (smth instanceof Utils.json) {
// Parse nested object
if (smth.conditions) {
// Each leaf becomes "col#>>'{path}' = 'value'".
var conditions = _.map(this.parseConditionObject(smth.conditions), function generateSql(condition) {
return util.format("%s#>>'{%s}' = '%s'",
_.first(condition.path),
_.tail(condition.path).join(','),
condition.value);
});
return conditions.join(' and ');
} else if (smth.path) {
var str;
// Allow specifying conditions using the postgres json syntax
if (_.some(['->', '->>', '#>'], _.partial(_.includes, smth.path))) {
str = smth.path;
} else {
// Also support json dot notation
var path = smth.path.split('.');
str = util.format("%s#>>'{%s}'",
_.first(path),
_.tail(path).join(','));
}
if (smth.value) {
str += util.format(' = %s', this.escape(smth.value));
}
return str;
}
} else {
return AbstractQueryGenerator.handleSequelizeMethod.call(this, smth, tableName, factory, options, prepend);
}
},
// 'ALTER TABLE <t> ADD COLUMN <col> <type>;'. ENUM columns first get a
// CREATE TYPE statement prepended for the backing enum type.
addColumnQuery: function(table, key, dataType) {
var query = 'ALTER TABLE <%= table %> ADD COLUMN <%= attribute %>;'
, dbDataType = this.attributeToSQL(dataType, {context: 'addColumn'})
, attribute;
if (dataType.type && dataType.type instanceof DataTypes.ENUM || dataType instanceof DataTypes.ENUM) {
query = this.pgEnum(table, key, dataType) + query;
}
attribute = Utils._.template('<%= key %> <%= definition %>')({
key: this.quoteIdentifier(key),
definition: this.pgDataTypeMapping(table, key, dbDataType)
});
return Utils._.template(query)({
table: this.quoteTable(table),
attribute: attribute
});
},
removeColumnQuery: function(tableName, attributeName) {
var query = 'ALTER TABLE <%= tableName %> DROP COLUMN <%= attributeName %>;';
return Utils._.template(query)({
tableName: this.quoteTable(tableName),
attributeName: this.quoteIdentifier(attributeName)
});
},
// Build ALTER COLUMN statements per attribute: NOT NULL / DEFAULT are split
// into separate SET/DROP clauses, ENUMs get a CREATE TYPE + USING cast,
// UNIQUE / REFERENCES become ADD CONSTRAINT, and the remainder becomes a
// TYPE change. The input definition string is progressively consumed.
changeColumnQuery: function(tableName, attributes) {
var query = 'ALTER TABLE <%= tableName %> ALTER COLUMN <%= query %>;'
, sql = [];
for (var attributeName in attributes) {
var definition = this.pgDataTypeMapping(tableName, attributeName, attributes[attributeName]);
var attrSql = '';
if (definition.indexOf('NOT NULL') > 0) {
attrSql += Utils._.template(query)({
tableName: this.quoteTable(tableName),
query: this.quoteIdentifier(attributeName) + ' SET NOT NULL'
});
definition = definition.replace('NOT NULL', '').trim();
} else if (!definition.match(/REFERENCES/)) {
attrSql += Utils._.template(query)({
tableName: this.quoteTable(tableName),
query: this.quoteIdentifier(attributeName) + ' DROP NOT NULL'
});
}
if (definition.indexOf('DEFAULT') > 0) {
attrSql += Utils._.template(query)({
tableName: this.quoteTable(tableName),
query: this.quoteIdentifier(attributeName) + ' SET DEFAULT ' + definition.match(/DEFAULT ([^;]+)/)[1]
});
definition = definition.replace(/(DEFAULT[^;]+)/, '').trim();
} else if (!definition.match(/REFERENCES/)) {
attrSql += Utils._.template(query)({
tableName: this.quoteTable(tableName),
query: this.quoteIdentifier(attributeName) + ' DROP DEFAULT'
});
}
if (attributes[attributeName].match(/^ENUM\(/)) {
// Recreate the enum type and cast the column over to it.
query = this.pgEnum(tableName, attributeName, attributes[attributeName]) + query;
definition = definition.replace(/^ENUM\(.+\)/, this.quoteIdentifier('enum_' + tableName + '_' + attributeName));
definition += ' USING (' + this.quoteIdentifier(attributeName) + '::' + this.quoteIdentifier(definition) + ')';
}
if (definition.match(/UNIQUE;*$/)) {
definition = definition.replace(/UNIQUE;*$/, '');
// Constraint additions use ALTER TABLE directly, not ALTER COLUMN.
attrSql += Utils._.template(query.replace('ALTER COLUMN', ''))({
tableName: this.quoteTable(tableName),
query: 'ADD CONSTRAINT ' + this.quoteIdentifier(attributeName + '_unique_idx') + ' UNIQUE (' + this.quoteIdentifier(attributeName) + ')'
});
}
if (definition.match(/REFERENCES/)) {
definition = definition.replace(/.+?(?=REFERENCES)/,'');
attrSql += Utils._.template(query.replace('ALTER COLUMN', ''))({
tableName: this.quoteTable(tableName),
query: 'ADD CONSTRAINT ' + this.quoteIdentifier(attributeName + '_foreign_idx') + ' FOREIGN KEY (' + this.quoteIdentifier(attributeName) + ') ' + definition
});
} else {
attrSql += Utils._.template(query)({
tableName: this.quoteTable(tableName),
query: this.quoteIdentifier(attributeName) + ' TYPE ' + definition
});
}
sql.push(attrSql);
}
return sql.join('');
},
renameColumnQuery: function(tableName, attrBefore, attributes) {
var query = 'ALTER TABLE <%= tableName %> RENAME COLUMN <%= attributes %>;';
var attrString = [];
for (var attributeName in attributes) {
attrString.push(Utils._.template('<%= before %> TO <%= after %>')({
before: this.quoteIdentifier(attrBefore),
after: this.quoteIdentifier(attributeName)
}));
}
return Utils._.template(query)({
tableName: this.quoteTable(tableName),
attributes: attrString.join(', ')
});
},
// Wrap `body` in a temporary pg function, create it, and select from it.
// Defaults: name 'testfunc', language plpgsql, returns SETOF <table>.
fn: function(fnName, tableName, body, returns, language) {
fnName = fnName || 'testfunc';
language = language || 'plpgsql';
returns = returns || 'SETOF ' + this.quoteTable(tableName);
var query = 'CREATE OR REPLACE FUNCTION pg_temp.<%= fnName %>() RETURNS <%= returns %> AS $func$ BEGIN <%= body %> END; $func$ LANGUAGE <%= language %>; SELECT * FROM pg_temp.<%= fnName %>();';
return Utils._.template(query)({
fnName: fnName,
returns: returns,
language: language,
body: body
});
},
// Wrap `main` in an EXCEPTION handler: on `when` (default unique_violation)
// execute `then` instead. Delegates function creation to this.fn.
exceptionFn: function(fnName, tableName, main, then, when, returns, language) {
when = when || 'unique_violation';
var body = '<%= main %> EXCEPTION WHEN <%= when %> THEN <%= then %>;';
body = Utils._.template(body)({
main: main,
when: when,
then: then
});
return this.fn(fnName, tableName, body, returns, language);
},
// http://www.maori.geek.nz/post/postgres_upsert_update_or_insert_in_postgres_using_knex_js
// Emulate UPSERT via a pg function: attempt the INSERT, fall back to the
// UPDATE on unique_violation. Return codes 1 (inserted) / 2 (updated).
upsertQuery: function (tableName, insertValues, updateValues, where, rawAttributes, options) {
var insert = this.insertQuery(tableName, insertValues, rawAttributes, options);
var update = this.updateQuery(tableName, updateValues, where, options, rawAttributes);
// The numbers here are selected to match the number of affected rows returned by MySQL
return this.exceptionFn(
'sequelize_upsert',
tableName,
insert + ' RETURN 1;',
update + '; RETURN 2',
'unique_violation',
'integer'
);
},
// Build a DELETE statement; limited deletes (default limit: 1) are emulated
// via a primary-key subquery. options.truncate === true yields TRUNCATE.
deleteQuery: function(tableName, where, options, model) {
var query;
options = options || {};
tableName = Utils.removeTicks(this.quoteTable(tableName), '"');
if (options.truncate === true) {
// TRUNCATE ignores where/limit.
query = 'TRUNCATE ' + QueryGenerator.quoteIdentifier(tableName);
if (options.cascade) {
query += ' CASCADE';
}
return query;
}
if (Utils._.isUndefined(options.limit)) {
options.limit = 1;
}
// Lazily populate the module-level primary-key cache from the model.
primaryKeys[tableName] = primaryKeys[tableName] || [];
if (!!model && primaryKeys[tableName].length < 1) {
primaryKeys[tableName] = Utils._.map(Object.keys(model.primaryKeys), function(k){
return model.primaryKeys[k].field;
});
}
if (options.limit) {
query = 'DELETE FROM <%= table %> WHERE <%= primaryKeys %> IN (SELECT <%= primaryKeysSelection %> FROM <%= table %><%= where %><%= limit %>)';
} else {
query = 'DELETE FROM <%= table %><%= where %>';
}
var pks;
if (primaryKeys[tableName] && primaryKeys[tableName].length > 0) {
pks = primaryKeys[tableName].map(function(pk) {
return this.quoteIdentifier(pk);
}.bind(this)).join(',');
} else {
// No known PK: assume a conventional 'id' column.
pks = this.quoteIdentifier('id');
}
var replacements = {
table: this.quoteIdentifiers(tableName),
where: this.getWhereConditions(where, null, model, options),
limit: !!options.limit ? ' LIMIT ' + this.escape(options.limit) : '',
primaryKeys: primaryKeys[tableName].length > 1 ? '(' + pks + ')' : pks,
primaryKeysSelection: pks
};
if (replacements.where) {
replacements.where = ' WHERE ' + replacements.where;
}
return Utils._.template(query)(replacements);
},
// Catalog query listing every index on `tableName` (flags, columns,
// definition); `tableName` may be a string or { tableName, schema }.
showIndexesQuery: function(tableName) {
var schemaJoin = '', schemaWhere = '';
if (!Utils._.isString(tableName)) {
schemaJoin = ', pg_namespace s';
schemaWhere = Utils._.template(" AND s.oid = t.relnamespace AND s.nspname = '<%= schemaName %>'")({schemaName: tableName.schema});
tableName = tableName.tableName;
}
// This is ARCANE!
var query = 'SELECT i.relname AS name, ix.indisprimary AS primary, ix.indisunique AS unique, ix.indkey AS indkey, ' +
'array_agg(a.attnum) as column_indexes, array_agg(a.attname) AS column_names, pg_get_indexdef(ix.indexrelid) ' +
'AS definition FROM pg_class t, pg_class i, pg_index ix, pg_attribute a<%= schemaJoin%> ' +
'WHERE t.oid = ix.indrelid AND i.oid = ix.indexrelid AND a.attrelid = t.oid AND ' +
"t.relkind = 'r' and t.relname = '<%= tableName %>'<%= schemaWhere%> " +
'GROUP BY i.relname, ix.indexrelid, ix.indisprimary, ix.indisunique, ix.indkey ORDER BY i.relname;';
return Utils._.template(query)({tableName: tableName, schemaJoin: schemaJoin, schemaWhere: schemaWhere});
},
// DROP INDEX IF EXISTS <name>. Accepts either an index name or an array of
// attribute names, from which the conventional name is derived.
removeIndexQuery: function(tableName, indexNameOrAttributes) {
var sql = 'DROP INDEX IF EXISTS <%= indexName %>'
, indexName = indexNameOrAttributes;
if (typeof indexName !== 'string') {
indexName = Utils.inflection.underscore(tableName + '_' + indexNameOrAttributes.join('_'));
}
return Utils._.template(sql)({
tableName: this.quoteIdentifiers(tableName),
indexName: this.quoteIdentifiers(indexName)
});
},
addLimitAndOffset: function(options) {
var fragment = '';
if (options.limit) fragment += ' LIMIT ' + this.escape(options.limit);
if (options.offset) fragment += ' OFFSET ' + this.escape(options.offset);
return fragment;
},
// Render one attribute definition into its column-DDL fragment
// (type, NOT NULL, SERIAL, DEFAULT, UNIQUE, PRIMARY KEY, REFERENCES ...).
attributeToSQL: function(attribute) {
if (!Utils._.isPlainObject(attribute)) {
attribute = {
type: attribute
};
}
var template = '<%= type %>'
, replacements = {};
if (attribute.type instanceof DataTypes.ENUM) {
// ENUM values may live on the type or directly on the attribute.
if (attribute.type.values && !attribute.values) attribute.values = attribute.type.values;
if (Array.isArray(attribute.values) && (attribute.values.length > 0)) {
replacements.type = 'ENUM(' + Utils._.map(attribute.values, function(value) {
return this.escape(value);
}.bind(this)).join(', ') + ')';
} else {
throw new Error("Values for ENUM haven't been defined.");
}
}
if (!replacements.type) {
replacements.type = attribute.type;
}
if (attribute.hasOwnProperty('allowNull') && (!attribute.allowNull)) {
template += ' NOT NULL';
}
if (attribute.autoIncrement) {
template += ' SERIAL';
}
if (Utils.defaultValueSchemable(attribute.defaultValue)) {
template += ' DEFAULT <%= defaultValue %>';
replacements.defaultValue = this.escape(attribute.defaultValue, attribute);
}
if (attribute.unique === true) {
template += ' UNIQUE';
}
if (attribute.primaryKey) {
template += ' PRIMARY KEY';
}
if (attribute.references) {
// Inline foreign-key clause with optional ON DELETE/ON UPDATE/DEFERRABLE.
attribute = Utils.formatReferences(attribute);
template += ' REFERENCES <%= referencesTable %> (<%= referencesKey %>)';
replacements.referencesTable = this.quoteTable(attribute.references.model);
if (attribute.references.key) {
replacements.referencesKey = this.quoteIdentifiers(attribute.references.key);
} else {
replacements.referencesKey = this.quoteIdentifier('id');
}
if (attribute.onDelete) {
template += ' ON DELETE <%= onDeleteAction %>';
replacements.onDeleteAction = attribute.onDelete.toUpperCase();
}
if (attribute.onUpdate) {
template += ' ON UPDATE <%= onUpdateAction %>';
replacements.onUpdateAction = attribute.onUpdate.toUpperCase();
}
if (attribute.references.deferrable) {
template += ' <%= deferrable %>';
replacements.deferrable = attribute.references.deferrable.toString(this);
}
}
return Utils._.template(template)(replacements);
},
deferConstraintsQuery: function (options) {
return options.deferrable.toString(this);
},
setConstraintQuery: function (columns, type) {
var columnFragment = 'ALL';
if (columns) {
columnFragment = columns.map(function (column) {
return this.quoteIdentifier(column);
}.bind(this)).join(', ');
}
return 'SET CONSTRAINTS ' + columnFragment + ' ' + type;
},
setDeferredQuery: function (columns) {
return this.setConstraintQuery(columns, 'DEFERRED');
},
setImmediateQuery: function (columns) {
return this.setConstraintQuery(columns, 'IMMEDIATE');
},
attributesToSQL: function(attributes, options) {
var result = {}
, key
, attribute;
for (key in attributes) {
attribute = attributes[key];
result[attribute.field || key] = this.attributeToSQL(attribute, options);
}
return result;
},
findAutoIncrementField: function(factory) {
var fields = [];
for (var name in factory.attributes) {
var definition = factory.attributes[name];
if (definition && definition.autoIncrement) {
fields.push(name);
}
}
return fields;
},
createTrigger: function(tableName, triggerName, eventType, fireOnSpec, functionName, functionParams, optionsArray) {
var sql = [
'CREATE <%= constraintVal %>TRIGGER <%= triggerName %>'
, '<%= eventType %> <%= eventSpec %>'
, 'ON <%= tableName %>'
, '<%= optionsSpec %>'
, 'EXECUTE PROCEDURE <%= functionName %>(<%= paramList %>);'
].join('\n\t');
return Utils._.template(sql)({
constraintVal: this.triggerEventTypeIsConstraint(eventType),
triggerName: triggerName,
eventType: this.decodeTriggerEventType(eventType),
eventSpec: this.expandTriggerEventSpec(fireOnSpec),
tableName: tableName,
optionsSpec: this.expandOptions(optionsArray),
functionName: functionName,
paramList: this.expandFunctionParamList(functionParams)
});
},
dropTrigger: function(tableName, triggerName) {
var sql = 'DROP TRIGGER <%= triggerName %> ON <%= tableName %> RESTRICT;';
return Utils._.template(sql)({
triggerName: triggerName,
tableName: tableName
});
},
renameTrigger: function(tableName, oldTriggerName, newTriggerName) {
var sql = 'ALTER TRIGGER <%= oldTriggerName %> ON <%= tableName %> RENAME TO <%= newTriggerName%>;';
return Utils._.template(sql)({
tableName: tableName,
oldTriggerName: oldTriggerName,
newTriggerName: newTriggerName
});
},
createFunction: function(functionName, params, returnType, language, body, options) {
var sql = ['CREATE FUNCTION <%= functionName %>(<%= paramList %>)'
, 'RETURNS <%= returnType %> AS $func$'
, 'BEGIN'
, '\t<%= body %>'
, 'END;'
, "$func$ language '<%= language %>'<%= options %>;"
].join('\n');
return Utils._.template(sql)({
functionName: functionName,
paramList: this.expandFunctionParamList(params),
returnType: returnType,
body: body.replace('\n', '\n\t'),
language: language,
options: this.expandOptions(options)
});
},
dropFunction: function(functionName, params) {
// RESTRICT is (currently, as of 9.2) default but we'll be explicit
var sql = 'DROP FUNCTION <%= functionName %>(<%= paramList %>) RESTRICT;';
return Utils._.template(sql)({
functionName: functionName,
paramList: this.expandFunctionParamList(params)
});
},
renameFunction: function(oldFunctionName, params, newFunctionName) {
var sql = 'ALTER FUNCTION <%= oldFunctionName %>(<%= paramList %>) RENAME TO <%= newFunctionName %>;';
return Utils._.template(sql)({
oldFunctionName: oldFunctionName,
paramList: this.expandFunctionParamList(params),
newFunctionName: newFunctionName
});
},
databaseConnectionUri: function(config) {
var template = '<%= protocol %>://<%= user %>:<%= password %>@<%= host %><% if(port) { %>:<%= port %><% } %>/<%= database %><% if(ssl) { %>?ssl=<%= ssl %><% } %>';
return Utils._.template(template)({
user: config.username,
password: config.password,
database: config.database,
host: config.host,
port: config.port,
protocol: config.protocol,
ssl: config.ssl
});
},
pgEscapeAndQuote: function(val) {
return this.quoteIdentifier(Utils.removeTicks(this.escape(val), "'"));
},
expandFunctionParamList: function expandFunctionParamList(params) {
if (Utils._.isUndefined(params) || !Utils._.isArray(params)) {
throw new Error('expandFunctionParamList: function parameters array required, including an empty one for no arguments');
}
var paramList = Utils._.each(params, function expandParam(curParam) { | paramDef.push(curParam.type);
} else {
throw new Error('createFunction called with a parameter with no type');
}
return paramDef.join(' ');
});
return paramList.join(', ');
},
expandOptions: function expandOptions(options) {
return Utils._.isUndefined(options) || Utils._.isEmpty(options) ?
'' : '\n\t' + options.join('\n\t');
},
decodeTriggerEventType: function decodeTriggerEventType(eventSpecifier) {
var EVENT_DECODER = {
'after': 'AFTER',
'before': 'BEFORE',
'instead_of': 'INSTEAD OF',
'after_constraint': 'AFTER'
};
if (!Utils._.has(EVENT_DECODER, eventSpecifier)) {
throw new Error('Invalid trigger event specified: ' + eventSpecifier);
}
return EVENT_DECODER[eventSpecifier];
},
triggerEventTypeIsConstraint: function triggerEventTypeIsConstraint(eventSpecifier) {
return eventSpecifier === 'after_constrain' ? 'CONSTRAINT ' : '';
},
expandTriggerEventSpec: function expandTriggerEventSpec(fireOnSpec) {
if (Utils._.isEmpty(fireOnSpec)) {
throw new Error('no table change events specified to trigger on');
}
return Utils._.map(fireOnSpec, function parseTriggerEventSpec(fireValue, fireKey) {
var EVENT_MAP = {
'insert': 'INSERT',
'update': 'UPDATE',
'delete': 'DELETE',
'truncate': 'TRUNCATE'
};
if (!Utils._.has(EVENT_MAP, fireKey)) {
throw new Error('parseTriggerEventSpec: undefined trigger event ' + fireKey);
}
var eventSpec = EVENT_MAP[fireKey];
if (eventSpec === 'UPDATE') {
if (Utils._.isArray(fireValue) && fireValue.length > 0) {
eventSpec += ' OF ' + fireValue.join(', ');
}
}
return eventSpec;
}).join(' OR ');
},
pgEnumName: function (tableName, attr, options) {
options = options || {};
var tableDetails = this.extractTableDetails(tableName, options)
, enumName = '"enum_' + tableDetails.tableName + '_' + attr + '"';
// pgListEnums requires the enum name only, without the schema
if (options.schema !== false && tableDetails.schema) {
enumName = this.quoteIdentifier(tableDetails.schema) + tableDetails.delimiter + enumName;
}
return enumName;
},
pgListEnums: function(tableName, attrName, options) {
var enumName = ''
, tableDetails = this.extractTableDetails(tableName, options);
if (tableDetails.tableName && attrName) {
enumName = ' AND t.typname=' + this.pgEnumName(tableDetails.tableName, attrName, { schema: false }).replace(/"/g, "'");
}
var query = 'SELECT t.typname enum_name, array_agg(e.enumlabel ORDER BY enumsortorder) enum_value FROM pg_type t ' +
'JOIN pg_enum e ON t.oid = e.enumtypid ' +
'JOIN pg_catalog.pg_namespace n ON n.oid = t.typnamespace ' +
"WHERE n.nspname = '" + tableDetails.schema + "'" + enumName + ' GROUP BY 1';
return query;
},
pgEnum: function(tableName, attr, dataType, options) {
var enumName = this.pgEnumName(tableName, attr, options)
, values;
if (dataType.values) {
values = "ENUM('" + dataType.values.join("', '") + "')";
} else {
values = dataType.toString().match(/^ENUM\(.+\)/)[0];
}
var sql = 'CREATE TYPE ' + enumName + ' AS ' + values + ';';
if (!!options && options.force === true) {
sql = this.pgEnumDrop(tableName, attr) + sql;
}
return sql;
},
pgEnumAdd: function(tableName, attr, value, options) {
var enumName = this.pgEnumName(tableName, attr)
, sql = 'ALTER TYPE ' + enumName + ' ADD VALUE ';
if (semver.gte(this.sequelize.options.databaseVersion, '9.3.0')) {
sql += 'IF NOT EXISTS ';
}
sql += this.escape(value);
if (!!options.before) {
sql += ' BEFORE ' + this.escape(options.before);
} else if (!!options.after) {
sql += ' AFTER ' + this.escape(options.after);
}
return sql;
},
pgEnumDrop: function(tableName, attr, enumName) {
enumName = enumName || this.pgEnumName(tableName, attr);
return 'DROP TYPE IF EXISTS ' + enumName + '; ';
},
fromArray: function(text) {
text = text.replace(/^{/, '').replace(/}$/, '');
var matches = text.match(/("(?:\\.|[^"\\\\])*"|[^,]*)(?:\s*,\s*|\s*$)/ig);
if (matches.length < 1) {
return [];
}
matches = matches.map(function(m) {
return m.replace(/",$/, '').replace(/,$/, '').replace(/(^"|"$)/, '');
});
return matches.slice(0, -1);
},
padInt: function(i) {
return (i < 10) ? '0' + i.toString() : i.toString();
},
pgDataTypeMapping: function(tableName, attr, dataType) {
return this.dataTypeMapping(tableName, attr, dataType);
},
dataTypeMapping: function(tableName, attr, dataType) {
if (Utils._.includes(dataType, 'PRIMARY KEY')) {
primaryKeys[tableName].push(attr);
dataType = dataType.replace(/PRIMARY KEY/, '');
}
if (Utils._.includes(dataType, 'SERIAL')) {
if (Utils._.includes(dataType, 'BIGINT')) {
dataType = dataType.replace(/SERIAL/, 'BIGSERIAL');
dataType = dataType.replace(/BIGINT/, '');
} else {
dataType = dataType.replace(/INTEGER/, '');
}
dataType = dataType.replace(/NOT NULL/, '');
}
if (dataType.match(/^ENUM\(/)) {
dataType = dataType.replace(/^ENUM\(.+\)/, this.pgEnumName(tableName, attr));
}
return dataType;
},
quoteIdentifier: function(identifier, force) {
if (identifier === '*') return identifier;
if (!force && this.options && this.options.quoteIdentifiers === false) { // default is `true`
// In Postgres, if tables or attributes are created double-quoted,
// they are also case sensitive. If they contain any uppercase
// characters, they must always be double-quoted. This makes it
// impossible to write queries in portable SQL if tables are created in
// this way. Hence, we strip quotes if we don't want case sensitivity.
return Utils.removeTicks(identifier, '"');
} else {
return Utils.addTicks(identifier, '"');
}
},
/*
/**
* Generates an SQL query that returns all foreign keys of a table.
*
* @param {String} tableName The name of the table.
* @param {String} schemaName The name of the schema.
* @return {String} The generated sql query.
*/
getForeignKeysQuery: function(tableName, schemaName) {
return 'SELECT conname as constraint_name, pg_catalog.pg_get_constraintdef(r.oid, true) as condef FROM pg_catalog.pg_constraint r ' +
"WHERE r.conrelid = (SELECT oid FROM pg_class WHERE relname = '" + tableName + "' LIMIT 1) AND r.contype = 'f' ORDER BY 1;";
},
/**
* Generates an SQL query that removes a foreign key from a table.
*
* @param {String} tableName The name of the table.
* @param {String} foreignKey The name of the foreign key constraint.
* @return {String} The generated sql query.
*/
dropForeignKeyQuery: function(tableName, foreignKey) {
return 'ALTER TABLE ' + this.quoteTable(tableName) + ' DROP CONSTRAINT ' + this.quoteIdentifier(foreignKey) + ';';
},
setAutocommitQuery: function(value, options) {
if (options.parent) {
return;
}
// POSTGRES does not support setting AUTOCOMMIT = OFF as of 9.4.0
// Additionally it does not support AUTOCOMMIT at all starting at v9.5
// The assumption is that it won't be returning in future versions either
// If you are on a Pg version that is not semver compliant e.g. '9.5.0beta2', which fails due to the 'beta' qualification, then you need to pass
// the database version as "9.5.0" explicitly through the options param passed when creating the Sequelize instance under the key "databaseVersion"
// otherwise Pg version "9.4.0" is assumed by default as per Sequelize 3.14.2.
// For Pg versions that are semver compliant, this is auto-detected upon the first connection.
if (!value || semver.gte(this.sequelize.options.databaseVersion, '9.4.0')) {
return;
}
return AbstractQueryGenerator.setAutocommitQuery.call(this, value, options);
}
};
module.exports = Utils._.extend(Utils._.clone(AbstractQueryGenerator), QueryGenerator); | var paramDef = [];
if (Utils._.has(curParam, 'type')) {
if (Utils._.has(curParam, 'direction')) { paramDef.push(curParam.direction); }
if (Utils._.has(curParam, 'name')) { paramDef.push(curParam.name); } | random_line_split |
docs-mixin.js | /*
* docs-mixin: used by any page under /docs path
*/
import { updateMetaTOC, scrollTo, offsetTop } from '~/utils'
import { bvDescription, nav } from '~/content'
const TOC_CACHE = {}
// @vue/component
export default {
head() {
return {
title: this.headTitle,
meta: this.headMeta
}
},
computed: {
headTitle() {
const routeName = this.$route.name
let title = ''
let section = ''
if (this.meta && this.meta.title) {
title = this.meta.title
}
if (/^docs-components/.test(routeName)) {
section = 'Components'
} else if (/^docs-directives/.test(routeName)) {
section = 'Directives'
} else if (/^docs-reference/.test(routeName)) |
return [title, section, 'BootstrapVue'].filter(Boolean).join(' | ')
},
headMeta() {
const section = this.$route.name.split('-')[1]
const sectionMeta = section ? nav.find(n => n.base === `${section}/`) : null
const description =
this.meta && this.meta.description
? this.meta.description
: sectionMeta && sectionMeta.description
? sectionMeta.description
: bvDescription
const meta = [
{
hid: 'og:title',
name: 'og:title',
property: 'og:title',
content: this.headTitle
}
]
if (description) {
meta.push({
hid: 'description',
name: 'description',
content: description
})
meta.push({
hid: 'og:description',
name: 'og:description',
property: 'og:description',
content: description
})
}
return meta
}
},
created() {
// Create private non-reactive props
this.$_filterTimer = null
// In a `$nextTick()` to ensure `toc.vue` is created first
this.$nextTick(() => {
const key = `${this.$route.name}_${this.$route.params.slug || ''}`
const toc =
TOC_CACHE[key] || (TOC_CACHE[key] = updateMetaTOC(this.baseTOC || {}, this.meta || null))
this.$root.$emit('docs-set-toc', toc)
})
},
mounted() {
this.clearScrollTimeout()
this.focusScroll()
},
updated() {
this.clearScrollTimeout()
this.focusScroll()
},
beforeDestroy() {
this.clearScrollTimeout()
},
methods: {
clearScrollTimeout() {
clearTimeout(this.$_scrollTimeout)
this.$_scrollTimeout = null
},
focusScroll() {
const hash = this.$route.hash
this.$nextTick(() => {
let el
if (hash) {
// We use an attribute `querySelector()` rather than `getElementByID()`,
// as some auto-generated ID's are invalid or not unique
el = this.$el.querySelector(`[id="${hash.replace('#', '')}"]`)
this.scrollIntoView(el)
}
if (!el) {
el = this.$el.querySelector('h1')
}
if (el) {
el.tabIndex = -1
el.focus()
}
})
},
scrollIntoView(el) {
if (el) {
// Get the document scrolling element
const scroller = document.scrollingElement || document.documentElement || document.body
this.clearScrollTimeout()
// Allow time for v-play to finish rendering
this.$_scrollTimeout = setTimeout(() => {
// Scroll heading into view (minus offset to account for nav top height)
scrollTo(scroller, offsetTop(el) - 70, 100)
}, 100)
}
}
}
}
| {
section = 'Reference'
} | conditional_block |
docs-mixin.js | /*
* docs-mixin: used by any page under /docs path
*/
import { updateMetaTOC, scrollTo, offsetTop } from '~/utils'
import { bvDescription, nav } from '~/content'
const TOC_CACHE = {}
// @vue/component
export default {
head() {
return {
title: this.headTitle,
meta: this.headMeta
}
},
computed: {
headTitle() {
const routeName = this.$route.name
let title = ''
let section = ''
if (this.meta && this.meta.title) {
title = this.meta.title
}
if (/^docs-components/.test(routeName)) {
section = 'Components'
} else if (/^docs-directives/.test(routeName)) {
section = 'Directives'
} else if (/^docs-reference/.test(routeName)) {
section = 'Reference'
}
return [title, section, 'BootstrapVue'].filter(Boolean).join(' | ')
},
headMeta() {
const section = this.$route.name.split('-')[1]
const sectionMeta = section ? nav.find(n => n.base === `${section}/`) : null
const description =
this.meta && this.meta.description
? this.meta.description
: sectionMeta && sectionMeta.description
? sectionMeta.description
: bvDescription
const meta = [
{
hid: 'og:title',
name: 'og:title',
property: 'og:title',
content: this.headTitle
}
]
if (description) {
meta.push({
hid: 'description',
name: 'description',
content: description
})
meta.push({
hid: 'og:description',
name: 'og:description',
property: 'og:description',
content: description
})
}
return meta
}
},
created() {
// Create private non-reactive props
this.$_filterTimer = null
// In a `$nextTick()` to ensure `toc.vue` is created first
this.$nextTick(() => {
const key = `${this.$route.name}_${this.$route.params.slug || ''}`
const toc =
TOC_CACHE[key] || (TOC_CACHE[key] = updateMetaTOC(this.baseTOC || {}, this.meta || null))
this.$root.$emit('docs-set-toc', toc)
})
},
mounted() {
this.clearScrollTimeout()
this.focusScroll()
},
updated() {
this.clearScrollTimeout()
this.focusScroll()
},
beforeDestroy() {
this.clearScrollTimeout()
},
methods: {
clearScrollTimeout() {
clearTimeout(this.$_scrollTimeout)
this.$_scrollTimeout = null
},
| () {
const hash = this.$route.hash
this.$nextTick(() => {
let el
if (hash) {
// We use an attribute `querySelector()` rather than `getElementByID()`,
// as some auto-generated ID's are invalid or not unique
el = this.$el.querySelector(`[id="${hash.replace('#', '')}"]`)
this.scrollIntoView(el)
}
if (!el) {
el = this.$el.querySelector('h1')
}
if (el) {
el.tabIndex = -1
el.focus()
}
})
},
scrollIntoView(el) {
if (el) {
// Get the document scrolling element
const scroller = document.scrollingElement || document.documentElement || document.body
this.clearScrollTimeout()
// Allow time for v-play to finish rendering
this.$_scrollTimeout = setTimeout(() => {
// Scroll heading into view (minus offset to account for nav top height)
scrollTo(scroller, offsetTop(el) - 70, 100)
}, 100)
}
}
}
}
| focusScroll | identifier_name |
docs-mixin.js | /*
* docs-mixin: used by any page under /docs path
*/
import { updateMetaTOC, scrollTo, offsetTop } from '~/utils'
import { bvDescription, nav } from '~/content'
const TOC_CACHE = {}
// @vue/component
export default {
head() {
return {
title: this.headTitle,
meta: this.headMeta
}
},
computed: {
headTitle() {
const routeName = this.$route.name
let title = ''
let section = ''
if (this.meta && this.meta.title) {
title = this.meta.title
}
if (/^docs-components/.test(routeName)) {
section = 'Components'
} else if (/^docs-directives/.test(routeName)) {
section = 'Directives'
} else if (/^docs-reference/.test(routeName)) {
section = 'Reference'
}
return [title, section, 'BootstrapVue'].filter(Boolean).join(' | ')
},
headMeta() {
const section = this.$route.name.split('-')[1]
const sectionMeta = section ? nav.find(n => n.base === `${section}/`) : null
const description =
this.meta && this.meta.description
? this.meta.description
: sectionMeta && sectionMeta.description
? sectionMeta.description
: bvDescription
const meta = [
{
hid: 'og:title',
name: 'og:title',
property: 'og:title',
content: this.headTitle | if (description) {
meta.push({
hid: 'description',
name: 'description',
content: description
})
meta.push({
hid: 'og:description',
name: 'og:description',
property: 'og:description',
content: description
})
}
return meta
}
},
created() {
// Create private non-reactive props
this.$_filterTimer = null
// In a `$nextTick()` to ensure `toc.vue` is created first
this.$nextTick(() => {
const key = `${this.$route.name}_${this.$route.params.slug || ''}`
const toc =
TOC_CACHE[key] || (TOC_CACHE[key] = updateMetaTOC(this.baseTOC || {}, this.meta || null))
this.$root.$emit('docs-set-toc', toc)
})
},
mounted() {
this.clearScrollTimeout()
this.focusScroll()
},
updated() {
this.clearScrollTimeout()
this.focusScroll()
},
beforeDestroy() {
this.clearScrollTimeout()
},
methods: {
clearScrollTimeout() {
clearTimeout(this.$_scrollTimeout)
this.$_scrollTimeout = null
},
focusScroll() {
const hash = this.$route.hash
this.$nextTick(() => {
let el
if (hash) {
// We use an attribute `querySelector()` rather than `getElementByID()`,
// as some auto-generated ID's are invalid or not unique
el = this.$el.querySelector(`[id="${hash.replace('#', '')}"]`)
this.scrollIntoView(el)
}
if (!el) {
el = this.$el.querySelector('h1')
}
if (el) {
el.tabIndex = -1
el.focus()
}
})
},
scrollIntoView(el) {
if (el) {
// Get the document scrolling element
const scroller = document.scrollingElement || document.documentElement || document.body
this.clearScrollTimeout()
// Allow time for v-play to finish rendering
this.$_scrollTimeout = setTimeout(() => {
// Scroll heading into view (minus offset to account for nav top height)
scrollTo(scroller, offsetTop(el) - 70, 100)
}, 100)
}
}
}
} | }
] | random_line_split |
docs-mixin.js | /*
* docs-mixin: used by any page under /docs path
*/
import { updateMetaTOC, scrollTo, offsetTop } from '~/utils'
import { bvDescription, nav } from '~/content'
const TOC_CACHE = {}
// @vue/component
export default {
head() {
return {
title: this.headTitle,
meta: this.headMeta
}
},
computed: {
headTitle() {
const routeName = this.$route.name
let title = ''
let section = ''
if (this.meta && this.meta.title) {
title = this.meta.title
}
if (/^docs-components/.test(routeName)) {
section = 'Components'
} else if (/^docs-directives/.test(routeName)) {
section = 'Directives'
} else if (/^docs-reference/.test(routeName)) {
section = 'Reference'
}
return [title, section, 'BootstrapVue'].filter(Boolean).join(' | ')
},
headMeta() {
const section = this.$route.name.split('-')[1]
const sectionMeta = section ? nav.find(n => n.base === `${section}/`) : null
const description =
this.meta && this.meta.description
? this.meta.description
: sectionMeta && sectionMeta.description
? sectionMeta.description
: bvDescription
const meta = [
{
hid: 'og:title',
name: 'og:title',
property: 'og:title',
content: this.headTitle
}
]
if (description) {
meta.push({
hid: 'description',
name: 'description',
content: description
})
meta.push({
hid: 'og:description',
name: 'og:description',
property: 'og:description',
content: description
})
}
return meta
}
},
created() {
// Create private non-reactive props
this.$_filterTimer = null
// In a `$nextTick()` to ensure `toc.vue` is created first
this.$nextTick(() => {
const key = `${this.$route.name}_${this.$route.params.slug || ''}`
const toc =
TOC_CACHE[key] || (TOC_CACHE[key] = updateMetaTOC(this.baseTOC || {}, this.meta || null))
this.$root.$emit('docs-set-toc', toc)
})
},
mounted() | ,
updated() {
this.clearScrollTimeout()
this.focusScroll()
},
beforeDestroy() {
this.clearScrollTimeout()
},
methods: {
clearScrollTimeout() {
clearTimeout(this.$_scrollTimeout)
this.$_scrollTimeout = null
},
focusScroll() {
const hash = this.$route.hash
this.$nextTick(() => {
let el
if (hash) {
// We use an attribute `querySelector()` rather than `getElementByID()`,
// as some auto-generated ID's are invalid or not unique
el = this.$el.querySelector(`[id="${hash.replace('#', '')}"]`)
this.scrollIntoView(el)
}
if (!el) {
el = this.$el.querySelector('h1')
}
if (el) {
el.tabIndex = -1
el.focus()
}
})
},
scrollIntoView(el) {
if (el) {
// Get the document scrolling element
const scroller = document.scrollingElement || document.documentElement || document.body
this.clearScrollTimeout()
// Allow time for v-play to finish rendering
this.$_scrollTimeout = setTimeout(() => {
// Scroll heading into view (minus offset to account for nav top height)
scrollTo(scroller, offsetTop(el) - 70, 100)
}, 100)
}
}
}
}
| {
this.clearScrollTimeout()
this.focusScroll()
} | identifier_body |
index.js | #!/usr/bin/env node
(function () {
var DirectoryLayout = require('../lib/index.js'),
program = require('commander'),
options;
program
.version('1.0.2')
.usage('[options] <path, ...>')
.option('-g, --generate <path> <output-directory-layout-file-path>', 'Generate directory layout')
.option('-v, --verify <input-directory-layout-file-path> <path>', 'Verify directory layout') | options = {
output: program.args[0] || 'layout.md',
ignore: []
};
console.log('Generating layout for ' + program.generate + '... \n')
DirectoryLayout
.generate(program.generate, options)
.then(function() {
console.log('Layout generated at: ' + options.output);
});
}
else if(program.verify) {
options = {
root: program.args[0]
};
console.log('Verifying layout for ' + options.root + ' ...\n');
DirectoryLayout
.verify(program.verify, options)
.then(function() {
console.log('Successfully verified layout available in ' + program.verify + '.');
});
}
}()); | .parse(process.argv);
if(program.generate) { | random_line_split |
bouncy_circles.rs | #![feature (test)]
#![feature (macro_vis_matcher)]
extern crate test;
#[macro_use]
extern crate time_steward;
#[macro_use]
extern crate glium;
extern crate nalgebra;
extern crate rand;
extern crate boolinator;
extern crate docopt;
extern crate serde;
#[macro_use]
extern crate serde_derive;
use test::Bencher;
use time_steward::{DeterministicRandomId};
use time_steward::{PersistentTypeId, ListedType, PersistentlyIdentifiedType, DataTimelineCellTrait, Basics as BasicsTrait};
//use time_steward::stewards::{simple_full as steward_module};
use steward_module::{TimeSteward, ConstructibleTimeSteward, Event, DataTimelineCell, EventAccessor, FutureCleanupAccessor, SnapshotAccessor, simple_timeline};
use simple_timeline::{SimpleTimeline, GetVarying};
#[path = "../dev-shared/bouncy_circles.rs"] mod bouncy_circles;
use bouncy_circles::*;
#[bench]
fn bouncy_circles_straightforward(bencher: &mut Bencher) {
bencher.iter(|| {
let mut steward: Steward = Steward::from_globals (make_globals());
steward.insert_fiat_event(0, DeterministicRandomId::new(&0), Initialize {}).unwrap();
for index in 0..1000 {
let time = 10*SECOND*index/1000;
steward.snapshot_before(& time).expect("steward failed to provide snapshot");
steward.forget_before(& time);
}
})
}
#[bench]
fn | (bencher: &mut Bencher) {
bencher.iter(|| {
let mut steward: Steward = Steward::from_globals (make_globals());
steward.insert_fiat_event(0, DeterministicRandomId::new(&0), Initialize {}).unwrap();
for index in 1..10 {
steward.insert_fiat_event (index*SECOND, DeterministicRandomId::new (& index), Disturb{ coordinates: [ARENA_SIZE/3,ARENA_SIZE/3]}).unwrap();
}
for index in 0..1000 {
let time = 10*SECOND*index/1000;
steward.snapshot_before(& time).expect("steward failed to provide snapshot");
steward.forget_before(& time);
}
})
}
/*
#[bench]
fn bouncy_circles_disturbed_retroactive (bencher: &mut Bencher) {
bencher.iter(|| {
let mut steward: amortized::Steward<Basics> = amortized::Steward::from_constants(());
steward.insert_fiat_event(0, DeterministicRandomId::new(&0), Initialize::new()).unwrap();
steward.snapshot_before(& (10*SECOND)).expect("steward failed to provide snapshot");
for index in 1..10 {
steward.insert_fiat_event (index*SECOND, DeterministicRandomId::new (& index), Disturb::new ([ARENA_SIZE/3,ARENA_SIZE/3])).unwrap();
steward.snapshot_before(& (10*SECOND)).expect("steward failed to provide snapshot");
}
})
}
*/
| bouncy_circles_disturbed | identifier_name |
bouncy_circles.rs | #![feature (test)]
#![feature (macro_vis_matcher)]
extern crate test;
#[macro_use]
extern crate time_steward;
#[macro_use]
extern crate glium;
extern crate nalgebra;
extern crate rand;
extern crate boolinator;
extern crate docopt;
extern crate serde;
#[macro_use]
extern crate serde_derive;
use test::Bencher;
use time_steward::{DeterministicRandomId};
use time_steward::{PersistentTypeId, ListedType, PersistentlyIdentifiedType, DataTimelineCellTrait, Basics as BasicsTrait};
//use time_steward::stewards::{simple_full as steward_module};
use steward_module::{TimeSteward, ConstructibleTimeSteward, Event, DataTimelineCell, EventAccessor, FutureCleanupAccessor, SnapshotAccessor, simple_timeline};
use simple_timeline::{SimpleTimeline, GetVarying};
#[path = "../dev-shared/bouncy_circles.rs"] mod bouncy_circles;
use bouncy_circles::*;
#[bench]
fn bouncy_circles_straightforward(bencher: &mut Bencher) {
bencher.iter(|| {
let mut steward: Steward = Steward::from_globals (make_globals());
steward.insert_fiat_event(0, DeterministicRandomId::new(&0), Initialize {}).unwrap();
for index in 0..1000 {
let time = 10*SECOND*index/1000;
steward.snapshot_before(& time).expect("steward failed to provide snapshot");
steward.forget_before(& time);
}
})
}
#[bench]
fn bouncy_circles_disturbed (bencher: &mut Bencher) |
/*
#[bench]
fn bouncy_circles_disturbed_retroactive (bencher: &mut Bencher) {
bencher.iter(|| {
let mut steward: amortized::Steward<Basics> = amortized::Steward::from_constants(());
steward.insert_fiat_event(0, DeterministicRandomId::new(&0), Initialize::new()).unwrap();
steward.snapshot_before(& (10*SECOND)).expect("steward failed to provide snapshot");
for index in 1..10 {
steward.insert_fiat_event (index*SECOND, DeterministicRandomId::new (& index), Disturb::new ([ARENA_SIZE/3,ARENA_SIZE/3])).unwrap();
steward.snapshot_before(& (10*SECOND)).expect("steward failed to provide snapshot");
}
})
}
*/
| {
bencher.iter(|| {
let mut steward: Steward = Steward::from_globals (make_globals());
steward.insert_fiat_event(0, DeterministicRandomId::new(&0), Initialize {}).unwrap();
for index in 1..10 {
steward.insert_fiat_event (index*SECOND, DeterministicRandomId::new (& index), Disturb{ coordinates: [ARENA_SIZE/3,ARENA_SIZE/3]}).unwrap();
}
for index in 0..1000 {
let time = 10*SECOND*index/1000;
steward.snapshot_before(& time).expect("steward failed to provide snapshot");
steward.forget_before(& time);
}
})
} | identifier_body |
bouncy_circles.rs | #![feature (test)]
#![feature (macro_vis_matcher)]
extern crate test;
#[macro_use]
extern crate time_steward;
#[macro_use]
extern crate glium;
extern crate nalgebra;
extern crate rand;
extern crate boolinator;
extern crate docopt;
extern crate serde;
#[macro_use]
extern crate serde_derive;
use test::Bencher;
use time_steward::{DeterministicRandomId};
use time_steward::{PersistentTypeId, ListedType, PersistentlyIdentifiedType, DataTimelineCellTrait, Basics as BasicsTrait};
//use time_steward::stewards::{simple_full as steward_module};
use steward_module::{TimeSteward, ConstructibleTimeSteward, Event, DataTimelineCell, EventAccessor, FutureCleanupAccessor, SnapshotAccessor, simple_timeline};
use simple_timeline::{SimpleTimeline, GetVarying};
#[path = "../dev-shared/bouncy_circles.rs"] mod bouncy_circles;
use bouncy_circles::*;
#[bench]
fn bouncy_circles_straightforward(bencher: &mut Bencher) {
bencher.iter(|| {
let mut steward: Steward = Steward::from_globals (make_globals());
steward.insert_fiat_event(0, DeterministicRandomId::new(&0), Initialize {}).unwrap();
for index in 0..1000 {
let time = 10*SECOND*index/1000;
steward.snapshot_before(& time).expect("steward failed to provide snapshot");
steward.forget_before(& time);
}
})
}
#[bench]
fn bouncy_circles_disturbed (bencher: &mut Bencher) {
bencher.iter(|| { | steward.insert_fiat_event(0, DeterministicRandomId::new(&0), Initialize {}).unwrap();
for index in 1..10 {
steward.insert_fiat_event (index*SECOND, DeterministicRandomId::new (& index), Disturb{ coordinates: [ARENA_SIZE/3,ARENA_SIZE/3]}).unwrap();
}
for index in 0..1000 {
let time = 10*SECOND*index/1000;
steward.snapshot_before(& time).expect("steward failed to provide snapshot");
steward.forget_before(& time);
}
})
}
/*
#[bench]
fn bouncy_circles_disturbed_retroactive (bencher: &mut Bencher) {
bencher.iter(|| {
let mut steward: amortized::Steward<Basics> = amortized::Steward::from_constants(());
steward.insert_fiat_event(0, DeterministicRandomId::new(&0), Initialize::new()).unwrap();
steward.snapshot_before(& (10*SECOND)).expect("steward failed to provide snapshot");
for index in 1..10 {
steward.insert_fiat_event (index*SECOND, DeterministicRandomId::new (& index), Disturb::new ([ARENA_SIZE/3,ARENA_SIZE/3])).unwrap();
steward.snapshot_before(& (10*SECOND)).expect("steward failed to provide snapshot");
}
})
}
*/ | let mut steward: Steward = Steward::from_globals (make_globals()); | random_line_split |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android
// ignore-windows
// exec-env:RUST_LOG=debug
#[macro_use]
extern crate log;
use std::io::Command;
use std::os;
use std::str;
fn main() | {
let args = os::args();
let args = args.as_slice();
if args.len() > 1 && args[1].as_slice() == "child" {
debug!("foo");
debug!("bar");
return
}
let p = Command::new(args[0].as_slice())
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(p.error.as_slice()).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
} | identifier_body | |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android
// ignore-windows
// exec-env:RUST_LOG=debug
#[macro_use]
extern crate log;
use std::io::Command;
use std::os;
use std::str;
fn main() {
let args = os::args();
let args = args.as_slice();
if args.len() > 1 && args[1].as_slice() == "child" |
let p = Command::new(args[0].as_slice())
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(p.error.as_slice()).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
}
| {
debug!("foo");
debug!("bar");
return
} | conditional_block |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
| extern crate log;
use std::io::Command;
use std::os;
use std::str;
fn main() {
let args = os::args();
let args = args.as_slice();
if args.len() > 1 && args[1].as_slice() == "child" {
debug!("foo");
debug!("bar");
return
}
let p = Command::new(args[0].as_slice())
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(p.error.as_slice()).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
} | // ignore-android
// ignore-windows
// exec-env:RUST_LOG=debug
#[macro_use] | random_line_split |
logging-separate-lines.rs | // Copyright 2013-2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
// ignore-android
// ignore-windows
// exec-env:RUST_LOG=debug
#[macro_use]
extern crate log;
use std::io::Command;
use std::os;
use std::str;
fn | () {
let args = os::args();
let args = args.as_slice();
if args.len() > 1 && args[1].as_slice() == "child" {
debug!("foo");
debug!("bar");
return
}
let p = Command::new(args[0].as_slice())
.arg("child")
.spawn().unwrap().wait_with_output().unwrap();
assert!(p.status.success());
let mut lines = str::from_utf8(p.error.as_slice()).unwrap().lines();
assert!(lines.next().unwrap().contains("foo"));
assert!(lines.next().unwrap().contains("bar"));
}
| main | identifier_name |
constellation_msg.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The high-level interface from script to constellation. Using this abstract interface helps
//! reduce coupling between these two components.
use nonzero::NonZero;
use std::cell::Cell;
use std::fmt;
use webrender_api;
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum KeyState {
Pressed,
Released,
Repeated,
}
//N.B. Based on the glutin key enum
#[derive(Clone, Copy, Debug, Deserialize, Eq, MallocSizeOf, PartialEq, Serialize)]
pub enum Key {
Space,
Apostrophe,
Comma,
Minus,
Period,
Slash,
Num0,
Num1,
Num2,
Num3,
Num4,
Num5,
Num6,
Num7,
Num8,
Num9,
Semicolon,
Equal,
A,
B,
C,
D,
E,
F,
G,
H,
I,
J,
K,
L,
M,
N,
O,
P,
Q,
R,
S,
T,
U,
V,
W,
X,
Y,
Z,
LeftBracket,
Backslash,
RightBracket,
GraveAccent,
World1,
World2,
Escape,
Enter,
Tab,
Backspace,
Insert,
Delete,
Right,
Left, | Up,
PageUp,
PageDown,
Home,
End,
CapsLock,
ScrollLock,
NumLock,
PrintScreen,
Pause,
F1,
F2,
F3,
F4,
F5,
F6,
F7,
F8,
F9,
F10,
F11,
F12,
F13,
F14,
F15,
F16,
F17,
F18,
F19,
F20,
F21,
F22,
F23,
F24,
F25,
Kp0,
Kp1,
Kp2,
Kp3,
Kp4,
Kp5,
Kp6,
Kp7,
Kp8,
Kp9,
KpDecimal,
KpDivide,
KpMultiply,
KpSubtract,
KpAdd,
KpEnter,
KpEqual,
LeftShift,
LeftControl,
LeftAlt,
LeftSuper,
RightShift,
RightControl,
RightAlt,
RightSuper,
Menu,
NavigateBackward,
NavigateForward,
}
bitflags! {
#[derive(Deserialize, Serialize)]
pub struct KeyModifiers: u8 {
const NONE = 0x00;
const SHIFT = 0x01;
const CONTROL = 0x02;
const ALT = 0x04;
const SUPER = 0x08;
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum TraversalDirection {
Forward(usize),
Back(usize),
}
/// Each pipeline ID needs to be unique. However, it also needs to be possible to
/// generate the pipeline ID from an iframe element (this simplifies a lot of other
/// code that makes use of pipeline IDs).
///
/// To achieve this, each pipeline index belongs to a particular namespace. There is
/// a namespace for the constellation thread, and also one for every script thread.
/// This allows pipeline IDs to be generated by any of those threads without conflicting
/// with pipeline IDs created by other script threads or the constellation. The
/// constellation is the only code that is responsible for creating new *namespaces*.
/// This ensures that namespaces are always unique, even when using multi-process mode.
///
/// It may help conceptually to think of the namespace ID as an identifier for the
/// thread that created this pipeline ID - however this is really an implementation
/// detail so shouldn't be relied upon in code logic. It's best to think of the
/// pipeline ID as a simple unique identifier that doesn't convey any more information.
#[derive(Clone, Copy)]
pub struct PipelineNamespace {
id: PipelineNamespaceId,
index: u32,
}
impl PipelineNamespace {
pub fn install(namespace_id: PipelineNamespaceId) {
PIPELINE_NAMESPACE.with(|tls| {
assert!(tls.get().is_none());
tls.set(Some(PipelineNamespace {
id: namespace_id,
index: 0,
}));
});
}
fn next_index(&mut self) -> NonZero<u32> {
self.index += 1;
NonZero::new(self.index).expect("pipeline id index wrapped!")
}
fn next_pipeline_id(&mut self) -> PipelineId {
PipelineId {
namespace_id: self.id,
index: PipelineIndex(self.next_index()),
}
}
fn next_browsing_context_id(&mut self) -> BrowsingContextId {
BrowsingContextId {
namespace_id: self.id,
index: BrowsingContextIndex(self.next_index()),
}
}
}
thread_local!(pub static PIPELINE_NAMESPACE: Cell<Option<PipelineNamespace>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineNamespaceId(pub u32);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineIndex(pub NonZero<u32>);
malloc_size_of_is_0!(PipelineIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineId {
pub namespace_id: PipelineNamespaceId,
pub index: PipelineIndex
}
impl PipelineId {
pub fn new() -> PipelineId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_pipeline_id = namespace.next_pipeline_id();
tls.set(Some(namespace));
new_pipeline_id
})
}
pub fn to_webrender(&self) -> webrender_api::PipelineId {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
webrender_api::PipelineId(namespace_id, index.get())
}
#[allow(unsafe_code)]
pub fn from_webrender(pipeline: webrender_api::PipelineId) -> PipelineId {
let webrender_api::PipelineId(namespace_id, index) = pipeline;
unsafe {
PipelineId {
namespace_id: PipelineNamespaceId(namespace_id),
index: PipelineIndex(NonZero::new_unchecked(index)),
}
}
}
pub fn root_scroll_node(&self) -> webrender_api::ClipId {
webrender_api::ClipId::root_scroll_node(self.to_webrender())
}
pub fn root_scroll_id(&self) -> webrender_api::ExternalScrollId {
webrender_api::ExternalScrollId(0, self.to_webrender())
}
pub fn root_clip_and_scroll_info(&self) -> webrender_api::ClipAndScrollInfo {
webrender_api::ClipAndScrollInfo::simple(self.root_scroll_node())
}
}
impl fmt::Display for PipelineId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextIndex(pub NonZero<u32>);
malloc_size_of_is_0!(BrowsingContextIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextId {
pub namespace_id: PipelineNamespaceId,
pub index: BrowsingContextIndex,
}
impl BrowsingContextId {
pub fn new() -> BrowsingContextId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_browsing_context_id = namespace.next_browsing_context_id();
tls.set(Some(namespace));
new_browsing_context_id
})
}
}
impl fmt::Display for BrowsingContextId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let BrowsingContextIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
thread_local!(pub static TOP_LEVEL_BROWSING_CONTEXT_ID: Cell<Option<TopLevelBrowsingContextId>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct TopLevelBrowsingContextId(BrowsingContextId);
impl TopLevelBrowsingContextId {
pub fn new() -> TopLevelBrowsingContextId {
TopLevelBrowsingContextId(BrowsingContextId::new())
}
/// Each script and layout thread should have the top-level browsing context id installed,
/// since it is used by crash reporting.
pub fn install(id: TopLevelBrowsingContextId) {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.set(Some(id)))
}
pub fn installed() -> Option<TopLevelBrowsingContextId> {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.get())
}
}
impl fmt::Display for TopLevelBrowsingContextId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(fmt)
}
}
impl From<TopLevelBrowsingContextId> for BrowsingContextId {
fn from(id: TopLevelBrowsingContextId) -> BrowsingContextId {
id.0
}
}
impl PartialEq<TopLevelBrowsingContextId> for BrowsingContextId {
fn eq(&self, rhs: &TopLevelBrowsingContextId) -> bool {
self.eq(&rhs.0)
}
}
impl PartialEq<BrowsingContextId> for TopLevelBrowsingContextId {
fn eq(&self, rhs: &BrowsingContextId) -> bool {
self.0.eq(rhs)
}
}
// We provide ids just for unit testing.
pub const TEST_NAMESPACE: PipelineNamespaceId = PipelineNamespaceId(1234);
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_INDEX: PipelineIndex = unsafe { PipelineIndex(NonZero::new_unchecked(5678)) };
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_ID: PipelineId = PipelineId { namespace_id: TEST_NAMESPACE, index: TEST_PIPELINE_INDEX };
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_INDEX: BrowsingContextIndex =
unsafe { BrowsingContextIndex(NonZero::new_unchecked(8765)) };
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_ID: BrowsingContextId =
BrowsingContextId { namespace_id: TEST_NAMESPACE, index: TEST_BROWSING_CONTEXT_INDEX };
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize)]
pub enum FrameType {
IFrame,
MozBrowserIFrame,
} | Down, | random_line_split |
constellation_msg.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The high-level interface from script to constellation. Using this abstract interface helps
//! reduce coupling between these two components.
use nonzero::NonZero;
use std::cell::Cell;
use std::fmt;
use webrender_api;
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum KeyState {
Pressed,
Released,
Repeated,
}
//N.B. Based on the glutin key enum
#[derive(Clone, Copy, Debug, Deserialize, Eq, MallocSizeOf, PartialEq, Serialize)]
pub enum Key {
Space,
Apostrophe,
Comma,
Minus,
Period,
Slash,
Num0,
Num1,
Num2,
Num3,
Num4,
Num5,
Num6,
Num7,
Num8,
Num9,
Semicolon,
Equal,
A,
B,
C,
D,
E,
F,
G,
H,
I,
J,
K,
L,
M,
N,
O,
P,
Q,
R,
S,
T,
U,
V,
W,
X,
Y,
Z,
LeftBracket,
Backslash,
RightBracket,
GraveAccent,
World1,
World2,
Escape,
Enter,
Tab,
Backspace,
Insert,
Delete,
Right,
Left,
Down,
Up,
PageUp,
PageDown,
Home,
End,
CapsLock,
ScrollLock,
NumLock,
PrintScreen,
Pause,
F1,
F2,
F3,
F4,
F5,
F6,
F7,
F8,
F9,
F10,
F11,
F12,
F13,
F14,
F15,
F16,
F17,
F18,
F19,
F20,
F21,
F22,
F23,
F24,
F25,
Kp0,
Kp1,
Kp2,
Kp3,
Kp4,
Kp5,
Kp6,
Kp7,
Kp8,
Kp9,
KpDecimal,
KpDivide,
KpMultiply,
KpSubtract,
KpAdd,
KpEnter,
KpEqual,
LeftShift,
LeftControl,
LeftAlt,
LeftSuper,
RightShift,
RightControl,
RightAlt,
RightSuper,
Menu,
NavigateBackward,
NavigateForward,
}
bitflags! {
#[derive(Deserialize, Serialize)]
pub struct KeyModifiers: u8 {
const NONE = 0x00;
const SHIFT = 0x01;
const CONTROL = 0x02;
const ALT = 0x04;
const SUPER = 0x08;
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum TraversalDirection {
Forward(usize),
Back(usize),
}
/// Each pipeline ID needs to be unique. However, it also needs to be possible to
/// generate the pipeline ID from an iframe element (this simplifies a lot of other
/// code that makes use of pipeline IDs).
///
/// To achieve this, each pipeline index belongs to a particular namespace. There is
/// a namespace for the constellation thread, and also one for every script thread.
/// This allows pipeline IDs to be generated by any of those threads without conflicting
/// with pipeline IDs created by other script threads or the constellation. The
/// constellation is the only code that is responsible for creating new *namespaces*.
/// This ensures that namespaces are always unique, even when using multi-process mode.
///
/// It may help conceptually to think of the namespace ID as an identifier for the
/// thread that created this pipeline ID - however this is really an implementation
/// detail so shouldn't be relied upon in code logic. It's best to think of the
/// pipeline ID as a simple unique identifier that doesn't convey any more information.
#[derive(Clone, Copy)]
pub struct PipelineNamespace {
id: PipelineNamespaceId,
index: u32,
}
impl PipelineNamespace {
pub fn install(namespace_id: PipelineNamespaceId) {
PIPELINE_NAMESPACE.with(|tls| {
assert!(tls.get().is_none());
tls.set(Some(PipelineNamespace {
id: namespace_id,
index: 0,
}));
});
}
fn next_index(&mut self) -> NonZero<u32> {
self.index += 1;
NonZero::new(self.index).expect("pipeline id index wrapped!")
}
fn next_pipeline_id(&mut self) -> PipelineId {
PipelineId {
namespace_id: self.id,
index: PipelineIndex(self.next_index()),
}
}
fn next_browsing_context_id(&mut self) -> BrowsingContextId {
BrowsingContextId {
namespace_id: self.id,
index: BrowsingContextIndex(self.next_index()),
}
}
}
thread_local!(pub static PIPELINE_NAMESPACE: Cell<Option<PipelineNamespace>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineNamespaceId(pub u32);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineIndex(pub NonZero<u32>);
malloc_size_of_is_0!(PipelineIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineId {
pub namespace_id: PipelineNamespaceId,
pub index: PipelineIndex
}
impl PipelineId {
pub fn new() -> PipelineId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_pipeline_id = namespace.next_pipeline_id();
tls.set(Some(namespace));
new_pipeline_id
})
}
pub fn to_webrender(&self) -> webrender_api::PipelineId {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
webrender_api::PipelineId(namespace_id, index.get())
}
#[allow(unsafe_code)]
pub fn from_webrender(pipeline: webrender_api::PipelineId) -> PipelineId {
let webrender_api::PipelineId(namespace_id, index) = pipeline;
unsafe {
PipelineId {
namespace_id: PipelineNamespaceId(namespace_id),
index: PipelineIndex(NonZero::new_unchecked(index)),
}
}
}
pub fn root_scroll_node(&self) -> webrender_api::ClipId {
webrender_api::ClipId::root_scroll_node(self.to_webrender())
}
pub fn root_scroll_id(&self) -> webrender_api::ExternalScrollId {
webrender_api::ExternalScrollId(0, self.to_webrender())
}
pub fn root_clip_and_scroll_info(&self) -> webrender_api::ClipAndScrollInfo |
}
impl fmt::Display for PipelineId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextIndex(pub NonZero<u32>);
malloc_size_of_is_0!(BrowsingContextIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextId {
pub namespace_id: PipelineNamespaceId,
pub index: BrowsingContextIndex,
}
impl BrowsingContextId {
pub fn new() -> BrowsingContextId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_browsing_context_id = namespace.next_browsing_context_id();
tls.set(Some(namespace));
new_browsing_context_id
})
}
}
impl fmt::Display for BrowsingContextId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let BrowsingContextIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
thread_local!(pub static TOP_LEVEL_BROWSING_CONTEXT_ID: Cell<Option<TopLevelBrowsingContextId>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct TopLevelBrowsingContextId(BrowsingContextId);
impl TopLevelBrowsingContextId {
pub fn new() -> TopLevelBrowsingContextId {
TopLevelBrowsingContextId(BrowsingContextId::new())
}
/// Each script and layout thread should have the top-level browsing context id installed,
/// since it is used by crash reporting.
pub fn install(id: TopLevelBrowsingContextId) {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.set(Some(id)))
}
pub fn installed() -> Option<TopLevelBrowsingContextId> {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.get())
}
}
impl fmt::Display for TopLevelBrowsingContextId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(fmt)
}
}
impl From<TopLevelBrowsingContextId> for BrowsingContextId {
fn from(id: TopLevelBrowsingContextId) -> BrowsingContextId {
id.0
}
}
impl PartialEq<TopLevelBrowsingContextId> for BrowsingContextId {
fn eq(&self, rhs: &TopLevelBrowsingContextId) -> bool {
self.eq(&rhs.0)
}
}
impl PartialEq<BrowsingContextId> for TopLevelBrowsingContextId {
fn eq(&self, rhs: &BrowsingContextId) -> bool {
self.0.eq(rhs)
}
}
// We provide ids just for unit testing.
pub const TEST_NAMESPACE: PipelineNamespaceId = PipelineNamespaceId(1234);
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_INDEX: PipelineIndex = unsafe { PipelineIndex(NonZero::new_unchecked(5678)) };
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_ID: PipelineId = PipelineId { namespace_id: TEST_NAMESPACE, index: TEST_PIPELINE_INDEX };
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_INDEX: BrowsingContextIndex =
unsafe { BrowsingContextIndex(NonZero::new_unchecked(8765)) };
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_ID: BrowsingContextId =
BrowsingContextId { namespace_id: TEST_NAMESPACE, index: TEST_BROWSING_CONTEXT_INDEX };
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize)]
pub enum FrameType {
IFrame,
MozBrowserIFrame,
}
| {
webrender_api::ClipAndScrollInfo::simple(self.root_scroll_node())
} | identifier_body |
constellation_msg.rs | /* This Source Code Form is subject to the terms of the Mozilla Public
* License, v. 2.0. If a copy of the MPL was not distributed with this
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
//! The high-level interface from script to constellation. Using this abstract interface helps
//! reduce coupling between these two components.
use nonzero::NonZero;
use std::cell::Cell;
use std::fmt;
use webrender_api;
#[derive(Clone, Copy, Debug, Deserialize, Eq, PartialEq, Serialize)]
pub enum KeyState {
Pressed,
Released,
Repeated,
}
//N.B. Based on the glutin key enum
#[derive(Clone, Copy, Debug, Deserialize, Eq, MallocSizeOf, PartialEq, Serialize)]
pub enum Key {
Space,
Apostrophe,
Comma,
Minus,
Period,
Slash,
Num0,
Num1,
Num2,
Num3,
Num4,
Num5,
Num6,
Num7,
Num8,
Num9,
Semicolon,
Equal,
A,
B,
C,
D,
E,
F,
G,
H,
I,
J,
K,
L,
M,
N,
O,
P,
Q,
R,
S,
T,
U,
V,
W,
X,
Y,
Z,
LeftBracket,
Backslash,
RightBracket,
GraveAccent,
World1,
World2,
Escape,
Enter,
Tab,
Backspace,
Insert,
Delete,
Right,
Left,
Down,
Up,
PageUp,
PageDown,
Home,
End,
CapsLock,
ScrollLock,
NumLock,
PrintScreen,
Pause,
F1,
F2,
F3,
F4,
F5,
F6,
F7,
F8,
F9,
F10,
F11,
F12,
F13,
F14,
F15,
F16,
F17,
F18,
F19,
F20,
F21,
F22,
F23,
F24,
F25,
Kp0,
Kp1,
Kp2,
Kp3,
Kp4,
Kp5,
Kp6,
Kp7,
Kp8,
Kp9,
KpDecimal,
KpDivide,
KpMultiply,
KpSubtract,
KpAdd,
KpEnter,
KpEqual,
LeftShift,
LeftControl,
LeftAlt,
LeftSuper,
RightShift,
RightControl,
RightAlt,
RightSuper,
Menu,
NavigateBackward,
NavigateForward,
}
bitflags! {
#[derive(Deserialize, Serialize)]
pub struct KeyModifiers: u8 {
const NONE = 0x00;
const SHIFT = 0x01;
const CONTROL = 0x02;
const ALT = 0x04;
const SUPER = 0x08;
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)]
pub enum TraversalDirection {
Forward(usize),
Back(usize),
}
/// Each pipeline ID needs to be unique. However, it also needs to be possible to
/// generate the pipeline ID from an iframe element (this simplifies a lot of other
/// code that makes use of pipeline IDs).
///
/// To achieve this, each pipeline index belongs to a particular namespace. There is
/// a namespace for the constellation thread, and also one for every script thread.
/// This allows pipeline IDs to be generated by any of those threads without conflicting
/// with pipeline IDs created by other script threads or the constellation. The
/// constellation is the only code that is responsible for creating new *namespaces*.
/// This ensures that namespaces are always unique, even when using multi-process mode.
///
/// It may help conceptually to think of the namespace ID as an identifier for the
/// thread that created this pipeline ID - however this is really an implementation
/// detail so shouldn't be relied upon in code logic. It's best to think of the
/// pipeline ID as a simple unique identifier that doesn't convey any more information.
#[derive(Clone, Copy)]
pub struct PipelineNamespace {
id: PipelineNamespaceId,
index: u32,
}
impl PipelineNamespace {
pub fn install(namespace_id: PipelineNamespaceId) {
PIPELINE_NAMESPACE.with(|tls| {
assert!(tls.get().is_none());
tls.set(Some(PipelineNamespace {
id: namespace_id,
index: 0,
}));
});
}
fn next_index(&mut self) -> NonZero<u32> {
self.index += 1;
NonZero::new(self.index).expect("pipeline id index wrapped!")
}
fn next_pipeline_id(&mut self) -> PipelineId {
PipelineId {
namespace_id: self.id,
index: PipelineIndex(self.next_index()),
}
}
fn next_browsing_context_id(&mut self) -> BrowsingContextId {
BrowsingContextId {
namespace_id: self.id,
index: BrowsingContextIndex(self.next_index()),
}
}
}
thread_local!(pub static PIPELINE_NAMESPACE: Cell<Option<PipelineNamespace>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineNamespaceId(pub u32);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineIndex(pub NonZero<u32>);
malloc_size_of_is_0!(PipelineIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct PipelineId {
pub namespace_id: PipelineNamespaceId,
pub index: PipelineIndex
}
impl PipelineId {
pub fn new() -> PipelineId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_pipeline_id = namespace.next_pipeline_id();
tls.set(Some(namespace));
new_pipeline_id
})
}
pub fn to_webrender(&self) -> webrender_api::PipelineId {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
webrender_api::PipelineId(namespace_id, index.get())
}
#[allow(unsafe_code)]
pub fn | (pipeline: webrender_api::PipelineId) -> PipelineId {
let webrender_api::PipelineId(namespace_id, index) = pipeline;
unsafe {
PipelineId {
namespace_id: PipelineNamespaceId(namespace_id),
index: PipelineIndex(NonZero::new_unchecked(index)),
}
}
}
pub fn root_scroll_node(&self) -> webrender_api::ClipId {
webrender_api::ClipId::root_scroll_node(self.to_webrender())
}
pub fn root_scroll_id(&self) -> webrender_api::ExternalScrollId {
webrender_api::ExternalScrollId(0, self.to_webrender())
}
pub fn root_clip_and_scroll_info(&self) -> webrender_api::ClipAndScrollInfo {
webrender_api::ClipAndScrollInfo::simple(self.root_scroll_node())
}
}
impl fmt::Display for PipelineId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let PipelineIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextIndex(pub NonZero<u32>);
malloc_size_of_is_0!(BrowsingContextIndex);
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct BrowsingContextId {
pub namespace_id: PipelineNamespaceId,
pub index: BrowsingContextIndex,
}
impl BrowsingContextId {
pub fn new() -> BrowsingContextId {
PIPELINE_NAMESPACE.with(|tls| {
let mut namespace = tls.get().expect("No namespace set for this thread!");
let new_browsing_context_id = namespace.next_browsing_context_id();
tls.set(Some(namespace));
new_browsing_context_id
})
}
}
impl fmt::Display for BrowsingContextId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
let PipelineNamespaceId(namespace_id) = self.namespace_id;
let BrowsingContextIndex(index) = self.index;
write!(fmt, "({},{})", namespace_id, index.get())
}
}
thread_local!(pub static TOP_LEVEL_BROWSING_CONTEXT_ID: Cell<Option<TopLevelBrowsingContextId>> = Cell::new(None));
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, Ord, PartialEq, PartialOrd, Serialize)]
pub struct TopLevelBrowsingContextId(BrowsingContextId);
impl TopLevelBrowsingContextId {
pub fn new() -> TopLevelBrowsingContextId {
TopLevelBrowsingContextId(BrowsingContextId::new())
}
/// Each script and layout thread should have the top-level browsing context id installed,
/// since it is used by crash reporting.
pub fn install(id: TopLevelBrowsingContextId) {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.set(Some(id)))
}
pub fn installed() -> Option<TopLevelBrowsingContextId> {
TOP_LEVEL_BROWSING_CONTEXT_ID.with(|tls| tls.get())
}
}
impl fmt::Display for TopLevelBrowsingContextId {
fn fmt(&self, fmt: &mut fmt::Formatter) -> fmt::Result {
self.0.fmt(fmt)
}
}
impl From<TopLevelBrowsingContextId> for BrowsingContextId {
fn from(id: TopLevelBrowsingContextId) -> BrowsingContextId {
id.0
}
}
impl PartialEq<TopLevelBrowsingContextId> for BrowsingContextId {
fn eq(&self, rhs: &TopLevelBrowsingContextId) -> bool {
self.eq(&rhs.0)
}
}
impl PartialEq<BrowsingContextId> for TopLevelBrowsingContextId {
fn eq(&self, rhs: &BrowsingContextId) -> bool {
self.0.eq(rhs)
}
}
// We provide ids just for unit testing.
pub const TEST_NAMESPACE: PipelineNamespaceId = PipelineNamespaceId(1234);
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_INDEX: PipelineIndex = unsafe { PipelineIndex(NonZero::new_unchecked(5678)) };
#[cfg(feature = "unstable")]
pub const TEST_PIPELINE_ID: PipelineId = PipelineId { namespace_id: TEST_NAMESPACE, index: TEST_PIPELINE_INDEX };
#[allow(unsafe_code)]
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_INDEX: BrowsingContextIndex =
unsafe { BrowsingContextIndex(NonZero::new_unchecked(8765)) };
#[cfg(feature = "unstable")]
pub const TEST_BROWSING_CONTEXT_ID: BrowsingContextId =
BrowsingContextId { namespace_id: TEST_NAMESPACE, index: TEST_BROWSING_CONTEXT_INDEX };
#[derive(Clone, Copy, Debug, Deserialize, Eq, Hash, MallocSizeOf, PartialEq, Serialize)]
pub enum FrameType {
IFrame,
MozBrowserIFrame,
}
| from_webrender | identifier_name |
server.rs | extern crate music_server;
extern crate systray;
use music_server::*;
use std::{process, env, thread};
use std::io::{Read, Write};
use std::net::TcpListener;
use std::sync::{Arc, Mutex};
/// Listens on a TcpListener, parses the commands and add them to the queue.
fn start_server(listener: &TcpListener, queue: &Arc<Mutex<Queue>>) {
for stream in listener.incoming() {
match stream {
Ok(mut stream) => {
let mut buf = [0; 256];
let data = stream.read(&mut buf);
if data.is_ok() {
let command = Command::from(&buf);
if command.is_ok() |
}
},
Err(_) => println!("Error"),
}
}
}
/// Creates a library from /home/$USER/Music, creates a tray icon if the feature is enabled, and
/// starts the server and the event loop.
fn main() {
// Initialize library
let library_path = env::home_dir();
if library_path.is_none() {
let _ = writeln!(&mut std::io::stderr(), "Error: cannot read home dir");
process::exit(1);
}
let library_path = library_path.unwrap().join("Music");
let library = Library::new_rc(&library_path);
let queue_for_server = {
let library = library.borrow();
Arc::clone(library.queue())
};
use tray;
let queue_for_tray = Arc::clone(&queue_for_server);
// Initialize tray icon
thread::spawn(move || {
tray::start_tray(&queue_for_tray);
});
// Initialize server
let listener = TcpListener::bind("127.0.0.1:5000");
if let Ok(listener) = listener {
thread::spawn(move || {
start_server(&listener, &queue_for_server);
});
library.borrow_mut().event_loop();
} else {
let _ = writeln!(&mut std::io::stderr(), "Error: could not start server");
process::exit(1);
}
}
| {
let mut queue = queue.lock().unwrap();
queue.push(command.unwrap(), Some(stream));
} | conditional_block |
server.rs | extern crate music_server;
extern crate systray;
use music_server::*;
use std::{process, env, thread};
use std::io::{Read, Write};
use std::net::TcpListener;
use std::sync::{Arc, Mutex};
/// Listens on a TcpListener, parses the commands and add them to the queue.
fn start_server(listener: &TcpListener, queue: &Arc<Mutex<Queue>>) {
for stream in listener.incoming() {
match stream {
Ok(mut stream) => {
let mut buf = [0; 256];
let data = stream.read(&mut buf);
if data.is_ok() {
let command = Command::from(&buf);
if command.is_ok() {
let mut queue = queue.lock().unwrap();
queue.push(command.unwrap(), Some(stream));
}
}
},
Err(_) => println!("Error"),
}
}
}
/// Creates a library from /home/$USER/Music, creates a tray icon if the feature is enabled, and
/// starts the server and the event loop.
fn main() {
// Initialize library
let library_path = env::home_dir();
if library_path.is_none() {
let _ = writeln!(&mut std::io::stderr(), "Error: cannot read home dir");
process::exit(1);
}
let library_path = library_path.unwrap().join("Music");
let library = Library::new_rc(&library_path);
let queue_for_server = {
let library = library.borrow();
Arc::clone(library.queue())
};
use tray;
let queue_for_tray = Arc::clone(&queue_for_server);
// Initialize tray icon
thread::spawn(move || {
tray::start_tray(&queue_for_tray);
});
|
if let Ok(listener) = listener {
thread::spawn(move || {
start_server(&listener, &queue_for_server);
});
library.borrow_mut().event_loop();
} else {
let _ = writeln!(&mut std::io::stderr(), "Error: could not start server");
process::exit(1);
}
} | // Initialize server
let listener = TcpListener::bind("127.0.0.1:5000"); | random_line_split |
server.rs | extern crate music_server;
extern crate systray;
use music_server::*;
use std::{process, env, thread};
use std::io::{Read, Write};
use std::net::TcpListener;
use std::sync::{Arc, Mutex};
/// Listens on a TcpListener, parses the commands and add them to the queue.
fn start_server(listener: &TcpListener, queue: &Arc<Mutex<Queue>>) {
for stream in listener.incoming() {
match stream {
Ok(mut stream) => {
let mut buf = [0; 256];
let data = stream.read(&mut buf);
if data.is_ok() {
let command = Command::from(&buf);
if command.is_ok() {
let mut queue = queue.lock().unwrap();
queue.push(command.unwrap(), Some(stream));
}
}
},
Err(_) => println!("Error"),
}
}
}
/// Creates a library from /home/$USER/Music, creates a tray icon if the feature is enabled, and
/// starts the server and the event loop.
fn main() | {
// Initialize library
let library_path = env::home_dir();
if library_path.is_none() {
let _ = writeln!(&mut std::io::stderr(), "Error: cannot read home dir");
process::exit(1);
}
let library_path = library_path.unwrap().join("Music");
let library = Library::new_rc(&library_path);
let queue_for_server = {
let library = library.borrow();
Arc::clone(library.queue())
};
use tray;
let queue_for_tray = Arc::clone(&queue_for_server);
// Initialize tray icon
thread::spawn(move || {
tray::start_tray(&queue_for_tray);
});
// Initialize server
let listener = TcpListener::bind("127.0.0.1:5000");
if let Ok(listener) = listener {
thread::spawn(move || {
start_server(&listener, &queue_for_server);
});
library.borrow_mut().event_loop();
} else {
let _ = writeln!(&mut std::io::stderr(), "Error: could not start server");
process::exit(1);
}
} | identifier_body | |
server.rs | extern crate music_server;
extern crate systray;
use music_server::*;
use std::{process, env, thread};
use std::io::{Read, Write};
use std::net::TcpListener;
use std::sync::{Arc, Mutex};
/// Listens on a TcpListener, parses the commands and add them to the queue.
fn start_server(listener: &TcpListener, queue: &Arc<Mutex<Queue>>) {
for stream in listener.incoming() {
match stream {
Ok(mut stream) => {
let mut buf = [0; 256];
let data = stream.read(&mut buf);
if data.is_ok() {
let command = Command::from(&buf);
if command.is_ok() {
let mut queue = queue.lock().unwrap();
queue.push(command.unwrap(), Some(stream));
}
}
},
Err(_) => println!("Error"),
}
}
}
/// Creates a library from /home/$USER/Music, creates a tray icon if the feature is enabled, and
/// starts the server and the event loop.
fn | () {
// Initialize library
let library_path = env::home_dir();
if library_path.is_none() {
let _ = writeln!(&mut std::io::stderr(), "Error: cannot read home dir");
process::exit(1);
}
let library_path = library_path.unwrap().join("Music");
let library = Library::new_rc(&library_path);
let queue_for_server = {
let library = library.borrow();
Arc::clone(library.queue())
};
use tray;
let queue_for_tray = Arc::clone(&queue_for_server);
// Initialize tray icon
thread::spawn(move || {
tray::start_tray(&queue_for_tray);
});
// Initialize server
let listener = TcpListener::bind("127.0.0.1:5000");
if let Ok(listener) = listener {
thread::spawn(move || {
start_server(&listener, &queue_for_server);
});
library.borrow_mut().event_loop();
} else {
let _ = writeln!(&mut std::io::stderr(), "Error: could not start server");
process::exit(1);
}
}
| main | identifier_name |
conftest.py | # Calliope
# Copyright (C) 2017, 2018 Sam Thursfield <sam@afuera.me.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import testutils
@pytest.fixture()
def cli():
| '''Fixture for testing through the `cpe` commandline interface.'''
return testutils.Cli() | identifier_body | |
conftest.py | # Calliope
# Copyright (C) 2017, 2018 Sam Thursfield <sam@afuera.me.uk>
#
# This program is free software: you can redistribute it and/or modify | # it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import testutils
@pytest.fixture()
def cli():
'''Fixture for testing through the `cpe` commandline interface.'''
return testutils.Cli() | random_line_split | |
conftest.py | # Calliope
# Copyright (C) 2017, 2018 Sam Thursfield <sam@afuera.me.uk>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import pytest
import testutils
@pytest.fixture()
def | ():
'''Fixture for testing through the `cpe` commandline interface.'''
return testutils.Cli()
| cli | identifier_name |
test_compat.py | """
Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import sys, os, string
import unittest
import tempfile
from test_all import verbose
try:
# For Python 2.3
from bsddb import db, hashopen, btopen, rnopen
except ImportError:
# For earlier Pythons w/distutils pybsddb
from bsddb3 import db, hashopen, btopen, rnopen
class CompatibilityTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
def tearDown(self):
try:
os.remove(self.filename)
except os.error:
pass
| def test02_hashopen(self):
self.do_bthash_test(hashopen, 'hashopen')
def test03_rnopen(self):
data = string.split("The quick brown fox jumped over the lazy dog.")
if verbose:
print "\nTesting: rnopen"
f = rnopen(self.filename, 'c')
for x in range(len(data)):
f[x+1] = data[x]
getTest = (f[1], f[2], f[3])
if verbose:
print '%s %s %s' % getTest
assert getTest[1] == 'quick', 'data mismatch!'
f[25] = 'twenty-five'
f.close()
del f
f = rnopen(self.filename, 'w')
f[20] = 'twenty'
def noRec(f):
rec = f[15]
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f['a string']
self.assertRaises(TypeError, badKey, f)
del f[3]
rec = f.first()
while rec:
if verbose:
print rec
try:
rec = f.next()
except KeyError:
break
f.close()
def test04_n_flag(self):
f = hashopen(self.filename, 'n')
f.close()
def do_bthash_test(self, factory, what):
if verbose:
print '\nTesting: ', what
f = factory(self.filename, 'c')
if verbose:
print 'creation...'
# truth test
if f:
if verbose: print "truth test: true"
else:
if verbose: print "truth test: false"
f['0'] = ''
f['a'] = 'Guido'
f['b'] = 'van'
f['c'] = 'Rossum'
f['d'] = 'invented'
f['f'] = 'Python'
if verbose:
print '%s %s %s' % (f['a'], f['b'], f['c'])
if verbose:
print 'key ordering...'
f.set_location(f.first()[0])
while 1:
try:
rec = f.next()
except KeyError:
assert rec == f.last(), 'Error, last <> last!'
f.previous()
break
if verbose:
print rec
assert f.has_key('f'), 'Error, missing key!'
f.sync()
f.close()
# truth test
try:
if f:
if verbose: print "truth test: true"
else:
if verbose: print "truth test: false"
except db.DBError:
pass
else:
self.fail("Exception expected")
del f
if verbose:
print 'modification...'
f = factory(self.filename, 'w')
f['d'] = 'discovered'
if verbose:
print 'access...'
for key in f.keys():
word = f[key]
if verbose:
print word
def noRec(f):
rec = f['no such key']
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f[15]
self.assertRaises(TypeError, badKey, f)
f.close()
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(CompatibilityTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite') |
def test01_btopen(self):
self.do_bthash_test(btopen, 'btopen')
| random_line_split |
test_compat.py | """
Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import sys, os, string
import unittest
import tempfile
from test_all import verbose
try:
# For Python 2.3
from bsddb import db, hashopen, btopen, rnopen
except ImportError:
# For earlier Pythons w/distutils pybsddb
from bsddb3 import db, hashopen, btopen, rnopen
class CompatibilityTestCase(unittest.TestCase):
def | (self):
self.filename = tempfile.mktemp()
def tearDown(self):
try:
os.remove(self.filename)
except os.error:
pass
def test01_btopen(self):
self.do_bthash_test(btopen, 'btopen')
def test02_hashopen(self):
self.do_bthash_test(hashopen, 'hashopen')
def test03_rnopen(self):
data = string.split("The quick brown fox jumped over the lazy dog.")
if verbose:
print "\nTesting: rnopen"
f = rnopen(self.filename, 'c')
for x in range(len(data)):
f[x+1] = data[x]
getTest = (f[1], f[2], f[3])
if verbose:
print '%s %s %s' % getTest
assert getTest[1] == 'quick', 'data mismatch!'
f[25] = 'twenty-five'
f.close()
del f
f = rnopen(self.filename, 'w')
f[20] = 'twenty'
def noRec(f):
rec = f[15]
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f['a string']
self.assertRaises(TypeError, badKey, f)
del f[3]
rec = f.first()
while rec:
if verbose:
print rec
try:
rec = f.next()
except KeyError:
break
f.close()
def test04_n_flag(self):
f = hashopen(self.filename, 'n')
f.close()
def do_bthash_test(self, factory, what):
if verbose:
print '\nTesting: ', what
f = factory(self.filename, 'c')
if verbose:
print 'creation...'
# truth test
if f:
if verbose: print "truth test: true"
else:
if verbose: print "truth test: false"
f['0'] = ''
f['a'] = 'Guido'
f['b'] = 'van'
f['c'] = 'Rossum'
f['d'] = 'invented'
f['f'] = 'Python'
if verbose:
print '%s %s %s' % (f['a'], f['b'], f['c'])
if verbose:
print 'key ordering...'
f.set_location(f.first()[0])
while 1:
try:
rec = f.next()
except KeyError:
assert rec == f.last(), 'Error, last <> last!'
f.previous()
break
if verbose:
print rec
assert f.has_key('f'), 'Error, missing key!'
f.sync()
f.close()
# truth test
try:
if f:
if verbose: print "truth test: true"
else:
if verbose: print "truth test: false"
except db.DBError:
pass
else:
self.fail("Exception expected")
del f
if verbose:
print 'modification...'
f = factory(self.filename, 'w')
f['d'] = 'discovered'
if verbose:
print 'access...'
for key in f.keys():
word = f[key]
if verbose:
print word
def noRec(f):
rec = f['no such key']
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f[15]
self.assertRaises(TypeError, badKey, f)
f.close()
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(CompatibilityTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| setUp | identifier_name |
test_compat.py | """
Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import sys, os, string
import unittest
import tempfile
from test_all import verbose
try:
# For Python 2.3
from bsddb import db, hashopen, btopen, rnopen
except ImportError:
# For earlier Pythons w/distutils pybsddb
from bsddb3 import db, hashopen, btopen, rnopen
class CompatibilityTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
def tearDown(self):
try:
os.remove(self.filename)
except os.error:
pass
def test01_btopen(self):
self.do_bthash_test(btopen, 'btopen')
def test02_hashopen(self):
|
def test03_rnopen(self):
data = string.split("The quick brown fox jumped over the lazy dog.")
if verbose:
print "\nTesting: rnopen"
f = rnopen(self.filename, 'c')
for x in range(len(data)):
f[x+1] = data[x]
getTest = (f[1], f[2], f[3])
if verbose:
print '%s %s %s' % getTest
assert getTest[1] == 'quick', 'data mismatch!'
f[25] = 'twenty-five'
f.close()
del f
f = rnopen(self.filename, 'w')
f[20] = 'twenty'
def noRec(f):
rec = f[15]
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f['a string']
self.assertRaises(TypeError, badKey, f)
del f[3]
rec = f.first()
while rec:
if verbose:
print rec
try:
rec = f.next()
except KeyError:
break
f.close()
def test04_n_flag(self):
f = hashopen(self.filename, 'n')
f.close()
def do_bthash_test(self, factory, what):
if verbose:
print '\nTesting: ', what
f = factory(self.filename, 'c')
if verbose:
print 'creation...'
# truth test
if f:
if verbose: print "truth test: true"
else:
if verbose: print "truth test: false"
f['0'] = ''
f['a'] = 'Guido'
f['b'] = 'van'
f['c'] = 'Rossum'
f['d'] = 'invented'
f['f'] = 'Python'
if verbose:
print '%s %s %s' % (f['a'], f['b'], f['c'])
if verbose:
print 'key ordering...'
f.set_location(f.first()[0])
while 1:
try:
rec = f.next()
except KeyError:
assert rec == f.last(), 'Error, last <> last!'
f.previous()
break
if verbose:
print rec
assert f.has_key('f'), 'Error, missing key!'
f.sync()
f.close()
# truth test
try:
if f:
if verbose: print "truth test: true"
else:
if verbose: print "truth test: false"
except db.DBError:
pass
else:
self.fail("Exception expected")
del f
if verbose:
print 'modification...'
f = factory(self.filename, 'w')
f['d'] = 'discovered'
if verbose:
print 'access...'
for key in f.keys():
word = f[key]
if verbose:
print word
def noRec(f):
rec = f['no such key']
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f[15]
self.assertRaises(TypeError, badKey, f)
f.close()
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(CompatibilityTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| self.do_bthash_test(hashopen, 'hashopen') | identifier_body |
test_compat.py | """
Test cases adapted from the test_bsddb.py module in Python's
regression test suite.
"""
import sys, os, string
import unittest
import tempfile
from test_all import verbose
try:
# For Python 2.3
from bsddb import db, hashopen, btopen, rnopen
except ImportError:
# For earlier Pythons w/distutils pybsddb
from bsddb3 import db, hashopen, btopen, rnopen
class CompatibilityTestCase(unittest.TestCase):
def setUp(self):
self.filename = tempfile.mktemp()
def tearDown(self):
try:
os.remove(self.filename)
except os.error:
pass
def test01_btopen(self):
self.do_bthash_test(btopen, 'btopen')
def test02_hashopen(self):
self.do_bthash_test(hashopen, 'hashopen')
def test03_rnopen(self):
data = string.split("The quick brown fox jumped over the lazy dog.")
if verbose:
print "\nTesting: rnopen"
f = rnopen(self.filename, 'c')
for x in range(len(data)):
f[x+1] = data[x]
getTest = (f[1], f[2], f[3])
if verbose:
print '%s %s %s' % getTest
assert getTest[1] == 'quick', 'data mismatch!'
f[25] = 'twenty-five'
f.close()
del f
f = rnopen(self.filename, 'w')
f[20] = 'twenty'
def noRec(f):
rec = f[15]
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f['a string']
self.assertRaises(TypeError, badKey, f)
del f[3]
rec = f.first()
while rec:
if verbose:
print rec
try:
rec = f.next()
except KeyError:
break
f.close()
def test04_n_flag(self):
f = hashopen(self.filename, 'n')
f.close()
def do_bthash_test(self, factory, what):
if verbose:
print '\nTesting: ', what
f = factory(self.filename, 'c')
if verbose:
print 'creation...'
# truth test
if f:
if verbose: print "truth test: true"
else:
if verbose: print "truth test: false"
f['0'] = ''
f['a'] = 'Guido'
f['b'] = 'van'
f['c'] = 'Rossum'
f['d'] = 'invented'
f['f'] = 'Python'
if verbose:
print '%s %s %s' % (f['a'], f['b'], f['c'])
if verbose:
print 'key ordering...'
f.set_location(f.first()[0])
while 1:
try:
rec = f.next()
except KeyError:
assert rec == f.last(), 'Error, last <> last!'
f.previous()
break
if verbose:
print rec
assert f.has_key('f'), 'Error, missing key!'
f.sync()
f.close()
# truth test
try:
if f:
if verbose: print "truth test: true"
else:
if verbose: |
except db.DBError:
pass
else:
self.fail("Exception expected")
del f
if verbose:
print 'modification...'
f = factory(self.filename, 'w')
f['d'] = 'discovered'
if verbose:
print 'access...'
for key in f.keys():
word = f[key]
if verbose:
print word
def noRec(f):
rec = f['no such key']
self.assertRaises(KeyError, noRec, f)
def badKey(f):
rec = f[15]
self.assertRaises(TypeError, badKey, f)
f.close()
#----------------------------------------------------------------------
def test_suite():
return unittest.makeSuite(CompatibilityTestCase)
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
| print "truth test: false" | conditional_block |
spot_launcher.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import boto.ec2
from boto.ec2.blockdevicemapping import BlockDeviceType
from boto.ec2.blockdevicemapping import BlockDeviceMapping
import time
import copy
import argparse
import sys
import pprint
import os
import yaml
BASE_PATH = os.path.dirname(os.path.abspath(__file__))
CONFIG_PATH = os.path.join(BASE_PATH, '../configs')
def launch_from_config(conn, instance_config_name, config_file_name):
spot_requests_config = get_config(config_file_name)
config = spot_requests_config[instance_config_name]
mapping = create_mapping(config)
print 'Launching %s instances'%(instance_config_name)
print 'Instance parameters:'
pp = pprint.PrettyPrinter(indent=4)
pp.pprint(config)
spot_req = conn.request_spot_instances(
config['price'],
config['ami_id'],
count=config['count'],
type=config['type'],
key_name=config['key_name'],
instance_type=config['instance_type'],
placement_group=config['placement_group'],
security_group_ids=config['security_groups'],
subnet_id=config['subnet_id'],
instance_profile_name=config['instance_profile_name'],
block_device_map=mapping
)
request_ids = [req.id for req in spot_req]
print 'Waiting for fulfillment'
instance_ids = wait_for_fulfillment(conn, request_ids,
copy.deepcopy(request_ids))
if 'tags' in config:
tag_instances(conn, instance_ids, config['tags'])
return instance_ids
def get_config(config_file_name):
config_file = open(os.path.join(CONFIG_PATH, config_file_name))
config_dict = yaml.load(config_file.read())
return config_dict
def | (config):
if 'mapping' not in config:
return None
mapping = BlockDeviceMapping()
for ephemeral_name, device_path in config['mapping'].iteritems():
ephemeral = BlockDeviceType()
ephemeral.ephemeral_name = ephemeral_name
mapping[device_path] = ephemeral
return mapping
def wait_for_fulfillment(conn, request_ids, pending_request_ids):
"""Loop through all pending request ids waiting for them to be fulfilled.
If a request is fulfilled, remove it from pending_request_ids.
If there are still pending requests, sleep and check again in 10 seconds.
Only return when all spot requests have been fulfilled."""
instance_ids = []
failed_ids = []
time.sleep(10)
pending_statuses = set(['pending-evaluation', 'pending-fulfillment'])
while len(pending_request_ids) > 0:
results = conn.get_all_spot_instance_requests(
request_ids=pending_request_ids)
for result in results:
if result.status.code == 'fulfilled':
pending_request_ids.pop(pending_request_ids.index(result.id))
print '\nspot request %s fulfilled!'%result.id
instance_ids.append(result.instance_id)
elif result.status.code not in pending_statuses:
pending_request_ids.pop(pending_request_ids.index(result.id))
print '\nspot request %s could not be fulfilled. ' \
'Status code: %s'%(result.id, result.status.code)
failed_ids.append(result.id)
if len(pending_request_ids) > 0:
sys.stdout.write('.')
sys.stdout.flush()
time.sleep(10)
if len(failed_ids) > 0:
print 'The following spot requests ' \
'have failed: %s'%(', '.join(failed_ids))
else:
print 'All spot requests fulfilled!'
return instance_ids
def tag_instances(conn, instance_ids, tags):
instances = conn.get_only_instances(instance_ids=instance_ids)
for instance in instances:
for key, value in tags.iteritems():
instance.add_tag(key=key, value=value)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('instance', type=str,
help='Instance config name to launch')
parser.add_argument('-r', '--region', type=str, default='us-east-1',
help='EC2 region name')
parser.add_argument('-c', '--config-file', type=str, default='spot_requests.yml',
help='Spot requests config file name')
args = parser.parse_args()
conn = boto.ec2.connect_to_region(args.region)
config_file_name = args.config_file
instance_config_name = args.instance
launch_from_config(conn, instance_config_name, config_file_name)
if __name__ == '__main__':
main()
| create_mapping | identifier_name |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.