file_name
large_stringlengths
4
140
prefix
large_stringlengths
0
39k
suffix
large_stringlengths
0
36.1k
middle
large_stringlengths
0
29.4k
fim_type
large_stringclasses
4 values
slave.js
'use strict'; // not using ES6 import/export syntax, since we need to require() in a handler // what the ES6 syntax does not permit var vm = require('vm'); var errorCatcherInPlace = false; var messageHandler = function messageHandler() { console.error('No thread logic initialized.'); // eslint-disable-line no-console }; function setupErrorCatcher() { if (errorCatcherInPlace) { return; } process.on('uncaughtException', function (error) { process.send({ error: { message: error.message, stack: error.stack } }); }); errorCatcherInPlace = true; } function runAsSandboxedModule(code) { var sandbox = { Buffer: Buffer, console: console, clearInterval: clearInterval, clearTimeout: clearTimeout, module: { exports: null }, require: require, setInterval: setInterval, setTimeout: setTimeout }; vm.runInNewContext(code, sandbox); return sandbox.module.exports; } function
() { for (var _len = arguments.length, args = Array(_len), _key = 0; _key < _len; _key++) { args[_key] = arguments[_key]; } process.send({ response: args }); } messageHandlerDone.transfer = function () { for (var _len2 = arguments.length, args = Array(_len2), _key2 = 0; _key2 < _len2; _key2++) { args[_key2] = arguments[_key2]; } args.pop(); // ignore last parameter, since it's only useful for browser code messageHandlerDone.apply(undefined, args); }; function messageHandlerProgress(progress) { process.send({ progress: progress }); } process.on('message', function (data) { if (data.initByScript) { messageHandler = require(data.script); } if (data.initByMethod) { messageHandler = runAsSandboxedModule('module.exports = ' + data.method); } if (data.doRun) { // it's a good idea to wait until first thread logic run to set this up, // so initialization errors will be printed to console setupErrorCatcher(); messageHandler(data.param, messageHandlerDone, messageHandlerProgress); } }); //# sourceMappingURL=slave.js.map
messageHandlerDone
identifier_name
histogram.py
from __future__ import print_function, division, absolute_import from george import kernels, GP import numpy as np from kglib import fitters from scipy.integrate import quad from scipy.optimize import minimize class HistFitter(fitters.Bayesian_LS):
def __init__(self, mcmc_samples, bin_edges): """ Histogram Inference a la Dan Foreman-Mackey Parameters: =========== - mcmc_samples: numpy array of shape (Nobs, Nsamples) MCMC samples for the thing you want to histogram - bin_edges: numpy.ndarray array The edges of the histogram bins to use. """ self.mcmc_samples = mcmc_samples self.bin_edges = bin_edges self.bin_centers = (self.bin_edges[:-1] + self.bin_edges[1:]) / 2 self.bin_widths = np.diff(self.bin_edges) self.Nbins = self.bin_widths.size self.Nobs = self.mcmc_samples.shape[0] # Find which bin each q falls in self.bin_idx = np.digitize(self.mcmc_samples, self.bin_edges) - 1 # Determine the censoring function for each bin (used in the integral) self.censor_integrals = np.array([quad(func=self.censoring_fcn, a=left, b=right)[0] for (left, right) in zip(self.bin_edges[:-1], self.bin_edges[1:])]) # Set values needed for multinest fitting self.n_params = self.Nbins self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)] def lnlike(self, pars): # Pull theta out of pars theta = pars[:self.Nbins] # Generate the inner summation gamma = np.ones_like(self.bin_idx) * np.nan good = (self.bin_idx < self.Nbins) & (self.bin_idx >= 0) # nans in q get put in nonexistent bins gamma[good] = self.Nobs * self.censoring_fcn(self.mcmc_samples[good]) * theta[self.bin_idx[good]] summation = np.nanmean(gamma, axis=1) # Calculate the integral I = self._integral_fcn(theta) # Generate the log-likelihood ll = -I + np.nansum(np.log(summation)) return ll def lnprior(self, pars): """ Override this if you want to set a better prior on the bin heights. """ if all([p > 0 and p < 10 for p in pars]): return 0 return -np.inf def lnprob(self, pars): lp = self.lnprior(pars) return lp + self.lnlike(pars) if np.isfinite(lp) else -np.inf def _integral_fcn(self, theta): return np.sum(theta * self.censor_integrals) * self.Nobs def censoring_fcn(self, value): """ Censoring function. 
This should return the completeness of your survey to the given value. """ return 1.0 def guess_fit(self): def errfcn(pars): ll = self.lnprob(pars) return -ll initial_guess = np.ones_like(self.bin_centers) bounds = [[1e-3, None] for p in initial_guess] out = minimize(errfcn, initial_guess, bounds=bounds) return out.x def mnest_prior(self, cube, ndim, nparams): # All bins are in the range (0, 10) for i in range(self.Nbins): cube[i] *= 10 return class CensoredHistFitter(HistFitter): """ Inherits from HistFitter, but actually defines the censoring function """ def censoring_fcn(self, val, alpha=40, beta=0.25): # sigmoid censoring function. Change this for the real deal! return 1.0 / (1.0 + np.exp(-alpha * (val - beta))) class SmoothHistFitter(CensoredHistFitter): """ A subclass of HistogramFitter that puts a gaussian process smoothing prior on the bin heights """ def __init__(self, *args, **kwargs): super(SmoothHistFitter, self).__init__(*args, **kwargs) self.smoothing = self.mcmc_samples.shape[0] / self.Nbins self.n_params = self.Nbins + 4 self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)] self.param_names.extend(('lna', 'lntau', 'lnerr', 'mean')) def lnprior(self, pars): """ Smoothing prior using gaussian process. We will learn the hyperparameters and marginalize over them. """ theta = pars[:self.Nbins] if np.any(theta < 0): return -np.inf a, tau, err = np.exp(pars[self.Nbins:-1]) mean = pars[-1] kernel = a * kernels.ExpSquaredKernel(tau) gp = GP(kernel, mean=mean) gp.compute(self.bin_centers, yerr=err) return gp.lnlikelihood(theta) / self.smoothing def guess_fit(self): """ This doesn't work too great, but the full MCMC fit looks good. 
""" def errfcn(pars): ll = self.lnprob(pars) return -ll # Set up initial guesses initial_guess = np.ones(self.bin_centers.size + 4) initial_guess[-4] = 0.0 initial_guess[-3] = -0.25 initial_guess[-2] = -1.0 initial_guess[-1] = -1.0 # Set up bounds bounds = [[1e-3, None] for p in self.bin_centers] bounds.append([-10, 20]) bounds.append([-10, 10]) bounds.append((-1, 5)) bounds.append((-10, 10)) # Minimize out = minimize(errfcn, initial_guess, bounds=bounds) return out.x def _lnlike(self, pars): return self.lnprob(pars) def mnest_prior(self, cube, ndim, nparams): for i in range(self.Nbins): cube[i] *= 10 cube[self.Nbins] = cube[self.Nbins] * 30 - 10 cube[self.Nbins + 1] = cube[self.Nbins + 1] * 20 - 10 cube[self.Nbins + 2] = cube[self.Nbins + 2] * 7 - 2 cube[self.Nbins + 3] = cube[self.Nbins + 3] * 20 - 10 return
random_line_split
histogram.py
from __future__ import print_function, division, absolute_import from george import kernels, GP import numpy as np from kglib import fitters from scipy.integrate import quad from scipy.optimize import minimize class HistFitter(fitters.Bayesian_LS): def __init__(self, mcmc_samples, bin_edges): """ Histogram Inference a la Dan Foreman-Mackey Parameters: =========== - mcmc_samples: numpy array of shape (Nobs, Nsamples) MCMC samples for the thing you want to histogram - bin_edges: numpy.ndarray array The edges of the histogram bins to use. """ self.mcmc_samples = mcmc_samples self.bin_edges = bin_edges self.bin_centers = (self.bin_edges[:-1] + self.bin_edges[1:]) / 2 self.bin_widths = np.diff(self.bin_edges) self.Nbins = self.bin_widths.size self.Nobs = self.mcmc_samples.shape[0] # Find which bin each q falls in self.bin_idx = np.digitize(self.mcmc_samples, self.bin_edges) - 1 # Determine the censoring function for each bin (used in the integral) self.censor_integrals = np.array([quad(func=self.censoring_fcn, a=left, b=right)[0] for (left, right) in zip(self.bin_edges[:-1], self.bin_edges[1:])]) # Set values needed for multinest fitting self.n_params = self.Nbins self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)] def lnlike(self, pars): # Pull theta out of pars theta = pars[:self.Nbins] # Generate the inner summation gamma = np.ones_like(self.bin_idx) * np.nan good = (self.bin_idx < self.Nbins) & (self.bin_idx >= 0) # nans in q get put in nonexistent bins gamma[good] = self.Nobs * self.censoring_fcn(self.mcmc_samples[good]) * theta[self.bin_idx[good]] summation = np.nanmean(gamma, axis=1) # Calculate the integral I = self._integral_fcn(theta) # Generate the log-likelihood ll = -I + np.nansum(np.log(summation)) return ll def lnprior(self, pars): """ Override this if you want to set a better prior on the bin heights. 
""" if all([p > 0 and p < 10 for p in pars]): return 0 return -np.inf def lnprob(self, pars): lp = self.lnprior(pars) return lp + self.lnlike(pars) if np.isfinite(lp) else -np.inf def _integral_fcn(self, theta): return np.sum(theta * self.censor_integrals) * self.Nobs def censoring_fcn(self, value): """ Censoring function. This should return the completeness of your survey to the given value. """ return 1.0 def guess_fit(self): def errfcn(pars): ll = self.lnprob(pars) return -ll initial_guess = np.ones_like(self.bin_centers) bounds = [[1e-3, None] for p in initial_guess] out = minimize(errfcn, initial_guess, bounds=bounds) return out.x def mnest_prior(self, cube, ndim, nparams): # All bins are in the range (0, 10) for i in range(self.Nbins): cube[i] *= 10 return class CensoredHistFitter(HistFitter): """ Inherits from HistFitter, but actually defines the censoring function """ def censoring_fcn(self, val, alpha=40, beta=0.25): # sigmoid censoring function. Change this for the real deal! return 1.0 / (1.0 + np.exp(-alpha * (val - beta))) class SmoothHistFitter(CensoredHistFitter): """ A subclass of HistogramFitter that puts a gaussian process smoothing prior on the bin heights """ def __init__(self, *args, **kwargs): super(SmoothHistFitter, self).__init__(*args, **kwargs) self.smoothing = self.mcmc_samples.shape[0] / self.Nbins self.n_params = self.Nbins + 4 self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)] self.param_names.extend(('lna', 'lntau', 'lnerr', 'mean')) def lnprior(self, pars): """ Smoothing prior using gaussian process. We will learn the hyperparameters and marginalize over them. 
""" theta = pars[:self.Nbins] if np.any(theta < 0): return -np.inf a, tau, err = np.exp(pars[self.Nbins:-1]) mean = pars[-1] kernel = a * kernels.ExpSquaredKernel(tau) gp = GP(kernel, mean=mean) gp.compute(self.bin_centers, yerr=err) return gp.lnlikelihood(theta) / self.smoothing def guess_fit(self): """ This doesn't work too great, but the full MCMC fit looks good. """ def errfcn(pars): ll = self.lnprob(pars) return -ll # Set up initial guesses initial_guess = np.ones(self.bin_centers.size + 4) initial_guess[-4] = 0.0 initial_guess[-3] = -0.25 initial_guess[-2] = -1.0 initial_guess[-1] = -1.0 # Set up bounds bounds = [[1e-3, None] for p in self.bin_centers] bounds.append([-10, 20]) bounds.append([-10, 10]) bounds.append((-1, 5)) bounds.append((-10, 10)) # Minimize out = minimize(errfcn, initial_guess, bounds=bounds) return out.x def _lnlike(self, pars): return self.lnprob(pars) def mnest_prior(self, cube, ndim, nparams): for i in range(self.Nbins):
cube[self.Nbins] = cube[self.Nbins] * 30 - 10 cube[self.Nbins + 1] = cube[self.Nbins + 1] * 20 - 10 cube[self.Nbins + 2] = cube[self.Nbins + 2] * 7 - 2 cube[self.Nbins + 3] = cube[self.Nbins + 3] * 20 - 10 return
cube[i] *= 10
conditional_block
histogram.py
from __future__ import print_function, division, absolute_import from george import kernels, GP import numpy as np from kglib import fitters from scipy.integrate import quad from scipy.optimize import minimize class HistFitter(fitters.Bayesian_LS): def __init__(self, mcmc_samples, bin_edges): """ Histogram Inference a la Dan Foreman-Mackey Parameters: =========== - mcmc_samples: numpy array of shape (Nobs, Nsamples) MCMC samples for the thing you want to histogram - bin_edges: numpy.ndarray array The edges of the histogram bins to use. """ self.mcmc_samples = mcmc_samples self.bin_edges = bin_edges self.bin_centers = (self.bin_edges[:-1] + self.bin_edges[1:]) / 2 self.bin_widths = np.diff(self.bin_edges) self.Nbins = self.bin_widths.size self.Nobs = self.mcmc_samples.shape[0] # Find which bin each q falls in self.bin_idx = np.digitize(self.mcmc_samples, self.bin_edges) - 1 # Determine the censoring function for each bin (used in the integral) self.censor_integrals = np.array([quad(func=self.censoring_fcn, a=left, b=right)[0] for (left, right) in zip(self.bin_edges[:-1], self.bin_edges[1:])]) # Set values needed for multinest fitting self.n_params = self.Nbins self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)] def
(self, pars): # Pull theta out of pars theta = pars[:self.Nbins] # Generate the inner summation gamma = np.ones_like(self.bin_idx) * np.nan good = (self.bin_idx < self.Nbins) & (self.bin_idx >= 0) # nans in q get put in nonexistent bins gamma[good] = self.Nobs * self.censoring_fcn(self.mcmc_samples[good]) * theta[self.bin_idx[good]] summation = np.nanmean(gamma, axis=1) # Calculate the integral I = self._integral_fcn(theta) # Generate the log-likelihood ll = -I + np.nansum(np.log(summation)) return ll def lnprior(self, pars): """ Override this if you want to set a better prior on the bin heights. """ if all([p > 0 and p < 10 for p in pars]): return 0 return -np.inf def lnprob(self, pars): lp = self.lnprior(pars) return lp + self.lnlike(pars) if np.isfinite(lp) else -np.inf def _integral_fcn(self, theta): return np.sum(theta * self.censor_integrals) * self.Nobs def censoring_fcn(self, value): """ Censoring function. This should return the completeness of your survey to the given value. """ return 1.0 def guess_fit(self): def errfcn(pars): ll = self.lnprob(pars) return -ll initial_guess = np.ones_like(self.bin_centers) bounds = [[1e-3, None] for p in initial_guess] out = minimize(errfcn, initial_guess, bounds=bounds) return out.x def mnest_prior(self, cube, ndim, nparams): # All bins are in the range (0, 10) for i in range(self.Nbins): cube[i] *= 10 return class CensoredHistFitter(HistFitter): """ Inherits from HistFitter, but actually defines the censoring function """ def censoring_fcn(self, val, alpha=40, beta=0.25): # sigmoid censoring function. Change this for the real deal! 
return 1.0 / (1.0 + np.exp(-alpha * (val - beta))) class SmoothHistFitter(CensoredHistFitter): """ A subclass of HistogramFitter that puts a gaussian process smoothing prior on the bin heights """ def __init__(self, *args, **kwargs): super(SmoothHistFitter, self).__init__(*args, **kwargs) self.smoothing = self.mcmc_samples.shape[0] / self.Nbins self.n_params = self.Nbins + 4 self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)] self.param_names.extend(('lna', 'lntau', 'lnerr', 'mean')) def lnprior(self, pars): """ Smoothing prior using gaussian process. We will learn the hyperparameters and marginalize over them. """ theta = pars[:self.Nbins] if np.any(theta < 0): return -np.inf a, tau, err = np.exp(pars[self.Nbins:-1]) mean = pars[-1] kernel = a * kernels.ExpSquaredKernel(tau) gp = GP(kernel, mean=mean) gp.compute(self.bin_centers, yerr=err) return gp.lnlikelihood(theta) / self.smoothing def guess_fit(self): """ This doesn't work too great, but the full MCMC fit looks good. """ def errfcn(pars): ll = self.lnprob(pars) return -ll # Set up initial guesses initial_guess = np.ones(self.bin_centers.size + 4) initial_guess[-4] = 0.0 initial_guess[-3] = -0.25 initial_guess[-2] = -1.0 initial_guess[-1] = -1.0 # Set up bounds bounds = [[1e-3, None] for p in self.bin_centers] bounds.append([-10, 20]) bounds.append([-10, 10]) bounds.append((-1, 5)) bounds.append((-10, 10)) # Minimize out = minimize(errfcn, initial_guess, bounds=bounds) return out.x def _lnlike(self, pars): return self.lnprob(pars) def mnest_prior(self, cube, ndim, nparams): for i in range(self.Nbins): cube[i] *= 10 cube[self.Nbins] = cube[self.Nbins] * 30 - 10 cube[self.Nbins + 1] = cube[self.Nbins + 1] * 20 - 10 cube[self.Nbins + 2] = cube[self.Nbins + 2] * 7 - 2 cube[self.Nbins + 3] = cube[self.Nbins + 3] * 20 - 10 return
lnlike
identifier_name
histogram.py
from __future__ import print_function, division, absolute_import from george import kernels, GP import numpy as np from kglib import fitters from scipy.integrate import quad from scipy.optimize import minimize class HistFitter(fitters.Bayesian_LS): def __init__(self, mcmc_samples, bin_edges):
def lnlike(self, pars): # Pull theta out of pars theta = pars[:self.Nbins] # Generate the inner summation gamma = np.ones_like(self.bin_idx) * np.nan good = (self.bin_idx < self.Nbins) & (self.bin_idx >= 0) # nans in q get put in nonexistent bins gamma[good] = self.Nobs * self.censoring_fcn(self.mcmc_samples[good]) * theta[self.bin_idx[good]] summation = np.nanmean(gamma, axis=1) # Calculate the integral I = self._integral_fcn(theta) # Generate the log-likelihood ll = -I + np.nansum(np.log(summation)) return ll def lnprior(self, pars): """ Override this if you want to set a better prior on the bin heights. """ if all([p > 0 and p < 10 for p in pars]): return 0 return -np.inf def lnprob(self, pars): lp = self.lnprior(pars) return lp + self.lnlike(pars) if np.isfinite(lp) else -np.inf def _integral_fcn(self, theta): return np.sum(theta * self.censor_integrals) * self.Nobs def censoring_fcn(self, value): """ Censoring function. This should return the completeness of your survey to the given value. """ return 1.0 def guess_fit(self): def errfcn(pars): ll = self.lnprob(pars) return -ll initial_guess = np.ones_like(self.bin_centers) bounds = [[1e-3, None] for p in initial_guess] out = minimize(errfcn, initial_guess, bounds=bounds) return out.x def mnest_prior(self, cube, ndim, nparams): # All bins are in the range (0, 10) for i in range(self.Nbins): cube[i] *= 10 return class CensoredHistFitter(HistFitter): """ Inherits from HistFitter, but actually defines the censoring function """ def censoring_fcn(self, val, alpha=40, beta=0.25): # sigmoid censoring function. Change this for the real deal! 
return 1.0 / (1.0 + np.exp(-alpha * (val - beta))) class SmoothHistFitter(CensoredHistFitter): """ A subclass of HistogramFitter that puts a gaussian process smoothing prior on the bin heights """ def __init__(self, *args, **kwargs): super(SmoothHistFitter, self).__init__(*args, **kwargs) self.smoothing = self.mcmc_samples.shape[0] / self.Nbins self.n_params = self.Nbins + 4 self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)] self.param_names.extend(('lna', 'lntau', 'lnerr', 'mean')) def lnprior(self, pars): """ Smoothing prior using gaussian process. We will learn the hyperparameters and marginalize over them. """ theta = pars[:self.Nbins] if np.any(theta < 0): return -np.inf a, tau, err = np.exp(pars[self.Nbins:-1]) mean = pars[-1] kernel = a * kernels.ExpSquaredKernel(tau) gp = GP(kernel, mean=mean) gp.compute(self.bin_centers, yerr=err) return gp.lnlikelihood(theta) / self.smoothing def guess_fit(self): """ This doesn't work too great, but the full MCMC fit looks good. """ def errfcn(pars): ll = self.lnprob(pars) return -ll # Set up initial guesses initial_guess = np.ones(self.bin_centers.size + 4) initial_guess[-4] = 0.0 initial_guess[-3] = -0.25 initial_guess[-2] = -1.0 initial_guess[-1] = -1.0 # Set up bounds bounds = [[1e-3, None] for p in self.bin_centers] bounds.append([-10, 20]) bounds.append([-10, 10]) bounds.append((-1, 5)) bounds.append((-10, 10)) # Minimize out = minimize(errfcn, initial_guess, bounds=bounds) return out.x def _lnlike(self, pars): return self.lnprob(pars) def mnest_prior(self, cube, ndim, nparams): for i in range(self.Nbins): cube[i] *= 10 cube[self.Nbins] = cube[self.Nbins] * 30 - 10 cube[self.Nbins + 1] = cube[self.Nbins + 1] * 20 - 10 cube[self.Nbins + 2] = cube[self.Nbins + 2] * 7 - 2 cube[self.Nbins + 3] = cube[self.Nbins + 3] * 20 - 10 return
""" Histogram Inference a la Dan Foreman-Mackey Parameters: =========== - mcmc_samples: numpy array of shape (Nobs, Nsamples) MCMC samples for the thing you want to histogram - bin_edges: numpy.ndarray array The edges of the histogram bins to use. """ self.mcmc_samples = mcmc_samples self.bin_edges = bin_edges self.bin_centers = (self.bin_edges[:-1] + self.bin_edges[1:]) / 2 self.bin_widths = np.diff(self.bin_edges) self.Nbins = self.bin_widths.size self.Nobs = self.mcmc_samples.shape[0] # Find which bin each q falls in self.bin_idx = np.digitize(self.mcmc_samples, self.bin_edges) - 1 # Determine the censoring function for each bin (used in the integral) self.censor_integrals = np.array([quad(func=self.censoring_fcn, a=left, b=right)[0] for (left, right) in zip(self.bin_edges[:-1], self.bin_edges[1:])]) # Set values needed for multinest fitting self.n_params = self.Nbins self.param_names = [r'$\theta_{}$'.format(i) for i in range(self.Nbins)]
identifier_body
comments.js
import React from 'react' import Icon from 'react-icon-base' const FaComments = props => ( <Icon viewBox="0 0 40 40" {...props}> <g><path d="m31.4 17.1q0 3.1-2.1 5.8t-5.7 4.1-7.9 1.6q-1.9 0-3.9-0.4-2.8 2-6.2 2.9-0.8 0.2-1.9 0.3h-0.1q-0.3 0-0.5-0.2t-0.2-0.4q0-0.1 0-0.2t0-0.1 0-0.1l0.1-0.2 0-0.1 0.1-0.1 0.1-0.1 0.1-0.1q0.1-0.1 0.5-0.6t0.6-0.6 0.5-0.7 0.6-0.8 0.4-1q-2.7-1.6-4.3-4t-1.6-5q0-3.1 2.1-5.7t5.7-4.2 7.9-1.5 7.9 1.5 5.7 4.2 2.1 5.7z m8.6 5.8q0 2.6-1.6 5t-4.3 3.9q0.2 0.5 0.4 1t0.6 0.8 0.5 0.7 0.6 0.7 0.5 0.5q0 0 0.1 0.1t0.1 0.1 0.1 0.1 0 0.2l0.1 0.1 0 0.1 0 0.1 0 0.2q0 0.3-0.3 0.5t-0.5 0.1q-1.1-0.1-1.9-0.3-3.4-0.9-6.2-2.9-2 0.4-3.9 0.4-6.1 0-10.6-3 1.3 0.1 2 0.1 3.6 0 6.9-1t5.9-2.9q2.8-2 4.3-4.7t1.5-5.7q0-1.7-0.5-3.4 2.9 1.6 4.5 4t1.7 5.2z"/></g>
export default FaComments
</Icon> )
random_line_split
__init__.py
# -*- coding: utf-8 -*- # ERPNext - web based ERP (http://erpnext.com) # Copyright (C) 2012 Web Notes Technologies Pvt Ltd # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. #
# but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. # default settings that can be made for a user. from __future__ import unicode_literals import frappe # product_name = "ERPNext" product_name = "letzERP" user_defaults = { "Company": "company", "Territory": "territory" }
# This program is distributed in the hope that it will be useful,
random_line_split
estimated-input-latency.js
/** * @license Copyright 2016 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ 'use strict'; const Audit = require('../audit'); const i18n = require('../../lib/i18n'); const UIStrings = { /** The name of the metric that marks the estimated time between the page receiving input (a user clicking, tapping, or typing) and the page responding. Shown to users as the label for the numeric metric value. Ideally fits within a ~40 character limit. */ title: 'Estimated Input Latency', /** Description of the Estimated Input Latency metric that estimates the amount of time, in milliseconds, that the app takes to respond to user input. This description is displayed within a tooltip when the user hovers on the metric name to see more. No character length limits. 'Learn More' becomes link text to additional documentation. */ description: 'The score above is an estimate of how long your app takes to respond to user ' + 'input, in milliseconds, during the busiest 5s window of page load. If your ' + 'latency is higher than 50 ms, users may perceive your app as laggy. ' + '[Learn more](https://developers.google.com/web/tools/lighthouse/audits/estimated-input-latency).', }; const str_ = i18n.createMessageInstanceIdFn(__filename, UIStrings); class
extends Audit { /** * @return {LH.Audit.Meta} */ static get meta() { return { id: 'estimated-input-latency', title: str_(UIStrings.title), description: str_(UIStrings.description), scoreDisplayMode: Audit.SCORING_MODES.NUMERIC, requiredArtifacts: ['traces'], }; } /** * @return {LH.Audit.ScoreOptions} */ static get defaultOptions() { return { // see https://www.desmos.com/calculator/srv0hqhf7d scorePODR: 50, scoreMedian: 100, }; } /** * Audits the page to estimate input latency. * @see https://github.com/GoogleChrome/lighthouse/issues/28 * * @param {LH.Artifacts} artifacts * @param {LH.Audit.Context} context * @return {Promise<LH.Audit.Product>} */ static async audit(artifacts, context) { const trace = artifacts.traces[Audit.DEFAULT_PASS]; const devtoolsLog = artifacts.devtoolsLogs[Audit.DEFAULT_PASS]; const metricComputationData = {trace, devtoolsLog, settings: context.settings}; const metricResult = await artifacts.requestEstimatedInputLatency(metricComputationData); return { score: Audit.computeLogNormalScore( metricResult.timing, context.options.scorePODR, context.options.scoreMedian ), rawValue: metricResult.timing, displayValue: str_(i18n.UIStrings.ms, {timeInMs: metricResult.timing}), }; } } module.exports = EstimatedInputLatency; module.exports.UIStrings = UIStrings;
EstimatedInputLatency
identifier_name
estimated-input-latency.js
/** * @license Copyright 2016 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ 'use strict'; const Audit = require('../audit'); const i18n = require('../../lib/i18n'); const UIStrings = { /** The name of the metric that marks the estimated time between the page receiving input (a user clicking, tapping, or typing) and the page responding. Shown to users as the label for the numeric metric value. Ideally fits within a ~40 character limit. */ title: 'Estimated Input Latency', /** Description of the Estimated Input Latency metric that estimates the amount of time, in milliseconds, that the app takes to respond to user input. This description is displayed within a tooltip when the user hovers on the metric name to see more. No character length limits. 'Learn More' becomes link text to additional documentation. */ description: 'The score above is an estimate of how long your app takes to respond to user ' + 'input, in milliseconds, during the busiest 5s window of page load. If your ' + 'latency is higher than 50 ms, users may perceive your app as laggy. 
' + '[Learn more](https://developers.google.com/web/tools/lighthouse/audits/estimated-input-latency).', }; const str_ = i18n.createMessageInstanceIdFn(__filename, UIStrings); class EstimatedInputLatency extends Audit { /** * @return {LH.Audit.Meta} */ static get meta() { return { id: 'estimated-input-latency', title: str_(UIStrings.title), description: str_(UIStrings.description), scoreDisplayMode: Audit.SCORING_MODES.NUMERIC, requiredArtifacts: ['traces'], }; } /** * @return {LH.Audit.ScoreOptions} */ static get defaultOptions() { return { // see https://www.desmos.com/calculator/srv0hqhf7d scorePODR: 50, scoreMedian: 100, }; } /** * Audits the page to estimate input latency. * @see https://github.com/GoogleChrome/lighthouse/issues/28 * * @param {LH.Artifacts} artifacts * @param {LH.Audit.Context} context * @return {Promise<LH.Audit.Product>} */ static async audit(artifacts, context) { const trace = artifacts.traces[Audit.DEFAULT_PASS]; const devtoolsLog = artifacts.devtoolsLogs[Audit.DEFAULT_PASS]; const metricComputationData = {trace, devtoolsLog, settings: context.settings}; const metricResult = await artifacts.requestEstimatedInputLatency(metricComputationData); return { score: Audit.computeLogNormalScore( metricResult.timing, context.options.scorePODR, context.options.scoreMedian ), rawValue: metricResult.timing,
}; } } module.exports = EstimatedInputLatency; module.exports.UIStrings = UIStrings;
displayValue: str_(i18n.UIStrings.ms, {timeInMs: metricResult.timing}),
random_line_split
estimated-input-latency.js
/** * @license Copyright 2016 Google Inc. All Rights Reserved. * Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 * Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ 'use strict'; const Audit = require('../audit'); const i18n = require('../../lib/i18n'); const UIStrings = { /** The name of the metric that marks the estimated time between the page receiving input (a user clicking, tapping, or typing) and the page responding. Shown to users as the label for the numeric metric value. Ideally fits within a ~40 character limit. */ title: 'Estimated Input Latency', /** Description of the Estimated Input Latency metric that estimates the amount of time, in milliseconds, that the app takes to respond to user input. This description is displayed within a tooltip when the user hovers on the metric name to see more. No character length limits. 'Learn More' becomes link text to additional documentation. */ description: 'The score above is an estimate of how long your app takes to respond to user ' + 'input, in milliseconds, during the busiest 5s window of page load. If your ' + 'latency is higher than 50 ms, users may perceive your app as laggy. ' + '[Learn more](https://developers.google.com/web/tools/lighthouse/audits/estimated-input-latency).', }; const str_ = i18n.createMessageInstanceIdFn(__filename, UIStrings); class EstimatedInputLatency extends Audit { /** * @return {LH.Audit.Meta} */ static get meta()
/** * @return {LH.Audit.ScoreOptions} */ static get defaultOptions() { return { // see https://www.desmos.com/calculator/srv0hqhf7d scorePODR: 50, scoreMedian: 100, }; } /** * Audits the page to estimate input latency. * @see https://github.com/GoogleChrome/lighthouse/issues/28 * * @param {LH.Artifacts} artifacts * @param {LH.Audit.Context} context * @return {Promise<LH.Audit.Product>} */ static async audit(artifacts, context) { const trace = artifacts.traces[Audit.DEFAULT_PASS]; const devtoolsLog = artifacts.devtoolsLogs[Audit.DEFAULT_PASS]; const metricComputationData = {trace, devtoolsLog, settings: context.settings}; const metricResult = await artifacts.requestEstimatedInputLatency(metricComputationData); return { score: Audit.computeLogNormalScore( metricResult.timing, context.options.scorePODR, context.options.scoreMedian ), rawValue: metricResult.timing, displayValue: str_(i18n.UIStrings.ms, {timeInMs: metricResult.timing}), }; } } module.exports = EstimatedInputLatency; module.exports.UIStrings = UIStrings;
{ return { id: 'estimated-input-latency', title: str_(UIStrings.title), description: str_(UIStrings.description), scoreDisplayMode: Audit.SCORING_MODES.NUMERIC, requiredArtifacts: ['traces'], }; }
identifier_body
i16_add_with_overflow.rs
#![feature(core, core_intrinsics)] extern crate core; #[cfg(test)] mod tests { use core::intrinsics::i16_add_with_overflow; // pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool); #[test] fn i16_add_with_overflow_test1()
#[test] #[allow(overflowing_literals)] fn i16_add_with_overflow_test2() { let x: i16 = 0x7fff; // 32767 let y: i16 = 0x0001; // 1 let (result, is_overflow): (i16, bool) = unsafe { i16_add_with_overflow(x, y) }; assert_eq!(result, 0x8000); // -32768 assert_eq!(is_overflow, true); } #[test] #[allow(overflowing_literals)] fn i16_add_with_overflow_test3() { let x: i16 = 0x8000; // -32768 let y: i16 = 0xffff; // -1 let (result, is_overflow): (i16, bool) = unsafe { i16_add_with_overflow(x, y) }; assert_eq!(result, 0x7fff); // 32767 assert_eq!(is_overflow, true); } }
{ let x: i16 = 0x7f00; // 32512 let y: i16 = 0x00ff; // 255 let (result, is_overflow): (i16, bool) = unsafe { i16_add_with_overflow(x, y) }; assert_eq!(result, 0x7fff); // 32767 assert_eq!(is_overflow, false); }
identifier_body
i16_add_with_overflow.rs
#![feature(core, core_intrinsics)] extern crate core; #[cfg(test)] mod tests { use core::intrinsics::i16_add_with_overflow; // pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool); #[test] fn i16_add_with_overflow_test1() { let x: i16 = 0x7f00; // 32512 let y: i16 = 0x00ff; // 255 let (result, is_overflow): (i16, bool) = unsafe { i16_add_with_overflow(x, y) }; assert_eq!(result, 0x7fff); // 32767 assert_eq!(is_overflow, false); } #[test] #[allow(overflowing_literals)] fn
() { let x: i16 = 0x7fff; // 32767 let y: i16 = 0x0001; // 1 let (result, is_overflow): (i16, bool) = unsafe { i16_add_with_overflow(x, y) }; assert_eq!(result, 0x8000); // -32768 assert_eq!(is_overflow, true); } #[test] #[allow(overflowing_literals)] fn i16_add_with_overflow_test3() { let x: i16 = 0x8000; // -32768 let y: i16 = 0xffff; // -1 let (result, is_overflow): (i16, bool) = unsafe { i16_add_with_overflow(x, y) }; assert_eq!(result, 0x7fff); // 32767 assert_eq!(is_overflow, true); } }
i16_add_with_overflow_test2
identifier_name
i16_add_with_overflow.rs
#![feature(core, core_intrinsics)] extern crate core;
#[cfg(test)] mod tests { use core::intrinsics::i16_add_with_overflow; // pub fn i16_add_with_overflow(x: i16, y: i16) -> (i16, bool); #[test] fn i16_add_with_overflow_test1() { let x: i16 = 0x7f00; // 32512 let y: i16 = 0x00ff; // 255 let (result, is_overflow): (i16, bool) = unsafe { i16_add_with_overflow(x, y) }; assert_eq!(result, 0x7fff); // 32767 assert_eq!(is_overflow, false); } #[test] #[allow(overflowing_literals)] fn i16_add_with_overflow_test2() { let x: i16 = 0x7fff; // 32767 let y: i16 = 0x0001; // 1 let (result, is_overflow): (i16, bool) = unsafe { i16_add_with_overflow(x, y) }; assert_eq!(result, 0x8000); // -32768 assert_eq!(is_overflow, true); } #[test] #[allow(overflowing_literals)] fn i16_add_with_overflow_test3() { let x: i16 = 0x8000; // -32768 let y: i16 = 0xffff; // -1 let (result, is_overflow): (i16, bool) = unsafe { i16_add_with_overflow(x, y) }; assert_eq!(result, 0x7fff); // 32767 assert_eq!(is_overflow, true); } }
random_line_split
HelloWorldWebPart.ts
import { Version } from '@microsoft/sp-core-library'; import { BaseClientSideWebPart, IPropertyPaneConfiguration, PropertyPaneTextField, PropertyPaneCheckbox, PropertyPaneDropdown, PropertyPaneToggle } from '@microsoft/sp-webpart-base'; import { escape } from '@microsoft/sp-lodash-subset'; import styles from './HelloWorld.module.scss'; import * as strings from 'helloWorldStrings'; import { IHelloWorldWebPartProps } from './IHelloWorldWebPartProps'; import MockHttpClient from './MockHttpClient'; import { SPHttpClient } from '@microsoft/sp-http'; import { Environment, EnvironmentType } from '@microsoft/sp-core-library'; export interface ISPLists { value: ISPList[]; } export interface ISPList { Title: string; Id: string; } export default class HelloWorldWebPart extends BaseClientSideWebPart<IHelloWorldWebPartProps> { public render(): void { this.domElement.innerHTML = ` <div class="${styles.helloWorld}"> <div class="${styles.container}"> <div class="ms-Grid-row ms-bgColor-themeDark ms-fontColor-white ${styles.row}"> <div class="ms-Grid-col ms-u-lg10 ms-u-xl8 ms-u-xlPush2 ms-u-lgPush1"> <span class="ms-font-xl ms-fontColor-white">Welcome to SharePoint!</span> <p class="ms-font-l ms-fontColor-white">Customize SharePoint experiences using Web Parts.</p> <p class="ms-font-l ms-fontColor-white">${escape(this.properties.description)}</p> <p class="ms-font-l ms-fontColor-white">Loading from ${escape(this.context.pageContext.web.title)}</p> <a href="https://aka.ms/spfx" class="${styles.button}"> <span class="${styles.label}">Learn more</span> </a> </div> </div> </div> <div id="spListContainer" /> </div>`; this._renderListAsync(); } private _getMockListData(): Promise<ISPLists> { return MockHttpClient.get() .then((data: ISPList[]) => { var listData: ISPLists = { value: data }; return listData; }) as Promise<ISPLists>; } private _getListData(): Promise<ISPLists> { return this.context.spHttpClient.get(this.context.pageContext.web.absoluteUrl + `/_api/web/lists?$filter=Hidden eq 
false`, SPHttpClient.configurations.v1) .then((response: Response) => { return response.json(); }); } private _renderListAsync(): void { // Local environment if (Environment.type === EnvironmentType.Local)
else if (Environment.type == EnvironmentType.SharePoint || Environment.type == EnvironmentType.ClassicSharePoint) { this._getListData() .then((response) => { this._renderList(response.value); }); } } private _renderList(items: ISPList[]): void { let html: string = ''; items.forEach((item: ISPList) => { html += ` <ul class="${styles.list}"> <li class="${styles.listItem}"> <span class="ms-font-l">${item.Title}</span> </li> </ul>`; }); const listContainer: Element = this.domElement.querySelector('#spListContainer'); listContainer.innerHTML = html; } protected get dataVersion(): Version { return Version.parse('1.0'); } protected getPropertyPaneConfiguration(): IPropertyPaneConfiguration { return { pages: [ { header: { description: strings.PropertyPaneDescription }, groups: [ { groupName: strings.BasicGroupName, groupFields: [ PropertyPaneTextField('description', { label: 'Description' }), PropertyPaneTextField('test', { label: 'Multi-line Text Field', multiline: true }), PropertyPaneCheckbox('test1', { text: 'Checkbox' }), PropertyPaneDropdown('test2', { label: 'Dropdown', options: [ { key: '1', text: 'One' }, { key: '2', text: 'Two' }, { key: '3', text: 'Three' }, { key: '4', text: 'Four' } ] }), PropertyPaneToggle('test3', { label: 'Toggle', onText: 'On', offText: 'Off' }) ] } ] } ] }; } }
{ this._getMockListData().then((response) => { this._renderList(response.value); }); }
conditional_block
HelloWorldWebPart.ts
import { Version } from '@microsoft/sp-core-library'; import { BaseClientSideWebPart, IPropertyPaneConfiguration, PropertyPaneTextField, PropertyPaneCheckbox, PropertyPaneDropdown, PropertyPaneToggle } from '@microsoft/sp-webpart-base'; import { escape } from '@microsoft/sp-lodash-subset'; import styles from './HelloWorld.module.scss'; import * as strings from 'helloWorldStrings'; import { IHelloWorldWebPartProps } from './IHelloWorldWebPartProps'; import MockHttpClient from './MockHttpClient'; import { SPHttpClient } from '@microsoft/sp-http'; import { Environment, EnvironmentType } from '@microsoft/sp-core-library'; export interface ISPLists { value: ISPList[]; } export interface ISPList { Title: string; Id: string; } export default class HelloWorldWebPart extends BaseClientSideWebPart<IHelloWorldWebPartProps> { public render(): void { this.domElement.innerHTML = ` <div class="${styles.helloWorld}"> <div class="${styles.container}"> <div class="ms-Grid-row ms-bgColor-themeDark ms-fontColor-white ${styles.row}"> <div class="ms-Grid-col ms-u-lg10 ms-u-xl8 ms-u-xlPush2 ms-u-lgPush1"> <span class="ms-font-xl ms-fontColor-white">Welcome to SharePoint!</span> <p class="ms-font-l ms-fontColor-white">Customize SharePoint experiences using Web Parts.</p> <p class="ms-font-l ms-fontColor-white">${escape(this.properties.description)}</p> <p class="ms-font-l ms-fontColor-white">Loading from ${escape(this.context.pageContext.web.title)}</p> <a href="https://aka.ms/spfx" class="${styles.button}"> <span class="${styles.label}">Learn more</span> </a> </div> </div> </div> <div id="spListContainer" /> </div>`; this._renderListAsync(); } private _getMockListData(): Promise<ISPLists> { return MockHttpClient.get() .then((data: ISPList[]) => { var listData: ISPLists = { value: data }; return listData; }) as Promise<ISPLists>; } private _getListData(): Promise<ISPLists> { return this.context.spHttpClient.get(this.context.pageContext.web.absoluteUrl + `/_api/web/lists?$filter=Hidden eq 
false`, SPHttpClient.configurations.v1) .then((response: Response) => { return response.json();
private _renderListAsync(): void { // Local environment if (Environment.type === EnvironmentType.Local) { this._getMockListData().then((response) => { this._renderList(response.value); }); } else if (Environment.type == EnvironmentType.SharePoint || Environment.type == EnvironmentType.ClassicSharePoint) { this._getListData() .then((response) => { this._renderList(response.value); }); } } private _renderList(items: ISPList[]): void { let html: string = ''; items.forEach((item: ISPList) => { html += ` <ul class="${styles.list}"> <li class="${styles.listItem}"> <span class="ms-font-l">${item.Title}</span> </li> </ul>`; }); const listContainer: Element = this.domElement.querySelector('#spListContainer'); listContainer.innerHTML = html; } protected get dataVersion(): Version { return Version.parse('1.0'); } protected getPropertyPaneConfiguration(): IPropertyPaneConfiguration { return { pages: [ { header: { description: strings.PropertyPaneDescription }, groups: [ { groupName: strings.BasicGroupName, groupFields: [ PropertyPaneTextField('description', { label: 'Description' }), PropertyPaneTextField('test', { label: 'Multi-line Text Field', multiline: true }), PropertyPaneCheckbox('test1', { text: 'Checkbox' }), PropertyPaneDropdown('test2', { label: 'Dropdown', options: [ { key: '1', text: 'One' }, { key: '2', text: 'Two' }, { key: '3', text: 'Three' }, { key: '4', text: 'Four' } ] }), PropertyPaneToggle('test3', { label: 'Toggle', onText: 'On', offText: 'Off' }) ] } ] } ] }; } }
}); }
random_line_split
HelloWorldWebPart.ts
import { Version } from '@microsoft/sp-core-library'; import { BaseClientSideWebPart, IPropertyPaneConfiguration, PropertyPaneTextField, PropertyPaneCheckbox, PropertyPaneDropdown, PropertyPaneToggle } from '@microsoft/sp-webpart-base'; import { escape } from '@microsoft/sp-lodash-subset'; import styles from './HelloWorld.module.scss'; import * as strings from 'helloWorldStrings'; import { IHelloWorldWebPartProps } from './IHelloWorldWebPartProps'; import MockHttpClient from './MockHttpClient'; import { SPHttpClient } from '@microsoft/sp-http'; import { Environment, EnvironmentType } from '@microsoft/sp-core-library'; export interface ISPLists { value: ISPList[]; } export interface ISPList { Title: string; Id: string; } export default class HelloWorldWebPart extends BaseClientSideWebPart<IHelloWorldWebPartProps> { public render(): void { this.domElement.innerHTML = ` <div class="${styles.helloWorld}"> <div class="${styles.container}"> <div class="ms-Grid-row ms-bgColor-themeDark ms-fontColor-white ${styles.row}"> <div class="ms-Grid-col ms-u-lg10 ms-u-xl8 ms-u-xlPush2 ms-u-lgPush1"> <span class="ms-font-xl ms-fontColor-white">Welcome to SharePoint!</span> <p class="ms-font-l ms-fontColor-white">Customize SharePoint experiences using Web Parts.</p> <p class="ms-font-l ms-fontColor-white">${escape(this.properties.description)}</p> <p class="ms-font-l ms-fontColor-white">Loading from ${escape(this.context.pageContext.web.title)}</p> <a href="https://aka.ms/spfx" class="${styles.button}"> <span class="${styles.label}">Learn more</span> </a> </div> </div> </div> <div id="spListContainer" /> </div>`; this._renderListAsync(); } private _getMockListData(): Promise<ISPLists> { return MockHttpClient.get() .then((data: ISPList[]) => { var listData: ISPLists = { value: data }; return listData; }) as Promise<ISPLists>; } private _getListData(): Promise<ISPLists> { return this.context.spHttpClient.get(this.context.pageContext.web.absoluteUrl + `/_api/web/lists?$filter=Hidden eq 
false`, SPHttpClient.configurations.v1) .then((response: Response) => { return response.json(); }); } private
(): void { // Local environment if (Environment.type === EnvironmentType.Local) { this._getMockListData().then((response) => { this._renderList(response.value); }); } else if (Environment.type == EnvironmentType.SharePoint || Environment.type == EnvironmentType.ClassicSharePoint) { this._getListData() .then((response) => { this._renderList(response.value); }); } } private _renderList(items: ISPList[]): void { let html: string = ''; items.forEach((item: ISPList) => { html += ` <ul class="${styles.list}"> <li class="${styles.listItem}"> <span class="ms-font-l">${item.Title}</span> </li> </ul>`; }); const listContainer: Element = this.domElement.querySelector('#spListContainer'); listContainer.innerHTML = html; } protected get dataVersion(): Version { return Version.parse('1.0'); } protected getPropertyPaneConfiguration(): IPropertyPaneConfiguration { return { pages: [ { header: { description: strings.PropertyPaneDescription }, groups: [ { groupName: strings.BasicGroupName, groupFields: [ PropertyPaneTextField('description', { label: 'Description' }), PropertyPaneTextField('test', { label: 'Multi-line Text Field', multiline: true }), PropertyPaneCheckbox('test1', { text: 'Checkbox' }), PropertyPaneDropdown('test2', { label: 'Dropdown', options: [ { key: '1', text: 'One' }, { key: '2', text: 'Two' }, { key: '3', text: 'Three' }, { key: '4', text: 'Four' } ] }), PropertyPaneToggle('test3', { label: 'Toggle', onText: 'On', offText: 'Off' }) ] } ] } ] }; } }
_renderListAsync
identifier_name
lib.rs
//! Letter count: library. //! //! Functions to count graphemes in a string and print a summary. use unicode_segmentation::UnicodeSegmentation; use std::collections::HashMap; /// Prints a summary of the contents of a grapheme counter. pub fn
<S: ::std::hash::BuildHasher>(counter: &HashMap<String, u64, S>) { for (key, val) in counter.iter() { println!("{}: {}", key, val); } } /// Counts all the graphemes in a string. pub fn count_graphemes_in_string<S: ::std::hash::BuildHasher>( to_parse: &str, counter: &mut HashMap<String, u64, S>, ) { // Loop through each character in the current string... for grapheme in UnicodeSegmentation::graphemes(to_parse, true) { // If the character we are looking at already exists in the counter // hash, get its value. Otherwise, start a new counter at zero. let count = counter.entry(grapheme.to_string()).or_insert(0); // In either case, increment the counter. *count += 1; } }
print_summary
identifier_name
lib.rs
//! Letter count: library. //! //! Functions to count graphemes in a string and print a summary. use unicode_segmentation::UnicodeSegmentation; use std::collections::HashMap;
pub fn print_summary<S: ::std::hash::BuildHasher>(counter: &HashMap<String, u64, S>) { for (key, val) in counter.iter() { println!("{}: {}", key, val); } } /// Counts all the graphemes in a string. pub fn count_graphemes_in_string<S: ::std::hash::BuildHasher>( to_parse: &str, counter: &mut HashMap<String, u64, S>, ) { // Loop through each character in the current string... for grapheme in UnicodeSegmentation::graphemes(to_parse, true) { // If the character we are looking at already exists in the counter // hash, get its value. Otherwise, start a new counter at zero. let count = counter.entry(grapheme.to_string()).or_insert(0); // In either case, increment the counter. *count += 1; } }
/// Prints a summary of the contents of a grapheme counter.
random_line_split
lib.rs
//! Letter count: library. //! //! Functions to count graphemes in a string and print a summary. use unicode_segmentation::UnicodeSegmentation; use std::collections::HashMap; /// Prints a summary of the contents of a grapheme counter. pub fn print_summary<S: ::std::hash::BuildHasher>(counter: &HashMap<String, u64, S>) { for (key, val) in counter.iter() { println!("{}: {}", key, val); } } /// Counts all the graphemes in a string. pub fn count_graphemes_in_string<S: ::std::hash::BuildHasher>( to_parse: &str, counter: &mut HashMap<String, u64, S>, )
{ // Loop through each character in the current string... for grapheme in UnicodeSegmentation::graphemes(to_parse, true) { // If the character we are looking at already exists in the counter // hash, get its value. Otherwise, start a new counter at zero. let count = counter.entry(grapheme.to_string()).or_insert(0); // In either case, increment the counter. *count += 1; } }
identifier_body
Combinatorics.py
# Parallel Biclustering Algorithm - Fast Algorithm for finding all biclusters in a GEM # Copyright (C) 2006 Luke Imhoff # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # Contact Info: # Luke Imhoff (imho0030@umn.edu) # 220 Delaware St. SE # Minneapolis, MN 55455 """Combinatoric utilities not included in scipy or faster implementation @author Luke Imhoff @license GPLv2 """ from scipy import zeros from scipy.misc import factorial import operator def nChooseK(n, k): """n choose k Returns number of combinations of k elements in n elements @param n total number of elements in set @param k number of elements in subset to choose @return C(n, k) """ numerator = 1 for i in xrange(max(n - k, k) + 1, n + 1): numerator *= i return numerator / factorial(min(n - k, k), exact = 1) class xcombinations(object): """Returns all combinations of subsetSize number from [0, setSize) Supports len() @param setSize number of elements in set to select from @param subsetSize number of elements chosen from set @return generator of combinations. Combination is stored in a 1D array of subsetSize length. None if subsetSize is 0. 
""" def __init__(self, setSize, subsetSize): if setSize < subsetSize: raise ValueError("setSize (%d) must be >= subsetSize (%d)" % (setSize, subsetSize)) self.setSize = setSize self.subsetSize = subsetSize def len(self): return nChooseK(self.setSize, self.subsetSize) def __iter__(self): a = zeros(self.subsetSize) # using a closure faster than passing setSize def recursiveXCombinations(setStart, recursiveSubsetSize): if recursiveSubsetSize == 0: yield None else: index = self.subsetSize - recursiveSubsetSize for i in xrange(setStart, self.setSize - recursiveSubsetSize + 1):
return recursiveXCombinations(0, self.subsetSize) def permutations(setSize, subsetSize): """Return 'setSize permute subsetSize' or nPk @param setSize number of elements in set to select from @param subsetSize number for elements chosen from set @return number of permutations of subsetSize of setSize """ count = 1 for i in xrange(setSize - subsetSize + 1, setSize + 1): count *= i return count class xpermutations(object): """Returns all permutations of subsetSize number from [0, setSize) Supports len() @param setSize number of elements in set to select from @param subsetSize number for elements chosen from set @return generator of permutations. Permutations stored in a 1D array of subsetSize length. None if subsetsize is 0 """ def __init__(self, setSize, subsetSize): if setSize < subsetSize: raise ValueError("setSize (%d) must be >= subsetSize (%d)" % (setSize, subsetSize)) self.setSize = setSize self.subsetSize = subsetSize def len(self): return permutations(self.setSize, self.subsetSize) def __iter__(self): a = zeros(self.subsetSize) items = range(self.setSize) # using a closure faster than passing setSize def recursiveXPermutations(recursiveItems, recursiveSubsetSize): if recursiveSubsetSize == 0: yield None else: index = self.subsetSize -recursiveSubsetSize for i in xrange(len(recursiveItems)): a[index] = recursiveItems[i] for cc in recursiveXPermutations(recursiveItems[:i] + recursiveItems[i + 1:], recursiveSubsetSize - 1): yield a return recursiveXPermutations(items, self.subsetSize) def selections(setSizes): length = 1 for setSize in setSizes: length *= setSize return length class xselections(object): """Returns all selections of one item from each set of setSizes Supports len() @param collection of set sizes @return generator of selections. Selection is stored in a 1D array with each entry corresponding to the selected index from the respective set. 
None if no sets """ def __init__(self, setSizes): self.setSizes = setSizes def __len__(self): return selections(self.setSizes) def len(self): return selections(self.setSizes) def __iter__(self): a = zeros(len(self.setSizes)) setCount = len(self.setSizes) def recursiveXSelections(setIndex): if setIndex == setCount: yield None else: for i in xrange(0, self.setSizes[setIndex]): a[setIndex] = i for j in recursiveXSelections(setIndex + 1): yield a return recursiveXSelections(0)
a[index] = i for j in recursiveXCombinations(i + 1, recursiveSubsetSize - 1): yield a
conditional_block
Combinatorics.py
# Parallel Biclustering Algorithm - Fast Algorithm for finding all biclusters in a GEM # Copyright (C) 2006 Luke Imhoff # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # Contact Info: # Luke Imhoff (imho0030@umn.edu) # 220 Delaware St. SE # Minneapolis, MN 55455 """Combinatoric utilities not included in scipy or faster implementation @author Luke Imhoff @license GPLv2 """ from scipy import zeros from scipy.misc import factorial import operator def nChooseK(n, k): """n choose k Returns number of combinations of k elements in n elements @param n total number of elements in set @param k number of elements in subset to choose @return C(n, k) """ numerator = 1 for i in xrange(max(n - k, k) + 1, n + 1): numerator *= i return numerator / factorial(min(n - k, k), exact = 1) class xcombinations(object): """Returns all combinations of subsetSize number from [0, setSize) Supports len() @param setSize number of elements in set to select from @param subsetSize number of elements chosen from set @return generator of combinations. Combination is stored in a 1D array of subsetSize length. None if subsetSize is 0. 
""" def __init__(self, setSize, subsetSize): if setSize < subsetSize: raise ValueError("setSize (%d) must be >= subsetSize (%d)" % (setSize, subsetSize)) self.setSize = setSize self.subsetSize = subsetSize def len(self): return nChooseK(self.setSize, self.subsetSize) def __iter__(self): a = zeros(self.subsetSize) # using a closure faster than passing setSize def recursiveXCombinations(setStart, recursiveSubsetSize): if recursiveSubsetSize == 0: yield None else: index = self.subsetSize - recursiveSubsetSize for i in xrange(setStart, self.setSize - recursiveSubsetSize + 1): a[index] = i for j in recursiveXCombinations(i + 1, recursiveSubsetSize - 1): yield a return recursiveXCombinations(0, self.subsetSize) def permutations(setSize, subsetSize): """Return 'setSize permute subsetSize' or nPk @param setSize number of elements in set to select from @param subsetSize number for elements chosen from set @return number of permutations of subsetSize of setSize """ count = 1 for i in xrange(setSize - subsetSize + 1, setSize + 1): count *= i return count class xpermutations(object): """Returns all permutations of subsetSize number from [0, setSize) Supports len() @param setSize number of elements in set to select from @param subsetSize number for elements chosen from set @return generator of permutations. Permutations stored in a 1D array of subsetSize length. 
None if subsetsize is 0 """ def __init__(self, setSize, subsetSize): if setSize < subsetSize: raise ValueError("setSize (%d) must be >= subsetSize (%d)" % (setSize, subsetSize)) self.setSize = setSize self.subsetSize = subsetSize def len(self): return permutations(self.setSize, self.subsetSize) def __iter__(self): a = zeros(self.subsetSize) items = range(self.setSize) # using a closure faster than passing setSize def recursiveXPermutations(recursiveItems, recursiveSubsetSize): if recursiveSubsetSize == 0: yield None else: index = self.subsetSize -recursiveSubsetSize for i in xrange(len(recursiveItems)): a[index] = recursiveItems[i] for cc in recursiveXPermutations(recursiveItems[:i] + recursiveItems[i + 1:], recursiveSubsetSize - 1): yield a return recursiveXPermutations(items, self.subsetSize) def selections(setSizes): length = 1 for setSize in setSizes: length *= setSize return length class xselections(object): """Returns all selections of one item from each set of setSizes Supports len() @param collection of set sizes @return generator of selections. Selection is stored in a 1D array with each entry corresponding to the selected index from the respective set. None if no sets """ def __init__(self, setSizes): self.setSizes = setSizes def __len__(self): return selections(self.setSizes) def len(self): return selections(self.setSizes) def __iter__(self): a = zeros(len(self.setSizes)) setCount = len(self.setSizes) def
(setIndex): if setIndex == setCount: yield None else: for i in xrange(0, self.setSizes[setIndex]): a[setIndex] = i for j in recursiveXSelections(setIndex + 1): yield a return recursiveXSelections(0)
recursiveXSelections
identifier_name
Combinatorics.py
# Parallel Biclustering Algorithm - Fast Algorithm for finding all biclusters in a GEM # Copyright (C) 2006 Luke Imhoff # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # Contact Info: # Luke Imhoff (imho0030@umn.edu) # 220 Delaware St. SE # Minneapolis, MN 55455 """Combinatoric utilities not included in scipy or faster implementation @author Luke Imhoff @license GPLv2 """ from scipy import zeros from scipy.misc import factorial import operator def nChooseK(n, k): """n choose k Returns number of combinations of k elements in n elements @param n total number of elements in set @param k number of elements in subset to choose @return C(n, k) """ numerator = 1 for i in xrange(max(n - k, k) + 1, n + 1): numerator *= i return numerator / factorial(min(n - k, k), exact = 1) class xcombinations(object): """Returns all combinations of subsetSize number from [0, setSize) Supports len() @param setSize number of elements in set to select from @param subsetSize number of elements chosen from set @return generator of combinations. Combination is stored in a 1D array of subsetSize length. None if subsetSize is 0. """ def __init__(self, setSize, subsetSize):
raise ValueError("setSize (%d) must be >= subsetSize (%d)" % (setSize, subsetSize)) self.setSize = setSize self.subsetSize = subsetSize def len(self): return nChooseK(self.setSize, self.subsetSize) def __iter__(self): a = zeros(self.subsetSize) # using a closure faster than passing setSize def recursiveXCombinations(setStart, recursiveSubsetSize): if recursiveSubsetSize == 0: yield None else: index = self.subsetSize - recursiveSubsetSize for i in xrange(setStart, self.setSize - recursiveSubsetSize + 1): a[index] = i for j in recursiveXCombinations(i + 1, recursiveSubsetSize - 1): yield a return recursiveXCombinations(0, self.subsetSize) def permutations(setSize, subsetSize): """Return 'setSize permute subsetSize' or nPk @param setSize number of elements in set to select from @param subsetSize number for elements chosen from set @return number of permutations of subsetSize of setSize """ count = 1 for i in xrange(setSize - subsetSize + 1, setSize + 1): count *= i return count class xpermutations(object): """Returns all permutations of subsetSize number from [0, setSize) Supports len() @param setSize number of elements in set to select from @param subsetSize number for elements chosen from set @return generator of permutations. Permutations stored in a 1D array of subsetSize length. 
None if subsetsize is 0 """ def __init__(self, setSize, subsetSize): if setSize < subsetSize: raise ValueError("setSize (%d) must be >= subsetSize (%d)" % (setSize, subsetSize)) self.setSize = setSize self.subsetSize = subsetSize def len(self): return permutations(self.setSize, self.subsetSize) def __iter__(self): a = zeros(self.subsetSize) items = range(self.setSize) # using a closure faster than passing setSize def recursiveXPermutations(recursiveItems, recursiveSubsetSize): if recursiveSubsetSize == 0: yield None else: index = self.subsetSize -recursiveSubsetSize for i in xrange(len(recursiveItems)): a[index] = recursiveItems[i] for cc in recursiveXPermutations(recursiveItems[:i] + recursiveItems[i + 1:], recursiveSubsetSize - 1): yield a return recursiveXPermutations(items, self.subsetSize) def selections(setSizes): length = 1 for setSize in setSizes: length *= setSize return length class xselections(object): """Returns all selections of one item from each set of setSizes Supports len() @param collection of set sizes @return generator of selections. Selection is stored in a 1D array with each entry corresponding to the selected index from the respective set. None if no sets """ def __init__(self, setSizes): self.setSizes = setSizes def __len__(self): return selections(self.setSizes) def len(self): return selections(self.setSizes) def __iter__(self): a = zeros(len(self.setSizes)) setCount = len(self.setSizes) def recursiveXSelections(setIndex): if setIndex == setCount: yield None else: for i in xrange(0, self.setSizes[setIndex]): a[setIndex] = i for j in recursiveXSelections(setIndex + 1): yield a return recursiveXSelections(0)
if setSize < subsetSize:
random_line_split
Combinatorics.py
# Parallel Biclustering Algorithm - Fast Algorithm for finding all biclusters in a GEM # Copyright (C) 2006 Luke Imhoff # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. # # Contact Info: # Luke Imhoff (imho0030@umn.edu) # 220 Delaware St. SE # Minneapolis, MN 55455 """Combinatoric utilities not included in scipy or faster implementation @author Luke Imhoff @license GPLv2 """ from scipy import zeros from scipy.misc import factorial import operator def nChooseK(n, k): """n choose k Returns number of combinations of k elements in n elements @param n total number of elements in set @param k number of elements in subset to choose @return C(n, k) """ numerator = 1 for i in xrange(max(n - k, k) + 1, n + 1): numerator *= i return numerator / factorial(min(n - k, k), exact = 1) class xcombinations(object): """Returns all combinations of subsetSize number from [0, setSize) Supports len() @param setSize number of elements in set to select from @param subsetSize number of elements chosen from set @return generator of combinations. Combination is stored in a 1D array of subsetSize length. None if subsetSize is 0. 
""" def __init__(self, setSize, subsetSize): if setSize < subsetSize: raise ValueError("setSize (%d) must be >= subsetSize (%d)" % (setSize, subsetSize)) self.setSize = setSize self.subsetSize = subsetSize def len(self): return nChooseK(self.setSize, self.subsetSize) def __iter__(self): a = zeros(self.subsetSize) # using a closure faster than passing setSize def recursiveXCombinations(setStart, recursiveSubsetSize): if recursiveSubsetSize == 0: yield None else: index = self.subsetSize - recursiveSubsetSize for i in xrange(setStart, self.setSize - recursiveSubsetSize + 1): a[index] = i for j in recursiveXCombinations(i + 1, recursiveSubsetSize - 1): yield a return recursiveXCombinations(0, self.subsetSize) def permutations(setSize, subsetSize): """Return 'setSize permute subsetSize' or nPk @param setSize number of elements in set to select from @param subsetSize number for elements chosen from set @return number of permutations of subsetSize of setSize """ count = 1 for i in xrange(setSize - subsetSize + 1, setSize + 1): count *= i return count class xpermutations(object): """Returns all permutations of subsetSize number from [0, setSize) Supports len() @param setSize number of elements in set to select from @param subsetSize number for elements chosen from set @return generator of permutations. Permutations stored in a 1D array of subsetSize length. None if subsetsize is 0 """ def __init__(self, setSize, subsetSize): if setSize < subsetSize: raise ValueError("setSize (%d) must be >= subsetSize (%d)" % (setSize, subsetSize)) self.setSize = setSize self.subsetSize = subsetSize def len(self):
def __iter__(self): a = zeros(self.subsetSize) items = range(self.setSize) # using a closure faster than passing setSize def recursiveXPermutations(recursiveItems, recursiveSubsetSize): if recursiveSubsetSize == 0: yield None else: index = self.subsetSize -recursiveSubsetSize for i in xrange(len(recursiveItems)): a[index] = recursiveItems[i] for cc in recursiveXPermutations(recursiveItems[:i] + recursiveItems[i + 1:], recursiveSubsetSize - 1): yield a return recursiveXPermutations(items, self.subsetSize) def selections(setSizes): length = 1 for setSize in setSizes: length *= setSize return length class xselections(object): """Returns all selections of one item from each set of setSizes Supports len() @param collection of set sizes @return generator of selections. Selection is stored in a 1D array with each entry corresponding to the selected index from the respective set. None if no sets """ def __init__(self, setSizes): self.setSizes = setSizes def __len__(self): return selections(self.setSizes) def len(self): return selections(self.setSizes) def __iter__(self): a = zeros(len(self.setSizes)) setCount = len(self.setSizes) def recursiveXSelections(setIndex): if setIndex == setCount: yield None else: for i in xrange(0, self.setSizes[setIndex]): a[setIndex] = i for j in recursiveXSelections(setIndex + 1): yield a return recursiveXSelections(0)
return permutations(self.setSize, self.subsetSize)
identifier_body
PopularArtists_popular_artists.graphql.ts
/* tslint:disable */ import { ReaderFragment } from "relay-runtime"; import { FragmentRefs } from "relay-runtime"; export type PopularArtists_popular_artists = ReadonlyArray<{ readonly slug: string; readonly internalID: string; readonly id: string; readonly name: string | null;
} | null; readonly " $refType": "PopularArtists_popular_artists"; }>; export type PopularArtists_popular_artists$data = PopularArtists_popular_artists; export type PopularArtists_popular_artists$key = ReadonlyArray<{ readonly " $data"?: PopularArtists_popular_artists$data; readonly " $fragmentRefs": FragmentRefs<"PopularArtists_popular_artists">; }>; const node: ReaderFragment = { "kind": "Fragment", "name": "PopularArtists_popular_artists", "type": "Artist", "metadata": { "plural": true }, "argumentDefinitions": [], "selections": [ { "kind": "ScalarField", "alias": null, "name": "slug", "args": null, "storageKey": null }, { "kind": "ScalarField", "alias": null, "name": "internalID", "args": null, "storageKey": null }, { "kind": "ScalarField", "alias": null, "name": "id", "args": null, "storageKey": null }, { "kind": "ScalarField", "alias": null, "name": "name", "args": null, "storageKey": null }, { "kind": "LinkedField", "alias": null, "name": "image", "storageKey": null, "args": null, "concreteType": "Image", "plural": false, "selections": [ { "kind": "LinkedField", "alias": null, "name": "cropped", "storageKey": "cropped(height:100,width:100)", "args": [ { "kind": "Literal", "name": "height", "value": 100 }, { "kind": "Literal", "name": "width", "value": 100 } ], "concreteType": "CroppedImageUrl", "plural": false, "selections": [ { "kind": "ScalarField", "alias": null, "name": "url", "args": null, "storageKey": null } ] } ] } ] }; (node as any).hash = '3d46c81e197d7d6f1f6a27595f5291b6'; export default node;
readonly image: { readonly cropped: { readonly url: string | null; } | null;
random_line_split
snapshot_helpers.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Element an snapshot common logic. use crate::gecko_bindings::bindings; use crate::gecko_bindings::structs::{self, nsAtom}; use crate::string_cache::{Atom, WeakAtom}; use crate::CaseSensitivityExt; use selectors::attr::CaseSensitivity; /// A function that, given an element of type `T`, allows you to get a single /// class or a class list. enum Class<'a> { None, One(*const nsAtom), More(&'a [structs::RefPtr<nsAtom>]), } #[inline(always)] fn base_type(attr: &structs::nsAttrValue) -> structs::nsAttrValue_ValueBaseType { (attr.mBits & structs::NS_ATTRVALUE_BASETYPE_MASK) as structs::nsAttrValue_ValueBaseType } #[inline(always)] unsafe fn ptr<T>(attr: &structs::nsAttrValue) -> *const T { (attr.mBits & !structs::NS_ATTRVALUE_BASETYPE_MASK) as *const T } #[inline(always)] unsafe fn get_class_or_part_from_attr(attr: &structs::nsAttrValue) -> Class { debug_assert!(bindings::Gecko_AssertClassAttrValueIsSane(attr)); let base_type = base_type(attr); if base_type == structs::nsAttrValue_ValueBaseType_eStringBase { return Class::None; } if base_type == structs::nsAttrValue_ValueBaseType_eAtomBase { return Class::One(ptr::<nsAtom>(attr)); } debug_assert_eq!(base_type, structs::nsAttrValue_ValueBaseType_eOtherBase); let container = ptr::<structs::MiscContainer>(attr); debug_assert_eq!(
.__bindgen_anon_1 .mValue .as_ref() .__bindgen_anon_1 .mAtomArray .as_ref(); Class::More(&***array) } #[inline(always)] unsafe fn get_id_from_attr(attr: &structs::nsAttrValue) -> &WeakAtom { debug_assert_eq!( base_type(attr), structs::nsAttrValue_ValueBaseType_eAtomBase ); WeakAtom::new(ptr::<nsAtom>(attr)) } /// Find an attribute value with a given name and no namespace. #[inline(always)] pub fn find_attr<'a>( attrs: &'a [structs::AttrArray_InternalAttr], name: &Atom, ) -> Option<&'a structs::nsAttrValue> { attrs .iter() .find(|attr| attr.mName.mBits == name.as_ptr() as usize) .map(|attr| &attr.mValue) } /// Finds the id attribute from a list of attributes. #[inline(always)] pub fn get_id(attrs: &[structs::AttrArray_InternalAttr]) -> Option<&WeakAtom> { Some(unsafe { get_id_from_attr(find_attr(attrs, &atom!("id"))?) }) } #[inline(always)] pub(super) fn exported_part( attrs: &[structs::AttrArray_InternalAttr], name: &Atom, ) -> Option<Atom> { let attr = find_attr(attrs, &atom!("exportparts"))?; let atom = unsafe { bindings::Gecko_Element_ExportedPart(attr, name.as_ptr()) }; if atom.is_null() { return None; } Some(unsafe { Atom::from_raw(atom) }) } #[inline(always)] pub(super) fn imported_part( attrs: &[structs::AttrArray_InternalAttr], name: &Atom, ) -> Option<Atom> { let attr = find_attr(attrs, &atom!("exportparts"))?; let atom = unsafe { bindings::Gecko_Element_ImportedPart(attr, name.as_ptr()) }; if atom.is_null() { return None; } Some(unsafe { Atom::from_raw(atom) }) } /// Given a class or part name, a case sensitivity, and an array of attributes, /// returns whether the attribute has that name. 
#[inline(always)] pub fn has_class_or_part( name: &Atom, case_sensitivity: CaseSensitivity, attr: &structs::nsAttrValue, ) -> bool { match unsafe { get_class_or_part_from_attr(attr) } { Class::None => false, Class::One(atom) => unsafe { case_sensitivity.eq_atom(name, WeakAtom::new(atom)) }, Class::More(atoms) => match case_sensitivity { CaseSensitivity::CaseSensitive => { atoms.iter().any(|atom| atom.mRawPtr == name.as_ptr()) }, CaseSensitivity::AsciiCaseInsensitive => unsafe { atoms .iter() .any(|atom| WeakAtom::new(atom.mRawPtr).eq_ignore_ascii_case(name)) }, }, } } /// Given an item, a callback, and a getter, execute `callback` for each class /// or part name this `item` has. #[inline(always)] pub fn each_class_or_part<F>(attr: &structs::nsAttrValue, mut callback: F) where F: FnMut(&Atom), { unsafe { match get_class_or_part_from_attr(attr) { Class::None => {}, Class::One(atom) => Atom::with(atom, callback), Class::More(atoms) => { for atom in atoms { Atom::with(atom.mRawPtr, &mut callback) } }, } } }
(*container).mType, structs::nsAttrValue_ValueType_eAtomArray ); let array = (*container)
random_line_split
snapshot_helpers.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Element an snapshot common logic. use crate::gecko_bindings::bindings; use crate::gecko_bindings::structs::{self, nsAtom}; use crate::string_cache::{Atom, WeakAtom}; use crate::CaseSensitivityExt; use selectors::attr::CaseSensitivity; /// A function that, given an element of type `T`, allows you to get a single /// class or a class list. enum Class<'a> { None, One(*const nsAtom), More(&'a [structs::RefPtr<nsAtom>]), } #[inline(always)] fn base_type(attr: &structs::nsAttrValue) -> structs::nsAttrValue_ValueBaseType { (attr.mBits & structs::NS_ATTRVALUE_BASETYPE_MASK) as structs::nsAttrValue_ValueBaseType } #[inline(always)] unsafe fn ptr<T>(attr: &structs::nsAttrValue) -> *const T { (attr.mBits & !structs::NS_ATTRVALUE_BASETYPE_MASK) as *const T } #[inline(always)] unsafe fn get_class_or_part_from_attr(attr: &structs::nsAttrValue) -> Class { debug_assert!(bindings::Gecko_AssertClassAttrValueIsSane(attr)); let base_type = base_type(attr); if base_type == structs::nsAttrValue_ValueBaseType_eStringBase { return Class::None; } if base_type == structs::nsAttrValue_ValueBaseType_eAtomBase { return Class::One(ptr::<nsAtom>(attr)); } debug_assert_eq!(base_type, structs::nsAttrValue_ValueBaseType_eOtherBase); let container = ptr::<structs::MiscContainer>(attr); debug_assert_eq!( (*container).mType, structs::nsAttrValue_ValueType_eAtomArray ); let array = (*container) .__bindgen_anon_1 .mValue .as_ref() .__bindgen_anon_1 .mAtomArray .as_ref(); Class::More(&***array) } #[inline(always)] unsafe fn get_id_from_attr(attr: &structs::nsAttrValue) -> &WeakAtom { debug_assert_eq!( base_type(attr), structs::nsAttrValue_ValueBaseType_eAtomBase ); WeakAtom::new(ptr::<nsAtom>(attr)) } /// Find an attribute value with a given name and no namespace. 
#[inline(always)] pub fn find_attr<'a>( attrs: &'a [structs::AttrArray_InternalAttr], name: &Atom, ) -> Option<&'a structs::nsAttrValue>
/// Finds the id attribute from a list of attributes. #[inline(always)] pub fn get_id(attrs: &[structs::AttrArray_InternalAttr]) -> Option<&WeakAtom> { Some(unsafe { get_id_from_attr(find_attr(attrs, &atom!("id"))?) }) } #[inline(always)] pub(super) fn exported_part( attrs: &[structs::AttrArray_InternalAttr], name: &Atom, ) -> Option<Atom> { let attr = find_attr(attrs, &atom!("exportparts"))?; let atom = unsafe { bindings::Gecko_Element_ExportedPart(attr, name.as_ptr()) }; if atom.is_null() { return None; } Some(unsafe { Atom::from_raw(atom) }) } #[inline(always)] pub(super) fn imported_part( attrs: &[structs::AttrArray_InternalAttr], name: &Atom, ) -> Option<Atom> { let attr = find_attr(attrs, &atom!("exportparts"))?; let atom = unsafe { bindings::Gecko_Element_ImportedPart(attr, name.as_ptr()) }; if atom.is_null() { return None; } Some(unsafe { Atom::from_raw(atom) }) } /// Given a class or part name, a case sensitivity, and an array of attributes, /// returns whether the attribute has that name. #[inline(always)] pub fn has_class_or_part( name: &Atom, case_sensitivity: CaseSensitivity, attr: &structs::nsAttrValue, ) -> bool { match unsafe { get_class_or_part_from_attr(attr) } { Class::None => false, Class::One(atom) => unsafe { case_sensitivity.eq_atom(name, WeakAtom::new(atom)) }, Class::More(atoms) => match case_sensitivity { CaseSensitivity::CaseSensitive => { atoms.iter().any(|atom| atom.mRawPtr == name.as_ptr()) }, CaseSensitivity::AsciiCaseInsensitive => unsafe { atoms .iter() .any(|atom| WeakAtom::new(atom.mRawPtr).eq_ignore_ascii_case(name)) }, }, } } /// Given an item, a callback, and a getter, execute `callback` for each class /// or part name this `item` has. 
#[inline(always)] pub fn each_class_or_part<F>(attr: &structs::nsAttrValue, mut callback: F) where F: FnMut(&Atom), { unsafe { match get_class_or_part_from_attr(attr) { Class::None => {}, Class::One(atom) => Atom::with(atom, callback), Class::More(atoms) => { for atom in atoms { Atom::with(atom.mRawPtr, &mut callback) } }, } } }
{ attrs .iter() .find(|attr| attr.mName.mBits == name.as_ptr() as usize) .map(|attr| &attr.mValue) }
identifier_body
snapshot_helpers.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ //! Element an snapshot common logic. use crate::gecko_bindings::bindings; use crate::gecko_bindings::structs::{self, nsAtom}; use crate::string_cache::{Atom, WeakAtom}; use crate::CaseSensitivityExt; use selectors::attr::CaseSensitivity; /// A function that, given an element of type `T`, allows you to get a single /// class or a class list. enum
<'a> { None, One(*const nsAtom), More(&'a [structs::RefPtr<nsAtom>]), } #[inline(always)] fn base_type(attr: &structs::nsAttrValue) -> structs::nsAttrValue_ValueBaseType { (attr.mBits & structs::NS_ATTRVALUE_BASETYPE_MASK) as structs::nsAttrValue_ValueBaseType } #[inline(always)] unsafe fn ptr<T>(attr: &structs::nsAttrValue) -> *const T { (attr.mBits & !structs::NS_ATTRVALUE_BASETYPE_MASK) as *const T } #[inline(always)] unsafe fn get_class_or_part_from_attr(attr: &structs::nsAttrValue) -> Class { debug_assert!(bindings::Gecko_AssertClassAttrValueIsSane(attr)); let base_type = base_type(attr); if base_type == structs::nsAttrValue_ValueBaseType_eStringBase { return Class::None; } if base_type == structs::nsAttrValue_ValueBaseType_eAtomBase { return Class::One(ptr::<nsAtom>(attr)); } debug_assert_eq!(base_type, structs::nsAttrValue_ValueBaseType_eOtherBase); let container = ptr::<structs::MiscContainer>(attr); debug_assert_eq!( (*container).mType, structs::nsAttrValue_ValueType_eAtomArray ); let array = (*container) .__bindgen_anon_1 .mValue .as_ref() .__bindgen_anon_1 .mAtomArray .as_ref(); Class::More(&***array) } #[inline(always)] unsafe fn get_id_from_attr(attr: &structs::nsAttrValue) -> &WeakAtom { debug_assert_eq!( base_type(attr), structs::nsAttrValue_ValueBaseType_eAtomBase ); WeakAtom::new(ptr::<nsAtom>(attr)) } /// Find an attribute value with a given name and no namespace. #[inline(always)] pub fn find_attr<'a>( attrs: &'a [structs::AttrArray_InternalAttr], name: &Atom, ) -> Option<&'a structs::nsAttrValue> { attrs .iter() .find(|attr| attr.mName.mBits == name.as_ptr() as usize) .map(|attr| &attr.mValue) } /// Finds the id attribute from a list of attributes. #[inline(always)] pub fn get_id(attrs: &[structs::AttrArray_InternalAttr]) -> Option<&WeakAtom> { Some(unsafe { get_id_from_attr(find_attr(attrs, &atom!("id"))?) 
}) } #[inline(always)] pub(super) fn exported_part( attrs: &[structs::AttrArray_InternalAttr], name: &Atom, ) -> Option<Atom> { let attr = find_attr(attrs, &atom!("exportparts"))?; let atom = unsafe { bindings::Gecko_Element_ExportedPart(attr, name.as_ptr()) }; if atom.is_null() { return None; } Some(unsafe { Atom::from_raw(atom) }) } #[inline(always)] pub(super) fn imported_part( attrs: &[structs::AttrArray_InternalAttr], name: &Atom, ) -> Option<Atom> { let attr = find_attr(attrs, &atom!("exportparts"))?; let atom = unsafe { bindings::Gecko_Element_ImportedPart(attr, name.as_ptr()) }; if atom.is_null() { return None; } Some(unsafe { Atom::from_raw(atom) }) } /// Given a class or part name, a case sensitivity, and an array of attributes, /// returns whether the attribute has that name. #[inline(always)] pub fn has_class_or_part( name: &Atom, case_sensitivity: CaseSensitivity, attr: &structs::nsAttrValue, ) -> bool { match unsafe { get_class_or_part_from_attr(attr) } { Class::None => false, Class::One(atom) => unsafe { case_sensitivity.eq_atom(name, WeakAtom::new(atom)) }, Class::More(atoms) => match case_sensitivity { CaseSensitivity::CaseSensitive => { atoms.iter().any(|atom| atom.mRawPtr == name.as_ptr()) }, CaseSensitivity::AsciiCaseInsensitive => unsafe { atoms .iter() .any(|atom| WeakAtom::new(atom.mRawPtr).eq_ignore_ascii_case(name)) }, }, } } /// Given an item, a callback, and a getter, execute `callback` for each class /// or part name this `item` has. #[inline(always)] pub fn each_class_or_part<F>(attr: &structs::nsAttrValue, mut callback: F) where F: FnMut(&Atom), { unsafe { match get_class_or_part_from_attr(attr) { Class::None => {}, Class::One(atom) => Atom::with(atom, callback), Class::More(atoms) => { for atom in atoms { Atom::with(atom.mRawPtr, &mut callback) } }, } } }
Class
identifier_name
qualified_ident.rs
use nom::{ branch::alt, bytes::complete::{is_not, tag}, character::complete::{alpha1, alphanumeric1, space0}, combinator::{opt, recognize}, multi::{many0_count, separated_list0, separated_list1}, sequence::{delimited, pair, preceded, terminated}, IResult, }; use super::prelude::*; pub type QIdent<'a> = Vec<QIdentSegment<'a>>; #[derive(Debug, PartialEq, Clone)] pub struct QIdentSegment<'a> { ident: &'a str, parameters: Vec<QIdentParam<'a>>, } #[derive(Debug, PartialEq, Clone)] pub enum QIdentParam<'a> { QIdent(QIdent<'a>), Other(&'a str), } fn ident(input: Span) -> IResult<Span, Span> { recognize(pair(alt((alpha1, tag("_"))), many0_count(alt((alphanumeric1, tag("_"))))))(input) } fn template_param(input: Span) -> IResult<Span, QIdentParam> { match qualified_ident(input) { Ok((rest, result)) => Ok((rest, QIdentParam::QIdent(result))), Err(_) => match recognize(is_not("<,>"))(input) { Ok((rest, result)) => Ok((rest, QIdentParam::Other(result.trim()))), Err(err) => Err(err), }, } } fn template_params(input: Span) -> IResult<Span, Vec<QIdentParam>> { let (rest, parameters) = delimited(tag("<"), separated_list0(tag(","), ws(template_param)), tag(">"))(input)?; Ok((rest, parameters)) } fn qident_segment(input: Span) -> IResult<Span, QIdentSegment> { let (rest, (ident, parameters)) = pair(ident, opt(preceded(space0, template_params)))(input)?; let parameters = match parameters { Some(parameters) => parameters, None => Vec::new(), }; Ok(( rest, QIdentSegment { ident, parameters, }, )) } pub fn qualified_ident(input: Span) -> IResult<Span, QIdent> { preceded(opt(terminated(tag("::"), space0)), separated_list1(ws(tag("::")), qident_segment))(input) } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #[test] fn test_ident() { assert_eq!(ident("ident1234"), Ok(("", "ident1234"))); assert_eq!(ident("_ident_1234::"), Ok(("::", "_ident_1234"))); } #[test] fn test_qident_segment()
#[test] fn test_qualified_ident() { let expected = vec![ QIdentSegment { ident: "foo", parameters: vec![], }, QIdentSegment { ident: "bar", parameters: vec![ QIdentParam::QIdent(vec![ QIdentSegment { ident: "baz", parameters: vec![], }, QIdentSegment { ident: "quox", parameters: vec![QIdentParam::Other("3")], }, ]), QIdentParam::Other("1 +0.234"), ], }, ]; assert_eq!(qualified_ident("foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected.clone()))); assert_eq!(qualified_ident(":: foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected))); }
{ assert_eq!( qident_segment("string"), Ok(( "", QIdentSegment { ident: "string", parameters: vec![], } )) ); assert_eq!( qident_segment("string<42>"), Ok(( "", QIdentSegment { ident: "string", parameters: vec![QIdentParam::Other("42")] } )) ); assert_eq!( qident_segment("string < 42 >"), Ok(( "", QIdentSegment { ident: "string", parameters: vec![QIdentParam::Other("42")] } )) ); assert_eq!( qident_segment("string < 2, 3.0 >"), Ok(( "", QIdentSegment { ident: "string", parameters: vec![QIdentParam::Other("2"), QIdentParam::Other("3.0")] } )) ); }
identifier_body
qualified_ident.rs
use nom::{ branch::alt, bytes::complete::{is_not, tag}, character::complete::{alpha1, alphanumeric1, space0}, combinator::{opt, recognize}, multi::{many0_count, separated_list0, separated_list1}, sequence::{delimited, pair, preceded, terminated}, IResult, }; use super::prelude::*; pub type QIdent<'a> = Vec<QIdentSegment<'a>>; #[derive(Debug, PartialEq, Clone)] pub struct QIdentSegment<'a> { ident: &'a str, parameters: Vec<QIdentParam<'a>>, } #[derive(Debug, PartialEq, Clone)] pub enum QIdentParam<'a> { QIdent(QIdent<'a>), Other(&'a str), } fn ident(input: Span) -> IResult<Span, Span> { recognize(pair(alt((alpha1, tag("_"))), many0_count(alt((alphanumeric1, tag("_"))))))(input) } fn
(input: Span) -> IResult<Span, QIdentParam> { match qualified_ident(input) { Ok((rest, result)) => Ok((rest, QIdentParam::QIdent(result))), Err(_) => match recognize(is_not("<,>"))(input) { Ok((rest, result)) => Ok((rest, QIdentParam::Other(result.trim()))), Err(err) => Err(err), }, } } fn template_params(input: Span) -> IResult<Span, Vec<QIdentParam>> { let (rest, parameters) = delimited(tag("<"), separated_list0(tag(","), ws(template_param)), tag(">"))(input)?; Ok((rest, parameters)) } fn qident_segment(input: Span) -> IResult<Span, QIdentSegment> { let (rest, (ident, parameters)) = pair(ident, opt(preceded(space0, template_params)))(input)?; let parameters = match parameters { Some(parameters) => parameters, None => Vec::new(), }; Ok(( rest, QIdentSegment { ident, parameters, }, )) } pub fn qualified_ident(input: Span) -> IResult<Span, QIdent> { preceded(opt(terminated(tag("::"), space0)), separated_list1(ws(tag("::")), qident_segment))(input) } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #[test] fn test_ident() { assert_eq!(ident("ident1234"), Ok(("", "ident1234"))); assert_eq!(ident("_ident_1234::"), Ok(("::", "_ident_1234"))); } #[test] fn test_qident_segment() { assert_eq!( qident_segment("string"), Ok(( "", QIdentSegment { ident: "string", parameters: vec![], } )) ); assert_eq!( qident_segment("string<42>"), Ok(( "", QIdentSegment { ident: "string", parameters: vec![QIdentParam::Other("42")] } )) ); assert_eq!( qident_segment("string < 42 >"), Ok(( "", QIdentSegment { ident: "string", parameters: vec![QIdentParam::Other("42")] } )) ); assert_eq!( qident_segment("string < 2, 3.0 >"), Ok(( "", QIdentSegment { ident: "string", parameters: vec![QIdentParam::Other("2"), QIdentParam::Other("3.0")] } )) ); } #[test] fn test_qualified_ident() { let expected = vec![ QIdentSegment { ident: "foo", parameters: vec![], }, QIdentSegment { ident: "bar", parameters: vec![ QIdentParam::QIdent(vec![ 
QIdentSegment { ident: "baz", parameters: vec![], }, QIdentSegment { ident: "quox", parameters: vec![QIdentParam::Other("3")], }, ]), QIdentParam::Other("1 +0.234"), ], }, ]; assert_eq!(qualified_ident("foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected.clone()))); assert_eq!(qualified_ident(":: foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected))); }
template_param
identifier_name
qualified_ident.rs
use nom::{ branch::alt, bytes::complete::{is_not, tag}, character::complete::{alpha1, alphanumeric1, space0}, combinator::{opt, recognize}, multi::{many0_count, separated_list0, separated_list1}, sequence::{delimited, pair, preceded, terminated}, IResult, }; use super::prelude::*; pub type QIdent<'a> = Vec<QIdentSegment<'a>>; #[derive(Debug, PartialEq, Clone)] pub struct QIdentSegment<'a> { ident: &'a str, parameters: Vec<QIdentParam<'a>>, } #[derive(Debug, PartialEq, Clone)] pub enum QIdentParam<'a> { QIdent(QIdent<'a>), Other(&'a str), } fn ident(input: Span) -> IResult<Span, Span> { recognize(pair(alt((alpha1, tag("_"))), many0_count(alt((alphanumeric1, tag("_"))))))(input) } fn template_param(input: Span) -> IResult<Span, QIdentParam> { match qualified_ident(input) { Ok((rest, result)) => Ok((rest, QIdentParam::QIdent(result))), Err(_) => match recognize(is_not("<,>"))(input) { Ok((rest, result)) => Ok((rest, QIdentParam::Other(result.trim()))), Err(err) => Err(err), }, } } fn template_params(input: Span) -> IResult<Span, Vec<QIdentParam>> { let (rest, parameters) = delimited(tag("<"), separated_list0(tag(","), ws(template_param)), tag(">"))(input)?; Ok((rest, parameters)) } fn qident_segment(input: Span) -> IResult<Span, QIdentSegment> { let (rest, (ident, parameters)) = pair(ident, opt(preceded(space0, template_params)))(input)?; let parameters = match parameters { Some(parameters) => parameters, None => Vec::new(), }; Ok(( rest, QIdentSegment { ident, parameters, }, )) } pub fn qualified_ident(input: Span) -> IResult<Span, QIdent> { preceded(opt(terminated(tag("::"), space0)), separated_list1(ws(tag("::")), qident_segment))(input) } /////////////////////////////////////////////////////////////////////////////////////////////////////////////////////// #[test] fn test_ident() { assert_eq!(ident("ident1234"), Ok(("", "ident1234"))); assert_eq!(ident("_ident_1234::"), Ok(("::", "_ident_1234"))); } #[test] fn test_qident_segment() { assert_eq!( 
qident_segment("string"), Ok(( "", QIdentSegment { ident: "string", parameters: vec![], } )) ); assert_eq!( qident_segment("string<42>"), Ok(( "", QIdentSegment { ident: "string", parameters: vec![QIdentParam::Other("42")] } )) ); assert_eq!( qident_segment("string < 42 >"),
"", QIdentSegment { ident: "string", parameters: vec![QIdentParam::Other("42")] } )) ); assert_eq!( qident_segment("string < 2, 3.0 >"), Ok(( "", QIdentSegment { ident: "string", parameters: vec![QIdentParam::Other("2"), QIdentParam::Other("3.0")] } )) ); } #[test] fn test_qualified_ident() { let expected = vec![ QIdentSegment { ident: "foo", parameters: vec![], }, QIdentSegment { ident: "bar", parameters: vec![ QIdentParam::QIdent(vec![ QIdentSegment { ident: "baz", parameters: vec![], }, QIdentSegment { ident: "quox", parameters: vec![QIdentParam::Other("3")], }, ]), QIdentParam::Other("1 +0.234"), ], }, ]; assert_eq!(qualified_ident("foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected.clone()))); assert_eq!(qualified_ident(":: foo :: bar < baz::quox< 3>, 1 +0.234>"), Ok(("", expected))); }
Ok((
random_line_split
util.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(missing_copy_implementations)] use prelude::v1::*; use io::{self, Read, Write, ErrorKind, BufRead}; /// Copies the entire contents of a reader into a writer. /// /// This function will continuously read data from `r` and then write it into /// `w` in a streaming fashion until `r` returns EOF. /// /// On success the total number of bytes that were copied from `r` to `w` is /// returned. /// /// # Errors /// /// This function will return an error immediately if any call to `read` or /// `write` returns an error. All instances of `ErrorKind::Interrupted` are /// handled by this function and the underlying operation is retried. #[stable(feature = "rust1", since = "1.0.0")] pub fn copy<R: Read, W: Write>(r: &mut R, w: &mut W) -> io::Result<u64> { let mut buf = [0; super::DEFAULT_BUF_SIZE]; let mut written = 0; loop { let len = match r.read(&mut buf) { Ok(0) => return Ok(written), Ok(len) => len, Err(ref e) if e.kind() == ErrorKind::Interrupted => continue, Err(e) => return Err(e), }; try!(w.write_all(&buf[..len])); written += len as u64; } } /// A reader which is always at EOF. #[stable(feature = "rust1", since = "1.0.0")] pub struct Empty { _priv: () } /// Creates an instance of an empty reader. /// /// All reads from the returned reader will return `Ok(0)`. 
#[stable(feature = "rust1", since = "1.0.0")] pub fn empty() -> Empty { Empty { _priv: () } } #[stable(feature = "rust1", since = "1.0.0")] impl Read for Empty { fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> { Ok(0) } } #[stable(feature = "rust1", since = "1.0.0")] impl BufRead for Empty { fn fill_buf(&mut self) -> io::Result<&[u8]> { Ok(&[]) } fn
(&mut self, _n: usize) {} } /// A reader which infinitely yields one byte. #[stable(feature = "rust1", since = "1.0.0")] pub struct Repeat { byte: u8 } /// Creates an instance of a reader that infinitely repeats one byte. /// /// All reads from this reader will succeed by filling the specified buffer with /// the given byte. #[stable(feature = "rust1", since = "1.0.0")] pub fn repeat(byte: u8) -> Repeat { Repeat { byte: byte } } #[stable(feature = "rust1", since = "1.0.0")] impl Read for Repeat { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { for slot in buf.iter_mut() { *slot = self.byte; } Ok(buf.len()) } } /// A writer which will move data into the void. #[stable(feature = "rust1", since = "1.0.0")] pub struct Sink { _priv: () } /// Creates an instance of a writer which will successfully consume all data. /// /// All calls to `write` on the returned instance will return `Ok(buf.len())` /// and the contents of the buffer will not be inspected. #[stable(feature = "rust1", since = "1.0.0")] pub fn sink() -> Sink { Sink { _priv: () } } #[stable(feature = "rust1", since = "1.0.0")] impl Write for Sink { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { Ok(buf.len()) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } #[cfg(test)] mod tests { use prelude::v1::*; use io::prelude::*; use io::{sink, empty, repeat}; #[test] fn sink_sinks() { let mut s = sink(); assert_eq!(s.write(&[]).unwrap(), 0); assert_eq!(s.write(&[0]).unwrap(), 1); assert_eq!(s.write(&[0; 1024]).unwrap(), 1024); assert_eq!(s.by_ref().write(&[0; 1024]).unwrap(), 1024); } #[test] fn empty_reads() { let mut e = empty(); assert_eq!(e.read(&mut []).unwrap(), 0); assert_eq!(e.read(&mut [0]).unwrap(), 0); assert_eq!(e.read(&mut [0; 1024]).unwrap(), 0); assert_eq!(e.by_ref().read(&mut [0; 1024]).unwrap(), 0); } #[test] fn repeat_repeats() { let mut r = repeat(4); let mut b = [0; 1024]; assert_eq!(r.read(&mut b).unwrap(), 1024); assert!(b.iter().all(|b| *b == 4)); } #[test] fn 
take_some_bytes() { assert_eq!(repeat(4).take(100).bytes().count(), 100); assert_eq!(repeat(4).take(100).bytes().next().unwrap().unwrap(), 4); assert_eq!(repeat(1).take(10).chain(repeat(2).take(10)).bytes().count(), 20); } #[test] fn tee() { let mut buf = [0; 10]; { let mut ptr: &mut [u8] = &mut buf; assert_eq!(repeat(4).tee(&mut ptr).take(5).read(&mut [0; 10]).unwrap(), 5); } assert_eq!(buf, [4, 4, 4, 4, 4, 0, 0, 0, 0, 0]); } #[test] fn broadcast() { let mut buf1 = [0; 10]; let mut buf2 = [0; 10]; { let mut ptr1: &mut [u8] = &mut buf1; let mut ptr2: &mut [u8] = &mut buf2; assert_eq!((&mut ptr1).broadcast(&mut ptr2) .write(&[1, 2, 3]).unwrap(), 3); } assert_eq!(buf1, buf2); assert_eq!(buf1, [1, 2, 3, 0, 0, 0, 0, 0, 0, 0]); } }
consume
identifier_name
util.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![allow(missing_copy_implementations)] use prelude::v1::*; use io::{self, Read, Write, ErrorKind, BufRead}; /// Copies the entire contents of a reader into a writer. /// /// This function will continuously read data from `r` and then write it into /// `w` in a streaming fashion until `r` returns EOF. /// /// On success the total number of bytes that were copied from `r` to `w` is /// returned. /// /// # Errors /// /// This function will return an error immediately if any call to `read` or /// `write` returns an error. All instances of `ErrorKind::Interrupted` are /// handled by this function and the underlying operation is retried. #[stable(feature = "rust1", since = "1.0.0")] pub fn copy<R: Read, W: Write>(r: &mut R, w: &mut W) -> io::Result<u64> { let mut buf = [0; super::DEFAULT_BUF_SIZE]; let mut written = 0; loop { let len = match r.read(&mut buf) { Ok(0) => return Ok(written), Ok(len) => len, Err(ref e) if e.kind() == ErrorKind::Interrupted => continue, Err(e) => return Err(e), }; try!(w.write_all(&buf[..len])); written += len as u64; } } /// A reader which is always at EOF. #[stable(feature = "rust1", since = "1.0.0")] pub struct Empty { _priv: () } /// Creates an instance of an empty reader. /// /// All reads from the returned reader will return `Ok(0)`. #[stable(feature = "rust1", since = "1.0.0")] pub fn empty() -> Empty { Empty { _priv: () } } #[stable(feature = "rust1", since = "1.0.0")]
fn read(&mut self, _buf: &mut [u8]) -> io::Result<usize> { Ok(0) } } #[stable(feature = "rust1", since = "1.0.0")] impl BufRead for Empty { fn fill_buf(&mut self) -> io::Result<&[u8]> { Ok(&[]) } fn consume(&mut self, _n: usize) {} } /// A reader which infinitely yields one byte. #[stable(feature = "rust1", since = "1.0.0")] pub struct Repeat { byte: u8 } /// Creates an instance of a reader that infinitely repeats one byte. /// /// All reads from this reader will succeed by filling the specified buffer with /// the given byte. #[stable(feature = "rust1", since = "1.0.0")] pub fn repeat(byte: u8) -> Repeat { Repeat { byte: byte } } #[stable(feature = "rust1", since = "1.0.0")] impl Read for Repeat { fn read(&mut self, buf: &mut [u8]) -> io::Result<usize> { for slot in buf.iter_mut() { *slot = self.byte; } Ok(buf.len()) } } /// A writer which will move data into the void. #[stable(feature = "rust1", since = "1.0.0")] pub struct Sink { _priv: () } /// Creates an instance of a writer which will successfully consume all data. /// /// All calls to `write` on the returned instance will return `Ok(buf.len())` /// and the contents of the buffer will not be inspected. 
#[stable(feature = "rust1", since = "1.0.0")] pub fn sink() -> Sink { Sink { _priv: () } } #[stable(feature = "rust1", since = "1.0.0")] impl Write for Sink { fn write(&mut self, buf: &[u8]) -> io::Result<usize> { Ok(buf.len()) } fn flush(&mut self) -> io::Result<()> { Ok(()) } } #[cfg(test)] mod tests { use prelude::v1::*; use io::prelude::*; use io::{sink, empty, repeat}; #[test] fn sink_sinks() { let mut s = sink(); assert_eq!(s.write(&[]).unwrap(), 0); assert_eq!(s.write(&[0]).unwrap(), 1); assert_eq!(s.write(&[0; 1024]).unwrap(), 1024); assert_eq!(s.by_ref().write(&[0; 1024]).unwrap(), 1024); } #[test] fn empty_reads() { let mut e = empty(); assert_eq!(e.read(&mut []).unwrap(), 0); assert_eq!(e.read(&mut [0]).unwrap(), 0); assert_eq!(e.read(&mut [0; 1024]).unwrap(), 0); assert_eq!(e.by_ref().read(&mut [0; 1024]).unwrap(), 0); } #[test] fn repeat_repeats() { let mut r = repeat(4); let mut b = [0; 1024]; assert_eq!(r.read(&mut b).unwrap(), 1024); assert!(b.iter().all(|b| *b == 4)); } #[test] fn take_some_bytes() { assert_eq!(repeat(4).take(100).bytes().count(), 100); assert_eq!(repeat(4).take(100).bytes().next().unwrap().unwrap(), 4); assert_eq!(repeat(1).take(10).chain(repeat(2).take(10)).bytes().count(), 20); } #[test] fn tee() { let mut buf = [0; 10]; { let mut ptr: &mut [u8] = &mut buf; assert_eq!(repeat(4).tee(&mut ptr).take(5).read(&mut [0; 10]).unwrap(), 5); } assert_eq!(buf, [4, 4, 4, 4, 4, 0, 0, 0, 0, 0]); } #[test] fn broadcast() { let mut buf1 = [0; 10]; let mut buf2 = [0; 10]; { let mut ptr1: &mut [u8] = &mut buf1; let mut ptr2: &mut [u8] = &mut buf2; assert_eq!((&mut ptr1).broadcast(&mut ptr2) .write(&[1, 2, 3]).unwrap(), 3); } assert_eq!(buf1, buf2); assert_eq!(buf1, [1, 2, 3, 0, 0, 0, 0, 0, 0, 0]); } }
impl Read for Empty {
random_line_split
por.form.js
/** * @fileoverview 弹出窗口处理 * port from EF */ var EF_SPLIT = "__"; var EF_CLASSTAG = ""; var EF_FORMDATA_SPLIT = "-"; var EF_CR_IDENTIFIER = "¶¶"; var EF_CR_HTML_IDENTIFIER = "&para;&para;"; //alert("efform"); var ef_form_init_time = new Date(); var ef_form_validate_message = new Object(); var ef_grids = new Object(); var ef_form_ename = " "; var ef_form_cname = " "; ef = function() { } //切换节点显示方式 ef.toggleDivDisplay = function (divId){ var node = ef.get(divId); if (node.style.display == "none") { node.style.display = "block"; } else { node.style.display = "none"; } } ef.getNodeById = function(node_id) { var node_get = ef.get(node_id); if (!node_get) alert("ID[" + node_id + "] not exist"); return node_get; } ef.get = function(node_id) { var node_get = document.getElementById(node_id); return node_get; } ef.debug = function( msg ) { ef.log( "[DEBUG] - " + msg ); } ef.info = function( msg ) { ef.log( "[INFO] - " + msg ); } ef.log = function(msg) { /* var msgRecNode = ef.get("efFormDetailMsg"); if (msgRecNode!= null) msgRecNode.innerHTML += msg + "<br>"; */ try { if( isAvailable(__DEBUG) ) { __DEBUG_MSG.push(msg); } } catch(ex) { } } efform = function () { //debugger; } efform.onload = function () { var ef_form_load_time = new Date(); ef.debug( "ef(" + ef_version + ") form took: " + (ef_form_load_time.getTime() - ef_form_init_time.getTime()).toString() + "ms to load" ); // window.status = "ef(" + ef_version + ") form took: " + // (ef_form_load_time.getTime() - ef_form_init_time.getTime()).toString() // + "ms to load" ; } efform.repaint = function() { ef_form_init_time = new Date(); for( var key in ef_grids ) { ef_grids[key].paint(); } var ef_form_load_time = new Date(); ef.debug( "ef(" + ef_version + ") form took: " + (ef_form_load_time.getTime() - ef_form_init_time.getTime()).toString() + "ms to load" ); } efform.alertTime = function(msg_info) { var ef_form_load_time = new Date(); ef.debug( msg_info + (ef_form_load_time.getTime() - 
ef_form_init_time.getTime()).toString() ); } efform.init = function() { window.onload = efform.onload; document.body.onresize = efform.repaint; } efform.addGrid = function( grid ) { ef_grids[grid.gridId] = grid; } efform.getGrid = function( grid_id ) { var grid = ef_grids[grid_id]; if( !grid ) throw new Error( "Grid with id [" + grid_id + "] not exists!" ); return grid; } efform.clearDiv = function( div_id ) { try { var div_node = document.getElementById( div_id ); efform.clearInputField( div_node ); } catch( exception ) { alert( exception.message ); } } efform.clearInputField = function( node ) { if( node.tagName == "INPUT" && node.type!="button" ) { node.value = ""; } else { for( var i=0;i< node.childNodes.length;i++ ) { try { efform.clearInputField( node.childNodes[i] ); } catch( exception ) {} } } } efform.hasErrorMessage = function() { for( var key in ef_form_validate_message ) { return true; } return false; } efform.showErrorMessage = function() { var ei_message = document.getElementById("ei_message"); if( !ei_message ) return; if( efform.hasErrorMessage() ) { var message = []; ei_message.style.color = "red"; for( var key in ef_form_validate_message ) { message.push( "* " + ef_form_validate_message[key] ); } ei_message.innerText = message.join('\t'); } else { ei_message.style.color = ""; ei_message.innerText = ""; } } efform.clearErrorMessage = function() { for( var key in ef_form_validate_message ) { delete ef_form_validate_message[key]; } var ei_message = document.getElementById("ei_message"); if( !ei_message ) return; ei_message.style.color = ""; ei_message.innerText = ""; } efform.windowOnError = function( msg, url, line ) { alert( "出现javascript异常: " + msg); return true; } efform.togglenav = function( nav ) { try { if( isAvailable(__DEBUG) ) { efdebugger.show(); } } catch(ex) { } } efform.formRefresh = function() { } efform.formPrint = function() { } efform.getSubGridDiv = function() { var div_id = "ef_subgrid"; var div_node = document.getElementById( 
div_id ); if( !div_node ) { div_node = efform.createDivWindow( div_id, "por-Window" ); } return div_node; } efform.createDivWindow = function( div_id, styleClassName ) { var ef_subwindow = document.getElementById( div_id ); if( ef_subwindow ) throw new Error( "Element with id [" + div_id + "] already exists!" ); ef_subwindow = document.createElement("div"); ef_subwindow.id = div_id; ef_subwindow.className = styleClassName; ef_subwindow.style.overflow = "hidden"; ef_subwindow.style.display = "none"; ef_subwindow.innerHTML = "&nbsp;" // document.forms[0].appendChild( ef_subwindow ); document.body.appendChild( ef_subwindow ); return ef_subwindow; } efform.showSubGridWindow = function( component_id, column ) { var component = document.getElementById( component_id ); if( !component ) return; var div_node = efform.getSubGridDiv(); // var column_new = div_node.getAttribute( "efDisplayCol" ); var column_new = column; var column_old = div_node.getAttribute( "efDisplayingCol" ); if( column_old!= null && column_old == column_new ) { PopWindow.show( component, div_node.id ); return; } div_node.setAttribute( "efDisplayingCol", column_new ); var inner_html = ""; var column_length = column_new.columnListLabel.length; inner_html += "<TABLE cellspacing='0' cellpadding='1'>"; inner_html += "<TR class=por-Caption>"; inner_html += "<TD align='left' id='containerOuter'>&nbsp;"+ column_new.getCname() +"&nbsp;</TD>"; inner_html += "<TD align='right' id='containerOuter'><IMG src='" + PorGlobals.IMAGES_PATH + "efcalendar_close.gif' onclick='javascript:PopWindow.hide();'/></TD>"; inner_html += "</TR>"; inner_html += "<TR><TD colspan=2>"; inner_html += "<TABLE class='por-Table' hidefocus cellspacing = 1 cellpadding = 1 onmouseover='javascript:efform.subgrid_onmouseover()' onmouseout='javascript:efform.subgrid_onmouseout()' onclick='javascript:efform.subgrid_onclick()'>"; inner_html += "<TR class=por-TableHeader>"; for( var j=0; j<column_length; j++ ) { inner_html += "<TD>"+ 
column_new.columnListLabel[j] +"</TD>"; } inner_html +="</TR>"; for( var i=0; i<column_new.columnListValue.length; i++ ) { var row_i = column_new.columnListValue[i]; inner_html += "<TR class='por-TableRow" + (i&1) + "'>"; for( var j=0; j<column_length; j++ ) { inner_html += "<TD>"+ row_i[j] +"</TD>"; } inner_html += "</TR>"; } inner_html += "</TABLE>"; $(div_node).html(inner_html); //div_node.innerHTML = inner_html; PopWindow.show( component, div_node.id ); } efform.showTextAreaWindow = function( component_id ) { var component = document.getElementById( component_id ); if( !component ) return; var div_node = efform.getSubGridDiv(); var show_value = component.value; var index=show_value.indexOf(EF_CR_IDENTIFIER); while(index!=-1) { show_value=show_value.substring(0,index)+"\r\n"+show_value.substring(index+2); index=show_value.indexOf(EF_CR_IDENTIFIER); } var inner_html = ""; inner_html += "<TABLE cellspacing='0' cellpadding='1'>"; inner_html += "<TR class=por-Caption>"; inner_html += "<TD align='left' id='containerOuter'>&nbsp;详情&nbsp;</TD>"; inner_html += "<TD align='right' id='containerOuter'><IMG src='" + PorGlobals.IMAGES_PATH + "efcalendar_close.gif' onclick='javascript:PopWindow.hide();'/></TD>"; inner_html += "</TR>"; inner_html += "<TR onmouseover='javascript:efform.nullfunction();' onclick='javascript:efform.nullfunction();' onmousemove='javascript:efform.nullfunction();' ><TD colspan=2>"; inner_html += "<textarea wrap=\"hard\" rows=10 cols=50 id=\"subwindow_textarea\" class=por-InputField>"+show_value+"</textarea>"; inner_html += "</TD></TR>"; inner_html += "<TR onmouseover='javascript:efform.nullfunction();' onclick='javascript:efform.nullfunction();' onmousemove='javascript:efform.nullfunction();'><TD colspan=2 align=center>"; inner_html += "<input class=por-Button type=\"button\" value=\"保存\" onclick='javascript:efform.subgrid_save_onclick();' />"; inner_html += "</TD></TR>"; inner_html += "</TABLE>"; $(div_node).html(inner_html); //div_node.innerHTML = 
inner_html; PopWindow.show( component, div_node.id ); } efform.subgrid_onmouseover = function() { if( event.srcElement.tagName != 'TD' ) return; var td_node = event.srcElement; if (td_node && efgrid.getRowIndex(td_node.parentNode) >0 ) { td_node.parentNode.className = 'por-RowHighlight'; } } efform.subgrid_onmouseout = function() { if( event.srcElement.tagName != 'TD' ) return; var td_node = event.srcElement; if (td_node && efgrid.getRowIndex(td_node.parentNode) >0 ) { td_node.parentNode.className = 'por-TableRow' + (efgrid.getRowIndex(td_node.parentNode) & 1); } } efform.subgrid_onclick = function() { if( event.srcElement.tagName != 'TD' ) return; var td_node = event.srcElement; if ( !td_node ) return; var row_index = efgrid.getRowIndex(td_node.parentNode); if( row_index <=0 ) return;
if( column_obj ) { cell_value = column_obj.columnListValue[row_index-1][0]; } PopWindow.setValue( cell_value ); } efform.subgrid_save_onclick = function() { var text_area = document.getElementById("subwindow_textarea"); var cell_value = ""; if( text_area ) { cell_value = text_area.value; } PopWindow.setValue( cell_value ); } efform.nullfunction = function() { } efform.evalFunction = function( functionName ) { if((typeof functionName) == "function") { try { eval( functionName(arguments[1],arguments[2],arguments[3],arguments[4],arguments[5],arguments[6],arguments[7],arguments[8],arguments[9],arguments[10]) ); } catch(ex) {} } } efform.newWindow = function( input_id, div_node, row_index, col_index ) { var component = document.getElementById( input_id ); if( !component ) return; var url = "about:blank"; try { url = efgrid_getNewWindowUrl( div_node, row_index, col_index, component.value ); } catch( exception ) { alert("efgrid_getNewWindowUrl 未定义或参数错误"); return; } var returnValue; if ($.browser.msie){ returnValue = window.showModalDialog( url, "","dialogWidth:400px; dialogHeight:300px;resizable:yes"); if( !returnValue || !returnValue["value"] ) { alert("返回值未定义!"); return; } div_node.firstChild.value = returnValue["value"]; $(div_node.firstChild).change(); // efgrid.setCellValue( input_id,returnValue["value"],returnValue["label"]); } else { win = window.open( url, "","dialogWidth:400px; dialogHeight:300px;resizable:yes"); win.target = div_node; win.onunload = function(){ if (this.returnValue){ this.target.firstChild.value = this.returnValue["value"]; $(this.target.firstChild).change(); } }; } }
var column_obj = efform.getSubGridDiv().getAttribute( "efDisplayingCol" ); var cell_value = "";
random_line_split
ie.d.ts
import * as webdriver from './index'; /** * A WebDriver client for Microsoft's Internet Explorer. */ export class Driver extends webdriver.WebDriver { /** * @param {(capabilities.Capabilities|Options)=} opt_config The configuration * options. * @param {promise.ControlFlow=} opt_flow The control flow to use, * or {@code null} to use the currently active flow. */ constructor(opt_config?: webdriver.Capabilities | Options, opt_flow?: webdriver.promise.ControlFlow); /** * This function is a no-op as file detectors are not supported by this * implementation. * @override */ setFileDetector(): void; } /** * Class for managing IEDriver specific options. */ export class
{ constructor(); /** * Extracts the IEDriver specific options from the given capabilities * object. * @param {!capabilities.Capabilities} caps The capabilities object. * @return {!Options} The IEDriver options. */ static fromCapabilities(caps: webdriver.Capabilities): Options; /** * Whether to disable the protected mode settings check when the session is * created. Disbling this setting may lead to significant instability as the * browser may become unresponsive/hang. Only 'best effort' support is provided * when using this capability. * * For more information, refer to the IEDriver's * [required system configuration](http://goo.gl/eH0Yi3). * * @param {boolean} ignoreSettings Whether to ignore protected mode settings. * @return {!Options} A self reference. */ introduceFlakinessByIgnoringProtectedModeSettings(ignoreSettings: boolean): Options; /** * Indicates whether to skip the check that the browser's zoom level is set to * 100%. * * @param {boolean} ignore Whether to ignore the browser's zoom level settings. * @return {!Options} A self reference. */ ignoreZoomSetting(ignore: boolean): Options; /** * Sets the initial URL loaded when IE starts. This is intended to be used with * {@link #ignoreProtectedModeSettings} to allow the user to initialize IE in * the proper Protected Mode zone. Setting this option may cause browser * instability or flaky and unresponsive code. Only 'best effort' support is * provided when using this option. * * @param {string} url The initial browser URL. * @return {!Options} A self reference. */ initialBrowserUrl(url: string): Options; /** * Configures whether to enable persistent mouse hovering (true by default). * Persistent hovering is achieved by continuously firing mouse over events at * the last location the mouse cursor has been moved to. * * @param {boolean} enable Whether to enable persistent hovering. * @return {!Options} A self reference. 
*/ enablePersistentHover(enable: boolean): Options; /** * Configures whether the driver should attempt to remove obsolete * {@linkplain webdriver.WebElement WebElements} from its internal cache on * page navigation (true by default). Disabling this option will cause the * driver to run with a larger memory footprint. * * @param {boolean} enable Whether to enable element reference cleanup. * @return {!Options} A self reference. */ enableElementCacheCleanup(enable: boolean): Options; /** * Configures whether to require the IE window to have input focus before * performing any user interactions (i.e. mouse or keyboard events). This * option is disabled by default, but delivers much more accurate interaction * events when enabled. * * @param {boolean} require Whether to require window focus. * @return {!Options} A self reference. */ requireWindowFocus(require: boolean): Options; /** * Configures the timeout, in milliseconds, that the driver will attempt to * located and attach to a newly opened instance of Internet Explorer. The * default is zero, which indicates waiting indefinitely. * * @param {number} timeout How long to wait for IE. * @return {!Options} A self reference. */ browserAttachTimeout(timeout: number): Options; /** * Configures whether to launch Internet Explorer using the CreateProcess API. * If this option is not specified, IE is launched using IELaunchURL, if * available. For IE 8 and above, this option requires the TabProcGrowth * registry value to be set to 0. * * @param {boolean} force Whether to use the CreateProcess API. * @return {!Options} A self reference. */ forceCreateProcessApi(force: boolean): Options; /** * Specifies command-line switches to use when launching Internet Explorer. * This is only valid when used with {@link #forceCreateProcessApi}. * * @param {...(string|!Array.<string>)} var_args The arguments to add. * @return {!Options} A self reference. 
*/ addArguments(...var_args: string[]): Options; /** * Configures whether proxies should be configured on a per-process basis. If * not set, setting a {@linkplain #setProxy proxy} will configure the system * proxy. The default behavior is to use the system proxy. * * @param {boolean} enable Whether to enable per-process proxy settings. * @return {!Options} A self reference. */ usePerProcessProxy(enable: boolean): Options; /** * Configures whether to clear the cache, cookies, history, and saved form data * before starting the browser. _Using this capability will clear session data * for all running instances of Internet Explorer, including those started * manually._ * * @param {boolean} cleanSession Whether to clear all session data on startup. * @return {!Options} A self reference. */ ensureCleanSession(cleanSession: boolean): Options; /** * Sets the path to the log file the driver should log to. * @param {string} file The log file path. * @return {!Options} A self reference. */ setLogFile(file: string): Options; /** * Sets the IEDriverServer's logging {@linkplain Level level}. * @param {Level} level The logging level. * @return {!Options} A self reference. */ setLogLevel(level: webdriver.logging.Level): Options; /** * Sets the IP address of the driver's host adapter. * @param {string} host The IP address to use. * @return {!Options} A self reference. */ setHost(host: string): Options; /** * Sets the path of the temporary data directory to use. * @param {string} path The log file path. * @return {!Options} A self reference. */ setExtractPath(path: string): Options; /** * Sets whether the driver should start in silent mode. * @param {boolean} silent Whether to run in silent mode. * @return {!Options} A self reference. */ silent(silent: boolean): Options; /** * Sets the proxy settings for the new session. * @param {capabilities.ProxyConfig} proxy The proxy configuration to use. * @return {!Options} A self reference. 
*/ setProxy(proxy: webdriver.ProxyConfig): Options; /** * Converts this options instance to a {@link capabilities.Capabilities} * object. * @param {capabilities.Capabilities=} opt_capabilities The capabilities to * merge these options into, if any. * @return {!capabilities.Capabilities} The capabilities. */ toCapabilities(opt_capabilities: webdriver.Capabilities): webdriver.Capabilities; }
Options
identifier_name
ie.d.ts
import * as webdriver from './index'; /** * A WebDriver client for Microsoft's Internet Explorer. */ export class Driver extends webdriver.WebDriver { /** * @param {(capabilities.Capabilities|Options)=} opt_config The configuration * options. * @param {promise.ControlFlow=} opt_flow The control flow to use, * or {@code null} to use the currently active flow. */ constructor(opt_config?: webdriver.Capabilities | Options, opt_flow?: webdriver.promise.ControlFlow); /** * This function is a no-op as file detectors are not supported by this * implementation. * @override */ setFileDetector(): void; } /** * Class for managing IEDriver specific options. */ export class Options { constructor(); /** * Extracts the IEDriver specific options from the given capabilities * object. * @param {!capabilities.Capabilities} caps The capabilities object. * @return {!Options} The IEDriver options. */ static fromCapabilities(caps: webdriver.Capabilities): Options; /** * Whether to disable the protected mode settings check when the session is * created. Disbling this setting may lead to significant instability as the * browser may become unresponsive/hang. Only 'best effort' support is provided * when using this capability. * * For more information, refer to the IEDriver's * [required system configuration](http://goo.gl/eH0Yi3). * * @param {boolean} ignoreSettings Whether to ignore protected mode settings. * @return {!Options} A self reference. */ introduceFlakinessByIgnoringProtectedModeSettings(ignoreSettings: boolean): Options; /** * Indicates whether to skip the check that the browser's zoom level is set to * 100%. * * @param {boolean} ignore Whether to ignore the browser's zoom level settings. * @return {!Options} A self reference. */ ignoreZoomSetting(ignore: boolean): Options; /** * Sets the initial URL loaded when IE starts. This is intended to be used with * {@link #ignoreProtectedModeSettings} to allow the user to initialize IE in * the proper Protected Mode zone. 
Setting this option may cause browser * instability or flaky and unresponsive code. Only 'best effort' support is * provided when using this option. * * @param {string} url The initial browser URL. * @return {!Options} A self reference. */ initialBrowserUrl(url: string): Options; /** * Configures whether to enable persistent mouse hovering (true by default). * Persistent hovering is achieved by continuously firing mouse over events at * the last location the mouse cursor has been moved to. * * @param {boolean} enable Whether to enable persistent hovering. * @return {!Options} A self reference. */ enablePersistentHover(enable: boolean): Options; /** * Configures whether the driver should attempt to remove obsolete * {@linkplain webdriver.WebElement WebElements} from its internal cache on * page navigation (true by default). Disabling this option will cause the * driver to run with a larger memory footprint. * * @param {boolean} enable Whether to enable element reference cleanup. * @return {!Options} A self reference. */ enableElementCacheCleanup(enable: boolean): Options; /** * Configures whether to require the IE window to have input focus before * performing any user interactions (i.e. mouse or keyboard events). This * option is disabled by default, but delivers much more accurate interaction * events when enabled. * * @param {boolean} require Whether to require window focus. * @return {!Options} A self reference. */ requireWindowFocus(require: boolean): Options; /** * Configures the timeout, in milliseconds, that the driver will attempt to * located and attach to a newly opened instance of Internet Explorer. The * default is zero, which indicates waiting indefinitely. * * @param {number} timeout How long to wait for IE. * @return {!Options} A self reference. */ browserAttachTimeout(timeout: number): Options; /** * Configures whether to launch Internet Explorer using the CreateProcess API. 
* If this option is not specified, IE is launched using IELaunchURL, if * available. For IE 8 and above, this option requires the TabProcGrowth * registry value to be set to 0. * * @param {boolean} force Whether to use the CreateProcess API. * @return {!Options} A self reference. */ forceCreateProcessApi(force: boolean): Options; /** * Specifies command-line switches to use when launching Internet Explorer. * This is only valid when used with {@link #forceCreateProcessApi}. * * @param {...(string|!Array.<string>)} var_args The arguments to add. * @return {!Options} A self reference. */ addArguments(...var_args: string[]): Options; /** * Configures whether proxies should be configured on a per-process basis. If * not set, setting a {@linkplain #setProxy proxy} will configure the system * proxy. The default behavior is to use the system proxy. * * @param {boolean} enable Whether to enable per-process proxy settings. * @return {!Options} A self reference. */ usePerProcessProxy(enable: boolean): Options; /** * Configures whether to clear the cache, cookies, history, and saved form data * before starting the browser. _Using this capability will clear session data * for all running instances of Internet Explorer, including those started * manually._ * * @param {boolean} cleanSession Whether to clear all session data on startup. * @return {!Options} A self reference. */ ensureCleanSession(cleanSession: boolean): Options; /** * Sets the path to the log file the driver should log to. * @param {string} file The log file path. * @return {!Options} A self reference. */ setLogFile(file: string): Options; /** * Sets the IEDriverServer's logging {@linkplain Level level}. * @param {Level} level The logging level. * @return {!Options} A self reference. */ setLogLevel(level: webdriver.logging.Level): Options; /** * Sets the IP address of the driver's host adapter. * @param {string} host The IP address to use. * @return {!Options} A self reference. 
*/ setHost(host: string): Options; /** * Sets the path of the temporary data directory to use. * @param {string} path The log file path.
* @return {!Options} A self reference. */ setExtractPath(path: string): Options; /** * Sets whether the driver should start in silent mode. * @param {boolean} silent Whether to run in silent mode. * @return {!Options} A self reference. */ silent(silent: boolean): Options; /** * Sets the proxy settings for the new session. * @param {capabilities.ProxyConfig} proxy The proxy configuration to use. * @return {!Options} A self reference. */ setProxy(proxy: webdriver.ProxyConfig): Options; /** * Converts this options instance to a {@link capabilities.Capabilities} * object. * @param {capabilities.Capabilities=} opt_capabilities The capabilities to * merge these options into, if any. * @return {!capabilities.Capabilities} The capabilities. */ toCapabilities(opt_capabilities: webdriver.Capabilities): webdriver.Capabilities; }
random_line_split
issue-12127.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(box_syntax, unboxed_closures)] fn to_fn_once<A,F:FnOnce<A>>(f: F) -> F
fn do_it(x: &isize) { } fn main() { let x: Box<_> = box 22; let f = to_fn_once(move|| do_it(&*x)); to_fn_once(move|| { f(); f(); //~^ ERROR: use of moved value: `f` })() }
{ f }
identifier_body
issue-12127.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(box_syntax, unboxed_closures)] fn to_fn_once<A,F:FnOnce<A>>(f: F) -> F { f } fn do_it(x: &isize) { } fn
() { let x: Box<_> = box 22; let f = to_fn_once(move|| do_it(&*x)); to_fn_once(move|| { f(); f(); //~^ ERROR: use of moved value: `f` })() }
main
identifier_name
issue-12127.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(box_syntax, unboxed_closures)] fn to_fn_once<A,F:FnOnce<A>>(f: F) -> F { f } fn do_it(x: &isize) { } fn main() { let x: Box<_> = box 22; let f = to_fn_once(move|| do_it(&*x)); to_fn_once(move|| { f(); f(); //~^ ERROR: use of moved value: `f` })() }
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
random_line_split
AgreeItem.web.js
'use strict'; Object.defineProperty(exports, "__esModule", { value: true }); exports["default"] = undefined; var _defineProperty2 = require('babel-runtime/helpers/defineProperty'); var _defineProperty3 = _interopRequireDefault(_defineProperty2); var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck'); var _classCallCheck3 = _interopRequireDefault(_classCallCheck2); var _possibleConstructorReturn2 = require('babel-runtime/helpers/possibleConstructorReturn'); var _possibleConstructorReturn3 = _interopRequireDefault(_possibleConstructorReturn2); var _inherits2 = require('babel-runtime/helpers/inherits'); var _inherits3 = _interopRequireDefault(_inherits2); var _react = require('react'); var _react2 = _interopRequireDefault(_react); var _classnames = require('classnames'); var _classnames2 = _interopRequireDefault(_classnames); var _Checkbox = require('./Checkbox.web'); var _Checkbox2 = _interopRequireDefault(_Checkbox); var _getDataAttr = require('../_util/getDataAttr'); var _getDataAttr2 = _interopRequireDefault(_getDataAttr); var _omit = require('omit.js'); var _omit2 = _interopRequireDefault(_omit); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } var __assign = undefined && undefined.__assign || Object.assign || function (t) { for (var s, i = 1, n = arguments.length; i < n; i++) { s = arguments[i]; for (var p in s) { if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p]; } } return t; }; var AgreeItem = function (_React$Component) { (0, _inherits3["default"])(AgreeItem, _React$Component); function AgreeItem()
AgreeItem.prototype.render = function render() { var _classNames; var _props = this.props, prefixCls = _props.prefixCls, style = _props.style, className = _props.className; var wrapCls = (0, _classnames2["default"])((_classNames = {}, (0, _defineProperty3["default"])(_classNames, prefixCls + '-agree', true), (0, _defineProperty3["default"])(_classNames, className, className), _classNames)); return _react2["default"].createElement("div", __assign({}, (0, _getDataAttr2["default"])(this.props), { className: wrapCls, style: style }), _react2["default"].createElement(_Checkbox2["default"], __assign({}, (0, _omit2["default"])(this.props, ['style']), { className: prefixCls + '-agree-label' }))); }; return AgreeItem; }(_react2["default"].Component); exports["default"] = AgreeItem; AgreeItem.defaultProps = { prefixCls: 'am-checkbox' }; module.exports = exports['default'];
{ (0, _classCallCheck3["default"])(this, AgreeItem); return (0, _possibleConstructorReturn3["default"])(this, _React$Component.apply(this, arguments)); }
identifier_body
AgreeItem.web.js
'use strict'; Object.defineProperty(exports, "__esModule", { value: true }); exports["default"] = undefined; var _defineProperty2 = require('babel-runtime/helpers/defineProperty'); var _defineProperty3 = _interopRequireDefault(_defineProperty2); var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck'); var _classCallCheck3 = _interopRequireDefault(_classCallCheck2); var _possibleConstructorReturn2 = require('babel-runtime/helpers/possibleConstructorReturn'); var _possibleConstructorReturn3 = _interopRequireDefault(_possibleConstructorReturn2); var _inherits2 = require('babel-runtime/helpers/inherits'); var _inherits3 = _interopRequireDefault(_inherits2);
var _classnames = require('classnames'); var _classnames2 = _interopRequireDefault(_classnames); var _Checkbox = require('./Checkbox.web'); var _Checkbox2 = _interopRequireDefault(_Checkbox); var _getDataAttr = require('../_util/getDataAttr'); var _getDataAttr2 = _interopRequireDefault(_getDataAttr); var _omit = require('omit.js'); var _omit2 = _interopRequireDefault(_omit); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } var __assign = undefined && undefined.__assign || Object.assign || function (t) { for (var s, i = 1, n = arguments.length; i < n; i++) { s = arguments[i]; for (var p in s) { if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p]; } } return t; }; var AgreeItem = function (_React$Component) { (0, _inherits3["default"])(AgreeItem, _React$Component); function AgreeItem() { (0, _classCallCheck3["default"])(this, AgreeItem); return (0, _possibleConstructorReturn3["default"])(this, _React$Component.apply(this, arguments)); } AgreeItem.prototype.render = function render() { var _classNames; var _props = this.props, prefixCls = _props.prefixCls, style = _props.style, className = _props.className; var wrapCls = (0, _classnames2["default"])((_classNames = {}, (0, _defineProperty3["default"])(_classNames, prefixCls + '-agree', true), (0, _defineProperty3["default"])(_classNames, className, className), _classNames)); return _react2["default"].createElement("div", __assign({}, (0, _getDataAttr2["default"])(this.props), { className: wrapCls, style: style }), _react2["default"].createElement(_Checkbox2["default"], __assign({}, (0, _omit2["default"])(this.props, ['style']), { className: prefixCls + '-agree-label' }))); }; return AgreeItem; }(_react2["default"].Component); exports["default"] = AgreeItem; AgreeItem.defaultProps = { prefixCls: 'am-checkbox' }; module.exports = exports['default'];
var _react = require('react'); var _react2 = _interopRequireDefault(_react);
random_line_split
AgreeItem.web.js
'use strict'; Object.defineProperty(exports, "__esModule", { value: true }); exports["default"] = undefined; var _defineProperty2 = require('babel-runtime/helpers/defineProperty'); var _defineProperty3 = _interopRequireDefault(_defineProperty2); var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck'); var _classCallCheck3 = _interopRequireDefault(_classCallCheck2); var _possibleConstructorReturn2 = require('babel-runtime/helpers/possibleConstructorReturn'); var _possibleConstructorReturn3 = _interopRequireDefault(_possibleConstructorReturn2); var _inherits2 = require('babel-runtime/helpers/inherits'); var _inherits3 = _interopRequireDefault(_inherits2); var _react = require('react'); var _react2 = _interopRequireDefault(_react); var _classnames = require('classnames'); var _classnames2 = _interopRequireDefault(_classnames); var _Checkbox = require('./Checkbox.web'); var _Checkbox2 = _interopRequireDefault(_Checkbox); var _getDataAttr = require('../_util/getDataAttr'); var _getDataAttr2 = _interopRequireDefault(_getDataAttr); var _omit = require('omit.js'); var _omit2 = _interopRequireDefault(_omit); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } var __assign = undefined && undefined.__assign || Object.assign || function (t) { for (var s, i = 1, n = arguments.length; i < n; i++) { s = arguments[i]; for (var p in s) { if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p]; } } return t; }; var AgreeItem = function (_React$Component) { (0, _inherits3["default"])(AgreeItem, _React$Component); function
() { (0, _classCallCheck3["default"])(this, AgreeItem); return (0, _possibleConstructorReturn3["default"])(this, _React$Component.apply(this, arguments)); } AgreeItem.prototype.render = function render() { var _classNames; var _props = this.props, prefixCls = _props.prefixCls, style = _props.style, className = _props.className; var wrapCls = (0, _classnames2["default"])((_classNames = {}, (0, _defineProperty3["default"])(_classNames, prefixCls + '-agree', true), (0, _defineProperty3["default"])(_classNames, className, className), _classNames)); return _react2["default"].createElement("div", __assign({}, (0, _getDataAttr2["default"])(this.props), { className: wrapCls, style: style }), _react2["default"].createElement(_Checkbox2["default"], __assign({}, (0, _omit2["default"])(this.props, ['style']), { className: prefixCls + '-agree-label' }))); }; return AgreeItem; }(_react2["default"].Component); exports["default"] = AgreeItem; AgreeItem.defaultProps = { prefixCls: 'am-checkbox' }; module.exports = exports['default'];
AgreeItem
identifier_name
AgreeItem.web.js
'use strict'; Object.defineProperty(exports, "__esModule", { value: true }); exports["default"] = undefined; var _defineProperty2 = require('babel-runtime/helpers/defineProperty'); var _defineProperty3 = _interopRequireDefault(_defineProperty2); var _classCallCheck2 = require('babel-runtime/helpers/classCallCheck'); var _classCallCheck3 = _interopRequireDefault(_classCallCheck2); var _possibleConstructorReturn2 = require('babel-runtime/helpers/possibleConstructorReturn'); var _possibleConstructorReturn3 = _interopRequireDefault(_possibleConstructorReturn2); var _inherits2 = require('babel-runtime/helpers/inherits'); var _inherits3 = _interopRequireDefault(_inherits2); var _react = require('react'); var _react2 = _interopRequireDefault(_react); var _classnames = require('classnames'); var _classnames2 = _interopRequireDefault(_classnames); var _Checkbox = require('./Checkbox.web'); var _Checkbox2 = _interopRequireDefault(_Checkbox); var _getDataAttr = require('../_util/getDataAttr'); var _getDataAttr2 = _interopRequireDefault(_getDataAttr); var _omit = require('omit.js'); var _omit2 = _interopRequireDefault(_omit); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { "default": obj }; } var __assign = undefined && undefined.__assign || Object.assign || function (t) { for (var s, i = 1, n = arguments.length; i < n; i++)
return t; }; var AgreeItem = function (_React$Component) { (0, _inherits3["default"])(AgreeItem, _React$Component); function AgreeItem() { (0, _classCallCheck3["default"])(this, AgreeItem); return (0, _possibleConstructorReturn3["default"])(this, _React$Component.apply(this, arguments)); } AgreeItem.prototype.render = function render() { var _classNames; var _props = this.props, prefixCls = _props.prefixCls, style = _props.style, className = _props.className; var wrapCls = (0, _classnames2["default"])((_classNames = {}, (0, _defineProperty3["default"])(_classNames, prefixCls + '-agree', true), (0, _defineProperty3["default"])(_classNames, className, className), _classNames)); return _react2["default"].createElement("div", __assign({}, (0, _getDataAttr2["default"])(this.props), { className: wrapCls, style: style }), _react2["default"].createElement(_Checkbox2["default"], __assign({}, (0, _omit2["default"])(this.props, ['style']), { className: prefixCls + '-agree-label' }))); }; return AgreeItem; }(_react2["default"].Component); exports["default"] = AgreeItem; AgreeItem.defaultProps = { prefixCls: 'am-checkbox' }; module.exports = exports['default'];
{ s = arguments[i]; for (var p in s) { if (Object.prototype.hasOwnProperty.call(s, p)) t[p] = s[p]; } }
conditional_block
dummy_backend.py
# Copyright 2010-2012 Institut Mines-Telecom # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Created on Nov 10, 2011 @author: Bilel Msekni @contact: bilel.msekni@telecom-sudparis.eu @author: Houssem Medhioub @contact: houssem.medhioub@it-sudparis.eu @organization: Institut Mines-Telecom - Telecom SudParis @license: Apache License, Version 2.0 """ #import pyocni.backend.backend as backend from pyocni.backends.backend import backend_interface import pyocni.pyocni_tools.config as config # getting the Logger logger = config.logger class backend(backend_interface): def create(self, entity): ''' Create an entity (Resource or Link) ''' logger.debug('The create operation of the dummy_backend') def
(self, entity): ''' Get the Entity's information ''' logger.debug('The read operation of the dummy_backend') def update(self, old_entity, new_entity): ''' Update an Entity's information ''' logger.debug('The update operation of the dummy_backend') def delete(self, entity): ''' Delete an Entity ''' logger.debug('The delete operation of the dummy_backend') def action(self, entity, action, attributes): ''' Perform an action on an Entity ''' print "i got entity = " + str(entity) print "i got to do action = " + action print " my attributes are = " print attributes logger.debug('The Entity\'s action operation of the dummy_backend')
read
identifier_name
dummy_backend.py
# Copyright 2010-2012 Institut Mines-Telecom # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Created on Nov 10, 2011 @author: Bilel Msekni @contact: bilel.msekni@telecom-sudparis.eu @author: Houssem Medhioub @contact: houssem.medhioub@it-sudparis.eu @organization: Institut Mines-Telecom - Telecom SudParis @license: Apache License, Version 2.0 """ #import pyocni.backend.backend as backend from pyocni.backends.backend import backend_interface import pyocni.pyocni_tools.config as config # getting the Logger logger = config.logger class backend(backend_interface): def create(self, entity): ''' Create an entity (Resource or Link) ''' logger.debug('The create operation of the dummy_backend') def read(self, entity): ''' Get the Entity's information ''' logger.debug('The read operation of the dummy_backend') def update(self, old_entity, new_entity): ''' Update an Entity's information ''' logger.debug('The update operation of the dummy_backend') def delete(self, entity): ''' Delete an Entity ''' logger.debug('The delete operation of the dummy_backend') def action(self, entity, action, attributes): ''' Perform an action on an Entity '''
print "i got to do action = " + action print " my attributes are = " print attributes logger.debug('The Entity\'s action operation of the dummy_backend')
print "i got entity = " + str(entity)
random_line_split
dummy_backend.py
# Copyright 2010-2012 Institut Mines-Telecom # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Created on Nov 10, 2011 @author: Bilel Msekni @contact: bilel.msekni@telecom-sudparis.eu @author: Houssem Medhioub @contact: houssem.medhioub@it-sudparis.eu @organization: Institut Mines-Telecom - Telecom SudParis @license: Apache License, Version 2.0 """ #import pyocni.backend.backend as backend from pyocni.backends.backend import backend_interface import pyocni.pyocni_tools.config as config # getting the Logger logger = config.logger class backend(backend_interface): def create(self, entity):
def read(self, entity): ''' Get the Entity's information ''' logger.debug('The read operation of the dummy_backend') def update(self, old_entity, new_entity): ''' Update an Entity's information ''' logger.debug('The update operation of the dummy_backend') def delete(self, entity): ''' Delete an Entity ''' logger.debug('The delete operation of the dummy_backend') def action(self, entity, action, attributes): ''' Perform an action on an Entity ''' print "i got entity = " + str(entity) print "i got to do action = " + action print " my attributes are = " print attributes logger.debug('The Entity\'s action operation of the dummy_backend')
''' Create an entity (Resource or Link) ''' logger.debug('The create operation of the dummy_backend')
identifier_body
test_xlog_cleanup.py
#!/usr/bin/env python """ Copyright (c) 2004-Present Pivotal Software, Inc. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from gppylib.commands.base import Command from gppylib.db import dbconn from tinctest import logger from mpp.lib.PSQL import PSQL from mpp.models import MPPTestCase from mpp.gpdb.tests.storage.walrepl import lib as walrepl import mpp.gpdb.tests.storage.walrepl.run import os import shutil import subprocess class basebackup_cases(mpp.gpdb.tests.storage.walrepl.run.StandbyRunMixin, MPPTestCase): def tearDown(self): super(basebackup_cases, self).tearDown() self.reset_fault('base_backup_post_create_checkpoint') def run_gpfaultinjector(self, fault_type, fault_name): cmd_str = 'gpfaultinjector -s 1 -y {0} -f {1}'.format( fault_type, fault_name) cmd = Command(cmd_str, cmd_str) cmd.run() return cmd.get_results() def resume(self, fault_name): return self.run_gpfaultinjector('resume', fault_name) def suspend_at(self, fault_name): return self.run_gpfaultinjector('suspend', fault_name) def reset_fault(self, fault_name): return self.run_gpfaultinjector('reset', fault_name) def fault_status(self, fault_name): return self.run_gpfaultinjector('status', fault_name) def wait_triggered(self, fault_name): search = "fault injection state:'triggered'" for i in walrepl.polling(10, 3): result = self.fault_status(fault_name) stdout = result.stdout if stdout.find(search) > 0: return True return False def test_xlogcleanup(self): 
""" Test for verifying if xlog seg created while basebackup dumps out data does not get cleaned """ shutil.rmtree('base', True) PSQL.run_sql_command('DROP table if exists foo') # Inject fault at post checkpoint create (basebackup) logger.info ('Injecting base_backup_post_create_checkpoint fault ...') result = self.suspend_at( 'base_backup_post_create_checkpoint') logger.info(result.stdout) self.assertEqual(result.rc, 0, result.stdout) # Now execute basebackup. It will be blocked due to the # injected fault. logger.info ('Perform basebackup with xlog & recovery.conf...') pg_basebackup = subprocess.Popen(['pg_basebackup', '-x', '-R', '-D', 'base'] , stdout = subprocess.PIPE , stderr = subprocess.PIPE) # Give basebackup a moment to reach the fault & # trigger it logger.info('Check if suspend fault is hit ...') triggered = self.wait_triggered( 'base_backup_post_create_checkpoint') self.assertTrue(triggered, 'Fault was not triggered') # Perform operations that causes xlog seg generation logger.info ('Performing xlog seg generation ...') count = 0 while (count < 10):
# Resume basebackup result = self.resume('base_backup_post_create_checkpoint') logger.info(result.stdout) self.assertEqual(result.rc, 0, result.stdout) # Wait until basebackup end logger.info('Waiting for basebackup to end ...') sql = "SELECT count(*) FROM pg_stat_replication" with dbconn.connect(dbconn.DbURL(), utility=True) as conn: while (True): curs = dbconn.execSQL(conn, sql) results = curs.fetchall() if (int(results[0][0]) == 0): break; # Verify if basebackup completed successfully # See if recovery.conf exists (Yes - Pass) self.assertTrue(os.path.exists(os.path.join('base','recovery.conf'))) logger.info ('Found recovery.conf in the backup directory.') logger.info ('Pass')
PSQL.run_sql_command('select pg_switch_xlog(); select pg_switch_xlog(); checkpoint;') count = count + 1
conditional_block
test_xlog_cleanup.py
#!/usr/bin/env python """ Copyright (c) 2004-Present Pivotal Software, Inc. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from gppylib.commands.base import Command from gppylib.db import dbconn from tinctest import logger from mpp.lib.PSQL import PSQL from mpp.models import MPPTestCase from mpp.gpdb.tests.storage.walrepl import lib as walrepl import mpp.gpdb.tests.storage.walrepl.run import os import shutil import subprocess class basebackup_cases(mpp.gpdb.tests.storage.walrepl.run.StandbyRunMixin, MPPTestCase): def tearDown(self): super(basebackup_cases, self).tearDown() self.reset_fault('base_backup_post_create_checkpoint') def run_gpfaultinjector(self, fault_type, fault_name): cmd_str = 'gpfaultinjector -s 1 -y {0} -f {1}'.format( fault_type, fault_name) cmd = Command(cmd_str, cmd_str) cmd.run() return cmd.get_results() def resume(self, fault_name): return self.run_gpfaultinjector('resume', fault_name) def suspend_at(self, fault_name): return self.run_gpfaultinjector('suspend', fault_name) def
(self, fault_name): return self.run_gpfaultinjector('reset', fault_name) def fault_status(self, fault_name): return self.run_gpfaultinjector('status', fault_name) def wait_triggered(self, fault_name): search = "fault injection state:'triggered'" for i in walrepl.polling(10, 3): result = self.fault_status(fault_name) stdout = result.stdout if stdout.find(search) > 0: return True return False def test_xlogcleanup(self): """ Test for verifying if xlog seg created while basebackup dumps out data does not get cleaned """ shutil.rmtree('base', True) PSQL.run_sql_command('DROP table if exists foo') # Inject fault at post checkpoint create (basebackup) logger.info ('Injecting base_backup_post_create_checkpoint fault ...') result = self.suspend_at( 'base_backup_post_create_checkpoint') logger.info(result.stdout) self.assertEqual(result.rc, 0, result.stdout) # Now execute basebackup. It will be blocked due to the # injected fault. logger.info ('Perform basebackup with xlog & recovery.conf...') pg_basebackup = subprocess.Popen(['pg_basebackup', '-x', '-R', '-D', 'base'] , stdout = subprocess.PIPE , stderr = subprocess.PIPE) # Give basebackup a moment to reach the fault & # trigger it logger.info('Check if suspend fault is hit ...') triggered = self.wait_triggered( 'base_backup_post_create_checkpoint') self.assertTrue(triggered, 'Fault was not triggered') # Perform operations that causes xlog seg generation logger.info ('Performing xlog seg generation ...') count = 0 while (count < 10): PSQL.run_sql_command('select pg_switch_xlog(); select pg_switch_xlog(); checkpoint;') count = count + 1 # Resume basebackup result = self.resume('base_backup_post_create_checkpoint') logger.info(result.stdout) self.assertEqual(result.rc, 0, result.stdout) # Wait until basebackup end logger.info('Waiting for basebackup to end ...') sql = "SELECT count(*) FROM pg_stat_replication" with dbconn.connect(dbconn.DbURL(), utility=True) as conn: while (True): curs = dbconn.execSQL(conn, sql) results = 
curs.fetchall() if (int(results[0][0]) == 0): break; # Verify if basebackup completed successfully # See if recovery.conf exists (Yes - Pass) self.assertTrue(os.path.exists(os.path.join('base','recovery.conf'))) logger.info ('Found recovery.conf in the backup directory.') logger.info ('Pass')
reset_fault
identifier_name
test_xlog_cleanup.py
#!/usr/bin/env python """ Copyright (c) 2004-Present Pivotal Software, Inc. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from gppylib.commands.base import Command from gppylib.db import dbconn from tinctest import logger from mpp.lib.PSQL import PSQL from mpp.models import MPPTestCase from mpp.gpdb.tests.storage.walrepl import lib as walrepl import mpp.gpdb.tests.storage.walrepl.run import os import shutil import subprocess class basebackup_cases(mpp.gpdb.tests.storage.walrepl.run.StandbyRunMixin, MPPTestCase): def tearDown(self): super(basebackup_cases, self).tearDown() self.reset_fault('base_backup_post_create_checkpoint') def run_gpfaultinjector(self, fault_type, fault_name): cmd_str = 'gpfaultinjector -s 1 -y {0} -f {1}'.format( fault_type, fault_name) cmd = Command(cmd_str, cmd_str) cmd.run() return cmd.get_results() def resume(self, fault_name): return self.run_gpfaultinjector('resume', fault_name) def suspend_at(self, fault_name): return self.run_gpfaultinjector('suspend', fault_name) def reset_fault(self, fault_name): return self.run_gpfaultinjector('reset', fault_name) def fault_status(self, fault_name): return self.run_gpfaultinjector('status', fault_name) def wait_triggered(self, fault_name): search = "fault injection state:'triggered'" for i in walrepl.polling(10, 3): result = self.fault_status(fault_name) stdout = result.stdout if stdout.find(search) > 0: return True return False def test_xlogcleanup(self):
""" Test for verifying if xlog seg created while basebackup dumps out data does not get cleaned """ shutil.rmtree('base', True) PSQL.run_sql_command('DROP table if exists foo') # Inject fault at post checkpoint create (basebackup) logger.info ('Injecting base_backup_post_create_checkpoint fault ...') result = self.suspend_at( 'base_backup_post_create_checkpoint') logger.info(result.stdout) self.assertEqual(result.rc, 0, result.stdout) # Now execute basebackup. It will be blocked due to the # injected fault. logger.info ('Perform basebackup with xlog & recovery.conf...') pg_basebackup = subprocess.Popen(['pg_basebackup', '-x', '-R', '-D', 'base'] , stdout = subprocess.PIPE , stderr = subprocess.PIPE) # Give basebackup a moment to reach the fault & # trigger it logger.info('Check if suspend fault is hit ...') triggered = self.wait_triggered( 'base_backup_post_create_checkpoint') self.assertTrue(triggered, 'Fault was not triggered') # Perform operations that causes xlog seg generation logger.info ('Performing xlog seg generation ...') count = 0 while (count < 10): PSQL.run_sql_command('select pg_switch_xlog(); select pg_switch_xlog(); checkpoint;') count = count + 1 # Resume basebackup result = self.resume('base_backup_post_create_checkpoint') logger.info(result.stdout) self.assertEqual(result.rc, 0, result.stdout) # Wait until basebackup end logger.info('Waiting for basebackup to end ...') sql = "SELECT count(*) FROM pg_stat_replication" with dbconn.connect(dbconn.DbURL(), utility=True) as conn: while (True): curs = dbconn.execSQL(conn, sql) results = curs.fetchall() if (int(results[0][0]) == 0): break; # Verify if basebackup completed successfully # See if recovery.conf exists (Yes - Pass) self.assertTrue(os.path.exists(os.path.join('base','recovery.conf'))) logger.info ('Found recovery.conf in the backup directory.') logger.info ('Pass')
identifier_body
test_xlog_cleanup.py
#!/usr/bin/env python """ Copyright (c) 2004-Present Pivotal Software, Inc. This program and the accompanying materials are made available under the terms of the under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ from gppylib.commands.base import Command from gppylib.db import dbconn from tinctest import logger from mpp.lib.PSQL import PSQL from mpp.models import MPPTestCase from mpp.gpdb.tests.storage.walrepl import lib as walrepl import mpp.gpdb.tests.storage.walrepl.run import os import shutil import subprocess class basebackup_cases(mpp.gpdb.tests.storage.walrepl.run.StandbyRunMixin, MPPTestCase): def tearDown(self): super(basebackup_cases, self).tearDown() self.reset_fault('base_backup_post_create_checkpoint') def run_gpfaultinjector(self, fault_type, fault_name): cmd_str = 'gpfaultinjector -s 1 -y {0} -f {1}'.format( fault_type, fault_name) cmd = Command(cmd_str, cmd_str) cmd.run() return cmd.get_results() def resume(self, fault_name): return self.run_gpfaultinjector('resume', fault_name) def suspend_at(self, fault_name): return self.run_gpfaultinjector('suspend', fault_name) def reset_fault(self, fault_name): return self.run_gpfaultinjector('reset', fault_name) def fault_status(self, fault_name): return self.run_gpfaultinjector('status', fault_name) def wait_triggered(self, fault_name): search = "fault injection state:'triggered'" for i in walrepl.polling(10, 3): result = self.fault_status(fault_name) stdout = result.stdout if stdout.find(search) > 0: return True return False def test_xlogcleanup(self): 
""" Test for verifying if xlog seg created while basebackup dumps out data does not get cleaned """ shutil.rmtree('base', True) PSQL.run_sql_command('DROP table if exists foo') # Inject fault at post checkpoint create (basebackup) logger.info ('Injecting base_backup_post_create_checkpoint fault ...') result = self.suspend_at( 'base_backup_post_create_checkpoint') logger.info(result.stdout) self.assertEqual(result.rc, 0, result.stdout) # Now execute basebackup. It will be blocked due to the # injected fault. logger.info ('Perform basebackup with xlog & recovery.conf...') pg_basebackup = subprocess.Popen(['pg_basebackup', '-x', '-R', '-D', 'base'] , stdout = subprocess.PIPE , stderr = subprocess.PIPE) # Give basebackup a moment to reach the fault & # trigger it logger.info('Check if suspend fault is hit ...') triggered = self.wait_triggered( 'base_backup_post_create_checkpoint') self.assertTrue(triggered, 'Fault was not triggered') # Perform operations that causes xlog seg generation logger.info ('Performing xlog seg generation ...') count = 0 while (count < 10): PSQL.run_sql_command('select pg_switch_xlog(); select pg_switch_xlog(); checkpoint;') count = count + 1 # Resume basebackup
# Wait until basebackup end logger.info('Waiting for basebackup to end ...') sql = "SELECT count(*) FROM pg_stat_replication" with dbconn.connect(dbconn.DbURL(), utility=True) as conn: while (True): curs = dbconn.execSQL(conn, sql) results = curs.fetchall() if (int(results[0][0]) == 0): break; # Verify if basebackup completed successfully # See if recovery.conf exists (Yes - Pass) self.assertTrue(os.path.exists(os.path.join('base','recovery.conf'))) logger.info ('Found recovery.conf in the backup directory.') logger.info ('Pass')
result = self.resume('base_backup_post_create_checkpoint') logger.info(result.stdout) self.assertEqual(result.rc, 0, result.stdout)
random_line_split
models.py
from sqlalchemy import ( Column, Index, Integer, Text, Table, ForeignKey, String, Boolean, DateTime, ) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import ( scoped_session, sessionmaker, relationship, ) from zope.sqlalchemy import ZopeTransactionExtension DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension())) Base = declarative_base() #class MyModel(Base): # __tablename__ = 'models' # id = Column(Integer, primary_key=True) # name = Column(Text) # value = Column(Integer) gsClassStudent = Table('gsClassStudent', Base.metadata, Column('id', Integer, primary_key=True), Column('classid', Integer, ForeignKey('gsClass.id')), Column('studentid', Integer, ForeignKey('gsStudent.id')) ) class gsUser(Base): __tablename__ = 'gsUser' id = Column(Integer, primary_key=True) username = Column(String(120), index=True, unique=True) FirstName = Column(String(120), index=True, unique=False) LastName = Column(String(120), index=True, unique=False) myClasses = relationship('gsClass', back_populates="teacher") def __repr__(self): return '<User %r>' % (self.username) class gsStudent(Base): __tablename__ = 'gsStudent' id = Column(Integer, primary_key=True, autoincrement=False) username = Column(String(64), index=True, unique=True) FirstName = Column(String(120), index=True, unique=False) LastName = Column(String(120), index=True, unique=False) cohort = Column(Integer, index=True, unique=False) current = Column(Boolean, default=False) class gsClass(Base): __tablename__ = 'gsClass' id = Column(Integer, primary_key=True) classCode = Column(String(64), index=True, unique=False) cohort = Column(Integer, index=True, unique=False) #teacher = Column(String(120), index=True, unique=False) teacherid = Column(Integer, ForeignKey('gsUser.id')) teacher = relationship('gsUser', back_populates="myClasses") calendarYear = Column(Integer, index=True, unique=False) students = relationship('gsStudent', secondary=gsClassStudent, 
primaryjoin=(gsClassStudent.c.classid == id), secondaryjoin=(gsClassStudent.c.studentid == gsStudent.id), lazy='dynamic') class gsClassNote(Base):
#Index('my_index', MyModel.name, unique=True, mysql_length=255)
__tablename__ = 'gsClassNote' Noteid = Column(Integer, primary_key=True) classStudentid = Column(Integer, ForeignKey('gsClassStudent.id')) note = Column(Text, index=False, unique=False) value = Column(Integer, index=False) date = Column(DateTime, index=True)
identifier_body
models.py
from sqlalchemy import ( Column, Index, Integer, Text, Table, ForeignKey, String, Boolean, DateTime, ) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import ( scoped_session, sessionmaker, relationship, ) from zope.sqlalchemy import ZopeTransactionExtension DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension())) Base = declarative_base() #class MyModel(Base): # __tablename__ = 'models' # id = Column(Integer, primary_key=True) # name = Column(Text) # value = Column(Integer) gsClassStudent = Table('gsClassStudent', Base.metadata, Column('id', Integer, primary_key=True), Column('classid', Integer, ForeignKey('gsClass.id')), Column('studentid', Integer, ForeignKey('gsStudent.id')) ) class gsUser(Base): __tablename__ = 'gsUser' id = Column(Integer, primary_key=True) username = Column(String(120), index=True, unique=True) FirstName = Column(String(120), index=True, unique=False) LastName = Column(String(120), index=True, unique=False) myClasses = relationship('gsClass', back_populates="teacher") def __repr__(self): return '<User %r>' % (self.username) class gsStudent(Base): __tablename__ = 'gsStudent' id = Column(Integer, primary_key=True, autoincrement=False) username = Column(String(64), index=True, unique=True) FirstName = Column(String(120), index=True, unique=False) LastName = Column(String(120), index=True, unique=False) cohort = Column(Integer, index=True, unique=False) current = Column(Boolean, default=False) class gsClass(Base): __tablename__ = 'gsClass' id = Column(Integer, primary_key=True) classCode = Column(String(64), index=True, unique=False) cohort = Column(Integer, index=True, unique=False) #teacher = Column(String(120), index=True, unique=False) teacherid = Column(Integer, ForeignKey('gsUser.id')) teacher = relationship('gsUser', back_populates="myClasses") calendarYear = Column(Integer, index=True, unique=False) students = relationship('gsStudent', secondary=gsClassStudent, 
primaryjoin=(gsClassStudent.c.classid == id), secondaryjoin=(gsClassStudent.c.studentid == gsStudent.id), lazy='dynamic') class gsClassNote(Base): __tablename__ = 'gsClassNote' Noteid = Column(Integer, primary_key=True) classStudentid = Column(Integer, ForeignKey('gsClassStudent.id')) note = Column(Text, index=False, unique=False) value = Column(Integer, index=False) date = Column(DateTime, index=True)
#Index('my_index', MyModel.name, unique=True, mysql_length=255)
random_line_split
models.py
from sqlalchemy import ( Column, Index, Integer, Text, Table, ForeignKey, String, Boolean, DateTime, ) from sqlalchemy.ext.declarative import declarative_base from sqlalchemy.orm import ( scoped_session, sessionmaker, relationship, ) from zope.sqlalchemy import ZopeTransactionExtension DBSession = scoped_session(sessionmaker(extension=ZopeTransactionExtension())) Base = declarative_base() #class MyModel(Base): # __tablename__ = 'models' # id = Column(Integer, primary_key=True) # name = Column(Text) # value = Column(Integer) gsClassStudent = Table('gsClassStudent', Base.metadata, Column('id', Integer, primary_key=True), Column('classid', Integer, ForeignKey('gsClass.id')), Column('studentid', Integer, ForeignKey('gsStudent.id')) ) class gsUser(Base): __tablename__ = 'gsUser' id = Column(Integer, primary_key=True) username = Column(String(120), index=True, unique=True) FirstName = Column(String(120), index=True, unique=False) LastName = Column(String(120), index=True, unique=False) myClasses = relationship('gsClass', back_populates="teacher") def
(self): return '<User %r>' % (self.username) class gsStudent(Base): __tablename__ = 'gsStudent' id = Column(Integer, primary_key=True, autoincrement=False) username = Column(String(64), index=True, unique=True) FirstName = Column(String(120), index=True, unique=False) LastName = Column(String(120), index=True, unique=False) cohort = Column(Integer, index=True, unique=False) current = Column(Boolean, default=False) class gsClass(Base): __tablename__ = 'gsClass' id = Column(Integer, primary_key=True) classCode = Column(String(64), index=True, unique=False) cohort = Column(Integer, index=True, unique=False) #teacher = Column(String(120), index=True, unique=False) teacherid = Column(Integer, ForeignKey('gsUser.id')) teacher = relationship('gsUser', back_populates="myClasses") calendarYear = Column(Integer, index=True, unique=False) students = relationship('gsStudent', secondary=gsClassStudent, primaryjoin=(gsClassStudent.c.classid == id), secondaryjoin=(gsClassStudent.c.studentid == gsStudent.id), lazy='dynamic') class gsClassNote(Base): __tablename__ = 'gsClassNote' Noteid = Column(Integer, primary_key=True) classStudentid = Column(Integer, ForeignKey('gsClassStudent.id')) note = Column(Text, index=False, unique=False) value = Column(Integer, index=False) date = Column(DateTime, index=True) #Index('my_index', MyModel.name, unique=True, mysql_length=255)
__repr__
identifier_name
ContainerTable.react.js
// libs import { Component } from 'react'; import 'antd/dist/antd.css'; import { connect } from 'react-redux'; import Immutable from 'seamless-immutable'; import { Table, Card, Icon, Tag, Button } from 'antd'; import ReactJson from 'react-json-view'; import { openModal } from '../actions/modal.action'; import { createSelector } from 'reselect'; import { Row, Col } from 'antd'; const ContainerTable = ({ dataSource, openModal }) => { const columns = [ { title: 'PodName', dataIndex: 'podName', key: 'podName', onFilter: (value, record) => record.podName.includes(value), sorter: (a, b) => a.podName.length - b.podName.length }, { title: 'Age', dataIndex: 'age', key: 'age' }, { title: 'Address', dataIndex: 'address', key: 'address' }, { title: 'Status', dataIndex: '', key: 'x', render: (text, record) => ( <span> <Tag color="green">{record.podName}</Tag> </span> ) }, { title: 'Terminal', dataIndex: '', rowSpan: 2, key: 'y', render: (text, record) => ( <Row type="flex" justify="left" align="middle"> <Col span={3}> <Button icon="desktop" onClick={() => openModal(record, 'docker')}> Host </Button> </Col> <Col span={1}> <span className="ant-divider"/> </Col> <Col span={3}> <Button icon="laptop" onClick={() => openModal(record)}> Pod </Button> </Col> <Col span={1}> <span className="ant-divider"/> </Col> <Col span={3}> <Button icon="code-o" onClick={() => openModal(record)}> Log </Button> </Col> <Col span={1}> <span className="ant-divider"/> </Col> <Col span={3}> <Button icon="edit" onClick={() => openModal(record)}> Describe </Button> </Col> <Col span={5}/>
</Row> ) } ]; return ( <div> <Table columns={columns} dataSource={dataSource.asMutable()} expandedRowRender={(record) => ( <Card title="Card title"> <ReactJson src={record}/> </Card> )}/> </div> ); }; const containerTable = (state) => state.containerTable.dataSource; const autoCompleteFilter = (state) => state.autoCompleteFilter.filter; const tableDataSelector = createSelector( containerTable, autoCompleteFilter, (containerTable, autoCompleteFilter) => { let returnData = containerTable; if (autoCompleteFilter != '') { returnData = containerTable.filter((row) => Object.values(row).find((f) => f.toString().includes(autoCompleteFilter)) ); } return returnData; } ); ContainerTable.propTypes = { // columns: React.PropTypes.array.isRequired, dataSource: React.PropTypes.array.isRequired }; const mapStateToProps = (state) => ({ // columns: state.containerTable.columns, dataSource: tableDataSelector(state) }); export default connect(mapStateToProps, { openModal })(ContainerTable);
{/* <Tag color="green">{record.podName}</Tag>*/}
random_line_split
ContainerTable.react.js
// libs import { Component } from 'react'; import 'antd/dist/antd.css'; import { connect } from 'react-redux'; import Immutable from 'seamless-immutable'; import { Table, Card, Icon, Tag, Button } from 'antd'; import ReactJson from 'react-json-view'; import { openModal } from '../actions/modal.action'; import { createSelector } from 'reselect'; import { Row, Col } from 'antd'; const ContainerTable = ({ dataSource, openModal }) => { const columns = [ { title: 'PodName', dataIndex: 'podName', key: 'podName', onFilter: (value, record) => record.podName.includes(value), sorter: (a, b) => a.podName.length - b.podName.length }, { title: 'Age', dataIndex: 'age', key: 'age' }, { title: 'Address', dataIndex: 'address', key: 'address' }, { title: 'Status', dataIndex: '', key: 'x', render: (text, record) => ( <span> <Tag color="green">{record.podName}</Tag> </span> ) }, { title: 'Terminal', dataIndex: '', rowSpan: 2, key: 'y', render: (text, record) => ( <Row type="flex" justify="left" align="middle"> <Col span={3}> <Button icon="desktop" onClick={() => openModal(record, 'docker')}> Host </Button> </Col> <Col span={1}> <span className="ant-divider"/> </Col> <Col span={3}> <Button icon="laptop" onClick={() => openModal(record)}> Pod </Button> </Col> <Col span={1}> <span className="ant-divider"/> </Col> <Col span={3}> <Button icon="code-o" onClick={() => openModal(record)}> Log </Button> </Col> <Col span={1}> <span className="ant-divider"/> </Col> <Col span={3}> <Button icon="edit" onClick={() => openModal(record)}> Describe </Button> </Col> <Col span={5}/> {/* <Tag color="green">{record.podName}</Tag>*/} </Row> ) } ]; return ( <div> <Table columns={columns} dataSource={dataSource.asMutable()} expandedRowRender={(record) => ( <Card title="Card title"> <ReactJson src={record}/> </Card> )}/> </div> ); }; const containerTable = (state) => state.containerTable.dataSource; const autoCompleteFilter = (state) => state.autoCompleteFilter.filter; const tableDataSelector = 
createSelector( containerTable, autoCompleteFilter, (containerTable, autoCompleteFilter) => { let returnData = containerTable; if (autoCompleteFilter != '')
return returnData; } ); ContainerTable.propTypes = { // columns: React.PropTypes.array.isRequired, dataSource: React.PropTypes.array.isRequired }; const mapStateToProps = (state) => ({ // columns: state.containerTable.columns, dataSource: tableDataSelector(state) }); export default connect(mapStateToProps, { openModal })(ContainerTable);
{ returnData = containerTable.filter((row) => Object.values(row).find((f) => f.toString().includes(autoCompleteFilter)) ); }
conditional_block
settings.py
""" Django settings for lostanimals project. For more information on this file, see https://docs.djangoproject.com/en/1.7/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.7/ref/settings/ """ # Build paths inside the project like this: os.path.join(BASE_DIR, ...) import os BASE_DIR = os.path.dirname(os.path.dirname(__file__)) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
SECRET_KEY = 'ul!uc(_bz_fe=u2$k1^di#*dr3+-&gxwagi%-_@i2d&a09eo#d' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True # Templetes path debug TEMPLATE_DEBUG = True TEMPLATE_DIRS = [os.path.join(BASE_DIR, 'templates')] ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = ( 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', 'django_google_maps', 'lostpet', ) MIDDLEWARE_CLASSES = ( 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.auth.middleware.SessionAuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ) ROOT_URLCONF = 'lostanimals.urls' WSGI_APPLICATION = 'lostanimals.wsgi.application' # Database # https://docs.djangoproject.com/en/1.7/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': os.path.join(BASE_DIR, 'db.sqlite3'), } } # Internationalization # https://docs.djangoproject.com/en/1.7/topics/i18n/ LANGUAGE_CODE = 'pt-br' TIME_ZONE = 'UTC' USE_I18N = False USE_L10N = False USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.7/howto/static-files/ STATIC_URL = '/static/' MEDIA_ROOT = os.path.join(BASE_DIR, 'media') STATIC_ROOT = os.path.join(BASE_DIR, 'static')
# SECURITY WARNING: keep the secret key used in production secret!
random_line_split
hangman.py
# This is a hangman game. # Your game must do the following things. # Everytime a user guesses a character, it should tell them if their character # is in the secret word or not. # # Also, it should print the guessed character in the following format # if the secret word is unicorn and the user guessed the letter 'n' # you program should print _n____n # # It should also print a picture of the current state of the hanged man. # # If the user guesses a letter he already guessed, give them a warning. # # The user can make at most 6 mistakes. import random # don't worry about these lines. from hangman_pics import HANGMANPICS LIST_OF_WORDS = ['hangman', 'chairs', 'backpack', 'bodywash', 'clothing', 'computer', 'python', 'program', 'glasses', 'sweatshirt', 'sweatpants', 'mattress', 'friends', 'clocks', 'biology', 'algebra', 'suitcase', 'knives', 'ninjas', 'shampoo'] # First let's write a function to select a random word from the list of words. def getSecretWord(): # this line generates a random number use it to index into the list of words # and return a secret word. rnd = random.randint(0, len(LIST_OF_WORDS) - 1) return ... secret_word = getSecretWord() # functions help us organize our code! mistakes = 0 # Now create an empty list to keep track of the letters the user guessed def string_contains_character(c, word):
# copy your function from lesson6 here. def hide_word(guesses, secret_word): hidden_word = "" for letter in secret_word: if letter in guesses: ... # This is the while(True): guess = raw_input() # Check if the guess was a letter that the user already guessed. If so, # give them a warning and go back to the beginning of the loop. # If this is a new guess, add it to the list of letters the user guessed. # Maybe you could use one of the list methods... # Check if the new guess is in the secret word, using the function # string_contains_character you wrote on lesson6. # If the user made a mistake, increase their number of mistakes and let them # know! # Now, complete the function hide_word, which takes in the guesses made and # the secret_word and returns a the word in a hidden format. Remember, if the # letter was in the guesses, it should appear in the word, if it's not it # should appear as an underscore "_" # # If your hidden word has no underscores, the user won! Let them know about # that and break out of the loop print(HANGMANPICS[mistakes]) # this line will print a picture of the hanged man # If the user made 6 mistakes, tell them the game is over and break out of the # loop.
identifier_body
hangman.py
# This is a hangman game. # Your game must do the following things. # Everytime a user guesses a character, it should tell them if their character # is in the secret word or not. # # Also, it should print the guessed character in the following format # if the secret word is unicorn and the user guessed the letter 'n' # you program should print _n____n # # It should also print a picture of the current state of the hanged man. # # If the user guesses a letter he already guessed, give them a warning. # # The user can make at most 6 mistakes. import random # don't worry about these lines. from hangman_pics import HANGMANPICS LIST_OF_WORDS = ['hangman', 'chairs', 'backpack', 'bodywash', 'clothing', 'computer', 'python', 'program', 'glasses', 'sweatshirt', 'sweatpants', 'mattress', 'friends', 'clocks', 'biology', 'algebra', 'suitcase', 'knives', 'ninjas', 'shampoo'] # First let's write a function to select a random word from the list of words. def getSecretWord(): # this line generates a random number use it to index into the list of words # and return a secret word. rnd = random.randint(0, len(LIST_OF_WORDS) - 1) return ... secret_word = getSecretWord() # functions help us organize our code! mistakes = 0 # Now create an empty list to keep track of the letters the user guessed def string_contains_character(c, word): # copy your function from lesson6 here. def
(guesses, secret_word): hidden_word = "" for letter in secret_word: if letter in guesses: ... # This is the while(True): guess = raw_input() # Check if the guess was a letter that the user already guessed. If so, # give them a warning and go back to the beginning of the loop. # If this is a new guess, add it to the list of letters the user guessed. # Maybe you could use one of the list methods... # Check if the new guess is in the secret word, using the function # string_contains_character you wrote on lesson6. # If the user made a mistake, increase their number of mistakes and let them # know! # Now, complete the function hide_word, which takes in the guesses made and # the secret_word and returns a the word in a hidden format. Remember, if the # letter was in the guesses, it should appear in the word, if it's not it # should appear as an underscore "_" # # If your hidden word has no underscores, the user won! Let them know about # that and break out of the loop print(HANGMANPICS[mistakes]) # this line will print a picture of the hanged man # If the user made 6 mistakes, tell them the game is over and break out of the # loop.
hide_word
identifier_name
hangman.py
# This is a hangman game. # Your game must do the following things. # Everytime a user guesses a character, it should tell them if their character # is in the secret word or not. # # Also, it should print the guessed character in the following format # if the secret word is unicorn and the user guessed the letter 'n' # you program should print _n____n # # It should also print a picture of the current state of the hanged man. # # If the user guesses a letter he already guessed, give them a warning. # # The user can make at most 6 mistakes. import random # don't worry about these lines. from hangman_pics import HANGMANPICS LIST_OF_WORDS = ['hangman', 'chairs', 'backpack', 'bodywash', 'clothing', 'computer', 'python', 'program', 'glasses', 'sweatshirt', 'sweatpants', 'mattress', 'friends', 'clocks', 'biology', 'algebra', 'suitcase', 'knives', 'ninjas', 'shampoo'] # First let's write a function to select a random word from the list of words. def getSecretWord(): # this line generates a random number use it to index into the list of words # and return a secret word. rnd = random.randint(0, len(LIST_OF_WORDS) - 1) return ... secret_word = getSecretWord() # functions help us organize our code! mistakes = 0 # Now create an empty list to keep track of the letters the user guessed def string_contains_character(c, word): # copy your function from lesson6 here. def hide_word(guesses, secret_word): hidden_word = "" for letter in secret_word: if letter in guesses: ... # This is the while(True):
guess = raw_input() # Check if the guess was a letter that the user already guessed. If so, # give them a warning and go back to the beginning of the loop. # If this is a new guess, add it to the list of letters the user guessed. # Maybe you could use one of the list methods... # Check if the new guess is in the secret word, using the function # string_contains_character you wrote on lesson6. # If the user made a mistake, increase their number of mistakes and let them # know! # Now, complete the function hide_word, which takes in the guesses made and # the secret_word and returns a the word in a hidden format. Remember, if the # letter was in the guesses, it should appear in the word, if it's not it # should appear as an underscore "_" # # If your hidden word has no underscores, the user won! Let them know about # that and break out of the loop print(HANGMANPICS[mistakes]) # this line will print a picture of the hanged man # If the user made 6 mistakes, tell them the game is over and break out of the # loop.
conditional_block
hangman.py
# Also, it should print the guessed character in the following format # if the secret word is unicorn and the user guessed the letter 'n' # you program should print _n____n # # It should also print a picture of the current state of the hanged man. # # If the user guesses a letter he already guessed, give them a warning. # # The user can make at most 6 mistakes. import random # don't worry about these lines. from hangman_pics import HANGMANPICS LIST_OF_WORDS = ['hangman', 'chairs', 'backpack', 'bodywash', 'clothing', 'computer', 'python', 'program', 'glasses', 'sweatshirt', 'sweatpants', 'mattress', 'friends', 'clocks', 'biology', 'algebra', 'suitcase', 'knives', 'ninjas', 'shampoo'] # First let's write a function to select a random word from the list of words. def getSecretWord(): # this line generates a random number use it to index into the list of words # and return a secret word. rnd = random.randint(0, len(LIST_OF_WORDS) - 1) return ... secret_word = getSecretWord() # functions help us organize our code! mistakes = 0 # Now create an empty list to keep track of the letters the user guessed def string_contains_character(c, word): # copy your function from lesson6 here. def hide_word(guesses, secret_word): hidden_word = "" for letter in secret_word: if letter in guesses: ... # This is the while(True): guess = raw_input() # Check if the guess was a letter that the user already guessed. If so, # give them a warning and go back to the beginning of the loop. # If this is a new guess, add it to the list of letters the user guessed. # Maybe you could use one of the list methods... # Check if the new guess is in the secret word, using the function # string_contains_character you wrote on lesson6. # If the user made a mistake, increase their number of mistakes and let them # know! # Now, complete the function hide_word, which takes in the guesses made and # the secret_word and returns a the word in a hidden format. 
Remember, if the # letter was in the guesses, it should appear in the word, if it's not it # should appear as an underscore "_" # # If your hidden word has no underscores, the user won! Let them know about # that and break out of the loop print(HANGMANPICS[mistakes]) # this line will print a picture of the hanged man # If the user made 6 mistakes, tell them the game is over and break out of the # loop.
# This is a hangman game. # Your game must do the following things. # Everytime a user guesses a character, it should tell them if their character # is in the secret word or not. #
random_line_split
settings.py
""" Django settings for ross project. Generated by 'django-admin startproject' using Django 1.10.6. For more information on this file, see https://docs.djangoproject.com/en/1.10/topics/settings/ For the full list of settings and their values, see https://docs.djangoproject.com/en/1.10/ref/settings/ """ import os # Build paths inside the project like this: os.path.join(BASE_DIR, ...) BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) # Quick-start development settings - unsuitable for production # See https://docs.djangoproject.com/en/1.10/howto/deployment/checklist/ # SECURITY WARNING: keep the secret key used in production secret! SECRET_KEY = 'jtn=n8&nq9jgir8_z1ck40^c1s22d%=)z5qsm*q(bku*_=^sg&' # SECURITY WARNING: don't run with debug turned on in production! DEBUG = True ALLOWED_HOSTS = [] # Application definition INSTALLED_APPS = [ 'django.contrib.admin', 'django.contrib.auth', 'django.contrib.contenttypes', 'django.contrib.sessions', 'django.contrib.messages', 'django.contrib.staticfiles', ] MIDDLEWARE = [ 'django.middleware.security.SecurityMiddleware', 'django.contrib.sessions.middleware.SessionMiddleware', 'django.middleware.common.CommonMiddleware', 'django.middleware.csrf.CsrfViewMiddleware', 'django.contrib.auth.middleware.AuthenticationMiddleware', 'django.contrib.messages.middleware.MessageMiddleware', 'django.middleware.clickjacking.XFrameOptionsMiddleware', ] ROOT_URLCONF = 'ross.urls' TEMPLATES = [ { 'BACKEND': 'django.template.backends.django.DjangoTemplates', 'DIRS': [], 'APP_DIRS': True, 'OPTIONS': { 'context_processors': [ 'django.template.context_processors.debug', 'django.template.context_processors.request', 'django.contrib.auth.context_processors.auth', 'django.contrib.messages.context_processors.messages', ], }, }, ] WSGI_APPLICATION = 'ross.wsgi.application' # Database # https://docs.djangoproject.com/en/1.10/ref/settings/#databases DATABASES = { 'default': { 'ENGINE': 'django.db.backends.sqlite3', 'NAME': 
os.path.join(BASE_DIR, 'db.sqlite3'), } } # Password validation # https://docs.djangoproject.com/en/1.10/ref/settings/#auth-password-validators AUTH_PASSWORD_VALIDATORS = [ { 'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator', }, { 'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator', }, {
{ 'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator', }, ] # Internationalization # https://docs.djangoproject.com/en/1.10/topics/i18n/ LANGUAGE_CODE = 'en-us' TIME_ZONE = 'UTC' USE_I18N = True USE_L10N = True USE_TZ = True # Static files (CSS, JavaScript, Images) # https://docs.djangoproject.com/en/1.10/howto/static-files/ STATIC_URL = '/static/'
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator', },
random_line_split
audit-package-versions.js
/** * Make sure that all versions of all required packages are what they oughta be. */ /* eslint-disable no-console */ const { resolve } = require("path"); const fs = require("fs"); function
(dir) { if (!dir) { throw new Error("Missing directory!"); } const problems = []; const correctVersion = "^" + require(resolve(dir, "lerna.json")).version; const packages = fs.readdirSync(resolve(dir, "packages")); packages.forEach(pkgName => { const pkgJson = resolve(dir, "packages", pkgName, "package.json"); try { fs.statSync(pkgJson); } catch (e) { // No package.json, whatever, it's chill, let's move on return; } const pkg = require(pkgJson); ["dependencies", "peerDependencies", "devDependencies"].forEach( category => { if (!pkg[category]) { return; } const localDeps = Object.keys(pkg[category]).filter(depName => { return packages.indexOf(depName) !== -1; }); localDeps.forEach(depName => { const depVersion = pkg[category][depName]; if (depVersion !== correctVersion) { problems.push( `${pkgName} has ${depName} of ${depVersion} instead of ${correctVersion}` ); } }); } ); }); if (problems.length > 0) { problems.forEach(p => console.error(p)); throw new Error("Package version audit failed"); } } module.exports = auditPackageVersions; if (!module.parent) { try { auditPackageVersions(process.argv[2]); } catch (e) { console.error("Correct package versions before proceeding."); process.exit(1); } }
auditPackageVersions
identifier_name
audit-package-versions.js
/** * Make sure that all versions of all required packages are what they oughta be. */ /* eslint-disable no-console */ const { resolve } = require("path"); const fs = require("fs"); function auditPackageVersions(dir) { if (!dir)
const problems = []; const correctVersion = "^" + require(resolve(dir, "lerna.json")).version; const packages = fs.readdirSync(resolve(dir, "packages")); packages.forEach(pkgName => { const pkgJson = resolve(dir, "packages", pkgName, "package.json"); try { fs.statSync(pkgJson); } catch (e) { // No package.json, whatever, it's chill, let's move on return; } const pkg = require(pkgJson); ["dependencies", "peerDependencies", "devDependencies"].forEach( category => { if (!pkg[category]) { return; } const localDeps = Object.keys(pkg[category]).filter(depName => { return packages.indexOf(depName) !== -1; }); localDeps.forEach(depName => { const depVersion = pkg[category][depName]; if (depVersion !== correctVersion) { problems.push( `${pkgName} has ${depName} of ${depVersion} instead of ${correctVersion}` ); } }); } ); }); if (problems.length > 0) { problems.forEach(p => console.error(p)); throw new Error("Package version audit failed"); } } module.exports = auditPackageVersions; if (!module.parent) { try { auditPackageVersions(process.argv[2]); } catch (e) { console.error("Correct package versions before proceeding."); process.exit(1); } }
{ throw new Error("Missing directory!"); }
conditional_block
audit-package-versions.js
/** * Make sure that all versions of all required packages are what they oughta be. */ /* eslint-disable no-console */ const { resolve } = require("path"); const fs = require("fs"); function auditPackageVersions(dir)
module.exports = auditPackageVersions; if (!module.parent) { try { auditPackageVersions(process.argv[2]); } catch (e) { console.error("Correct package versions before proceeding."); process.exit(1); } }
{ if (!dir) { throw new Error("Missing directory!"); } const problems = []; const correctVersion = "^" + require(resolve(dir, "lerna.json")).version; const packages = fs.readdirSync(resolve(dir, "packages")); packages.forEach(pkgName => { const pkgJson = resolve(dir, "packages", pkgName, "package.json"); try { fs.statSync(pkgJson); } catch (e) { // No package.json, whatever, it's chill, let's move on return; } const pkg = require(pkgJson); ["dependencies", "peerDependencies", "devDependencies"].forEach( category => { if (!pkg[category]) { return; } const localDeps = Object.keys(pkg[category]).filter(depName => { return packages.indexOf(depName) !== -1; }); localDeps.forEach(depName => { const depVersion = pkg[category][depName]; if (depVersion !== correctVersion) { problems.push( `${pkgName} has ${depName} of ${depVersion} instead of ${correctVersion}` ); } }); } ); }); if (problems.length > 0) { problems.forEach(p => console.error(p)); throw new Error("Package version audit failed"); } }
identifier_body
audit-package-versions.js
/** * Make sure that all versions of all required packages are what they oughta be. */ /* eslint-disable no-console */ const { resolve } = require("path"); const fs = require("fs"); function auditPackageVersions(dir) { if (!dir) { throw new Error("Missing directory!"); } const problems = [];
const correctVersion = "^" + require(resolve(dir, "lerna.json")).version; const packages = fs.readdirSync(resolve(dir, "packages")); packages.forEach(pkgName => { const pkgJson = resolve(dir, "packages", pkgName, "package.json"); try { fs.statSync(pkgJson); } catch (e) { // No package.json, whatever, it's chill, let's move on return; } const pkg = require(pkgJson); ["dependencies", "peerDependencies", "devDependencies"].forEach( category => { if (!pkg[category]) { return; } const localDeps = Object.keys(pkg[category]).filter(depName => { return packages.indexOf(depName) !== -1; }); localDeps.forEach(depName => { const depVersion = pkg[category][depName]; if (depVersion !== correctVersion) { problems.push( `${pkgName} has ${depName} of ${depVersion} instead of ${correctVersion}` ); } }); } ); }); if (problems.length > 0) { problems.forEach(p => console.error(p)); throw new Error("Package version audit failed"); } } module.exports = auditPackageVersions; if (!module.parent) { try { auditPackageVersions(process.argv[2]); } catch (e) { console.error("Correct package versions before proceeding."); process.exit(1); } }
random_line_split
code-mirror-host.tsx
import * as React from 'react' import CodeMirror, { Doc, EditorChangeLinkedList, Editor, EditorConfiguration, LineHandle, } from 'codemirror' // Required for us to be able to customize the foreground color of selected text import 'codemirror/addon/selection/mark-selection' // Autocompletion plugin import 'codemirror/addon/hint/show-hint' if (__DARWIN__) { // This has to be required to support the `simple` scrollbar style. require('codemirror/addon/scroll/simplescrollbars') } import 'codemirror/addon/search/search' interface ICodeMirrorHostProps { /** * An optional class name for the wrapper element around the * CodeMirror component */ readonly className?: string /** The text contents for the editor */ readonly value: string | Doc /** Any CodeMirror specific settings */ readonly options?: EditorConfiguration /** Callback for diff to control whether selection is enabled */ readonly isSelectionEnabled?: () => boolean /** Callback for when CodeMirror renders (or re-renders) a line */ readonly onRenderLine?: ( cm: Editor, line: LineHandle, elem: HTMLElement ) => void /** Callback for when CodeMirror has completed a batch of changes to the editor */ readonly onChanges?: (cm: Editor, change: EditorChangeLinkedList[]) => void /** Callback for when the viewport changes due to scrolling or other updates */ readonly onViewportChange?: (cm: Editor, from: number, to: number) => void /** Callback for when the editor document is swapped out for a new one */ readonly onSwapDoc?: (cm: Editor, oldDoc: Doc) => void /** * Called after the document has been swapped, meaning that consumers of this * event have access to the updated viewport (as opposed to onSwapDoc) */ readonly onAfterSwapDoc?: (cm: Editor, oldDoc: Doc, newDoc: Doc) => void /** * Called when user want to open context menu. */ readonly onContextMenu?: (cm: Editor, event: Event) => void /** * Called when content has been copied. The default behavior may be prevented * by calling `preventDefault` on the event. 
*/ readonly onCopy?: (editor: Editor, event: Event) => void } /** * Attempts to cancel an active mouse selection in the * given editor by accessing undocumented APIs. This is likely * to break in the future. */ function cancelActiveSelection(cm: Editor) { if (cm.state && cm.state.selectingText instanceof Function) { try { // Simulate a mouseup event which will cause CodeMirror // to abort its currently active selection. If no selection // is active the selectingText property will not be a function // so we won't end up here. cm.state.selectingText(new CustomEvent('fake-event')) } catch (err) { // If we end up here it's likely because CodeMirror has changed // its internal API. // See https://github.com/codemirror/CodeMirror/issues/5821 log.info('Unable to cancel CodeMirror selection', err) } } } /** * A component hosting a CodeMirror instance */ export class CodeMirrorHost extends React.Component<ICodeMirrorHostProps, {}> { private static updateDoc(cm: Editor, value: string | Doc) { if (typeof value === 'string')
else { cancelActiveSelection(cm) cm.swapDoc(value) } } private wrapper: HTMLDivElement | null = null private codeMirror: Editor | null = null /** * Resize observer used for tracking width changes and * refreshing the internal codemirror instance when * they occur */ private readonly resizeObserver: ResizeObserver private resizeDebounceId: number | null = null private lastKnownWidth: number | null = null public constructor(props: ICodeMirrorHostProps) { super(props) // Observe size changes and let codemirror know // when it needs to refresh. this.resizeObserver = new ResizeObserver(entries => { if (entries.length === 1 && this.codeMirror) { const newWidth = entries[0].contentRect.width // We don't care about the first resize, let's just // store what we've got. Codemirror already does a good // job of height changes through monitoring window resize, // we just need to care about when the width changes and // do a re-layout if (this.lastKnownWidth === null) { this.lastKnownWidth = newWidth } else if (this.lastKnownWidth !== newWidth) { this.lastKnownWidth = newWidth if (this.resizeDebounceId !== null) { cancelAnimationFrame(this.resizeDebounceId) this.resizeDebounceId = null } this.resizeDebounceId = requestAnimationFrame(this.onResized) } } }) } /** * Gets the internal CodeMirror instance or null if CodeMirror hasn't * been initialized yet (happens when component mounts) */ public getEditor(): Editor | null { return this.codeMirror } public componentDidMount() { this.codeMirror = CodeMirror(this.wrapper!, this.props.options) this.codeMirror.on('renderLine', this.onRenderLine) this.codeMirror.on('changes', this.onChanges) this.codeMirror.on('viewportChange', this.onViewportChange) this.codeMirror.on('beforeSelectionChange', this.beforeSelectionChanged) this.codeMirror.on('copy', this.onCopy) this.codeMirror.on('contextmenu', this.onContextMenu) this.codeMirror.on('swapDoc', this.onSwapDoc as any) CodeMirrorHost.updateDoc(this.codeMirror, this.props.value) 
this.resizeObserver.observe(this.codeMirror.getWrapperElement()) } private onSwapDoc = (cm: Editor, oldDoc: Doc) => { if (this.props.onSwapDoc) { this.props.onSwapDoc(cm, oldDoc) } } private onContextMenu = (instance: Editor, event: Event) => { if (this.props.onContextMenu) { this.props.onContextMenu(instance, event) } } private onCopy = (instance: Editor, event: Event) => { if (this.props.onCopy) { this.props.onCopy(instance, event) } } public componentWillUnmount() { const cm = this.codeMirror if (cm) { cm.off('changes', this.onChanges) cm.off('viewportChange', this.onViewportChange) cm.off('renderLine', this.onRenderLine) cm.off('beforeSelectionChange', this.beforeSelectionChanged) cm.off('copy', this.onCopy) cm.off('swapDoc', this.onSwapDoc as any) this.codeMirror = null } this.resizeObserver.disconnect() } public componentDidUpdate(prevProps: ICodeMirrorHostProps) { if (this.codeMirror && this.props.value !== prevProps.value) { const oldDoc = this.codeMirror.getDoc() CodeMirrorHost.updateDoc(this.codeMirror, this.props.value) const newDoc = this.codeMirror.getDoc() if (this.props.onAfterSwapDoc) { this.props.onAfterSwapDoc(this.codeMirror, oldDoc, newDoc) } } } private beforeSelectionChanged = (cm: Editor, changeObj: any) => { if (this.props.isSelectionEnabled) { if (!this.props.isSelectionEnabled()) { // ignore whatever the user has currently selected, pass in a // "nothing selected" value // NOTE: // - `head` is the part of the selection that is moving // - `anchor` is the other end changeObj.update([ { head: { line: 0, ch: 0 }, anchor: { line: 0, ch: 0 } }, ]) } } } private onChanges = (cm: Editor, changes: EditorChangeLinkedList[]) => { if (this.props.onChanges) { this.props.onChanges(cm, changes) } } private onViewportChange = (cm: Editor, from: number, to: number) => { if (this.props.onViewportChange) { this.props.onViewportChange(cm, from, to) } } private onRenderLine = (cm: Editor, line: LineHandle, elem: HTMLElement) => { if (this.props.onRenderLine) 
{ this.props.onRenderLine(cm, line, elem) } } private onResized = () => { this.resizeDebounceId = null if (this.codeMirror) { this.codeMirror.refresh() } } private onRef = (ref: HTMLDivElement | null) => { this.wrapper = ref } public render() { return <div className={this.props.className} ref={this.onRef} /> } }
{ cm.setValue(value) }
conditional_block
code-mirror-host.tsx
import * as React from 'react' import CodeMirror, { Doc, EditorChangeLinkedList, Editor, EditorConfiguration, LineHandle, } from 'codemirror' // Required for us to be able to customize the foreground color of selected text import 'codemirror/addon/selection/mark-selection' // Autocompletion plugin import 'codemirror/addon/hint/show-hint' if (__DARWIN__) { // This has to be required to support the `simple` scrollbar style. require('codemirror/addon/scroll/simplescrollbars') } import 'codemirror/addon/search/search' interface ICodeMirrorHostProps { /** * An optional class name for the wrapper element around the * CodeMirror component */ readonly className?: string /** The text contents for the editor */ readonly value: string | Doc /** Any CodeMirror specific settings */ readonly options?: EditorConfiguration /** Callback for diff to control whether selection is enabled */ readonly isSelectionEnabled?: () => boolean /** Callback for when CodeMirror renders (or re-renders) a line */ readonly onRenderLine?: ( cm: Editor, line: LineHandle, elem: HTMLElement ) => void /** Callback for when CodeMirror has completed a batch of changes to the editor */ readonly onChanges?: (cm: Editor, change: EditorChangeLinkedList[]) => void /** Callback for when the viewport changes due to scrolling or other updates */ readonly onViewportChange?: (cm: Editor, from: number, to: number) => void /** Callback for when the editor document is swapped out for a new one */ readonly onSwapDoc?: (cm: Editor, oldDoc: Doc) => void /** * Called after the document has been swapped, meaning that consumers of this * event have access to the updated viewport (as opposed to onSwapDoc) */ readonly onAfterSwapDoc?: (cm: Editor, oldDoc: Doc, newDoc: Doc) => void /** * Called when user want to open context menu. */ readonly onContextMenu?: (cm: Editor, event: Event) => void /** * Called when content has been copied. The default behavior may be prevented * by calling `preventDefault` on the event. 
*/ readonly onCopy?: (editor: Editor, event: Event) => void } /** * Attempts to cancel an active mouse selection in the * given editor by accessing undocumented APIs. This is likely * to break in the future. */ function cancelActiveSelection(cm: Editor) { if (cm.state && cm.state.selectingText instanceof Function) { try { // Simulate a mouseup event which will cause CodeMirror // to abort its currently active selection. If no selection // is active the selectingText property will not be a function // so we won't end up here. cm.state.selectingText(new CustomEvent('fake-event')) } catch (err) { // If we end up here it's likely because CodeMirror has changed // its internal API. // See https://github.com/codemirror/CodeMirror/issues/5821 log.info('Unable to cancel CodeMirror selection', err) } } } /** * A component hosting a CodeMirror instance */ export class CodeMirrorHost extends React.Component<ICodeMirrorHostProps, {}> { private static updateDoc(cm: Editor, value: string | Doc) { if (typeof value === 'string') { cm.setValue(value) } else { cancelActiveSelection(cm) cm.swapDoc(value) } } private wrapper: HTMLDivElement | null = null private codeMirror: Editor | null = null /** * Resize observer used for tracking width changes and * refreshing the internal codemirror instance when * they occur */ private readonly resizeObserver: ResizeObserver private resizeDebounceId: number | null = null private lastKnownWidth: number | null = null public constructor(props: ICodeMirrorHostProps) { super(props) // Observe size changes and let codemirror know // when it needs to refresh. this.resizeObserver = new ResizeObserver(entries => {
if (entries.length === 1 && this.codeMirror) { const newWidth = entries[0].contentRect.width // We don't care about the first resize, let's just // store what we've got. Codemirror already does a good // job of height changes through monitoring window resize, // we just need to care about when the width changes and // do a re-layout if (this.lastKnownWidth === null) { this.lastKnownWidth = newWidth } else if (this.lastKnownWidth !== newWidth) { this.lastKnownWidth = newWidth if (this.resizeDebounceId !== null) { cancelAnimationFrame(this.resizeDebounceId) this.resizeDebounceId = null } this.resizeDebounceId = requestAnimationFrame(this.onResized) } } }) } /** * Gets the internal CodeMirror instance or null if CodeMirror hasn't * been initialized yet (happens when component mounts) */ public getEditor(): Editor | null { return this.codeMirror } public componentDidMount() { this.codeMirror = CodeMirror(this.wrapper!, this.props.options) this.codeMirror.on('renderLine', this.onRenderLine) this.codeMirror.on('changes', this.onChanges) this.codeMirror.on('viewportChange', this.onViewportChange) this.codeMirror.on('beforeSelectionChange', this.beforeSelectionChanged) this.codeMirror.on('copy', this.onCopy) this.codeMirror.on('contextmenu', this.onContextMenu) this.codeMirror.on('swapDoc', this.onSwapDoc as any) CodeMirrorHost.updateDoc(this.codeMirror, this.props.value) this.resizeObserver.observe(this.codeMirror.getWrapperElement()) } private onSwapDoc = (cm: Editor, oldDoc: Doc) => { if (this.props.onSwapDoc) { this.props.onSwapDoc(cm, oldDoc) } } private onContextMenu = (instance: Editor, event: Event) => { if (this.props.onContextMenu) { this.props.onContextMenu(instance, event) } } private onCopy = (instance: Editor, event: Event) => { if (this.props.onCopy) { this.props.onCopy(instance, event) } } public componentWillUnmount() { const cm = this.codeMirror if (cm) { cm.off('changes', this.onChanges) cm.off('viewportChange', this.onViewportChange) 
cm.off('renderLine', this.onRenderLine) cm.off('beforeSelectionChange', this.beforeSelectionChanged) cm.off('copy', this.onCopy) cm.off('swapDoc', this.onSwapDoc as any) this.codeMirror = null } this.resizeObserver.disconnect() } public componentDidUpdate(prevProps: ICodeMirrorHostProps) { if (this.codeMirror && this.props.value !== prevProps.value) { const oldDoc = this.codeMirror.getDoc() CodeMirrorHost.updateDoc(this.codeMirror, this.props.value) const newDoc = this.codeMirror.getDoc() if (this.props.onAfterSwapDoc) { this.props.onAfterSwapDoc(this.codeMirror, oldDoc, newDoc) } } } private beforeSelectionChanged = (cm: Editor, changeObj: any) => { if (this.props.isSelectionEnabled) { if (!this.props.isSelectionEnabled()) { // ignore whatever the user has currently selected, pass in a // "nothing selected" value // NOTE: // - `head` is the part of the selection that is moving // - `anchor` is the other end changeObj.update([ { head: { line: 0, ch: 0 }, anchor: { line: 0, ch: 0 } }, ]) } } } private onChanges = (cm: Editor, changes: EditorChangeLinkedList[]) => { if (this.props.onChanges) { this.props.onChanges(cm, changes) } } private onViewportChange = (cm: Editor, from: number, to: number) => { if (this.props.onViewportChange) { this.props.onViewportChange(cm, from, to) } } private onRenderLine = (cm: Editor, line: LineHandle, elem: HTMLElement) => { if (this.props.onRenderLine) { this.props.onRenderLine(cm, line, elem) } } private onResized = () => { this.resizeDebounceId = null if (this.codeMirror) { this.codeMirror.refresh() } } private onRef = (ref: HTMLDivElement | null) => { this.wrapper = ref } public render() { return <div className={this.props.className} ref={this.onRef} /> } }
random_line_split
code-mirror-host.tsx
import * as React from 'react' import CodeMirror, { Doc, EditorChangeLinkedList, Editor, EditorConfiguration, LineHandle, } from 'codemirror' // Required for us to be able to customize the foreground color of selected text import 'codemirror/addon/selection/mark-selection' // Autocompletion plugin import 'codemirror/addon/hint/show-hint' if (__DARWIN__) { // This has to be required to support the `simple` scrollbar style. require('codemirror/addon/scroll/simplescrollbars') } import 'codemirror/addon/search/search' interface ICodeMirrorHostProps { /** * An optional class name for the wrapper element around the * CodeMirror component */ readonly className?: string /** The text contents for the editor */ readonly value: string | Doc /** Any CodeMirror specific settings */ readonly options?: EditorConfiguration /** Callback for diff to control whether selection is enabled */ readonly isSelectionEnabled?: () => boolean /** Callback for when CodeMirror renders (or re-renders) a line */ readonly onRenderLine?: ( cm: Editor, line: LineHandle, elem: HTMLElement ) => void /** Callback for when CodeMirror has completed a batch of changes to the editor */ readonly onChanges?: (cm: Editor, change: EditorChangeLinkedList[]) => void /** Callback for when the viewport changes due to scrolling or other updates */ readonly onViewportChange?: (cm: Editor, from: number, to: number) => void /** Callback for when the editor document is swapped out for a new one */ readonly onSwapDoc?: (cm: Editor, oldDoc: Doc) => void /** * Called after the document has been swapped, meaning that consumers of this * event have access to the updated viewport (as opposed to onSwapDoc) */ readonly onAfterSwapDoc?: (cm: Editor, oldDoc: Doc, newDoc: Doc) => void /** * Called when user want to open context menu. */ readonly onContextMenu?: (cm: Editor, event: Event) => void /** * Called when content has been copied. The default behavior may be prevented * by calling `preventDefault` on the event. 
*/ readonly onCopy?: (editor: Editor, event: Event) => void } /** * Attempts to cancel an active mouse selection in the * given editor by accessing undocumented APIs. This is likely * to break in the future. */ function cancelActiveSelection(cm: Editor)
/** * A component hosting a CodeMirror instance */ export class CodeMirrorHost extends React.Component<ICodeMirrorHostProps, {}> { private static updateDoc(cm: Editor, value: string | Doc) { if (typeof value === 'string') { cm.setValue(value) } else { cancelActiveSelection(cm) cm.swapDoc(value) } } private wrapper: HTMLDivElement | null = null private codeMirror: Editor | null = null /** * Resize observer used for tracking width changes and * refreshing the internal codemirror instance when * they occur */ private readonly resizeObserver: ResizeObserver private resizeDebounceId: number | null = null private lastKnownWidth: number | null = null public constructor(props: ICodeMirrorHostProps) { super(props) // Observe size changes and let codemirror know // when it needs to refresh. this.resizeObserver = new ResizeObserver(entries => { if (entries.length === 1 && this.codeMirror) { const newWidth = entries[0].contentRect.width // We don't care about the first resize, let's just // store what we've got. 
Codemirror already does a good // job of height changes through monitoring window resize, // we just need to care about when the width changes and // do a re-layout if (this.lastKnownWidth === null) { this.lastKnownWidth = newWidth } else if (this.lastKnownWidth !== newWidth) { this.lastKnownWidth = newWidth if (this.resizeDebounceId !== null) { cancelAnimationFrame(this.resizeDebounceId) this.resizeDebounceId = null } this.resizeDebounceId = requestAnimationFrame(this.onResized) } } }) } /** * Gets the internal CodeMirror instance or null if CodeMirror hasn't * been initialized yet (happens when component mounts) */ public getEditor(): Editor | null { return this.codeMirror } public componentDidMount() { this.codeMirror = CodeMirror(this.wrapper!, this.props.options) this.codeMirror.on('renderLine', this.onRenderLine) this.codeMirror.on('changes', this.onChanges) this.codeMirror.on('viewportChange', this.onViewportChange) this.codeMirror.on('beforeSelectionChange', this.beforeSelectionChanged) this.codeMirror.on('copy', this.onCopy) this.codeMirror.on('contextmenu', this.onContextMenu) this.codeMirror.on('swapDoc', this.onSwapDoc as any) CodeMirrorHost.updateDoc(this.codeMirror, this.props.value) this.resizeObserver.observe(this.codeMirror.getWrapperElement()) } private onSwapDoc = (cm: Editor, oldDoc: Doc) => { if (this.props.onSwapDoc) { this.props.onSwapDoc(cm, oldDoc) } } private onContextMenu = (instance: Editor, event: Event) => { if (this.props.onContextMenu) { this.props.onContextMenu(instance, event) } } private onCopy = (instance: Editor, event: Event) => { if (this.props.onCopy) { this.props.onCopy(instance, event) } } public componentWillUnmount() { const cm = this.codeMirror if (cm) { cm.off('changes', this.onChanges) cm.off('viewportChange', this.onViewportChange) cm.off('renderLine', this.onRenderLine) cm.off('beforeSelectionChange', this.beforeSelectionChanged) cm.off('copy', this.onCopy) cm.off('swapDoc', this.onSwapDoc as any) this.codeMirror = 
null } this.resizeObserver.disconnect() } public componentDidUpdate(prevProps: ICodeMirrorHostProps) { if (this.codeMirror && this.props.value !== prevProps.value) { const oldDoc = this.codeMirror.getDoc() CodeMirrorHost.updateDoc(this.codeMirror, this.props.value) const newDoc = this.codeMirror.getDoc() if (this.props.onAfterSwapDoc) { this.props.onAfterSwapDoc(this.codeMirror, oldDoc, newDoc) } } } private beforeSelectionChanged = (cm: Editor, changeObj: any) => { if (this.props.isSelectionEnabled) { if (!this.props.isSelectionEnabled()) { // ignore whatever the user has currently selected, pass in a // "nothing selected" value // NOTE: // - `head` is the part of the selection that is moving // - `anchor` is the other end changeObj.update([ { head: { line: 0, ch: 0 }, anchor: { line: 0, ch: 0 } }, ]) } } } private onChanges = (cm: Editor, changes: EditorChangeLinkedList[]) => { if (this.props.onChanges) { this.props.onChanges(cm, changes) } } private onViewportChange = (cm: Editor, from: number, to: number) => { if (this.props.onViewportChange) { this.props.onViewportChange(cm, from, to) } } private onRenderLine = (cm: Editor, line: LineHandle, elem: HTMLElement) => { if (this.props.onRenderLine) { this.props.onRenderLine(cm, line, elem) } } private onResized = () => { this.resizeDebounceId = null if (this.codeMirror) { this.codeMirror.refresh() } } private onRef = (ref: HTMLDivElement | null) => { this.wrapper = ref } public render() { return <div className={this.props.className} ref={this.onRef} /> } }
{ if (cm.state && cm.state.selectingText instanceof Function) { try { // Simulate a mouseup event which will cause CodeMirror // to abort its currently active selection. If no selection // is active the selectingText property will not be a function // so we won't end up here. cm.state.selectingText(new CustomEvent('fake-event')) } catch (err) { // If we end up here it's likely because CodeMirror has changed // its internal API. // See https://github.com/codemirror/CodeMirror/issues/5821 log.info('Unable to cancel CodeMirror selection', err) } } }
identifier_body
code-mirror-host.tsx
import * as React from 'react' import CodeMirror, { Doc, EditorChangeLinkedList, Editor, EditorConfiguration, LineHandle, } from 'codemirror' // Required for us to be able to customize the foreground color of selected text import 'codemirror/addon/selection/mark-selection' // Autocompletion plugin import 'codemirror/addon/hint/show-hint' if (__DARWIN__) { // This has to be required to support the `simple` scrollbar style. require('codemirror/addon/scroll/simplescrollbars') } import 'codemirror/addon/search/search' interface ICodeMirrorHostProps { /** * An optional class name for the wrapper element around the * CodeMirror component */ readonly className?: string /** The text contents for the editor */ readonly value: string | Doc /** Any CodeMirror specific settings */ readonly options?: EditorConfiguration /** Callback for diff to control whether selection is enabled */ readonly isSelectionEnabled?: () => boolean /** Callback for when CodeMirror renders (or re-renders) a line */ readonly onRenderLine?: ( cm: Editor, line: LineHandle, elem: HTMLElement ) => void /** Callback for when CodeMirror has completed a batch of changes to the editor */ readonly onChanges?: (cm: Editor, change: EditorChangeLinkedList[]) => void /** Callback for when the viewport changes due to scrolling or other updates */ readonly onViewportChange?: (cm: Editor, from: number, to: number) => void /** Callback for when the editor document is swapped out for a new one */ readonly onSwapDoc?: (cm: Editor, oldDoc: Doc) => void /** * Called after the document has been swapped, meaning that consumers of this * event have access to the updated viewport (as opposed to onSwapDoc) */ readonly onAfterSwapDoc?: (cm: Editor, oldDoc: Doc, newDoc: Doc) => void /** * Called when user want to open context menu. */ readonly onContextMenu?: (cm: Editor, event: Event) => void /** * Called when content has been copied. The default behavior may be prevented * by calling `preventDefault` on the event. 
*/ readonly onCopy?: (editor: Editor, event: Event) => void } /** * Attempts to cancel an active mouse selection in the * given editor by accessing undocumented APIs. This is likely * to break in the future. */ function cancelActiveSelection(cm: Editor) { if (cm.state && cm.state.selectingText instanceof Function) { try { // Simulate a mouseup event which will cause CodeMirror // to abort its currently active selection. If no selection // is active the selectingText property will not be a function // so we won't end up here. cm.state.selectingText(new CustomEvent('fake-event')) } catch (err) { // If we end up here it's likely because CodeMirror has changed // its internal API. // See https://github.com/codemirror/CodeMirror/issues/5821 log.info('Unable to cancel CodeMirror selection', err) } } } /** * A component hosting a CodeMirror instance */ export class CodeMirrorHost extends React.Component<ICodeMirrorHostProps, {}> { private static updateDoc(cm: Editor, value: string | Doc) { if (typeof value === 'string') { cm.setValue(value) } else { cancelActiveSelection(cm) cm.swapDoc(value) } } private wrapper: HTMLDivElement | null = null private codeMirror: Editor | null = null /** * Resize observer used for tracking width changes and * refreshing the internal codemirror instance when * they occur */ private readonly resizeObserver: ResizeObserver private resizeDebounceId: number | null = null private lastKnownWidth: number | null = null public constructor(props: ICodeMirrorHostProps) { super(props) // Observe size changes and let codemirror know // when it needs to refresh. this.resizeObserver = new ResizeObserver(entries => { if (entries.length === 1 && this.codeMirror) { const newWidth = entries[0].contentRect.width // We don't care about the first resize, let's just // store what we've got. 
Codemirror already does a good // job of height changes through monitoring window resize, // we just need to care about when the width changes and // do a re-layout if (this.lastKnownWidth === null) { this.lastKnownWidth = newWidth } else if (this.lastKnownWidth !== newWidth) { this.lastKnownWidth = newWidth if (this.resizeDebounceId !== null) { cancelAnimationFrame(this.resizeDebounceId) this.resizeDebounceId = null } this.resizeDebounceId = requestAnimationFrame(this.onResized) } } }) } /** * Gets the internal CodeMirror instance or null if CodeMirror hasn't * been initialized yet (happens when component mounts) */ public getEditor(): Editor | null { return this.codeMirror } public
() { this.codeMirror = CodeMirror(this.wrapper!, this.props.options) this.codeMirror.on('renderLine', this.onRenderLine) this.codeMirror.on('changes', this.onChanges) this.codeMirror.on('viewportChange', this.onViewportChange) this.codeMirror.on('beforeSelectionChange', this.beforeSelectionChanged) this.codeMirror.on('copy', this.onCopy) this.codeMirror.on('contextmenu', this.onContextMenu) this.codeMirror.on('swapDoc', this.onSwapDoc as any) CodeMirrorHost.updateDoc(this.codeMirror, this.props.value) this.resizeObserver.observe(this.codeMirror.getWrapperElement()) } private onSwapDoc = (cm: Editor, oldDoc: Doc) => { if (this.props.onSwapDoc) { this.props.onSwapDoc(cm, oldDoc) } } private onContextMenu = (instance: Editor, event: Event) => { if (this.props.onContextMenu) { this.props.onContextMenu(instance, event) } } private onCopy = (instance: Editor, event: Event) => { if (this.props.onCopy) { this.props.onCopy(instance, event) } } public componentWillUnmount() { const cm = this.codeMirror if (cm) { cm.off('changes', this.onChanges) cm.off('viewportChange', this.onViewportChange) cm.off('renderLine', this.onRenderLine) cm.off('beforeSelectionChange', this.beforeSelectionChanged) cm.off('copy', this.onCopy) cm.off('swapDoc', this.onSwapDoc as any) this.codeMirror = null } this.resizeObserver.disconnect() } public componentDidUpdate(prevProps: ICodeMirrorHostProps) { if (this.codeMirror && this.props.value !== prevProps.value) { const oldDoc = this.codeMirror.getDoc() CodeMirrorHost.updateDoc(this.codeMirror, this.props.value) const newDoc = this.codeMirror.getDoc() if (this.props.onAfterSwapDoc) { this.props.onAfterSwapDoc(this.codeMirror, oldDoc, newDoc) } } } private beforeSelectionChanged = (cm: Editor, changeObj: any) => { if (this.props.isSelectionEnabled) { if (!this.props.isSelectionEnabled()) { // ignore whatever the user has currently selected, pass in a // "nothing selected" value // NOTE: // - `head` is the part of the selection that is moving // - 
`anchor` is the other end changeObj.update([ { head: { line: 0, ch: 0 }, anchor: { line: 0, ch: 0 } }, ]) } } } private onChanges = (cm: Editor, changes: EditorChangeLinkedList[]) => { if (this.props.onChanges) { this.props.onChanges(cm, changes) } } private onViewportChange = (cm: Editor, from: number, to: number) => { if (this.props.onViewportChange) { this.props.onViewportChange(cm, from, to) } } private onRenderLine = (cm: Editor, line: LineHandle, elem: HTMLElement) => { if (this.props.onRenderLine) { this.props.onRenderLine(cm, line, elem) } } private onResized = () => { this.resizeDebounceId = null if (this.codeMirror) { this.codeMirror.refresh() } } private onRef = (ref: HTMLDivElement | null) => { this.wrapper = ref } public render() { return <div className={this.props.className} ref={this.onRef} /> } }
componentDidMount
identifier_name
lib.py
"""Suit + values are ints""" from random import shuffle class Card: suits = ("spades", "hearts", "diamonds", "clubs") values = (None, None, '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack','Queen', 'King', 'Ace') def __init__(self, v, s): self.suite = s self.value = v def __lt__(self,c2): if self.value < c2.value:
if self.value == c2.value: if self.suite < c2.suite: return True else: return False return False def __gt__(self,c2): if self.value > c2.value: return True if self.value == c2.value: if self.suite > c2.suite: return True else: return False return False def __repr__(self): v = self.values[self.value] +\ " of " + \ self.suits[self.suite] return v class Deck(): def __init__(self): self.cards = [] for i in range(2,15): for j in range(0,4): self.cards\ .append(Card(i, j)) shuffle(self.cards) def rm_card(self): if len(self.cards) == 0: return return self.cards.pop()
return True
conditional_block
lib.py
"""Suit + values are ints""" from random import shuffle class Card: suits = ("spades", "hearts", "diamonds", "clubs") values = (None, None, '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack','Queen', 'King', 'Ace') def __init__(self, v, s): self.suite = s self.value = v def __lt__(self,c2): if self.value < c2.value: return True if self.value == c2.value: if self.suite < c2.suite:
else: return False return False def __gt__(self,c2): if self.value > c2.value: return True if self.value == c2.value: if self.suite > c2.suite: return True else: return False return False def __repr__(self): v = self.values[self.value] +\ " of " + \ self.suits[self.suite] return v class Deck(): def __init__(self): self.cards = [] for i in range(2,15): for j in range(0,4): self.cards\ .append(Card(i, j)) shuffle(self.cards) def rm_card(self): if len(self.cards) == 0: return return self.cards.pop()
return True
random_line_split
lib.py
"""Suit + values are ints""" from random import shuffle class Card: suits = ("spades", "hearts", "diamonds", "clubs") values = (None, None, '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack','Queen', 'King', 'Ace') def __init__(self, v, s): self.suite = s self.value = v def __lt__(self,c2): if self.value < c2.value: return True if self.value == c2.value: if self.suite < c2.suite: return True else: return False return False def __gt__(self,c2):
def __repr__(self): v = self.values[self.value] +\ " of " + \ self.suits[self.suite] return v class Deck(): def __init__(self): self.cards = [] for i in range(2,15): for j in range(0,4): self.cards\ .append(Card(i, j)) shuffle(self.cards) def rm_card(self): if len(self.cards) == 0: return return self.cards.pop()
if self.value > c2.value: return True if self.value == c2.value: if self.suite > c2.suite: return True else: return False return False
identifier_body
lib.py
"""Suit + values are ints""" from random import shuffle class Card: suits = ("spades", "hearts", "diamonds", "clubs") values = (None, None, '2', '3', '4', '5', '6', '7', '8', '9', '10', 'Jack','Queen', 'King', 'Ace') def __init__(self, v, s): self.suite = s self.value = v def
(self,c2): if self.value < c2.value: return True if self.value == c2.value: if self.suite < c2.suite: return True else: return False return False def __gt__(self,c2): if self.value > c2.value: return True if self.value == c2.value: if self.suite > c2.suite: return True else: return False return False def __repr__(self): v = self.values[self.value] +\ " of " + \ self.suits[self.suite] return v class Deck(): def __init__(self): self.cards = [] for i in range(2,15): for j in range(0,4): self.cards\ .append(Card(i, j)) shuffle(self.cards) def rm_card(self): if len(self.cards) == 0: return return self.cards.pop()
__lt__
identifier_name
WalletRecoveryPhraseDisplayDialog.tsx
import React, { Component } from 'react'; import { observer } from 'mobx-react'; import classnames from 'classnames'; import { defineMessages, intlShape, FormattedHTMLMessage } from 'react-intl'; import WalletRecoveryPhraseMnemonic from './WalletRecoveryPhraseMnemonic'; import DialogCloseButton from '../../widgets/DialogCloseButton'; import Dialog from '../../widgets/Dialog'; import WalletRecoveryInstructions from './WalletRecoveryInstructions'; import globalMessages from '../../../i18n/global-messages'; import styles from './WalletRecoveryPhraseDisplayDialog.scss'; import { WALLET_RECOVERY_PHRASE_WORD_COUNT } from '../../../config/cryptoConfig'; import LoadingSpinner from '../../widgets/LoadingSpinner'; const messages = defineMessages({ backupInstructions: { id: 'wallet.backup.recovery.phrase.display.dialog.backup.instructions', defaultMessage: '!!!Please make sure you write down the {walletRecoveryPhraseWordCount} words of your wallet recovery phrase <strong>on a piece of paper in the exact order shown here</strong>.', description: 'Instructions for backing up wallet recovery phrase on dialog that displays wallet recovery phrase.', }, buttonLabelIHaveWrittenItDown: { id: 'wallet.backup.recovery.phrase.display.dialog.button.label.iHaveWrittenItDown', defaultMessage: '!!!Yes, I have written down my wallet recovery phrase.', description: 'Label for button "Yes, I have written down my wallet recovery phrase." on wallet backup dialog',
onCancelBackup: (...args: Array<any>) => any; isSubmitting: boolean; }; @observer class WalletRecoveryPhraseDisplayDialog extends Component<Props> { static contextTypes = { intl: intlShape.isRequired, }; render() { const { intl } = this.context; const { recoveryPhrase, onStartWalletBackup, onCancelBackup, isSubmitting, } = this.props; const dialogClasses = classnames([ styles.component, 'WalletRecoveryPhraseDisplayDialog', ]); const buttonLabel = !isSubmitting ? ( intl.formatMessage(messages.buttonLabelIHaveWrittenItDown) ) : ( <LoadingSpinner /> ); const actions = [ { label: buttonLabel, onClick: onStartWalletBackup, primary: true, }, ]; return ( <Dialog className={dialogClasses} title={intl.formatMessage(globalMessages.recoveryPhraseDialogTitle)} actions={actions} onClose={onCancelBackup} closeOnOverlayClick={false} closeButton={<DialogCloseButton onClose={onCancelBackup} />} > <WalletRecoveryInstructions instructionsText={ <FormattedHTMLMessage {...messages.backupInstructions} values={{ walletRecoveryPhraseWordCount: WALLET_RECOVERY_PHRASE_WORD_COUNT, }} /> } /> <WalletRecoveryPhraseMnemonic phrase={recoveryPhrase} /> </Dialog> ); } } export default WalletRecoveryPhraseDisplayDialog;
}, }); type Props = { recoveryPhrase: string; onStartWalletBackup: (...args: Array<any>) => any;
random_line_split
WalletRecoveryPhraseDisplayDialog.tsx
import React, { Component } from 'react'; import { observer } from 'mobx-react'; import classnames from 'classnames'; import { defineMessages, intlShape, FormattedHTMLMessage } from 'react-intl'; import WalletRecoveryPhraseMnemonic from './WalletRecoveryPhraseMnemonic'; import DialogCloseButton from '../../widgets/DialogCloseButton'; import Dialog from '../../widgets/Dialog'; import WalletRecoveryInstructions from './WalletRecoveryInstructions'; import globalMessages from '../../../i18n/global-messages'; import styles from './WalletRecoveryPhraseDisplayDialog.scss'; import { WALLET_RECOVERY_PHRASE_WORD_COUNT } from '../../../config/cryptoConfig'; import LoadingSpinner from '../../widgets/LoadingSpinner'; const messages = defineMessages({ backupInstructions: { id: 'wallet.backup.recovery.phrase.display.dialog.backup.instructions', defaultMessage: '!!!Please make sure you write down the {walletRecoveryPhraseWordCount} words of your wallet recovery phrase <strong>on a piece of paper in the exact order shown here</strong>.', description: 'Instructions for backing up wallet recovery phrase on dialog that displays wallet recovery phrase.', }, buttonLabelIHaveWrittenItDown: { id: 'wallet.backup.recovery.phrase.display.dialog.button.label.iHaveWrittenItDown', defaultMessage: '!!!Yes, I have written down my wallet recovery phrase.', description: 'Label for button "Yes, I have written down my wallet recovery phrase." on wallet backup dialog', }, }); type Props = { recoveryPhrase: string; onStartWalletBackup: (...args: Array<any>) => any; onCancelBackup: (...args: Array<any>) => any; isSubmitting: boolean; }; @observer class WalletRecoveryPhraseDisplayDialog extends Component<Props> { static contextTypes = { intl: intlShape.isRequired, };
() { const { intl } = this.context; const { recoveryPhrase, onStartWalletBackup, onCancelBackup, isSubmitting, } = this.props; const dialogClasses = classnames([ styles.component, 'WalletRecoveryPhraseDisplayDialog', ]); const buttonLabel = !isSubmitting ? ( intl.formatMessage(messages.buttonLabelIHaveWrittenItDown) ) : ( <LoadingSpinner /> ); const actions = [ { label: buttonLabel, onClick: onStartWalletBackup, primary: true, }, ]; return ( <Dialog className={dialogClasses} title={intl.formatMessage(globalMessages.recoveryPhraseDialogTitle)} actions={actions} onClose={onCancelBackup} closeOnOverlayClick={false} closeButton={<DialogCloseButton onClose={onCancelBackup} />} > <WalletRecoveryInstructions instructionsText={ <FormattedHTMLMessage {...messages.backupInstructions} values={{ walletRecoveryPhraseWordCount: WALLET_RECOVERY_PHRASE_WORD_COUNT, }} /> } /> <WalletRecoveryPhraseMnemonic phrase={recoveryPhrase} /> </Dialog> ); } } export default WalletRecoveryPhraseDisplayDialog;
render
identifier_name
IPostService.ts
import { User } from 'core/domain/users' import { Post } from 'core/domain/posts' /** * Post service interface * * @export * @interface IPostService */ export interface IPostService { addPost: (post: Post) => Promise<string> updatePost: (post: Post) => Promise<void> deletePost: (postId: string) => Promise<void> getPosts: (currentUserId: string,lastPostId: string, page: number, limit: number) => Promise<{posts: {[postId: string]: Post }[], newLastPostId: string}> /** * Get list of post by user identifier */ getPostsByUserId: (userId: string, lastPostId?: string, page?: number, limit?: number) => Promise<{ posts: { [postId: string]: Post }[], newLastPostId: string }> /** * Get post by the post identifier */
}
getPostById: (postId: string) => Promise<Post>
random_line_split
Comments.js
import { GET_COMMENTS, GET_COMMENT, SUBMIT_VOTE_COMMENT, DELETE_COMMENT, EDIT_COMMENT, ADD_COMMENT, } from '../actions'; import _ from 'lodash'; function commentsReducer(state = {}, action)
export default commentsReducer;
{ switch (action.type) { case GET_COMMENTS: let comments = _.mapKeys( _.orderBy(action.payload.data, 'voteScore', 'desc'), 'id', ); return { ...state, ...comments, }; case GET_COMMENT: return action.payload.data; case SUBMIT_VOTE_COMMENT: let {id, option} = action.payload; let newScore = option === 'upVote' ? state[id].voteScore + 1 : state[id].voteScore - 1; return { ...state, [id]: {...state[id], voteScore: newScore}, }; case DELETE_COMMENT: return _.omit(state, [action.payload]); case EDIT_COMMENT: return state; case ADD_COMMENT: return state; default: return state; } }
identifier_body
Comments.js
import { GET_COMMENTS, GET_COMMENT, SUBMIT_VOTE_COMMENT, DELETE_COMMENT, EDIT_COMMENT, ADD_COMMENT, } from '../actions'; import _ from 'lodash'; function
(state = {}, action) { switch (action.type) { case GET_COMMENTS: let comments = _.mapKeys( _.orderBy(action.payload.data, 'voteScore', 'desc'), 'id', ); return { ...state, ...comments, }; case GET_COMMENT: return action.payload.data; case SUBMIT_VOTE_COMMENT: let {id, option} = action.payload; let newScore = option === 'upVote' ? state[id].voteScore + 1 : state[id].voteScore - 1; return { ...state, [id]: {...state[id], voteScore: newScore}, }; case DELETE_COMMENT: return _.omit(state, [action.payload]); case EDIT_COMMENT: return state; case ADD_COMMENT: return state; default: return state; } } export default commentsReducer;
commentsReducer
identifier_name
Comments.js
import { GET_COMMENTS, GET_COMMENT, SUBMIT_VOTE_COMMENT, DELETE_COMMENT,
function commentsReducer(state = {}, action) { switch (action.type) { case GET_COMMENTS: let comments = _.mapKeys( _.orderBy(action.payload.data, 'voteScore', 'desc'), 'id', ); return { ...state, ...comments, }; case GET_COMMENT: return action.payload.data; case SUBMIT_VOTE_COMMENT: let {id, option} = action.payload; let newScore = option === 'upVote' ? state[id].voteScore + 1 : state[id].voteScore - 1; return { ...state, [id]: {...state[id], voteScore: newScore}, }; case DELETE_COMMENT: return _.omit(state, [action.payload]); case EDIT_COMMENT: return state; case ADD_COMMENT: return state; default: return state; } } export default commentsReducer;
EDIT_COMMENT, ADD_COMMENT, } from '../actions'; import _ from 'lodash';
random_line_split
foo.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(lang_items, no_std)] #![no_std] #[lang="copy"] trait Copy { } #[lang="sized"] trait Sized { }
} fn _main() { let _a = unsafe { _foo() }; }
#[lang="start"] fn start(_main: *const u8, _argc: int, _argv: *const *const u8) -> int { 0 } extern { fn _foo() -> [u8; 16];
random_line_split
foo.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(lang_items, no_std)] #![no_std] #[lang="copy"] trait Copy { } #[lang="sized"] trait Sized { } #[lang="start"] fn start(_main: *const u8, _argc: int, _argv: *const *const u8) -> int
extern { fn _foo() -> [u8; 16]; } fn _main() { let _a = unsafe { _foo() }; }
{ 0 }
identifier_body
foo.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms. #![feature(lang_items, no_std)] #![no_std] #[lang="copy"] trait Copy { } #[lang="sized"] trait Sized { } #[lang="start"] fn
(_main: *const u8, _argc: int, _argv: *const *const u8) -> int { 0 } extern { fn _foo() -> [u8; 16]; } fn _main() { let _a = unsafe { _foo() }; }
start
identifier_name
actors.js
var fs = require('fs') , child_process = require('child_process') , _glob = require('glob') , bunch = require('./bunch') ; exports.loadEnv = function loadEnv(env, cb) { var loaders = [] function load(name, cb) { fs.readFile(env[name], function(error, data) { env[name] = env[name].match(/.*\.json$/) ? JSON.parse(data) : data; cb(error, data) }) } for (var name in env) { loaders.push([load, name]) } bunch(loaders, cb) } exports.commandActor = function command(executable) { return function command(args, opts, cb) { if (!cb) { cb = opts; opts = {} } var cmd = child_process.spawn(executable, args, opts); function log(b) { console.log(b.toString()) } cmd.stdout.on('data', log); cmd.stderr.on('data', log); cmd.on('exit', function(code) { if (code) { cb(new Error(executable + ' exited with status ' + code)); } else
}); return cmd; } } exports.jsonParse = function(str, cb) { try { cb(null, JSON.parse(str)); } catch (ex) { cb(ex); } } exports.jsonStringify = function(obj, cb) { try { cb(null, JSON.stringify(obj)); } catch (ex) { cb(ex); } } exports.glob = function glob(pattern, cb) { console.log('pattern', pattern); _glob(pattern, function(error, files) { cb(error, [files]); }); }
{ cb(); }
conditional_block
actors.js
var fs = require('fs') , child_process = require('child_process') , _glob = require('glob') , bunch = require('./bunch') ; exports.loadEnv = function loadEnv(env, cb) { var loaders = [] function load(name, cb)
for (var name in env) { loaders.push([load, name]) } bunch(loaders, cb) } exports.commandActor = function command(executable) { return function command(args, opts, cb) { if (!cb) { cb = opts; opts = {} } var cmd = child_process.spawn(executable, args, opts); function log(b) { console.log(b.toString()) } cmd.stdout.on('data', log); cmd.stderr.on('data', log); cmd.on('exit', function(code) { if (code) { cb(new Error(executable + ' exited with status ' + code)); } else { cb(); } }); return cmd; } } exports.jsonParse = function(str, cb) { try { cb(null, JSON.parse(str)); } catch (ex) { cb(ex); } } exports.jsonStringify = function(obj, cb) { try { cb(null, JSON.stringify(obj)); } catch (ex) { cb(ex); } } exports.glob = function glob(pattern, cb) { console.log('pattern', pattern); _glob(pattern, function(error, files) { cb(error, [files]); }); }
{ fs.readFile(env[name], function(error, data) { env[name] = env[name].match(/.*\.json$/) ? JSON.parse(data) : data; cb(error, data) }) }
identifier_body
actors.js
var fs = require('fs') , child_process = require('child_process') , _glob = require('glob') , bunch = require('./bunch') ; exports.loadEnv = function loadEnv(env, cb) { var loaders = [] function load(name, cb) { fs.readFile(env[name], function(error, data) { env[name] = env[name].match(/.*\.json$/) ? JSON.parse(data) : data; cb(error, data) }) } for (var name in env) { loaders.push([load, name]) } bunch(loaders, cb) } exports.commandActor = function command(executable) { return function command(args, opts, cb) { if (!cb) { cb = opts; opts = {} } var cmd = child_process.spawn(executable, args, opts); function
(b) { console.log(b.toString()) } cmd.stdout.on('data', log); cmd.stderr.on('data', log); cmd.on('exit', function(code) { if (code) { cb(new Error(executable + ' exited with status ' + code)); } else { cb(); } }); return cmd; } } exports.jsonParse = function(str, cb) { try { cb(null, JSON.parse(str)); } catch (ex) { cb(ex); } } exports.jsonStringify = function(obj, cb) { try { cb(null, JSON.stringify(obj)); } catch (ex) { cb(ex); } } exports.glob = function glob(pattern, cb) { console.log('pattern', pattern); _glob(pattern, function(error, files) { cb(error, [files]); }); }
log
identifier_name
actors.js
var fs = require('fs') , child_process = require('child_process') , _glob = require('glob') , bunch = require('./bunch') ; exports.loadEnv = function loadEnv(env, cb) { var loaders = [] function load(name, cb) { fs.readFile(env[name], function(error, data) { env[name] = env[name].match(/.*\.json$/) ? JSON.parse(data) : data; cb(error, data) }) } for (var name in env) { loaders.push([load, name]) } bunch(loaders, cb) } exports.commandActor = function command(executable) { return function command(args, opts, cb) { if (!cb) { cb = opts; opts = {} } var cmd = child_process.spawn(executable, args, opts); function log(b) { console.log(b.toString()) } cmd.stdout.on('data', log); cmd.stderr.on('data', log); cmd.on('exit', function(code) { if (code) { cb(new Error(executable + ' exited with status ' + code)); } else { cb(); } }); return cmd; } } exports.jsonParse = function(str, cb) {
try { cb(null, JSON.parse(str)); } catch (ex) { cb(ex); } } exports.jsonStringify = function(obj, cb) { try { cb(null, JSON.stringify(obj)); } catch (ex) { cb(ex); } } exports.glob = function glob(pattern, cb) { console.log('pattern', pattern); _glob(pattern, function(error, files) { cb(error, [files]); }); }
random_line_split
conf.py
# -*- coding: utf-8 -*- # # Baobab documentation build configuration file, created by # sphinx-quickstart on Tue Dec 7 00:44:28 2010. # # This file is execfile()d with the current directory set to its containing dir. # # Note that not all possible configuration values are present in this # autogenerated file. # # All configuration values have a default; values that are commented out # serve to show the default. import sys, os # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. sys.path.insert(0, os.path.abspath('..')) # -- General configuration ----------------------------------------------------- # If your documentation needs a minimal Sphinx version, state it here. #needs_sphinx = '1.0' # Add any Sphinx extension module names here, as strings. They can be extensions # coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = ['sphinx.ext.todo','jsonext'] # Add any paths that contain templates here, relative to this directory. templates_path = ['_templates/sphinxdoc'] # The suffix of source filenames. source_suffix = '.rst' # The encoding of source files. #source_encoding = 'utf-8-sig' # The master toctree document. master_doc = 'index' # General information about the project. project = u'Baobab' copyright = u'2010, Riccardo Attilio Galli' # The version info for the project you're documenting, acts as replacement for # |version| and |release|, also used in various other places throughout the # built documents. # # The short X.Y version. version = '1.3.1' # The full version, including alpha/beta/rc tags. release = '1.3.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
#language = None # There are two options for replacing |today|: either, you set today to some # non-false value, then it is used: #today = '' # Else, today_fmt is used as the format for a strftime call. #today_fmt = '%B %d, %Y' # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. exclude_patterns = [] # The reST default role (used for this markup: `text`) to use for all documents. #default_role = None # If true, '()' will be appended to :func: etc. cross-reference text. #add_function_parentheses = True # If true, the current module name will be prepended to all description # unit titles (such as .. function::). #add_module_names = True # If true, sectionauthor and moduleauthor directives will be shown in the # output. They are ignored by default. #show_authors = False # The name of the Pygments (syntax highlighting) style to use.
# A list of ignored prefixes for module index sorting. #modindex_common_prefix = [] # -- Options for HTML output --------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. html_theme = 'sphinxdoc' # Theme options are theme-specific and customize the look and feel of a theme # further. For a list of options available for each theme, see the # documentation. #html_theme_options = {} # Add any paths that contain custom themes here, relative to this directory. #html_theme_path = [] # The name for this set of Sphinx documents. If None, it defaults to # "<project> v<release> documentation". #html_title = None # A shorter title for the navigation bar. Default is the same as html_title. #html_short_title = None # The name of an image file (relative to this directory) to place at the top # of the sidebar. html_logo = "logo_baobab_200.png" # The name of an image file (within the static path) to use as favicon of the # docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 # pixels large. html_favicon = 'favicon.png' # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". html_static_path = ['_static'] # If not '', a 'Last updated on:' timestamp is inserted at every page bottom, # using the given strftime format. #html_last_updated_fmt = '%b %d, %Y' # If true, SmartyPants will be used to convert quotes and dashes to # typographically correct entities. #html_use_smartypants = True # Custom sidebar templates, maps document names to template names. 
html_sidebars = { '*': ['globaltoc.html', 'sourcelink.html', 'searchbox.html'], 'index': ['download.html','globaltoc.html', 'sourcelink.html', 'searchbox.html'] } # Additional templates that should be rendered to pages, maps page names to # template names. html_additional_pages = {'example_animals':'animals.html','example_forum':'forum.html'} # If false, no module index is generated. #html_domain_indices = True # If false, no index is generated. #html_use_index = True # If true, the index is split into individual pages for each letter. #html_split_index = False # If true, links to the reST sources are added to the pages. #html_show_sourcelink = True # If true, "Created using Sphinx" is shown in the HTML footer. Default is True. #html_show_sphinx = True # If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. #html_show_copyright = True # If true, an OpenSearch description file will be output, and all pages will # contain a <link> tag referring to it. The value of this option must be the # base URL from which the finished HTML is served. #html_use_opensearch = '' # This is the file name suffix for HTML files (e.g. ".xhtml"). #html_file_suffix = None # Output file base name for HTML help builder. htmlhelp_basename = 'Baobabdoc' # -- Options for LaTeX output -------------------------------------------------- # The paper size ('letter' or 'a4'). #latex_paper_size = 'letter' # The font size ('10pt', '11pt' or '12pt'). #latex_font_size = '10pt' # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass [howto/manual]). latex_documents = [ ('index', 'Baobab.tex', u'Baobab Documentation', u'Riccardo Attilio Galli', 'manual'), ] # The name of an image file (relative to this directory) to place at the top of # the title page. #latex_logo = None # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. 
#latex_use_parts = False # If true, show page references after internal links. #latex_show_pagerefs = False # If true, show URL addresses after external links. #latex_show_urls = False # Additional stuff for the LaTeX preamble. #latex_preamble = '' # Documents to append as an appendix to all manuals. #latex_appendices = [] # If false, no module index is generated. #latex_domain_indices = True # -- Options for manual page output -------------------------------------------- # One entry per manual page. List of tuples # (source start file, name, description, authors, manual section). man_pages = [ ('index', 'baobab', u'Baobab Documentation', [u'Riccardo Attilio Galli'], 1) ]
pygments_style = 'sphinx'
random_line_split
mod.rs
#![allow(dead_code)] // XXX unused code belongs into translation of new extract_data_from_object fn use libc::ptrdiff_t; use md5; use sha1; use sha2::{Digest, Sha224, Sha256, Sha384, Sha512}; use std; use std::slice; use remacs_macros::lisp_fn; use remacs_sys::{make_specified_string, make_uninit_string, nsberror, EmacsInt}; use remacs_sys::{code_convert_string, extract_data_from_object, preferred_coding_system, string_char_to_byte, validate_subarray, Fcoding_system_p}; use remacs_sys::{globals, Ffind_operation_coding_system, Flocal_variable_p}; use remacs_sys::{Qbuffer_file_coding_system, Qcoding_system_error, Qmd5, Qraw_text, Qsha1, Qsha224, Qsha256, Qsha384, Qsha512, Qstringp, Qwrite_region}; use remacs_sys::{current_thread, make_buffer_string, record_unwind_current_buffer, set_buffer_internal}; use buffers::{buffer_file_name, current_buffer, get_buffer, LispBufferRef}; use lisp::{LispNumber, LispObject}; use lisp::defsubr; use multibyte::LispStringRef; use symbols::{fboundp, symbol_name}; use threads::ThreadState; #[derive(Clone, Copy)] enum HashAlg { MD5, SHA1, SHA224, SHA256, SHA384, SHA512, } static MD5_DIGEST_LEN: usize = 16; static SHA1_DIGEST_LEN: usize = 20; static SHA224_DIGEST_LEN: usize = 224 / 8; static SHA256_DIGEST_LEN: usize = 256 / 8; static SHA384_DIGEST_LEN: usize = 384 / 8; static SHA512_DIGEST_LEN: usize = 512 / 8; fn hash_alg(algorithm: LispObject) -> HashAlg { algorithm.as_symbol_or_error(); if algorithm.to_raw() == Qmd5 { HashAlg::MD5 } else if algorithm.to_raw() == Qsha1 { HashAlg::SHA1 } else if algorithm.to_raw() == Qsha224 { HashAlg::SHA224 } else if algorithm.to_raw() == Qsha256 { HashAlg::SHA256 } else if algorithm.to_raw() == Qsha384 { HashAlg::SHA384 } else if algorithm.to_raw() == Qsha512 { HashAlg::SHA512 } else { let name = symbol_name(algorithm).as_string_or_error(); error!("Invalid algorithm arg: {:?}\0", &name.as_slice()); } } fn check_coding_system_or_error(coding_system: LispObject, noerror: LispObject) -> LispObject { if 
LispObject::from(unsafe { Fcoding_system_p(coding_system.to_raw()) }).is_nil() { /* Invalid coding system. */ if noerror.is_not_nil() { LispObject::from(Qraw_text) } else { xsignal!(Qcoding_system_error, coding_system); } } else { coding_system } } fn get_coding_system_for_string(string: LispStringRef, coding_system: LispObject) -> LispObject { if coding_system.is_nil() { /* Decide the coding-system to encode the data with. */ if string.is_multibyte() { /* use default, we can't guess correct value */ LispObject::from(unsafe { preferred_coding_system() }) } else { LispObject::from(Qraw_text) } } else { coding_system } } fn get_coding_system_for_buffer( object: LispObject, buffer: LispBufferRef, start: LispObject, end: LispObject, start_byte: ptrdiff_t, end_byte: ptrdiff_t, coding_system: LispObject, ) -> LispObject { /* Decide the coding-system to encode the data with. See fileio.c:Fwrite-region */ if coding_system.is_not_nil() { return coding_system; } if LispObject::from(unsafe { globals.f_Vcoding_system_for_write }).is_not_nil() { return LispObject::from(unsafe { globals.f_Vcoding_system_for_write }); } if LispObject::from(buffer.buffer_file_coding_system).is_nil() || LispObject::from(unsafe { Flocal_variable_p( Qbuffer_file_coding_system, LispObject::constant_nil().to_raw(), ) }).is_nil() { if LispObject::from(buffer.enable_multibyte_characters).is_nil() { return LispObject::from(Qraw_text); } } if buffer_file_name(object).is_not_nil() { /* Check file-coding-system-alist. */ let mut args = [ Qwrite_region, start.to_raw(), end.to_raw(), buffer_file_name(object).to_raw(), ]; let val = LispObject::from(unsafe { Ffind_operation_coding_system(4, args.as_mut_ptr()) }); if val.is_cons() && val.as_cons_or_error().cdr().is_not_nil() { return val.as_cons_or_error().cdr(); } } if LispObject::from(buffer.buffer_file_coding_system).is_not_nil() { /* If we still have not decided a coding system, use the default value of buffer-file-coding-system. 
*/ return LispObject::from(buffer.buffer_file_coding_system); } let sscsf = LispObject::from(unsafe { globals.f_Vselect_safe_coding_system_function }); if fboundp(sscsf).is_not_nil() { /* Confirm that VAL can surely encode the current region. */ return call!( sscsf, LispObject::from_natnum(start_byte as EmacsInt), LispObject::from_natnum(end_byte as EmacsInt), coding_system, LispObject::constant_nil() ); } LispObject::constant_nil() } fn get_input_from_string( object: LispObject, string: LispStringRef, start: LispObject, end: LispObject, ) -> LispObject { let size: ptrdiff_t; let start_byte: ptrdiff_t; let end_byte: ptrdiff_t; let mut start_char: ptrdiff_t = 0; let mut end_char: ptrdiff_t = 0; size = string.len_bytes(); unsafe { validate_subarray( object.to_raw(), start.to_raw(), end.to_raw(), size, &mut start_char, &mut end_char, ); } start_byte = if start_char == 0 { 0 } else { unsafe { string_char_to_byte(object.to_raw(), start_char) } }; end_byte = if end_char == size { string.len_bytes() } else { unsafe { string_char_to_byte(object.to_raw(), end_char) } }; if start_byte == 0 && end_byte == size { object } else { LispObject::from(unsafe { make_specified_string( string.const_sdata_ptr().offset(start_byte), -1 as ptrdiff_t, end_byte - start_byte, string.is_multibyte(), ) }) } } fn get_input_from_buffer( mut buffer: LispBufferRef, start: LispObject, end: LispObject, start_byte: &mut ptrdiff_t, end_byte: &mut ptrdiff_t, ) -> LispObject { let prev_buffer = ThreadState::current_buffer().as_mut(); unsafe { record_unwind_current_buffer() }; unsafe { set_buffer_internal(buffer.as_mut()) }; *start_byte = if start.is_nil() { buffer.begv } else { match start.as_number_coerce_marker_or_error() { LispNumber::Fixnum(n) => n as ptrdiff_t, LispNumber::Float(n) => n as ptrdiff_t, } }; *end_byte = if end.is_nil() { buffer.zv } else { match end.as_number_coerce_marker_or_error() { LispNumber::Fixnum(n) => n as ptrdiff_t, LispNumber::Float(n) => n as ptrdiff_t, } }; if start_byte > 
end_byte { std::mem::swap(start_byte, end_byte); } if !(buffer.begv <= *start_byte && *end_byte <= buffer.zv) { args_out_of_range!(start, end); } let string = LispObject::from(unsafe { make_buffer_string(*start_byte, *end_byte, false) }); unsafe { set_buffer_internal(prev_buffer) }; // TODO: this needs to be std::mem::size_of<specbinding>() unsafe { (*current_thread).m_specpdl_ptr = (*current_thread).m_specpdl_ptr.offset(-40) }; string } fn get_input( object: LispObject, string: &mut Option<LispStringRef>, buffer: &Option<LispBufferRef>, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispStringRef { if object.is_string() { if string.unwrap().is_multibyte() { let coding_system = check_coding_system_or_error( get_coding_system_for_string(string.unwrap(), coding_system), noerror, ); *string = Some( LispObject::from(unsafe { code_convert_string( object.to_raw(), coding_system.to_raw(), LispObject::constant_nil().to_raw(), true, false, true, ) }).as_string_or_error(), ) } get_input_from_string(object, string.unwrap(), start, end).as_string_or_error() } else if object.is_buffer() { let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let s = get_input_from_buffer(buffer.unwrap(), start, end, &mut start_byte, &mut end_byte); let ss = s.as_string_or_error(); if ss.is_multibyte() { let coding_system = check_coding_system_or_error( get_coding_system_for_buffer( object, buffer.unwrap(), start, end, start_byte, end_byte, coding_system, ), noerror, ); LispObject::from(unsafe { code_convert_string( s.to_raw(), coding_system.to_raw(), LispObject::constant_nil().to_raw(), true, false, false, ) }).as_string_or_error() } else { ss } } else { wrong_type!(Qstringp, object); } } /// Return MD5 message digest of OBJECT, a buffer or string. /// /// A message digest is a cryptographic checksum of a document, and the /// algorithm to calculate it is defined in RFC 1321. 
/// /// The two optional arguments START and END are character positions /// specifying for which part of OBJECT the message digest should be /// computed. If nil or omitted, the digest is computed for the whole /// OBJECT. /// /// The MD5 message digest is computed from the result of encoding the /// text in a coding system, not directly from the internal Emacs form of /// the text. The optional fourth argument CODING-SYSTEM specifies which /// coding system to encode the text with. It should be the same coding /// system that you used or will use when actually writing the text into a /// file. /// /// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If /// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding /// system would be chosen by default for writing this text into a file. /// /// If OBJECT is a string, the most preferred coding system (see the /// command `prefer-coding-system') is used. /// /// If NOERROR is non-nil, silently assume the `raw-text' coding if the /// guesswork fails. Normally, an error is signaled in such case. #[lisp_fn(min = "1")] pub fn
( object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispObject { _secure_hash( HashAlg::MD5, object, start, end, coding_system, noerror, LispObject::constant_nil(), ) } /// Return the secure hash of OBJECT, a buffer or string. /// ALGORITHM is a symbol specifying the hash to use: /// md5, sha1, sha224, sha256, sha384 or sha512. /// /// The two optional arguments START and END are positions specifying for /// which part of OBJECT to compute the hash. If nil or omitted, uses the /// whole OBJECT. /// /// The full list of algorithms can be obtained with `secure-hash-algorithms'. /// /// If BINARY is non-nil, returns a string in binary form. #[lisp_fn(min = "2")] pub fn secure_hash( algorithm: LispObject, object: LispObject, start: LispObject, end: LispObject, binary: LispObject, ) -> LispObject { _secure_hash( hash_alg(algorithm), object, start, end, LispObject::constant_nil(), LispObject::constant_nil(), binary, ) } fn _secure_hash( algorithm: HashAlg, object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, binary: LispObject, ) -> LispObject { let spec = list!(object, start, end, coding_system, noerror); let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let input = unsafe { extract_data_from_object(spec.to_raw(), &mut start_byte, &mut end_byte) }; if input.is_null() { error!("secure_hash: failed to extract data from object, aborting!"); } let input_slice = unsafe { slice::from_raw_parts( input.offset(start_byte) as *mut u8, (end_byte - start_byte) as usize, ) }; type HashFn = fn(&[u8], &mut [u8]); let (digest_size, hash_func) = match algorithm { HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn), HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn), HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn), HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn), HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as 
HashFn), HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn), }; let buffer_size = if binary.is_nil() { (digest_size * 2) as EmacsInt } else { digest_size as EmacsInt }; let digest = LispObject::from(unsafe { make_uninit_string(buffer_size as EmacsInt) }); let digest_str = digest.as_string_or_error(); hash_func(input_slice, digest_str.as_mut_slice()); if binary.is_nil() { hexify_digest_string(digest_str.as_mut_slice(), digest_size); } digest } /// To avoid a copy, buffer is both the source and the destination of /// this transformation. Buffer must contain len bytes of data and /// 2*len bytes of space for the final hex string. fn hexify_digest_string(buffer: &mut [u8], len: usize) { static hexdigit: [u8; 16] = *b"0123456789abcdef"; debug_assert_eq!( buffer.len(), 2 * len, "buffer must be long enough to hold 2*len hex digits" ); for i in (0..len).rev() { let v = buffer[i]; buffer[2 * i] = hexdigit[(v >> 4) as usize]; buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize]; } } // For the following hash functions, the caller must ensure that the // destination buffer is at least long enough to hold the // digest. Additionally, the caller may have been asked to return a // hex string, in which case dest_buf will be twice as long as the // digest. fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let output = md5::compute(buffer); dest_buf[..output.len()].copy_from_slice(&*output) } fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = sha1::Sha1::new(); hasher.update(buffer); let output = hasher.digest().bytes(); dest_buf[..output.len()].copy_from_slice(&output) } /// Given an instance of `Digest`, and `buffer` write its hash to `dest_buf`. 
fn sha2_hash_buffer<D>(hasher: D, buffer: &[u8], dest_buf: &mut [u8]) where D: Digest, { let mut hasher = hasher; hasher.input(buffer); let output = hasher.result(); dest_buf[..output.len()].copy_from_slice(&output) } fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha224::new(), buffer, dest_buf); } fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha256::new(), buffer, dest_buf); } fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha384::new(), buffer, dest_buf); } fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha512::new(), buffer, dest_buf); } /// Return a hash of the contents of BUFFER-OR-NAME. /// This hash is performed on the raw internal format of the buffer, /// disregarding any coding systems. If nil, use the current buffer. #[lisp_fn(min = "0")] pub fn buffer_hash(buffer_or_name: LispObject) -> LispObject { let buffer = if buffer_or_name.is_nil() { current_buffer() } else { get_buffer(buffer_or_name) }; if buffer.is_nil() { unsafe { nsberror(buffer_or_name.to_raw()) }; } let b = buffer.as_buffer().unwrap(); let mut ctx = sha1::Sha1::new(); ctx.update(unsafe { slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize) }); if b.gpt_byte() < b.z_byte() { ctx.update(unsafe { slice::from_raw_parts( b.gap_end_addr(), (b.z_addr() as usize - b.gap_end_addr() as usize), ) }); } let formatted = ctx.digest().to_string(); let digest = LispObject::from(unsafe { make_uninit_string(formatted.len() as EmacsInt) }); digest .as_string() .unwrap() .as_mut_slice() .copy_from_slice(formatted.as_bytes()); digest } include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));
md5
identifier_name
mod.rs
#![allow(dead_code)] // XXX unused code belongs into translation of new extract_data_from_object fn use libc::ptrdiff_t; use md5; use sha1; use sha2::{Digest, Sha224, Sha256, Sha384, Sha512}; use std; use std::slice; use remacs_macros::lisp_fn; use remacs_sys::{make_specified_string, make_uninit_string, nsberror, EmacsInt}; use remacs_sys::{code_convert_string, extract_data_from_object, preferred_coding_system, string_char_to_byte, validate_subarray, Fcoding_system_p}; use remacs_sys::{globals, Ffind_operation_coding_system, Flocal_variable_p}; use remacs_sys::{Qbuffer_file_coding_system, Qcoding_system_error, Qmd5, Qraw_text, Qsha1, Qsha224, Qsha256, Qsha384, Qsha512, Qstringp, Qwrite_region}; use remacs_sys::{current_thread, make_buffer_string, record_unwind_current_buffer, set_buffer_internal}; use buffers::{buffer_file_name, current_buffer, get_buffer, LispBufferRef};
use lisp::{LispNumber, LispObject}; use lisp::defsubr; use multibyte::LispStringRef; use symbols::{fboundp, symbol_name}; use threads::ThreadState; #[derive(Clone, Copy)] enum HashAlg { MD5, SHA1, SHA224, SHA256, SHA384, SHA512, } static MD5_DIGEST_LEN: usize = 16; static SHA1_DIGEST_LEN: usize = 20; static SHA224_DIGEST_LEN: usize = 224 / 8; static SHA256_DIGEST_LEN: usize = 256 / 8; static SHA384_DIGEST_LEN: usize = 384 / 8; static SHA512_DIGEST_LEN: usize = 512 / 8; fn hash_alg(algorithm: LispObject) -> HashAlg { algorithm.as_symbol_or_error(); if algorithm.to_raw() == Qmd5 { HashAlg::MD5 } else if algorithm.to_raw() == Qsha1 { HashAlg::SHA1 } else if algorithm.to_raw() == Qsha224 { HashAlg::SHA224 } else if algorithm.to_raw() == Qsha256 { HashAlg::SHA256 } else if algorithm.to_raw() == Qsha384 { HashAlg::SHA384 } else if algorithm.to_raw() == Qsha512 { HashAlg::SHA512 } else { let name = symbol_name(algorithm).as_string_or_error(); error!("Invalid algorithm arg: {:?}\0", &name.as_slice()); } } fn check_coding_system_or_error(coding_system: LispObject, noerror: LispObject) -> LispObject { if LispObject::from(unsafe { Fcoding_system_p(coding_system.to_raw()) }).is_nil() { /* Invalid coding system. */ if noerror.is_not_nil() { LispObject::from(Qraw_text) } else { xsignal!(Qcoding_system_error, coding_system); } } else { coding_system } } fn get_coding_system_for_string(string: LispStringRef, coding_system: LispObject) -> LispObject { if coding_system.is_nil() { /* Decide the coding-system to encode the data with. */ if string.is_multibyte() { /* use default, we can't guess correct value */ LispObject::from(unsafe { preferred_coding_system() }) } else { LispObject::from(Qraw_text) } } else { coding_system } } fn get_coding_system_for_buffer( object: LispObject, buffer: LispBufferRef, start: LispObject, end: LispObject, start_byte: ptrdiff_t, end_byte: ptrdiff_t, coding_system: LispObject, ) -> LispObject { /* Decide the coding-system to encode the data with. 
See fileio.c:Fwrite-region */ if coding_system.is_not_nil() { return coding_system; } if LispObject::from(unsafe { globals.f_Vcoding_system_for_write }).is_not_nil() { return LispObject::from(unsafe { globals.f_Vcoding_system_for_write }); } if LispObject::from(buffer.buffer_file_coding_system).is_nil() || LispObject::from(unsafe { Flocal_variable_p( Qbuffer_file_coding_system, LispObject::constant_nil().to_raw(), ) }).is_nil() { if LispObject::from(buffer.enable_multibyte_characters).is_nil() { return LispObject::from(Qraw_text); } } if buffer_file_name(object).is_not_nil() { /* Check file-coding-system-alist. */ let mut args = [ Qwrite_region, start.to_raw(), end.to_raw(), buffer_file_name(object).to_raw(), ]; let val = LispObject::from(unsafe { Ffind_operation_coding_system(4, args.as_mut_ptr()) }); if val.is_cons() && val.as_cons_or_error().cdr().is_not_nil() { return val.as_cons_or_error().cdr(); } } if LispObject::from(buffer.buffer_file_coding_system).is_not_nil() { /* If we still have not decided a coding system, use the default value of buffer-file-coding-system. */ return LispObject::from(buffer.buffer_file_coding_system); } let sscsf = LispObject::from(unsafe { globals.f_Vselect_safe_coding_system_function }); if fboundp(sscsf).is_not_nil() { /* Confirm that VAL can surely encode the current region. 
*/ return call!( sscsf, LispObject::from_natnum(start_byte as EmacsInt), LispObject::from_natnum(end_byte as EmacsInt), coding_system, LispObject::constant_nil() ); } LispObject::constant_nil() } fn get_input_from_string( object: LispObject, string: LispStringRef, start: LispObject, end: LispObject, ) -> LispObject { let size: ptrdiff_t; let start_byte: ptrdiff_t; let end_byte: ptrdiff_t; let mut start_char: ptrdiff_t = 0; let mut end_char: ptrdiff_t = 0; size = string.len_bytes(); unsafe { validate_subarray( object.to_raw(), start.to_raw(), end.to_raw(), size, &mut start_char, &mut end_char, ); } start_byte = if start_char == 0 { 0 } else { unsafe { string_char_to_byte(object.to_raw(), start_char) } }; end_byte = if end_char == size { string.len_bytes() } else { unsafe { string_char_to_byte(object.to_raw(), end_char) } }; if start_byte == 0 && end_byte == size { object } else { LispObject::from(unsafe { make_specified_string( string.const_sdata_ptr().offset(start_byte), -1 as ptrdiff_t, end_byte - start_byte, string.is_multibyte(), ) }) } } fn get_input_from_buffer( mut buffer: LispBufferRef, start: LispObject, end: LispObject, start_byte: &mut ptrdiff_t, end_byte: &mut ptrdiff_t, ) -> LispObject { let prev_buffer = ThreadState::current_buffer().as_mut(); unsafe { record_unwind_current_buffer() }; unsafe { set_buffer_internal(buffer.as_mut()) }; *start_byte = if start.is_nil() { buffer.begv } else { match start.as_number_coerce_marker_or_error() { LispNumber::Fixnum(n) => n as ptrdiff_t, LispNumber::Float(n) => n as ptrdiff_t, } }; *end_byte = if end.is_nil() { buffer.zv } else { match end.as_number_coerce_marker_or_error() { LispNumber::Fixnum(n) => n as ptrdiff_t, LispNumber::Float(n) => n as ptrdiff_t, } }; if start_byte > end_byte { std::mem::swap(start_byte, end_byte); } if !(buffer.begv <= *start_byte && *end_byte <= buffer.zv) { args_out_of_range!(start, end); } let string = LispObject::from(unsafe { make_buffer_string(*start_byte, *end_byte, false) }); 
unsafe { set_buffer_internal(prev_buffer) }; // TODO: this needs to be std::mem::size_of<specbinding>() unsafe { (*current_thread).m_specpdl_ptr = (*current_thread).m_specpdl_ptr.offset(-40) }; string } fn get_input( object: LispObject, string: &mut Option<LispStringRef>, buffer: &Option<LispBufferRef>, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispStringRef { if object.is_string() { if string.unwrap().is_multibyte() { let coding_system = check_coding_system_or_error( get_coding_system_for_string(string.unwrap(), coding_system), noerror, ); *string = Some( LispObject::from(unsafe { code_convert_string( object.to_raw(), coding_system.to_raw(), LispObject::constant_nil().to_raw(), true, false, true, ) }).as_string_or_error(), ) } get_input_from_string(object, string.unwrap(), start, end).as_string_or_error() } else if object.is_buffer() { let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let s = get_input_from_buffer(buffer.unwrap(), start, end, &mut start_byte, &mut end_byte); let ss = s.as_string_or_error(); if ss.is_multibyte() { let coding_system = check_coding_system_or_error( get_coding_system_for_buffer( object, buffer.unwrap(), start, end, start_byte, end_byte, coding_system, ), noerror, ); LispObject::from(unsafe { code_convert_string( s.to_raw(), coding_system.to_raw(), LispObject::constant_nil().to_raw(), true, false, false, ) }).as_string_or_error() } else { ss } } else { wrong_type!(Qstringp, object); } } /// Return MD5 message digest of OBJECT, a buffer or string. /// /// A message digest is a cryptographic checksum of a document, and the /// algorithm to calculate it is defined in RFC 1321. /// /// The two optional arguments START and END are character positions /// specifying for which part of OBJECT the message digest should be /// computed. If nil or omitted, the digest is computed for the whole /// OBJECT. 
/// /// The MD5 message digest is computed from the result of encoding the /// text in a coding system, not directly from the internal Emacs form of /// the text. The optional fourth argument CODING-SYSTEM specifies which /// coding system to encode the text with. It should be the same coding /// system that you used or will use when actually writing the text into a /// file. /// /// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If /// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding /// system would be chosen by default for writing this text into a file. /// /// If OBJECT is a string, the most preferred coding system (see the /// command `prefer-coding-system') is used. /// /// If NOERROR is non-nil, silently assume the `raw-text' coding if the /// guesswork fails. Normally, an error is signaled in such case. #[lisp_fn(min = "1")] pub fn md5( object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispObject { _secure_hash( HashAlg::MD5, object, start, end, coding_system, noerror, LispObject::constant_nil(), ) } /// Return the secure hash of OBJECT, a buffer or string. /// ALGORITHM is a symbol specifying the hash to use: /// md5, sha1, sha224, sha256, sha384 or sha512. /// /// The two optional arguments START and END are positions specifying for /// which part of OBJECT to compute the hash. If nil or omitted, uses the /// whole OBJECT. /// /// The full list of algorithms can be obtained with `secure-hash-algorithms'. /// /// If BINARY is non-nil, returns a string in binary form. 
#[lisp_fn(min = "2")] pub fn secure_hash( algorithm: LispObject, object: LispObject, start: LispObject, end: LispObject, binary: LispObject, ) -> LispObject { _secure_hash( hash_alg(algorithm), object, start, end, LispObject::constant_nil(), LispObject::constant_nil(), binary, ) } fn _secure_hash( algorithm: HashAlg, object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, binary: LispObject, ) -> LispObject { let spec = list!(object, start, end, coding_system, noerror); let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let input = unsafe { extract_data_from_object(spec.to_raw(), &mut start_byte, &mut end_byte) }; if input.is_null() { error!("secure_hash: failed to extract data from object, aborting!"); } let input_slice = unsafe { slice::from_raw_parts( input.offset(start_byte) as *mut u8, (end_byte - start_byte) as usize, ) }; type HashFn = fn(&[u8], &mut [u8]); let (digest_size, hash_func) = match algorithm { HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn), HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn), HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn), HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn), HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn), HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn), }; let buffer_size = if binary.is_nil() { (digest_size * 2) as EmacsInt } else { digest_size as EmacsInt }; let digest = LispObject::from(unsafe { make_uninit_string(buffer_size as EmacsInt) }); let digest_str = digest.as_string_or_error(); hash_func(input_slice, digest_str.as_mut_slice()); if binary.is_nil() { hexify_digest_string(digest_str.as_mut_slice(), digest_size); } digest } /// To avoid a copy, buffer is both the source and the destination of /// this transformation. Buffer must contain len bytes of data and /// 2*len bytes of space for the final hex string. 
fn hexify_digest_string(buffer: &mut [u8], len: usize) { static hexdigit: [u8; 16] = *b"0123456789abcdef"; debug_assert_eq!( buffer.len(), 2 * len, "buffer must be long enough to hold 2*len hex digits" ); for i in (0..len).rev() { let v = buffer[i]; buffer[2 * i] = hexdigit[(v >> 4) as usize]; buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize]; } } // For the following hash functions, the caller must ensure that the // destination buffer is at least long enough to hold the // digest. Additionally, the caller may have been asked to return a // hex string, in which case dest_buf will be twice as long as the // digest. fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let output = md5::compute(buffer); dest_buf[..output.len()].copy_from_slice(&*output) } fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = sha1::Sha1::new(); hasher.update(buffer); let output = hasher.digest().bytes(); dest_buf[..output.len()].copy_from_slice(&output) } /// Given an instance of `Digest`, and `buffer` write its hash to `dest_buf`. fn sha2_hash_buffer<D>(hasher: D, buffer: &[u8], dest_buf: &mut [u8]) where D: Digest, { let mut hasher = hasher; hasher.input(buffer); let output = hasher.result(); dest_buf[..output.len()].copy_from_slice(&output) } fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha224::new(), buffer, dest_buf); } fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha256::new(), buffer, dest_buf); } fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha384::new(), buffer, dest_buf); } fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha512::new(), buffer, dest_buf); } /// Return a hash of the contents of BUFFER-OR-NAME. /// This hash is performed on the raw internal format of the buffer, /// disregarding any coding systems. If nil, use the current buffer. 
#[lisp_fn(min = "0")] pub fn buffer_hash(buffer_or_name: LispObject) -> LispObject { let buffer = if buffer_or_name.is_nil() { current_buffer() } else { get_buffer(buffer_or_name) }; if buffer.is_nil() { unsafe { nsberror(buffer_or_name.to_raw()) }; } let b = buffer.as_buffer().unwrap(); let mut ctx = sha1::Sha1::new(); ctx.update(unsafe { slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize) }); if b.gpt_byte() < b.z_byte() { ctx.update(unsafe { slice::from_raw_parts( b.gap_end_addr(), (b.z_addr() as usize - b.gap_end_addr() as usize), ) }); } let formatted = ctx.digest().to_string(); let digest = LispObject::from(unsafe { make_uninit_string(formatted.len() as EmacsInt) }); digest .as_string() .unwrap() .as_mut_slice() .copy_from_slice(formatted.as_bytes()); digest } include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));
random_line_split
mod.rs
#![allow(dead_code)] // XXX unused code belongs into translation of new extract_data_from_object fn use libc::ptrdiff_t; use md5; use sha1; use sha2::{Digest, Sha224, Sha256, Sha384, Sha512}; use std; use std::slice; use remacs_macros::lisp_fn; use remacs_sys::{make_specified_string, make_uninit_string, nsberror, EmacsInt}; use remacs_sys::{code_convert_string, extract_data_from_object, preferred_coding_system, string_char_to_byte, validate_subarray, Fcoding_system_p}; use remacs_sys::{globals, Ffind_operation_coding_system, Flocal_variable_p}; use remacs_sys::{Qbuffer_file_coding_system, Qcoding_system_error, Qmd5, Qraw_text, Qsha1, Qsha224, Qsha256, Qsha384, Qsha512, Qstringp, Qwrite_region}; use remacs_sys::{current_thread, make_buffer_string, record_unwind_current_buffer, set_buffer_internal}; use buffers::{buffer_file_name, current_buffer, get_buffer, LispBufferRef}; use lisp::{LispNumber, LispObject}; use lisp::defsubr; use multibyte::LispStringRef; use symbols::{fboundp, symbol_name}; use threads::ThreadState; #[derive(Clone, Copy)] enum HashAlg { MD5, SHA1, SHA224, SHA256, SHA384, SHA512, } static MD5_DIGEST_LEN: usize = 16; static SHA1_DIGEST_LEN: usize = 20; static SHA224_DIGEST_LEN: usize = 224 / 8; static SHA256_DIGEST_LEN: usize = 256 / 8; static SHA384_DIGEST_LEN: usize = 384 / 8; static SHA512_DIGEST_LEN: usize = 512 / 8; fn hash_alg(algorithm: LispObject) -> HashAlg { algorithm.as_symbol_or_error(); if algorithm.to_raw() == Qmd5 { HashAlg::MD5 } else if algorithm.to_raw() == Qsha1 { HashAlg::SHA1 } else if algorithm.to_raw() == Qsha224 { HashAlg::SHA224 } else if algorithm.to_raw() == Qsha256 { HashAlg::SHA256 } else if algorithm.to_raw() == Qsha384 { HashAlg::SHA384 } else if algorithm.to_raw() == Qsha512 { HashAlg::SHA512 } else { let name = symbol_name(algorithm).as_string_or_error(); error!("Invalid algorithm arg: {:?}\0", &name.as_slice()); } } fn check_coding_system_or_error(coding_system: LispObject, noerror: LispObject) -> LispObject { if 
LispObject::from(unsafe { Fcoding_system_p(coding_system.to_raw()) }).is_nil() { /* Invalid coding system. */ if noerror.is_not_nil() { LispObject::from(Qraw_text) } else { xsignal!(Qcoding_system_error, coding_system); } } else { coding_system } } fn get_coding_system_for_string(string: LispStringRef, coding_system: LispObject) -> LispObject { if coding_system.is_nil() { /* Decide the coding-system to encode the data with. */ if string.is_multibyte() { /* use default, we can't guess correct value */ LispObject::from(unsafe { preferred_coding_system() }) } else { LispObject::from(Qraw_text) } } else { coding_system } } fn get_coding_system_for_buffer( object: LispObject, buffer: LispBufferRef, start: LispObject, end: LispObject, start_byte: ptrdiff_t, end_byte: ptrdiff_t, coding_system: LispObject, ) -> LispObject { /* Decide the coding-system to encode the data with. See fileio.c:Fwrite-region */ if coding_system.is_not_nil() { return coding_system; } if LispObject::from(unsafe { globals.f_Vcoding_system_for_write }).is_not_nil() { return LispObject::from(unsafe { globals.f_Vcoding_system_for_write }); } if LispObject::from(buffer.buffer_file_coding_system).is_nil() || LispObject::from(unsafe { Flocal_variable_p( Qbuffer_file_coding_system, LispObject::constant_nil().to_raw(), ) }).is_nil()
if buffer_file_name(object).is_not_nil() { /* Check file-coding-system-alist. */ let mut args = [ Qwrite_region, start.to_raw(), end.to_raw(), buffer_file_name(object).to_raw(), ]; let val = LispObject::from(unsafe { Ffind_operation_coding_system(4, args.as_mut_ptr()) }); if val.is_cons() && val.as_cons_or_error().cdr().is_not_nil() { return val.as_cons_or_error().cdr(); } } if LispObject::from(buffer.buffer_file_coding_system).is_not_nil() { /* If we still have not decided a coding system, use the default value of buffer-file-coding-system. */ return LispObject::from(buffer.buffer_file_coding_system); } let sscsf = LispObject::from(unsafe { globals.f_Vselect_safe_coding_system_function }); if fboundp(sscsf).is_not_nil() { /* Confirm that VAL can surely encode the current region. */ return call!( sscsf, LispObject::from_natnum(start_byte as EmacsInt), LispObject::from_natnum(end_byte as EmacsInt), coding_system, LispObject::constant_nil() ); } LispObject::constant_nil() } fn get_input_from_string( object: LispObject, string: LispStringRef, start: LispObject, end: LispObject, ) -> LispObject { let size: ptrdiff_t; let start_byte: ptrdiff_t; let end_byte: ptrdiff_t; let mut start_char: ptrdiff_t = 0; let mut end_char: ptrdiff_t = 0; size = string.len_bytes(); unsafe { validate_subarray( object.to_raw(), start.to_raw(), end.to_raw(), size, &mut start_char, &mut end_char, ); } start_byte = if start_char == 0 { 0 } else { unsafe { string_char_to_byte(object.to_raw(), start_char) } }; end_byte = if end_char == size { string.len_bytes() } else { unsafe { string_char_to_byte(object.to_raw(), end_char) } }; if start_byte == 0 && end_byte == size { object } else { LispObject::from(unsafe { make_specified_string( string.const_sdata_ptr().offset(start_byte), -1 as ptrdiff_t, end_byte - start_byte, string.is_multibyte(), ) }) } } fn get_input_from_buffer( mut buffer: LispBufferRef, start: LispObject, end: LispObject, start_byte: &mut ptrdiff_t, end_byte: &mut ptrdiff_t, ) -> 
LispObject { let prev_buffer = ThreadState::current_buffer().as_mut(); unsafe { record_unwind_current_buffer() }; unsafe { set_buffer_internal(buffer.as_mut()) }; *start_byte = if start.is_nil() { buffer.begv } else { match start.as_number_coerce_marker_or_error() { LispNumber::Fixnum(n) => n as ptrdiff_t, LispNumber::Float(n) => n as ptrdiff_t, } }; *end_byte = if end.is_nil() { buffer.zv } else { match end.as_number_coerce_marker_or_error() { LispNumber::Fixnum(n) => n as ptrdiff_t, LispNumber::Float(n) => n as ptrdiff_t, } }; if start_byte > end_byte { std::mem::swap(start_byte, end_byte); } if !(buffer.begv <= *start_byte && *end_byte <= buffer.zv) { args_out_of_range!(start, end); } let string = LispObject::from(unsafe { make_buffer_string(*start_byte, *end_byte, false) }); unsafe { set_buffer_internal(prev_buffer) }; // TODO: this needs to be std::mem::size_of<specbinding>() unsafe { (*current_thread).m_specpdl_ptr = (*current_thread).m_specpdl_ptr.offset(-40) }; string } fn get_input( object: LispObject, string: &mut Option<LispStringRef>, buffer: &Option<LispBufferRef>, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispStringRef { if object.is_string() { if string.unwrap().is_multibyte() { let coding_system = check_coding_system_or_error( get_coding_system_for_string(string.unwrap(), coding_system), noerror, ); *string = Some( LispObject::from(unsafe { code_convert_string( object.to_raw(), coding_system.to_raw(), LispObject::constant_nil().to_raw(), true, false, true, ) }).as_string_or_error(), ) } get_input_from_string(object, string.unwrap(), start, end).as_string_or_error() } else if object.is_buffer() { let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let s = get_input_from_buffer(buffer.unwrap(), start, end, &mut start_byte, &mut end_byte); let ss = s.as_string_or_error(); if ss.is_multibyte() { let coding_system = check_coding_system_or_error( get_coding_system_for_buffer( object, 
buffer.unwrap(), start, end, start_byte, end_byte, coding_system, ), noerror, ); LispObject::from(unsafe { code_convert_string( s.to_raw(), coding_system.to_raw(), LispObject::constant_nil().to_raw(), true, false, false, ) }).as_string_or_error() } else { ss } } else { wrong_type!(Qstringp, object); } } /// Return MD5 message digest of OBJECT, a buffer or string. /// /// A message digest is a cryptographic checksum of a document, and the /// algorithm to calculate it is defined in RFC 1321. /// /// The two optional arguments START and END are character positions /// specifying for which part of OBJECT the message digest should be /// computed. If nil or omitted, the digest is computed for the whole /// OBJECT. /// /// The MD5 message digest is computed from the result of encoding the /// text in a coding system, not directly from the internal Emacs form of /// the text. The optional fourth argument CODING-SYSTEM specifies which /// coding system to encode the text with. It should be the same coding /// system that you used or will use when actually writing the text into a /// file. /// /// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If /// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding /// system would be chosen by default for writing this text into a file. /// /// If OBJECT is a string, the most preferred coding system (see the /// command `prefer-coding-system') is used. /// /// If NOERROR is non-nil, silently assume the `raw-text' coding if the /// guesswork fails. Normally, an error is signaled in such case. #[lisp_fn(min = "1")] pub fn md5( object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispObject { _secure_hash( HashAlg::MD5, object, start, end, coding_system, noerror, LispObject::constant_nil(), ) } /// Return the secure hash of OBJECT, a buffer or string. 
/// ALGORITHM is a symbol specifying the hash to use: /// md5, sha1, sha224, sha256, sha384 or sha512. /// /// The two optional arguments START and END are positions specifying for /// which part of OBJECT to compute the hash. If nil or omitted, uses the /// whole OBJECT. /// /// The full list of algorithms can be obtained with `secure-hash-algorithms'. /// /// If BINARY is non-nil, returns a string in binary form. #[lisp_fn(min = "2")] pub fn secure_hash( algorithm: LispObject, object: LispObject, start: LispObject, end: LispObject, binary: LispObject, ) -> LispObject { _secure_hash( hash_alg(algorithm), object, start, end, LispObject::constant_nil(), LispObject::constant_nil(), binary, ) } fn _secure_hash( algorithm: HashAlg, object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, binary: LispObject, ) -> LispObject { let spec = list!(object, start, end, coding_system, noerror); let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let input = unsafe { extract_data_from_object(spec.to_raw(), &mut start_byte, &mut end_byte) }; if input.is_null() { error!("secure_hash: failed to extract data from object, aborting!"); } let input_slice = unsafe { slice::from_raw_parts( input.offset(start_byte) as *mut u8, (end_byte - start_byte) as usize, ) }; type HashFn = fn(&[u8], &mut [u8]); let (digest_size, hash_func) = match algorithm { HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn), HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn), HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn), HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn), HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn), HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn), }; let buffer_size = if binary.is_nil() { (digest_size * 2) as EmacsInt } else { digest_size as EmacsInt }; let digest = LispObject::from(unsafe { make_uninit_string(buffer_size as EmacsInt) }); let digest_str = 
digest.as_string_or_error(); hash_func(input_slice, digest_str.as_mut_slice()); if binary.is_nil() { hexify_digest_string(digest_str.as_mut_slice(), digest_size); } digest } /// To avoid a copy, buffer is both the source and the destination of /// this transformation. Buffer must contain len bytes of data and /// 2*len bytes of space for the final hex string. fn hexify_digest_string(buffer: &mut [u8], len: usize) { static hexdigit: [u8; 16] = *b"0123456789abcdef"; debug_assert_eq!( buffer.len(), 2 * len, "buffer must be long enough to hold 2*len hex digits" ); for i in (0..len).rev() { let v = buffer[i]; buffer[2 * i] = hexdigit[(v >> 4) as usize]; buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize]; } } // For the following hash functions, the caller must ensure that the // destination buffer is at least long enough to hold the // digest. Additionally, the caller may have been asked to return a // hex string, in which case dest_buf will be twice as long as the // digest. fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let output = md5::compute(buffer); dest_buf[..output.len()].copy_from_slice(&*output) } fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = sha1::Sha1::new(); hasher.update(buffer); let output = hasher.digest().bytes(); dest_buf[..output.len()].copy_from_slice(&output) } /// Given an instance of `Digest`, and `buffer` write its hash to `dest_buf`. 
fn sha2_hash_buffer<D>(hasher: D, buffer: &[u8], dest_buf: &mut [u8]) where D: Digest, { let mut hasher = hasher; hasher.input(buffer); let output = hasher.result(); dest_buf[..output.len()].copy_from_slice(&output) } fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha224::new(), buffer, dest_buf); } fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha256::new(), buffer, dest_buf); } fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha384::new(), buffer, dest_buf); } fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha512::new(), buffer, dest_buf); } /// Return a hash of the contents of BUFFER-OR-NAME. /// This hash is performed on the raw internal format of the buffer, /// disregarding any coding systems. If nil, use the current buffer. #[lisp_fn(min = "0")] pub fn buffer_hash(buffer_or_name: LispObject) -> LispObject { let buffer = if buffer_or_name.is_nil() { current_buffer() } else { get_buffer(buffer_or_name) }; if buffer.is_nil() { unsafe { nsberror(buffer_or_name.to_raw()) }; } let b = buffer.as_buffer().unwrap(); let mut ctx = sha1::Sha1::new(); ctx.update(unsafe { slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize) }); if b.gpt_byte() < b.z_byte() { ctx.update(unsafe { slice::from_raw_parts( b.gap_end_addr(), (b.z_addr() as usize - b.gap_end_addr() as usize), ) }); } let formatted = ctx.digest().to_string(); let digest = LispObject::from(unsafe { make_uninit_string(formatted.len() as EmacsInt) }); digest .as_string() .unwrap() .as_mut_slice() .copy_from_slice(formatted.as_bytes()); digest } include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));
{ if LispObject::from(buffer.enable_multibyte_characters).is_nil() { return LispObject::from(Qraw_text); } }
conditional_block
mod.rs
#![allow(dead_code)] // XXX unused code belongs into translation of new extract_data_from_object fn use libc::ptrdiff_t; use md5; use sha1; use sha2::{Digest, Sha224, Sha256, Sha384, Sha512}; use std; use std::slice; use remacs_macros::lisp_fn; use remacs_sys::{make_specified_string, make_uninit_string, nsberror, EmacsInt}; use remacs_sys::{code_convert_string, extract_data_from_object, preferred_coding_system, string_char_to_byte, validate_subarray, Fcoding_system_p}; use remacs_sys::{globals, Ffind_operation_coding_system, Flocal_variable_p}; use remacs_sys::{Qbuffer_file_coding_system, Qcoding_system_error, Qmd5, Qraw_text, Qsha1, Qsha224, Qsha256, Qsha384, Qsha512, Qstringp, Qwrite_region}; use remacs_sys::{current_thread, make_buffer_string, record_unwind_current_buffer, set_buffer_internal}; use buffers::{buffer_file_name, current_buffer, get_buffer, LispBufferRef}; use lisp::{LispNumber, LispObject}; use lisp::defsubr; use multibyte::LispStringRef; use symbols::{fboundp, symbol_name}; use threads::ThreadState; #[derive(Clone, Copy)] enum HashAlg { MD5, SHA1, SHA224, SHA256, SHA384, SHA512, } static MD5_DIGEST_LEN: usize = 16; static SHA1_DIGEST_LEN: usize = 20; static SHA224_DIGEST_LEN: usize = 224 / 8; static SHA256_DIGEST_LEN: usize = 256 / 8; static SHA384_DIGEST_LEN: usize = 384 / 8; static SHA512_DIGEST_LEN: usize = 512 / 8; fn hash_alg(algorithm: LispObject) -> HashAlg { algorithm.as_symbol_or_error(); if algorithm.to_raw() == Qmd5 { HashAlg::MD5 } else if algorithm.to_raw() == Qsha1 { HashAlg::SHA1 } else if algorithm.to_raw() == Qsha224 { HashAlg::SHA224 } else if algorithm.to_raw() == Qsha256 { HashAlg::SHA256 } else if algorithm.to_raw() == Qsha384 { HashAlg::SHA384 } else if algorithm.to_raw() == Qsha512 { HashAlg::SHA512 } else { let name = symbol_name(algorithm).as_string_or_error(); error!("Invalid algorithm arg: {:?}\0", &name.as_slice()); } } fn check_coding_system_or_error(coding_system: LispObject, noerror: LispObject) -> LispObject { if 
LispObject::from(unsafe { Fcoding_system_p(coding_system.to_raw()) }).is_nil() { /* Invalid coding system. */ if noerror.is_not_nil() { LispObject::from(Qraw_text) } else { xsignal!(Qcoding_system_error, coding_system); } } else { coding_system } } fn get_coding_system_for_string(string: LispStringRef, coding_system: LispObject) -> LispObject { if coding_system.is_nil() { /* Decide the coding-system to encode the data with. */ if string.is_multibyte() { /* use default, we can't guess correct value */ LispObject::from(unsafe { preferred_coding_system() }) } else { LispObject::from(Qraw_text) } } else { coding_system } } fn get_coding_system_for_buffer( object: LispObject, buffer: LispBufferRef, start: LispObject, end: LispObject, start_byte: ptrdiff_t, end_byte: ptrdiff_t, coding_system: LispObject, ) -> LispObject { /* Decide the coding-system to encode the data with. See fileio.c:Fwrite-region */ if coding_system.is_not_nil() { return coding_system; } if LispObject::from(unsafe { globals.f_Vcoding_system_for_write }).is_not_nil() { return LispObject::from(unsafe { globals.f_Vcoding_system_for_write }); } if LispObject::from(buffer.buffer_file_coding_system).is_nil() || LispObject::from(unsafe { Flocal_variable_p( Qbuffer_file_coding_system, LispObject::constant_nil().to_raw(), ) }).is_nil() { if LispObject::from(buffer.enable_multibyte_characters).is_nil() { return LispObject::from(Qraw_text); } } if buffer_file_name(object).is_not_nil() { /* Check file-coding-system-alist. */ let mut args = [ Qwrite_region, start.to_raw(), end.to_raw(), buffer_file_name(object).to_raw(), ]; let val = LispObject::from(unsafe { Ffind_operation_coding_system(4, args.as_mut_ptr()) }); if val.is_cons() && val.as_cons_or_error().cdr().is_not_nil() { return val.as_cons_or_error().cdr(); } } if LispObject::from(buffer.buffer_file_coding_system).is_not_nil() { /* If we still have not decided a coding system, use the default value of buffer-file-coding-system. 
*/ return LispObject::from(buffer.buffer_file_coding_system); } let sscsf = LispObject::from(unsafe { globals.f_Vselect_safe_coding_system_function }); if fboundp(sscsf).is_not_nil() { /* Confirm that VAL can surely encode the current region. */ return call!( sscsf, LispObject::from_natnum(start_byte as EmacsInt), LispObject::from_natnum(end_byte as EmacsInt), coding_system, LispObject::constant_nil() ); } LispObject::constant_nil() } fn get_input_from_string( object: LispObject, string: LispStringRef, start: LispObject, end: LispObject, ) -> LispObject { let size: ptrdiff_t; let start_byte: ptrdiff_t; let end_byte: ptrdiff_t; let mut start_char: ptrdiff_t = 0; let mut end_char: ptrdiff_t = 0; size = string.len_bytes(); unsafe { validate_subarray( object.to_raw(), start.to_raw(), end.to_raw(), size, &mut start_char, &mut end_char, ); } start_byte = if start_char == 0 { 0 } else { unsafe { string_char_to_byte(object.to_raw(), start_char) } }; end_byte = if end_char == size { string.len_bytes() } else { unsafe { string_char_to_byte(object.to_raw(), end_char) } }; if start_byte == 0 && end_byte == size { object } else { LispObject::from(unsafe { make_specified_string( string.const_sdata_ptr().offset(start_byte), -1 as ptrdiff_t, end_byte - start_byte, string.is_multibyte(), ) }) } } fn get_input_from_buffer( mut buffer: LispBufferRef, start: LispObject, end: LispObject, start_byte: &mut ptrdiff_t, end_byte: &mut ptrdiff_t, ) -> LispObject { let prev_buffer = ThreadState::current_buffer().as_mut(); unsafe { record_unwind_current_buffer() }; unsafe { set_buffer_internal(buffer.as_mut()) }; *start_byte = if start.is_nil() { buffer.begv } else { match start.as_number_coerce_marker_or_error() { LispNumber::Fixnum(n) => n as ptrdiff_t, LispNumber::Float(n) => n as ptrdiff_t, } }; *end_byte = if end.is_nil() { buffer.zv } else { match end.as_number_coerce_marker_or_error() { LispNumber::Fixnum(n) => n as ptrdiff_t, LispNumber::Float(n) => n as ptrdiff_t, } }; if start_byte > 
end_byte { std::mem::swap(start_byte, end_byte); } if !(buffer.begv <= *start_byte && *end_byte <= buffer.zv) { args_out_of_range!(start, end); } let string = LispObject::from(unsafe { make_buffer_string(*start_byte, *end_byte, false) }); unsafe { set_buffer_internal(prev_buffer) }; // TODO: this needs to be std::mem::size_of<specbinding>() unsafe { (*current_thread).m_specpdl_ptr = (*current_thread).m_specpdl_ptr.offset(-40) }; string } fn get_input( object: LispObject, string: &mut Option<LispStringRef>, buffer: &Option<LispBufferRef>, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispStringRef { if object.is_string() { if string.unwrap().is_multibyte() { let coding_system = check_coding_system_or_error( get_coding_system_for_string(string.unwrap(), coding_system), noerror, ); *string = Some( LispObject::from(unsafe { code_convert_string( object.to_raw(), coding_system.to_raw(), LispObject::constant_nil().to_raw(), true, false, true, ) }).as_string_or_error(), ) } get_input_from_string(object, string.unwrap(), start, end).as_string_or_error() } else if object.is_buffer() { let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let s = get_input_from_buffer(buffer.unwrap(), start, end, &mut start_byte, &mut end_byte); let ss = s.as_string_or_error(); if ss.is_multibyte() { let coding_system = check_coding_system_or_error( get_coding_system_for_buffer( object, buffer.unwrap(), start, end, start_byte, end_byte, coding_system, ), noerror, ); LispObject::from(unsafe { code_convert_string( s.to_raw(), coding_system.to_raw(), LispObject::constant_nil().to_raw(), true, false, false, ) }).as_string_or_error() } else { ss } } else { wrong_type!(Qstringp, object); } } /// Return MD5 message digest of OBJECT, a buffer or string. /// /// A message digest is a cryptographic checksum of a document, and the /// algorithm to calculate it is defined in RFC 1321. 
/// /// The two optional arguments START and END are character positions /// specifying for which part of OBJECT the message digest should be /// computed. If nil or omitted, the digest is computed for the whole /// OBJECT. /// /// The MD5 message digest is computed from the result of encoding the /// text in a coding system, not directly from the internal Emacs form of /// the text. The optional fourth argument CODING-SYSTEM specifies which /// coding system to encode the text with. It should be the same coding /// system that you used or will use when actually writing the text into a /// file. /// /// If CODING-SYSTEM is nil or omitted, the default depends on OBJECT. If /// OBJECT is a buffer, the default for CODING-SYSTEM is whatever coding /// system would be chosen by default for writing this text into a file. /// /// If OBJECT is a string, the most preferred coding system (see the /// command `prefer-coding-system') is used. /// /// If NOERROR is non-nil, silently assume the `raw-text' coding if the /// guesswork fails. Normally, an error is signaled in such case. #[lisp_fn(min = "1")] pub fn md5( object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, ) -> LispObject { _secure_hash( HashAlg::MD5, object, start, end, coding_system, noerror, LispObject::constant_nil(), ) } /// Return the secure hash of OBJECT, a buffer or string. /// ALGORITHM is a symbol specifying the hash to use: /// md5, sha1, sha224, sha256, sha384 or sha512. /// /// The two optional arguments START and END are positions specifying for /// which part of OBJECT to compute the hash. If nil or omitted, uses the /// whole OBJECT. /// /// The full list of algorithms can be obtained with `secure-hash-algorithms'. /// /// If BINARY is non-nil, returns a string in binary form. 
#[lisp_fn(min = "2")] pub fn secure_hash( algorithm: LispObject, object: LispObject, start: LispObject, end: LispObject, binary: LispObject, ) -> LispObject { _secure_hash( hash_alg(algorithm), object, start, end, LispObject::constant_nil(), LispObject::constant_nil(), binary, ) } fn _secure_hash( algorithm: HashAlg, object: LispObject, start: LispObject, end: LispObject, coding_system: LispObject, noerror: LispObject, binary: LispObject, ) -> LispObject { let spec = list!(object, start, end, coding_system, noerror); let mut start_byte: ptrdiff_t = 0; let mut end_byte: ptrdiff_t = 0; let input = unsafe { extract_data_from_object(spec.to_raw(), &mut start_byte, &mut end_byte) }; if input.is_null() { error!("secure_hash: failed to extract data from object, aborting!"); } let input_slice = unsafe { slice::from_raw_parts( input.offset(start_byte) as *mut u8, (end_byte - start_byte) as usize, ) }; type HashFn = fn(&[u8], &mut [u8]); let (digest_size, hash_func) = match algorithm { HashAlg::MD5 => (MD5_DIGEST_LEN, md5_buffer as HashFn), HashAlg::SHA1 => (SHA1_DIGEST_LEN, sha1_buffer as HashFn), HashAlg::SHA224 => (SHA224_DIGEST_LEN, sha224_buffer as HashFn), HashAlg::SHA256 => (SHA256_DIGEST_LEN, sha256_buffer as HashFn), HashAlg::SHA384 => (SHA384_DIGEST_LEN, sha384_buffer as HashFn), HashAlg::SHA512 => (SHA512_DIGEST_LEN, sha512_buffer as HashFn), }; let buffer_size = if binary.is_nil() { (digest_size * 2) as EmacsInt } else { digest_size as EmacsInt }; let digest = LispObject::from(unsafe { make_uninit_string(buffer_size as EmacsInt) }); let digest_str = digest.as_string_or_error(); hash_func(input_slice, digest_str.as_mut_slice()); if binary.is_nil() { hexify_digest_string(digest_str.as_mut_slice(), digest_size); } digest } /// To avoid a copy, buffer is both the source and the destination of /// this transformation. Buffer must contain len bytes of data and /// 2*len bytes of space for the final hex string. 
fn hexify_digest_string(buffer: &mut [u8], len: usize) { static hexdigit: [u8; 16] = *b"0123456789abcdef"; debug_assert_eq!( buffer.len(), 2 * len, "buffer must be long enough to hold 2*len hex digits" ); for i in (0..len).rev() { let v = buffer[i]; buffer[2 * i] = hexdigit[(v >> 4) as usize]; buffer[2 * i + 1] = hexdigit[(v & 0xf) as usize]; } } // For the following hash functions, the caller must ensure that the // destination buffer is at least long enough to hold the // digest. Additionally, the caller may have been asked to return a // hex string, in which case dest_buf will be twice as long as the // digest. fn md5_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let output = md5::compute(buffer); dest_buf[..output.len()].copy_from_slice(&*output) } fn sha1_buffer(buffer: &[u8], dest_buf: &mut [u8]) { let mut hasher = sha1::Sha1::new(); hasher.update(buffer); let output = hasher.digest().bytes(); dest_buf[..output.len()].copy_from_slice(&output) } /// Given an instance of `Digest`, and `buffer` write its hash to `dest_buf`. fn sha2_hash_buffer<D>(hasher: D, buffer: &[u8], dest_buf: &mut [u8]) where D: Digest, { let mut hasher = hasher; hasher.input(buffer); let output = hasher.result(); dest_buf[..output.len()].copy_from_slice(&output) } fn sha224_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha224::new(), buffer, dest_buf); } fn sha256_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha256::new(), buffer, dest_buf); } fn sha384_buffer(buffer: &[u8], dest_buf: &mut [u8]) { sha2_hash_buffer(Sha384::new(), buffer, dest_buf); } fn sha512_buffer(buffer: &[u8], dest_buf: &mut [u8])
/// Return a hash of the contents of BUFFER-OR-NAME. /// This hash is performed on the raw internal format of the buffer, /// disregarding any coding systems. If nil, use the current buffer. #[lisp_fn(min = "0")] pub fn buffer_hash(buffer_or_name: LispObject) -> LispObject { let buffer = if buffer_or_name.is_nil() { current_buffer() } else { get_buffer(buffer_or_name) }; if buffer.is_nil() { unsafe { nsberror(buffer_or_name.to_raw()) }; } let b = buffer.as_buffer().unwrap(); let mut ctx = sha1::Sha1::new(); ctx.update(unsafe { slice::from_raw_parts(b.beg_addr(), (b.gpt_byte() - b.beg_byte()) as usize) }); if b.gpt_byte() < b.z_byte() { ctx.update(unsafe { slice::from_raw_parts( b.gap_end_addr(), (b.z_addr() as usize - b.gap_end_addr() as usize), ) }); } let formatted = ctx.digest().to_string(); let digest = LispObject::from(unsafe { make_uninit_string(formatted.len() as EmacsInt) }); digest .as_string() .unwrap() .as_mut_slice() .copy_from_slice(formatted.as_bytes()); digest } include!(concat!(env!("OUT_DIR"), "/crypto_exports.rs"));
{ sha2_hash_buffer(Sha512::new(), buffer, dest_buf); }
identifier_body
virtual_usbxhci_controller_option.py
import logging from pyvisdk.exceptions import InvalidArgumentError ######################################## # Automatically generated, do not edit. ########################################
def VirtualUSBXHCIControllerOption(vim, *args, **kwargs): '''The VirtualUSBXHCIControllerOption data object type contains the options for a virtual USB Extensible Host Controller Interface (USB 3.0).''' obj = vim.client.factory.create('ns0:VirtualUSBXHCIControllerOption') # do some validation checking... if (len(args) + len(kwargs)) < 7: raise IndexError('Expected at least 8 arguments got: %d' % len(args)) required = [ 'autoConnectDevices', 'supportedSpeeds', 'devices', 'deprecated', 'hotRemoveSupported', 'plugAndPlay', 'type' ] optional = [ 'supportedDevice', 'autoAssignController', 'backingOption', 'connectOption', 'controllerType', 'defaultBackingOptionIndex', 'licensingLimit', 'dynamicProperty', 'dynamicType' ] for name, arg in zip(required+optional, args): setattr(obj, name, arg) for name, value in kwargs.items(): if name in required + optional: setattr(obj, name, value) else: raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional))) return obj
log = logging.getLogger(__name__)
random_line_split
virtual_usbxhci_controller_option.py
import logging from pyvisdk.exceptions import InvalidArgumentError ######################################## # Automatically generated, do not edit. ######################################## log = logging.getLogger(__name__) def
(vim, *args, **kwargs): '''The VirtualUSBXHCIControllerOption data object type contains the options for a virtual USB Extensible Host Controller Interface (USB 3.0).''' obj = vim.client.factory.create('ns0:VirtualUSBXHCIControllerOption') # do some validation checking... if (len(args) + len(kwargs)) < 7: raise IndexError('Expected at least 8 arguments got: %d' % len(args)) required = [ 'autoConnectDevices', 'supportedSpeeds', 'devices', 'deprecated', 'hotRemoveSupported', 'plugAndPlay', 'type' ] optional = [ 'supportedDevice', 'autoAssignController', 'backingOption', 'connectOption', 'controllerType', 'defaultBackingOptionIndex', 'licensingLimit', 'dynamicProperty', 'dynamicType' ] for name, arg in zip(required+optional, args): setattr(obj, name, arg) for name, value in kwargs.items(): if name in required + optional: setattr(obj, name, value) else: raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional))) return obj
VirtualUSBXHCIControllerOption
identifier_name
virtual_usbxhci_controller_option.py
import logging from pyvisdk.exceptions import InvalidArgumentError ######################################## # Automatically generated, do not edit. ######################################## log = logging.getLogger(__name__) def VirtualUSBXHCIControllerOption(vim, *args, **kwargs):
'''The VirtualUSBXHCIControllerOption data object type contains the options for a virtual USB Extensible Host Controller Interface (USB 3.0).''' obj = vim.client.factory.create('ns0:VirtualUSBXHCIControllerOption') # do some validation checking... if (len(args) + len(kwargs)) < 7: raise IndexError('Expected at least 8 arguments got: %d' % len(args)) required = [ 'autoConnectDevices', 'supportedSpeeds', 'devices', 'deprecated', 'hotRemoveSupported', 'plugAndPlay', 'type' ] optional = [ 'supportedDevice', 'autoAssignController', 'backingOption', 'connectOption', 'controllerType', 'defaultBackingOptionIndex', 'licensingLimit', 'dynamicProperty', 'dynamicType' ] for name, arg in zip(required+optional, args): setattr(obj, name, arg) for name, value in kwargs.items(): if name in required + optional: setattr(obj, name, value) else: raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional))) return obj
identifier_body
virtual_usbxhci_controller_option.py
import logging from pyvisdk.exceptions import InvalidArgumentError ######################################## # Automatically generated, do not edit. ######################################## log = logging.getLogger(__name__) def VirtualUSBXHCIControllerOption(vim, *args, **kwargs): '''The VirtualUSBXHCIControllerOption data object type contains the options for a virtual USB Extensible Host Controller Interface (USB 3.0).''' obj = vim.client.factory.create('ns0:VirtualUSBXHCIControllerOption') # do some validation checking... if (len(args) + len(kwargs)) < 7: raise IndexError('Expected at least 8 arguments got: %d' % len(args)) required = [ 'autoConnectDevices', 'supportedSpeeds', 'devices', 'deprecated', 'hotRemoveSupported', 'plugAndPlay', 'type' ] optional = [ 'supportedDevice', 'autoAssignController', 'backingOption', 'connectOption', 'controllerType', 'defaultBackingOptionIndex', 'licensingLimit', 'dynamicProperty', 'dynamicType' ] for name, arg in zip(required+optional, args): setattr(obj, name, arg) for name, value in kwargs.items(): if name in required + optional:
else: raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional))) return obj
setattr(obj, name, value)
conditional_block
context.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! The context within which style is calculated. #[cfg(feature = "servo")] use animation::Animation; use app_units::Au; use bloom::StyleBloom; use data::{EagerPseudoStyles, ElementData}; use dom::{TElement, SendElement}; #[cfg(feature = "servo")] use dom::OpaqueNode; use euclid::Size2D; use euclid::TypedScale; use fnv::FnvHashMap; use font_metrics::FontMetricsProvider; #[cfg(feature = "gecko")] use gecko_bindings::structs; use parallel::{STACK_SAFETY_MARGIN_KB, STYLE_THREAD_STACK_SIZE_KB}; #[cfg(feature = "servo")] use parking_lot::RwLock; use properties::ComputedValues; #[cfg(feature = "servo")] use properties::PropertyId; use rule_cache::RuleCache; use rule_tree::StrongRuleNode; use selector_parser::{EAGER_PSEUDO_COUNT, SnapshotMap}; use selectors::NthIndexCache; use selectors::matching::ElementSelectorFlags; use servo_arc::Arc; #[cfg(feature = "servo")] use servo_atoms::Atom; use shared_lock::StylesheetGuards; use sharing::StyleSharingCache; use std::fmt; use std::ops; #[cfg(feature = "servo")] use std::sync::Mutex; #[cfg(feature = "servo")] use std::sync::mpsc::Sender; use style_traits::CSSPixel; use style_traits::DevicePixel; #[cfg(feature = "servo")] use style_traits::SpeculativePainter; use stylist::Stylist; use thread_state::{self, ThreadState}; use time; use timer::Timer; use traversal::DomTraversal; use traversal_flags::TraversalFlags; use uluru::{Entry, LRUCache}; pub use selectors::matching::QuirksMode; /// This structure is used to create a local style context from a shared one. #[cfg(feature = "servo")] pub struct ThreadLocalStyleContextCreationInfo { new_animations_sender: Sender<Animation>, } #[cfg(feature = "servo")] impl ThreadLocalStyleContextCreationInfo { /// Trivially constructs a `ThreadLocalStyleContextCreationInfo`. 
pub fn new(animations_sender: Sender<Animation>) -> Self { ThreadLocalStyleContextCreationInfo { new_animations_sender: animations_sender, } } } /// A global options structure for the style system. We use this instead of /// opts to abstract across Gecko and Servo. #[derive(Clone)] pub struct StyleSystemOptions { /// Whether the style sharing cache is disabled. pub disable_style_sharing_cache: bool, /// Whether we should dump statistics about the style system. pub dump_style_statistics: bool, /// The minimum number of elements that must be traversed to trigger a dump /// of style statistics. pub style_statistics_threshold: usize, } #[cfg(feature = "gecko")] fn get_env_bool(name: &str) -> bool { use std::env; match env::var(name) { Ok(s) => !s.is_empty(), Err(_) => false, } } const DEFAULT_STATISTICS_THRESHOLD: usize = 50; #[cfg(feature = "gecko")] fn get_env_usize(name: &str) -> Option<usize> { use std::env; env::var(name).ok().map(|s| { s.parse::<usize>().expect("Couldn't parse environmental variable as usize") }) } impl Default for StyleSystemOptions { #[cfg(feature = "servo")] fn default() -> Self { use servo_config::opts; StyleSystemOptions { disable_style_sharing_cache: opts::get().disable_share_style_cache, dump_style_statistics: opts::get().style_sharing_stats, style_statistics_threshold: DEFAULT_STATISTICS_THRESHOLD, } } #[cfg(feature = "gecko")] fn default() -> Self { StyleSystemOptions { disable_style_sharing_cache: get_env_bool("DISABLE_STYLE_SHARING_CACHE"), dump_style_statistics: get_env_bool("DUMP_STYLE_STATISTICS"), style_statistics_threshold: get_env_usize("STYLE_STATISTICS_THRESHOLD") .unwrap_or(DEFAULT_STATISTICS_THRESHOLD), } } } impl StyleSystemOptions { #[cfg(feature = "servo")] /// On Gecko's nightly build? pub fn is_nightly(&self) -> bool { false } #[cfg(feature = "gecko")] /// On Gecko's nightly build? #[inline] pub fn is_nightly(&self) -> bool { structs::GECKO_IS_NIGHTLY } } /// A shared style context. 
/// /// There's exactly one of these during a given restyle traversal, and it's /// shared among the worker threads. pub struct SharedStyleContext<'a> { /// The CSS selector stylist. pub stylist: &'a Stylist, /// Whether visited styles are enabled. /// /// They may be disabled when Gecko's pref layout.css.visited_links_enabled /// is false, or when in private browsing mode. pub visited_styles_enabled: bool, /// Configuration options. pub options: StyleSystemOptions, /// Guards for pre-acquired locks pub guards: StylesheetGuards<'a>, /// The current timer for transitions and animations. This is needed to test /// them. pub timer: Timer, /// Flags controlling how we traverse the tree. pub traversal_flags: TraversalFlags, /// A map with our snapshots in order to handle restyle hints. pub snapshot_map: &'a SnapshotMap, /// The animations that are currently running. #[cfg(feature = "servo")] pub running_animations: Arc<RwLock<FnvHashMap<OpaqueNode, Vec<Animation>>>>, /// The list of animations that have expired since the last style recalculation. #[cfg(feature = "servo")] pub expired_animations: Arc<RwLock<FnvHashMap<OpaqueNode, Vec<Animation>>>>, /// Paint worklets #[cfg(feature = "servo")] pub registered_speculative_painters: &'a RegisteredSpeculativePainters, /// Data needed to create the thread-local style context from the shared one. #[cfg(feature = "servo")] pub local_context_creation_data: Mutex<ThreadLocalStyleContextCreationInfo>, } impl<'a> SharedStyleContext<'a> { /// Return a suitable viewport size in order to be used for viewport units. pub fn viewport_size(&self) -> Size2D<Au> { self.stylist.device().au_viewport_size() } /// The device pixel ratio pub fn device_pixel_ratio(&self) -> TypedScale<f32, CSSPixel, DevicePixel> { self.stylist.device().device_pixel_ratio() } /// The quirks mode of the document. 
pub fn quirks_mode(&self) -> QuirksMode { self.stylist.quirks_mode() } } /// The structure holds various intermediate inputs that are eventually used by /// by the cascade. /// /// The matching and cascading process stores them in this format temporarily /// within the `CurrentElementInfo`. At the end of the cascade, they are folded /// down into the main `ComputedValues` to reduce memory usage per element while /// still remaining accessible. #[derive(Clone, Default)] pub struct CascadeInputs { /// The rule node representing the ordered list of rules matched for this /// node. pub rules: Option<StrongRuleNode>, /// The rule node representing the ordered list of rules matched for this /// node if visited, only computed if there's a relevant link for this /// element. A element's "relevant link" is the element being matched if it /// is a link or the nearest ancestor link. pub visited_rules: Option<StrongRuleNode>, } impl CascadeInputs { /// Construct inputs from previous cascade results, if any. pub fn new_from_style(style: &ComputedValues) -> Self { CascadeInputs { rules: style.rules.clone(), visited_rules: style.visited_style().and_then(|v| v.rules.clone()), } } } // We manually implement Debug for CascadeInputs so that we can avoid the // verbose stringification of ComputedValues for normal logging. impl fmt::Debug for CascadeInputs { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "CascadeInputs {{ rules: {:?}, visited_rules: {:?}, .. }}", self.rules, self.visited_rules) } } /// A list of cascade inputs for eagerly-cascaded pseudo-elements. /// The list is stored inline. #[derive(Debug)] pub struct EagerPseudoCascadeInputs(Option<[Option<CascadeInputs>; EAGER_PSEUDO_COUNT]>); // Manually implement `Clone` here because the derived impl of `Clone` for // array types assumes the value inside is `Copy`. 
impl Clone for EagerPseudoCascadeInputs { fn clone(&self) -> Self { if self.0.is_none() { return EagerPseudoCascadeInputs(None) } let self_inputs = self.0.as_ref().unwrap(); let mut inputs: [Option<CascadeInputs>; EAGER_PSEUDO_COUNT] = Default::default(); for i in 0..EAGER_PSEUDO_COUNT { inputs[i] = self_inputs[i].clone(); } EagerPseudoCascadeInputs(Some(inputs)) } } impl EagerPseudoCascadeInputs { /// Construct inputs from previous cascade results, if any. fn new_from_style(styles: &EagerPseudoStyles) -> Self { EagerPseudoCascadeInputs(styles.as_optional_array().map(|styles| { let mut inputs: [Option<CascadeInputs>; EAGER_PSEUDO_COUNT] = Default::default(); for i in 0..EAGER_PSEUDO_COUNT { inputs[i] = styles[i].as_ref().map(|s| CascadeInputs::new_from_style(s)); } inputs })) } /// Returns the list of rules, if they exist. pub fn into_array(self) -> Option<[Option<CascadeInputs>; EAGER_PSEUDO_COUNT]> { self.0 } } /// The cascade inputs associated with a node, including those for any /// pseudo-elements. /// /// The matching and cascading process stores them in this format temporarily /// within the `CurrentElementInfo`. At the end of the cascade, they are folded /// down into the main `ComputedValues` to reduce memory usage per element while /// still remaining accessible. #[derive(Clone, Debug)] pub struct ElementCascadeInputs { /// The element's cascade inputs. pub primary: CascadeInputs, /// A list of the inputs for the element's eagerly-cascaded pseudo-elements. pub pseudos: EagerPseudoCascadeInputs, } impl ElementCascadeInputs { /// Construct inputs from previous cascade results, if any. pub fn new_from_element_data(data: &ElementData) -> Self { debug_assert!(data.has_styles()); ElementCascadeInputs { primary: CascadeInputs::new_from_style(data.styles.primary()), pseudos: EagerPseudoCascadeInputs::new_from_style(&data.styles.pseudos), } } } /// Statistics gathered during the traversal. 
We gather statistics on each /// thread and then combine them after the threads join via the Add /// implementation below. #[derive(Default)] pub struct TraversalStatistics { /// The total number of elements traversed. pub elements_traversed: u32, /// The number of elements where has_styles() went from false to true. pub elements_styled: u32, /// The number of elements for which we performed selector matching. pub elements_matched: u32, /// The number of cache hits from the StyleSharingCache. pub styles_shared: u32, /// The number of styles reused via rule node comparison from the /// StyleSharingCache. pub styles_reused: u32, /// The number of selectors in the stylist. pub selectors: u32, /// The number of revalidation selectors. pub revalidation_selectors: u32, /// The number of state/attr dependencies in the dependency set. pub dependency_selectors: u32, /// The number of declarations in the stylist. pub declarations: u32, /// The number of times the stylist was rebuilt. pub stylist_rebuilds: u32, /// Time spent in the traversal, in milliseconds. pub traversal_time_ms: f64, /// Whether this was a parallel traversal. pub is_parallel: Option<bool>, /// Whether this is a "large" traversal. pub is_large: Option<bool>, } /// Implementation of Add to aggregate statistics across different threads. 
impl<'a> ops::Add for &'a TraversalStatistics { type Output = TraversalStatistics; fn add(self, other: Self) -> TraversalStatistics { debug_assert!(self.traversal_time_ms == 0.0 && other.traversal_time_ms == 0.0, "traversal_time_ms should be set at the end by the caller"); debug_assert!(self.selectors == 0, "set at the end"); debug_assert!(self.revalidation_selectors == 0, "set at the end"); debug_assert!(self.dependency_selectors == 0, "set at the end"); debug_assert!(self.declarations == 0, "set at the end"); debug_assert!(self.stylist_rebuilds == 0, "set at the end"); TraversalStatistics { elements_traversed: self.elements_traversed + other.elements_traversed, elements_styled: self.elements_styled + other.elements_styled, elements_matched: self.elements_matched + other.elements_matched, styles_shared: self.styles_shared + other.styles_shared, styles_reused: self.styles_reused + other.styles_reused, selectors: 0, revalidation_selectors: 0, dependency_selectors: 0, declarations: 0, stylist_rebuilds: 0, traversal_time_ms: 0.0, is_parallel: None, is_large: None, } } } /// Format the statistics in a way that the performance test harness understands. 
/// See https://bugzilla.mozilla.org/show_bug.cgi?id=1331856#c2 impl fmt::Display for TraversalStatistics { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_assert!(self.traversal_time_ms != 0.0, "should have set traversal time"); writeln!(f, "[PERF] perf block start")?; writeln!(f, "[PERF],traversal,{}", if self.is_parallel.unwrap() { "parallel" } else { "sequential" })?; writeln!(f, "[PERF],elements_traversed,{}", self.elements_traversed)?; writeln!(f, "[PERF],elements_styled,{}", self.elements_styled)?; writeln!(f, "[PERF],elements_matched,{}", self.elements_matched)?; writeln!(f, "[PERF],styles_shared,{}", self.styles_shared)?; writeln!(f, "[PERF],styles_reused,{}", self.styles_reused)?; writeln!(f, "[PERF],selectors,{}", self.selectors)?; writeln!(f, "[PERF],revalidation_selectors,{}", self.revalidation_selectors)?; writeln!(f, "[PERF],dependency_selectors,{}", self.dependency_selectors)?; writeln!(f, "[PERF],declarations,{}", self.declarations)?; writeln!(f, "[PERF],stylist_rebuilds,{}", self.stylist_rebuilds)?; writeln!(f, "[PERF],traversal_time_ms,{}", self.traversal_time_ms)?; writeln!(f, "[PERF] perf block end") } } impl TraversalStatistics { /// Computes the traversal time given the start time in seconds. 
pub fn finish<E, D>(&mut self, traversal: &D, parallel: bool, start: f64) where E: TElement, D: DomTraversal<E>, { let threshold = traversal.shared_context().options.style_statistics_threshold; let stylist = traversal.shared_context().stylist; self.is_parallel = Some(parallel); self.is_large = Some(self.elements_traversed as usize >= threshold); self.traversal_time_ms = (time::precise_time_s() - start) * 1000.0; self.selectors = stylist.num_selectors() as u32; self.revalidation_selectors = stylist.num_revalidation_selectors() as u32; self.dependency_selectors = stylist.num_invalidations() as u32; self.declarations = stylist.num_declarations() as u32; self.stylist_rebuilds = stylist.num_rebuilds() as u32; } /// Returns whether this traversal is 'large' in order to avoid console spam /// from lots of tiny traversals. pub fn is_large_traversal(&self) -> bool { self.is_large.unwrap() } } #[cfg(feature = "gecko")] bitflags! { /// Represents which tasks are performed in a SequentialTask of /// UpdateAnimations which is a result of normal restyle. pub struct UpdateAnimationsTasks: u8 { /// Update CSS Animations. const CSS_ANIMATIONS = structs::UpdateAnimationsTasks_CSSAnimations; /// Update CSS Transitions. const CSS_TRANSITIONS = structs::UpdateAnimationsTasks_CSSTransitions; /// Update effect properties. const EFFECT_PROPERTIES = structs::UpdateAnimationsTasks_EffectProperties; /// Update animation cacade results for animations running on the compositor. const CASCADE_RESULTS = structs::UpdateAnimationsTasks_CascadeResults; } } #[cfg(feature = "gecko")] bitflags! { /// Represents which tasks are performed in a SequentialTask as a result of /// animation-only restyle. pub struct PostAnimationTasks: u8 { /// Display property was changed from none in animation-only restyle so /// that we need to resolve styles for descendants in a subsequent /// normal restyle. 
const DISPLAY_CHANGED_FROM_NONE_FOR_SMIL = 0x01; } } /// A task to be run in sequential mode on the parent (non-worker) thread. This /// is used by the style system to queue up work which is not safe to do during /// the parallel traversal. pub enum SequentialTask<E: TElement> { /// Entry to avoid an unused type parameter error on servo. Unused(SendElement<E>), /// Performs one of a number of possible tasks related to updating animations based on the /// |tasks| field. These include updating CSS animations/transitions that changed as part /// of the non-animation style traversal, and updating the computed effect properties. #[cfg(feature = "gecko")] UpdateAnimations { /// The target element or pseudo-element. el: SendElement<E>, /// The before-change style for transitions. We use before-change style as the initial /// value of its Keyframe. Required if |tasks| includes CSSTransitions. before_change_style: Option<Arc<ComputedValues>>, /// The tasks which are performed in this SequentialTask. tasks: UpdateAnimationsTasks }, /// Performs one of a number of possible tasks as a result of animation-only restyle. /// Currently we do only process for resolving descendant elements that were display:none /// subtree for SMIL animation. #[cfg(feature = "gecko")] PostAnimation { /// The target element. el: SendElement<E>, /// The tasks which are performed in this SequentialTask. tasks: PostAnimationTasks }, } impl<E: TElement> SequentialTask<E> { /// Executes this task. pub fn execute(self) { use self::SequentialTask::*; debug_assert!(thread_state::get() == ThreadState::LAYOUT); match self { Unused(_) => unreachable!(), #[cfg(feature = "gecko")] UpdateAnimations { el, before_change_style, tasks } => { el.update_animations(before_change_style, tasks); } #[cfg(feature = "gecko")] PostAnimation { el, tasks } => { el.process_post_animation(tasks); } } } /// Creates a task to update various animation-related state on /// a given (pseudo-)element. 
#[cfg(feature = "gecko")] pub fn update_animations(el: E, before_change_style: Option<Arc<ComputedValues>>, tasks: UpdateAnimationsTasks) -> Self { use self::SequentialTask::*; UpdateAnimations { el: unsafe { SendElement::new(el) }, before_change_style: before_change_style, tasks: tasks, } } /// Creates a task to do post-process for a given element as a result of /// animation-only restyle. #[cfg(feature = "gecko")] pub fn process_post_animation(el: E, tasks: PostAnimationTasks) -> Self { use self::SequentialTask::*; PostAnimation { el: unsafe { SendElement::new(el) }, tasks: tasks, } } } type CacheItem<E> = (SendElement<E>, ElementSelectorFlags); /// Map from Elements to ElementSelectorFlags. Used to defer applying selector /// flags until after the traversal. pub struct SelectorFlagsMap<E: TElement> { /// The hashmap storing the flags to apply. map: FnvHashMap<SendElement<E>, ElementSelectorFlags>, /// An LRU cache to avoid hashmap lookups, which can be slow if the map /// gets big. cache: LRUCache<[Entry<CacheItem<E>>; 4 + 1]>, } #[cfg(debug_assertions)] impl<E: TElement> Drop for SelectorFlagsMap<E> { fn drop(&mut self) { debug_assert!(self.map.is_empty()); } } impl<E: TElement> SelectorFlagsMap<E> { /// Creates a new empty SelectorFlagsMap. pub fn new() -> Self { SelectorFlagsMap { map: FnvHashMap::default(), cache: LRUCache::default(), } } /// Inserts some flags into the map for a given element. pub fn insert_flags(&mut self, element: E, flags: ElementSelectorFlags) { let el = unsafe { SendElement::new(element) }; // Check the cache. If the flags have already been noted, we're done. if let Some(item) = self.cache.find(|x| x.0 == el) { if !item.1.contains(flags) { item.1.insert(flags); self.map.get_mut(&el).unwrap().insert(flags); } return; } let f = self.map.entry(el).or_insert(ElementSelectorFlags::empty()); *f |= flags; self.cache.insert((unsafe { SendElement::new(element) }, *f)) } /// Applies the flags. Must be called on the main thread. 
fn apply_flags(&mut self) { debug_assert!(thread_state::get() == ThreadState::LAYOUT); self.cache.evict_all(); for (el, flags) in self.map.drain() { unsafe { el.set_selector_flags(flags); } } } } /// A list of SequentialTasks that get executed on Drop. pub struct SequentialTaskList<E>(Vec<SequentialTask<E>>) where E: TElement; impl<E> ops::Deref for SequentialTaskList<E> where E: TElement, { type Target = Vec<SequentialTask<E>>; fn deref(&self) -> &Self::Target { &self.0 } } impl<E> ops::DerefMut for SequentialTaskList<E> where E: TElement, { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl<E> Drop for SequentialTaskList<E> where E: TElement, { fn drop(&mut self) { debug_assert!(thread_state::get() == ThreadState::LAYOUT); for task in self.0.drain(..) { task.execute() } } } /// A helper type for stack limit checking. This assumes that stacks grow /// down, which is true for all non-ancient CPU architectures. pub struct StackLimitChecker { lower_limit: usize } impl StackLimitChecker { /// Create a new limit checker, for this thread, allowing further use /// of up to |stack_size| bytes beyond (below) the current stack pointer. #[inline(never)] pub fn new(stack_size_limit: usize) -> Self { StackLimitChecker { lower_limit: StackLimitChecker::get_sp() - stack_size_limit } } /// Checks whether the previously stored stack limit has now been exceeded. #[inline(never)] pub fn limit_exceeded(&self) -> bool { let curr_sp = StackLimitChecker::get_sp(); // Do some sanity-checking to ensure that our invariants hold, even in // the case where we've exceeded the soft limit. // // The correctness of depends on the assumption that no stack wraps // around the end of the address space. if cfg!(debug_assertions) { // Compute the actual bottom of the stack by subtracting our safety // margin from our soft limit. 
Note that this will be slightly below // the actual bottom of the stack, because there are a few initial // frames on the stack before we do the measurement that computes // the limit. let stack_bottom = self.lower_limit - STACK_SAFETY_MARGIN_KB * 1024; // The bottom of the stack should be below the current sp. If it // isn't, that means we've either waited too long to check the limit // and burned through our safety margin (in which case we probably // would have segfaulted by now), or we're using a limit computed for // a different thread. debug_assert!(stack_bottom < curr_sp); // Compute the distance between the current sp and the bottom of // the stack, and compare it against the current stack. It should be // no further from us than the total stack size. We allow some slop // to handle the fact that stack_bottom is a bit further than the // bottom of the stack, as discussed above. let distance_to_stack_bottom = curr_sp - stack_bottom; let max_allowable_distance = (STYLE_THREAD_STACK_SIZE_KB + 10) * 1024; debug_assert!(distance_to_stack_bottom <= max_allowable_distance); } // The actual bounds check. curr_sp <= self.lower_limit } // Technically, rustc can optimize this away, but shouldn't for now. // We should fix this once black_box is stable. #[inline(always)] fn get_sp() -> usize { let mut foo: usize = 42; (&mut foo as *mut usize) as usize } } /// A thread-local style context. /// /// This context contains data that needs to be used during restyling, but is /// not required to be unique among worker threads, so we create one per worker /// thread in order to be able to mutate it without locking. pub struct ThreadLocalStyleContext<E: TElement> { /// A cache to share style among siblings. pub sharing_cache: StyleSharingCache<E>, /// A cache from matched properties to elements that match those. pub rule_cache: RuleCache, /// The bloom filter used to fast-reject selector-matching. 
pub bloom_filter: StyleBloom<E>, /// A channel on which new animations that have been triggered by style /// recalculation can be sent. #[cfg(feature = "servo")] pub new_animations_sender: Sender<Animation>, /// A set of tasks to be run (on the parent thread) in sequential mode after /// the rest of the styling is complete. This is useful for /// infrequently-needed non-threadsafe operations. /// /// It's important that goes after the style sharing cache and the bloom /// filter, to ensure they're dropped before we execute the tasks, which /// could create another ThreadLocalStyleContext for style computation. pub tasks: SequentialTaskList<E>, /// ElementSelectorFlags that need to be applied after the traversal is /// complete. This map is used in cases where the matching algorithm needs /// to set flags on elements it doesn't have exclusive access to (i.e. other /// than the current element). pub selector_flags: SelectorFlagsMap<E>, /// Statistics about the traversal. pub statistics: TraversalStatistics, /// The struct used to compute and cache font metrics from style /// for evaluation of the font-relative em/ch units and font-size pub font_metrics_provider: E::FontMetricsProvider, /// A checker used to ensure that parallel.rs does not recurse indefinitely /// even on arbitrarily deep trees. See Gecko bug 1376883. pub stack_limit_checker: StackLimitChecker, /// A cache for nth-index-like selectors. pub nth_index_cache: NthIndexCache, } impl<E: TElement> ThreadLocalStyleContext<E> { /// Creates a new `ThreadLocalStyleContext` from a shared one. 
#[cfg(feature = "servo")] pub fn new(shared: &SharedStyleContext) -> Self { ThreadLocalStyleContext { sharing_cache: StyleSharingCache::new(), rule_cache: RuleCache::new(), bloom_filter: StyleBloom::new(), new_animations_sender: shared.local_context_creation_data.lock().unwrap().new_animations_sender.clone(), tasks: SequentialTaskList(Vec::new()), selector_flags: SelectorFlagsMap::new(), statistics: TraversalStatistics::default(),
font_metrics_provider: E::FontMetricsProvider::create_from(shared), stack_limit_checker: StackLimitChecker::new( (STYLE_THREAD_STACK_SIZE_KB - STACK_SAFETY_MARGIN_KB) * 1024), nth_index_cache: NthIndexCache::default(), } } #[cfg(feature = "gecko")] /// Creates a new `ThreadLocalStyleContext` from a shared one. pub fn new(shared: &SharedStyleContext) -> Self { ThreadLocalStyleContext { sharing_cache: StyleSharingCache::new(), rule_cache: RuleCache::new(), bloom_filter: StyleBloom::new(), tasks: SequentialTaskList(Vec::new()), selector_flags: SelectorFlagsMap::new(), statistics: TraversalStatistics::default(), font_metrics_provider: E::FontMetricsProvider::create_from(shared), stack_limit_checker: StackLimitChecker::new( (STYLE_THREAD_STACK_SIZE_KB - STACK_SAFETY_MARGIN_KB) * 1024), nth_index_cache: NthIndexCache::default(), } } } impl<E: TElement> Drop for ThreadLocalStyleContext<E> { fn drop(&mut self) { debug_assert!(thread_state::get() == ThreadState::LAYOUT); // Apply any slow selector flags that need to be set on parents. self.selector_flags.apply_flags(); } } /// A `StyleContext` is just a simple container for a immutable reference to a /// shared style context, and a mutable reference to a local one. pub struct StyleContext<'a, E: TElement + 'a> { /// The shared style context reference. pub shared: &'a SharedStyleContext<'a>, /// The thread-local style context (mutable) reference. pub thread_local: &'a mut ThreadLocalStyleContext<E>, } /// A registered painter #[cfg(feature = "servo")] pub trait RegisteredSpeculativePainter: SpeculativePainter { /// The name it was registered with fn name(&self) -> Atom; /// The properties it was registered with fn properties(&self) -> &FnvHashMap<Atom, PropertyId>; } /// A set of registered painters #[cfg(feature = "servo")] pub trait RegisteredSpeculativePainters: Sync { /// Look up a speculative painter fn get(&self, name: &Atom) -> Option<&RegisteredSpeculativePainter>; }
random_line_split
context.rs
/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! The context within which style is calculated. #[cfg(feature = "servo")] use animation::Animation; use app_units::Au; use bloom::StyleBloom; use data::{EagerPseudoStyles, ElementData}; use dom::{TElement, SendElement}; #[cfg(feature = "servo")] use dom::OpaqueNode; use euclid::Size2D; use euclid::TypedScale; use fnv::FnvHashMap; use font_metrics::FontMetricsProvider; #[cfg(feature = "gecko")] use gecko_bindings::structs; use parallel::{STACK_SAFETY_MARGIN_KB, STYLE_THREAD_STACK_SIZE_KB}; #[cfg(feature = "servo")] use parking_lot::RwLock; use properties::ComputedValues; #[cfg(feature = "servo")] use properties::PropertyId; use rule_cache::RuleCache; use rule_tree::StrongRuleNode; use selector_parser::{EAGER_PSEUDO_COUNT, SnapshotMap}; use selectors::NthIndexCache; use selectors::matching::ElementSelectorFlags; use servo_arc::Arc; #[cfg(feature = "servo")] use servo_atoms::Atom; use shared_lock::StylesheetGuards; use sharing::StyleSharingCache; use std::fmt; use std::ops; #[cfg(feature = "servo")] use std::sync::Mutex; #[cfg(feature = "servo")] use std::sync::mpsc::Sender; use style_traits::CSSPixel; use style_traits::DevicePixel; #[cfg(feature = "servo")] use style_traits::SpeculativePainter; use stylist::Stylist; use thread_state::{self, ThreadState}; use time; use timer::Timer; use traversal::DomTraversal; use traversal_flags::TraversalFlags; use uluru::{Entry, LRUCache}; pub use selectors::matching::QuirksMode; /// This structure is used to create a local style context from a shared one. #[cfg(feature = "servo")] pub struct ThreadLocalStyleContextCreationInfo { new_animations_sender: Sender<Animation>, } #[cfg(feature = "servo")] impl ThreadLocalStyleContextCreationInfo { /// Trivially constructs a `ThreadLocalStyleContextCreationInfo`. 
pub fn new(animations_sender: Sender<Animation>) -> Self { ThreadLocalStyleContextCreationInfo { new_animations_sender: animations_sender, } } } /// A global options structure for the style system. We use this instead of /// opts to abstract across Gecko and Servo. #[derive(Clone)] pub struct StyleSystemOptions { /// Whether the style sharing cache is disabled. pub disable_style_sharing_cache: bool, /// Whether we should dump statistics about the style system. pub dump_style_statistics: bool, /// The minimum number of elements that must be traversed to trigger a dump /// of style statistics. pub style_statistics_threshold: usize, } #[cfg(feature = "gecko")] fn get_env_bool(name: &str) -> bool { use std::env; match env::var(name) { Ok(s) => !s.is_empty(), Err(_) => false, } } const DEFAULT_STATISTICS_THRESHOLD: usize = 50; #[cfg(feature = "gecko")] fn get_env_usize(name: &str) -> Option<usize> { use std::env; env::var(name).ok().map(|s| { s.parse::<usize>().expect("Couldn't parse environmental variable as usize") }) } impl Default for StyleSystemOptions { #[cfg(feature = "servo")] fn default() -> Self { use servo_config::opts; StyleSystemOptions { disable_style_sharing_cache: opts::get().disable_share_style_cache, dump_style_statistics: opts::get().style_sharing_stats, style_statistics_threshold: DEFAULT_STATISTICS_THRESHOLD, } } #[cfg(feature = "gecko")] fn default() -> Self { StyleSystemOptions { disable_style_sharing_cache: get_env_bool("DISABLE_STYLE_SHARING_CACHE"), dump_style_statistics: get_env_bool("DUMP_STYLE_STATISTICS"), style_statistics_threshold: get_env_usize("STYLE_STATISTICS_THRESHOLD") .unwrap_or(DEFAULT_STATISTICS_THRESHOLD), } } } impl StyleSystemOptions { #[cfg(feature = "servo")] /// On Gecko's nightly build? pub fn is_nightly(&self) -> bool { false } #[cfg(feature = "gecko")] /// On Gecko's nightly build? #[inline] pub fn is_nightly(&self) -> bool { structs::GECKO_IS_NIGHTLY } } /// A shared style context. 
/// /// There's exactly one of these during a given restyle traversal, and it's /// shared among the worker threads. pub struct SharedStyleContext<'a> { /// The CSS selector stylist. pub stylist: &'a Stylist, /// Whether visited styles are enabled. /// /// They may be disabled when Gecko's pref layout.css.visited_links_enabled /// is false, or when in private browsing mode. pub visited_styles_enabled: bool, /// Configuration options. pub options: StyleSystemOptions, /// Guards for pre-acquired locks pub guards: StylesheetGuards<'a>, /// The current timer for transitions and animations. This is needed to test /// them. pub timer: Timer, /// Flags controlling how we traverse the tree. pub traversal_flags: TraversalFlags, /// A map with our snapshots in order to handle restyle hints. pub snapshot_map: &'a SnapshotMap, /// The animations that are currently running. #[cfg(feature = "servo")] pub running_animations: Arc<RwLock<FnvHashMap<OpaqueNode, Vec<Animation>>>>, /// The list of animations that have expired since the last style recalculation. #[cfg(feature = "servo")] pub expired_animations: Arc<RwLock<FnvHashMap<OpaqueNode, Vec<Animation>>>>, /// Paint worklets #[cfg(feature = "servo")] pub registered_speculative_painters: &'a RegisteredSpeculativePainters, /// Data needed to create the thread-local style context from the shared one. #[cfg(feature = "servo")] pub local_context_creation_data: Mutex<ThreadLocalStyleContextCreationInfo>, } impl<'a> SharedStyleContext<'a> { /// Return a suitable viewport size in order to be used for viewport units. pub fn viewport_size(&self) -> Size2D<Au> { self.stylist.device().au_viewport_size() } /// The device pixel ratio pub fn device_pixel_ratio(&self) -> TypedScale<f32, CSSPixel, DevicePixel> { self.stylist.device().device_pixel_ratio() } /// The quirks mode of the document. 
pub fn quirks_mode(&self) -> QuirksMode { self.stylist.quirks_mode() } } /// The structure holds various intermediate inputs that are eventually used by /// by the cascade. /// /// The matching and cascading process stores them in this format temporarily /// within the `CurrentElementInfo`. At the end of the cascade, they are folded /// down into the main `ComputedValues` to reduce memory usage per element while /// still remaining accessible. #[derive(Clone, Default)] pub struct CascadeInputs { /// The rule node representing the ordered list of rules matched for this /// node. pub rules: Option<StrongRuleNode>, /// The rule node representing the ordered list of rules matched for this /// node if visited, only computed if there's a relevant link for this /// element. A element's "relevant link" is the element being matched if it /// is a link or the nearest ancestor link. pub visited_rules: Option<StrongRuleNode>, } impl CascadeInputs { /// Construct inputs from previous cascade results, if any. pub fn new_from_style(style: &ComputedValues) -> Self { CascadeInputs { rules: style.rules.clone(), visited_rules: style.visited_style().and_then(|v| v.rules.clone()), } } } // We manually implement Debug for CascadeInputs so that we can avoid the // verbose stringification of ComputedValues for normal logging. impl fmt::Debug for CascadeInputs { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { write!(f, "CascadeInputs {{ rules: {:?}, visited_rules: {:?}, .. }}", self.rules, self.visited_rules) } } /// A list of cascade inputs for eagerly-cascaded pseudo-elements. /// The list is stored inline. #[derive(Debug)] pub struct EagerPseudoCascadeInputs(Option<[Option<CascadeInputs>; EAGER_PSEUDO_COUNT]>); // Manually implement `Clone` here because the derived impl of `Clone` for // array types assumes the value inside is `Copy`. 
impl Clone for EagerPseudoCascadeInputs { fn clone(&self) -> Self { if self.0.is_none() { return EagerPseudoCascadeInputs(None) } let self_inputs = self.0.as_ref().unwrap(); let mut inputs: [Option<CascadeInputs>; EAGER_PSEUDO_COUNT] = Default::default(); for i in 0..EAGER_PSEUDO_COUNT { inputs[i] = self_inputs[i].clone(); } EagerPseudoCascadeInputs(Some(inputs)) } } impl EagerPseudoCascadeInputs { /// Construct inputs from previous cascade results, if any. fn new_from_style(styles: &EagerPseudoStyles) -> Self { EagerPseudoCascadeInputs(styles.as_optional_array().map(|styles| { let mut inputs: [Option<CascadeInputs>; EAGER_PSEUDO_COUNT] = Default::default(); for i in 0..EAGER_PSEUDO_COUNT { inputs[i] = styles[i].as_ref().map(|s| CascadeInputs::new_from_style(s)); } inputs })) } /// Returns the list of rules, if they exist. pub fn into_array(self) -> Option<[Option<CascadeInputs>; EAGER_PSEUDO_COUNT]> { self.0 } } /// The cascade inputs associated with a node, including those for any /// pseudo-elements. /// /// The matching and cascading process stores them in this format temporarily /// within the `CurrentElementInfo`. At the end of the cascade, they are folded /// down into the main `ComputedValues` to reduce memory usage per element while /// still remaining accessible. #[derive(Clone, Debug)] pub struct ElementCascadeInputs { /// The element's cascade inputs. pub primary: CascadeInputs, /// A list of the inputs for the element's eagerly-cascaded pseudo-elements. pub pseudos: EagerPseudoCascadeInputs, } impl ElementCascadeInputs { /// Construct inputs from previous cascade results, if any. pub fn new_from_element_data(data: &ElementData) -> Self { debug_assert!(data.has_styles()); ElementCascadeInputs { primary: CascadeInputs::new_from_style(data.styles.primary()), pseudos: EagerPseudoCascadeInputs::new_from_style(&data.styles.pseudos), } } } /// Statistics gathered during the traversal. 
We gather statistics on each /// thread and then combine them after the threads join via the Add /// implementation below. #[derive(Default)] pub struct TraversalStatistics { /// The total number of elements traversed. pub elements_traversed: u32, /// The number of elements where has_styles() went from false to true. pub elements_styled: u32, /// The number of elements for which we performed selector matching. pub elements_matched: u32, /// The number of cache hits from the StyleSharingCache. pub styles_shared: u32, /// The number of styles reused via rule node comparison from the /// StyleSharingCache. pub styles_reused: u32, /// The number of selectors in the stylist. pub selectors: u32, /// The number of revalidation selectors. pub revalidation_selectors: u32, /// The number of state/attr dependencies in the dependency set. pub dependency_selectors: u32, /// The number of declarations in the stylist. pub declarations: u32, /// The number of times the stylist was rebuilt. pub stylist_rebuilds: u32, /// Time spent in the traversal, in milliseconds. pub traversal_time_ms: f64, /// Whether this was a parallel traversal. pub is_parallel: Option<bool>, /// Whether this is a "large" traversal. pub is_large: Option<bool>, } /// Implementation of Add to aggregate statistics across different threads. 
impl<'a> ops::Add for &'a TraversalStatistics { type Output = TraversalStatistics; fn add(self, other: Self) -> TraversalStatistics { debug_assert!(self.traversal_time_ms == 0.0 && other.traversal_time_ms == 0.0, "traversal_time_ms should be set at the end by the caller"); debug_assert!(self.selectors == 0, "set at the end"); debug_assert!(self.revalidation_selectors == 0, "set at the end"); debug_assert!(self.dependency_selectors == 0, "set at the end"); debug_assert!(self.declarations == 0, "set at the end"); debug_assert!(self.stylist_rebuilds == 0, "set at the end"); TraversalStatistics { elements_traversed: self.elements_traversed + other.elements_traversed, elements_styled: self.elements_styled + other.elements_styled, elements_matched: self.elements_matched + other.elements_matched, styles_shared: self.styles_shared + other.styles_shared, styles_reused: self.styles_reused + other.styles_reused, selectors: 0, revalidation_selectors: 0, dependency_selectors: 0, declarations: 0, stylist_rebuilds: 0, traversal_time_ms: 0.0, is_parallel: None, is_large: None, } } } /// Format the statistics in a way that the performance test harness understands. 
/// See https://bugzilla.mozilla.org/show_bug.cgi?id=1331856#c2 impl fmt::Display for TraversalStatistics { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { debug_assert!(self.traversal_time_ms != 0.0, "should have set traversal time"); writeln!(f, "[PERF] perf block start")?; writeln!(f, "[PERF],traversal,{}", if self.is_parallel.unwrap() { "parallel" } else { "sequential" })?; writeln!(f, "[PERF],elements_traversed,{}", self.elements_traversed)?; writeln!(f, "[PERF],elements_styled,{}", self.elements_styled)?; writeln!(f, "[PERF],elements_matched,{}", self.elements_matched)?; writeln!(f, "[PERF],styles_shared,{}", self.styles_shared)?; writeln!(f, "[PERF],styles_reused,{}", self.styles_reused)?; writeln!(f, "[PERF],selectors,{}", self.selectors)?; writeln!(f, "[PERF],revalidation_selectors,{}", self.revalidation_selectors)?; writeln!(f, "[PERF],dependency_selectors,{}", self.dependency_selectors)?; writeln!(f, "[PERF],declarations,{}", self.declarations)?; writeln!(f, "[PERF],stylist_rebuilds,{}", self.stylist_rebuilds)?; writeln!(f, "[PERF],traversal_time_ms,{}", self.traversal_time_ms)?; writeln!(f, "[PERF] perf block end") } } impl TraversalStatistics { /// Computes the traversal time given the start time in seconds. 
pub fn finish<E, D>(&mut self, traversal: &D, parallel: bool, start: f64) where E: TElement, D: DomTraversal<E>, { let threshold = traversal.shared_context().options.style_statistics_threshold; let stylist = traversal.shared_context().stylist; self.is_parallel = Some(parallel); self.is_large = Some(self.elements_traversed as usize >= threshold); self.traversal_time_ms = (time::precise_time_s() - start) * 1000.0; self.selectors = stylist.num_selectors() as u32; self.revalidation_selectors = stylist.num_revalidation_selectors() as u32; self.dependency_selectors = stylist.num_invalidations() as u32; self.declarations = stylist.num_declarations() as u32; self.stylist_rebuilds = stylist.num_rebuilds() as u32; } /// Returns whether this traversal is 'large' in order to avoid console spam /// from lots of tiny traversals. pub fn is_large_traversal(&self) -> bool { self.is_large.unwrap() } } #[cfg(feature = "gecko")] bitflags! { /// Represents which tasks are performed in a SequentialTask of /// UpdateAnimations which is a result of normal restyle. pub struct UpdateAnimationsTasks: u8 { /// Update CSS Animations. const CSS_ANIMATIONS = structs::UpdateAnimationsTasks_CSSAnimations; /// Update CSS Transitions. const CSS_TRANSITIONS = structs::UpdateAnimationsTasks_CSSTransitions; /// Update effect properties. const EFFECT_PROPERTIES = structs::UpdateAnimationsTasks_EffectProperties; /// Update animation cacade results for animations running on the compositor. const CASCADE_RESULTS = structs::UpdateAnimationsTasks_CascadeResults; } } #[cfg(feature = "gecko")] bitflags! { /// Represents which tasks are performed in a SequentialTask as a result of /// animation-only restyle. pub struct PostAnimationTasks: u8 { /// Display property was changed from none in animation-only restyle so /// that we need to resolve styles for descendants in a subsequent /// normal restyle. 
const DISPLAY_CHANGED_FROM_NONE_FOR_SMIL = 0x01; } } /// A task to be run in sequential mode on the parent (non-worker) thread. This /// is used by the style system to queue up work which is not safe to do during /// the parallel traversal. pub enum SequentialTask<E: TElement> { /// Entry to avoid an unused type parameter error on servo. Unused(SendElement<E>), /// Performs one of a number of possible tasks related to updating animations based on the /// |tasks| field. These include updating CSS animations/transitions that changed as part /// of the non-animation style traversal, and updating the computed effect properties. #[cfg(feature = "gecko")] UpdateAnimations { /// The target element or pseudo-element. el: SendElement<E>, /// The before-change style for transitions. We use before-change style as the initial /// value of its Keyframe. Required if |tasks| includes CSSTransitions. before_change_style: Option<Arc<ComputedValues>>, /// The tasks which are performed in this SequentialTask. tasks: UpdateAnimationsTasks }, /// Performs one of a number of possible tasks as a result of animation-only restyle. /// Currently we do only process for resolving descendant elements that were display:none /// subtree for SMIL animation. #[cfg(feature = "gecko")] PostAnimation { /// The target element. el: SendElement<E>, /// The tasks which are performed in this SequentialTask. tasks: PostAnimationTasks }, } impl<E: TElement> SequentialTask<E> { /// Executes this task. pub fn execute(self) { use self::SequentialTask::*; debug_assert!(thread_state::get() == ThreadState::LAYOUT); match self { Unused(_) => unreachable!(), #[cfg(feature = "gecko")] UpdateAnimations { el, before_change_style, tasks } => { el.update_animations(before_change_style, tasks); } #[cfg(feature = "gecko")] PostAnimation { el, tasks } => { el.process_post_animation(tasks); } } } /// Creates a task to update various animation-related state on /// a given (pseudo-)element. 
#[cfg(feature = "gecko")] pub fn update_animations(el: E, before_change_style: Option<Arc<ComputedValues>>, tasks: UpdateAnimationsTasks) -> Self { use self::SequentialTask::*; UpdateAnimations { el: unsafe { SendElement::new(el) }, before_change_style: before_change_style, tasks: tasks, } } /// Creates a task to do post-process for a given element as a result of /// animation-only restyle. #[cfg(feature = "gecko")] pub fn process_post_animation(el: E, tasks: PostAnimationTasks) -> Self { use self::SequentialTask::*; PostAnimation { el: unsafe { SendElement::new(el) }, tasks: tasks, } } } type CacheItem<E> = (SendElement<E>, ElementSelectorFlags); /// Map from Elements to ElementSelectorFlags. Used to defer applying selector /// flags until after the traversal. pub struct SelectorFlagsMap<E: TElement> { /// The hashmap storing the flags to apply. map: FnvHashMap<SendElement<E>, ElementSelectorFlags>, /// An LRU cache to avoid hashmap lookups, which can be slow if the map /// gets big. cache: LRUCache<[Entry<CacheItem<E>>; 4 + 1]>, } #[cfg(debug_assertions)] impl<E: TElement> Drop for SelectorFlagsMap<E> { fn drop(&mut self) { debug_assert!(self.map.is_empty()); } } impl<E: TElement> SelectorFlagsMap<E> { /// Creates a new empty SelectorFlagsMap. pub fn new() -> Self { SelectorFlagsMap { map: FnvHashMap::default(), cache: LRUCache::default(), } } /// Inserts some flags into the map for a given element. pub fn insert_flags(&mut self, element: E, flags: ElementSelectorFlags) { let el = unsafe { SendElement::new(element) }; // Check the cache. If the flags have already been noted, we're done. if let Some(item) = self.cache.find(|x| x.0 == el) { if !item.1.contains(flags) { item.1.insert(flags); self.map.get_mut(&el).unwrap().insert(flags); } return; } let f = self.map.entry(el).or_insert(ElementSelectorFlags::empty()); *f |= flags; self.cache.insert((unsafe { SendElement::new(element) }, *f)) } /// Applies the flags. Must be called on the main thread. 
fn apply_flags(&mut self) { debug_assert!(thread_state::get() == ThreadState::LAYOUT); self.cache.evict_all(); for (el, flags) in self.map.drain() { unsafe { el.set_selector_flags(flags); } } } } /// A list of SequentialTasks that get executed on Drop. pub struct SequentialTaskList<E>(Vec<SequentialTask<E>>) where E: TElement; impl<E> ops::Deref for SequentialTaskList<E> where E: TElement, { type Target = Vec<SequentialTask<E>>; fn deref(&self) -> &Self::Target { &self.0 } } impl<E> ops::DerefMut for SequentialTaskList<E> where E: TElement, { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl<E> Drop for SequentialTaskList<E> where E: TElement, { fn drop(&mut self) { debug_assert!(thread_state::get() == ThreadState::LAYOUT); for task in self.0.drain(..) { task.execute() } } } /// A helper type for stack limit checking. This assumes that stacks grow /// down, which is true for all non-ancient CPU architectures. pub struct StackLimitChecker { lower_limit: usize } impl StackLimitChecker { /// Create a new limit checker, for this thread, allowing further use /// of up to |stack_size| bytes beyond (below) the current stack pointer. #[inline(never)] pub fn new(stack_size_limit: usize) -> Self { StackLimitChecker { lower_limit: StackLimitChecker::get_sp() - stack_size_limit } } /// Checks whether the previously stored stack limit has now been exceeded. #[inline(never)] pub fn limit_exceeded(&self) -> bool { let curr_sp = StackLimitChecker::get_sp(); // Do some sanity-checking to ensure that our invariants hold, even in // the case where we've exceeded the soft limit. // // The correctness of depends on the assumption that no stack wraps // around the end of the address space. if cfg!(debug_assertions) { // Compute the actual bottom of the stack by subtracting our safety // margin from our soft limit. 
Note that this will be slightly below // the actual bottom of the stack, because there are a few initial // frames on the stack before we do the measurement that computes // the limit. let stack_bottom = self.lower_limit - STACK_SAFETY_MARGIN_KB * 1024; // The bottom of the stack should be below the current sp. If it // isn't, that means we've either waited too long to check the limit // and burned through our safety margin (in which case we probably // would have segfaulted by now), or we're using a limit computed for // a different thread. debug_assert!(stack_bottom < curr_sp); // Compute the distance between the current sp and the bottom of // the stack, and compare it against the current stack. It should be // no further from us than the total stack size. We allow some slop // to handle the fact that stack_bottom is a bit further than the // bottom of the stack, as discussed above. let distance_to_stack_bottom = curr_sp - stack_bottom; let max_allowable_distance = (STYLE_THREAD_STACK_SIZE_KB + 10) * 1024; debug_assert!(distance_to_stack_bottom <= max_allowable_distance); } // The actual bounds check. curr_sp <= self.lower_limit } // Technically, rustc can optimize this away, but shouldn't for now. // We should fix this once black_box is stable. #[inline(always)] fn
() -> usize { let mut foo: usize = 42; (&mut foo as *mut usize) as usize } } /// A thread-local style context. /// /// This context contains data that needs to be used during restyling, but is /// not required to be unique among worker threads, so we create one per worker /// thread in order to be able to mutate it without locking. pub struct ThreadLocalStyleContext<E: TElement> { /// A cache to share style among siblings. pub sharing_cache: StyleSharingCache<E>, /// A cache from matched properties to elements that match those. pub rule_cache: RuleCache, /// The bloom filter used to fast-reject selector-matching. pub bloom_filter: StyleBloom<E>, /// A channel on which new animations that have been triggered by style /// recalculation can be sent. #[cfg(feature = "servo")] pub new_animations_sender: Sender<Animation>, /// A set of tasks to be run (on the parent thread) in sequential mode after /// the rest of the styling is complete. This is useful for /// infrequently-needed non-threadsafe operations. /// /// It's important that goes after the style sharing cache and the bloom /// filter, to ensure they're dropped before we execute the tasks, which /// could create another ThreadLocalStyleContext for style computation. pub tasks: SequentialTaskList<E>, /// ElementSelectorFlags that need to be applied after the traversal is /// complete. This map is used in cases where the matching algorithm needs /// to set flags on elements it doesn't have exclusive access to (i.e. other /// than the current element). pub selector_flags: SelectorFlagsMap<E>, /// Statistics about the traversal. pub statistics: TraversalStatistics, /// The struct used to compute and cache font metrics from style /// for evaluation of the font-relative em/ch units and font-size pub font_metrics_provider: E::FontMetricsProvider, /// A checker used to ensure that parallel.rs does not recurse indefinitely /// even on arbitrarily deep trees. See Gecko bug 1376883. 
pub stack_limit_checker: StackLimitChecker, /// A cache for nth-index-like selectors. pub nth_index_cache: NthIndexCache, } impl<E: TElement> ThreadLocalStyleContext<E> { /// Creates a new `ThreadLocalStyleContext` from a shared one. #[cfg(feature = "servo")] pub fn new(shared: &SharedStyleContext) -> Self { ThreadLocalStyleContext { sharing_cache: StyleSharingCache::new(), rule_cache: RuleCache::new(), bloom_filter: StyleBloom::new(), new_animations_sender: shared.local_context_creation_data.lock().unwrap().new_animations_sender.clone(), tasks: SequentialTaskList(Vec::new()), selector_flags: SelectorFlagsMap::new(), statistics: TraversalStatistics::default(), font_metrics_provider: E::FontMetricsProvider::create_from(shared), stack_limit_checker: StackLimitChecker::new( (STYLE_THREAD_STACK_SIZE_KB - STACK_SAFETY_MARGIN_KB) * 1024), nth_index_cache: NthIndexCache::default(), } } #[cfg(feature = "gecko")] /// Creates a new `ThreadLocalStyleContext` from a shared one. pub fn new(shared: &SharedStyleContext) -> Self { ThreadLocalStyleContext { sharing_cache: StyleSharingCache::new(), rule_cache: RuleCache::new(), bloom_filter: StyleBloom::new(), tasks: SequentialTaskList(Vec::new()), selector_flags: SelectorFlagsMap::new(), statistics: TraversalStatistics::default(), font_metrics_provider: E::FontMetricsProvider::create_from(shared), stack_limit_checker: StackLimitChecker::new( (STYLE_THREAD_STACK_SIZE_KB - STACK_SAFETY_MARGIN_KB) * 1024), nth_index_cache: NthIndexCache::default(), } } } impl<E: TElement> Drop for ThreadLocalStyleContext<E> { fn drop(&mut self) { debug_assert!(thread_state::get() == ThreadState::LAYOUT); // Apply any slow selector flags that need to be set on parents. self.selector_flags.apply_flags(); } } /// A `StyleContext` is just a simple container for a immutable reference to a /// shared style context, and a mutable reference to a local one. pub struct StyleContext<'a, E: TElement + 'a> { /// The shared style context reference. 
pub shared: &'a SharedStyleContext<'a>, /// The thread-local style context (mutable) reference. pub thread_local: &'a mut ThreadLocalStyleContext<E>, } /// A registered painter #[cfg(feature = "servo")] pub trait RegisteredSpeculativePainter: SpeculativePainter { /// The name it was registered with fn name(&self) -> Atom; /// The properties it was registered with fn properties(&self) -> &FnvHashMap<Atom, PropertyId>; } /// A set of registered painters #[cfg(feature = "servo")] pub trait RegisteredSpeculativePainters: Sync { /// Look up a speculative painter fn get(&self, name: &Atom) -> Option<&RegisteredSpeculativePainter>; }
get_sp
identifier_name
builder.rs
use ramp::{ Int, RandomInt}; use rand::{ OsRng, StdRng }; use super::{ KeyPair, PublicKey, PrivateKey }; use bigint_extensions::{ ModPow, ModInverse }; pub struct KeyPairBuilder { bits: usize, certainty: u32 } impl KeyPairBuilder { pub fn new() -> KeyPairBuilder { KeyPairBuilder { bits: 512, certainty: 4 } } pub fn bits(&mut self, bits: usize) -> &mut KeyPairBuilder { self.bits = bits; self } pub fn certainty(&mut self, certainty: u32) -> &mut KeyPairBuilder { self.certainty = certainty; self } pub fn finalize(&self) -> KeyPair { let mut sec_rng = match OsRng::new() { Ok(g) => g, Err(e) => panic!("Failed to obtain OS RNG: {}", e) }; let p = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty); let q = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty); let n = p * q; let n_squared = &n * &n; let p_minus_one = p - Int::one(); let q_minus_one = q - Int::one(); let lambda = p_minus_one.lcm(&q_minus_one); let mut g; let mut helper; loop { g = sec_rng.gen_uint(self.bits); helper = calculate_l(&g.mod_pow(&lambda, &n_squared), &n); let a = helper.gcd(&n); if a == Int::one() { break; } } let public_key = PublicKey { bits: self.bits, n: n.clone(), n_squared: n_squared, g: g.clone() }; let private_key = PrivateKey { lambda: lambda, denominator: helper.mod_inverse(&n).unwrap() }; KeyPair { public_key: public_key, private_key: private_key } } } fn calculate_l(u: &Int, n: &Int) -> Int{ let r = u - Int::one(); r / n } fn generate_possible_prime(sec_rng: &mut OsRng, bits: usize, certainty: u32) -> Int { let mut pp; 'outer: loop { pp = sec_rng.gen_uint(bits); if (&pp % &Int::from(2)) == Int::zero() { continue; } let primes = [ 2, 3, 5, 7, 11, 13, 17, 19, 23 ]; for prime in primes.iter() { let big_prime = Int::from(*prime); if &pp % big_prime == Int::zero() { continue 'outer; } } if miller_rabin(&pp, certainty)
} return pp; } fn miller_rabin(n: &Int, k: u32) -> bool{ if n <= &Int::from(3) { return true; } let n_minus_one = n - Int::one(); let mut s = 0; let mut r = n_minus_one.clone(); let two = Int::from(2); while &r % &two == Int::zero() { s += 1; r = r / &two; } let mut rng = match StdRng::new() { Ok(g) => g, Err(e) => panic!("Failed to obtain OS RNG: {}", e) }; let mut a = Int::from(2); for _ in 0..k { let mut x = a.mod_pow(&r, &n); if x == Int::one() || x == n_minus_one { continue; } for _ in 1..(s - 1) { x = &x * &x % n; if x == Int::one(){ return false; } } if x != n_minus_one{ return false; } a = rng.gen_int_range(&Int::from(2), &n_minus_one); } true } #[cfg(test)] mod tests { use super::generate_possible_prime; use rand::OsRng; use test::Bencher; #[bench] fn bench_generate_possible_prime(b: &mut Bencher) { let mut rng = match OsRng::new() { Ok(g) => g, Err(e) => panic!("Failed to obtain OS RNG: {}", e) }; b.iter(|| { generate_possible_prime(&mut rng, 64, 10); }); } }
{ break; }
conditional_block
builder.rs
use ramp::{ Int, RandomInt}; use rand::{ OsRng, StdRng }; use super::{ KeyPair, PublicKey, PrivateKey }; use bigint_extensions::{ ModPow, ModInverse }; pub struct KeyPairBuilder { bits: usize, certainty: u32 } impl KeyPairBuilder { pub fn new() -> KeyPairBuilder { KeyPairBuilder { bits: 512, certainty: 4 } } pub fn bits(&mut self, bits: usize) -> &mut KeyPairBuilder { self.bits = bits; self } pub fn certainty(&mut self, certainty: u32) -> &mut KeyPairBuilder { self.certainty = certainty; self } pub fn
(&self) -> KeyPair { let mut sec_rng = match OsRng::new() { Ok(g) => g, Err(e) => panic!("Failed to obtain OS RNG: {}", e) }; let p = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty); let q = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty); let n = p * q; let n_squared = &n * &n; let p_minus_one = p - Int::one(); let q_minus_one = q - Int::one(); let lambda = p_minus_one.lcm(&q_minus_one); let mut g; let mut helper; loop { g = sec_rng.gen_uint(self.bits); helper = calculate_l(&g.mod_pow(&lambda, &n_squared), &n); let a = helper.gcd(&n); if a == Int::one() { break; } } let public_key = PublicKey { bits: self.bits, n: n.clone(), n_squared: n_squared, g: g.clone() }; let private_key = PrivateKey { lambda: lambda, denominator: helper.mod_inverse(&n).unwrap() }; KeyPair { public_key: public_key, private_key: private_key } } } fn calculate_l(u: &Int, n: &Int) -> Int{ let r = u - Int::one(); r / n } fn generate_possible_prime(sec_rng: &mut OsRng, bits: usize, certainty: u32) -> Int { let mut pp; 'outer: loop { pp = sec_rng.gen_uint(bits); if (&pp % &Int::from(2)) == Int::zero() { continue; } let primes = [ 2, 3, 5, 7, 11, 13, 17, 19, 23 ]; for prime in primes.iter() { let big_prime = Int::from(*prime); if &pp % big_prime == Int::zero() { continue 'outer; } } if miller_rabin(&pp, certainty) { break; } } return pp; } fn miller_rabin(n: &Int, k: u32) -> bool{ if n <= &Int::from(3) { return true; } let n_minus_one = n - Int::one(); let mut s = 0; let mut r = n_minus_one.clone(); let two = Int::from(2); while &r % &two == Int::zero() { s += 1; r = r / &two; } let mut rng = match StdRng::new() { Ok(g) => g, Err(e) => panic!("Failed to obtain OS RNG: {}", e) }; let mut a = Int::from(2); for _ in 0..k { let mut x = a.mod_pow(&r, &n); if x == Int::one() || x == n_minus_one { continue; } for _ in 1..(s - 1) { x = &x * &x % n; if x == Int::one(){ return false; } } if x != n_minus_one{ return false; } a = rng.gen_int_range(&Int::from(2), 
&n_minus_one); } true } #[cfg(test)] mod tests { use super::generate_possible_prime; use rand::OsRng; use test::Bencher; #[bench] fn bench_generate_possible_prime(b: &mut Bencher) { let mut rng = match OsRng::new() { Ok(g) => g, Err(e) => panic!("Failed to obtain OS RNG: {}", e) }; b.iter(|| { generate_possible_prime(&mut rng, 64, 10); }); } }
finalize
identifier_name
builder.rs
use ramp::{ Int, RandomInt}; use rand::{ OsRng, StdRng }; use super::{ KeyPair, PublicKey, PrivateKey }; use bigint_extensions::{ ModPow, ModInverse }; pub struct KeyPairBuilder { bits: usize, certainty: u32 } impl KeyPairBuilder { pub fn new() -> KeyPairBuilder { KeyPairBuilder { bits: 512, certainty: 4 } } pub fn bits(&mut self, bits: usize) -> &mut KeyPairBuilder { self.bits = bits; self } pub fn certainty(&mut self, certainty: u32) -> &mut KeyPairBuilder { self.certainty = certainty; self } pub fn finalize(&self) -> KeyPair { let mut sec_rng = match OsRng::new() { Ok(g) => g, Err(e) => panic!("Failed to obtain OS RNG: {}", e) }; let p = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty); let q = &generate_possible_prime(&mut sec_rng, self.bits, self.certainty); let n = p * q; let n_squared = &n * &n; let p_minus_one = p - Int::one(); let q_minus_one = q - Int::one(); let lambda = p_minus_one.lcm(&q_minus_one); let mut g; let mut helper; loop { g = sec_rng.gen_uint(self.bits); helper = calculate_l(&g.mod_pow(&lambda, &n_squared), &n); let a = helper.gcd(&n); if a == Int::one() { break; } } let public_key = PublicKey { bits: self.bits, n: n.clone(), n_squared: n_squared, g: g.clone() }; let private_key = PrivateKey { lambda: lambda, denominator: helper.mod_inverse(&n).unwrap() }; KeyPair { public_key: public_key, private_key: private_key } } } fn calculate_l(u: &Int, n: &Int) -> Int{ let r = u - Int::one(); r / n } fn generate_possible_prime(sec_rng: &mut OsRng, bits: usize, certainty: u32) -> Int { let mut pp; 'outer: loop { pp = sec_rng.gen_uint(bits); if (&pp % &Int::from(2)) == Int::zero() { continue; } let primes = [ 2, 3, 5, 7, 11, 13, 17, 19, 23 ]; for prime in primes.iter() { let big_prime = Int::from(*prime); if &pp % big_prime == Int::zero() { continue 'outer; } } if miller_rabin(&pp, certainty) { break; } } return pp; } fn miller_rabin(n: &Int, k: u32) -> bool{ if n <= &Int::from(3) { return true; } let n_minus_one = n - Int::one(); let 
mut s = 0; let mut r = n_minus_one.clone(); let two = Int::from(2); while &r % &two == Int::zero() { s += 1; r = r / &two;
Ok(g) => g, Err(e) => panic!("Failed to obtain OS RNG: {}", e) }; let mut a = Int::from(2); for _ in 0..k { let mut x = a.mod_pow(&r, &n); if x == Int::one() || x == n_minus_one { continue; } for _ in 1..(s - 1) { x = &x * &x % n; if x == Int::one(){ return false; } } if x != n_minus_one{ return false; } a = rng.gen_int_range(&Int::from(2), &n_minus_one); } true } #[cfg(test)] mod tests { use super::generate_possible_prime; use rand::OsRng; use test::Bencher; #[bench] fn bench_generate_possible_prime(b: &mut Bencher) { let mut rng = match OsRng::new() { Ok(g) => g, Err(e) => panic!("Failed to obtain OS RNG: {}", e) }; b.iter(|| { generate_possible_prime(&mut rng, 64, 10); }); } }
} let mut rng = match StdRng::new() {
random_line_split
cachestatus.js
/* ***** BEGIN LICENSE BLOCK ***** * Version: MPL 1.1/GPL 2.0/LGPL 2.1 * * The contents of this file are subject to the Mozilla Public License Version * 1.1 (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * http://www.mozilla.org/MPL/ * * Software distributed under the License is distributed on an "AS IS" basis, * WITHOUT WARRANTY OF ANY KIND, either express or implied. See the License * for the specific language governing rights and limitations under the * License. * * The Original Code is Cache Status. * * The Initial Developer of the Original Code is * Jason Purdy. * Portions created by the Initial Developer are Copyright (C) 2005 * the Initial Developer. All Rights Reserved. * * Thanks to the Fasterfox Extension for some pointers * * Alternatively, the contents of this file may be used under the terms of * either the GNU General Public License Version 2 or later (the "GPL"), or * the GNU Lesser General Public License Version 2.1 or later (the "LGPL"), * in which case the provisions of the GPL or the LGPL are applicable instead * of those above. If you wish to allow use of your version of this file only * under the terms of either the GPL or the LGPL, and not to allow others to * use your version of this file under the terms of the MPL, indicate your * decision by deleting the provisions above and replace them with the notice * and other provisions required by the GPL or the LGPL. If you do not delete * the provisions above, a recipient may use your version of this file under * the terms of any one of the MPL, the GPL or the LGPL. 
* * ***** END LICENSE BLOCK ***** */ function cs_updated_stat( type, aDeviceInfo, prefs ) { var current = round_memory_usage( aDeviceInfo.totalSize/1024/1024 ); var max = round_memory_usage( aDeviceInfo.maximumSize/1024/1024 ); var cs_id = 'cachestatus'; var bool_pref_key = 'auto_clear'; var int_pref_key = 'ac'; var clear_directive; if ( type == 'memory' ) { cs_id += '-ram-label'; bool_pref_key += '_ram'; int_pref_key += 'r_percent'; clear_directive = 'ram'; // this is some sort of random bug workaround if ( current > max && current == 4096 ) { current = 0; } } else if ( type == 'disk' ) { cs_id += '-hd-label'; bool_pref_key += '_disk'; int_pref_key += 'd_percent'; clear_directive = 'disk'; } else { // offline ... or something else we don't manage return; } /* dump( 'type: ' + type + ' - aDeviceInfo' + aDeviceInfo ); // do we need to auto-clear? dump( "evaling if we need to auto_clear...\n" ); dump( bool_pref_key + ": " + prefs.getBoolPref( bool_pref_key ) + " and " + (( current/max )*100) + " > " + prefs.getIntPref( int_pref_key ) + "\n" ); dump( "new min level: " + prefs.getIntPref( int_pref_key )*.01*max + " > 10\n" ); */ /* This is being disabled for now: http://code.google.com/p/cachestatus/issues/detail?id=10 */ /* if ( prefs.getBoolPref( bool_pref_key ) && prefs.getIntPref( int_pref_key )*.01*max > 10 && (( current/max )*100) > prefs.getIntPref( int_pref_key ) ) { //dump( "clearing!\n" ); cs_clear_cache( clear_directive, 1 ); current = 0; } */ // Now, update the status bar label... 
var wm = Components.classes["@mozilla.org/appshell/window-mediator;1"] .getService(Components.interfaces.nsIWindowMediator); var win = wm.getMostRecentWindow("navigator:browser"); if (win) { win.document.getElementById(cs_id).setAttribute( 'value', current + " MB / " + max + " MB " ); } } function update_cache_status() { var cache_service = Components.classes["@mozilla.org/network/cache-service;1"] .getService(Components.interfaces.nsICacheService); var prefService = Components.classes["@mozilla.org/preferences-service;1"].getService(Components.interfaces.nsIPrefService); var prefs = prefService.getBranch("extensions.cachestatus."); var cache_visitor = { visitEntry: function(a,b) {}, visitDevice: function( device, aDeviceInfo ) { cs_updated_stat( device, aDeviceInfo, prefs ); } } cache_service.visitEntries( cache_visitor ); } /* * This function takes what could be 15.8912576891 and drops it to just * one decimal place. In a future version, I could have the user say * how many decimal places... */ function round_memory_usage( memory ) { memory = parseFloat( memory ); memory *= 10; memory = Math.round(memory)/10; return memory; } // I got the cacheService code from the fasterfox extension // http://www.xulplanet.com/references/xpcomref/ifaces/nsICacheService.html function cs_clear_cache( param, noupdate ) { var cacheService = Components.classes["@mozilla.org/network/cache-service;1"] .getService(Components.interfaces.nsICacheService); if ( param && param == 'ram' ) { cacheService.evictEntries(Components.interfaces.nsICache.STORE_IN_MEMORY); } else if ( param && param == 'disk' ) { cacheService.evictEntries(Components.interfaces.nsICache.STORE_ON_DISK); } else { cacheService.evictEntries(Components.interfaces.nsICache.STORE_ON_DISK); cacheService.evictEntries(Components.interfaces.nsICache.STORE_IN_MEMORY); } if ( ! 
noupdate ) { update_cache_status(); } } /* * Grabbed this helpful bit from: * http://kb.mozillazine.org/On_Page_Load * http://developer.mozilla.org/en/docs/Code_snippets:On_page_load */ var csExtension = { onPageLoad: function(aEvent) { update_cache_status(); }, QueryInterface : function (aIID) { if (aIID.equals(Components.interfaces.nsIObserver) || aIID.equals(Components.interfaces.nsISupports) || aIID.equals(Components.interfaces.nsISupportsWeakReference)) return this; throw Components.results.NS_NOINTERFACE; }, register: function() { var prefService = Components.classes["@mozilla.org/preferences-service;1"].getService(Components.interfaces.nsIPrefService); this._prefs = prefService.getBranch("extensions.cachestatus."); if ( this._prefs.getBoolPref( 'auto_update' ) ) { var appcontent = document.getElementById( 'appcontent' ); if ( appcontent ) appcontent.addEventListener( "DOMContentLoaded", this.onPageLoad, true ); } this._branch = this._prefs; this._branch.QueryInterface(Components.interfaces.nsIPrefBranch2); this._branch.addObserver("", this, true); this._hbox = this.grabHBox(); this.rebuildPresence( this._prefs.getCharPref( 'presence' ) ); this.welcome(); }, welcome: function () { //Do not show welcome page if user has turned it off from Settings. if (!csExtension._prefs.getBoolPref( 'welcome' )) { return } //Detect Firefox version var version = ""; try { version = (navigator.userAgent.match(/Firefox\/([\d\.]*)/) || navigator.userAgent.match(/Thunderbird\/([\d\.]*)/))[1]; } catch (e) {} function welcome(version)
//FF < 4.* var versionComparator = Components.classes["@mozilla.org/xpcom/version-comparator;1"] .getService(Components.interfaces.nsIVersionComparator) .compare(version, "4.0"); if (versionComparator < 0) { var extMan = Components.classes["@mozilla.org/extensions/manager;1"].getService(Components.interfaces.nsIExtensionManager); var addon = extMan.getItemForID("cache@status.org"); welcome(addon.version); } //FF > 4.* else { Components.utils.import("resource://gre/modules/AddonManager.jsm"); AddonManager.getAddonByID("cache@status.org", function (addon) { welcome(addon.version); }); } }, grabHBox: function() { var wm = Components.classes["@mozilla.org/appshell/window-mediator;1"] .getService(Components.interfaces.nsIWindowMediator); var win = wm.getMostRecentWindow("navigator:browser"); var found_hbox; if (win) { this._doc = win.document; found_hbox = win.document.getElementById("cs_presence"); } //dump( "In grabHBox(): WIN: " + win + " HB: " + found_hbox + "\n" ); return found_hbox; }, observe: function(aSubject, aTopic, aData) { if ( aTopic != 'nsPref:changed' ) return; // aSubject is the nsIPrefBranch we're observing (after appropriate QI) // aData is the name of the pref that's been changed (relative to aSubject) //dump( "pref changed: S: " + aSubject + " T: " + aTopic + " D: " + aData + "\n" ); if ( aData == 'auto_update' ) { var add_event_handler = this._prefs.getBoolPref( 'auto_update' ); if ( add_event_handler ) { window.addEventListener( 'load', this.onPageLoad, true ); } else { window.removeEventListener( 'load', this.onPageLoad, true ); } } else if ( aData == 'presence' ) { var presence = this._prefs.getCharPref( 'presence' ); if ( presence == 'original' || presence == 'icons' ) { this.rebuildPresence( presence ); } else { dump( "Unknown presence value: " + presence + "\n" ); } } }, rebuildPresence: function(presence) { // Take the hbox 'cs_presence' and replace it if ( this._hbox == null ) { this._hbox = this.grabHBox(); } var hbox = this._hbox; var 
child_node = hbox.firstChild;
    // Remove every existing child of the hbox before rebuilding.
    while( child_node != null ) {
        hbox.removeChild( child_node );
        child_node = hbox.firstChild;
    }
    // Likewise empty the popupset that holds our tooltips.
    var popupset = this._doc.getElementById( 'cs_popupset' );
    var child_node = popupset.firstChild;
    while( child_node != null ) {
        popupset.removeChild( child_node );
        child_node = popupset.firstChild;
    }
    var string_bundle = this._doc.getElementById( 'cache-status-strings' );
    if ( presence == 'original' ) {
        // Icon plus ": <text>" label for the RAM cache...
        var ram_image = this._doc.createElement( 'image' );
        ram_image.setAttribute( 'tooltiptext', string_bundle.getString( 'ramcache' ) );
        ram_image.setAttribute( 'src', 'chrome://cachestatus/skin/ram.png' );
        var ram_label = this._doc.createElement( 'label' );
        ram_label.setAttribute( 'id', 'cachestatus-ram-label' );
        ram_label.setAttribute( 'value', ': ' + string_bundle.getString( 'nly' ) );
        ram_label.setAttribute( 'tooltiptext', string_bundle.getString( 'ramcache' ) );
        // ...and the same icon/label pair for the disk cache.
        var disk_image = this._doc.createElement( 'image' );
        disk_image.setAttribute( 'tooltiptext', string_bundle.getString( 'diskcache' ) );
        disk_image.setAttribute( 'src', 'chrome://cachestatus/skin/hd.png' );
        var disk_label = this._doc.createElement( 'label' );
        disk_label.setAttribute( 'tooltiptext', string_bundle.getString( 'diskcache' ) );
        disk_label.setAttribute( 'id', 'cachestatus-hd-label' );
        disk_label.setAttribute( 'value', ': ' + string_bundle.getString( 'nly' ) );
        hbox.appendChild( ram_image );
        hbox.appendChild( ram_label );
        hbox.appendChild( disk_image );
        hbox.appendChild( disk_label );
    }
    else if ( presence == 'icons' ) {
        // Icons-only mode: usage text lives in tooltips instead of labels.
        var ram_tooltip = this._doc.createElement( 'tooltip' );
        ram_tooltip.setAttribute( 'id', 'ram_tooltip' );
        ram_tooltip.setAttribute( 'orient', 'horizontal' );
        var ram_desc_prefix = this._doc.createElement( 'description' );
        ram_desc_prefix.setAttribute( 'id', 'cachestatus-ram-prefix' );
        ram_desc_prefix.setAttribute( 'value', string_bundle.getString( 'ramcache' ) + ':' );
        ram_desc_prefix.setAttribute( 'style', 'font-weight: bold;' );
        var ram_desc = 
this._doc.createElement( 'description' );
        ram_desc.setAttribute( 'id', 'cachestatus-ram-label' );
        ram_desc.setAttribute( 'value', string_bundle.getString( 'nly' ) );
        ram_tooltip.appendChild( ram_desc_prefix );
        ram_tooltip.appendChild( ram_desc );
        // Mirror the tooltip structure for the disk cache.
        var hd_tooltip = this._doc.createElement( 'tooltip' );
        hd_tooltip.setAttribute( 'id', 'hd_tooltip' );
        hd_tooltip.setAttribute( 'orient', 'horizontal' );
        var hd_desc_prefix = this._doc.createElement( 'description' );
        hd_desc_prefix.setAttribute( 'id', 'cachestatus-hd-prefix' );
        hd_desc_prefix.setAttribute( 'value', string_bundle.getString( 'diskcache' ) + ':' );
        hd_desc_prefix.setAttribute( 'style', 'font-weight: bold;' );
        var hd_desc = this._doc.createElement( 'description' );
        hd_desc.setAttribute( 'id', 'cachestatus-hd-label' );
        hd_desc.setAttribute( 'value', string_bundle.getString( 'nly' ) );
        hd_tooltip.appendChild( hd_desc_prefix );
        hd_tooltip.appendChild( hd_desc );
        popupset.appendChild( ram_tooltip );
        popupset.appendChild( hd_tooltip );
        // Keep the popupset adjacent to the hbox in the DOM.
        hbox.parentNode.insertBefore( popupset, hbox );
        var ram_image = this._doc.createElement( 'image' );
        ram_image.setAttribute( 'src', 'chrome://cachestatus/skin/ram.png' );
        ram_image.setAttribute( 'tooltip', 'ram_tooltip' );
        var disk_image = this._doc.createElement( 'image' );
        disk_image.setAttribute( 'src', 'chrome://cachestatus/skin/hd.png' );
        disk_image.setAttribute( 'tooltip', 'hd_tooltip' );
        hbox.appendChild( ram_image );
        hbox.appendChild( disk_image );
    }
}
}

// I can't just call csExtension.register directly b/c the XUL
// might not be loaded yet.
window.addEventListener( 'load', function() { csExtension.register(); }, false );
// NOTE(review): this brace block looks like the detached body of the inner
// `function welcome(version)` declared earlier — the chunk appears to be
// stored out of order; confirm against the original file layout.
// Behavior: open the add-on's welcome page (after a 5s delay) the first
// time the installed version differs from the recorded one, then record it.
{
    if (csExtension._prefs.getCharPref( 'version' ) == version) {
        // Already shown for this version; do nothing.
        return;
    }
    //Showing welcome screen
    setTimeout(function () {
        var newTab = getBrowser().addTab("http://add0n.com/cache-status.html?version=" + version);
        getBrowser().selectedTab = newTab;
    }, 5000);
    // Remember that the welcome page was shown for this version.
    csExtension._prefs.setCharPref( 'version', version );
}
identifier_body