function stringlengths 11 56k | repo_name stringlengths 5 60 | features list |
|---|---|---|
def __init__(self, config_yaml):
    # Keep the parsed YAML configuration accessible on the instance.
    self.Config = config_yaml
89,
26,
89,
10,
1487359957
] |
def LaunchUniCAVEWindow(path, machine_name=None):
    """Launch the UniCAVE build at *path* in pop-up-window mode.

    When *machine_name* is given it is forwarded via the
    ``overrideMachineName`` command-line argument.
    Returns the :class:`subprocess.Popen` handle of the started process.
    """
    launch_args = [path, "-popupWindow"]
    if machine_name is not None:
        launch_args.extend(["overrideMachineName", machine_name])
    return subprocess.Popen(launch_args)
89,
26,
89,
10,
1487359957
] |
def __init__(self, phi=B3Spline.dec_lo, level=None, boundary="symm",
             data=None):
    # phi: low-pass scaling kernel (defaults to the B3-spline filter).
    # level: number of decomposition scales; may also be set in decompose().
    # boundary: boundary condition passed through to scipy convolutions.
    self.set_wavelet(phi=phi)
    self.level = level
    self.boundary = boundary
    # NOTE(review): np.array(None) yields a 0-d object array when no data
    # is supplied -- presumably load_data() is expected before use; confirm.
    self.data = np.array(data)
6,
4,
6,
1,
1464356672
] |
def load_data(self, data):
    # Clear state from any previous decomposition before switching data.
    self.reset()
    # Copy the input into a fresh ndarray.
    self.data = np.array(data)
6,
4,
6,
1,
1464356672
] |
def calc_filters(self):
    """
    Calculate the convolution filters of each scale.
    Note: the zero-th scale filter (i.e., delta function) is the first
    element, thus the array index is the same as the decomposition scale.
    """
    self.filters = []
    # scale 0: delta function
    h = np.array([[1]])  # NOTE: 2D
    self.filters.append(h)
    # scale 1: the (reversed) base kernel itself
    h = self.phi[::-1, ::-1]
    self.filters.append(h)
    for scale in range(2, self.level+1):
        h_up = self.zupsample(self.phi, order=scale-1)
        # NOTE(review): `h` is never updated inside this loop, so for
        # scale >= 3 this convolves the upsampled kernel with the scale-1
        # filter only, not with the cumulative filter of the previous
        # scale. The IUWT cumulative filter would require `h = h2` after
        # appending -- confirm against the reference implementation.
        h2 = signal.convolve2d(h_up[::-1, ::-1], h, mode="same",
                               boundary=self.boundary)
        self.filters.append(h2)
6,
4,
6,
1,
1464356672
] |
def decompose(self, level, boundary="symm"):
    """
    Perform IUWT decomposition in the plain loop way.
    The filters of each scale/level are calculated first, then the
    approximations of each scale/level are calculated by convolving the
    raw/finest image with these filters.
    return:
        [ W_1, W_2, ..., W_n, A_n ]
        n = level
        W: wavelet details
        A: approximation
    """
    self.boundary = boundary
    # Recompute the filters only when the requested level changed.
    if self.level != level or self.filters == []:
        self.level = level
        self.calc_filters()
    self.decomposition = []
    approx = self.data
    for scale in range(1, level+1):
        # approximation: always convolve the *original* data with the
        # per-scale filter (rather than re-convolving the previous
        # approximation).
        approx2 = signal.convolve2d(self.data, self.filters[scale],
                                    mode="same", boundary=self.boundary)
        # wavelet details: difference of two consecutive approximations
        w = approx - approx2
        self.decomposition.append(w)
        if scale == level:
            # the coarsest approximation is appended as the last element
            self.decomposition.append(approx2)
        approx = approx2
    return self.decomposition
6,
4,
6,
1,
1464356672
] |
def __decompose(self, data, phi, level):
    """
    2D IUWT decomposition (or stationary wavelet transform).
    This is a convolution version, where kernel is zero-upsampled
    explicitly. Not fast.
    Parameters:
    - level : level of decomposition
    - phi : low-pass filter kernel
    - boundary : boundary conditions (passed to scipy.signal.convolve2d,
      'symm' by default)
    Returns:
        list of wavelet details + last approximation. Each element in
        the list is an image of the same size as the input image.
    """
    if level <= 0:
        return data
    # BUG FIX: in Python 3 `map` returns an iterator and np.all(<map>)
    # wraps it in a 0-d object array that is always truthy, so the shape
    # checks silently never failed.  Use the builtin all() instead.
    assert all(a > b for a, b in zip(data.shape, phi.shape))
    # approximation:
    approx = signal.convolve2d(data, phi[::-1, ::-1], mode="same",
                               boundary=self.boundary)
    # wavelet details:
    w = data - approx
    phi_up = self.zupsample(phi, order=1)
    if level == 1:
        return [w, approx]
    elif not all(a > b for a, b in zip(data.shape, phi_up.shape)):
        # Upsampled kernel no longer fits inside the image.
        print("Maximum allowed decomposition level reached",
              file=sys.stderr)
        return [w, approx]
    else:
        return [w] + self.__decompose(approx, phi_up, level-1)
6,
4,
6,
1,
1464356672
] |
def zupsample(data, order=1):
    """
    Upsample data array by interleaving it with zero's.
    h{up_order: n}[l] = (1) h[l], if l % 2^n == 0;
                        (2) 0, otherwise

    Each axis of length n becomes 2**order * (n-1) + 1, with the original
    samples placed every 2**order positions and zeros in between.
    """
    shape = data.shape
    new_shape = [(2**order * (n-1) + 1) for n in shape]
    output = np.zeros(new_shape, dtype=data.dtype)
    # BUG FIX: index with a *tuple* of slices -- indexing with a list of
    # slices is deprecated in NumPy and an error in recent versions.
    output[tuple(slice(None, None, 2**order) for _ in shape)] = data
    return output
6,
4,
6,
1,
1464356672
] |
def get_detail(self, scale):
    """
    Return the wavelet detail coefficients of the given scale.
    Valid range: 1 <= scale <= level.
    """
    if not 1 <= scale <= self.level:
        raise ValueError("Invalid scale")
    # Detail of scale k is stored at list index k-1.
    return self.decomposition[scale - 1]
6,
4,
6,
1,
1464356672
] |
def reset(self):
    """Reset the decomposition state, including the VST coefficients."""
    # NOTE(review): super(self.__class__, self) breaks under further
    # subclassing (infinite recursion); a zero-argument super() needs the
    # enclosing class context, so it is left as-is here.
    super(self.__class__, self).reset()
    # BUG FIX: the original assigned to a local variable `vst_coef`,
    # which was immediately discarded -- the cached instance attribute
    # was never actually cleared.
    self.vst_coef = []
6,
4,
6,
1,
1464356672
] |
def soft_threshold(data, threshold):
    """Soft-threshold *data*: zero out values with magnitude <= threshold
    and shrink the remaining values toward zero by *threshold*.
    Accepts both ndarrays and plain scalars."""
    if isinstance(data, np.ndarray):
        shrunk = data.copy()
        shrunk[np.abs(data) <= threshold] = 0.0
        shrunk[data > threshold] -= threshold
        shrunk[data < -threshold] += threshold
        return shrunk
    # scalar path
    if np.abs(data) <= threshold:
        return 0.0
    return data - threshold if data > threshold else data + threshold
6,
4,
6,
1,
1464356672
] |
def filters_product(self, scale1, scale2):
    """
    Scalar product of the filters of two scales, restricted to the region
    where the smaller filter overlaps the center of the larger one.
    Helper used when computing the VST coefficients.
    """
    if scale1 > scale2:
        filter_big, filter_small = self.filters[scale1], self.filters[scale2]
    else:
        filter_big, filter_small = self.filters[scale2], self.filters[scale1]
    # Center-crop the big filter down to the small filter's shape.
    offsets = [(nb - ns) // 2
               for nb, ns in zip(filter_big.shape, filter_small.shape)]
    cropped = filter_big[
        offsets[0]:(filter_big.shape[0] - offsets[0]),
        offsets[1]:(filter_big.shape[1] - offsets[1])]
    assert cropped.shape == filter_small.shape
    return np.sum(filter_small * cropped)
6,
4,
6,
1,
1464356672
] |
def vst(self, data, scale, coupled=True):
    """
    Perform variance stabling transform
    XXX: parameter `coupled' why??
    Credit: MSVST-V1.0/src/libmsvst/B3VSTAtrous.h
    """
    self.vst_coupled = coupled
    # Lazily compute the per-scale VST coefficients on first use.
    if self.vst_coef == []:
        self.calc_vst_coef()
    coef = self.vst_coef[scale]
    b = 1.0 if coupled else coef["b"]
    return b * np.sqrt(np.abs(data + coef["c"]))
6,
4,
6,
1,
1464356672
] |
def is_significant(self, scale, fdr=0.1, independent=False, verbose=False):
"""
Multiple hypothesis testing with false discovery rate (FDR) control.
`independent': whether the test statistics of all the null
hypotheses are independent.
If `independent=True': FDR <= (m0/m) * q
otherwise: FDR <= (m0/m) * q * (1 + 1/2 + 1/3 + ... + 1/m)
References:
[1] False discovery rate - Wikipedia
https://en.wikipedia.org/wiki/False_discovery_rate
"""
coef = self.get_detail(scale)
std = self.vst_coef[scale]["std"]
pvalues = 2.0 * (1.0 - sp.stats.norm.cdf(np.abs(coef) / std))
p_sorted = pvalues.flatten()
p_sorted.sort()
N = len(p_sorted)
if independent:
cn = 1.0
else:
cn = np.sum(1.0 / np.arange(1, N+1))
p_comp = fdr * np.arange(N) / (N * cn)
comp = (p_sorted < p_comp)
if np.sum(comp) == 0:
# `comp' contains ALL False
p_cutoff = 0.0
else:
# cutoff p-value after FDR control/correction
p_cutoff = np.max(p_sorted[comp])
sig = (pvalues <= p_cutoff)
if verbose:
print("std/sigma: %g, p_cutoff: %g" % (std, p_cutoff),
flush=True, file=sys.stderr)
return (sig, p_cutoff) | liweitianux/atoolbox | [
6,
4,
6,
1,
1464356672
] |
def decompose(self, level=5, boundary="symm", verbose=False):
    """
    2D IUWT decomposition with VST.
    Each detail band is the difference of the variance-stabilized
    transforms of two consecutive approximations; the raw (non-VST)
    coarsest approximation is appended as the last element.
    """
    self.boundary = boundary
    # Recompute the filters and VST coefficients only when the level
    # changed (they depend on the decomposition depth).
    if self.level != level or self.filters == []:
        self.level = level
        self.calc_filters()
        self.calc_vst_coef()
    self.decomposition = []
    approx = self.data
    if verbose:
        print("IUWT decomposing (%d levels): " % level,
              end="", flush=True, file=sys.stderr)
    for scale in range(1, level+1):
        if verbose:
            print("%d..." % scale, end="", flush=True, file=sys.stderr)
        # approximation: convolve the original data with this scale's filter
        approx2 = signal.convolve2d(self.data, self.filters[scale],
                                    mode="same", boundary=self.boundary)
        # wavelet details: difference of the VSTs of consecutive scales
        w = self.vst(approx, scale=scale-1) - self.vst(approx2, scale=scale)
        self.decomposition.append(w)
        if scale == level:
            self.decomposition.append(approx2)
        approx = approx2
    if verbose:
        print("DONE!", flush=True, file=sys.stderr)
    return self.decomposition
6,
4,
6,
1,
1464356672
] |
def reconstruct(self, denoised=True, niter=10, verbose=False):
    """
    Reconstruct the original image using iterative method with
    L1 regularization, because the denoising violates the exact inverse
    procedure.
    arguments:
    * denoised: whether use the denoised coefficients
    * niter: number of iterations
    """
    if denoised:
        decomposition = self.denoised
    else:
        decomposition = self.decomposition
    # NOTE(review): `decomposition` is assigned above but never used
    # below -- the iteration rebuilds its own coefficient list; confirm.
    # L1 regularization: threshold decreases linearly from 1.0 to 0.0
    lbd = 1.0
    delta = lbd / (niter - 1)
    # initial solution: direct inverse VST with negatives clipped
    solution = self.reconstruct_ivst(denoised=denoised,
                                     positive_project=True)
    #
    iuwt = IUWT(level=self.level)
    iuwt.calc_filters()
    # iterative reconstruction
    if verbose:
        print("Iteratively reconstructing (%d times): " % niter,
              end="", flush=True, file=sys.stderr)
    for i in range(niter):
        if verbose:
            print("%d..." % i, end="", flush=True, file=sys.stderr)
        tempd = self.data.copy()
        solution_decomp = []
        for scale in range(1, self.level+1):
            approx, detail = iuwt.transform(tempd, scale)
            approx_sol, detail_sol = iuwt.transform(solution, scale)
            # Update coefficients according to the significant supports,
            # which were acquired during the FDR denoising procedure.
            sig = self.sig_supports[scale]
            detail_sol[sig] = detail[sig]
            detail_sol = self.soft_threshold(detail_sol, threshold=lbd)
            #
            solution_decomp.append(detail_sol)
            tempd = approx.copy()
            solution = approx_sol.copy()
        # last approximation (the two are the same)
        solution_decomp.append(approx)
        # reconstruct
        solution = iuwt.reconstruct(decomposition=solution_decomp)
        # discard all negative values (positivity projection)
        solution[solution < 0] = 0.0
        #
        lbd -= delta
    if verbose:
        print("DONE!", flush=True, file=sys.stderr)
    #
    self.reconstruction = solution
    return self.reconstruction
6,
4,
6,
1,
1464356672
] |
def main():
    """Command-line entry point: remove Poisson noise from a FITS image
    via IUWT decomposition with multi-scale VST, FDR-based significance
    thresholding and iterative reconstruction."""
    # commandline arguments parser
    parser = argparse.ArgumentParser(
        description="Poisson Noise Removal with Multi-scale Variance " + \
            "Stabling Transform and Wavelet Transform",
        epilog="Version: %s (%s)" % (__version__, __date__))
    parser.add_argument("-l", "--level", dest="level",
                        type=int, default=5,
                        help="level of the IUWT decomposition")
    parser.add_argument("-r", "--fdr", dest="fdr",
                        type=float, default=0.1,
                        help="false discovery rate")
    parser.add_argument("-I", "--fdr-independent", dest="fdr_independent",
                        action="store_true", default=False,
                        help="whether the FDR null hypotheses are independent")
    parser.add_argument("-s", "--start-scale", dest="start_scale",
                        type=int, default=1,
                        help="which scale to start the denoising (inclusive)")
    parser.add_argument("-e", "--end-scale", dest="end_scale",
                        type=int, default=0,
                        help="which scale to end the denoising (inclusive)")
    parser.add_argument("-n", "--niter", dest="niter",
                        type=int, default=10,
                        help="number of iterations for reconstruction")
    parser.add_argument("-v", "--verbose", dest="verbose",
                        action="store_true", default=False,
                        help="show verbose progress")
    parser.add_argument("-C", "--clobber", dest="clobber",
                        action="store_true", default=False,
                        help="overwrite output file if exists")
    parser.add_argument("infile", help="input image with Poisson noises")
    parser.add_argument("outfile", help="output denoised image")
    args = parser.parse_args()
    # end_scale == 0 means "denoise through the last decomposition level"
    if args.end_scale == 0:
        args.end_scale = args.level
    if args.verbose:
        print("infile: '%s'" % args.infile, file=sys.stderr)
        print("outfile: '%s'" % args.outfile, file=sys.stderr)
        print("level: %d" % args.level, file=sys.stderr)
        print("fdr: %.2f" % args.fdr, file=sys.stderr)
        print("fdr_independent: %s" % args.fdr_independent, file=sys.stderr)
        print("start_scale: %d" % args.start_scale, file=sys.stderr)
        print("end_scale: %d" % args.end_scale, file=sys.stderr)
        print("niter: %d\n" % args.niter, flush=True, file=sys.stderr)
    # Refuse to overwrite an existing file unless --clobber was given.
    if not args.clobber and os.path.exists(args.outfile):
        raise OSError("outfile '%s' already exists" % args.outfile)
    imgfits = fits.open(args.infile)
    img = imgfits[0].data
    # Remove Poisson noises
    msvst = IUWT_VST(data=img)
    msvst.decompose(level=args.level, verbose=args.verbose)
    msvst.denoise(fdr=args.fdr, fdr_independent=args.fdr_independent,
                  start_scale=args.start_scale, end_scale=args.end_scale,
                  verbose=args.verbose)
    msvst.reconstruct(denoised=True, niter=args.niter, verbose=args.verbose)
    img_denoised = msvst.reconstruction
    # Output: write back the denoised data and record processing history
    imgfits[0].data = img_denoised
    imgfits[0].header.add_history("%s: Removed Poisson Noises @ %s" % (
        os.path.basename(sys.argv[0]), datetime.utcnow().isoformat()))
    imgfits[0].header.add_history(" TOOL: %s (v%s, %s)" % (
        os.path.basename(sys.argv[0]), __version__, __date__))
    imgfits[0].header.add_history(" PARAM: %s" % " ".join(sys.argv[1:]))
    # NOTE(review): astropy deprecated/removed `clobber=` in favor of
    # `overwrite=` -- confirm the astropy version this is pinned to.
    imgfits.writeto(args.outfile, checksum=True, clobber=args.clobber)
6,
4,
6,
1,
1464356672
] |
def getGeocodeLocation(inputString):
    """Resolve a free-form place name to (latitude, longitude) using the
    Google Geocoding API."""
    # The geocoding endpoint expects '+' in place of spaces.
    query = inputString.replace(" ", "+")
    url = ('https://maps.googleapis.com/maps/api/geocode/json?address=%s&key=%s'% (query, google_api_key))
    http = httplib2.Http()
    response_body = http.request(url, 'GET')[1]
    payload = json.loads(response_body)
    location = payload['results'][0]['geometry']['location']
    return (location['lat'], location['lng'])
65,
99,
65,
2,
1443836127
] |
def findARestaurant(mealType, location):
    # Geocode the location, then query Foursquare for the first venue
    # matching mealType near those coordinates.
    # NOTE(review): this block appears truncated here -- no return
    # statement or no-result branch is visible; confirm against the
    # full source before relying on this function.
    latitude, longitude = getGeocodeLocation(location)
    url = ('https://api.foursquare.com/v2/venues/search?client_id=%s&client_secret=%s&v=20130815&ll=%s,%s&query=%s' % (foursquare_client_id, foursquare_client_secret,latitude,longitude,mealType))
    h = httplib2.Http()
    result = json.loads(h.request(url,'GET')[1])
    if result['response']['venues']:
        #Grab the first restaurant
        restaurant = result['response']['venues'][0]
        venue_id = restaurant['id']
        restaurant_name = restaurant['name']
        restaurant_address = restaurant['location']['formattedAddress']
        #Format the Restaurant Address into one string
        address = ""
        for i in restaurant_address:
            address += i + " "
        restaurant_address = address
65,
99,
65,
2,
1443836127
] |
def __init__(
self, plotly_name="color", parent_name="icicle.outsidetextfont", **kwargs | plotly/plotly.py | [
13052,
2308,
13052,
1319,
1385013188
] |
def __init__(self, container, parent, value=None):
    # Wrap a backing tree.Node; this object is a thin facade over it.
    self._node = tree.Node(container, parent, value)
12,
5,
12,
1,
1381172049
] |
def _container(self):
    # Expose the backing node's container.
    return self._node._container
12,
5,
12,
1,
1381172049
] |
def id(self):
    # Identifier delegated to the backing node.
    return self._node.id
12,
5,
12,
1,
1381172049
] |
def value(self):
    # Value stored in the backing node.
    return self._node.value
12,
5,
12,
1,
1381172049
] |
def value(self, value):
    # Setter: forward the new value to the backing node.
    self._node.value = value
12,
5,
12,
1,
1381172049
] |
def _create_child(self, index, value):
    """Set *value* on the child at *index*, creating the children pair
    lazily on first access, and return that child."""
    children = getattr(self, '_children', None)
    if children is None:
        children = self._create_children()
    child = children[index]
    child.value = value
    return child
12,
5,
12,
1,
1381172049
] |
def create_right_child(self, value):
    # Right child lives at index 1 of the children pair.
    return self._create_child(1, value)
12,
5,
12,
1,
1381172049
] |
def left_child(self):
    # Left child lives at index 0 of the children pair.
    return self._child(0)
12,
5,
12,
1,
1381172049
] |
def right_child(self):
    # Right child lives at index 1 of the children pair.
    return self._child(1)
12,
5,
12,
1,
1381172049
] |
def __init__(self, *args, **kwargs):
    # Facade over a backing tree.Tree; all arguments are forwarded.
    self._tree = tree.Tree(*args, **kwargs)
12,
5,
12,
1,
1381172049
] |
def root(self):
    """Return the root node, or None when no root has been created yet."""
    # The _root attribute is only set once a root node exists.
    return getattr(self, '_root', None)
12,
5,
12,
1,
1381172049
] |
def _pipe(self):
    # Communication pipe delegated to the backing tree.
    return self._tree._pipe
12,
5,
12,
1,
1381172049
] |
def get_raise(data, key, expect_type=None):
    ''' Helper function to retrieve an element from a JSON data structure.
    The *key* must be a string and may contain periods to indicate nesting.
    Parts of the key may be a string or integer used for indexing on lists.
    If *expect_type* is not None and the retrieved value is not of the
    specified type, TypeError is raised. If the key can not be found,
    KeyError is raised. '''
    parts = key.split('.')
    for depth, part in enumerate(parts):
        # Path prefix resolved so far; used in error messages.
        resolved = '.'.join(parts[:depth + 1])
        # Numeric parts index into lists, everything else into dicts.
        try:
            part = int(part)
        except ValueError:
            pass
        if isinstance(part, int):
            if not isinstance(data, list):
                raise TypeError('expected list to access {!r}'.format(resolved))
            try:
                data = data[part]
            except IndexError:
                raise KeyError(resolved)
        else:
            if not isinstance(data, dict):
                raise TypeError('expected dictionary to access {!r}'.format(resolved))
            try:
                data = data[part]
            except KeyError:
                raise KeyError(resolved)
    if expect_type is not None and not isinstance(data, expect_type):
        raise TypeError('expected {!r} but got {!r} instead for {!r}'.format(
            expect_type.__name__, type(data).__name__, key))
    return data
28,
8,
28,
9,
1455380498
] |
def basic_auth(message='Login required'):
    ''' Sends a 401 response that enables basic auth. '''
    # The WWW-Authenticate header makes browsers prompt for credentials.
    realm_header = {'WWW-Authenticate': 'Basic realm="{}"'.format(message)}
    return Response('Please log in.', 401, realm_header, mimetype='text/plain')
28,
8,
28,
9,
1455380498
] |
def wrapper(*args, **kwargs):
    # Require a valid, unexpired login token bound to the caller's IP;
    # otherwise redirect to the login page.
    # NOTE(review): closes over `func` (the wrapped view) -- the enclosing
    # decorator is not visible in this chunk.
    ip = request.remote_addr
    token_string = session.get('flux_login_token')
    token = models.LoginToken.select(lambda t: t.token == token_string).first()
    if not token or token.ip != ip or token.expired():
        # Expired tokens are reported to the user and removed.
        if token and token.expired():
            flash("Your login session has expired.")
            token.delete()
        return redirect(url_for('login'))
    # Make the authenticated token and user available to the view.
    request.login_token = token
    request.user = token.user
    return func(*args, **kwargs)
28,
8,
28,
9,
1455380498
] |
def with_io_response(kwarg='stream', stream_type='text', **response_kwargs):
    ''' Decorator for View functions that create a :class:`io.StringIO` or
    :class:`io.BytesIO` (based on the *stream_type* parameter) and pass it
    as *kwarg* to the wrapped function. The contents of the buffer are
    sent back to the client. '''
    # Validate the stream type up front, at decoration time.
    if stream_type not in ('text', 'bytes'):
        raise ValueError('invalid value for stream_type: {!r}'.format(stream_type))
    factory = io.StringIO if stream_type == 'text' else io.BytesIO

    def decorator(func):
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            if kwarg in kwargs:
                raise RuntimeError('keyword argument {!r} already occupied'.format(kwarg))
            stream = factory()
            kwargs[kwarg] = stream
            status = func(*args, **kwargs)
            # The wrapped view returns only a status; the buffer becomes
            # the response body.
            return Response(stream.getvalue(), status=status, **response_kwargs)
        return wrapper
    return decorator
28,
8,
28,
9,
1455380498
] |
def decorator(func):
    # NOTE(review): closes over `replace`, `stream_dest_kwarg`, `kwarg`
    # and `create_logger` from an enclosing factory not visible here.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        # Fetch (and optionally consume) the stream argument, attach a
        # logger writing to it, and convert any exception into HTTP 500.
        if replace:
            stream = kwargs.pop(stream_dest_kwarg)
        else:
            stream = kwargs[stream_dest_kwarg]
        kwargs[kwarg] = logger = create_logger(stream)
        try:
            return func(*args, **kwargs)
        except BaseException as exc:
            # Log the full traceback into the stream and signal failure.
            logger.exception(exc)
            return 500
    return wrapper
28,
8,
28,
9,
1455380498
] |
def create_logger(stream, name=__name__, fmt=None):
    ''' Create a standalone :class:`logging.Logger` named *name* that
    writes records formatted with *fmt* (default: timestamp, level and
    message) to *stream*.
    NOTE(review): the original docstring claimed stderr output as well,
    but only the single stream handler is attached. '''
    if not fmt:
        fmt = '[%(asctime)-15s - %(levelname)s]: %(message)s'
    # Instantiate Logger directly so it is not registered in the global
    # logger hierarchy.
    logger = logging.Logger(name)
    stream_handler = logging.StreamHandler(stream)
    stream_handler.setFormatter(logging.Formatter(fmt))
    logger.addHandler(stream_handler)
    return logger
28,
8,
28,
9,
1455380498
] |
def generate():
    # Stream the file line-chunks lazily; `filename` comes from the
    # enclosing scope (not visible in this chunk).
    with open(filename, 'rb') as fp:
        yield from fp
28,
8,
28,
9,
1455380498
] |
def flash(message=None):
    """Set or pop the one-shot flash message stored in the session.

    Called with a message it stores it; called with no argument it
    removes and returns the stored message (or None)."""
    if message is not None:
        session['flux_flash'] = message
        return None
    return session.pop('flux_flash', None)
28,
8,
28,
9,
1455380498
] |
def hash_pw(pw):
    # SECURITY NOTE(review): unsalted MD5 is unsuitable for password
    # storage (fast to brute-force, no salt); hashlib.scrypt or a
    # dedicated KDF should be used. Kept as-is because already-stored
    # hashes depend on this exact scheme.
    return hashlib.md5(pw.encode('utf8')).hexdigest()
28,
8,
28,
9,
1455380498
] |
def rmtree(path, remove_write_protection=False):
    """
    A wrapper for #shutil.rmtree() that can try to remove write protection
    if removing fails, if enabled.
    """
    handler = None
    if remove_write_protection:
        def handler(func, path, exc_info):
            # Clear the read-only bit and retry the removal once.
            os.chmod(path, stat.S_IWRITE)
            os.unlink(path)
    shutil.rmtree(path, onerror=handler)
28,
8,
28,
9,
1455380498
] |
def secure_filename(filename):
    """
    Similar to #werkzeug.secure_filename(), but preserves leading dots in
    the filename.
    """
    # Strip leading slashes and any leading './' or '../' components.
    while True:
        filename = filename.lstrip('/').lstrip('\\')
        if filename.startswith('..') and filename[2:3] in '/\\':
            filename = filename[3:]
            continue
        if filename.startswith('.') and filename[1:2] in '/\\':
            filename = filename[2:]
            continue
        break
    # werkzeug drops a leading dot; remember it and restore afterwards.
    hidden = filename.startswith('.')
    filename = werkzeug.secure_filename(filename)
    return '.' + filename if hidden else filename
28,
8,
28,
9,
1455380498
] |
def run(command, logger, cwd=None, env=None, shell=False, return_stdout=False,
        inherit_env=True):
    """
    Run a subprocess with the specified command. The command and output of is
    logged to logger. The command will automatically be converted to a string
    or list of command arguments based on the *shell* parameter.
    # Parameters
    command (str, list): A command-string or list of command arguments.
    logger (logging.Logger): A logger that will receive the command output.
    cwd (str, None): The current working directory.
    env (dict, None): The environment for the subprocess.
    shell (bool): If set to #True, execute the command via the system shell.
    return_stdout (bool): Return the output of the command (including stderr)
      to the caller. The result will be a tuple of (returncode, output).
    inherit_env (bool): Inherit the current process' environment.
    # Return
    int, tuple of (int, str): The return code, or the returncode and the
      output of the command.
    """
    # Normalize the command to the form Popen expects for the chosen mode
    # and build a printable representation for the log.
    if shell:
        if not isinstance(command, str):
            command = ' '.join(quote(x) for x in command)
        display = command
    else:
        if isinstance(command, str):
            command = shlex.split(command)
        display = ' '.join(map(quote, command))
    if logger:
        logger.info('$ ' + display)
    env = dict(env) if env is not None else {}
    if inherit_env:
        env = {**os.environ, **env}
    proc = subprocess.Popen(
        command, cwd=cwd, env=env, shell=shell, stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT, stdin=None)
    # stderr is merged into stdout above, so one pipe captures everything.
    output = proc.communicate()[0].decode()
    if output and logger:
        if proc.returncode != 0:
            logger.error('\n' + output)
        else:
            logger.info('\n' + output)
    if return_stdout:
        return proc.returncode, output
    return proc.returncode
28,
8,
28,
9,
1455380498
] |
def strip_url_path(url):
    ''' Strips that path part of the specified *url*. '''
    parts = list(urllib.parse.urlparse(url))
    parts[2] = ''  # index 2 is the path component
    return urllib.parse.urlunparse(parts)
28,
8,
28,
9,
1455380498
] |
def get_bitbucket_signature(secret, payload_data):
    ''' Generates the Bitbucket HMAC signature from the repository
    *secret* and the *payload_data*. The Bitbucket signature is sent
    with the ``X-Hub-Signature`` header. '''
    digest = hmac.new(secret.encode('utf8'), payload_data, hashlib.sha256)
    return digest.hexdigest()
28,
8,
28,
9,
1455380498
] |
def is_page_active(page, user):
    # Decide whether the navigation entry *page* should be highlighted
    # for the current request path (reads the global flask `request`).
    path = request.path
    if page == 'dashboard' and (not path or path == '/'):
        return True
    elif page == 'repositories' and (path.startswith('/repositories') or path.startswith('/repo') or path.startswith('/edit/repo') or path.startswith('/build') or path.startswith('/overrides')):
        return True
    # "users" covers every user page except the current user's own profile.
    elif page == 'users' and (path.startswith('/users') or (path.startswith('/user') and path != ('/user/' + str(user.id)))):
        return True
    elif page == 'profile' and path == ('/user/' + str(user.id)):
        return True
    elif page == 'integration' and path == '/integration':
        return True
    return False
28,
8,
28,
9,
1455380498
] |
def get_customs_path(repo):
    # Map the "owner/name" repository name onto a directory below
    # config.customs_dir using the platform path separator.
    return os.path.join(config.customs_dir, repo.name.replace('/', os.sep))
28,
8,
28,
9,
1455380498
] |
def get_override_build_script_path(repo):
    # The override script uses the first configured build-script filename.
    return os.path.join(get_override_path(repo), config.build_scripts[0])
28,
8,
28,
9,
1455380498
] |
def write_override_build_script(repo, build_script):
    """Write (or delete) the override build script for *repo*.

    An empty or whitespace-only *build_script* removes any existing
    override script; otherwise the script is written with carriage
    returns stripped.
    """
    build_script_path = get_override_build_script_path(repo)
    if build_script.strip() == '':
        if os.path.isfile(build_script_path):
            os.remove(build_script_path)
    else:
        makedirs(os.path.dirname(build_script_path))
        # BUG FIX: use a context manager so the file handle is closed
        # even when the write raises (the original leaked it on error).
        with open(build_script_path, mode='w') as build_script_file:
            build_script_file.write(build_script.replace('\r', ''))
28,
8,
28,
9,
1455380498
] |
def generate_ssh_keypair(public_key_comment):
    """
    Generates new RSA ssh keypair.
    Return:
      tuple(str, str): generated private and public keys
    """
    key = rsa.generate_private_key(backend=default_backend(), public_exponent=65537, key_size=4096)
    # Serialize the private half as unencrypted PKCS#8 PEM ...
    pem_private = key.private_bytes(
        serialization.Encoding.PEM,
        serialization.PrivateFormat.PKCS8,
        serialization.NoEncryption(),
    ).decode('ascii')
    # ... and the public half in the OpenSSH authorized_keys format.
    ssh_public = key.public_key().public_bytes(
        serialization.Encoding.OpenSSH,
        serialization.PublicFormat.OpenSSH,
    ).decode('ascii')
    if public_key_comment:
        ssh_public = ssh_public + ' ' + public_key_comment
    return pem_private, ssh_public
28,
8,
28,
9,
1455380498
] |
def __init__(self, text, menu=None, should_exit=False):
    # Here so Sphinx doesn't copy extraneous info from the superclass's docstring
    super(ExternalItem, self).__init__(text=text, menu=menu, should_exit=should_exit)
95,
27,
95,
3,
1505762810
] |
def get_manager_config(cls):
    """Build the database manager configuration for the test suite.

    On CI the MySQL server runs as root with no password; locally a
    dedicated "orator" account is used. The database name is the same
    in both environments."""
    if os.environ.get("CI", False):
        user, password = "root", ""
    else:
        user, password = "orator", "orator"
    return {
        "default": "mysql",
        "mysql": {
            "driver": "mysql",
            "database": "orator_test",
            "user": user,
            "password": password,
        },
    }
1388,
164,
1388,
161,
1432491805
] |
def FindAll(haystack, needle):
    """Return every position of *needle* in *haystack*: character indices
    for strings (overlapping matches included), keys for dicts, indices
    for other sequences."""
    if isinstance(haystack, str):
        positions = []
        index = haystack.find(needle)
        while index != -1:
            positions.append(index)
            # advance by one so overlapping occurrences are found too
            index = haystack.find(needle, index + 1)
        return positions
    if isinstance(haystack, dict):
        return [key for key, item in haystack.items() if item == needle]
    return [i for i, item in enumerate(haystack) if item == needle]
189,
8,
189,
1,
1475454499
] |
def dedup(iterable):
    """Return a new list with duplicates removed, keeping first occurrences.

    BUG FIX: the original deleted elements from the very list it was
    iterating over, which makes the iterator skip the element after each
    deletion and can leave duplicates behind (e.g. dedup([1, 1, 1, 2])
    returned [1, 1, 2]).  A membership *list* (not a set) is used so
    unhashable items keep working.
    """
    seen = []
    result = []
    for item in iterable:
        if item not in seen:
            seen.append(item)
            result.append(item)
    return result
189,
8,
189,
1,
1475454499
] |
def itersplit(iterable, number):
    """Split a sliceable sequence into consecutive chunks of length
    *number* (the final chunk may be shorter)."""
    return [iterable[start:start + number]
            for start in range(0, len(iterable), number)]
189,
8,
189,
1,
1475454499
] |
def abs_str(string):
    """Absolute value of a numeric string; anything non-numeric is
    returned unchanged."""
    try:
        number = float(string) if "." in string else int(string)
    except:
        return string  # not numeric -- pass through untouched
    return abs(number)
189,
8,
189,
1,
1475454499
] |
def product(item):
    """Multiply all elements of *item* together; 1 for an empty iterable."""
    accumulator = 1
    for factor in item:
        accumulator = accumulator * factor
    return accumulator
189,
8,
189,
1,
1475454499
] |
def Abs(item):
    """Charcoal absolute value: numeric abs for numbers, digit-wise abs
    for (numeric) strings, vectorized over iterables."""
    if isinstance(item, (int, float)):
        return abs(item)
    if isinstance(item, str):
        return abs_str(item)
    if isinstance(item, Expression):
        item = item.run()
    if isinstance(item, String):
        return String(abs_str(str(item)))
    if hasattr(item, "__iter__"):
        # Evaluate wrapped expressions first, then recurse elementwise.
        if isinstance(item[0], Expression):
            item = iter_apply(item, lambda element: element.run())
        return iter_apply(item, Abs)
189,
8,
189,
1,
1475454499
] |
def Product(item):
    """Charcoal product: digit product for numbers, product of embedded
    numbers for strings, elementwise product for iterables."""
    if isinstance(item, float):
        item = int(item)
    if isinstance(item, int):
        # Multiply the decimal digits together.
        accumulator = 1
        while item:
            accumulator *= item % 10
            item //= 10
        return accumulator
    if isinstance(item, Expression):
        item = item.run()
    if isinstance(item, String):
        item = str(item)
    if isinstance(item, str):
        if all(c in "0123456789." for c in item) and item.count(".") < 2:
            # Pure digit string: multiply digits, treating '.' as zero.
            return product([0 if c == "." else int(c) for c in item])
        # Otherwise multiply every number embedded in the string.
        return product(
            float(c) if "." in c else int(c)
            for c in re.findall("\d+\.?\d*|\.\d+", item)
        )
    if hasattr(item, "__iter__") and item:
        if isinstance(item[0], Expression):
            item = iter_apply(item, lambda element: element.run())
        # TODO: cartesian product?
        # if isinstance(item[0], list):
        #     return sum(item, [])
        return product(item)
189,
8,
189,
1,
1475454499
] |
def vectorized(left, right, c):
    # Apply a binary operation to `left` and `right`, broadcasting over
    # iterables on either side.
    # NOTE(review): relies on closure variables not visible in this
    # chunk -- `fn` (scalar operation), `afn` (optional whole-array
    # operation) and `cast_string` (numeric coercion flag); presumably
    # bound by an enclosing factory function. Confirm before reuse.
    if isinstance(left, String):
        left = str(left)
    if isinstance(right, String):
        right = str(right)
    if type(left) == Expression:
        left = left.run()
    if type(right) == Expression:
        right = right.run()
    left_type = type(left)
    right_type = type(right)
    # Strings are scalars here, despite being iterable.
    left_is_iterable = (
        hasattr(left, "__iter__") and not isinstance(left, str)
    )
    right_is_iterable = (
        hasattr(right, "__iter__") and not isinstance(right, str)
    )
    if left_is_iterable or right_is_iterable:
        if left_is_iterable and right_is_iterable:
            # Two iterables: zip elementwise (or delegate to afn if set).
            result = afn(left, right, c) if afn else [
                vectorized(l, r, c) for l, r in zip(left, right)
            ]
        else:
            # One iterable: broadcast the scalar across it.
            result = (
                [vectorized(item, right, c) for item in left]
                if left_is_iterable else
                [vectorized(left, item, c) for item in right]
            )
        # Try to rebuild the original container type around the result.
        result_type = type(left if left_is_iterable else right)
        try:
            return result_type(result)
        except:
            return result_type(result, left if left_is_iterable else right)
    # Scalar path: optionally coerce numeric strings before applying fn.
    if cast_string and left_type == str:
        left = (float if "." in left else int)(left)
    if cast_string and right_type == str:
        right = (float if "." in right else int)(right)
    return fn(left, right, c)
189,
8,
189,
1,
1475454499
] |
def Incremented(item):
    """Charcoal increment: add 1 to numbers (rounded to 15 places to hide
    float noise), parse numeric strings first, vectorize over iterables."""
    if isinstance(item, (int, float)):
        return round(item + 1, 15)
    if isinstance(item, Expression):
        item = item.run()
    if isinstance(item, String):
        item = str(item)
    if isinstance(item, str):
        return Incremented(float(item) if "." in item else int(item))
    if hasattr(item, "__iter__") and item:
        if isinstance(item[0], Expression):
            item = iter_apply(item, lambda element: element.run())
        return iter_apply(item, Incremented)
189,
8,
189,
1,
1475454499
] |
def Doubled(item):
    """Charcoal doubling: multiply numbers by 2 (rounded to 15 places),
    parse numeric strings first, vectorize over iterables."""
    if isinstance(item, (int, float)):
        return round(item * 2, 15)
    if isinstance(item, Expression):
        item = item.run()
    if isinstance(item, String):
        item = str(item)
    if isinstance(item, str):
        return Doubled(float(item) if "." in item else int(item))
    if hasattr(item, "__iter__") and item:
        if isinstance(item[0], Expression):
            item = iter_apply(item, lambda element: element.run())
        return iter_apply(item, Doubled)
189,
8,
189,
1,
1475454499
] |
def Lower(item):
    """Charcoal lowercase: numbers become their string form, strings are
    lowercased, iterables are processed elementwise."""
    if isinstance(item, (int, float)):
        return str(item)
    if isinstance(item, str):
        return item.lower()
    if isinstance(item, Expression):
        item = item.run()
    if isinstance(item, String):
        item = String(str(item).lower())
    if isinstance(item, str):
        return item.lower()
    if hasattr(item, "__iter__") and item:
        if isinstance(item[0], Expression):
            item = iter_apply(item, lambda element: element.run())
        return iter_apply(item, Lower)
189,
8,
189,
1,
1475454499
] |
def Max(item):
    # Charcoal Max: ceiling for numbers, maximum character for strings,
    # maximum element for iterables.
    if isinstance(item, int) or isinstance(item, float):
        return ceil(item)
    if isinstance(item, str):
        return chr(max(map(ord, item)))
    if isinstance(item, Expression):
        item = item.run()
    # NOTE(review): the two branches below lowercase the value -- they
    # look copy-pasted from Lower() and are almost certainly wrong for a
    # Max operation; confirm intended String handling before changing.
    if isinstance(item, String):
        item = String(str(item).lower())
    if isinstance(item, str):
        return item.lower()
    if hasattr(item, "__iter__") and item:
        if isinstance(item[0], Expression):
            item = iter_apply(item, lambda o: o.run())
        return max(item)
189,
8,
189,
1,
1475454499
] |
def direction(dir):
    # Normalize *dir* (a Direction, int, or string) to a Direction value.
    # Integers index the 8 compass directions counter-clockwise starting
    # from `right`. Strings accept abbreviated names (truncated to 5
    # chars, non-letters stripped) or embedded digits.
    # NOTE(review): unrecognized strings fall through to `return 0`
    # rather than a Direction -- presumably callers treat 0/falsy as
    # "no direction"; confirm.
    if isinstance(dir, String):
        dir = str(dir)
    cls = type(dir)
    if cls == Direction:
        return dir
    elif cls == int:
        return [
            Direction.right, Direction.up_right, Direction.up,
            Direction.up_left, Direction.left, Direction.down_left,
            Direction.down, Direction.down_right
        ][dir % 8]
    elif cls == str:
        # Keep only letters from the first 5 characters, lowercased.
        cleaned = re.sub("[^a-z]", "", dir.lower()[:5])
        lookup = {
            "r": Direction.right,
            "ri": Direction.right,
            "rig": Direction.right,
            "righ": Direction.right,
            "right": Direction.right,
            "ur": Direction.up_right,
            "upr": Direction.up_right,
            "upri": Direction.up_right,
            "uprig": Direction.up_right,
            "u": Direction.up,
            "up": Direction.up,
            "ul": Direction.up_left,
            "upl": Direction.up_left,
            "uple": Direction.up_left,
            "uplef": Direction.up_left,
            "l": Direction.left,
            "le": Direction.left,
            "lef": Direction.left,
            "left": Direction.left,
            "dl": Direction.down_left,
            "downl": Direction.down_left,
            "d": Direction.down,
            "do": Direction.down,
            "dow": Direction.down,
            "down": Direction.down,
            "dr": Direction.down_right,
            "downr": Direction.down_right
        }
        if cleaned in lookup:
            return lookup[cleaned]
        elif any(c in dir for c in "0123456789"):
            # Fall back to the first embedded number, modulo 8.
            return [
                Direction.right, Direction.up_right, Direction.up,
                Direction.up_left, Direction.left, Direction.down_left,
                Direction.down, Direction.down_right
            ][int(re.search("\d+", dir).group()) % 8]
        else:
            return 0
189,
8,
189,
1,
1475454499
] |
def valid_module(m):
    """Return True when *m* names a loadable SEM module file.

    A valid module is a ``.py`` file that is neither private
    (underscore-prefixed) nor one of the framework's own files.
    """
    if not m.endswith(".py"):
        return False
    return not (m.startswith("_") or m in ("sem_module.py", "pipeline.py"))
24,
7,
24,
1,
1479070118
] |
def banter():
    """Return a random quip to show for an unrecognized command.

    Uses ``random.choice`` instead of shuffling the whole list just to
    take its first element; also fixes the grammar of the "greatest
    trick" quote ("pulled what" -> "pulled was").
    """

    def username():
        # Best-effort current user name: env vars first, then the
        # last component of the expanded home directory.
        import os
        return os.environ.get("USERNAME", os.environ.get("USER", os.path.split(os.path.expanduser(u"~"))[-1]))

    import random
    quips = [
        u"Do thou mockest me?",
        u"Try again?",
        u"I'm sorry {0}, I'm afraid I can't do that.".format(username()),
        u'The greatest trick this module ever pulled was convincing the users it did not exist.',
        u"It's just a typo.",
    ]
    return random.choice(quips)
24,
7,
24,
1,
1479070118
] |
def font(self):
"""
Sets this legend group's title font. | plotly/plotly.py | [
13052,
2308,
13052,
1319,
1385013188
] |
def font(self, val):
    """Store *val* under the "font" key of this legend group title."""
    self["font"] = val
13052,
2308,
13052,
1319,
1385013188
] |
def text(self):
"""
Sets the title of the legend group. | plotly/plotly.py | [
13052,
2308,
13052,
1319,
1385013188
] |
def text(self, val):
    """Store *val* under the "text" key of this legend group title."""
    self["text"] = val
13052,
2308,
13052,
1319,
1385013188
] |
def _prop_descriptions(self):
    # Plain-text property descriptions; presumably consumed when building
    # the generated class docstring — verify against the base class.
    return """\
        font
            Sets this legend group's title font.
        text
            Sets the title of the legend group.
        """
13052,
2308,
13052,
1319,
1385013188
] |
def __init__(
self,
code: str,
short_desc: str,
context: str,
*parameters: Iterable[str], | GreenSteam/pep257 | [
1019,
188,
1019,
111,
1328030303
] |
def set_context(self, definition: Definition, explanation: str) -> None:
    """Attach the offending *definition* and its *explanation* to this error."""
    self.explanation = explanation
    self.definition = definition
1019,
188,
1019,
111,
1328030303
] |
def message(self) -> str:
    """Return the full error message shown to the user."""
    text = f'{self.code}: {self.short_desc}'
    if self.context is not None:
        # Fill the context template with this error's parameters.
        detail = self.context.format(*self.parameters)
        text += f' ({detail})'
    return text
1019,
188,
1019,
111,
1328030303
] |
def lines(self) -> str:
    """Return the numbered source lines of this error's definition.

    Output is truncated with an ellipsis marker after seven lines;
    trailing blank lines are dropped.  Returns '' when no definition
    has been attached.
    """
    if self.definition is None:
        return ''
    raw = self.definition.source.splitlines(keepends=True)
    # Strip blank lines from the end only.
    trimmed = list(reversed(list(dropwhile(is_blank, reversed(raw)))))
    start = self.definition.start  # type: ignore
    width = len(str(start + len(trimmed)))
    fmt = f'{{:{width}}}:{{}}'
    out = ''
    for idx, text in enumerate(trimmed):
        if text:
            text = ' ' + text
        out += fmt.format(idx + start, text)
        if idx > 5:
            out += ' ...\n'
            break
    return out
1019,
188,
1019,
111,
1328030303
] |
def __repr__(self) -> str:
    """Mirror the instance's __str__ for debugging output."""
    return str(self)
1019,
188,
1019,
111,
1328030303
] |
def __init__(self, prefix: str, name: str) -> None:
    """Initialize the error group.

    *prefix* is the common code prefix for errors in this group
    (e.g. "D1"); *name* is the group's subject.
    """
    self.prefix = prefix
    self.name = name
    # Starts empty; error entries are registered elsewhere.
    self.errors = []  # type: List[ErrorParams]
1019,
188,
1019,
111,
1328030303
] |
def create_group(cls, prefix: str, name: str) -> ErrorGroup:
    """Create a new error group, register it on the class, and return it."""
    new_group = cls.ErrorGroup(prefix, name)
    cls.groups.append(new_group)
    return new_group
1019,
188,
1019,
111,
1328030303
] |
def get_error_codes(cls) -> Iterable[str]:
    """Yield the code of every registered error, group by group."""
    for grp in cls.groups:
        for err in grp.errors:
            yield err.code
1019,
188,
1019,
111,
1328030303
] |
def to_rst(cls) -> str:
    """Render the error registry as a reStructuredText grid table."""
    # Column width is driven by the longest short description.
    widest = max(
        len(err.short_desc) for grp in cls.groups for err in grp.errors
    )
    rule = '+' + '-' * 6 + '+' + '-' * (widest + 2) + '+\n'
    spacer = '|' + ' ' * (widest + 9) + '|\n'
    parts = []
    for grp in cls.groups:
        # Group header: blank row, bold group name, blank row.
        parts.append(rule)
        parts.append(spacer)
        parts.append('|' + f'**{grp.name}**'.center(widest + 9) + '|\n')
        parts.append(spacer)
        for err in grp.errors:
            parts.append(rule)
            parts.append(
                '|'
                + err.code.center(6)
                + '| '
                + err.short_desc.ljust(widest + 1)
                + '|\n'
            )
    # Closing rule for the whole table.
    parts.append(rule)
    return ''.join(parts)
1019,
188,
1019,
111,
1328030303
] |
def __getattr__(self, item: str) -> Any:
    """Allow attribute-style access to entries (obj.x is obj['x'])."""
    return self[item]
1019,
188,
1019,
111,
1328030303
] |
def __init__(self, name, provider):
    """Create the named entity and remember the *provider* that supplies it."""
    NamedEntity.__init__(self, name)
    self.provider = provider
1,
1,
1,
1,
1378815092
] |
def _import(name):
    """Thin wrapper around __import__ so tests can monkeypatch it."""
    return __import__(name)
325,
67,
325,
29,
1361921626
] |
def __init__(self):
    # Maps Qt module name -> import error message, filled in while
    # probing which Qt binding is available.
    self._import_errors = {}
325,
67,
325,
29,
1361921626
] |
def _guess_qt_api(self):  # pragma: no cover
    """Return the first importable Qt API name, or None when none imports."""

    def _can_import(name):
        try:
            _import(name)
            return True
        except ModuleNotFoundError as e:
            # Record why this binding was rejected, for later reporting.
            self._import_errors[name] = str(e)
            return False

    # Probe QtCore rather than the bare root package: when uninstalling
    # from conda the empty namespace can still be importable.
    candidates = (
        ("PySide6.QtCore", "pyside6"),
        ("PySide2.QtCore", "pyside2"),
        ("PyQt6.QtCore", "pyqt6"),
        ("PyQt5.QtCore", "pyqt5"),
    )
    for module, api in candidates:
        if _can_import(module):
            return api
    return None
325,
67,
325,
29,
1361921626
] |
def _import_module(module_name):
    """Return submodule *module_name* of the package named by _root_module."""
    m = __import__(_root_module, globals(), locals(), [module_name], 0)
    return getattr(m, module_name)
325,
67,
325,
29,
1361921626
] |
def _check_qt_api_version(self):
    """Reject PyQt releases known to be unsupported by pytest-qt."""
    if not self.is_pyqt:
        # All PySide releases are accepted.
        return
    version = self.QtCore.PYQT_VERSION
    if version == 0x060000:  # exactly 6.0.0
        raise pytest.UsageError(
            "PyQt 6.0 is not supported by pytest-qt, use 6.1+ instead."
        )
    if version < 0x050B00:  # anything before 5.11.0
        raise pytest.UsageError(
            "PyQt < 5.11 is not supported by pytest-qt, use 5.11+ instead."
        )
325,
67,
325,
29,
1361921626
] |
def get_versions(self):
    """Return a VersionTuple describing the active Qt binding.

    Raises RuntimeError for an unknown ``pytest_qt_api`` value; this was
    previously ``assert False``, which ``python -O`` silently strips.
    """
    api = self.pytest_qt_api
    if api == "pyside6":
        import PySide6

        return VersionTuple(
            "PySide6", PySide6.__version__, self.QtCore.qVersion(), self.QtCore.__version__
        )
    elif api == "pyside2":
        import PySide2

        return VersionTuple(
            "PySide2", PySide2.__version__, self.QtCore.qVersion(), self.QtCore.__version__
        )
    elif api == "pyqt6":
        return VersionTuple(
            "PyQt6",
            self.QtCore.PYQT_VERSION_STR,
            self.QtCore.qVersion(),
            self.QtCore.QT_VERSION_STR,
        )
    elif api == "pyqt5":
        return VersionTuple(
            "PyQt5",
            self.QtCore.PYQT_VERSION_STR,
            self.QtCore.qVersion(),
            self.QtCore.QT_VERSION_STR,
        )
    raise RuntimeError(f"Internal error, unknown pytest_qt_api: {api}")
325,
67,
325,
29,
1361921626
] |
def __repr__(self):
    # Delegates to the project's format_repr helper; shows the listed
    # attributes, with is_ignored presumably elided when False — verify
    # against format_repr's contract.
    return format_repr(self, 'user_id', 'category_id', 'score', is_ignored=False)
1446,
358,
1446,
649,
1311774990
] |
def returnReplyToQuerier( error ="" ):
"""
@summary : Prints an empty reply so that the receiving web page will
not modify it's display. | khosrow/metpx | [
1,
1,
1,
1,
1446661693
] |
def generateWebPage( images, lang ):
"""
@summary : Generates a web page that simply displays a
series of images one on top of the other. | khosrow/metpx | [
1,
1,
1,
1,
1446661693
] |
def getImagesLangFromForm():
"""
@summary : Parses form with whom this program was called. | khosrow/metpx | [
1,
1,
1,
1,
1446661693
] |
def main():
    """Generate the image web page and acknowledge the querier.

    Builds an HTML page for every image received as a CGI parameter,
    then replies so the caller knows the page was produced.
    """
    images, lang = getImagesLangFromForm()
    generateWebPage(images, lang)
    returnReplyToQuerier()
1,
1,
1,
1,
1446661693
] |
def __init__(self,
             cards,
             atr=None,
             type=types.TYPE_USIM,
             mode=SIMTRACE_ONLINE):
    """Set up the card router.

    cards: card objects to manage (a control card is added alongside).
    atr:   Answer-To-Reset to present, or None.
    type:  SIM type; defaults to USIM.
    mode:  SIMTRACE_ONLINE drives real hardware; falls back to offline
           when the device is absent.

    NOTE(review): the parameter name `type` shadows the builtin; renaming
    would break keyword callers, so it is kept.
    """
    self.loggingApdu = self.setupLogger()
    # Route non-APDU log output either to the APDU logger's file or to
    # the root logging module, per the module-level switch.
    if LOG_NONE_APDU_IN_FILE:
        self.logging = self.loggingApdu
    else:
        self.logging = logging
    self.atr = atr
    self.simType = type
    self.mode = mode
    self.cardsDict = self.addControlCard(cards)
    self.lastUpdate = 0
    # Pending APDU-injection state (target card and payload).
    self.apduInjectedCard = None
    self.apduInjectedData = None
    self.interpreter = None
    self.routerMode = ROUTER_MODE_DISABLED
    if self.mode != SIMTRACE_OFFLINE:
        # 0x03eb:0x6119 — presumably the SIMtrace USB vendor/product ids;
        # TODO confirm.
        self.dev = self.usb_find(0x03eb, 0x6119)
        if self.dev is None:
            # No hardware attached: degrade to offline mode.
            self.logging.warning("Simtrace not connected!")
            self.mode = SIMTRACE_OFFLINE
    self.simCtrl = None
    self.loop = None
    self.shell = None
    # Lock for shared router state — presumably guards the injection
    # fields below; verify against the worker threads.
    self.lock = threading.Lock()
    self.rapduInject = None
    self.inject = INJECT_READY
74,
31,
74,
4,
1461177205
] |
def usbCtrlOut(self, req, buf):
    """Issue a USB control-OUT transfer carrying *buf*; [] when offline."""
    if self.mode == SIMTRACE_OFFLINE:
        # No device in offline mode; nothing to send.
        return []
    # 0x40 = vendor request type, host-to-device.
    return self.dev.ctrl_transfer(
        0x40, bRequest=req, data_or_wLength=buf, timeout=500  # R-APDU
    )
74,
31,
74,
4,
1461177205
] |
def receiveData(self, cmd):
    """Read a control IN transfer for *cmd*; [] when offline.

    Retries once after 200 ms on failure — presumably USB control reads
    can fail transiently; verify against the device behavior.
    """
    if self.mode == SIMTRACE_OFFLINE:
        return []
    try:
        return self.usbCtrlIn(cmd)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; narrowed to Exception, same retry behavior.
        time.sleep(0.2)
        return self.usbCtrlIn(cmd)
74,
31,
74,
4,
1461177205
] |
def resetCards(self, soft=True):
    """Reset the managed cards.

    A soft reset is delegated to a background ResetThread (which also
    starts handling C-APDUs); a hard reset calls reset() on each card's
    main interface directly.
    """
    if soft:
        resetThread = ResetThread(self)
        # Daemon thread so it cannot block interpreter shutdown;
        # `daemon = True` replaces the deprecated setDaemon(True).
        resetThread.daemon = True
        # Start handling C-APDUs.
        resetThread.start()
    else:
        for cardDict in self.cardsDict:
            cardDict[MAIN_INTERFACE].reset()
74,
31,
74,
4,
1461177205
] |
def sendResponseApdu(self, msg):
    """Forward the response APDU *msg* through sendData()."""
    self.sendData(msg)
74,
31,
74,
4,
1461177205
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.