code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def lockout_response(request):
""" if we are locked out, here is the response """
if config.LOCKOUT_TEMPLATE:
context = {
'cooloff_time_seconds': config.COOLOFF_TIME,
'cooloff_time_minutes': config.COOLOFF_TIME / 60,
'failure_limit': config.FAILURE_LIMIT,
}
return render(request, config.LOCKOUT_TEMPLATE, context)
if config.LOCKOUT_URL:
return HttpResponseRedirect(config.LOCKOUT_URL)
if config.COOLOFF_TIME:
return HttpResponse("Account locked: too many login attempts. "
"Please try again later.")
else:
return HttpResponse("Account locked: too many login attempts. "
"Contact an admin to unlock your account.") | if we are locked out, here is the response | Below is the the instruction that describes the task:
### Input:
if we are locked out, here is the response
### Response:
def lockout_response(request):
""" if we are locked out, here is the response """
if config.LOCKOUT_TEMPLATE:
context = {
'cooloff_time_seconds': config.COOLOFF_TIME,
'cooloff_time_minutes': config.COOLOFF_TIME / 60,
'failure_limit': config.FAILURE_LIMIT,
}
return render(request, config.LOCKOUT_TEMPLATE, context)
if config.LOCKOUT_URL:
return HttpResponseRedirect(config.LOCKOUT_URL)
if config.COOLOFF_TIME:
return HttpResponse("Account locked: too many login attempts. "
"Please try again later.")
else:
return HttpResponse("Account locked: too many login attempts. "
"Contact an admin to unlock your account.") |
def pop(self, key, default=_NotGiven):
"""
If *key* is in the dictionary, remove it and return its value,
else return *default*. If *default* is not given and *key* is not in
the dictionary, a KeyError is raised.
"""
if key in self:
self._list_remove(key)
return self._pop(key)
else:
if default is _NotGiven:
raise KeyError(key)
else:
return default | If *key* is in the dictionary, remove it and return its value,
else return *default*. If *default* is not given and *key* is not in
the dictionary, a KeyError is raised. | Below is the instruction that describes the task:
### Input:
If *key* is in the dictionary, remove it and return its value,
else return *default*. If *default* is not given and *key* is not in
the dictionary, a KeyError is raised.
### Response:
def pop(self, key, default=_NotGiven):
"""
If *key* is in the dictionary, remove it and return its value,
else return *default*. If *default* is not given and *key* is not in
the dictionary, a KeyError is raised.
"""
if key in self:
self._list_remove(key)
return self._pop(key)
else:
if default is _NotGiven:
raise KeyError(key)
else:
return default |
def get_timedelta(self, now=None):
"""
Returns number of seconds that passed since ``self.started``, as float.
None is returned if ``self.started`` was not set yet.
"""
def datetime_to_time(timestamp):
atime = time.mktime(timestamp.timetuple())
atime += timestamp.microsecond / 10.0**6
return atime
if self.started is not None:
now = now or datetime.datetime.now()
started_time = datetime_to_time(self.started)
now_time = datetime_to_time(now)
return now_time - started_time
return None | Returns number of seconds that passed since ``self.started``, as float.
None is returned if ``self.started`` was not set yet. | Below is the instruction that describes the task:
### Input:
Returns number of seconds that passed since ``self.started``, as float.
None is returned if ``self.started`` was not set yet.
### Response:
def get_timedelta(self, now=None):
"""
Returns number of seconds that passed since ``self.started``, as float.
None is returned if ``self.started`` was not set yet.
"""
def datetime_to_time(timestamp):
atime = time.mktime(timestamp.timetuple())
atime += timestamp.microsecond / 10.0**6
return atime
if self.started is not None:
now = now or datetime.datetime.now()
started_time = datetime_to_time(self.started)
now_time = datetime_to_time(now)
return now_time - started_time
return None |
def relation_get(attribute=None, unit=None, rid=None):
"""Attempt to use leader-get if supported in the current version of Juju,
otherwise falls back on relation-get.
Note that we only attempt to use leader-get if the provided rid is a peer
relation id or no relation id is provided (in which case we assume we are
within the peer relation context).
"""
try:
if rid in relation_ids('cluster'):
return leader_get(attribute, rid)
else:
raise NotImplementedError
except NotImplementedError:
return _relation_get(attribute=attribute, rid=rid, unit=unit) | Attempt to use leader-get if supported in the current version of Juju,
otherwise falls back on relation-get.
Note that we only attempt to use leader-get if the provided rid is a peer
relation id or no relation id is provided (in which case we assume we are
within the peer relation context). | Below is the instruction that describes the task:
### Input:
Attempt to use leader-get if supported in the current version of Juju,
otherwise falls back on relation-get.
Note that we only attempt to use leader-get if the provided rid is a peer
relation id or no relation id is provided (in which case we assume we are
within the peer relation context).
### Response:
def relation_get(attribute=None, unit=None, rid=None):
"""Attempt to use leader-get if supported in the current version of Juju,
otherwise falls back on relation-get.
Note that we only attempt to use leader-get if the provided rid is a peer
relation id or no relation id is provided (in which case we assume we are
within the peer relation context).
"""
try:
if rid in relation_ids('cluster'):
return leader_get(attribute, rid)
else:
raise NotImplementedError
except NotImplementedError:
return _relation_get(attribute=attribute, rid=rid, unit=unit) |
def process_tls(self, data, name):
"""
Remote TLS processing - one address:port per line
:param data:
:param name:
:return:
"""
ret = []
try:
lines = [x.strip() for x in data.split('\n')]
for idx, line in enumerate(lines):
if line == '':
continue
sub = self.process_host(line, name, idx)
if sub is not None:
ret.append(sub)
except Exception as e:
logger.error('Error in file processing %s : %s' % (name, e))
self.roca.trace_logger.log(e)
return ret | Remote TLS processing - one address:port per line
:param data:
:param name:
:return: | Below is the instruction that describes the task:
### Input:
Remote TLS processing - one address:port per line
:param data:
:param name:
:return:
### Response:
def process_tls(self, data, name):
"""
Remote TLS processing - one address:port per line
:param data:
:param name:
:return:
"""
ret = []
try:
lines = [x.strip() for x in data.split('\n')]
for idx, line in enumerate(lines):
if line == '':
continue
sub = self.process_host(line, name, idx)
if sub is not None:
ret.append(sub)
except Exception as e:
logger.error('Error in file processing %s : %s' % (name, e))
self.roca.trace_logger.log(e)
return ret |
def deploy(stage, lambda_package, no_lambda, rebuild_deps, config_file):
"""Deploy the project to the development stage."""
config = _load_config(config_file)
if stage is None:
stage = config['devstage']
s3 = boto3.client('s3')
cfn = boto3.client('cloudformation')
region = _get_aws_region()
# obtain previous deployment if it exists
previous_deployment = None
try:
previous_deployment = cfn.describe_stacks(
StackName=config['name'])['Stacks'][0]
except botocore.exceptions.ClientError:
pass
# build lambda package if required
built_package = False
new_package = True
if lambda_package is None and not no_lambda:
print("Building lambda package...")
lambda_package = _build(config, rebuild_deps=rebuild_deps)
built_package = True
elif lambda_package is None:
# preserve package from previous deployment
new_package = False
lambda_package = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Key')
# create S3 bucket if it doesn't exist yet
bucket = config['aws']['s3_bucket']
_ensure_bucket_exists(s3, bucket, region)
# upload lambda package to S3
if new_package:
s3.upload_file(lambda_package, bucket, lambda_package)
if built_package:
# we created the package, so now that is on S3 we can delete it
os.remove(lambda_package)
# prepare cloudformation template
template_body = get_cfn_template(config)
parameters = [
{'ParameterKey': 'LambdaS3Bucket', 'ParameterValue': bucket},
{'ParameterKey': 'LambdaS3Key', 'ParameterValue': lambda_package},
]
stages = list(config['stage_environments'].keys())
stages.sort()
for s in stages:
param = s.title() + 'Version'
if s != stage:
v = _get_from_stack(previous_deployment, 'Parameter', param) \
if previous_deployment else '$LATEST'
v = v or '$LATEST'
else:
v = '$LATEST'
parameters.append({'ParameterKey': param, 'ParameterValue': v})
# run the cloudformation template
if previous_deployment is None:
print('Deploying {}:{}...'.format(config['name'], stage))
cfn.create_stack(StackName=config['name'], TemplateBody=template_body,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM'])
waiter = cfn.get_waiter('stack_create_complete')
else:
print('Updating {}:{}...'.format(config['name'], stage))
cfn.update_stack(StackName=config['name'], TemplateBody=template_body,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM'])
waiter = cfn.get_waiter('stack_update_complete')
# wait for cloudformation to do its thing
try:
waiter.wait(StackName=config['name'])
except botocore.exceptions.ClientError:
# the update failed, so we remove the lambda package from S3
if built_package:
s3.delete_object(Bucket=bucket, Key=lambda_package)
raise
else:
if previous_deployment and new_package:
# the update succeeded, so it is safe to delete the lambda package
# used by the previous deployment
old_pkg = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Key')
s3.delete_object(Bucket=bucket, Key=old_pkg)
# we are done, show status info and exit
    _print_status(config) | Deploy the project to the development stage. | Below is the instruction that describes the task:
### Input:
Deploy the project to the development stage.
### Response:
def deploy(stage, lambda_package, no_lambda, rebuild_deps, config_file):
"""Deploy the project to the development stage."""
config = _load_config(config_file)
if stage is None:
stage = config['devstage']
s3 = boto3.client('s3')
cfn = boto3.client('cloudformation')
region = _get_aws_region()
# obtain previous deployment if it exists
previous_deployment = None
try:
previous_deployment = cfn.describe_stacks(
StackName=config['name'])['Stacks'][0]
except botocore.exceptions.ClientError:
pass
# build lambda package if required
built_package = False
new_package = True
if lambda_package is None and not no_lambda:
print("Building lambda package...")
lambda_package = _build(config, rebuild_deps=rebuild_deps)
built_package = True
elif lambda_package is None:
# preserve package from previous deployment
new_package = False
lambda_package = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Key')
# create S3 bucket if it doesn't exist yet
bucket = config['aws']['s3_bucket']
_ensure_bucket_exists(s3, bucket, region)
# upload lambda package to S3
if new_package:
s3.upload_file(lambda_package, bucket, lambda_package)
if built_package:
# we created the package, so now that is on S3 we can delete it
os.remove(lambda_package)
# prepare cloudformation template
template_body = get_cfn_template(config)
parameters = [
{'ParameterKey': 'LambdaS3Bucket', 'ParameterValue': bucket},
{'ParameterKey': 'LambdaS3Key', 'ParameterValue': lambda_package},
]
stages = list(config['stage_environments'].keys())
stages.sort()
for s in stages:
param = s.title() + 'Version'
if s != stage:
v = _get_from_stack(previous_deployment, 'Parameter', param) \
if previous_deployment else '$LATEST'
v = v or '$LATEST'
else:
v = '$LATEST'
parameters.append({'ParameterKey': param, 'ParameterValue': v})
# run the cloudformation template
if previous_deployment is None:
print('Deploying {}:{}...'.format(config['name'], stage))
cfn.create_stack(StackName=config['name'], TemplateBody=template_body,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM'])
waiter = cfn.get_waiter('stack_create_complete')
else:
print('Updating {}:{}...'.format(config['name'], stage))
cfn.update_stack(StackName=config['name'], TemplateBody=template_body,
Parameters=parameters,
Capabilities=['CAPABILITY_IAM'])
waiter = cfn.get_waiter('stack_update_complete')
# wait for cloudformation to do its thing
try:
waiter.wait(StackName=config['name'])
except botocore.exceptions.ClientError:
# the update failed, so we remove the lambda package from S3
if built_package:
s3.delete_object(Bucket=bucket, Key=lambda_package)
raise
else:
if previous_deployment and new_package:
# the update succeeded, so it is safe to delete the lambda package
# used by the previous deployment
old_pkg = _get_from_stack(previous_deployment, 'Parameter',
'LambdaS3Key')
s3.delete_object(Bucket=bucket, Key=old_pkg)
# we are done, show status info and exit
_print_status(config) |
def drop_incomplete_days(dataframe, shift=0):
"""truncates a given dataframe to full days only
This funtion truncates a given pandas dataframe (time series) to full days
only, thus dropping leading and tailing hours of incomplete days. Please
note that this methodology only applies to hourly time series.
Args:
dataframe: A pandas dataframe object with index defined as datetime
shift (unsigned int, opt): First hour of daily recordings. For daily
recordings of precipitation gages, 8 would be the first hour of
the subsequent day of recordings since daily totals are
usually recorded at 7. Omit defining this parameter if you intend
to pertain recordings to 0-23h.
Returns:
A dataframe with full days only.
"""
dropped = 0
if shift > 23 or shift < 0:
print("Invalid shift parameter setting! Using defaults.")
shift = 0
first = shift
last = first - 1
if last < 0:
last += 24
try:
# todo: move this checks to a separate function
n = len(dataframe.index)
except:
print('Error: Invalid dataframe.')
return dataframe
delete = list()
# drop heading lines if required
for i in range(0, n):
if dataframe.index.hour[i] == first and dataframe.index.minute[i] == 0:
break
else:
delete.append(i)
dropped += 1
# drop tailing lines if required
for i in range(n-1, 0, -1):
if dataframe.index.hour[i] == last and dataframe.index.minute[i] == 0:
break
else:
delete.append(i)
dropped += 1
# print("The following rows have been dropped (%i in total):" % dropped)
# print(delete)
return dataframe.drop(dataframe.index[[delete]]) | truncates a given dataframe to full days only
This funtion truncates a given pandas dataframe (time series) to full days
only, thus dropping leading and tailing hours of incomplete days. Please
note that this methodology only applies to hourly time series.
Args:
dataframe: A pandas dataframe object with index defined as datetime
shift (unsigned int, opt): First hour of daily recordings. For daily
recordings of precipitation gages, 8 would be the first hour of
the subsequent day of recordings since daily totals are
usually recorded at 7. Omit defining this parameter if you intend
to pertain recordings to 0-23h.
Returns:
A dataframe with full days only. | Below is the instruction that describes the task:
### Input:
truncates a given dataframe to full days only
This funtion truncates a given pandas dataframe (time series) to full days
only, thus dropping leading and tailing hours of incomplete days. Please
note that this methodology only applies to hourly time series.
Args:
dataframe: A pandas dataframe object with index defined as datetime
shift (unsigned int, opt): First hour of daily recordings. For daily
recordings of precipitation gages, 8 would be the first hour of
the subsequent day of recordings since daily totals are
usually recorded at 7. Omit defining this parameter if you intend
to pertain recordings to 0-23h.
Returns:
A dataframe with full days only.
### Response:
def drop_incomplete_days(dataframe, shift=0):
"""truncates a given dataframe to full days only
This funtion truncates a given pandas dataframe (time series) to full days
only, thus dropping leading and tailing hours of incomplete days. Please
note that this methodology only applies to hourly time series.
Args:
dataframe: A pandas dataframe object with index defined as datetime
shift (unsigned int, opt): First hour of daily recordings. For daily
recordings of precipitation gages, 8 would be the first hour of
the subsequent day of recordings since daily totals are
usually recorded at 7. Omit defining this parameter if you intend
to pertain recordings to 0-23h.
Returns:
A dataframe with full days only.
"""
dropped = 0
if shift > 23 or shift < 0:
print("Invalid shift parameter setting! Using defaults.")
shift = 0
first = shift
last = first - 1
if last < 0:
last += 24
try:
# todo: move this checks to a separate function
n = len(dataframe.index)
except:
print('Error: Invalid dataframe.')
return dataframe
delete = list()
# drop heading lines if required
for i in range(0, n):
if dataframe.index.hour[i] == first and dataframe.index.minute[i] == 0:
break
else:
delete.append(i)
dropped += 1
# drop tailing lines if required
for i in range(n-1, 0, -1):
if dataframe.index.hour[i] == last and dataframe.index.minute[i] == 0:
break
else:
delete.append(i)
dropped += 1
# print("The following rows have been dropped (%i in total):" % dropped)
# print(delete)
return dataframe.drop(dataframe.index[[delete]]) |
def show_image(kwargs, call=None):
'''
Show the details from QingCloud concerning an image.
CLI Examples:
.. code-block:: bash
salt-cloud -f show_image my-qingcloud image=trustysrvx64c
salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_images function must be called with '
'-f or --function'
)
if not isinstance(kwargs, dict):
kwargs = {}
images = kwargs['image']
images = images.split(',')
params = {
'action': 'DescribeImages',
'images': images,
'zone': _get_specified_zone(kwargs, get_configured_provider()),
}
items = query(params=params)
if not items['image_set']:
raise SaltCloudNotFound('The specified image could not be found.')
result = {}
for image in items['image_set']:
result[image['image_id']] = {}
for key in image:
result[image['image_id']][key] = image[key]
return result | Show the details from QingCloud concerning an image.
CLI Examples:
.. code-block:: bash
salt-cloud -f show_image my-qingcloud image=trustysrvx64c
salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1 | Below is the instruction that describes the task:
### Input:
Show the details from QingCloud concerning an image.
CLI Examples:
.. code-block:: bash
salt-cloud -f show_image my-qingcloud image=trustysrvx64c
salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
### Response:
def show_image(kwargs, call=None):
'''
Show the details from QingCloud concerning an image.
CLI Examples:
.. code-block:: bash
salt-cloud -f show_image my-qingcloud image=trustysrvx64c
salt-cloud -f show_image my-qingcloud image=trustysrvx64c,coreos4
salt-cloud -f show_image my-qingcloud image=trustysrvx64c zone=ap1
'''
if call != 'function':
raise SaltCloudSystemExit(
'The show_images function must be called with '
'-f or --function'
)
if not isinstance(kwargs, dict):
kwargs = {}
images = kwargs['image']
images = images.split(',')
params = {
'action': 'DescribeImages',
'images': images,
'zone': _get_specified_zone(kwargs, get_configured_provider()),
}
items = query(params=params)
if not items['image_set']:
raise SaltCloudNotFound('The specified image could not be found.')
result = {}
for image in items['image_set']:
result[image['image_id']] = {}
for key in image:
result[image['image_id']][key] = image[key]
return result |
def dec(data, **kwargs):
'''
Alias to `{box_type}_decrypt`
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
box_type = _get_config(**kwargs)['box_type']
if box_type == 'secretbox':
return secretbox_decrypt(data, **kwargs)
return sealedbox_decrypt(data, **kwargs) | Alias to `{box_type}_decrypt`
box_type: secretbox, sealedbox(default) | Below is the instruction that describes the task:
### Input:
Alias to `{box_type}_decrypt`
box_type: secretbox, sealedbox(default)
### Response:
def dec(data, **kwargs):
'''
Alias to `{box_type}_decrypt`
box_type: secretbox, sealedbox(default)
'''
if 'keyfile' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'keyfile\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk_file\' argument instead.'
)
kwargs['sk_file'] = kwargs['keyfile']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
if 'key' in kwargs:
salt.utils.versions.warn_until(
'Neon',
'The \'key\' argument has been deprecated and will be removed in Salt '
'{version}. Please use \'sk\' argument instead.'
)
kwargs['sk'] = kwargs['key']
# set boxtype to `secretbox` to maintain backward compatibility
kwargs['box_type'] = 'secretbox'
box_type = _get_config(**kwargs)['box_type']
if box_type == 'secretbox':
return secretbox_decrypt(data, **kwargs)
return sealedbox_decrypt(data, **kwargs) |
def build_groups(self):
"""
Generates the sql for the GROUP BY portion of the query
:return: the GROUP BY portion of the query
:rtype: str
"""
# check if there are any groupings
if len(self.groups):
groups = []
# get the group sql for each grouping
for group in self.groups:
groups.append(group.get_name())
return 'GROUP BY {0} '.format(', '.join(groups))
return '' | Generates the sql for the GROUP BY portion of the query
:return: the GROUP BY portion of the query
:rtype: str | Below is the instruction that describes the task:
### Input:
Generates the sql for the GROUP BY portion of the query
:return: the GROUP BY portion of the query
:rtype: str
### Response:
def build_groups(self):
"""
Generates the sql for the GROUP BY portion of the query
:return: the GROUP BY portion of the query
:rtype: str
"""
# check if there are any groupings
if len(self.groups):
groups = []
# get the group sql for each grouping
for group in self.groups:
groups.append(group.get_name())
return 'GROUP BY {0} '.format(', '.join(groups))
return '' |
def propose_unif(self):
"""Propose a new live point by sampling *uniformly*
within the unit cube."""
u = self.unitcube.sample(rstate=self.rstate)
ax = np.identity(self.npdim)
return u, ax | Propose a new live point by sampling *uniformly*
within the unit cube. | Below is the instruction that describes the task:
### Input:
Propose a new live point by sampling *uniformly*
within the unit cube.
### Response:
def propose_unif(self):
"""Propose a new live point by sampling *uniformly*
within the unit cube."""
u = self.unitcube.sample(rstate=self.rstate)
ax = np.identity(self.npdim)
return u, ax |
def send_keyboard_input(text=None, key_list=None):
"""
Args:
text (None):
key_list (list):
References:
http://stackoverflow.com/questions/14788036/python-win32api-sendmesage
http://www.pinvoke.net/default.aspx/user32.sendinput
CommandLine:
python -m utool.util_cplat --test-send_keyboard_input
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> text = '%paste'
>>> result = send_keyboard_input('%paste')
>>> print(result)
"""
#key_mapping = {
# 'enter':
#}
if WIN32:
#raise NotImplementedError()
#import win32api
#import win32gui
#import win32con
#hwnd = win32gui.GetForegroundWindow()
#print('entering text into %r' % (win32gui.GetWindowText(hwnd ),))
#win32con.VK_RETURN
#def callback(hwnd, hwnds):
#if win32gui.IsWindowVisible(hwnd) and win32gui.IsWindowEnabled(hwnd):
#hwnds[win32gui.GetClassName(hwnd)] = hwnd
#return True
#hwnds = {}
#win32gui.EnumChildWindows(hwnd, callback, hwnds)
#for ord_char in map(ord, text):
#win32api.SendMessage(hwnd, win32con.WM_CHAR, ord_char, 0)
from utool._internal import win32_send_keys
pause = float(.05)
text = 'paste'
keys = text
kw = dict(with_spaces=False, with_tabs=True, with_newlines=False)
win32_send_keys.SendKeys(keys, pause=pause, turn_off_numlock=True, **kw)
#win32_send_keys
#import time
#keys_ = win32_send_keys.parse_keys(keys, **kw)
#for k in keys_:
# k.Run()
# time.sleep(pause)
else:
if key_list is None:
char_map = {
'%': 'shift+5'
}
key_list = [char_map.get(char, char) for char in text]
xdotool_args = ['xdotool', 'key'] + key_list
#, 'shift+5', 'p', 'a', 's', 't', 'e', 'enter']
cmd = ' '.join(xdotool_args)
print('Running: cmd=%r' % (cmd,))
print('+---')
print(cmd)
print('L___')
os.system(cmd) | Args:
text (None):
key_list (list):
References:
http://stackoverflow.com/questions/14788036/python-win32api-sendmesage
http://www.pinvoke.net/default.aspx/user32.sendinput
CommandLine:
python -m utool.util_cplat --test-send_keyboard_input
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> text = '%paste'
>>> result = send_keyboard_input('%paste')
>>> print(result) | Below is the instruction that describes the task:
### Input:
Args:
text (None):
key_list (list):
References:
http://stackoverflow.com/questions/14788036/python-win32api-sendmesage
http://www.pinvoke.net/default.aspx/user32.sendinput
CommandLine:
python -m utool.util_cplat --test-send_keyboard_input
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> text = '%paste'
>>> result = send_keyboard_input('%paste')
>>> print(result)
### Response:
def send_keyboard_input(text=None, key_list=None):
"""
Args:
text (None):
key_list (list):
References:
http://stackoverflow.com/questions/14788036/python-win32api-sendmesage
http://www.pinvoke.net/default.aspx/user32.sendinput
CommandLine:
python -m utool.util_cplat --test-send_keyboard_input
Example:
>>> # DISABLE_DOCTEST
>>> from utool.util_cplat import * # NOQA
>>> text = '%paste'
>>> result = send_keyboard_input('%paste')
>>> print(result)
"""
#key_mapping = {
# 'enter':
#}
if WIN32:
#raise NotImplementedError()
#import win32api
#import win32gui
#import win32con
#hwnd = win32gui.GetForegroundWindow()
#print('entering text into %r' % (win32gui.GetWindowText(hwnd ),))
#win32con.VK_RETURN
#def callback(hwnd, hwnds):
#if win32gui.IsWindowVisible(hwnd) and win32gui.IsWindowEnabled(hwnd):
#hwnds[win32gui.GetClassName(hwnd)] = hwnd
#return True
#hwnds = {}
#win32gui.EnumChildWindows(hwnd, callback, hwnds)
#for ord_char in map(ord, text):
#win32api.SendMessage(hwnd, win32con.WM_CHAR, ord_char, 0)
from utool._internal import win32_send_keys
pause = float(.05)
text = 'paste'
keys = text
kw = dict(with_spaces=False, with_tabs=True, with_newlines=False)
win32_send_keys.SendKeys(keys, pause=pause, turn_off_numlock=True, **kw)
#win32_send_keys
#import time
#keys_ = win32_send_keys.parse_keys(keys, **kw)
#for k in keys_:
# k.Run()
# time.sleep(pause)
else:
if key_list is None:
char_map = {
'%': 'shift+5'
}
key_list = [char_map.get(char, char) for char in text]
xdotool_args = ['xdotool', 'key'] + key_list
#, 'shift+5', 'p', 'a', 's', 't', 'e', 'enter']
cmd = ' '.join(xdotool_args)
print('Running: cmd=%r' % (cmd,))
print('+---')
print(cmd)
print('L___')
os.system(cmd) |
def _candidate_filenames():
"""Generates filenames of the form 'specktre_123AB.png'.
The random noise is five characters long, which allows for
62^5 = 916 million possible filenames.
"""
while True:
random_stub = ''.join([
random.choice(string.ascii_letters + string.digits)
for _ in range(5)
])
yield 'specktre_%s.png' % random_stub | Generates filenames of the form 'specktre_123AB.png'.
The random noise is five characters long, which allows for
62^5 = 916 million possible filenames. | Below is the instruction that describes the task:
### Input:
Generates filenames of the form 'specktre_123AB.png'.
The random noise is five characters long, which allows for
62^5 = 916 million possible filenames.
### Response:
def _candidate_filenames():
"""Generates filenames of the form 'specktre_123AB.png'.
The random noise is five characters long, which allows for
62^5 = 916 million possible filenames.
"""
while True:
random_stub = ''.join([
random.choice(string.ascii_letters + string.digits)
for _ in range(5)
])
yield 'specktre_%s.png' % random_stub |
def api_token_required(f, *args, **kwargs):
"""
Decorator helper function to ensure some methods aren't needlessly called
without an api_token configured.
"""
try:
if args[0].api_token is None:
raise AttributeError('Parameter api_token is required.')
except AttributeError:
raise AttributeError('Parameter api_token is required.')
return f(*args, **kwargs) | Decorator helper function to ensure some methods aren't needlessly called
without an api_token configured. | Below is the instruction that describes the task:
### Input:
Decorator helper function to ensure some methods aren't needlessly called
without an api_token configured.
### Response:
def api_token_required(f, *args, **kwargs):
"""
Decorator helper function to ensure some methods aren't needlessly called
without an api_token configured.
"""
try:
if args[0].api_token is None:
raise AttributeError('Parameter api_token is required.')
except AttributeError:
raise AttributeError('Parameter api_token is required.')
return f(*args, **kwargs) |
def upload(self, filename, configuration=None, metadata=None, transcript=None):
"""
Upload new new media to the service as an attachment or from a url.
HTTP POST on /media
:param filename: Media file attached to the request.
:param configuration: VoicebaseMediaConfiguration
:param metadata: VoicebaseMediaMeta
:param transcript: attached transcript
:return: VoicebaseMedia
"""
data = {}
if metadata:
data['metadata'] = str(metadata)
if configuration:
data['configuration'] = str(configuration)
# Determine mime type
m = magic.Magic(mime=True)
mime_type = m.from_file(filename)
# Open file and pipe to request
with open(filename) as handle:
file_info = [('media', (filename, handle, mime_type))]
rq = requests.Request(b'POST', self.full_url('base'), data=data, headers=self.session.headers,
files=file_info)
prepared_rq = rq.prepare()
response = self.session.send(prepared_rq)
response.raise_for_status()
jsn = response.json()
log.debug('Upload response: {}'.format(jsn))
return VoicebaseMedia(jsn, api=self.api) | Upload new new media to the service as an attachment or from a url.
HTTP POST on /media
:param filename: Media file attached to the request.
:param configuration: VoicebaseMediaConfiguration
:param metadata: VoicebaseMediaMeta
:param transcript: attached transcript
:return: VoicebaseMedia | Below is the instruction that describes the task:
### Input:
Upload new new media to the service as an attachment or from a url.
HTTP POST on /media
:param filename: Media file attached to the request.
:param configuration: VoicebaseMediaConfiguration
:param metadata: VoicebaseMediaMeta
:param transcript: attached transcript
:return: VoicebaseMedia
### Response:
def upload(self, filename, configuration=None, metadata=None, transcript=None):
"""
Upload new new media to the service as an attachment or from a url.
HTTP POST on /media
:param filename: Media file attached to the request.
:param configuration: VoicebaseMediaConfiguration
:param metadata: VoicebaseMediaMeta
:param transcript: attached transcript
:return: VoicebaseMedia
"""
data = {}
if metadata:
data['metadata'] = str(metadata)
if configuration:
data['configuration'] = str(configuration)
# Determine mime type
m = magic.Magic(mime=True)
mime_type = m.from_file(filename)
# Open file and pipe to request
with open(filename) as handle:
file_info = [('media', (filename, handle, mime_type))]
rq = requests.Request(b'POST', self.full_url('base'), data=data, headers=self.session.headers,
files=file_info)
prepared_rq = rq.prepare()
response = self.session.send(prepared_rq)
response.raise_for_status()
jsn = response.json()
log.debug('Upload response: {}'.format(jsn))
return VoicebaseMedia(jsn, api=self.api) |
def _read_utf(cls, data, pos, kind=None):
"""
:param kind: Optional; a human-friendly identifier for the kind of UTF-8 data we're loading (e.g. is it a keystore alias? an algorithm identifier? something else?).
Used to construct more informative exception messages when a decoding error occurs.
"""
size = b2.unpack_from(data, pos)[0]
pos += 2
try:
return data[pos:pos+size].decode('utf-8'), pos+size
except (UnicodeEncodeError, UnicodeDecodeError) as e:
raise BadKeystoreFormatException(("Failed to read %s, contains bad UTF-8 data: %s" % (kind, str(e))) if kind else \
("Encountered bad UTF-8 data: %s" % str(e))) | :param kind: Optional; a human-friendly identifier for the kind of UTF-8 data we're loading (e.g. is it a keystore alias? an algorithm identifier? something else?).
Used to construct more informative exception messages when a decoding error occurs. | Below is the the instruction that describes the task:
### Input:
:param kind: Optional; a human-friendly identifier for the kind of UTF-8 data we're loading (e.g. is it a keystore alias? an algorithm identifier? something else?).
Used to construct more informative exception messages when a decoding error occurs.
### Response:
def _read_utf(cls, data, pos, kind=None):
"""
:param kind: Optional; a human-friendly identifier for the kind of UTF-8 data we're loading (e.g. is it a keystore alias? an algorithm identifier? something else?).
Used to construct more informative exception messages when a decoding error occurs.
"""
size = b2.unpack_from(data, pos)[0]
pos += 2
try:
return data[pos:pos+size].decode('utf-8'), pos+size
except (UnicodeEncodeError, UnicodeDecodeError) as e:
raise BadKeystoreFormatException(("Failed to read %s, contains bad UTF-8 data: %s" % (kind, str(e))) if kind else \
("Encountered bad UTF-8 data: %s" % str(e))) |
def p_define(p):
""" define : DEFINE ID params defs
"""
if ENABLED:
if p[4]:
if SPACES.match(p[4][0]):
p[4][0] = p[4][0][1:]
else:
warning(p.lineno(1), "missing whitespace after the macro name")
ID_TABLE.define(p[2], args=p[3], value=p[4], lineno=p.lineno(2),
fname=CURRENT_FILE[-1])
p[0] = [] | define : DEFINE ID params defs | Below is the the instruction that describes the task:
### Input:
define : DEFINE ID params defs
### Response:
def p_define(p):
""" define : DEFINE ID params defs
"""
if ENABLED:
if p[4]:
if SPACES.match(p[4][0]):
p[4][0] = p[4][0][1:]
else:
warning(p.lineno(1), "missing whitespace after the macro name")
ID_TABLE.define(p[2], args=p[3], value=p[4], lineno=p.lineno(2),
fname=CURRENT_FILE[-1])
p[0] = [] |
def exclude_package(self, package):
"""Remove packages, modules, and extensions in named package"""
pfx = package + '.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
] | Remove packages, modules, and extensions in named package | Below is the the instruction that describes the task:
### Input:
Remove packages, modules, and extensions in named package
### Response:
def exclude_package(self, package):
"""Remove packages, modules, and extensions in named package"""
pfx = package + '.'
if self.packages:
self.packages = [
p for p in self.packages
if p != package and not p.startswith(pfx)
]
if self.py_modules:
self.py_modules = [
p for p in self.py_modules
if p != package and not p.startswith(pfx)
]
if self.ext_modules:
self.ext_modules = [
p for p in self.ext_modules
if p.name != package and not p.name.startswith(pfx)
] |
def draw_separators(self):
"""Draw the lines separating the categories on the Canvas"""
total = 1
self._timeline.create_line((0, 1, self.pixel_width, 1))
for index, (category, label) in enumerate(self._category_labels.items()):
height = label.winfo_reqheight()
self._rows[category] = (total, total + height)
total += height
self._timeline.create_line((0, total, self.pixel_width, total))
pixel_height = total
self._timeline.config(height=pixel_height) | Draw the lines separating the categories on the Canvas | Below is the the instruction that describes the task:
### Input:
Draw the lines separating the categories on the Canvas
### Response:
def draw_separators(self):
"""Draw the lines separating the categories on the Canvas"""
total = 1
self._timeline.create_line((0, 1, self.pixel_width, 1))
for index, (category, label) in enumerate(self._category_labels.items()):
height = label.winfo_reqheight()
self._rows[category] = (total, total + height)
total += height
self._timeline.create_line((0, total, self.pixel_width, total))
pixel_height = total
self._timeline.config(height=pixel_height) |
def _repr_html_(self, indices=None, iops=None, lx=None, li=None, lls=None):
"""Representation of the parameter in html for notebook display."""
filter_ = self._current_slice_
vals = self.flat
if indices is None: indices = self._indices(filter_)
if iops is None:
ravi = self._raveled_index(filter_)
iops = OrderedDict([name, iop.properties_for(ravi)] for name, iop in self._index_operations.items())
if lls is None: lls = [self._max_len_names(iop, name) for name, iop in iops.items()]
header_format = """
<tr>
<th><b>{i}</b></th>
<th><b>{x}</b></th>
<th><b>{iops}</b></th>
</tr>"""
header = header_format.format(x=self.hierarchy_name(), i=__index_name__, iops="</b></th><th><b>".join(list(iops.keys()))) # nice header for printing
to_print = ["""<style type="text/css">
.tg {padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}
.tg td{font-family:"Courier New", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
.tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
</style>"""]
to_print.append('<table class="tg">')
to_print.append(header)
format_spec = self._format_spec(indices, iops, lx, li, lls, False)
format_spec[:2] = ["<tr><td class=tg-left>{i}</td>".format(i=format_spec[0]), "<td class=tg-right>{i}</td>".format(i=format_spec[1])]
for i in range(2, len(format_spec)):
format_spec[i] = '<td class=tg-left>{c}</td>'.format(c=format_spec[i])
format_spec = "".join(format_spec) + '</tr>'
for i in range(self.size):
to_print.append(format_spec.format(index=indices[i], value="{1:.{0}f}".format(__precision__, vals[i]), **dict((name, ' '.join(map(str, iops[name][i]))) for name in iops)))
return '\n'.join(to_print) | Representation of the parameter in html for notebook display. | Below is the the instruction that describes the task:
### Input:
Representation of the parameter in html for notebook display.
### Response:
def _repr_html_(self, indices=None, iops=None, lx=None, li=None, lls=None):
"""Representation of the parameter in html for notebook display."""
filter_ = self._current_slice_
vals = self.flat
if indices is None: indices = self._indices(filter_)
if iops is None:
ravi = self._raveled_index(filter_)
iops = OrderedDict([name, iop.properties_for(ravi)] for name, iop in self._index_operations.items())
if lls is None: lls = [self._max_len_names(iop, name) for name, iop in iops.items()]
header_format = """
<tr>
<th><b>{i}</b></th>
<th><b>{x}</b></th>
<th><b>{iops}</b></th>
</tr>"""
header = header_format.format(x=self.hierarchy_name(), i=__index_name__, iops="</b></th><th><b>".join(list(iops.keys()))) # nice header for printing
to_print = ["""<style type="text/css">
.tg {padding:2px 3px;word-break:normal;border-collapse:collapse;border-spacing:0;border-color:#DCDCDC;margin:0px auto;width:100%;}
.tg td{font-family:"Courier New", Courier, monospace !important;font-weight:bold;color:#444;background-color:#F7FDFA;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg th{font-family:"Courier New", Courier, monospace !important;font-weight:normal;color:#fff;background-color:#26ADE4;border-style:solid;border-width:1px;overflow:hidden;word-break:normal;border-color:#DCDCDC;}
.tg .tg-left{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:left;}
.tg .tg-right{font-family:"Courier New", Courier, monospace !important;font-weight:normal;text-align:right;}
</style>"""]
to_print.append('<table class="tg">')
to_print.append(header)
format_spec = self._format_spec(indices, iops, lx, li, lls, False)
format_spec[:2] = ["<tr><td class=tg-left>{i}</td>".format(i=format_spec[0]), "<td class=tg-right>{i}</td>".format(i=format_spec[1])]
for i in range(2, len(format_spec)):
format_spec[i] = '<td class=tg-left>{c}</td>'.format(c=format_spec[i])
format_spec = "".join(format_spec) + '</tr>'
for i in range(self.size):
to_print.append(format_spec.format(index=indices[i], value="{1:.{0}f}".format(__precision__, vals[i]), **dict((name, ' '.join(map(str, iops[name][i]))) for name in iops)))
return '\n'.join(to_print) |
def get_bool_value(self, section, option, default=True):
"""Get the bool value of an option, if it exists."""
try:
return self.parser.getboolean(section, option)
except NoOptionError:
return bool(default) | Get the bool value of an option, if it exists. | Below is the the instruction that describes the task:
### Input:
Get the bool value of an option, if it exists.
### Response:
def get_bool_value(self, section, option, default=True):
"""Get the bool value of an option, if it exists."""
try:
return self.parser.getboolean(section, option)
except NoOptionError:
return bool(default) |
def Failed(self):
"""Indicates that a request has failed.
Returns:
Time interval to wait before retrying (in seconds).
"""
interval = self._current_interval_sec
self._current_interval_sec = min(
self.max_interval_sec, self._current_interval_sec * self.multiplier)
return interval | Indicates that a request has failed.
Returns:
Time interval to wait before retrying (in seconds). | Below is the the instruction that describes the task:
### Input:
Indicates that a request has failed.
Returns:
Time interval to wait before retrying (in seconds).
### Response:
def Failed(self):
"""Indicates that a request has failed.
Returns:
Time interval to wait before retrying (in seconds).
"""
interval = self._current_interval_sec
self._current_interval_sec = min(
self.max_interval_sec, self._current_interval_sec * self.multiplier)
return interval |
def sgd(fun, x0, data, args=(), bounds=None, batch_size=10, maxiter=5000,
updater=None, eval_obj=False, random_state=None):
"""
Stochastic Gradient Descent.
Parameters
----------
fun : callable
the function to *minimize*, this must have the signature ``[obj,]``
grad = fun(x, data, ...)`, where the ``eval_obj`` argument tells
``sgd`` if an objective function value is going to be returned by
``fun``.
x0 : ndarray
a sequence/1D array of initial values for the parameters to learn.
data : ndarray
a numpy array or sequence of data to input into ``fun``. This will
be split along the first axis (axis=0), and then input into
``fun``.
args : sequence, optional
an optional sequence of arguments to give to fun.
bounds : sequence, optional
Bounds for variables, (min, max) pairs for each element in x, defining
the bounds on that parameter. Use None for one of min or max when
there is no bound in that direction.
batch_size : int, optional
The number of observations in an SGD batch.
maxiter : int, optional
Number of mini-batch iterations before optimization terminates.
updater : SGDUpdater, optional
The type of gradient update to use, by default this is Adam.
eval_obj : bool, optional
This indicates whether or not ``fun`` also evaluates and returns
the objective function value. If this is true, ``fun`` must return
``(obj, grad)`` and then a list of objective function values is
also returned.
random_state : int or RandomState, optional
random seed
Returns
-------
res : OptimizeResult
x : narray
the final result
norms : list
the list of gradient norms
message : str
the convergence condition ('maxiter reached' or error)
objs : list
the list of objective function evaluations if ``eval_obj``
is True.
fun : float
the final objective function evaluation if ``eval_obj`` is
True.
"""
if updater is None:
updater = Adam()
# Make sure we aren't using a recycled updater
updater.reset()
N = _len_data(data)
x = np.array(x0, copy=True, dtype=float)
D = x.shape[0]
# Make sure we have a valid batch size
batch_size = min(batch_size, N)
# Process bounds
if bounds is not None:
if len(bounds) != D:
raise ValueError("The dimension of the bounds does not match x0!")
lower, upper = zip(*map(normalize_bound, bounds))
lower = np.array(lower)
upper = np.array(upper)
# Learning Records
obj = None
objs = []
norms = []
for batch in gen_batch(data, batch_size, maxiter, random_state):
if not eval_obj:
grad = fun(x, *chain(batch, args))
else:
obj, grad = fun(x, *chain(batch, args))
objs.append(obj)
norms.append(np.linalg.norm(grad))
# Truncate gradients if bounded
if bounds is not None:
xlower = x <= lower
grad[xlower] = np.minimum(grad[xlower], 0)
xupper = x >= upper
grad[xupper] = np.maximum(grad[xupper], 0)
# perform update
x = updater(x, grad)
# Trucate steps if bounded
if bounds is not None:
x = np.clip(x, lower, upper)
# Format results
res = OptimizeResult(
x=x,
norms=norms,
message='maxiter reached',
fun=obj,
objs=objs
)
return res | Stochastic Gradient Descent.
Parameters
----------
fun : callable
the function to *minimize*, this must have the signature ``[obj,]``
grad = fun(x, data, ...)`, where the ``eval_obj`` argument tells
``sgd`` if an objective function value is going to be returned by
``fun``.
x0 : ndarray
a sequence/1D array of initial values for the parameters to learn.
data : ndarray
a numpy array or sequence of data to input into ``fun``. This will
be split along the first axis (axis=0), and then input into
``fun``.
args : sequence, optional
an optional sequence of arguments to give to fun.
bounds : sequence, optional
Bounds for variables, (min, max) pairs for each element in x, defining
the bounds on that parameter. Use None for one of min or max when
there is no bound in that direction.
batch_size : int, optional
The number of observations in an SGD batch.
maxiter : int, optional
Number of mini-batch iterations before optimization terminates.
updater : SGDUpdater, optional
The type of gradient update to use, by default this is Adam.
eval_obj : bool, optional
This indicates whether or not ``fun`` also evaluates and returns
the objective function value. If this is true, ``fun`` must return
``(obj, grad)`` and then a list of objective function values is
also returned.
random_state : int or RandomState, optional
random seed
Returns
-------
res : OptimizeResult
x : narray
the final result
norms : list
the list of gradient norms
message : str
the convergence condition ('maxiter reached' or error)
objs : list
the list of objective function evaluations if ``eval_obj``
is True.
fun : float
the final objective function evaluation if ``eval_obj`` is
True. | Below is the the instruction that describes the task:
### Input:
Stochastic Gradient Descent.
Parameters
----------
fun : callable
the function to *minimize*, this must have the signature ``[obj,]``
grad = fun(x, data, ...)`, where the ``eval_obj`` argument tells
``sgd`` if an objective function value is going to be returned by
``fun``.
x0 : ndarray
a sequence/1D array of initial values for the parameters to learn.
data : ndarray
a numpy array or sequence of data to input into ``fun``. This will
be split along the first axis (axis=0), and then input into
``fun``.
args : sequence, optional
an optional sequence of arguments to give to fun.
bounds : sequence, optional
Bounds for variables, (min, max) pairs for each element in x, defining
the bounds on that parameter. Use None for one of min or max when
there is no bound in that direction.
batch_size : int, optional
The number of observations in an SGD batch.
maxiter : int, optional
Number of mini-batch iterations before optimization terminates.
updater : SGDUpdater, optional
The type of gradient update to use, by default this is Adam.
eval_obj : bool, optional
This indicates whether or not ``fun`` also evaluates and returns
the objective function value. If this is true, ``fun`` must return
``(obj, grad)`` and then a list of objective function values is
also returned.
random_state : int or RandomState, optional
random seed
Returns
-------
res : OptimizeResult
x : narray
the final result
norms : list
the list of gradient norms
message : str
the convergence condition ('maxiter reached' or error)
objs : list
the list of objective function evaluations if ``eval_obj``
is True.
fun : float
the final objective function evaluation if ``eval_obj`` is
True.
### Response:
def sgd(fun, x0, data, args=(), bounds=None, batch_size=10, maxiter=5000,
updater=None, eval_obj=False, random_state=None):
"""
Stochastic Gradient Descent.
Parameters
----------
fun : callable
the function to *minimize*, this must have the signature ``[obj,]``
grad = fun(x, data, ...)`, where the ``eval_obj`` argument tells
``sgd`` if an objective function value is going to be returned by
``fun``.
x0 : ndarray
a sequence/1D array of initial values for the parameters to learn.
data : ndarray
a numpy array or sequence of data to input into ``fun``. This will
be split along the first axis (axis=0), and then input into
``fun``.
args : sequence, optional
an optional sequence of arguments to give to fun.
bounds : sequence, optional
Bounds for variables, (min, max) pairs for each element in x, defining
the bounds on that parameter. Use None for one of min or max when
there is no bound in that direction.
batch_size : int, optional
The number of observations in an SGD batch.
maxiter : int, optional
Number of mini-batch iterations before optimization terminates.
updater : SGDUpdater, optional
The type of gradient update to use, by default this is Adam.
eval_obj : bool, optional
This indicates whether or not ``fun`` also evaluates and returns
the objective function value. If this is true, ``fun`` must return
``(obj, grad)`` and then a list of objective function values is
also returned.
random_state : int or RandomState, optional
random seed
Returns
-------
res : OptimizeResult
x : narray
the final result
norms : list
the list of gradient norms
message : str
the convergence condition ('maxiter reached' or error)
objs : list
the list of objective function evaluations if ``eval_obj``
is True.
fun : float
the final objective function evaluation if ``eval_obj`` is
True.
"""
if updater is None:
updater = Adam()
# Make sure we aren't using a recycled updater
updater.reset()
N = _len_data(data)
x = np.array(x0, copy=True, dtype=float)
D = x.shape[0]
# Make sure we have a valid batch size
batch_size = min(batch_size, N)
# Process bounds
if bounds is not None:
if len(bounds) != D:
raise ValueError("The dimension of the bounds does not match x0!")
lower, upper = zip(*map(normalize_bound, bounds))
lower = np.array(lower)
upper = np.array(upper)
# Learning Records
obj = None
objs = []
norms = []
for batch in gen_batch(data, batch_size, maxiter, random_state):
if not eval_obj:
grad = fun(x, *chain(batch, args))
else:
obj, grad = fun(x, *chain(batch, args))
objs.append(obj)
norms.append(np.linalg.norm(grad))
# Truncate gradients if bounded
if bounds is not None:
xlower = x <= lower
grad[xlower] = np.minimum(grad[xlower], 0)
xupper = x >= upper
grad[xupper] = np.maximum(grad[xupper], 0)
# perform update
x = updater(x, grad)
# Trucate steps if bounded
if bounds is not None:
x = np.clip(x, lower, upper)
# Format results
res = OptimizeResult(
x=x,
norms=norms,
message='maxiter reached',
fun=obj,
objs=objs
)
return res |
async def search_raw(self, term: str, limit: int = 3) -> List[dict]:
"""Performs a search for a term and returns the raw response.
Args:
term: The term to be defined.
limit: The maximum amount of results you'd like.
Defaults to 3.
Returns:
A list of :class:`dict`\s which contain word information.
"""
return (await self._get(term=term))['list'][:limit] | Performs a search for a term and returns the raw response.
Args:
term: The term to be defined.
limit: The maximum amount of results you'd like.
Defaults to 3.
Returns:
A list of :class:`dict`\s which contain word information. | Below is the the instruction that describes the task:
### Input:
Performs a search for a term and returns the raw response.
Args:
term: The term to be defined.
limit: The maximum amount of results you'd like.
Defaults to 3.
Returns:
A list of :class:`dict`\s which contain word information.
### Response:
async def search_raw(self, term: str, limit: int = 3) -> List[dict]:
"""Performs a search for a term and returns the raw response.
Args:
term: The term to be defined.
limit: The maximum amount of results you'd like.
Defaults to 3.
Returns:
A list of :class:`dict`\s which contain word information.
"""
return (await self._get(term=term))['list'][:limit] |
def _tagValuedProperties(self, content_type):
"""Document properties for property files having constructs like
<ns:name>value</ns:name>
:param content_type: ``contenttypes.CT_CORE_PROPS`` or ``contenttypes.CT_EXT_PROPS``
:return: mapping like {'property name': 'property value', ...}
"""
rval = {}
if not content_type in self.content_types.listMetaContentTypes:
# We fail silently
return rval
for tree in self.content_types.getTreesFor(self, content_type):
for elt in tree.getroot().getchildren():
tag = elt.tag.split('}')[-1] # Removing namespace if any
rval[toUnicode(tag)] = toUnicode(elt.text)
return rval | Document properties for property files having constructs like
<ns:name>value</ns:name>
:param content_type: ``contenttypes.CT_CORE_PROPS`` or ``contenttypes.CT_EXT_PROPS``
:return: mapping like {'property name': 'property value', ...} | Below is the the instruction that describes the task:
### Input:
Document properties for property files having constructs like
<ns:name>value</ns:name>
:param content_type: ``contenttypes.CT_CORE_PROPS`` or ``contenttypes.CT_EXT_PROPS``
:return: mapping like {'property name': 'property value', ...}
### Response:
def _tagValuedProperties(self, content_type):
"""Document properties for property files having constructs like
<ns:name>value</ns:name>
:param content_type: ``contenttypes.CT_CORE_PROPS`` or ``contenttypes.CT_EXT_PROPS``
:return: mapping like {'property name': 'property value', ...}
"""
rval = {}
if not content_type in self.content_types.listMetaContentTypes:
# We fail silently
return rval
for tree in self.content_types.getTreesFor(self, content_type):
for elt in tree.getroot().getchildren():
tag = elt.tag.split('}')[-1] # Removing namespace if any
rval[toUnicode(tag)] = toUnicode(elt.text)
return rval |
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data.get("properties", {})
raw_content = properties.get("statistics", None)
if raw_content is not None:
statistics = BGPPeersStatistics.from_raw_data(raw_content)
properties["statistics"] = statistics
return super(BGPPeers, cls).process_raw_data(raw_data) | Create a new model using raw API response. | Below is the the instruction that describes the task:
### Input:
Create a new model using raw API response.
### Response:
def process_raw_data(cls, raw_data):
"""Create a new model using raw API response."""
properties = raw_data.get("properties", {})
raw_content = properties.get("statistics", None)
if raw_content is not None:
statistics = BGPPeersStatistics.from_raw_data(raw_content)
properties["statistics"] = statistics
return super(BGPPeers, cls).process_raw_data(raw_data) |
def str_repr(klass):
"""
Implements string conversion methods for the given class.
The given class must implement the __str__ method. This decorat
will add __repr__ and __unicode__ (for Python 2).
"""
if PY2:
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
klass.__repr__ = lambda self: '<%s: %r>' % (self.__class__.__name__, str(self))
return klass | Implements string conversion methods for the given class.
The given class must implement the __str__ method. This decorat
will add __repr__ and __unicode__ (for Python 2). | Below is the the instruction that describes the task:
### Input:
Implements string conversion methods for the given class.
The given class must implement the __str__ method. This decorat
will add __repr__ and __unicode__ (for Python 2).
### Response:
def str_repr(klass):
"""
Implements string conversion methods for the given class.
The given class must implement the __str__ method. This decorat
will add __repr__ and __unicode__ (for Python 2).
"""
if PY2:
klass.__unicode__ = klass.__str__
klass.__str__ = lambda self: self.__unicode__().encode('utf-8')
klass.__repr__ = lambda self: '<%s: %r>' % (self.__class__.__name__, str(self))
return klass |
def sigma_to_pressure(sigma, psfc, ptop):
r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
The sigma levels to be converted to pressure levels.
psfc : `pint.Quantity`
The surface pressure value.
ptop : `pint.Quantity`
The pressure value at the top of the model domain.
Returns
-------
`pint.Quantity`
The pressure values at the given sigma levels.
Notes
-----
Sigma definition adapted from [Philips1957]_.
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
* :math:`p` is pressure at a given `\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain
"""
if np.any(sigma < 0) or np.any(sigma > 1):
raise ValueError('Sigma values should be bounded by 0 and 1')
if psfc.magnitude < 0 or ptop.magnitude < 0:
raise ValueError('Pressure input should be non-negative')
return sigma * (psfc - ptop) + ptop | r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
The sigma levels to be converted to pressure levels.
psfc : `pint.Quantity`
The surface pressure value.
ptop : `pint.Quantity`
The pressure value at the top of the model domain.
Returns
-------
`pint.Quantity`
The pressure values at the given sigma levels.
Notes
-----
Sigma definition adapted from [Philips1957]_.
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
* :math:`p` is pressure at a given `\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain | Below is the the instruction that describes the task:
### Input:
r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
The sigma levels to be converted to pressure levels.
psfc : `pint.Quantity`
The surface pressure value.
ptop : `pint.Quantity`
The pressure value at the top of the model domain.
Returns
-------
`pint.Quantity`
The pressure values at the given sigma levels.
Notes
-----
Sigma definition adapted from [Philips1957]_.
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
* :math:`p` is pressure at a given `\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain
### Response:
def sigma_to_pressure(sigma, psfc, ptop):
r"""Calculate pressure from sigma values.
Parameters
----------
sigma : ndarray
The sigma levels to be converted to pressure levels.
psfc : `pint.Quantity`
The surface pressure value.
ptop : `pint.Quantity`
The pressure value at the top of the model domain.
Returns
-------
`pint.Quantity`
The pressure values at the given sigma levels.
Notes
-----
Sigma definition adapted from [Philips1957]_.
.. math:: p = \sigma * (p_{sfc} - p_{top}) + p_{top}
* :math:`p` is pressure at a given `\sigma` level
* :math:`\sigma` is non-dimensional, scaled pressure
* :math:`p_{sfc}` is pressure at the surface or model floor
* :math:`p_{top}` is pressure at the top of the model domain
"""
if np.any(sigma < 0) or np.any(sigma > 1):
raise ValueError('Sigma values should be bounded by 0 and 1')
if psfc.magnitude < 0 or ptop.magnitude < 0:
raise ValueError('Pressure input should be non-negative')
return sigma * (psfc - ptop) + ptop |
def unflatten(flat_dict, separator='_'):
"""
Creates a hierarchical dictionary from a flattened dictionary
Assumes no lists are present
:param flat_dict: a dictionary with no hierarchy
:param separator: a string that separates keys
:return: a dictionary with hierarchy
"""
_unflatten_asserts(flat_dict, separator)
# This global dictionary is mutated and returned
unflattened_dict = dict()
def _unflatten(dic, keys, value):
for key in keys[:-1]:
dic = dic.setdefault(key, {})
dic[keys[-1]] = value
for item in flat_dict:
_unflatten(unflattened_dict, item.split(separator), flat_dict[item])
return unflattened_dict | Creates a hierarchical dictionary from a flattened dictionary
Assumes no lists are present
:param flat_dict: a dictionary with no hierarchy
:param separator: a string that separates keys
:return: a dictionary with hierarchy | Below is the the instruction that describes the task:
### Input:
Creates a hierarchical dictionary from a flattened dictionary
Assumes no lists are present
:param flat_dict: a dictionary with no hierarchy
:param separator: a string that separates keys
:return: a dictionary with hierarchy
### Response:
def unflatten(flat_dict, separator='_'):
"""
Creates a hierarchical dictionary from a flattened dictionary
Assumes no lists are present
:param flat_dict: a dictionary with no hierarchy
:param separator: a string that separates keys
:return: a dictionary with hierarchy
"""
_unflatten_asserts(flat_dict, separator)
# This global dictionary is mutated and returned
unflattened_dict = dict()
def _unflatten(dic, keys, value):
for key in keys[:-1]:
dic = dic.setdefault(key, {})
dic[keys[-1]] = value
for item in flat_dict:
_unflatten(unflattened_dict, item.split(separator), flat_dict[item])
return unflattened_dict |
def recherche(self, pattern, entete):
"""Performs a search field by field, using functions defined in formats.
Matchs are marked with info[`font`]
:param pattern: String to look for
:param entete: Fields to look into
:return: Nothing. The collection is changed in place
"""
new_liste = []
sub_patterns = pattern.split(" ")
for p in self:
d_font = {att: False for att in entete}
row_valid = True
for sub_pattern in sub_patterns:
found = False
for att in entete:
fonction_recherche = formats.ASSOCIATION[att][1]
attr_found = bool(fonction_recherche(p[att], sub_pattern))
if attr_found:
found = True
d_font[att] = True
if not found:
row_valid = False
break
if row_valid:
new_liste.append(p)
info = dict(self.get_info(Id=p.Id),font=d_font)
self.infos[p.Id] = info
list.__init__(self, new_liste) | Performs a search field by field, using functions defined in formats.
Matchs are marked with info[`font`]
:param pattern: String to look for
:param entete: Fields to look into
:return: Nothing. The collection is changed in place | Below is the the instruction that describes the task:
### Input:
Performs a search field by field, using functions defined in formats.
Matchs are marked with info[`font`]
:param pattern: String to look for
:param entete: Fields to look into
:return: Nothing. The collection is changed in place
### Response:
def recherche(self, pattern, entete):
"""Performs a search field by field, using functions defined in formats.
Matchs are marked with info[`font`]
:param pattern: String to look for
:param entete: Fields to look into
:return: Nothing. The collection is changed in place
"""
new_liste = []
sub_patterns = pattern.split(" ")
for p in self:
d_font = {att: False for att in entete}
row_valid = True
for sub_pattern in sub_patterns:
found = False
for att in entete:
fonction_recherche = formats.ASSOCIATION[att][1]
attr_found = bool(fonction_recherche(p[att], sub_pattern))
if attr_found:
found = True
d_font[att] = True
if not found:
row_valid = False
break
if row_valid:
new_liste.append(p)
info = dict(self.get_info(Id=p.Id),font=d_font)
self.infos[p.Id] = info
list.__init__(self, new_liste) |
def squash_unicode(obj):
"""coerce unicode back to bytestrings."""
if isinstance(obj,dict):
for key in obj.keys():
obj[key] = squash_unicode(obj[key])
if isinstance(key, unicode):
obj[squash_unicode(key)] = obj.pop(key)
elif isinstance(obj, list):
for i,v in enumerate(obj):
obj[i] = squash_unicode(v)
elif isinstance(obj, unicode):
obj = obj.encode('utf8')
return obj | coerce unicode back to bytestrings. | Below is the the instruction that describes the task:
### Input:
coerce unicode back to bytestrings.
### Response:
def squash_unicode(obj):
"""coerce unicode back to bytestrings."""
if isinstance(obj,dict):
for key in obj.keys():
obj[key] = squash_unicode(obj[key])
if isinstance(key, unicode):
obj[squash_unicode(key)] = obj.pop(key)
elif isinstance(obj, list):
for i,v in enumerate(obj):
obj[i] = squash_unicode(v)
elif isinstance(obj, unicode):
obj = obj.encode('utf8')
return obj |
def get(self, attri, fname=None, numtype='cycNum', decayed=False):
'''
In this method all data for an entire cycle (basically the
content of an iso_massfnnnn.DAT file) or a column of data for
the associated attribute is returned.
Parameters
----------
attri : string or integer
If attri is a string, attri is the cycle or name of the
attribute we are looking for.
If attri is an integer, attri is the cycle number (cycle arrays
are not supported).
fname : string, optional
If attri is a string, fname is the name of the file we are
getting the data from or the cycle number found in the
filename, or a List of either cycles or filenames. If fname
is None, the data from all cycles is returned.
If attri is an integer, then fname is not supported.
The default is None.
numtype : string, optional
If attri is a string, numtype determines whether fname is
the name of a file or, the cycle number. If numtype is
'file' it will then interpret fname as a file. If numtype
is 'cycNum' it will then interpret fname as a cycle number.
If attri is an Integer, then numtype is not supported.
The default is "cycNum".
decayed : boolean, optional
If attri is a string, then decayed is not supported.
If attri is an integer, then get instantaneously decay
abundance distribution.
The default is False.
Returns
-------
array
If attri is a string, data in the form of a numpy array is
returned.
If attri is an integer, Nothing is returned.
Notes
-----
If attri is an integer, then the following variables will be
added to the instance.
a_iso_to_plot: mass number of plotted range of species.
isotope_to_plot: corresponding list of isotopes.
z_iso_to_plot: corresponding charge numbers.
el_iso_to_plot: corresponding element names.
abunds: corresponding abundances.
isom: list of isomers with their abundances.
'''
if type(attri) is type(1):
print("Calling get method in cycle mode, adding a_iso_to_plot, z.. el.. isotope.. isotope... to instance")
self._getcycle(attri,decayed)
elif type(attri) is type("string"):
data=self._getattr(attri,fname,numtype)
return data | In this method all data for an entire cycle (basically the
content of an iso_massfnnnn.DAT file) or a column of data for
the associated attribute is returned.
Parameters
----------
attri : string or integer
If attri is a string, attri is the cycle or name of the
attribute we are looking for.
If attri is an integer, attri is the cycle number (cycle arrays
are not supported).
fname : string, optional
If attri is a string, fname is the name of the file we are
getting the data from or the cycle number found in the
filename, or a List of either cycles or filenames. If fname
is None, the data from all cycles is returned.
If attri is an integer, then fname is not supported.
The default is None.
numtype : string, optional
If attri is a string, numtype determines whether fname is
the name of a file or, the cycle number. If numtype is
'file' it will then interpret fname as a file. If numtype
is 'cycNum' it will then interpret fname as a cycle number.
If attri is an Integer, then numtype is not supported.
The default is "cycNum".
decayed : boolean, optional
If attri is a string, then decayed is not supported.
If attri is an integer, then get instantaneously decay
abundance distribution.
The default is False.
Returns
-------
array
If attri is a string, data in the form of a numpy array is
returned.
If attri is an integer, Nothing is returned.
Notes
-----
If attri is an integer, then the following variables will be
added to the instance.
a_iso_to_plot: mass number of plotted range of species.
isotope_to_plot: corresponding list of isotopes.
z_iso_to_plot: corresponding charge numbers.
el_iso_to_plot: corresponding element names.
abunds: corresponding abundances.
isom: list of isomers with their abundances. | Below is the the instruction that describes the task:
### Input:
In this method all data for an entire cycle (basically the
content of an iso_massfnnnn.DAT file) or a column of data for
the associated attribute is returned.
Parameters
----------
attri : string or integer
If attri is a string, attri is the cycle or name of the
attribute we are looking for.
If attri is an integer, attri is the cycle number (cycle arrays
are not supported).
fname : string, optional
If attri is a string, fname is the name of the file we are
getting the data from or the cycle number found in the
filename, or a List of either cycles or filenames. If fname
is None, the data from all cycles is returned.
If attri is an integer, then fname is not supported.
The default is None.
numtype : string, optional
If attri is a string, numtype determines whether fname is
the name of a file or, the cycle number. If numtype is
'file' it will then interpret fname as a file. If numtype
is 'cycNum' it will then interpret fname as a cycle number.
If attri is an Integer, then numtype is not supported.
The default is "cycNum".
decayed : boolean, optional
If attri is a string, then decayed is not supported.
If attri is an integer, then get instantaneously decay
abundance distribution.
The default is False.
Returns
-------
array
If attri is a string, data in the form of a numpy array is
returned.
If attri is an integer, Nothing is returned.
Notes
-----
If attri is an integer, then the following variables will be
added to the instance.
a_iso_to_plot: mass number of plotted range of species.
isotope_to_plot: corresponding list of isotopes.
z_iso_to_plot: corresponding charge numbers.
el_iso_to_plot: corresponding element names.
abunds: corresponding abundances.
isom: list of isomers with their abundances.
### Response:
def get(self, attri, fname=None, numtype='cycNum', decayed=False):
'''
In this method all data for an entire cycle (basically the
content of an iso_massfnnnn.DAT file) or a column of data for
the associated attribute is returned.
Parameters
----------
attri : string or integer
If attri is a string, attri is the cycle or name of the
attribute we are looking for.
If attri is an integer, attri is the cycle number (cycle arrays
are not supported).
fname : string, optional
If attri is a string, fname is the name of the file we are
getting the data from or the cycle number found in the
filename, or a List of either cycles or filenames. If fname
is None, the data from all cycles is returned.
If attri is an integer, then fname is not supported.
The default is None.
numtype : string, optional
If attri is a string, numtype determines whether fname is
the name of a file or, the cycle number. If numtype is
'file' it will then interpret fname as a file. If numtype
is 'cycNum' it will then interpret fname as a cycle number.
If attri is an Integer, then numtype is not supported.
The default is "cycNum".
decayed : boolean, optional
If attri is a string, then decayed is not supported.
If attri is an integer, then get instantaneously decay
abundance distribution.
The default is False.
Returns
-------
array
If attri is a string, data in the form of a numpy array is
returned.
If attri is an integer, Nothing is returned.
Notes
-----
If attri is an integer, then the following variables will be
added to the instance.
a_iso_to_plot: mass number of plotted range of species.
isotope_to_plot: corresponding list of isotopes.
z_iso_to_plot: corresponding charge numbers.
el_iso_to_plot: corresponding element names.
abunds: corresponding abundances.
isom: list of isomers with their abundances.
'''
if type(attri) is type(1):
print("Calling get method in cycle mode, adding a_iso_to_plot, z.. el.. isotope.. isotope... to instance")
self._getcycle(attri,decayed)
elif type(attri) is type("string"):
data=self._getattr(attri,fname,numtype)
return data |
def _initial_estimate(self, y, modelmat):
"""
Makes an inital estimate for the model coefficients.
For a LinearGAM we simply initialize to small coefficients.
For other GAMs we transform the problem to the linear space
and solve an unpenalized version.
Parameters
---------
y : array-like of shape (n,)
containing target data
modelmat : sparse matrix of shape (n, m)
containing model matrix of the spline basis
Returns
-------
coef : array of shape (m,) containing the initial estimate for the model
coefficients
Notes
-----
This method implements the suggestions in
Wood, section 2.2.2 Geometry and IRLS convergence, pg 80
"""
# do a simple initialization for LinearGAMs
if isinstance(self, LinearGAM):
n, m = modelmat.shape
return np.ones(m) * np.sqrt(EPS)
# transform the problem to the linear scale
y = deepcopy(y).astype('float64')
y[y == 0] += .01 # edge case for log link, inverse link, and logit link
y[y == 1] -= .01 # edge case for logit link
y_ = self.link.link(y, self.distribution)
y_ = make_2d(y_, verbose=False)
assert np.isfinite(y_).all(), "transformed response values should be well-behaved."
# solve the linear problem
return np.linalg.solve(load_diagonal(modelmat.T.dot(modelmat).A),
modelmat.T.dot(y_)) | Makes an inital estimate for the model coefficients.
For a LinearGAM we simply initialize to small coefficients.
For other GAMs we transform the problem to the linear space
and solve an unpenalized version.
Parameters
---------
y : array-like of shape (n,)
containing target data
modelmat : sparse matrix of shape (n, m)
containing model matrix of the spline basis
Returns
-------
coef : array of shape (m,) containing the initial estimate for the model
coefficients
Notes
-----
This method implements the suggestions in
Wood, section 2.2.2 Geometry and IRLS convergence, pg 80 | Below is the the instruction that describes the task:
### Input:
Makes an inital estimate for the model coefficients.
For a LinearGAM we simply initialize to small coefficients.
For other GAMs we transform the problem to the linear space
and solve an unpenalized version.
Parameters
---------
y : array-like of shape (n,)
containing target data
modelmat : sparse matrix of shape (n, m)
containing model matrix of the spline basis
Returns
-------
coef : array of shape (m,) containing the initial estimate for the model
coefficients
Notes
-----
This method implements the suggestions in
Wood, section 2.2.2 Geometry and IRLS convergence, pg 80
### Response:
def _initial_estimate(self, y, modelmat):
"""
Makes an inital estimate for the model coefficients.
For a LinearGAM we simply initialize to small coefficients.
For other GAMs we transform the problem to the linear space
and solve an unpenalized version.
Parameters
---------
y : array-like of shape (n,)
containing target data
modelmat : sparse matrix of shape (n, m)
containing model matrix of the spline basis
Returns
-------
coef : array of shape (m,) containing the initial estimate for the model
coefficients
Notes
-----
This method implements the suggestions in
Wood, section 2.2.2 Geometry and IRLS convergence, pg 80
"""
# do a simple initialization for LinearGAMs
if isinstance(self, LinearGAM):
n, m = modelmat.shape
return np.ones(m) * np.sqrt(EPS)
# transform the problem to the linear scale
y = deepcopy(y).astype('float64')
y[y == 0] += .01 # edge case for log link, inverse link, and logit link
y[y == 1] -= .01 # edge case for logit link
y_ = self.link.link(y, self.distribution)
y_ = make_2d(y_, verbose=False)
assert np.isfinite(y_).all(), "transformed response values should be well-behaved."
# solve the linear problem
return np.linalg.solve(load_diagonal(modelmat.T.dot(modelmat).A),
modelmat.T.dot(y_)) |
def get_full_id(self, state):
"""Return a global unique identifier for this agent instance.
It's a combination of agent_id and instance_id:
full_id = agent_id + '/' + instance_id
"""
desc = state.medium.get_descriptor()
return desc.doc_id + u"/" + unicode(desc.instance_id) | Return a global unique identifier for this agent instance.
It's a combination of agent_id and instance_id:
full_id = agent_id + '/' + instance_id | Below is the the instruction that describes the task:
### Input:
Return a global unique identifier for this agent instance.
It's a combination of agent_id and instance_id:
full_id = agent_id + '/' + instance_id
### Response:
def get_full_id(self, state):
"""Return a global unique identifier for this agent instance.
It's a combination of agent_id and instance_id:
full_id = agent_id + '/' + instance_id
"""
desc = state.medium.get_descriptor()
return desc.doc_id + u"/" + unicode(desc.instance_id) |
def validate(raw_schema, target=None, **kwargs):
"""
Given the python representation of a JSONschema as defined in the swagger
spec, validate that the schema complies to spec. If `target` is provided,
that target will be validated against the provided schema.
"""
schema = schema_validator(raw_schema, **kwargs)
if target is not None:
validate_object(target, schema=schema, **kwargs) | Given the python representation of a JSONschema as defined in the swagger
spec, validate that the schema complies to spec. If `target` is provided,
that target will be validated against the provided schema. | Below is the the instruction that describes the task:
### Input:
Given the python representation of a JSONschema as defined in the swagger
spec, validate that the schema complies to spec. If `target` is provided,
that target will be validated against the provided schema.
### Response:
def validate(raw_schema, target=None, **kwargs):
"""
Given the python representation of a JSONschema as defined in the swagger
spec, validate that the schema complies to spec. If `target` is provided,
that target will be validated against the provided schema.
"""
schema = schema_validator(raw_schema, **kwargs)
if target is not None:
validate_object(target, schema=schema, **kwargs) |
def before_request(request, tracer=None):
"""
Attempts to extract a tracing span from incoming request.
If no tracing context is passed in the headers, or the data
cannot be parsed, a new root span is started.
:param request: HTTP request with `.headers` property exposed
that satisfies a regular dictionary interface
:param tracer: optional tracer instance to use. If not specified
the global opentracing.tracer will be used.
:return: returns a new, already started span.
"""
if tracer is None: # pragma: no cover
tracer = opentracing.tracer
# we need to prepare tags upfront, mainly because RPC_SERVER tag must be
# set when starting the span, to support Zipkin's one-span-per-RPC model
tags_dict = {
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_URL: request.full_url,
}
remote_ip = request.remote_ip
if remote_ip:
tags_dict[tags.PEER_HOST_IPV4] = remote_ip
caller_name = request.caller_name
if caller_name:
tags_dict[tags.PEER_SERVICE] = caller_name
remote_port = request.remote_port
if remote_port:
tags_dict[tags.PEER_PORT] = remote_port
operation = request.operation
try:
carrier = {}
for key, value in six.iteritems(request.headers):
carrier[key] = value
parent_ctx = tracer.extract(
format=Format.HTTP_HEADERS, carrier=carrier
)
except Exception as e:
logging.exception('trace extract failed: %s' % e)
parent_ctx = None
span = tracer.start_span(
operation_name=operation,
child_of=parent_ctx,
tags=tags_dict)
return span | Attempts to extract a tracing span from incoming request.
If no tracing context is passed in the headers, or the data
cannot be parsed, a new root span is started.
:param request: HTTP request with `.headers` property exposed
that satisfies a regular dictionary interface
:param tracer: optional tracer instance to use. If not specified
the global opentracing.tracer will be used.
:return: returns a new, already started span. | Below is the the instruction that describes the task:
### Input:
Attempts to extract a tracing span from incoming request.
If no tracing context is passed in the headers, or the data
cannot be parsed, a new root span is started.
:param request: HTTP request with `.headers` property exposed
that satisfies a regular dictionary interface
:param tracer: optional tracer instance to use. If not specified
the global opentracing.tracer will be used.
:return: returns a new, already started span.
### Response:
def before_request(request, tracer=None):
"""
Attempts to extract a tracing span from incoming request.
If no tracing context is passed in the headers, or the data
cannot be parsed, a new root span is started.
:param request: HTTP request with `.headers` property exposed
that satisfies a regular dictionary interface
:param tracer: optional tracer instance to use. If not specified
the global opentracing.tracer will be used.
:return: returns a new, already started span.
"""
if tracer is None: # pragma: no cover
tracer = opentracing.tracer
# we need to prepare tags upfront, mainly because RPC_SERVER tag must be
# set when starting the span, to support Zipkin's one-span-per-RPC model
tags_dict = {
tags.SPAN_KIND: tags.SPAN_KIND_RPC_SERVER,
tags.HTTP_URL: request.full_url,
}
remote_ip = request.remote_ip
if remote_ip:
tags_dict[tags.PEER_HOST_IPV4] = remote_ip
caller_name = request.caller_name
if caller_name:
tags_dict[tags.PEER_SERVICE] = caller_name
remote_port = request.remote_port
if remote_port:
tags_dict[tags.PEER_PORT] = remote_port
operation = request.operation
try:
carrier = {}
for key, value in six.iteritems(request.headers):
carrier[key] = value
parent_ctx = tracer.extract(
format=Format.HTTP_HEADERS, carrier=carrier
)
except Exception as e:
logging.exception('trace extract failed: %s' % e)
parent_ctx = None
span = tracer.start_span(
operation_name=operation,
child_of=parent_ctx,
tags=tags_dict)
return span |
def size_changed(self, settings, key, user_data):
"""If the gconf var window_height or window_width are changed,
this method will be called and will call the resize function
in guake.
"""
RectCalculator.set_final_window_rect(self.settings, self.guake.window) | If the gconf var window_height or window_width are changed,
this method will be called and will call the resize function
in guake. | Below is the the instruction that describes the task:
### Input:
If the gconf var window_height or window_width are changed,
this method will be called and will call the resize function
in guake.
### Response:
def size_changed(self, settings, key, user_data):
"""If the gconf var window_height or window_width are changed,
this method will be called and will call the resize function
in guake.
"""
RectCalculator.set_final_window_rect(self.settings, self.guake.window) |
def set_flag(self, value, flag, reset_val=False):
"""Set a flag to 0 if the corresponding value is 0"""
if not self.__dict__[flag]:
self.__dict__[flag] = matrix(1.0, (len(self.__dict__[value]), 1),
'd')
for idx, item in enumerate(self.__dict__[value]):
if item == 0:
self.__dict__[flag][idx] = 0
if reset_val:
self.__dict__[value][idx] = 1 | Set a flag to 0 if the corresponding value is 0 | Below is the the instruction that describes the task:
### Input:
Set a flag to 0 if the corresponding value is 0
### Response:
def set_flag(self, value, flag, reset_val=False):
"""Set a flag to 0 if the corresponding value is 0"""
if not self.__dict__[flag]:
self.__dict__[flag] = matrix(1.0, (len(self.__dict__[value]), 1),
'd')
for idx, item in enumerate(self.__dict__[value]):
if item == 0:
self.__dict__[flag][idx] = 0
if reset_val:
self.__dict__[value][idx] = 1 |
def send(self, fail_silently=False):
"""
Sends the sms message
"""
if not self.to:
# Don't bother creating the connection if there's nobody to send to
return 0
res = self.get_connection(fail_silently).send_messages([self])
sms_post_send.send(sender=self, to=self.to, from_phone=self.from_phone, body=self.body)
return res | Sends the sms message | Below is the the instruction that describes the task:
### Input:
Sends the sms message
### Response:
def send(self, fail_silently=False):
"""
Sends the sms message
"""
if not self.to:
# Don't bother creating the connection if there's nobody to send to
return 0
res = self.get_connection(fail_silently).send_messages([self])
sms_post_send.send(sender=self, to=self.to, from_phone=self.from_phone, body=self.body)
return res |
def _applicationStart(self, data):
"""
Initializes the database connection pool.
:param data: <object> event data object
:return: <void>
"""
checkup = False
if "viper.mysql" in self.application.config \
and isinstance(self.application.config["viper.mysql"], dict):
if "host" in self.application.config["viper.mysql"] and \
"port" in self.application.config["viper.mysql"] and \
"name" in self.application.config["viper.mysql"]:
if len(self.application.config["viper.mysql"]["host"]) > 0 and \
self.application.config["viper.mysql"]["port"] > 0 and \
len(self.application.config["viper.mysql"]["name"]) > 0:
checkup = True
if checkup is not True:
return
try:
self._connectionPool = adbapi.ConnectionPool(
"MySQLdb",
host=self.application.config["viper.mysql"]["host"],
port=int(self.application.config["viper.mysql"]["port"]),
user=self.application.config["viper.mysql"]["username"],
passwd=self.application.config["viper.mysql"]["password"],
db=self.application.config["viper.mysql"]["name"],
charset=self.application.config["viper.mysql"]["charset"],
cp_min=int(
self.application.config["viper.mysql"]["connectionsMinimum"]
),
cp_max=int(
self.application.config["viper.mysql"]["connectionsMaximum"]
),
cp_reconnect=True
)
except Exception as e:
self.log.error(
"[Viper.MySQL] Cannot connect to server. Error: {error}",
error=str(e)
)
if "init" in self.application.config["viper.mysql"] \
and self.application.config["viper.mysql"]["init"]["runIfEmpty"]:
self._checkIfDatabaseIsEmpty(
lambda isEmpty:
self._scheduleDatabaseInit(isEmpty)
,
lambda error:
self.log.error("[Viper.MySQL] Cannot check if database is empty. Error {error}", error=error)
) | Initializes the database connection pool.
:param data: <object> event data object
:return: <void> | Below is the the instruction that describes the task:
### Input:
Initializes the database connection pool.
:param data: <object> event data object
:return: <void>
### Response:
def _applicationStart(self, data):
"""
Initializes the database connection pool.
:param data: <object> event data object
:return: <void>
"""
checkup = False
if "viper.mysql" in self.application.config \
and isinstance(self.application.config["viper.mysql"], dict):
if "host" in self.application.config["viper.mysql"] and \
"port" in self.application.config["viper.mysql"] and \
"name" in self.application.config["viper.mysql"]:
if len(self.application.config["viper.mysql"]["host"]) > 0 and \
self.application.config["viper.mysql"]["port"] > 0 and \
len(self.application.config["viper.mysql"]["name"]) > 0:
checkup = True
if checkup is not True:
return
try:
self._connectionPool = adbapi.ConnectionPool(
"MySQLdb",
host=self.application.config["viper.mysql"]["host"],
port=int(self.application.config["viper.mysql"]["port"]),
user=self.application.config["viper.mysql"]["username"],
passwd=self.application.config["viper.mysql"]["password"],
db=self.application.config["viper.mysql"]["name"],
charset=self.application.config["viper.mysql"]["charset"],
cp_min=int(
self.application.config["viper.mysql"]["connectionsMinimum"]
),
cp_max=int(
self.application.config["viper.mysql"]["connectionsMaximum"]
),
cp_reconnect=True
)
except Exception as e:
self.log.error(
"[Viper.MySQL] Cannot connect to server. Error: {error}",
error=str(e)
)
if "init" in self.application.config["viper.mysql"] \
and self.application.config["viper.mysql"]["init"]["runIfEmpty"]:
self._checkIfDatabaseIsEmpty(
lambda isEmpty:
self._scheduleDatabaseInit(isEmpty)
,
lambda error:
self.log.error("[Viper.MySQL] Cannot check if database is empty. Error {error}", error=error)
) |
def set_friend_add_request(self, *, flag, approve=True, remark=None):
"""
处理加好友请求
------------
:param str flag: 加好友请求的 flag(需从上报的数据中获得)
:param bool approve: 是否同意请求
:param str remark: 添加后的好友备注(仅在同意时有效)
:return: None
:rtype: None
"""
return super().__getattr__('set_friend_add_request') \
(flag=flag, approve=approve, remark=remark) | 处理加好友请求
------------
:param str flag: 加好友请求的 flag(需从上报的数据中获得)
:param bool approve: 是否同意请求
:param str remark: 添加后的好友备注(仅在同意时有效)
:return: None
:rtype: None | Below is the the instruction that describes the task:
### Input:
处理加好友请求
------------
:param str flag: 加好友请求的 flag(需从上报的数据中获得)
:param bool approve: 是否同意请求
:param str remark: 添加后的好友备注(仅在同意时有效)
:return: None
:rtype: None
### Response:
def set_friend_add_request(self, *, flag, approve=True, remark=None):
"""
处理加好友请求
------------
:param str flag: 加好友请求的 flag(需从上报的数据中获得)
:param bool approve: 是否同意请求
:param str remark: 添加后的好友备注(仅在同意时有效)
:return: None
:rtype: None
"""
return super().__getattr__('set_friend_add_request') \
(flag=flag, approve=approve, remark=remark) |
def getInstance(cls, *args):
'''
Returns a singleton instance of the class
'''
if not cls.__singleton:
cls.__singleton = Heroku(*args)
return cls.__singleton | Returns a singleton instance of the class | Below is the the instruction that describes the task:
### Input:
Returns a singleton instance of the class
### Response:
def getInstance(cls, *args):
'''
Returns a singleton instance of the class
'''
if not cls.__singleton:
cls.__singleton = Heroku(*args)
return cls.__singleton |
def expire_cache(fragment_name, *args):
"""
Expire a cache item.
@param url: The url object
@param product_names: A list of product names
@param start: The date from which the reporting should start.
@param stop: The date at which the reporting should stop.
"""
cache_key = make_template_fragment_key(fragment_name, args)
cache.delete(cache_key) | Expire a cache item.
@param url: The url object
@param product_names: A list of product names
@param start: The date from which the reporting should start.
@param stop: The date at which the reporting should stop. | Below is the the instruction that describes the task:
### Input:
Expire a cache item.
@param url: The url object
@param product_names: A list of product names
@param start: The date from which the reporting should start.
@param stop: The date at which the reporting should stop.
### Response:
def expire_cache(fragment_name, *args):
"""
Expire a cache item.
@param url: The url object
@param product_names: A list of product names
@param start: The date from which the reporting should start.
@param stop: The date at which the reporting should stop.
"""
cache_key = make_template_fragment_key(fragment_name, args)
cache.delete(cache_key) |
def _check_rr_name(self, rr_name):
# type: (Optional[str]) -> bytes
'''
An internal method to check whether this ISO requires or does not
require a Rock Ridge path.
Parameters:
rr_name - The Rock Ridge name.
Returns:
The Rock Ridge name in bytes if this is a Rock Ridge ISO, None otherwise.
'''
if self.rock_ridge:
if not rr_name:
raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name must be passed for a rock-ridge ISO')
if rr_name.count('/') != 0:
raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name must be relative')
return rr_name.encode('utf-8')
if rr_name:
raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name can only be specified for a rock-ridge ISO')
return b'' | An internal method to check whether this ISO requires or does not
require a Rock Ridge path.
Parameters:
rr_name - The Rock Ridge name.
Returns:
The Rock Ridge name in bytes if this is a Rock Ridge ISO, None otherwise. | Below is the the instruction that describes the task:
### Input:
An internal method to check whether this ISO requires or does not
require a Rock Ridge path.
Parameters:
rr_name - The Rock Ridge name.
Returns:
The Rock Ridge name in bytes if this is a Rock Ridge ISO, None otherwise.
### Response:
def _check_rr_name(self, rr_name):
# type: (Optional[str]) -> bytes
'''
An internal method to check whether this ISO requires or does not
require a Rock Ridge path.
Parameters:
rr_name - The Rock Ridge name.
Returns:
The Rock Ridge name in bytes if this is a Rock Ridge ISO, None otherwise.
'''
if self.rock_ridge:
if not rr_name:
raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name must be passed for a rock-ridge ISO')
if rr_name.count('/') != 0:
raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name must be relative')
return rr_name.encode('utf-8')
if rr_name:
raise pycdlibexception.PyCdlibInvalidInput('A rock ridge name can only be specified for a rock-ridge ISO')
return b'' |
def fetchone(self):
"""Fetch the next row"""
self._check_executed()
if self._rows is None or self.rownumber >= len(self._rows):
return None
result = self._rows[self.rownumber]
self.rownumber += 1
return result | Fetch the next row | Below is the the instruction that describes the task:
### Input:
Fetch the next row
### Response:
def fetchone(self):
"""Fetch the next row"""
self._check_executed()
if self._rows is None or self.rownumber >= len(self._rows):
return None
result = self._rows[self.rownumber]
self.rownumber += 1
return result |
def generate_GitHub_token(*, note="Doctr token for pushing to gh-pages from Travis", scopes=None, **login_kwargs):
"""
Generate a GitHub token for pushing from Travis
The scope requested is public_repo.
If no password or OTP are provided, they will be requested from the
command line.
The token created here can be revoked at
https://github.com/settings/tokens.
"""
if scopes is None:
scopes = ['public_repo']
AUTH_URL = "https://api.github.com/authorizations"
data = {
"scopes": scopes,
"note": note,
"note_url": "https://github.com/drdoctr/doctr",
"fingerprint": str(uuid.uuid4()),
}
return GitHub_post(data, AUTH_URL, **login_kwargs) | Generate a GitHub token for pushing from Travis
The scope requested is public_repo.
If no password or OTP are provided, they will be requested from the
command line.
The token created here can be revoked at
https://github.com/settings/tokens. | Below is the the instruction that describes the task:
### Input:
Generate a GitHub token for pushing from Travis
The scope requested is public_repo.
If no password or OTP are provided, they will be requested from the
command line.
The token created here can be revoked at
https://github.com/settings/tokens.
### Response:
def generate_GitHub_token(*, note="Doctr token for pushing to gh-pages from Travis", scopes=None, **login_kwargs):
"""
Generate a GitHub token for pushing from Travis
The scope requested is public_repo.
If no password or OTP are provided, they will be requested from the
command line.
The token created here can be revoked at
https://github.com/settings/tokens.
"""
if scopes is None:
scopes = ['public_repo']
AUTH_URL = "https://api.github.com/authorizations"
data = {
"scopes": scopes,
"note": note,
"note_url": "https://github.com/drdoctr/doctr",
"fingerprint": str(uuid.uuid4()),
}
return GitHub_post(data, AUTH_URL, **login_kwargs) |
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError("bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__))
self.__nonzero__() | Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean | Below is the instruction that describes the task:
### Input:
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
### Response:
def bool(self):
"""
Return the bool of a single element PandasObject.
This must be a boolean scalar value, either True or False. Raise a
ValueError if the PandasObject does not have exactly 1 element, or that
element is not boolean
"""
v = self.squeeze()
if isinstance(v, (bool, np.bool_)):
return bool(v)
elif is_scalar(v):
raise ValueError("bool cannot act on a non-boolean single element "
"{0}".format(self.__class__.__name__))
self.__nonzero__() |
def to_s3_uri(code_dict):
"""Constructs a S3 URI string from given code dictionary
:param dict code_dict: Dictionary containing Lambda function Code S3 location of the form
{S3Bucket, S3Key, S3ObjectVersion}
:return: S3 URI of form s3://bucket/key?versionId=version
:rtype string
"""
try:
uri = "s3://{bucket}/{key}".format(bucket=code_dict["S3Bucket"], key=code_dict["S3Key"])
version = code_dict.get("S3ObjectVersion", None)
except (TypeError, AttributeError):
raise TypeError("Code location should be a dictionary")
if version:
uri += "?versionId=" + version
return uri | Constructs a S3 URI string from given code dictionary
:param dict code_dict: Dictionary containing Lambda function Code S3 location of the form
{S3Bucket, S3Key, S3ObjectVersion}
:return: S3 URI of form s3://bucket/key?versionId=version
:rtype string | Below is the instruction that describes the task:
### Input:
Constructs a S3 URI string from given code dictionary
:param dict code_dict: Dictionary containing Lambda function Code S3 location of the form
{S3Bucket, S3Key, S3ObjectVersion}
:return: S3 URI of form s3://bucket/key?versionId=version
:rtype string
### Response:
def to_s3_uri(code_dict):
"""Constructs a S3 URI string from given code dictionary
:param dict code_dict: Dictionary containing Lambda function Code S3 location of the form
{S3Bucket, S3Key, S3ObjectVersion}
:return: S3 URI of form s3://bucket/key?versionId=version
:rtype string
"""
try:
uri = "s3://{bucket}/{key}".format(bucket=code_dict["S3Bucket"], key=code_dict["S3Key"])
version = code_dict.get("S3ObjectVersion", None)
except (TypeError, AttributeError):
raise TypeError("Code location should be a dictionary")
if version:
uri += "?versionId=" + version
return uri |
def unpack(regex):
"""
Remove the outermost parens, keep the (?P...) one
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'(?P<xyz>abc)'
>>> unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex) and not regex.startswith('(?P<'):
return re.sub(r'^\((\?:)?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex | Remove the outermost parens, keep the (?P...) one
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'(?P<xyz>abc)'
>>> unpack(r'[abc]')
'[abc]' | Below is the instruction that describes the task:
### Input:
Remove the outermost parens, keep the (?P...) one
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'(?P<xyz>abc)'
>>> unpack(r'[abc]')
'[abc]'
### Response:
def unpack(regex):
"""
Remove the outermost parens, keep the (?P...) one
>>> unpack(r'(abc)')
'abc'
>>> unpack(r'(?:abc)')
'abc'
>>> unpack(r'(?P<xyz>abc)')
'(?P<xyz>abc)'
>>> unpack(r'[abc]')
'[abc]'
"""
if is_packed(regex) and not regex.startswith('(?P<'):
return re.sub(r'^\((\?:)?(?P<content>.*?)\)$', r'\g<content>', regex)
else:
return regex |
def error(code, message, template):
"""A generic error handler"""
if json_requested():
return json_error(code, message)
else:
return render_template(template, message=message), code | A generic error handler | Below is the instruction that describes the task:
### Input:
A generic error handler
### Response:
def error(code, message, template):
"""A generic error handler"""
if json_requested():
return json_error(code, message)
else:
return render_template(template, message=message), code |
def _AddCampaignsToGroup(client, campaign_group_id, campaign_ids):
"""Adds multiple campaigns to a campaign group.
Args:
client: an AdWordsClient instance.
campaign_group_id: an integer ID for the campaign group.
campaign_ids: a list of integer IDs for campaigns.
"""
# Get the CampaignService.
campaign_service = client.GetService('CampaignService', version='v201809')
# Create the operations.
operations = [{
'operator': 'SET',
'operand': {
'id': campaign_id,
'campaignGroupId': campaign_group_id
}
} for campaign_id in campaign_ids]
campaign_service.mutate(operations)
# Display the results.
print ('The following campaign IDs were added to the campaign group with ID '
'"%d":\n\t%s' % (campaign_group_id, campaign_ids)) | Adds multiple campaigns to a campaign group.
Args:
client: an AdWordsClient instance.
campaign_group_id: an integer ID for the campaign group.
campaign_ids: a list of integer IDs for campaigns. | Below is the instruction that describes the task:
### Input:
Adds multiple campaigns to a campaign group.
Args:
client: an AdWordsClient instance.
campaign_group_id: an integer ID for the campaign group.
campaign_ids: a list of integer IDs for campaigns.
### Response:
def _AddCampaignsToGroup(client, campaign_group_id, campaign_ids):
"""Adds multiple campaigns to a campaign group.
Args:
client: an AdWordsClient instance.
campaign_group_id: an integer ID for the campaign group.
campaign_ids: a list of integer IDs for campaigns.
"""
# Get the CampaignService.
campaign_service = client.GetService('CampaignService', version='v201809')
# Create the operations.
operations = [{
'operator': 'SET',
'operand': {
'id': campaign_id,
'campaignGroupId': campaign_group_id
}
} for campaign_id in campaign_ids]
campaign_service.mutate(operations)
# Display the results.
print ('The following campaign IDs were added to the campaign group with ID '
'"%d":\n\t%s' % (campaign_group_id, campaign_ids)) |
def do_POST(self):
'''The POST command.
'''
try:
ct = self.headers['content-type']
if ct.startswith('multipart/'):
cid = resolvers.MIMEResolver(ct, self.rfile)
xml = cid.GetSOAPPart()
ps = ParsedSoap(xml, resolver=cid.Resolve)
else:
length = int(self.headers['content-length'])
ps = ParsedSoap(self.rfile.read(length))
except ParseException, e:
self.send_fault(FaultFromZSIException(e))
return
except Exception, e:
# Faulted while processing; assume it's in the header.
self.send_fault(FaultFromException(e, 1, sys.exc_info()[2]))
return
_Dispatch(ps, self.server.modules, self.send_xml, self.send_fault,
docstyle=self.server.docstyle, nsdict=self.server.nsdict,
typesmodule=self.server.typesmodule, rpc=self.server.rpc) | The POST command. | Below is the instruction that describes the task:
### Input:
The POST command.
### Response:
def do_POST(self):
'''The POST command.
'''
try:
ct = self.headers['content-type']
if ct.startswith('multipart/'):
cid = resolvers.MIMEResolver(ct, self.rfile)
xml = cid.GetSOAPPart()
ps = ParsedSoap(xml, resolver=cid.Resolve)
else:
length = int(self.headers['content-length'])
ps = ParsedSoap(self.rfile.read(length))
except ParseException, e:
self.send_fault(FaultFromZSIException(e))
return
except Exception, e:
# Faulted while processing; assume it's in the header.
self.send_fault(FaultFromException(e, 1, sys.exc_info()[2]))
return
_Dispatch(ps, self.server.modules, self.send_xml, self.send_fault,
docstyle=self.server.docstyle, nsdict=self.server.nsdict,
typesmodule=self.server.typesmodule, rpc=self.server.rpc) |
def nvmlDeviceGetTopologyCommonAncestor(device1, device2):
r"""
/**
* Retrieve the common ancestor for two devices
* For all products.
* Supported on Linux only.
*
* @param device1 The identifier of the first device
* @param device2 The identifier of the second device
* @param pathInfo A \ref nvmlGpuTopologyLevel_t that gives the path type
*
* @return
* - \ref NVML_SUCCESS if \a pathInfo has been set
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1, or \a device2 is invalid, or \a pathInfo is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature
* - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery
*/
nvmlReturn_t DECLDIR nvmlDeviceGetTopologyCommonAncestor
"""
c_level = _nvmlGpuTopologyLevel_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTopologyCommonAncestor")
ret = fn(device1, device2, byref(c_level))
_nvmlCheckReturn(ret)
return bytes_to_str(c_level.value) | r"""
/**
* Retrieve the common ancestor for two devices
* For all products.
* Supported on Linux only.
*
* @param device1 The identifier of the first device
* @param device2 The identifier of the second device
* @param pathInfo A \ref nvmlGpuTopologyLevel_t that gives the path type
*
* @return
* - \ref NVML_SUCCESS if \a pathInfo has been set
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1, or \a device2 is invalid, or \a pathInfo is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature
* - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery
*/
nvmlReturn_t DECLDIR nvmlDeviceGetTopologyCommonAncestor | Below is the instruction that describes the task:
### Input:
r"""
/**
* Retrieve the common ancestor for two devices
* For all products.
* Supported on Linux only.
*
* @param device1 The identifier of the first device
* @param device2 The identifier of the second device
* @param pathInfo A \ref nvmlGpuTopologyLevel_t that gives the path type
*
* @return
* - \ref NVML_SUCCESS if \a pathInfo has been set
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1, or \a device2 is invalid, or \a pathInfo is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature
* - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery
*/
nvmlReturn_t DECLDIR nvmlDeviceGetTopologyCommonAncestor
### Response:
def nvmlDeviceGetTopologyCommonAncestor(device1, device2):
r"""
/**
* Retrieve the common ancestor for two devices
* For all products.
* Supported on Linux only.
*
* @param device1 The identifier of the first device
* @param device2 The identifier of the second device
* @param pathInfo A \ref nvmlGpuTopologyLevel_t that gives the path type
*
* @return
* - \ref NVML_SUCCESS if \a pathInfo has been set
* - \ref NVML_ERROR_INVALID_ARGUMENT if \a device1, or \a device2 is invalid, or \a pathInfo is NULL
* - \ref NVML_ERROR_NOT_SUPPORTED if the device or OS does not support this feature
* - \ref NVML_ERROR_UNKNOWN an error has occurred in underlying topology discovery
*/
nvmlReturn_t DECLDIR nvmlDeviceGetTopologyCommonAncestor
"""
c_level = _nvmlGpuTopologyLevel_t()
fn = _nvmlGetFunctionPointer("nvmlDeviceGetTopologyCommonAncestor")
ret = fn(device1, device2, byref(c_level))
_nvmlCheckReturn(ret)
return bytes_to_str(c_level.value) |
def create_button(self, widget):
"""Create a button that has the given widget rendered as an icon
:param widget: the widget to render as icon
:type widget: QtGui.QWidget
:returns: the created button
:rtype: QtGui.QAbstractButton
:raises: None
"""
btn = QtGui.QToolButton(self)
btn.setIconSize(QtCore.QSize(self._iconw, self._iconh))
self.update_button(btn, widget)
return btn | Create a button that has the given widget rendered as an icon
:param widget: the widget to render as icon
:type widget: QtGui.QWidget
:returns: the created button
:rtype: QtGui.QAbstractButton
:raises: None | Below is the instruction that describes the task:
### Input:
Create a button that has the given widget rendered as an icon
:param widget: the widget to render as icon
:type widget: QtGui.QWidget
:returns: the created button
:rtype: QtGui.QAbstractButton
:raises: None
### Response:
def create_button(self, widget):
"""Create a button that has the given widget rendered as an icon
:param widget: the widget to render as icon
:type widget: QtGui.QWidget
:returns: the created button
:rtype: QtGui.QAbstractButton
:raises: None
"""
btn = QtGui.QToolButton(self)
btn.setIconSize(QtCore.QSize(self._iconw, self._iconh))
self.update_button(btn, widget)
return btn |
def unders_to_dashes_in_keys(self) -> None:
"""Replaces underscores with dashes in key names.
For each attribute in a mapping, this replaces any underscores \
in its keys with dashes. Handy because Python does not \
accept dashes in identifiers, while some YAML-based formats use \
dashes in their keys.
"""
for key_node, _ in self.yaml_node.value:
key_node.value = key_node.value.replace('_', '-') | Replaces underscores with dashes in key names.
For each attribute in a mapping, this replaces any underscores \
in its keys with dashes. Handy because Python does not \
accept dashes in identifiers, while some YAML-based formats use \
dashes in their keys. | Below is the instruction that describes the task:
### Input:
Replaces underscores with dashes in key names.
For each attribute in a mapping, this replaces any underscores \
in its keys with dashes. Handy because Python does not \
accept dashes in identifiers, while some YAML-based formats use \
dashes in their keys.
### Response:
def unders_to_dashes_in_keys(self) -> None:
"""Replaces underscores with dashes in key names.
For each attribute in a mapping, this replaces any underscores \
in its keys with dashes. Handy because Python does not \
accept dashes in identifiers, while some YAML-based formats use \
dashes in their keys.
"""
for key_node, _ in self.yaml_node.value:
key_node.value = key_node.value.replace('_', '-') |
def __getDecision(self, result, multiple=False, **values):
"""
The main method for decision picking.
Args:
result (array of str): What values you want to get in return array.
multiple (bolean, optional): Do you want multiple result if it finds many maching decisions.
**values (dict): What should finder look for, (headerString : value).
Returns: Maped result values with finded elements in row/row.
"""
values = self.__toString(values)
__valueKeyWithHeaderIndex = self.__valueKeyWithHeaderIndex(values)
errors = self.__checkDecisionParameters(result, **values)
if errors:
view.Tli.showErrors('ParametersError', errors)
machingData = {}
for line in self.decisions:
match = True
for index in __valueKeyWithHeaderIndex:
if line[index] != __valueKeyWithHeaderIndex[index]:
if line[index] != self.__wildcardSymbol:
match = False
break
if match:
if multiple:
for header in result:
if header not in machingData:
machingData[header] = [line[self.header.index(header)]]
else:
machingData[header].append(line[self.header.index(header)])
else:
for header in result:
machingData[header] = line[self.header.index(header)]
return machingData
if multiple:
if machingData:
return machingData
# Return none if not found (not string so
# not found value can be recognized
return dict((key, None) for key in result) | The main method for decision picking.
Args:
result (array of str): What values you want to get in return array.
multiple (bolean, optional): Do you want multiple result if it finds many maching decisions.
**values (dict): What should finder look for, (headerString : value).
Returns: Maped result values with finded elements in row/row. | Below is the instruction that describes the task:
### Input:
The main method for decision picking.
Args:
result (array of str): What values you want to get in return array.
multiple (bolean, optional): Do you want multiple result if it finds many maching decisions.
**values (dict): What should finder look for, (headerString : value).
Returns: Maped result values with finded elements in row/row.
### Response:
def __getDecision(self, result, multiple=False, **values):
"""
The main method for decision picking.
Args:
result (array of str): What values you want to get in return array.
multiple (bolean, optional): Do you want multiple result if it finds many maching decisions.
**values (dict): What should finder look for, (headerString : value).
Returns: Maped result values with finded elements in row/row.
"""
values = self.__toString(values)
__valueKeyWithHeaderIndex = self.__valueKeyWithHeaderIndex(values)
errors = self.__checkDecisionParameters(result, **values)
if errors:
view.Tli.showErrors('ParametersError', errors)
machingData = {}
for line in self.decisions:
match = True
for index in __valueKeyWithHeaderIndex:
if line[index] != __valueKeyWithHeaderIndex[index]:
if line[index] != self.__wildcardSymbol:
match = False
break
if match:
if multiple:
for header in result:
if header not in machingData:
machingData[header] = [line[self.header.index(header)]]
else:
machingData[header].append(line[self.header.index(header)])
else:
for header in result:
machingData[header] = line[self.header.index(header)]
return machingData
if multiple:
if machingData:
return machingData
# Return none if not found (not string so
# not found value can be recognized
return dict((key, None) for key in result) |
def _process_group(input_group, required_group, groupname, append_subgroups=None):
"""
Process one group from the input yaml. Ensure it has the required entries. If there is a
subgroup that should be processed and then appended to the rest of the subgroups in that group,
handle it accordingly.
:param dict input_group: The dict of values of the input group
:param dict required_group: The dict of required values for the input group
:param str groupname: The name of the group being processed
:param list append_subgroups: list of subgroups to append to each, other subgroup in this group
:return: processed dict of entries for the group
:rtype: dict
"""
if append_subgroups is None:
append_subgroups = []
tool_options = {}
for key in input_group:
_ensure_set_contains(input_group[key], required_group.get(key, {}), groupname + '::' + key)
if key in append_subgroups:
continue
else:
tool_options[key] = input_group[key]
for key in input_group:
if key in append_subgroups:
continue
else:
for yek in append_subgroups:
tool_options[key].update(input_group[yek])
return tool_options | Process one group from the input yaml. Ensure it has the required entries. If there is a
subgroup that should be processed and then appended to the rest of the subgroups in that group,
handle it accordingly.
:param dict input_group: The dict of values of the input group
:param dict required_group: The dict of required values for the input group
:param str groupname: The name of the group being processed
:param list append_subgroups: list of subgroups to append to each, other subgroup in this group
:return: processed dict of entries for the group
:rtype: dict | Below is the instruction that describes the task:
### Input:
Process one group from the input yaml. Ensure it has the required entries. If there is a
subgroup that should be processed and then appended to the rest of the subgroups in that group,
handle it accordingly.
:param dict input_group: The dict of values of the input group
:param dict required_group: The dict of required values for the input group
:param str groupname: The name of the group being processed
:param list append_subgroups: list of subgroups to append to each, other subgroup in this group
:return: processed dict of entries for the group
:rtype: dict
### Response:
def _process_group(input_group, required_group, groupname, append_subgroups=None):
"""
Process one group from the input yaml. Ensure it has the required entries. If there is a
subgroup that should be processed and then appended to the rest of the subgroups in that group,
handle it accordingly.
:param dict input_group: The dict of values of the input group
:param dict required_group: The dict of required values for the input group
:param str groupname: The name of the group being processed
:param list append_subgroups: list of subgroups to append to each, other subgroup in this group
:return: processed dict of entries for the group
:rtype: dict
"""
if append_subgroups is None:
append_subgroups = []
tool_options = {}
for key in input_group:
_ensure_set_contains(input_group[key], required_group.get(key, {}), groupname + '::' + key)
if key in append_subgroups:
continue
else:
tool_options[key] = input_group[key]
for key in input_group:
if key in append_subgroups:
continue
else:
for yek in append_subgroups:
tool_options[key].update(input_group[yek])
return tool_options |
def execQuery(self, sql, parameters = None, cursorClass = MySQLdb.cursors.Cursor, InnoDB = False):
"""Execute SQL query."""
i = 0
errcode = 0
caughte = None
while i < self.numTries:
i += 1
try:
cursor = self.connection.cursor(cursorClass)
if parameters:
errcode = cursor.execute(sql, parameters)
else:
errcode = cursor.execute(sql)
if InnoDB:
self.connection.commit()
results = cursor.fetchall()
self.lastrowid = int(cursor.lastrowid)
cursor.close()
return results
except MySQLdb.OperationalError, e:
errcode = e[0]
# errcodes of 2006 or 2013 usually indicate a dropped connection
# errcode 1100 is an error with table locking
print(e)
self.connection.ping(True)
caughte = e
continue
except:
traceback.print_exc()
break
sys.stderr.write("\nSQL execution error in query at %s:" % datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
sys.stderr.write("\n %s." % sql)
sys.stderr.flush()
sys.stderr.write("\nErrorcode: '%s'.\n" % (str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte) | Execute SQL query. | Below is the instruction that describes the task:
### Input:
Execute SQL query.
### Response:
def execQuery(self, sql, parameters = None, cursorClass = MySQLdb.cursors.Cursor, InnoDB = False):
"""Execute SQL query."""
i = 0
errcode = 0
caughte = None
while i < self.numTries:
i += 1
try:
cursor = self.connection.cursor(cursorClass)
if parameters:
errcode = cursor.execute(sql, parameters)
else:
errcode = cursor.execute(sql)
if InnoDB:
self.connection.commit()
results = cursor.fetchall()
self.lastrowid = int(cursor.lastrowid)
cursor.close()
return results
except MySQLdb.OperationalError, e:
errcode = e[0]
# errcodes of 2006 or 2013 usually indicate a dropped connection
# errcode 1100 is an error with table locking
print(e)
self.connection.ping(True)
caughte = e
continue
except:
traceback.print_exc()
break
sys.stderr.write("\nSQL execution error in query at %s:" % datetime.now().strftime("%Y-%m-%d %H:%M:%S"))
sys.stderr.write("\n %s." % sql)
sys.stderr.flush()
sys.stderr.write("\nErrorcode: '%s'.\n" % (str(caughte)))
sys.stderr.flush()
raise MySQLdb.OperationalError(caughte) |
def __store_clustering_results(self, amount_clusters, leaf_blocks):
"""!
@brief Stores clustering results in a convenient way.
@param[in] amount_clusters (uint): Amount of cluster that was allocated during processing.
@param[in] leaf_blocks (list): Leaf BANG-blocks (the smallest cells).
"""
self.__clusters = [[] for _ in range(amount_clusters)]
for block in leaf_blocks:
index = block.get_cluster()
if index is not None:
self.__clusters[index] += block.get_points()
else:
self.__noise += block.get_points()
self.__clusters = [ list(set(cluster)) for cluster in self.__clusters ]
self.__noise = list(set(self.__noise)) | !
@brief Stores clustering results in a convenient way.
@param[in] amount_clusters (uint): Amount of cluster that was allocated during processing.
@param[in] leaf_blocks (list): Leaf BANG-blocks (the smallest cells). | Below is the instruction that describes the task:
### Input:
!
@brief Stores clustering results in a convenient way.
@param[in] amount_clusters (uint): Amount of cluster that was allocated during processing.
@param[in] leaf_blocks (list): Leaf BANG-blocks (the smallest cells).
### Response:
def __store_clustering_results(self, amount_clusters, leaf_blocks):
"""!
@brief Stores clustering results in a convenient way.
@param[in] amount_clusters (uint): Amount of cluster that was allocated during processing.
@param[in] leaf_blocks (list): Leaf BANG-blocks (the smallest cells).
"""
self.__clusters = [[] for _ in range(amount_clusters)]
for block in leaf_blocks:
index = block.get_cluster()
if index is not None:
self.__clusters[index] += block.get_points()
else:
self.__noise += block.get_points()
self.__clusters = [ list(set(cluster)) for cluster in self.__clusters ]
self.__noise = list(set(self.__noise)) |
def list_tickers(self, assetType):
"""Return a list of dicts of metadata tickers for all supported tickers
of the specified asset type, as well as metadata about each ticker.
This includes supported date range, the exchange the ticker is traded
on, and the currency the stock is traded on.
Tickers for unrelated products are omitted.
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
"""
listing_file_url = "https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip"
response = requests.get(listing_file_url)
zipdata = get_zipfile_from_response(response)
raw_csv = get_buffer_from_zipfile(zipdata, 'supported_tickers.csv')
reader = csv.DictReader(raw_csv)
return [row for row in reader
if row.get('assetType') == assetType] | Return a list of dicts of metadata tickers for all supported tickers
of the specified asset type, as well as metadata about each ticker.
This includes supported date range, the exchange the ticker is traded
on, and the currency the stock is traded on.
Tickers for unrelated products are omitted.
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip | Below is the instruction that describes the task:
### Input:
Return a list of dicts of metadata tickers for all supported tickers
of the specified asset type, as well as metadata about each ticker.
This includes supported date range, the exchange the ticker is traded
on, and the currency the stock is traded on.
Tickers for unrelated products are omitted.
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
### Response:
def list_tickers(self, assetType):
"""Return a list of dicts of metadata tickers for all supported tickers
of the specified asset type, as well as metadata about each ticker.
This includes supported date range, the exchange the ticker is traded
on, and the currency the stock is traded on.
Tickers for unrelated products are omitted.
https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip
"""
listing_file_url = "https://apimedia.tiingo.com/docs/tiingo/daily/supported_tickers.zip"
response = requests.get(listing_file_url)
zipdata = get_zipfile_from_response(response)
raw_csv = get_buffer_from_zipfile(zipdata, 'supported_tickers.csv')
reader = csv.DictReader(raw_csv)
return [row for row in reader
if row.get('assetType') == assetType] |
async def edit(self, **fields):
"""|coro|
Edits the current profile of the client.
If a bot account is used then a password field is optional,
otherwise it is required.
Note
-----
To upload an avatar, a :term:`py:bytes-like object` must be passed in that
represents the image being uploaded. If this is done through a file
then the file must be opened via ``open('some_filename', 'rb')`` and
the :term:`py:bytes-like object` is given through the use of ``fp.read()``.
The only image formats supported for uploading is JPEG and PNG.
Parameters
-----------
password: :class:`str`
The current password for the client's account.
Only applicable to user accounts.
new_password: :class:`str`
The new password you wish to change to.
Only applicable to user accounts.
email: :class:`str`
The new email you wish to change to.
Only applicable to user accounts.
house: Optional[:class:`HypeSquadHouse`]
The hypesquad house you wish to change to.
Could be ``None`` to leave the current house.
Only applicable to user accounts.
username: :class:`str`
The new username you wish to change to.
avatar: :class:`bytes`
A :term:`py:bytes-like object` representing the image to upload.
Could be ``None`` to denote no avatar.
Raises
------
HTTPException
Editing your profile failed.
InvalidArgument
Wrong image format passed for ``avatar``.
ClientException
Password is required for non-bot accounts.
House field was not a HypeSquadHouse.
"""
try:
avatar_bytes = fields['avatar']
except KeyError:
avatar = self.avatar
else:
if avatar_bytes is not None:
avatar = _bytes_to_base64_data(avatar_bytes)
else:
avatar = None
not_bot_account = not self.bot
password = fields.get('password')
if not_bot_account and password is None:
raise ClientException('Password is required for non-bot accounts.')
args = {
'password': password,
'username': fields.get('username', self.name),
'avatar': avatar
}
if not_bot_account:
args['email'] = fields.get('email', self.email)
if 'new_password' in fields:
args['new_password'] = fields['new_password']
http = self._state.http
if 'house' in fields:
house = fields['house']
if house is None:
await http.leave_hypesquad_house()
elif not isinstance(house, HypeSquadHouse):
raise ClientException('`house` parameter was not a HypeSquadHouse')
else:
value = house.value
await http.change_hypesquad_house(value)
data = await http.edit_profile(**args)
if not_bot_account:
self.email = data['email']
try:
http._token(data['token'], bot=False)
except KeyError:
pass
self._update(data) | |coro|
Edits the current profile of the client.
If a bot account is used then a password field is optional,
otherwise it is required.
Note
-----
To upload an avatar, a :term:`py:bytes-like object` must be passed in that
represents the image being uploaded. If this is done through a file
then the file must be opened via ``open('some_filename', 'rb')`` and
the :term:`py:bytes-like object` is given through the use of ``fp.read()``.
The only image formats supported for uploading is JPEG and PNG.
Parameters
-----------
password: :class:`str`
The current password for the client's account.
Only applicable to user accounts.
new_password: :class:`str`
The new password you wish to change to.
Only applicable to user accounts.
email: :class:`str`
The new email you wish to change to.
Only applicable to user accounts.
house: Optional[:class:`HypeSquadHouse`]
The hypesquad house you wish to change to.
Could be ``None`` to leave the current house.
Only applicable to user accounts.
username: :class:`str`
The new username you wish to change to.
avatar: :class:`bytes`
A :term:`py:bytes-like object` representing the image to upload.
Could be ``None`` to denote no avatar.
Raises
------
HTTPException
Editing your profile failed.
InvalidArgument
Wrong image format passed for ``avatar``.
ClientException
Password is required for non-bot accounts.
House field was not a HypeSquadHouse. | Below is the instruction that describes the task:
### Input:
|coro|
Edits the current profile of the client.
If a bot account is used then a password field is optional,
otherwise it is required.
Note
-----
To upload an avatar, a :term:`py:bytes-like object` must be passed in that
represents the image being uploaded. If this is done through a file
then the file must be opened via ``open('some_filename', 'rb')`` and
the :term:`py:bytes-like object` is given through the use of ``fp.read()``.
The only image formats supported for uploading is JPEG and PNG.
Parameters
-----------
password: :class:`str`
The current password for the client's account.
Only applicable to user accounts.
new_password: :class:`str`
The new password you wish to change to.
Only applicable to user accounts.
email: :class:`str`
The new email you wish to change to.
Only applicable to user accounts.
house: Optional[:class:`HypeSquadHouse`]
The hypesquad house you wish to change to.
Could be ``None`` to leave the current house.
Only applicable to user accounts.
username: :class:`str`
The new username you wish to change to.
avatar: :class:`bytes`
A :term:`py:bytes-like object` representing the image to upload.
Could be ``None`` to denote no avatar.
Raises
------
HTTPException
Editing your profile failed.
InvalidArgument
Wrong image format passed for ``avatar``.
ClientException
Password is required for non-bot accounts.
House field was not a HypeSquadHouse.
### Response:
async def edit(self, **fields):
"""|coro|
Edits the current profile of the client.
If a bot account is used then a password field is optional,
otherwise it is required.
Note
-----
To upload an avatar, a :term:`py:bytes-like object` must be passed in that
represents the image being uploaded. If this is done through a file
then the file must be opened via ``open('some_filename', 'rb')`` and
the :term:`py:bytes-like object` is given through the use of ``fp.read()``.
The only image formats supported for uploading is JPEG and PNG.
Parameters
-----------
password: :class:`str`
The current password for the client's account.
Only applicable to user accounts.
new_password: :class:`str`
The new password you wish to change to.
Only applicable to user accounts.
email: :class:`str`
The new email you wish to change to.
Only applicable to user accounts.
house: Optional[:class:`HypeSquadHouse`]
The hypesquad house you wish to change to.
Could be ``None`` to leave the current house.
Only applicable to user accounts.
username: :class:`str`
The new username you wish to change to.
avatar: :class:`bytes`
A :term:`py:bytes-like object` representing the image to upload.
Could be ``None`` to denote no avatar.
Raises
------
HTTPException
Editing your profile failed.
InvalidArgument
Wrong image format passed for ``avatar``.
ClientException
Password is required for non-bot accounts.
House field was not a HypeSquadHouse.
"""
try:
avatar_bytes = fields['avatar']
except KeyError:
avatar = self.avatar
else:
if avatar_bytes is not None:
avatar = _bytes_to_base64_data(avatar_bytes)
else:
avatar = None
not_bot_account = not self.bot
password = fields.get('password')
if not_bot_account and password is None:
raise ClientException('Password is required for non-bot accounts.')
args = {
'password': password,
'username': fields.get('username', self.name),
'avatar': avatar
}
if not_bot_account:
args['email'] = fields.get('email', self.email)
if 'new_password' in fields:
args['new_password'] = fields['new_password']
http = self._state.http
if 'house' in fields:
house = fields['house']
if house is None:
await http.leave_hypesquad_house()
elif not isinstance(house, HypeSquadHouse):
raise ClientException('`house` parameter was not a HypeSquadHouse')
else:
value = house.value
await http.change_hypesquad_house(value)
data = await http.edit_profile(**args)
if not_bot_account:
self.email = data['email']
try:
http._token(data['token'], bot=False)
except KeyError:
pass
self._update(data) |
def lookup_int(values, name=None):
"""
Lookup field which transforms the result into an integer.
:param values: values allowed
:param name: name for the field
:return: grammar for the lookup field
"""
field = basic.lookup(values, name)
field.addParseAction(lambda l: int(l[0]))
return field | Lookup field which transforms the result into an integer.
:param values: values allowed
:param name: name for the field
:return: grammar for the lookup field | Below is the the instruction that describes the task:
### Input:
Lookup field which transforms the result into an integer.
:param values: values allowed
:param name: name for the field
:return: grammar for the lookup field
### Response:
def lookup_int(values, name=None):
"""
Lookup field which transforms the result into an integer.
:param values: values allowed
:param name: name for the field
:return: grammar for the lookup field
"""
field = basic.lookup(values, name)
field.addParseAction(lambda l: int(l[0]))
return field |
def get_method_contents(self, method):
"""
Returns the swagger contents of the given method. This checks to see if a conditional block
has been used inside of the method, and, if so, returns the method contents that are
inside of the conditional.
:param dict method: method dictionary
:return: list of swagger component dictionaries for the method
"""
if self._CONDITIONAL_IF in method:
return method[self._CONDITIONAL_IF][1:]
return [method] | Returns the swagger contents of the given method. This checks to see if a conditional block
has been used inside of the method, and, if so, returns the method contents that are
inside of the conditional.
:param dict method: method dictionary
:return: list of swagger component dictionaries for the method | Below is the the instruction that describes the task:
### Input:
Returns the swagger contents of the given method. This checks to see if a conditional block
has been used inside of the method, and, if so, returns the method contents that are
inside of the conditional.
:param dict method: method dictionary
:return: list of swagger component dictionaries for the method
### Response:
def get_method_contents(self, method):
"""
Returns the swagger contents of the given method. This checks to see if a conditional block
has been used inside of the method, and, if so, returns the method contents that are
inside of the conditional.
:param dict method: method dictionary
:return: list of swagger component dictionaries for the method
"""
if self._CONDITIONAL_IF in method:
return method[self._CONDITIONAL_IF][1:]
return [method] |
def touch():
""" Create a .vacationrc file if none exists. """
if not os.path.isfile(get_rc_path()):
open(get_rc_path(), 'a').close()
print('Created file: {}'.format(get_rc_path())) | Create a .vacationrc file if none exists. | Below is the the instruction that describes the task:
### Input:
Create a .vacationrc file if none exists.
### Response:
def touch():
""" Create a .vacationrc file if none exists. """
if not os.path.isfile(get_rc_path()):
open(get_rc_path(), 'a').close()
print('Created file: {}'.format(get_rc_path())) |
def check_triggers(self, price, dt):
"""
Update internal state based on price triggers and the
trade event's price.
"""
stop_reached, limit_reached, sl_stop_reached = \
self.check_order_triggers(price)
if (stop_reached, limit_reached) \
!= (self.stop_reached, self.limit_reached):
self.dt = dt
self.stop_reached = stop_reached
self.limit_reached = limit_reached
if sl_stop_reached:
# Change the STOP LIMIT order into a LIMIT order
self.stop = None | Update internal state based on price triggers and the
trade event's price. | Below is the the instruction that describes the task:
### Input:
Update internal state based on price triggers and the
trade event's price.
### Response:
def check_triggers(self, price, dt):
"""
Update internal state based on price triggers and the
trade event's price.
"""
stop_reached, limit_reached, sl_stop_reached = \
self.check_order_triggers(price)
if (stop_reached, limit_reached) \
!= (self.stop_reached, self.limit_reached):
self.dt = dt
self.stop_reached = stop_reached
self.limit_reached = limit_reached
if sl_stop_reached:
# Change the STOP LIMIT order into a LIMIT order
self.stop = None |
def lock_file(path):
"""File based lock on ``path``.
Creates a file based lock. When acquired, other processes or threads are
prevented from acquiring the same lock until it is released.
"""
with _paths_lock:
lock = _paths_to_locks.get(path)
if lock is None:
_paths_to_locks[path] = lock = _FileLock(path)
return lock | File based lock on ``path``.
Creates a file based lock. When acquired, other processes or threads are
prevented from acquiring the same lock until it is released. | Below is the the instruction that describes the task:
### Input:
File based lock on ``path``.
Creates a file based lock. When acquired, other processes or threads are
prevented from acquiring the same lock until it is released.
### Response:
def lock_file(path):
"""File based lock on ``path``.
Creates a file based lock. When acquired, other processes or threads are
prevented from acquiring the same lock until it is released.
"""
with _paths_lock:
lock = _paths_to_locks.get(path)
if lock is None:
_paths_to_locks[path] = lock = _FileLock(path)
return lock |
def contains(ell, p, shell_only=False):
"""
Check to see whether point is inside
conic.
:param exact: Only solutions exactly on conic
are considered (default: False).
"""
v = augment(p)
_ = ell.solve(v)
return N.allclose(_,0) if shell_only else _ <= 0 | Check to see whether point is inside
conic.
:param exact: Only solutions exactly on conic
are considered (default: False). | Below is the the instruction that describes the task:
### Input:
Check to see whether point is inside
conic.
:param exact: Only solutions exactly on conic
are considered (default: False).
### Response:
def contains(ell, p, shell_only=False):
"""
Check to see whether point is inside
conic.
:param exact: Only solutions exactly on conic
are considered (default: False).
"""
v = augment(p)
_ = ell.solve(v)
return N.allclose(_,0) if shell_only else _ <= 0 |
def quilc_compile_payload(quil_program, isa, specs):
"""REST payload for :py:func:`ForestConnection._quilc_compile`"""
if not quil_program:
raise ValueError("You have attempted to compile an empty program."
" Please provide an actual program.")
if not isinstance(quil_program, Program):
raise TypeError("quil_program must be a Program object.")
if not isinstance(isa, ISA):
raise TypeError("isa must be an ISA object.")
if not isinstance(specs, Specs):
raise TypeError("specs must be a Specs object.")
payload = {"uncompiled-quil": quil_program.out(),
"target-device": {
"isa": isa.to_dict(),
"specs": specs.to_dict()}}
return payload | REST payload for :py:func:`ForestConnection._quilc_compile` | Below is the the instruction that describes the task:
### Input:
REST payload for :py:func:`ForestConnection._quilc_compile`
### Response:
def quilc_compile_payload(quil_program, isa, specs):
"""REST payload for :py:func:`ForestConnection._quilc_compile`"""
if not quil_program:
raise ValueError("You have attempted to compile an empty program."
" Please provide an actual program.")
if not isinstance(quil_program, Program):
raise TypeError("quil_program must be a Program object.")
if not isinstance(isa, ISA):
raise TypeError("isa must be an ISA object.")
if not isinstance(specs, Specs):
raise TypeError("specs must be a Specs object.")
payload = {"uncompiled-quil": quil_program.out(),
"target-device": {
"isa": isa.to_dict(),
"specs": specs.to_dict()}}
return payload |
def get_oauth_token():
"""Retrieve a simple OAuth Token for use with the local http client."""
url = "{0}/token".format(DEFAULT_ORIGIN["Origin"])
r = s.get(url=url)
return r.json()["t"] | Retrieve a simple OAuth Token for use with the local http client. | Below is the the instruction that describes the task:
### Input:
Retrieve a simple OAuth Token for use with the local http client.
### Response:
def get_oauth_token():
"""Retrieve a simple OAuth Token for use with the local http client."""
url = "{0}/token".format(DEFAULT_ORIGIN["Origin"])
r = s.get(url=url)
return r.json()["t"] |
def _remove_node(self, node):
"""
Remove a CFGNode from self.graph as well as from the function manager (if it is the beginning of a function)
:param CFGNode node: The CFGNode to remove from the graph.
:return: None
"""
self.graph.remove_node(node)
if node.addr in self._nodes:
del self._nodes[node.addr]
# We wanna remove the function as well
if node.addr in self.kb.functions:
del self.kb.functions[node.addr]
if node.addr in self.kb.functions.callgraph:
self.kb.functions.callgraph.remove_node(node.addr) | Remove a CFGNode from self.graph as well as from the function manager (if it is the beginning of a function)
:param CFGNode node: The CFGNode to remove from the graph.
:return: None | Below is the the instruction that describes the task:
### Input:
Remove a CFGNode from self.graph as well as from the function manager (if it is the beginning of a function)
:param CFGNode node: The CFGNode to remove from the graph.
:return: None
### Response:
def _remove_node(self, node):
"""
Remove a CFGNode from self.graph as well as from the function manager (if it is the beginning of a function)
:param CFGNode node: The CFGNode to remove from the graph.
:return: None
"""
self.graph.remove_node(node)
if node.addr in self._nodes:
del self._nodes[node.addr]
# We wanna remove the function as well
if node.addr in self.kb.functions:
del self.kb.functions[node.addr]
if node.addr in self.kb.functions.callgraph:
self.kb.functions.callgraph.remove_node(node.addr) |
def remove_forwarding_rules(self, forwarding_rules):
"""
Removes existing forwarding rules from a LoadBalancer.
Args:
forwarding_rules (obj:`list`): A list of `ForwrdingRules` objects
"""
rules_dict = [rule.__dict__ for rule in forwarding_rules]
return self.get_data(
"load_balancers/%s/forwarding_rules/" % self.id,
type=DELETE,
params={"forwarding_rules": rules_dict}
) | Removes existing forwarding rules from a LoadBalancer.
Args:
forwarding_rules (obj:`list`): A list of `ForwrdingRules` objects | Below is the the instruction that describes the task:
### Input:
Removes existing forwarding rules from a LoadBalancer.
Args:
forwarding_rules (obj:`list`): A list of `ForwrdingRules` objects
### Response:
def remove_forwarding_rules(self, forwarding_rules):
"""
Removes existing forwarding rules from a LoadBalancer.
Args:
forwarding_rules (obj:`list`): A list of `ForwrdingRules` objects
"""
rules_dict = [rule.__dict__ for rule in forwarding_rules]
return self.get_data(
"load_balancers/%s/forwarding_rules/" % self.id,
type=DELETE,
params={"forwarding_rules": rules_dict}
) |
def _get_container_environment(self, **kwargs):
"""Get all the Environment variables that will be passed to the container
Certain input fields such as BatchStrategy have different values for the API vs the Environment
variables, such as SingleRecord vs SINGLE_RECORD. This method also handles this conversion.
Args:
**kwargs: existing transform arguments
Returns:
dict: All the environment variables that should be set in the container
"""
environment = {}
environment.update(self.primary_container['Environment'])
environment['SAGEMAKER_BATCH'] = 'True'
if 'MaxPayloadInMB' in kwargs:
environment['SAGEMAKER_MAX_PAYLOAD_IN_MB'] = str(kwargs['MaxPayloadInMB'])
if 'BatchStrategy' in kwargs:
if kwargs['BatchStrategy'] == 'SingleRecord':
strategy_env_value = 'SINGLE_RECORD'
elif kwargs['BatchStrategy'] == 'MultiRecord':
strategy_env_value = 'MULTI_RECORD'
else:
raise ValueError('Invalid BatchStrategy, must be \'SingleRecord\' or \'MultiRecord\'')
environment['SAGEMAKER_BATCH_STRATEGY'] = strategy_env_value
# we only do 1 max concurrent transform in Local Mode
if 'MaxConcurrentTransforms' in kwargs and int(kwargs['MaxConcurrentTransforms']) > 1:
logger.warning('Local Mode only supports 1 ConcurrentTransform. Setting MaxConcurrentTransforms to 1')
environment['SAGEMAKER_MAX_CONCURRENT_TRANSFORMS'] = '1'
# if there were environment variables passed to the Transformer we will pass them to the
# container as well.
if 'Environment' in kwargs:
environment.update(kwargs['Environment'])
return environment | Get all the Environment variables that will be passed to the container
Certain input fields such as BatchStrategy have different values for the API vs the Environment
variables, such as SingleRecord vs SINGLE_RECORD. This method also handles this conversion.
Args:
**kwargs: existing transform arguments
Returns:
dict: All the environment variables that should be set in the container | Below is the the instruction that describes the task:
### Input:
Get all the Environment variables that will be passed to the container
Certain input fields such as BatchStrategy have different values for the API vs the Environment
variables, such as SingleRecord vs SINGLE_RECORD. This method also handles this conversion.
Args:
**kwargs: existing transform arguments
Returns:
dict: All the environment variables that should be set in the container
### Response:
def _get_container_environment(self, **kwargs):
"""Get all the Environment variables that will be passed to the container
Certain input fields such as BatchStrategy have different values for the API vs the Environment
variables, such as SingleRecord vs SINGLE_RECORD. This method also handles this conversion.
Args:
**kwargs: existing transform arguments
Returns:
dict: All the environment variables that should be set in the container
"""
environment = {}
environment.update(self.primary_container['Environment'])
environment['SAGEMAKER_BATCH'] = 'True'
if 'MaxPayloadInMB' in kwargs:
environment['SAGEMAKER_MAX_PAYLOAD_IN_MB'] = str(kwargs['MaxPayloadInMB'])
if 'BatchStrategy' in kwargs:
if kwargs['BatchStrategy'] == 'SingleRecord':
strategy_env_value = 'SINGLE_RECORD'
elif kwargs['BatchStrategy'] == 'MultiRecord':
strategy_env_value = 'MULTI_RECORD'
else:
raise ValueError('Invalid BatchStrategy, must be \'SingleRecord\' or \'MultiRecord\'')
environment['SAGEMAKER_BATCH_STRATEGY'] = strategy_env_value
# we only do 1 max concurrent transform in Local Mode
if 'MaxConcurrentTransforms' in kwargs and int(kwargs['MaxConcurrentTransforms']) > 1:
logger.warning('Local Mode only supports 1 ConcurrentTransform. Setting MaxConcurrentTransforms to 1')
environment['SAGEMAKER_MAX_CONCURRENT_TRANSFORMS'] = '1'
# if there were environment variables passed to the Transformer we will pass them to the
# container as well.
if 'Environment' in kwargs:
environment.update(kwargs['Environment'])
return environment |
def from_pyfile(self, filename):
"""
在一个 Python 文件中读取配置。
:param filename: 配置文件的文件名
:return: 如果读取成功,返回 ``True``,如果失败,会抛出错误异常
"""
d = types.ModuleType('config')
d.__file__ = filename
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
self.from_object(d)
return True | 在一个 Python 文件中读取配置。
:param filename: 配置文件的文件名
:return: 如果读取成功,返回 ``True``,如果失败,会抛出错误异常 | Below is the the instruction that describes the task:
### Input:
在一个 Python 文件中读取配置。
:param filename: 配置文件的文件名
:return: 如果读取成功,返回 ``True``,如果失败,会抛出错误异常
### Response:
def from_pyfile(self, filename):
"""
在一个 Python 文件中读取配置。
:param filename: 配置文件的文件名
:return: 如果读取成功,返回 ``True``,如果失败,会抛出错误异常
"""
d = types.ModuleType('config')
d.__file__ = filename
with open(filename) as config_file:
exec(compile(config_file.read(), filename, 'exec'), d.__dict__)
self.from_object(d)
return True |
def add_predicate(self, predicate_obj):
"""
Adds a predicate to the semantic layer
@type predicate_obj: L{Cpredicate}
@param predicate_obj: the predicate object
"""
if self.srl_layer is None:
self.srl_layer = Csrl()
self.root.append(self.srl_layer.get_node())
self.srl_layer.add_predicate(predicate_obj) | Adds a predicate to the semantic layer
@type predicate_obj: L{Cpredicate}
@param predicate_obj: the predicate object | Below is the the instruction that describes the task:
### Input:
Adds a predicate to the semantic layer
@type predicate_obj: L{Cpredicate}
@param predicate_obj: the predicate object
### Response:
def add_predicate(self, predicate_obj):
"""
Adds a predicate to the semantic layer
@type predicate_obj: L{Cpredicate}
@param predicate_obj: the predicate object
"""
if self.srl_layer is None:
self.srl_layer = Csrl()
self.root.append(self.srl_layer.get_node())
self.srl_layer.add_predicate(predicate_obj) |
def noEmptyNests(node):
'''recursively make sure that no dictionaries inside node contain empty children lists '''
if type(node)==list:
for i in node:
noEmptyNests(i)
if type(node)==dict:
for i in node.values():
noEmptyNests(i)
if node["children"] == []:
node.pop("children")
return node | recursively make sure that no dictionaries inside node contain empty children lists | Below is the the instruction that describes the task:
### Input:
recursively make sure that no dictionaries inside node contain empty children lists
### Response:
def noEmptyNests(node):
'''recursively make sure that no dictionaries inside node contain empty children lists '''
if type(node)==list:
for i in node:
noEmptyNests(i)
if type(node)==dict:
for i in node.values():
noEmptyNests(i)
if node["children"] == []:
node.pop("children")
return node |
def MAH(z, zi, Mi, **cosmo):
""" Calculate mass accretion history by looping function acc_rate
over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'
Parameters
----------
z : float / numpy array
Redshift to output MAH over. Note zi<z always
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
"""
# Ensure that z is a 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
# Create a full array
dMdt_array = np.empty_like(z)
Mz_array = np.empty_like(z)
for i_ind, zval in enumerate(z):
# Solve the accretion rate and halo mass at each redshift step
dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)
dMdt_array[i_ind] = dMdt
Mz_array[i_ind] = Mz
return(dMdt_array, Mz_array) | Calculate mass accretion history by looping function acc_rate
over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'
Parameters
----------
z : float / numpy array
Redshift to output MAH over. Note zi<z always
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z' | Below is the the instruction that describes the task:
### Input:
Calculate mass accretion history by looping function acc_rate
over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'
Parameters
----------
z : float / numpy array
Redshift to output MAH over. Note zi<z always
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
### Response:
def MAH(z, zi, Mi, **cosmo):
""" Calculate mass accretion history by looping function acc_rate
over redshift steps 'z' for halo of mass 'Mi' at redshift 'zi'
Parameters
----------
z : float / numpy array
Redshift to output MAH over. Note zi<z always
zi : float
Redshift
Mi : float
Halo mass at redshift 'zi'
cosmo : dict
Dictionary of cosmological parameters, similar in format to:
{'N_nu': 0,'Y_He': 0.24, 'h': 0.702, 'n': 0.963,'omega_M_0': 0.275,
'omega_b_0': 0.0458,'omega_lambda_0': 0.725,'omega_n_0': 0.0,
'sigma_8': 0.816, 't_0': 13.76, 'tau': 0.088,'z_reion': 10.6}
Returns
-------
(dMdt, Mz) : float / numpy arrays of equivalent size to 'z'
Accretion rate [Msol/yr], halo mass [Msol] at redshift 'z'
"""
# Ensure that z is a 1D NumPy array
z = np.array(z, ndmin=1, dtype=float)
# Create a full array
dMdt_array = np.empty_like(z)
Mz_array = np.empty_like(z)
for i_ind, zval in enumerate(z):
# Solve the accretion rate and halo mass at each redshift step
dMdt, Mz = acc_rate(zval, zi, Mi, **cosmo)
dMdt_array[i_ind] = dMdt
Mz_array[i_ind] = Mz
return(dMdt_array, Mz_array) |
def file_writelines_flush_sync(path, lines):
"""
Fill file at @path with @lines then flush all buffers
(Python and system buffers)
"""
fp = open(path, 'w')
try:
fp.writelines(lines)
flush_sync_file_object(fp)
finally:
fp.close() | Fill file at @path with @lines then flush all buffers
(Python and system buffers) | Below is the the instruction that describes the task:
### Input:
Fill file at @path with @lines then flush all buffers
(Python and system buffers)
### Response:
def file_writelines_flush_sync(path, lines):
"""
Fill file at @path with @lines then flush all buffers
(Python and system buffers)
"""
fp = open(path, 'w')
try:
fp.writelines(lines)
flush_sync_file_object(fp)
finally:
fp.close() |
def query_device_capacity(device_fd):
"""Create bitmath instances of the capacity of a system block device
Make one or more ioctl request to query the capacity of a block
device. Perform any processing required to compute the final capacity
value. Return the device capacity in bytes as a :class:`bitmath.Byte`
instance.
Thanks to the following resources for help figuring this out Linux/Mac
ioctl's for querying block device sizes:
* http://stackoverflow.com/a/12925285/263969
* http://stackoverflow.com/a/9764508/263969
:param file device_fd: A ``file`` object of the device to query the
capacity of (as in ``get_device_capacity(open("/dev/sda"))``).
:return: a bitmath :class:`bitmath.Byte` instance equivalent to the
capacity of the target device in bytes.
"""
if os_name() != 'posix':
raise NotImplementedError("'bitmath.query_device_capacity' is not supported on this platform: %s" % os_name())
s = os.stat(device_fd.name).st_mode
if not stat.S_ISBLK(s):
raise ValueError("The file descriptor provided is not of a device type")
# The keys of the ``ioctl_map`` dictionary correlate to possible
# values from the ``platform.system`` function.
ioctl_map = {
# ioctls for the "Linux" platform
"Linux": {
"request_params": [
# A list of parameters to calculate the block size.
#
# ( PARAM_NAME , FORMAT_CHAR , REQUEST_CODE )
("BLKGETSIZE64", "L", 0x80081272)
# Per <linux/fs.h>, the BLKGETSIZE64 request returns a
# 'u64' sized value. This is an unsigned 64 bit
# integer C type. This means to correctly "buffer" the
# result we need 64 bits, or 8 bytes, of memory.
#
# The struct module documentation include a reference
# chart relating formatting characters to native C
# Types. In this case, using the "native size", the
# table tells us:
#
# * Character 'L' - Unsigned Long C Type (u64) - Loads into a Python int type
#
# Confirm this character is right by running (on Linux):
#
# >>> import struct
# >>> print 8 == struct.calcsize('L')
#
# The result should be true as long as your kernel
# headers define BLKGETSIZE64 as a u64 type (please
# file a bug report at
# https://github.com/tbielawa/bitmath/issues/new if
# this does *not* work for you)
],
# func is how the final result is decided. Because the
# Linux BLKGETSIZE64 call returns the block device
# capacity in bytes as an integer value, no extra
# calculations are required. Simply return the value of
# BLKGETSIZE64.
"func": lambda x: x["BLKGETSIZE64"]
},
# ioctls for the "Darwin" (Mac OS X) platform
"Darwin": {
"request_params": [
# A list of parameters to calculate the block size.
#
# ( PARAM_NAME , FORMAT_CHAR , REQUEST_CODE )
("DKIOCGETBLOCKCOUNT", "L", 0x40086419),
# Per <sys/disk.h>: get media's block count - uint64_t
#
# As in the BLKGETSIZE64 example, an unsigned 64 bit
# integer will use the 'L' formatting character
("DKIOCGETBLOCKSIZE", "I", 0x40046418)
# Per <sys/disk.h>: get media's block size - uint32_t
#
# This request returns an unsigned 32 bit integer, or
# in other words: just a normal integer (or 'int' c
# type). That should require 4 bytes of space for
# buffering. According to the struct modules
# 'Formatting Characters' chart:
#
# * Character 'I' - Unsigned Int C Type (uint32_t) - Loads into a Python int type
],
# OS X doesn't have a direct equivalent to the Linux
# BLKGETSIZE64 request. Instead, we must request how many
# blocks (or "sectors") are on the disk, and the size (in
# bytes) of each block. Finally, multiply the two together
# to obtain capacity:
#
# n Block * y Byte
# capacity (bytes) = -------
# 1 Block
"func": lambda x: x["DKIOCGETBLOCKCOUNT"] * x["DKIOCGETBLOCKSIZE"]
# This expression simply accepts a dictionary ``x`` as a
# parameter, and then returns the result of multiplying
# the two named dictionary items together. In this case,
# that means multiplying ``DKIOCGETBLOCKCOUNT``, the total
# number of blocks, by ``DKIOCGETBLOCKSIZE``, the size of
# each block in bytes.
}
}
platform_params = ioctl_map[platform.system()]
results = {}
for req_name, fmt, request_code in platform_params['request_params']:
# Read the systems native size (in bytes) of this format type.
buffer_size = struct.calcsize(fmt)
# Construct a buffer to store the ioctl result in
buffer = ' ' * buffer_size
# This code has been ran on only a few test systems. If it's
# appropriate, maybe in the future we'll add try/except
# conditions for some possible errors. Really only for cases
# where it would add value to override the default exception
# message string.
buffer = fcntl.ioctl(device_fd.fileno(), request_code, buffer)
# Unpack the raw result from the ioctl call into a familiar
# python data type according to the ``fmt`` rules.
result = struct.unpack(fmt, buffer)[0]
# Add the new result to our collection
results[req_name] = result
return Byte(platform_params['func'](results)) | Create bitmath instances of the capacity of a system block device
Make one or more ioctl request to query the capacity of a block
device. Perform any processing required to compute the final capacity
value. Return the device capacity in bytes as a :class:`bitmath.Byte`
instance.
Thanks to the following resources for help figuring this out Linux/Mac
ioctl's for querying block device sizes:
* http://stackoverflow.com/a/12925285/263969
* http://stackoverflow.com/a/9764508/263969
:param file device_fd: A ``file`` object of the device to query the
capacity of (as in ``get_device_capacity(open("/dev/sda"))``).
:return: a bitmath :class:`bitmath.Byte` instance equivalent to the
capacity of the target device in bytes. | Below is the the instruction that describes the task:
### Input:
Create bitmath instances of the capacity of a system block device
Make one or more ioctl request to query the capacity of a block
device. Perform any processing required to compute the final capacity
value. Return the device capacity in bytes as a :class:`bitmath.Byte`
instance.
Thanks to the following resources for help figuring this out Linux/Mac
ioctl's for querying block device sizes:
* http://stackoverflow.com/a/12925285/263969
* http://stackoverflow.com/a/9764508/263969
:param file device_fd: A ``file`` object of the device to query the
capacity of (as in ``get_device_capacity(open("/dev/sda"))``).
:return: a bitmath :class:`bitmath.Byte` instance equivalent to the
capacity of the target device in bytes.
### Response:
def query_device_capacity(device_fd):
    """Create bitmath instances of the capacity of a system block device
    Make one or more ioctl request to query the capacity of a block
    device. Perform any processing required to compute the final capacity
    value. Return the device capacity in bytes as a :class:`bitmath.Byte`
    instance.
    Thanks to the following resources for help figuring this out Linux/Mac
    ioctl's for querying block device sizes:
    * http://stackoverflow.com/a/12925285/263969
    * http://stackoverflow.com/a/9764508/263969
    :param file device_fd: A ``file`` object of the device to query the
        capacity of (as in ``get_device_capacity(open("/dev/sda"))``).
    :return: a bitmath :class:`bitmath.Byte` instance equivalent to the
        capacity of the target device in bytes.
    :raises NotImplementedError: on non-posix platforms.
    :raises ValueError: if ``device_fd`` does not refer to a block device.
    """
    if os_name() != 'posix':
        raise NotImplementedError("'bitmath.query_device_capacity' is not supported on this platform: %s" % os_name())
    s = os.stat(device_fd.name).st_mode
    if not stat.S_ISBLK(s):
        raise ValueError("The file descriptor provided is not of a device type")
    # The keys of ``ioctl_map`` correlate to possible values of
    # ``platform.system()``. Each entry lists the ioctl requests to issue
    # as (PARAM_NAME, STRUCT_FORMAT_CHAR, REQUEST_CODE) tuples, plus a
    # ``func`` that combines the raw results into a final byte count.
    ioctl_map = {
        # ioctls for the "Linux" platform
        "Linux": {
            "request_params": [
                # Per <linux/fs.h>, BLKGETSIZE64 returns the device size
                # in bytes as a u64. Format character 'L' (native
                # unsigned long) buffers the 8 bytes that requires on a
                # 64-bit kernel; confirm with struct.calcsize('L') == 8.
                ("BLKGETSIZE64", "L", 0x80081272)
            ],
            # BLKGETSIZE64 already reports a byte count, so no extra
            # post-processing is needed.
            "func": lambda x: x["BLKGETSIZE64"]
        },
        # ioctls for the "Darwin" (Mac OS X) platform
        "Darwin": {
            "request_params": [
                # Per <sys/disk.h>: get media's block count - uint64_t
                ("DKIOCGETBLOCKCOUNT", "L", 0x40086419),
                # Per <sys/disk.h>: get media's block size - uint32_t
                # ('I' buffers the 4 bytes of an unsigned int)
                ("DKIOCGETBLOCKSIZE", "I", 0x40046418)
            ],
            # OS X has no direct BLKGETSIZE64 equivalent, so capacity is
            # computed as: block count * block size (bytes per block).
            "func": lambda x: x["DKIOCGETBLOCKCOUNT"] * x["DKIOCGETBLOCKSIZE"]
        }
    }
    platform_params = ioctl_map[platform.system()]
    results = {}
    for req_name, fmt, request_code in platform_params['request_params']:
        # Size (in bytes) the kernel will write back for this format type.
        buffer_size = struct.calcsize(fmt)
        # BUGFIX: the ioctl buffer must be bytes-like. On Python 3,
        # passing a str buffer to fcntl.ioctl() raises TypeError, so
        # allocate a bytes buffer explicitly (also valid on Python 2).
        buffer = b' ' * buffer_size
        buffer = fcntl.ioctl(device_fd.fileno(), request_code, buffer)
        # Unpack the raw ioctl result into a Python int per ``fmt``.
        results[req_name] = struct.unpack(fmt, buffer)[0]
    return Byte(platform_params['func'](results))
def get_boundaries(self):
    """
    Return the pair ``(upper, lower)`` of boundary sets: the most
    general positive boundaries and the most specific lower
    boundaries. Boundary values are computed lazily on first access.
    """
    if not self.__values_computed:
        self.__compute_values()
    upper_set = set(self.upper)
    lower_set = set(self.lower)
    return upper_set, lower_set
[upper, lower] representing the most general positive
boundaries and the most specific lower boundaries | Below is the the instruction that describes the task:
### Input:
Returns just the upper and lower boundaries
[upper, lower] representing the most general positive
boundaries and the most specific lower boundaries
### Response:
def get_boundaries(self):
"""
Returns just the upper and lower boundaries
[upper, lower] representing the most general positive
boundaries and the most specific lower boundaries
"""
if not self.__values_computed:
self.__compute_values()
return set(self.upper), set(self.lower) |
def _create_joint(fwdbwd, func, wrt, input_derivative):
    """Create a user-friendly gradient function.
    By default, gradient functions expect the stack to be passed to them
    explicitly. This function modifies the function so that the stack doesn't
    need to be passed and gets initialized in the function body instead.
    For consistency, gradient functions always return a tuple, even if the
    gradient of only one input was required. We unpack the tuple if it is of
    length one.
    Args:
      fwdbwd: An AST. The function definition of the joint primal and adjoint.
      func: A function handle. The original function that was differentiated.
      wrt: A tuple of integers. The arguments with respect to which we differentiated.
    Returns:
      The function definition of the new function.
    """
    # Unwrap a single-element tuple return so callers get a bare value.
    ret_stmt = fwdbwd.body[-1]
    if len(ret_stmt.value.elts) == 1:
        ret_stmt.value = ret_stmt.value.elts[0]
    # Build a commented statement that initializes the tape, and put it
    # at the very top of the function body.
    stack_name = fwdbwd.args.args[0].id
    stack_init = comments.add_comment(
        quoting.quote('%s = tangent.Stack()' % stack_name),
        'Initialize the tape')
    fwdbwd.body = [stack_init] + fwdbwd.body
    # Remember the gradient argument's name, then restore the original
    # function's argument list and give the joint a descriptive name.
    grad_name = fwdbwd.args.args[1].id
    fwdbwd.args = quoting.parse_function(func).body[0].args
    fwdbwd.name = naming.joint_name(func, wrt)
    # Re-expose the initial gradient as a keyword argument.
    fwdbwd = ast_.append_args(fwdbwd, [grad_name])
    if input_derivative == INPUT_DERIVATIVE.DefaultOne:
        fwdbwd.args.defaults.append(quoting.quote('1.0'))
    return fwdbwd
By default, gradient functions expect the stack to be passed to them
explicitly. This function modifies the function so that the stack doesn't
need to be passed and gets initialized in the function body instead.
For consistency, gradient functions always return a tuple, even if the
gradient of only one input was required. We unpack the tuple if it is of
length one.
Args:
fwdbwd: An AST. The function definition of the joint primal and adjoint.
func: A function handle. The original function that was differentiated.
wrt: A tuple of integers. The arguments with respect to which we differentiated.
Returns:
The function definition of the new function. | Below is the the instruction that describes the task:
### Input:
Create a user-friendly gradient function.
By default, gradient functions expect the stack to be passed to them
explicitly. This function modifies the function so that the stack doesn't
need to be passed and gets initialized in the function body instead.
For consistency, gradient functions always return a tuple, even if the
gradient of only one input was required. We unpack the tuple if it is of
length one.
Args:
fwdbwd: An AST. The function definition of the joint primal and adjoint.
func: A function handle. The original function that was differentiated.
wrt: A tuple of integers. The arguments with respect to which we differentiated.
Returns:
The function definition of the new function.
### Response:
def _create_joint(fwdbwd, func, wrt, input_derivative):
"""Create a user-friendly gradient function.
By default, gradient functions expect the stack to be passed to them
explicitly. This function modifies the function so that the stack doesn't
need to be passed and gets initialized in the function body instead.
For consistency, gradient functions always return a tuple, even if the
gradient of only one input was required. We unpack the tuple if it is of
length one.
Args:
fwdbwd: An AST. The function definition of the joint primal and adjoint.
func: A function handle. The original function that was differentiated.
wrt: A tuple of integers. The arguments with respect to which we differentiated.
Returns:
The function definition of the new function.
"""
# Correct return to be a non-tuple if there's only one element
retval = fwdbwd.body[-1]
if len(retval.value.elts) == 1:
retval.value = retval.value.elts[0]
# Make a stack init statement
init_stack = quoting.quote('%s = tangent.Stack()' % fwdbwd.args.args[0].id)
init_stack = comments.add_comment(init_stack, 'Initialize the tape')
# Prepend the stack init to the top of the function
fwdbwd.body = [init_stack] + fwdbwd.body
# Replace the function arguments with the original ones
grad_name = fwdbwd.args.args[1].id
fwdbwd.args = quoting.parse_function(func).body[0].args
# Give the function a nice name
fwdbwd.name = naming.joint_name(func, wrt)
# Allow the initial gradient to be passed as a keyword argument
fwdbwd = ast_.append_args(fwdbwd, [grad_name])
if input_derivative == INPUT_DERIVATIVE.DefaultOne:
fwdbwd.args.defaults.append(quoting.quote('1.0'))
return fwdbwd |
def createBlocksFromHTML(cls, html, encoding='utf-8'):
    '''
    createBlocksFromHTML - Parse #html# and return the list of "blocks"
    found at the root level (detached from any document).
    @return list< str/AdvancedTag > - List of blocks created. May be strings (text nodes) or AdvancedTag (tags)
    NOTE:
        Results may be checked by:
            issubclass(block.__class__, AdvancedTag)
        If True, block is a tag, otherwise, it is a text node
    '''
    htmlParser = cls(encoding=encoding)
    htmlParser.parseStr(html)
    root = htmlParser.getRoot()
    # Detach the root from the parsed document before handing back its
    # child blocks.
    root.remove()
    return root.blocks
a list of "blocks" added (text and nodes).
@return list< str/AdvancedTag > - List of blocks created. May be strings (text nodes) or AdvancedTag (tags)
NOTE:
Results may be checked by:
issubclass(block.__class__, AdvancedTag)
If True, block is a tag, otherwise, it is a text node | Below is the the instruction that describes the task:
### Input:
createBlocksFromHTML - Returns the root level node (unless multiple nodes), and
a list of "blocks" added (text and nodes).
@return list< str/AdvancedTag > - List of blocks created. May be strings (text nodes) or AdvancedTag (tags)
NOTE:
Results may be checked by:
issubclass(block.__class__, AdvancedTag)
If True, block is a tag, otherwise, it is a text node
### Response:
def createBlocksFromHTML(cls, html, encoding='utf-8'):
'''
createBlocksFromHTML - Returns the root level node (unless multiple nodes), and
a list of "blocks" added (text and nodes).
@return list< str/AdvancedTag > - List of blocks created. May be strings (text nodes) or AdvancedTag (tags)
NOTE:
Results may be checked by:
issubclass(block.__class__, AdvancedTag)
If True, block is a tag, otherwise, it is a text node
'''
parser = cls(encoding=encoding)
parser.parseStr(html)
rootNode = parser.getRoot()
rootNode.remove()
return rootNode.blocks |
def bilinear(x, W, y, input_size, seq_len, batch_size, num_outputs=1, bias_x=False, bias_y=False):
    """Do xWy
    Parameters
    ----------
    x : NDArray
        (input_size x seq_len) x batch_size
    W : NDArray
        (num_outputs x ny) x nx
    y : NDArray
        (input_size x seq_len) x batch_size
    input_size : int
        input dimension
    seq_len : int
        sequence length
    batch_size : int
        batch size
    num_outputs : int
        number of outputs
    bias_x : bool
        whether concat bias vector to input x
    bias_y : bool
        whether concat bias vector to input y
    Returns
    -------
    output : NDArray
        [seq_len_y x seq_len_x if output_size == 1 else seq_len_y x num_outputs x seq_len_x] x batch_size
    """
    # Append a constant row of ones so the bilinear form can learn
    # affine (bias) terms for x and/or y.
    if bias_x:
        x = nd.concat(x, nd.ones((1, seq_len, batch_size)), dim=0)
    if bias_y:
        y = nd.concat(y, nd.ones((1, seq_len, batch_size)), dim=0)
    # The bool flags act as 0/1: each enabled bias grows its dim by one.
    nx, ny = input_size + bias_x, input_size + bias_y
    # W: (num_outputs x ny) x nx
    lin = nd.dot(W, x)
    if num_outputs > 1:
        # Fold the num_outputs axis into the sequence axis using a
        # Fortran-order (column-major) reshape to preserve element order.
        lin = reshape_fortran(lin, (ny, num_outputs * seq_len, batch_size))
    y = y.transpose([2, 1, 0])  # May cause performance issues
    lin = lin.transpose([2, 1, 0])
    # Batched matrix product over the batch axis: lin @ y^T.
    blin = nd.batch_dot(lin, y, transpose_b=True)
    blin = blin.transpose([2, 1, 0])
    if num_outputs > 1:
        # Split the folded axis back out into (seq_len, num_outputs, ...).
        blin = reshape_fortran(blin, (seq_len, num_outputs, seq_len, batch_size))
    return blin
Parameters
----------
x : NDArray
(input_size x seq_len) x batch_size
W : NDArray
(num_outputs x ny) x nx
y : NDArray
(input_size x seq_len) x batch_size
input_size : int
input dimension
seq_len : int
sequence length
batch_size : int
batch size
num_outputs : int
number of outputs
bias_x : bool
whether concat bias vector to input x
bias_y : bool
whether concat bias vector to input y
Returns
-------
output : NDArray
[seq_len_y x seq_len_x if output_size == 1 else seq_len_y x num_outputs x seq_len_x] x batch_size | Below is the the instruction that describes the task:
### Input:
Do xWy
Parameters
----------
x : NDArray
(input_size x seq_len) x batch_size
W : NDArray
(num_outputs x ny) x nx
y : NDArray
(input_size x seq_len) x batch_size
input_size : int
input dimension
seq_len : int
sequence length
batch_size : int
batch size
num_outputs : int
number of outputs
bias_x : bool
whether concat bias vector to input x
bias_y : bool
whether concat bias vector to input y
Returns
-------
output : NDArray
[seq_len_y x seq_len_x if output_size == 1 else seq_len_y x num_outputs x seq_len_x] x batch_size
### Response:
def bilinear(x, W, y, input_size, seq_len, batch_size, num_outputs=1, bias_x=False, bias_y=False):
"""Do xWy
Parameters
----------
x : NDArray
(input_size x seq_len) x batch_size
W : NDArray
(num_outputs x ny) x nx
y : NDArray
(input_size x seq_len) x batch_size
input_size : int
input dimension
seq_len : int
sequence length
batch_size : int
batch size
num_outputs : int
number of outputs
bias_x : bool
whether concat bias vector to input x
bias_y : bool
whether concat bias vector to input y
Returns
-------
output : NDArray
[seq_len_y x seq_len_x if output_size == 1 else seq_len_y x num_outputs x seq_len_x] x batch_size
"""
if bias_x:
x = nd.concat(x, nd.ones((1, seq_len, batch_size)), dim=0)
if bias_y:
y = nd.concat(y, nd.ones((1, seq_len, batch_size)), dim=0)
nx, ny = input_size + bias_x, input_size + bias_y
# W: (num_outputs x ny) x nx
lin = nd.dot(W, x)
if num_outputs > 1:
lin = reshape_fortran(lin, (ny, num_outputs * seq_len, batch_size))
y = y.transpose([2, 1, 0]) # May cause performance issues
lin = lin.transpose([2, 1, 0])
blin = nd.batch_dot(lin, y, transpose_b=True)
blin = blin.transpose([2, 1, 0])
if num_outputs > 1:
blin = reshape_fortran(blin, (seq_len, num_outputs, seq_len, batch_size))
return blin |
def format_row(self, row, key, color):
    """Format one cell of ``row`` identified by ``key``: booleans and
    None render as a presence marker ('+' / ''), numbers are formatted
    using ``self.floatfmt`` (integers without decimals), and the value
    is wrapped in ``color`` when the matching ``<key>_best`` flag is set.
    """
    value = row[key]
    # Booleans (and None) render as a simple presence marker.
    if isinstance(value, bool) or value is None:
        return '+' if value else ''
    # Non-numeric values pass through untouched.
    if not isinstance(value, Number):
        return value
    # Integer-valued numbers skip the float format spec.
    if float(value).is_integer():
        template = '{}'
    else:
        template = '{:%s}' % self.floatfmt
    # Highlight the cell when this row carries a '<key>_best' marker.
    best_flag = key + '_best'
    if best_flag in row and row[best_flag]:
        template = color + template + Ansi.ENDC.value
    return template.format(value)
points and color if applicable). | Below is the instruction that describes the task:
### Input:
For a given row from the table, format it (i.e. floating
points and color if applicable).
### Response:
def format_row(self, row, key, color):
"""For a given row from the table, format it (i.e. floating
points and color if applicable).
"""
value = row[key]
if isinstance(value, bool) or value is None:
return '+' if value else ''
if not isinstance(value, Number):
return value
# determine if integer value
is_integer = float(value).is_integer()
template = '{}' if is_integer else '{:' + self.floatfmt + '}'
# if numeric, there could be a 'best' key
key_best = key + '_best'
if (key_best in row) and row[key_best]:
template = color + template + Ansi.ENDC.value
return template.format(value) |
def assert_almost_eq(arr_test, arr_target, thresh=1E-11):
    r"""
    Assert that two arrays are elementwise almost equal, raising an
    AssertionError that pinpoints the failing positions otherwise.
    Args:
        arr_test (ndarray or list):
        arr_target (ndarray or list):
        thresh (scalar or ndarray or list):
    """
    # Assertions can be globally disabled for production runs.
    if util_arg.NO_ASSERTS:
        return
    import utool as ut
    test_arr = np.array(arr_test)
    target_arr = np.array(arr_target)
    passed, error = ut.almost_eq(test_arr, target_arr, thresh, ret_error=True)
    if not np.all(passed):
        bad_xs = np.where(np.logical_not(passed))
        lines = [
            'FAILED ASSERT ALMOST EQUAL',
            ' * failed_xs = %r' % (bad_xs,),
            ' * failed_error = %r' % (error.take(bad_xs),),
            ' * failed_arr_test = %r' % (test_arr.take(bad_xs),),
            ' * failed_arr_target = %r' % (target_arr.take(bad_xs),),
        ]
        raise AssertionError('\n'.join(lines))
    return error
Args:
arr_test (ndarray or list):
arr_target (ndarray or list):
thresh (scalar or ndarray or list): | Below is the the instruction that describes the task:
### Input:
r"""
Args:
arr_test (ndarray or list):
arr_target (ndarray or list):
thresh (scalar or ndarray or list):
### Response:
def assert_almost_eq(arr_test, arr_target, thresh=1E-11):
r"""
Args:
arr_test (ndarray or list):
arr_target (ndarray or list):
thresh (scalar or ndarray or list):
"""
if util_arg.NO_ASSERTS:
return
import utool as ut
arr1 = np.array(arr_test)
arr2 = np.array(arr_target)
passed, error = ut.almost_eq(arr1, arr2, thresh, ret_error=True)
if not np.all(passed):
failed_xs = np.where(np.logical_not(passed))
failed_error = error.take(failed_xs)
failed_arr_test = arr1.take(failed_xs)
failed_arr_target = arr2.take(failed_xs)
msg_list = [
'FAILED ASSERT ALMOST EQUAL',
' * failed_xs = %r' % (failed_xs,),
' * failed_error = %r' % (failed_error,),
' * failed_arr_test = %r' % (failed_arr_test,),
' * failed_arr_target = %r' % (failed_arr_target,),
]
msg = '\n'.join(msg_list)
raise AssertionError(msg)
return error |
def split_tag(section):
    """
    Split the JSDoc tag text (everything following the @) at the first
    whitespace.
    :param section: tag text, e.g. ``"param name description"``.
    :returns: a ``(tagname, body)`` tuple of stripped strings; ``body``
        is ``''`` when the section contains no whitespace.
    """
    # Raw string avoids the invalid-escape DeprecationWarning, and the
    # keyword form of maxsplit stays compatible with Python 3.13+.
    parts = re.split(r'\s+', section, maxsplit=1)
    tag = parts[0]
    # Explicit branch instead of the fragile ``and/or`` ternary idiom.
    body = parts[1] if len(parts) > 1 else ''
    return tag.strip(), body.strip()
whitespace. Returns a tuple of (tagname, body). | Below is the instruction that describes the task:
### Input:
Split the JSDoc tag text (everything following the @) at the first
whitespace. Returns a tuple of (tagname, body).
### Response:
def split_tag(section):
"""
Split the JSDoc tag text (everything following the @) at the first
whitespace. Returns a tuple of (tagname, body).
"""
splitval = re.split('\s+', section, 1)
tag, body = len(splitval) > 1 and splitval or (splitval[0], '')
return tag.strip(), body.strip() |
def write_atom(dest, entries, author, title, address, updated=None, link=None,
               language="en"):
    """
    Write an atom feed to a file.
    Parameters
    ----------
    dest : str
        Destination file path, or a file-like object
    entries : list of FeedEntry
        Feed entries.
    author : str
        Author of the feed.
    title : str
        Title for the feed.
    address : str
        Address (domain name or email) to be used in building unique IDs.
    updated : datetime, optional
        Time stamp for the feed. If not given, take from the newest entry.
    link : str, optional
        Link for the feed.
    language : str, optional
        Language of the feed. Default is 'en'.
    """
    # Default the feed timestamp: newest entry, or "now" for an empty feed.
    if updated is None:
        if entries:
            updated = max(entry.updated for entry in entries)
        else:
            updated = datetime.datetime.utcnow()
    root = etree.Element(ATOM_NS + 'feed')
    # id (obligatory)
    el = etree.Element(ATOM_NS + 'id')
    el.text = _get_id(address, None, ["feed", author, title])
    root.append(el)
    # author (obligatory)
    el = etree.Element(ATOM_NS + 'author')
    el2 = etree.Element(ATOM_NS + 'name')
    el2.text = author
    el.append(el2)
    root.append(el)
    # title (obligatory)
    el = etree.Element(ATOM_NS + 'title')
    el.attrib[XML_NS + 'lang'] = language
    el.text = title
    root.append(el)
    # updated (obligatory) -- serialized as an RFC 3339 'Z' timestamp;
    # NOTE(review): assumes ``updated`` is a UTC datetime -- confirm callers.
    el = etree.Element(ATOM_NS + 'updated')
    el.text = updated.strftime('%Y-%m-%dT%H:%M:%SZ')
    root.append(el)
    # link (optional)
    if link is not None:
        el = etree.Element(ATOM_NS + 'link')
        el.attrib[ATOM_NS + 'href'] = link
        root.append(el)
    # entries
    for entry in entries:
        root.append(entry.get_atom(address, language))
    tree = etree.ElementTree(root)
    def write(f):
        # Old ElementTree versions (Python < 2.7) need a serialization
        # workaround provided by _etree_py26_write.
        if sys.version_info[:2] < (2, 7):
            _etree_py26_write(f, tree)
        else:
            tree.write(f, xml_declaration=True, default_namespace=ATOM_NS[1:-1],
                       encoding=str('utf-8'))
    # Accept either an open file-like object or a filesystem path.
    if hasattr(dest, 'write'):
        write(dest)
    else:
        with util.long_path_open(dest, 'wb') as f:
            write(f)
Parameters
----------
dest : str
Destination file path, or a file-like object
entries : list of FeedEntry
Feed entries.
author : str
Author of the feed.
title : str
Title for the feed.
address : str
Address (domain name or email) to be used in building unique IDs.
updated : datetime, optional
Time stamp for the feed. If not given, take from the newest entry.
link : str, optional
Link for the feed.
language : str, optional
Language of the feed. Default is 'en'. | Below is the the instruction that describes the task:
### Input:
Write an atom feed to a file.
Parameters
----------
dest : str
Destination file path, or a file-like object
entries : list of FeedEntry
Feed entries.
author : str
Author of the feed.
title : str
Title for the feed.
address : str
Address (domain name or email) to be used in building unique IDs.
updated : datetime, optional
Time stamp for the feed. If not given, take from the newest entry.
link : str, optional
Link for the feed.
language : str, optional
Language of the feed. Default is 'en'.
### Response:
def write_atom(dest, entries, author, title, address, updated=None, link=None,
language="en"):
"""
Write an atom feed to a file.
Parameters
----------
dest : str
Destination file path, or a file-like object
entries : list of FeedEntry
Feed entries.
author : str
Author of the feed.
title : str
Title for the feed.
address : str
Address (domain name or email) to be used in building unique IDs.
updated : datetime, optional
Time stamp for the feed. If not given, take from the newest entry.
link : str, optional
Link for the feed.
language : str, optional
Language of the feed. Default is 'en'.
"""
if updated is None:
if entries:
updated = max(entry.updated for entry in entries)
else:
updated = datetime.datetime.utcnow()
root = etree.Element(ATOM_NS + 'feed')
# id (obligatory)
el = etree.Element(ATOM_NS + 'id')
el.text = _get_id(address, None, ["feed", author, title])
root.append(el)
# author (obligatory)
el = etree.Element(ATOM_NS + 'author')
el2 = etree.Element(ATOM_NS + 'name')
el2.text = author
el.append(el2)
root.append(el)
# title (obligatory)
el = etree.Element(ATOM_NS + 'title')
el.attrib[XML_NS + 'lang'] = language
el.text = title
root.append(el)
# updated (obligatory)
el = etree.Element(ATOM_NS + 'updated')
el.text = updated.strftime('%Y-%m-%dT%H:%M:%SZ')
root.append(el)
# link
if link is not None:
el = etree.Element(ATOM_NS + 'link')
el.attrib[ATOM_NS + 'href'] = link
root.append(el)
# entries
for entry in entries:
root.append(entry.get_atom(address, language))
tree = etree.ElementTree(root)
def write(f):
if sys.version_info[:2] < (2, 7):
_etree_py26_write(f, tree)
else:
tree.write(f, xml_declaration=True, default_namespace=ATOM_NS[1:-1],
encoding=str('utf-8'))
if hasattr(dest, 'write'):
write(dest)
else:
with util.long_path_open(dest, 'wb') as f:
write(f) |
def sys_maxfd():
    """
    Return the maximum file descriptor limit as an int.
    Reads the soft RLIMIT_NOFILE; when it cannot be determined (or is
    unlimited) the value falls back to ``sys_maxfd.fallback_maxfd``, so a
    useful int is always returned.
    """
    maxfd = None
    try:
        maxfd = int(resource.getrlimit(resource.RLIMIT_NOFILE)[0])
        if maxfd == resource.RLIM_INFINITY:  # pragma: no cover
            maxfd = None
    # Narrowed from a bare ``except``: only the errors getrlimit/int can
    # realistically raise here; anything else should propagate instead
    # of being silently swallowed.
    except (OSError, ValueError, AttributeError):
        maxfd = None
    if maxfd is None:
        maxfd = sys_maxfd.fallback_maxfd
    return maxfd
return a useful int value. | Below is the the instruction that describes the task:
### Input:
Returns the maximum file descriptor limit. This is guaranteed to
return a useful int value.
### Response:
def sys_maxfd():
"""
Returns the maximum file descriptor limit. This is guaranteed to
return a useful int value.
"""
maxfd = None
try:
maxfd = int(resource.getrlimit(resource.RLIMIT_NOFILE)[0])
if maxfd == resource.RLIM_INFINITY: # pragma: no cover
maxfd = None
except: pass
if maxfd is None:
maxfd = sys_maxfd.fallback_maxfd
return maxfd |
def dataCollector( self ):
    """
    Returns the method or function used to collect mime data for a list
    of tablewidgetitems, or None when no collector is set. The callable,
    if set, should accept a single list of items and return a QMimeData
    instance. A dead weak reference is cleared as a side effect.
    :usage      |from projexui.qt.QtCore import QMimeData, QWidget
                |from projexui.widgets.xtablewidget import XTableWidget
                |
                |def collectData(table, items):
                |   data = QMimeData()
                |   data.setText(','.join(map(lambda x: x.text(0), items)))
                |   return data
                |
                |class MyWidget(QWidget):
                |   def __init__( self, parent ):
                |       super(MyWidget, self).__init__(parent)
                |
                |       self._table = XTableWidget(self)
                |       self._table.setDataCollector(collectData)
    :return     <function> || <method> || None
    """
    ref = self._dataCollectorRef
    if not ref:
        return None
    func = ref()
    if not func:
        # The referent was garbage collected; drop the stale reference.
        self._dataCollectorRef = None
    return func
for a list of tablewidgetitems. If set, the method should accept a \
single argument for a list of items and then return a QMimeData \
instance.
:usage |from projexui.qt.QtCore import QMimeData, QWidget
|from projexui.widgets.xtablewidget import XTableWidget
|
|def collectData(table, items):
| data = QMimeData()
| data.setText(','.join(map(lambda x: x.text(0), items)))
| return data
|
|class MyWidget(QWidget):
| def __init__( self, parent ):
| super(MyWidget, self).__init__(parent)
|
| self._table = XTableWidget(self)
| self._table.setDataCollector(collectData)
:return <function> || <method> || None | Below is the the instruction that describes the task:
### Input:
Returns a method or function that will be used to collect mime data \
for a list of tablewidgetitems. If set, the method should accept a \
single argument for a list of items and then return a QMimeData \
instance.
:usage |from projexui.qt.QtCore import QMimeData, QWidget
|from projexui.widgets.xtablewidget import XTableWidget
|
|def collectData(table, items):
| data = QMimeData()
| data.setText(','.join(map(lambda x: x.text(0), items)))
| return data
|
|class MyWidget(QWidget):
| def __init__( self, parent ):
| super(MyWidget, self).__init__(parent)
|
| self._table = XTableWidget(self)
| self._table.setDataCollector(collectData)
:return <function> || <method> || None
### Response:
def dataCollector( self ):
"""
Returns a method or function that will be used to collect mime data \
for a list of tablewidgetitems. If set, the method should accept a \
single argument for a list of items and then return a QMimeData \
instance.
:usage |from projexui.qt.QtCore import QMimeData, QWidget
|from projexui.widgets.xtablewidget import XTableWidget
|
|def collectData(table, items):
| data = QMimeData()
| data.setText(','.join(map(lambda x: x.text(0), items)))
| return data
|
|class MyWidget(QWidget):
| def __init__( self, parent ):
| super(MyWidget, self).__init__(parent)
|
| self._table = XTableWidget(self)
| self._table.setDataCollector(collectData)
:return <function> || <method> || None
"""
func = None
if ( self._dataCollectorRef ):
func = self._dataCollectorRef()
if ( not func ):
self._dataCollectorRef = None
return func |
def get_class_from_file(_file, saltclass_path):
    '''
    Converts the absolute path to a saltclass file back to the dotted notation.
    .. code-block:: python
        print(get_class_from_file('/srv/saltclass/classes/services/nginx/init.yml', '/srv/saltclass'))
        # services.nginx
    :param str _file: Absolute path to file
    :param str saltclass_path: Root to saltclass storage
    :return: class name in dotted notation
    :rtype: str
    '''
    # Strip the '<saltclass_path>/classes/' prefix and the '.yml' suffix
    # in one slice, then turn path separators into dots.
    prefix_len = len(os.path.join(saltclass_path, 'classes')) + len(os.sep)
    dotted = _file[prefix_len:-len('.yml')].replace(os.sep, '.')
    # A trailing '.init' names the directory itself, so drop it.
    if dotted.endswith('.init'):
        dotted = dotted[:-len('.init')]
    return dotted
return _file | Converts the absolute path to a saltclass file back to the dotted notation.
.. code-block:: python
print(get_class_from_file('/srv/saltclass/classes/services/nginx/init.yml', '/srv/saltclass'))
# services.nginx
:param str _file: Absolute path to file
:param str saltclass_path: Root to saltclass storage
:return: class name in dotted notation
:rtype: str | Below is the the instruction that describes the task:
### Input:
Converts the absolute path to a saltclass file back to the dotted notation.
.. code-block:: python
print(get_class_from_file('/srv/saltclass/classes/services/nginx/init.yml', '/srv/saltclass'))
# services.nginx
:param str _file: Absolute path to file
:param str saltclass_path: Root to saltclass storage
:return: class name in dotted notation
:rtype: str
### Response:
def get_class_from_file(_file, saltclass_path):
'''
Converts the absolute path to a saltclass file back to the dotted notation.
.. code-block:: python
print(get_class_from_file('/srv/saltclass/classes/services/nginx/init.yml', '/srv/saltclass'))
# services.nginx
:param str _file: Absolute path to file
:param str saltclass_path: Root to saltclass storage
:return: class name in dotted notation
:rtype: str
'''
# remove classes path prefix
_file = _file[len(os.path.join(saltclass_path, 'classes')) + len(os.sep):]
# remove .yml extension
_file = _file[:-4]
# revert to dotted notation
_file = _file.replace(os.sep, '.')
# remove tailing init
if _file.endswith('.init'):
_file = _file[:-5]
return _file |
def add_rules(self, rules: _RuleList) -> None:
    """Appends new rules to the router.
    :arg rules: a list of Rule instances (or tuples of arguments, which are
        passed to Rule constructor).
    """
    for spec in rules:
        if not isinstance(spec, (tuple, list)):
            # Already a Rule instance; use it as-is.
            rule = spec
        else:
            assert len(spec) in (2, 3, 4)
            if isinstance(spec[0], basestring_type):
                # A bare string matcher is shorthand for PathMatches.
                rule = Rule(PathMatches(spec[0]), *spec[1:])
            else:
                rule = Rule(*spec)
        self.rules.append(self.process_rule(rule))
:arg rules: a list of Rule instances (or tuples of arguments, which are
passed to Rule constructor). | Below is the the instruction that describes the task:
### Input:
Appends new rules to the router.
:arg rules: a list of Rule instances (or tuples of arguments, which are
passed to Rule constructor).
### Response:
def add_rules(self, rules: _RuleList) -> None:
"""Appends new rules to the router.
:arg rules: a list of Rule instances (or tuples of arguments, which are
passed to Rule constructor).
"""
for rule in rules:
if isinstance(rule, (tuple, list)):
assert len(rule) in (2, 3, 4)
if isinstance(rule[0], basestring_type):
rule = Rule(PathMatches(rule[0]), *rule[1:])
else:
rule = Rule(*rule)
self.rules.append(self.process_rule(rule)) |
def send(messages=None, conf=None, parse_mode=None, disable_web_page_preview=False, files=None, images=None,
captions=None, locations=None, timeout=30):
"""Send data over Telegram. All arguments are optional.
Always use this function with explicit keyword arguments. So
`send(messages=["Hello!"])` instead of `send(["Hello!"])` as the latter
will *break* when I change the order of the arguments.
The `file` type is the [file object][] returned by the `open()` function.
To send an image/file you open it in binary mode:
``` python
import telegram_send
with open("image.jpg", "rb") as f:
telegram_send.send(images=[f])
```
[file object]: https://docs.python.org/3/glossary.html#term-file-object
# Arguments
conf (str): Path of configuration file to use. Will use the default config if not specified.
`~` expands to user's home directory.
messages (List[str]): The messages to send.
parse_mode (str): Specifies formatting of messages, one of `["text", "markdown", "html"]`.
disable_web_page_preview (bool): Disables web page previews for all links in the messages.
files (List[file]): The files to send.
images (List[file]): The images to send.
captions (List[str]): The captions to send with the images.
locations (List[str]): The locations to send. Locations are strings containing the latitude and longitude
separated by whitespace or a comma.
timeout (int|float): The read timeout for network connections in seconds.
"""
conf = expanduser(conf) if conf else get_config_path()
config = configparser.ConfigParser()
if not config.read(conf) or not config.has_section("telegram"):
raise ConfigError("Config not found")
missing_options = set(["token", "chat_id"]) - set(config.options("telegram"))
if len(missing_options) > 0:
raise ConfigError("Missing options in config: {}".format(", ".join(missing_options)))
token = config.get("telegram", "token")
chat_id = int(config.get("telegram", "chat_id")) if config.get("telegram", "chat_id").isdigit() else config.get("telegram", "chat_id")
request = telegram.utils.request.Request(read_timeout=timeout)
bot = telegram.Bot(token, request=request)
# We let the user specify "text" as a parse mode to be more explicit about
# the lack of formatting applied to the message, but "text" isn't a supported
# parse_mode in python-telegram-bot. Instead, set the parse_mode to None
# in this case.
if parse_mode == "text":
parse_mode = None
if messages:
def send_message(message):
return bot.send_message(chat_id=chat_id, text=message, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview)
for m in messages:
if len(m) > MAX_MESSAGE_LENGTH:
warn(markup("Message longer than MAX_MESSAGE_LENGTH=%d, splitting into smaller messages." % MAX_MESSAGE_LENGTH, "red"))
ms = split_message(m, MAX_MESSAGE_LENGTH)
for m in ms:
send_message(m)
elif len(m) == 0:
continue
else:
send_message(m)
if files:
for f in files:
bot.send_document(chat_id=chat_id, document=f)
if images:
if captions:
# make captions equal length when not all images have captions
captions += [None] * (len(images) - len(captions))
for (i, c) in zip(images, captions):
bot.send_photo(chat_id=chat_id, photo=i, caption=c)
else:
for i in images:
bot.send_photo(chat_id=chat_id, photo=i)
if locations:
it = iter(locations)
for loc in it:
if "," in loc:
lat, lon = loc.split(",")
else:
lat = loc
lon = next(it)
bot.send_location(chat_id=chat_id, latitude=float(lat), longitude=float(lon)) | Send data over Telegram. All arguments are optional.
Always use this function with explicit keyword arguments. So
`send(messages=["Hello!"])` instead of `send(["Hello!"])` as the latter
will *break* when I change the order of the arguments.
The `file` type is the [file object][] returned by the `open()` function.
To send an image/file you open it in binary mode:
``` python
import telegram_send
with open("image.jpg", "rb") as f:
telegram_send.send(images=[f])
```
[file object]: https://docs.python.org/3/glossary.html#term-file-object
# Arguments
conf (str): Path of configuration file to use. Will use the default config if not specified.
`~` expands to user's home directory.
messages (List[str]): The messages to send.
parse_mode (str): Specifies formatting of messages, one of `["text", "markdown", "html"]`.
disable_web_page_preview (bool): Disables web page previews for all links in the messages.
files (List[file]): The files to send.
images (List[file]): The images to send.
captions (List[str]): The captions to send with the images.
locations (List[str]): The locations to send. Locations are strings containing the latitude and longitude
separated by whitespace or a comma.
timeout (int|float): The read timeout for network connections in seconds. | Below is the the instruction that describes the task:
### Input:
Send data over Telegram. All arguments are optional.
Always use this function with explicit keyword arguments. So
`send(messages=["Hello!"])` instead of `send(["Hello!"])` as the latter
will *break* when I change the order of the arguments.
The `file` type is the [file object][] returned by the `open()` function.
To send an image/file you open it in binary mode:
``` python
import telegram_send
with open("image.jpg", "rb") as f:
telegram_send.send(images=[f])
```
[file object]: https://docs.python.org/3/glossary.html#term-file-object
# Arguments
conf (str): Path of configuration file to use. Will use the default config if not specified.
`~` expands to user's home directory.
messages (List[str]): The messages to send.
parse_mode (str): Specifies formatting of messages, one of `["text", "markdown", "html"]`.
disable_web_page_preview (bool): Disables web page previews for all links in the messages.
files (List[file]): The files to send.
images (List[file]): The images to send.
captions (List[str]): The captions to send with the images.
locations (List[str]): The locations to send. Locations are strings containing the latitude and longitude
separated by whitespace or a comma.
timeout (int|float): The read timeout for network connections in seconds.
### Response:
def send(messages=None, conf=None, parse_mode=None, disable_web_page_preview=False, files=None, images=None,
captions=None, locations=None, timeout=30):
"""Send data over Telegram. All arguments are optional.
Always use this function with explicit keyword arguments. So
`send(messages=["Hello!"])` instead of `send(["Hello!"])` as the latter
will *break* when I change the order of the arguments.
The `file` type is the [file object][] returned by the `open()` function.
To send an image/file you open it in binary mode:
``` python
import telegram_send
with open("image.jpg", "rb") as f:
telegram_send.send(images=[f])
```
[file object]: https://docs.python.org/3/glossary.html#term-file-object
# Arguments
conf (str): Path of configuration file to use. Will use the default config if not specified.
`~` expands to user's home directory.
messages (List[str]): The messages to send.
parse_mode (str): Specifies formatting of messages, one of `["text", "markdown", "html"]`.
disable_web_page_preview (bool): Disables web page previews for all links in the messages.
files (List[file]): The files to send.
images (List[file]): The images to send.
captions (List[str]): The captions to send with the images.
locations (List[str]): The locations to send. Locations are strings containing the latitude and longitude
separated by whitespace or a comma.
timeout (int|float): The read timeout for network connections in seconds.
"""
conf = expanduser(conf) if conf else get_config_path()
config = configparser.ConfigParser()
if not config.read(conf) or not config.has_section("telegram"):
raise ConfigError("Config not found")
missing_options = set(["token", "chat_id"]) - set(config.options("telegram"))
if len(missing_options) > 0:
raise ConfigError("Missing options in config: {}".format(", ".join(missing_options)))
token = config.get("telegram", "token")
chat_id = int(config.get("telegram", "chat_id")) if config.get("telegram", "chat_id").isdigit() else config.get("telegram", "chat_id")
request = telegram.utils.request.Request(read_timeout=timeout)
bot = telegram.Bot(token, request=request)
# We let the user specify "text" as a parse mode to be more explicit about
# the lack of formatting applied to the message, but "text" isn't a supported
# parse_mode in python-telegram-bot. Instead, set the parse_mode to None
# in this case.
if parse_mode == "text":
parse_mode = None
if messages:
def send_message(message):
return bot.send_message(chat_id=chat_id, text=message, parse_mode=parse_mode, disable_web_page_preview=disable_web_page_preview)
for m in messages:
if len(m) > MAX_MESSAGE_LENGTH:
warn(markup("Message longer than MAX_MESSAGE_LENGTH=%d, splitting into smaller messages." % MAX_MESSAGE_LENGTH, "red"))
ms = split_message(m, MAX_MESSAGE_LENGTH)
for m in ms:
send_message(m)
elif len(m) == 0:
continue
else:
send_message(m)
if files:
for f in files:
bot.send_document(chat_id=chat_id, document=f)
if images:
if captions:
# make captions equal length when not all images have captions
captions += [None] * (len(images) - len(captions))
for (i, c) in zip(images, captions):
bot.send_photo(chat_id=chat_id, photo=i, caption=c)
else:
for i in images:
bot.send_photo(chat_id=chat_id, photo=i)
if locations:
it = iter(locations)
for loc in it:
if "," in loc:
lat, lon = loc.split(",")
else:
lat = loc
lon = next(it)
bot.send_location(chat_id=chat_id, latitude=float(lat), longitude=float(lon)) |
def create_record(self, zone_name, params):
"""
Create a record
:param zone_name: Name of the zone
:param params: {'fieldType': 'A', 'ttl': 60, 'subDomain': 'www',
'target': '1.2.3.4'
"""
self.log.debug('Create record: zone: %s, id %s', zone_name,
params)
return self._client.post('/domain/zone/{}/record'.format(zone_name),
**params) | Create a record
:param zone_name: Name of the zone
:param params: {'fieldType': 'A', 'ttl': 60, 'subDomain': 'www',
'target': '1.2.3.4' | Below is the the instruction that describes the task:
### Input:
Create a record
:param zone_name: Name of the zone
:param params: {'fieldType': 'A', 'ttl': 60, 'subDomain': 'www',
'target': '1.2.3.4'
### Response:
def create_record(self, zone_name, params):
"""
Create a record
:param zone_name: Name of the zone
:param params: {'fieldType': 'A', 'ttl': 60, 'subDomain': 'www',
'target': '1.2.3.4'
"""
self.log.debug('Create record: zone: %s, id %s', zone_name,
params)
return self._client.post('/domain/zone/{}/record'.format(zone_name),
**params) |
def create_on_demand(self,
instance_type='default',
tags=None,
root_device_type='ebs',
size='default',
vol_type='gp2',
delete_on_termination=False):
"""Create one or more EC2 on-demand instances.
:param size: Size of root device
:type size: int
:param delete_on_termination:
:type delete_on_termination: boolean
:param vol_type:
:type vol_type: str
:param root_device_type: The type of the root device.
:type root_device_type: str
:param instance_type: A section name in amazon.json
:type instance_type: str
:param tags:
:type tags: dict
:return: List of instances created
:rtype: list
"""
name, size = self._get_default_name_size(instance_type, size)
if root_device_type == 'ebs':
self.images[instance_type]['block_device_map'] = \
self._configure_ebs_volume(vol_type, name, size, delete_on_termination)
reservation = self.ec2.run_instances(**self.images[instance_type])
logger.info('Creating requested tags...')
for i in reservation.instances:
self.retry_on_ec2_error(self.ec2.create_tags, [i.id], tags or {})
instances = []
logger.info('Waiting for instances to become ready...')
while len(reservation.instances): # pylint: disable=len-as-condition
for i in reservation.instances:
if i.state == 'running':
instances.append(i)
reservation.instances.pop(reservation.instances.index(i))
logger.info('%s is %s at %s (%s)',
i.id,
i.state,
i.public_dns_name,
i.ip_address)
else:
self.retry_on_ec2_error(i.update)
return instances | Create one or more EC2 on-demand instances.
:param size: Size of root device
:type size: int
:param delete_on_termination:
:type delete_on_termination: boolean
:param vol_type:
:type vol_type: str
:param root_device_type: The type of the root device.
:type root_device_type: str
:param instance_type: A section name in amazon.json
:type instance_type: str
:param tags:
:type tags: dict
:return: List of instances created
:rtype: list | Below is the the instruction that describes the task:
### Input:
Create one or more EC2 on-demand instances.
:param size: Size of root device
:type size: int
:param delete_on_termination:
:type delete_on_termination: boolean
:param vol_type:
:type vol_type: str
:param root_device_type: The type of the root device.
:type root_device_type: str
:param instance_type: A section name in amazon.json
:type instance_type: str
:param tags:
:type tags: dict
:return: List of instances created
:rtype: list
### Response:
def create_on_demand(self,
instance_type='default',
tags=None,
root_device_type='ebs',
size='default',
vol_type='gp2',
delete_on_termination=False):
"""Create one or more EC2 on-demand instances.
:param size: Size of root device
:type size: int
:param delete_on_termination:
:type delete_on_termination: boolean
:param vol_type:
:type vol_type: str
:param root_device_type: The type of the root device.
:type root_device_type: str
:param instance_type: A section name in amazon.json
:type instance_type: str
:param tags:
:type tags: dict
:return: List of instances created
:rtype: list
"""
name, size = self._get_default_name_size(instance_type, size)
if root_device_type == 'ebs':
self.images[instance_type]['block_device_map'] = \
self._configure_ebs_volume(vol_type, name, size, delete_on_termination)
reservation = self.ec2.run_instances(**self.images[instance_type])
logger.info('Creating requested tags...')
for i in reservation.instances:
self.retry_on_ec2_error(self.ec2.create_tags, [i.id], tags or {})
instances = []
logger.info('Waiting for instances to become ready...')
while len(reservation.instances): # pylint: disable=len-as-condition
for i in reservation.instances:
if i.state == 'running':
instances.append(i)
reservation.instances.pop(reservation.instances.index(i))
logger.info('%s is %s at %s (%s)',
i.id,
i.state,
i.public_dns_name,
i.ip_address)
else:
self.retry_on_ec2_error(i.update)
return instances |
def run(
stream_spec, cmd='ffmpeg', capture_stdout=False, capture_stderr=False, input=None,
quiet=False, overwrite_output=False):
"""Invoke ffmpeg for the supplied node graph.
Args:
capture_stdout: if True, capture stdout (to be used with
``pipe:`` ffmpeg outputs).
capture_stderr: if True, capture stderr.
quiet: shorthand for setting ``capture_stdout`` and ``capture_stderr``.
input: text to be sent to stdin (to be used with ``pipe:``
ffmpeg inputs)
**kwargs: keyword-arguments passed to ``get_args()`` (e.g.
``overwrite_output=True``).
Returns: (out, err) tuple containing captured stdout and stderr data.
"""
process = run_async(
stream_spec,
cmd,
pipe_stdin=input is not None,
pipe_stdout=capture_stdout,
pipe_stderr=capture_stderr,
quiet=quiet,
overwrite_output=overwrite_output,
)
out, err = process.communicate(input)
retcode = process.poll()
if retcode:
raise Error('ffmpeg', out, err)
return out, err | Invoke ffmpeg for the supplied node graph.
Args:
capture_stdout: if True, capture stdout (to be used with
``pipe:`` ffmpeg outputs).
capture_stderr: if True, capture stderr.
quiet: shorthand for setting ``capture_stdout`` and ``capture_stderr``.
input: text to be sent to stdin (to be used with ``pipe:``
ffmpeg inputs)
**kwargs: keyword-arguments passed to ``get_args()`` (e.g.
``overwrite_output=True``).
Returns: (out, err) tuple containing captured stdout and stderr data. | Below is the the instruction that describes the task:
### Input:
Invoke ffmpeg for the supplied node graph.
Args:
capture_stdout: if True, capture stdout (to be used with
``pipe:`` ffmpeg outputs).
capture_stderr: if True, capture stderr.
quiet: shorthand for setting ``capture_stdout`` and ``capture_stderr``.
input: text to be sent to stdin (to be used with ``pipe:``
ffmpeg inputs)
**kwargs: keyword-arguments passed to ``get_args()`` (e.g.
``overwrite_output=True``).
Returns: (out, err) tuple containing captured stdout and stderr data.
### Response:
def run(
stream_spec, cmd='ffmpeg', capture_stdout=False, capture_stderr=False, input=None,
quiet=False, overwrite_output=False):
"""Invoke ffmpeg for the supplied node graph.
Args:
capture_stdout: if True, capture stdout (to be used with
``pipe:`` ffmpeg outputs).
capture_stderr: if True, capture stderr.
quiet: shorthand for setting ``capture_stdout`` and ``capture_stderr``.
input: text to be sent to stdin (to be used with ``pipe:``
ffmpeg inputs)
**kwargs: keyword-arguments passed to ``get_args()`` (e.g.
``overwrite_output=True``).
Returns: (out, err) tuple containing captured stdout and stderr data.
"""
process = run_async(
stream_spec,
cmd,
pipe_stdin=input is not None,
pipe_stdout=capture_stdout,
pipe_stderr=capture_stderr,
quiet=quiet,
overwrite_output=overwrite_output,
)
out, err = process.communicate(input)
retcode = process.poll()
if retcode:
raise Error('ffmpeg', out, err)
return out, err |
def calculate_weights(self, measure='tfidf'):
"""
Counts word frequency and calculates tf-idf values for words in every document.
:param measure: example weights approach (can be one of ``tfidf, binary, tf``).
"""
from math import log
# TODO replace with spipy matrices (and calculate with scikit)
if measure == 'tfidf':
self.calculate_idf()
for doc_idx, document in enumerate(self.resulting_documents):
train_word_count = defaultdict(int)
self.tf_idfs[doc_idx] = {}
for word in document:
train_word_count[word] += 1
for word in document:
if measure == "binary":
tf = 1
idf = 1
else:
tf = train_word_count[word]
idf = 1 if measure == "tf" else (self.idf[word] if word in self.idf else None)
if idf != None:
self.tf_idfs[doc_idx][word] = tf * idf | Counts word frequency and calculates tf-idf values for words in every document.
:param measure: example weights approach (can be one of ``tfidf, binary, tf``). | Below is the the instruction that describes the task:
### Input:
Counts word frequency and calculates tf-idf values for words in every document.
:param measure: example weights approach (can be one of ``tfidf, binary, tf``).
### Response:
def calculate_weights(self, measure='tfidf'):
"""
Counts word frequency and calculates tf-idf values for words in every document.
:param measure: example weights approach (can be one of ``tfidf, binary, tf``).
"""
from math import log
# TODO replace with spipy matrices (and calculate with scikit)
if measure == 'tfidf':
self.calculate_idf()
for doc_idx, document in enumerate(self.resulting_documents):
train_word_count = defaultdict(int)
self.tf_idfs[doc_idx] = {}
for word in document:
train_word_count[word] += 1
for word in document:
if measure == "binary":
tf = 1
idf = 1
else:
tf = train_word_count[word]
idf = 1 if measure == "tf" else (self.idf[word] if word in self.idf else None)
if idf != None:
self.tf_idfs[doc_idx][word] = tf * idf |
def get_queryset(self):
"""
Overridde the get_queryset method to
do some validations and build the search queryset.
"""
entries = Entry.published.none()
if self.request.GET:
self.pattern = self.request.GET.get('pattern', '')
if len(self.pattern) < 3:
self.error = _('The pattern is too short')
else:
entries = Entry.published.search(self.pattern)
else:
self.error = _('No pattern to search found')
return entries | Overridde the get_queryset method to
do some validations and build the search queryset. | Below is the the instruction that describes the task:
### Input:
Overridde the get_queryset method to
do some validations and build the search queryset.
### Response:
def get_queryset(self):
"""
Overridde the get_queryset method to
do some validations and build the search queryset.
"""
entries = Entry.published.none()
if self.request.GET:
self.pattern = self.request.GET.get('pattern', '')
if len(self.pattern) < 3:
self.error = _('The pattern is too short')
else:
entries = Entry.published.search(self.pattern)
else:
self.error = _('No pattern to search found')
return entries |
def execute_watch(self, id=None, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html>`_
:arg id: Watch ID
:arg body: Execution control
:arg debug: indicates whether the watch should execute in debug mode
"""
return self.transport.perform_request(
"PUT",
_make_path("_watcher", "watch", id, "_execute"),
params=params,
body=body,
) | `<http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html>`_
:arg id: Watch ID
:arg body: Execution control
:arg debug: indicates whether the watch should execute in debug mode | Below is the the instruction that describes the task:
### Input:
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html>`_
:arg id: Watch ID
:arg body: Execution control
:arg debug: indicates whether the watch should execute in debug mode
### Response:
def execute_watch(self, id=None, body=None, params=None):
"""
`<http://www.elastic.co/guide/en/elasticsearch/reference/current/watcher-api-execute-watch.html>`_
:arg id: Watch ID
:arg body: Execution control
:arg debug: indicates whether the watch should execute in debug mode
"""
return self.transport.perform_request(
"PUT",
_make_path("_watcher", "watch", id, "_execute"),
params=params,
body=body,
) |
def get_current_instruction(self) -> Dict:
"""Gets the current instruction for this GlobalState.
:return:
"""
instructions = self.environment.code.instruction_list
return instructions[self.mstate.pc] | Gets the current instruction for this GlobalState.
:return: | Below is the the instruction that describes the task:
### Input:
Gets the current instruction for this GlobalState.
:return:
### Response:
def get_current_instruction(self) -> Dict:
"""Gets the current instruction for this GlobalState.
:return:
"""
instructions = self.environment.code.instruction_list
return instructions[self.mstate.pc] |
def plot(self,
legend=None,
width=1.5,
ladder=True,
aspect=10,
ticks=(1, 10),
match_only=None,
ax=None,
return_fig=False,
colour=None,
cmap='viridis',
default=None,
style='intervals',
field=None,
**kwargs):
"""
Hands-free plotting.
Args:
legend (Legend): The Legend to use for colours, etc.
width (int): The width of the plot, in inches. Default 1.
ladder (bool): Whether to use widths or not. Default False.
aspect (int): The aspect ratio of the plot. Default 10.
ticks (int or tuple): The (minor,major) tick interval for depth.
Only the major interval is labeled. Default (1,10).
match_only (list): A list of strings matching the attributes you
want to compare when plotting.
ax (ax): A maplotlib axis to plot onto. If you pass this, it will
be returned. Optional.
return_fig (bool): Whether or not to return the maplotlib ``fig``
object. Default False.
colour (str): Which data field to use for colours.
cmap (cmap): Matplotlib colourmap. Default ``viridis``.
**kwargs are passed through to matplotlib's ``patches.Rectangle``.
Returns:
None. Unless you specify ``return_fig=True`` or pass in an ``ax``.
"""
if legend is None:
legend = Legend.random(self.components)
if style.lower() == 'tops':
# Make sure width is at least 3 for 'tops' style
width = max([3, width])
if ax is None:
return_ax = False
fig = plt.figure(figsize=(width, aspect*width))
ax = fig.add_axes([0.35, 0.05, 0.6, 0.95])
else:
return_ax = True
if (self.order == 'none') or (style.lower() == 'points'):
# Then this is a set of points.
ax = self.plot_points(ax=ax, legend=legend, field=field, **kwargs)
elif style.lower() == 'field':
if field is None:
raise StriplogError('You must provide a field to plot.')
ax = self.plot_field(ax=ax, legend=legend, field=field)
elif style.lower() == 'tops':
ax = self.plot_tops(ax=ax, legend=legend, field=field)
ax.set_xticks([])
else:
ax = self.plot_axis(ax=ax,
legend=legend,
ladder=ladder,
default_width=width,
match_only=kwargs.get('match_only', match_only),
colour=colour,
cmap=cmap,
default=default,
width_field=field,
**kwargs
)
ax.set_xlim([0, width])
ax.set_xticks([])
# Rely on interval order.
lower, upper = self[-1].base.z, self[0].top.z
rng = abs(upper - lower)
ax.set_ylim([lower, upper])
# Make sure ticks is a tuple.
try:
ticks = tuple(ticks)
except TypeError:
ticks = (1, ticks)
# Avoid MAXTICKS error.
while rng/ticks[0] > 250:
mi, ma = 10*ticks[0], ticks[1]
if ma <= mi:
ma = 10 * mi
ticks = (mi, ma)
# Carry on plotting...
minorLocator = mpl.ticker.MultipleLocator(ticks[0])
ax.yaxis.set_minor_locator(minorLocator)
majorLocator = mpl.ticker.MultipleLocator(ticks[1])
majorFormatter = mpl.ticker.FormatStrFormatter('%d')
ax.yaxis.set_major_locator(majorLocator)
ax.yaxis.set_major_formatter(majorFormatter)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.get_yaxis().set_tick_params(which='both', direction='out')
# Optional title.
title = getattr(self, 'title', None)
if title is not None:
ax.set_title(title)
ax.patch.set_alpha(0)
if return_ax:
return ax
elif return_fig:
return fig
else:
return | Hands-free plotting.
Args:
legend (Legend): The Legend to use for colours, etc.
width (int): The width of the plot, in inches. Default 1.
ladder (bool): Whether to use widths or not. Default False.
aspect (int): The aspect ratio of the plot. Default 10.
ticks (int or tuple): The (minor,major) tick interval for depth.
Only the major interval is labeled. Default (1,10).
match_only (list): A list of strings matching the attributes you
want to compare when plotting.
ax (ax): A maplotlib axis to plot onto. If you pass this, it will
be returned. Optional.
return_fig (bool): Whether or not to return the maplotlib ``fig``
object. Default False.
colour (str): Which data field to use for colours.
cmap (cmap): Matplotlib colourmap. Default ``viridis``.
**kwargs are passed through to matplotlib's ``patches.Rectangle``.
Returns:
None. Unless you specify ``return_fig=True`` or pass in an ``ax``. | Below is the the instruction that describes the task:
### Input:
Hands-free plotting.
Args:
legend (Legend): The Legend to use for colours, etc.
width (int): The width of the plot, in inches. Default 1.
ladder (bool): Whether to use widths or not. Default False.
aspect (int): The aspect ratio of the plot. Default 10.
ticks (int or tuple): The (minor,major) tick interval for depth.
Only the major interval is labeled. Default (1,10).
match_only (list): A list of strings matching the attributes you
want to compare when plotting.
ax (ax): A maplotlib axis to plot onto. If you pass this, it will
be returned. Optional.
return_fig (bool): Whether or not to return the maplotlib ``fig``
object. Default False.
colour (str): Which data field to use for colours.
cmap (cmap): Matplotlib colourmap. Default ``viridis``.
**kwargs are passed through to matplotlib's ``patches.Rectangle``.
Returns:
None. Unless you specify ``return_fig=True`` or pass in an ``ax``.
### Response:
def plot(self,
legend=None,
width=1.5,
ladder=True,
aspect=10,
ticks=(1, 10),
match_only=None,
ax=None,
return_fig=False,
colour=None,
cmap='viridis',
default=None,
style='intervals',
field=None,
**kwargs):
"""
Hands-free plotting.
Args:
legend (Legend): The Legend to use for colours, etc.
width (int): The width of the plot, in inches. Default 1.
ladder (bool): Whether to use widths or not. Default False.
aspect (int): The aspect ratio of the plot. Default 10.
ticks (int or tuple): The (minor,major) tick interval for depth.
Only the major interval is labeled. Default (1,10).
match_only (list): A list of strings matching the attributes you
want to compare when plotting.
ax (ax): A maplotlib axis to plot onto. If you pass this, it will
be returned. Optional.
return_fig (bool): Whether or not to return the maplotlib ``fig``
object. Default False.
colour (str): Which data field to use for colours.
cmap (cmap): Matplotlib colourmap. Default ``viridis``.
**kwargs are passed through to matplotlib's ``patches.Rectangle``.
Returns:
None. Unless you specify ``return_fig=True`` or pass in an ``ax``.
"""
if legend is None:
legend = Legend.random(self.components)
if style.lower() == 'tops':
# Make sure width is at least 3 for 'tops' style
width = max([3, width])
if ax is None:
return_ax = False
fig = plt.figure(figsize=(width, aspect*width))
ax = fig.add_axes([0.35, 0.05, 0.6, 0.95])
else:
return_ax = True
if (self.order == 'none') or (style.lower() == 'points'):
# Then this is a set of points.
ax = self.plot_points(ax=ax, legend=legend, field=field, **kwargs)
elif style.lower() == 'field':
if field is None:
raise StriplogError('You must provide a field to plot.')
ax = self.plot_field(ax=ax, legend=legend, field=field)
elif style.lower() == 'tops':
ax = self.plot_tops(ax=ax, legend=legend, field=field)
ax.set_xticks([])
else:
ax = self.plot_axis(ax=ax,
legend=legend,
ladder=ladder,
default_width=width,
match_only=kwargs.get('match_only', match_only),
colour=colour,
cmap=cmap,
default=default,
width_field=field,
**kwargs
)
ax.set_xlim([0, width])
ax.set_xticks([])
# Rely on interval order.
lower, upper = self[-1].base.z, self[0].top.z
rng = abs(upper - lower)
ax.set_ylim([lower, upper])
# Make sure ticks is a tuple.
try:
ticks = tuple(ticks)
except TypeError:
ticks = (1, ticks)
# Avoid MAXTICKS error.
while rng/ticks[0] > 250:
mi, ma = 10*ticks[0], ticks[1]
if ma <= mi:
ma = 10 * mi
ticks = (mi, ma)
# Carry on plotting...
minorLocator = mpl.ticker.MultipleLocator(ticks[0])
ax.yaxis.set_minor_locator(minorLocator)
majorLocator = mpl.ticker.MultipleLocator(ticks[1])
majorFormatter = mpl.ticker.FormatStrFormatter('%d')
ax.yaxis.set_major_locator(majorLocator)
ax.yaxis.set_major_formatter(majorFormatter)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.yaxis.set_ticks_position('left')
ax.get_yaxis().set_tick_params(which='both', direction='out')
# Optional title.
title = getattr(self, 'title', None)
if title is not None:
ax.set_title(title)
ax.patch.set_alpha(0)
if return_ax:
return ax
elif return_fig:
return fig
else:
return |
def delete_record(cls, record):
"""Delete a record and it's persistent identifiers."""
record.delete()
PersistentIdentifier.query.filter_by(
object_type='rec', object_uuid=record.id,
).update({PersistentIdentifier.status: PIDStatus.DELETED})
cls.delete_buckets(record)
db.session.commit() | Delete a record and it's persistent identifiers. | Below is the the instruction that describes the task:
### Input:
Delete a record and it's persistent identifiers.
### Response:
def delete_record(cls, record):
"""Delete a record and it's persistent identifiers."""
record.delete()
PersistentIdentifier.query.filter_by(
object_type='rec', object_uuid=record.id,
).update({PersistentIdentifier.status: PIDStatus.DELETED})
cls.delete_buckets(record)
db.session.commit() |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.