Retry sending request until timeout or until receiving a response.
def _make_request_to_server(self, query_function, raise_for_status=True,
time_limit_seconds=2, retry_delay_seconds=0.2):
"""Retry sending request until timeout or until receiving a response.
"""
start_time = datetime.datetime.now()
while datetime.datetime.now() - start_time < datetime.timedelta(
0, time_limit_seconds):
error = None
response = None
try:
response = query_function()
except requests.exceptions.ConnectionError as e:
error = ServerConnectionError(
"No response from server.\n%s" % e)
except:
if response:
logger.info(response.text)
raise
if response is not None and raise_for_status:
# raises requests.exceptions.HTTPError
self._raise_for_status(response)
if error:
time.sleep(retry_delay_seconds)
continue
else:
return response
raise error |
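A hedged sketch of how a caller inside the same class might use this retry wrapper; the base-URL attribute, the helper name, and the 30-second timeout are assumptions, not part of the original client (requests is assumed to be imported as in the surrounding module):
def _get_with_retry(self, relative_url, params=None, raise_for_status=True):
    # Hypothetical helper: retry the GET until a response arrives or the
    # time limit inside _make_request_to_server is exceeded.
    url = self.server_url + relative_url
    return self._make_request_to_server(
        lambda: requests.get(url, params=params, timeout=30),
        raise_for_status=raise_for_status)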
Convenience function for retrieving a resource.
If resource does not exist, return None.
def _get_resource(self, relative_url, params=None):
"""Convenience function for retrieving a resource.
If resource does not exist, return None.
"""
response = self._get(relative_url, params=params, raise_for_status=False)
if response.status_code == 404:
return None
self._raise_for_status(response)
return response.json() |
Returns the correct Input class for a given
data type and gather mode
def TaskAttemptInput(input, task_attempt):
"""Returns the correct Input class for a given
data type and gather mode
"""
(data_type, mode) = _get_input_info(input)
if data_type != 'file':
return NoOpInput(None, task_attempt)
if mode == 'no_gather':
return FileInput(input['data']['contents'], task_attempt)
else:
assert mode.startswith('gather')
return FileListInput(input['data']['contents'], task_attempt) |
Run a task asynchronously
def execute(task_function, *args, **kwargs):
"""Run a task asynchronously
"""
if get_setting('TEST_DISABLE_ASYNC_DELAY'):
# Delay disabled, run synchronously
logger.debug('Running function "%s" synchronously because '\
'TEST_DISABLE_ASYNC_DELAY is True'
% task_function.__name__)
return task_function(*args, **kwargs)
db.connections.close_all()
task_function.delay(*args, **kwargs) |
Run a task asynchronously after at least the number of seconds given by the 'delay' keyword argument
def execute_with_delay(task_function, *args, **kwargs):
"""Run a task asynchronously after at least delay_seconds
"""
delay = kwargs.pop('delay', 0)
if get_setting('TEST_DISABLE_ASYNC_DELAY'):
# Delay disabled, run synchronously
logger.debug('Running function "%s" synchronously because '\
'TEST_DISABLE_ASYNC_DELAY is True'
% task_function.__name__)
return task_function(*args, **kwargs)
db.connections.close_all()
task_function.apply_async(args=args, kwargs=kwargs, countdown=delay) |
Check for tasks that are no longer sending a heartbeat
def check_for_stalled_tasks():
"""Check for tasks that are no longer sending a heartbeat
"""
from api.models.tasks import Task
for task in Task.objects.filter(status_is_running=True):
if not task.is_responsive():
task.system_error()
if task.is_timed_out():
task.timeout_error() |
Check for TaskAttempts that were never cleaned up
def check_for_missed_cleanup():
"""Check for TaskAttempts that were never cleaned up
"""
if get_setting('PRESERVE_ALL'):
return
from api.models.tasks import TaskAttempt
if get_setting('PRESERVE_ON_FAILURE'):
for task_attempt in TaskAttempt.objects.filter(
status_is_running=False).filter(
status_is_cleaned_up=False).exclude(
status_is_failed=True):
task_attempt.cleanup()
else:
for task_attempt in TaskAttempt.objects.filter(
status_is_running=False).filter(status_is_cleaned_up=False):
task_attempt.cleanup() |
This attempts to execute "retryable_function" with exponential backoff
on the delay time.
Ten retries add up to about 34 minutes of total delay before the last attempt.
"human_readable_action_name" is an optional input to customize the retry message.
def execute_with_retries(retryable_function,
retryable_errors,
logger,
human_readable_action_name='Action',
nonretryable_errors=None):
"""This attempts to execute "retryable_function" with exponential backoff
on delay time.
10 retries adds up to about 34 minutes total delay before the last attempt.
"human_readable_action_name" is an option input to customize retry message.
"""
max_retries = 10
attempt = 0
if not nonretryable_errors:
nonretryable_errors = ()
while True:
try:
return retryable_function()
except tuple(nonretryable_errors):
raise
except tuple(retryable_errors) as e:
attempt += 1
if attempt > max_retries:
raise
# Exponential backoff on retry delay as suggested by
# https://cloud.google.com/storage/docs/exponential-backoff
delay = 2**attempt + random.random()
logger.info('"%s" failed with error "%s". '\
'Retry number %s of %s in %s seconds'
% (human_readable_action_name, str(e),
attempt, max_retries, delay))
time.sleep(delay) |
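As a hedged usage sketch (the endpoint and wrapper function are hypothetical), the helper retries transient connection failures and gives up immediately on errors listed as non-retryable:
import logging
import requests

logger = logging.getLogger(__name__)

def fetch_status():
    # Hypothetical call that can fail transiently
    return requests.get('http://example.com/status', timeout=5).json()

result = execute_with_retries(
    fetch_status,
    retryable_errors=(requests.exceptions.ConnectionError,),
    logger=logger,
    human_readable_action_name='Fetch status',
    nonretryable_errors=(requests.exceptions.HTTPError,))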
Export a file from Loom to some file storage location.
Default destination_directory is the current working directory. Default
destination_filename is the filename from the given file data object.
def export_file(self, data_object, destination_directory=None,
destination_filename=None, retry=False,
export_metadata=False, export_raw_file=True):
"""Export a file from Loom to some file storage location.
Default destination_directory is the current working directory. Default
destination_filename is the filename from the given file data object.
"""
if not destination_directory:
destination_directory = os.getcwd()
# Get the filename from the data object
if not destination_filename:
destination_filename = data_object['value']['filename']
destination_file_url = os.path.join(destination_directory,
destination_filename)
logger.info('Exporting file %s@%s ...' % (
data_object['value']['filename'],
data_object['uuid']))
if export_raw_file:
destination = File(
destination_file_url, self.storage_settings, retry=retry)
if destination.exists():
raise FileAlreadyExistsError(
'File already exists at %s' % destination_file_url)
logger.info('...copying file to %s' % (
destination.get_url()))
# Copy from the first file location
file_resource = data_object.get('value')
md5 = file_resource.get('md5')
source_url = data_object['value']['file_url']
File(source_url, self.storage_settings, retry=retry).copy_to(
destination, expected_md5=md5)
data_object['value'] = self._create_new_file_resource(
data_object['value'], destination.get_url())
else:
logger.info('...skipping raw file')
if export_metadata:
data_object['value'].pop('link', None)
data_object['value'].pop('upload_status', None)
destination_metadata_url = os.path.join(
destination_file_url + '.metadata.yaml')
logger.info('...writing metadata to %s' % destination_metadata_url)
metadata = yaml.safe_dump(data_object, default_flow_style=False)
metadata_file = File(destination_metadata_url,
self.storage_settings, retry=retry)
metadata_file.write(metadata)
else:
logger.info('...skipping metadata')
logger.info('...finished file export') |
Like urlparse except it assumes 'file://' if no scheme is specified
def _urlparse(path):
"""Like urlparse except it assumes 'file://' if no scheme is specified
"""
url = urlparse.urlparse(path)
_validate_url(url)
if not url.scheme or url.scheme == 'file://':
# Normalize path, and set scheme to "file" if missing
path = os.path.abspath(
os.path.expanduser(path))
url = urlparse.urlparse('file://'+path)
return url |
Factory method returns LocalFilePattern or GoogleStorageFilePattern
def FilePattern(pattern, settings, **kwargs):
"""Factory method returns LocalFilePattern or GoogleStorageFilePattern
"""
url = _urlparse(pattern)
if url.scheme == 'gs':
return GoogleStorageFilePattern(pattern, settings, **kwargs)
else:
assert url.scheme == 'file'
return LocalFilePattern(pattern, settings, **kwargs) |
Factory method returns LocalFile or GoogleStorageFile
def File(url, settings, retry=False):
"""Factory method
"""
parsed_url = _urlparse(url)
if parsed_url.scheme == 'gs':
return GoogleStorageFile(url, settings, retry=retry)
elif parsed_url.scheme == 'file':
if parsed_url.hostname == 'localhost' or parsed_url.hostname is None:
return LocalFile(url, settings, retry=retry)
else:
raise FileUtilsError(
"Cannot process file url %s. Remote file hosts not supported."
% url)
else:
raise FileUtilsError('Unsupported scheme "%s" in file "%s"'
% (parsed_url.scheme, url)) |
Factory method to select the right copier for a given source and destination.
def Copier(source, destination):
"""Factory method to select the right copier for a given source and destination.
"""
if source.type == 'local' and destination.type == 'local':
return LocalCopier(source, destination)
elif source.type == 'local' and destination.type == 'google_storage':
return Local2GoogleStorageCopier(source, destination)
elif source.type == 'google_storage' and destination.type == 'local':
return GoogleStorage2LocalCopier(source, destination)
elif source.type == 'google_storage' and destination.type == 'google_storage':
return GoogleStorageCopier(source, destination)
else:
raise FileUtilsError('Could not find method to copy from source '\
'"%s" to destination "%s".' % (source, destination)) |
Scan the data tree on the given data_channel to create a corresponding
InputSetGenerator tree.
def create_from_data_channel(cls, data_channel):
"""Scan the data tree on the given data_channel to create a corresponding
InputSetGenerator tree.
"""
gather_depth = cls._get_gather_depth(data_channel)
generator = InputSetGeneratorNode()
for (data_path, data_node) in data_channel.get_ready_data_nodes(
[], gather_depth):
flat_data_node = data_node.flattened_clone(save=False)
input_item = InputItem(
flat_data_node, data_channel.channel,
data_channel.as_channel, mode=data_channel.mode)
generator._add_input_item(data_path, input_item)
return generator |
Returns the correct Output class for a given
data type, source type, and scatter mode
def TaskAttemptOutput(output, task_attempt):
"""Returns the correct Output class for a given
data type, source type, and scatter mode
"""
(data_type, mode, source_type) = _get_output_info(output)
if data_type == 'file':
if mode == 'scatter':
assert source_type in ['filenames', 'glob'], \
'source type "%s" not allowed' % source_type
if source_type == 'filenames':
return FileListScatterOutput(output, task_attempt)
return GlobScatterOutput(output, task_attempt)
else:
assert mode == 'no_scatter'
assert source_type == 'filename', \
'source type "%s" not allowed' % source_type
return FileOutput(output, task_attempt)
else: # data_type is non-file
if mode == 'scatter':
assert source_type in [
'filename', 'filenames', 'glob', 'stream'], \
'source type "%s" not allowed' % source_type
if source_type == 'filename':
return FileContentsScatterOutput(output, task_attempt)
if source_type == 'filenames':
return FileListContentsScatterOutput(output, task_attempt)
if source_type == 'glob':
return GlobContentsScatterOutput(output, task_attempt)
assert source_type == 'stream'
return StreamScatterOutput(output, task_attempt)
else:
assert mode == 'no_scatter'
assert source_type in ['filename', 'stream'], \
'source type "%s" not allowed' % source_type
if source_type == 'filename':
return FileContentsOutput(output, task_attempt)
assert source_type == 'stream'
return StreamOutput(output, task_attempt) |
Adds a new leaf node at the given index with the given data_object
def add_leaf(self, index, data_object, save=False):
"""Adds a new leaf node at the given index with the given data_object
"""
assert self.type == data_object.type, 'data type mismatch'
if self._get_child_by_index(index) is not None:
raise NodeAlreadyExistsError(
'Leaf data node already exists at this index')
else:
data_node = DataNode(
parent=self,
index=index,
data_object=data_object,
type=self.type)
if save:
data_node.full_clean()
data_node.save()
self._add_unsaved_child(data_node)
return data_node |
Returns a list [(path1,data_node1),...]
with entries only for existing nodes with DataObjects where is_ready==True.
Missing nodes or those with non-ready or non-existing data are ignored.
def get_ready_data_nodes(self, seed_path, gather_depth):
"""Returns a list [(path1,data_node1),...]
with entries only for existing nodes with DataObjects where is_ready==True.
Missing nodes or those with non-ready or non-existing data are ignored.
"""
try:
seed_node = self.get_node(seed_path)
except MissingBranchError:
return []
all_paths = seed_node._get_all_paths(seed_path, gather_depth)
ready_data_nodes = []
for path in all_paths:
if self.is_ready(data_path=path):
ready_data_nodes.append((path, self.get_node(path)))
return ready_data_nodes |
Verify that the given index is consistent with the degree of the node.
def _check_index(self, index):
"""Verify that the given index is consistent with the degree of the node.
"""
if self.degree is None:
raise UnknownDegreeError(
'Cannot access child DataNode on a parent with degree of None. '\
'Set the degree on the parent first.')
if index < 0 or index >= self.degree:
raise IndexOutOfRangeError(
'Out of range index %s. DataNode parent has degree %s, so index '\
'should be in the range 0 to %s' % (
index, self.degree, self.degree-1)) |
Determines if we're running on a GCE instance.
def on_gcloud_vm():
""" Determines if we're running on a GCE instance."""
r = None
try:
r = requests.get('http://metadata.google.internal')
except requests.ConnectionError:
return False
try:
if r.headers['Metadata-Flavor'] == 'Google' and \
r.headers['Server'] == 'Metadata Server for VM':
return True
except KeyError:
return False
# Headers were present but did not identify the GCE metadata server
return False |
Determine the cheapest instance type given a minimum
number of cores and minimum amount of RAM (in GB).
def get_cheapest_instance_type(cores, memory):
"""Determine the cheapest instance type given a minimum
number of cores and minimum amount of RAM (in GB).
"""
pricelist = get_gcloud_pricelist()
# Filter out preemptible, shared-CPU, and non-US instance types
us_instance_types = {k: v for k, v in pricelist.items()
if k.startswith('CP-COMPUTEENGINE-VMIMAGE-')
and not k.endswith('-PREEMPTIBLE')
and 'us' in v and v['cores'] != 'shared'}
# Convert to array and add keys (instance type names) as type names
price_array = []
for key in us_instance_types:
value = us_instance_types[key]
value.update({'name': key.replace(
'CP-COMPUTEENGINE-VMIMAGE-', '').lower()})
price_array.append(value)
# Sort by price in US
price_array.sort(key=lambda x: x['us'])
# Look for an instance type that satisfies requested
# cores and memory; first will be cheapest
for instance_type in price_array:
if int(instance_type['cores']) >= int(cores) \
and float(instance_type['memory']) >= float(memory):
print instance_type['name']
return instance_type['name']
# No instance type found that can fulfill requested cores and memory
raise Exception('No instance type found with at least %d cores '
'and %f GB of RAM.' % (cores, memory)) |
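For example, a hypothetical request for a machine with at least 2 cores and 8 GB of RAM:
# Returns the name of the cheapest US machine type offering at least
# 2 cores and 8 GB of RAM, or raises if none qualifies.
instance_type = get_cheapest_instance_type(cores=2, memory=8)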
Retrieve latest pricelist from Google Cloud, or use
cached copy if not reachable.
def get_gcloud_pricelist():
"""Retrieve latest pricelist from Google Cloud, or use
cached copy if not reachable.
"""
try:
r = requests.get('http://cloudpricingcalculator.appspot.com'
'/static/data/pricelist.json')
content = json.loads(r.content)
except ConnectionError:
logger.warning(
"Couldn't get updated pricelist from "
"http://cloudpricingcalculator.appspot.com"
"/static/data/pricelist.json. Falling back to cached "
"copy, but prices may be out of date.")
with open('gcloudpricelist.json') as infile:
content = json.load(infile)
pricelist = content['gcp_price_list']
return pricelist |
Create a base name for the worker instance that will run the specified
task run attempt, from this server. Since hostname and step name will be
duplicated across workers (reruns, etc.), ensure that at least
MIN_TASK_ID_CHARS are preserved in the instance name. Also, prevent names
from ending with dashes.
def _get_base_name(hostname, step_name, attempt_id, max_length):
"""Create a base name for the worker instance that will run the specified
task run attempt, from this server. Since hostname and step name will be
duplicated across workers (reruns, etc.), ensure that at least
MIN_TASK_ID_CHARS are preserved in the instance name. Also, prevent names
from ending with dashes.
"""
max_length = int(max_length)
if len(hostname)+len(step_name)+MIN_TASK_ID_CHARS+2 > max_length:
# round with ceil/floor such that extra char goes to hostname if odd
hostname_chars = int(math.ceil(
(max_length-MIN_TASK_ID_CHARS-2)/float(2)))
step_name_chars = int(math.floor(
(max_length-MIN_TASK_ID_CHARS-2)/float(2)))
hostname = hostname[:hostname_chars]
step_name = step_name[:step_name_chars]
name_base = '-'.join([hostname, step_name, attempt_id])
return _sanitize_instance_name(name_base, max_length) |
Instance names must start with a lowercase letter.
All following characters must be a dash, lowercase letter,
or digit.
def _sanitize_instance_name(name, max_length):
"""Instance names must start with a lowercase letter.
All following characters must be a dash, lowercase letter,
or digit.
"""
name = str(name).lower() # make all letters lowercase
name = re.sub(r'[^-a-z0-9]', '', name) # remove invalid characters
# remove non-lowercase letters from the beginning
name = re.sub(r'^[^a-z]+', '', name)
name = name[:max_length]
name = re.sub(r'-+$', '', name) # remove hyphens from the end
return name |
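Two illustrative inputs (both hypothetical) showing the sanitization rules in action:
# Uppercase is lowered, '_' and '.' are stripped, and the result is
# truncated to max_length:
assert _sanitize_instance_name('My_Host.01-Step-A--', 12) == 'myhost01-ste'
# Leading non-letters are removed so the name starts with a letter:
assert _sanitize_instance_name('123-worker', 63) == 'worker'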
Determines if the cache file has expired or if it is still valid
def is_valid(self, max_age=None):
''' Determines if the cache file has expired or if it is still valid '''
if max_age is None:
max_age = self.cache_max_age
if os.path.isfile(self.cache_path_cache):
mod_time = os.path.getmtime(self.cache_path_cache)
current_time = time()
if (mod_time + max_age) > current_time:
return True
return False |
Reads the JSON inventory from the cache file. Returns Python dictionary.
def get_all_data_from_cache(self, filename=''):
''' Reads the JSON inventory from the cache file. Returns Python dictionary. '''
data = ''
if not filename:
filename = self.cache_path_cache
with open(filename, 'r') as cache:
data = cache.read()
return json.loads(data) |
Writes data to file as JSON. Returns True.
def write_to_cache(self, data, filename=''):
''' Writes data to file as JSON. Returns True. '''
if not filename:
filename = self.cache_path_cache
json_data = json.dumps(data)
with open(filename, 'w') as cache:
cache.write(json_data)
return True |
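A hypothetical round trip through these cache helpers, assuming they belong to CloudInventoryCache and using constructor arguments that mirror the ones passed in get_config() further below:
cache = CloudInventoryCache(cache_path='/tmp',
                            cache_max_age=300,
                            cache_name='ansible-gce.cache')
cache.write_to_cache({'_meta': {'hostvars': {}}})  # returns True
if cache.is_valid():
    inventory = cache.get_all_data_from_cache()    # returns the dict back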
Reads the settings from the gce.ini file.
Populates a SafeConfigParser object with defaults and
attempts to read an .ini-style configuration from the filename
specified in GCE_INI_PATH. If the environment variable is
not present, the filename defaults to gce.ini in the current
working directory.
def get_config(self):
"""
Reads the settings from the gce.ini file.
Populates a SafeConfigParser object with defaults and
attempts to read an .ini-style configuration from the filename
specified in GCE_INI_PATH. If the environment variable is
not present, the filename defaults to gce.ini in the current
working directory.
"""
gce_ini_default_path = os.path.join(
os.path.dirname(os.path.realpath(__file__)), "gce.ini")
gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
# Create a ConfigParser.
# This provides empty defaults to each key, so that environment
# variable configuration (as opposed to INI configuration) is able
# to work.
config = ConfigParser.SafeConfigParser(defaults={
'gce_service_account_email_address': '',
'gce_service_account_pem_file_path': '',
'gce_project_id': '',
'libcloud_secrets': '',
'inventory_ip_type': '',
'cache_path': '~/.ansible/tmp',
'cache_max_age': '300'
})
if 'gce' not in config.sections():
config.add_section('gce')
if 'inventory' not in config.sections():
config.add_section('inventory')
if 'cache' not in config.sections():
config.add_section('cache')
config.read(gce_ini_path)
#########
# Section added for processing ini settings
#########
# Set the instance_states filter based on config file options
self.instance_states = []
if config.has_option('gce', 'instance_states'):
states = config.get('gce', 'instance_states')
# Ignore if instance_states is an empty string.
if states:
self.instance_states = states.split(',')
# Caching
cache_path = config.get('cache', 'cache_path')
cache_max_age = config.getint('cache', 'cache_max_age')
# TODO(supertom): support project-specific caches
cache_name = 'ansible-gce.cache'
self.cache = CloudInventoryCache(cache_path=cache_path,
cache_max_age=cache_max_age,
cache_name=cache_name)
return config |
Determine inventory options. Environment variables always
take precedence over configuration files.
def get_inventory_options(self):
"""Determine inventory options. Environment variables always
take precedence over configuration files."""
ip_type = self.config.get('inventory', 'inventory_ip_type')
# If the appropriate environment variables are set, they override
# other configuration
ip_type = os.environ.get('INVENTORY_IP_TYPE', ip_type)
return ip_type |
Determine the GCE authorization settings and return a
libcloud driver.
def get_gce_driver(self):
"""Determine the GCE authorization settings and return a
libcloud driver.
"""
# Attempt to get GCE params from a configuration file, if one
# exists.
secrets_path = self.config.get('gce', 'libcloud_secrets')
secrets_found = False
try:
import secrets
args = list(getattr(secrets, 'GCE_PARAMS', []))
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except:
pass
if not secrets_found and secrets_path:
if not secrets_path.endswith('secrets.py'):
err = "Must specify libcloud secrets file as "
err += "/absolute/path/to/secrets.py"
sys.exit(err)
sys.path.append(os.path.dirname(secrets_path))
try:
import secrets
args = list(getattr(secrets, 'GCE_PARAMS', []))
kwargs = getattr(secrets, 'GCE_KEYWORD_PARAMS', {})
secrets_found = True
except:
pass
if not secrets_found:
args = [
self.config.get('gce','gce_service_account_email_address'),
self.config.get('gce','gce_service_account_pem_file_path')
]
kwargs = {'project': self.config.get('gce', 'gce_project_id')}
# If the appropriate environment variables are set, they override
# other configuration; process those into our args and kwargs.
args[0] = os.environ.get('GCE_EMAIL', args[0])
args[1] = os.environ.get('GCE_PEM_FILE_PATH', args[1])
kwargs['project'] = os.environ.get('GCE_PROJECT', kwargs['project'])
# Retrieve and return the GCE driver.
gce = get_driver(Provider.GCE)(*args, **kwargs)
gce.connection.user_agent_append(
'%s/%s' % (USER_AGENT_PRODUCT, USER_AGENT_VERSION),
)
return gce |
Returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the group_instances call
def parse_env_zones(self):
'''Returns a list of comma-separated zones parsed from the GCE_ZONE environment variable.
If provided, this will be used to filter the results of the group_instances call'''
import csv
reader = csv.reader([os.environ.get('GCE_ZONE',"")], skipinitialspace=True)
zones = [r for r in reader]
return [z for z in zones[0]] |
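An illustrative run (the zone names are made up):
import os
os.environ['GCE_ZONE'] = 'us-central1-a, us-east1-b'
# self.parse_env_zones() -> ['us-central1-a', 'us-east1-b']
# With GCE_ZONE unset or empty, an empty list is returned.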
Command line argument processing
def parse_cli_args(self):
''' Command line argument processing '''
parser = argparse.ArgumentParser(
description='Produce an Ansible Inventory file based on GCE')
parser.add_argument('--list', action='store_true', default=True,
help='List instances (default: True)')
parser.add_argument('--host', action='store',
help='Get all information about an instance')
parser.add_argument('--pretty', action='store_true', default=False,
help='Pretty format (default: False)')
parser.add_argument(
'--refresh-cache', action='store_true', default=False,
help='Force refresh of cache by making API requests (default: False - use cache files)')
self.args = parser.parse_args() |
Loads inventory from JSON on disk.
def load_inventory_from_cache(self):
''' Loads inventory from JSON on disk. '''
try:
self.inventory = self.cache.get_all_data_from_cache()
hosts = self.inventory['_meta']['hostvars']
except Exception as e:
print(
"Invalid inventory file %s. Please rebuild with -refresh-cache option."
% (self.cache.cache_path_cache))
raise |
Do API calls and save data in cache.
def do_api_calls_update_cache(self):
''' Do API calls and save data in cache. '''
zones = self.parse_env_zones()
data = self.group_instances(zones)
self.cache.write_to_cache(data)
self.inventory = data |
Group all instances
def group_instances(self, zones=None):
'''Group all instances'''
groups = {}
meta = {}
meta["hostvars"] = {}
for node in self.list_nodes():
# This check filters on the desired instance states defined in the
# config file with the instance_states config option.
#
# If the instance_states list is _empty_ then _ALL_ states are returned.
#
# If the instance_states list is _populated_ then check the current
# state against the instance_states list
if self.instance_states and not node.extra['status'] in self.instance_states:
continue
name = node.name
meta["hostvars"][name] = self.node_to_dict(node)
zone = node.extra['zone'].name
# To avoid making multiple requests per zone
# we list all nodes and then filter the results
if zones and zone not in zones:
continue
if zone in groups: groups[zone].append(name)
else: groups[zone] = [name]
tags = node.extra['tags']
for t in tags:
if t.startswith('group-'):
tag = t[6:]
else:
tag = 'tag_%s' % t
if tag in groups: groups[tag].append(name)
else: groups[tag] = [name]
net = node.extra['networkInterfaces'][0]['network'].split('/')[-1]
net = 'network_%s' % net
if net in groups: groups[net].append(name)
else: groups[net] = [name]
machine_type = node.size
if machine_type in groups: groups[machine_type].append(name)
else: groups[machine_type] = [name]
image = node.image and node.image or 'persistent_disk'
if image in groups: groups[image].append(name)
else: groups[image] = [name]
status = node.extra['status']
stat = 'status_%s' % status.lower()
if stat in groups: groups[stat].append(name)
else: groups[stat] = [name]
groups["_meta"] = meta
return groups |
Converts a dict to a JSON object and dumps it as a formatted
string
def json_format_dict(self, data, pretty=False):
''' Converts a dict to a JSON object and dumps it as a formatted
string '''
if pretty:
return json.dumps(data, sort_keys=True, indent=2)
else:
return json.dumps(data) |
Stream stdout and stderr from the task container to this
process's stdout and stderr, respectively.
def _stream_docker_logs(self):
"""Stream stdout and stderr from the task container to this
process's stdout and stderr, respectively.
"""
thread = threading.Thread(target=self._stderr_stream_worker)
thread.start()
for line in self.docker_client.logs(self.container, stdout=True,
stderr=False, stream=True):
sys.stdout.write(line)
thread.join() |
Accept template ID string values where serializers
normally expect a dict.
def to_internal_value(self, data):
"""Because we allow template ID string values, where
serializers normally expect a dict
"""
converted_data = _convert_template_id_to_dict(data)
return super(TemplateSerializer, self)\
.to_internal_value(converted_data) |
Compare two identifiers (for pre-release/build components).
def identifier_cmp(a, b):
"""Compare two identifier (for pre-release/build components)."""
a_cmp, a_is_int = _to_int(a)
b_cmp, b_is_int = _to_int(b)
if a_is_int and b_is_int:
# Numeric identifiers are compared as integers
return base_cmp(a_cmp, b_cmp)
elif a_is_int:
# Numeric identifiers have lower precedence
return -1
elif b_is_int:
return 1
else:
# Non-numeric identifiers are compared lexicographically
return base_cmp(a_cmp, b_cmp) |
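A few illustrative comparisons that follow from the precedence rules above:
# Numeric identifiers compare as integers: 2 < 11
#   identifier_cmp('2', '11')       -> -1
# Numeric identifiers sort before non-numeric ones:
#   identifier_cmp('1', 'alpha')    -> -1
# Non-numeric identifiers compare lexicographically:
#   identifier_cmp('alpha', 'beta') -> -1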
Compare two identifier lists (pre-release/build components).
The rule is:
- Identifiers are paired between lists
- They are compared from left to right
- If all first identifiers match, the longest list is greater.
>>> identifier_list_cmp(['1', '2'], ['1', '2'])
0
>>> identifier_list_cmp(['1', '2a'], ['1', '2b'])
-1
>>> identifier_list_cmp(['1'], ['1', '2'])
-1
def identifier_list_cmp(a, b):
"""Compare two identifier list (pre-release/build components).
The rule is:
- Identifiers are paired between lists
- They are compared from left to right
- If all first identifiers match, the longest list is greater.
>>> identifier_list_cmp(['1', '2'], ['1', '2'])
0
>>> identifier_list_cmp(['1', '2a'], ['1', '2b'])
-1
>>> identifier_list_cmp(['1'], ['1', '2'])
-1
"""
identifier_pairs = zip(a, b)
for id_a, id_b in identifier_pairs:
cmp_res = identifier_cmp(id_a, id_b)
if cmp_res != 0:
return cmp_res
# alpha1.3 < alpha1.3.1
return base_cmp(len(a), len(b)) |
Coerce an arbitrary version string into a semver-compatible one.
The rule is:
- If not enough components, fill minor/patch with zeroes; unless
partial=True
- If more than 3 dot-separated components, extra components are "build"
data. If some "build" data already appeared, append it to the
extra components
Examples:
>>> Version.coerce('0.1')
Version(0, 1, 0)
>>> Version.coerce('0.1.2.3')
Version(0, 1, 2, (), ('3',))
>>> Version.coerce('0.1.2.3+4')
Version(0, 1, 2, (), ('3', '4'))
>>> Version.coerce('0.1+2-3+4_5')
Version(0, 1, 0, (), ('2-3', '4-5'))
def coerce(cls, version_string, partial=False):
"""Coerce an arbitrary version string into a semver-compatible one.
The rule is:
- If not enough components, fill minor/patch with zeroes; unless
partial=True
- If more than 3 dot-separated components, extra components are "build"
data. If some "build" data already appeared, append it to the
extra components
Examples:
>>> Version.coerce('0.1')
Version(0, 1, 0)
>>> Version.coerce('0.1.2.3')
Version(0, 1, 2, (), ('3',))
>>> Version.coerce('0.1.2.3+4')
Version(0, 1, 2, (), ('3', '4'))
>>> Version.coerce('0.1+2-3+4_5')
Version(0, 1, 0, (), ('2-3', '4-5'))
"""
base_re = re.compile(r'^\d+(?:\.\d+(?:\.\d+)?)?')
match = base_re.match(version_string)
if not match:
raise ValueError(
"Version string lacks a numerical component: %r"
% version_string
)
version = version_string[:match.end()]
if not partial:
# We need a not-partial version.
while version.count('.') < 2:
version += '.0'
if match.end() == len(version_string):
return Version(version, partial=partial)
rest = version_string[match.end():]
# Cleanup the 'rest'
rest = re.sub(r'[^a-zA-Z0-9+.-]', '-', rest)
if rest[0] == '+':
# A 'build' component
prerelease = ''
build = rest[1:]
elif rest[0] == '.':
# An extra version component, probably 'build'
prerelease = ''
build = rest[1:]
elif rest[0] == '-':
rest = rest[1:]
if '+' in rest:
prerelease, build = rest.split('+', 1)
else:
prerelease, build = rest, ''
elif '+' in rest:
prerelease, build = rest.split('+', 1)
else:
prerelease, build = rest, ''
build = build.replace('+', '.')
if prerelease:
version = '%s-%s' % (version, prerelease)
if build:
version = '%s+%s' % (version, build)
return cls(version, partial=partial) |
Parse a version string into a Version() object.
Args:
version_string (str), the version string to parse
partial (bool), whether to accept incomplete input
coerce (bool), whether to try to map the passed in string into a
valid Version.
def parse(cls, version_string, partial=False, coerce=False):
"""Parse a version string into a Version() object.
Args:
version_string (str), the version string to parse
partial (bool), whether to accept incomplete input
coerce (bool), whether to try to map the passed in string into a
valid Version.
"""
if not version_string:
raise ValueError('Invalid empty version string: %r' % version_string)
if partial:
version_re = cls.partial_version_re
else:
version_re = cls.version_re
match = version_re.match(version_string)
if not match:
raise ValueError('Invalid version string: %r' % version_string)
major, minor, patch, prerelease, build = match.groups()
if _has_leading_zero(major):
raise ValueError("Invalid leading zero in major: %r" % version_string)
if _has_leading_zero(minor):
raise ValueError("Invalid leading zero in minor: %r" % version_string)
if _has_leading_zero(patch):
raise ValueError("Invalid leading zero in patch: %r" % version_string)
major = int(major)
minor = cls._coerce(minor, partial)
patch = cls._coerce(patch, partial)
if prerelease is None:
if partial and (build is None):
# No build info, strip here
return (major, minor, patch, None, None)
else:
prerelease = ()
elif prerelease == '':
prerelease = ()
else:
prerelease = tuple(prerelease.split('.'))
cls._validate_identifiers(prerelease, allow_leading_zeroes=False)
if build is None:
if partial:
build = None
else:
build = ()
elif build == '':
build = ()
else:
build = tuple(build.split('.'))
cls._validate_identifiers(build, allow_leading_zeroes=True)
return (major, minor, patch, prerelease, build) |
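Two illustrative parses, assuming the surrounding Version class with the usual strict and partial regular expressions:
# Version.parse('1.2.3-alpha.1+build.5')
#   -> (1, 2, 3, ('alpha', '1'), ('build', '5'))
# Version.parse('1.2', partial=True)
#   -> (1, 2, None, None, None)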
Retrieve comparison methods to apply on version components.
This is a private API.
Args:
partial (bool): whether to provide 'partial' or 'strict' matching.
Returns:
5-tuple of cmp-like functions.
def _comparison_functions(cls, partial=False):
"""Retrieve comparison methods to apply on version components.
This is a private API.
Args:
partial (bool): whether to provide 'partial' or 'strict' matching.
Returns:
5-tuple of cmp-like functions.
"""
def prerelease_cmp(a, b):
"""Compare prerelease components.
Special rule: a version without prerelease component has higher
precedence than one with a prerelease component.
"""
if a and b:
return identifier_list_cmp(a, b)
elif a:
# Versions with prerelease field have lower precedence
return -1
elif b:
return 1
else:
return 0
def build_cmp(a, b):
"""Compare build metadata.
Special rule: there is no ordering on build metadata.
"""
if a == b:
return 0
else:
return NotImplemented
def make_optional(orig_cmp_fun):
"""Convert a cmp-like function to consider 'None == *'."""
@functools.wraps(orig_cmp_fun)
def alt_cmp_fun(a, b):
if a is None or b is None:
return 0
return orig_cmp_fun(a, b)
return alt_cmp_fun
if partial:
return [
base_cmp, # Major is still mandatory
make_optional(base_cmp),
make_optional(base_cmp),
make_optional(prerelease_cmp),
make_optional(build_cmp),
]
else:
return [
base_cmp,
base_cmp,
base_cmp,
prerelease_cmp,
build_cmp,
] |
Helper for comparison.
Allows the caller to provide:
- The condition
- The return value if the comparison is meaningless (i.e. versions with
build metadata).
def __compare_helper(self, other, condition, notimpl_target):
"""Helper for comparison.
Allows the caller to provide:
- The condition
- The return value if the comparison is meaningless (i.e. versions with
build metadata).
"""
if not isinstance(other, self.__class__):
return NotImplemented
cmp_res = self.__cmp__(other)
if cmp_res is NotImplemented:
return notimpl_target
return condition(cmp_res) |
Check whether a Version satisfies the Spec.
def match(self, version):
"""Check whether a Version satisfies the Spec."""
return all(spec.match(version) for spec in self.specs) |
Select the best compatible version among an iterable of options.
def select(self, versions):
"""Select the best compatible version among an iterable of options."""
options = list(self.filter(versions))
if options:
return max(options)
return None |
Handle django.db.migrations.
def deconstruct(self):
"""Handle django.db.migrations."""
name, path, args, kwargs = super(VersionField, self).deconstruct()
kwargs['partial'] = self.partial
kwargs['coerce'] = self.coerce
return name, path, args, kwargs |
Converts any value to a base.Version field.
def to_python(self, value):
"""Converts any value to a base.Version field."""
if value is None or value == '':
return value
if isinstance(value, base.Version):
return value
if self.coerce:
return base.Version.coerce(value, partial=self.partial)
else:
return base.Version(value, partial=self.partial) |
Converts any value to a base.Spec field.
def to_python(self, value):
"""Converts any value to a base.Spec field."""
if value is None or value == '':
return value
if isinstance(value, base.Spec):
return value
return base.Spec(value) |
Make the drone move left.
def move_left(self):
"""Make the drone move left."""
self.at(ardrone.at.pcmd, True, -self.speed, 0, 0, 0) |
Make the drone move right.
def move_right(self):
"""Make the drone move right."""
self.at(ardrone.at.pcmd, True, self.speed, 0, 0, 0) |
Make the drone rise upwards.
def move_up(self):
"""Make the drone rise upwards."""
self.at(ardrone.at.pcmd, True, 0, 0, self.speed, 0) |
Make the drone descend.
def move_down(self):
"""Make the drone decent downwards."""
self.at(ardrone.at.pcmd, True, 0, 0, -self.speed, 0) |
Make the drone move forward.
def move_forward(self):
"""Make the drone move forward."""
self.at(ardrone.at.pcmd, True, 0, -self.speed, 0, 0) |
Make the drone move backwards.
def move_backward(self):
"""Make the drone move backwards."""
self.at(ardrone.at.pcmd, True, 0, self.speed, 0, 0) |
Make the drone rotate left.
def turn_left(self):
"""Make the drone rotate left."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, -self.speed) |
Make the drone rotate right.
def turn_right(self):
"""Make the drone rotate right."""
self.at(ardrone.at.pcmd, True, 0, 0, 0, self.speed) |
Toggle the drone's emergency state.
def reset(self):
"""Toggle the drone's emergency state."""
self.at(ardrone.at.ref, False, True)
time.sleep(0.1)
self.at(ardrone.at.ref, False, False) |
Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
def at(self, cmd, *args, **kwargs):
"""Wrapper for the low level at commands.
This method takes care that the sequence number is increased after each
at command and the watchdog timer is started to make sure the drone
receives a command at least every second.
"""
with self.lock:
self.com_watchdog_timer.cancel()
cmd(self.host, self.sequence, *args, **kwargs)
self.sequence += 1
self.com_watchdog_timer = threading.Timer(self.timer, self.commwdg)
self.com_watchdog_timer.start() |
Shut down communication with the drone.
This method does not land or halt the actual drone; it only stops
communication with it. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
to this object.
def halt(self):
"""Shutdown the drone.
This method does not land or halt the actual drone, but the
communication with the drone. You should call it at the end of your
application to close all sockets, pipes, processes and threads related
with this object.
"""
with self.lock:
self.com_watchdog_timer.cancel()
self.ipc_thread.stop()
self.ipc_thread.join()
self.network_process.terminate()
self.network_process.join() |
Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
def move(self, lr, fb, vv, va):
"""Makes the drone move (translate/rotate).
Parameters:
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right"""
self.at(ardrone.at.pcmd, True, lr, fb, vv, va) |
Basic behaviour of the drone: take-off/landing, emergency stop/reset.
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines
def ref(host, seq, takeoff, emergency=False):
"""
Basic behaviour of the drone: take-off/landing, emergency stop/reset.
Parameters:
seq -- sequence number
takeoff -- True: Takeoff / False: Land
emergency -- True: Turn off the engines
"""
p = 0b10001010101000000000000000000
if takeoff:
p |= 0b1000000000
if emergency:
p |= 0b100000000
at(host, 'REF', seq, [p]) |
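For example (the drone's address is purely illustrative):
# Take off, using sequence number 1:
#   ref('192.168.1.1', 1, True)
# Land:
#   ref('192.168.1.1', 2, False)
# Emergency: cut the engines:
#   ref('192.168.1.1', 3, False, emergency=True)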
Makes the drone move (translate/rotate).
Parameters:
seq -- sequence number
progressive -- True: enable progressive commands, False: disable (i.e.
enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
The above float values are a percentage of the maximum speed.
def pcmd(host, seq, progressive, lr, fb, vv, va):
"""
Makes the drone move (translate/rotate).
Parameters:
seq -- sequence number
progressive -- True: enable progressive commands, False: disable (i.e.
enable hovering mode)
lr -- left-right tilt: float [-1..1] negative: left, positive: right
fb -- front-back tilt: float [-1..1] negative: forwards, positive:
backwards
vv -- vertical speed: float [-1..1] negative: go down, positive: rise
va -- angular speed: float [-1..1] negative: spin left, positive: spin
right
The above float values are a percentage of the maximum speed.
"""
p = 1 if progressive else 0
at(host, 'PCMD', seq, [p, float(lr), float(fb), float(vv), float(va)]) |
Set configuration parameters of the drone.
def config(host, seq, option, value):
"""Set configuration parameters of the drone."""
at(host, 'CONFIG', seq, [str(option), str(value)]) |
Sends control values directly to the engines, overriding control loops.
Parameters:
seq -- sequence number
m1 -- Integer: front left command
m2 -- Integer: front right command
m3 -- Integer: back right command
m4 -- Integer: back left command
def pwm(host, seq, m1, m2, m3, m4):
"""
Sends control values directly to the engines, overriding control loops.
Parameters:
seq -- sequence number
m1 -- Integer: front left command
m2 -- Integer: front right command
m3 -- Integer: back right command
m4 -- Integer: back left command
"""
at(host, 'PWM', seq, [m1, m2, m3, m4]) |
Control the drone's LED.
Parameters:
seq -- sequence number
anim -- Integer: animation to play
f -- Float: frequency in Hz of the animation
d -- Integer: total duration in seconds of the animation
def led(host, seq, anim, f, d):
"""
Control the drone's LED.
Parameters:
seq -- sequence number
anim -- Integer: animation to play
f -- Float: frequency in Hz of the animation
d -- Integer: total duration in seconds of the animation
"""
at(host, 'LED', seq, [anim, float(f), d]) |
Makes the drone execute a predefined movement (animation).
Parameters:
seq -- sequence number
anim -- Integer: animation to play
d -- Integer: total duration in seconds of the animation
def anim(host, seq, anim, d):
"""
Makes the drone execute a predefined movement (animation).
Parameters:
seq -- sequence number
anim -- Integer: animation to play
d -- Integer: total duration in seconds of the animation
"""
at(host, 'ANIM', seq, [anim, d]) |
Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string
def at(host, command, seq, params):
"""
Parameters:
command -- the command
seq -- the sequence number
params -- a list of elements which can be either int, float or string
"""
params_str = []
for p in params:
if type(p) == int:
params_str.append('{:d}'.format(p))
elif type(p) == float:
params_str.append('{:d}'.format(f2i(p)))
elif type(p) == str:
params_str.append('"{:s}"'.format(p))
msg = 'AT*{:s}={:d},{:s}\r'.format(command, seq, ','.join(params_str))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(msg.encode(), (host, ardrone.constant.COMMAND_PORT)) |
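As a hedged illustration (the host and configuration option are made up), a CONFIG command is encoded and sent as a single UDP datagram:
# at('192.168.1.1', 'CONFIG', 5, ['general:navdata_demo', 'TRUE'])
# sends the payload
#   'AT*CONFIG=5,"general:navdata_demo","TRUE"\r'
# to ardrone.constant.COMMAND_PORT. Float parameters would first be
# reinterpreted as 32-bit integers by f2i() before formatting.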
Decode a navdata packet.
def decode(packet):
"""Decode a navdata packet."""
offset = 0
_ = struct.unpack_from('IIII', packet, offset)
s = _[1]
state = dict()
state['fly'] = s & 1 # FLY MASK : (0) ardrone is landed, (1) ardrone is flying
state['video'] = s >> 1 & 1 # VIDEO MASK : (0) video disable, (1) video enable
state['vision'] = s >> 2 & 1 # VISION MASK : (0) vision disable, (1) vision enable
state['control'] = s >> 3 & 1 # CONTROL ALGO (0) euler angles control, (1) angular speed control
state['altitude'] = s >> 4 & 1 # ALTITUDE CONTROL ALGO : (0) altitude control inactive (1) altitude control active
state['user_feedback_start'] = s >> 5 & 1 # USER feedback : Start button state
state['command'] = s >> 6 & 1 # Control command ACK : (0) None, (1) one received
state['fw_file'] = s >> 7 & 1 # Firmware file is good (1)
state['fw_ver'] = s >> 8 & 1 # Firmware update is newer (1)
state['fw_upd'] = s >> 9 & 1 # Firmware update is ongoing (1)
state['navdata_demo'] = s >> 10 & 1 # Navdata demo : (0) All navdata, (1) only navdata demo
state['navdata_bootstrap'] = s >> 11 & 1 # Navdata bootstrap : (0) options sent in all or demo mode, (1) no navdata options sent
state['motors'] = s >> 12 & 1 # Motor status : (0) Ok, (1) Motors problem
state['com_lost'] = s >> 13 & 1 # Communication lost : (1) com problem, (0) Com is ok
state['vbat_low'] = s >> 15 & 1 # VBat low : (1) too low, (0) Ok
state['user_el'] = s >> 16 & 1 # User Emergency Landing : (1) User EL is ON, (0) User EL is OFF
state['timer_elapsed'] = s >> 17 & 1 # Timer elapsed : (1) elapsed, (0) not elapsed
state['angles_out_of_range'] = s >> 19 & 1 # Angles : (0) Ok, (1) out of range
state['ultrasound'] = s >> 21 & 1 # Ultrasonic sensor : (0) Ok, (1) deaf
state['cutout'] = s >> 22 & 1 # Cutout system detection : (0) Not detected, (1) detected
state['pic_version'] = s >> 23 & 1 # PIC Version number OK : (0) a bad version number, (1) version number is OK
state['atcodec_thread_on'] = s >> 24 & 1 # ATCodec thread ON : (0) thread OFF (1) thread ON
state['navdata_thread_on'] = s >> 25 & 1 # Navdata thread ON : (0) thread OFF (1) thread ON
state['video_thread_on'] = s >> 26 & 1 # Video thread ON : (0) thread OFF (1) thread ON
state['acq_thread_on'] = s >> 27 & 1 # Acquisition thread ON : (0) thread OFF (1) thread ON
state['ctrl_watchdog'] = s >> 28 & 1 # CTRL watchdog : (1) delay in control execution (> 5ms), (0) control is well scheduled
state['adc_watchdog'] = s >> 29 & 1 # ADC Watchdog : (1) delay in uart2 dsr (> 5ms), (0) uart2 is good
state['com_watchdog'] = s >> 30 & 1 # Communication Watchdog : (1) com problem, (0) Com is ok
state['emergency'] = s >> 31 & 1 # Emergency landing : (0) no emergency, (1) emergency
data = dict()
data['state'] = state
data['header'] = _[0]
data['sequence'] = _[2]
data['vision'] = _[3]
offset += struct.calcsize('IIII')
demo_fields = [
'ctrl_state',
'battery',
'theta',
'phi',
'psi',
'altitude',
'vx',
'vy',
'vz',
'num_frames'
]
angles = ['theta', 'phi', 'psi']
while True:
try:
id_nr, size = struct.unpack_from('HH', packet, offset)
offset += struct.calcsize('HH')
except struct.error:
break
values = []
for i in range(size - struct.calcsize('HH')):
values.append(struct.unpack_from('c', packet, offset)[0])
offset += struct.calcsize('c')
if id_nr == 0:
values = struct.unpack_from('IIfffIfffI', b''.join(values))
demo = dict(zip(demo_fields, values))
for a in angles:
demo[a] = int(demo[a] / 1000)
data['demo'] = demo
return data |
Save VTK data to file.
def tofile(self, filename, format = 'ascii'):
"""Save VTK data to file.
"""
if not common.is_string(filename):
raise TypeError('argument filename must be string but got %s'%(type(filename)))
if format not in ['ascii','binary']:
raise TypeError('argument format must be ascii | binary')
filename = filename.strip()
if not filename:
raise ValueError('filename must be non-empty string')
if filename[-4:]!='.vtk':
filename += '.vtk'
f = open(filename,'wb')
f.write(self.to_string(format))
f.close() |
Simple helper to get the value of an instance's attribute if it exists.
If the instance attribute is callable it will be called and the result will
be returned.
Optionally accepts a default value to return if the attribute is missing.
Defaults to `None`
>>> class Foo(object):
... bar = 'baz'
... def hi(self):
... return 'hi'
>>> f = Foo()
>>> _get_attr_value(f, 'bar')
'baz'
>>> _get_attr_value(f, 'xyz')
>>> _get_attr_value(f, 'xyz', False)
False
>>> _get_attr_value(f, 'hi')
'hi'
def _get_attr_value(instance, attr, default=None):
"""
Simple helper to get the value of an instance's attribute if it exists.
If the instance attribute is callable it will be called and the result will
be returned.
Optionally accepts a default value to return if the attribute is missing.
Defaults to `None`
>>> class Foo(object):
... bar = 'baz'
... def hi(self):
... return 'hi'
>>> f = Foo()
>>> _get_attr_value(f, 'bar')
'baz'
>>> _get_attr_value(f, 'xyz')
>>> _get_attr_value(f, 'xyz', False)
False
>>> _get_attr_value(f, 'hi')
'hi'
"""
value = default
if hasattr(instance, attr):
value = getattr(instance, attr)
if callable(value):
value = value()
return value |
Audits the provided customer's subscription against stripe and returns a pair
that contains a boolean and a result type.
Default result types can be found in zebra.conf.defaults and can be
overridden in your project's settings.
def audit_customer_subscription(customer, unknown=True):
"""
Audits the provided customer's subscription against stripe and returns a pair
that contains a boolean and a result type.
Default result types can be found in zebra.conf.defaults and can be
overridden in your project's settings.
"""
if (hasattr(customer, 'suspended') and customer.suspended):
result = AUDIT_RESULTS['suspended']
else:
if hasattr(customer, 'subscription'):
try:
result = AUDIT_RESULTS[customer.subscription.status]
except KeyError, err:
# TODO should this be a more specific exception class?
raise Exception("Unable to locate a result set for \
subscription status %s in ZEBRA_AUDIT_RESULTS") % str(err)
else:
result = AUDIT_RESULTS['no_subscription']
return result |
Use VtkData(<filename>).
def polydata_fromfile(f, self):
"""Use VtkData(<filename>)."""
points = []
data = dict(vertices=[], lines=[], polygons=[], triangle_strips=[])
l = common._getline(f).decode('ascii')
k,n,datatype = [s.strip().lower() for s in l.split(' ')]
if k!='points':
raise ValueError('expected points but got %s'%(repr(k)))
n = int(n)
assert datatype in ['bit','unsigned_char','char','unsigned_short','short','unsigned_int','int','unsigned_long','long','float','double'],repr(datatype)
log.debug('\tgetting %s points'%n)
while len(points) < 3*n:
l = common._getline(f).decode('ascii')
points += map(eval,l.split(' '))
assert len(points)==3*n
while 1:
l = common._getline(f)
if l is None:
break
l = l.decode('ascii')
sl = l.split(' ')
k = sl[0].strip().lower()
if k not in ['vertices','lines','polygons','triangle_strips']:
break
assert len(sl)==3
n = int(sl[1])
size = int(sl[2])
lst = []
while len(lst) < size:
l = common._getline(f).decode('ascii')
lst += map(eval, l.split(' '))
assert len(lst)==size
lst2 = []
j = 0
for i in range(n):
lst2.append(lst[j+1:j+lst[j]+1])
j += lst[j]+1
data[k] = lst2
return PolyData(points,data['vertices'], data['lines'], data['polygons'], data['triangle_strips']), l.encode() |
Handles all known webhooks from stripe, and calls signals.
Plug in as you need.
def webhooks(request):
"""
Handles all known webhooks from stripe, and calls signals.
Plug in as you need.
"""
if request.method != "POST":
return HttpResponse("Invalid Request.", status=400)
json = simplejson.loads(request.POST["json"])
if json["event"] == "recurring_payment_failed":
zebra_webhook_recurring_payment_failed.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json)
elif json["event"] == "invoice_ready":
zebra_webhook_invoice_ready.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json)
elif json["event"] == "recurring_payment_succeeded":
zebra_webhook_recurring_payment_succeeded.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json)
elif json["event"] == "subscription_trial_ending":
zebra_webhook_subscription_trial_ending.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json)
elif json["event"] == "subscription_final_payment_attempt_failed":
zebra_webhook_subscription_final_payment_attempt_failed.send(sender=None, customer=_try_to_get_customer_from_customer_id(json["customer"]), full_json=json)
elif json["event"] == "ping":
zebra_webhook_subscription_ping_sent.send(sender=None)
else:
return HttpResponse(status=400)
return HttpResponse(status=200) |
Handles all known webhooks from stripe, and calls signals.
Plug in as you need.
def webhooks_v2(request):
"""
Handles all known webhooks from stripe, and calls signals.
Plug in as you need.
"""
if request.method != "POST":
return HttpResponse("Invalid Request.", status=400)
try:
event_json = simplejson.loads(request.body)
except AttributeError:
# Backwards compatibility
# Prior to Django 1.4, request.body was named request.raw_post_data
event_json = simplejson.loads(request.raw_post_data)
event_key = event_json['type'].replace('.', '_')
if event_key in WEBHOOK_MAP:
WEBHOOK_MAP[event_key].send(sender=None, full_json=event_json)
return HttpResponse(status=200) |
Check if obj is a number.
def is_number(obj):
"""Check if obj is number."""
return isinstance(obj, (int, float, np.int_, np.float_)) |
Return sequence.
def get_seq(self,obj,default=None):
"""Return sequence."""
if is_sequence(obj):
return obj
if is_number(obj): return [obj]
if obj is None and default is not None:
log.warning('using default value (%s)'%(default))
return self.get_seq(default)
raise ValueError('expected sequence|number but got %s'%(type(obj))) |
Return sequence of sequences.
def get_seq_seq(self,obj,default=None):
"""Return sequence of sequences."""
if is_sequence2(obj):
return [self.get_seq(o,default) for o in obj]
else:
return [self.get_seq(obj,default)] |
Return 3-tuple from
number -> (obj,default[1],default[2])
0-sequence|None -> default
1-sequence -> (obj[0],default[1],default[2])
2-sequence -> (obj[0],obj[1],default[2])
(3 or more)-sequence -> (obj[0],obj[1],obj[2])
def get_3_tuple(self,obj,default=None):
"""Return 3-tuple from
number -> (obj,default[1],default[2])
0-sequence|None -> default
1-sequence -> (obj[0],default[1],default[2])
2-sequence -> (obj[0],obj[1],default[2])
(3 or more)-sequence -> (obj[0],obj[1],obj[2])
"""
if not (default is not None \
and type(default) is tuple \
and len(default)==3):
raise ValueError('argument default must be 3-tuple|None but got %s'%(default))
if is_sequence(obj):
n = len(obj)
if n>3:
log.warning('expected 3-sequence but got %s-%s'%(n,type(obj)))
if n>=3:
return tuple(obj)
log.warning('filling with default value (%s) to obtain size=3'%(default[0]))
if default is not None:
if n==0:
return default
elif n==1:
return (obj[0],default[1],default[2])
elif n==2:
return (obj[0],obj[1],default[2])
elif is_number(obj) and default is not None:
log.warning('filling with default value (%s) to obtain size=3'%(default[0]))
return (obj,default[1],default[2])
elif obj is None and default is not None:
log.warning('filling with default value (%s) to obtain size=3'%(default[0]))
return default
raise ValueError('failed to construct 3-tuple from %s-%s'%(n,type(obj))) |
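Illustrative results with default=(0, 0, 0), matching the mapping listed in the docstring:
#   self.get_3_tuple(5, (0, 0, 0))         -> (5, 0, 0)
#   self.get_3_tuple([1, 2], (0, 0, 0))    -> (1, 2, 0)   (logs a warning)
#   self.get_3_tuple(None, (0, 0, 0))      -> (0, 0, 0)
#   self.get_3_tuple((7, 8, 9), (0, 0, 0)) -> (7, 8, 9)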
Return list of 3-tuples from
sequence of a sequence,
sequence - it is mapped to sequence of 3-sequences if possible
number
def get_3_tuple_list(self,obj,default=None):
"""Return list of 3-tuples from
sequence of a sequence,
sequence - it is mapped to sequence of 3-sequences if possible
number
"""
if is_sequence2(obj):
return [self.get_3_tuple(o,default) for o in obj]
elif is_sequence(obj):
return [self.get_3_tuple(obj[i:i+3],default) for i in range(0,len(obj),3)]
else:
return [self.get_3_tuple(obj,default)] |
Return tuple of 3-tuples
def get_3_3_tuple(self,obj,default=None):
"""Return tuple of 3-tuples
"""
if is_sequence2(obj):
ret = []
for i in range(3):
if i<len(obj):
ret.append(self.get_3_tuple(obj[i],default))
else:
ret.append(self.get_3_tuple(default,default))
return tuple(ret)
if is_sequence(obj):
if len(obj)>9:
log.warning('ignoring elements obj[i], i>=9')
r = obj[:9]
r = [self.get_3_tuple(r[j:j+3],default) for j in range(0,len(r),3)]
if len(r)<3:
log.warning('filling with default value (%s) to obtain size=3'%(default[0]))
while len(r)<3:
r.append(self.get_3_tuple(default,default))
return tuple(r)
log.warning('filling with default value (%s) to obtain size=3'%(default[0]))
r1 = self.get_3_tuple(obj,default)
r2 = self.get_3_tuple(default,default)
r3 = self.get_3_tuple(default,default)
return (r1,r2,r3) |
Return list of 3x3-tuples.
def get_3_3_tuple_list(self,obj,default=None):
"""Return list of 3x3-tuples.
"""
if is_sequence3(obj):
return [self.get_3_3_tuple(o,default) for o in obj]
return [self.get_3_3_tuple(obj,default)] |
Iterate through the application configuration and instantiate
the services.
def connect(self):
"""Iterate through the application configuration and instantiate
the services.
"""
requested_services = set(
svc.lower() for svc in current_app.config.get('BOTO3_SERVICES', [])
)
region = current_app.config.get('BOTO3_REGION')
sess_params = {
'aws_access_key_id': current_app.config.get('BOTO3_ACCESS_KEY'),
'aws_secret_access_key': current_app.config.get('BOTO3_SECRET_KEY'),
'profile_name': current_app.config.get('BOTO3_PROFILE'),
'region_name': region
}
sess = boto3.session.Session(**sess_params)
try:
cns = {}
for svc in requested_services:
# Check for optional parameters
params = current_app.config.get(
'BOTO3_OPTIONAL_PARAMS', {}
).get(svc, {})
# Get session params and override them with kwargs
# `profile_name` cannot be passed to clients and resources
kwargs = sess_params.copy()
kwargs.update(params.get('kwargs', {}))
del kwargs['profile_name']
# Override the region if one is defined as an argument
args = params.get('args', [])
if len(args) >= 1:
del kwargs['region_name']
if not(isinstance(args, list) or isinstance(args, tuple)):
args = [args]
# Create resource or client
if svc in sess.get_available_resources():
cns.update({svc: sess.resource(svc, *args, **kwargs)})
else:
cns.update({svc: sess.client(svc, *args, **kwargs)})
except UnknownServiceError:
raise
return cns |
Get all clients (with and without associated resources)
def clients(self):
"""
Get all clients (with and without associated resources)
"""
clients = {}
for k, v in self.connections.items():
if hasattr(v.meta, 'client'): # has boto3 resource
clients[k] = v.meta.client
else: # no boto3 resource
clients[k] = v
return clients |
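A configuration sketch for the two snippets above, assuming they live on a Flask extension class (called Boto3 here; the import path is an assumption) that exposes connections during a request:
from flask import Flask
# from flask_boto3 import Boto3   # assumed import path for the extension class

app = Flask(__name__)
app.config['BOTO3_REGION'] = 'us-east-1'
app.config['BOTO3_SERVICES'] = ['s3', 'sqs']
app.config['BOTO3_OPTIONAL_PARAMS'] = {
    # extra kwargs forwarded to sess.resource()/sess.client() by connect()
    's3': {'kwargs': {'endpoint_url': 'http://localhost:9000'}},
}
# boto3_ext = Boto3(app)
# Inside a request context: boto3_ext.connections['s3'] is a boto3 Resource,
# and boto3_ext.clients['sqs'] is the matching low-level client.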
Configure the GSSAPI service name, and validate the presence of the
appropriate principal in the kerberos keytab.
:param app: a flask application
:type app: flask.Flask
:param service: GSSAPI service name
:type service: str
:param hostname: hostname the service runs under
:type hostname: str
def init_kerberos(app, service='HTTP', hostname=gethostname()):
'''
Configure the GSSAPI service name, and validate the presence of the
appropriate principal in the kerberos keytab.
:param app: a flask application
:type app: flask.Flask
:param service: GSSAPI service name
:type service: str
:param hostname: hostname the service runs under
:type hostname: str
'''
global _SERVICE_NAME
_SERVICE_NAME = "%s@%s" % (service, hostname)
if 'KRB5_KTNAME' not in environ:
app.logger.warn("Kerberos: set KRB5_KTNAME to your keytab file")
else:
try:
principal = kerberos.getServerPrincipalDetails(service, hostname)
except kerberos.KrbError as exc:
app.logger.warn("Kerberos: %s" % exc.message[0])
else:
app.logger.info("Kerberos: server is %s" % principal) |
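Typical wiring, assuming pykerberos is installed and the keytab holds the HTTP/<hostname> principal (the hostname and keytab path below are illustrative):
import os
from flask import Flask

app = Flask(__name__)
# Point Kerberos at the keytab before validating the principal.
os.environ.setdefault('KRB5_KTNAME', '/etc/krb5.keytab')
init_kerberos(app, service='HTTP', hostname='api.example.com')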
date to unix timestamp in milliseconds
def date_to_timestamp(date):
"""
date to unix timestamp in milliseconds
"""
date_tuple = date.timetuple()
timestamp = calendar.timegm(date_tuple) * 1000
return timestamp |
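A quick check of the conversion; the date is treated as midnight UTC:
import datetime

# 2020-01-01 00:00:00 UTC is 1 577 836 800 seconds after the epoch.
assert date_to_timestamp(datetime.date(2020, 1, 1)) == 1577836800000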
This function will return a random datetime between two datetime objects.
:param dt_from: start of the range
:param dt_to: end of the range
def random_date(dt_from, dt_to):
"""
This function will return a random datetime between two datetime objects.
:param dt_from: start of the range
:param dt_to: end of the range
"""
delta = dt_to - dt_from
int_delta = (delta.days * 24 * 60 * 60) + delta.seconds
random_second = randrange(int_delta)
return dt_from + datetime.timedelta(seconds=random_second) |
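Usage sketch; seeding the random module only makes the example repeatable:
import datetime
import random

random.seed(0)
dt_from = datetime.datetime(2024, 1, 1)
dt_to = datetime.datetime(2024, 12, 31)
picked = random_date(dt_from, dt_to)
assert dt_from <= picked < dt_to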
transform object to json
def object_to_json(obj, indent=2):
"""
transform object to json
"""
instance_json = json.dumps(obj, indent=indent, ensure_ascii=False, cls=DjangoJSONEncoder)
return instance_json |
transform QuerySet to json
def qs_to_json(qs, fields=None):
"""
transform QuerySet to json
"""
if not fields :
fields = [f.name for f in qs.model._meta.fields]
# build the list of objects to serialize
objects = []
for value_dict in qs.values(*fields):
# preserve the field order as defined in the model
o = OrderedDict()
for f in fields:
o[f] = value_dict[f]
objects.append(o)
# serialize
json_qs = json.dumps(objects, indent=2, ensure_ascii=False, cls=DjangoJSONEncoder)
return json_qs |
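Hypothetical Django usage (the Order model and its fields are invented for illustration):
# from myapp.models import Order
# print(qs_to_json(Order.objects.filter(status='new'), fields=['id', 'status', 'created']))
# -> a JSON array of objects with fields in model-definition order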
transform mongoengine.QuerySet to json
def mongoqs_to_json(qs, fields=None):
"""
transform mongoengine.QuerySet to json
"""
items = list(qs.as_pymongo())
for element in items:
    element.pop('_cls', None)
# DjangoJSONEncoder converts date/datetime values during serialization
json_qs = json.dumps(items, indent=2, ensure_ascii=False, cls=DjangoJSONEncoder)
return json_qs |
join base_url and the request's GET parameters into one URL; optionally build an absolute URL
usage example:
c['current_url'] = url_path(request, is_full=False)
...
<a href="{{ current_url }}">Lab number</a>
def url_path(request, base_url=None, is_full=False, *args, **kwargs):
"""
join base_url and the request's GET parameters into one URL; optionally build an absolute URL
usage example:
c['current_url'] = url_path(request, is_full=False)
...
<a href="{{ current_url }}">Lab number</a>
"""
if not base_url:
base_url = request.path
if is_full:
protocol = 'https' if request.is_secure() else 'http'
base_url = '%s://%s%s' % (protocol, request.get_host(), base_url)
params = url_params(request, *args, **kwargs)
url = '%s%s' % (base_url, params)
return url |
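A hedged sketch of typical use in a view (the view and template names are illustrative):
# def order_list(request):
#     c = {}
#     # absolute URL of the current page, keeping its GET parameters
#     c['current_url'] = url_path(request, is_full=True)
#     # same path but without the 'page' parameter (forwarded to url_params)
#     c['pager_url'] = url_path(request, except_params=('page',))
#     return render(request, 'orders/list.html', c)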
create string with GET-params of request
usage example:
c['sort_url'] = url_params(request, except_params=('sort',))
...
<a href="{{ sort_url }}&sort=lab_number">Лабораторный номер</a>
def url_params(request, except_params=None, as_is=False):
"""
create string with GET-params of request
usage example:
c['sort_url'] = url_params(request, except_params=('sort',))
...
<a href="{{ sort_url }}&sort=lab_number">Лабораторный номер</a>
"""
if not request.GET:
return ''
params = []
for key, value in request.GET.items():
if except_params and key not in except_params:
for v in request.GET.getlist(key):
params.append('%s=%s' % (key, urlquote(v)))
str_params = '?' + '&'.join(params)
# as_is=True keeps '?', '&' and '=' literal (prepare_sort_params below relies on this)
if not as_is:
    str_params = urlquote(str_params)
return mark_safe(str_params) |
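Hypothetical use for a request to /orders/?page=2&sort=-created:
# Keep every GET parameter except 'sort' so a template can append its own value;
# as_is=True leaves '?', '&' and '=' unquoted.
# sort_url = url_params(request, except_params=('sort',), as_is=True)
# In a template: <a href="{{ sort_url }}&sort=lab_number">Lab number</a>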
Prepare sort params. Adds a reversing '-' prefix when needed.
Params:
params - list of sort parameters
request
sort_key
revers_sort - list or set of keys whose default sort direction should be reversed
except_params - GET-params that will be ignored
Example:
view:
c['sort_params'] = prepare_sort_params(
('order__lab_number', 'order__client__lname', 'organization', 'city', 'street', ),
request,
)
template:
<th><a href="{{ sort_params.order__lab_number.url }}">Лабораторный номер</a></th>
or
{% load djutils %}
...
{% sort_th 'order__lab_number' 'Lab number' %}
def prepare_sort_params(params, request, sort_key='sort', revers_sort=None, except_params=None):
"""
Prepare sort params. Adds a reversing '-' prefix when needed.
Params:
params - list of sort parameters
request
sort_key
revers_sort - list or set of keys whose default sort direction should be reversed
except_params - GET-params that will be ignored
Example:
view:
c['sort_params'] = prepare_sort_params(
('order__lab_number', 'order__client__lname', 'organization', 'city', 'street', ),
request,
)
template:
<th><a href="{{ sort_params.order__lab_number.url }}">Лабораторный номер</a></th>
or
{% load djutils %}
...
{% sort_th 'order__lab_number' 'Lab number' %}
"""
current_param, current_reversed = sort_key_process(request, sort_key)
except_params = list(except_params or [])  # copy so the caller's list is not mutated
except_params.append(sort_key)
base_url = url_params(request, except_params=except_params, as_is=True)
sort_params = {}
revers_sort = revers_sort or set()
url_connector = '?' if request.get_full_path() == request.path else "&"
for p in params:
sort_params[p] = {}
if current_param and p == current_param:
prefix = '' if current_reversed else '-'
sort_params[p]['url'] = base_url + "%s%s=%s" % (url_connector, sort_key, prefix + current_param)
sort_params[p]['is_reversed'] = current_reversed
sort_params[p]['is_current'] = True
else:
default_direction = '-' if p in revers_sort else ''
sort_params[p]['url'] = base_url + "%s%s=%s%s" % (url_connector, sort_key, default_direction, p)
sort_params[p]['is_reversed'] = False
sort_params[p]['is_current'] = False
return sort_params |
process sort-parameter value (for example, "-name")
return:
current_param - field for sorting ("name")
current_reversed - reverse flag (True)
def sort_key_process(request, sort_key='sort'):
"""
process sort-parameter value (for example, "-name")
return:
current_param - field for sorting ("name")
current_reversed - reverse flag (True)
"""
current = request.GET.get(sort_key)
current_reversed = False
current_param = None
if current:
mo = re.match(r'^(-?)(\w+)$', current)  # strip the leading "-" (if present)
if mo:
current_reversed = mo.group(1) == '-'
current_param = mo.group(2)
return current_param, current_reversed |
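A quick check using Django's test RequestFactory (requires Django settings to be configured; the path is illustrative):
from django.test import RequestFactory

request = RequestFactory().get('/orders/', {'sort': '-lab_number'})
assert sort_key_process(request) == ('lab_number', True)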
transform form errors to list like
["field1: error1", "field2: error2"]
def transform_form_error(form, verbose=True):
"""
transform form errors to list like
["field1: error1", "field2: error2"]
"""
errors = []
for field, err_msg in form.errors.items():
if field == '__all__': # general errors
errors.append(', '.join(err_msg))
else: # field errors
field_name = field
if verbose and field in form.fields:
field_name = form.fields[field].label or field
errors.append('%s: %s' % (field_name, ', '.join(err_msg)))
return errors |
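Hypothetical usage with a Django form (the form class and its label are invented for illustration):
# class ContactForm(forms.Form):
#     email = forms.EmailField(label='E-mail')
#
# form = ContactForm(data={'email': 'not-an-email'})
# form.is_valid()
# transform_form_error(form)   # -> ['E-mail: <validation message>']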
to_datetime - whether to convert date values to datetime
default_dt_to - whether to set dt_to to a default value that is guaranteed to lie in the future
def process_date_from_to_options(options, to_datetime=False, default_dt_to=False):
"""
to_datetime - whether to convert date values to datetime
default_dt_to - whether to set dt_to to a default value that is guaranteed to lie in the future
"""
start_time = datetime.datetime.now()
if options.get('last_week'):
dt_from = start_time - datetime.timedelta(days=7)
dt_to = start_time
elif options.get('last_day'):
dt_from = start_time - datetime.timedelta(days=1)
dt_to = start_time
elif options.get('last_2hours'):
dt_from = start_time - datetime.timedelta(hours=2)
dt_to = start_time
else:
from_str = options.get('from')
if from_str:
try:
dt_from = iso_to_datetime(from_str)
except:
dt_from = iso_to_date(from_str)
else:
dt_from = None
to_str = options.get('to')
if to_str:
try:
dt_to = iso_to_datetime(to_str)
except:
dt_to = iso_to_date(to_str)
else:
dt_to = None
if default_dt_to and not dt_to:
dt_to = datetime.datetime(2100, 1, 1)
if to_datetime:
if isinstance(dt_from, datetime.date):
dt_from = date_to_datetime(dt_from)
if isinstance(dt_to, datetime.date):
dt_to = date_to_datetime_lte(dt_to)
return dt_from, dt_to |
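Typical calls from a management command's options dict (assumes the iso_to_datetime / iso_to_date / date_to_datetime helpers referenced above are importable alongside this function):
# last 24 hours, both bounds as datetime objects
dt_from, dt_to = process_date_from_to_options({'last_day': True}, to_datetime=True)

# explicit ISO lower bound; dt_to gets a far-future default when omitted
dt_from, dt_to = process_date_from_to_options({'from': '2024-01-01'}, default_dt_to=True)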
Collect data into chunks of up to length n.
:type iterable: Iterable[T]
:type n: int
:rtype: Iterator[list[T]]
def _chunked(iterable, n):
"""
Collect data into chunks of up to length n.
:type iterable: Iterable[T]
:type n: int
:rtype: Iterator[list[T]]
"""
it = iter(iterable)
while True:
chunk = list(islice(it, n))
if chunk:
yield chunk
else:
return |
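For example, with the function above in scope:
assert list(_chunked(range(7), 3)) == [[0, 1, 2], [3, 4, 5], [6]]
assert list(_chunked([], 3)) == []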
Look up gender for a list of names.
Can optionally refine search with locale info.
May make multiple requests if there are more names than
can be retrieved in one call.
:param names: List of names.
:type names: Iterable[str]
:param country_id: Optional ISO 3166-1 alpha-2 country code.
:type country_id: Optional[str]
:param language_id: Optional ISO 639-1 language code.
:type language_id: Optional[str]
:param retheader: Optional; whether to also return the response headers.
:type retheader: Optional[bool]
:return:
If retheader is False:
List of dicts containing 'name', 'gender',
'probability', 'count' keys. If 'gender' is None,
'probability' and 'count' will be omitted.
else:
A dict containing 'data' and 'headers' keys.
Data is the same as when retheader is False.
Headers are the response header
(a requests.structures.CaseInsensitiveDict).
If multiple requests were made,
the header will be from the last one.
:rtype: Union[dict, Sequence[dict]]
:raises GenderizeException: if API server returns HTTP error code.
def get(self, names, country_id=None, language_id=None, retheader=False):
"""
Look up gender for a list of names.
Can optionally refine search with locale info.
May make multiple requests if there are more names than
can be retrieved in one call.
:param names: List of names.
:type names: Iterable[str]
:param country_id: Optional ISO 3166-1 alpha-2 country code.
:type country_id: Optional[str]
:param language_id: Optional ISO 639-1 language code.
:type language_id: Optional[str]
:param retheader: Optional; whether to also return the response headers.
:type retheader: Optional[bool]
:return:
If retheader is False:
List of dicts containing 'name', 'gender',
'probability', 'count' keys. If 'gender' is None,
'probability' and 'count' will be omitted.
else:
A dict containing 'data' and 'headers' keys.
Data is the same as when retheader is False.
Headers are the response header
(a requests.structures.CaseInsensitiveDict).
If multiple requests were made,
the header will be from the last one.
:rtype: Union[dict, Sequence[dict]]
:raises GenderizeException: if API server returns HTTP error code.
"""
responses = [
self._get_chunk(name_chunk, country_id, language_id)
for name_chunk
in _chunked(names, Genderize.BATCH_SIZE)
]
data = list(chain.from_iterable(
response.data for response in responses
))
if retheader:
return {
"data": data,
"headers": responses[-1].headers,
}
else:
return data |
Look up gender for a single name.
See :py:meth:`get`.
Doesn't support retheader option.
def get1(self, name, **kwargs):
"""
Look up gender for a single name.
See :py:meth:`get`.
Doesn't support retheader option.
"""
if 'retheader' in kwargs:
raise GenderizeException(
"get1() doesn't support the retheader option.")
return self.get([name], **kwargs)[0] |
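Usage sketch with the genderize.io client these methods belong to (network access, and an API key for larger volumes, is required; the returned values are illustrative):
# from genderize import Genderize
# genderize = Genderize()
# genderize.get(['James', 'Eva', 'Thunderhorse'])
# # -> [{'name': 'James', 'gender': 'male', ...},
# #     {'name': 'Eva', 'gender': 'female', ...},
# #     {'name': 'Thunderhorse', 'gender': None}]
# genderize.get1('Eva')['gender']   # e.g. 'female'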
Hacky convenience function to dump a couple of Python variables in a
.mat file. See `awmstools.saveVars`.
def saveVarsInMat(filename, varNamesStr, outOf=None, **opts):
"""Hacky convinience function to dump a couple of python variables in a
.mat file. See `awmstools.saveVars`.
"""
from mlabwrap import mlab
filename, varnames, outOf = __saveVarsHelper(
filename, varNamesStr, outOf, '.mat', **opts)
try:
for varname in varnames:
mlab._set(varname, outOf[varname])
mlab._do("save('%s','%s')" % (filename, "', '".join(varnames)), nout=0)
finally:
assert varnames
mlab._do("clear('%s')" % "', '".join(varnames), nout=0) |
Creates a proxy for a variable.
XXX create and cache nested proxies also here.
def _make_proxy(self, varname, parent=None, constructor=MlabObjectProxy):
"""Creates a proxy for a variable.
XXX create and cache nested proxies also here.
"""
# FIXME why not just use gensym here?
proxy_val_name = "PROXY_VAL%d__" % self._proxy_count
self._proxy_count += 1
mlabraw.eval(self._session, "%s = %s;" % (proxy_val_name, varname))
res = constructor(self, proxy_val_name, parent)
self._proxies[proxy_val_name] = res
return res |