| code | docstring | source |
|---|---|---|
def print_stack_info(self):
try:
rest_api_id = None
deployment_found = False
response = self._cf_client.describe_stack_resources(StackName=self._stack_name)
print('\nThe following resources were created:')
rows = []
for resource in response['StackResources']:
if (resource['ResourceType'] == 'AWS::ApiGateway::RestApi'):
rest_api_id = resource['PhysicalResourceId']
elif (resource['ResourceType'] == 'AWS::ApiGateway::Deployment'):
deployment_found = True
row = []
row.append(resource['ResourceType'])
row.append(resource['LogicalResourceId'])
row.append(resource['PhysicalResourceId'])
rows.append(row)
"\n print('\t{}\t{}\t{}'.format(\n resource['ResourceType'],\n resource['LogicalResourceId'],\n resource['PhysicalResourceId']\n )\n )\n "
print(tabulate(rows, headers=['Resource Type', 'Logical ID', 'Physical ID']))
if (rest_api_id and deployment_found):
url = 'https:
print('\nThe deployed service can be found at this URL:')
print('\t{}\n'.format(url))
return response
except Exception as wtf:
print(wtf)
return None
|
List resources from the given stack
Args:
None
Returns:
A dictionary of stack resources, or None if things went sideways
|
codesearchnet
|
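A quick side sketch of the `tabulate` call used above. `tabulate` is a real third-party package; the row values below are made up for illustration:

```python
from tabulate import tabulate

rows = [["AWS::ApiGateway::RestApi", "Api", "abc123"],
        ["AWS::ApiGateway::Deployment", "Deployment", "def456"]]
# Prints an aligned plain-text table with a header row.
print(tabulate(rows, headers=["Resource Type", "Logical ID", "Physical ID"]))
```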
def solid_angle(center, coords):
o = np.array(center)
r = [np.array(c) - o for c in coords]
r.append(r[0])
n = [np.cross(r[i + 1], r[i]) for i in range(len(r) - 1)]
n.append(np.cross(r[1], r[0]))
vals = []
for i in range(len(n) - 1):
v = -np.dot(n[i], n[i + 1]) \
/ (np.linalg.norm(n[i]) * np.linalg.norm(n[i + 1]))
vals.append(acos(abs_cap(v)))
phi = sum(vals)
return phi + (3 - len(r)) * pi
|
Helper method to calculate the solid angle of a set of coords from the
center.
Args:
center (3x1 array): Center to measure solid angle from.
coords (Nx3 array): List of coords to determine solid angle.
Returns:
The solid angle.
|
juraj-google-style
|
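A hedged usage sketch for `solid_angle`, assuming the function and its `numpy`/`math` imports are in scope. The `abs_cap` helper below is an assumption (a pymatgen-style clamp), not part of the row above:

```python
import numpy as np
from math import acos, pi

def abs_cap(val, max_abs_val=1):
    # Assumed helper: clamp into [-1, 1] so acos() survives float drift.
    return max(min(val, max_abs_val), -max_abs_val)

# Corners of one face of a unit cube centered at the origin, ordered
# around the face. By symmetry each of the six faces subtends 4*pi/6 sr.
face = [(0.5, 0.5, 0.5), (-0.5, 0.5, 0.5), (-0.5, -0.5, 0.5), (0.5, -0.5, 0.5)]
print(solid_angle((0, 0, 0), face), 4 * pi / 6)  # both ~2.0944
```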
def capture_widget(widget, path=None):
if use_qt5:
pixmap = widget.grab()
else:
pixmap = QtGui.QPixmap.grabWidget(widget)
if path:
pixmap.save(path)
else:
image_buffer = QtCore.QBuffer()
image_buffer.open(QtCore.QIODevice.ReadWrite)
pixmap.save(image_buffer, "PNG")
return image_buffer.data().data()
|
Grab an image of a Qt widget
Args:
widget: The Qt Widget to capture
path (optional): The path to save to. If not provided - will return image data.
Returns:
If a path is provided, the image will be saved to it.
If not, the PNG buffer will be returned.
|
juraj-google-style
|
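A hedged usage sketch for the Qt5 path (`widget.grab()`), assuming `capture_widget` was imported from a module whose `use_qt5` flag is true and PyQt5 is installed:

```python
from PyQt5 import QtWidgets

app = QtWidgets.QApplication([])
label = QtWidgets.QLabel("hello")
label.resize(120, 40)
png_bytes = capture_widget(label)   # no path -> raw PNG bytes are returned
capture_widget(label, "label.png")  # with a path -> saved to disk instead
```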
def analyse(self, path_and_filename, pattern):
with open(path_and_filename) as handle:
content = handle.read()
loc = content.count('\n') + 1
com = 0
for match in re.findall(pattern, content, re.DOTALL):
com += match.count('\n') + 1
return max(0, loc - com), com
|
Find out lines of code and lines of comments.
Args:
path_and_filename (str): path and filename to parse for loc and com.
pattern (str): regex to search for line comments and block comments
Returns:
int, int: loc and com for given file.
|
juraj-google-style
|
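A worked sketch, assuming `analyse` is available on an instance (`analyser` below is hypothetical) and using a C-style block-comment regex:

```python
import tempfile

source = "int main() {\n/* two\nlines */\nreturn 0;\n}\n"
with tempfile.NamedTemporaryFile("w", suffix=".c", delete=False) as tmp:
    tmp.write(source)

# The block comment spans two lines, so com == 2; everything else is loc.
loc, com = analyser.analyse(tmp.name, r"/\*.*?\*/")
print(loc, com)  # -> 4 2
```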
def apply_instance_data(designspace, include_filenames=None, Font=defcon.Font):
from fontTools.designspaceLib import DesignSpaceDocument
from os.path import normcase, normpath
if hasattr(designspace, '__fspath__'):
designspace = designspace.__fspath__()
if isinstance(designspace, basestring):
designspace = DesignSpaceDocument.fromfile(designspace)
basedir = os.path.dirname(designspace.path)
instance_ufos = []
if (include_filenames is not None):
include_filenames = {normcase(normpath(p)) for p in include_filenames}
for designspace_instance in designspace.instances:
fname = designspace_instance.filename
assert (fname is not None), ('instance %r missing required filename' % getattr(designspace_instance, 'name', designspace_instance))
if (include_filenames is not None):
fname = normcase(normpath(fname))
if (fname not in include_filenames):
continue
logger.debug('Applying instance data to %s', fname)
ufo = Font(normpath(os.path.join(basedir, fname)))
set_weight_class(ufo, designspace, designspace_instance)
set_width_class(ufo, designspace, designspace_instance)
glyphs_instance = InstanceDescriptorAsGSInstance(designspace_instance)
to_ufo_custom_params(None, ufo, glyphs_instance)
ufo.save()
instance_ufos.append(ufo)
return instance_ufos
|
Open UFO instances referenced by designspace, apply Glyphs instance
data if present, re-save UFOs and return updated UFO Font objects.
Args:
designspace: DesignSpaceDocument object or path (str or PathLike) to
a designspace file.
include_filenames: optional set of instance filenames (relative to
the designspace path) to be included. By default all instances are
processed.
Font: the class used to load the UFO (default: defcon.Font).
Returns:
List of opened and updated instance UFOs.
|
codesearchnet
|
def vasp_version_from_outcar( filename='OUTCAR' ):
with open( filename ) as f:
line = f.readline().strip()
return line
|
Returns the first line from a VASP OUTCAR file, to get the VASP source version string.
Args:
filename (Str, optional): OUTCAR filename. Defaults to 'OUTCAR'.
Returns:
(Str): The first line read from the OUTCAR file.
|
juraj-google-style
|
def _create_moving_sequence(image, pad_lefts, total_padding):
with tf.name_scope('moving_sequence'):
def get_padded_image(args):
(pad_left,) = args
pad_right = (total_padding - pad_left)
padding = tf.stack([pad_left, pad_right], axis=(- 1))
z = tf.zeros((1, 2), dtype=pad_left.dtype)
padding = tf.concat([padding, z], axis=0)
return tf.pad(image, padding)
padded_images = tf.map_fn(get_padded_image, [pad_lefts], dtype=tf.uint8, infer_shape=False, back_prop=False)
return padded_images
|
Create a moving image sequence from the given image and left padding values.
Args:
image: [in_h, in_w, n_channels] uint8 array
pad_lefts: [sequence_length, 2] int32 array of left padding values
total_padding: tensor of padding values, (pad_h, pad_w)
Returns:
[sequence_length, out_h, out_w, n_channels] uint8 image sequence, where
out_h = in_h + pad_h, out_w = in_w + pad_w
|
codesearchnet
|
def refresh(self, **kwargs):
if self._id_attr:
path = '%s/%s' % (self.manager.path, self.id)
else:
path = self.manager.path
server_data = self.manager.gitlab.http_get(path, **kwargs)
self._update_attrs(server_data)
|
Refresh a single object from server.
Args:
**kwargs: Extra options to send to the server (e.g. sudo)
Returns None (updates the object)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabGetError: If the server cannot perform the request
|
juraj-google-style
|
def format(sql, args=None):
resolved_vars = {}
code = []
SqlStatement._find_recursive_dependencies(sql, args, code=code, resolved_vars=resolved_vars)
parts = []
for (escape, placeholder, _, literal) in SqlStatement._get_tokens(sql):
if escape:
parts.append('$')
elif placeholder:
variable = placeholder[1:]
try:
value = resolved_vars[variable]
except KeyError as e:
raise Exception(('Invalid sql. Unable to substitute $%s.' % e.args[0]))
if isinstance(value, types.ModuleType):
value = _utils.get_default_query_from_module(value)
if isinstance(value, SqlStatement):
sql = value.format(value._sql, resolved_vars)
value = ('(%s)' % sql)
elif ('_repr_sql_' in dir(value)):
value = value._repr_sql_()
elif isinstance(value, basestring):
value = SqlStatement._escape_string(value)
elif (isinstance(value, list) or isinstance(value, tuple)):
if isinstance(value, tuple):
value = list(value)
expansion = '('
for v in value:
if (len(expansion) > 1):
expansion += ', '
if isinstance(v, basestring):
expansion += SqlStatement._escape_string(v)
else:
expansion += str(v)
expansion += ')'
value = expansion
else:
value = str(value)
parts.append(value)
elif literal:
parts.append(literal)
expanded = ''.join(parts)
return expanded
|
Resolve variable references in a query within an environment.
This computes and resolves the transitive dependencies in the query and raises an
exception if that fails due to either undefined or circular references.
Args:
sql: query to format.
args: a dictionary of values to use in variable expansion.
Returns:
The resolved SQL text with variables expanded.
Raises:
Exception on failure.
|
codesearchnet
|
def validate(self, table: pd.DataFrame, failed_only=False) -> pd.DataFrame:
return pd.concat([
self._validate_input(table, failed_only=failed_only),
self._validate_output(table, failed_only=failed_only),
]).fillna(True)
|
Return a dataframe of validation results for the appropriate series vs the vector of validators.
Args:
table (pd.DataFrame): A dataframe on which to apply validation logic.
failed_only (bool): If ``True``: return only the indexes that failed to validate.
|
juraj-google-style
|
def vector_projection(v1, v2):
return scalar_projection(v1, v2) * v2 / np.linalg.norm(v2)
|
compute the vector projection of v1 upon v2
Args:
v1, v2: iterable
indices 0, 1, 2 corresponding to cartesian coordinates
Returns:
3-vector: the projection of v1 onto the direction of v2
|
juraj-google-style
|
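A worked sketch; `scalar_projection` is not shown in the row above, so the usual dot(v1, v2)/|v2| definition is assumed here:

```python
import numpy as np

def scalar_projection(v1, v2):
    # Assumed helper: length of v1's component along v2.
    return np.dot(v1, v2) / np.linalg.norm(v2)

v1 = np.array([1.0, 1.0, 0.0])
v2 = np.array([2.0, 0.0, 0.0])
print(vector_projection(v1, v2))  # -> [1. 0. 0.]
```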
def get_group(self, uuid=None):
if uuid is None:
uuid = self.uuid
group_data = self.get('group', params={'uuid': uuid})
return group_data
|
Get group data based on uuid.
Args:
uuid (str): optional uuid. defaults to self.uuid
Raises:
PyLmodUnexpectedData: No data was returned.
requests.RequestException: Exception connection error
Returns:
dict: group json
|
juraj-google-style
|
def piola_kirchoff_1(self, def_grad):
if (not self.is_symmetric):
raise ValueError('The stress tensor is not symmetric, PK stress is based on a symmetric stress tensor.')
def_grad = SquareTensor(def_grad)
return (def_grad.det * np.dot(self, def_grad.inv.trans))
|
calculates the first Piola-Kirchoff stress
Args:
def_grad (3x3 array-like): deformation gradient tensor
|
codesearchnet
|
def Delete(self, request, global_params=None):
config = self.GetMethodConfig('Delete')
return self._RunMethod(config, request, global_params=global_params)
|
Deletes a `BuildTrigger` by its project ID and trigger ID. This API is experimental.
Args:
request: (CloudbuildProjectsTriggersDeleteRequest) input message
global_params: (StandardQueryParameters, default: None) global arguments
Returns:
(Empty) The response message.
|
github-repos
|
def _maybe_add_main_op(self, main_op):
if main_op is None:
return
if not isinstance(main_op, ops.Operation):
raise TypeError(f'Expected {main_op} to be an Operation but got type {type(main_op)} instead.')
for init_op_key in (constants.MAIN_OP_KEY, constants.LEGACY_INIT_OP_KEY):
if ops.get_collection(init_op_key):
raise ValueError(f'Graph already contains one or more main ops under the collection {init_op_key}.')
ops.add_to_collection(constants.MAIN_OP_KEY, main_op)
|
Adds main op to the SavedModel.
Args:
main_op: Main op to run as part of graph initialization. If None, no main
op will be added to the graph.
Raises:
TypeError: If the main op is provided but is not of type `Operation`.
ValueError: if the Graph already contains an init op.
|
github-repos
|
def log_batch(self, log_data):
url = uri_join(self.base_url, 'log')
attachments = []
for log_item in log_data:
log_item['item_id'] = self.stack[(- 1)]
attachment = log_item.get('attachment', None)
if ('attachment' in log_item):
del log_item['attachment']
if attachment:
if (not isinstance(attachment, collections.Mapping)):
attachment = {'data': attachment}
name = attachment.get('name', str(uuid.uuid4()))
log_item['file'] = {'name': name}
attachments.append(('file', (name, attachment['data'], attachment.get('mime', 'application/octet-stream'))))
files = [('json_request_part', (None, json.dumps(log_data), 'application/json'))]
files.extend(attachments)
from reportportal_client import POST_LOGBATCH_RETRY_COUNT
for i in range(POST_LOGBATCH_RETRY_COUNT):
try:
r = self.session.post(url=url, files=files, verify=self.verify_ssl)
except KeyError:
if (i < (POST_LOGBATCH_RETRY_COUNT - 1)):
continue
else:
raise
break
logger.debug('log_batch - Stack: %s', self.stack)
logger.debug('log_batch response: %s', r.text)
return _get_data(r)
|
Logs batch of messages with attachment.
Args:
log_data: list of log records.
log record is a dict of;
time, message, level, attachment
attachment is a dict of:
name: name of attachment
data: fileobj or content
mime: content type for attachment
|
codesearchnet
|
def _add_validator(fv, validator_instance):
for flag_name in validator_instance.get_flags_names():
fv[flag_name].validators.append(validator_instance)
|
Register new flags validator to be checked.
Args:
fv: flags.FlagValues, the FlagValues instance to add the validator.
validator_instance: validators.Validator, the validator to add.
Raises:
KeyError: Raised when validators work with a non-existing flag.
|
codesearchnet
|
def create_transcripts_xml(video_id, video_el, resource_fs, static_dir):
video_transcripts = VideoTranscript.objects.filter(video__edx_video_id=video_id).order_by('language_code')
if video_transcripts.exists():
transcripts_el = SubElement(video_el, 'transcripts')
transcript_files_map = {}
for video_transcript in video_transcripts:
language_code = video_transcript.language_code
file_format = video_transcript.file_format
try:
transcript_filename = create_transcript_file(video_id=video_id, language_code=language_code, file_format=file_format, resource_fs=resource_fs.delegate_fs(), static_dir=combine(u'course', static_dir))
transcript_files_map[language_code] = transcript_filename
except TranscriptsGenerationException:
logger.exception('[VAL] Error while generating "%s" transcript for video["%s"].', language_code, video_id)
continue
SubElement(transcripts_el, 'transcript', {'language_code': language_code, 'file_format': Transcript.SRT, 'provider': video_transcript.provider})
return dict(xml=video_el, transcripts=transcript_files_map)
|
Creates xml for transcripts.
For each transcript element, an associated transcript file is also created in course OLX.
Arguments:
video_id (str): Video id of the video.
video_el (Element): lxml Element object
static_dir (str): The Directory to store transcript file.
resource_fs (SubFS): The file system to store transcripts.
Returns:
lxml Element object with transcripts information
|
codesearchnet
|
def _exec_one_test_with_retry(self, test_name, test_method, max_count):
def should_retry(record):
return record.result in [records.TestResultEnums.TEST_RESULT_FAIL, records.TestResultEnums.TEST_RESULT_ERROR]
previous_record = self.exec_one_test(test_name, test_method)
if not should_retry(previous_record):
return
for i in range(max_count - 1):
retry_name = f'{test_name}_retry_{i + 1}'
new_record = records.TestResultRecord(retry_name, self.TAG)
new_record.retry_parent = previous_record
new_record.parent = (previous_record, records.TestParentType.RETRY)
previous_record = self.exec_one_test(retry_name, test_method, new_record)
if not should_retry(previous_record):
break
|
Executes one test and retry the test if needed.
Repeatedly execute a test case until it passes or the maximum count of
iteration has been reached.
Args:
test_name: string, Name of the test.
test_method: function, The test method to execute.
max_count: int, the maximum number of iterations to execute the test for.
|
github-repos
|
def scatter_min(self, sparse_delta, use_locking=False, name=None):
raise NotImplementedError
|
Updates this variable with the min of `tf.IndexedSlices` and itself.
Args:
sparse_delta: `tf.IndexedSlices` to use as an argument of min with this
variable.
use_locking: If `True`, use locking during the operation.
name: the name of the operation.
Returns:
The updated variable.
Raises:
TypeError: if `sparse_delta` is not an `IndexedSlices`.
|
github-repos
|
def __init__(self, fsapi, filename, line_prepend='', prepend_timestamp=False):
self._fsapi = fsapi
self._filename = filename
if line_prepend:
line_prepend += ' '
self._line_prepend = line_prepend
self._prepend_timestamp = prepend_timestamp
self._line_buffer = LineBuffer()
|
Constructor.
Args:
fsapi: api.FileStreamApi instance
filename: Name of the file this stream is pushed to.
line_prepend: string to prepend to every line for this stream.
prepend_timestamp: If true a timestamp will be prepended to each line
(after line_prepend).
|
juraj-google-style
|
def estimate_blocktime(self, oldest: int = 256) -> float:
last_block_number = self.block_number()
if last_block_number < 1:
return 15
if last_block_number < oldest:
interval = (last_block_number - 1) or 1
else:
interval = last_block_number - oldest
assert interval > 0
last_timestamp = self.get_block_header(last_block_number)['timestamp']
first_timestamp = self.get_block_header(last_block_number - interval)['timestamp']
delta = last_timestamp - first_timestamp
return delta / interval
|
Calculate a blocktime estimate based on some past blocks.
Args:
oldest: delta in block numbers to go back.
Return:
average block time in seconds
|
juraj-google-style
|
def EnableNetworkInterfaces(
self, interfaces, logger, dhclient_script=None):
interfaces_to_up = [i for i in interfaces if i != 'eth0']
if interfaces_to_up:
logger.info('Enabling the Ethernet interfaces %s.', interfaces_to_up)
self._Dhcpcd(interfaces_to_up, logger)
|
Enable the list of network interfaces.
Args:
interfaces: list of string, the output device names to enable.
logger: logger object, used to write to SysLog and serial port.
dhclient_script: string, the path to a dhclient script used by dhclient.
|
juraj-google-style
|
async def change_url(self, url: str, description: str = None):
await self._change(url=url, description=description)
|
change the url of that attachment
|methcoro|
Args:
url: url you want to change
description: *optional* description for your attachment
Raises:
ValueError: url must not be None
APIException
|
juraj-google-style
|
def _ParseHTTPHeaders(self, header_data, offset, display_name):
header_string = header_data.decode('ascii', errors='replace')
try:
http_header_start = header_string.index('request-method')
except ValueError:
logger.debug('No request method in header: "{0:s}"'.format(header_string))
return (None, None)
http_headers = header_string[http_header_start:]
header_parts = http_headers.split('\x00')
request_method = header_parts[1]
if (request_method not in self._REQUEST_METHODS):
logger.debug("[{0:s}] {1:s}:{2:d}: Unknown HTTP method '{3:s}'. Response headers: '{4:s}'".format(self.NAME, display_name, offset, request_method, header_string))
try:
response_head_start = http_headers.index('response-head')
except ValueError:
logger.debug('No response head in header: "{0:s}"'.format(header_string))
return (request_method, None)
response_head = http_headers[response_head_start:]
response_head_parts = response_head.split('\x00')
response_head_text = response_head_parts[1]
response_head_text_parts = response_head_text.split('\r\n')
response_code = response_head_text_parts[0]
if (not response_code.startswith('HTTP')):
logger.debug("[{0:s}] {1:s}:{2:d}: Could not determine HTTP response code. Response headers: '{3:s}'.".format(self.NAME, display_name, offset, header_string))
return (request_method, response_code)
|
Extract relevant information from HTTP header.
Args:
header_data (bytes): HTTP header data.
offset (int): offset of the cache record, relative to the start of
the Firefox cache file.
display_name (str): display name of the Firefox cache file.
Returns:
tuple: containing:
str: HTTP request method or None if the value cannot be extracted.
str: HTTP response code or None if the value cannot be extracted.
|
codesearchnet
|
def getZernike(self, index):
        if index not in self._dictCache:
            self._dictCache[index] = self._polar(index, self._rhoMap,
                                                 self._thetaMap)
return self._dictCache[index]
|
getZernike
Retrieve a map representing the index-th Zernike polynomial
Args:
index (int): The index of Zernike map to be generated,
following Noll 1976 ordering.
Returns:
np.array: A map representing the index-th Zernike polynomial
|
juraj-google-style
|
def parse_author(cls, marc):
name = None
code = None
linked_forms = None
is_corporation = None
record = None
if marc['100a']:
name = _first_or_none(marc['100a'])
code = _first_or_none(marc['1007'])
is_corporation = False
record = marc.datafields['100'][0]
elif marc['110a']:
name = _first_or_none(marc['110a'])
code = _first_or_none(marc['1107'])
linked_forms = marc['410a2 ']
is_corporation = True
record = marc.datafields['110'][0]
else:
return None
linked_forms = marc['410a2 ']
type_descriptor = ['osoba', 'organizace']
alt_name = ('%s [%s]' % (name, type_descriptor[is_corporation]))
if linked_forms:
alt_name += ((' (' + ', '.join(linked_forms)) + ')')
return cls(name=name, code=code, linked_forms=linked_forms, is_corporation=is_corporation, record=record, alt_name=alt_name)
|
Parse author from `marc` data.
Args:
marc (obj): :class:`.MARCXMLRecord` instance. See module
:mod:`.marcxml_parser` for details.
Returns:
obj: :class:`Author`.
|
codesearchnet
|
def fit(self, sents, **kwargs):
tokens = list(itertools.chain.from_iterable(sents))
counter = Counter(tokens)
self.vocab = self.build_vocab(counter, **kwargs)
|
Builds a vocabulary object based on the tokens in the input.
Args:
sents: A list of lists of tokens (representing sentences)
Vocab kwargs include:
max_size
min_freq
specials
unk_init
|
codesearchnet
|
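A quick sketch of the flatten-and-count step `fit` performs before delegating to `build_vocab`:

```python
import itertools
from collections import Counter

sents = [["the", "cat"], ["the", "dog"]]
tokens = list(itertools.chain.from_iterable(sents))
print(Counter(tokens))  # Counter({'the': 2, 'cat': 1, 'dog': 1})
```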
def delete(self):
config = self.get()
if (not config):
return True
command = 'no router ospf {}'.format(config['ospf_process_id'])
return self.configure(command)
|
Removes the entire ospf process from the running configuration
Args:
None
Returns:
bool: True if the command completed successfully
|
codesearchnet
|
def _get_section(name, source):
pattern = re.compile('^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)'.format(name=name), (re.IGNORECASE | re.MULTILINE))
usage = None
for section in pattern.findall(source):
usage = _merge_section(usage, section.strip())
return usage
|
Extract the named section from the source.
Args:
name: The name of the section to extract (e.g. "Usage").
source: The usage string to parse.
Returns:
A string containing only the requested section. If the section appears
multiple times, each instance will be merged into a single section.
|
codesearchnet
|
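A usage sketch pulling the "Usage" section out of a docopt-style help string; matching is case-insensitive and keeps indented continuation lines:

```python
doc = ("My tool.\n\n"
       "Usage:\n"
       "  tool run <name>\n"
       "  tool stop\n\n"
       "Options:\n"
       "  -h  Show help.\n")
print(_get_section("usage", doc))
# Usage:
#   tool run <name>
#   tool stop
```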
def download(url, file=None):
import urllib.request
import shutil
if isinstance(file, str):
file = open(file, 'wb')
try:
with urllib.request.urlopen(url) as response:
if file:
shutil.copyfileobj(response, file)
else:
return response.read()
finally:
if file:
file.close()
|
Pass file as a filename, an open file object, or None to return the raw bytes
Args:
url (str): URL of file to download
file (Union[str, io, None]): One of the following:
- Filename of output file
- File opened in binary write mode
- None: Return raw bytes instead
Returns:
Union[bytes, None]: Bytes of file if file is None
|
codesearchnet
|
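A usage sketch (example.com is a placeholder URL):

```python
data = download("https://example.com/")        # file=None -> bytes returned
download("https://example.com/", "page.html")  # filename -> written to disk
print(len(data))
```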
def GetValueByName(self, name):
pyregf_value = self._pyregf_key.get_value_by_name(name)
if not pyregf_value:
return None
return REGFWinRegistryValue(pyregf_value)
|
Retrieves a value by name.
Value names are not unique and pyregf provides first match for the value.
Args:
name (str): name of the value or an empty string for the default value.
Returns:
WinRegistryValue: Windows Registry value if a corresponding value was
found or None if not.
|
juraj-google-style
|
def hdg60(msg):
d = hex2bin(data(msg))
if d[0] == '0':
return None
sign = int(d[1])
value = bin2int(d[2:12])
if sign:
value = value - 1024
hdg = value * 90 / 512.0
if hdg < 0:
hdg = 360 + hdg
return round(hdg, 3)
|
Magnetic heading of aircraft
Args:
msg (String): 28 bytes hexadecimal message (BDS60) string
Returns:
float: heading in degrees to magnetic north (from 0 to 360)
|
juraj-google-style
|
def power(self, n):
if ((not isinstance(n, (int, np.integer))) or (n < 1)):
raise QiskitError('Can only power with positive integer powers.')
if (self._input_dim != self._output_dim):
raise QiskitError('Can only power with input_dim = output_dim.')
ret = self.copy()
for _ in range(1, n):
ret = ret.compose(self)
return ret
|
Return the compose of a operator with itself n times.
Args:
n (int): the number of times to compose with self (n>0).
Returns:
BaseOperator: the n-times composed operator.
Raises:
QiskitError: if the input and output dimensions of the operator
are not equal, or the power is not a positive integer.
|
codesearchnet
|
def get_values(js_dict, value='value'):
values = js_dict[value]
if (type(values) is list):
        if not isinstance(values[0], (dict, tuple)):
return values
values = {int(key): value for (key, value) in values.items()}
if js_dict.get('size'):
max_val = np.prod(np.array(js_dict['size']))
else:
max_val = np.prod(np.array(js_dict['dimension']['size']))
vals = (max_val * [None])
for (key, value) in values.items():
vals[key] = value
values = vals
return values
|
Get values from input data.
Args:
js_dict (dict): dictionary containing dataset data and metadata.
value (string, optional): name of the value column. Defaults to 'value'.
Returns:
values (list): list of dataset values.
|
codesearchnet
|
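A minimal sketch of the sparse path, assuming `get_values` and its `numpy` import are in scope: values keyed by string index are expanded into a dense list of length prod(size):

```python
js = {"value": {"0": 1.5, "3": 2.5}, "size": [2, 2]}
print(get_values(js))  # -> [1.5, None, None, 2.5]
```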
def match_criterion(self, tag):
return ((tag.name == self.reference_tag_name) and (tag.attrs.get('kind', '') == self.reference_tag_kind))
|
Override. Determine if a tag has the desired name and kind attribute
value.
Args:
tag: A BeautifulSoup Tag.
Returns:
True if tag has the desired name and kind, otherwise False.
|
codesearchnet
|
def create_border(video, color='blue', border_percent=2):
if (video.shape[(- 1)] != 3):
return video
color_to_axis = {'blue': 2, 'red': 0, 'green': 1}
axis = color_to_axis[color]
(_, _, height, width, _) = video.shape
    border_height = np.ceil((border_percent * height) / 100.0).astype(int)
    border_width = np.ceil((border_percent * width) / 100.0).astype(int)
    video[:, :, :border_height, :, axis] = 255
    video[:, :, -border_height:, :, axis] = 255
    video[:, :, :, :border_width, axis] = 255
    video[:, :, :, -border_width:, axis] = 255
return video
|
Creates a border around each frame to differentiate input and target.
Args:
video: 5-D NumPy array.
color: string, "blue", "red" or "green".
border_percent: Percentage of the frame covered by the border.
Returns:
video: 5-D NumPy array.
|
codesearchnet
|
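A usage sketch on a synthetic [batch, time, height, width, channel] array:

```python
import numpy as np

video = np.zeros((4, 16, 64, 64, 3), dtype=np.uint8)
video = create_border(video, color="blue", border_percent=2)
print(video[0, 0, 0, 0])  # corner pixel now blue -> [  0   0 255]
```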
def get_version():
if all([VERSION, UPDATED, any([isinstance(UPDATED, date), isinstance(UPDATED, datetime)])]):
return FORMAT_STRING.format(**{'version': VERSION, 'updated': UPDATED})
elif VERSION:
return VERSION
elif UPDATED:
return (localize(UPDATED) if any([isinstance(UPDATED, date), isinstance(UPDATED, datetime)]) else '')
else:
return ''
|
Return formatted version string.
Returns:
str: string with project version or empty string.
|
codesearchnet
|
def ExamineEvent(self, mediator, event):
if (event.data_type not in self._DATATYPES):
return
url = getattr(event, 'url', None)
if (url is None):
return
parsed_url = urlparse.urlparse(url)
domain = getattr(parsed_url, 'netloc', None)
if (domain in self._domains):
return
self._domains.append(domain)
|
Analyzes an event and extracts domains from it.
We only evaluate straightforward web history events, not visits which can
be inferred by TypedURLs, cookies or other means.
Args:
mediator (AnalysisMediator): mediates interactions between
analysis plugins and other components, such as storage and dfvfs.
event (EventObject): event to examine.
|
codesearchnet
|
def Items(self, key):
with self._mutex:
if (key not in self._buckets):
raise KeyError(('Key %s was not found in Reservoir' % key))
bucket = self._buckets[key]
return bucket.Items()
|
Return items associated with given key.
Args:
key: The key for which we are finding associated items.
Raises:
KeyError: If the key is not found in the reservoir.
Returns:
[list, of, items] associated with that key.
|
codesearchnet
|
def _ParseDataObject(self, file_object, file_offset):
data_object_map = self._GetDataTypeMap('systemd_journal_data_object')
try:
(data_object, _) = self._ReadStructureFromFileObject(file_object, file_offset, data_object_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to parse data object at offset: 0x{0:08x} with error: {1!s}'.format(file_offset, exception))
if (data_object.object_type != self._OBJECT_TYPE_DATA):
raise errors.ParseError('Unsupported object type: {0:d}.'.format(data_object.object_type))
if (data_object.object_flags not in (0, self._OBJECT_COMPRESSED_FLAG_XZ, self._OBJECT_COMPRESSED_FLAG_LZ4)):
raise errors.ParseError('Unsupported object flags: 0x{0:02x}.'.format(data_object.object_flags))
data_size = (data_object.data_size - 64)
data = file_object.read(data_size)
if (data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_XZ):
data = lzma.decompress(data)
elif (data_object.object_flags & self._OBJECT_COMPRESSED_FLAG_LZ4):
uncompressed_size_map = self._GetDataTypeMap('uint32le')
try:
uncompressed_size = self._ReadStructureFromByteStream(data, (file_offset + 64), uncompressed_size_map)
except (ValueError, errors.ParseError) as exception:
raise errors.ParseError('Unable to parse LZ4 uncompressed size at offset: 0x{0:08x} with error: {1!s}'.format((file_offset + 64), exception))
data = lz4.block.decompress(data[8:], uncompressed_size=uncompressed_size)
return data
|
Parses a data object.
Args:
file_object (dfvfs.FileIO): a file-like object.
file_offset (int): offset of the data object relative to the start
of the file-like object.
Returns:
bytes: data.
Raises:
ParseError: if the data object cannot be parsed.
|
codesearchnet
|
def SetTimeZone(self, time_zone):
try:
self._time_zone = pytz.timezone(time_zone)
except (AttributeError, pytz.UnknownTimeZoneError):
raise ValueError('Unsupported timezone: {0!s}'.format(time_zone))
|
Sets the time zone.
Args:
time_zone (str): time zone.
Raises:
ValueError: if the timezone is not supported.
|
codesearchnet
|
def _segment_reduce(values, index, segment_reduce_fn, name):
flat_index = flatten(index)
vector_shape = tf.shape(values)[index.indices.shape.rank:]
flattened_shape = tf.concat([[-1], vector_shape], axis=0)
flat_values = tf.reshape(values, flattened_shape)
segment_means = segment_reduce_fn(data=flat_values, segment_ids=flat_index.indices, num_segments=flat_index.num_segments)
new_shape = tf.concat([index.batch_shape(), [index.num_segments], vector_shape], axis=0)
output_values = tf.reshape(segment_means, new_shape)
output_index = range_index_map(index.batch_shape(), index.num_segments)
return (output_values, output_index)
|
Applies a segment reduction segment-wise.
Args:
values (`tf.Tensor`):
Tensor with segment values.
index (`IndexMap`):
IndexMap.
segment_reduce_fn (`str`):
Name for the reduce operation. One of "sum", "mean", "max" or "min".
name (`str`):
Name for the operation. Currently not used
Returns:
(`tf.Tensor`, `IndexMap`): A tuple of the reduced values and an IndexMap of shape batch_shape with elements equal to range(num_segments).
|
github-repos
|
def SetEnvironmentVariable(self, name, value):
if isinstance(value, py2to3.STRING_TYPES):
value = self._PathStripPrefix(value)
if value is not None:
self._environment_variables[name.upper()] = value
|
Sets an environment variable in the Windows path helper.
Args:
name (str): name of the environment variable without enclosing
%-characters, e.g. SystemRoot as in %SystemRoot%.
value (str): value of the environment variable.
|
juraj-google-style
|
def delete_object(self, object_name):
def delete_fn(weights_dict, source_name, target_name=None):
weights_dict.pop(source_name)
self._edit_object(delete_fn, object_name)
|
Removes an object from the file (e.g. a layer).
Args:
object_name: String, name or path of the
object to delete (e.g. `"dense_2"` or
`"layers/dense_2"`).
|
github-repos
|
def list_refs(profile, ref_type=None):
resource = '/refs'
if ref_type:
resource += ('/' + ref_type)
data = api.get_request(profile, resource)
result = [prepare(x) for x in data]
return result
|
List all refs.
Args:
profile
A profile generated from ``simplygithub.authentication.profile``.
Such profiles tell this module (i) the ``repo`` to connect to,
and (ii) the ``token`` to connect with.
ref_type
The type of ref you want. For heads, it's ``heads``. For tags,
it's ``tags``. That sort of thing. If you don't specify a type,
all refs are returned.
Returns:
A list of dicts with data about each ref.
|
codesearchnet
|
def fetch_ensembl_exons(build='37'):
LOG.info("Fetching ensembl exons build %s ...", build)
if build == '37':
        url = 'http://grch37.ensembl.org'
else:
        url = 'http://www.ensembl.org'
dataset_name = 'hsapiens_gene_ensembl'
dataset = pybiomart.Dataset(name=dataset_name, host=url)
attributes = [
'chromosome_name',
'ensembl_gene_id',
'ensembl_transcript_id',
'ensembl_exon_id',
'exon_chrom_start',
'exon_chrom_end',
'5_utr_start',
'5_utr_end',
'3_utr_start',
'3_utr_end',
'strand',
'rank'
]
filters = {
'chromosome_name': CHROMOSOMES,
}
result = dataset.query(
attributes = attributes,
filters = filters
)
return result
|
Fetch the ensembl genes
Args:
build(str): ['37', '38']
|
juraj-google-style
|
def get(self, key, default) -> Union[Uniform, UniformBlock, Subroutine, Attribute, Varying]:
return self._members.get(key, default)
|
Returns a Uniform, UniformBlock, Subroutine, Attribute or Varying.
Args:
key: Name of the member to look up.
default: This is the value to be returned in case key does not exist.
Returns:
:py:class:`Uniform`, :py:class:`UniformBlock`, :py:class:`Subroutine`,
:py:class:`Attribute` or :py:class:`Varying`
|
juraj-google-style
|
def trace(self, data, callback=None):
if (self._push_channel is None):
return
self._push_channel.trace(data, callback=callback)
|
Trace data asynchronously.
If no one is listening for traced data, it will be dropped
otherwise it will be queued for sending.
Args:
data (bytearray, string): Unstructured data to trace to any
connected client.
callback (callable): Optional callback to get notified when
this data is actually sent.
|
codesearchnet
|
def copartition(self, axis, other, how_to_join, sort, force_repartition=False):
if isinstance(other, type(self)):
other = [other]
index_obj = ([o.index for o in other] if (axis == 0) else [o.columns for o in other])
joined_index = self._join_index_objects((axis ^ 1), index_obj, how_to_join, sort=sort)
left_old_idx = (self.index if (axis == 0) else self.columns)
right_old_idxes = index_obj
reindexed_self = self.data
reindexed_other_list = []
def compute_reindex(old_idx):
        """Create a function based on the old index and axis.

        Args:
            old_idx: The old index/columns

        Returns:
            A function that will be run in each partition.
        """
def reindex_partition(df):
if (axis == 0):
df.index = old_idx
new_df = df.reindex(index=joined_index)
new_df.index = pandas.RangeIndex(len(new_df.index))
else:
df.columns = old_idx
new_df = df.reindex(columns=joined_index)
new_df.columns = pandas.RangeIndex(len(new_df.columns))
return new_df
return reindex_partition
for i in range(len(other)):
if ((i != 0) or (left_old_idx.equals(joined_index) and (not force_repartition))):
reindex_left = None
else:
reindex_left = self._prepare_method(compute_reindex(left_old_idx))
if (right_old_idxes[i].equals(joined_index) and (not force_repartition)):
reindex_right = None
else:
reindex_right = other[i]._prepare_method(compute_reindex(right_old_idxes[i]))
(reindexed_self, reindexed_other) = reindexed_self.copartition_datasets(axis, other[i].data, reindex_left, reindex_right)
reindexed_other_list.append(reindexed_other)
return (reindexed_self, reindexed_other_list, joined_index)
|
Copartition two QueryCompiler objects.
Args:
axis: The axis to copartition along.
other: The other Query Compiler(s) to copartition against.
how_to_join: How to manage joining the index object ("left", "right", etc.)
sort: Whether or not to sort the joined index.
force_repartition: Whether or not to force the repartitioning. By default,
this method will skip repartitioning if it is possible. This is because
reindexing is extremely inefficient. Because this method is used to
`join` or `append`, it is vital that the internal indices match.
Returns:
A tuple (left query compiler, right query compiler list, joined index).
|
codesearchnet
|
def duplicate_module(module_file: Union[str, os.PathLike], old_model_patterns: ModelPatterns, new_model_patterns: ModelPatterns, dest_file: Optional[str]=None, add_copied_from: bool=True, attrs_to_remove: Optional[List[str]]=None):
if dest_file is None:
dest_file = str(module_file).replace(old_model_patterns.model_lower_cased, new_model_patterns.model_lower_cased)
with open(module_file, 'r', encoding='utf-8') as f:
content = f.read()
    content = re.sub(r'# Copyright (\d+)\s', f'# Copyright {CURRENT_YEAR} ', content)
objects = parse_module_content(content)
new_objects = []
for obj in objects:
special_pattern = False
for pattern, attr in SPECIAL_PATTERNS.items():
if pattern in obj:
obj = obj.replace(getattr(old_model_patterns, attr), getattr(new_model_patterns, attr))
new_objects.append(obj)
special_pattern = True
break
if special_pattern:
continue
old_obj = obj
obj, replacement = replace_model_patterns(obj, old_model_patterns, new_model_patterns)
        has_copied_from = re.search('^#\\s+Copied from', obj, flags=re.MULTILINE) is not None
if add_copied_from and (not has_copied_from) and (_re_class_func.search(obj) is not None) and (len(replacement) > 0):
module_name = get_module_from_file(module_file)
old_object_name = _re_class_func.search(old_obj).groups()[0]
            obj = add_content_to_text(obj, f'# Copied from {module_name}.{old_object_name} with {replacement}', add_before=_re_class_func)
        obj = re.sub('\n[ ]+# Copied from [^\n]*\n', '\n', obj)
new_objects.append(obj)
content = '\n'.join(new_objects)
if attrs_to_remove is not None:
for attr in attrs_to_remove:
content = remove_attributes(content, target_attr=attr)
with open(dest_file, 'w', encoding='utf-8') as f:
f.write(content)
|
Create a new module from an existing one and adapting all function and classes names from old patterns to new ones.
Args:
module_file (`str` or `os.PathLike`): Path to the module to duplicate.
old_model_patterns (`ModelPatterns`): The patterns for the old model.
new_model_patterns (`ModelPatterns`): The patterns for the new model.
dest_file (`str` or `os.PathLike`, *optional*): Path to the new module.
add_copied_from (`bool`, *optional*, defaults to `True`):
Whether or not to add `# Copied from` statements in the duplicated module.
|
github-repos
|
def fit(self, X):
self.constant_value = self._get_constant_value(X)
if (self.constant_value is None):
self.model = scipy.stats.gaussian_kde(X)
else:
self._replace_constant_methods()
self.fitted = True
|
Fit Kernel density estimation to a list of values.
This function will fit a gaussian_kde model to the datapoints
and store it as a class attribute.
Args:
    X: 1-d `np.ndarray` or `pd.Series` or `list` of datapoints to be estimated from.
|
codesearchnet
|
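A sketch of the underlying call for the non-constant case (what `fit` stores in `self.model`):

```python
import numpy as np
import scipy.stats

X = np.random.normal(0, 1, size=500)
kde = scipy.stats.gaussian_kde(X)
print(kde.evaluate([0.0]))  # estimated density near the mean, roughly 0.4
```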
def __init__(self, job=None, replica=None, task=None, device_type=None, device_index=None):
self._job = _as_str_or_none(job)
self._replica = _as_int_or_none(replica)
self._task = _as_int_or_none(task)
self._device_type = _as_device_str_or_none(device_type)
self._device_index = _as_int_or_none(device_index)
self._as_string = self._components_to_string(job=self._job, replica=self._replica, task=self._task, device_type=self._device_type, device_index=self._device_index)
self._hash = hash(self.to_string())
|
Create a new `DeviceSpec` object.
Args:
job: string. Optional job name.
replica: int. Optional replica index.
task: int. Optional task index.
device_type: Optional device type string (e.g. "CPU" or "GPU")
device_index: int. Optional device index. If left unspecified, device
represents 'any' device_index.
|
github-repos
|
def list_indexes(cls):
cls_list = cls.list_mapped_classes()
rtn_obj = {}
for key, value in cls_list.items():
idx = value.es_defs.get('kds_esIndex')[0]
try:
rtn_obj[idx].append(value)
except KeyError:
rtn_obj[idx] = [value]
return rtn_obj
|
Returns a dictionary with the key as the es_index name and the
value is a list of rdfclasses for that index
args:
None
|
juraj-google-style
|
def __init__(self, path_spec):
super(SourceScanNode, self).__init__()
self.path_spec = path_spec
self.parent_node = None
self.scanned = False
self.sub_nodes = []
|
Initializes a source scan node.
Args:
path_spec (PathSpec): path specification.
|
juraj-google-style
|
def __init__(self, command = None):
self._output = None
self._errors = None
self._command = None
self.command = command
|
Class constructor.
Args:
command (str): Command to execute
|
juraj-google-style
|
def set_triple(self, p, o, auto_refresh=True):
self.rdf.graph.set((self.uri, p, self._handle_object(o)))
self._handle_triple_refresh(auto_refresh)
|
Assuming the predicate or object matches a single triple, sets the other for that triple.
Args:
p (rdflib.term.URIRef): predicate
o (): object
auto_refresh (bool): whether or not to update object-like self.rdf.triples
Returns:
None: modifies pre-existing triple in self.rdf.graph
|
codesearchnet
|
def exists(self, path):
self.__validate_storage_path(path)
try:
metadata = self.api_client.get_entity_by_query(path=path)
except StorageNotFoundException:
return False
return (metadata and ('uuid' in metadata))
|
Check if a certain path exists in the storage service.
Args:
path (str): The path to be checked
Returns:
True if the path exists, False otherwise
Raises:
StorageArgumentException: Invalid arguments
StorageForbiddenException: Server response code 403
StorageNotFoundException: Server response code 404
StorageException: other 400-600 error codes
|
codesearchnet
|
def unembed_samples(samples, embedding, chain_break_method=None):
if (chain_break_method is None):
chain_break_method = majority_vote
return list(itertools.chain(*(chain_break_method(sample, embedding) for sample in samples)))
|
Return samples over the variables in the source graph.
Args:
samples (iterable): An iterable of samples where each sample
is a dict of the form {v: val, ...} where v is a variable
in the target model and val is the associated value as
determined by a binary quadratic model sampler.
embedding (dict): The mapping from the source graph to the target graph.
Should be of the form {v: {s, ...}, ...} where v is a node in the
source graph and s is a node in the target graph.
chain_break_method (function, optional): The method used to resolve chain
breaks. Default is :method:`majority_vote`.
Returns:
list: A list of unembedded samples. Each sample is a dict of the form
{v: val, ...} where v is a variable in the source graph and val
is the value associated with the variable.
|
codesearchnet
|
def __init__(self, host: str, port: int, time_to_live: Union[int, timedelta]=DEFAULT_CACHE_ENTRY_TTL_SEC, *, request_coder: Optional[coders.Coder]=None, response_coder: Optional[coders.Coder]=None, **kwargs):
self._host = host
self._port = port
self._time_to_live = time_to_live
self._request_coder = request_coder
self._response_coder = response_coder
self._kwargs = kwargs if kwargs else {}
self._source_caller = None
|
Args:
host (str): The hostname or IP address of the Redis server.
port (int): The port number of the Redis server.
time_to_live: `(Union[int, timedelta])` The time-to-live (TTL) for
records stored in Redis. Provide an integer (in seconds) or a
`datetime.timedelta` object.
request_coder: (Optional[`coders.Coder`]) coder for encoding requests.
response_coder: (Optional[`coders.Coder`]) coder for decoding responses
received from Redis.
kwargs: Optional additional keyword arguments that
are required to connect to your redis server. Same as `redis.Redis()`.
|
github-repos
|
def prefixlen_to_mask(prefixlen):
prefixlen = prefixlen or '32'
addr = '0.0.0.0/%s' % prefixlen
return str(netaddr.IPNetwork(addr).netmask)
|
Converts a prefix length to a dotted decimal subnet mask
Args:
prefixlen (str): The prefix length value to convert
Returns:
str: The subnet mask as a dotted decimal string
|
juraj-google-style
|
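A usage sketch (requires the `netaddr` package):

```python
print(prefixlen_to_mask("24"))  # -> 255.255.255.0
print(prefixlen_to_mask(None))  # falls back to /32 -> 255.255.255.255
```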
def exec_one_test(self, test_name, test_method, record=None):
tr_record = record or records.TestResultRecord(test_name, self.TAG)
tr_record.uid = getattr(test_method, 'uid', None)
tr_record.test_begin()
self.current_test_info = runtime_test_info.RuntimeTestInfo(test_name, self.log_path, tr_record)
expects.recorder.reset_internal_states(tr_record)
logging.info('%s %s', TEST_CASE_TOKEN, test_name)
teardown_test_failed = False
try:
try:
try:
self._setup_test(test_name)
except signals.TestFailure as e:
_, _, traceback = sys.exc_info()
raise signals.TestError(e.details, e.extras).with_traceback(traceback)
test_method()
except (signals.TestPass, signals.TestAbortSignal, signals.TestSkip):
raise
except Exception:
logging.exception('Exception occurred in %s.', self.current_test_info.name)
raise
finally:
before_count = expects.recorder.error_count
try:
self._teardown_test(test_name)
except signals.TestAbortSignal:
raise
except Exception as e:
logging.exception('Exception occurred in %s of %s.', STAGE_NAME_TEARDOWN_TEST, self.current_test_info.name)
tr_record.test_error()
tr_record.add_error(STAGE_NAME_TEARDOWN_TEST, e)
teardown_test_failed = True
else:
if before_count < expects.recorder.error_count:
tr_record.test_error()
teardown_test_failed = True
except (signals.TestFailure, AssertionError) as e:
tr_record.test_fail(e)
except signals.TestSkip as e:
tr_record.test_skip(e)
except signals.TestAbortSignal as e:
tr_record.test_fail(e)
raise
except signals.TestPass as e:
tr_record.test_pass(e)
except Exception as e:
tr_record.test_error(e)
else:
if expects.recorder.has_error and (not teardown_test_failed):
tr_record.test_fail()
elif not teardown_test_failed:
tr_record.test_pass()
finally:
tr_record.update_record()
try:
if tr_record.result in (records.TestResultEnums.TEST_RESULT_ERROR, records.TestResultEnums.TEST_RESULT_FAIL):
self._exec_procedure_func(self._on_fail, tr_record)
elif tr_record.result == records.TestResultEnums.TEST_RESULT_PASS:
self._exec_procedure_func(self._on_pass, tr_record)
elif tr_record.result == records.TestResultEnums.TEST_RESULT_SKIP:
self._exec_procedure_func(self._on_skip, tr_record)
finally:
logging.info(RESULT_LINE_TEMPLATE, tr_record.test_name, tr_record.result)
self.results.add_record(tr_record)
self.summary_writer.dump(tr_record.to_dict(), records.TestSummaryEntryType.RECORD)
self.current_test_info = None
return tr_record
|
Executes one test and update test results.
Executes setup_test, the test method, and teardown_test; then creates a
records.TestResultRecord object with the execution information and adds
the record to the test class's test results.
Args:
test_name: string, Name of the test.
test_method: function, The test method to execute.
record: records.TestResultRecord, optional arg for injecting a record
object to use for this test execution. If not set, a new one is
created. This is meant for passing information between consecutive test
case execution for retry purposes. Do NOT abuse this for "magical"
features.
Returns:
TestResultRecord, the test result record object of the test execution.
This object is strictly for read-only purposes. Modifying this record
will not change what is reported in the test run's summary yaml file.
|
github-repos
|
def _black_objective_and_vega(volatilities):
vol_t = volatilities * sqrt_t
d1 = lnz / vol_t + vol_t / 2
d2 = d1 - vol_t
implied_prices = norm_forwards * _cdf(d1) - norm_strikes * _cdf(d2)
if is_call_options is not None:
put_prices = implied_prices - norm_forwards + norm_strikes
implied_prices = tf.where(tf.broadcast_to(is_call_options, tf.shape(put_prices)), implied_prices, put_prices)
vega = norm_forwards * _pdf(d1) * sqrt_t / discount_factors
return (implied_prices - normalized_prices, vega)
|
Calculate the Black Scholes price and vega for a given volatility.
This method returns normalized results.
Args:
volatilities: A real `Tensor` of same shape and dtype as `forwards`. The
volatility to expiry.
Returns:
A tuple containing (value, gradient) of the black scholes price, both of
which are `Tensor`s of the same shape and dtype as `volatilities`.
|
github-repos
|
def add_transition(self, source: str, dest: str):
self._transitions[source].append(dest)
|
Adds a transition from one state to another.
Args:
source (str): the name of the state from where the transition starts
dest (str): the name of the state where the transition ends
|
juraj-google-style
|
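A minimal sketch of a surrounding class; `_transitions` being a `defaultdict(list)` is an assumption suggested by the unconditional `append`:

```python
import collections

class StateMachine:
    def __init__(self):
        self._transitions = collections.defaultdict(list)

    def add_transition(self, source: str, dest: str):
        self._transitions[source].append(dest)

sm = StateMachine()
sm.add_transition("idle", "running")
sm.add_transition("running", "idle")
print(dict(sm._transitions))  # {'idle': ['running'], 'running': ['idle']}
```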
def argmin(x, axis=-1):
return math_ops.argmin(x, axis)
|
Returns the index of the minimum value along an axis.
Args:
x: Tensor or variable.
axis: axis along which to perform the reduction.
Returns:
A tensor.
|
github-repos
|
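A usage sketch through the public Keras backend alias:

```python
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([[3.0, 1.0, 2.0]])
print(K.argmin(x, axis=-1))  # [1]: column index of the smallest value per row
```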
def check_line_split(code_line):
return re.search('\\\\\\s*\\n$', code_line)
|
Checks if a line was split with `\`.
Args:
code_line: A line of Python code
Returns:
If the line was split with `\`
>>> check_line_split("!gcloud ml-engine models create ${MODEL} \\\n")
True
|
github-repos
|
def _set_read_only_resource_inputs_attr(op: ops.Operation, func_graph: func_graph_module.FuncGraph):
read_only_indices = acd.get_read_only_resource_input_indices_graph(func_graph)
ops.set_int_list_attr(op, acd.READ_ONLY_RESOURCE_INPUTS_ATTR, read_only_indices)
|
Sets the list of resource inputs which are read-only.
This is used by AutomaticControlDependencies.
Args:
op: PartitionedCall Operation.
func_graph: FuncGraph.
|
github-repos
|
def append_dictionary_to_file(localization_key_to_comment, file_path, section_name):
output_file = open_strings_file(file_path, 'a')
write_section_header_to_file(output_file, section_name)
for (entry_key, entry_comment) in sorted(localization_key_to_comment.iteritems(), key=operator.itemgetter(1)):
output_file.write(u'\n')
write_entry_to_file(output_file, entry_comment, entry_key)
output_file.close()
|
Appends dictionary of localization keys and comments to a file
Args:
localization_key_to_comment (dict): A mapping between localization keys and comments.
file_path (str): The path of the file to append to.
section_name (str): The name of the section.
|
codesearchnet
|
def wait_for_jobs(jobs):
all_running = False
while not all_running:
all_running = True
time.sleep(5)
for job in jobs:
job.refresh()
scheduled = getattr(job, "scheduled_at", None)
if scheduled is not None:
logger.info("Waiting for %s on %s [%s]" % (job.uid,
job.site,
_date2h(scheduled)))
all_running = all_running and job.state == "running"
if job.state == "error":
raise Exception("The job %s is in error state" % job)
logger.info("All jobs are Running !")
|
Waits for all the jobs to be running.
Args:
jobs(list): list of the python-grid5000 jobs to wait for
Raises:
Exception: if one of the job gets in error state.
|
juraj-google-style
|
def format_dict(dic, format_list, separator=',', default_value=str):
dic = collections.defaultdict(default_value, dic)
str_format = separator.join(["{" + "{}".format(head) + "}" for head in format_list])
return str_format.format(**dic)
|
Format a dict into a string, ordering fields by a list of keys
Args:
    dic: Dictionary with the values to format.
    format_list: List of keys determining the output order.
    separator: String placed between formatted values.
    default_value: Default type for the underlying defaultdict (default: str).
|
juraj-google-style
|
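A worked sketch, assuming `format_dict` is in scope; keys are emitted in `format_list` order:

```python
print(format_dict({"b": 2, "a": 1}, ["a", "b"]))                 # -> 1,2
print(format_dict({"b": 2, "a": 1}, ["b", "a"], separator="|"))  # -> 2|1
```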
def postprocess_periodical(marc_xml, mods, uuid, counter, url):
dom = double_linked_dom(mods)
add_missing_xml_attributes(dom, counter)
if uuid:
add_uuid(dom, uuid)
return dom.prettify()
|
Some basic postprocessing of the periodical publications.
Args:
marc_xml (str): Original Aleph record.
mods (str): XML string generated by XSLT template.
uuid (str): UUID of the package.
counter (int): Number of record, is added to XML headers.
url (str): URL of the publication (public or not).
Returns:
str: Updated XML.
|
juraj-google-style
|
def add_cookie_header(self, request, referrer_host=None):
new_request = convert_http_request(request, referrer_host)
self._cookie_jar.add_cookie_header(new_request)
request.fields.clear()
for (name, value) in new_request.header_items():
request.fields.add(name, value)
|
Wrapped ``add_cookie_header``.
Args:
request: An instance of :class:`.http.request.Request`.
referrer_host (str): A hostname or IP address of the referrer
URL.
|
codesearchnet
|
def create_binding(site, hostheader='', ipaddress='*', port=80, protocol='http', sslflags=None):
protocol = six.text_type(protocol).lower()
name = _get_binding_info(hostheader, ipaddress, port)
if (protocol not in _VALID_PROTOCOLS):
message = "Invalid protocol '{0}' specified. Valid formats: {1}".format(protocol, _VALID_PROTOCOLS)
raise SaltInvocationError(message)
if sslflags:
sslflags = int(sslflags)
if (sslflags not in _VALID_SSL_FLAGS):
message = "Invalid sslflags '{0}' specified. Valid sslflags range: {1}..{2}".format(sslflags, _VALID_SSL_FLAGS[0], _VALID_SSL_FLAGS[(- 1)])
raise SaltInvocationError(message)
current_bindings = list_bindings(site)
if (name in current_bindings):
log.debug('Binding already present: %s', name)
return True
if sslflags:
ps_cmd = ['New-WebBinding', '-Name', "'{0}'".format(site), '-HostHeader', "'{0}'".format(hostheader), '-IpAddress', "'{0}'".format(ipaddress), '-Port', "'{0}'".format(port), '-Protocol', "'{0}'".format(protocol), '-SslFlags', '{0}'.format(sslflags)]
else:
ps_cmd = ['New-WebBinding', '-Name', "'{0}'".format(site), '-HostHeader', "'{0}'".format(hostheader), '-IpAddress', "'{0}'".format(ipaddress), '-Port', "'{0}'".format(port), '-Protocol', "'{0}'".format(protocol)]
cmd_ret = _srvmgr(ps_cmd)
if (cmd_ret['retcode'] != 0):
msg = 'Unable to create binding: {0}\nError: {1}'.format(site, cmd_ret['stderr'])
raise CommandExecutionError(msg)
if (name in list_bindings(site)):
log.debug('Binding created successfully: %s', site)
return True
log.error('Unable to create binding: %s', site)
return False
|
Create an IIS Web Binding.
.. note::
This function only validates against the binding
ipaddress:port:hostheader combination, and will return True even if the
binding already exists with a different configuration. It will not
modify the configuration of an existing binding.
Args:
site (str): The IIS site name.
hostheader (str): The host header of the binding. Usually a hostname.
ipaddress (str): The IP address of the binding.
port (int): The TCP port of the binding.
protocol (str): The application protocol of the binding.
sslflags (str): The flags representing certificate type and storage of
the binding.
Returns:
bool: True if successful, otherwise False
CLI Example:
.. code-block:: bash
salt '*' win_iis.create_binding site='site0' hostheader='example.com' ipaddress='*' port='80'
|
codesearchnet
|
def ParseSearchRow(self, parser_mediator, query, row, **unused_kwargs):
query_hash = hash(query)
event_data = TwitterAndroidSearchEventData()
event_data.query = query
event_data.name = self._GetRowValue(query_hash, row, 'name')
event_data.search_query = self._GetRowValue(query_hash, row, 'query')
timestamp = self._GetRowValue(query_hash, row, 'time')
if timestamp:
date_time = dfdatetime_java_time.JavaTime(timestamp=timestamp)
event = time_events.DateTimeValuesEvent(
date_time, definitions.TIME_DESCRIPTION_CREATION)
parser_mediator.ProduceEventWithEventData(event, event_data)
|
Parses a search row from the database.
Args:
parser_mediator (ParserMediator): mediates interactions between parsers
and other components, such as storage and dfvfs.
query (str): query that created the row.
row (sqlite3.Row): row resulting from query.
|
juraj-google-style
|
def get_or_create_hosted_zone(client, zone_name):
zone_id = get_hosted_zone_by_name(client, zone_name)
if zone_id:
return zone_id
logger.debug("Zone %s does not exist, creating.", zone_name)
reference = uuid.uuid4().hex
response = client.create_hosted_zone(Name=zone_name,
CallerReference=reference)
return parse_zone_id(response["HostedZone"]["Id"])
|
Get the Id of an existing zone, or create it.
Args:
client (:class:`botocore.client.Route53`): The connection used to
interact with Route53's API.
zone_name (string): The name of the DNS hosted zone to create.
Returns:
string: The Id of the Hosted Zone.
|
juraj-google-style
|
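A hedged usage sketch with boto3; credentials and region come from the environment, and example.com. is a placeholder zone name:

```python
import boto3

client = boto3.client("route53")
zone_id = get_or_create_hosted_zone(client, "example.com.")
print(zone_id)
```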
def _CreateRouteOptions(self, **kwargs):
options = {
'proto': self.proto_id,
'scope': 'host',
}
options.update(kwargs)
return options
|
Create a dictionary of parameters to append to the ip route command.
Args:
**kwargs: dict, the string parameters to update in the ip route command.
Returns:
dict, the string parameters to append to the ip route command.
|
juraj-google-style
|
def _sparse_block_diag(sp_a):
sp_a_shape = tf.convert_to_tensor(value=_get_shape(sp_a, tf.int64))
ind_mat = tf.concat([[sp_a_shape[-2:]], tf.eye(2, dtype=tf.int64)], axis=0)
indices = tf.matmul(sp_a.indices, ind_mat)
dense_shape = sp_a_shape[0] * sp_a_shape[1:]
return tf.SparseTensor(
indices=indices, values=sp_a.values, dense_shape=dense_shape)
|
Returns a block diagonal rank 2 SparseTensor from a batch of SparseTensors.
Args:
sp_a: A rank 3 `SparseTensor` representing a batch of matrices.
Returns:
sp_block_diag_a: matrix-shaped, `float` `SparseTensor` with the same dtype
as `sparse_or_matrix`, of shape [B * M, B * N] where `sp_a` has shape
[B, M, N]. Each [M, N] batch of `sp_a` is lined up along the diagonal.
|
juraj-google-style
|
def validate_task_schema(context, schema_key='schema_file'):
schema_path = context.config
schema_keys = schema_key.split('.')
for key in schema_keys:
schema_path = schema_path[key]
task_schema = load_json_or_yaml(schema_path, is_path=True)
log.debug('Task is validated against this schema: {}'.format(task_schema))
try:
validate_json_schema(context.task, task_schema)
except ScriptWorkerTaskException as e:
raise TaskVerificationError('Cannot validate task against schema. Task: {}.'.format(context.task)) from e
|
Validate the task definition.
Args:
context (scriptworker.context.Context): the scriptworker context. It must contain a task and
the config pointing to the schema file
schema_key: the key in `context.config` where the path to the schema file is. Key can contain
dots (e.g.: 'schema_files.file_a'), in which case the value is resolved by walking the nested config dictionaries key by key
Raises:
TaskVerificationError: if the task doesn't match the schema
|
codesearchnet
|
def qemu_rebase(target, backing_file, safe=True, fail_on_error=True):
cmd = ['qemu-img', 'rebase', '-b', backing_file, target]
if not safe:
cmd.insert(2, '-u')
return run_command_with_validation(
cmd,
fail_on_error,
msg='Failed to rebase {target} onto {backing_file}'.format(
target=target, backing_file=backing_file
)
)
|
Changes the backing file of 'target' to 'backing_file'.
If backing_file is specified as "" (the empty string),
then the image is rebased onto no backing file
(i.e. it will exist independently of any backing file).
(Taken from qemu-img man page)
Args:
target(str): Path to the source disk
backing_file(str): path to the base disk
safe(bool): if false, allow unsafe rebase
(check qemu-img docs for more info)
fail_on_error(bool): if true, raise if the qemu-img command fails
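Usage sketch (hypothetical image paths):
qemu_rebase('overlay.qcow2', 'base.qcow2')    # safe rebase onto base.qcow2
qemu_rebase('overlay.qcow2', '', safe=False)  # unsafe rebase ('-u') onto no backing file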
|
juraj-google-style
|
def recode_dwgsim_reads(
dwgsim_prefix,
fastq_rnf_fo,
fai_fo,
genome_id,
estimate_unknown_values,
number_of_read_tuples=10**9,
):
dwgsim_pattern = re.compile(
'@(.*)_([0-9]+)_([0-9]+)_([01])_([01])_([01])_([01])_([0-9]+):([0-9]+):([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_(([0-9abcdef])+)'
)
fai_index = rnftools.utils.FaIdx(fai_fo=fai_fo)
read_tuple_id_width = len(format(number_of_read_tuples, 'x'))
read_tuple_id = 0
last_read_tuple_name = None
old_fq = "{}.bfast.fastq".format(dwgsim_prefix)
fq_creator = rnftools.rnfformat.FqCreator(
fastq_fo=fastq_rnf_fo,
read_tuple_id_width=read_tuple_id_width,
genome_id_width=2,
chr_id_width=fai_index.chr_id_width,
coor_width=fai_index.coor_width,
info_reads_in_tuple=True,
info_simulator="dwgsim",
)
i = 0
with open(old_fq, "r+") as f1:
for line in f1:
if i % 4 == 0:
read_tuple_name = line[1:].strip()
if read_tuple_name != last_read_tuple_name:
new_tuple = True
if last_read_tuple_name is not None:
read_tuple_id += 1
else:
new_tuple = False
last_read_tuple_name = read_tuple_name
m = dwgsim_pattern.search(line)
if m is None:
rnftools.utils.error(
"Read tuple '{}' was not created by DwgSim.".format(line[1:]),
program="RNFtools",
subprogram="MIShmash",
exception=ValueError,
)
contig_name = m.group(1)
start_1 = int(m.group(2))
start_2 = int(m.group(3))
direction_1 = "F" if int(m.group(4)) == 0 else "R"
direction_2 = "F" if int(m.group(5)) == 0 else "R"
chr_id = fai_index.dict_chr_ids[contig_name] if fai_index.dict_chr_ids != {} else "0"
elif i % 4 == 1:
bases = line.strip()
if new_tuple:
segment = rnftools.rnfformat.Segment(
genome_id=genome_id,
chr_id=chr_id,
direction=direction_1,
left=start_1,
right=start_1 + len(bases) - 1 if estimate_unknown_values else 0,
)
else:
segment = rnftools.rnfformat.Segment(
genome_id=genome_id,
chr_id=chr_id,
direction=direction_2,
left=start_2,
right=start_2 + len(bases) - 1 if estimate_unknown_values else 0,
)
elif i % 4 == 2:
pass
elif i % 4 == 3:
qualities = line.strip()
fq_creator.add_read(
read_tuple_id=read_tuple_id,
bases=bases,
qualities=qualities,
segments=[segment],
)
i += 1
fq_creator.flush_read_tuple()
|
Convert DwgSim FASTQ file to RNF FASTQ file.
Args:
dwgsim_prefix (str): DwgSim prefix of the simulation (see its commandline parameters).
fastq_rnf_fo (file): File object of RNF FASTQ.
fai_fo (file): File object for FAI file of the reference genome.
genome_id (int): RNF genome ID to be used.
estimate_unknown_values (bool): Estimate unknown values (right coordinate of each end).
number_of_read_tuples (int): Estimate of number of simulated read tuples (to set width).
|
juraj-google-style
|
def received(self, messages):
if messages:
if self._queue:
self._queue.put_nowait(messages)
if self._callback:
self._callback(messages)
|
Called when new messages arrive.
Args:
messages (tuple): Messages
|
juraj-google-style
|
def _build_vocab(filename, vocab_dir, vocab_name):
vocab_path = os.path.join(vocab_dir, vocab_name)
if not tf.gfile.Exists(vocab_path):
with tf.gfile.GFile(filename, "r") as f:
data = f.read().split()
counter = collections.Counter(data)
count_pairs = sorted(counter.items(), key=lambda x: (-x[1], x[0]))
words, _ = list(zip(*count_pairs))
encoder = text_encoder.TokenTextEncoder(None, vocab_list=words)
encoder.store_to_file(vocab_path)
else:
encoder = text_encoder.TokenTextEncoder(vocab_path)
return encoder
|
Reads a file to build a vocabulary.
Args:
filename: file to read list of words from.
vocab_dir: directory where to save the vocabulary.
vocab_name: vocab file name.
Returns:
text encoder.
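Usage sketch (hypothetical paths; TokenTextEncoder.encode maps a string to token ids):
encoder = _build_vocab('/data/corpus.txt', '/data/vocabs', 'vocab.words')
ids = encoder.encode('the quick brown fox')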
|
juraj-google-style
|
def foo(self, a: int, *args, b: str='x', **kwargs) -> str:
del a, args, kwargs
return b
|
Function foo.
Args:
a: An int.
*args: Varargs.
b: A str.
**kwargs: Kwargs.
Returns:
A str.
|
github-repos
|
def read_locations(filename):
data = ConfigParser()
if (filename == '-'):
data.read_file(sys.stdin)
else:
data.read(filename)
if (not data.sections()):
logging.debug('Config file is empty')
locations = {}
for name in data.sections():
if data.has_option(name, 'locator'):
(latitude, longitude) = utils.from_grid_locator(data.get(name, 'locator'))
else:
latitude = data.getfloat(name, 'latitude')
longitude = data.getfloat(name, 'longitude')
locations[name] = (latitude, longitude)
return locations
|
Pull locations from a user's config file.
Args:
filename (str): Config file to parse
Returns:
dict: Locations from the config file, keyed by name, as (latitude, longitude) tuples
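For illustration, a config file like the following (hypothetical values) yields {'Home': (52.015, -0.221)}:
[Home]
latitude = 52.015
longitude = -0.221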
|
codesearchnet
|
def LessThan(self, value):
self._awql = self._CreateSingleValueCondition(value, '<')
return self._query_builder
|
Sets the type of the WHERE clause as "less than".
Args:
value: The value to be used in the WHERE condition.
Returns:
The query builder that this WHERE builder links to.
|
codesearchnet
|
def _CalculateHashDataStream(self, file_entry, data_stream_name):
hash_context = hashlib.sha256()
try:
file_object = file_entry.GetFileObject(data_stream_name=data_stream_name)
except IOError as exception:
logging.warning('Unable to open path specification:\n{0:s}\nwith error: {1!s}'.format(file_entry.path_spec.comparable, exception))
return None
if (not file_object):
return None
try:
data = file_object.read(self._READ_BUFFER_SIZE)
while data:
hash_context.update(data)
data = file_object.read(self._READ_BUFFER_SIZE)
except IOError as exception:
logging.warning('Unable to read from path specification:\n{0:s}\nwith error: {1!s}'.format(file_entry.path_spec.comparable, exception))
return None
finally:
file_object.close()
return hash_context.hexdigest()
|
Calculates a message digest hash of the data of the file entry.
Args:
file_entry (dfvfs.FileEntry): file entry.
data_stream_name (str): name of the data stream.
Returns:
str: hexadecimal digest hash or None.
|
codesearchnet
|
def GetFileEntryByPathSpec(self, path_spec):
fsapfs_file_entry = None
location = getattr(path_spec, 'location', None)
identifier = getattr(path_spec, 'identifier', None)
if (location == self.LOCATION_ROOT or
identifier == self.ROOT_DIRECTORY_IDENTIFIER):
fsapfs_file_entry = self._fsapfs_volume.get_root_directory()
return apfs_file_entry.APFSFileEntry(
self._resolver_context, self, path_spec,
fsapfs_file_entry=fsapfs_file_entry, is_root=True)
try:
if identifier is not None:
fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_identifier(
identifier)
elif location is not None:
fsapfs_file_entry = self._fsapfs_volume.get_file_entry_by_path(location)
except IOError as exception:
raise errors.BackEndError(exception)
if fsapfs_file_entry is None:
return None
return apfs_file_entry.APFSFileEntry(
self._resolver_context, self, path_spec,
fsapfs_file_entry=fsapfs_file_entry)
|
Retrieves a file entry for a path specification.
Args:
path_spec (PathSpec): path specification.
Returns:
APFSFileEntry: file entry or None if not available.
Raises:
BackEndError: if the file entry cannot be opened.
|
juraj-google-style
|
def GetParsersInformation(cls):
parsers_information = []
for (_, parser_class) in cls.GetParsers():
description = getattr(parser_class, 'DESCRIPTION', '')
parsers_information.append((parser_class.NAME, description))
return parsers_information
|
Retrieves the parsers information.
Returns:
list[tuple[str, str]]: parser names and descriptions.
|
codesearchnet
|
def __init__(
self, keys: Dict[Tuple[YangIdentifier, Optional[YangIdentifier]], str]):
self.keys = keys
|
Initialize the class instance.
Args:
keys: Dictionary with keys of an entry.
|
juraj-google-style
|
def signature_cert_chain_url(url):
r = urlparse(url)
if (not (r.scheme.lower() == 'https')):
warnings.warn('Certificate URL scheme is invalid.')
return False
if (not (r.hostname.lower() == 's3.amazonaws.com')):
warnings.warn('Certificate URL hostname is invalid.')
return False
if (not os.path.normpath(r.path).startswith('/echo.api/')):
warnings.warn('Certificate URL path is invalid.')
return False
if (r.port and (not (r.port == 443))):
warnings.warn('Certificate URL port is invalid.')
return False
return True
|
Validate URL specified by SignatureCertChainUrl.
See `validate.request` for additional info.
Args:
url: str. SignatureCertChainUrl header value sent by request.
Returns:
bool: True if valid, False otherwise.
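For example (the first URL follows Amazon's documented pattern; both calls are illustrative):
signature_cert_chain_url('https://s3.amazonaws.com/echo.api/echo-api-cert.pem')  # True
signature_cert_chain_url('http://s3.amazonaws.com/echo.api/echo-api-cert.pem')   # False: invalid scheme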
|
codesearchnet
|
def list_file_extensions(path: str, reportevery: int=1) -> List[str]:
extensions = set()
count = 0
for (root, dirs, files) in os.walk(path):
count += 1
if ((count % reportevery) == 0):
log.debug('Walking directory {}: {!r}', count, root)
for file in files:
(filename, ext) = os.path.splitext(file)
extensions.add(ext)
return sorted(list(extensions))
|
Returns a sorted list of every file extension found in a directory
and its subdirectories.
Args:
path: path to scan
reportevery: report directory progress after every *n* steps
Returns:
sorted list of every file extension found
|
codesearchnet
|
def GetForwardedIps(self, interface, interface_ip=None):
try:
ips = netifaces.ifaddresses(interface)
ips = ips[netifaces.AF_INET]
except (ValueError, KeyError, IndexError):
return []
forwarded_ips = []
for ip in ips:
if ip['addr'] != interface_ip:
full_addr = '%s/%d' % (ip['addr'], netaddr.IPAddress(ip['netmask']).netmask_bits())
forwarded_ips.append(full_addr)
return self.ParseForwardedIps(forwarded_ips)
|
Retrieve the list of configured forwarded IP addresses.
Args:
interface: string, the output device to query.
interface_ip: string, current interface ip address.
Returns:
list, the IP address strings.
|
juraj-google-style
|
def all(self, **kwargs):
path = ('%s/all' % self.path)
obj = self.gitlab.http_list(path, **kwargs)
return [self._obj_cls(self, item) for item in obj]
|
List all the members, included inherited ones.
Args:
all (bool): If True, return all the items, without pagination
per_page (int): Number of items to retrieve per request
page (int): ID of the page to return (starts with page 1)
as_list (bool): If set to False and no pagination option is
defined, return a generator instead of a list
**kwargs: Extra options to send to the server (e.g. sudo)
Raises:
GitlabAuthenticationError: If authentication is not correct
GitlabListError: If the list could not be retrieved
Returns:
RESTObjectList: The list of members
|
codesearchnet
|
def add_numeric_table_values(table, min_consolidation_fraction=0.7, debug_info=None):
table = table.copy()
filter_invalid_unicode_from_table(table)
for row_index, row in table.iterrows():
for col_index, cell in enumerate(row):
table.iloc[row_index, col_index] = Cell(text=cell)
for col_index, column in enumerate(table.columns):
column_values = _consolidate_numeric_values(_get_column_values(table, col_index), min_consolidation_fraction=min_consolidation_fraction, debug_info=(debug_info, column))
for row_index, numeric_value in column_values.items():
table.iloc[row_index, col_index].numeric_value = numeric_value
return table
|
Parses text in table column-wise and adds the consolidated values. Consolidation refers to finding values with a
common type (date or number).
Args:
table:
Table to annotate.
min_consolidation_fraction:
Fraction of cells in a column that need to have consolidated value.
debug_info:
Additional information used for logging.
|
github-repos
|
def block(self, cutoffs=None, values=None, n_bins=0, right=False, function=None):
params = self.__dict__.copy()
if ((values is not None) and (cutoffs is None)):
cutoffs = values[1:]
if ((cutoffs is None) and (n_bins == 0)):
cutoffs = np.mean(self)
if ((n_bins != 0) and (cutoffs is None)):
(mi, ma) = (np.amin(self), np.amax(self))
cutoffs = np.linspace(mi, ma, (n_bins + 1))
cutoffs = cutoffs[:(- 1)]
try:
data = np.digitize(self, cutoffs, right)
except ValueError:
data = np.digitize(self, [cutoffs], right)
if ((function is None) and (values is None)):
return Curve(data, params=params)
data = data.astype(float)
f = (function or utils.null)
(tops, vals) = utils.find_edges(data)
if (values is None):
for (top, base) in zip(tops[:(- 1)], tops[1:]):
data[top:base] = f(np.copy(self[top:base]))
data[base:] = f(np.copy(self[base:]))
else:
for (top, base, val) in zip(tops[:(- 1)], tops[1:], vals[:(- 1)]):
data[top:base] = values[int(val)]
data[base:] = values[int(vals[(- 1)])]
return Curve(data, params=params)
|
Block a log based on number of bins, or on cutoffs.
Args:
cutoffs (array): bin edges passed to np.digitize; defaults to the curve mean when neither values nor n_bins is given
values (array): the values to map to. Defaults to [0, 1, 2,...]
n_bins (int): number of equal-width bins to use when no cutoffs are given
right (bool): whether intervals include the right bin edge (see np.digitize)
function (function): transform the log if you want.
Returns:
Curve.
|
codesearchnet
|
def get_table_columns(metadata):
cols = OrderedDict()
for col in metadata.c:
name = str(col).rpartition('.')[2]
cols[name] = col.type.python_type.__name__
return cols
|
Extract column names and Python types from table metadata
Args:
metadata: Table metadata
Returns:
dict with column names and Python types
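For instance, for a hypothetical SQLAlchemy table with an Integer and a String column:
from sqlalchemy import Table, Column, Integer, String, MetaData
users = Table('users', MetaData(), Column('id', Integer), Column('name', String))
get_table_columns(users)  # -> OrderedDict([('id', 'int'), ('name', 'str')])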
|
codesearchnet
|
def set_hparam(self, name, value):
(param_type, is_list) = self._hparam_types[name]
if isinstance(value, list):
if (not is_list):
raise ValueError(('Must not pass a list for single-valued parameter: %s' % name))
setattr(self, name, [_cast_to_type_if_compatible(name, param_type, v) for v in value])
else:
if is_list:
raise ValueError(('Must pass a list for multi-valued parameter: %s.' % name))
setattr(self, name, _cast_to_type_if_compatible(name, param_type, value))
|
Set the value of an existing hyperparameter.
This function verifies that the type of the value matches the type of the
existing hyperparameter.
Args:
name: Name of the hyperparameter.
value: New value of the hyperparameter.
Raises:
KeyError: If the hyperparameter doesn't exist.
ValueError: If there is a type mismatch.
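For example, with hypothetical hyperparameter names already registered on hparams:
hparams.set_hparam('learning_rate', 0.01)     # single-valued parameter
hparams.set_hparam('hidden_sizes', [64, 32])  # parameter registered as a list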
|
codesearchnet
|
def GetLaunchedFlows(self, flow_type="outstanding"):
result = None
all_clients = set(self.ListAllClients())
finished_clients = set(self.ListFinishedClients())
outstanding_clients = all_clients - finished_clients
if flow_type == "all":
result = all_clients
elif flow_type == "finished":
result = finished_clients
elif flow_type == "outstanding":
result = outstanding_clients
flows = aff4.FACTORY.MultiListChildren(
[self.urn.Add(x.Basename()) for x in result])
return [x[0] for _, x in flows]
|
Returns the session IDs of all the flows we launched.
Args:
flow_type: The type of flows to fetch. Can be "all", "outstanding" or
"finished".
Returns:
A list of flow URNs.
|
juraj-google-style
|
def force_string(val=None):
if (val is None):
return ''
if isinstance(val, list):
newval = [str(x) for x in val]
return ';'.join(newval)
if isinstance(val, str):
return val
else:
return str(val)
|
Force a string representation of an object
Args:
val: object to parse into a string
Returns:
str: String representation
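A few representative calls:
force_string(None)            # ''
force_string(['a', 1, None])  # 'a;1;None'
force_string(42)              # '42'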
|
codesearchnet
|
def plot_state_qsphere(rho, figsize=None):
if not HAS_MATPLOTLIB:
raise ImportError('Must have Matplotlib installed.')
rho = _validate_input_state(rho)
if figsize is None:
figsize = (7, 7)
num = int(np.log2(len(rho)))
we, stateall = linalg.eigh(rho)
for _ in range(2**num):
probmix = we.max()
prob_location = we.argmax()
if probmix > 0.001:
state = stateall[:, prob_location]
loc = np.absolute(state).argmax()
for j in range(2**num):
test = np.absolute(np.absolute(state[j]) -
np.absolute(state[loc]))
if test < 0.001:
loc = j
break
angles = (np.angle(state[loc]) + 2 * np.pi) % (2 * np.pi)
angleset = np.exp(-1j*angles)
state = angleset*state
state.flatten()
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(111, projection='3d')
ax.axes.set_xlim3d(-1.0, 1.0)
ax.axes.set_ylim3d(-1.0, 1.0)
ax.axes.set_zlim3d(-1.0, 1.0)
ax.set_aspect("equal")
ax.axes.grid(False)
u = np.linspace(0, 2 * np.pi, 25)
v = np.linspace(0, np.pi, 25)
x = np.outer(np.cos(u), np.sin(v))
y = np.outer(np.sin(u), np.sin(v))
z = np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x, y, z, rstride=1, cstride=1, color='k',
alpha=0.05, linewidth=0)
ax.w_xaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.w_yaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.w_zaxis.set_pane_color((1.0, 1.0, 1.0, 0.0))
ax.w_xaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
ax.w_yaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
ax.w_zaxis.line.set_color((1.0, 1.0, 1.0, 0.0))
ax.set_xticks([])
ax.set_yticks([])
ax.set_zticks([])
d = num
for i in range(2**num):
element = bin(i)[2:].zfill(num)
weight = element.count("1")
zvalue = -2 * weight / d + 1
number_of_divisions = n_choose_k(d, weight)
weight_order = bit_string_index(element)
angle = weight_order * 2 * np.pi / number_of_divisions
xvalue = np.sqrt(1 - zvalue**2) * np.cos(angle)
yvalue = np.sqrt(1 - zvalue**2) * np.sin(angle)
ax.plot([xvalue], [yvalue], [zvalue],
markerfacecolor=(.5, .5, .5),
markeredgecolor=(.5, .5, .5),
marker='o', markersize=10, alpha=1)
prob = np.real(np.dot(state[i], state[i].conj()))
colorstate = phase_to_color_wheel(state[i])
a = Arrow3D([0, xvalue], [0, yvalue], [0, zvalue],
mutation_scale=20, alpha=prob, arrowstyle="-",
color=colorstate, lw=10)
ax.add_artist(a)
for weight in range(d + 1):
theta = np.linspace(-2 * np.pi, 2 * np.pi, 100)
z = -2 * weight / d + 1
r = np.sqrt(1 - z**2)
x = r * np.cos(theta)
y = r * np.sin(theta)
ax.plot(x, y, z, color=(.5, .5, .5))
ax.plot([0], [0], [0], markerfacecolor=(.5, .5, .5),
markeredgecolor=(.5, .5, .5), marker='o', markersize=10,
alpha=1)
we[prob_location] = 0
else:
break
plt.tight_layout()
plt.close(fig)
return fig
|
Plot the qsphere representation of a quantum state.
Args:
rho (ndarray): State vector or density matrix representation of quantum state.
figsize (tuple): Figure size in inches.
Returns:
Figure: A matplotlib figure instance.
Raises:
ImportError: Requires matplotlib.
|
juraj-google-style
|