code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def authorize_security_group(self, group_name=None,
src_security_group_name=None,
src_security_group_owner_id=None,
ip_protocol=None, from_port=None, to_port=None,
cidr_ip=None, group_id=None,
src_security_group_group_id=None):
if src_security_group_name:
if from_port is None and to_port is None and ip_protocol is None:
return self.authorize_security_group_deprecated(
group_name, src_security_group_name,
src_security_group_owner_id)
params = {}
if group_name:
params['GroupName'] = group_name
if group_id:
params['GroupId'] = group_id
if src_security_group_name:
param_name = 'IpPermissions.1.Groups.1.GroupName'
params[param_name] = src_security_group_name
if src_security_group_owner_id:
param_name = 'IpPermissions.1.Groups.1.UserId'
params[param_name] = src_security_group_owner_id
if src_security_group_group_id:
param_name = 'IpPermissions.1.Groups.1.GroupId'
params[param_name] = src_security_group_group_id
if ip_protocol:
params['IpPermissions.1.IpProtocol'] = ip_protocol
if from_port is not None:
params['IpPermissions.1.FromPort'] = from_port
if to_port is not None:
params['IpPermissions.1.ToPort'] = to_port
if cidr_ip:
params['IpPermissions.1.IpRanges.1.CidrIp'] = cidr_ip
return self.get_status('AuthorizeSecurityGroupIngress',
params, verb='POST') | Add a new rule to an existing security group.
You need to pass in either src_security_group_name and
src_security_group_owner_id OR ip_protocol, from_port, to_port,
and cidr_ip. In other words, either you are authorizing another
group or you are authorizing some ip-based rule.
:type group_name: string
:param group_name: The name of the security group you are adding
the rule to.
:type src_security_group_name: string
:param src_security_group_name: The name of the security group you are
granting access to.
:type src_security_group_owner_id: string
:param src_security_group_owner_id: The ID of the owner of the security
group you are granting access to.
:type ip_protocol: string
:param ip_protocol: Either tcp | udp | icmp
:type from_port: int
:param from_port: The beginning port number you are enabling
:type to_port: int
:param to_port: The ending port number you are enabling
:type cidr_ip: string
:param cidr_ip: The CIDR block you are providing access to.
See http://goo.gl/Yj5QC
:type group_id: string
:param group_id: ID of the EC2 or VPC security group to modify.
This is required for VPC security groups and
can be used instead of group_name for EC2
security groups.
:type group_id: string
:param group_id: ID of the EC2 or VPC source security group.
This is required for VPC security groups and
can be used instead of group_name for EC2
security groups.
:rtype: bool
:return: True if successful. |
def expression(callable, rule_name, grammar):
num_args = len(getargspec(callable).args)
if num_args == 2:
is_simple = True
elif num_args == 5:
is_simple = False
else:
raise RuntimeError("Custom rule functions must take either 2 or 5 "
"arguments, not %s." % num_args)
class AdHocExpression(Expression):
def _uncached_match(self, text, pos, cache, error):
result = (callable(text, pos) if is_simple else
callable(text, pos, cache, error, grammar))
if isinstance(result, integer_types):
end, children = result, None
elif isinstance(result, tuple):
end, children = result
else:
return result
return Node(self, text, pos, end, children=children)
def _as_rhs(self):
return '{custom function "%s"}' % callable.__name__
return AdHocExpression(name=rule_name) | Turn a plain callable into an Expression.
The callable can be of this simple form::
def foo(text, pos):
'''If this custom expression matches starting at text[pos], return
the index where it stops matching. Otherwise, return None.'''
if the expression matched:
return end_pos
If there child nodes to return, return a tuple::
return end_pos, children
If the expression doesn't match at the given ``pos`` at all... ::
return None
If your callable needs to make sub-calls to other rules in the grammar or
do error reporting, it can take this form, gaining additional arguments::
def foo(text, pos, cache, error, grammar):
# Call out to other rules:
node = grammar['another_rule'].match_core(text, pos, cache, error)
...
# Return values as above.
The return value of the callable, if an int or a tuple, will be
automatically transmuted into a :class:`~parsimonious.Node`. If it returns
a Node-like class directly, it will be passed through unchanged.
:arg rule_name: The rule name to attach to the resulting
:class:`~parsimonious.Expression`
:arg grammar: The :class:`~parsimonious.Grammar` this expression will be a
part of, to make delegating to other rules possible |
def get_options(server):
try:
response = requests.options(
server, allow_redirects=False, verify=False, timeout=5)
except (requests.exceptions.ConnectionError,
requests.exceptions.MissingSchema):
return "Server {} is not available!".format(server)
try:
return {'allowed': response.headers['Allow']}
except KeyError:
return "Unable to get HTTP methods" | Retrieve the available HTTP verbs |
def _recv_ack(self, method_frame):
if self._ack_listener:
delivery_tag = method_frame.args.read_longlong()
multiple = method_frame.args.read_bit()
if multiple:
while self._last_ack_id < delivery_tag:
self._last_ack_id += 1
self._ack_listener(self._last_ack_id)
else:
self._last_ack_id = delivery_tag
self._ack_listener(self._last_ack_id) | Receive an ack from the broker. |
def get_scc_from_tuples(constraints):
classes = unionfind.classes(constraints)
return dict((x, tuple(c)) for x, c in classes.iteritems()) | Given set of equivalences, return map of transitive equivalence classes.
>> constraints = [(1,2), (2,3)]
>> get_scc_from_tuples(constraints)
{
1: (1, 2, 3),
2: (1, 2, 3),
3: (1, 2, 3),
} |
def overlap_slices(large_array_shape, small_array_shape, position):
edges_min = [int(pos - small_shape // 2) for (pos, small_shape) in
zip(position, small_array_shape)]
edges_max = [int(pos + (small_shape - small_shape // 2)) for
(pos, small_shape) in
zip(position, small_array_shape)]
slices_large = tuple(slice(max(0, edge_min), min(large_shape, edge_max))
for (edge_min, edge_max, large_shape) in
zip(edges_min, edges_max, large_array_shape))
slices_small = tuple(slice(max(0, -edge_min),
min(large_shape - edge_min,
edge_max - edge_min))
for (edge_min, edge_max, large_shape) in
zip(edges_min, edges_max, large_array_shape))
return slices_large, slices_small | Modified version of `~astropy.nddata.utils.overlap_slices`.
Get slices for the overlapping part of a small and a large array.
Given a certain position of the center of the small array, with
respect to the large array, tuples of slices are returned which can be
used to extract, add or subtract the small array at the given
position. This function takes care of the correct behavior at the
boundaries, where the small array is cut of appropriately.
Parameters
----------
large_array_shape : tuple
Shape of the large array.
small_array_shape : tuple
Shape of the small array.
position : tuple
Position of the small array's center, with respect to the large array.
Coordinates should be in the same order as the array shape.
Returns
-------
slices_large : tuple of slices
Slices in all directions for the large array, such that
``large_array[slices_large]`` extracts the region of the large array
that overlaps with the small array.
slices_small : slice
Slices in all directions for the small array, such that
``small_array[slices_small]`` extracts the region that is inside the
large array. |
def synthesize(self, modules, use_string, x64, native):
print(hash_func)
groups = group_by(modules, ends_with_punctuation)
sources = self.make_source(groups, self.database)
if sources:
return stylify_files(
{'defs.h': sources[0], 'init.c': sources[1]}
)
else:
return '' | Transform sources. |
async def punsubscribe(self, *args):
if args:
args = list_or_args(args[0], args[1:])
return await self.execute_command('PUNSUBSCRIBE', *args) | Unsubscribe from the supplied patterns. If empy, unsubscribe from
all patterns. |
def cli(env, identifier, body):
mgr = SoftLayer.TicketManager(env.client)
ticket_id = helpers.resolve_id(mgr.resolve_ids, identifier, 'ticket')
if body is None:
body = click.edit('\n\n' + ticket.TEMPLATE_MSG)
mgr.update_ticket(ticket_id=ticket_id, body=body)
env.fout("Ticket Updated!") | Adds an update to an existing ticket. |
def read(self, size=-1):
logger.debug("reading with size: %d", size)
if self.response is None:
return b''
if size == 0:
return b''
elif size < 0 and len(self._read_buffer) == 0:
retval = self.response.raw.read()
elif size < 0:
retval = self._read_buffer.read() + self.response.raw.read()
else:
while len(self._read_buffer) < size:
logger.debug("http reading more content at current_pos: %d with size: %d", self._current_pos, size)
bytes_read = self._read_buffer.fill(self._read_iter)
if bytes_read == 0:
retval = self._read_buffer.read()
self._current_pos += len(retval)
return retval
retval = self._read_buffer.read(size)
self._current_pos += len(retval)
return retval | Mimics the read call to a filehandle object. |
def get_source_handler(self, model, source):
if isinstance(source, Column):
return source
modelfield = resolve_orm_path(model, source)
column_class = get_column_for_modelfield(modelfield)
return column_class() | Allow the nested Column source to be its own handler. |
def confusion_matrix(self):
return plot.confusion_matrix(self.y_true, self.y_pred,
self.target_names, ax=_gen_ax()) | Confusion matrix plot |
def to_api_repr(self):
config = copy.deepcopy(self._properties)
if self.options is not None:
r = self.options.to_api_repr()
if r != {}:
config[self.options._RESOURCE_NAME] = r
return config | Build an API representation of this object.
Returns:
Dict[str, Any]:
A dictionary in the format used by the BigQuery API. |
def xiphias_get_users_by_alias(self, alias_jids: Union[str, List[str]]):
return self._send_xmpp_element(xiphias.UsersByAliasRequest(alias_jids)) | Like xiphias_get_users, but for aliases instead of jids.
:param alias_jids: one jid, or a list of jids |
def _create_url(self, date):
return BOXSCORES_URL % (date.year, date.month, date.day) | Build the URL based on the passed datetime object.
In order to get the proper boxscore page, the URL needs to include the
requested month, day, and year.
Parameters
----------
date : datetime object
The date to search for any matches. The month, day, and year are
required for the search, but time is not factored into the search.
Returns
-------
string
Returns a ``string`` of the boxscore URL including the requested
date. |
def get_section_by_name(self, section_name):
sections = self.unravel_sections(self.get_sections())
for section in sections:
if section['name'] == section_name:
return section['groupId'], section
return None, None | Get a section by its name.
Get a list of sections for a given gradebook,
specified by a gradebookid.
Args:
section_name (str): The section's name.
Raises:
requests.RequestException: Exception connection error
ValueError: Unable to decode response content
Returns:
tuple: tuple of group id, and section dictionary
An example return value is:
.. code-block:: python
(
1327565,
{
u'editable': True,
u'groupId': 1327565,
u'groupingScheme': u'Recitation',
u'members': None,
u'name': u'r01',
u'shortName': u'r01',
u'staffs': None
}
) |
def GetName(self):
if self.AssetType == AssetType.GoverningToken:
return "NEO"
elif self.AssetType == AssetType.UtilityToken:
return "NEOGas"
if type(self.Name) is bytes:
return self.Name.decode('utf-8')
return self.Name | Get the asset name based on its type.
Returns:
str: 'NEO' or 'NEOGas' |
def _parse(string):
if string:
return suds.sax.parser.Parser().parse(string=string) | Parses given XML document content.
Returns the resulting root XML element node or None if the given XML
content is empty.
@param string: XML document content to parse.
@type string: I{bytes}
@return: Resulting root XML element node or None.
@rtype: L{Element}|I{None} |
def provision(self, tool: Tool) -> docker.models.containers.Container:
if not self.is_installed(tool):
raise Exception("tool is not installed: {}".format(tool.name))
client = self.__installation.docker
return client.containers.create(tool.image) | Provisions a mountable Docker container for a given tool. |
def get_audits():
audits = [TemplatedFile('/etc/login.defs', LoginContext(),
template_dir=TEMPLATES_DIR,
user='root', group='root', mode=0o0444)]
return audits | Get OS hardening login.defs audits.
:returns: dictionary of audits |
def to_python_(self, table_name: str="data") -> list:
try:
renderer = pytablewriter.PythonCodeTableWriter
data = self._build_export(renderer, table_name)
return data
except Exception as e:
self.err(e, "Can not convert data to python list") | Convert the main dataframe to python a python list
:param table_name: python variable name, defaults to "data"
:param table_name: str, optional
:return: a python list of lists with the data
:rtype: str
:example: ``ds.to_python_("myvar")`` |
def is_valid(self):
return (self.is_primitive and self.name) \
or (self.is_complex and self.name) \
or (self.is_list and self.nested) \
or (self.is_map and self.nested) \
or (self.is_model and self.nested) | checks if type is a valid type |
def _request(self, function, params, method='POST', headers={}):
if method is 'POST':
params = urllib.parse.urlencode(params)
headers = { "Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain" }
path = '/%s/%s' % (self._version, function)
self._conn.request(method, path, params, headers)
return self._conn.getresponse() | Builds a request object. |
def from_inline(cls: Type[RevocationType], version: int, currency: str, inline: str) -> RevocationType:
cert_data = Revocation.re_inline.match(inline)
if cert_data is None:
raise MalformedDocumentError("Revokation")
pubkey = cert_data.group(1)
signature = cert_data.group(2)
return cls(version, currency, pubkey, signature) | Return Revocation document instance from inline string
Only self.pubkey is populated.
You must populate self.identity with an Identity instance to use raw/sign/signed_raw methods
:param version: Version number
:param currency: Name of the currency
:param inline: Inline document
:return: |
def _prepare_env(self):
env = self.state.document.settings.env
if not hasattr(env, self.directive_name):
state = DirectiveState()
setattr(env, self.directive_name, state)
else:
state = getattr(env, self.directive_name)
return env, state | Setup the document's environment, if necessary. |
def multiply(self, x1, x2, out=None):
if out is None:
out = self.element()
if out not in self:
raise LinearSpaceTypeError('`out` {!r} is not an element of '
'{!r}'.format(out, self))
if x1 not in self:
raise LinearSpaceTypeError('`x1` {!r} is not an element of '
'{!r}'.format(x1, self))
if x2 not in self:
raise LinearSpaceTypeError('`x2` {!r} is not an element of '
'{!r}'.format(x2, self))
self._multiply(x1, x2, out)
return out | Return the pointwise product of ``x1`` and ``x2``.
Parameters
----------
x1, x2 : `LinearSpaceElement`
Multiplicands in the product.
out : `LinearSpaceElement`, optional
Element to which the result is written.
Returns
-------
out : `LinearSpaceElement`
Product of the elements. If ``out`` was provided, the
returned object is a reference to it. |
def entropy_of_antennas(positions, normalize=False):
counter = Counter(p for p in positions)
raw_entropy = entropy(list(counter.values()))
n = len(counter)
if normalize and n > 1:
return raw_entropy / math.log(n)
else:
return raw_entropy | The entropy of visited antennas.
Parameters
----------
normalize: boolean, default is False
Returns a normalized entropy between 0 and 1. |
def running_jobs(self, exit_on_error=True):
with self.handling_exceptions():
if self.using_jobs:
from concurrent.futures import ProcessPoolExecutor
try:
with ProcessPoolExecutor(self.jobs) as self.executor:
yield
finally:
self.executor = None
else:
yield
if exit_on_error:
self.exit_on_error() | Initialize multiprocessing. |
def format_item(self, action, item_type='actor'):
obj = getattr(action, item_type)
return {
'id': self.get_uri(action, obj),
'url': self.get_url(action, obj),
'objectType': ContentType.objects.get_for_model(obj).name,
'displayName': text_type(obj)
} | Returns a formatted dictionary for an individual item based on the action and item_type. |
def load_table(self, table_name):
table_name = normalize_table_name(table_name)
with self.lock:
if table_name not in self._tables:
self._tables[table_name] = Table(self, table_name)
return self._tables.get(table_name) | Load a table.
This will fail if the tables does not already exist in the database. If
the table exists, its columns will be reflected and are available on
the :py:class:`Table <dataset.Table>` object.
Returns a :py:class:`Table <dataset.Table>` instance.
::
table = db.load_table('population') |
def macro2blackbox_outputs(self, macro_indices):
if not self.blackbox:
raise ValueError('System is not blackboxed')
return tuple(sorted(set(
self.macro2micro(macro_indices)
).intersection(self.blackbox.output_indices))) | Given a set of macro elements, return the blackbox output elements
which compose these elements. |
def perform_smooth(x_values, y_values, span=None, smoother_cls=None):
if smoother_cls is None:
smoother_cls = DEFAULT_BASIC_SMOOTHER
smoother = smoother_cls()
smoother.specify_data_set(x_values, y_values)
smoother.set_span(span)
smoother.compute()
return smoother | Convenience function to run the basic smoother.
Parameters
----------
x_values : iterable
List of x value observations
y_ values : iterable
list of y value observations
span : float, optional
Fraction of data to use as the window
smoother_cls : Class
The class of smoother to use to smooth the data
Returns
-------
smoother : object
The smoother object with results stored on it. |
def _do_pnp(self, pnp, anchor=None):
if anchor or pnp and pnp.endswith("PNP"):
if anchor is not None:
m = find(lambda x: x.startswith("P"), anchor)
else:
m = None
if self.pnp \
and pnp \
and pnp != OUTSIDE \
and pnp.startswith("B-") is False \
and self.words[-2].pnp is not None:
self.pnp[-1].append(self.words[-1])
elif m is not None and m == self._attachment:
self.pnp[-1].append(self.words[-1])
else:
ch = PNPChunk(self, [self.words[-1]], type="PNP")
self.pnp.append(ch)
self._attachment = m | Attaches prepositional noun phrases.
Identifies PNP's from either the PNP tag or the P-attachment tag.
This does not determine the PP-anchor, it only groups words in a PNP chunk. |
def _general_multithread(func):
def multithread(templates, stream, *args, **kwargs):
with pool_boy(ThreadPool, len(stream), **kwargs) as pool:
return _pool_normxcorr(templates, stream, pool=pool, func=func)
return multithread | return the general multithreading function using func |
def prepare_for_sending(self):
if [arbiter_link for arbiter_link in self.arbiters if arbiter_link.spare]:
logger.info('Serializing the configuration for my spare arbiter...')
self.spare_arbiter_conf = serialize(self) | The configuration needs to be serialized before being sent to a spare arbiter
:return: None |
def generate_antisense_sequence(sequence):
dna_antisense = {
'A': 'T',
'T': 'A',
'C': 'G',
'G': 'C'
}
antisense = [dna_antisense[x] for x in sequence[::-1]]
return ''.join(antisense) | Creates the antisense sequence of a DNA strand. |
def failover_limitation(self):
if not self.reachable:
return 'not reachable'
if self.tags.get('nofailover', False):
return 'not allowed to promote'
if self.watchdog_failed:
return 'not watchdog capable'
return None | Returns reason why this node can't promote or None if everything is ok. |
def page_has_tag(page, tag):
from .models import PageTags
if hasattr(tag, 'slug'):
slug = tag.slug
else:
slug = tag
try:
return page.pagetags.tags.filter(slug=slug).exists()
except PageTags.DoesNotExist:
return False | Check if a Page object is associated with the given tag.
:param page: a Page instance
:param tag: a Tag instance or a slug string.
:return: whether the Page instance has the given tag attached (False if no Page or no
attached PageTags exists)
:type: Boolean |
def start_trial(self, trial, checkpoint=None):
self._commit_resources(trial.resources)
try:
self._start_trial(trial, checkpoint)
except Exception as e:
logger.exception("Error starting runner for Trial %s", str(trial))
error_msg = traceback.format_exc()
time.sleep(2)
self._stop_trial(trial, error=True, error_msg=error_msg)
if isinstance(e, AbortTrialExecution):
return
try:
trial.clear_checkpoint()
logger.info(
"Trying to start runner for Trial %s without checkpoint.",
str(trial))
self._start_trial(trial)
except Exception:
logger.exception(
"Error starting runner for Trial %s, aborting!",
str(trial))
error_msg = traceback.format_exc()
self._stop_trial(trial, error=True, error_msg=error_msg) | Starts the trial.
Will not return resources if trial repeatedly fails on start.
Args:
trial (Trial): Trial to be started.
checkpoint (Checkpoint): A Python object or path storing the state
of trial. |
def get_zones(self, q=None, **kwargs):
uri = "/v1/zones"
params = build_params(q, kwargs)
return self.rest_api_connection.get(uri, params) | Returns a list of zones across all of the user's accounts.
Keyword Arguments:
q -- The search parameters, in a dict. Valid keys are:
name - substring match of the zone name
zone_type - one of:
PRIMARY
SECONDARY
ALIAS
sort -- The sort column used to order the list. Valid values for the sort field are:
NAME
ACCOUNT_NAME
RECORD_COUNT
ZONE_TYPE
reverse -- Whether the list is ascending(False) or descending(True)
offset -- The position in the list for the first returned element(0 based)
limit -- The maximum number of rows to be returned. |
def apply_widget_css_class(self, field_name):
field = self.fields[field_name]
class_name = self.get_widget_css_class(field_name, field)
if class_name:
field.widget.attrs['class'] = join_css_class(
field.widget.attrs.get('class', None), class_name) | Applies CSS classes to widgets if available.
The method uses the `get_widget_css_class` method to determine if the widget
CSS class should be changed. If a CSS class is returned, it is appended to
the current value of the class property of the widget instance.
:param field_name: A field name of the form. |
def update_mute(self, data):
self._group['muted'] = data['mute']
self.callback()
_LOGGER.info('updated mute on %s', self.friendly_name) | Update mute. |
def _get_max_sigma(self, R):
max_sigma = 2.0 * math.pow(np.nanmax(np.std(R, axis=0)), 2)
return max_sigma | Calculate maximum sigma of scanner RAS coordinates
Parameters
----------
R : 2D array, with shape [n_voxel, n_dim]
The coordinate matrix of fMRI data from one subject
Returns
-------
max_sigma : float
The maximum sigma of scanner coordinates. |
def rownumbers(self, table=None):
if table is None:
return self._rownumbers(Table())
return self._rownumbers(table) | Return a list containing the row numbers of this table.
This method can be useful after a selection or a sort.
It returns the row numbers of the rows in this table with respect
to the given table. If no table is given, the original table is used.
For example::
t = table('W53.MS')
t1 = t.selectrows([1,3,5,7,9]) # select a few rows
t1.rownumbers(t)
# [1 3 5 7 9]
t2 = t1.selectrows([2,5]) # select rows from the selection
t2.rownumbers(t1)
# [2 5] # rownrs of t2 in table t1
t2.rownumbers(t)
# [3 9] # rownrs of t2 in t
t2.rownumbers()
# [3 9]
The last statements show that the method returns the row numbers
referring to the given table. Table t2 contains rows 2 and 5 in
table t1, which are rows 3 and 9 in table t. |
def zero_pad(ts, n_zeros):
zeros_shape = ts.shape[:-1] + (n_zeros,)
zzs = np.zeros(zeros_shape)
new_data = np.concatenate((zzs, ts.data, zzs), axis=-1)
return nts.TimeSeries(new_data, sampling_rate=ts.sampling_rate) | Pad a nitime.TimeSeries class instance with n_zeros before and after the
data
Parameters
----------
ts : a nitime.TimeSeries class instance |
def version(self):
version = int(self._dll.JLINKARM_GetDLLVersion())
major = version / 10000
minor = (version / 100) % 100
rev = version % 100
rev = '' if rev == 0 else chr(rev + ord('a') - 1)
return '%d.%02d%s' % (major, minor, rev) | Returns the device's version.
The device's version is returned as a string of the format: M.mr where
``M`` is major number, ``m`` is minor number, and ``r`` is revision
character.
Args:
self (JLink): the ``JLink`` instance
Returns:
Device version string. |
def error_handler(self, handler):
if not self.opened():
handler = handler or util.noop
self._error_handler = enums.JLinkFunctions.LOG_PROTOTYPE(handler)
self._dll.JLINKARM_SetErrorOutHandler(self._error_handler) | Setter for the error handler function.
If the DLL is open, this function is a no-op, so it should be called
prior to calling ``open()``.
Args:
self (JLink): the ``JLink`` instance
handler (function): function to call on error messages
Returns:
``None`` |
def single_row_or_col_df_to_series(desired_type: Type[T], single_rowcol_df: pd.DataFrame, logger: Logger, **kwargs)\
-> pd.Series:
if single_rowcol_df.shape[0] == 1:
return single_rowcol_df.transpose()[0]
elif single_rowcol_df.shape[1] == 2 and isinstance(single_rowcol_df.index, pd.RangeIndex):
d = single_rowcol_df.set_index(single_rowcol_df.columns[0])
return d[d.columns[0]]
elif single_rowcol_df.shape[1] == 1:
d = single_rowcol_df
return d[d.columns[0]]
else:
raise ValueError('Unable to convert provided dataframe to a series : '
'expected exactly 1 row or 1 column, found : ' + str(single_rowcol_df.shape) + '') | Helper method to convert a dataframe with one row or one or two columns into a Series
:param desired_type:
:param single_col_df:
:param logger:
:param kwargs:
:return: |
def create_service(self, service_type, plan_name, service_name, params,
async=False, **kwargs):
if self.space.has_service_with_name(service_name):
logging.warning("Service already exists with that name.")
return self.get_instance(service_name)
if self.space.has_service_of_type(service_type):
logging.warning("Service type already exists.")
guid = self.get_service_plan_guid(service_type, plan_name)
if not guid:
raise ValueError("No service plan named: %s" % (plan_name))
body = {
'name': service_name,
'space_guid': self.space.guid,
'service_plan_guid': guid,
'parameters': params
}
uri = '/v2/service_instances?accepts_incomplete=true'
if async:
uri += '&async=true'
return self.api.post(uri, body) | Create a service instance. |
def get_parent_banks(self, bank_id):
if self._catalog_session is not None:
return self._catalog_session.get_parent_catalogs(catalog_id=bank_id)
return BankLookupSession(
self._proxy,
self._runtime).get_banks_by_ids(
list(self.get_parent_bank_ids(bank_id))) | Gets the parents of the given bank.
arg: bank_id (osid.id.Id): a bank ``Id``
return: (osid.assessment.BankList) - the parents of the bank
raise: NotFound - ``bank_id`` is not found
raise: NullArgument - ``bank_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
*compliance: mandatory -- This method must be implemented.* |
def data_received(self, data):
_LOGGER.debug("Starting: data_received")
_LOGGER.debug('Received %d bytes from PLM: %s',
len(data), binascii.hexlify(data))
self._buffer.put_nowait(data)
asyncio.ensure_future(self._peel_messages_from_buffer(),
loop=self._loop)
_LOGGER.debug("Finishing: data_received") | Receive data from the protocol.
Called when asyncio.Protocol detects received data from network. |
def getbulk_by_oid(self, non_repeaters, max_repetitions, *oid):
if self.version.startswith('3'):
errorIndication, errorStatus, errorIndex, varBinds = self.cmdGen.getCmd(
cmdgen.UsmUserData(self.user, self.auth),
cmdgen.UdpTransportTarget((self.host, self.port)),
non_repeaters,
max_repetitions,
*oid
)
if self.version.startswith('2'):
errorIndication, errorStatus, errorIndex, varBindTable = self.cmdGen.bulkCmd(
cmdgen.CommunityData(self.community),
cmdgen.UdpTransportTarget((self.host, self.port)),
non_repeaters,
max_repetitions,
*oid
)
else:
return []
return self.__bulk_result__(errorIndication, errorStatus, errorIndex, varBindTable) | SNMP getbulk request.
In contrast to snmpwalk, this information will typically be gathered in
a single transaction with the agent, rather than one transaction per
variable found.
* non_repeaters: This specifies the number of supplied variables that
should not be iterated over.
* max_repetitions: This specifies the maximum number of iterations over
the repeating variables.
* oid: oid list
> Return a list of dicts |
def factory(data):
if 'object' not in data:
raise exceptions.UnknownAPIResource('Missing `object` key in resource.')
for reconstituable_api_resource_type in ReconstituableAPIResource.__subclasses__():
if reconstituable_api_resource_type.object_type == data['object']:
return reconstituable_api_resource_type(**data)
raise exceptions.UnknownAPIResource('Unknown object `' + data['object'] + '`.') | Try to reconstruct the APIResource from its data.
:param data: The APIResource data
:type data: dict
:return: The guessed APIResource
:raise
exceptions.UnkownAPIResource when it's impossible to reconstruct the APIResource from its data. |
def encodeEntitiesReentrant(self, input):
ret = libxml2mod.xmlEncodeEntitiesReentrant(self._o, input)
return ret | Do a global encoding of a string, replacing the predefined
entities and non ASCII values with their entities and
CharRef counterparts. Contrary to xmlEncodeEntities, this
routine is reentrant, and result must be deallocated. |
def move_entry_in_group(self, entry = None, index = None):
if entry is None or index is None or type(entry) is not v1Entry \
or type(index) is not int:
raise KPError("Need an entry and an index.")
elif index < 0 or index > len(entry.group.entries)-1:
raise KPError("Index is not valid.")
elif entry not in self.entries:
raise KPError("Entry not found.")
pos_in_group = entry.group.entries.index(entry)
pos_in_entries = self.entries.index(entry)
entry_at_index = entry.group.entries[index]
pos_in_entries2 = self.entries.index(entry_at_index)
entry.group.entries[index] = entry
entry.group.entries[pos_in_group] = entry_at_index
self.entries[pos_in_entries2] = entry
self.entries[pos_in_entries] = entry_at_index
return True | Move entry to another position inside a group.
An entry and a valid index to insert the entry in the
entry list of the holding group is needed. 0 means
that the entry is moved to the first position 1 to
the second and so on. |
def __init_configrs(self, rs_cfg):
rs_cfg['id'] = rs_cfg.pop('rs_id', None)
for member in rs_cfg.setdefault('members', [{}]):
member['procParams'] = self._strip_auth(
member.get('procParams', {}))
member['procParams']['configsvr'] = True
if self.enable_ipv6:
common.enable_ipv6_single(member['procParams'])
rs_cfg['sslParams'] = self.sslParams
self._configsvrs.append(ReplicaSets().create(rs_cfg)) | Create and start a config replica set. |
def fswap(p, q):
yield cirq.ISWAP(q, p), cirq.Z(p) ** 1.5
yield cirq.Z(q) ** 1.5 | Decompose the Fermionic SWAP gate into two single-qubit gates and
one iSWAP gate.
Args:
p: the id of the first qubit
q: the id of the second qubit |
def register_or_check(klass, finish, mean, between, refresh_presision, configuration):
m, created = klass.objects.get_or_create(finish=finish, configuration=configuration)
if created:
m.mean=mean
m.between=between
m.refresh_presision=refresh_presision
m.save()
else:
diff = abs(float(m.mean) - mean)
if not(diff < 0.006 and m.between == between and m.refresh_presision == refresh_presision):
raise InvalidMeasurementError("There are diferents values for the same measurement.")
return m | Return the active configurations. |
def post_silence_request(self, kwargs):
self._request('POST', '/silenced', data=json.dumps(kwargs))
return True | Create a silence entry. |
def update_account(self, **kwargs):
api = self._get_api(iam.AccountAdminApi)
account = Account._create_request_map(kwargs)
body = AccountUpdateReq(**account)
return Account(api.update_my_account(body)) | Update details of account associated with current API key.
:param str address_line1: Postal address line 1.
:param str address_line2: Postal address line 2.
:param str city: The city part of the postal address.
:param str display_name: The display name for the account.
:param str country: The country part of the postal address.
:param str company: The name of the company.
:param str state: The state part of the postal address.
:param str contact: The name of the contact person for this account.
:param str postal_code: The postal code part of the postal address.
:param str parent_id: The ID of the parent account.
:param str phone_number: The phone number of the company.
:param str email: Email address for this account.
:param list[str] aliases: List of aliases
:returns: an account object.
:rtype: Account |
async def unixlisten(path, onlink):
info = {'path': path, 'unix': True}
async def onconn(reader, writer):
link = await Link.anit(reader, writer, info=info)
link.schedCoro(onlink(link))
return await asyncio.start_unix_server(onconn, path=path) | Start an PF_UNIX server listening on the given path. |
def predict_proba(estimator, X):
if is_probabilistic_classifier(estimator):
try:
proba, = estimator.predict_proba(X)
return proba
except NotImplementedError:
return None
else:
return None | Return result of predict_proba, if an estimator supports it, or None. |
def _rewrite_q(self, q):
if isinstance(q, tuple) and len(q) == 2:
return rewrite_lookup_key(self.model, q[0]), q[1]
if isinstance(q, Node):
q.children = list(map(self._rewrite_q, q.children))
return q | Rewrite field names inside Q call. |
def checkout(self, ref, branch=None):
return git_checkout(self.repo_dir, ref, branch=branch) | Do a git checkout of `ref`. |
def comment(value, comment_text):
if isinstance(value, Doc):
return comment_doc(value, comment_text)
return comment_value(value, comment_text) | Annotates a value or a Doc with a comment.
When printed by prettyprinter, the comment will be
rendered next to the value or Doc. |
def add_access_list(self, loadbalancer, access_list):
req_body = {"accessList": access_list}
uri = "/loadbalancers/%s/accesslist" % utils.get_id(loadbalancer)
resp, body = self.api.method_post(uri, body=req_body)
return body | Adds the access list provided to the load balancer.
The 'access_list' should be a list of dicts in the following format:
[{"address": "192.0.43.10", "type": "DENY"},
{"address": "192.0.43.11", "type": "ALLOW"},
...
{"address": "192.0.43.99", "type": "DENY"},
]
If no access list exists, it is created. If an access list
already exists, it is updated with the provided list. |
def AgregarAjusteMonetario(self, precio_unitario, precio_recupero=None,
**kwargs):
"Agrega campos al detalle de item por un ajuste monetario"
d = {'precioUnitario': precio_unitario,
'precioRecupero': precio_recupero,
}
item_liq = self.solicitud['itemDetalleAjusteLiquidacion'][-1]
item_liq['ajusteMonetario'] = d
return True | Agrega campos al detalle de item por un ajuste monetario |
def prepare_query(query):
for name in query:
value = query[name]
if value is None:
query[name] = ""
elif isinstance(value, bool):
query[name] = int(value)
elif isinstance(value, dict):
raise ValueError("Invalid query data type %r" %
type(value).__name__) | Prepare a query object for the RAPI.
RAPI has lots of curious rules for coercing values.
This function operates on dicts in-place and has no return value.
@type query: dict
@param query: Query arguments |
def add(self, crash):
if crash not in self:
key = crash.key()
skey = self.marshall_key(key)
data = self.marshall_value(crash, storeMemoryMap = True)
self.__db[skey] = data | Adds a new crash to the container.
If the crash appears to be already known, it's ignored.
@see: L{Crash.key}
@type crash: L{Crash}
@param crash: Crash object to add. |
def create_lbaas_member(self, lbaas_pool, body=None):
return self.post(self.lbaas_members_path % lbaas_pool, body=body) | Creates a lbaas_member. |
def check_workers(self):
if time.time() - self._worker_alive_time > 5:
self._worker_alive_time = time.time()
[worker.join() for worker in self._workers if not worker.is_alive()]
self._workers = [
worker for worker in self._workers if worker.is_alive()
]
if len(self._workers) < self._num_workers:
raise ProcessKilled('One of the workers has been killed.') | Kill workers that have been pending for a while and check if all workers
are alive. |
def counts(self, ids=None, setdata=False, output_format='DataFrame'):
return self.apply(lambda x: x.counts, ids=ids, setdata=setdata, output_format=output_format) | Return the counts in each of the specified measurements.
Parameters
----------
ids : [hashable | iterable of hashables | None]
Keys of measurements to get counts of.
If None is given get counts of all measurements.
setdata : bool
Whether to set the data in the Measurement object.
Used only if data is not already set.
output_format : DataFrame | dict
Specifies the output format for that data.
Returns
-------
[DataFrame | Dictionary]
Dictionary keys correspond to measurement keys. |
def to_code(self, context: Context =None):
context = context or Context()
for imp in self.imports:
if imp not in context.imports:
context.imports.append(imp)
counter = Counter()
lines = list(self.to_lines(context=context, counter=counter))
if counter.num_indented_non_doc_blocks == 0:
if self.expects_body_or_pass:
lines.append(" pass")
elif self.closed_by:
lines[-1] += self.closed_by
else:
if self.closed_by:
lines.append(self.closed_by)
return join_lines(*lines) + self._suffix | Generate the code and return it as a string. |
def add_scroll_bar(self):
adj = self.terminal.get_vadjustment()
scroll = Gtk.VScrollbar(adj)
scroll.show()
self.pack_start(scroll, False, False, 0) | Packs the scrollbar. |
def draw(self, parent, box):
import wx
from MAVProxy.modules.lib import mp_widgets
if self.imgpanel is None:
self.imgpanel = mp_widgets.ImagePanel(parent, self.img())
box.Add(self.imgpanel, flag=wx.LEFT, border=0)
box.Layout() | redraw the image |
def cli_program_names(self):
r
program_names = {}
for cli_class in self.cli_classes:
instance = cli_class()
program_names[instance.program_name] = cli_class
return program_names | r"""Developer script program names. |
def get_int(bytearray_, byte_index):
data = bytearray_[byte_index:byte_index + 2]
data[1] = data[1] & 0xff
data[0] = data[0] & 0xff
packed = struct.pack('2B', *data)
value = struct.unpack('>h', packed)[0]
return value | Get int value from bytearray.
int are represented in two bytes |
def get_local_version(sigdir, sig):
version = None
filename = os.path.join(sigdir, '%s.cvd' % sig)
if os.path.exists(filename):
cmd = ['sigtool', '-i', filename]
sigtool = Popen(cmd, stdout=PIPE, stderr=PIPE)
while True:
line = sigtool.stdout.readline()
if line and line.startswith('Version:'):
version = line.split()[1]
break
if not line:
break
sigtool.wait()
return version | Get the local version of a signature |
def select_relevant_id_columns(rows):
relevant_id_columns = [True]
if rows:
prototype_id = rows[0].id
for column in range(1, len(prototype_id)):
def id_equal_to_prototype(row):
return row.id[column] == prototype_id[column]
relevant_id_columns.append(not all(map(id_equal_to_prototype, rows)))
return relevant_id_columns | Find out which of the entries in Row.id are equal for all given rows.
@return: A list of True/False values according to whether the i-th part of the id is always equal. |
def __set_status(self, value):
if value not in [DELIVERY_METHOD_STATUS_PENDING,
DELIVERY_METHOD_STATUS_SENT,
DELIVERY_METHOD_STATUS_CONFIRMED,
DELIVERY_METHOD_STATUS_BOUNCED]:
raise ValueError("Invalid deliveries method status '%s'" % value)
self.__status = value | Sets the deliveries status of this method.
@param value: str |
def to_yaml(cls, dumper, vividict):
dictionary = cls.vividict_to_dict(vividict)
node = dumper.represent_mapping(cls.yaml_tag, dictionary)
return node | Implementation for the abstract method of the base class YAMLObject |
def download(self, path, args=[], filepath=None, opts={},
compress=True, **kwargs):
url = self.base + path
wd = filepath or '.'
params = []
params.append(('stream-channels', 'true'))
params.append(('archive', 'true'))
if compress:
params.append(('compress', 'true'))
for opt in opts.items():
params.append(opt)
for arg in args:
params.append(('arg', arg))
method = 'get'
res = self._do_request(method, url, params=params, stream=True,
**kwargs)
self._do_raise_for_status(res)
mode = 'r|gz' if compress else 'r|'
with tarfile.open(fileobj=res.raw, mode=mode) as tf:
tf.extractall(path=wd) | Makes a request to the IPFS daemon to download a file.
Downloads a file or files from IPFS into the current working
directory, or the directory given by ``filepath``.
Raises
------
~ipfsapi.exceptions.ErrorResponse
~ipfsapi.exceptions.ConnectionError
~ipfsapi.exceptions.ProtocolError
~ipfsapi.exceptions.StatusError
~ipfsapi.exceptions.TimeoutError
Parameters
----------
path : str
The REST command path to send
filepath : str
The local path where IPFS will store downloaded files
Defaults to the current working directory.
args : list
Positional parameters to be sent along with the HTTP request
opts : dict
Query string paramters to be sent along with the HTTP request
compress : bool
Whether the downloaded file should be GZip compressed by the
daemon before being sent to the client
kwargs : dict
Additional arguments to pass to :mod:`requests` |
def _GetEnableOsLoginValue(self, metadata_dict):
instance_data, project_data = self._GetInstanceAndProjectAttributes(
metadata_dict)
instance_value = instance_data.get('enable-oslogin')
project_value = project_data.get('enable-oslogin')
value = instance_value or project_value or ''
return value.lower() == 'true' | Get the value of the enable-oslogin metadata key.
Args:
metadata_dict: json, the deserialized contents of the metadata server.
Returns:
bool, True if OS Login is enabled for VM access. |
def date_range(start, end, boo):
earliest = datetime.strptime(start.replace('-', ' '), '%Y %m %d')
latest = datetime.strptime(end.replace('-', ' '), '%Y %m %d')
num_days = (latest - earliest).days + 1
all_days = [latest - timedelta(days=x) for x in range(num_days)]
all_days.reverse()
output = []
if boo:
for d in all_days:
output.append(int(str(d).replace('-', '')[:8]))
else:
for d in all_days:
output.append(str(d)[:10])
return output | Return list of dates within a specified range, inclusive.
Args:
start: earliest date to include, String ("2015-11-25")
end: latest date to include, String ("2015-12-01")
boo: if true, output list contains Numbers (20151230); if false, list contains Strings ("2015-12-30")
Returns:
list of either Numbers or Strings |
def recruit(self):
if self.networks(full=False):
self.recruiter.recruit(n=1)
else:
self.recruiter.close_recruitment() | Recruit one participant at a time until all networks are full. |
def _meanvalueattr(self,v):
sug = self.layout
if not self.prevlayer(): return sug.grx[v].bar
bars = [sug.grx[x].bar for x in self._neighbors(v)]
return sug.grx[v].bar if len(bars)==0 else float(sum(bars))/len(bars) | find new position of vertex v according to adjacency in prevlayer.
position is given by the mean value of adjacent positions.
experiments show that meanvalue heuristic performs better than median. |
def run(self):
if self.directive_name is None:
raise NotImplementedError('directive_name must be implemented by '
'subclasses of BaseDirective')
env, state = self._prepare_env()
state.doc_names.add(env.docname)
directive_name = '<{}>'.format(self.directive_name)
node = nodes.section()
node.document = self.state.document
result = ViewList()
for line in self._render_rst():
if line.startswith(HEADING_TOKEN):
heading = line[HEADING_TOKEN_LENGTH:]
result.append(heading, directive_name)
result.append('-' * len(heading), directive_name)
else:
result.append(line, directive_name)
nested_parse_with_titles(self.state, result, node)
return node.children | Called by Sphinx to generate documentation for this directive. |
def set_broadcast_layout(self, broadcast_id, layout_type, stylesheet=None):
payload = {
'type': layout_type,
}
if layout_type == 'custom':
if stylesheet is not None:
payload['stylesheet'] = stylesheet
endpoint = self.endpoints.broadcast_url(broadcast_id, layout=True)
response = requests.put(
endpoint,
data=json.dumps(payload),
headers=self.json_headers(),
proxies=self.proxies,
timeout=self.timeout
)
if response.status_code == 200:
pass
elif response.status_code == 400:
raise BroadcastError(
'Invalid request. This response may indicate that data in your request data is '
'invalid JSON. It may also indicate that you passed in invalid layout options.')
elif response.status_code == 403:
raise AuthError('Authentication error.')
else:
raise RequestError('OpenTok server error.', response.status_code) | Use this method to change the layout type of a live streaming broadcast
:param String broadcast_id: The ID of the broadcast that will be updated
:param String layout_type: The layout type for the broadcast. Valid values are:
'bestFit', 'custom', 'horizontalPresentation', 'pip' and 'verticalPresentation'
:param String stylesheet optional: CSS used to style the custom layout.
Specify this only if you set the type property to 'custom' |
def handle_message(self, msg):
if msg.msg_id in self.messagesAllowed:
super(LimitedReporter, self).handle_message(msg) | Manage message of different type and in the context of path. |
def _det_inference(self):
if (self.n_randEffs==2) and (~sp.isnan(self.Y).any()):
rv = 'GP2KronSum'
else:
rv = 'GP'
return rv | Internal method for determining the inference method |
def writeDrizKeywords(hdr,imgnum,drizdict):
_keyprefix = 'D%03d'%imgnum
for key in drizdict:
val = drizdict[key]['value']
if val is None: val = ""
comment = drizdict[key]['comment']
if comment is None: comment = ""
hdr[_keyprefix+key] = (val, drizdict[key]['comment']) | Write basic drizzle-related keywords out to image header as a record
of the processing performed to create the image
The dictionary 'drizdict' will contain the keywords and values to be
written out to the header. |
def _get_request(self, auth=None):
self.request = HSRequest(auth or self.auth, self.env)
self.request.response_callback = self.response_callback
return self.request | Return an http request object
auth: Auth data to use
Returns:
A HSRequest object |
def visible(self):
query_results = self.map(lambda el: el.is_displayed(), 'visible').results
if query_results:
return all(query_results)
return False | Check whether all matched elements are visible.
Returns:
bool |
def _chunk_write(chunk, local_file, progress):
local_file.write(chunk)
if progress is not None:
progress.update(len(chunk)) | Write a chunk to file and update the progress bar. |
async def add_local_charm_dir(self, charm_dir, series):
fh = tempfile.NamedTemporaryFile()
CharmArchiveGenerator(charm_dir).make_archive(fh.name)
with fh:
func = partial(
self.add_local_charm, fh, series, os.stat(fh.name).st_size)
charm_url = await self._connector.loop.run_in_executor(None, func)
log.debug('Uploaded local charm: %s -> %s', charm_dir, charm_url)
return charm_url | Upload a local charm to the model.
This will automatically generate an archive from
the charm dir.
:param charm_dir: Path to the charm directory
:param series: Charm series |
def get_historical_data(symbols, start=None, end=None, **kwargs):
start, end = _sanitize_dates(start, end)
return HistoricalReader(symbols, start=start, end=end, **kwargs).fetch() | Function to obtain historical date for a symbol or list of
symbols. Return an instance of HistoricalReader
Parameters
----------
symbols: str or list
A symbol or list of symbols
start: datetime.datetime, default None
Beginning of desired date range
end: datetime.datetime, default None
End of required date range
kwargs:
Additional Request Parameters (see base class)
Returns
-------
list or DataFrame
Historical stock prices over date range, start to end |
def mangle_agreement(correct_sentence):
bad_sents = []
doc = nlp(correct_sentence)
verbs = [(i, v) for (i, v) in enumerate(doc) if v.tag_.startswith('VB')]
for i, v in verbs:
for alt_verb in lexeme(doc[i].text):
if alt_verb == doc[i].text:
continue
if (tenses(alt_verb) == tenses(v.text) or
(alt_verb.startswith(v.text) and alt_verb.endswith("n't"))):
continue
new_sent = str(doc[:i]) + " {} ".format(alt_verb) + str(doc[i+1:])
new_sent = new_sent.replace(' ,', ',')
bad_sents.append(new_sent)
return bad_sents | Given a correct sentence, return a sentence or sentences with a subject
verb agreement error |
def get_resampler_for_grouping(groupby, rule, how=None, fill_method=None,
limit=None, kind=None, **kwargs):
kwargs['key'] = kwargs.pop('on', None)
tg = TimeGrouper(freq=rule, **kwargs)
resampler = tg._get_resampler(groupby.obj, kind=kind)
r = resampler._get_resampler_for_grouping(groupby=groupby)
return _maybe_process_deprecations(r,
how=how,
fill_method=fill_method,
limit=limit) | Return our appropriate resampler when grouping as well. |
def flattened(self):
parsed = self['parsed_whois']
flat = OrderedDict()
for key in ('domain', 'created_date', 'updated_date', 'expired_date', 'statuses', 'name_servers'):
value = parsed[key]
flat[key] = ' | '.join(value) if type(value) in (list, tuple) else value
registrar = parsed.get('registrar', {})
for key in ('name', 'abuse_contact_phone', 'abuse_contact_email', 'iana_id', 'url', 'whois_server'):
flat['registrar_{0}'.format(key)] = registrar[key]
for contact_type in ('registrant', 'admin', 'tech', 'billing'):
contact = parsed.get('contacts', {}).get(contact_type, {})
for key in ('name', 'email', 'org', 'street', 'city', 'state', 'postal', 'country', 'phone', 'fax'):
value = contact[key]
flat['{0}_{1}'.format(contact_type, key)] = ' '.join(value) if type(value) in (list, tuple) else value
return flat | Returns a flattened version of the parsed whois data |
def on_vrde_server_change(self, restart):
if not isinstance(restart, bool):
raise TypeError("restart can only be an instance of type bool")
self._call("onVRDEServerChange",
in_p=[restart]) | Triggered when settings of the VRDE server object of the
associated virtual machine have changed.
in restart of type bool
Flag whether the server must be restarted
raises :class:`VBoxErrorInvalidVmState`
Session state prevents operation.
raises :class:`VBoxErrorInvalidObjectState`
Session type prevents operation. |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.