code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def serialized_task(self, task: Task) -> Tuple[str, str]:
    """Return the name of the task definition file and its contents."""
    definition_filename = "{}.json".format(task.hash)
    return definition_filename, task.json
def map_aliases_to_device_objects(self):
    """Add a 'portals_aliases' key to every device object.

    A device object knows its rid but not its alias; a portal object
    knows its device rids and aliases. Returns the full device list so
    callers can sort it by alias.
    """
    all_devices = self.get_all_devices_in_portal()
    for dev_o in all_devices:
        # Look up this device's aliases in the portal blob, keyed by rid.
        # NOTE(review): the [2][1] indexing assumes a fixed tuple layout
        # returned by get_portal_by_name() -- confirm against that helper.
        dev_o['portals_aliases'] = self.get_portal_by_name(
            self.portal_name()
        )[2][1]['info']['aliases'][ dev_o['rid'] ]
    return all_devices
def remove_subkey(self, subkey):
    """Remove the given subkey, if it exists, from this AdfKey.

    Only the first subkey whose key matches is removed.

    Parameters
    ----------
    subkey : str or AdfKey
        The subkey (or its key string) to remove.
    """
    key = subkey if isinstance(subkey, str) else subkey.key
    # enumerate() replaces the C-style index loop; iterating an empty
    # list is a no-op, so the original `len(...) > 0` guard was redundant.
    for i, existing in enumerate(self.subkeys):
        if existing.key == key:
            self.subkeys.pop(i)
            break
async def mutation_resolver(self, mutation_name, args, fields):
    """Resolve a mutation by publishing its mapped event.

    Looks up the mutation by name, publishes the corresponding event type
    with ``args`` as the payload, and returns the decoded JSON result.

    Raises:
        ValueError: if no mutation named ``mutation_name`` is configured.
        RuntimeError: if the event broker's reply is not valid JSON.
    """
    try:
        # `[...][0]` on an empty match list raises IndexError, not KeyError;
        # the original only caught KeyError, so an unknown mutation name
        # escaped as a bare IndexError. Catch both.
        mutation_summary = [mutation for mutation in
                            self._external_service_data['mutations']
                            if mutation['name'] == mutation_name][0]
    except (KeyError, IndexError):
        raise ValueError("Could not execute mutation named: " + mutation_name)
    event_function = self.event_broker.ask
    value = await event_function(
        action_type=mutation_summary['event'],
        payload=args
    )
    try:
        return json.loads(value)
    except json.decoder.JSONDecodeError:
        raise RuntimeError(value)
def post_process(self, indices):
    """Post-process the evaluator with the selected attribute indices.

    :param indices: the attribute indices list to use
    :type indices: ndarray
    :return: the processed indices, or None if the Java call returned null
    :rtype: ndarray
    """
    result = javabridge.call(self.jobject, "postProcess", "([I)[I", indices)
    if result is None:
        return None
    return javabridge.get_env().get_int_array_elements(result)
def from_traverse(cls, traverse_block):
    """Create a GremlinFoldedTraverse block as a copy of the given Traverse block."""
    # Guard clause: reject anything that is not a Traverse block.
    if not isinstance(traverse_block, Traverse):
        raise AssertionError(u'Tried to initialize an instance of GremlinFoldedTraverse '
                             u'with block of type {}'.format(type(traverse_block)))
    return cls(traverse_block.direction, traverse_block.edge_name)
def set_eng_float_format(accuracy=3, use_eng_prefix=False):
    """Alter default behavior on how float is formatted in DataFrame.

    Format float in engineering format. By accuracy, we mean the number
    of decimal digits after the floating point.

    See also EngFormatter.
    """
    set_option("display.float_format", EngFormatter(accuracy, use_eng_prefix))
    # Widen the display column so the engineering suffix always fits.
    set_option("display.column_space", max(12, accuracy + 9))
def is_valid(self):
    """Validate that the required parameters are present.

    Raises InvalidArgumentError when the expiration datetime is missing
    or a required form field ('key', 'bucket') is absent.
    """
    if not isinstance(self._expiration, datetime.datetime):
        raise InvalidArgumentError('Expiration datetime must be specified.')
    required = (('key', 'object key must be specified.'),
                ('bucket', 'bucket name must be specified.'))
    for field, message in required:
        if field not in self.form_data:
            raise InvalidArgumentError(message)
def stream_bytes(data, chunk_size=default_chunk_size):
    """Gets a buffered generator for streaming binary data.

    Returns a buffered generator which encodes binary data as
    :mimetype:`multipart/form-data` with the corresponding headers.

    Parameters
    ----------
    data : bytes
        The data bytes to stream
    chunk_size : int
        The maximum size of each stream chunk

    Returns
    -------
    (generator, dict)
    """
    wrapper = BytesStream(data, chunk_size=chunk_size)
    return wrapper.body(), wrapper.headers
def _raise_error_if_column_exists(dataset, column_name = 'dataset',
dataset_variable_name = 'dataset',
column_name_error_message_name = 'column_name'):
err_msg = 'The SFrame {0} must contain the column {1}.'.format(
dataset_variable_name,
column_name_error_message_name)
if column_name not in dataset.column_names():
raise ToolkitError(str(err_msg)) | Check if a column exists in an SFrame with error message. |
def add_gemini_query(self, name, query):
    """Add a user defined gemini query.

    Args:
        name (str): display name of the query
        query (str): the gemini query text

    Returns:
        GeminiQuery: the newly persisted query object
    """
    # Lazy %-style args avoid string formatting when INFO is disabled.
    logger.info("Adding query %s with text %s", name, query)
    new_query = GeminiQuery(name=name, query=query)
    self.session.add(new_query)
    self.save()
    return new_query
def plot(self, dimension):
    """Plot the persistence barcode for ``dimension`` using matplotlib.

    Each life line is drawn as a (birth, death) scatter point. If
    ``self.max_life`` is set, the x-axis is clamped to [0, max_life].
    """
    import matplotlib.pyplot as plt
    life_lines = self.get_life_lines(dimension)
    # Unzip (birth, death) pairs into separate coordinate sequences.
    x, y = zip(*life_lines)
    plt.scatter(x, y)
    plt.xlabel("Birth")
    plt.ylabel("Death")
    if self.max_life is not None:
        plt.xlim([0, self.max_life])
    plt.title("Persistence Homology Dimension {}".format(dimension))
    plt.show()
def send_frame(self, frame):
    """Queue a frame for sending.

    Sends immediately if there are no pending synchronous transactions
    on this connection; otherwise the frame is appended to the pending
    queue.

    Raises:
        ChannelClosed: if the channel is closed.
        Channel.Inactive: if flow control is active and the frame carries
            content.
    """
    if self.closed:
        if self.close_info and len(self.close_info['reply_text']) > 0:
            # NOTE: format string and args are passed as separate
            # exception arguments (library convention), not pre-formatted.
            raise ChannelClosed(
                "channel %d is closed: %s : %s",
                self.channel_id,
                self.close_info['reply_code'],
                self.close_info['reply_text'])
        raise ChannelClosed()
    if not len(self._pending_events):
        # Flow control: content-bearing frames may not be sent while the
        # broker has deactivated the channel.
        if not self._active and \
            isinstance(frame, (ContentFrame, HeaderFrame)):
            raise Channel.Inactive(
                "Channel %d flow control activated", self.channel_id)
        self._connection.send_frame(frame)
    else:
        self._pending_events.append(frame)
def add_vhost(vhost, runas=None):
    """Adds a vhost via rabbitmqctl add_vhost.

    CLI Example:

    .. code-block:: bash

        salt '*' rabbitmq.add_vhost '<vhost_name>'
    """
    # Default to the current user except on Windows, where per-user
    # impersonation is not used for rabbitmqctl.
    if runas is None and not salt.utils.platform.is_windows():
        runas = salt.utils.user.get_user()
    res = __salt__['cmd.run_all'](
        [RABBITMQCTL, 'add_vhost', vhost],
        reset_system_locale=False,
        runas=runas,
        python_shell=False)
    msg = 'Added'
    return _format_response(res, msg)
def load_wav(path, mono=True):
    """Loads a .wav file as a numpy array using ``scipy.io.wavfile``.

    Parameters
    ----------
    path : str
        Path to a .wav file
    mono : bool
        If the provided .wav has more than one channel, it will be
        converted to mono if ``mono=True``. (Default value = True)

    Returns
    -------
    audio_data : np.ndarray
        Array of audio samples, scaled to floats.
    fs : int
        Sampling rate of the audio data.
    """
    fs, audio_data = scipy.io.wavfile.read(path)
    # Scale integer PCM to float by dividing by a fixed per-dtype factor.
    # NOTE(review): int16/2**16 yields [-0.5, 0.5), and int32/2**24 only
    # normalizes 24-bit audio stored in int32 containers -- confirm these
    # divisors are intentional before relying on a [-1, 1] range.
    if audio_data.dtype == 'int8':
        audio_data = audio_data/float(2**8)
    elif audio_data.dtype == 'int16':
        audio_data = audio_data/float(2**16)
    elif audio_data.dtype == 'int32':
        audio_data = audio_data/float(2**24)
    else:
        raise ValueError('Got unexpected .wav data type '
                         '{}'.format(audio_data.dtype))
    # Average channels to produce a mono signal when requested.
    if mono and audio_data.ndim != 1:
        audio_data = audio_data.mean(axis=1)
    return audio_data, fs
def get_monotone_constraints(self):
    """Get the monotone constraints of the Dataset.

    Returns
    -------
    monotone_constraints : numpy array or None
        Monotone constraints: -1, 0 or 1, for each feature in the Dataset.
    """
    # Fetch lazily and cache on first access.
    cached = self.monotone_constraints
    if cached is None:
        cached = self.get_field('monotone_constraints')
        self.monotone_constraints = cached
    return cached
def create_bmi_config_file(self, filename: str = "bmi_config.txt") -> None:
    """Create a BMI config file to initialize the model.

    Args:
        filename: The filename with which the config file should be saved.
    """
    initial_state = self.construct_default_initial_state()
    initial_state.to_csv(filename, index_label="variable")
def check_metric(self, metric):
    """Check that the metric is supported by the KNNIndex instance.

    Raises ValueError listing the supported metrics otherwise.
    """
    if metric in self.VALID_METRICS:
        return
    supported = ', '.join(self.VALID_METRICS)
    raise ValueError(
        f"`{self.__class__.__name__}` does not support the `{metric}` "
        f"metric. Please choose one of the supported metrics: "
        f"{supported}."
    )
def encode(self, data, size):
    """Encode a stream of bytes into an armoured string.

    Returns the armoured string, or NULL if there was insufficient
    memory available to allocate a new string.
    """
    return return_fresh_string(lib.zarmour_encode(self._as_parameter_, data, size))
def ElectronIC(pars, data):
    """Define particle distribution and radiative models, return model flux.

    ``pars`` holds [amplitude (1/eV), spectral index, log10(cutoff/TeV)].
    Flux is evaluated at the energies in ``data`` for a source at 1 kpc.
    """
    # Electron distribution: exponential-cutoff power law normalized at 10 TeV.
    ECPL = ExponentialCutoffPowerLaw(
        pars[0] / u.eV, 10.0 * u.TeV, pars[1], 10 ** pars[2] * u.TeV
    )
    # Inverse Compton emission seeded by the CMB only.
    IC = InverseCompton(ECPL, seed_photon_fields=["CMB"])
    return IC.flux(data, distance=1.0 * u.kpc)
def GetDateRange(self):
    """Return the range over which this ServicePeriod is valid.

    The range includes exception dates that add service outside of
    (start_date, end_date), but doesn't shrink the range if exception
    dates take away service at the edges of the range.

    Returns:
      A tuple of "YYYYMMDD" strings, (start date, end date) or
      (None, None) if no dates have been given.
    """
    start = self.start_date
    end = self.end_date
    # Only service-adding exceptions may widen the range; removals are
    # ignored so they never shrink it.
    for date, (exception_type, _) in self.date_exceptions.items():
        if exception_type == self._EXCEPTION_TYPE_REMOVE:
            continue
        if not start or date < start:
            start = date
        if not end or date > end:
            end = date
    # With only one bound known, collapse to a single-day range.
    if start is None:
        start = end
    elif end is None:
        end = start
    return (start, end)
def read(*components, **kwargs):
    """Read file and return a list of lines.

    If ``comment_char`` is set, strip everything from the comment
    character onward in each line and drop lines that are entirely
    comments.

    Keyword Args:
        rstrip (bool): strip trailing whitespace from each line
            (default True).
        comment_char (str): comment marker, or None to disable filtering.

    Raises:
        IOError: if reading path fails
    """
    rstrip = kwargs.get("rstrip", True)
    comment_char = kwargs.get("comment_char", None)
    # Context manager guarantees the handle is closed even on error;
    # the original leaked the handle if readlines() raised.
    with open(path(*components)) as infile:
        lines = infile.readlines()
    if comment_char is not None:
        # re.escape protects against comment characters that are regex
        # metacharacters; raw strings avoid the invalid "\s" escape
        # warning the original triggered.
        escaped = re.escape(comment_char)
        comment_line_re = re.compile(r"^\s*" + escaped)
        not_comment_re = re.compile(r"[^" + escaped + r"]+")
        kept = [re.match(not_comment_re, line).group(0)
                for line in lines
                if not re.match(comment_line_re, line)]
        return [line.rstrip() for line in kept] if rstrip else kept
    if rstrip:
        return [line.rstrip() for line in lines]
    return lines
def register_plugin(host, plugin):
    """Register a plugin with a host object.

    Some @pluggable methods in the host will have their behaviour
    altered by the plugin. Plugins form a chain: each plugin's ``parent``
    is the previously registered plugin (or the original-method proxy),
    so a plugin can delegate to the behaviour it overrides.
    """
    class OriginalMethods(object):
        # Proxy whose attributes call the host's original, pre-plugin
        # implementations (stored on each method's `.original`).
        def __getattr__(self, name):
            return lambda *args, **kwargs: getattr(host, name).original(host, *args, **kwargs)
    # First registration seeds the chain with the original-method proxy.
    if not hasattr(host, "_plugins"):
        host._plugins = [OriginalMethods()]
    plugin.parent = host._plugins[-1]
    plugin.host = host
    host._plugins.append(plugin)
def setup_logger(log_level, log_file=None):
    """Setup root logger with ColoredFormatter.

    Args:
        log_level (str): level name such as "debug" or "info"; exits the
            process with an error message if unrecognised.
        log_file (str): optional path; when given, log to this file
            instead of the console stream.
    """
    level = getattr(logging, log_level.upper(), None)
    if not level:
        color_print("Invalid log level: %s" % log_level, "RED")
        sys.exit(1)
    # Hide tracebacks from end users at INFO and above.
    if level >= logging.INFO:
        sys.tracebacklimit = 0
    formatter = ColoredFormatter(
        u"%(log_color)s%(bg_white)s%(levelname)-8s%(reset)s %(message)s",
        datefmt=None,
        reset=True,
        log_colors=log_colors_config
    )
    if log_file:
        handler = logging.FileHandler(log_file, encoding="utf-8")
    else:
        handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(level)
def list_keyvaults(access_token, subscription_id, rgname):
    """Lists key vaults in the named resource group.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.
        rgname (str): Azure resource group name.

    Returns:
        HTTP response. 200 OK.
    """
    endpoint = (
        get_rm_endpoint()
        + '/subscriptions/' + subscription_id
        + '/resourcegroups/' + rgname
        + '/providers/Microsoft.KeyVault/vaults'
        + '?api-version=' + KEYVAULT_API
    )
    return do_get_next(endpoint, access_token)
def get_true_capacity(self):
    """Get the capacity for the scheduled activity, taking into account
    activity defaults and overrides."""
    override = self.capacity
    if override is not None:
        return override
    # No explicit override: use the activity default when no rooms are
    # assigned, otherwise sum the capacities of the effective rooms.
    if self.rooms.count() == 0 and self.activity.default_capacity:
        return self.activity.default_capacity
    return EighthRoom.total_capacity_of_rooms(self.get_true_rooms())
def get_group_details(group):
    """Get group details from every configured datastore.

    Each result dict is tagged with the datastore's description under
    the 'datastore' key.
    """
    details = []
    for store in _get_datastores():
        entry = store.get_group_details(group)
        entry['datastore'] = store.config['DESCRIPTION']
        details.append(entry)
    return details
def _ReadPresetsFromFileObject(self, file_object):
    """Reads parser and parser plugin presets from a file-like object.

    Args:
        file_object (file): file-like object containing the parser and
            parser plugin presets definitions.

    Yields:
        ParserPreset: a parser preset.

    Raises:
        MalformedPresetError: if one or more plugin preset definitions
            are malformed.
    """
    yaml_generator = yaml.safe_load_all(file_object)
    # Track the previously parsed preset so error messages can locate
    # the malformed definition within the file.
    last_preset_definition = None
    for yaml_definition in yaml_generator:
        try:
            preset_definition = self._ReadParserPresetValues(yaml_definition)
        except errors.MalformedPresetError as exception:
            error_location = 'At start'
            if last_preset_definition:
                error_location = 'After: {0:s}'.format(last_preset_definition.name)
            raise errors.MalformedPresetError(
                '{0:s} {1!s}'.format(error_location, exception))
        yield preset_definition
        last_preset_definition = preset_definition
def _replace_with_new_dims(
    self: T,
    variables: 'OrderedDict[Any, Variable]' = None,
    coord_names: set = None,
    attrs: 'Optional[OrderedDict]' = __default,
    indexes: 'Optional[OrderedDict[Any, pd.Index]]' = __default,
    inplace: bool = False,
) -> T:
    """Replace variables with recalculated dimensions.

    Dimension sizes are re-derived from ``variables`` before delegating
    to ``_replace``. The name-mangled ``__default`` sentinel
    distinguishes "not passed" from an explicit None for attrs/indexes.
    """
    dims = dict(calculate_dimensions(variables))
    return self._replace(
        variables, coord_names, dims, attrs, indexes, inplace=inplace)
def street_address(self, address):
    """Geocode one and only one address; get a single Address object back.

    >>> client.street_address("100 Main St, Anywhere, USA")
    >>> client.street_address({"street": "100 Main St, anywhere USA"})

    :param address: string or dictionary with street address information
    :return: an Address object or None for no match
    """
    matches = self.street_addresses([address])
    if not matches:
        return None
    return Address(matches[0])
def _check_error(response):
if 'error' in response:
raise InfluxDBError(response['error'])
elif 'results' in response:
for statement in response['results']:
if 'error' in statement:
msg = '{d[error]} (statement {d[statement_id]})'
raise InfluxDBError(msg.format(d=statement)) | Checks for JSON error messages and raises Python exception |
def _displaystr2num(st):
num = None
for s, n in [('DFP-', 16), ('TV-', 8), ('CRT-', 0)]:
if st.startswith(s):
try:
curnum = int(st[len(s):])
if 0 <= curnum <= 7:
num = n + curnum
break
except Exception:
pass
if num is not None:
return num
else:
raise ValueError('Unrecognised display name: ' + st) | Return a display number from a string |
def call_state(self, addr, *args, **kwargs):
    """Return a state initialized to the start of a given function, as if
    it were called with the given parameters.

    Delegates to ``self.project.simos.state_call``; see that method for
    the full set of supported keyword arguments (base_state, cc,
    ret_addr, stack_base, alloc_base, grow_like_stack, toc,
    initial_prefix, fs, concrete_fs, chroot, ...). Values in ``args``
    are translated to a binary format and placed in simulated memory;
    wrap a value in ``SimCC.PointerWrapper`` to pass it by pointer.

    :param addr: The address the state should start at instead of the
        entry point.
    :param args: Additional positional arguments used as arguments to
        the function call.
    :return: The state at the beginning of the function.
    :rtype: SimState
    """
    return self.project.simos.state_call(addr, *args, **kwargs)
def build_uri(secret, name, initial_count=None, issuer_name=None):
    """Return the provisioning URI for the OTP; works for TOTP or HOTP.

    The result can be encoded in a QR Code and used to provision the
    Google Authenticator app. If ``initial_count`` is None the OTP type
    is assumed to be TOTP, otherwise HOTP. For module-internal use.

    See also:
        http://code.google.com/p/google-authenticator/wiki/KeyUriFormat

    @param secret: the hotp/totp secret used to generate the URI
    @param name: name of the account
    @param initial_count: starting counter value, defaults to None
    @param issuer_name: the name of the OTP issuer
    @return: provisioning uri
    """
    has_counter = initial_count is not None
    scheme = 'otpauth://hotp/' if has_counter else 'otpauth://totp/'
    label = quote(name, safe='@')
    if issuer_name:
        issuer_name = quote(issuer_name)
        label = issuer_name + ':' + label
    uri = scheme + label + '?secret=' + secret
    if has_counter:
        uri += '&counter=%s' % initial_count
    if issuer_name:
        uri += '&issuer=%s' % issuer_name
    return uri
def on_show(request, page_name):
    """Displays the page the user requests.

    With a ``rev`` query argument the specific revision is shown;
    otherwise the newest revision of the page is used.
    """
    revision_id = request.args.get("rev", type=int)
    query = RevisionedPage.query.filter_by(name=page_name)
    if revision_id:
        query = query.filter_by(revision_id=revision_id)
        revision_requested = True
    else:
        # No explicit revision: fall back to the latest one.
        query = query.order_by(RevisionedPage.revision_id.desc())
        revision_requested = False
    page = query.first()
    if page is None:
        return page_missing(request, page_name, revision_requested)
    return Response(generate_template("action_show.html", page=page))
def state(self, new_state: bool):
    """Sets the new LED state.

    Sends the LED-state action for this keypad component to the Lutron
    controller, then records the new state locally.

    new_state: bool
    """
    self._lutron.send(Lutron.OP_EXECUTE, Keypad._CMD_TYPE, self._keypad.id,
                      self.component_number, Led._ACTION_LED_STATE,
                      int(new_state))
    self._state = new_state
def start_worker(which_worker, config=None):
    """Start some worker class.

    :param str which_worker: name of the worker ('multi_worker' or
        'fork_worker'; anything else falls back to the fork worker)
    :param dict config: ``rejester`` config block
    """
    # None sentinel replaces the original mutable default `config={}`,
    # which was shared across calls.
    if config is None:
        config = {}
    cls = MultiWorker if which_worker == 'multi_worker' else ForkWorker
    return run_worker(cls, config)
def deployment_groups(self):
    """Gets the Deployment Groups API client.

    The client is created lazily on first access and cached.

    Returns:
        DeploymentGroups:
    """
    if not self.__deployment_groups:
        self.__deployment_groups = DeploymentGroups(self.__connection)
    return self.__deployment_groups
def loss(self, x_data, y_true):
    """Forward propagate the network and return (prediction, loss value)."""
    y_pred = self(x_data)
    return y_pred, self.loss_value(x_data, y_true, y_pred)
def is_multicast(text):
    """Is the textual-form network address a multicast address?

    @param text: the textual address
    @raises ValueError: the address family cannot be determined from the
        input.
    @rtype: bool
    """
    # bytearray indexing yields an int on both Python 2 and 3. The
    # original `ord(bytes_value[0])` raised TypeError on Python 3 (where
    # indexing bytes already returns an int), which the broad except then
    # silently converted into ValueError for every input.
    try:
        first = bytearray(dns.ipv4.inet_aton(text))[0]
        return 224 <= first <= 239
    except Exception:
        try:
            first = bytearray(dns.ipv6.inet_aton(text))[0]
            return first == 255
        except Exception:
            raise ValueError('cannot determine address family')
def get_logfile_name(tags):
    """Formulate a log file name that incorporates the provided tags.

    The log file will be located in
    ``scgpm_seqresults_dnanexus.LOG_DIR``.

    Args:
        tags: `list` of tags to append to the log file name. Each tag is
            '_' delimited and added in the order provided.

    Returns:
        str: path of the log file.
    """
    # makedirs(exist_ok=True) is race-free and creates intermediate
    # directories; the original exists()+mkdir() could fail for a nested
    # or concurrently created LOG_DIR.
    os.makedirs(sd.LOG_DIR, exist_ok=True)
    filename = "_".join(["log"] + [str(tag) for tag in tags]) + ".txt"
    return os.path.join(sd.LOG_DIR, filename)
def no_duplicates_constructor(loader, node, deep=False):
    """Check for duplicate keys while loading YAML.

    Constructs each key/value pair and raises DuplicateKeyError when a
    key repeats; otherwise delegates to the default mapping constructor.

    https://gist.github.com/pypt/94d747fe5180851196eb
    """
    mapping = {}
    for key_node, value_node in node.value:
        key = loader.construct_object(key_node, deep=deep)
        value = loader.construct_object(value_node, deep=deep)
        if key in mapping:
            # Lazy import: only needed on the error path.
            from intake.catalog.exceptions import DuplicateKeyError
            raise DuplicateKeyError("while constructing a mapping",
                                    node.start_mark,
                                    "found duplicate key (%s)" % key,
                                    key_node.start_mark)
        mapping[key] = value
    # The scratch mapping above exists only for the duplicate check; the
    # real result comes from the default constructor.
    return loader.construct_mapping(node, deep)
def wifi_status(self):
    """Get the wifi status (the CONST.WIFI_LINK entry of the cached
    status info), or None if absent."""
    return self._info_json.get(CONST.STATUS, {}).get(CONST.WIFI_LINK)
def add_object(self, obj, properties=()):
    """Add an object to the definition and add ``properties`` as related.

    Args:
        obj: the object to add.
        properties: iterable of properties to relate to ``obj``.
    """
    self._objects.add(obj)
    # set.update() accepts any iterable; the original `|=` raised
    # TypeError for non-set iterables, including the default `()`.
    self._properties.update(properties)
    self._pairs.update((obj, p) for p in properties)
def create_group(self, trigger):
    """Create a new group trigger.

    :param trigger: Group member trigger to be created
    :return: The created group Trigger
    """
    data = self._serialize_object(trigger)
    return Trigger(self._post(self._service_url(['triggers', 'groups']), data))
def squeeze(self, array):
    """Simplify the given array as much as possible - squeeze out all
    singleton dimensions and also convert a zero dimensional array into
    an array scalar."""
    if not self._squeeze:
        return array
    # Copy first so the caller's array is never mutated or aliased.
    squeezed = array.copy().squeeze()
    if squeezed.ndim == 0:
        # 0-d array -> array scalar.
        squeezed = squeezed[()]
    return squeezed
def get_device_topology(self, id_or_uri):
    """Retrieves the topology information for the rack resource
    specified by ID or URI.

    Args:
        id_or_uri: Can be either the resource ID or the resource URI.

    Return:
        dict: Device topology.
    """
    uri = self._client.build_uri(id_or_uri) + "/deviceTopology"
    return self._client.get(uri)
def get_external_commands_from_arbiters(self):
    """Get external commands from our arbiters.

    Only active arbiter links are polled; every fetched command is added
    via ``self.add`` so the receiver can push it to the schedulers.

    :return: None
    """
    for arbiter_link_uuid in self.arbiters:
        link = self.arbiters[arbiter_link_uuid]
        if not link.active:
            logger.debug("The arbiter '%s' is not active, it is not possible to get "
                         "its external commands!", link.name)
            continue
        try:
            logger.debug("Getting external commands from: %s", link.name)
            external_commands = link.get_external_commands()
            if external_commands:
                logger.debug("Got %d commands from: %s", len(external_commands), link.name)
            else:
                external_commands = []
            for external_command in external_commands:
                self.add(external_command)
        except LinkError:
            # Expected transport failure: warn and keep polling others.
            logger.warning("Arbiter connection failed, I could not get external commands!")
        except Exception as exp:
            # Unexpected failure: log the traceback but do not abort the loop.
            logger.error("Arbiter connection failed, I could not get external commands!")
            logger.exception("Exception: %s", exp)
def handle_address_save(self, sender, instance, **kwargs):
    """Custom handler for address save.

    Re-runs the save handler for every object associated with the saved
    address so dependent data stays in sync.
    """
    objects = self.find_associated_with_address(instance)
    for obj in objects:
        self.handle_save(obj.__class__, obj)
def move_out_16(library, session, space, offset, length, data, extended=False):
    """Moves a 16-bit block of data from local memory to the specified
    address space and offset.

    Corresponds to viMoveOut16* functions of the VISA library.

    :param library: the visa library wrapped by ctypes.
    :param session: Unique logical identifier to a session.
    :param space: Specifies the address space. (Constants.*SPACE*)
    :param offset: Offset (in bytes) of the address or register to write.
    :param length: Number of 16-bit elements to transfer.
    :param data: Data to write to bus.
    :param extended: Use 64-bit offset independent of the platform.
    :return: return value of the library call.
    :rtype: :class:`pyvisa.constants.StatusCode`
    """
    # Copy the Python sequence into a C array of ViUInt16 for the call.
    converted_buffer = (ViUInt16 * length)(*tuple(data))
    if extended:
        return library.viMoveOut16Ex(session, space, offset, length, converted_buffer)
    else:
        return library.viMoveOut16(session, space, offset, length, converted_buffer)
def _set_input(el, value):
if isinstance(value, dict):
el.value = value["val"]
elif type(value) in [list, tuple]:
el.value = ", ".join(item["val"] for item in value)
else:
el.value = value | Set content of given `el` to `value`.
Args:
el (obj): El reference to input you wish to set.
value (obj/list): Value to which the `el` will be set. |
def wp_is_loiter(self, i):
    """Return True if waypoint ``i`` is a loiter waypoint."""
    loiter_cmds = (mavutil.mavlink.MAV_CMD_NAV_LOITER_UNLIM,
                   mavutil.mavlink.MAV_CMD_NAV_LOITER_TURNS,
                   mavutil.mavlink.MAV_CMD_NAV_LOITER_TIME,
                   mavutil.mavlink.MAV_CMD_NAV_LOITER_TO_ALT)
    return self.wpoints[i].command in loiter_cmds
def backlinks(
    self,
    page: 'WikipediaPage',
    **kwargs
) -> PagesDict:
    """Returns backlinks from other pages with respect to parameters.

    Follows MediaWiki continuation until all backlinks are collected.

    API Calls for parameters:
    - https://www.mediawiki.org/w/api.php?action=help&modules=query%2Bbacklinks
    - https://www.mediawiki.org/wiki/API:Backlinks

    :param page: :class:`WikipediaPage`
    :param kwargs: parameters used in API call
    :return: backlinks from other pages
    """
    params = {
        'action': 'query',
        'list': 'backlinks',
        'bltitle': page.title,
        'bllimit': 500,
    }
    # Caller kwargs first, then our required params take precedence.
    used_params = kwargs
    used_params.update(params)
    raw = self._query(
        page,
        used_params
    )
    self._common_attributes(raw['query'], page)
    v = raw['query']
    # Page through the remaining results using MediaWiki continuation.
    while 'continue' in raw:
        params['blcontinue'] = raw['continue']['blcontinue']
        raw = self._query(
            page,
            params
        )
        v['backlinks'] += raw['query']['backlinks']
    return self._build_backlinks(v, page)
def get_states(self, devices):
    """Get states of devices, re-authenticating once on a non-200 reply."""
    header = BASE_HEADERS.copy()
    header['Cookie'] = self.__cookie
    json_data = self._create_get_state_request(devices)
    request = requests.post(
        BASE_URL + 'getStates',
        headers=header,
        data=json_data,
        timeout=10)
    if request.status_code != 200:
        # Session probably expired: log in again and retry once.
        self.__logged_in = False
        self.login()
        self.get_states(devices)
        return
    try:
        result = request.json()
    except ValueError as error:
        # The original concatenated str + ValueError object, which itself
        # raised TypeError and masked the real protocol error.
        raise Exception(
            "Not a valid result for getStates, protocol error: "
            + str(error))
    self._get_states(result)
def _subtoken_ids_to_tokens(self, subtokens):
    """Converts a list of subtoken ids to a list of tokens.

    Args:
        subtokens: a list of integers in the range [0, vocab_size)

    Returns:
        a list of strings.
    """
    # Subtoken strings end tokens with "_"; joining and re-splitting on
    # "_" recovers the token boundaries.
    concatenated = "".join(
        [self._subtoken_id_to_subtoken_string(s) for s in subtokens])
    split = concatenated.split("_")
    ret = []
    for t in split:
        if t:
            # Re-append the "_" terminator before unescaping; skip tokens
            # that unescape to nothing.
            unescaped = _unescape_token(t + "_")
            if unescaped:
                ret.append(unescaped)
    return ret
def _try_parse_basic_number(self, data):
try:
return int(data)
except ValueError:
pass
try:
return float(data)
except ValueError:
pass
return data | Try to convert the data into ``int`` or ``float``.
:returns: ``Decimal`` or ``data`` if conversion fails. |
def _ParseEventData(self, variable_length_section):
    """Parses the event data from a variable-length data section.

    Args:
        variable_length_section (job_variable_length_data_section): a
            Windows Scheduled Task job variable-length data section.

    Returns:
        WinJobEventData: event data of the job file.
    """
    event_data = WinJobEventData()
    # Strings in the job file are NUL padded; strip the padding.
    event_data.application = (
        variable_length_section.application_name.rstrip('\x00'))
    event_data.comment = variable_length_section.comment.rstrip('\x00')
    event_data.parameters = (
        variable_length_section.parameters.rstrip('\x00'))
    event_data.username = variable_length_section.author.rstrip('\x00')
    event_data.working_directory = (
        variable_length_section.working_directory.rstrip('\x00'))
    return event_data
def init(args=None):
    """Initialize the rabit library with arguments.

    Args:
        args: list of argument strings passed to RabitInit; defaults to
            an empty list.
            NOTE(review): ctypes c_char_p expects bytes on Python 3 --
            confirm callers pass byte strings.
    """
    if args is None:
        args = []
    # Build a C array of char* for the native call.
    arr = (ctypes.c_char_p * len(args))()
    arr[:] = args
    _LIB.RabitInit(len(arr), arr)
def create_chunked_list(in_dir, size, out_dir, out_name):
    """Create a division of the input files in chunks.

    The result is stored as a JSON mapping of chunk index to a list of
    file basenames, at ``out_dir/out_name``.

    Args:
        in_dir: directory containing the input files.
        size: maximum number of files per chunk.
        out_dir: directory for the output JSON (created if missing).
        out_name: filename of the output JSON.
    """
    create_dirs(out_dir)
    in_files = get_files(in_dir)
    division = {i: [os.path.basename(f) for f in files]
                for i, files in enumerate(chunk(in_files, size))}
    out_file = os.path.join(out_dir, out_name)
    # Built-in text-mode open replaces codecs.open(..., 'wb',
    # encoding=...): json.dump writes str, and mixing a byte-mode codecs
    # handle with text output was fragile.
    with open(out_file, 'w', encoding='utf-8') as f:
        json.dump(division, f, indent=4)
def _nose_tools_functions():
    """Get an iterator of names and bound methods.

    Yields (pep8_name, bound_method) pairs for each nose-style assert*
    method inferred from a synthetic astroid module.
    """
    # NOTE(review): textwrap.dedent() is called with no argument here --
    # the module source string appears to have been lost from this copy;
    # dedent() requires a text argument. Restore from the original source.
    module = _BUILDER.string_build(
        textwrap.dedent(
        )
    )
    try:
        case = next(module["a"].infer())
    except astroid.InferenceError:
        return
    for method in case.methods():
        if method.name.startswith("assert") and "_" not in method.name:
            pep8_name = _pep8(method.name)
            yield pep8_name, astroid.BoundMethod(method, case)
        if method.name == "assertEqual":
            # Extra alias matching nose's assert_equals spelling.
            yield "assert_equals", astroid.BoundMethod(method, case)
def child_link_extent(self):
    """Get the extent of the child of this entry if it has one.

    Parameters:
        None.

    Returns:
        The logical block number of the child if it exists.

    Raises:
        PyCdlibInternalError: if not initialized or no child record
            exists in either the DR or CE entries.
    """
    if not self._initialized:
        raise pycdlibexception.PyCdlibInternalError('Rock Ridge extension not yet initialized')
    # Prefer the CL record stored in the directory-record entries, then
    # fall back to the continuation entries.
    if self.dr_entries.cl_record is not None:
        return self.dr_entries.cl_record.child_log_block_num
    if self.ce_entries.cl_record is not None:
        return self.ce_entries.cl_record.child_log_block_num
    raise pycdlibexception.PyCdlibInternalError('Asked for child extent for non-existent parent record')
def _allows_url(self, url_data, roboturl):
    """Ask robots.txt allowance.

    Assumes only a single thread per robots.txt URL calls this function.
    Parsed robots.txt files are cached per roboturl.
    """
    with cache_lock:
        if roboturl in self.cache:
            self.hits += 1
            rp = self.cache[roboturl]
            return rp.can_fetch(self.useragent, url_data.url)
    self.misses += 1
    kwargs = dict(auth=url_data.auth, session=url_data.session)
    if hasattr(url_data, "proxy") and hasattr(url_data, "proxy_type"):
        # Fixed: the original read `url_data.proxytype`, which is not the
        # attribute the hasattr() guard checks, so this branch always
        # raised AttributeError when only proxy_type exists.
        kwargs["proxies"] = {url_data.proxy_type: url_data.proxy}
    rp = robotparser2.RobotFileParser(**kwargs)
    rp.set_url(roboturl)
    rp.read()
    with cache_lock:
        self.cache[roboturl] = rp
    self.add_sitemap_urls(rp, url_data, roboturl)
    return rp.can_fetch(self.useragent, url_data.url)
def attach(self, num_name, write=0):
    """Open an existing vgroup given its name or its reference number,
    or create a new vgroup, returning a VG instance for that vgroup.

    Args::

        num_name    reference number or name of the vgroup to open,
                    or -1 to create a new vgroup; vcreate() can also
                    be called to create and name a new vgroup
        write       set to non-zero to open the vgroup in write mode
                    and to 0 to open it in readonly mode (default)

    Returns::

        VG instance for the vgroup

    An exception is raised if an attempt is made to open a non-existent
    vgroup.

    C library equivalent : Vattach
    """
    # A bytes name is resolved to its reference number first.
    if isinstance(num_name, bytes):
        num = self.find(num_name)
    else:
        num = num_name
    vg_id = _C.Vattach(self._hdf_inst._id, num,
                       write and 'w' or 'r')
    _checkErr('vattach', vg_id, "cannot attach Vgroup")
    return VG(self, vg_id)
def _postback(self):
    """Perform PayPal PDT Postback validation.

    Sends the transaction ID and business token to PayPal, which
    responds with SUCCESS or FAILED; returns the raw response body.
    """
    return requests.post(self.get_endpoint(),
                         data=dict(cmd="_notify-synch", at=IDENTITY_TOKEN, tx=self.tx)).content
def list_files(dirname, extension=None):
    """List all files directly inside directory ``dirname``.

    Optionally keep only files whose extension matches ``extension``
    (case-insensitive, given without the leading dot).
    """
    # next() on os.walk yields only the top-level listing; a missing
    # directory produces no entries rather than raising.
    _, _, filenames = next(os.walk(dirname), (dirname, [], []))
    if extension is None:
        return filenames
    wanted = '.' + extension.lower()
    return [name for name in filenames
            if os.path.splitext(name)[1].lower() == wanted]
# Fetch a single logs-based metric by resource name via the GetLogMetric RPC.
def get_log_metric(
    self,
    metric_name,
    retry=google.api_core.gapic_v1.method.DEFAULT,
    timeout=google.api_core.gapic_v1.method.DEFAULT,
    metadata=None,
):
    # Lazily wrap the transport method once; the wrapper applies the
    # configured default retry/timeout and client info on every call.
    if "get_log_metric" not in self._inner_api_calls:
        self._inner_api_calls[
            "get_log_metric"
        ] = google.api_core.gapic_v1.method.wrap_method(
            self.transport.get_log_metric,
            default_retry=self._method_configs["GetLogMetric"].retry,
            default_timeout=self._method_configs["GetLogMetric"].timeout,
            client_info=self._client_info,
        )
    request = logging_metrics_pb2.GetLogMetricRequest(metric_name=metric_name)
    if metadata is None:
        metadata = []
    # Copy so the caller's sequence is never mutated.
    metadata = list(metadata)
    # Attach routing metadata so the backend can route by metric_name.
    # NOTE(review): building this list cannot raise AttributeError as
    # written — the guard looks like boilerplate for non-string inputs;
    # confirm against the code generator's intent.
    try:
        routing_header = [("metric_name", metric_name)]
    except AttributeError:
        pass
    else:
        routing_metadata = google.api_core.gapic_v1.routing_header.to_grpc_metadata(
            routing_header
        )
        metadata.append(routing_metadata)
    return self._inner_api_calls["get_log_metric"](
        request, retry=retry, timeout=timeout, metadata=metadata
    ) | Gets a logs-based metric.
Example:
>>> from google.cloud import logging_v2
>>>
>>> client = logging_v2.MetricsServiceV2Client()
>>>
>>> metric_name = client.metric_path('[PROJECT]', '[METRIC]')
>>>
>>> response = client.get_log_metric(metric_name)
Args:
metric_name (str): The resource name of the desired metric:
::
"projects/[PROJECT_ID]/metrics/[METRIC_ID]"
retry (Optional[google.api_core.retry.Retry]): A retry object used
to retry requests. If ``None`` is specified, requests will not
be retried.
timeout (Optional[float]): The amount of time, in seconds, to wait
for the request to complete. Note that if ``retry`` is
specified, the timeout applies to each individual attempt.
metadata (Optional[Sequence[Tuple[str, str]]]): Additional metadata
that is provided to the method.
Returns:
A :class:`~google.cloud.logging_v2.types.LogMetric` instance.
Raises:
google.api_core.exceptions.GoogleAPICallError: If the request
failed for any reason.
google.api_core.exceptions.RetryError: If the request failed due
to a retryable error and retry attempts failed.
ValueError: If the parameters are invalid. |
# Compute the packed on-disk size (in bytes) of this instruction and its
# operands; `start_pos` matters only for alignment-padded switch opcodes.
def size_on_disk(self, start_pos=0):
    size = 1  # one byte for the opcode itself
    fmts = opcode_table[self.opcode]['operands']
    if self.wide:
        # The wide prefix adds the prefix byte plus a widened operand.
        size += 2
        # Opcode 0x84 (iinc) carries an extra widened constant operand.
        if self.opcode == 0x84:
            size += 2
    elif fmts:
        # Fixed-format operands: sum the struct sizes of each format.
        for fmt, _ in fmts:
            size += fmt.value.size
    elif self.opcode == 0xAB:
        # lookupswitch: 0-3 padding bytes so the operands start on a
        # 4-byte boundary relative to the method start...
        padding = 4 - (start_pos + 1) % 4
        padding = padding if padding != 4 else 0
        size += padding
        # ...then default + npairs (8 bytes) plus 8 bytes per pair.
        size += 8
        size += len(self.operands[0]) * 8
    elif self.opcode == 0xAA:
        # tableswitch sizing is not implemented yet.
        raise NotImplementedError()
    return size | Returns the size of this instruction and its operands when
packed. `start_pos` is required for the `tableswitch` and
`lookupswitch` instruction as the padding depends on alignment. |
# Merge the patterns from every configured sanitize file into
# self.sanitize_patterns.
def setup_sanitize_files(self):
    for fname in self.get_sanitize_files():
        with open(fname, 'r') as f:
            self.sanitize_patterns.update(get_sanitize_patterns(f.read())) | For each of the sanitize files that were specified as command line options
load the contents of the file into the sanitize patterns dictionary.
# Default DELETE handler; supports bulk deletion when the resource
# resolves to multiple objects.
def delete(self, request, **resources):
    resource = resources.get(self._meta.name)
    if not resource:
        raise HttpError("Bad request", status=status.HTTP_404_NOT_FOUND)
    # as_tuple lets a single object and a collection be handled uniformly.
    for o in as_tuple(resource):
        o.delete()
    return HttpResponse("") | Default DELETE method. Allow bulk delete.
:return django.http.response: empty response |
# Reset the database schema and seed it with one example blog post.
def init_db():
    # Drop then recreate all tables so repeated runs start clean.
    db.drop_all()
    db.create_all()
    title = "de Finibus Bonorum et Malorum - Part I"
    text = "Lorem ipsum dolor sit amet, consectetur adipisicing elit, sed do eiusmod tempor \
incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud \
exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure \
dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. \
Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt \
mollit anim id est laborum."
    post = Post(title=title, text=text)
    db.session.add(post)
    db.session.commit() | Populate a small db with some example entries.
# Signal handler for email_dispatched: write the rendered message to the
# extension's stream followed by a 79-dash separator line.
def print_email(message, app):
    invenio_mail = app.extensions['invenio-mail']
    # The lock serialises writes when several emails are dispatched at once.
    with invenio_mail._lock:
        invenio_mail.stream.write(
            '{0}\n{1}\n'.format(message.as_string(), '-' * 79))
        invenio_mail.stream.flush() | Print mail to stream.
Signal handler for email_dispatched signal. Prints by default the output
to the stream specified in the constructor of InvenioMail.
:param message: Message object.
:param app: Flask application object. |
def license_fallback(vendor_dir, sdist_name):
libname = libname_from_dir(sdist_name)
if libname not in HARDCODED_LICENSE_URLS:
raise ValueError('No hardcoded URL for {} license'.format(libname))
url = HARDCODED_LICENSE_URLS[libname]
_, _, name = url.rpartition('/')
dest = license_destination(vendor_dir, libname, name)
r = requests.get(url, allow_redirects=True)
log('Downloading {}'.format(url))
r.raise_for_status()
dest.write_bytes(r.content) | Hardcoded license URLs. Check when updating if those are still needed |
# Map a raw period string (e.g. 'all', 'hour', 'day') to its TimePeriod
# member; raises ValueError when no member matches.
def get_time_period(value):
    for time_period in TimePeriod:
        if time_period.period == value:
            return time_period
    raise ValueError('{} is not a valid TimePeriod'.format(value)) | Get the corresponding TimePeriod from the value.
Example values: 'all', 'hour', 'day', 'week', or 'month'. |
# Group simulants by the output state chosen for them, guaranteeing that
# every possible output appears in the result (empty Index when unused).
def _groupby_new_state(index, outputs, decisions):
    # Map each output state to a positional label usable as a group key.
    output_map = {o: i for i, o in enumerate(outputs)}
    groups = pd.Series(index).groupby([output_map[d] for d in decisions])
    results = [(outputs[i], pd.Index(sub_group.values)) for i, sub_group in groups]
    selected_outputs = [o for o, _ in results]
    # Pad with empty groups so callers can rely on seeing every output state.
    for output in outputs:
        if output not in selected_outputs:
            results.append((output, pd.Index([])))
    return results | Groups the simulants in the index by their new output state.
Parameters
----------
index : iterable of ints
An iterable of integer labels for the simulants.
outputs : iterable
A list of possible output states.
decisions : `pandas.Series`
A series containing the name of the next state for each simulant in the index.
Returns
-------
iterable of 2-tuples
The first item in each tuple is the name of an output state and the second item
is a `pandas.Index` representing the simulants to transition into that state. |
# Return a copy of this object with extra query-string values added.
def qs_add(self, *args, **kwargs):
    # Work on a copy so the original query MultiDict is left untouched.
    query = self.query.copy()
    if args:
        # A single positional argument is treated as a mapping/pairs source.
        mdict = MultiDict(args[0])
        for k, v in mdict.items():
            query.add(k, v)
    for k, v in kwargs.items():
        query.add(k, v)
    return self._copy(query=query) | Add value to QuerySet MultiDict
def _is_valid_index(self, index):
if isinstance(index, int):
return (index >= 0) and (index < len(self))
if isinstance(index, list):
valid = True
for i in index:
valid = valid or self._is_valid_index(i)
return valid
return False | Return ``True`` if and only if the given ``index``
is valid. |
def render(self, obj, name, context):
if self.value_lambda is not None:
val = self.value_lambda(obj)
else:
attr_name = name
if self.property_name is not None:
attr_name = self.property_name
if isinstance(obj, dict):
val = obj.get(attr_name, None)
else:
val = getattr(obj, attr_name, None)
if callable(val):
try:
val = val()
except:
logging.exception("Attempted to call `%s` on obj of type %s.",
attr_name, type(obj))
raise
return val | The default field renderer.
This basic renderer assumes that the object has an attribute with
the same name as the field, unless a different field is specified
as a `property_name`.
The renderer is also passed the context so that it can be
propagated to the `_render_serializable` method of nested
resources (or, for example, if you decide to implement attribute
hiding at the field level instead of at the object level).
Callable attributes of `obj` will be called to fetch value.
This is useful for fields computed from lambda functions
or instance methods. |
# Coerce `t` to a contiguous 1-D array (stripping units first) and check
# its length is compatible with the C position array `pos_c`.
def _validate_prepare_time(self, t, pos_c):
    # Quantity-like inputs are reduced to raw values in this object's units.
    if hasattr(t, 'unit'):
        t = t.decompose(self.units).value
    if not isiterable(t):
        t = np.atleast_1d(t)
    # The C layer requires a flat, C-contiguous array.
    t = np.ascontiguousarray(t.ravel())
    # A single time broadcasts; an array of times must match the positions.
    if len(t) > 1:
        if len(t) != pos_c.shape[0]:
            raise ValueError("If passing in an array of times, it must have a shape "
                             "compatible with the input position(s).")
    return t | Make sure that t is a 1D array and compatible with the C position array.
# Return the first mapping whose "table" entry matches `table`;
# implicitly returns None when no mapping targets the given table.
def _get_mapping_for_table(self, table):
    for mapping in self.mappings.values():
        if mapping["table"] == table:
            return mapping | Returns the first mapping for a table name
# Yield SQL-style statements assembled from input lines; a trailing ';'
# on a line terminates the current statement, and any remainder after the
# last ';' is emitted as a final statement.
def _parse_statements(lines):
    # Drop empty/whitespace-only lines, then full-line '--' comments.
    lines = (l.strip() for l in lines if l)
    lines = (l for l in lines if l and not l.startswith('--'))
    parts = []
    for line in lines:
        parts.append(line.rstrip(';'))
        if line.endswith(';'):
            yield '\n'.join(parts)
            # Clear the accumulator in place for the next statement.
            parts[:] = []
    if parts:
        yield '\n'.join(parts) | Return a generator of statements
Args: A list of strings that can contain one or more statements.
Statements are separated using ';' at the end of a line
Everything after the last ';' will be treated as the last statement.
>>> list(_parse_statements(['select * from ', 't1;', 'select name']))
['select * from\\nt1', 'select name']
>>> list(_parse_statements(['select * from t1;', ' ']))
['select * from t1'] |
# Yield the per-lane slices of each argument, most-significant lane first.
def vector_args(self, args):
    # Iterate lanes from the highest index down to 0.
    for i in reversed(range(self._vector_count)):
        pieces = []
        for vec in args:
            # NOTE(review): the [high : low] slice suggests hardware-style
            # MSB:LSB indexing on the vec objects; as a plain Python
            # sequence slice this would be empty — confirm the vec type.
            pieces.append(vec[(i+1) * self._vector_size - 1 : i * self._vector_size])
        yield pieces | Yields each of the individual lane pairs from the arguments, in
order from most significant to least significant
# Pretty-print a mixed argument list; 2-tuples are rendered as key=value.
def str_args(args):
    res = []
    for x in args:
        if isinstance(x, tuple) and len(x) == 2:
            key, value = x
            # Skip falsy values and values whose str_arg form is empty.
            if value and str_arg(value):
                res += ["%s=%s" % (key, str_arg(value))]
        else:
            res += [str_arg(x)]
    return ', '.join(res) | formats a list of function arguments prettily not as code
(kwargs are tuples of (argname, argvalue))
# Express each value as a ratio to the rolling mean over the preceding
# `window` sessions (e.g. volume relative to the trailing-year average).
def ratio_to_ave(window, eqdata, **kwargs):
    _selection = kwargs.get('selection', 'Volume')
    _skipstartrows = kwargs.get('skipstartrows', 0)
    _skipendrows = kwargs.get('skipendrows', 0)
    _outputcol = kwargs.get('outputcol', 'Ratio to Ave')
    _size = len(eqdata.index)
    _eqdata = eqdata.loc[:, _selection]
    # Rolling mean of the selected column, excluding the trailing skipped rows.
    _sma = _eqdata.iloc[:-1 - _skipendrows].rolling(window=window, center=False).mean().values
    # Each retained value is divided by the mean over the `window` rows
    # immediately preceding it (hence the -1 offset into _sma).
    _outdata = _eqdata.values[window + _skipstartrows:_size - _skipendrows] /\
        _sma[window + _skipstartrows - 1:]
    _index = eqdata.index[window + _skipstartrows:_size - _skipendrows]
    return pd.DataFrame(_outdata, index=_index, columns=[_outputcol], dtype=np.float64) | Return values expressed as ratios to the average over some number
of prior sessions.
Parameters
----------
eqdata : DataFrame
Must contain a column with name matching `selection`, or, if
`selection` is not specified, a column named 'Volume'
window : int
Interval over which to calculate the average. Normally 252 (1 year)
selection : str, optional
Column to select for calculating ratio. Defaults to 'Volume'
skipstartrows : int, optional
Rows to skip at beginning in addition to the `window` rows
that must be skipped to get the baseline volume. Defaults to 0.
skipendrows : int, optional
Rows to skip at end. Defaults to 0.
outputcol : str, optional
Name of column in output dataframe. Defaults to 'Ratio to Ave'
Returns
---------
out : DataFrame |
def create_pgm_dict(
lambdaFile: str,
asts: List,
file_name: str,
mode_mapper_dict: dict,
save_file=False,
) -> Dict:
lambdaStrings = ["import math\n\n"]
state = PGMState(lambdaStrings)
generator = GrFNGenerator()
generator.mode_mapper = mode_mapper_dict
pgm = generator.genPgm(asts, state, {}, "")[0]
if pgm.get("start"):
pgm["start"] = pgm["start"][0]
else:
pgm["start"] = generator.function_defs[-1]
pgm["source"] = [[get_path(file_name, "source")]]
pgm["dateCreated"] = f"{datetime.today().strftime('%Y%m%d')}"
with open(lambdaFile, "w") as f:
f.write("".join(lambdaStrings))
if save_file:
json.dump(pgm, open(file_name[:file_name.rfind(".")] + ".json", "w"))
return pgm | Create a Python dict representing the PGM, with additional metadata for
JSON output. |
# Ask the YubiHSM to validate a YubiKey OTP against its internal database.
def db_validate_yubikey_otp(self, public_id, otp):
    # Build the validate-OTP command for this HSM stick and run it.
    return pyhsm.db_cmd.YHSM_Cmd_DB_Validate_OTP( \
        self.stick, public_id, otp).execute() | Request the YubiHSM to validate an OTP for a YubiKey stored
in the internal database.
@param public_id: The six bytes public id of the YubiKey
@param otp: The OTP from a YubiKey in binary form (16 bytes)
@type public_id: string
@type otp: string
@returns: validation response
@rtype: L{YHSM_ValidationResult}
@see: L{pyhsm.db_cmd.YHSM_Cmd_DB_Validate_OTP} |
# Return the file size in bytes without disturbing the current position.
def size(self):
    # Remember the current offset, seek to the end (whence=2) to measure,
    # then restore the original position.
    old = self.__file.tell()
    self.__file.seek(0, 2)
    n_bytes = self.__file.tell()
    self.__file.seek(old)
    return n_bytes | Calculate and return the file size in bytes.
# Look up the token named in the OAuth request; raises OAuthError when
# the data store has no token of the requested type for that key.
def _get_token(self, oauth_request, token_type='access'):
    token_field = oauth_request.get_parameter('oauth_token')
    token = self.data_store.lookup_token(token_type, token_field)
    if not token:
        raise OAuthError('Invalid %s token: %s' % (token_type, token_field))
    return token | Try to find the token for the provided request token key.
def consume_keys_asynchronous_processes(self):
print("\nLooking up " + self.input_queue.qsize().__str__() + " keys from " + self.source_name + "\n")
jobs = multiprocessing.cpu_count()*4 if (multiprocessing.cpu_count()*4 < self.input_queue.qsize()) \
else self.input_queue.qsize()
pool = multiprocessing.Pool(processes=jobs, maxtasksperchild=10)
for x in range(jobs):
pool.apply(self.data_worker, [], self.worker_args)
pool.close()
pool.join() | Work through the keys to look up asynchronously using multiple processes |
# Bulk-copy the users with the given IDs via the service layer.
def bulk_copy(self, ids):
    # Responses are deserialised with the user schema.
    schema = UserSchema()
    return self.service.bulk_copy(self.base, self.RESOURCE, ids, schema) | Bulk copy a set of users.
:param ids: Int list of user IDs.
:return: :class:`users.User <users.User>` list |
# Build the I matrix (Chaput's PRL paper) for Cartesian axes (a, b);
# returns None when the symmetrised rotation sum is numerically zero.
def _get_I(self, a, b, size, plus_transpose=True):
    r_sum = np.zeros((3, 3), dtype='double', order='C')
    # Sum the outer products of rows a and b over all Cartesian rotations.
    for r in self._rotations_cartesian:
        for i in range(3):
            for j in range(3):
                r_sum[i, j] += r[a, i] * r[b, j]
    if plus_transpose:
        r_sum += r_sum.T
    # A numerically-zero sum means I is the zero matrix; signal with None.
    if (np.abs(r_sum) < 1e-10).all():
        return None
    # Tile the 3x3 block down the diagonal of a (3*size, 3*size) matrix.
    I_mat = np.zeros((3 * size, 3 * size), dtype='double', order='C')
    for i in range(size):
        I_mat[(i * 3):((i + 1) * 3), (i * 3):((i + 1) * 3)] = r_sum
    return I_mat | Return I matrix in Chaput's PRL paper.
None is returned if I is zero matrix. |
# Build tree/parent/ring maps whose node ids are relabelled so that
# consecutive ring positions get consecutive ids.
def get_link_map(self, nslave):
    tree_map, parent_map = self.get_tree(nslave)
    ring_map = self.get_ring(tree_map, parent_map)
    # Walk the ring starting from node 0, assigning new ids in ring order.
    rmap = {0 : 0}
    k = 0
    for i in range(nslave - 1):
        k = ring_map[k][1]
        rmap[k] = i + 1
    # Re-express all three maps in terms of the relabelled ids.
    ring_map_ = {}
    tree_map_ = {}
    parent_map_ ={}
    for k, v in ring_map.items():
        ring_map_[rmap[k]] = (rmap[v[0]], rmap[v[1]])
    for k, v in tree_map.items():
        tree_map_[rmap[k]] = [rmap[x] for x in v]
    for k, v in parent_map.items():
        if k != 0:
            parent_map_[rmap[k]] = rmap[v]
        else:
            # The root keeps a sentinel parent of -1.
            parent_map_[rmap[k]] = -1
    return tree_map_, parent_map_, ring_map_ | get the link map, this is a bit hacky, call for better algorithm
to place similar nodes together |
# Cloud action: detach the named disk from the named instance, firing
# salt-cloud events before and after the provider call.
def detach_disk(name=None, kwargs=None, call=None):
    # Only valid as an instance action (-a/--action).
    if call != 'action':
        raise SaltCloudSystemExit(
            'The detach_Disk action must be called with -a or --action.'
        )
    if not name:
        log.error(
            'Must specify an instance name.'
        )
        return False
    if not kwargs or 'disk_name' not in kwargs:
        log.error(
            'Must specify a disk_name to detach.'
        )
        return False
    node_name = name
    disk_name = kwargs['disk_name']
    conn = get_conn()
    # Resolve the provider-side objects for the instance and the volume.
    node = conn.ex_get_node(node_name)
    disk = conn.ex_get_volume(disk_name)
    # Fire a "detaching" event before performing the operation.
    __utils__['cloud.fire_event'](
        'event',
        'detach disk',
        'salt/cloud/disk/detaching',
        args={
            'name': node_name,
            'disk_name': disk_name,
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    result = conn.detach_volume(disk, node)
    # Fire a matching "detached" event once the provider call returns.
    __utils__['cloud.fire_event'](
        'event',
        'detached disk',
        'salt/cloud/disk/detached',
        args={
            'name': node_name,
            'disk_name': disk_name,
        },
        sock_dir=__opts__['sock_dir'],
        transport=__opts__['transport']
    )
    return result | Detach a disk from an instance.
CLI Example:
.. code-block:: bash
salt-cloud -a detach_disk myinstance disk_name=mydisk |
# Force both factory.fuzzy's and Faker's shared random generators into
# the given state, marking the state as explicitly set.
def set_random_state(state):
    randgen.state_set = True
    randgen.setstate(state)
    # Keep Faker's generator in lock-step so output stays reproducible.
    faker.generator.random.setstate(state) | Force-set the state of factory.fuzzy's random generator.
# Return the version strings registered for `package_name`.
def package_releases(request, package_name, show_hidden=False):
    session = DBSession()
    package = Package.by_name(session, package_name)
    # NOTE(review): `show_hidden` is accepted but never applied — every
    # release version is returned regardless, contrary to the documented
    # behavior; confirm whether hidden releases should be filtered here.
    return [rel.version for rel in package.sorted_releases] | Retrieve a list of the releases registered for the given package_name.
Returns a list with all version strings if show_hidden is True or
only the non-hidden ones otherwise. |
# Right coordinate, preferring the "real" value when one is present.
def right(self):
    if self._has_real():
        return self._data.real_right
    return self._data.right | Right coordinate.
# Compare two WFNs attribute by attribute, yielding (attribute, result)
# pairs so callers can build custom name-comparison logic on top.
def compare_wfns(cls, source, target):
    for att in CPEComponent.CPE_COMP_KEYS_EXTENDED:
        # Strip surrounding double quotes from quoted attribute values.
        value_src = source.get_attribute_values(att)[0]
        if value_src.find('"') > -1:
            value_src = value_src[1:-1]
        value_tar = target.get_attribute_values(att)[0]
        if value_tar.find('"') > -1:
            value_tar = value_tar[1:-1]
        yield (att, CPESet2_3._compare(value_src, value_tar)) | Compares two WFNs and returns a generator of pairwise attribute-value
comparison results. It provides full access to the individual
comparison results to enable use-case specific implementations
of novel name-comparison algorithms.
Compare each attribute of the Source WFN to the Target WFN:
:param CPE2_3_WFN source: first WFN CPE Name
:param CPE2_3_WFN target: seconds WFN CPE Name
:returns: generator of pairwise attribute comparison results
:rtype: generator |
# Extract the departure platform from the journey data, or None when absent.
def _platform(self) -> Optional[str]:
    try:
        return str(self.journey.MainStop.BasicStop.Dep.Platform.text)
    except AttributeError:
        # Any missing node in the chain means no platform information.
        return None | Extract platform.
# Binary-search `value` in a character string array ordered via `order`,
# returning the matching index or -1 (wraps CSPICE bschoc_c).
def bschoc(value, ndim, lenvals, array, order):
    # Convert Python values to the ctypes representations CSPICE expects.
    value = stypes.stringToCharP(value)
    ndim = ctypes.c_int(ndim)
    lenvals = ctypes.c_int(lenvals)
    array = stypes.listToCharArrayPtr(array, xLen=lenvals, yLen=ndim)
    order = stypes.toIntVector(order)
    return libspice.bschoc_c(value, ndim, lenvals, array, order) | Do a binary search for a given value within a character string array,
accompanied by an order vector. Return the index of the matching array
entry, or -1 if the key value is not found.
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/bschoc_c.html
:param value: Key value to be found in array.
:type value: str
:param ndim: Dimension of array.
:type ndim: int
:param lenvals: String length.
:type lenvals: int
:param array: Character string array to search.
:type array: list of strings
:param order: Order vector.
:type order: Array of ints
:return: index
:rtype: int |
# Return the smallest stored range that contains `ip`, raising
# IpRange.DoesNotExist for unparseable addresses or no match.
def by_ip(self, ip):
    try:
        number = inet_aton(ip)
    except Exception:
        # Unparseable addresses are reported as "no matching range".
        raise IpRange.DoesNotExist
    try:
        # Ordering by (end_ip, -start_ip) puts the tightest range first.
        return self.filter(start_ip__lte=number, end_ip__gte=number)\
            .order_by('end_ip', '-start_ip')[0]
    except IndexError:
        raise IpRange.DoesNotExist | Find the smallest range containing the given IP.
# Assemble the style properties dict for a filled contour polygon.
def set_contourf_properties(stroke_width, fcolor, fill_opacity, contour_levels, contourf_idx, unit):
    return {
        "stroke": fcolor,
        "stroke-width": stroke_width,
        "stroke-opacity": 1,
        "fill": fcolor,
        "fill-opacity": fill_opacity,
        # Tooltip text: the contour level value followed by its unit.
        "title": "%.2f" % contour_levels[contourf_idx] + ' ' + unit
    } | Set property values for Polygon.
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.