code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def send_publish(self, mid, topic, payload, qos, retain, dup):
    """Send a PUBLISH packet over the current connection.

    Returns NC.ERR_NO_CONN when the socket is invalid; otherwise the
    result of the low-level publish helper, with topic and payload
    UTF-8 encoded.
    """
    self.logger.debug("Send PUBLISH")
    if self.sock != NC.INVALID_SOCKET:
        encoded_topic = utf8encode(topic)
        encoded_payload = utf8encode(payload)
        return self._do_send_publish(mid, encoded_topic, encoded_payload,
                                     qos, retain, dup)
    return NC.ERR_NO_CONN
def encode(self, delimiter=';'):
try:
return delimiter.join([str(f) for f in [
self.node_id,
self.child_id,
int(self.type),
self.ack,
int(self.sub_type),
self.payload,
]]) + '\n'
excep... | Encode a command string from message. |
def filter_records(root, head, update, filters=()):
    """Run every filter over the (root, head, update) records.

    The records are frozen before entering the filter chain and thawed
    again before being returned as a (root, head, update) tuple.
    """
    frozen_root, frozen_head, frozen_update = (
        freeze(root), freeze(head), freeze(update))
    for record_filter in filters:
        frozen_root, frozen_head, frozen_update = record_filter(
            frozen_root, frozen_head, frozen_update)
    return thaw(frozen_root), thaw(frozen_head), thaw(frozen_update)
def run_spyder(app, options, args):
main = MainWindow(options)
try:
main.setup()
except BaseException:
if main.console is not None:
try:
main.console.shell.exit_interpreter()
except BaseException:
pass
raise
main.... | Create and show Spyder's main window
Start QApplication event loop |
def list_datasets(name=None):
reg = registry.get_registry(Dataset)
if name is not None:
class_ = reg[name.lower()]
return _REGSITRY_NAME_KWARGS[class_]
else:
return {
dataset_name: _REGSITRY_NAME_KWARGS[class_]
for dataset_name, class_ in registry.get_registry... | Get valid datasets and registered parameters.
Parameters
----------
name : str or None, default None
Return names and registered parameters of registered datasets. If name
is specified, only registered parameters of the respective dataset are
returned.
Returns
-------
d... |
def sctiks(sc, clkstr):
    """Convert a spacecraft clock format string to a number of "ticks".

    http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sctiks_c.html

    :param sc: NAIF spacecraft identification code.
    :type sc: int
    :param clkstr: Character representation of a spacecraft clock.
    :type clkstr: str
    :return: Number of ticks represented by the clock string.
    :rtype: float
    """
    # Output parameter filled in by the CSPICE library call.
    ticks = ctypes.c_double()
    libspice.sctiks_c(ctypes.c_int(sc),
                      stypes.stringToCharP(clkstr),
                      ctypes.byref(ticks))
    return ticks.value
http://naif.jpl.nasa.gov/pub/naif/toolkit_docs/C/cspice/sctiks_c.html
:param sc: NAIF spacecraft identification code.
:type sc: int
:param clkstr: Character representation of a spacecraft clock.
:type clkstr: str
:return: Number of... |
def reference(self, ):
    """Reference the currently selected task file, if any.

    :returns: None
    :rtype: None
    :raises: None
    """
    selection = self.get_taskfileinfo_selection()
    if not selection:
        return
    self.reftrack.reference(selection)
:returns: None
:rtype: None
:raises: None |
def get_lists(client):
    """Fetch all of the client's lists.

    :param client: an authenticated API client
    :return: the decoded JSON body describing the lists
    """
    endpoint = client.api.Endpoints.LISTS
    return client.authenticated_request(endpoint).json()
def as_dict(self):
    """Return this model serialised as a dict.

    Validation is run first when the model has not yet been validated.
    """
    from .converters import to_dict

    if not self._is_valid:
        self.validate()
    return to_dict(self)
def init_tape(self, string):
for char in string:
if char not in self.alphabet and not char.isspace() and char != self.EMPTY_SYMBOL:
raise RuntimeError('Invalid symbol: "' + char + '"')
self.check()
self.state = self.START_STATE
self.head = 0
self.tape ... | Init system values. |
def sha1(s):
    """Return the hex-encoded SHA-1 digest of the given data.

    NOTE(review): *s* must be a bytes-like object; passing ``str``
    raises TypeError on Python 3 — confirm against callers.
    """
    return hashlib.sha1(s).hexdigest()
def parse_age(value=None):
    """Parse a base-10 integer count of seconds into a timedelta.

    If parsing fails, the return value is `None`.

    :param value: a string consisting of an integer represented in base-10
    :return: a :class:`datetime.timedelta` object, or `None` when *value*
        is missing, unparsable, negative, or too large to represent.
    """
    if not value:
        return None
    try:
        seconds = int(value)
        if seconds < 0:
            return None
        return timedelta(seconds=seconds)
    except (ValueError, OverflowError):
        # Non-numeric input or a value beyond timedelta's range.
        return None
If parsing fails, the return value is `None`.
:param value: a string consisting of an integer represented in base-10
:return: a :class:`datetime.timedelta` object or `None`. |
def get_arguments():
parser = argparse.ArgumentParser(
description='Handles bumping of the artifact version')
parser.add_argument('--log-config',
'-l',
action='store',
dest='logger_config',
help='The location... | This get us the cli arguments.
Returns the args as parsed from the argsparser. |
def _check_timezone_max_length_attribute(self):
possible_max_length = max(map(len, pytz.all_timezones))
if self.max_length < possible_max_length:
return [
checks.Error(
msg=(
"'max_length' is too short to support all possible "
... | Checks that the `max_length` attribute covers all possible pytz
timezone lengths. |
def connect(self, funct):
def get_directory():
rec = QFileDialog.getExistingDirectory(self,
'Path to Recording'
' Directory')
if rec == '':
return
self.setTex... | Call funct when the text was changed.
Parameters
----------
funct : function
function that broadcasts a change.
Notes
-----
There is something wrong here. When you run this function, it calls
for opening a directory three or four times. This is obvio... |
def list_container_services(access_token, subscription_id, resource_group):
endpoint = ''.join([get_rm_endpoint(),
'/subscriptions/', subscription_id,
'/resourcegroups/', resource_group,
'/providers/Microsoft.ContainerService/ContainerServices'... | List the container services in a resource group.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
resource_group (str): Azure resource group name.
Returns:
HTTP response. JSON model. |
def set_variations(self, variations):
if variations is None:
variations = ffi.NULL
else:
variations = _encode_string(variations)
cairo.cairo_font_options_set_variations(self._pointer, variations)
self._check_status() | Sets the OpenType font variations for the font options object.
Font variations are specified as a string with a format that is similar
to the CSS font-variation-settings. The string contains a
comma-separated list of axis assignments, which each assignment
consists of a 4-character axis... |
def pca(U, centre=False):
    """Compute the PCA basis for the columns of input array `U`.

    Parameters
    ----------
    U : array_like
        2D data array with rows corresponding to different variables and
        columns corresponding to different observations.
    centre : bool, optional (default False)
        Flag indicating whether to subtract the row-wise mean first.

    Returns
    -------
    B : ndarray
        Matrix whose columns are the PCA basis vectors.
    S : ndarray
        Variances (squared singular values) for each basis vector.
    C : ndarray or None
        The subtracted mean, or None when `centre` is False.
    """
    C = None
    if centre:
        C = np.mean(U, axis=1, keepdims=True)
        U = U - C
    B, S, _ = np.linalg.svd(U, full_matrices=False, compute_uv=True)
    return B, S ** 2, C
Parameters
----------
U : array_like
2D data array with rows corresponding to different variables and
columns corresponding to different observations
center : bool, optional (default False)
Flag indicating whether to centre data
... |
def add_bonus(worker_dict):
" Adds DB-logged worker bonus to worker list data "
try:
unique_id = '{}:{}'.format(worker_dict['workerId'], worker_dict['assignmentId'])
worker = Participant.query.filter(
Participant.uniqueid == unique_id).one()
worker_dic... | Adds DB-logged worker bonus to worker list data |
def _pop(self, block=True, timeout=None, left=False):
item = None
timer = None
deque = self._deque
empty = IndexError('pop from an empty deque')
if block is False:
if len(self._deque) > 0:
item = deque.popleft() if left else deque.pop()
el... | Removes and returns the an item from this GeventDeque.
This is an internal method, called by the public methods
pop() and popleft(). |
def get_stream(self, stream):
    """Fetch a single stream by name.

    :param str stream: The name of the stream.
    :rtype: .Stream
    """
    path = '/archive/{}/streams/{}'.format(self._instance, stream)
    proto = archive_pb2.StreamInfo()
    proto.ParseFromString(self._client.get_proto(path=path).content)
    return Stream(proto)
:param str stream: The name of the stream.
:rtype: .Stream |
def strtobytes(input, encoding):
    """Take a str and transform it into a byte array.

    Dispatches to the Python 3 or Python 2 implementation depending on
    the interpreter running this code.
    """
    if sys.version_info[0] >= 3:
        return _strtobytes_py3(input, encoding)
    return _strtobytes_py2(input, encoding)
def newDocFragment(self):
    """Create a new Fragment node for this document.

    :raises treeError: when the underlying libxml2 call fails
    :return: the new fragment wrapped in an xmlNode
    """
    ret = libxml2mod.xmlNewDocFragment(self._o)
    if ret is None:raise treeError('xmlNewDocFragment() failed')
    # Wrap the raw C object in the Python-level node type.
    __tmp = xmlNode(_obj=ret)
    return __tmp
def find_outliers(group, delta):
with_pos = sorted([pair for pair in enumerate(group)], key=lambda p: p[1])
outliers_start = outliers_end = -1
for i in range(0, len(with_pos) - 1):
cur = with_pos[i][1]
nex = with_pos[i + 1][1]
if nex - cur > delta:
if i < (len(with_pos) -... | given a list of values, find those that are apart from the rest by
`delta`. the indexes for the outliers is returned, if any.
examples:
values = [100, 6, 7, 8, 9, 10, 150]
find_outliers(values, 5) -> [0, 6]
values = [5, 6, 5, 4, 5]
find_outliers(values, 3) -> [] |
def stack1d(*points):
    """Fill out the columns of a Fortran-ordered matrix with points.

    This exists because ``np.hstack()`` would just make another 1D
    vector out of them and ``np.vstack()`` would put them in the rows.

    Args:
        points (Tuple[numpy.ndarray, ...]): Tuple of 1D points (i.e.
            arrays with shape ``(2,)``).

    Returns:
        numpy.ndarray: 2 x ``len(points)`` array, one point per column.
    """
    result = np.empty((2, len(points)), order="F")
    for column, point in enumerate(points):
        result[:, column] = point
    return result
This is because ``np.hstack()`` will just make another 1D vector
out of them and ``np.vstack()`` will put them in the rows.
Args:
points (Tuple[numpy.ndarray, ...]): Tuple of 1D points (i.e.
arrays with shape ``(2,)``.
Return... |
def append(self, other):
if not isinstance(other,StarPopulation):
raise TypeError('Only StarPopulation objects can be appended to a StarPopulation.')
if not np.all(self.stars.columns == other.stars.columns):
raise ValueError('Two populations must have same columns to combine them... | Appends stars from another StarPopulations, in place.
:param other:
Another :class:`StarPopulation`; must have same columns as ``self``. |
def _add_property(self, name, default_value):
name = str(name)
self._properties[name] = default_value | Add a device property with a given default value.
Args:
name (str): The name of the property to add
default_value (int, bool): The value of the property |
def dst(self, dt):
    """datetime -> DST offset in minutes east of UTC.

    Converts *dt* into a local time struct and returns _dstdiff when
    daylight saving is in effect, otherwise _zero.
    """
    # The trailing -1 is the isdst flag: it tells mktime to determine
    # DST status itself from the rest of the tuple.
    tt = _localtime(_mktime((dt.year, dt.month, dt.day,
                             dt.hour, dt.minute, dt.second,
                             dt.weekday(), 0, -1)))
    if tt.tm_isdst > 0: return _dstdiff
    return _zero
def is_invalid_operation(self, callsign, timestamp=datetime.utcnow().replace(tzinfo=UTC)):
callsign = callsign.strip().upper()
if self._lookuptype == "clublogxml":
return self._check_inv_operation_for_date(callsign, timestamp, self._invalid_operations, self._invalid_operations_index)
... | Returns True if an operations is known as invalid
Args:
callsign (string): Amateur Radio callsign
timestamp (datetime, optional): datetime in UTC (tzinfo=pytz.UTC)
Returns:
bool: True if a record exists for this callsign (at the given time)
Raises:
... |
def get_custom_values(self, key):
    """Return the set of values for the given customParameter name.

    Also records *key* as handled before the lookup.
    """
    self._handled.add(key)
    values = self._lookup[key]
    return values
def query(self, query, media=None, year=None, fields=None, extended=None, **kwargs):
if not media:
warnings.warn(
"\"media\" parameter is now required on the Trakt['search'].query() method",
DeprecationWarning, stacklevel=2
)
if fields and not medi... | Search by titles, descriptions, translated titles, aliases, and people.
**Note:** Results are ordered by the most relevant score.
:param query: Search title or description
:type query: :class:`~python:str`
:param media: Desired media type (or :code:`None` to return all matching items)... |
def make_gym_env(name,
rl_env_max_episode_steps=-1,
maxskip_env=False,
rendered_env=False,
rendered_env_resize_to=None,
sticky_actions=False):
env = gym.make(name)
return gym_env_wrapper(env, rl_env_max_episode_steps, maxskip_env,
... | Create a gym env optionally with a time limit and maxskip wrapper.
NOTE: The returned env may already be wrapped with TimeLimit!
Args:
name: `str` - base name of the gym env to make.
rl_env_max_episode_steps: `int` or None - Using any value < 0 returns the
env as-in, otherwise we impose the requeste... |
def migrate(self, migrations_package_name, up_to=9999):
from .migrations import MigrationHistory
logger = logging.getLogger('migrations')
applied_migrations = self._get_applied_migrations(migrations_package_name)
modules = import_submodules(migrations_package_name)
unapplied_migr... | Executes schema migrations.
- `migrations_package_name` - fully qualified name of the Python package
containing the migrations.
- `up_to` - number of the last migration to apply. |
def limitsSql(startIndex=0, maxResults=0):
if startIndex and maxResults:
return " LIMIT {}, {}".format(startIndex, maxResults)
elif startIndex:
raise Exception("startIndex was provided, but maxResults was not")
elif maxResults:
return " LIMIT {}".format(maxResults)
else:
... | Construct a SQL LIMIT clause |
def _get_network_vswitch_map_by_port_id(self, port_id):
    """Return the (network_id, vswitch) pair owning the given port id.

    Scans every vswitch in self._network_vswitch_map and returns
    (None, None) when no vswitch lists the port.
    """
    for network_id, vswitch in six.iteritems(self._network_vswitch_map):
        if port_id in vswitch['ports']:
            return (network_id, vswitch)
    return (None, None)
def native(self, writeAccess=False, isolation_level=None):
host = self.database().writeHost() if writeAccess else self.database().host()
conn = self.open(writeAccess=writeAccess)
try:
if isolation_level is not None:
if conn.isolation_level == isolation_level:
... | Opens a new database connection to the database defined
by the inputted database.
:return <varaint> native connection |
def template(args):
    """Add or remove templates from site.

    args.ACTION selects the operation: "add" installs args.TEMPLATE on
    the site at args.PATH, anything else removes it.
    """
    site = Site(args.PATH)
    operation = site.add_template if args.ACTION == "add" else site.remove_template
    return operation(args.TEMPLATE)
def remove(self, point, node=None):
if not self:
return
if self.should_remove(point, node):
return self._remove(point)
if self.left and self.left.should_remove(point, node):
self.left = self.left._remove(point)
elif self.right and self.right.should_rem... | Removes the node with the given point from the tree
Returns the new root node of the (sub)tree.
If there are multiple points matching "point", only one is removed. The
optional "node" parameter is used for checking the identity, once the
removeal candidate is decided. |
def setnode(delta, graph, node, exists):
    """Record in *delta* that a node was created or deleted.

    Stores bool(exists) under delta[graph]['nodes'][node], creating the
    intermediate dicts as needed.
    """
    graph_delta = delta.setdefault(graph, {})
    nodes = graph_delta.setdefault('nodes', {})
    nodes[node] = bool(exists)
def layer_norm(x,
filters=None,
epsilon=1e-6,
name=None,
reuse=None,
layer_collection=None):
if filters is None:
filters = shape_list(x)[-1]
with tf.variable_scope(
name, default_name="layer_norm", values=[x], reuse=reuse):
sca... | Layer normalize the tensor x, averaging over the last dimension. |
def enable(
self, cmd="enable", pattern=r"(ssword|User Name)", re_flags=re.IGNORECASE
):
output = ""
if not self.check_enable_mode():
count = 4
i = 1
while i < count:
self.write_channel(self.normalize_cmd(cmd))
new_data = se... | Enter enable mode.
With RADIUS can prompt for User Name
SSH@Lab-ICX7250>en
User Name:service_netmiko
Password:
SSH@Lab-ICX7250# |
def delete(self):
    """Delete the record.

    Returns an empty dict on HTTP 204 (no content), otherwise the
    decoded JSON response body.
    """
    res = requests.delete(url=self.record_url, headers=HEADERS, verify=False)
    if res.status_code != 204:
        return res.json()
    return {}
def get_special_folder(self, name):
name = name if \
isinstance(name, OneDriveWellKnowFolderNames) \
else OneDriveWellKnowFolderNames(name.lower())
name = name.value
if self.object_id:
url = self.build_url(
self._endpoints.get('get_special').fo... | Returns the specified Special Folder
:return: a special Folder
:rtype: drive.Folder |
def add_passwords(self, identifiers, passwords):
if not isinstance(identifiers, list):
raise TypeError("identifiers can only be an instance of type list")
for a in identifiers[:10]:
if not isinstance(a, basestring):
raise TypeError(
"array ... | Adds a list of passwords required to import or export encrypted virtual
machines.
in identifiers of type str
List of identifiers.
in passwords of type str
List of matching passwords. |
def mutate(self, node, index):
    """Modify the numeric value on `node` by the offset at `index`.

    Returns a new parso Number carrying the adjusted value, positioned
    at the original node's start.
    """
    assert index < len(OFFSETS), 'received count with no associated offset'
    assert isinstance(node, parso.python.tree.Number)
    # NOTE(review): eval on node.value assumes the parsed source is trusted.
    new_value = eval(node.value) + OFFSETS[index]
    return parso.python.tree.Number(' ' + str(new_value), node.start_pos)
def delete_user_role(self, user, role):
    """Remove role from given user.

    Args:
        user (string): User name.
        role (string): Role to remove.

    Raises:
        requests.HTTPError on failure.
    """
    service = self.project_service
    service.set_auth(self._token_project)
    service.delete_user_role(user, role)
Args:
user (string): User name.
role (string): Role to remove.
Raises:
requests.HTTPError on failure. |
def rt(nu, size=None):
    """Student's t random variates with *nu* degrees of freedom.

    Built as standard normal draws divided by sqrt(chi-square / nu).
    The normal draw happens first to preserve the RNG stream order.
    """
    numerator = rnormal(0, 1, size)
    return numerator / np.sqrt(rchi2(nu, size) / nu)
def _get_packet(self, socket):
data, (ip, port) = socket.recvfrom(self._buffer_size)
packet, remainder = self._unpack(data)
self.inbox.put((ip, port, packet))
self.new_packet.set()
self.debug(u"RX: {}".format(packet))
if packet.header.sequence_number is not None:
... | Read packet and put it into inbox
:param socket: Socket to read from
:type socket: socket.socket
:return: Read packet
:rtype: APPMessage |
def find_by_name(self, term: str, include_placeholders: bool = False) -> List[Account]:
query = (
self.query
.filter(Account.name.like('%' + term + '%'))
.order_by(Account.name)
)
if not include_placeholders:
query = query.filter(Account.placeholde... | Search for account by part of the name |
def _prerun(self):
self.check_required_params()
self._set_status("RUNNING")
logger.debug(
"{}.PreRun: {}[{}]: running...".format(
self.__class__.__name__, self.__class__.path, self.uuid
),
extra=dict(
kmsg=Message(
... | To execute before running message |
def set_substitution(self, what, rep):
    """Set a substitution.

    Equivalent to ``! sub`` in RiveScript code.

    :param str what: The original text to replace.
    :param str rep: The text to replace it with.
        Set this to ``None`` to delete the substitution.
    """
    if rep is None:
        # Deleting: remove any existing entry and do NOT re-add it.
        # (The previous code fell through and stored None, so a
        # "deleted" substitution lingered with a None replacement.)
        self._subs.pop(what, None)
    else:
        self._subs[what] = rep
Equivalent to ``! sub`` in RiveScript code.
:param str what: The original text to replace.
:param str rep: The text to replace it with.
Set this to ``None`` to delete the substitution. |
def entrez_sets_of_results(url, retstart=False, retmax=False, count=False) -> Optional[List[requests.Response]]:
if not retstart:
retstart = 0
if not retmax:
retmax = 500
if not count:
count = retmax
retmax = 500
while retstart < count:
diff = count - retstart
... | Gets sets of results back from Entrez.
Entrez can only return 500 results at a time. This creates a generator that gets results by incrementing
retstart and retmax.
Parameters
----------
url : str
The Entrez API url to use.
retstart : int
Return values starting at this index.
... |
def _set_lim_and_transforms(self):
LambertAxes._set_lim_and_transforms(self)
yaxis_stretch = Affine2D().scale(4 * self.horizon, 1.0)
yaxis_stretch = yaxis_stretch.translate(-self.horizon, 0.0)
yaxis_space = Affine2D().scale(1.0, 1.1)
self._yaxis_transform = \
yaxis_st... | Setup the key transforms for the axes. |
def children_as_pi(self, squash=False):
    """Return the child visit counts as a probability distribution, pi.

    If *squash* is true, the counts are raised to a power slightly
    below one to flatten the distribution and encourage diversity in
    early play.

    Returns the raw (possibly squashed) counts unnormalised when their
    sum is zero, avoiding a division by zero.
    """
    probs = self.child_N
    if squash:
        probs = probs ** .98
    sum_probs = np.sum(probs)
    if sum_probs == 0:
        return probs
    # Reuse the sum computed above instead of summing a second time.
    return probs / sum_probs
return probs / np.sum(probs) | Returns the child visit counts as a probability distribution, pi
If squash is true, exponentiate the probabilities by a temperature
slightly larger than unity to encourage diversity in early play and
hopefully to move away from 3-3s |
def emit(self, record):
try:
QgsMessageLog.logMessage(record.getMessage(), 'InaSAFE', 0)
except MemoryError:
message = tr(
'Due to memory limitations on this machine, InaSAFE can not '
'handle the full log')
print(message)
Q... | Try to log the message to QGIS if available, otherwise do nothing.
:param record: logging record containing whatever info needs to be
logged. |
def _pdb_frame(self):
if self._pdb_obj is not None and self._pdb_obj.curframe is not None:
return self._pdb_obj.curframe | Return current Pdb frame if there is any |
def say_tmp_filepath(text=None, preference_program="festival"):
    """Say the specified text to a temporary .wav file and return its path."""
    filepath = shijian.tmp_filepath() + ".wav"
    say(text=text,
        preference_program=preference_program,
        filepath=filepath)
    return filepath
def gdal_rasterize(src, dst, options):
    """A simple wrapper for gdal.Rasterize.

    Parameters
    ----------
    src: str or :osgeo:class:`ogr.DataSource`
        the input data set
    dst: str
        the output data set
    options: dict
        additional parameters passed to gdal.RasterizeOptions
    """
    raster = gdal.Rasterize(dst, src, options=gdal.RasterizeOptions(**options))
    # Dropping the reference flushes and closes the GDAL dataset.
    raster = None
Parameters
----------
src: str or :osgeo:class:`ogr.DataSource`
the input data set
dst: str
the output data set
options: dict
additional parameters passed to gdal.Rasterize; see :osgeo:func:`gdal.RasterizeOptions`
Returns
------- |
def layout_asides(self, block, context, frag, view_name, aside_frag_fns):
result = Fragment(frag.content)
result.add_fragment_resources(frag)
for aside, aside_fn in aside_frag_fns:
aside_frag = self.wrap_aside(block, aside, view_name, aside_fn(block, context), context)
as... | Execute and layout the aside_frags wrt the block's frag. Runtimes should feel free to override this
method to control execution, place, and style the asides appropriately for their application
This default method appends the aside_frags after frag. If you override this, you must
call wrap_aside... |
def __get_factory_with_context(self, factory_name):
factory = self.__factories.get(factory_name)
if factory is None:
raise TypeError("Unknown factory '{0}'".format(factory_name))
factory_context = getattr(
factory, constants.IPOPO_FACTORY_CONTEXT, None
)
i... | Retrieves the factory registered with the given and its factory context
:param factory_name: The name of the factory
:return: A (factory, context) tuple
:raise TypeError: Unknown factory, or factory not manipulated |
def _load_settings(self):
if self._autosettings_path == None: return
gui_settings_dir = _os.path.join(_cwd, 'egg_settings')
path = _os.path.join(gui_settings_dir, self._autosettings_path)
if not _os.path.exists(path): return
settings = _g.QtCore.QSettings(path, _g.QtCore.QSetting... | Loads all the parameters from a databox text file. If path=None,
loads from self._autosettings_path. |
def show_ipsecpolicy(self, ipsecpolicy, **_params):
    """Fetch information about a specific IPsecPolicy.

    :param ipsecpolicy: identifier of the policy to look up
    :param _params: extra query parameters forwarded to the GET request
    """
    return self.get(self.ipsecpolicy_path % (ipsecpolicy), params=_params)
def reset(self, force):
client = self.create_client()
bucket = client.lookup_bucket(self.bucket_name)
if bucket is not None:
if not force:
self._log.error("Bucket already exists, aborting.")
raise ExistingBackendError
self._log.info("Bucket... | Connect to the assigned bucket or create if needed. Clear all the blobs inside. |
def change_ref(self, r0=None, lmax=None):
if lmax is None:
lmax = self.lmax
clm = self.pad(lmax)
if r0 is not None and r0 != self.r0:
for l in _np.arange(lmax+1):
clm.coeffs[:, l, :l+1] *= (self.r0 / r0)**(l+2)
if self.errors is not None:
... | Return a new SHMagCoeffs class instance with a different reference r0.
Usage
-----
clm = x.change_ref([r0, lmax])
Returns
-------
clm : SHMagCoeffs class instance.
Parameters
----------
r0 : float, optional, default = self.r0
The ref... |
def str_to_list(
input_str,
item_converter=lambda x: x,
item_separator=',',
list_to_collection_converter=None,
):
if not isinstance(input_str, six.string_types):
raise ValueError(input_str)
input_str = str_quote_stripper(input_str)
result = [
item_converter(x.strip())
... | a conversion function for list |
def fit(self, y, **kwargs):
    """Set up ``y`` for the histogram and check its data type.

    Fit calls draw.

    Parameters
    ----------
    y : an array of one dimension or a pandas Series
    kwargs : dict
        keyword arguments passed to scikit-learn API.
    """
    if y.ndim > 1:
        raise YellowbrickValueError("y needs to be an array or Series with one dimension")
    # Default the axis label used for the frequency dimension.
    if self.target is None:
        self.target = 'Frequency'
    self.draw(y)
    return self
return self | Sets up y for the histogram and checks to
ensure that ``y`` is of the correct data type.
Fit calls draw.
Parameters
----------
y : an array of one dimension or a pandas Series
kwargs : dict
keyword arguments passed to scikit-learn API. |
def tospark(self, engine=None):
from thunder.series.readers import fromarray
if self.mode == 'spark':
logging.getLogger('thunder').warn('images already in local mode')
pass
if engine is None:
raise ValueError('Must provide SparkContext')
return fromarr... | Convert to spark mode. |
def get_throttled_by_consumed_read_percent(
table_name, lookback_window_start=15, lookback_period=5):
try:
metrics1 = __get_aws_metric(
table_name,
lookback_window_start,
lookback_period,
'ConsumedReadCapacityUnits')
metrics2 = __get_aws_metric... | Returns the number of throttled read events in percent of consumption
:type table_name: str
:param table_name: Name of the DynamoDB table
:type lookback_window_start: int
:param lookback_window_start: Relative start time for the CloudWatch metric
:type lookback_period: int
:param lookback_perio... |
def _output_file_data(self, outfp, blocksize, ino):
log_block_size = self.pvd.logical_block_size()
outfp.seek(ino.extent_location() * log_block_size)
tmp_start = outfp.tell()
with inode.InodeOpenData(ino, log_block_size) as (data_fp, data_len):
utils.copy_data(data_len, block... | Internal method to write a directory record entry out.
Parameters:
outfp - The file object to write the data to.
blocksize - The blocksize to use when writing the data out.
ino - The Inode to write.
Returns:
The total number of bytes written out. |
def hline(self, x, y, width, color):
    """Draw a horizontal line of the given width.

    Implemented as a filled rectangle one pixel high.
    """
    self.rect(x, y, width, 1, color, fill=True)
def _load_config(self, path):
    """Load configuration from JSON.

    :param path: path to the JSON config file (``~`` is expanded)
    :type path: str
    :return: config dictionary
    :rtype: dict
    """
    resolved = os.path.abspath(os.path.expanduser(path))
    logger.debug('Loading configuration from: %s', resolved)
    return read_json_file(resolved)
return read_json_file(p) | Load configuration from JSON
:param path: path to the JSON config file
:type path: str
:return: config dictionary
:rtype: dict |
def resample(
self,
rule: Union[str, int] = "1s",
max_workers: int = 4,
) -> "Traffic":
with ProcessPoolExecutor(max_workers=max_workers) as executor:
cumul = []
tasks = {
executor.submit(flight.resample, rule): flight
for fligh... | Resamples all trajectories, flight by flight.
`rule` defines the desired sample rate (default: 1s) |
def check_empty_response(self, orig_request, method_config, start_response):
    """Return an HTTP 204 No Content response when the method's response
    body is configured as 'empty'; otherwise return None.

    Args:
        orig_request: An ApiRequest, the original request from the user.
        method_config: A dict, the API config of the method to be called.
        start_response: A function with semantics defined in PEP-333.
    """
    body_config = method_config.get('response', {}).get('body')
    if body_config != 'empty':
        return None
    cors_handler = self._create_cors_handler(orig_request)
    return util.send_wsgi_no_content_response(start_response, cors_handler)
Args:
orig_request: An ApiRequest, the original request from the user.
method_config: A dict, the API config of the method to be called.
start_response: A function with semantics defined in PEP-333.
Returns:
If th... |
def item_details(item_id, lang="en"):
    """Return details about a single item, served via the local cache.

    :param item_id: The item to query for.
    :param lang: The language to display the texts in.
    :return: the decoded JSON response describing the item
    """
    params = {"item_id": item_id, "lang": lang}
    # Cache key embeds item id and language so variants cache separately.
    cache_name = "item_details.%(item_id)s.%(lang)s.json" % params
    return get_cached("item_details.json", cache_name, params=params)
:param item_id: The item to query for.
:param lang: The language to display the texts in.
The response is an object with at least the following properties. Note that
the availability of some properties depends on the type of the item.
item_id (... |
def collapse_indents(indentation):
change_in_level = ind_change(indentation)
if change_in_level == 0:
indents = ""
elif change_in_level < 0:
indents = closeindent * (-change_in_level)
else:
indents = openindent * change_in_level
return indentation.replace(openindent, "").repl... | Removes all openindent-closeindent pairs. |
def fuzzy(self, key, limit=5):
    """Give suggestions from all instances.

    Returns up to *limit* fuzzy matches for *key*, or None when the
    container holds no instances at all.
    """
    instance_lists = [entry[2] for entry in self.container if entry[2]]
    if not instance_lists:
        return None
    flattened = sum(instance_lists, [])
    from fuzzywuzzy import process
    return process.extract(key, flattened, limit=limit)
def visit_Break(self, _):
    """Generate a break statement in most cases, or a goto when a loop
    registered a break handler (used for the orelse clause).

    See Also : cxx_loop
    """
    handlers = self.break_handlers
    if handlers and handlers[-1]:
        return Statement("goto {0}".format(handlers[-1]))
    return Statement("break")
See Also : cxx_loop |
def _preprocess_add_items(self, items):
paths = []
entries = []
for item in items:
if isinstance(item, string_types):
paths.append(self._to_relative_path(item))
elif isinstance(item, (Blob, Submodule)):
entries.append(BaseIndexEntry.from_bl... | Split the items into two lists of path strings and BaseEntries. |
def FlushShortIdRecords(site_service):
szService = c_char_p(site_service.encode('utf-8'))
szMessage = create_string_buffer(b" ")
nMessage = c_ushort(20)
nRet = dnaserv_dll.DnaFlushShortIdRecords(szService, byref(szMessage),
nMessage)
r... | Flush all the queued records.
:param site_service: The site.service where data was pushed
:return: message whether function was successful |
def delete_minion_cachedir(minion_id, provider, opts, base=None):
if isinstance(opts, dict):
__opts__.update(opts)
if __opts__.get('update_cachedir', False) is False:
return
if base is None:
base = __opts__['cachedir']
driver = next(six.iterkeys(__opts__['providers'][provider]))
... | Deletes a minion's entry from the cloud cachedir. It will search through
all cachedirs to find the minion's cache file.
Needs `update_cachedir` set to True. |
def handle_invocation(self, message):
req_id = message.request_id
reg_id = message.registration_id
if reg_id in self._registered_calls:
handler = self._registered_calls[reg_id][REGISTERED_CALL_CALLBACK]
invoke = WampInvokeWrapper(self,handler,message)
invoke.s... | Passes the invocation request to the appropriate
callback. |
def get_full_url(self, parsed_url):
    """Return the URL path, with the querystring appended when present."""
    if parsed_url.query:
        return '%s?%s' % (parsed_url.path, parsed_url.query)
    return parsed_url.path
def printArchive(fileName):
archive = CombineArchive()
if archive.initializeFromArchive(fileName) is None:
print("Invalid Combine Archive")
return None
print('*'*80)
print('Print archive:', fileName)
print('*' * 80)
printMetaDataFor(archive, ".")
print("Num Entries: {0}".form... | Prints content of combine archive
:param fileName: path of archive
:return: None |
def _get_module(target):
filepath, sep, namespace = target.rpartition('|')
if sep and not filepath:
raise BadDirectory("Path to file not supplied.")
module, sep, class_or_function = namespace.rpartition(':')
if (sep and not module) or (filepath and not module):
raise MissingModule("Need ... | Import a named class, module, method or function.
Accepts these formats:
".../file/path|module_name:Class.method"
".../file/path|module_name:Class"
".../file/path|module_name:function"
"module_name:Class"
"module_name:function"
"module_name:Class.function"
If a ... |
def numval(token):
    """Return the numerical value of token.value if it is a number.

    INTEGER tokens convert to int, FLOAT tokens to float; any other
    token type returns its value unchanged.
    """
    converters = {'INTEGER': int, 'FLOAT': float}
    convert = converters.get(token.type)
    if convert is None:
        return token.value
    return convert(token.value)
def resource(self, uri, methods=frozenset({'GET'}), **kwargs):
    """Decorate a function to register it as a resource route.

    :param uri: path of the URL
    :param methods: list or tuple of methods allowed
    :param stream: when truthy, marks the handler as a stream handler
    :param kwargs: further options forwarded to add_resource
    """
    def decorator(f):
        if kwargs.get('stream'):
            f.is_stream = kwargs['stream']
        self.add_resource(f, uri=uri, methods=methods, **kwargs)
        # Return the handler so the decorated name keeps pointing at it
        # (previously the decorator returned None, clobbering the
        # decorated function's name).
        return f
    return decorator
return decorator | Decorates a function to be registered as a resource route.
:param uri: path of the URL
:param methods: list or tuple of methods allowed
:param host:
:param strict_slashes:
:param stream:
:param version:
:param name: user defined route name for url_for
:pa... |
def read_stdin(self):
    """Read STDIN until the end of input and return a unicode string."""
    text = sys.stdin.read()
    # Python 2 yields bytes from stdin; decode them to unicode.
    if sys.version_info[0] < 3 and text is not None:
        text = text.decode(sys.stdin.encoding or 'utf-8')
    return text
def default( self, o ):
if isinstance(o, datetime):
return o.isoformat()
else:
if isinstance(o, Exception):
return str(o)
else:
if isinstance(o, numpy.integer):
return int(o)
else:
... | If o is a datetime object, convert it to an ISO string. If it is an
exception, convert it to a string. If it is a numpy int, coerce it to
a Python int.
:param o: the field to serialise
:returns: a string encoding of the field |
def set_app_args(self, *args):
    """Set ``sys.argv`` for python apps.

    Example: pyargv="one two three" will set ``sys.argv`` to
    ``('one', 'two', 'three')``.

    :param args: positional argument strings; ignored when empty
    """
    if args:
        self._set('pyargv', ' '.join(args))
    return self._section
return self._section | Sets ``sys.argv`` for python apps.
Examples:
* pyargv="one two three" will set ``sys.argv`` to ``('one', 'two', 'three')``.
:param args: |
def _class(self):
try:
self._project_name()
except ValueError:
return MalformedReq
if self._is_satisfied():
return SatisfiedReq
if not self._expected_hashes():
return MissingReq
if self._actual_hash() not in self._expected_hashes():... | Return the class I should be, spanning a continuum of goodness. |
def reply(self, text):
data = {'text': text, 'vchannel_id': self['vchannel_id']}
if self.is_p2p():
data['type'] = RTMMessageType.P2PMessage
data['to_uid'] = self['uid']
else:
data['type'] = RTMMessageType.ChannelMessage
data['channel_id'] = self['c... | Replys a text message
Args:
text(str): message content
Returns:
RTMMessage |
def from_pandas_dataframe(cls, bqm_df, offset=0.0, interactions=None):
if interactions is None:
interactions = []
bqm = cls({}, {}, offset, Vartype.BINARY)
for u, row in bqm_df.iterrows():
for v, bias in row.iteritems():
if u == v:
bqm.... | Create a binary quadratic model from a QUBO model formatted as a pandas DataFrame.
Args:
bqm_df (:class:`pandas.DataFrame`):
Quadratic unconstrained binary optimization (QUBO) model formatted
as a pandas DataFrame. Row and column indices label the QUBO variables;
... |
def async_update(self, event, reason=None):
    """Handle a new event for this sensor.

    Checks whether state or config data is part of the event, records
    which attributes changed, then signals the update upstream.

    :param event: dict possibly carrying 'state' and/or 'config' data
    :param reason: optional dict accumulating what changed; mutated in
        place when supplied by the caller
    """
    # A mutable default ({}) would be shared across all calls and leak
    # state between events; create a fresh dict per call instead.
    if reason is None:
        reason = {}
    reason['attr'] = []
    for data in ['state', 'config']:
        changed_attr = self.update_attr(event.get(data, {}))
        reason[data] = data in event
        reason['attr'] += changed_attr
    super().async_update(event, reason)
Check if state or config is part of event.
Signal that sensor has updated attributes.
Inform what attributes got changed values. |
def from_binary(cls, pst, filename):
    """Instantiate an observation ensemble from a jco-type file.

    Parameters
    ----------
    pst : pyemu.Pst
        a Pst instance
    filename : str
        the binary file name

    Returns
    -------
    oe : ObservationEnsemble
    """
    matrix = Matrix.from_binary(filename)
    return ObservationEnsemble(data=matrix.x, pst=pst, index=matrix.row_names)
Parameters
----------
pst : pyemu.Pst
a Pst instance
filename : str
the binary file name
Returns
-------
oe : ObservationEnsemble |
def _stream(self, char):
num = ord(char)
if num in self.basic:
self.dispatch(self.basic[num])
elif num == ctrl.ESC:
self.state = "escape"
elif num == 0x00:
pass
else:
self.dispatch("print", char) | Process a character when in the
default 'stream' state. |
def _FormatDateTime(self, event):
try:
datetime_object = datetime.datetime(
1970, 1, 1, 0, 0, 0, 0, tzinfo=pytz.UTC)
datetime_object += datetime.timedelta(microseconds=event.timestamp)
datetime_object.astimezone(self._output_mediator.timezone)
return datetime_object.replace(tzinfo=... | Formats the date to a datetime object without timezone information.
Note: timezone information must be removed due to lack of support
by xlsxwriter and Excel.
Args:
event (EventObject): event.
Returns:
datetime.datetime|str: date and time value or a string containing
"ERROR" on ... |
def list_media_services(access_token, subscription_id):
    """List the media services in a subscription.

    Args:
        access_token (str): A valid Azure authentication token.
        subscription_id (str): Azure subscription id.

    Returns:
        HTTP response. JSON body.
    """
    endpoint = (get_rm_endpoint()
                + '/subscriptions/' + subscription_id
                + '/providers/microsoft.media/mediaservices?api-version='
                + MEDIA_API)
    return do_get(endpoint, access_token)
return do_get(endpoint, access_token) | List the media services in a subscription.
Args:
access_token (str): A valid Azure authentication token.
subscription_id (str): Azure subscription id.
Returns:
HTTP response. JSON body. |
async def delete(self):
    """Delete this Fabric.

    :raises CannotDelete: when this is the default fabric.
    """
    default_id = self._origin.Fabric._default_fabric_id
    if self.id == default_id:
        raise CannotDelete("Default fabric cannot be deleted.")
    await self._handler.delete(id=self.id)
def class_balancing_sampler(y, indices):
    """Construct a `WeightedSubsetSampler` that compensates for class
    imbalance.

    Parameters
    ----------
    y: NumPy array, 1D dtype=int
        sample classes, values must be 0 or positive
    indices: NumPy array, 1D dtype=int
        indices identifying the subset of samples to draw from

    Returns
    -------
    WeightedSubsetSampler instance
    """
    subset_classes = y[indices]
    weights = WeightedSampler.class_balancing_sample_weights(subset_classes)
    return WeightedSubsetSampler(weights, indices=indices)
imbalance.
Parameters
----------
y: NumPy array, 1D dtype=int
sample classes, values must be 0 or positive
indices: NumPy array, 1D dtype=int
An array of indices that identify the subset of sa... |
def GetLoadedModuleBySuffix(path):
root = os.path.splitext(path)[0]
for module in sys.modules.values():
mod_root = os.path.splitext(getattr(module, '__file__', None) or '')[0]
if not mod_root:
continue
if not os.path.isabs(mod_root):
mod_root = os.path.join(os.getcwd(), mod_root)
if IsPa... | Searches sys.modules to find a module with the given file path.
Args:
path: Path to the source file. It can be relative or absolute, as suffix
match can handle both. If absolute, it must have already been
sanitized.
Algorithm:
The given path must be a full suffix of a loaded module to ... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.