code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def decode_msg(msg, enc='utf-8'):
cte = str(msg.get('content-transfer-encoding', '')).lower()
decode = cte not in ("8bit", "7bit", "binary")
res = msg.get_payload(decode=decode)
return decode_bytes(res, enc) | Decodes a message fragment.
Args: msg - A Message object representing the fragment
enc - The encoding to use for decoding the message |
def libvlc_video_set_adjust_float(p_mi, option, value):
f = _Cfunctions.get('libvlc_video_set_adjust_float', None) or \
_Cfunction('libvlc_video_set_adjust_float', ((1,), (1,), (1,),), None,
None, MediaPlayer, ctypes.c_uint, ctypes.c_float)
return f(p_mi, option, value) | Set adjust option as float. Options that take a different type value
are ignored.
@param p_mi: libvlc media player instance.
@param option: adjust option to set, values of libvlc_video_adjust_option_t.
@param value: adjust option value.
@version: LibVLC 1.1.1 and later. |
def _get_shipped_from(row):
try:
spans = row.find('div', {'id': 'coltextR2'}).find_all('span')
if len(spans) < 2:
return None
return spans[1].string
except AttributeError:
return None | Get where package was shipped from. |
def filter_metadata(metadata, user_filter, default_filter):
actual_keys = set(metadata.keys())
keep_keys = apply_metadata_filters(user_filter, default_filter, actual_keys)
for key in actual_keys:
if key not in keep_keys:
metadata.pop(key)
return metadata | Filter the cell or notebook metadata, according to the user preference |
def username_to_uuid(username):
if not username.startswith('u-') or len(username) != 28:
raise ValueError('Not an UUID based username: %r' % (username,))
decoded = base64.b32decode(username[2:].upper() + '======')
return UUID(bytes=decoded) | Convert username to UUID.
>>> username_to_uuid('u-ad52zgilvnpgnduefzlh5jgr6y')
UUID('00fbac99-0bab-5e66-8e84-2e567ea4d1f6') |
def _process_output(self, node, **kwargs):
for n in node.nodes:
self._process_node(n, **kwargs) | Processes an output node, which will contain things like `Name` and `TemplateData` nodes. |
def set_ocha_url(cls, url=None):
if url is None:
url = cls._ochaurl_int
cls._ochaurl = url | Set OCHA url from which to retrieve countries data
Args:
url (str): OCHA url from which to retrieve countries data. Defaults to internal value.
Returns:
None |
def str2actfunc(act_func):
if act_func == 'sigmoid':
return tf.nn.sigmoid
elif act_func == 'tanh':
return tf.nn.tanh
elif act_func == 'relu':
return tf.nn.relu | Convert activation function name to tf function. |
def ungrab_hotkey(self, item):
import copy
newItem = copy.copy(item)
if item.get_applicable_regex() is None:
self.__enqueue(self.__ungrabHotkey, newItem.hotKey, newItem.modifiers, self.rootWindow)
if self.__needsMutterWorkaround(item):
self.__enqueue(self.__ungrabRecurse, newItem, self.rootWindow, False)
else:
self.__enqueue(self.__ungrabRecurse, newItem, self.rootWindow) | Ungrab a hotkey.
If the hotkey has no filter regex, it is global and is ungrabbed recursively from the root window
If it has a filter regex, iterate over all children of the root and ungrab from matching windows |
def _check_dynamic_acl_support(self):
cmds = ['ip access-list openstack-test dynamic',
'no ip access-list openstack-test']
for switch_ip, switch_client in self._switches.items():
try:
self.run_openstack_sg_cmds(cmds)
except Exception:
LOG.error("Switch %s does not support dynamic ACLs. SG "
"support will not be enabled on this switch.",
switch_ip) | Log an error if any switches don't support dynamic ACLs |
def full_block_key(self):
if self.block_key.run is None:
return self.block_key.replace(course_key=self.course_key)
return self.block_key | Returns the "correct" usage key value with the run filled in. |
def separate_directions(di_block):
ppars = doprinc(di_block)
di_df = pd.DataFrame(di_block)
di_df.columns = ['dec', 'inc']
di_df['pdec'] = ppars['dec']
di_df['pinc'] = ppars['inc']
di_df['angle'] = angle(di_df[['dec', 'inc']].values,
di_df[['pdec', 'pinc']].values)
mode1_df = di_df[di_df['angle'] <= 90]
mode2_df = di_df[di_df['angle'] > 90]
mode1 = mode1_df[['dec', 'inc']].values.tolist()
mode2 = mode2_df[['dec', 'inc']].values.tolist()
return mode1, mode2 | Separates set of directions into two modes based on principal direction
Parameters
_______________
di_block : block of nested dec,inc pairs
Return
mode_1_block,mode_2_block : two lists of nested dec,inc pairs |
def get_columns_diff(changes):
for change in changes:
change.diff = []
elt_changes = change.get_changes()
if elt_changes:
change.diff = elt_changes.columns
return changes | Add the changed columns as a diff attribute.
- changes: a list of changes (get_model_changes query.all())
Return: the same list, to which elements we added a "diff"
attribute containing the changed columns. Diff defaults to []. |
def color_for_thread(thread_id):
if thread_id not in seen_thread_colors:
seen_thread_colors[thread_id] = next(thread_colors)
return seen_thread_colors[thread_id] | Associates the thread ID with the next color in the `thread_colors` cycle,
so that thread-specific parts of a log have a consistent separate color. |
def reward(self):
raw_rewards, processed_rewards = 0, 0
for ts in self.time_steps:
if ts.raw_reward is not None:
raw_rewards += ts.raw_reward
if ts.processed_reward is not None:
processed_rewards += ts.processed_reward
return raw_rewards, processed_rewards | Returns a tuple of sum of raw and processed rewards. |
def property(self, *args, **kwargs):
_properties = self.properties(*args, **kwargs)
if len(_properties) == 0:
raise NotFoundError("No property fits criteria")
if len(_properties) != 1:
raise MultipleFoundError("Multiple properties fit criteria")
return _properties[0] | Retrieve single KE-chain Property.
Uses the same interface as the :func:`properties` method but returns only a single pykechain :class:
`models.Property` instance.
If additional `keyword=value` arguments are provided, these are added to the request parameters. Please
refer to the documentation of the KE-chain API for additional query parameters.
:return: a single :class:`models.Property`
:raises NotFoundError: When no `Property` is found
:raises MultipleFoundError: When more than a single `Property` is found |
def json_path_components(path):
if isinstance(path, str):
path = path.split('.')
return list(path) | Convert JSON path to individual path components.
:param path: JSON path, which can be either an iterable of path
components or a dot-separated string
:return: A list of path components |
def sma(arg, n):
if n == 0:
return pd.expanding_mean(arg)
else:
return pd.rolling_mean(arg, n, min_periods=n) | If n is 0 then return the ltd mean; else return the n day mean |
def to_html(self, write_to):
page_html = self.get_html()
with open(write_to, "wb") as writefile:
writefile.write(page_html.encode("utf-8")) | Method to convert the repository list to a search results page and
write it to a HTML file.
:param write_to: File/Path to write the html file to. |
def fn_get_mask(self, value):
value = self._to_ndarray(value)
if numpy.ma.is_masked(value):
return value.mask
else:
return numpy.zeros(value.shape).astype(bool) | Return an array mask.
:param value: The array.
:return: The array mask. |
def infer_year(date):
if isinstance(date, str):
pattern = r'(?P<year>\d{4})'
result = re.match(pattern, date)
if result:
return int(result.groupdict()['year'])
else:
raise ValueError('Invalid date string provided: {}'.format(date))
elif isinstance(date, np.datetime64):
return date.item().year
else:
return date.year | Given a datetime-like object or string infer the year.
Parameters
----------
date : datetime-like object or str
Input date
Returns
-------
int
Examples
--------
>>> infer_year('2000')
2000
>>> infer_year('2000-01')
2000
>>> infer_year('2000-01-31')
2000
>>> infer_year(datetime.datetime(2000, 1, 1))
2000
>>> infer_year(np.datetime64('2000-01-01'))
2000
>>> infer_year(DatetimeNoLeap(2000, 1, 1))
2000
>>> |
def search_tor_node(self, ip):
data = {}
tmp = {}
present = datetime.utcnow().replace(tzinfo=pytz.utc)
for line in self._get_raw_data().splitlines():
params = line.split(' ')
if params[0] == 'ExitNode':
tmp['node'] = params[1]
elif params[0] == 'ExitAddress':
tmp['last_status'] = params[2] + 'T' + params[3] + '+0000'
last_status = parse(tmp['last_status'])
if (self.delta is None or
(present - last_status) < self.delta):
data[params[1]] = tmp
tmp = {}
else:
pass
return data.get(ip, {}) | Lookup an IP address to check if it is a known tor exit node.
:param ip: The IP address to lookup
:type ip: str
:return: Data relative to the tor node. If `ip` is a tor exit node
it will contain a `node` key with the hash of the node and
a `last_status` key with the last update time of the node.
If `ip` is not a tor exit node, the function will return an
empty dictionary.
:rtype: dict |
def __update_common(self):
if "TCON" in self:
self["TCON"].genres = self["TCON"].genres
mimes = {"PNG": "image/png", "JPG": "image/jpeg"}
for pic in self.getall("APIC"):
if pic.mime in mimes:
newpic = APIC(
encoding=pic.encoding, mime=mimes[pic.mime],
type=pic.type, desc=pic.desc, data=pic.data)
self.add(newpic) | Updates done by both v23 and v24 update |
def _save_html_report(self, heads=None, refresh=None):
report = ReportHtml(self)
heads = heads if heads else {}
test_report_filename = report.get_current_filename("html")
report.generate(test_report_filename, title='Test Results', heads=heads, refresh=refresh)
latest_report_filename = report.get_latest_filename("html")
report.generate(latest_report_filename, title='Test Results', heads=heads, refresh=refresh) | Save html report.
:param heads: headers as dict
:param refresh: Boolean, if True will add a reload-tag to the report
:return: Nothing |
def VCIncludes(self):
return [os.path.join(self.si.VCInstallDir, 'Include'),
os.path.join(self.si.VCInstallDir, r'ATLMFC\Include')] | Microsoft Visual C++ & Microsoft Foundation Class Includes |
def get_code(module):
fp = open(module.path)
try:
return compile(fp.read(), str(module.name), 'exec')
finally:
fp.close() | Compile and return a Module's code object. |
def markov_blanket(y, mean, scale, shape, skewness):
return ss.cauchy.logpdf(y, loc=mean, scale=scale) | Markov blanket for each likelihood term - used for state space models
Parameters
----------
y : np.ndarray
univariate time series
mean : np.ndarray
array of location parameters for the Cauchy distribution
scale : float
scale parameter for the Cauchy distribution
shape : float
tail thickness parameter for the Cauchy distribution
skewness : float
skewness parameter for the Cauchy distribution
Returns
----------
- Markov blanket of the Cauchy family |
def enable(cls, args):
mgr = NAppsManager()
if args['all']:
napps = mgr.get_disabled()
else:
napps = args['<napp>']
cls.enable_napps(napps) | Enable subcommand. |
def _get_slice_axis(self, slice_obj, axis=None):
if axis is None:
axis = self.axis or 0
obj = self.obj
if not need_slice(slice_obj):
return obj.copy(deep=False)
labels = obj._get_axis(axis)
indexer = labels.slice_indexer(slice_obj.start, slice_obj.stop,
slice_obj.step, kind=self.name)
if isinstance(indexer, slice):
return self._slice(indexer, axis=axis, kind='iloc')
else:
return self.obj._take(indexer, axis=axis) | this is pretty simple as we just have to deal with labels |
def _client_receive(self):
try:
response = self._client.readline()
self.log.debug('Snippet received: %s', response)
return response
except socket.error as e:
raise Error(
self._ad,
'Encountered socket error reading RPC response "%s"' % e) | Receives the server's response of an Rpc message.
Returns:
Raw byte string of the response.
Raises:
Error: a socket error occurred during the read. |
def stop_server(self):
self.stop = True
while self.task_count:
time.sleep(END_RESP)
self.terminate = True | Stop receiving connections, wait for all tasks to end, and then
terminate the server. |
def path_end_to_end_distance(neurite):
trunk = neurite.root_node.points[0]
return max(morphmath.point_dist(l.points[-1], trunk)
for l in neurite.root_node.ileaf()) | Calculate and return end-to-end-distance of a given neurite. |
def _check_model_types(self, models):
if not hasattr(models, "__iter__"):
models = {models}
if not all([isinstance(model, (AbstractStateModel, StateElementModel)) for model in models]):
raise TypeError("The selection supports only models with base class AbstractStateModel or "
"StateElementModel, see handed elements {0}".format(models))
return models if isinstance(models, set) else set(models) | Check types of passed models for correctness and raise an exception if any model is invalid
:rtype: set
:returns: set of models that are valid for the class |
def _get_col_epsg(mapped_class, geom_attr):
col = class_mapper(mapped_class).get_property(geom_attr).columns[0]
return col.type.srid | Get the EPSG code associated with a geometry attribute.
Arguments:
geom_attr
the key of the geometry property as defined in the SQLAlchemy
mapper. If you use ``declarative_base`` this is the name of
the geometry attribute as defined in the mapped class. |
def contains_geometric_info(var):
return isinstance(var, tuple) and len(var) == 2 and all(isinstance(val, (int, float)) for val in var) | Check whether the passed variable is a tuple with two floats or integers |
def _SigSegvHandler(self, signal_number, stack_frame):
self._OnCriticalError()
if self._original_sigsegv_handler is not None:
signal.signal(signal.SIGSEGV, self._original_sigsegv_handler)
os.kill(self._pid, signal.SIGSEGV) | Signal handler for the SIGSEGV signal.
Args:
signal_number (int): numeric representation of the signal.
stack_frame (frame): current stack frame or None. |
def expanded_by(self, n):
return Rect(self.left - n, self.top - n, self.right + n, self.bottom + n) | Return a rectangle with extended borders.
Create a new rectangle that is wider and taller than the
immediate one. All sides are extended by "n" points. |
def get_cache_prefix(self, prefix=''):
if settings.CACHE_MIDDLEWARE_KEY_PREFIX:
prefix += settings.CACHE_MIDDLEWARE_KEY_PREFIX
if self.request.is_ajax():
prefix += 'ajax'
return prefix | Hook for any extra data you would like
to prepend to your cache key.
The default implementation ensures that ajax and non-ajax
requests are cached separately. This can easily
be extended to differentiate on other criteria
like mobile os' for example. |
def from_file(filename):
spec = Spec()
with open(filename, "r", encoding="utf-8") as f:
parse_context = {"current_subpackage": None}
for line in f:
spec, parse_context = _parse(spec, parse_context, line)
return spec | Creates a new Spec object from a given file.
:param filename: The path to the spec file.
:return: A new Spec object. |
def _check_task(taskid):
try:
taskurl = taskid.get('ref', '0000')
except AttributeError:
taskurl = taskid
taskid = taskurl.split('/tasks/')[-1]
LOG.info('Checking taskid %s', taskid)
url = '{}/tasks/{}'.format(API_URL, taskid)
task_response = requests.get(url, headers=HEADERS, verify=GATE_CA_BUNDLE, cert=GATE_CLIENT_CERT)
LOG.debug(task_response.json())
assert task_response.ok, 'Spinnaker communication error: {0}'.format(task_response.text)
task_state = task_response.json()
status = task_state['status']
LOG.info('Current task status: %s', status)
if status == 'SUCCEEDED':
return status
elif status == 'TERMINAL':
raise SpinnakerTaskError(task_state)
else:
raise ValueError | Check Spinnaker Task status.
Args:
taskid (str): Existing Spinnaker Task ID.
Returns:
str: Task status. |
def get_suitable_vis_classes(obj):
ret = []
for class_ in classes_vis():
if isinstance(obj, class_.input_classes):
ret.append(class_)
return ret | Retuns a list of Vis classes that can handle obj. |
def coord(self, func:CoordFunc, *args, **kwargs)->'ImagePoints':
"Put `func` with `args` and `kwargs` in `self.flow_func` for later."
if 'invert' in kwargs: kwargs['invert'] = True
else: warn(f"{func.__name__} isn't implemented for {self.__class__}.")
self.flow_func.append(partial(func, *args, **kwargs))
return self | Put `func` with `args` and `kwargs` in `self.flow_func` for later. |
def transactional_async(func, args, kwds, **options):
options.setdefault('propagation', datastore_rpc.TransactionOptions.ALLOWED)
if args or kwds:
return transaction_async(lambda: func(*args, **kwds), **options)
return transaction_async(func, **options) | The async version of @ndb.transaction. |
def post_to_url(self, value):
if isinstance(value, SpamUrl):
self._post_to_url = value
else:
self._post_to_url = SpamUrl(value) | An Inbound Parse URL to send a copy of your email.
If defined, a copy of your email and its spam report will be sent here.
:param value: An Inbound Parse URL to send a copy of your email.
If defined, a copy of your email and its spam report will be sent here.
:type value: string |
def savePattern(self):
if ( self.dev == None ): return ''
buf = [REPORT_ID, ord('W'), 0xBE, 0xEF, 0xCA, 0xFE, 0, 0, 0]
return self.write(buf); | Save internal RAM pattern to flash |
def _pruning_base(self, axis=None, hs_dims=None):
if not self._is_axis_allowed(axis):
return self.as_array(weighted=False, include_transforms_for_dims=hs_dims)
return self.margin(
axis=axis, weighted=False, include_transforms_for_dims=hs_dims
) | Gets margin if across CAT dimension. Gets counts if across items.
Categorical variables are pruned based on their marginal values. If the
marginal is a 0 or a NaN, the corresponding row/column is pruned. In
case of a subvars (items) dimension, we only prune if all the counts
of the corresponding row/column are zero. |
def relevant_part(self, original, pos):
start = original.rfind(self._separator, 0, pos)
if start == -1:
start = 0
else:
start = start + len(self._separator)
end = original.find(self._separator, pos - 1)
if end == -1:
end = len(original)
return original[start:end], start, end, pos - start | calculates the subword of `original` that `pos` is in |
def _migrate_single(self, conn, migration):
with contextlib.ExitStack() as stack:
for wrapper in self._wrappers:
stack.enter_context(wrapper(conn))
migration.func(conn) | Perform a single migration starting from the given version. |
def t_surf_parameter(self, num_frame, xax):
pyl.figure(num_frame)
if xax == 'time':
xaxisarray = self.get('star_age')
elif xax == 'model':
xaxisarray = self.get('model_number')
else:
print('kippenhahn_error: invalid string for x-axis selction. needs to be "time" or "model"')
logL = self.get('log_L')
logTeff = self.get('log_Teff')
pyl.plot(xaxisarray,logL,'-k',label='log L')
pyl.plot(xaxisarray,logTeff,'-k',label='log Teff')
pyl.ylabel('log L, log Teff')
pyl.legend(loc=2)
if xax == 'time':
pyl.xlabel('t / yrs')
elif xax == 'model':
pyl.xlabel('model number') | Surface parameter evolution as a function of time or model.
Parameters
----------
num_frame : integer
Number of frame to plot this plot into.
xax : string
Either model or time to indicate what is to be used on the
x-axis |
def load_data(handle, reader=None):
if not reader:
reader = os.path.splitext(handle)[1][1:].lower()
if reader not in _READERS:
raise NeuroMError('Do not have a loader for "%s" extension' % reader)
filename = _get_file(handle)
try:
return _READERS[reader](filename)
except Exception as e:
L.exception('Error reading file %s, using "%s" loader', filename, reader)
raise RawDataError('Error reading file %s:\n%s' % (filename, str(e))) | Unpack data into a raw data wrapper |
def ser_iuwt_decomposition(in1, scale_count, scale_adjust, store_smoothed):
wavelet_filter = (1./16)*np.array([1,4,6,4,1])
detail_coeffs = np.empty([scale_count-scale_adjust, in1.shape[0], in1.shape[1]])
C0 = in1
if scale_adjust>0:
for i in range(0, scale_adjust):
C0 = ser_a_trous(C0, wavelet_filter, i)
for i in range(scale_adjust,scale_count):
C = ser_a_trous(C0, wavelet_filter, i)
C1 = ser_a_trous(C, wavelet_filter, i)
detail_coeffs[i-scale_adjust,:,:] = C0 - C1
C0 = C
if store_smoothed:
return detail_coeffs, C0
else:
return detail_coeffs | This function calls the a trous algorithm code to decompose the input into its wavelet coefficients. This is
the isotropic undecimated wavelet transform implemented for a single CPU core.
INPUTS:
in1 (no default): Array on which the decomposition is to be performed.
scale_count (no default): Maximum scale to be considered.
scale_adjust (default=0): Adjustment to scale value if first scales are of no interest.
store_smoothed (default=False):Boolean specifier for whether the smoothed image is stored or not.
OUTPUTS:
detail_coeffs Array containing the detail coefficients.
C0 (optional): Array containing the smoothest version of the input. |
def clear(self):
del self._statements_and_parameters[:]
self.keyspace = None
self.routing_key = None
if self.custom_payload:
self.custom_payload.clear() | This is a convenience method to clear a batch statement for reuse.
*Note:* it should not be used concurrently with uncompleted execution futures executing the same
``BatchStatement``. |
def enclosure_shell(self):
pairs = [(r, self.connected_paths(r, include_self=False))
for r in self.root]
corresponding = collections.OrderedDict(pairs)
return corresponding | A dictionary of path indexes which are 'shell' paths, and values
of 'hole' paths.
Returns
----------
corresponding: dict, {index of self.paths of shell : [indexes of holes]} |
def insert(self, loc, item):
if not isinstance(item, tuple):
item = (item, ) + ('', ) * (self.nlevels - 1)
elif len(item) != self.nlevels:
raise ValueError('Item must have length equal to number of '
'levels.')
new_levels = []
new_codes = []
for k, level, level_codes in zip(item, self.levels, self.codes):
if k not in level:
lev_loc = len(level)
level = level.insert(lev_loc, k)
else:
lev_loc = level.get_loc(k)
new_levels.append(level)
new_codes.append(np.insert(
ensure_int64(level_codes), loc, lev_loc))
return MultiIndex(levels=new_levels, codes=new_codes,
names=self.names, verify_integrity=False) | Make new MultiIndex inserting new item at location
Parameters
----------
loc : int
item : tuple
Must be same length as number of levels in the MultiIndex
Returns
-------
new_index : Index |
def _validate_config(strict=False):
for index in settings.get_index_names():
_validate_mapping(index, strict=strict)
for model in settings.get_index_models(index):
_validate_model(model)
if settings.get_setting("update_strategy", "full") not in ["full", "partial"]:
raise ImproperlyConfigured(
"Invalid SEARCH_SETTINGS: 'update_strategy' value must be 'full' or 'partial'."
) | Validate settings.SEARCH_SETTINGS. |
def _format_from_dict(self, format_string, **kwargs):
kwargs_copy = self.base_dict.copy()
kwargs_copy.update(**kwargs)
localpath = format_string.format(**kwargs_copy)
if kwargs.get('fullpath', False):
return self.fullpath(localpath=localpath)
return localpath | Return a formatted file name dictionary components |
def add_user(self, username, password):
self.server.user_dict[username] = password
self.server.isAuth = True | Add an user to the dictionary. |
def main(args_list=None):
args = parse_args(args_list)
print("Topiary commandline arguments:")
print(args)
df = predict_epitopes_from_args(args)
write_outputs(df, args)
print("Total count: %d" % len(df)) | Script entry-point to predict neo-epitopes from genomic variants using
Topiary. |
def stop(self):
self._presubs.remove(self._sdk_presub_params)
if self._immediacy == FirstValue.on_value_update:
self._unsubscribe_all_matching()
super(ResourceValues, self).stop() | Stop the channel |
def get_file_contents(self, pointer=False):
if self.pointer:
if pointer:
return self.old_pointed
else:
return self.old_data
else:
return self.old_data | Gets any file contents you care about. Defaults to the main file
@param pointer: The contents of the file pointer, not the pointed
at file
@return: A string of the contents |
def _double_gamma_hrf(response_delay=6,
undershoot_delay=12,
response_dispersion=0.9,
undershoot_dispersion=0.9,
response_scale=1,
undershoot_scale=0.035,
temporal_resolution=100.0,
):
hrf_length = 30
hrf = [0] * int(hrf_length * temporal_resolution)
response_peak = response_delay * response_dispersion
undershoot_peak = undershoot_delay * undershoot_dispersion
for hrf_counter in list(range(len(hrf) - 1)):
resp_pow = math.pow((hrf_counter / temporal_resolution) /
response_peak, response_delay)
resp_exp = math.exp(-((hrf_counter / temporal_resolution) -
response_peak) /
response_dispersion)
response_model = response_scale * resp_pow * resp_exp
undershoot_pow = math.pow((hrf_counter / temporal_resolution) /
undershoot_peak,
undershoot_delay)
undershoot_exp = math.exp(-((hrf_counter / temporal_resolution) -
undershoot_peak /
undershoot_dispersion))
undershoot_model = undershoot_scale * undershoot_pow * undershoot_exp
hrf[hrf_counter] = response_model - undershoot_model
return hrf | Create the double gamma HRF with the timecourse evoked activity.
Default values are based on Glover, 1999 and Walvaert, Durnez,
Moerkerke, Verdoolaege and Rosseel, 2011
Parameters
----------
response_delay : float
How many seconds until the peak of the HRF
undershoot_delay : float
How many seconds until the trough of the HRF
response_dispersion : float
How wide is the rising peak dispersion
undershoot_dispersion : float
How wide is the undershoot dispersion
response_scale : float
How big is the response relative to the peak
undershoot_scale :float
How big is the undershoot relative to the trough
scale_function : bool
Do you want to scale the function to a range of 1
temporal_resolution : float
How many elements per second are you modeling for the stimfunction
Returns
----------
hrf : multi dimensional array
A double gamma HRF to be used for convolution. |
def get_asset_details(self, **params):
res = self._request_withdraw_api('get', 'assetDetail.html', True, data=params)
if not res['success']:
raise BinanceWithdrawException(res['msg'])
return res | Fetch details on assets.
https://github.com/binance-exchange/binance-official-api-docs/blob/master/wapi-api.md#asset-detail-user_data
:param recvWindow: the number of milliseconds the request is valid for
:type recvWindow: int
:returns: API response
.. code-block:: python
{
"success": true,
"assetDetail": {
"CTR": {
"minWithdrawAmount": "70.00000000", //min withdraw amount
"depositStatus": false,//deposit status
"withdrawFee": 35, // withdraw fee
"withdrawStatus": true, //withdraw status
"depositTip": "Delisted, Deposit Suspended" //reason
},
"SKY": {
"minWithdrawAmount": "0.02000000",
"depositStatus": true,
"withdrawFee": 0.01,
"withdrawStatus": true
}
}
}
:raises: BinanceWithdrawException |
def git_show_file(path, ref):
root = get_root()
command = 'git show {}:{}'.format(ref, path)
with chdir(root):
return run_command(command, capture=True).stdout | Return the contents of a file at a given tag |
def get_alert_destination_count(self, channel=None):
if channel is None:
channel = self.get_network_channel()
rqdata = (channel, 0x11, 0, 0)
rsp = self.xraw_command(netfn=0xc, command=2, data=rqdata)
return ord(rsp['data'][1]) | Get the number of supported alert destinations
:param channel: Channel for alerts to be examined, defaults to current |
def _serialize_ep(ep, varprops, version=_default_version):
args = ep[3]
arglist = ' '.join([_serialize_argument(rarg, args[rarg], varprops)
for rarg in sorted(args, key=rargname_sortkey)])
if version < 1.1 or len(ep) < 6 or ep[5] is None:
surface = ''
else:
surface = ' "%s"' % ep[5]
lnk = None if len(ep) < 5 else ep[4]
pred = ep[1]
predstr = pred.string
return '[ {pred}{lnk}{surface} LBL: {label}{s}{args} ]'.format(
pred=predstr,
lnk=_serialize_lnk(lnk),
surface=surface,
label=str(ep[2]),
s=' ' if arglist else '',
args=arglist
) | Serialize an Elementary Predication into the SimpleMRS encoding. |
def swapon(name, priority=None):
ret = {}
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = False
return ret
if __grains__['kernel'] == 'SunOS':
if __grains__['virtual'] != 'zone':
__salt__['cmd.run']('swap -a {0}'.format(name), python_shell=False)
else:
return False
else:
cmd = 'swapon {0}'.format(name)
if priority and 'AIX' not in __grains__['kernel']:
cmd += ' -p {0}'.format(priority)
__salt__['cmd.run'](cmd, python_shell=False)
on_ = swaps()
if name in on_:
ret['stats'] = on_[name]
ret['new'] = True
return ret
return ret | Activate a swap disk
.. versionchanged:: 2016.3.2
CLI Example:
.. code-block:: bash
salt '*' mount.swapon /root/swapfile |
def _submit_topology(cmd_args, app):
cfg = app.cfg
if cmd_args.create_bundle:
ctxtype = ctx.ContextTypes.BUNDLE
elif cmd_args.service_name:
cfg[ctx.ConfigParams.FORCE_REMOTE_BUILD] = True
cfg[ctx.ConfigParams.SERVICE_NAME] = cmd_args.service_name
ctxtype = ctx.ContextTypes.STREAMING_ANALYTICS_SERVICE
sr = ctx.submit(ctxtype, app.app, cfg)
return sr | Submit a Python topology to the service.
This includes an SPL main composite wrapped in a Python topology. |
def getextensibleindex(self, key, name):
return getextensibleindex(
self.idfobjects, self.model, self.idd_info,
key, name) | Get the index of the first extensible item.
Only for internal use. # TODO : hide this
Parameters
----------
key : str
The type of IDF object. This must be in ALL_CAPS.
name : str
The name of the object to fetch.
Returns
-------
int |
def to_existing_absolute_path(string):
value = os.path.abspath(string)
if not os.path.exists( value ) or not os.path.isdir( value ):
msg = '"%r" is not a valid path to a directory.' % string
raise argparse.ArgumentTypeError(msg)
return value | Converts a path into its absolute path and verifies that it is an existing directory, or raises an ArgumentTypeError. |
def cp_string(self, source, dest, **kwargs):
assert isinstance(source, six.string_types), "source must be a string"
assert self._is_s3(dest), "Destination must be s3 location"
return self._put_string(source, dest, **kwargs) | Copies source string into the destination location.
Parameters
----------
source: string
the string with the content to copy
dest: string
the s3 location |
def recursively_resume_states(self):
super(ContainerState, self).recursively_resume_states()
for state in self.states.values():
state.recursively_resume_states() | Resume the state and all of it child states. |
def _initialize_initial_state_fluents(self):
state_fluents = self.rddl.domain.state_fluents
initializer = self.rddl.instance.init_state
self.initial_state_fluents = self._initialize_pvariables(
state_fluents,
self.rddl.domain.state_fluent_ordering,
initializer)
return self.initial_state_fluents | Returns the initial state-fluents instantiated. |
def main(args=None):
parser = argparse.ArgumentParser(description=main.__doc__)
parser.add_argument(
'--binary',
dest='mode',
action='store_const',
const="wb",
default="w",
help='write in binary mode')
parser.add_argument(
'output', metavar='FILE', type=unicode, help='Output file')
logging.basicConfig(
level=logging.DEBUG,
stream=sys.stderr,
format='[%(levelname)s elapsed=%(relativeCreated)dms] %(message)s')
args = parser.parse_args(args or sys.argv[1:])
with open(args.output, args.mode) as fd:
for line in sys.stdin:
fd.write(line) | Buffer stdin and flush, and avoid incomplete files. |
def as_dict(self):
if self.hidden:
rdict = {}
else:
def_selected = self.selected()
comps = [
{
'name': comp.name,
'default': comp.name in self.defaults,
'options': comp.get_ordered_options() if isinstance(comp, Optionable) else None
}
for comp in self
]
rdict = {
'name': self.name,
'required': self.required,
'multiple': self.multiple,
'args': self.in_name,
'returns': self.out_name,
'components': comps
}
return rdict | returns a dictionary representation of the block and of all
component options |
def obfuscate(cls, idStr):
return unicode(base64.urlsafe_b64encode(
idStr.encode('utf-8')).replace(b'=', b'')) | Mildly obfuscates the specified ID string in an easily reversible
fashion. This is not intended for security purposes, but rather to
dissuade users from depending on our internal ID structures. |
def bind(self, *args, **kw):
new_self = self.copy()
new_scopes = Object.translate_to_scopes(*args, **kw)
new_self._scopes = tuple(reversed(new_scopes)) + new_self._scopes
return new_self | Bind environment variables into this object's scope. |
def get_bounding_box(df_points):
    """Compute the axis-aligned bounding box of all points in *df_points*.

    Returns a Series named 'bounding_box' with entries x, y (minimum
    corner) followed by width, height (extent).
    """
    lower = df_points[['x', 'y']].min()
    upper = df_points[['x', 'y']].max()
    extent = upper - lower
    extent.index = 'width', 'height'
    box = pd.concat([lower, extent])
    box.name = 'bounding_box'
    return box
def plot(self):
    """Plot the posterior of the regression: envelope, three sampled
    realizations, and the observed data points."""
    figure()
    plot_envelope(self.M, self.C, self.xplot)
    # Overlay three random realizations drawn from the posterior.
    for _ in range(3):
        realization = Realization(self.M, self.C)
        plot(self.xplot, realization(self.xplot))
    plot(self.abundance, self.frye, 'k.', markersize=4)
    xlabel('Female abundance')
    ylabel('Frye density')
    title(self.name)
    axis('tight')
def _parse(self, stream, context, path):
objs = []
while True:
start = stream.tell()
test = stream.read(len(self.find))
stream.seek(start)
if test == self.find:
break
else:
subobj = self.subcon._parse(stream, context, path)
objs.append(subobj)
return objs | Parse until a given byte string is found. |
def read_config(config_file=CONFIG_FILE_DEFAULT, override_url=None):
    """Read the configuration file and return the populated ConfigParser.

    Defaults from DEFAULT_SETTINGS are loaded first, then overlaid with
    the file contents when the file is readable. A logging handler is
    installed on first use, configured from the [Logging] section.

    Args:
        config_file: Path of the INI file to read.
        override_url: When given, replaces the [Server] url option.

    Returns:
        The configuration object used by other functions.
    """
    config = ConfigParser()
    config.read_dict(DEFAULT_SETTINGS)
    try:
        # Context manager fixes the old file-handle leak; read_file
        # replaces the deprecated readfp.
        with open(config_file) as fp:
            config.read_file(fp)
        logger.debug("Using config file at " + config_file)
    except (IOError, OSError):
        # Only missing/unreadable files fall back to defaults. The old
        # bare 'except:' also swallowed parse errors and interrupts.
        logger.error(
            "Could not find {0}, running with defaults.".format(config_file))
    if not logger.handlers:
        # Configure logging exactly once per process.
        if config.getboolean("Logging", "to_file"):
            handler = logging.FileHandler(config.get("Logging", "file"))
        else:
            handler = logging.StreamHandler()
        handler.setFormatter(logging.Formatter(
            config.get("Logging", "format")))
        logger.addHandler(handler)
        logger.setLevel(config.get("Logging", "level"))
    if override_url:
        config['Server']['url'] = override_url
    return config
def collapse_all(self):
    """Collapse every position; a no-op if the underlying tree does not
    support the collapse API."""
    if not implementsCollapseAPI(self._tree):
        return
    self._tree.collapse_all()
    # Focus must move to the root: all other positions are now hidden.
    self.set_focus(self._tree.root)
    self._walker.clear_cache()
    self.refresh()
def max_rigid_id(self):
    """Return the maximum rigid body ID contained in the Compound.

    Usually used by compound.root to determine the maximum rigid_id in
    the containment hierarchy.

    Returns
    -------
    int or None
        The maximum rigid body ID found, or None when no particle has
        a rigid body ID set.
    """
    try:
        # Generator avoids materializing an intermediate list.
        return max(particle.rigid_id for particle in self.particles()
                   if particle.rigid_id is not None)
    except ValueError:
        # max() raises ValueError on an empty sequence: no rigid ids set.
        return None
def _import_status(data, item, repo_name, repo_tag):
status = item['status']
try:
if 'Downloading from' in status:
return
elif all(x in string.hexdigits for x in status):
data['Image'] = '{0}:{1}'.format(repo_name, repo_tag)
data['Id'] = status
except (AttributeError, TypeError):
pass | Process a status update from docker import, updating the data structure |
def _get_band(self, high_res, low_res, color, ratio):
if self.high_resolution_band == color:
ret = high_res
else:
ret = low_res * ratio
ret.attrs = low_res.attrs.copy()
return ret | Figure out what data should represent this color. |
def override_account_fields(self,
                            settled_cash=not_overridden,
                            accrued_interest=not_overridden,
                            buying_power=not_overridden,
                            equity_with_loan=not_overridden,
                            total_positions_value=not_overridden,
                            total_positions_exposure=not_overridden,
                            regt_equity=not_overridden,
                            regt_margin=not_overridden,
                            initial_margin_requirement=not_overridden,
                            maintenance_margin_requirement=not_overridden,
                            available_funds=not_overridden,
                            excess_liquidity=not_overridden,
                            cushion=not_overridden,
                            day_trades_remaining=not_overridden,
                            leverage=not_overridden,
                            net_leverage=not_overridden,
                            net_liquidation=not_overridden):
    """Override fields on ``self.account``.

    Every parameter defaults to the ``not_overridden`` sentinel, so only
    the arguments a caller explicitly supplies are recorded as overrides.
    """
    # Mark the cached account stale so the overrides get re-applied.
    self._dirty_account = True
    # At this point locals() contains exactly this function's parameters
    # (plus ``self``); keep only those that were explicitly passed in.
    self._account_overrides = kwargs = {
        k: v for k, v in locals().items() if v is not not_overridden
    }
    # ``self`` is never the sentinel, so it always slips through — drop it.
    del kwargs['self']
def assert_count_equal(sequence1, sequence2, msg_fmt="{msg}"):
    """Compare the items of two sequences, ignoring order.

    Items are counted in each sequence, so duplicates must match as
    well. On mismatch, fail() is called with a message listing the items
    missing from each sequence.

    The following msg_fmt arguments are supported:
    * msg - the default error message
    * first - first sequence
    * second - second sequence
    """
    # Match each item of sequence1 against a shrinking pool of sequence2.
    missing_in_first = list(sequence2)
    missing_in_second = []
    for item in sequence1:
        if item in missing_in_first:
            missing_in_first.remove(item)
        else:
            missing_in_second.append(item)
    if not (missing_in_first or missing_in_second):
        return
    parts = []
    if missing_in_first:
        parts.append("missing from sequence 1: "
                     + ", ".join(repr(i) for i in missing_in_first))
    if missing_in_second:
        parts.append("missing from sequence 2: "
                     + ", ".join(repr(i) for i in missing_in_second))
    fail(
        msg_fmt.format(
            msg="; ".join(parts), first=sequence1, second=sequence2
        )
    )
def execute(self, table_name=None, table_mode='create', use_cache=True, priority='interactive',
            allow_large_results=False, dialect=None, billing_tier=None):
    """Run the query, blocking until complete, and return the results.

    Thin synchronous wrapper around execute_async: starts the job, waits
    for it, caches the results on self._results. See execute_async for
    the semantics of each parameter.

    Returns:
        The QueryResultsTable for the query.

    Raises:
        Exception if the query could not be executed.
    """
    async_job = self.execute_async(
        table_name=table_name,
        table_mode=table_mode,
        use_cache=use_cache,
        priority=priority,
        allow_large_results=allow_large_results,
        dialect=dialect,
        billing_tier=billing_tier,
    )
    self._results = async_job.wait()
    return self._results
def _draw_rectangle_path(self, context, width, height, only_get_extents=False):
    """Draw the (correctly rotated) rectangle path for the port.

    Height refers to the border thickness and width to the length of the
    port.

    :param context: The cairo context to draw on
    :param float width: The width of the rectangle
    :param float height: The height of the rectangle
    :param bool only_get_extents: When True, the path is only measured
        and then discarded; the path extents are returned instead of
        leaving the path on the context.
    """
    c = context
    c.save()
    # Ports on the left/right border need the rectangle rotated by 90
    # degrees so it aligns with the (vertical) border.
    if self.side is SnappedSide.LEFT or self.side is SnappedSide.RIGHT:
        c.rotate(deg2rad(90))
    # Trace the rectangle centered on the current point.
    c.rel_move_to(-width / 2., - height / 2.)
    c.rel_line_to(width, 0)
    c.rel_line_to(0, height)
    c.rel_line_to(-width, 0)
    c.close_path()
    c.restore()
    if only_get_extents:
        extents = c.path_extents()
        c.new_path()  # discard the path: the caller only wanted the bounds
        return extents
def model_predictions(self):
    """Return the modal (highest-frequency) category of the dependent
    variable for each row, taken from the terminal node the row fell into.

    Raises:
        ValueError: If the observed column is continuous, where a modal
            prediction is undefined.
    """
    if isinstance(self.observed, ContinuousColumn):
        # Bug fix: the error object was previously *returned*, not raised.
        raise ValueError("Cannot make model predictions on a continuous scale")
    pred = np.zeros(self.data_size).astype('object')
    for node in self:
        if node.is_terminal:
            # Prediction is the most frequent member category of the node.
            pred[node.indices] = max(node.members, key=node.members.get)
    return pred
def console_width(kwargs):
    """Determine the console width.

    An explicit (truthy) 'width' in *kwargs* wins; otherwise the
    platform-detected width is used, falling back to 80 when detection
    yields nothing.
    """
    if sys.platform.startswith('win'):
        detected = _find_windows_console_width()
    else:
        detected = _find_unix_console_width()
    width = kwargs.get('width', None)
    if width:
        return width
    return detected or 80
def get_factory_bundle(self, name):
    """Retrieve the Pelix Bundle object that registered the given factory.

    :param name: The name of a factory
    :return: The Bundle that registered the given factory
    :raise ValueError: Invalid factory
    """
    with self.__factories_lock:
        if name not in self.__factories:
            raise ValueError("Unknown factory '{0}'".format(name))
        factory = self.__factories[name]
        factory_context = getattr(factory, constants.IPOPO_FACTORY_CONTEXT)
        return factory_context.bundle_context.get_bundle()
def get_uris(self, base_uri, filter_list=None):
    """Return the set of internal URIs found in the page's anchor tags.

    Relative links (leading '/') are rebased onto *base_uri*; links that
    match *filter_list* are excluded.
    """
    found = set()
    for anchor in self.parsedpage.get_nodes_by_selector('a'):
        if 'href' not in anchor.attrib:
            continue
        href = anchor.attrib['href']
        # Keep only internal links: absolute under base_uri, or relative.
        if not (href.startswith(base_uri) or href.startswith('/')):
            continue
        if is_uri_to_be_filtered(href, filter_list):
            continue
        found.add(re.sub(r'^/', base_uri, href))
    return found
def get_group_details(self, group):
    """Return the group's details as a dict, omitting None-valued fields.

    An unknown group yields an empty dict.
    """
    try:
        details = self._get_group(group.name)
        details = preload(details, database=self._database)
    except ObjectDoesNotExist:
        return {}
    return {key: value for key, value in details.items()
            if value is not None}
def make_qemu_dirs(max_qemu_id, output_dir, topology_name):
    """Create Qemu VM working directories if required.

    :param int max_qemu_id: Number of directories to create (None: none)
    :param str output_dir: Output directory
    :param str topology_name: Topology name
    """
    if max_qemu_id is None:
        return
    base = os.path.join(output_dir, topology_name + '-files', 'qemu')
    for vm_id in range(1, max_qemu_id + 1):
        os.makedirs(os.path.join(base, 'vm-%s' % vm_id))
def apply_request_and_page_to_values(self, request, page=None):
    """Propagate the request and page config to every value so each one
    can decide whether it is active.

    Args:
        request: The incoming request object.
        page: Optional page configuration.
    """
    # The old 'value_is_set = False' local was dead code and was removed.
    for value in self._values:
        value.apply_request_and_page(request, page)
def _write_json_blob(encoded_value, pipeline_id=None):
    """Write a JSON-encoded string to a Cloud Storage file.

    The blob is stored in the default GCS bucket under the
    "appengine_pipeline" directory, optionally segmented by pipeline_id,
    with a random UUID filename.

    Args:
        encoded_value: The encoded JSON string.
        pipeline_id: A pipeline id to segment files in Cloud Storage; if
            None, the file is created directly under appengine_pipeline.

    Returns:
        The blobstore.BlobKey for the file that was created.

    Raises:
        Exception: If the application has no default GCS bucket set.
    """
    default_bucket = app_identity.get_default_gcs_bucket_name()
    if default_bucket is None:
        raise Exception(
            "No default cloud storage bucket has been set for this application. "
            "This app was likely created before v1.9.0, please see: "
            "https://cloud.google.com/appengine/docs/php/googlestorage/setup")
    path_components = ['/', default_bucket, "appengine_pipeline"]
    if pipeline_id:
        path_components.append(pipeline_id)
    # Random file name avoids collisions between concurrent writers.
    path_components.append(uuid.uuid4().hex)
    file_name = posixpath.join(*path_components)
    with cloudstorage.open(file_name, 'w', content_type='application/json') as f:
        # Write in _MAX_JSON_SIZE chunks — presumably to stay under GCS
        # per-call write limits; TODO confirm.
        for start_index in xrange(0, len(encoded_value), _MAX_JSON_SIZE):
            end_index = start_index + _MAX_JSON_SIZE
            f.write(encoded_value[start_index:end_index])
    # Expose the GCS object through the Blobstore API ("/gs" prefix).
    key_str = blobstore.create_gs_key("/gs" + file_name)
    logging.debug("Created blob for filename = %s gs_key = %s", file_name, key_str)
    return blobstore.BlobKey(key_str)
def update_from(self, mapping):
    """Update the values of already-existing parameters from *mapping*.

    Unknown keys are ignored; Parameter instances contribute their
    .value attribute instead of the object itself.
    """
    for name, new_value in mapping.items():
        if name not in self:
            continue
        if isinstance(new_value, Parameter):
            new_value = new_value.value
        self[name].value = new_value
def refresh(self):
    """Explicitly refresh the index, making all operations performed
    since the last refresh available for search; clears the dirty flag."""
    self._changed = False
    indices = self.es.indices
    indices.refresh(index=self.index)
def check_arguments(cls, conf):
    """Sanity checks for options needed for configfile mode.

    Verifies that the configured file exists and is readable by opening
    it for reading.

    :raise ArgsError: If the config file cannot be opened.
    """
    try:
        # Context manager guarantees the probe handle is closed, even if
        # anything between open and close were ever to raise.
        with open(conf['file'], "r"):
            pass
    except IOError as e:
        raise ArgsError("Cannot open config file '%s': %s" %
                        (conf['file'], e))
def warn(message, category=PsyPlotWarning, logger=None):
    """Wrapper around warnings.warn for non-critical warnings.

    If *logger* (a logging.Logger instance) is given, its name is
    prepended to the message. stacklevel=3 attributes the warning to the
    caller's caller.
    """
    text = message if logger is None else (
        "[Warning by %s]\n%s" % (logger.name, message))
    warnings.warn(text, category, stacklevel=3)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.