def diffPrefsPrior(priorstring):
"""Parses `priorstring` and returns `prior` tuple."""
assert isinstance(priorstring, str)
prior = priorstring.split(',')
if len(prior) == 3 and prior[0] == 'invquadratic':
[c1, c2] = [float(x) for x in prior[1 : ]]
        assert c1 > 0 and c2 > 0, "c1 and c2 must be > 0 for invquadratic prior"
return ('invquadratic', c1, c2)
else:
raise ValueError("Invalid diffprefsprior: {0}".format(priorstring))
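
# Usage sketch for the parser above (values are illustrative):
prior = diffPrefsPrior('invquadratic,150,0.5')
assert prior == ('invquadratic', 150.0, 0.5)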
def spherical_sum(image, binning_factor=1.0):
"""Sum image values over concentric annuli.
Parameters
----------
image : `DiscreteLp` element
Input data whose radial sum should be computed.
binning_factor : positive float, optional
Reduce the number of output bins by this factor. Increasing this
        number can help reduce fluctuations due to the variance of points
that fall in a particular annulus.
A binning factor of ``1`` corresponds to a bin size equal to
image pixel size for images with square pixels, otherwise ::
max(norm2(c)) / norm2(shape)
where the maximum is taken over all corners of the image domain.
Returns
-------
spherical_sum : 1D `DiscreteLp` element
The spherical sum of ``image``. Its space is one-dimensional with
domain ``[0, rmax]``, where ``rmax`` is the radius of the smallest
ball containing ``image.space.domain``. Its shape is ``(N,)`` with ::
N = int(sqrt(sum(n ** 2 for n in image.shape)) / binning_factor)
"""
r = np.sqrt(sum(xi ** 2 for xi in image.space.meshgrid))
rmax = max(np.linalg.norm(c) for c in image.space.domain.corners())
n_bins = int(np.sqrt(sum(n ** 2 for n in image.shape)) / binning_factor)
rad_sum, _ = np.histogram(r, weights=image, bins=n_bins, range=(0, rmax))
out_spc = uniform_discr(min_pt=0, max_pt=rmax, shape=n_bins,
impl=image.space.impl, dtype=image.space.dtype,
interp="linear", axis_labels=["$r$"])
return out_spc.element(rad_sum)
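
# A minimal numpy-only sketch of the same radial-binning idea, without the
# `DiscreteLp`/`uniform_discr` machinery. Assumes a square image centered at
# the origin with unit pixel spacing; `radial_sum` is a hypothetical name.
import numpy as np

def radial_sum(image, binning_factor=1.0):
    n = image.shape[0]
    y, x = np.mgrid[:n, :n] - (n - 1) / 2.0
    r = np.sqrt(x ** 2 + y ** 2)                      # radius of each pixel
    rmax = np.sqrt(2) * n / 2.0                       # corner distance
    n_bins = int(np.sqrt(2) * n / binning_factor)
    rad_sum, _ = np.histogram(r, weights=image, bins=n_bins, range=(0, rmax))
    return rad_sum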
def find_host_network_interface_by_id(self, id_p):
"""Searches through all host network interfaces for an interface with
the given GUID.
The method returns an error if the given GUID does not
correspond to any host network interface.
in id_p of type str
GUID of the host network interface to search for.
return network_interface of type :class:`IHostNetworkInterface`
Found host network interface object.
"""
if not isinstance(id_p, basestring):
raise TypeError("id_p can only be an instance of type basestring")
network_interface = self._call("findHostNetworkInterfaceById",
in_p=[id_p])
network_interface = IHostNetworkInterface(network_interface)
return network_interface
def _init_template(self, cls, base_init_template):
'''This would be better as an override for Gtk.Widget'''
# TODO: could disallow using a metaclass.. but this is good enough
# .. if you disagree, feel free to fix it and issue a PR :)
if self.__class__ is not cls:
raise TypeError("Inheritance from classes with @GtkTemplate decorators "
"is not allowed at this time")
connected_signals = set()
self.__connected_template_signals__ = connected_signals
base_init_template(self)
for name in self.__gtemplate_widgets__:
widget = self.get_template_child(cls, name)
self.__dict__[name] = widget
if widget is None:
# Bug: if you bind a template child, and one of them was
# not present, then the whole template is broken (and
# it's not currently possible for us to know which
# one is broken either -- but the stderr should show
# something useful with a Gtk-CRITICAL message)
raise AttributeError("A missing child widget was set using "
"GtkTemplate.Child and the entire "
"template is now broken (widgets: %s)" %
', '.join(self.__gtemplate_widgets__))
for name in self.__gtemplate_methods__.difference(connected_signals):
errmsg = ("Signal '%s' was declared with @GtkTemplate.Callback " +
"but was not present in template") % name
warnings.warn(errmsg, GtkTemplateWarning)
def from_raw(self, file_names=None, **kwargs):
"""Load a raw data-file.
Args:
file_names (list of raw-file names): uses CellpyData.file_names if
None. If the list contains more than one file name, then the
runs will be merged together.
"""
# This function only loads one test at a time (but could contain several
# files). The function from_res() also implements loading several
# datasets (using list of lists as input).
if file_names:
self.file_names = file_names
if not isinstance(file_names, (list, tuple)):
self.file_names = [file_names, ]
# file_type = self.tester
raw_file_loader = self.loader
set_number = 0
test = None
counter = 0
self.logger.debug("start iterating through file(s)")
for f in self.file_names:
self.logger.debug("loading raw file:")
self.logger.debug(f"{f}")
new_tests = raw_file_loader(f, **kwargs)
if new_tests:
if test is not None:
self.logger.debug("continuing reading files...")
_test = self._append(test[set_number], new_tests[set_number])
if not _test:
self.logger.warning(f"EMPTY TEST: {f}")
continue
test[set_number] = _test
self.logger.debug("added this test - started merging")
for j in range(len(new_tests[set_number].raw_data_files)):
raw_data_file = new_tests[set_number].raw_data_files[j]
file_size = new_tests[set_number].raw_data_files_length[j]
test[set_number].raw_data_files.append(raw_data_file)
test[set_number].raw_data_files_length.append(file_size)
counter += 1
if counter > 10:
self.logger.debug("ERROR? Too many files to merge")
raise ValueError("Too many files to merge - "
"could be a p2-p3 zip thing")
else:
self.logger.debug("getting data from first file")
if new_tests[set_number].no_data:
self.logger.debug("NO DATA")
else:
test = new_tests
else:
self.logger.debug("NOTHING LOADED")
self.logger.debug("finished loading the raw-files")
test_exists = False
if test:
if test[0].no_data:
            self.logger.debug("the first dataset (or only dataset) loaded from the raw data file is empty")
else:
test_exists = True
if test_exists:
if not prms.Reader.sorted_data:
self.logger.debug("sorting data")
test[set_number] = self._sort_data(test[set_number])
self.datasets.append(test[set_number])
else:
self.logger.warning("No new datasets added!")
self.number_of_datasets = len(self.datasets)
self.status_datasets = self._validate_datasets()
self._invent_a_name()
return self
def init_session(self):
"""
    Defines a session object for making requests.
"""
if self.session:
self.session.close()
self.session = make_session(self.username,
self.password,
self.bearer_token,
self.extra_headers_dict)
def __optimize_configuration(self):
"""!
    @brief Finds quasi-optimal medoids and updates the clusters accordingly, following the algorithm's rules.
"""
index_neighbor = 0
while (index_neighbor < self.__maxneighbor):
# get random current medoid that is to be replaced
current_medoid_index = self.__current[random.randint(0, self.__number_clusters - 1)]
current_medoid_cluster_index = self.__belong[current_medoid_index]
# get new candidate to be medoid
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
while candidate_medoid_index in self.__current:
candidate_medoid_index = random.randint(0, len(self.__pointer_data) - 1)
candidate_cost = 0.0
for point_index in range(0, len(self.__pointer_data)):
if point_index not in self.__current:
# get non-medoid point and its medoid
point_cluster_index = self.__belong[point_index]
point_medoid_index = self.__current[point_cluster_index]
# get other medoid that is nearest to the point (except current and candidate)
other_medoid_index = self.__find_another_nearest_medoid(point_index, current_medoid_index)
other_medoid_cluster_index = self.__belong[other_medoid_index]
# for optimization calculate all required distances
# from the point to current medoid
distance_current = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[current_medoid_index])
# from the point to candidate median
distance_candidate = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[candidate_medoid_index])
# from the point to nearest (own) medoid
distance_nearest = float('inf')
if ( (point_medoid_index != candidate_medoid_index) and (point_medoid_index != current_medoid_cluster_index) ):
distance_nearest = euclidean_distance_square(self.__pointer_data[point_index], self.__pointer_data[point_medoid_index])
# apply rules for cost calculation
if (point_cluster_index == current_medoid_cluster_index):
# case 1:
if (distance_candidate >= distance_nearest):
candidate_cost += distance_nearest - distance_current
# case 2:
else:
candidate_cost += distance_candidate - distance_current
elif (point_cluster_index == other_medoid_cluster_index):
# case 3 ('nearest medoid' is the representative object of that cluster and object is more similar to 'nearest' than to 'candidate'):
if (distance_candidate > distance_nearest):
                        pass
# case 4:
else:
candidate_cost += distance_candidate - distance_nearest
if (candidate_cost < 0):
# set candidate that has won
self.__current[current_medoid_cluster_index] = candidate_medoid_index
# recalculate clusters
self.__update_clusters(self.__current)
# reset iterations and starts investigation from the begining
index_neighbor = 0
else:
index_neighbor += 1
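
# Standalone sketch of the per-point swap-cost rule applied above (the
# classic CLARANS cases); distances are assumed to be squared Euclidean,
# and the function name is hypothetical:
def swap_cost_for_point(d_current, d_candidate, d_nearest,
                        in_current_cluster, in_other_nearest_cluster):
    """Cost contribution of one non-medoid point when the current medoid is
    replaced by the candidate; negative totals favor the swap."""
    if in_current_cluster:
        if d_candidate >= d_nearest:
            # case 1: the point migrates to its second-nearest medoid
            return d_nearest - d_current
        # case 2: the point stays, now served by the candidate medoid
        return d_candidate - d_current
    if in_other_nearest_cluster and d_candidate <= d_nearest:
        # case 4: the point migrates from its own medoid to the candidate
        return d_candidate - d_nearest
    # case 3: no change for this point
    return 0.0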
def accuracy(self):
"""Get accuracy numbers for each target and overall.
    Returns:
      A DataFrame with 'target', 'accuracy', and 'count' columns. It also contains the
      overall accuracy, with target being '_all'.
Raises:
Exception if the CSV headers do not include 'target' or 'predicted', or BigQuery
does not return 'target' or 'predicted' column.
"""
if self._input_csv_files:
df = self._get_data_from_csv_files()
if 'target' not in df or 'predicted' not in df:
raise ValueError('Cannot find "target" or "predicted" column')
labels = sorted(set(df['target']) | set(df['predicted']))
accuracy_results = []
for label in labels:
correct_count = len(df[(df['target'] == df['predicted']) & (df['target'] == label)])
total_count = len(df[(df['target'] == label)])
accuracy_results.append({
'target': label,
'accuracy': float(correct_count) / total_count if total_count > 0 else 0,
'count': total_count
})
total_correct_count = len(df[(df['target'] == df['predicted'])])
if len(df) > 0:
total_accuracy = float(total_correct_count) / len(df)
accuracy_results.append({'target': '_all', 'accuracy': total_accuracy, 'count': len(df)})
return pd.DataFrame(accuracy_results)
elif self._bigquery:
query = bq.Query("""
SELECT
target,
SUM(CASE WHEN target=predicted THEN 1 ELSE 0 END)/COUNT(*) as accuracy,
COUNT(*) as count
FROM
%s
GROUP BY
target""" % self._bigquery)
query_all = bq.Query("""
SELECT
"_all" as target,
SUM(CASE WHEN target=predicted THEN 1 ELSE 0 END)/COUNT(*) as accuracy,
COUNT(*) as count
FROM
%s""" % self._bigquery)
df = self._get_data_from_bigquery([query, query_all])
return df
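
# Toy illustration of the per-class accuracy computed in the CSV branch
# above (standalone; no CSV or BigQuery plumbing needed):
import pandas as pd

toy = pd.DataFrame({'target':    ['cat', 'cat', 'dog', 'dog'],
                    'predicted': ['cat', 'dog', 'dog', 'dog']})
per_class = toy.groupby('target').apply(
    lambda g: (g['target'] == g['predicted']).mean())
# per_class: cat -> 0.5, dog -> 1.0
overall = (toy['target'] == toy['predicted']).mean()   # 0.75, the '_all' row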
def most_seen_creators_by_works_card(work_kind=None, role_name=None, num=10):
"""
Displays a card showing the Creators that are associated with the most Works.
e.g.:
{% most_seen_creators_by_works_card work_kind='movie' role_name='Director' num=5 %}
"""
object_list = most_seen_creators_by_works(
work_kind=work_kind, role_name=role_name, num=num)
object_list = chartify(object_list, 'num_works', cutoff=1)
# Attempt to create a sensible card title...
if role_name:
# Yes, this pluralization is going to break at some point:
creators_name = '{}s'.format(role_name.capitalize())
else:
creators_name = 'People/groups'
if work_kind:
works_name = Work.get_kind_name_plural(work_kind).lower()
else:
works_name = 'works'
card_title = '{} with most {}'.format(creators_name, works_name)
return {
'card_title': card_title,
'score_attr': 'num_works',
'object_list': object_list,
}
def normalize_pred_string(predstr):
"""
Normalize the predicate string *predstr* to a conventional form.
This makes predicate strings more consistent by removing quotes and
the `_rel` suffix, and by lowercasing them.
Examples:
>>> normalize_pred_string('"_dog_n_1_rel"')
'_dog_n_1'
>>> normalize_pred_string('_dog_n_1')
'_dog_n_1'
"""
tokens = [t for t in split_pred_string(predstr)[:3] if t is not None]
if predstr.lstrip('\'"')[:1] == '_':
tokens = [''] + tokens
return '_'.join(tokens).lower()
def use(self, middleware=None, path='/', method_mask=HTTPMethod.ALL):
"""
Use the middleware (a callable with parameters res, req, next)
    upon requests matching the provided path.
A None path matches every request.
Returns the middleware so this method may be used as a decorator.
Args:
middleware (callable): A function with signature '(req, res)'
to be called with every request which matches path.
path (str or regex): Object used to test the requests
path. If it matches, either by equality or a successful
regex match, the middleware is called with the req/res
pair.
method_mask (Optional[HTTPMethod]): Filters requests by HTTP
method. The HTTPMethod enum behaves as a bitmask, so
multiple methods may be joined by `+` or `\|`, removed
with `-`, or toggled with `^`
(e.g. `HTTPMethod.GET + HTTPMethod.POST`,
`HTTPMethod.ALL - HTTPMethod.DELETE`).
Returns:
Returns the provided middleware; a requirement for this method
to be used as a decorator.
"""
# catch decorator pattern
if middleware is None:
return lambda mw: self.use(mw, path, method_mask)
if hasattr(middleware, '__growler_router'):
router = getattr(middleware, '__growler_router')
if isinstance(router, (types.MethodType,)):
router = router()
self.add_router(path, router)
elif isinstance(type(middleware), RouterMeta):
router = middleware._RouterMeta__growler_router()
self.add_router(path, router)
elif hasattr(middleware, '__iter__'):
for mw in middleware:
self.use(mw, path, method_mask)
else:
log.info("{} Using {} on path {}", id(self), middleware, path)
self.middleware.add(path=path,
func=middleware,
method_mask=method_mask)
return middleware
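
# Decorator usage sketch; `app` stands in for an instance of the class above
# (the constructor name is assumed, not taken from the source):
#
#     app = Application()
#
#     @app.use(path='/api')
#     def log_requests(req, res):
#         print(req.path)
#
# Calling `use` without middleware returns a one-argument lambda, so the
# decorated function is registered and then returned unchanged.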
def set_offset_and_sequence_number(self, event_data):
"""
Updates offset based on event.
:param event_data: A received EventData with valid offset and sequenceNumber.
:type event_data: ~azure.eventhub.common.EventData
"""
    if not event_data:
        raise ValueError("event_data must not be None")
self.offset = event_data.offset.value
self.sequence_number = event_data.sequence_number
def get_template_names(self):
"""
Dispatch template according to the kind of request: ajax or normal.
"""
if self.request.is_ajax():
return [self.list_template_name]
else:
return super(Search, self).get_template_names()
def getPlatformsByName(platformNames=['all'], mode=None, tags=[], excludePlatformNames=[]):
"""Method that recovers the names of the <Platforms> in a given list.
:param platformNames: List of strings containing the possible platforms.
:param mode: The mode of the search. The following can be chosen: ["phonefy", "usufy", "searchfy"].
:param tags: Just in case the method to select the candidates is a series of tags.
:param excludePlatformNames: List of strings to be excluded from the search.
:return: Array of <Platforms> classes.
"""
allPlatformsList = getAllPlatformObjects(mode)
platformList = []
# Tags has priority over platform
if "all" in platformNames and len(tags) == 0:
# Last condition: checking if "all" has been provided
for plat in allPlatformsList:
if str(plat.platformName).lower() not in excludePlatformNames:
platformList.append(plat)
return platformList
else:
# going through the regexpList
for name in platformNames:
if name not in excludePlatformNames:
for plat in allPlatformsList:
# Verifying if the parameter was provided
if name == str(plat.platformName).lower():
platformList.append(plat)
break
# We need to perform additional checks to verify the Wikipedia platforms, which are called with a single parameter
try:
if name == str(plat.parameterName).lower():
platformList.append(plat)
break
                    except AttributeError:
pass
# Verifying if any of the platform tags match the original tag
for t in plat.tags:
if t in tags:
platformList.append(plat)
break
# If the platformList is empty, we will return all
if platformList == []:
return allPlatformsList
else:
return platformList
def load_history(self, f):
"""Load the history of a ``NeuralNet`` from a json file. See
``save_history`` for examples.
Parameters
----------
f : file-like object or str
"""
# TODO: Remove warning in a future release
warnings.warn(
"load_history is deprecated and will be removed in the next "
"release, please use load_params with the f_history keyword",
DeprecationWarning)
self.history = History.from_file(f)
def configuration(self, event):
"""Return all configurable components' schemata"""
try:
self.log("Schemarequest for all configuration schemata from",
event.user.account.name, lvl=debug)
response = {
'component': 'hfos.events.schemamanager',
'action': 'configuration',
'data': configschemastore
}
self.fireEvent(send(event.client.uuid, response))
except Exception as e:
self.log("ERROR:", e)
def set_paths(etc_paths=["/etc/"]):
"""
Sets the paths where the configuration files will be searched
* You can have multiple configuration files (e.g. in the /etc/default folder
and in /etc/appfolder/)
"""
global _ETC_PATHS
_ETC_PATHS = []
for p in etc_paths:
_ETC_PATHS.append(os.path.expanduser(p))
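
# Example: search a user-level directory before the system-wide defaults
# ('myapp' is a placeholder; paths go through os.path.expanduser, so '~' works):
set_paths(["~/.config/myapp/", "/etc/myapp/", "/etc/default/"])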
def is_carrying_minerals(self) -> bool:
""" Checks if a worker or MULE is carrying (gold-)minerals. """
return any(
buff.value in self._proto.buff_ids
for buff in {BuffId.CARRYMINERALFIELDMINERALS, BuffId.CARRYHIGHYIELDMINERALFIELDMINERALS}
)
def map_tree(visitor, tree):
"""Apply function to nodes"""
newn = [map_tree(visitor, node) for node in tree.nodes]
return visitor(tree, newn)
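
# Tiny illustration with a minimal node type (hypothetical; any object
# exposing a `.nodes` sequence works with map_tree):
class Node:
    def __init__(self, value, nodes=()):
        self.value = value
        self.nodes = list(nodes)

tree = Node(1, [Node(2), Node(3, [Node(4)])])
doubled = map_tree(lambda n, kids: Node(n.value * 2, kids), tree)
assert doubled.value == 2 and doubled.nodes[1].nodes[0].value == 8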
def _make_lcdproc(
lcd_host, lcd_port, retry_config,
charset=DEFAULT_LCDPROC_CHARSET, lcdd_debug=False):
"""Create and connect to the LCDd server.
Args:
lcd_host (str): the hostname to connect to
lcd_prot (int): the port to connect to
charset (str): the charset to use when sending messages to lcdproc
lcdd_debug (bool): whether to enable full LCDd debug
retry_attempts (int): the number of connection attempts
retry_wait (int): the time to wait between connection attempts
retry_backoff (int): the backoff for increasing inter-attempt delay
Returns:
lcdproc.server.Server
"""
class ServerSpawner(utils.AutoRetryCandidate):
"""Spawn the server, using auto-retry."""
@utils.auto_retry
def connect(self):
return lcdrunner.LcdProcServer(
lcd_host, lcd_port, charset=charset, debug=lcdd_debug)
spawner = ServerSpawner(retry_config=retry_config, logger=logger)
try:
return spawner.connect()
except socket.error as e:
logger.error('Unable to connect to lcdproc %s:%s : %r', lcd_host, lcd_port, e)
raise SystemExit(1)
def anonymous_required(func=None, url=None):
"""Required that the user is not logged in."""
url = url or "/"
def _dec(view_func):
@wraps(view_func, assigned=available_attrs(view_func))
def _wrapped_view(request, *args, **kwargs):
if request.user.is_authenticated():
return redirect(url)
else:
return view_func(request, *args, **kwargs)
return _wrapped_view
if func is None:
return _dec
else:
return _dec(func)
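
# Usage sketch for a Django view (view and template names are hypothetical):
from django.shortcuts import render

@anonymous_required(url='/dashboard/')
def signup(request):
    return render(request, 'signup.html')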
def remove_record(self, orcid_id, token, request_type, put_code):
"""Add a record to a profile.
Parameters
----------
:param orcid_id: string
Id of the author.
:param token: string
Token received from OAuth 2 3-legged authorization.
:param request_type: string
One of 'activities', 'education', 'employment', 'funding',
'peer-review', 'work'.
:param put_code: string
The id of the record. Can be retrieved using read_record_* method.
In the result of it, it will be called 'put-code'.
"""
self._update_activities(orcid_id, token, requests.delete, request_type,
put_code=put_code)
def get_all_objects(self):
"Return pointers to all GC tracked objects"
    for generation in self.gc_generations:
generation_head_ptr = pygc_head_ptr = generation.head.get_pointer()
generation_head_addr = generation_head_ptr._value
while True:
            # The _PyObject_GC_UNTRACK macro guarantees that gc_prev
            # always points to some value. There is still a race condition
            # if the PyGC_Head gets freed and overwritten just before we
            # look at it.
pygc_head_ptr = pygc_head_ptr.deref().gc_next
if pygc_head_ptr._value == generation_head_addr:
break
yield pygc_head_ptr.deref().get_object_ptr()
def native(self, value, context=None):
"""Convert the given string into a list of substrings."""
separator = self.separator.strip() if self.strip and hasattr(self.separator, 'strip') else self.separator
value = super().native(value, context)
if value is None:
return self.cast()
if hasattr(value, 'split'):
value = value.split(separator)
value = self._clean(value)
try:
return self.cast(value) if self.cast else value
    except Exception as e:
        raise Concern("{0} caught, failed to perform array transform: {1}".format(
            e.__class__.__name__, str(e)))
def get_assessments_taken_by_query(self, assessment_taken_query):
"""Gets a list of ``AssessmentTaken`` elements matching the given assessment taken query.
arg: assessment_taken_query
(osid.assessment.AssessmentTakenQuery): the assessment
taken query
return: (osid.assessment.AssessmentTakenList) - the returned
``AssessmentTakenList``
raise: NullArgument - ``assessment_taken_query`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure occurred
raise: Unsupported - ``assessment_taken_query`` is not of this
service
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for
# osid.resource.ResourceQuerySession.get_resources_by_query
and_list = list()
or_list = list()
for term in assessment_taken_query._query_terms:
if '$in' in assessment_taken_query._query_terms[term] and '$nin' in assessment_taken_query._query_terms[term]:
and_list.append(
{'$or': [{term: {'$in': assessment_taken_query._query_terms[term]['$in']}},
{term: {'$nin': assessment_taken_query._query_terms[term]['$nin']}}]})
else:
and_list.append({term: assessment_taken_query._query_terms[term]})
for term in assessment_taken_query._keyword_terms:
or_list.append({term: assessment_taken_query._keyword_terms[term]})
if or_list:
and_list.append({'$or': or_list})
view_filter = self._view_filter()
if view_filter:
and_list.append(view_filter)
if and_list:
query_terms = {'$and': and_list}
collection = JSONClientValidated('assessment',
collection='AssessmentTaken',
runtime=self._runtime)
result = collection.find(query_terms).sort('_id', DESCENDING)
else:
result = []
return objects.AssessmentTakenList(result, runtime=self._runtime, proxy=self._proxy)
def is_transaction_invalidated(transaction, state_change):
""" True if the `transaction` is made invalid by `state_change`.
Some transactions will fail due to race conditions. The races are:
- Another transaction which has the same side effect is executed before.
- Another transaction which *invalidates* the state of the smart contract
required by the local transaction is executed before it.
The first case is handled by the predicate `is_transaction_effect_satisfied`,
where a transaction from a different source which does the same thing is
considered. This predicate handles the second scenario.
A transaction can **only** invalidate another iff both share a valid
initial state but a different end state.
Valid example:
A close can invalidate a deposit, because both a close and a deposit
can be executed from an opened state (same initial state), but a close
transaction will transition the channel to a closed state which doesn't
allow for deposits (different end state).
Invalid example:
A settle transaction cannot invalidate a deposit because a settle is
only allowed for the closed state and deposits are only allowed for
the open state. In such a case a deposit should never have been sent.
The deposit transaction for an invalid state is a bug and not a
transaction which was invalidated.
"""
# Most transactions cannot be invalidated by others. These are:
#
# - close transactions
# - settle transactions
# - batch unlocks
#
# Deposits and withdraws are invalidated by the close, but these are not
# made atomic through the WAL.
is_our_failed_update_transfer = (
isinstance(state_change, ContractReceiveChannelSettled) and
isinstance(transaction, ContractSendChannelUpdateTransfer) and
state_change.token_network_identifier == transaction.token_network_identifier and
state_change.channel_identifier == transaction.channel_identifier
)
if is_our_failed_update_transfer:
return True
return False
def _register_namespace_and_command(self, namespace):
"""Add a Namespace and the corresponding command namespace."""
self._add_namespace(namespace)
# Add the namespace for commands on this database
cmd_name = namespace.source_name.split(".", 1)[0] + ".$cmd"
dest_cmd_name = namespace.dest_name.split(".", 1)[0] + ".$cmd"
self._add_namespace(Namespace(dest_name=dest_cmd_name, source_name=cmd_name))
def add_arguments(parser):
"""
Args for the init command
"""
parser.add_argument('-e', '--environment', help='Environment name', required=False, nargs='+')
parser.add_argument('-w', '--dont-wait', help='Skip waiting for the app to be deleted', action='store_true')
def get_weather_data(filename='weather.csv', **kwargs):
r"""
Imports weather data from a file.
The data include wind speed at two different heights in m/s, air
temperature in two different heights in K, surface roughness length in m
and air pressure in Pa. The file is located in the example folder of the
windpowerlib. The height in m for which the data applies is specified in
the second row.
Parameters
----------
filename : string
Filename of the weather data file. Default: 'weather.csv'.
Other Parameters
----------------
datapath : string, optional
Path where the weather data file is stored.
Default: 'windpowerlib/example'.
Returns
-------
weather_df : pandas.DataFrame
DataFrame with time series for wind speed `wind_speed` in m/s,
temperature `temperature` in K, roughness length `roughness_length`
in m, and pressure `pressure` in Pa.
The columns of the DataFrame are a MultiIndex where the first level
contains the variable name as string (e.g. 'wind_speed') and the
second level contains the height as integer at which it applies
(e.g. 10, if it was measured at a height of 10 m).
"""
if 'datapath' not in kwargs:
kwargs['datapath'] = os.path.join(os.path.split(
os.path.dirname(__file__))[0], 'example')
file = os.path.join(kwargs['datapath'], filename)
# read csv file
weather_df = pd.read_csv(
file, index_col=0, header=[0, 1],
date_parser=lambda idx: pd.to_datetime(idx, utc=True))
# change type of index to datetime and set time zone
weather_df.index = pd.to_datetime(weather_df.index).tz_convert(
'Europe/Berlin')
# change type of height from str to int by resetting columns
weather_df.columns = [weather_df.axes[1].levels[0][
weather_df.axes[1].codes[0]],
weather_df.axes[1].levels[1][
weather_df.axes[1].codes[1]].astype(int)]
return weather_df
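
# Sketch of the expected column layout: a two-level MultiIndex of
# (variable, height). Toy values, for illustration only:
import pandas as pd

toy = pd.DataFrame(
    [[5.0, 7.2, 267.6], [5.5, 7.8, 267.1]],
    columns=pd.MultiIndex.from_tuples(
        [('wind_speed', 10), ('wind_speed', 80), ('temperature', 2)]))
wind_10m = toy['wind_speed'][10]   # the 10 m wind speed series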
def match(self, data, threshold=0.5, generator=False): # pragma: no cover
"""Identifies records that all refer to the same entity, returns
tuples
containing a set of record ids and a confidence score as a
float between 0 and 1. The record_ids within each set should
refer to the same entity and the confidence score is a measure
of our confidence that all the records in a cluster refer to
the same entity.
    This method should only be used for small to moderately sized
    datasets; for larger data, use matchBlocks.
    Arguments:
    data -- Dictionary of records, where the keys are record_ids
            and the values are dictionaries with the keys being
            field names
    threshold -- Number between 0 and 1 (default is .5). We will
                 consider records as potential duplicates if the
                 predicted probability of being a duplicate is
                 above the threshold.
                 Lowering the number will increase recall,
                 raising it will increase precision
    generator -- If True, return the clusters as a generator rather
                 than a list (default is False)
    """
blocked_pairs = self._blockData(data)
clusters = self.matchBlocks(blocked_pairs, threshold)
if generator:
return clusters
else:
return list(clusters)
def main(argv=None):
'''Parse command line options and create a server/volume composite.'''
if argv is None:
argv = sys.argv
else:
sys.argv.extend(argv)
program_name = os.path.basename(sys.argv[0])
program_version = "v%s" % __version__
program_build_date = str(__updated__)
program_version_message = '%%(prog)s %s (%s)' % (program_version,
program_build_date)
program_shortdesc = __import__('__main__').__doc__.split("\n")[1]
program_license = '''%s
Created by J. Buchhammer on %s.
Copyright 2016 ProfitBricks GmbH. All rights reserved.
Licensed under the Apache License 2.0
http://www.apache.org/licenses/LICENSE-2.0
Distributed on an "AS IS" basis without warranties
or conditions of any kind, either express or implied.
USAGE
''' % (program_shortdesc, str(__date__))
try:
# Setup argument parser
parser = ArgumentParser(description=program_license,
formatter_class=RawDescriptionHelpFormatter)
parser.add_argument('-u', '--user', dest='user', help='the login name')
parser.add_argument('-p', '--password', dest='password',
help='the login password')
parser.add_argument('-L', '--Login', dest='loginfile', default=None,
help='the login file to use')
parser.add_argument('-d', '--datacenterid', dest='datacenterid',
required=True, default=None,
help='datacenter of the new server')
parser.add_argument('-l', '--lanid', dest='lanid', required=True,
default=None, help='LAN of the new server')
parser.add_argument('-n', '--name', dest='servername',
default="SRV_"+datetime.now().isoformat(),
help='name of the new server')
parser.add_argument('-c', '--cores', dest='cores', type=int,
default=2, help='CPU cores')
parser.add_argument('-r', '--ram', dest='ram', type=int, default=4,
help='RAM in GB')
parser.add_argument('-s', '--storage', dest='storage', type=int,
default=4, help='storage in GB')
parser.add_argument('-b', '--boot', dest='bootdevice', default="HDD",
help='boot device')
parser.add_argument('-i', '--imageid', dest='imageid', default=None,
help='installation image')
parser.add_argument('-P', '--imagepassword', dest='imgpassword',
default=None, help='the image password')
        parser.add_argument('-v', '--verbose', dest="verbose", action="count",
                            default=0, help="set verbosity level [default: %(default)s]")
parser.add_argument('-V', '--version', action='version',
version=program_version_message)
# Process arguments
args = parser.parse_args()
global verbose
verbose = args.verbose
dc_id = args.datacenterid
lan_id = args.lanid
servername = args.servername
if verbose > 0:
print("Verbose mode on")
print("start {} with args {}".format(program_name, str(args)))
# Test images (location de/fra)
# CDROM: 7fc885b3-c9a6-11e5-aa10-52540005ab80 # debian-8.3.0-amd64-netinst.iso
# HDD: 28007a6d-c88a-11e5-aa10-52540005ab80 # CentOS-7-server-2016-02-01
hdimage = args.imageid
cdimage = None
if args.bootdevice == "CDROM":
hdimage = None
cdimage = args.imageid
print("using boot device {} with image {}"
.format(args.bootdevice, args.imageid))
(user, password) = getLogin(args.loginfile, args.user, args.password)
if user is None or password is None:
raise ValueError("user or password resolved to None")
pbclient = ProfitBricksService(user, password)
first_nic = NIC(name="local", ips=[], dhcp=True, lan=lan_id)
volume = Volume(name=servername+"-Disk", size=args.storage,
image=hdimage, image_password=args.imgpassword)
server = Server(name=servername, cores=args.cores, ram=args.ram*1024,
create_volumes=[volume], nics=[first_nic],
boot_cdrom=cdimage)
print("creating server..")
if verbose > 0:
print("SERVER: {}".format(str(server)))
response = pbclient.create_server(dc_id, server)
print("wait for provisioning..")
wait_for_request(pbclient, response["requestId"])
server_id = response['id']
print("Server provisioned with ID {}".format(server_id))
nics = pbclient.list_nics(dc_id, server_id, 1)
# server should have exactly one nic, but we only test empty nic list
if not nics['items']:
raise CLIError("No NICs found for newly created server {}"
.format(server_id))
nic0 = nics['items'][0]
if verbose > 0:
print("NIC0: {}".format(str(nic0)))
(nic_id, nic_mac) = (nic0['id'], nic0['properties']['mac'])
print("NIC of new Server has ID {} and MAC {}".format(nic_id, nic_mac))
print("{} finished w/o errors".format(program_name))
return 0
except KeyboardInterrupt:
# handle keyboard interrupt #
return 0
except Exception:
traceback.print_exc()
sys.stderr.write("\n" + program_name + ": for help use --help\n")
return 2
def create_anisomagplot(plotman, x, y, z, alpha, options):
'''Plot the data of the tomodir in one overview plot.
'''
sizex, sizez = getfigsize(plotman)
# create figure
f, ax = plt.subplots(2, 3, figsize=(3 * sizex, 2 * sizez))
if options.title is not None:
plt.suptitle(options.title, fontsize=18)
plt.subplots_adjust(wspace=1.5, top=2)
    # plot magnitude
if options.cmaglin:
cidx = plotman.parman.add_data(np.power(10, x))
cidy = plotman.parman.add_data(np.power(10, y))
cidz = plotman.parman.add_data(np.power(10, z))
loglin = 'rho'
else:
cidx = plotman.parman.add_data(x)
cidy = plotman.parman.add_data(y)
cidz = plotman.parman.add_data(z)
loglin = 'log_rho'
cidxy = plotman.parman.add_data(np.divide(x, y))
cidyz = plotman.parman.add_data(np.divide(y, z))
cidzx = plotman.parman.add_data(np.divide(z, x))
plot_mag(cidx, ax[0, 0], plotman, 'x', loglin, alpha,
options.mag_vmin, options.mag_vmax,
options.xmin, options.xmax, options.zmin, options.zmax,
options.unit, options.mag_cbtiks, options.no_elecs,
)
plot_mag(cidy, ax[0, 1], plotman, 'y', loglin, alpha,
options.mag_vmin, options.mag_vmax,
options.xmin, options.xmax, options.zmin, options.zmax,
options.unit, options.mag_cbtiks, options.no_elecs,
)
plot_mag(cidz, ax[0, 2], plotman, 'z', loglin, alpha,
options.mag_vmin, options.mag_vmax,
options.xmin, options.xmax, options.zmin, options.zmax,
options.unit, options.mag_cbtiks, options.no_elecs,
)
plot_ratio(cidxy, ax[1, 0], plotman, 'x/y', alpha,
options.rat_vmin, options.rat_vmax,
options.xmin, options.xmax, options.zmin, options.zmax,
options.unit, options.mag_cbtiks, options.no_elecs,
)
plot_ratio(cidyz, ax[1, 1], plotman, 'y/z', alpha,
options.rat_vmin, options.rat_vmax,
options.xmin, options.xmax, options.zmin, options.zmax,
options.unit, options.mag_cbtiks, options.no_elecs,
)
plot_ratio(cidzx, ax[1, 2], plotman, 'z/x', alpha,
options.rat_vmin, options.rat_vmax,
options.xmin, options.xmax, options.zmin, options.zmax,
options.unit, options.mag_cbtiks, options.no_elecs,
)
f.tight_layout()
f.savefig('mag_aniso.png', dpi=300)
return f, ax
def _get_area_data(self):
"""Get histogram list based on area type.
List pattern: [type1_hel+,type2_hel+,type1_hel-,type2_hel-]
where type1/2 = F/B or R/L in that order.
"""
if self.mode == '1n':
        data = [self.hist['NBMF+'].data,
                self.hist['NBMF-'].data,
                self.hist['NBMB+'].data,
                self.hist['NBMB-'].data]
    elif self.area == 'BNMR':
        data = [self.hist['F+'].data,
                self.hist['F-'].data,
                self.hist['B+'].data,
                self.hist['B-'].data]
    elif self.area == 'BNQR':
        data = [self.hist['R+'].data,
                self.hist['R-'].data,
                self.hist['L+'].data,
                self.hist['L-'].data]
else:
data = []
if self.mode == '2h':
data.extend([self.hist['AL1+'].data,self.hist['AL1-'].data,
self.hist['AL0+'].data,self.hist['AL0-'].data,
self.hist['AL3+'].data,self.hist['AL3-'].data,
self.hist['AL2+'].data,self.hist['AL2-'].data])
# copy
return [np.copy(d) for d in data]
def statistic_recommend(classes, P):
"""
    Return recommended parameters that are better suited to the input dataset characteristics.
:param classes: all classes name
:type classes : list
:param P: condition positive
:type P : dict
:return: recommendation_list as list
"""
if imbalance_check(P):
return IMBALANCED_RECOMMEND
if binary_check(classes):
return BINARY_RECOMMEND
return MULTICLASS_RECOMMEND
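
# Plausible sketch of the kind of test `imbalance_check` could perform
# (an assumption, not the library's actual rule): flag imbalance when the
# majority/minority class ratio exceeds a threshold.
def imbalance_check_sketch(P, ratio_threshold=3):
    counts = [v for v in P.values() if v > 0]
    return bool(counts) and max(counts) / min(counts) > ratio_threshold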
def do_alias(self, arg):
"""alias [name [command [parameter parameter ...] ]]
Create an alias called 'name' that executes 'command'. The
command must *not* be enclosed in quotes. Replaceable
parameters can be indicated by %1, %2, and so on, while %* is
replaced by all the parameters. If no command is given, the
current alias for name is shown. If no name is given, all
aliases are listed.
Aliases may be nested and can contain anything that can be
legally typed at the pdb prompt. Note! You *can* override
internal pdb commands with aliases! Those internal commands
are then hidden until the alias is removed. Aliasing is
recursively applied to the first word of the command line; all
other words in the line are left alone.
As an example, here are two useful aliases (especially when
placed in the .pdbrc file):
# Print instance variables (usage "pi classInst")
alias pi for k in %1.__dict__.keys(): print("%1.",k,"=",%1.__dict__[k])
# Print instance variables in self
alias ps pi self
"""
args = arg.split()
if len(args) == 0:
keys = sorted(self.aliases.keys())
for alias in keys:
self.message("%s = %s" % (alias, self.aliases[alias]))
return
if args[0] in self.aliases and len(args) == 1:
self.message("%s = %s" % (args[0], self.aliases[args[0]]))
else:
self.aliases[args[0]] = ' '.join(args[1:])
def novatel_diag_send(self, timeStatus, receiverStatus, solStatus, posType, velType, posSolAge, csFails, force_mavlink1=False):
'''
Transmits the diagnostics data from the Novatel OEMStar GPS
timeStatus : The Time Status. See Table 8 page 27 Novatel OEMStar Manual (uint8_t)
receiverStatus : Status Bitfield. See table 69 page 350 Novatel OEMstar Manual (uint32_t)
solStatus : solution Status. See table 44 page 197 (uint8_t)
posType : position type. See table 43 page 196 (uint8_t)
velType : velocity type. See table 43 page 196 (uint8_t)
posSolAge : Age of the position solution in seconds (float)
csFails : Times the CRC has failed since boot (uint16_t)
'''
return self.send(self.novatel_diag_encode(timeStatus, receiverStatus, solStatus, posType, velType, posSolAge, csFails), force_mavlink1=force_mavlink1)
def serialize(self, subject, *objects_or_combinators):
""" object_combinators may also be URIRefs or Literals """
ec_s = rdflib.BNode()
if self.operator is not None:
if subject is not None:
yield subject, self.predicate, ec_s
yield from oc(ec_s)
yield from self._list.serialize(ec_s, self.operator, *objects_or_combinators)
else:
for thing in objects_or_combinators:
if isinstance(thing, Combinator):
object = rdflib.BNode()
#anything = list(thing(object))
#if anything:
#[print(_) for _ in anything]
hasType = False
for t in thing(object):
if t[1] == rdf.type:
hasType = True
yield t
if not hasType:
yield object, rdf.type, owl.Class
else:
object = thing
yield subject, self.predicate, object
def in_telephones(objet, pattern):
""" abstractSearch dans une liste de téléphones."""
objet = objet or []
if pattern == '' or not objet:
return False
    return any(bool(re.search(pattern, t)) for t in objet)
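
# Example: matches when any number in the list contains the pattern
assert in_telephones(['0612345678', '0498765432'], '0498') is True
assert in_telephones(['0612345678'], '') is False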
def replace_greek_tex(self, name):
"""Replace text representing greek letters with greek letters."""
name = name.replace('gamma-delta', 'gammadelta')
name = name.replace('interleukin-1 beta', 'interleukin-1beta')
# greek_present = False
for greek_txt, tex in self.greek2tex.items():
if greek_txt in name:
# greek_present = True
            name = name.replace(greek_txt, tex)
# if greek_present is True:
# name = texcode(name, 'utf-8') # For writing to xlsx
return name
def json(self):
"""
    Return __cached_json if it was read within the last 300 ms.
    This avoids repeated requests when many parameters of an entity are accessed within a short time.
"""
if self.fresh():
return self.__cached_json
# noinspection PyAttributeOutsideInit
self.__last_read_time = time.time()
self.__cached_json = self._router.get_instance(org_id=self.organizationId, instance_id=self.instanceId).json()
return self.__cached_json
def _initGP(self):
"""
    Internal method for initialization of the GP inference object
"""
if self._inference=='GP2KronSum':
signalPos = sp.where(sp.arange(self.n_randEffs)!=self.noisPos)[0][0]
gp = GP2KronSum(Y=self.Y, F=self.sample_designs, A=self.trait_designs,
Cg=self.trait_covars[signalPos], Cn=self.trait_covars[self.noisPos],
R=self.sample_covars[signalPos])
else:
mean = MeanKronSum(self.Y, self.sample_designs, self.trait_designs)
Iok = vec(~sp.isnan(mean.Y))[:,0]
if Iok.all(): Iok = None
covar = SumCov(*[KronCov(self.trait_covars[i], self.sample_covars[i], Iok=Iok) for i in range(self.n_randEffs)])
gp = GP(covar = covar, mean = mean)
self.gp = gp
def is_client_ip_address_whitelisted(request: AxesHttpRequest):
"""
Check if the given request refers to a whitelisted IP.
"""
if settings.AXES_NEVER_LOCKOUT_WHITELIST and is_ip_address_in_whitelist(request.axes_ip_address):
return True
if settings.AXES_ONLY_WHITELIST and is_ip_address_in_whitelist(request.axes_ip_address):
return True
return False
def divide(self, layer=WORDS, by=SENTENCES):
"""Divide the Text into pieces by keeping references to original elements, when possible.
    The only case where this is not possible is when the element is a multispan.
    Parameters
    ----------
    layer: str
        The layer to collect and distribute into the resulting bins.
by: str
Each resulting bin is defined by spans of this element.
Returns
-------
list of (list of dict)
"""
if not self.is_tagged(layer):
self.tag(layer)
if not self.is_tagged(by):
self.tag(by)
return divide(self[layer], self[by])
def pvfactors_timeseries(
solar_azimuth, solar_zenith, surface_azimuth, surface_tilt,
timestamps, dni, dhi, gcr, pvrow_height, pvrow_width, albedo,
n_pvrows=3, index_observed_pvrow=1,
rho_front_pvrow=0.03, rho_back_pvrow=0.05,
horizon_band_angle=15.,
run_parallel_calculations=True, n_workers_for_parallel_calcs=None):
"""
Calculate front and back surface plane-of-array irradiance on
a fixed tilt or single-axis tracker PV array configuration, and using
the open-source "pvfactors" package.
Please refer to pvfactors online documentation for more details:
https://sunpower.github.io/pvfactors/
Parameters
----------
solar_azimuth: numeric
Sun's azimuth angles using pvlib's azimuth convention (deg)
solar_zenith: numeric
Sun's zenith angles (deg)
surface_azimuth: numeric
Azimuth angle of the front surface of the PV modules, using pvlib's
convention (deg)
surface_tilt: numeric
Tilt angle of the PV modules, going from 0 to 180 (deg)
timestamps: datetime or DatetimeIndex
List of simulation timestamps
dni: numeric
Direct normal irradiance (W/m2)
dhi: numeric
Diffuse horizontal irradiance (W/m2)
gcr: float
Ground coverage ratio of the pv array
pvrow_height: float
Height of the pv rows, measured at their center (m)
pvrow_width: float
Width of the pv rows in the considered 2D plane (m)
albedo: float
Ground albedo
n_pvrows: int, default 3
Number of PV rows to consider in the PV array
index_observed_pvrow: int, default 1
Index of the PV row whose incident irradiance will be returned. Indices
of PV rows go from 0 to n_pvrows-1.
rho_front_pvrow: float, default 0.03
Front surface reflectivity of PV rows
rho_back_pvrow: float, default 0.05
Back surface reflectivity of PV rows
horizon_band_angle: float, default 15
Elevation angle of the sky dome's diffuse horizon band (deg)
run_parallel_calculations: bool, default True
pvfactors is capable of using multiprocessing. Use this flag to decide
to run calculations in parallel (recommended) or not.
n_workers_for_parallel_calcs: int, default None
Number of workers to use in the case of parallel calculations. The
default value of 'None' will lead to using a value equal to the number
of CPU's on the machine running the model.
Returns
-------
front_poa_irradiance: numeric
Calculated incident irradiance on the front surface of the PV modules
(W/m2)
back_poa_irradiance: numeric
Calculated incident irradiance on the back surface of the PV modules
(W/m2)
df_registries: pandas DataFrame
DataFrame containing detailed outputs of the simulation; for
instance the shapely geometries, the irradiance components incident on
all surfaces of the PV array (for all timestamps), etc.
    In the pvfactors documentation, this is referred to as the "surface
    registry".
References
----------
.. [1] Anoma, Marc Abou, et al. "View Factor Model and Validation for
Bifacial PV and Diffuse Shade on Single-Axis Trackers." 44th IEEE
Photovoltaic Specialist Conference. 2017.
"""
# Convert pandas Series inputs to numpy arrays
if isinstance(solar_azimuth, pd.Series):
solar_azimuth = solar_azimuth.values
if isinstance(solar_zenith, pd.Series):
solar_zenith = solar_zenith.values
if isinstance(surface_azimuth, pd.Series):
surface_azimuth = surface_azimuth.values
if isinstance(surface_tilt, pd.Series):
surface_tilt = surface_tilt.values
if isinstance(dni, pd.Series):
dni = dni.values
if isinstance(dhi, pd.Series):
dhi = dhi.values
# Import pvfactors functions for timeseries calculations.
from pvfactors.timeseries import (calculate_radiosities_parallel_perez,
calculate_radiosities_serially_perez,
get_average_pvrow_outputs)
idx_slice = pd.IndexSlice
# Build up pv array configuration parameters
pvarray_parameters = {
'n_pvrows': n_pvrows,
'pvrow_height': pvrow_height,
'pvrow_width': pvrow_width,
'gcr': gcr,
'rho_ground': albedo,
'rho_front_pvrow': rho_front_pvrow,
'rho_back_pvrow': rho_back_pvrow,
'horizon_band_angle': horizon_band_angle
}
# Run pvfactors calculations: either in parallel or serially
if run_parallel_calculations:
df_registries, df_custom_perez = calculate_radiosities_parallel_perez(
pvarray_parameters, timestamps, solar_zenith, solar_azimuth,
surface_tilt, surface_azimuth, dni, dhi,
n_processes=n_workers_for_parallel_calcs)
else:
inputs = (pvarray_parameters, timestamps, solar_zenith, solar_azimuth,
surface_tilt, surface_azimuth, dni, dhi)
df_registries, df_custom_perez = calculate_radiosities_serially_perez(
inputs)
# Get the average surface outputs
df_outputs = get_average_pvrow_outputs(df_registries,
values=['qinc'],
include_shading=True)
# Select the calculated outputs from the pvrow to observe
ipoa_front = df_outputs.loc[:, idx_slice[index_observed_pvrow,
'front', 'qinc']]
ipoa_back = df_outputs.loc[:, idx_slice[index_observed_pvrow,
'back', 'qinc']]
# Set timestamps as index of df_registries for consistency of outputs
df_registries = df_registries.set_index('timestamps')
return ipoa_front, ipoa_back, df_registries
|
Calculate front and back surface plane-of-array irradiance on
a fixed tilt or single-axis tracker PV array configuration, and using
the open-source "pvfactors" package.
Please refer to pvfactors online documentation for more details:
https://sunpower.github.io/pvfactors/
Parameters
----------
solar_azimuth: numeric
Sun's azimuth angles using pvlib's azimuth convention (deg)
solar_zenith: numeric
Sun's zenith angles (deg)
surface_azimuth: numeric
Azimuth angle of the front surface of the PV modules, using pvlib's
convention (deg)
surface_tilt: numeric
Tilt angle of the PV modules, going from 0 to 180 (deg)
timestamps: datetime or DatetimeIndex
List of simulation timestamps
dni: numeric
Direct normal irradiance (W/m2)
dhi: numeric
Diffuse horizontal irradiance (W/m2)
gcr: float
Ground coverage ratio of the pv array
pvrow_height: float
Height of the pv rows, measured at their center (m)
pvrow_width: float
Width of the pv rows in the considered 2D plane (m)
albedo: float
Ground albedo
n_pvrows: int, default 3
Number of PV rows to consider in the PV array
index_observed_pvrow: int, default 1
Index of the PV row whose incident irradiance will be returned. Indices
of PV rows go from 0 to n_pvrows-1.
rho_front_pvrow: float, default 0.03
Front surface reflectivity of PV rows
rho_back_pvrow: float, default 0.05
Back surface reflectivity of PV rows
horizon_band_angle: float, default 15
Elevation angle of the sky dome's diffuse horizon band (deg)
run_parallel_calculations: bool, default True
pvfactors is capable of using multiprocessing. Use this flag to decide
whether to run calculations in parallel (recommended) or not.
n_workers_for_parallel_calcs: int, default None
Number of workers to use in the case of parallel calculations. The
default value of 'None' will lead to using a value equal to the number
of CPUs on the machine running the model.
Returns
-------
front_poa_irradiance: numeric
Calculated incident irradiance on the front surface of the PV modules
(W/m2)
back_poa_irradiance: numeric
Calculated incident irradiance on the back surface of the PV modules
(W/m2)
df_registries: pandas DataFrame
DataFrame containing detailed outputs of the simulation; for
instance the shapely geometries, the irradiance components incident on
all surfaces of the PV array (for all timestamps), etc.
    In the pvfactors documentation, this is referred to as the "surface
registry".
References
----------
.. [1] Anoma, Marc Abou, et al. "View Factor Model and Validation for
Bifacial PV and Diffuse Shade on Single-Axis Trackers." 44th IEEE
Photovoltaic Specialist Conference. 2017.
|
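A minimal usage sketch for the function above. The name pvfactors_timeseries is an assumption (the def line is not shown here); the keyword arguments follow the parameter list documented above, and the numeric values are placeholders.
import pandas as pd
times = pd.date_range('2019-06-01 08:00', periods=4, freq='1H', tz='Etc/GMT+7')
front, back, registries = pvfactors_timeseries(  # assumed name for the function above
    solar_azimuth=pd.Series([100., 120., 140., 160.], index=times),
    solar_zenith=pd.Series([60., 45., 35., 30.], index=times),
    surface_azimuth=pd.Series([90.] * 4, index=times),
    surface_tilt=pd.Series([20.] * 4, index=times),
    timestamps=times,
    dni=pd.Series([800.] * 4, index=times),
    dhi=pd.Series([100.] * 4, index=times),
    gcr=0.4, pvrow_height=1.5, pvrow_width=2., albedo=0.2)
bifacial_gain = back / front   # per-timestamp rear-to-front irradiance ratio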
def _update_state(self, change):
""" Keep position and direction in sync with axis """
self._block_updates = True
try:
self.position = self.axis.Location()
self.direction = self.axis.Direction()
finally:
self._block_updates = False
|
Keep position and direction in sync with axis
|
def load(self, wishlist, calibration=None, resolution=None,
polarization=None, level=None, generate=True, unload=True,
**kwargs):
"""Read and generate requested datasets.
When the `wishlist` contains `DatasetID` objects they can either be
fully-specified `DatasetID` objects with every parameter specified
        or they can omit certain parameters, in which case the "best" value
        will be chosen. For example, if a dataset is available in multiple
resolutions and no resolution is specified in the wishlist's DatasetID
then the highest (smallest number) resolution will be chosen.
Loaded `DataArray` objects are created and stored in the Scene object.
Args:
wishlist (iterable): Names (str), wavelengths (float), or
DatasetID objects of the requested datasets
to load. See `available_dataset_ids()` for
what datasets are available.
calibration (list, str): Calibration levels to limit available
datasets. This is a shortcut to
having to list each DatasetID in
`wishlist`.
resolution (list | float): Resolution to limit available datasets.
This is a shortcut similar to
calibration.
polarization (list | str): Polarization ('V', 'H') to limit
available datasets. This is a shortcut
similar to calibration.
level (list | str): Pressure level to limit available datasets.
Pressure should be in hPa or mb. If an
altitude is used it should be specified in
inverse meters (1/m). The units of this
parameter ultimately depend on the reader.
generate (bool): Generate composites from the loaded datasets
(default: True)
unload (bool): Unload datasets that were required to generate
the requested datasets (composite dependencies)
but are no longer needed.
"""
dataset_keys = set(wishlist)
needed_datasets = (self.wishlist | dataset_keys) - \
set(self.datasets.keys())
unknown = self.dep_tree.find_dependencies(needed_datasets,
calibration=calibration,
polarization=polarization,
resolution=resolution,
level=level)
self.wishlist |= needed_datasets
if unknown:
unknown_str = ", ".join(map(str, unknown))
raise KeyError("Unknown datasets: {}".format(unknown_str))
self.read(**kwargs)
if generate:
keepables = self.generate_composites()
else:
# don't lose datasets we loaded to try to generate composites
keepables = set(self.datasets.keys()) | self.wishlist
if self.missing_datasets:
# copy the set of missing datasets because they won't be valid
# after they are removed in the next line
missing = self.missing_datasets.copy()
self._remove_failed_datasets(keepables)
missing_str = ", ".join(str(x) for x in missing)
LOG.warning("The following datasets were not created and may require "
"resampling to be generated: {}".format(missing_str))
if unload:
self.unload(keepables=keepables)
|
Read and generate requested datasets.
When the `wishlist` contains `DatasetID` objects they can either be
fully-specified `DatasetID` objects with every parameter specified
or they can omit certain parameters, in which case the "best" value
will be chosen. For example, if a dataset is available in multiple
resolutions and no resolution is specified in the wishlist's DatasetID
then the highest (smallest number) resolution will be chosen.
Loaded `DataArray` objects are created and stored in the Scene object.
Args:
wishlist (iterable): Names (str), wavelengths (float), or
DatasetID objects of the requested datasets
to load. See `available_dataset_ids()` for
what datasets are available.
calibration (list, str): Calibration levels to limit available
datasets. This is a shortcut to
having to list each DatasetID in
`wishlist`.
resolution (list | float): Resolution to limit available datasets.
This is a shortcut similar to
calibration.
polarization (list | str): Polarization ('V', 'H') to limit
available datasets. This is a shortcut
similar to calibration.
level (list | str): Pressure level to limit available datasets.
Pressure should be in hPa or mb. If an
altitude is used it should be specified in
inverse meters (1/m). The units of this
parameter ultimately depend on the reader.
generate (bool): Generate composites from the loaded datasets
(default: True)
unload (bool): Unload datasets that were required to generate
the requested datasets (composite dependencies)
but are no longer needed.
|
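A short usage sketch for load(), assuming the surrounding class is satpy's Scene; the reader name, file paths, and dataset names below are placeholders.
from glob import glob
from satpy import Scene
# reader name and paths are placeholders
scn = Scene(reader='seviri_l1b_hrit', filenames=glob('/data/hrit/*'))
# request a composite by name and a channel by wavelength in one wishlist
scn.load(['overview', 0.6], resolution=3000)
# keep composite dependencies around by disabling generation and unloading
scn.load(['VIS006'], generate=False, unload=False)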
def _to_topology(self, atom_list, chains=None, residues=None):
"""Create a mdtraj.Topology from a Compound.
Parameters
----------
atom_list : list of mb.Compound
Atoms to include in the topology
chains : mb.Compound or list of mb.Compound
Chain types to add to the topology
        residues : str or list of str
Labels of residues in the Compound. Residues are assigned by
checking against Compound.name.
Returns
-------
top : mdtraj.Topology
See Also
--------
mdtraj.Topology : Details on the mdtraj Topology object
"""
from mdtraj.core.topology import Topology
if isinstance(chains, string_types):
chains = [chains]
if isinstance(chains, (list, set)):
chains = tuple(chains)
if isinstance(residues, string_types):
residues = [residues]
if isinstance(residues, (list, set)):
residues = tuple(residues)
top = Topology()
atom_mapping = {}
default_chain = top.add_chain()
default_residue = top.add_residue('RES', default_chain)
compound_residue_map = dict()
atom_residue_map = dict()
compound_chain_map = dict()
atom_chain_map = dict()
for atom in atom_list:
# Chains
if chains:
if atom.name in chains:
current_chain = top.add_chain()
compound_chain_map[atom] = current_chain
else:
for parent in atom.ancestors():
if chains and parent.name in chains:
if parent not in compound_chain_map:
current_chain = top.add_chain()
compound_chain_map[parent] = current_chain
current_residue = top.add_residue(
'RES', current_chain)
break
else:
current_chain = default_chain
else:
current_chain = default_chain
atom_chain_map[atom] = current_chain
# Residues
if residues:
if atom.name in residues:
current_residue = top.add_residue(atom.name, current_chain)
compound_residue_map[atom] = current_residue
else:
for parent in atom.ancestors():
if residues and parent.name in residues:
if parent not in compound_residue_map:
current_residue = top.add_residue(
parent.name, current_chain)
compound_residue_map[parent] = current_residue
break
else:
current_residue = default_residue
else:
if chains:
try: # Grab the default residue from the custom chain.
current_residue = next(current_chain.residues)
except StopIteration: # Add the residue to the current chain
current_residue = top.add_residue('RES', current_chain)
else: # Grab the default chain's default residue
current_residue = default_residue
atom_residue_map[atom] = current_residue
# Add the actual atoms
try:
elem = get_by_symbol(atom.name)
except KeyError:
elem = get_by_symbol("VS")
at = top.add_atom(atom.name, elem, atom_residue_map[atom])
at.charge = atom.charge
atom_mapping[atom] = at
# Remove empty default residues.
chains_to_remove = [
chain for chain in top.chains if chain.n_atoms == 0]
residues_to_remove = [res for res in top.residues if res.n_atoms == 0]
for chain in chains_to_remove:
top._chains.remove(chain)
for res in residues_to_remove:
for chain in top.chains:
try:
chain._residues.remove(res)
except ValueError: # Already gone.
pass
for atom1, atom2 in self.bonds():
# Ensure that both atoms are part of the compound. This becomes an
# issue if you try to convert a sub-compound to a topology which is
# bonded to a different subcompound.
if all(a in atom_mapping.keys() for a in [atom1, atom2]):
top.add_bond(atom_mapping[atom1], atom_mapping[atom2])
return top
|
Create a mdtraj.Topology from a Compound.
Parameters
----------
atom_list : list of mb.Compound
Atoms to include in the topology
chains : mb.Compound or list of mb.Compound
Chain types to add to the topology
residues : str or list of str
Labels of residues in the Compound. Residues are assigned by
checking against Compound.name.
Returns
-------
top : mdtraj.Topology
See Also
--------
mdtraj.Topology : Details on the mdtraj Topology object
|
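`_to_topology` is private; a sketch of how it is usually reached, assuming mBuild's public `to_trajectory()` wrapper forwards `chains`/`residues` to it (an assumption here), and a hypothetical way of building the compound.
import mbuild as mb
# hypothetical construction path for a small compound
ethane = mb.load('CC', smiles=True)
# to_trajectory() is assumed to call _to_topology internally
traj = ethane.to_trajectory(residues='Ethane')
print(traj.topology)   # mdtraj.Topology with one 'Ethane' residue per match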
async def _create_transaction(self, msg, *args, **kwargs):
"""
Create a transaction with the distant server
:param msg: message to be sent
:param args: args to be sent to the coroutines given to `register_transaction`
:param kwargs: kwargs to be sent to the coroutines given to `register_transaction`
"""
recv_msgs, get_key, _1, _2, _3 = self._msgs_registered[msg.__msgtype__]
key = get_key(msg)
if key in self._transactions[recv_msgs[0]]:
            # If we already have a request for this particular key, just add it to the list of things to call
for recv_msg in recv_msgs:
self._transactions[recv_msg][key].append((args, kwargs))
else:
# If that's not the case, add us in the queue, and send the message
for recv_msg in recv_msgs:
self._transactions[recv_msg][key] = [(args, kwargs)]
await ZMQUtils.send(self._socket, msg)
|
Create a transaction with the distant server
:param msg: message to be sent
:param args: args to be sent to the coroutines given to `register_transaction`
:param kwargs: kwargs to be sent to the coroutines given to `register_transaction`
|
def to_dict(self):
"""Returns the Time instance as a usable dictionary for craftai"""
return {
"timestamp": int(self.timestamp),
"timezone": self.timezone,
"time_of_day": self.time_of_day,
"day_of_week": self.day_of_week,
"day_of_month": self.day_of_month,
"month_of_year": self.month_of_year,
"utc_iso": self.utc_iso
}
|
Returns the Time instance as a usable dictionary for craftai
|
def _req_rep_retry(self, request):
"""Returns response and number of retries"""
retries_left = self.RETRIES
while retries_left:
self._logger.log(1, 'Sending REQ `%s`', request)
self._send_request(request)
socks = dict(self._poll.poll(self.TIMEOUT))
if socks.get(self._socket) == zmq.POLLIN:
response = self._receive_response()
self._logger.log(1, 'Received REP `%s`', response)
return response, self.RETRIES - retries_left
else:
self._logger.debug('No response from server (%d retries left)' %
retries_left)
self._close_socket(confused=True)
retries_left -= 1
if retries_left == 0:
raise RuntimeError('Server seems to be offline!')
time.sleep(self.SLEEP)
self._start_socket()
|
Returns response and number of retries
|
def get_value(cls, bucket, key):
"""Get tag value."""
obj = cls.get(bucket, key)
return obj.value if obj else None
|
Get tag value.
|
def _list_machines(self):
"""
Request a list of all added machines.
Populates self._machines dict with mist.client.model.Machine instances
"""
try:
req = self.request(self.mist_client.uri+'/clouds/'+self.id+'/machines')
machines = req.get().json()
        except Exception:
            # e.g. invalid cloud credentials
machines = {}
if machines:
for machine in machines:
self._machines[machine['machine_id']] = Machine(machine, self)
else:
self._machines = {}
|
Request a list of all added machines.
Populates self._machines dict with mist.client.model.Machine instances
|
def to_timepoints(self, unit='hours', offset=None):
"""Return an |numpy.ndarray| representing the starting time points
of the |Timegrid| object.
        The following examples are identical to the ones of
|Timegrid.from_timepoints| but reversed.
By default, the time points are given in hours:
>>> from hydpy import Timegrid
>>> timegrid = Timegrid('2000-01-01', '2000-01-02', '6h')
>>> timegrid.to_timepoints()
array([ 0., 6., 12., 18.])
Other time units (`days` or `min`) can be defined (only the first
character counts):
>>> timegrid.to_timepoints(unit='d')
array([ 0. , 0.25, 0.5 , 0.75])
Additionally, one can pass an `offset` that must be of type |int|
        or a valid |Period| initialization argument:
>>> timegrid.to_timepoints(offset=24)
array([ 24., 30., 36., 42.])
>>> timegrid.to_timepoints(offset='1d')
array([ 24., 30., 36., 42.])
>>> timegrid.to_timepoints(unit='day', offset='1d')
array([ 1. , 1.25, 1.5 , 1.75])
"""
unit = Period.from_cfunits(unit)
if offset is None:
offset = 0.
else:
            try:
                offset = Period(offset)/unit
            except TypeError:
                pass
step = self.stepsize/unit
nmb = len(self)
variable = numpy.linspace(offset, offset+step*(nmb-1), nmb)
return variable
|
Return an |numpy.ndarray| representing the starting time points
of the |Timegrid| object.
The following examples are identical to the ones of
|Timegrid.from_timepoints| but reversed.
By default, the time points are given in hours:
>>> from hydpy import Timegrid
>>> timegrid = Timegrid('2000-01-01', '2000-01-02', '6h')
>>> timegrid.to_timepoints()
array([ 0., 6., 12., 18.])
Other time units (`days` or `min`) can be defined (only the first
character counts):
>>> timegrid.to_timepoints(unit='d')
array([ 0. , 0.25, 0.5 , 0.75])
Additionally, one can pass an `offset` that must be of type |int|
or a valid |Period| initialization argument:
>>> timegrid.to_timepoints(offset=24)
array([ 24., 30., 36., 42.])
>>> timegrid.to_timepoints(offset='1d')
array([ 24., 30., 36., 42.])
>>> timegrid.to_timepoints(unit='day', offset='1d')
array([ 1. , 1.25, 1.5 , 1.75])
|
def repartition(self, num_partitions, repartition_function=None):
"""Return a new Streamlet containing all elements of the this streamlet but having
num_partitions partitions. Note that this is different from num_partitions(n) in
that new streamlet will be created by the repartition call.
If repartiton_function is not None, it is used to decide which parititons
(from 0 to num_partitions -1), it should route each element to.
It could also return a list of partitions if it wants to send it to multiple
partitions.
"""
from heronpy.streamlet.impl.repartitionbolt import RepartitionStreamlet
if repartition_function is None:
repartition_function = lambda x: x
repartition_streamlet = RepartitionStreamlet(num_partitions, repartition_function, self)
self._add_child(repartition_streamlet)
return repartition_streamlet
|
Return a new Streamlet containing all elements of this streamlet but having
num_partitions partitions. Note that this is different from num_partitions(n) in
that a new streamlet will be created by the repartition call.
If repartition_function is not None, it is used to decide which partition
(from 0 to num_partitions - 1) each element should be routed to.
It can also return a list of partitions to route an element to multiple
partitions.
|
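A small sketch of the custom routing hook, applied to a hypothetical upstream streamlet `numbers`:
# route evens to partition 0, odds to partition 1,
# and broadcast multiples of 10 to both partitions
def router(x):
    if x % 10 == 0:
        return [0, 1]
    return x % 2
repartitioned = numbers.repartition(2, router)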
def map_to_subset(self, file, outfile=None, ontology=None, subset=None, class_map=None, relations=None):
"""
Map a file to a subset, writing out results
You can pass either a subset name (e.g. goslim_generic) or a dictionary with ready-made mappings
Arguments
---------
file: file
Name or file object for input assoc file
outfile: file
Name or file object for output (mapped) assoc file; writes to stdout if not set
subset: str
Optional name of subset to map to, e.g. goslim_generic
class_map: dict
Mapping between asserted class ids and ids to map to. Many to many
ontology: `Ontology`
Ontology to extract subset from
"""
if subset is not None:
logging.info("Creating mapping for subset: {}".format(subset))
class_map = ontology.create_slim_mapping(subset=subset, relations=relations)
if class_map is None:
raise ValueError("Neither class_map not subset is set")
col = self.ANNOTATION_CLASS_COLUMN
file = self._ensure_file(file)
tuples = []
for line in file:
if line.startswith("!"):
continue
vals = line.split("\t")
logging.info("LINE: {} VALS: {}".format(line, vals))
            if len(vals) <= col:
raise ValueError("Line: {} has too few cols, expect class id in col {}".format(line, col))
cid = vals[col]
if cid not in class_map or len(class_map[cid]) == 0:
self.report.error(line, Report.UNMAPPED_ID, cid)
continue
else:
for mcid in class_map[cid]:
vals[col] = mcid
line = "\t".join(vals)
if outfile is not None:
outfile.write(line)
else:
print(line)
|
Map a file to a subset, writing out results
You can pass either a subset name (e.g. goslim_generic) or a dictionary with ready-made mappings
Arguments
---------
file: file
Name or file object for input assoc file
outfile: file
Name or file object for output (mapped) assoc file; writes to stdout if not set
subset: str
Optional name of subset to map to, e.g. goslim_generic
class_map: dict
Mapping between asserted class ids and ids to map to. Many to many
ontology: `Ontology`
Ontology to extract subset from
|
def merge(self, other):
"""
Copy properties from other into self, skipping ``None`` values. Also merges the raw data.
Args:
other (SkypeObj): second object to copy fields from
"""
for attr in self.attrs:
if not getattr(other, attr, None) is None:
setattr(self, attr, getattr(other, attr))
if other.raw:
if not self.raw:
self.raw = {}
self.raw.update(other.raw)
|
Copy properties from other into self, skipping ``None`` values. Also merges the raw data.
Args:
other (SkypeObj): second object to copy fields from
|
def get_series_episodes(self, id, page=1):
"""Get series episodes"""
# perform the request
params = {'page': page}
r = self.session.get(self.base_url + '/series/{}/episodes'.format(id), params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()
|
Get series episodes
|
def _simplify(elements):
"""Simplifies and normalizes the list of elements removing
    redundant/repeated elements and normalizing upper/lower case
so case sensitivity is resolved here."""
simplified = []
previous = None
for element in elements:
if element == "..":
raise FormicError("Invalid glob:"
" Cannot have '..' in a glob: {0}".format(
"/".join(elements)))
elif element == ".":
# . in a path does not do anything
pass
elif element == "**" and previous == "**":
# Remove repeated "**"s
pass
else:
simplified.append(os.path.normcase(element))
previous = element
if simplified[-1] == "":
# Trailing slash shorthand for /**
simplified[-1] = "**"
# Ensure the pattern either:
# * Starts with a "**", or
# * Starts with the first real element of the glob
if simplified[0] == "":
# "" means the pattern started with a slash.
del simplified[0]
else:
if simplified[0] != "**":
simplified.insert(0, "**")
return simplified
|
Simplifies and normalizes the list of elements removing
redundant/repeated elements and normalizing upper/lower case
so case sensitivity is resolved here.
|
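Worked examples of the normalization above; inputs are glob strings pre-split on '/', and the outputs are traced by hand from the code.
_simplify("docs/index.html".split("/"))
# -> ['**', 'docs', 'index.html']   (relative glob gains a leading '**')
_simplify("/src/./**/core".split("/"))
# -> ['src', '**', 'core']          (leading slash and '.' removed)
_simplify("/src/".split("/"))
# -> ['src', '**']                  (trailing slash is shorthand for '/**')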
def to_kwargs(triangles):
"""
Convert a list of triangles to the kwargs for the Trimesh
constructor.
Parameters
---------
triangles : (n, 3, 3) float
Triangles in space
Returns
---------
kwargs : dict
Keyword arguments for the trimesh.Trimesh constructor
Includes keys 'vertices' and 'faces'
Examples
---------
>>> mesh = trimesh.Trimesh(**trimesh.triangles.to_kwargs(triangles))
"""
triangles = np.asanyarray(triangles, dtype=np.float64)
if not util.is_shape(triangles, (-1, 3, 3)):
raise ValueError('Triangles must be (n,3,3)!')
vertices = triangles.reshape((-1, 3))
faces = np.arange(len(vertices)).reshape((-1, 3))
kwargs = {'vertices': vertices,
'faces': faces}
return kwargs
|
Convert a list of triangles to the kwargs for the Trimesh
constructor.
Parameters
---------
triangles : (n, 3, 3) float
Triangles in space
Returns
---------
kwargs : dict
Keyword arguments for the trimesh.Trimesh constructor
Includes keys 'vertices' and 'faces'
Examples
---------
>>> mesh = trimesh.Trimesh(**trimesh.triangles.to_kwargs(triangles))
|
def infer(examples, alt_rules=None):
"""
Returns a datetime.strptime-compliant format string for parsing the *most likely* date format
used in examples. examples is a list containing example date strings.
"""
date_classes = _tag_most_likely(examples)
if alt_rules:
date_classes = _apply_rewrites(date_classes, alt_rules)
else:
date_classes = _apply_rewrites(date_classes, RULES)
date_string = ''
for date_class in date_classes:
date_string += date_class.directive
return date_string
|
Returns a datetime.strptime-compliant format string for parsing the *most likely* date format
used in examples. examples is a list containing example date strings.
|
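A usage sketch of infer() as defined above; the expected format string is the most likely inference, not a guarantee.
examples = ['2019-01-02', '2019-11-30', '2020-02-29']
fmt = infer(examples)   # expected: '%Y-%m-%d'
from datetime import datetime
datetime.strptime(examples[0], fmt)   # round-trips if the inference succeeded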
def add_column(self, tablename: str, fieldspec: FIELDSPEC_TYPE) -> int:
"""Adds a column to an existing table."""
sql = "ALTER TABLE {} ADD COLUMN {}".format(
tablename, self.fielddefsql_from_fieldspec(fieldspec))
log.info(sql)
return self.db_exec_literal(sql)
|
Adds a column to an existing table.
|
def unicode_dict(_dict):
"""
Make sure keys and values of dict is unicode.
"""
r = {}
for k, v in iteritems(_dict):
r[unicode_obj(k)] = unicode_obj(v)
return r
|
Make sure keys and values of dict is unicode.
|
def take(self, axis, n):
"""Take the first (or last) n rows or columns from the blocks
Note: Axis = 0 will be equivalent to `head` or `tail`
Axis = 1 will be equivalent to `front` or `back`
Args:
axis: The axis to extract (0 for extracting rows, 1 for extracting columns)
n: The number of rows or columns to extract, negative denotes to extract
from the bottom of the object
Returns:
A new BaseFrameManager object, the type of object that called this.
"""
# These are the partitions that we will extract over
if not axis:
partitions = self.partitions
bin_lengths = self.block_lengths
else:
partitions = self.partitions.T
bin_lengths = self.block_widths
if n < 0:
length_bins = np.cumsum(bin_lengths[::-1])
n *= -1
idx = int(np.digitize(n, length_bins))
if idx > 0:
remaining = int(n - length_bins[idx - 1])
else:
remaining = n
# In this case, we require no remote compute. This is much faster.
if remaining == 0:
result = partitions[-idx:]
else:
# Reverse for ease of iteration and then re-reverse at the end
partitions = partitions[::-1]
# We build this iloc to avoid creating a bunch of helper methods.
# This code creates slice objects to be passed to `iloc` to grab
# the last n rows or columns depending on axis.
slice_obj = (
slice(-remaining, None)
if axis == 0
else (slice(None), slice(-remaining, None))
)
func = self.preprocess_func(lambda df: df.iloc[slice_obj])
# We use idx + 1 here because the loop is not inclusive, and we
# need to iterate through idx.
result = np.array(
[
partitions[i]
if i != idx
else [obj.apply(func) for obj in partitions[i]]
for i in range(idx + 1)
]
)[::-1]
else:
length_bins = np.cumsum(bin_lengths)
idx = int(np.digitize(n, length_bins))
if idx > 0:
remaining = int(n - length_bins[idx - 1])
else:
remaining = n
# In this case, we require no remote compute. This is much faster.
if remaining == 0:
result = partitions[:idx]
else:
# We build this iloc to avoid creating a bunch of helper methods.
# This code creates slice objects to be passed to `iloc` to grab
# the first n rows or columns depending on axis.
slice_obj = (
slice(remaining) if axis == 0 else (slice(None), slice(remaining))
)
func = self.preprocess_func(lambda df: df.iloc[slice_obj])
# See note above about idx + 1
result = np.array(
[
partitions[i]
if i != idx
else [obj.apply(func) for obj in partitions[i]]
for i in range(idx + 1)
]
)
return self.__constructor__(result.T) if axis else self.__constructor__(result)
|
Take the first (or last) n rows or columns from the blocks
Note: Axis = 0 will be equivalent to `head` or `tail`
Axis = 1 will be equivalent to `front` or `back`
Args:
axis: The axis to extract (0 for extracting rows, 1 for extracting columns)
n: The number of rows or columns to extract, negative denotes to extract
from the bottom of the object
Returns:
A new BaseFrameManager object, the type of object that called this.
|
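The partition arithmetic inside take() can be checked in isolation; this reproduces the head (n >= 0) case with made-up block sizes.
import numpy as np
bin_lengths = [4, 4, 3]                    # rows per partition block along the axis
n = 6
length_bins = np.cumsum(bin_lengths)       # array([ 4,  8, 11])
idx = int(np.digitize(n, length_bins))     # 1: the cut lands in the second block
remaining = int(n - length_bins[idx - 1]) if idx > 0 else n
# remaining == 2: take blocks 0..idx-1 whole, plus 2 rows of block `idx`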
def getAdjEdges(self, networkId, nodeId, verbose=None):
"""
Returns a list of connected edges as SUIDs for the node specified by the `nodeId` and `networkId` parameters.
:param networkId: SUID of the network containing the node
:param nodeId: SUID of the node
:param verbose: print more
:returns: 200: successful operation
"""
response=api(url=self.___url+'networks/'+str(networkId)+'/nodes/'+str(nodeId)+'/adjEdges', method="GET", verbose=verbose, parse_params=False)
return response
|
Returns a list of connected edges as SUIDs for the node specified by the `nodeId` and `networkId` parameters.
:param networkId: SUID of the network containing the node
:param nodeId: SUID of the node
:param verbose: print more
:returns: 200: successful operation
|
def verify_hash_type(self):
'''
        Verify and display a nag-message in the log if a vulnerable hash type is used.
:return:
'''
if self.config['hash_type'].lower() in ['md5', 'sha1']:
log.warning(
'IMPORTANT: Do not use %s hashing algorithm! Please set '
'"hash_type" to sha256 in Salt %s config!',
self.config['hash_type'], self.__class__.__name__
)
|
Verify and display a nag-message in the log if a vulnerable hash type is used.
:return:
|
def add_to_products(self, products=None, all_products=False):
"""
Add user group to some product license configuration groups (PLCs), or all of them.
:param products: list of product names the user should be added to
:param all_products: a boolean meaning add to all (don't specify products in this case)
:return: the Group, so you can do Group(...).add_to_products(...).add_users(...)
"""
if all_products:
if products:
raise ArgumentError("When adding to all products, do not specify specific products")
plist = "all"
else:
if not products:
raise ArgumentError("You must specify products to which to add the user group")
plist = {GroupTypes.productConfiguration.name: [product for product in products]}
return self.append(add=plist)
|
Add user group to some product license configuration groups (PLCs), or all of them.
:param products: list of product names the user should be added to
:param all_products: a boolean meaning add to all (don't specify products in this case)
:return: the Group, so you can do Group(...).add_to_products(...).add_users(...)
|
def _todict(cls):
""" generate a dict keyed by value """
return dict((getattr(cls, attr), attr) for attr in dir(cls) if not attr.startswith('_'))
|
generate a dict keyed by value
|
def model_config_from_estimator(instance_type, estimator, task_id, task_type, role=None, image=None, name=None,
model_server_workers=None, vpc_config_override=vpc_utils.VPC_CONFIG_DEFAULT):
"""Export Airflow model config from a SageMaker estimator
Args:
instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'
estimator (sagemaker.model.EstimatorBase): The SageMaker estimator to export Airflow config from.
It has to be an estimator associated with a training job.
task_id (str): The task id of any airflow.contrib.operators.SageMakerTrainingOperator or
airflow.contrib.operators.SageMakerTuningOperator that generates training jobs in the DAG. The model config
is built based on the training job generated in this operator.
task_type (str): Whether the task is from SageMakerTrainingOperator or SageMakerTuningOperator. Values can be
'training', 'tuning' or None (which means training job is not from any task).
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the model
        image (str): A container image to use for deploying the model
name (str): Name of the model
model_server_workers (int): The number of worker processes used by the inference server.
If None, server will use one worker per vCPU. Only effective when estimator is a
SageMaker framework.
vpc_config_override (dict[str, list[str]]): Override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
Returns:
dict: Model config that can be directly used by SageMakerModelOperator in Airflow. It can also be part
of the config used by SageMakerEndpointOperator in Airflow.
"""
update_estimator_from_task(estimator, task_id, task_type)
if isinstance(estimator, sagemaker.estimator.Estimator):
model = estimator.create_model(role=role, image=image, vpc_config_override=vpc_config_override)
elif isinstance(estimator, sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase):
model = estimator.create_model(vpc_config_override=vpc_config_override)
elif isinstance(estimator, sagemaker.estimator.Framework):
model = estimator.create_model(model_server_workers=model_server_workers, role=role,
vpc_config_override=vpc_config_override)
else:
raise TypeError('Estimator must be one of sagemaker.estimator.Estimator, sagemaker.estimator.Framework'
' or sagemaker.amazon.amazon_estimator.AmazonAlgorithmEstimatorBase.')
model.name = name
return model_config(instance_type, model, role, image)
|
Export Airflow model config from a SageMaker estimator
Args:
instance_type (str): The EC2 instance type to deploy this Model to. For example, 'ml.p2.xlarge'
estimator (sagemaker.model.EstimatorBase): The SageMaker estimator to export Airflow config from.
It has to be an estimator associated with a training job.
task_id (str): The task id of any airflow.contrib.operators.SageMakerTrainingOperator or
airflow.contrib.operators.SageMakerTuningOperator that generates training jobs in the DAG. The model config
is built based on the training job generated in this operator.
task_type (str): Whether the task is from SageMakerTrainingOperator or SageMakerTuningOperator. Values can be
'training', 'tuning' or None (which means training job is not from any task).
role (str): The ``ExecutionRoleArn`` IAM Role ARN for the model
    image (str): A container image to use for deploying the model
name (str): Name of the model
model_server_workers (int): The number of worker processes used by the inference server.
If None, server will use one worker per vCPU. Only effective when estimator is a
SageMaker framework.
vpc_config_override (dict[str, list[str]]): Override for VpcConfig set on the model.
Default: use subnets and security groups from this Estimator.
* 'Subnets' (list[str]): List of subnet ids.
* 'SecurityGroupIds' (list[str]): List of security group ids.
Returns:
dict: Model config that can be directly used by SageMakerModelOperator in Airflow. It can also be part
of the config used by SageMakerEndpointOperator in Airflow.
|
def ChunkedTransformerLM(vocab_size,
feature_depth=512,
feedforward_depth=2048,
num_layers=6,
num_heads=8,
dropout=0.1,
chunk_selector=None,
max_len=2048,
mode='train'):
"""Transformer language model operating on chunks.
The input to this model is a sequence presented as a list or tuple of chunks:
(chunk1, chunk2, chunks3, ..., chunkN).
Each chunk should have the same shape (batch, chunk-length) and together they
  represent a long sequence that's the concatenation chunk1,chunk2,...,chunkN.
Chunked Transformer emulates the operation of a Transformer on this long
sequence except for the chunked attention layer, which may attend to only
a subset of the chunks to reduce memory use.
Args:
vocab_size: int: vocab size
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend
      (if None, attends to the previous chunk, which is equivalent to setting
      chunk_selector(x) = [] if x < 1 else [x-1] (TransformerXL-style); the
      current chunk is always attended to with a causal mask, while selected
      chunks are unmasked).
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the layer.
"""
stack = [ChunkedDecoderLayer(feature_depth, feedforward_depth, num_heads,
dropout, chunk_selector, mode)
for _ in range(num_layers)]
# Below each Map(L) applies the layer L to each chunk independently.
return layers.Serial(
layers.ShiftRight(),
layers.Map(layers.Embedding(feature_depth, vocab_size)),
layers.Map(layers.Dropout(rate=dropout, mode=mode)),
layers.PositionalEncoding(max_len=max_len),
layers.Serial(*stack),
layers.Map(layers.LayerNorm()),
layers.Map(layers.Dense(vocab_size)),
layers.Map(layers.LogSoftmax()),
)
|
Transformer language model operating on chunks.
The input to this model is a sequence presented as a list or tuple of chunks:
(chunk1, chunk2, chunks3, ..., chunkN).
Each chunk should have the same shape (batch, chunk-length) and together they
represent a long sequence that's the concatenation chunk1,chunk2,...,chunkN.
Chunked Transformer emulates the operation of a Transformer on this long
sequence except for the chunked attention layer, which may attend to only
a subset of the chunks to reduce memory use.
Args:
vocab_size: int: vocab size
feature_depth: int: depth of embedding
feedforward_depth: int: depth of feed-forward layer
num_layers: int: number of encoder/decoder layers
num_heads: int: number of attention heads
dropout: float: dropout rate (how much to drop out)
chunk_selector: a function from chunk number to list of chunks to attend
    (if None, attends to the previous chunk, which is equivalent to setting
    chunk_selector(x) = [] if x < 1 else [x-1] (TransformerXL-style); the
    current chunk is always attended to with a causal mask, while selected
    chunks are unmasked).
max_len: int: maximum symbol length for positional encoding
mode: str: 'train' or 'eval'
Returns:
the layer.
|
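A chunk_selector reproducing the documented default, plus an illustrative sliding-window variant; the instantiation arguments are placeholders.
# the documented default (TransformerXL-style): attend to the previous chunk only
def previous_chunk(x):
    return [] if x < 1 else [x - 1]
# a sliding window over the last two chunks (illustrative variant)
def last_two_chunks(x):
    return [i for i in (x - 2, x - 1) if i >= 0]
model = ChunkedTransformerLM(vocab_size=32000, chunk_selector=previous_chunk)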
def _parse_commit(self, ref):
"""Parse a commit command."""
lineno = self.lineno
mark = self._get_mark_if_any()
author = self._get_user_info(b'commit', b'author', False)
more_authors = []
while True:
another_author = self._get_user_info(b'commit', b'author', False)
if another_author is not None:
more_authors.append(another_author)
else:
break
committer = self._get_user_info(b'commit', b'committer')
message = self._get_data(b'commit', b'message')
from_ = self._get_from()
merges = []
while True:
merge = self._get_merge()
if merge is not None:
# while the spec suggests it's illegal, git-fast-export
# outputs multiple merges on the one line, e.g.
# merge :x :y :z
these_merges = merge.split(b' ')
merges.extend(these_merges)
else:
break
properties = {}
while True:
name_value = self._get_property()
if name_value is not None:
name, value = name_value
properties[name] = value
else:
break
return commands.CommitCommand(ref, mark, author, committer, message,
from_, merges, list(self.iter_file_commands()), lineno=lineno,
more_authors=more_authors, properties=properties)
|
Parse a commit command.
|
def from_symmop(cls, symmop, time_reversal):
"""
Initialize a MagSymmOp from a SymmOp and time reversal operator.
Args:
symmop (SymmOp): SymmOp
time_reversal (int): Time reversal operator, +1 or -1.
Returns:
MagSymmOp object
"""
magsymmop = cls(symmop.affine_matrix, time_reversal, symmop.tol)
return magsymmop
|
Initialize a MagSymmOp from a SymmOp and time reversal operator.
Args:
symmop (SymmOp): SymmOp
time_reversal (int): Time reversal operator, +1 or -1.
Returns:
MagSymmOp object
|
def _cmd_up(self):
"""Upgrade to a revision"""
revision = self._get_revision()
if not self._rev:
self._log(0, "upgrading current revision")
else:
self._log(0, "upgrading from revision %s" % revision)
for rev in self._revisions[int(revision) - 1:]:
sql_files = glob.glob(os.path.join(self._migration_path, rev, "*.up.sql"))
sql_files.sort()
self._exec(sql_files, rev)
self._log(0, "done: upgraded revision to %s\n" % rev)
|
Upgrade to a revision
|
def extra_space_exists(str1: str, str2: str) -> bool: # noqa
"""
Return True if a space shouldn't exist between two items
"""
ls1, ls2 = len(str1), len(str2)
if str1.isdigit():
# 10 SM
if str2 in ['SM', '0SM']:
return True
# 12 /10
if ls2 > 2 and str2[0] == '/' and str2[1:].isdigit():
return True
if str2.isdigit():
# OVC 040
if str1 in CLOUD_LIST:
return True
# 12/ 10
if ls1 > 2 and str1.endswith('/') and str1[:-1].isdigit():
return True
# 12/1 0
if ls2 == 1 and ls1 > 3 and str1[:2].isdigit() and '/' in str1 and str1[3:].isdigit():
return True
# Q 1001
if str1 in ['Q', 'A']:
return True
# 36010G20 KT
if str2 == 'KT' and str1[-1].isdigit() \
and (str1[:5].isdigit() or (str1.startswith('VRB') and str1[3:5].isdigit())):
return True
# 36010K T
if str2 == 'T' and ls1 >= 6 \
and (str1[:5].isdigit() or (str1.startswith('VRB') and str1[3:5].isdigit())) and str1[-1] == 'K':
return True
# OVC022 CB
if str2 in CLOUD_TRANSLATIONS and str2 not in CLOUD_LIST and ls1 >= 3 and str1[:3] in CLOUD_LIST:
return True
# FM 122400
if str1 in ['FM', 'TL'] and (str2.isdigit() or (str2.endswith('Z') and str2[:-1].isdigit())):
return True
# TX 20/10
if str1 in ['TX', 'TN'] and str2.find('/') != -1:
return True
return False
|
Return True if a space shouldn't exist between two items
|
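Spot checks traced from the rules above; that 'OVC' is in CLOUD_LIST and 'Q' is not are assumptions about the module's constants.
assert extra_space_exists('10', 'SM')          # "10 SM"     -> joined
assert extra_space_exists('Q', '1001')         # "Q 1001"    -> joined
assert extra_space_exists('36010G20', 'KT')    # wind + unit -> joined
assert extra_space_exists('OVC', '040')        # cloud + height -> joined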
def _count_extra_actions(self, game_image):
"""Count the number of extra actions for player in this turn."""
proportional = self._bonus_tools['extra_action_region']
# Use ProportionalRegion to isolate the extra actions area
t, l, b, r = proportional.region_in(game_image)
token_region = game_image[t:b, l:r]
# Use TemplateFinder (multiple) to check for extra actions
game_h, game_w = game_image.shape[0:2]
token_h = int(round(game_h * 27.0 / 960))
token_w = int(round(game_w * 22.0 / 1280))
sizes = (token_h, token_w),
# sizes change every time so just remake it.
# thresholds are tight since need to count conservatively
finder = v.TemplateFinder(pq_data.extra_action_template,
sizes=sizes,
acceptable_threshold=0.1,
immediate_threshold=0.1)
found_tokens = finder.locate_multiple_in(token_region)
return len(found_tokens)
|
Count the number of extra actions for player in this turn.
|
def remove_secondary_linked_files(self, file_path=None, relpath=None,
mimetype=None, time_origin=None,
assoc_with=None):
"""Remove all secondary linked files that match all the criteria,
criterias that are ``None`` are ignored.
:param str file_path: Path of the file.
:param str relpath: Relative filepath.
:param str mimetype: Mimetype of the file.
:param int time_origin: Time origin.
        :param str assoc_with: Associated with.
"""
for attrib in self.linked_file_descriptors[:]:
if file_path is not None and attrib['LINK_URL'] != file_path:
continue
if relpath is not None and attrib['RELATIVE_LINK_URL'] != relpath:
continue
if mimetype is not None and attrib['MIME_TYPE'] != mimetype:
continue
if time_origin is not None and\
attrib['TIME_ORIGIN'] != time_origin:
continue
if assoc_with is not None and\
attrib['ASSOCIATED_WITH'] != assoc_with:
continue
            self.linked_file_descriptors.remove(attrib)
|
Remove all secondary linked files that match all the criteria;
criteria that are ``None`` are ignored.
:param str file_path: Path of the file.
:param str relpath: Relative filepath.
:param str mimetype: Mimetype of the file.
:param int time_origin: Time origin.
:param str assoc_with: Associated with.
|
def DeleteOldRuns(self, cutoff_timestamp=None):
"""Deletes runs that were started before the timestamp given."""
if cutoff_timestamp is None:
raise ValueError("cutoff_timestamp can't be None")
return data_store.REL_DB.DeleteOldCronJobRuns(
cutoff_timestamp=cutoff_timestamp)
|
Deletes runs that were started before the timestamp given.
|
def getDuration(self):
"""Returns the time in minutes taken for this analysis.
If the analysis is not yet 'ready to process', returns 0
If the analysis is still in progress (not yet verified),
duration = date_verified - date_start_process
Otherwise:
duration = current_datetime - date_start_process
:return: time in minutes taken for this analysis
:rtype: int
"""
starttime = self.getStartProcessDate()
if not starttime:
# The analysis is not yet ready to be processed
return 0
endtime = self.getDateVerified() or DateTime()
# Duration in minutes
duration = (endtime - starttime) * 24 * 60
return duration
|
Returns the time in minutes taken for this analysis.
If the analysis is not yet 'ready to process', returns 0
If the analysis is still in progress (not yet verified),
duration = date_verified - date_start_process
Otherwise:
duration = current_datetime - date_start_process
:return: time in minutes taken for this analysis
:rtype: int
|
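The * 24 * 60 factor works because subtracting Zope DateTime objects yields a float number of days; a quick check of the arithmetic:
from DateTime import DateTime  # Zope DateTime, as used above
start = DateTime('2021/01/01 10:00:00 GMT+0')
end = DateTime('2021/01/01 11:30:00 GMT+0')
(end - start) * 24 * 60   # -> 90.0: the day fraction (0.0625) converted to minutes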
def compile_id_list(self, polygon_id_list, nr_of_polygons):
"""
sorts the polygons_id list from least to most occurrences of the zone ids (->speed up)
only 4.8% of all shortcuts include polygons from more than one zone
but only for about 0.4% sorting would be beneficial (zones have different frequencies)
in most of those cases there are only two types of zones (= entries in counted_zones) and one of them
has only one entry.
the polygon lists of all single shortcut are already sorted (during compilation of the binary files)
sorting should be used for closest_timezone_at(), because only in
that use case the polygon lists are quite long (multiple shortcuts are being checked simultaneously).
:param polygon_id_list:
:param nr_of_polygons: length of polygon_id_list
:return: sorted list of polygon_ids, sorted list of zone_ids, boolean: do all entries belong to the same zone
"""
        def all_equal(iterable):
            # True if every entry is equal (vacuously True when empty)
            iterator = iter(iterable)
            first = next(iterator, None)
            return all(x == first for x in iterator)
zone_id_list = empty([nr_of_polygons], dtype=DTYPE_FORMAT_H_NUMPY)
counted_zones = {}
for pointer_local, polygon_id in enumerate(polygon_id_list):
zone_id = self.id_of(polygon_id)
zone_id_list[pointer_local] = zone_id
try:
counted_zones[zone_id] += 1
except KeyError:
counted_zones[zone_id] = 1
if len(counted_zones) == 1:
# there is only one zone. no sorting needed.
return polygon_id_list, zone_id_list, True
if all_equal(list(counted_zones.values())):
# all the zones have the same amount of polygons. no sorting needed.
return polygon_id_list, zone_id_list, False
counted_zones_sorted = sorted(list(counted_zones.items()), key=lambda zone: zone[1])
sorted_polygon_id_list = empty([nr_of_polygons], dtype=DTYPE_FORMAT_H_NUMPY)
sorted_zone_id_list = empty([nr_of_polygons], dtype=DTYPE_FORMAT_H_NUMPY)
pointer_output = 0
for zone_id, amount in counted_zones_sorted:
# write all polygons from this zone in the new list
pointer_local = 0
detected_polygons = 0
while detected_polygons < amount:
if zone_id_list[pointer_local] == zone_id:
# the polygon at the pointer has the wanted zone_id
detected_polygons += 1
sorted_polygon_id_list[pointer_output] = polygon_id_list[pointer_local]
sorted_zone_id_list[pointer_output] = zone_id
pointer_output += 1
pointer_local += 1
return sorted_polygon_id_list, sorted_zone_id_list, False
|
sorts the polygon_id list from least to most occurrences of the zone ids (-> speed-up)
only 4.8% of all shortcuts include polygons from more than one zone
but only for about 0.4% sorting would be beneficial (zones have different frequencies)
in most of those cases there are only two types of zones (= entries in counted_zones) and one of them
has only one entry.
the polygon lists of all single shortcut are already sorted (during compilation of the binary files)
sorting should be used for closest_timezone_at(), because only in
that use case the polygon lists are quite long (multiple shortcuts are being checked simultaneously).
:param polygon_id_list:
:param nr_of_polygons: length of polygon_id_list
:return: sorted list of polygon_ids, sorted list of zone_ids, boolean: do all entries belong to the same zone
|
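The least-frequent-zone-first ordering, restated as a plain-Python sketch; the id-to-zone mapping stands in for self.id_of() and is hypothetical.
from collections import Counter
polygon_ids = [7, 8, 9, 3]
zone_of = {7: 2, 8: 2, 9: 2, 3: 5}   # hypothetical stand-in for self.id_of()
counts = Counter(zone_of[p] for p in polygon_ids)
# sorted() is stable, so the pre-sorted within-zone order is preserved
ordered = sorted(polygon_ids, key=lambda p: counts[zone_of[p]])
# -> [3, 7, 8, 9]: the single-polygon zone is checked first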
def _WsdlHasMethod(self, method_name):
"""Determine if a method is in the wsdl.
Args:
method_name: The name of the method.
Returns:
True if the method is in the wsdl, otherwise False.
"""
try:
self._method_bindings.get(method_name)
return True
except ValueError:
return False
|
Determine if a method is in the wsdl.
Args:
method_name: The name of the method.
Returns:
True if the method is in the wsdl, otherwise False.
|
def get_all_outcome_links_for_context_accounts(self, account_id, outcome_group_style=None, outcome_style=None):
"""
Get all outcome links for context.
"""
path = {}
data = {}
params = {}
# REQUIRED - PATH - account_id
"""ID"""
path["account_id"] = account_id
# OPTIONAL - outcome_style
"""The detail level of the outcomes. Defaults to "abbrev".
Specify "full" for more information."""
if outcome_style is not None:
params["outcome_style"] = outcome_style
# OPTIONAL - outcome_group_style
"""The detail level of the outcome groups. Defaults to "abbrev".
Specify "full" for more information."""
if outcome_group_style is not None:
params["outcome_group_style"] = outcome_group_style
self.logger.debug("GET /api/v1/accounts/{account_id}/outcome_group_links with query params: {params} and form data: {data}".format(params=params, data=data, **path))
return self.generic_request("GET", "/api/v1/accounts/{account_id}/outcome_group_links".format(**path), data=data, params=params, all_pages=True)
|
Get all outcome links for context.
|
def connect(self):
"Connect to a host on a given (SSL) port."
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((self.host, self.port))
boto.log.debug("wrapping ssl socket; CA certificate file=%s",
self.ca_certs)
self.sock = ssl.wrap_socket(sock, keyfile=self.key_file,
certfile=self.cert_file,
cert_reqs=ssl.CERT_REQUIRED,
ca_certs=self.ca_certs)
cert = self.sock.getpeercert()
        hostname = self.host.split(':')[0]  # strip any :port suffix
if not ValidateCertificateHostname(cert, hostname):
raise InvalidCertificateException(hostname,
cert,
'remote hostname "%s" does not match '\
'certificate' % hostname)
|
Connect to a host on a given (SSL) port.
|
def get_appstruct(self):
""" return list of tuples keys and values corresponding to this model's
data """
result = []
for k in self._get_keys():
result.append((k, getattr(self, k)))
return result
|
return a list of (key, value) tuples corresponding to this model's
data
|
def _negotiate_SOCKS4(self, dest_addr, dest_port):
"""
Negotiates a connection through a SOCKS4 server.
"""
proxy_type, addr, port, rdns, username, password = self.proxy
writer = self.makefile("wb")
reader = self.makefile("rb", 0) # buffering=0 renamed in Python 3
try:
# Check if the destination address provided is an IP address
remote_resolve = False
try:
addr_bytes = socket.inet_aton(dest_addr)
except socket.error:
# It's a DNS name. Check where it should be resolved.
if rdns:
addr_bytes = b"\x00\x00\x00\x01"
remote_resolve = True
else:
addr_bytes = socket.inet_aton(socket.gethostbyname(dest_addr))
# Construct the request packet
writer.write(struct.pack(">BBH", 0x04, 0x01, dest_port))
writer.write(addr_bytes)
# The username parameter is considered userid for SOCKS4
if username:
writer.write(username)
writer.write(b"\x00")
# DNS name if remote resolving is required
# NOTE: This is actually an extension to the SOCKS4 protocol
# called SOCKS4A and may not be supported in all cases.
if remote_resolve:
writer.write(dest_addr.encode('idna') + b"\x00")
writer.flush()
# Get the response from the server
resp = self._readall(reader, 8)
if resp[0:1] != b"\x00":
# Bad data
raise GeneralProxyError("SOCKS4 proxy server sent invalid data")
status = ord(resp[1:2])
if status != 0x5A:
# Connection failed: server returned an error
error = SOCKS4_ERRORS.get(status, "Unknown error")
raise SOCKS4Error("{0:#04x}: {1}".format(status, error))
# Get the bound address/port
self.proxy_sockname = (socket.inet_ntoa(resp[4:]), struct.unpack(">H", resp[2:4])[0])
if remote_resolve:
self.proxy_peername = socket.inet_ntoa(addr_bytes), dest_port
else:
self.proxy_peername = dest_addr, dest_port
finally:
reader.close()
writer.close()
|
Negotiates a connection through a SOCKS4 server.
|
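The request wire format built above, assembled standalone for reference:
import socket
import struct
# SOCKS4 CONNECT: VN=4, CD=1, DSTPORT, DSTIP, then NUL-terminated userid
request = struct.pack(">BBH", 0x04, 0x01, 443)
request += socket.inet_aton("93.184.216.34")
request += b"user\x00"
# SOCKS4A remote resolution: DSTIP is 0.0.0.x (x nonzero), hostname appended
request_4a = (struct.pack(">BBH", 0x04, 0x01, 443)
              + b"\x00\x00\x00\x01" + b"user\x00" + b"example.com\x00")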
def readGraph(edgeList, nodeList = None, directed = False, idKey = 'ID', eSource = 'From', eDest = 'To'):
"""Reads the files given by _edgeList_ and _nodeList_ and creates a networkx graph for the files.
This is designed only for the files produced by metaknowledge and is meant to be the reverse of [writeGraph()](#metaknowledge.graphHelpers.writeGraph), if this does not produce the desired results the networkx builtin [networkx.read_edgelist()](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.readwrite.edgelist.read_edgelist.html) could be tried as it is aimed at a more general usage.
    The read edge list format assumes the column named _eSource_ (default `'From'`) is the source node, then the column _eDest_ (default `'To'`) gives the destination and all other columns are attributes of the edges, e.g. weight.
The read node list format assumes the column _idKey_ (default `'ID'`) is the ID of the node for the edge list and the resulting network. All other columns are considered attributes of the node, e.g. count.
**Note**: If the names of the columns do not match those given to **readGraph()** a `KeyError` exception will be raised.
**Note**: If nodes appear in the edgelist but not the nodeList they will be created silently with no attributes.
# Parameters
_edgeList_ : `str`
> a string giving the path to the edge list file
_nodeList_ : `optional [str]`
> default `None`, a string giving the path to the node list file
_directed_ : `optional [bool]`
> default `False`, if `True` the produced network is directed from _eSource_ to _eDest_
_idKey_ : `optional [str]`
> default `'ID'`, the name of the ID column in the node list
_eSource_ : `optional [str]`
> default `'From'`, the name of the source column in the edge list
_eDest_ : `optional [str]`
> default `'To'`, the name of the destination column in the edge list
# Returns
`networkx Graph`
> the graph described by the input files
"""
    progArgs = (0, "Starting to read graphs")
if metaknowledge.VERBOSE_MODE:
progKwargs = {'dummy' : False}
else:
progKwargs = {'dummy' : True}
with _ProgressBar(*progArgs, **progKwargs) as PBar:
if directed:
grph = nx.DiGraph()
else:
grph = nx.Graph()
if nodeList:
PBar.updateVal(0, "Reading " + nodeList)
f = open(os.path.expanduser(os.path.abspath(nodeList)))
nFile = csv.DictReader(f)
for line in nFile:
vals = line
ndID = vals[idKey]
del vals[idKey]
if len(vals) > 0:
grph.add_node(ndID, **vals)
else:
grph.add_node(ndID)
f.close()
PBar.updateVal(.25, "Reading " + edgeList)
f = open(os.path.expanduser(os.path.abspath(edgeList)))
eFile = csv.DictReader(f)
for line in eFile:
vals = line
eFrom = vals[eSource]
eTo = vals[eDest]
del vals[eSource]
del vals[eDest]
if len(vals) > 0:
grph.add_edge(eFrom, eTo, **vals)
else:
grph.add_edge(eFrom, eTo)
PBar.finish("{} nodes and {} edges found".format(len(grph.nodes()), len(grph.edges())))
f.close()
return grph
|
Reads the files given by _edgeList_ and _nodeList_ and creates a networkx graph for the files.
This is designed only for the files produced by metaknowledge and is meant to be the reverse of [writeGraph()](#metaknowledge.graphHelpers.writeGraph), if this does not produce the desired results the networkx builtin [networkx.read_edgelist()](https://networkx.github.io/documentation/networkx-1.10/reference/generated/networkx.readwrite.edgelist.read_edgelist.html) could be tried as it is aimed at a more general usage.
The read edge list format assumes the column named _eSource_ (default `'From'`) is the source node, then the column _eDest_ (default `'To'`) gives the destination and all other columns are attributes of the edges, e.g. weight.
The read node list format assumes the column _idKey_ (default `'ID'`) is the ID of the node for the edge list and the resulting network. All other columns are considered attributes of the node, e.g. count.
**Note**: If the names of the columns do not match those given to **readGraph()** a `KeyError` exception will be raised.
**Note**: If nodes appear in the edgelist but not the nodeList they will be created silently with no attributes.
# Parameters
_edgeList_ : `str`
> a string giving the path to the edge list file
_nodeList_ : `optional [str]`
> default `None`, a string giving the path to the node list file
_directed_ : `optional [bool]`
> default `False`, if `True` the produced network is directed from _eSource_ to _eDest_
_idKey_ : `optional [str]`
> default `'ID'`, the name of the ID column in the node list
_eSource_ : `optional [str]`
> default `'From'`, the name of the source column in the edge list
_eDest_ : `optional [str]`
> default `'To'`, the name of the destination column in the edge list
# Returns
`networkx Graph`
> the graph described by the input files
|
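A round-trip sketch using the column layout described above:
import csv
with open('nodes.csv', 'w', newline='') as f:
    w = csv.DictWriter(f, fieldnames=['ID', 'count'])
    w.writeheader()
    w.writerow({'ID': 'a', 'count': 3})
    w.writerow({'ID': 'b', 'count': 1})
with open('edges.csv', 'w', newline='') as f:
    w = csv.DictWriter(f, fieldnames=['From', 'To', 'weight'])
    w.writeheader()
    w.writerow({'From': 'a', 'To': 'b', 'weight': 2})
g = readGraph('edges.csv', nodeList='nodes.csv')
g.nodes['a']['count']   # -> '3' (csv.DictReader returns attribute values as strings)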
def SensorShare(self, sensor_id, parameters):
"""
Share a sensor with a user
@param sensor_id (int) - Id of sensor to be shared
@param parameters (dictionary) - Additional parameters for the call
@return (bool) - Boolean indicating whether the ShareSensor call was successful
"""
if not parameters['user']['id']:
parameters['user'].pop('id')
if not parameters['user']['username']:
parameters['user'].pop('username')
if self.__SenseApiCall__("/sensors/{0}/users".format(sensor_id), "POST", parameters = parameters):
return True
else:
self.__error__ = "api call unsuccessful"
return False
|
Share a sensor with a user
@param sensor_id (int) - Id of sensor to be shared
@param parameters (dictionary) - Additional parameters for the call
@return (bool) - Boolean indicating whether the ShareSensor call was successful
|
def find_matching_builtin(self, node):
"""
Return matched keyword.
        If the node aliases a correct keyword (and only it), it matches.
"""
for path in EQUIVALENT_ITERATORS.keys():
correct_alias = {path_to_node(path)}
if self.aliases[node.func] == correct_alias:
return path
|
Return matched keyword.
If the node aliases a correct keyword (and only it), it matches.
|
def redact_http_basic_auth(output):
'''
Remove HTTP user and password
'''
# We can't use re.compile because re.compile(someregex).sub() doesn't
# support flags even in Python 2.7.
url_re = '(https?)://.*@'
redacted = r'\1://<redacted>@'
if sys.version_info >= (2, 7):
# re.sub() supports flags as of 2.7, use this to do a case-insensitive
# match.
return re.sub(url_re, redacted, output, flags=re.IGNORECASE)
else:
# We're on python 2.6, test if a lowercased version of the output
# string matches the regex...
if re.search(url_re, output.lower()):
# ... and if it does, perform the regex substitution.
return re.sub(url_re, redacted, output.lower())
# No match, just return the original string
return output
|
Remove HTTP user and password
|
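Behavior sketch for the function above, including the greedy-match caveat:
redact_http_basic_auth('pull https://alice:s3cret@example.com/repo.git')
# -> 'pull https://<redacted>@example.com/repo.git'
# note: the greedy '.*' redacts up to the LAST '@' on the line, so a later
# bare '@' (e.g. an email address) would widen the redacted span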
def _load_schemas(self):
""" load all schemas into schema dict """
types = ('bill', 'committee', 'person', 'vote', 'event')
for type_ in types:
    schema_path = os.path.join(os.path.split(__file__)[0],
                               '../schemas/%s.json' % type_)
    # use a context manager so each schema file is closed promptly
    with open(schema_path) as f:
        self._schema[type_] = json.load(f)
    self._schema[type_]['properties'][settings.LEVEL_FIELD] = {
        'minLength': 2, 'type': 'string'}
# bills & votes
self._schema['bill']['properties']['session']['enum'] = \
self.all_sessions()
self._schema['vote']['properties']['session']['enum'] = \
self.all_sessions()
# legislators
terms = [t['name'] for t in self.metadata['terms']]
# ugly break here b/c this line is nearly impossible to split
self._schema['person']['properties']['roles'][
'items']['properties']['term']['enum'] = terms
|
load all schemas into schema dict
|
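A hedged sketch of downstream use, assuming the jsonschema package and a validator object whose _schema dict has been populated by _load_schemas; the bill fields are illustrative only.
import jsonschema

bill = {'session': '2011', 'title': 'An example bill'}  # hypothetical record
jsonschema.validate(bill, validator._schema['bill'])    # raises on mismatch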
def triple_apply(self, triple_apply_fn, mutated_fields, input_fields=None):
'''
Apply a transform function to each edge and its associated source and
target vertices in parallel. Each edge is visited once and in parallel.
Modification to vertex data is protected by lock. The effect on the
returned SGraph is equivalent to the following pseudocode:
>>> PARALLEL FOR (source, edge, target) AS triple in G:
... LOCK (triple.source, triple.target)
... (source, edge, target) = triple_apply_fn(triple)
... UNLOCK (triple.source, triple.target)
... END PARALLEL FOR
Parameters
----------
triple_apply_fn : function : (dict, dict, dict) -> (dict, dict, dict)
The function to apply to each triple of (source_vertex, edge,
target_vertex). This function must take as input a tuple of
(source_data, edge_data, target_data) and return a tuple of
(new_source_data, new_edge_data, new_target_data). All variables in
both tuples must be of dict type.
This can also be a toolkit extension function which is compiled
as a native shared library using the SDK.
mutated_fields : list[str] | str
Fields that ``triple_apply_fn`` will mutate. Note: columns that are
actually mutated by the triple apply function but not specified in
``mutated_fields`` will have undetermined effects.
input_fields : list[str] | str, optional
Fields that ``triple_apply_fn`` will have access to.
The default is ``None``, which grants access to all fields.
``mutated_fields`` will always be included in ``input_fields``.
Returns
-------
out : SGraph
A new SGraph with updated vertex and edge data. Only fields
specified in the ``mutated_fields`` parameter are updated.
Notes
-----
- ``triple_apply`` does not currently support creating new fields in the
lambda function.
Examples
--------
Import turicreate and set up the graph.
>>> edges = turicreate.SFrame({'source': range(9), 'dest': range(1, 10)})
>>> g = turicreate.SGraph()
>>> g = g.add_edges(edges, src_field='source', dst_field='dest')
>>> g.vertices['degree'] = 0
Define the function to apply to each (source_node, edge, target_node)
triple.
>>> def degree_count_fn (src, edge, dst):
src['degree'] += 1
dst['degree'] += 1
return (src, edge, dst)
Apply the function to the SGraph.
>>> g = g.triple_apply(degree_count_fn, mutated_fields=['degree'])
Using a native toolkit extension function:
.. code-block:: c++
#include <turicreate/sdk/toolkit_function_macros.hpp>
#include <vector>
using namespace turi;
std::vector<variant_type> connected_components_parameterized(
std::map<std::string, flexible_type>& src,
std::map<std::string, flexible_type>& edge,
std::map<std::string, flexible_type>& dst,
std::string column) {
if (src[column] < dst[column]) dst[column] = src[column];
else src[column] = dst[column];
return {to_variant(src), to_variant(edge), to_variant(dst)};
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(connected_components_parameterized, "src", "edge", "dst", "column");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> from example import connected_components_parameterized as cc
>>> e = tc.SFrame({'__src_id':[1,2,3,4,5], '__dst_id':[3,1,2,5,4]})
>>> g = tc.SGraph().add_edges(e)
>>> g.vertices['cid'] = g.vertices['__id']
>>> for i in range(2):
... g = g.triple_apply(lambda src, edge, dst: cc(src, edge, dst, 'cid'), ['cid'], ['cid'])
>>> g.vertices['cid']
dtype: int
Rows: 5
[4, 1, 1, 1, 4]
'''
assert inspect.isfunction(triple_apply_fn), "Input must be a function"
if not (type(mutated_fields) is list or type(mutated_fields) is str):
raise TypeError('mutated_fields must be str or list of str')
if not (input_fields is None or type(input_fields) is list or type(input_fields) is str):
raise TypeError('input_fields must be str or list of str')
if type(mutated_fields) is str:
    mutated_fields = [mutated_fields]
if len(mutated_fields) == 0:
    raise ValueError('mutated_fields cannot be empty')
for f in ['__id', '__src_id', '__dst_id']:
if f in mutated_fields:
raise ValueError('mutated_fields cannot contain %s' % f)
all_fields = self.get_fields()
if not set(mutated_fields).issubset(set(all_fields)):
extra_fields = list(set(mutated_fields).difference(set(all_fields)))
raise ValueError('graph does not contain fields: %s' % str(extra_fields))
# select input fields
if input_fields is None:
input_fields = self.get_fields()
elif type(input_fields) is str:
input_fields = [input_fields]
# make input fields a superset of mutated_fields
input_fields_set = set(input_fields + mutated_fields)
input_fields = [x for x in self.get_fields() if x in input_fields_set]
g = self.select_fields(input_fields)
nativefn = None
try:
from .. import extensions
nativefn = extensions._build_native_function_call(triple_apply_fn)
except Exception:
    # failures are fine; fall through to the pure-lambda path below
    pass
if nativefn is not None:
with cython_context():
return SGraph(_proxy=g.__proxy__.lambda_triple_apply_native(nativefn, mutated_fields))
else:
with cython_context():
return SGraph(_proxy=g.__proxy__.lambda_triple_apply(triple_apply_fn, mutated_fields))
|
Apply a transform function to each edge and its associated source and
target vertices in parallel. Each edge is visited once and in parallel.
Modification to vertex data is protected by lock. The effect on the
returned SGraph is equivalent to the following pseudocode:
>>> PARALLEL FOR (source, edge, target) AS triple in G:
... LOCK (triple.source, triple.target)
... (source, edge, target) = triple_apply_fn(triple)
... UNLOCK (triple.source, triple.target)
... END PARALLEL FOR
Parameters
----------
triple_apply_fn : function : (dict, dict, dict) -> (dict, dict, dict)
The function to apply to each triple of (source_vertex, edge,
target_vertex). This function must take as input a tuple of
(source_data, edge_data, target_data) and return a tuple of
(new_source_data, new_edge_data, new_target_data). All variables in
both tuples must be of dict type.
This can also be a toolkit extension function which is compiled
as a native shared library using the SDK.
mutated_fields : list[str] | str
Fields that ``triple_apply_fn`` will mutate. Note: columns that are
actually mutated by the triple apply function but not specified in
``mutated_fields`` will have undetermined effects.
input_fields : list[str] | str, optional
Fields that ``triple_apply_fn`` will have access to.
The default is ``None``, which grants access to all fields.
``mutated_fields`` will always be included in ``input_fields``.
Returns
-------
out : SGraph
A new SGraph with updated vertex and edge data. Only fields
specified in the ``mutated_fields`` parameter are updated.
Notes
-----
- ``triple_apply`` does not currently support creating new fields in the
lambda function.
Examples
--------
Import turicreate and set up the graph.
>>> edges = turicreate.SFrame({'source': range(9), 'dest': range(1, 10)})
>>> g = turicreate.SGraph()
>>> g = g.add_edges(edges, src_field='source', dst_field='dest')
>>> g.vertices['degree'] = 0
Define the function to apply to each (source_node, edge, target_node)
triple.
>>> def degree_count_fn (src, edge, dst):
src['degree'] += 1
dst['degree'] += 1
return (src, edge, dst)
Apply the function to the SGraph.
>>> g = g.triple_apply(degree_count_fn, mutated_fields=['degree'])
Using a native toolkit extension function:
.. code-block:: c++
#include <turicreate/sdk/toolkit_function_macros.hpp>
#include <vector>
using namespace turi;
std::vector<variant_type> connected_components_parameterized(
std::map<std::string, flexible_type>& src,
std::map<std::string, flexible_type>& edge,
std::map<std::string, flexible_type>& dst,
std::string column) {
if (src[column] < dst[column]) dst[column] = src[column];
else src[column] = dst[column];
return {to_variant(src), to_variant(edge), to_variant(dst)};
}
BEGIN_FUNCTION_REGISTRATION
REGISTER_FUNCTION(connected_components_parameterized, "src", "edge", "dst", "column");
END_FUNCTION_REGISTRATION
compiled into example.so
>>> from example import connected_components_parameterized as cc
>>> e = tc.SFrame({'__src_id':[1,2,3,4,5], '__dst_id':[3,1,2,5,4]})
>>> g = tc.SGraph().add_edges(e)
>>> g.vertices['cid'] = g.vertices['__id']
>>> for i in range(2):
... g = g.triple_apply(lambda src, edge, dst: cc(src, edge, dst, 'cid'), ['cid'], ['cid'])
>>> g.vertices['cid']
dtype: int
Rows: 5
[4, 1, 1, 1, 4]
|
def _set_switchport_basic(self, v, load=False):
"""
Setter method for switchport_basic, mapped from YANG variable /interface/ethernet/switchport_basic (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_switchport_basic is considered a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_switchport_basic() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=switchport_basic.switchport_basic, is_container='container', presence=False, yang_name="switchport-basic", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """switchport_basic must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=switchport_basic.switchport_basic, is_container='container', presence=False, yang_name="switchport-basic", rest_name="", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-drop-node-name': None, u'sort-priority': u'RUNNCFG_INTERFACE_LEVEL_MODE_SWITCHPORT_CONFIG'}}, namespace='urn:brocade.com:mgmt:brocade-interface', defining_module='brocade-interface', yang_type='container', is_config=True)""",
})
self.__switchport_basic = t
if hasattr(self, '_set'):
self._set()
|
Setter method for switchport_basic, mapped from YANG variable /interface/ethernet/switchport_basic (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_switchport_basic is considered a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_switchport_basic() directly.
|
def get_memory_info(self):
"""Return a tuple with the process' RSS and VMS size."""
rss, vms = _psutil_bsd.get_process_memory_info(self.pid)[:2]
return nt_meminfo(rss, vms)
|
Return a tuple with the process' RSS and VMS size.
|
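nt_meminfo is defined elsewhere in the module; a minimal sketch of an equivalent shape, with hypothetical values:
from collections import namedtuple

nt_meminfo = namedtuple('meminfo', ['rss', 'vms'])  # assumed field layout
info = nt_meminfo(rss=10485760, vms=52428800)       # sizes in bytes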
def contains(self, x, ctrs, kdtree=None):
"""Check if the set of balls contains `x`. Uses a K-D Tree to
perform the search if provided."""
return self.overlap(x, ctrs, kdtree=kdtree) > 0
|
Check if the set of balls contains `x`. Uses a K-D Tree to
perform the search if provided.
|
def load_edbfile(file=None):
"""Load the targets from a file"""
import ephem, string, math
if file is None:
    import tkFileDialog
    try:
        file = tkFileDialog.askopenfilename()
    except Exception:
        return
if file is None or file == '':
    return
# read the whole database file, closing it promptly
with open(file) as f:
    lines = f.readlines()
for line in lines:
    p = line.split(',')
    name = p[0].strip().upper()
    mpc_objs[name] = ephem.readdb(line)
    mpc_objs[name].compute()
    objInfoDict[name] = "%6s %6s %6s\n" % (string.center("a", 6),
                                           string.center("e", 6),
                                           string.center("i", 6))
    objInfoDict[name] += "%6.2f %6.3f %6.2f\n" % (mpc_objs[name]._a,
                                                  mpc_objs[name]._e,
                                                  math.degrees(mpc_objs[name]._inc))
    objInfoDict[name] += "%7.2f %7.2f\n" % (mpc_objs[name].earth_distance,
                                            mpc_objs[name].mag)
doplot(mpc_objs)
|
Load the targets from a file
|
def to_CAG(self):
""" Export to a Causal Analysis Graph (CAG) PyGraphviz AGraph object.
The CAG shows the influence relationships between the variables and
elides the function nodes."""
G = nx.DiGraph()
for (name, attrs) in self.nodes(data=True):
if attrs["type"] == "variable":
for pred_fn in self.predecessors(name):
if not any(
fn_type in pred_fn
for fn_type in ("condition", "decision")
):
for pred_var in self.predecessors(pred_fn):
G.add_node(
self.nodes[pred_var]["basename"],
**self.nodes[pred_var],
)
G.add_node(attrs["basename"], **attrs)
G.add_edge(
self.nodes[pred_var]["basename"],
attrs["basename"],
)
if attrs["is_loop_index"]:
G.add_edge(attrs["basename"], attrs["basename"])
return G
|
Export to a Causal Analysis Graph (CAG) as a NetworkX DiGraph.
The CAG shows the influence relationships between the variables and
elides the function nodes.
|
def run_rnaseq_ann_filter(data):
"""Run RNA-seq annotation and filtering.
"""
data = to_single_data(data)
if dd.get_vrn_file(data):
eff_file = effects.add_to_vcf(dd.get_vrn_file(data), data)[0]
if eff_file:
data = dd.set_vrn_file(data, eff_file)
ann_file = population.run_vcfanno(dd.get_vrn_file(data), data)
if ann_file:
data = dd.set_vrn_file(data, ann_file)
variantcaller = dd.get_variantcaller(data)
if variantcaller and ("gatk-haplotype" in variantcaller):
filter_file = variation.gatk_filter_rnaseq(dd.get_vrn_file(data), data)
data = dd.set_vrn_file(data, filter_file)
# remove variants close to splice junctions
vrn_file = dd.get_vrn_file(data)
vrn_file = variation.filter_junction_variants(vrn_file, data)
data = dd.set_vrn_file(data, vrn_file)
return [[data]]
|
Run RNA-seq annotation and filtering.
|
def template(self, lambda_arn, role_arn, output=None, json=False):
"""
Only build the template file.
"""
if not lambda_arn:
raise ClickException("Lambda ARN is required to template.")
if not role_arn:
raise ClickException("Role ARN is required to template.")
self.zappa.credentials_arn = role_arn
# Create the template!
template = self.zappa.create_stack_template(
lambda_arn=lambda_arn,
lambda_name=self.lambda_name,
api_key_required=self.api_key_required,
iam_authorization=self.iam_authorization,
authorizer=self.authorizer,
cors_options=self.cors,
description=self.apigateway_description,
policy=self.apigateway_policy,
endpoint_configuration=self.endpoint_configuration
)
if not output:
template_file = self.lambda_name + '-template-' + str(int(time.time())) + '.json'
else:
template_file = output
with open(template_file, 'wb') as out:
out.write(bytes(template.to_json(indent=None, separators=(',',':')), "utf-8"))
if not json:
click.echo(click.style("Template created", fg="green", bold=True) + ": " + click.style(template_file, bold=True))
else:
with open(template_file, 'r') as out:
print(out.read())
|
Only build the template file.
|
def parse_sphinx_searchindex(searchindex):
"""Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
"""
# Make sure searchindex uses UTF-8 encoding
if hasattr(searchindex, 'decode'):
searchindex = searchindex.decode('UTF-8')
# parse objects
query = 'objects:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"objects:" not found in search index')
sel = _select_block(searchindex[pos:], '{', '}')
objects = _parse_dict_recursive(sel)
# parse filenames
query = 'filenames:'
pos = searchindex.find(query)
if pos < 0:
raise ValueError('"filenames:" not found in search index')
filenames = searchindex[pos + len(query) + 1:]
filenames = filenames[:filenames.find(']')]
filenames = [f.strip('"') for f in filenames.split(',')]
return filenames, objects
|
Parse a Sphinx search index
Parameters
----------
searchindex : str
The Sphinx search index (contents of searchindex.js)
Returns
-------
filenames : list of str
The file names parsed from the search index.
objects : dict
The objects parsed from the search index.
|
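A hedged call sketch with a toy payload; _select_block and _parse_dict_recursive are module helpers not shown here, so this illustrates only the expected call shape.
payload = 'Search.setIndex({objects:{"mod":{}},filenames:["a.rst","b.rst"]})'
filenames, objects = parse_sphinx_searchindex(payload)
print(filenames)  # expected: ['a.rst', 'b.rst']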
def convert(source, ext=COMPLETE, fmt=HTML, dname=None):
"""Converts a string of MultiMarkdown text to the requested format.
Transclusion is performed if the COMPATIBILITY extension is not set and dname is set to a
valid directory.
Keyword arguments:
source -- string containing MultiMarkdown text
ext -- extension bitfield to pass to conversion process
fmt -- flag indicating output format to use
dname -- Path to use for transclusion - if None, transclusion functionality is bypassed
"""
if dname and not ext & COMPATIBILITY:
if os.path.isfile(dname):
dname = os.path.abspath(os.path.dirname(dname))
source, _ = _expand_source(source, dname, fmt)
_MMD_LIB.markdown_to_string.argtypes = [ctypes.c_char_p, ctypes.c_ulong, ctypes.c_int]
_MMD_LIB.markdown_to_string.restype = ctypes.c_char_p
src = source.encode('utf-8')
return _MMD_LIB.markdown_to_string(src, ext, fmt).decode('utf-8')
|
Converts a string of MultiMarkdown text to the requested format.
Transclusion is performed if the COMPATIBILITY extension is not set and dname is set to a
valid directory.
Keyword arguments:
source -- string containing MultiMarkdown text
ext -- extension bitfield to pass to conversion process
fmt -- flag indicating output format to use
dname -- Path to use for transclusion - if None, transclusion functionality is bypassed
|
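A minimal usage sketch, relying on the module's own COMPLETE/HTML defaults and a loaded _MMD_LIB:
html = convert('# Title\n\nSome *MultiMarkdown* text.')
print(html)  # rendered HTML string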
def get_src_or_dst_prompt(mode):
"""
String together the proper prompt based on the mode
:param str mode: "read" or "write"
:return str prompt: The prompt needed
"""
_words = {"read": "from", "write": "to"}
prompt = "Where would you like to {} your file(s) {}?\n" \
"1. Desktop ({})\n" \
"2. Downloads ({})\n" \
"3. Current ({})\n" \
"4. Browse".format(mode, _words[mode],
os.path.expanduser('~/Desktop'),
os.path.expanduser('~/Downloads'),
os.getcwd())
return prompt
|
String together the proper prompt based on the mode
:param str mode: "read" or "write"
:return str prompt: The prompt needed
|
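Example output shape (the paths shown depend on the local environment):
print(get_src_or_dst_prompt('read'))
# Where would you like to read your file(s) from?
# 1. Desktop (/home/user/Desktop)
# 2. Downloads (/home/user/Downloads)
# 3. Current (/home/user)
# 4. Browse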
def deleted_records(endpoint):
"""Populate the ``deleted_records`` key."""
@utils.for_each_value
def _deleted_records(self, key, value):
deleted_recid = maybe_int(value.get('a'))
if deleted_recid:
return get_record_ref(deleted_recid, endpoint)
return _deleted_records
|
Populate the ``deleted_records`` key.
|
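A hedged registration sketch in the dojson style this rule factory is written for; the model name and MARC tag below are hypothetical, not taken from the source.
# 'model' stands for a dojson Overdo instance; '^981..' is an
# illustrative MARC tag only.
model.over('deleted_records', '^981..')(deleted_records('literature'))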