docstring (string, length 52-499) | function (string, length 67-35.2k) | __index_level_0__ (int64, 52.6k-1.16M)
|---|---|---|
Initialize the task context object.
Args:
task_name (str): The name of the task.
dag_name (str): The name of the DAG the task was started from.
workflow_name (str): The name of the workflow the task was started from.
workflow_id (str): The id of the workflow this task is member of.
worker_hostname (str): The name of the worker executing this task.
|
def __init__(self, task_name, dag_name, workflow_name, workflow_id, worker_hostname):
self.task_name = task_name
self.dag_name = dag_name
self.workflow_name = workflow_name
self.workflow_id = workflow_id
self.worker_hostname = worker_hostname
| 764,474
|
Builds and registers a global :class:`FilterSet`.
Args:
name (str): The name of the set.
Yields:
FilterSetFactory: A configurable factory for building a :class:`FilterSet`.
|
def add_filter_set(name):
factory = FilterSetFactory(name)
yield factory
filter_sets[name] = factory.build_filter_set()
| 764,483
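A minimal usage sketch for the factory above, assuming add_filter_set is decorated with contextlib.contextmanager and that FilterSetFactory exposes an add_filter method (both are assumptions, not shown in the record):

# Hypothetical usage; add_filter() is an assumed FilterSetFactory method.
with add_filter_set("defaults") as factory:
    factory.add_filter("trim")
# On exit, filter_sets["defaults"] holds the built FilterSet.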
|
Stop a worker process.
Args:
config (Config): Reference to the configuration object from which the
settings for the worker are retrieved.
worker_ids (list): An optional list of ids of the workers that should be stopped.
|
def stop_worker(config, *, worker_ids=None):
if worker_ids is not None and not isinstance(worker_ids, list):
worker_ids = [worker_ids]
celery_app = create_app(config)
celery_app.control.shutdown(destination=worker_ids)
| 764,502
|
Return a list of all available workers.
Args:
config (Config): Reference to the configuration object from which the
settings are retrieved.
filter_by_queues (list): Restrict the returned workers to workers that listen to
at least one of the queue names in this list.
Returns:
list: A list of WorkerStats objects.
|
def list_workers(config, *, filter_by_queues=None):
celery_app = create_app(config)
worker_stats = celery_app.control.inspect().stats()
queue_stats = celery_app.control.inspect().active_queues()
if worker_stats is None:
return []
workers = []
for name, w_stat in worker_stats.items():
queues = [QueueStats.from_celery(q_stat) for q_stat in queue_stats[name]]
add_worker = filter_by_queues is None
if not add_worker:
for queue in queues:
if queue.name in filter_by_queues:
add_worker = True
break
if add_worker:
workers.append(WorkerStats.from_celery(name, w_stat, queues))
return workers
| 764,503
|
Asserts that an element has the specified CSS styles. ::
element.assert_style({"color": "rgb(0,0,255)", "font-size": re.compile(r"px")})
Args:
styles (Dict[str, str | RegexObject]): The expected styles.
Returns:
True
Raises:
ExpectationNotMet: The element doesn't have the specified styles.
|
def assert_style(self, styles, **kwargs):
query = StyleQuery(styles, **kwargs)
@self.synchronize(wait=query.wait)
def assert_style():
if not query.resolves_for(self):
raise ExpectationNotMet(query.failure_message)
return True
return assert_style()
| 764,552
|
Checks if the page or current node has a radio button or checkbox with the given label,
value, or id, that is currently checked.
Args:
locator (str): The label, name, or id of a checked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it exists.
|
def has_checked_field(self, locator, **kwargs):
kwargs["checked"] = True
return self.has_selector("field", locator, **kwargs)
| 764,557
|
Checks if the page or current node has no radio button or checkbox with the given label,
value, or id that is currently checked.
Args:
locator (str): The label, name, or id of a checked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it doesn't exist.
|
def has_no_checked_field(self, locator, **kwargs):
kwargs["checked"] = True
return self.has_no_selector("field", locator, **kwargs)
| 764,558
|
Checks if the page or current node has a radio button or checkbox with the given label,
value, or id, that is currently unchecked.
Args:
locator (str): The label, name, or id of an unchecked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it exists.
|
def has_unchecked_field(self, locator, **kwargs):
kwargs["checked"] = False
return self.has_selector("field", locator, **kwargs)
| 764,559
|
Checks if the page or current node has no radio button or checkbox with the given label,
value, or id, that is currently unchecked.
Args:
locator (str): The label, name, or id of an unchecked field.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
Returns:
bool: Whether it doesn't exist.
|
def has_no_unchecked_field(self, locator, **kwargs):
kwargs["checked"] = False
return self.has_no_selector("field", locator, **kwargs)
| 764,560
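Taken together, the four helpers above pair a positive and a negative check for each checkbox state; a short usage sketch against a hypothetical page object:

# Usage sketch (page is a hypothetical session or node object):
page.has_checked_field("Newsletter")      # True if a matching checked box exists
page.has_no_checked_field("Newsletter")   # inverse of the above
page.has_unchecked_field("Terms")         # True if a matching unchecked box exists
page.has_no_unchecked_field("Terms")      # inverse of the above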
|
Asserts that the page or current node has the given text content, ignoring any HTML tags.
Args:
*args: Variable length argument list for :class:`TextQuery`.
**kwargs: Arbitrary keyword arguments for :class:`TextQuery`.
Returns:
True
Raises:
ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
|
def assert_text(self, *args, **kwargs):
query = TextQuery(*args, **kwargs)
@self.synchronize(wait=query.wait)
def assert_text():
count = query.resolve_for(self)
if not (matches_count(count, query.options) and
(count > 0 or expects_none(query.options))):
raise ExpectationNotMet(query.failure_message)
return True
return assert_text()
| 764,561
|
Asserts that the page or current node doesn't have the given text content, ignoring any
HTML tags.
Args:
*args: Variable length argument list for :class:`TextQuery`.
**kwargs: Arbitrary keyword arguments for :class:`TextQuery`.
Returns:
True
Raises:
ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
|
def assert_no_text(self, *args, **kwargs):
query = TextQuery(*args, **kwargs)
@self.synchronize(wait=query.wait)
def assert_no_text():
count = query.resolve_for(self)
if matches_count(count, query.options) and (
count > 0 or expects_none(query.options)):
raise ExpectationNotMet(query.negative_failure_message)
return True
return assert_no_text()
| 764,562
|
Initialize the MongoReconnectProxy.
Args:
obj: The object for which all calls should be wrapped in the AutoReconnect
exception handling block.
methods (set): The list of method names that should be wrapped.
|
def __init__(self, obj, methods):
self._unproxied_object = obj
self._methods = methods
| 764,565
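The initializer above only stores state; a minimal sketch of how the proxying could work, assuming pymongo's AutoReconnect is the exception being handled (this is an illustration, not the library's actual implementation):

# Assumed sketch of the proxy behavior around the stored object.
import functools
import time
from pymongo.errors import AutoReconnect

class MongoReconnectProxySketch:
    def __init__(self, obj, methods):
        self._unproxied_object = obj
        self._methods = methods

    def __getattr__(self, name):
        attr = getattr(self._unproxied_object, name)
        if name not in self._methods:
            return attr

        @functools.wraps(attr)
        def wrapper(*args, **kwargs):
            # Retry the wrapped call until the replica set recovers.
            while True:
                try:
                    return attr(*args, **kwargs)
                except AutoReconnect:
                    time.sleep(0.5)
        return wrapper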
|
Create a new Config object from a configuration file.
Args:
filename (str): The location and name of the configuration file.
strict (bool): If true raises a ConfigLoadError when the configuration
cannot be found.
Returns:
An instance of the Config class.
Raises:
ConfigLoadError: If the configuration cannot be found.
|
def from_file(cls, filename, *, strict=True):
config = cls()
config.load_from_file(filename, strict=strict)
return config
| 764,580
|
Load the configuration from a dictionary.
Args:
conf_dict (dict): Dictionary with the configuration.
|
def load_from_dict(self, conf_dict=None):
self.set_to_default()
self._update_dict(self._config, conf_dict)
self._update_python_paths()
| 764,582
|
Helper method to update an existing configuration with the values from a file.
Loads a configuration file and replaces all values in the existing configuration
dictionary with the values from the file.
Args:
filename (str): The path and name to the configuration file.
|
def _update_from_file(self, filename):
if os.path.exists(filename):
try:
with open(filename, 'r') as config_file:
yaml_dict = yaml.safe_load(config_file.read())
if yaml_dict is not None:
self._update_dict(self._config, yaml_dict)
except IsADirectoryError:
raise ConfigLoadError(
'The specified configuration file is a directory not a file')
else:
raise ConfigLoadError('The config file {} does not exist'.format(filename))
| 764,583
|
Recursively merges the fields for two dictionaries.
Args:
to_dict (dict): The dictionary onto which the merge is executed.
from_dict (dict): The dictionary merged into to_dict
|
def _update_dict(self, to_dict, from_dict):
for key, value in from_dict.items():
if key in to_dict and isinstance(to_dict[key], dict) and \
isinstance(from_dict[key], dict):
self._update_dict(to_dict[key], from_dict[key])
else:
to_dict[key] = from_dict[key]
| 764,584
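A quick illustration of the recursive merge semantics of _update_dict:

# Example: nested dicts are merged key by key, scalars are overwritten.
to_dict = {"db": {"host": "localhost", "port": 27017}, "debug": False}
from_dict = {"db": {"port": 5000}, "debug": True}
# After self._update_dict(to_dict, from_dict):
# to_dict == {"db": {"host": "localhost", "port": 5000}, "debug": True}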
|
Asserts that the page has the given title.
Args:
title (str | RegexObject): The string or regex that the title should match.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
True
Raises:
ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
|
def assert_title(self, title, **kwargs):
query = TitleQuery(title, **kwargs)
@self.synchronize(wait=query.wait)
def assert_title():
if not query.resolves_for(self):
raise ExpectationNotMet(query.failure_message)
return True
return assert_title()
| 764,586
|
Asserts that the page doesn't have the given title.
Args:
title (str | RegexObject): The string that the title should include.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
True
Raises:
ExpectationNotMet: If the assertion hasn't succeeded during the wait time.
|
def assert_no_title(self, title, **kwargs):
query = TitleQuery(title, **kwargs)
@self.synchronize(wait=query.wait)
def assert_no_title():
if query.resolves_for(self):
raise ExpectationNotMet(query.negative_failure_message)
return True
return assert_no_title()
| 764,587
|
Checks if the page has the given title.
Args:
title (str | RegexObject): The string or regex that the title should match.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
bool: Whether it matches.
|
def has_title(self, title, **kwargs):
try:
self.assert_title(title, **kwargs)
return True
except ExpectationNotMet:
return False
| 764,588
|
Checks if the page doesn't have the given title.
Args:
title (str | RegexObject): The string that the title should include.
**kwargs: Arbitrary keyword arguments for :class:`TitleQuery`.
Returns:
bool: Whether it doesn't match.
|
def has_no_title(self, title, **kwargs):
try:
self.assert_no_title(title, **kwargs)
return True
except ExpectationNotMet:
return False
| 764,589
|
Resolves this query relative to the given node.
Args:
node (node.Base): The node to be evaluated.
Returns:
int: The number of matches found.
|
def resolve_for(self, node):
self.node = node
self.actual_text = normalize_text(
node.visible_text if self.query_type == "visible" else node.all_text)
self.count = len(re.findall(self.search_regexp, self.actual_text))
return self.count
| 764,597
|
Returns the inner content of a given XML node, including tags.
Args:
node (lxml.etree.Element): The node whose inner content is desired.
Returns:
str: The inner content of the node.
|
def inner_content(node):
from lxml import etree
# Include text content at the start of the node.
parts = [node.text]
for child in node.getchildren():
# Include the child serialized to raw XML.
parts.append(etree.tostring(child, encoding="utf-8"))
# Include any text following the child.
parts.append(child.tail)
    # Decode any bytes parts, discard any non-existent text parts, and return.
    return "".join(map(decode_bytes, filter(None, parts)))
| 764,600
|
Returns the inner text of a given XML node, excluding tags.
Args:
node (lxml.etree.Element): The node whose inner text is desired.
Returns:
str: The inner text of the node.
|
def inner_text(node):
from lxml import etree
# Include text content at the start of the node.
parts = [node.text]
for child in node.getchildren():
# Include the raw text content of the child.
parts.append(etree.tostring(child, encoding="utf-8", method="text"))
# Include any text following the child.
parts.append(child.tail)
# Discard any non-existent text parts and return.
return "".join(map(decode_bytes, filter(None, parts)))
| 764,601
|
Returns the given URL with all query keys properly escaped.
Args:
url (str): The URL to normalize.
Returns:
str: The normalized URL.
|
def normalize_url(url):
uri = urlparse(url)
query = uri.query or ""
pairs = parse_qsl(query)
decoded_pairs = [(unquote(key), value) for key, value in pairs]
encoded_pairs = [(quote(key), value) for key, value in decoded_pairs]
normalized_query = urlencode(encoded_pairs)
return ParseResult(
scheme=uri.scheme,
netloc=uri.netloc,
path=uri.path,
params=uri.params,
query=normalized_query,
fragment=uri.fragment).geturl()
| 764,602
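For example, an unescaped space in the query string is normalized while the rest of the URL is left intact:

# Example (illustrative value):
normalize_url("http://example.com/?q=a b")
# -> "http://example.com/?q=a+b"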
|
Find a check box and mark it as checked. The check box can be found via name, id, or label
text. ::
page.check("German")
Args:
locator (str, optional): Which check box to check.
allow_label_click (bool, optional): Attempt to click the label to toggle state if
element is non-visible. Defaults to :data:`capybara.automatic_label_click`.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
|
def check(self, locator=None, allow_label_click=None, **kwargs):
self._check_with_label(
"checkbox", True, locator=locator, allow_label_click=allow_label_click, **kwargs)
| 764,618
|
Find a radio button and mark it as checked. The radio button can be found via name, id, or
label text. ::
page.choose("Male")
Args:
locator (str, optional): Which radio button to choose.
allow_label_click (bool, optional): Attempt to click the label to toggle state if
element is non-visible. Defaults to :data:`capybara.automatic_label_click`.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
|
def choose(self, locator=None, allow_label_click=None, **kwargs):
self._check_with_label(
"radio_button", True, locator=locator, allow_label_click=allow_label_click, **kwargs)
| 764,619
|
Find a check box and uncheck it. The check box can be found via name, id, or label text. ::
page.uncheck("German")
Args:
locator (str, optional): Which check box to uncheck.
allow_label_click (bool, optional): Attempt to click the label to toggle state if
element is non-visible. Defaults to :data:`capybara.automatic_label_click`.
**kwargs: Arbitrary keyword arguments for :class:`SelectorQuery`.
|
def uncheck(self, locator=None, allow_label_click=None, **kwargs):
self._check_with_label(
"checkbox", False, locator=locator, allow_label_click=allow_label_click, **kwargs)
| 764,622
|
Returns whether to catch the given error.
Args:
error (Exception): The error to consider.
errors (Tuple[Type[Exception], ...], optional): The exception types that should be
caught. Defaults to :class:`ElementNotFound` plus any driver-specific invalid
element errors.
Returns:
bool: Whether to catch the given error.
|
def _should_catch_error(self, error, errors=()):
caught_errors = (
errors or
self.session.driver.invalid_element_errors + (ElementNotFound,))
return isinstance(error, caught_errors)
| 764,630
|
Initialize the exception for invalid workflow definitions.
Args:
workflow_name (str): The name of the workflow that contains an invalid
definition.
graph_name (str): The name of the dag that is invalid.
|
def __init__(self, workflow_name, graph_name):
self.workflow_name = workflow_name
self.graph_name = graph_name
| 764,633
|
Returns whether the given query options expect a possible count of zero.
Args:
options (Dict[str, int | Iterable[int]]): A dictionary of query options.
Returns:
bool: Whether a possible count of zero is expected.
|
def expects_none(options):
if any(options.get(key) is not None for key in ["count", "maximum", "minimum", "between"]):
return matches_count(0, options)
else:
return False
| 764,658
|
Returns an expectation failure message for the given query description.
Args:
description (str): A description of the failed query.
options (Dict[str, Any]): The query options.
Returns:
str: A message describing the failure.
|
def failure_message(description, options):
message = "expected to find {}".format(description)
if options["count"] is not None:
message += " {count} {times}".format(
count=options["count"],
times=declension("time", "times", options["count"]))
elif options["between"] is not None:
between = options["between"]
if between:
first, last = between[0], between[-1]
else:
first, last = None, None
message += " between {first} and {last} times".format(
first=first,
last=last)
elif options["maximum"] is not None:
message += " at most {maximum} {times}".format(
maximum=options["maximum"],
times=declension("time", "times", options["maximum"]))
elif options["minimum"] is not None:
message += " at least {minimum} {times}".format(
minimum=options["minimum"],
times=declension("time", "times", options["minimum"]))
return message
| 764,659
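Example outputs, using an options dict shaped like the ones the query classes pass in:

failure_message("css '.item'", {"count": 2, "between": None,
                                "maximum": None, "minimum": None})
# -> "expected to find css '.item' 2 times"
failure_message("css '.item'", {"count": None, "between": range(1, 4),
                                "maximum": None, "minimum": None})
# -> "expected to find css '.item' between 1 and 3 times"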
|
Returns whether the given count matches the given query options.
If no quantity options are specified, any count is considered acceptable.
Args:
count (int): The count to be validated.
options (Dict[str, int | Iterable[int]]): A dictionary of query options.
Returns:
bool: Whether the count matches the options.
|
def matches_count(count, options):
if options.get("count") is not None:
return count == int(options["count"])
if options.get("maximum") is not None and int(options["maximum"]) < count:
return False
if options.get("minimum") is not None and int(options["minimum"]) > count:
return False
if options.get("between") is not None and count not in options["between"]:
return False
return True
| 764,660
|
Normalizes the given value to a string of text with extra whitespace removed.
Byte sequences are decoded. ``None`` is converted to an empty string. Everything else
is simply cast to a string.
Args:
value (Any): The data to normalize.
Returns:
str: The normalized text.
|
def normalize_text(value):
if value is None:
return ""
text = decode_bytes(value) if isbytes(value) else str_(value)
return normalize_whitespace(text)
| 764,661
|
Returns the given text with outer whitespace removed and inner whitespace collapsed.
Args:
text (str): The text to normalize.
Returns:
str: The normalized text.
|
def normalize_whitespace(text):
return re.sub(r"\s+", " ", text, flags=re.UNICODE).strip()
| 764,662
|
Returns a compiled regular expression for the given text.
Args:
text (str | RegexObject): The text to match.
exact (bool, optional): Whether the generated regular expression should match exact
strings. Defaults to False.
Returns:
RegexObject: A compiled regular expression that will match the text.
|
def toregex(text, exact=False):
if isregex(text):
return text
escaped = re.escape(normalize_text(text))
if exact:
escaped = r"\A{}\Z".format(escaped)
return re.compile(escaped)
| 764,663
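A few illustrative calls:

toregex("a  +  b")                 # escapes the normalized text "a + b"
toregex("a + b", exact=True)       # wraps the pattern in \A ... \Z anchors
toregex(re.compile(r"\d+"))        # compiled regexes are returned unchanged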
|
Initialize the request object.
Args:
action (str): A string representing the requested action that should be
executed by the server.
payload (dict): A dictionary with data that is available to the action.
|
def __init__(self, action, *, payload=None):
self.action = action
self.payload = payload if payload is not None else {}
self.uid = uuid.uuid4()
| 764,666
|
Initialize the response object.
Args:
success (bool): True if the request was successful.
uid (str): Unique response id.
payload (dict): A dictionary with the response data.
|
def __init__(self, success, uid, *, payload=None):
self.success = success
self.uid = uid
self.payload = payload if payload is not None else {}
| 764,667
|
Initialize the signal server.
Args:
connection: Reference to a signal connection object.
request_key (str): The key under which the list of requests is stored.
|
def __init__(self, connection, request_key):
self._connection = connection
self._request_key = '{}:{}'.format(SIGNAL_REDIS_PREFIX, request_key)
| 764,668
|
Send a response back to the client that issued a request.
Args:
response (Response): Reference to the response object that should be sent.
|
def send(self, response):
self._connection.connection.set('{}:{}'.format(SIGNAL_REDIS_PREFIX, response.uid),
pickle.dumps(response))
| 764,670
|
Push the request back onto the queue.
Args:
request (Request): Reference to a request object that should be pushed back
onto the request queue.
|
def restore(self, request):
self._connection.connection.rpush(self._request_key, pickle.dumps(request))
| 764,671
|
Send a request to the server and wait for its response.
Args:
request (Request): Reference to a request object that is sent to the server.
Returns:
Response: The response from the server to the request.
|
def send(self, request):
self._connection.connection.rpush(self._request_key, pickle.dumps(request))
resp_key = '{}:{}'.format(SIGNAL_REDIS_PREFIX, request.uid)
while True:
if self._connection.polling_time > 0.0:
sleep(self._connection.polling_time)
response_data = self._connection.connection.get(resp_key)
if response_data is not None:
self._connection.connection.delete(resp_key)
break
return pickle.loads(response_data)
| 764,672
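A sketch of the round trip between the client send() above and the server-side send()/restore() shown earlier, assuming client is an instance of the client class and both sides share the same Redis connection (the action name is an arbitrary example):

request = Request('workflow_stop', payload={'name': 'my_workflow'})
response = client.send(request)   # blocks, polling for the response key
if response.success:
    print(response.payload)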
|
Returns whether this query resolves for the given session.
Args:
session (Session): The session for which this query should be executed.
Returns:
bool: Whether this query resolves.
|
def resolves_for(self, session):
if self.url:
self.actual_path = session.current_url
else:
result = urlparse(session.current_url)
if self.only_path:
self.actual_path = result.path
else:
request_uri = result.path
if result.query:
request_uri += "?{0}".format(result.query)
self.actual_path = request_uri
if isregex(self.expected_path):
return self.expected_path.search(self.actual_path)
else:
return normalize_url(self.actual_path) == normalize_url(self.expected_path)
| 764,674
|
Resizes the window to the given dimensions.
If this method was called for a window that is not current, then after calling this method
the current window should remain the same as it was before calling this method.
Args:
width (int): The new window width in pixels.
height (int): The new window height in pixels.
|
def resize_to(self, width, height):
self.driver.resize_window_to(self.handle, width, height)
| 764,680
|
Generates a diurnal course of wind speed according to the cosine function
Args:
x: series of equally distributed wind speed values
a: parameter a for the cosine function
b: parameter b for the cosine function
t_shift: parameter t_shift for the cosine function
Returns:
series including diurnal course of windspeed.
|
def _cosine_function(x, a, b, t_shift):
mean_wind, t = x
return a * mean_wind * np.cos(np.pi * (t - t_shift) / 12) + b * mean_wind
| 764,816
|
General function for wind speed disaggregation
Args:
wind_daily: daily values
method: keyword specifying the disaggregation method to be used
a: parameter a for the cosine function
b: parameter b for the cosine function
t_shift: parameter t_shift for the cosine function
Returns:
Disaggregated hourly values of windspeed.
|
def disaggregate_wind(wind_daily, method='equal', a=None, b=None, t_shift=None):
assert method in ('equal', 'cosine', 'random'), 'Invalid method'
wind_eq = melodist.distribute_equally(wind_daily)
if method == 'equal':
wind_disagg = wind_eq
elif method == 'cosine':
assert None not in (a, b, t_shift)
wind_disagg = _cosine_function(np.array([wind_eq.values, wind_eq.index.hour]), a, b, t_shift)
elif method == 'random':
wind_disagg = wind_eq * (-np.log(np.random.rand(len(wind_eq))))**0.3
return wind_disagg
| 764,817
|
Fits a cosine function to observed hourly wind speed data
Args:
wind: observed hourly windspeed data
Returns:
parameters needed to generate diurnal features of windspeed using a cosine function
|
def fit_cosine_function(wind):
wind_daily = wind.groupby(wind.index.date).mean()
wind_daily_hourly = pd.Series(index=wind.index, data=wind_daily.loc[wind.index.date].values) # daily values evenly distributed over the hours
df = pd.DataFrame(data=dict(daily=wind_daily_hourly, hourly=wind)).dropna(how='any')
x = np.array([df.daily, df.index.hour])
popt, pcov = scipy.optimize.curve_fit(_cosine_function, x, df.hourly)
return popt
| 764,818
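The three wind functions above chain together; a short sketch assuming wind is an hourly pandas Series with a DatetimeIndex:

# Fit the diurnal cosine on hourly observations, then use the fitted
# parameters to disaggregate daily means back to hourly values.
a, b, t_shift = fit_cosine_function(wind)
wind_daily = wind.resample('D').mean()
wind_hourly = disaggregate_wind(wind_daily, method='cosine',
                                a=a, b=b, t_shift=t_shift)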
|
Reads a single file of KNMI's meteorological time series
data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens
Args:
filename: the file to be opened
Returns:
pandas data frame including time series
|
def read_single_knmi_file(filename):
hourly_data_obs_raw = pd.read_csv(
filename,
parse_dates=[['YYYYMMDD', 'HH']],
date_parser=lambda yyyymmdd, hh: pd.datetime(int(str(yyyymmdd)[0:4]),
int(str(yyyymmdd)[4:6]),
int(str(yyyymmdd)[6:8]),
int(hh) - 1),
skiprows=31,
skipinitialspace=True,
na_values='',
keep_date_col=True,
)
hourly_data_obs_raw.index = hourly_data_obs_raw['YYYYMMDD_HH']
hourly_data_obs_raw.index = hourly_data_obs_raw.index + pd.Timedelta(hours=1)
columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd']
hourly_data_obs = pd.DataFrame(
index=hourly_data_obs_raw.index,
columns=columns_hourly,
data=dict(
temp=hourly_data_obs_raw['T'] / 10 + 273.15,
precip=hourly_data_obs_raw['RH'] / 10,
glob=hourly_data_obs_raw['Q'] * 10000 / 3600.,
hum=hourly_data_obs_raw['U'],
wind=hourly_data_obs_raw['FH'] / 10,
ssd=hourly_data_obs_raw['SQ'] * 6,
),
)
# remove negative values
negative_values = hourly_data_obs['precip'] < 0.0
hourly_data_obs.loc[negative_values, 'precip'] = 0.0
return hourly_data_obs
| 764,824
|
Reads files from a directory and merges the time series
Please note: For each station, a separate directory must be provided!
data availability: www.knmi.nl/nederland-nu/klimatologie/uurgegevens
Args:
directory: directory including the files
Returns:
pandas data frame including time series
|
def read_knmi_dataset(directory):
filemask = '%s*.txt' % directory
filelist = glob.glob(filemask)
columns_hourly = ['temp', 'precip', 'glob', 'hum', 'wind', 'ssd']
ts = pd.DataFrame(columns=columns_hourly)
first_call = True
for file_i in filelist:
print(file_i)
current = read_single_knmi_file(file_i)
        if first_call:
ts = current
first_call = False
else:
ts = pd.concat([ts, current])
return ts
| 764,825
|
Obtains hourly values by equally distributing the daily values.
Args:
daily_data: daily values
divide: if True, divide resulting values by the number of hours in
order to preserve the daily sum (required e.g. for precipitation).
Returns:
Equally distributed hourly values.
|
def distribute_equally(daily_data, divide=False):
index = hourly_index(daily_data.index)
hourly_data = daily_data.reindex(index)
hourly_data = hourly_data.groupby(hourly_data.index.day).transform(
lambda x: x.fillna(method='ffill', limit=23))
if divide:
hourly_data /= 24
return hourly_data
| 764,859
|
Calculates vapor pressure from temperature and humidity after Sonntag (1990).
Args:
temp: temperature values
hum: humidity value(s). Can be scalar (e.g. for calculating saturation vapor pressure).
Returns:
Vapor pressure in hPa.
|
def vapor_pressure(temp, hum):
if np.isscalar(hum):
hum = np.zeros(temp.shape) + hum
assert(temp.shape == hum.shape)
positives = np.array(temp >= 273.15)
vap_press = np.zeros(temp.shape) * np.nan
vap_press[positives] = 6.112 * np.exp((17.62 * (temp[positives] - 273.15)) / (243.12 + (temp[positives] - 273.15))) * hum[positives] / 100.
vap_press[~positives] = 6.112 * np.exp((22.46 * (temp[~positives] - 273.15)) / (272.62 + (temp[~positives] - 273.15))) * hum[~positives] / 100.
return vap_press
| 764,860
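A worked example: at 20 degC (293.15 K) the saturation vapor pressure after Sonntag is about 23.3 hPa, so at 50 % relative humidity the function returns roughly half of that:

temp = np.array([293.15])
vapor_pressure(temp, 50.0)   # -> approx. array([11.66]) hPa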
|
Aggregates data (hourly to daily values) according to the characteristics
of each variable (e.g., average for temperature, sum for precipitation)
Args:
df: dataframe including time series with one hour time steps
Returns:
dataframe (daily)
|
def daily_from_hourly(df):
df_daily = pd.DataFrame()
if 'temp' in df:
df_daily['temp'] = df.temp.resample('D').mean()
df_daily['tmin'] = df.temp.groupby(df.temp.index.date).min()
df_daily['tmax'] = df.temp.groupby(df.temp.index.date).max()
if 'precip' in df:
df_daily['precip'] = df.precip.resample('D').sum()
if 'glob' in df:
df_daily['glob'] = df.glob.resample('D').mean()
    if 'hum' in df:
        df_daily['hum'] = df.hum.resample('D').mean()
        df_daily['hum_min'] = df.hum.groupby(df.hum.index.date).min()
        df_daily['hum_max'] = df.hum.groupby(df.hum.index.date).max()
if 'wind' in df:
df_daily['wind'] = df.wind.resample('D').mean()
if 'ssd' in df:
df_daily['ssd'] = df.ssd.resample('D').sum() / 60 # minutes to hours
df_daily.index.name = None
return df_daily
| 764,867
|
Constructs a FullscreenWindow
Args:
out_stream (file): Defaults to sys.__stdout__
hide_cursor (bool): Hides cursor while in context
|
def __init__(self, out_stream=None, hide_cursor=True):
BaseWindow.__init__(self, out_stream=out_stream,
hide_cursor=hide_cursor)
self.fullscreen_ctx = self.t.fullscreen()
| 765,435
|
Renders array to terminal and places (0-indexed) cursor
Args:
array (FSArray): Grid of styled characters to be rendered.
* If the array is narrower or shorter than the terminal, render it anyway.
* If the array is wider than the terminal, render only the portion that fits.
* If the array is taller than the terminal, render only the portion that
fits (no scrolling).
|
def render_to_terminal(self, array, cursor_pos=(0, 0)):
# TODO there's a race condition here - these height and widths are
# super fresh - they might change between the array being constructed
# and rendered
# Maybe the right behavior is to throw away the render
# in the signal handler?
height, width = self.height, self.width
for_stdout = self.fmtstr_to_stdout_xform()
if not self.hide_cursor:
self.write(self.t.hide_cursor)
if (height != self._last_rendered_height or
width != self._last_rendered_width):
self.on_terminal_size_change(height, width)
current_lines_by_row = {}
rows = list(range(height))
rows_for_use = rows[:len(array)]
rest_of_rows = rows[len(array):]
# rows which we have content for and don't require scrolling
for row, line in zip(rows_for_use, array):
current_lines_by_row[row] = line
if line == self._last_lines_by_row.get(row, None):
continue
self.write(self.t.move(row, 0))
self.write(for_stdout(line))
if len(line) < width:
self.write(self.t.clear_eol)
# rows onscreen that we don't have content for
for row in rest_of_rows:
if self._last_lines_by_row and row not in self._last_lines_by_row:
continue
self.write(self.t.move(row, 0))
self.write(self.t.clear_eol)
self.write(self.t.clear_bol)
current_lines_by_row[row] = None
logger.debug(
'lines in last lines by row: %r' % self._last_lines_by_row.keys()
)
logger.debug(
'lines in current lines by row: %r' % current_lines_by_row.keys()
)
self.write(self.t.move(*cursor_pos))
self._last_lines_by_row = current_lines_by_row
if not self.hide_cursor:
self.write(self.t.normal_cursor)
| 765,437
|
Generate the moment matrix of monomials.
Arguments:
n_vars -- current number of variables
block_index -- current block index in the SDP matrix
monomialsA, monomialsB -- |W_d| sets of words of length up to the relaxation level
|
def _generate_moment_matrix(self, n_vars, block_index, processed_entries,
monomialsA, monomialsB, ppt=False):
row_offset = 0
if block_index > 0:
for block_size in self.block_struct[0:block_index]:
row_offset += block_size ** 2
N = len(monomialsA)*len(monomialsB)
func = partial(assemble_monomial_and_do_substitutions,
monomialsA=monomialsA, monomialsB=monomialsB, ppt=ppt,
substitutions=self.substitutions,
pure_substitution_rules=self.pure_substitution_rules)
if self._parallel:
pool = Pool()
# This is just a guess and can be optimized
chunksize = int(max(int(np.sqrt(len(monomialsA) * len(monomialsB) *
len(monomialsA) / 2) / cpu_count()),
1))
for rowA in range(len(monomialsA)):
if self._parallel:
iter_ = pool.map(func, [(rowA, columnA, rowB, columnB)
for rowB in range(len(monomialsB))
for columnA in range(rowA,
len(monomialsA))
for columnB in range((rowA == columnA)*rowB,
len(monomialsB))],
chunksize)
print_criterion = processed_entries + len(iter_)
else:
iter_ = imap(func, [(rowA, columnA, rowB, columnB)
for columnA in range(rowA, len(monomialsA))
for rowB in range(len(monomialsB))
for columnB in range((rowA == columnA)*rowB,
len(monomialsB))])
for columnA, rowB, columnB, monomial in iter_:
processed_entries += 1
n_vars = self._push_monomial(monomial, n_vars,
row_offset, rowA,
columnA, N, rowB,
columnB, len(monomialsB),
prevent_substitutions=True)
if self.verbose > 0 and (not self._parallel or
processed_entries == self.n_vars or
processed_entries == print_criterion):
percentage = processed_entries / self.n_vars
time_used = time.time()-self._time0
eta = (1.0 / percentage) * time_used - time_used
hours = int(eta/3600)
minutes = int((eta-3600*hours)/60)
seconds = eta-3600*hours-minutes*60
msg = ""
if self.verbose > 1 and self._parallel:
msg = ", working on block {:0} with {:0} processes with a chunksize of {:0d}"\
.format(block_index, cpu_count(),
chunksize)
msg = "{:0} (done: {:.2%}, ETA {:02d}:{:02d}:{:03.1f}"\
.format(n_vars, percentage, hours, minutes, seconds) + \
msg
msg = "\r\x1b[KCurrent number of SDP variables: " + msg + ")"
sys.stdout.write(msg)
sys.stdout.flush()
if self._parallel:
pool.close()
pool.join()
if self.verbose > 0:
sys.stdout.write("\r")
return n_vars, block_index + 1, processed_entries
| 765,457
|
Generate localizing matrices for the inequality constraints
Arguments:
block_index -- the current block index in the constraint matrices of the
SDP relaxation
|
def __process_inequalities(self, block_index):
initial_block_index = block_index
row_offsets = [0]
for block, block_size in enumerate(self.block_struct):
row_offsets.append(row_offsets[block] + block_size ** 2)
if self._parallel:
pool = Pool()
for k, ineq in enumerate(self.constraints):
block_index += 1
monomials = self.localizing_monomial_sets[block_index -
initial_block_index-1]
lm = len(monomials)
if isinstance(ineq, str):
self.__parse_expression(ineq, row_offsets[block_index-1])
continue
if ineq.is_Relational:
ineq = convert_relational(ineq)
func = partial(moment_of_entry, monomials=monomials, ineq=ineq,
substitutions=self.substitutions)
if self._parallel and lm > 1:
chunksize = max(int(np.sqrt(lm*lm/2) /
cpu_count()), 1)
iter_ = pool.map(func, ([row, column] for row in range(lm)
for column in range(row, lm)),
chunksize)
else:
iter_ = imap(func, ([row, column] for row in range(lm)
for column in range(row, lm)))
if block_index > self.constraint_starting_block + \
self._n_inequalities and lm > 1:
is_equality = True
else:
is_equality = False
for row, column, polynomial in iter_:
if is_equality:
row, column = 0, 0
self.__push_facvar_sparse(polynomial, block_index,
row_offsets[block_index-1],
row, column)
if is_equality:
block_index += 1
if is_equality:
block_index -= 1
if self.verbose > 0:
sys.stdout.write("\r\x1b[KProcessing %d/%d constraints..." %
(k+1, len(self.constraints)))
sys.stdout.flush()
if self._parallel:
pool.close()
pool.join()
if self.verbose > 0:
sys.stdout.write("\n")
return block_index
| 765,462
|
Generate localizing matrices
Arguments:
equalities -- list of equality constraints
momentequalities -- list of moment equality constraints
|
def __process_equalities(self, equalities, momentequalities):
monomial_sets = []
n_rows = 0
le = 0
if equalities is not None:
for equality in equalities:
le += 1
# Find the order of the localizing matrix
if equality.is_Relational:
equality = convert_relational(equality)
eq_order = ncdegree(equality)
if eq_order > 2 * self.level:
raise Exception("An equality constraint has degree %d. "
"Choose a higher level of relaxation."
% eq_order)
localization_order = (2 * self.level - eq_order)//2
index = find_variable_set(self.variables, equality)
localizing_monomials = \
pick_monomials_up_to_degree(self.monomial_sets[index],
localization_order)
if len(localizing_monomials) == 0:
localizing_monomials = [S.One]
localizing_monomials = unique(localizing_monomials)
monomial_sets.append(localizing_monomials)
n_rows += len(localizing_monomials) * \
(len(localizing_monomials) + 1) // 2
if momentequalities is not None:
for _ in momentequalities:
le += 1
monomial_sets.append([S.One])
n_rows += 1
A = np.zeros((n_rows, self.n_vars + 1), dtype=self.F.dtype)
n_rows = 0
if self._parallel:
pool = Pool()
for i, equality in enumerate(flatten([equalities, momentequalities])):
func = partial(moment_of_entry, monomials=monomial_sets[i],
ineq=equality, substitutions=self.substitutions)
lm = len(monomial_sets[i])
if self._parallel and lm > 1:
chunksize = max(int(np.sqrt(lm*lm/2) /
cpu_count()), 1)
iter_ = pool.map(func, ([row, column] for row in range(lm)
for column in range(row, lm)),
chunksize)
else:
iter_ = imap(func, ([row, column] for row in range(lm)
for column in range(row, lm)))
# Process M_y(gy)(u,w) entries
for row, column, polynomial in iter_:
# Calculate the moments of polynomial entries
if isinstance(polynomial, str):
self.__parse_expression(equality, -1, A[n_rows])
else:
A[n_rows] = self._get_facvar(polynomial)
n_rows += 1
if self.verbose > 0:
sys.stdout.write("\r\x1b[KProcessing %d/%d equalities..." %
(i+1, le))
sys.stdout.flush()
if self._parallel:
pool.close()
pool.join()
if self.verbose > 0:
sys.stdout.write("\n")
return A
| 765,463
|
Dump current environment as a dictionary
Arguments:
context (dict, optional): Current context, defaults
to the current environment.
|
def dump(context=os.environ):
output = {}
    for key, value in context.items():
if not key.startswith("BE_"):
continue
output[key[3:].lower()] = value
return output
| 765,796
|
Write script to a temporary directory
Arguments:
script (list): Commands which to put into a file
Returns:
Absolute path to script
|
def write_script(script, tempdir):
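    # NOTE: `self` here presumably refers to the module itself (the common
    # self = sys.modules[__name__] idiom), so self.suffix is a module-level
    # attribute rather than an instance attribute.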
name = "script" + self.suffix
path = os.path.join(tempdir, name)
with open(path, "w") as f:
f.write("\n".join(script))
return path
| 765,807
|
Write aliases to temporary directory
Arguments:
aliases (dict): {name: value} dict of aliases
tempdir (str): Absolute path to where aliases will be stored
|
def write_aliases(aliases, tempdir):
platform = lib.platform()
if platform == "unix":
home_alias = "cd $BE_DEVELOPMENTDIR"
else:
home_alias = "cd %BE_DEVELOPMENTDIR%"
aliases["home"] = home_alias
tempdir = os.path.join(tempdir, "aliases")
os.makedirs(tempdir)
    for alias, cmd in aliases.items():
path = os.path.join(tempdir, alias)
if platform == "windows":
path += ".bat"
with open(path, "w") as f:
f.write(cmd)
if platform == "unix":
# Make executable
st = os.stat(path)
os.chmod(path, st.st_mode | stat.S_IXUSR
| stat.S_IXGRP | stat.S_IXOTH)
return tempdir
| 765,808
|
Physically delete local preset
Arguments:
preset (str): Name of preset
|
def remove_preset(preset):
preset_dir = os.path.join(presets_dir(), preset)
try:
shutil.rmtree(preset_dir)
except IOError:
lib.echo("\"%s\" did not exist" % preset)
| 765,810
|
Evaluate whether gist is a be package
Arguments:
repo (str): username/id pair e.g. mottosso/2bb4651a05af85711cde
|
def _gist_is_preset(repo):
_, gistid = repo.split("/")
gist_template = "https://api.github.com/gists/{}"
gist_path = gist_template.format(gistid)
response = get(gist_path)
if response.status_code == 404:
return False
try:
data = response.json()
    except ValueError:
return False
files = data.get("files", {})
package = files.get("package.json", {})
try:
content = json.loads(package.get("content", ""))
    except ValueError:
return False
if content.get("type") != "bepreset":
return False
return True
| 765,812
|
Evaluate whether GitHub repository is a be package
Arguments:
repo (str): username/repository pair e.g. mottosso/be-ad
|
def _repo_is_preset(repo):
package_template = "https://raw.githubusercontent.com"
package_template += "/{repo}/master/package.json"
package_path = package_template.format(repo=repo)
response = get(package_path)
if response.status_code == 404:
return False
try:
data = response.json()
    except ValueError:
return False
if not data.get("type") == "bepreset":
return False
return True
| 765,813
|
Copy contents of preset into new project
If package.json contains the key "contents", limit
the files copied to those present in this list.
Arguments:
preset_dir (str): Absolute path to preset
project_dir (str): Absolute path to new project
|
def copy_preset(preset_dir, project_dir):
os.makedirs(project_dir)
package_file = os.path.join(preset_dir, "package.json")
with open(package_file) as f:
package = json.load(f)
for fname in os.listdir(preset_dir):
src = os.path.join(preset_dir, fname)
contents = package.get("contents") or []
if fname not in self.files + contents:
continue
if os.path.isfile(src):
shutil.copy2(src, project_dir)
else:
dest = os.path.join(project_dir, fname)
shutil.copytree(src, dest)
| 765,817
|
Determine subshell command for subprocess.call
Arguments:
parent (str): Absolute path to parent shell executable
|
def cmd(parent):
shell_name = os.path.basename(parent).rsplit(".", 1)[0]
dirname = os.path.dirname(__file__)
# Support for Bash
if shell_name in ("bash", "sh"):
shell = os.path.join(dirname, "_shell.sh").replace("\\", "/")
cmd = [parent.replace("\\", "/"), shell]
# Support for Cmd
elif shell_name in ("cmd",):
shell = os.path.join(dirname, "_shell.bat").replace("\\", "/")
cmd = [parent, "/K", shell]
# Support for Powershell
elif shell_name in ("powershell",):
raise SystemError("Powershell not yet supported")
# Unsupported
else:
raise SystemError("Unsupported shell: %s" % shell_name)
return cmd
| 765,857
|
Return whether or not `path` is a project
Arguments:
path (str): Absolute path
|
def isproject(path):
try:
if os.path.basename(path)[0] in (".", "_"):
return False
if not os.path.isdir(path):
return False
if not any(fname in os.listdir(path)
for fname in ("templates.yaml",
"inventory.yaml")):
return False
    except (OSError, IndexError):
return False
return True
| 765,860
|
Print to the console
Arguments:
text (str): Text to print to the console
silent (bool, optional): Whether or not to produce any output
newline (bool, optional): Whether or not to append a newline.
|
def echo(text, silent=False, newline=True):
if silent:
return
print(text) if newline else sys.stdout.write(text)
| 765,861
|
List projects at `root`
Arguments:
root (str): Absolute path to the `be` root directory,
typically the current working directory.
|
def list_projects(root, backend=os.listdir):
projects = list()
for project in sorted(backend(root)):
abspath = os.path.join(root, project)
if not isproject(abspath):
continue
projects.append(project)
return projects
| 765,862
|
List a project's inventory
Given a project, simply list the contents of `inventory.yaml`
Arguments:
inventory (dict): inventory.yaml
|
def list_inventory(inventory):
inverted = invert_inventory(inventory)
items = list()
for item in sorted(inverted, key=lambda a: (inverted[a], a)):
items.append((item, inverted[item]))
return items
| 765,863
|
Return absolute path to development directory
Arguments:
templates (dict): templates.yaml
inventory (dict): inventory.yaml
context (dict): The be context, from context()
topics (list): Arguments to `in`
user (str): Current `be` user
item (str): Item from template-binding address
|
def pos_development_directory(templates,
inventory,
context,
topics,
user,
item):
replacement_fields = replacement_fields_from_context(context)
binding = binding_from_item(inventory, item)
pattern = pattern_from_template(templates, binding)
positional_arguments = find_positional_arguments(pattern)
highest_argument = find_highest_position(positional_arguments)
highest_available = len(topics) - 1
if highest_available < highest_argument:
echo("Template for \"%s\" requires at least %i arguments" % (
item, highest_argument + 1))
sys.exit(USER_ERROR)
try:
return pattern.format(*topics, **replacement_fields).replace("\\", "/")
except KeyError as exc:
echo("TEMPLATE ERROR: %s is not an available key\n" % exc)
echo("Available tokens:")
for key in replacement_fields:
echo("\n- %s" % key)
sys.exit(TEMPLATE_ERROR)
| 765,866
|
Return absolute path to development directory
Arguments:
templates (dict): templates.yaml
inventory (dict): inventory.yaml
topics (list): Arguments to `in`
user (str): Current `be` user
|
def fixed_development_directory(templates, inventory, topics, user):
echo("Fixed syntax has been deprecated, see positional syntax")
project, item, task = topics[0].split("/")
template = binding_from_item(inventory, item)
pattern = pattern_from_template(templates, template)
if find_positional_arguments(pattern):
echo("\"%s\" uses a positional syntax" % project)
echo("Try this:")
echo(" be in %s" % " ".join([project, item, task]))
sys.exit(USER_ERROR)
keys = {
"cwd": os.getcwd(),
"project": project,
"item": item.replace("\\", "/"),
"user": user,
"task": task,
"type": task, # deprecated
}
try:
return pattern.format(**keys).replace("\\", "/")
except KeyError as exc:
echo("TEMPLATE ERROR: %s is not an available key\n" % exc)
echo("Available keys")
for key in keys:
echo("\n- %s" % key)
sys.exit(1)
| 765,867
|
Convert context replacement fields
Example:
BE_KEY=value -> {"key": "value"}
Arguments:
context (dict): The current context
|
def replacement_fields_from_context(context):
return dict((k[3:].lower(), context[k])
for k in context if k.startswith("BE_"))
| 765,868
|
Return the pattern for a named template
Arguments:
templates (dict): Current templates
name (str): Name of the template
|
def pattern_from_template(templates, name):
if name not in templates:
echo("No template named \"%s\"" % name)
sys.exit(1)
return templates[name]
| 765,870
|
Return binding for `item`
Example:
asset:
- myasset
The binding is "asset"
Arguments:
inventory (dict): inventory.yaml
item (str): Name of item
|
def binding_from_item(inventory, item):
if item in self.bindings:
return self.bindings[item]
bindings = invert_inventory(inventory)
try:
self.bindings[item] = bindings[item]
return bindings[item]
except KeyError as exc:
exc.bindings = bindings
raise exc
| 765,871
|
Resolve the be.yaml redirect key
Arguments:
redirect (dict): Source/destination pairs, e.g. {BE_ACTIVE: ACTIVE}
topics (tuple): Topics from which to sample, e.g. (project, item, task)
context (dict): Context from which to sample
|
def parse_redirect(redirect, topics, context):
for map_source, map_dest in redirect.items():
if re.match("{\d+}", map_source):
topics_index = int(map_source.strip("{}"))
topics_value = topics[topics_index]
context[map_dest] = topics_value
continue
context[map_dest] = context[map_source]
| 765,873
|
Slice a template based on it's positional argument
Arguments:
index (int): Position at which to slice
template (str): Template to slice
Example:
>>> slice(0, "{cwd}/{0}/assets/{1}/{2}")
'{cwd}/{0}'
>>> slice(1, "{cwd}/{0}/assets/{1}/{2}")
'{cwd}/{0}/assets/{1}'
|
def slice(index, template):
try:
        return re.match(r"^.*\{%d\}" % index, template).group()
except AttributeError:
raise ValueError("Index %i not found in template: %s"
% (index, template))
| 765,874
|
Post-processing of the response (after routing).
Args:
req: Request object.
resp: Response object.
resource: Resource object to which the request was
routed. May be None if no route was found
for the request.
|
def process_response(self, req, resp, resource):
if isinstance(resp.body, dict):
try:
resp.body = json.dumps(resp.body)
            except (TypeError, ValueError):  # json.dumps may fail on non-serializable bodies
resp.status = falcon.HTTP_500
| 766,049
|
Run the given command.
Parameters:
:param command: A string describing a command.
:param arguments: A list of strings describing arguments to the command.
|
def run(command=None, *arguments):
if command is None:
sys.exit('django-shortcuts: No argument was supplied, please specify one.')
if command in ALIASES:
command = ALIASES[command]
if command == 'startproject':
return call('django-admin.py startproject %s' % ' '.join(arguments), shell=True)
script_path = os.getcwd()
while not os.path.exists(os.path.join(script_path, 'manage.py')):
base_dir = os.path.dirname(script_path)
if base_dir != script_path:
script_path = base_dir
else:
sys.exit('django-shortcuts: No \'manage.py\' script found in this directory or its parents.')
return call('%(python)s %(script_path)s %(command)s %(arguments)s' % {
'python': sys.executable,
'script_path': os.path.join(script_path, 'manage.py'),
'command': command or '',
'arguments': ' '.join(arguments)
}, shell=True)
| 766,064
|
Send a request to the given Wunderlist API endpoint
Params:
endpoint -- API endpoint to send request to
Keyword Args:
headers -- headers to add to the request
method -- GET, PUT, PATCH, DELETE, etc.
params -- parameters to encode in the request
data -- data to send with the request
|
def request(self, endpoint, method='GET', headers=None, params=None, data=None):
if not headers:
headers = {}
if method in ['POST', 'PATCH', 'PUT']:
headers['Content-Type'] = 'application/json'
url = '/'.join([self.api_url, 'v' + self.api_version, endpoint])
data = json.dumps(data) if data else None
try:
response = requests.request(method=method, url=url, params=params, headers=headers, data=data)
# TODO Does recreating the exception classes 'requests' use suck? Yes, but it sucks more to expose the underlying library I use
except requests.exceptions.Timeout as e:
raise wp_exceptions.TimeoutError(e)
except requests.exceptions.ConnectionError as e:
raise wp_exceptions.ConnectionError(e)
self._validate_response(method, response)
return response
| 766,595
|
Create a Wunderlist client with the given parameters.
Params:
access_token -- Wunderlist access token, given once a user has given Wunderlist permission access their data
client_id -- Wunderlist-generated ID for the app accessing the client's data
api -- WunderApi handle to API information
|
def __init__(self, access_token, client_id, api):
self.client_id = client_id
self.access_token = access_token
self.api = api
| 766,597
|
Send a request to the given Wunderlist API with 'X-Access-Token' and 'X-Client-ID'
headers, and ensure the response code is as expected given the request type
Params:
endpoint -- API endpoint to send request to
Keyword Args:
method -- GET, PUT, PATCH, DELETE, etc.
params -- parameters to encode in the request
data -- data to send with the request
|
def authenticated_request(self, endpoint, method='GET', params=None, data=None):
headers = {
'X-Access-Token' : self.access_token,
'X-Client-ID' : self.client_id
}
return self.api.request(endpoint, method=method, headers=headers, params=params, data=data)
| 766,598
|
Decorator for adding wait animation to long running
functions.
Args:
animation (str, tuple): String reference to animation or tuple
with custom animation.
speed (float): Number of seconds each cycle of animation.
Examples:
>>> @animation.wait('bar')
>>> def long_running_function():
>>> ... 5 seconds later ...
>>> return
|
def wait(animation='elipses', text='', speed=0.2):
def decorator(func):
func.animation = animation
func.speed = speed
func.text = text
@wraps(func)
def wrapper(*args, **kwargs):
animation = func.animation
text = func.text
if not isinstance(animation, (list, tuple)) and \
not hasattr(animations, animation):
text = animation if text == '' else text
animation = 'elipses'
wait = Wait(animation=animation, text=text, speed=func.speed)
wait.start()
try:
ret = func(*args, **kwargs)
finally:
wait.stop()
sys.stdout.write('\n')
return ret
return wrapper
return decorator
| 766,653
|
Merge several timeseries
Arguments:
tup: sequence of Timeseries, with the same shape except for axis 0
Returns:
Resulting merged timeseries which can have duplicate time points.
|
def merge(tup):
    if not all(ts.shape[1:] == tup[0].shape[1:] for ts in tup[1:]):
        raise ValueError('Timeseries to merge must have compatible shapes')
    # Sort rows by time: concatenate the time vectors and argsort the result.
    indices = np.hstack(tuple(ts.tspan for ts in tup)).argsort()
    return np.vstack(tup)[indices]
| 766,659
|
Return the angle of the complex argument.
Args:
deg (bool, optional):
Return angle in degrees if True, radians if False (default).
Returns:
angle (Timeseries):
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
|
def angle(self, deg=False):
if self.dtype.str[1] != 'c':
warnings.warn('angle() is intended for complex-valued timeseries',
RuntimeWarning, 1)
return Timeseries(np.angle(self, deg=deg), self.tspan, self.labels)
| 766,673
|
Merge another timeseries with this one
Arguments:
ts (Timeseries): The two timeseries being merged must have the
same shape except for axis 0.
Returns:
Resulting merged timeseries which can have duplicate time points.
|
def merge(self, ts):
if ts.shape[1:] != self.shape[1:]:
raise ValueError('Timeseries to merge must have compatible shapes')
        # Sort rows by time: concatenate the two time vectors and argsort.
        indices = np.hstack((self.tspan, ts.tspan)).argsort()
return np.vstack((self, ts))[indices]
| 766,681
|
Insert a new axis, at a given position in the array shape
Args:
axis (int): Position (amongst axes) where new axis is to be inserted.
|
def expand_dims(self, axis):
if axis == -1:
axis = self.ndim
array = np.expand_dims(self, axis)
if axis == 0:
# prepended an axis: no longer a Timeseries
return array
else:
new_labels = self.labels.insert(axis, None)
return Timeseries(array, self.tspan, new_labels)
| 766,682
|
Plot a polar histogram of a phase variable's probability distribution
Args:
dts: DistTimeseries with axis 2 ranging over separate instances of an
oscillator (time series values are assumed to represent an angle)
times (float or sequence of floats): The target times at which
to plot the distribution
nbins (int): number of histogram bins
colormap: matplotlib colormap used to color the histogram bars by height
|
def phase_histogram(dts, times=None, nbins=30, colormap=mpl.cm.Blues):
if times is None:
times = np.linspace(dts.tspan[0], dts.tspan[-1], num=4)
elif isinstance(times, numbers.Number):
times = np.array([times], dtype=np.float64)
indices = distob.gather(dts.tspan.searchsorted(times))
if indices[-1] == len(dts.tspan):
indices[-1] -= 1
nplots = len(indices)
fig = plt.figure()
n = np.zeros((nbins, nplots))
for i in range(nplots):
index = indices[i]
time = dts.tspan[index]
phases = distob.gather(dts.mod2pi()[index, 0, :])
ax = fig.add_subplot(1, nplots, i + 1, projection='polar')
n[:,i], bins, patches = ax.hist(phases, nbins, (-np.pi, np.pi),
density=True, histtype='bar')
ax.set_title('time = %d s' % time)
ax.set_xticklabels(['0', r'$\frac{\pi}{4}$', r'$\frac{\pi}{2}$',
r'$\frac{3\pi}{4}$', r'$\pi$', r'$\frac{-3\pi}{4}$',
r'$\frac{-\pi}{2}$', r'$\frac{-\pi}{4}$'])
nmin, nmax = n.min(), n.max()
#TODO should make a custom colormap instead of reducing color dynamic range:
norm = mpl.colors.Normalize(1.2*nmin - 0.2*nmax,
0.6*nmin + 0.4*nmax, clip=True)
for i in range(nplots):
ax = fig.get_axes()[i]
ax.set_ylim(0, nmax)
for this_n, thispatch in zip(n[:,i], ax.patches):
color = colormap(norm(this_n))
thispatch.set_facecolor(color)
thispatch.set_edgecolor(color)
fig.show()
| 766,690
|
Plot time resolved power spectral density from cwt results
Args:
ts: the original Timeseries
coefs: continuous wavelet transform coefficients as calculated by cwt()
freqs: list of frequencies (in Hz) corresponding to coefs.
tsize, fsize: size of the plot (time axis and frequency axis, in pixels)
|
def _plot_cwt(ts, coefs, freqs, tsize=1024, fsize=512):
import matplotlib.style
import matplotlib as mpl
mpl.style.use('classic')
import matplotlib.pyplot as plt
from scipy import interpolate
channels = ts.shape[1]
fig = plt.figure()
for i in range(channels):
rect = (0.1, 0.85*(channels - i - 1)/channels + 0.1,
0.8, 0.85/channels)
ax = fig.add_axes(rect)
logpowers = np.log((coefs[:, :, i] * coefs[:, :, i].conj()).real)
tmin, tmax = ts.tspan[0], ts.tspan[-1]
fmin, fmax = freqs[0], freqs[-1]
tgrid, fgrid = np.mgrid[tmin:tmax:tsize*1j, fmin:fmax:fsize*1j]
gd = interpolate.interpn((ts.tspan, freqs), logpowers,
(tgrid, fgrid)).T
ax.imshow(gd, cmap='gnuplot2', aspect='auto', origin='lower',
extent=(tmin, tmax, fmin, fmax))
ax.set_ylabel('freq (Hz)')
fig.axes[0].set_title(u'log(power spectral density)')
fig.axes[channels - 1].set_xlabel('time (s)')
fig.show()
| 766,701
|
How to couple the output of one node to the input of another.
Args:
source_y (array of shape (8,)): state of the source node
target_y (array of shape (8,)): state of the target node
weight (float): the connection strength
Returns:
input (array of shape (8,)): value to drive each variable of the
target node.
|
def coupling(self, source_y, target_y, weight):
v_pyramidal = source_y[1] - source_y[2]
return (np.array([0, 0, 0, 0, 0, 1.0, 0, 0]) *
(weight*self.g1*self.He2*self.ke2*self.S(v_pyramidal)))
| 766,714
|
load a multi-channel Timeseries from a MATLAB .mat file
Args:
filename (str): .mat file to load
varname (str): variable name. only needed if there is more than one
variable saved in the .mat file
fs (scalar): sample rate of timeseries in Hz. (constant timestep assumed)
Returns:
Timeseries
|
def timeseries_from_mat(filename, varname=None, fs=1.0):
import scipy.io as sio
    if varname is None:
        mat_dict = sio.loadmat(filename)
        # Ignore MATLAB metadata entries (__header__, __version__, __globals__).
        varnames = [k for k in mat_dict if not k.startswith('__')]
        if len(varnames) > 1:
            raise ValueError('Must specify varname: file contains '
                             'more than one variable. ')
        varname = varnames[0]
    else:
        mat_dict = sio.loadmat(filename, variable_names=(varname,))
    array = mat_dict[varname]
    return Timeseries(array, fs=fs)
| 766,748
|
save a Timeseries to a MATLAB .mat file
Args:
ts (Timeseries): the timeseries to save
filename (str): .mat filename to save to
|
def save_mat(ts, filename):
import scipy.io as sio
tspan = ts.tspan
fs = (1.0*len(tspan) - 1) / (tspan[-1] - tspan[0])
mat_dict = {'data': np.asarray(ts),
'fs': fs,
'labels': ts.labels[1]}
sio.savemat(filename, mat_dict, do_compression=True)
return
| 766,749
|
load a multi-channel Timeseries from an EDF (European Data Format) file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
Timeseries
|
def _load_edflib(filename):
import edflib
e = edflib.EdfReader(filename, annotations_mode='all')
if np.ptp(e.get_samples_per_signal()) != 0:
raise Error('channels have differing numbers of samples')
if np.ptp(e.get_signal_freqs()) != 0:
raise Error('channels have differing sample rates')
n = e.samples_in_file(0)
m = e.signals_in_file
channelnames = e.get_signal_text_labels()
dt = 1.0/e.samplefrequency(0)
# EDF files hold <=16 bits of information per sample, so double precision
# (64-bit) would waste memory; use 32-bit floats instead:
ar = np.zeros((n, m), dtype=np.float32)
# edflib requires input buffer of float64s
buf = np.zeros((n,), dtype=np.float64)
for i in range(m):
e.read_phys_signal(i, 0, n, buf)
ar[:,i] = buf
tspan = np.arange(0, (n - 1 + 0.5) * dt, dt, dtype=np.float32)
return Timeseries(ar, tspan, labels=[None, channelnames])
| 766,752
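A usage sketch (the leading underscore suggests this is normally reached via a public loader; the filename is hypothetical):
ts = _load_edflib('recording.edf')
print(ts.shape)           # (samples, channels)
print(ts.labels[1][:3])   # first few channel names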
|
Get a list of event annotations from an EDF (European Data Format) file
or EDF+ file, using edflib.
Args:
filename: EDF+ file
Returns:
list: annotation events, each in the form [start_time, duration, text]
|
def annotations_from_file(filename):
import edflib
e = edflib.EdfReader(filename, annotations_mode='all')
return e.read_annotations()
| 766,753
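A usage sketch, trusting the [start_time, duration, text] event format stated in the docstring (filename hypothetical):
for start_time, duration, text in annotations_from_file('recording.edf'):
    print('%10.3f s  (%s s)  %s' % (start_time, duration, text))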
|
Make a new version of a function that has its own independent copy
of any globals that it uses directly, and has its own name.
All other attributes are assigned from the original function.
Args:
f: the function to clone
name (str): the name for the new function (if None, keep the same name)
Returns:
A copy of the function f, having its own copy of any globals used
Raises:
SimTypeError
|
def __clone_function(f, name=None):
if not isinstance(f, types.FunctionType):
raise SimTypeError('Given parameter is not a function.')
if name is None:
name = f.__name__
newglobals = f.__globals__.copy()
globals_used = [x for x in f.__globals__ if x in f.__code__.co_names]
for x in globals_used:
gv = f.__globals__[x]
if isinstance(gv, types.FunctionType):
# Recursively clone any global functions used by this function.
newglobals[x] = __clone_function(gv)
elif isinstance(gv, types.ModuleType):
newglobals[x] = gv
else:
# If it is something else, deep copy it.
newglobals[x] = copy.deepcopy(gv)
newfunc = types.FunctionType(
f.__code__, newglobals, name, f.__defaults__, f.__closure__)
return newfunc
| 766,759
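A minimal sketch of why the cloning matters, written as if inside the defining module (the name is module-private, and `gain` is illustrative only): each clone gets a private copy of module-level state.
gain = 2.0
def scale(x):
    return gain * x
scale2 = __clone_function(scale, name='scale2')
scale2.__globals__['gain'] = 10.0   # mutate only the clone's copy
print(scale(3), scale2(3))          # 6.0 30.0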
|
Insert a new axis, at a given position in the array shape
Args:
axis (int): Position (amongst axes) where new axis is to be inserted.
|
def expand_dims(self, axis):
if axis <= self._distaxis:
subaxis = axis
new_distaxis = self._distaxis + 1
else:
subaxis = axis - 1
new_distaxis = self._distaxis
new_subts = [rts.expand_dims(subaxis) for rts in self._subarrays]
if axis == 0:
# prepended an axis: no longer a Timeseries
return distob.DistArray(new_subts, new_distaxis)
else:
axislabels = self.labels[self._distaxis]
return DistTimeseries(new_subts, new_distaxis, axislabels)
| 766,766
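The axis bookkeeping in isolation: inserting at or before the distributed axis shifts that axis one position to the right, otherwise it stays put (a standalone sketch with distaxis=1 on a 2-D array):
distaxis = 1
for axis in range(3):
    if axis <= distaxis:
        subaxis, new_distaxis = axis, distaxis + 1
    else:
        subaxis, new_distaxis = axis - 1, distaxis
    print(axis, subaxis, new_distaxis)   # 0 0 2 / 1 1 2 / 2 1 1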
|
Return the angle of a complex Timeseries
Args:
deg (bool, optional):
Return angle in degrees if True, radians if False (default).
Returns:
angle (Timeseries):
The counterclockwise angle from the positive real axis on
the complex plane, with dtype as numpy.float64.
|
def angle(self, deg=False):
if self.dtype.str[1] != 'c':
warnings.warn('angle() is intended for complex-valued timeseries',
RuntimeWarning, 1)
da = distob.vectorize(np.angle)(self, deg)
return _dts_from_da(da, self.tspan, self.labels)
| 766,768
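A usage sketch; the hilbert() analytic-signal helper named here is an assumption, standing in for any way of obtaining a complex-valued Timeseries:
ats = ts.hilbert()               # hypothetical: complex analytic signal
phase = ats.angle()              # radians, counterclockwise from +real axis
phase_deg = ats.angle(deg=True)  # same angle in degrees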
|
Deterministic term f of the complete network system
dy = f(y, t)dt + G(y, t).dot(dW)
(or for an ODE network system without noise, dy/dt = f(y, t))
Args:
y (array of shape (d,)): current state of the complete network system,
where d is the dimension of the overall state space.
Returns:
f (array of shape (d,)): Defines the deterministic term of the
complete network system
|
def f(self, y, t):
coupling = self.coupling_function[0]
res = np.empty_like(self.y0)
for j, m in enumerate(self.submodels):
slicej = slice(self._si[j], self._si[j+1])
target_y = y[slicej] # target node state
res[slicej] = m.f(target_y, t) # deterministic part of submodel j
# get indices of all source nodes that provide input to node j:
sources = np.nonzero(self.network[:,j])[0]
for i in sources:
weight = self.network[i, j]
source_y = y[slice(self._si[i], self._si[i+1])] # source state
res[slicej] += coupling(source_y, target_y, weight)
return res
| 766,775
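How the _si offsets work, in isolation: they are cumulative state dimensions, so submodel j owns the slice y[_si[j]:_si[j+1]] (a standalone sketch with made-up dimensions):
import numpy as np
dims = [8, 8, 3]                              # state dimension of each submodel
si = np.concatenate(([0], np.cumsum(dims)))   # [ 0  8 16 19]
y = np.arange(si[-1], dtype=float)            # full network state, d = 19
print(y[si[1]:si[2]])                         # the 8 state variables of node 1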
|
Allow submodels with scalar equations. Convert to 1D vector systems.
Args:
m (Model)
|
def _scalar_to_vector(self, m):
if not isinstance(m.y0, numbers.Number):
return m
else:
m = copy.deepcopy(m)
t0 = 0.0
if isinstance(m.y0, numbers.Integral):
numtype = np.float64
else:
numtype = type(m.y0)
y0_orig = m.y0
m.y0 = np.array([m.y0], dtype=numtype)
def make_vector_fn(fn):
def newfn(y, t):
return np.array([fn(y[0], t)], dtype=numtype)
newfn.__name__ = fn.__name__
return newfn
def make_matrix_fn(fn):
def newfn(y, t):
return np.array([[fn(y[0], t)]], dtype=numtype)
newfn.__name__ = fn.__name__
return newfn
def make_coupling_fn(fn):
def newfn(source_y, target_y, weight):
return np.array([fn(source_y[0], target_y[0], weight)])
newfn.__name__ = fn.__name__
return newfn
if isinstance(m.f(y0_orig, t0), numbers.Number):
m.f = make_vector_fn(m.f)
if hasattr(m, 'G') and isinstance(m.G(y0_orig,t0), numbers.Number):
m.G = make_matrix_fn(m.G)
if (hasattr(m, 'coupling') and
isinstance(m.coupling(y0_orig, y0_orig, 0.5),
numbers.Number)):
m.coupling = make_coupling_fn(m.coupling)
return m
| 766,780
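The wrapping that make_vector_fn performs, extracted as a standalone sketch: a scalar right-hand side becomes an equivalent length-1 vector system.
import numpy as np
def f(y, t):                       # scalar ODE: dy/dt = -y
    return -y
def make_vector_fn(fn):
    def newfn(y, t):
        return np.array([fn(y[0], t)])
    return newfn
fv = make_vector_fn(f)
print(fv(np.array([2.0]), 0.0))    # [-2.] -- shape (1,) instead of a scalar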
|
Make a lookup to a DNSBL provider
Parameters:
* addr (string) - IP address to check
* provider - DNSBL provider object (its host attribute forms the query)
Returns:
* DNSBLResponse object
Raises:
* ValueError
|
async def dnsbl_request(self, addr, provider):
response = None
error = None
try:
socket.inet_aton(addr)
except socket.error:
raise ValueError('wrong ip format')
ip_reversed = '.'.join(reversed(addr.split('.')))
dnsbl_query = "%s.%s" % (ip_reversed, provider.host)
try:
async with self._semaphore:
response = await self._resolver.query(dnsbl_query, 'A')
except aiodns.error.DNSError as exc:
if exc.args[0] != 4: # 4: domain name not found:
error = exc
return DNSBLResponse(addr=addr, provider=provider, response=response, error=error)
| 766,852
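The query-name construction in isolation: the IPv4 octets are reversed and the provider host appended (zen.spamhaus.org shown as a well-known example):
addr, host = '1.2.3.4', 'zen.spamhaus.org'
query = '.'.join(reversed(addr.split('.'))) + '.' + host
print(query)   # 4.3.2.1.zen.spamhaus.org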
|
Asynchronously check an IP address against all configured DNSBL providers.
Parameters:
* addr - ip address to check
Returns:
* DNSBLResult object
|
async def _check_ip(self, addr):
tasks = []
for provider in self.providers:
tasks.append(self.dnsbl_request(addr, provider))
results = await asyncio.gather(*tasks)
return DNSBLResult(addr=addr, results=results)
| 766,853
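A usage sketch; the checker object and how its providers are configured are assumptions about the surrounding library, and this calls a private coroutine directly:
import asyncio
loop = asyncio.get_event_loop()
result = loop.run_until_complete(checker._check_ip('127.0.0.2'))
# 127.0.0.2 is the conventional DNSBL test address
print(result)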
|
Delete temporary files not under version control.
Args:
deltox: If True, delete virtual environments used by tox
|
def clean(deltox=False):
basedir = dirname(__file__)
print(cyan('delete temp files and dirs for packaging'))
local(flo(
'rm -rf '
'{basedir}/.eggs/ '
'{basedir}/utlz.egg-info/ '
'{basedir}/dist '
'{basedir}/README '
'{basedir}/build/ '
))
print(cyan('\ndelete temp files and dirs for editing'))
local(flo(
'rm -rf '
'{basedir}/.cache '
'{basedir}/.ropeproject '
))
print(cyan('\ndelete bytecode compiled versions of the python src'))
# cf. http://stackoverflow.com/a/30659970
local(flo('find {basedir}/utlz {basedir}/tests ') +
'\( -name \*.pyc -o -name \*.pyo -o -name __pycache__ '
'-o -name \*.so -o -name \*.o -o -name \*.c \) '
'-prune '
'-exec rm -rf {} +')
if deltox:
print(cyan('\ndelete tox virtual environments'))
local(flo('cd {basedir} && rm -rf .tox/'))
| 767,326
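Invoked as a fabric task from the shell; fabric 1.x task-argument syntax assumed:
# fab clean
# fab clean:deltox=True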