code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
def module_del(self, key):
"""
Deregister from python module change events.
"""
if key in self._module_event_map:
del self._module_event_map[key]
if key in self._watch_modules.names:
            self._watch_modules.remove(key) | Deregister from python module change events. | Below is the instruction that describes the task:
### Input:
Deregister from python module change events.
### Response:
def module_del(self, key):
"""
Deregister from python module change events.
"""
if key in self._module_event_map:
del self._module_event_map[key]
if key in self._watch_modules.names:
self._watch_modules.remove(key) |
def create_epochs(data, events_onsets, sampling_rate=1000, duration=1, onset=0, index=None):
"""
Epoching a dataframe.
Parameters
----------
data : pandas.DataFrame
Data*time.
events_onsets : list
A list of event onsets indices.
sampling_rate : int
Sampling rate (samples/second).
duration : int or list
Duration(s) of each epoch(s) (in seconds).
onset : int
Epoch onset(s) relative to events_onsets (in seconds).
index : list
Events names in order that will be used as index. Must contains uniques names. If not provided, will be replaced by event number.
Returns
----------
epochs : dict
dict containing all epochs.
Example
----------
>>> import neurokit as nk
>>> epochs = nk.create_epochs(data, events_onsets)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- numpy
"""
# Convert ints to arrays if needed
if isinstance(duration, list) or isinstance(duration, np.ndarray):
duration = np.array(duration)
else:
duration = np.array([duration]*len(events_onsets))
if isinstance(onset, list) or isinstance(onset, np.ndarray):
onset = np.array(onset)
else:
onset = np.array([onset]*len(events_onsets))
if isinstance(data, list) or isinstance(data, np.ndarray) or isinstance(data, pd.Series):
data = pd.DataFrame({"Signal": list(data)})
# Store durations
duration_in_s = duration.copy()
onset_in_s = onset.copy()
# Convert to timepoints
duration = duration*sampling_rate
onset = onset*sampling_rate
# Create the index
if index is None:
index = list(range(len(events_onsets)))
else:
if len(list(set(index))) != len(index):
print("NeuroKit Warning: create_epochs(): events_names does not contain uniques names, replacing them by numbers.")
index = list(range(len(events_onsets)))
else:
index = list(index)
# Create epochs
epochs = {}
for event, event_onset in enumerate(events_onsets):
epoch_onset = int(event_onset + onset[event])
epoch_end = int(event_onset+duration[event]+1)
epoch = data[epoch_onset:epoch_end].copy()
epoch.index = np.linspace(start=onset_in_s[event], stop=duration_in_s[event], num=len(epoch), endpoint=True)
relative_time = np.linspace(start=onset[event], stop=duration[event], num=len(epoch), endpoint=True).astype(int).tolist()
absolute_time = np.linspace(start=epoch_onset, stop=epoch_end, num=len(epoch), endpoint=True).astype(int).tolist()
epoch["Epoch_Relative_Time"] = relative_time
epoch["Epoch_Absolute_Time"] = absolute_time
epochs[index[event]] = epoch
return(epochs) | Epoching a dataframe.
Parameters
----------
data : pandas.DataFrame
Data*time.
events_onsets : list
A list of event onsets indices.
sampling_rate : int
Sampling rate (samples/second).
duration : int or list
Duration(s) of each epoch(s) (in seconds).
onset : int
Epoch onset(s) relative to events_onsets (in seconds).
index : list
Events names in order that will be used as index. Must contains uniques names. If not provided, will be replaced by event number.
Returns
----------
epochs : dict
dict containing all epochs.
Example
----------
>>> import neurokit as nk
>>> epochs = nk.create_epochs(data, events_onsets)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
    - numpy | Below is the instruction that describes the task:
### Input:
Epoching a dataframe.
Parameters
----------
data : pandas.DataFrame
Data*time.
events_onsets : list
A list of event onsets indices.
sampling_rate : int
Sampling rate (samples/second).
duration : int or list
Duration(s) of each epoch(s) (in seconds).
onset : int
Epoch onset(s) relative to events_onsets (in seconds).
index : list
Events names in order that will be used as index. Must contains uniques names. If not provided, will be replaced by event number.
Returns
----------
epochs : dict
dict containing all epochs.
Example
----------
>>> import neurokit as nk
>>> epochs = nk.create_epochs(data, events_onsets)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- numpy
### Response:
def create_epochs(data, events_onsets, sampling_rate=1000, duration=1, onset=0, index=None):
"""
Epoching a dataframe.
Parameters
----------
data : pandas.DataFrame
Data*time.
events_onsets : list
A list of event onsets indices.
sampling_rate : int
Sampling rate (samples/second).
duration : int or list
Duration(s) of each epoch(s) (in seconds).
onset : int
Epoch onset(s) relative to events_onsets (in seconds).
index : list
Events names in order that will be used as index. Must contains uniques names. If not provided, will be replaced by event number.
Returns
----------
epochs : dict
dict containing all epochs.
Example
----------
>>> import neurokit as nk
>>> epochs = nk.create_epochs(data, events_onsets)
Notes
----------
*Authors*
- Dominique Makowski (https://github.com/DominiqueMakowski)
*Dependencies*
- numpy
"""
# Convert ints to arrays if needed
if isinstance(duration, list) or isinstance(duration, np.ndarray):
duration = np.array(duration)
else:
duration = np.array([duration]*len(events_onsets))
if isinstance(onset, list) or isinstance(onset, np.ndarray):
onset = np.array(onset)
else:
onset = np.array([onset]*len(events_onsets))
if isinstance(data, list) or isinstance(data, np.ndarray) or isinstance(data, pd.Series):
data = pd.DataFrame({"Signal": list(data)})
# Store durations
duration_in_s = duration.copy()
onset_in_s = onset.copy()
# Convert to timepoints
duration = duration*sampling_rate
onset = onset*sampling_rate
# Create the index
if index is None:
index = list(range(len(events_onsets)))
else:
if len(list(set(index))) != len(index):
print("NeuroKit Warning: create_epochs(): events_names does not contain uniques names, replacing them by numbers.")
index = list(range(len(events_onsets)))
else:
index = list(index)
# Create epochs
epochs = {}
for event, event_onset in enumerate(events_onsets):
epoch_onset = int(event_onset + onset[event])
epoch_end = int(event_onset+duration[event]+1)
epoch = data[epoch_onset:epoch_end].copy()
epoch.index = np.linspace(start=onset_in_s[event], stop=duration_in_s[event], num=len(epoch), endpoint=True)
relative_time = np.linspace(start=onset[event], stop=duration[event], num=len(epoch), endpoint=True).astype(int).tolist()
absolute_time = np.linspace(start=epoch_onset, stop=epoch_end, num=len(epoch), endpoint=True).astype(int).tolist()
epoch["Epoch_Relative_Time"] = relative_time
epoch["Epoch_Absolute_Time"] = absolute_time
epochs[index[event]] = epoch
return(epochs) |
def pad(data, length):
"""This function returns a padded version of the input data to the
given length. this function will shorten the given data to the length
specified if necessary. post-condition: len(data) = length
:param data: the data byte array to pad
:param length: the length to pad the array to
"""
if (len(data) > length):
return data[0:length]
else:
return data + b"\0" * (length - len(data)) | This function returns a padded version of the input data to the
given length. this function will shorten the given data to the length
specified if necessary. post-condition: len(data) = length
:param data: the data byte array to pad
    :param length: the length to pad the array to | Below is the instruction that describes the task:
### Input:
This function returns a padded version of the input data to the
given length. this function will shorten the given data to the length
specified if necessary. post-condition: len(data) = length
:param data: the data byte array to pad
:param length: the length to pad the array to
### Response:
def pad(data, length):
"""This function returns a padded version of the input data to the
given length. this function will shorten the given data to the length
specified if necessary. post-condition: len(data) = length
:param data: the data byte array to pad
:param length: the length to pad the array to
"""
if (len(data) > length):
return data[0:length]
else:
return data + b"\0" * (length - len(data)) |
def process_exception(self, request, exception):
"""Report exceptions from requests via Exreporter.
"""
gc = GithubCredentials(
user=settings.EXREPORTER_GITHUB_USER,
repo=settings.EXREPORTER_GITHUB_REPO,
auth_token=settings.EXREPORTER_GITHUB_AUTH_TOKEN)
gs = GithubStore(credentials=gc)
reporter = ExReporter(
store=gs, labels=settings.EXREPORTER_GITHUB_LABELS)
reporter.report() | Report exceptions from requests via Exreporter. | Below is the the instruction that describes the task:
### Input:
Report exceptions from requests via Exreporter.
### Response:
def process_exception(self, request, exception):
"""Report exceptions from requests via Exreporter.
"""
gc = GithubCredentials(
user=settings.EXREPORTER_GITHUB_USER,
repo=settings.EXREPORTER_GITHUB_REPO,
auth_token=settings.EXREPORTER_GITHUB_AUTH_TOKEN)
gs = GithubStore(credentials=gc)
reporter = ExReporter(
store=gs, labels=settings.EXREPORTER_GITHUB_LABELS)
reporter.report() |
def execute(sql, args=None, key='default'):
"""It is used for update, delete records.
:param sql string: the sql stamtement like 'select * from %s'
:param args list: Wen set None, will use dbi execute(sql), else
dbi execute(sql, args), the args keep the original rules, it shuld be tuple or list of list
:param key: a key for your dabtabase you wanna use
eg::
execute('insert into users values(%s, %s)', [(1L, 'blablabla'), (2L, 'animer')])
execute('delete from users')
"""
database = __db[key]
return database.execute(sql, args) | It is used for update, delete records.
:param sql string: the sql stamtement like 'select * from %s'
:param args list: Wen set None, will use dbi execute(sql), else
dbi execute(sql, args), the args keep the original rules, it shuld be tuple or list of list
:param key: a key for your dabtabase you wanna use
eg::
execute('insert into users values(%s, %s)', [(1L, 'blablabla'), (2L, 'animer')])
    execute('delete from users') | Below is the instruction that describes the task:
### Input:
It is used for update, delete records.
:param sql string: the sql stamtement like 'select * from %s'
:param args list: Wen set None, will use dbi execute(sql), else
dbi execute(sql, args), the args keep the original rules, it shuld be tuple or list of list
:param key: a key for your dabtabase you wanna use
eg::
execute('insert into users values(%s, %s)', [(1L, 'blablabla'), (2L, 'animer')])
execute('delete from users')
### Response:
def execute(sql, args=None, key='default'):
"""It is used for update, delete records.
:param sql string: the sql stamtement like 'select * from %s'
:param args list: Wen set None, will use dbi execute(sql), else
dbi execute(sql, args), the args keep the original rules, it shuld be tuple or list of list
:param key: a key for your dabtabase you wanna use
eg::
execute('insert into users values(%s, %s)', [(1L, 'blablabla'), (2L, 'animer')])
execute('delete from users')
"""
database = __db[key]
return database.execute(sql, args) |
def error_view(template_dir=None):
"""
Create the Error view
Must be instantiated
import error_view
ErrorView = error_view()
:param template_dir: The directory containing the view pages
:return:
"""
if not template_dir:
template_dir = "Pylot/Error"
template_page = "%s/index.html" % template_dir
class Error(Pylot):
"""
Error Views
"""
@classmethod
def register(cls, app, **kwargs):
super(cls, cls).register(app, **kwargs)
@app.errorhandler(400)
def error_400(error):
return cls.index(error, 400)
@app.errorhandler(401)
def error_401(error):
return cls.index(error, 401)
@app.errorhandler(403)
def error_403(error):
return cls.index(error, 403)
@app.errorhandler(404)
def error_404(error):
return cls.index(error, 404)
@app.errorhandler(500)
def error_500(error):
return cls.index(error, 500)
@app.errorhandler(503)
def error_503(error):
return cls.index(error, 503)
@classmethod
def index(cls, error, code):
cls.meta_(title="Error %s" % code)
return cls.render(error=error, view_template=template_page), code
return Error | Create the Error view
Must be instantiated
import error_view
ErrorView = error_view()
:param template_dir: The directory containing the view pages
    :return: | Below is the instruction that describes the task:
### Input:
Create the Error view
Must be instantiated
import error_view
ErrorView = error_view()
:param template_dir: The directory containing the view pages
:return:
### Response:
def error_view(template_dir=None):
"""
Create the Error view
Must be instantiated
import error_view
ErrorView = error_view()
:param template_dir: The directory containing the view pages
:return:
"""
if not template_dir:
template_dir = "Pylot/Error"
template_page = "%s/index.html" % template_dir
class Error(Pylot):
"""
Error Views
"""
@classmethod
def register(cls, app, **kwargs):
super(cls, cls).register(app, **kwargs)
@app.errorhandler(400)
def error_400(error):
return cls.index(error, 400)
@app.errorhandler(401)
def error_401(error):
return cls.index(error, 401)
@app.errorhandler(403)
def error_403(error):
return cls.index(error, 403)
@app.errorhandler(404)
def error_404(error):
return cls.index(error, 404)
@app.errorhandler(500)
def error_500(error):
return cls.index(error, 500)
@app.errorhandler(503)
def error_503(error):
return cls.index(error, 503)
@classmethod
def index(cls, error, code):
cls.meta_(title="Error %s" % code)
return cls.render(error=error, view_template=template_page), code
return Error |
def halfmax_points(self):
"""Get the bandpass' half-maximum wavelengths. These can be used to
compute a representative bandwidth, or for display purposes.
Unlike calc_halfmax_points(), this function will use a cached value if
available.
"""
t = self.registry._halfmaxes.get((self.telescope, self.band))
if t is not None:
return t
t = self.calc_halfmax_points()
self.registry.register_halfmaxes(self.telescope, self.band, t[0], t[1])
return t | Get the bandpass' half-maximum wavelengths. These can be used to
compute a representative bandwidth, or for display purposes.
Unlike calc_halfmax_points(), this function will use a cached value if
    available. | Below is the instruction that describes the task:
### Input:
Get the bandpass' half-maximum wavelengths. These can be used to
compute a representative bandwidth, or for display purposes.
Unlike calc_halfmax_points(), this function will use a cached value if
available.
### Response:
def halfmax_points(self):
"""Get the bandpass' half-maximum wavelengths. These can be used to
compute a representative bandwidth, or for display purposes.
Unlike calc_halfmax_points(), this function will use a cached value if
available.
"""
t = self.registry._halfmaxes.get((self.telescope, self.band))
if t is not None:
return t
t = self.calc_halfmax_points()
self.registry.register_halfmaxes(self.telescope, self.band, t[0], t[1])
return t |
def fit_transform(self, X, u=None):
"""Fit X into an embedded space and return that transformed
output.
Inputs
----------
X : array, shape (n_samples, n_features). X contains a sample per row.
Returns
-------
embedding : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X, u)
return self.embedding | Fit X into an embedded space and return that transformed
output.
Inputs
----------
X : array, shape (n_samples, n_features). X contains a sample per row.
Returns
-------
embedding : array, shape (n_samples, n_components)
        Embedding of the training data in low-dimensional space. | Below is the instruction that describes the task:
### Input:
Fit X into an embedded space and return that transformed
output.
Inputs
----------
X : array, shape (n_samples, n_features). X contains a sample per row.
Returns
-------
embedding : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
### Response:
def fit_transform(self, X, u=None):
"""Fit X into an embedded space and return that transformed
output.
Inputs
----------
X : array, shape (n_samples, n_features). X contains a sample per row.
Returns
-------
embedding : array, shape (n_samples, n_components)
Embedding of the training data in low-dimensional space.
"""
self.fit(X, u)
return self.embedding |
def debugDumpOneNode(self, output, depth):
"""Dumps debug information for the element node, it is not
recursive """
libxml2mod.xmlDebugDumpOneNode(output, self._o, depth) | Dumps debug information for the element node, it is not
    recursive | Below is the instruction that describes the task:
### Input:
Dumps debug information for the element node, it is not
recursive
### Response:
def debugDumpOneNode(self, output, depth):
"""Dumps debug information for the element node, it is not
recursive """
libxml2mod.xmlDebugDumpOneNode(output, self._o, depth) |
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for file_path, processor in self._processors.items():
while not processor.done:
                time.sleep(0.1) | Sleeps until all the processors are done. | Below is the instruction that describes the task:
### Input:
Sleeps until all the processors are done.
### Response:
def wait_until_finished(self):
"""
Sleeps until all the processors are done.
"""
for file_path, processor in self._processors.items():
while not processor.done:
time.sleep(0.1) |
def pil_image(self):
"""Return a PIL image from the current image.
"""
channels, fill_value = self._finalize()
if self.is_empty():
return Pil.new(self.mode, (0, 0))
if self.mode == "L":
if fill_value is not None:
img = Pil.fromarray(channels[0].filled(fill_value))
else:
img = Pil.fromarray(channels[0].filled(0))
alpha = np.zeros(channels[0].shape, np.uint8)
mask = np.ma.getmaskarray(channels[0])
alpha = np.where(mask, alpha, 255)
pil_alpha = Pil.fromarray(alpha)
img = Pil.merge("LA", (img, pil_alpha))
elif self.mode == "LA":
if fill_value is not None:
img = Pil.fromarray(channels[0].filled(fill_value))
pil_alpha = Pil.fromarray(channels[1])
else:
img = Pil.fromarray(channels[0].filled(0))
alpha = np.zeros(channels[0].shape, np.uint8)
mask = np.ma.getmaskarray(channels[0])
alpha = np.where(mask, alpha, channels[1])
pil_alpha = Pil.fromarray(alpha)
img = Pil.merge("LA", (img, pil_alpha))
elif self.mode == "RGB":
# Mask where all channels have missing data (incomplete data will
# be shown).
mask = (np.ma.getmaskarray(channels[0]) &
np.ma.getmaskarray(channels[1]) &
np.ma.getmaskarray(channels[2]))
if fill_value is not None:
pil_r = Pil.fromarray(channels[0].filled(fill_value[0]))
pil_g = Pil.fromarray(channels[1].filled(fill_value[1]))
pil_b = Pil.fromarray(channels[2].filled(fill_value[2]))
img = Pil.merge("RGB", (pil_r, pil_g, pil_b))
else:
pil_r = Pil.fromarray(channels[0].filled(0))
pil_g = Pil.fromarray(channels[1].filled(0))
pil_b = Pil.fromarray(channels[2].filled(0))
alpha = np.zeros(channels[0].shape, np.uint8)
alpha = np.where(mask, alpha, 255)
pil_a = Pil.fromarray(alpha)
img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a))
elif self.mode == "RGBA":
# Mask where all channels have missing data (incomplete data will
# be shown).
mask = (np.ma.getmaskarray(channels[0]) &
np.ma.getmaskarray(channels[1]) &
np.ma.getmaskarray(channels[2]) &
np.ma.getmaskarray(channels[3]))
if fill_value is not None:
pil_r = Pil.fromarray(channels[0].filled(fill_value[0]))
pil_g = Pil.fromarray(channels[1].filled(fill_value[1]))
pil_b = Pil.fromarray(channels[2].filled(fill_value[2]))
pil_a = Pil.fromarray(channels[3].filled(fill_value[3]))
img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a))
else:
pil_r = Pil.fromarray(channels[0].filled(0))
pil_g = Pil.fromarray(channels[1].filled(0))
pil_b = Pil.fromarray(channels[2].filled(0))
alpha = np.where(mask, 0, channels[3])
pil_a = Pil.fromarray(alpha)
img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a))
else:
raise TypeError("Does not know how to use mode %s." % (self.mode))
    return img | Return a PIL image from the current image. | Below is the instruction that describes the task:
### Input:
Return a PIL image from the current image.
### Response:
def pil_image(self):
"""Return a PIL image from the current image.
"""
channels, fill_value = self._finalize()
if self.is_empty():
return Pil.new(self.mode, (0, 0))
if self.mode == "L":
if fill_value is not None:
img = Pil.fromarray(channels[0].filled(fill_value))
else:
img = Pil.fromarray(channels[0].filled(0))
alpha = np.zeros(channels[0].shape, np.uint8)
mask = np.ma.getmaskarray(channels[0])
alpha = np.where(mask, alpha, 255)
pil_alpha = Pil.fromarray(alpha)
img = Pil.merge("LA", (img, pil_alpha))
elif self.mode == "LA":
if fill_value is not None:
img = Pil.fromarray(channels[0].filled(fill_value))
pil_alpha = Pil.fromarray(channels[1])
else:
img = Pil.fromarray(channels[0].filled(0))
alpha = np.zeros(channels[0].shape, np.uint8)
mask = np.ma.getmaskarray(channels[0])
alpha = np.where(mask, alpha, channels[1])
pil_alpha = Pil.fromarray(alpha)
img = Pil.merge("LA", (img, pil_alpha))
elif self.mode == "RGB":
# Mask where all channels have missing data (incomplete data will
# be shown).
mask = (np.ma.getmaskarray(channels[0]) &
np.ma.getmaskarray(channels[1]) &
np.ma.getmaskarray(channels[2]))
if fill_value is not None:
pil_r = Pil.fromarray(channels[0].filled(fill_value[0]))
pil_g = Pil.fromarray(channels[1].filled(fill_value[1]))
pil_b = Pil.fromarray(channels[2].filled(fill_value[2]))
img = Pil.merge("RGB", (pil_r, pil_g, pil_b))
else:
pil_r = Pil.fromarray(channels[0].filled(0))
pil_g = Pil.fromarray(channels[1].filled(0))
pil_b = Pil.fromarray(channels[2].filled(0))
alpha = np.zeros(channels[0].shape, np.uint8)
alpha = np.where(mask, alpha, 255)
pil_a = Pil.fromarray(alpha)
img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a))
elif self.mode == "RGBA":
# Mask where all channels have missing data (incomplete data will
# be shown).
mask = (np.ma.getmaskarray(channels[0]) &
np.ma.getmaskarray(channels[1]) &
np.ma.getmaskarray(channels[2]) &
np.ma.getmaskarray(channels[3]))
if fill_value is not None:
pil_r = Pil.fromarray(channels[0].filled(fill_value[0]))
pil_g = Pil.fromarray(channels[1].filled(fill_value[1]))
pil_b = Pil.fromarray(channels[2].filled(fill_value[2]))
pil_a = Pil.fromarray(channels[3].filled(fill_value[3]))
img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a))
else:
pil_r = Pil.fromarray(channels[0].filled(0))
pil_g = Pil.fromarray(channels[1].filled(0))
pil_b = Pil.fromarray(channels[2].filled(0))
alpha = np.where(mask, 0, channels[3])
pil_a = Pil.fromarray(alpha)
img = Pil.merge("RGBA", (pil_r, pil_g, pil_b, pil_a))
else:
raise TypeError("Does not know how to use mode %s." % (self.mode))
return img |
def _read_field(self):
'''
Read a single byte for field type, then read the value.
'''
ftype = self._input[self._pos]
self._pos += 1
reader = self.field_type_map.get(ftype)
if reader:
return reader(self)
        raise Reader.FieldError('Unknown field type %s', ftype) | Read a single byte for field type, then read the value. | Below is the instruction that describes the task:
### Input:
Read a single byte for field type, then read the value.
### Response:
def _read_field(self):
'''
Read a single byte for field type, then read the value.
'''
ftype = self._input[self._pos]
self._pos += 1
reader = self.field_type_map.get(ftype)
if reader:
return reader(self)
raise Reader.FieldError('Unknown field type %s', ftype) |
async def mail(
self,
sender: str,
options: Iterable[str] = None,
timeout: DefaultNumType = _default,
) -> SMTPResponse:
"""
Send an SMTP MAIL command, which specifies the message sender and
begins a new mail transfer session ("envelope").
:raises SMTPSenderRefused: on unexpected server response code
"""
await self._ehlo_or_helo_if_needed()
if options is None:
options = []
options_bytes = [option.encode("ascii") for option in options]
from_string = b"FROM:" + quote_address(sender).encode("ascii")
async with self._command_lock:
response = await self.execute_command(
b"MAIL", from_string, *options_bytes, timeout=timeout
)
if response.code != SMTPStatus.completed:
raise SMTPSenderRefused(response.code, response.message, sender)
return response | Send an SMTP MAIL command, which specifies the message sender and
begins a new mail transfer session ("envelope").
    :raises SMTPSenderRefused: on unexpected server response code | Below is the instruction that describes the task:
### Input:
Send an SMTP MAIL command, which specifies the message sender and
begins a new mail transfer session ("envelope").
:raises SMTPSenderRefused: on unexpected server response code
### Response:
async def mail(
self,
sender: str,
options: Iterable[str] = None,
timeout: DefaultNumType = _default,
) -> SMTPResponse:
"""
Send an SMTP MAIL command, which specifies the message sender and
begins a new mail transfer session ("envelope").
:raises SMTPSenderRefused: on unexpected server response code
"""
await self._ehlo_or_helo_if_needed()
if options is None:
options = []
options_bytes = [option.encode("ascii") for option in options]
from_string = b"FROM:" + quote_address(sender).encode("ascii")
async with self._command_lock:
response = await self.execute_command(
b"MAIL", from_string, *options_bytes, timeout=timeout
)
if response.code != SMTPStatus.completed:
raise SMTPSenderRefused(response.code, response.message, sender)
return response |
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f | Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
    that blueprint. | Below is the instruction that describes the task:
### Input:
Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
### Response:
def before_request(self, f):
"""Like :meth:`Flask.before_request` but for a blueprint. This function
is only executed before each request that is handled by a function of
that blueprint.
"""
self.record_once(lambda s: s.app.before_request_funcs
.setdefault(self.name, []).append(f))
return f |
def write_hier_all(self, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False):
"""Write hierarchy for all GO Terms in obo file."""
# Print: [biological_process, molecular_function, and cellular_component]
for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']:
            self.write_hier(go_id, out, len_dash, max_depth, num_child, short_prt, None) | Write hierarchy for all GO Terms in obo file. | Below is the instruction that describes the task:
### Input:
Write hierarchy for all GO Terms in obo file.
### Response:
def write_hier_all(self, out=sys.stdout,
len_dash=1, max_depth=None, num_child=None, short_prt=False):
"""Write hierarchy for all GO Terms in obo file."""
# Print: [biological_process, molecular_function, and cellular_component]
for go_id in ['GO:0008150', 'GO:0003674', 'GO:0005575']:
self.write_hier(go_id, out, len_dash, max_depth, num_child, short_prt, None) |
def _remotes_on(port, which_end):
'''
Return a set of ip addrs active tcp connections
'''
port = int(port)
ret = _netlink_tool_remote_on(port, which_end)
if ret is not None:
return ret
ret = set()
proc_available = False
for statf in ['/proc/net/tcp', '/proc/net/tcp6']:
if os.path.isfile(statf):
proc_available = True
with salt.utils.files.fopen(statf, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.strip().startswith('sl'):
continue
iret = _parse_tcp_line(line)
sl = next(iter(iret))
if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED
ret.add(iret[sl]['remote_addr'])
if not proc_available: # Fallback to use OS specific tools
if salt.utils.platform.is_sunos():
return _sunos_remotes_on(port, which_end)
if salt.utils.platform.is_freebsd():
return _freebsd_remotes_on(port, which_end)
if salt.utils.platform.is_netbsd():
return _netbsd_remotes_on(port, which_end)
if salt.utils.platform.is_openbsd():
return _openbsd_remotes_on(port, which_end)
if salt.utils.platform.is_windows():
return _windows_remotes_on(port, which_end)
if salt.utils.platform.is_aix():
return _aix_remotes_on(port, which_end)
return _linux_remotes_on(port, which_end)
    return ret | Return a set of ip addrs active tcp connections | Below is the instruction that describes the task:
### Input:
Return a set of ip addrs active tcp connections
### Response:
def _remotes_on(port, which_end):
'''
Return a set of ip addrs active tcp connections
'''
port = int(port)
ret = _netlink_tool_remote_on(port, which_end)
if ret is not None:
return ret
ret = set()
proc_available = False
for statf in ['/proc/net/tcp', '/proc/net/tcp6']:
if os.path.isfile(statf):
proc_available = True
with salt.utils.files.fopen(statf, 'r') as fp_:
for line in fp_:
line = salt.utils.stringutils.to_unicode(line)
if line.strip().startswith('sl'):
continue
iret = _parse_tcp_line(line)
sl = next(iter(iret))
if iret[sl][which_end] == port and iret[sl]['state'] == 1: # 1 is ESTABLISHED
ret.add(iret[sl]['remote_addr'])
if not proc_available: # Fallback to use OS specific tools
if salt.utils.platform.is_sunos():
return _sunos_remotes_on(port, which_end)
if salt.utils.platform.is_freebsd():
return _freebsd_remotes_on(port, which_end)
if salt.utils.platform.is_netbsd():
return _netbsd_remotes_on(port, which_end)
if salt.utils.platform.is_openbsd():
return _openbsd_remotes_on(port, which_end)
if salt.utils.platform.is_windows():
return _windows_remotes_on(port, which_end)
if salt.utils.platform.is_aix():
return _aix_remotes_on(port, which_end)
return _linux_remotes_on(port, which_end)
return ret |
def transaction_id(self, transaction_id):
"""
Sets the transaction_id of this AdditionalRecipientReceivable.
The ID of the transaction that the additional recipient receivable was applied to.
:param transaction_id: The transaction_id of this AdditionalRecipientReceivable.
:type: str
"""
if transaction_id is None:
raise ValueError("Invalid value for `transaction_id`, must not be `None`")
if len(transaction_id) < 1:
raise ValueError("Invalid value for `transaction_id`, length must be greater than or equal to `1`")
self._transaction_id = transaction_id | Sets the transaction_id of this AdditionalRecipientReceivable.
The ID of the transaction that the additional recipient receivable was applied to.
:param transaction_id: The transaction_id of this AdditionalRecipientReceivable.
    :type: str | Below is the instruction that describes the task:
### Input:
Sets the transaction_id of this AdditionalRecipientReceivable.
The ID of the transaction that the additional recipient receivable was applied to.
:param transaction_id: The transaction_id of this AdditionalRecipientReceivable.
:type: str
### Response:
def transaction_id(self, transaction_id):
"""
Sets the transaction_id of this AdditionalRecipientReceivable.
The ID of the transaction that the additional recipient receivable was applied to.
:param transaction_id: The transaction_id of this AdditionalRecipientReceivable.
:type: str
"""
if transaction_id is None:
raise ValueError("Invalid value for `transaction_id`, must not be `None`")
if len(transaction_id) < 1:
raise ValueError("Invalid value for `transaction_id`, length must be greater than or equal to `1`")
self._transaction_id = transaction_id |
def parse_help_text(self, file_path):
''' Load of list of commands and descriptions from a file. '''
with open(file_path) as f:
lines = f.readlines()
# Parse commands and descriptions, which are separated by a multi-space
# (any sequence of two or more space characters in a row.
cmds = []
descs = []
for line in lines:
line = line.strip()
if len(line) == 0:
cmds.append('')
descs.append('')
else:
tokens = line.split(' ')
cmds.append(tokens[0])
descs.append(''.join(tokens[1:]).strip())
max_len = len(max(cmds, key=len))
# Convert commands and descriptions into help text.
text = ''
for cmd, desc in zip(cmds, descs):
if len(cmd) == 0:
text += '\n'
else:
text += self.style.help(cmd.ljust(max_len + 2), desc + '\n')
return cmds, text | Load of list of commands and descriptions from a file. | Below is the the instruction that describes the task:
### Input:
Load of list of commands and descriptions from a file.
### Response:
def parse_help_text(self, file_path):
''' Load of list of commands and descriptions from a file. '''
with open(file_path) as f:
lines = f.readlines()
# Parse commands and descriptions, which are separated by a multi-space
# (any sequence of two or more space characters in a row.
cmds = []
descs = []
for line in lines:
line = line.strip()
if len(line) == 0:
cmds.append('')
descs.append('')
else:
tokens = line.split(' ')
cmds.append(tokens[0])
descs.append(''.join(tokens[1:]).strip())
max_len = len(max(cmds, key=len))
# Convert commands and descriptions into help text.
text = ''
for cmd, desc in zip(cmds, descs):
if len(cmd) == 0:
text += '\n'
else:
text += self.style.help(cmd.ljust(max_len + 2), desc + '\n')
return cmds, text |
def getWorkerInfo(dataTask):
"""Returns the total execution time and task quantity by worker"""
workertime = []
workertasks = []
for fichier, vals in dataTask.items():
if hasattr(vals, 'values'):
#workers_names.append(fichier)
# Data from worker
totaltime = sum([a['executionTime'] for a in vals.values()])
totaltasks = sum([1 for a in vals.values()])
workertime.append(totaltime)
workertasks.append(totaltasks)
return workertime, workertasks | Returns the total execution time and task quantity by worker | Below is the the instruction that describes the task:
### Input:
Returns the total execution time and task quantity by worker
### Response:
def getWorkerInfo(dataTask):
"""Returns the total execution time and task quantity by worker"""
workertime = []
workertasks = []
for fichier, vals in dataTask.items():
if hasattr(vals, 'values'):
#workers_names.append(fichier)
# Data from worker
totaltime = sum([a['executionTime'] for a in vals.values()])
totaltasks = sum([1 for a in vals.values()])
workertime.append(totaltime)
workertasks.append(totaltasks)
return workertime, workertasks |
def rollback(self):
"""Implementation of NAPALM method rollback."""
commands = []
commands.append('configure replace flash:rollback-0')
commands.append('write memory')
self.device.run_commands(commands) | Implementation of NAPALM method rollback. | Below is the the instruction that describes the task:
### Input:
Implementation of NAPALM method rollback.
### Response:
def rollback(self):
"""Implementation of NAPALM method rollback."""
commands = []
commands.append('configure replace flash:rollback-0')
commands.append('write memory')
self.device.run_commands(commands) |
def update_J(self):
"""Updates J, JTJ, and internal counters."""
self.calc_J()
# np.dot(j, j.T) is slightly faster but 2x as much mem
step = np.ceil(1e-2 * self.J.shape[1]).astype('int') # 1% more mem...
self.JTJ = low_mem_sq(self.J, step=step)
#copies still, since J is not C -ordered but a slice of j_e...
#doing self.J.copy() works but takes 2x as much ram..
self._fresh_JTJ = True
self._J_update_counter = 0
if np.any(np.isnan(self.JTJ)):
raise FloatingPointError('J, JTJ have nans.')
#Update self._exp_err
self._exp_err = self.error - self.find_expected_error(delta_params='perfect') | Updates J, JTJ, and internal counters. | Below is the the instruction that describes the task:
### Input:
Updates J, JTJ, and internal counters.
### Response:
def update_J(self):
"""Updates J, JTJ, and internal counters."""
self.calc_J()
# np.dot(j, j.T) is slightly faster but 2x as much mem
step = np.ceil(1e-2 * self.J.shape[1]).astype('int') # 1% more mem...
self.JTJ = low_mem_sq(self.J, step=step)
#copies still, since J is not C -ordered but a slice of j_e...
#doing self.J.copy() works but takes 2x as much ram..
self._fresh_JTJ = True
self._J_update_counter = 0
if np.any(np.isnan(self.JTJ)):
raise FloatingPointError('J, JTJ have nans.')
#Update self._exp_err
self._exp_err = self.error - self.find_expected_error(delta_params='perfect') |
def _set_config(self, config=None):
"""Set this component's initial configuration"""
if not config:
config = {}
try:
# pprint(self.configschema)
self.config = self.componentmodel(config)
# self.log("Config schema:", lvl=critical)
# pprint(self.config.__dict__)
# pprint(self.config._fields)
try:
name = self.config.name
self.log("Name set to: ", name, lvl=verbose)
except (AttributeError, KeyError): # pragma: no cover
self.log("Has no name.", lvl=verbose)
try:
self.config.name = self.uniquename
except (AttributeError, KeyError) as e: # pragma: no cover
self.log("Cannot set component name for configuration: ", e,
type(e), self.name, exc=True, lvl=critical)
try:
uuid = self.config.uuid
self.log("UUID set to: ", uuid, lvl=verbose)
except (AttributeError, KeyError):
self.log("Has no UUID", lvl=verbose)
self.config.uuid = str(uuid4())
try:
notes = self.config.notes
self.log("Notes set to: ", notes, lvl=verbose)
except (AttributeError, KeyError):
self.log("Has no notes, trying docstring", lvl=verbose)
notes = self.__doc__
if notes is None:
notes = "No notes."
else:
notes = notes.lstrip().rstrip()
self.log(notes)
self.config.notes = notes
try:
componentclass = self.config.componentclass
self.log("Componentclass set to: ", componentclass,
lvl=verbose)
except (AttributeError, KeyError):
self.log("Has no component class", lvl=verbose)
self.config.componentclass = self.name
except ValidationError as e:
self.log("Not setting invalid component configuration: ", e,
type(e), exc=True, lvl=error) | Set this component's initial configuration | Below is the the instruction that describes the task:
### Input:
Set this component's initial configuration
### Response:
def _set_config(self, config=None):
"""Set this component's initial configuration"""
if not config:
config = {}
try:
# pprint(self.configschema)
self.config = self.componentmodel(config)
# self.log("Config schema:", lvl=critical)
# pprint(self.config.__dict__)
# pprint(self.config._fields)
try:
name = self.config.name
self.log("Name set to: ", name, lvl=verbose)
except (AttributeError, KeyError): # pragma: no cover
self.log("Has no name.", lvl=verbose)
try:
self.config.name = self.uniquename
except (AttributeError, KeyError) as e: # pragma: no cover
self.log("Cannot set component name for configuration: ", e,
type(e), self.name, exc=True, lvl=critical)
try:
uuid = self.config.uuid
self.log("UUID set to: ", uuid, lvl=verbose)
except (AttributeError, KeyError):
self.log("Has no UUID", lvl=verbose)
self.config.uuid = str(uuid4())
try:
notes = self.config.notes
self.log("Notes set to: ", notes, lvl=verbose)
except (AttributeError, KeyError):
self.log("Has no notes, trying docstring", lvl=verbose)
notes = self.__doc__
if notes is None:
notes = "No notes."
else:
notes = notes.lstrip().rstrip()
self.log(notes)
self.config.notes = notes
try:
componentclass = self.config.componentclass
self.log("Componentclass set to: ", componentclass,
lvl=verbose)
except (AttributeError, KeyError):
self.log("Has no component class", lvl=verbose)
self.config.componentclass = self.name
except ValidationError as e:
self.log("Not setting invalid component configuration: ", e,
type(e), exc=True, lvl=error) |
def _extract_match(self, candidate, offset):
"""Attempts to extract a match from a candidate string.
Arguments:
candidate -- The candidate text that might contain a phone number.
offset -- The offset of candidate within self.text
Returns the match found, None if none can be found
"""
# Skip a match that is more likely a publication page reference or a
# date.
if (_SLASH_SEPARATED_DATES.search(candidate)):
return None
# Skip potential time-stamps.
if _TIME_STAMPS.search(candidate):
following_text = self.text[offset + len(candidate):]
if _TIME_STAMPS_SUFFIX.match(following_text):
return None
# Try to come up with a valid match given the entire candidate.
match = self._parse_and_verify(candidate, offset)
if match is not None:
return match
# If that failed, try to find an "inner match" -- there might be a
# phone number within this candidate.
return self._extract_inner_match(candidate, offset) | Attempts to extract a match from a candidate string.
Arguments:
candidate -- The candidate text that might contain a phone number.
offset -- The offset of candidate within self.text
Returns the match found, None if none can be found | Below is the the instruction that describes the task:
### Input:
Attempts to extract a match from a candidate string.
Arguments:
candidate -- The candidate text that might contain a phone number.
offset -- The offset of candidate within self.text
Returns the match found, None if none can be found
### Response:
def _extract_match(self, candidate, offset):
"""Attempts to extract a match from a candidate string.
Arguments:
candidate -- The candidate text that might contain a phone number.
offset -- The offset of candidate within self.text
Returns the match found, None if none can be found
"""
# Skip a match that is more likely a publication page reference or a
# date.
if (_SLASH_SEPARATED_DATES.search(candidate)):
return None
# Skip potential time-stamps.
if _TIME_STAMPS.search(candidate):
following_text = self.text[offset + len(candidate):]
if _TIME_STAMPS_SUFFIX.match(following_text):
return None
# Try to come up with a valid match given the entire candidate.
match = self._parse_and_verify(candidate, offset)
if match is not None:
return match
# If that failed, try to find an "inner match" -- there might be a
# phone number within this candidate.
return self._extract_inner_match(candidate, offset) |
def updateRules( self ):
"""
Updates the query line items to match the latest rule options.
"""
terms = sorted(self._rules.keys())
for child in self.lineWidgets():
child.setTerms(terms) | Updates the query line items to match the latest rule options. | Below is the the instruction that describes the task:
### Input:
Updates the query line items to match the latest rule options.
### Response:
def updateRules( self ):
"""
Updates the query line items to match the latest rule options.
"""
terms = sorted(self._rules.keys())
for child in self.lineWidgets():
child.setTerms(terms) |
def is_ancestor_of_repository(self, id_, repository_id):
"""Tests if an ``Id`` is an ancestor of a repository.
arg: id (osid.id.Id): an ``Id``
arg: repository_id (osid.id.Id): the Id of a repository
return: (boolean) - ``true`` if this ``id`` is an ancestor of
``repository_id,`` ``false`` otherwise
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_ancestor_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=repository_id)
return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=repository_id) | Tests if an ``Id`` is an ancestor of a repository.
arg: id (osid.id.Id): an ``Id``
arg: repository_id (osid.id.Id): the Id of a repository
return: (boolean) - ``true`` if this ``id`` is an ancestor of
``repository_id,`` ``false`` otherwise
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``. | Below is the the instruction that describes the task:
### Input:
Tests if an ``Id`` is an ancestor of a repository.
arg: id (osid.id.Id): an ``Id``
arg: repository_id (osid.id.Id): the Id of a repository
return: (boolean) - ``true`` if this ``id`` is an ancestor of
``repository_id,`` ``false`` otherwise
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
### Response:
def is_ancestor_of_repository(self, id_, repository_id):
"""Tests if an ``Id`` is an ancestor of a repository.
arg: id (osid.id.Id): an ``Id``
arg: repository_id (osid.id.Id): the Id of a repository
return: (boolean) - ``true`` if this ``id`` is an ancestor of
``repository_id,`` ``false`` otherwise
raise: NotFound - ``repository_id`` not found
raise: NullArgument - ``repository_id`` is ``null``
raise: OperationFailed - unable to complete request
raise: PermissionDenied - authorization failure
*compliance: mandatory -- This method must be implemented.*
*implementation notes*: If ``id`` not found return ``false``.
"""
# Implemented from template for
# osid.resource.BinHierarchySession.is_ancestor_of_bin
if self._catalog_session is not None:
return self._catalog_session.is_ancestor_of_catalog(id_=id_, catalog_id=repository_id)
return self._hierarchy_session.is_ancestor(id_=id_, ancestor_id=repository_id) |
def changed(self, filename='.md5', glob=None):
"""Are any of the files matched by ``glob`` changed?
"""
if glob is not None:
filename += '.glob-' + ''.join(ch.lower()
for ch in glob if ch.isalpha())
return changed(self, filename, glob=glob) | Are any of the files matched by ``glob`` changed? | Below is the the instruction that describes the task:
### Input:
Are any of the files matched by ``glob`` changed?
### Response:
def changed(self, filename='.md5', glob=None):
"""Are any of the files matched by ``glob`` changed?
"""
if glob is not None:
filename += '.glob-' + ''.join(ch.lower()
for ch in glob if ch.isalpha())
return changed(self, filename, glob=glob) |
def deserialize(cls, assoc_s):
"""
Parse an association as stored by serialize().
inverse of serialize
@param assoc_s: Association as serialized by serialize()
@type assoc_s: str
@return: instance of this class
"""
pairs = kvform.kvToSeq(assoc_s, strict=True)
keys = []
values = []
for k, v in pairs:
keys.append(k)
values.append(v)
if keys != cls.assoc_keys:
raise ValueError('Unexpected key values: %r', keys)
version, handle, secret, issued, lifetime, assoc_type = values
if version != '2':
raise ValueError('Unknown version: %r' % version)
issued = int(issued)
lifetime = int(lifetime)
secret = oidutil.fromBase64(secret)
return cls(handle, secret, issued, lifetime, assoc_type) | Parse an association as stored by serialize().
inverse of serialize
@param assoc_s: Association as serialized by serialize()
@type assoc_s: str
@return: instance of this class | Below is the the instruction that describes the task:
### Input:
Parse an association as stored by serialize().
inverse of serialize
@param assoc_s: Association as serialized by serialize()
@type assoc_s: str
@return: instance of this class
### Response:
def deserialize(cls, assoc_s):
"""
Parse an association as stored by serialize().
inverse of serialize
@param assoc_s: Association as serialized by serialize()
@type assoc_s: str
@return: instance of this class
"""
pairs = kvform.kvToSeq(assoc_s, strict=True)
keys = []
values = []
for k, v in pairs:
keys.append(k)
values.append(v)
if keys != cls.assoc_keys:
raise ValueError('Unexpected key values: %r', keys)
version, handle, secret, issued, lifetime, assoc_type = values
if version != '2':
raise ValueError('Unknown version: %r' % version)
issued = int(issued)
lifetime = int(lifetime)
secret = oidutil.fromBase64(secret)
return cls(handle, secret, issued, lifetime, assoc_type) |
def _printer(self, *out, **kws):
"""Generic print function."""
flush = kws.pop('flush', True)
fileh = kws.pop('file', self.writer)
sep = kws.pop('sep', ' ')
end = kws.pop('sep', '\n')
print(*out, file=fileh, sep=sep, end=end)
if flush:
fileh.flush() | Generic print function. | Below is the the instruction that describes the task:
### Input:
Generic print function.
### Response:
def _printer(self, *out, **kws):
"""Generic print function."""
flush = kws.pop('flush', True)
fileh = kws.pop('file', self.writer)
sep = kws.pop('sep', ' ')
end = kws.pop('sep', '\n')
print(*out, file=fileh, sep=sep, end=end)
if flush:
fileh.flush() |
def to_dict(self, properties=None):
"""Return a dictionary containing Substance data.
If the properties parameter is not specified, everything except cids and aids is included. This is because the
aids and cids properties each require an extra request to retrieve.
:param properties: (optional) A list of the desired properties.
"""
if not properties:
skip = {'deposited_compound', 'standardized_compound', 'cids', 'aids'}
properties = [p for p in dir(Substance) if isinstance(getattr(Substance, p), property) and p not in skip]
return {p: getattr(self, p) for p in properties} | Return a dictionary containing Substance data.
If the properties parameter is not specified, everything except cids and aids is included. This is because the
aids and cids properties each require an extra request to retrieve.
:param properties: (optional) A list of the desired properties. | Below is the the instruction that describes the task:
### Input:
Return a dictionary containing Substance data.
If the properties parameter is not specified, everything except cids and aids is included. This is because the
aids and cids properties each require an extra request to retrieve.
:param properties: (optional) A list of the desired properties.
### Response:
def to_dict(self, properties=None):
"""Return a dictionary containing Substance data.
If the properties parameter is not specified, everything except cids and aids is included. This is because the
aids and cids properties each require an extra request to retrieve.
:param properties: (optional) A list of the desired properties.
"""
if not properties:
skip = {'deposited_compound', 'standardized_compound', 'cids', 'aids'}
properties = [p for p in dir(Substance) if isinstance(getattr(Substance, p), property) and p not in skip]
return {p: getattr(self, p) for p in properties} |
def create_pswd_change(self, body):
"""Create password change ticket.
Args:
body (dict): Please see: https://auth0.com/docs/api/v2#!/Tickets/post_password_change
"""
return self.client.post(self._url('password-change'), data=body) | Create password change ticket.
Args:
body (dict): Please see: https://auth0.com/docs/api/v2#!/Tickets/post_password_change | Below is the the instruction that describes the task:
### Input:
Create password change ticket.
Args:
body (dict): Please see: https://auth0.com/docs/api/v2#!/Tickets/post_password_change
### Response:
def create_pswd_change(self, body):
"""Create password change ticket.
Args:
body (dict): Please see: https://auth0.com/docs/api/v2#!/Tickets/post_password_change
"""
return self.client.post(self._url('password-change'), data=body) |
def get_num_processors():
"""
Return number of online processor cores.
"""
# try different strategies and use first one that succeeeds
try:
return os.cpu_count() # Py3 only
except AttributeError:
pass
try:
import multiprocessing
return multiprocessing.cpu_count()
except ImportError: # no multiprocessing?
pass
except NotImplementedError:
# multiprocessing cannot determine CPU count
pass
try:
from subprocess32 import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess32?
pass
try:
from subprocess import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess.check_call (Py 2.6)
pass
raise RuntimeError("Cannot determine number of processors") | Return number of online processor cores. | Below is the the instruction that describes the task:
### Input:
Return number of online processor cores.
### Response:
def get_num_processors():
"""
Return number of online processor cores.
"""
# try different strategies and use first one that succeeeds
try:
return os.cpu_count() # Py3 only
except AttributeError:
pass
try:
import multiprocessing
return multiprocessing.cpu_count()
except ImportError: # no multiprocessing?
pass
except NotImplementedError:
# multiprocessing cannot determine CPU count
pass
try:
from subprocess32 import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess32?
pass
try:
from subprocess import check_output
ncpus = check_output('nproc')
return int(ncpus)
except CalledProcessError: # no `/usr/bin/nproc`
pass
except (ValueError, TypeError):
# unexpected output from `nproc`
pass
except ImportError: # no subprocess.check_call (Py 2.6)
pass
raise RuntimeError("Cannot determine number of processors") |
def directory(self, query, **kwargs):
"""Search by users or channels on all server."""
if isinstance(query, dict):
query = str(query).replace("'", '"')
return self.__call_api_get('directory', query=query, kwargs=kwargs) | Search by users or channels on all server. | Below is the the instruction that describes the task:
### Input:
Search by users or channels on all server.
### Response:
def directory(self, query, **kwargs):
"""Search by users or channels on all server."""
if isinstance(query, dict):
query = str(query).replace("'", '"')
return self.__call_api_get('directory', query=query, kwargs=kwargs) |
def __GetServiceVersionDescription(protocol, server, port, path, sslContext):
"""
Private method that returns a root from an ElementTree describing the API versions
supported by the specified server. The result will be vimServiceVersions.xml
if it exists, otherwise vimService.wsdl if it exists, otherwise None.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
tree = __GetElementTree(protocol, server, port,
path + "/vimServiceVersions.xml", sslContext)
if tree is not None:
return tree
tree = __GetElementTree(protocol, server, port,
path + "/vimService.wsdl", sslContext)
return tree | Private method that returns a root from an ElementTree describing the API versions
supported by the specified server. The result will be vimServiceVersions.xml
if it exists, otherwise vimService.wsdl if it exists, otherwise None.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context | Below is the the instruction that describes the task:
### Input:
Private method that returns a root from an ElementTree describing the API versions
supported by the specified server. The result will be vimServiceVersions.xml
if it exists, otherwise vimService.wsdl if it exists, otherwise None.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
### Response:
def __GetServiceVersionDescription(protocol, server, port, path, sslContext):
"""
Private method that returns a root from an ElementTree describing the API versions
supported by the specified server. The result will be vimServiceVersions.xml
if it exists, otherwise vimService.wsdl if it exists, otherwise None.
@param protocol: What protocol to use for the connection (e.g. https or http).
@type protocol: string
@param server: Which server to connect to.
@type server: string
@param port: Port
@type port: int
@param path: Path
@type path: string
@param sslContext: SSL Context describing the various SSL options. It is only
supported in Python 2.7.9 or higher.
@type sslContext: SSL.Context
"""
tree = __GetElementTree(protocol, server, port,
path + "/vimServiceVersions.xml", sslContext)
if tree is not None:
return tree
tree = __GetElementTree(protocol, server, port,
path + "/vimService.wsdl", sslContext)
return tree |
def enable_eye_dome_lighting(self):
"""Enable eye dome lighting (EDL)"""
if hasattr(self, 'edl_pass'):
return self
# create the basic VTK render steps
basic_passes = vtk.vtkRenderStepsPass()
# blur the resulting image
# The blur delegates rendering the unblured image to the basic_passes
self.edl_pass = vtk.vtkEDLShading()
self.edl_pass.SetDelegatePass(basic_passes)
# tell the renderer to use our render pass pipeline
self.glrenderer = vtk.vtkOpenGLRenderer.SafeDownCast(self)
self.glrenderer.SetPass(self.edl_pass)
return self.glrenderer | Enable eye dome lighting (EDL) | Below is the the instruction that describes the task:
### Input:
Enable eye dome lighting (EDL)
### Response:
def enable_eye_dome_lighting(self):
"""Enable eye dome lighting (EDL)"""
if hasattr(self, 'edl_pass'):
return self
# create the basic VTK render steps
basic_passes = vtk.vtkRenderStepsPass()
# blur the resulting image
# The blur delegates rendering the unblured image to the basic_passes
self.edl_pass = vtk.vtkEDLShading()
self.edl_pass.SetDelegatePass(basic_passes)
# tell the renderer to use our render pass pipeline
self.glrenderer = vtk.vtkOpenGLRenderer.SafeDownCast(self)
self.glrenderer.SetPass(self.edl_pass)
return self.glrenderer |
def handle_cd_command(arg):
"""Handles a `cd` shell command by calling python's os.chdir."""
CD_CMD = 'cd'
tokens = arg.split(CD_CMD + ' ')
directory = tokens[-1] if len(tokens) > 1 else None
if not directory:
return False, "No folder name was provided."
try:
os.chdir(directory)
subprocess.call(['pwd'])
return True, None
except OSError as e:
return False, e.strerror | Handles a `cd` shell command by calling python's os.chdir. | Below is the the instruction that describes the task:
### Input:
Handles a `cd` shell command by calling python's os.chdir.
### Response:
def handle_cd_command(arg):
"""Handles a `cd` shell command by calling python's os.chdir."""
CD_CMD = 'cd'
tokens = arg.split(CD_CMD + ' ')
directory = tokens[-1] if len(tokens) > 1 else None
if not directory:
return False, "No folder name was provided."
try:
os.chdir(directory)
subprocess.call(['pwd'])
return True, None
except OSError as e:
return False, e.strerror |
def delete_record(self, record):
"""
Permanently removes record from table.
"""
try:
self.session.delete(record)
self.session.commit()
except Exception as e:
self.session.rollback()
raise ProgrammingError(e)
finally:
self.session.close() | Permanently removes record from table. | Below is the the instruction that describes the task:
### Input:
Permanently removes record from table.
### Response:
def delete_record(self, record):
"""
Permanently removes record from table.
"""
try:
self.session.delete(record)
self.session.commit()
except Exception as e:
self.session.rollback()
raise ProgrammingError(e)
finally:
self.session.close() |
def build_image_path(self, src):
"""\
This method will take an image path and build
out the absolute path to that image
* using the initial url we crawled
so we can find a link to the image
if they use relative urls like ../myimage.jpg
"""
o = urlparse(src)
# we have a full url
if o.hostname:
return o.geturl()
# we have a relative url
return urljoin(self.target_url, src) | \
This method will take an image path and build
out the absolute path to that image
* using the initial url we crawled
so we can find a link to the image
if they use relative urls like ../myimage.jpg | Below is the the instruction that describes the task:
### Input:
\
This method will take an image path and build
out the absolute path to that image
* using the initial url we crawled
so we can find a link to the image
if they use relative urls like ../myimage.jpg
### Response:
def build_image_path(self, src):
"""\
This method will take an image path and build
out the absolute path to that image
* using the initial url we crawled
so we can find a link to the image
if they use relative urls like ../myimage.jpg
"""
o = urlparse(src)
# we have a full url
if o.hostname:
return o.geturl()
# we have a relative url
return urljoin(self.target_url, src) |
def get_level(self, level=2):
"""Get all nodes that are exactly this far away."""
if level == 1:
for child in self.children.values(): yield child
else:
for child in self.children.values():
for node in child.get_level(level-1): yield node | Get all nodes that are exactly this far away. | Below is the the instruction that describes the task:
### Input:
Get all nodes that are exactly this far away.
### Response:
def get_level(self, level=2):
"""Get all nodes that are exactly this far away."""
if level == 1:
for child in self.children.values(): yield child
else:
for child in self.children.values():
for node in child.get_level(level-1): yield node |
def _allocate_output(self, windows, shape):
"""
Allocate an output array whose rows should be passed to `self.compute`.
The resulting array must have a shape of ``shape``.
If we have standard outputs (i.e. self.outputs is NotSpecified), the
default is an empty ndarray whose dtype is ``self.dtype``.
If we have an outputs tuple, the default is an empty recarray with
``self.outputs`` as field names. Each field will have dtype
``self.dtype``.
This can be overridden to control the kind of array constructed
(e.g. to produce a LabelArray instead of an ndarray).
"""
missing_value = self.missing_value
outputs = self.outputs
if outputs is not NotSpecified:
out = recarray(
shape,
formats=[self.dtype.str] * len(outputs),
names=outputs,
)
out[:] = missing_value
else:
out = full(shape, missing_value, dtype=self.dtype)
return out | Allocate an output array whose rows should be passed to `self.compute`.
The resulting array must have a shape of ``shape``.
If we have standard outputs (i.e. self.outputs is NotSpecified), the
default is an empty ndarray whose dtype is ``self.dtype``.
If we have an outputs tuple, the default is an empty recarray with
``self.outputs`` as field names. Each field will have dtype
``self.dtype``.
This can be overridden to control the kind of array constructed
(e.g. to produce a LabelArray instead of an ndarray). | Below is the the instruction that describes the task:
### Input:
Allocate an output array whose rows should be passed to `self.compute`.
The resulting array must have a shape of ``shape``.
If we have standard outputs (i.e. self.outputs is NotSpecified), the
default is an empty ndarray whose dtype is ``self.dtype``.
If we have an outputs tuple, the default is an empty recarray with
``self.outputs`` as field names. Each field will have dtype
``self.dtype``.
This can be overridden to control the kind of array constructed
(e.g. to produce a LabelArray instead of an ndarray).
### Response:
def _allocate_output(self, windows, shape):
"""
Allocate an output array whose rows should be passed to `self.compute`.
The resulting array must have a shape of ``shape``.
If we have standard outputs (i.e. self.outputs is NotSpecified), the
default is an empty ndarray whose dtype is ``self.dtype``.
If we have an outputs tuple, the default is an empty recarray with
``self.outputs`` as field names. Each field will have dtype
``self.dtype``.
This can be overridden to control the kind of array constructed
(e.g. to produce a LabelArray instead of an ndarray).
"""
missing_value = self.missing_value
outputs = self.outputs
if outputs is not NotSpecified:
out = recarray(
shape,
formats=[self.dtype.str] * len(outputs),
names=outputs,
)
out[:] = missing_value
else:
out = full(shape, missing_value, dtype=self.dtype)
return out |
def get_token(self):
""" Method to retrieve an auth token.
The cached global token is looked up and returned if it exists. If it
is `None` a new one is requested and returned.
Returns:
Simplenote API token as string
"""
if self.token == None:
self.token = self.authenticate(self.username, self.password)
try:
return str(self.token,'utf-8')
except TypeError:
return self.token | Method to retrieve an auth token.
The cached global token is looked up and returned if it exists. If it
is `None` a new one is requested and returned.
Returns:
Simplenote API token as string | Below is the the instruction that describes the task:
### Input:
Method to retrieve an auth token.
The cached global token is looked up and returned if it exists. If it
is `None` a new one is requested and returned.
Returns:
Simplenote API token as string
### Response:
def get_token(self):
""" Method to retrieve an auth token.
The cached global token is looked up and returned if it exists. If it
is `None` a new one is requested and returned.
Returns:
Simplenote API token as string
"""
if self.token == None:
self.token = self.authenticate(self.username, self.password)
try:
return str(self.token,'utf-8')
except TypeError:
return self.token |
def id_token_jwt_grant(request, token_uri, assertion):
"""Implements the JWT Profile for OAuth 2.0 Authorization Grants, but
requests an OpenID Connect ID Token instead of an access token.
This is a variant on the standard JWT Profile that is currently unique
to Google. This was added for the benefit of authenticating to services
that require ID Tokens instead of access tokens or JWT bearer tokens.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
token_uri (str): The OAuth 2.0 authorization server's token endpoint
URI.
assertion (str): JWT token signed by a service account. The token's
payload must include a ``target_audience`` claim.
Returns:
Tuple[str, Optional[datetime], Mapping[str, str]]:
The (encoded) Open ID Connect ID Token, expiration, and additional
data returned by the endpoint.
Raises:
google.auth.exceptions.RefreshError: If the token endpoint returned
an error.
"""
body = {
'assertion': assertion,
'grant_type': _JWT_GRANT_TYPE,
}
response_data = _token_endpoint_request(request, token_uri, body)
try:
id_token = response_data['id_token']
except KeyError as caught_exc:
new_exc = exceptions.RefreshError(
'No ID token in response.', response_data)
six.raise_from(new_exc, caught_exc)
payload = jwt.decode(id_token, verify=False)
expiry = datetime.datetime.utcfromtimestamp(payload['exp'])
return id_token, expiry, response_data | Implements the JWT Profile for OAuth 2.0 Authorization Grants, but
requests an OpenID Connect ID Token instead of an access token.
This is a variant on the standard JWT Profile that is currently unique
to Google. This was added for the benefit of authenticating to services
that require ID Tokens instead of access tokens or JWT bearer tokens.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
token_uri (str): The OAuth 2.0 authorization server's token endpoint
URI.
assertion (str): JWT token signed by a service account. The token's
payload must include a ``target_audience`` claim.
Returns:
Tuple[str, Optional[datetime], Mapping[str, str]]:
The (encoded) Open ID Connect ID Token, expiration, and additional
data returned by the endpoint.
Raises:
google.auth.exceptions.RefreshError: If the token endpoint returned
an error. | Below is the the instruction that describes the task:
### Input:
Implements the JWT Profile for OAuth 2.0 Authorization Grants, but
requests an OpenID Connect ID Token instead of an access token.
This is a variant on the standard JWT Profile that is currently unique
to Google. This was added for the benefit of authenticating to services
that require ID Tokens instead of access tokens or JWT bearer tokens.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
token_uri (str): The OAuth 2.0 authorization server's token endpoint
URI.
assertion (str): JWT token signed by a service account. The token's
payload must include a ``target_audience`` claim.
Returns:
Tuple[str, Optional[datetime], Mapping[str, str]]:
The (encoded) Open ID Connect ID Token, expiration, and additional
data returned by the endpoint.
Raises:
google.auth.exceptions.RefreshError: If the token endpoint returned
an error.
### Response:
def id_token_jwt_grant(request, token_uri, assertion):
"""Implements the JWT Profile for OAuth 2.0 Authorization Grants, but
requests an OpenID Connect ID Token instead of an access token.
This is a variant on the standard JWT Profile that is currently unique
to Google. This was added for the benefit of authenticating to services
that require ID Tokens instead of access tokens or JWT bearer tokens.
Args:
request (google.auth.transport.Request): A callable used to make
HTTP requests.
token_uri (str): The OAuth 2.0 authorization server's token endpoint
URI.
assertion (str): JWT token signed by a service account. The token's
payload must include a ``target_audience`` claim.
Returns:
Tuple[str, Optional[datetime], Mapping[str, str]]:
The (encoded) Open ID Connect ID Token, expiration, and additional
data returned by the endpoint.
Raises:
google.auth.exceptions.RefreshError: If the token endpoint returned
an error.
"""
body = {
'assertion': assertion,
'grant_type': _JWT_GRANT_TYPE,
}
response_data = _token_endpoint_request(request, token_uri, body)
try:
id_token = response_data['id_token']
except KeyError as caught_exc:
new_exc = exceptions.RefreshError(
'No ID token in response.', response_data)
six.raise_from(new_exc, caught_exc)
payload = jwt.decode(id_token, verify=False)
expiry = datetime.datetime.utcfromtimestamp(payload['exp'])
return id_token, expiry, response_data |
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
"""
Read the data stream and decode the AttributeReference structure into
its parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidKmipEncoding: Raised if the vendor identification or
attribute name is missing from the encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the AttributeReference structure.
"""
if kmip_version < enums.KMIPVersion.KMIP_2_0:
raise exceptions.VersionNotSupported(
"KMIP {} does not support the AttributeReference "
"object.".format(
kmip_version.value
)
)
super(AttributeReference, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.VENDOR_IDENTIFICATION, local_buffer):
self._vendor_identification = primitives.TextString(
tag=enums.Tags.VENDOR_IDENTIFICATION
)
self._vendor_identification.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The AttributeReference encoding is missing the vendor "
"identification string."
)
if self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, local_buffer):
self._attribute_name = primitives.TextString(
tag=enums.Tags.ATTRIBUTE_NAME
)
self._attribute_name.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The AttributeReference encoding is missing the attribute "
"name string."
)
self.is_oversized(local_buffer) | Read the data stream and decode the AttributeReference structure into
its parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidKmipEncoding: Raised if the vendor identification or
attribute name is missing from the encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the AttributeReference structure. | Below is the the instruction that describes the task:
### Input:
Read the data stream and decode the AttributeReference structure into
its parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidKmipEncoding: Raised if the vendor identification or
attribute name is missing from the encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the AttributeReference structure.
### Response:
def read(self, input_buffer, kmip_version=enums.KMIPVersion.KMIP_2_0):
"""
Read the data stream and decode the AttributeReference structure into
its parts.
Args:
input_buffer (stream): A data stream containing encoded object
data, supporting a read method.
kmip_version (enum): A KMIPVersion enumeration defining the KMIP
version with which the object will be decoded. Optional,
defaults to KMIP 2.0.
Raises:
InvalidKmipEncoding: Raised if the vendor identification or
attribute name is missing from the encoding.
VersionNotSupported: Raised when a KMIP version is provided that
does not support the AttributeReference structure.
"""
if kmip_version < enums.KMIPVersion.KMIP_2_0:
raise exceptions.VersionNotSupported(
"KMIP {} does not support the AttributeReference "
"object.".format(
kmip_version.value
)
)
super(AttributeReference, self).read(
input_buffer,
kmip_version=kmip_version
)
local_buffer = BytearrayStream(input_buffer.read(self.length))
if self.is_tag_next(enums.Tags.VENDOR_IDENTIFICATION, local_buffer):
self._vendor_identification = primitives.TextString(
tag=enums.Tags.VENDOR_IDENTIFICATION
)
self._vendor_identification.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The AttributeReference encoding is missing the vendor "
"identification string."
)
if self.is_tag_next(enums.Tags.ATTRIBUTE_NAME, local_buffer):
self._attribute_name = primitives.TextString(
tag=enums.Tags.ATTRIBUTE_NAME
)
self._attribute_name.read(
local_buffer,
kmip_version=kmip_version
)
else:
raise exceptions.InvalidKmipEncoding(
"The AttributeReference encoding is missing the attribute "
"name string."
)
self.is_oversized(local_buffer) |
def fix_report(self, report, errors="drop", prefer="before"):
"""Perform utc assignment on all readings in a report.
The returned report will have all reading timestamps in UTC. This only
works on SignedListReport objects. Note that the report should
typically have previously been added to the UTC assigner using
add_report or no reference points from the report will be used.
Args:
report (SignedListReport): The report that we should fix.
errors (str): The behavior that we should have when we can't
fix a given reading. The only currently support behavior is
drop, which means that the reading will be dropped and not
included in the new report.
prefer (str): Whether to prefer fixing readings by looking for
reference points after the reading or before, all other things
being equal. See the description of ``assign_utc``.
Returns:
SignedListReport: The report with UTC timestamps.
"""
if not isinstance(report, SignedListReport):
raise ArgumentError("Report must be a SignedListReport", report=report)
if errors not in ('drop',):
raise ArgumentError("Unknown errors handler: {}, supported=['drop']".format(errors))
self.ensure_prepared()
fixed_readings = []
dropped_readings = 0
for reading in report.visible_readings:
assignment = self.assign_utc(reading.reading_id, reading.raw_time, prefer=prefer)
if assignment is None:
dropped_readings += 1
continue
fixed_reading = IOTileReading(assignment.rtc_value, reading.stream, reading.value,
reading_time=assignment.utc, reading_id=reading.reading_id)
fixed_readings.append(fixed_reading)
fixed_report = SignedListReport.FromReadings(report.origin, fixed_readings, report_id=report.report_id,
selector=report.streamer_selector, streamer=report.origin_streamer,
sent_timestamp=report.sent_timestamp)
fixed_report.received_time = report.received_time
if dropped_readings > 0:
self._logger.warning("Dropped %d readings of %d when fixing UTC timestamps in report 0x%08X for device 0x%08X",
dropped_readings, len(report.visible_readings), report.report_id, report.origin)
return fixed_report | Perform utc assignment on all readings in a report.
The returned report will have all reading timestamps in UTC. This only
works on SignedListReport objects. Note that the report should
typically have previously been added to the UTC assigner using
add_report or no reference points from the report will be used.
Args:
report (SignedListReport): The report that we should fix.
errors (str): The behavior that we should have when we can't
fix a given reading. The only currently support behavior is
drop, which means that the reading will be dropped and not
included in the new report.
prefer (str): Whether to prefer fixing readings by looking for
reference points after the reading or before, all other things
being equal. See the description of ``assign_utc``.
Returns:
SignedListReport: The report with UTC timestamps. | Below is the the instruction that describes the task:
### Input:
Perform utc assignment on all readings in a report.
The returned report will have all reading timestamps in UTC. This only
works on SignedListReport objects. Note that the report should
typically have previously been added to the UTC assigner using
add_report or no reference points from the report will be used.
Args:
report (SignedListReport): The report that we should fix.
errors (str): The behavior that we should have when we can't
fix a given reading. The only currently support behavior is
drop, which means that the reading will be dropped and not
included in the new report.
prefer (str): Whether to prefer fixing readings by looking for
reference points after the reading or before, all other things
being equal. See the description of ``assign_utc``.
Returns:
SignedListReport: The report with UTC timestamps.
### Response:
def fix_report(self, report, errors="drop", prefer="before"):
"""Perform utc assignment on all readings in a report.
The returned report will have all reading timestamps in UTC. This only
works on SignedListReport objects. Note that the report should
typically have previously been added to the UTC assigner using
add_report or no reference points from the report will be used.
Args:
report (SignedListReport): The report that we should fix.
errors (str): The behavior that we should have when we can't
fix a given reading. The only currently support behavior is
drop, which means that the reading will be dropped and not
included in the new report.
prefer (str): Whether to prefer fixing readings by looking for
reference points after the reading or before, all other things
being equal. See the description of ``assign_utc``.
Returns:
SignedListReport: The report with UTC timestamps.
"""
if not isinstance(report, SignedListReport):
raise ArgumentError("Report must be a SignedListReport", report=report)
if errors not in ('drop',):
raise ArgumentError("Unknown errors handler: {}, supported=['drop']".format(errors))
self.ensure_prepared()
fixed_readings = []
dropped_readings = 0
for reading in report.visible_readings:
assignment = self.assign_utc(reading.reading_id, reading.raw_time, prefer=prefer)
if assignment is None:
dropped_readings += 1
continue
fixed_reading = IOTileReading(assignment.rtc_value, reading.stream, reading.value,
reading_time=assignment.utc, reading_id=reading.reading_id)
fixed_readings.append(fixed_reading)
fixed_report = SignedListReport.FromReadings(report.origin, fixed_readings, report_id=report.report_id,
selector=report.streamer_selector, streamer=report.origin_streamer,
sent_timestamp=report.sent_timestamp)
fixed_report.received_time = report.received_time
if dropped_readings > 0:
self._logger.warning("Dropped %d readings of %d when fixing UTC timestamps in report 0x%08X for device 0x%08X",
dropped_readings, len(report.visible_readings), report.report_id, report.origin)
return fixed_report |
def _get(self, name, interval, config, timestamp, **kws):
'''
Get the interval.
'''
i_bucket = config['i_calc'].to_bucket(timestamp)
fetch = kws.get('fetch')
process_row = kws.get('process_row') or self._process_row
rval = OrderedDict()
if fetch:
data = fetch( self._client.connect(), self._table, name, interval, i_bucket )
else:
data = self._type_get(name, interval, i_bucket)
if config['coarse']:
if data:
rval[ config['i_calc'].from_bucket(i_bucket) ] = process_row(data.values()[0][None])
else:
rval[ config['i_calc'].from_bucket(i_bucket) ] = self._type_no_value()
else:
for r_bucket,row_data in data.values()[0].items():
rval[ config['r_calc'].from_bucket(r_bucket) ] = process_row(row_data)
return rval | Get the interval. | Below is the the instruction that describes the task:
### Input:
Get the interval.
### Response:
def _get(self, name, interval, config, timestamp, **kws):
'''
Get the interval.
'''
i_bucket = config['i_calc'].to_bucket(timestamp)
fetch = kws.get('fetch')
process_row = kws.get('process_row') or self._process_row
rval = OrderedDict()
if fetch:
data = fetch( self._client.connect(), self._table, name, interval, i_bucket )
else:
data = self._type_get(name, interval, i_bucket)
if config['coarse']:
if data:
rval[ config['i_calc'].from_bucket(i_bucket) ] = process_row(data.values()[0][None])
else:
rval[ config['i_calc'].from_bucket(i_bucket) ] = self._type_no_value()
else:
for r_bucket,row_data in data.values()[0].items():
rval[ config['r_calc'].from_bucket(r_bucket) ] = process_row(row_data)
return rval |
def parse_session_token(response_headers):
""" Extracts session token from response headers and parses
:param dict response_headers:
:return:
A dictionary of partition id to session lsn
for given collection
:rtype: dict
"""
# extract session token from response header
session_token = ''
if http_constants.HttpHeaders.SessionToken in response_headers:
session_token = response_headers[http_constants.HttpHeaders.SessionToken]
id_to_sessionlsn = {}
if session_token is not '':
''' extract id, lsn from the token. For p-collection,
the token will be a concatenation of pairs for each collection'''
token_pairs = session_token.split(',')
for token_pair in token_pairs:
tokens = token_pair.split(':')
if (len(tokens) == 2):
id = tokens[0]
sessionToken = VectorSessionToken.create(tokens[1])
if sessionToken is None:
raise HTTPFailure(http_constants.StatusCodes.INTERNAL_SERVER_ERROR, "Could not parse the received session token: %s" % tokens[1])
id_to_sessionlsn[id] = sessionToken
return id_to_sessionlsn | Extracts session token from response headers and parses
:param dict response_headers:
:return:
A dictionary of partition id to session lsn
for given collection
:rtype: dict | Below is the the instruction that describes the task:
### Input:
Extracts session token from response headers and parses
:param dict response_headers:
:return:
A dictionary of partition id to session lsn
for given collection
:rtype: dict
### Response:
def parse_session_token(response_headers):
""" Extracts session token from response headers and parses
:param dict response_headers:
:return:
A dictionary of partition id to session lsn
for given collection
:rtype: dict
"""
# extract session token from response header
session_token = ''
if http_constants.HttpHeaders.SessionToken in response_headers:
session_token = response_headers[http_constants.HttpHeaders.SessionToken]
id_to_sessionlsn = {}
if session_token is not '':
''' extract id, lsn from the token. For p-collection,
the token will be a concatenation of pairs for each collection'''
token_pairs = session_token.split(',')
for token_pair in token_pairs:
tokens = token_pair.split(':')
if (len(tokens) == 2):
id = tokens[0]
sessionToken = VectorSessionToken.create(tokens[1])
if sessionToken is None:
raise HTTPFailure(http_constants.StatusCodes.INTERNAL_SERVER_ERROR, "Could not parse the received session token: %s" % tokens[1])
id_to_sessionlsn[id] = sessionToken
return id_to_sessionlsn |
def is_ordered_mapping(obj):
"""Checks whether given object is an ordered mapping,
e.g. a :class:`OrderedDict`.
:return: ``True`` if argument is an ordered mapping, ``False`` otherwise
"""
if not (is_mapping(obj) and hasattr(obj, '__reversed__')):
return False
# PyPy has a bug where the standard :class:`dict` has the ``__reversed__``
# method but it's unusable and throws an exception when called
try:
obj.__reversed__()
except TypeError:
return False
else:
return True | Checks whether given object is an ordered mapping,
e.g. a :class:`OrderedDict`.
:return: ``True`` if argument is an ordered mapping, ``False`` otherwise | Below is the the instruction that describes the task:
### Input:
Checks whether given object is an ordered mapping,
e.g. a :class:`OrderedDict`.
:return: ``True`` if argument is an ordered mapping, ``False`` otherwise
### Response:
def is_ordered_mapping(obj):
"""Checks whether given object is an ordered mapping,
e.g. a :class:`OrderedDict`.
:return: ``True`` if argument is an ordered mapping, ``False`` otherwise
"""
if not (is_mapping(obj) and hasattr(obj, '__reversed__')):
return False
# PyPy has a bug where the standard :class:`dict` has the ``__reversed__``
# method but it's unusable and throws an exception when called
try:
obj.__reversed__()
except TypeError:
return False
else:
return True |
def records(self, name):
"""
Get a list of all domain records for the given domain name
Parameters
----------
name: str
domain name
"""
if self.get(name):
return DomainRecords(self.api, name) | Get a list of all domain records for the given domain name
Parameters
----------
name: str
domain name | Below is the the instruction that describes the task:
### Input:
Get a list of all domain records for the given domain name
Parameters
----------
name: str
domain name
### Response:
def records(self, name):
"""
Get a list of all domain records for the given domain name
Parameters
----------
name: str
domain name
"""
if self.get(name):
return DomainRecords(self.api, name) |
def add_mavlink_packet(self, msg):
'''add data to the graph'''
mtype = msg.get_type()
if mtype not in self.msg_types:
return
for i in range(len(self.fields)):
if mtype not in self.field_types[i]:
continue
f = self.fields[i]
self.values[i] = mavutil.evaluate_expression(f, self.state.master.messages)
if self.livegraph is not None:
self.livegraph.add_values(self.values) | add data to the graph | Below is the the instruction that describes the task:
### Input:
add data to the graph
### Response:
def add_mavlink_packet(self, msg):
'''add data to the graph'''
mtype = msg.get_type()
if mtype not in self.msg_types:
return
for i in range(len(self.fields)):
if mtype not in self.field_types[i]:
continue
f = self.fields[i]
self.values[i] = mavutil.evaluate_expression(f, self.state.master.messages)
if self.livegraph is not None:
self.livegraph.add_values(self.values) |
def setup():
"""Walk the user though the Wallace setup."""
# Create the Wallace config file if it does not already exist.
config_name = ".wallaceconfig"
config_path = os.path.join(os.path.expanduser("~"), config_name)
if os.path.isfile(config_path):
log("Wallace config file already exists.", chevrons=False)
else:
log("Creating Wallace config file at ~/.wallaceconfig...",
chevrons=False)
wallace_module_path = os.path.dirname(os.path.realpath(__file__))
src = os.path.join(wallace_module_path, "config", config_name)
shutil.copyfile(src, config_path) | Walk the user though the Wallace setup. | Below is the the instruction that describes the task:
### Input:
Walk the user though the Wallace setup.
### Response:
def setup():
"""Walk the user though the Wallace setup."""
# Create the Wallace config file if it does not already exist.
config_name = ".wallaceconfig"
config_path = os.path.join(os.path.expanduser("~"), config_name)
if os.path.isfile(config_path):
log("Wallace config file already exists.", chevrons=False)
else:
log("Creating Wallace config file at ~/.wallaceconfig...",
chevrons=False)
wallace_module_path = os.path.dirname(os.path.realpath(__file__))
src = os.path.join(wallace_module_path, "config", config_name)
shutil.copyfile(src, config_path) |
def path_exists(self, dest, weight=None):
"""Return whether there is a path leading from me to ``dest``.
With ``weight``, only consider edges that have a stat by the
given name.
Raise ``ValueError`` if ``dest`` is not a node in my character
or the name of one.
"""
try:
return bool(self.shortest_path_length(dest, weight))
except KeyError:
return False | Return whether there is a path leading from me to ``dest``.
With ``weight``, only consider edges that have a stat by the
given name.
Raise ``ValueError`` if ``dest`` is not a node in my character
or the name of one. | Below is the the instruction that describes the task:
### Input:
Return whether there is a path leading from me to ``dest``.
With ``weight``, only consider edges that have a stat by the
given name.
Raise ``ValueError`` if ``dest`` is not a node in my character
or the name of one.
### Response:
def path_exists(self, dest, weight=None):
"""Return whether there is a path leading from me to ``dest``.
With ``weight``, only consider edges that have a stat by the
given name.
Raise ``ValueError`` if ``dest`` is not a node in my character
or the name of one.
"""
try:
return bool(self.shortest_path_length(dest, weight))
except KeyError:
return False |
def register_animation(self, animation_class):
"""Add a new animation"""
self.state.animationClasses.append(animation_class)
return len(self.state.animationClasses) - 1 | Add a new animation | Below is the the instruction that describes the task:
### Input:
Add a new animation
### Response:
def register_animation(self, animation_class):
"""Add a new animation"""
self.state.animationClasses.append(animation_class)
return len(self.state.animationClasses) - 1 |
def git_clean(ctx):
"""
Delete all files untracked by git.
:param ctx: Context object.
:return: None.
"""
# Get command parts
cmd_part_s = [
# Program path
'git',
# Clean untracked files
'clean',
# Remove all untracked files
'-x',
# Remove untracked directories too
'-d',
# Force to remove
'-f',
# Give two `-f` flags to remove sub-repositories too
'-f',
]
# Print title
print_title('git_clean')
# Print the command in multi-line format
print_text(_format_multi_line_command(cmd_part_s))
# Create subprocess to run the command in top directory
proc = subprocess.Popen(cmd_part_s, cwd=ctx.top_dir)
# Wait the subprocess to finish
proc.wait()
# Print end title
print_title('git_clean', is_end=True) | Delete all files untracked by git.
:param ctx: Context object.
:return: None. | Below is the the instruction that describes the task:
### Input:
Delete all files untracked by git.
:param ctx: Context object.
:return: None.
### Response:
def git_clean(ctx):
"""
Delete all files untracked by git.
:param ctx: Context object.
:return: None.
"""
# Get command parts
cmd_part_s = [
# Program path
'git',
# Clean untracked files
'clean',
# Remove all untracked files
'-x',
# Remove untracked directories too
'-d',
# Force to remove
'-f',
# Give two `-f` flags to remove sub-repositories too
'-f',
]
# Print title
print_title('git_clean')
# Print the command in multi-line format
print_text(_format_multi_line_command(cmd_part_s))
# Create subprocess to run the command in top directory
proc = subprocess.Popen(cmd_part_s, cwd=ctx.top_dir)
# Wait the subprocess to finish
proc.wait()
# Print end title
print_title('git_clean', is_end=True) |
def p_pragma(self, p):
'pragma : LPAREN TIMES ID TIMES RPAREN'
p[0] = Pragma(PragmaEntry(p[3], lineno=p.lineno(1)),
lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) | pragma : LPAREN TIMES ID TIMES RPAREN | Below is the the instruction that describes the task:
### Input:
pragma : LPAREN TIMES ID TIMES RPAREN
### Response:
def p_pragma(self, p):
'pragma : LPAREN TIMES ID TIMES RPAREN'
p[0] = Pragma(PragmaEntry(p[3], lineno=p.lineno(1)),
lineno=p.lineno(1))
p.set_lineno(0, p.lineno(1)) |
def form_valid(self, form):
"""
Praise be, someone has spammed us.
"""
form.send_email(to=self.to_addr)
return super(EmailView, self).form_valid(form) | Praise be, someone has spammed us. | Below is the the instruction that describes the task:
### Input:
Praise be, someone has spammed us.
### Response:
def form_valid(self, form):
"""
Praise be, someone has spammed us.
"""
form.send_email(to=self.to_addr)
return super(EmailView, self).form_valid(form) |
def _connect_command(self):
'''
Generates a JSON string with the params to be used
when sending CONNECT to the server.
->> CONNECT {"lang": "python3"}
'''
options = {
"verbose": self.options["verbose"],
"pedantic": self.options["pedantic"],
"lang": __lang__,
"version": __version__,
"protocol": PROTOCOL
}
if "auth_required" in self._server_info:
if self._server_info["auth_required"]:
# In case there is no password, then consider handle
# sending a token instead.
if self.options["user"] is not None and self.options["password"] is not None:
options["user"] = self.options["user"]
options["pass"] = self.options["password"]
elif self.options["token"] is not None:
options["auth_token"] = self.options["token"]
elif self._current_server.uri.password is None:
options["auth_token"] = self._current_server.uri.username
else:
options["user"] = self._current_server.uri.username
options["pass"] = self._current_server.uri.password
if self.options["name"] is not None:
options["name"] = self.options["name"]
if self.options["no_echo"] is not None:
options["echo"] = not self.options["no_echo"]
connect_opts = json.dumps(options, sort_keys=True)
return b''.join([CONNECT_OP + _SPC_ + connect_opts.encode() + _CRLF_]) | Generates a JSON string with the params to be used
when sending CONNECT to the server.
->> CONNECT {"lang": "python3"} | Below is the the instruction that describes the task:
### Input:
Generates a JSON string with the params to be used
when sending CONNECT to the server.
->> CONNECT {"lang": "python3"}
### Response:
def _connect_command(self):
'''
Generates a JSON string with the params to be used
when sending CONNECT to the server.
->> CONNECT {"lang": "python3"}
'''
options = {
"verbose": self.options["verbose"],
"pedantic": self.options["pedantic"],
"lang": __lang__,
"version": __version__,
"protocol": PROTOCOL
}
if "auth_required" in self._server_info:
if self._server_info["auth_required"]:
# In case there is no password, then consider handle
# sending a token instead.
if self.options["user"] is not None and self.options["password"] is not None:
options["user"] = self.options["user"]
options["pass"] = self.options["password"]
elif self.options["token"] is not None:
options["auth_token"] = self.options["token"]
elif self._current_server.uri.password is None:
options["auth_token"] = self._current_server.uri.username
else:
options["user"] = self._current_server.uri.username
options["pass"] = self._current_server.uri.password
if self.options["name"] is not None:
options["name"] = self.options["name"]
if self.options["no_echo"] is not None:
options["echo"] = not self.options["no_echo"]
connect_opts = json.dumps(options, sort_keys=True)
return b''.join([CONNECT_OP + _SPC_ + connect_opts.encode() + _CRLF_]) |
def group_dict(self, group: str) -> Dict[str, Any]:
"""The names and values of options in a group.
Useful for copying options into Application settings::
from tornado.options import define, parse_command_line, options
define('template_path', group='application')
define('static_path', group='application')
parse_command_line()
application = Application(
handlers, **options.group_dict('application'))
.. versionadded:: 3.1
"""
return dict(
(opt.name, opt.value())
for name, opt in self._options.items()
if not group or group == opt.group_name
) | The names and values of options in a group.
Useful for copying options into Application settings::
from tornado.options import define, parse_command_line, options
define('template_path', group='application')
define('static_path', group='application')
parse_command_line()
application = Application(
handlers, **options.group_dict('application'))
.. versionadded:: 3.1 | Below is the instruction that describes the task:
### Input:
The names and values of options in a group.
Useful for copying options into Application settings::
from tornado.options import define, parse_command_line, options
define('template_path', group='application')
define('static_path', group='application')
parse_command_line()
application = Application(
handlers, **options.group_dict('application'))
.. versionadded:: 3.1
### Response:
def group_dict(self, group: str) -> Dict[str, Any]:
    """Map option names to their current values for one group.

    Useful for copying options into Application settings::

        from tornado.options import define, parse_command_line, options

        define('template_path', group='application')
        define('static_path', group='application')
        parse_command_line()

        application = Application(
            handlers, **options.group_dict('application'))

    An empty ``group`` selects every defined option.

    .. versionadded:: 3.1
    """
    return {
        opt.name: opt.value()
        for opt in self._options.values()
        if not group or group == opt.group_name
    }
def end(self):
"""End the roaster control process via thread signal.
This simply sends an exit signal to the thread, and shuts it down. In
order to stop monitoring, call the `set_monitor` method with false.
:returns: None
"""
self._process.shutdown()
self._roasting = False
self._roast['date'] = now_date(str=True) | End the roaster control process via thread signal.
This simply sends an exit signal to the thread, and shuts it down. In
order to stop monitoring, call the `set_monitor` method with false.
:returns: None | Below is the the instruction that describes the task:
### Input:
End the roaster control process via thread signal.
This simply sends an exit signal to the thread, and shuts it down. In
order to stop monitoring, call the `set_monitor` method with false.
:returns: None
### Response:
def end(self):
    """End the roaster control process via thread signal.

    This simply sends an exit signal to the thread, and shuts it down. In
    order to stop monitoring, call the `set_monitor` method with false.

    :returns: None
    """
    # Signal the background control process to exit first, so it stops
    # acting on a roast we are about to mark as finished.
    self._process.shutdown()
    self._roasting = False
    # Stamp the roast record with the completion date.
    # NOTE(review): assumes now_date(str=True) returns a formatted date
    # string -- confirm against its definition elsewhere in the project.
    self._roast['date'] = now_date(str=True)
def add_job(self, job_name, job_id=None):
""" Create a new, empty Job. """
logger.debug('Creating a new job named {0}'.format(job_name))
if not self._name_is_available(job_name):
raise DagobahError('name %s is not available' % job_name)
if not job_id:
job_id = self.backend.get_new_job_id()
self.created_jobs += 1
self.jobs.append(Job(self,
self.backend,
job_id,
job_name))
job = self.get_job(job_name)
job.commit() | Create a new, empty Job. | Below is the instruction that describes the task:
### Input:
Create a new, empty Job.
### Response:
def add_job(self, job_name, job_id=None):
    """Create a new, empty Job and register it with this scheduler.

    Raises DagobahError if the name is already taken. If no job_id is
    supplied, a fresh one is obtained from the backend.
    """
    logger.debug('Creating a new job named {0}'.format(job_name))
    if not self._name_is_available(job_name):
        raise DagobahError('name %s is not available' % job_name)
    new_id = job_id if job_id else self.backend.get_new_job_id()
    self.created_jobs += 1
    self.jobs.append(Job(self, self.backend, new_id, job_name))
    # Persist the freshly registered job.
    self.get_job(job_name).commit()
def decode(self, s, _w=WHITESPACE.match):
"""Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
"""
obj, end = self.raw_decode(s, idx=_w(s, 0).end())
end = _w(s, end).end()
if end != len(s):
raise JSONDecodeError("Extra data", s, end, len(s))
return obj | Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document) | Below is the the instruction that describes the task:
### Input:
Return the Python representation of ``s`` (a ``str`` or ``unicode``
instance containing a JSON document)
### Response:
def decode(self, s, _w=WHITESPACE.match):
    """Return the Python representation of ``s`` (a ``str`` or ``unicode``
    instance containing a JSON document).

    Raises JSONDecodeError when non-whitespace characters remain after
    the first complete JSON value.
    """
    # Skip leading whitespace, then parse exactly one JSON value.
    obj, consumed = self.raw_decode(s, idx=_w(s, 0).end())
    trailing = _w(s, consumed).end()
    if trailing != len(s):
        raise JSONDecodeError("Extra data", s, trailing, len(s))
    return obj
def dokent(data, NN):
"""
gets Kent parameters for data
Parameters
___________________
data : nested pairs of [Dec,Inc]
NN : normalization
NN is the number of data for Kent ellipse
NN is 1 for Kent ellipses of bootstrapped mean directions
Return
kpars dictionary keys
dec : mean declination
inc : mean inclination
n : number of datapoints
Eta : major ellipse
Edec : declination of major ellipse axis
Einc : inclination of major ellipse axis
Zeta : minor ellipse
Zdec : declination of minor ellipse axis
Zinc : inclination of minor ellipse axis
"""
X, kpars = [], {}
N = len(data)
if N < 2:
return kpars
#
# get fisher mean and convert to co-inclination (theta)/dec (phi) in radians
#
fpars = fisher_mean(data)
pbar = fpars["dec"] * np.pi / 180.
tbar = (90. - fpars["inc"]) * np.pi / 180.
#
# initialize matrices
#
H = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
w = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
b = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
gam = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
xg = []
#
# set up rotation matrix H
#
H = [[np.cos(tbar) * np.cos(pbar), -np.sin(pbar), np.sin(tbar) * np.cos(pbar)], [np.cos(tbar)
* np.sin(pbar), np.cos(pbar), np.sin(pbar) * np.sin(tbar)], [-np.sin(tbar), 0., np.cos(tbar)]]
#
# get cartesian coordinates of data
#
for rec in data:
X.append(dir2cart([rec[0], rec[1], 1.]))
#
# put in T matrix
#
T = Tmatrix(X)
for i in range(3):
for j in range(3):
T[i][j] = old_div(T[i][j], float(NN))
#
# compute B=H'TH
#
for i in range(3):
for j in range(3):
for k in range(3):
w[i][j] += T[i][k] * H[k][j]
for i in range(3):
for j in range(3):
for k in range(3):
b[i][j] += H[k][i] * w[k][j]
#
# choose a rotation w about North pole to diagonalize upper part of B
#
psi = 0.5 * np.arctan(2. * b[0][1] / (b[0][0] - b[1][1]))
w = [[np.cos(psi), -np.sin(psi), 0],
[np.sin(psi), np.cos(psi), 0], [0., 0., 1.]]
for i in range(3):
for j in range(3):
gamtmp = 0.
for k in range(3):
gamtmp += H[i][k] * w[k][j]
gam[i][j] = gamtmp
for i in range(N):
xg.append([0., 0., 0.])
for k in range(3):
xgtmp = 0.
for j in range(3):
xgtmp += gam[j][k] * X[i][j]
xg[i][k] = xgtmp
# compute asymptotic ellipse parameters
#
xmu, sigma1, sigma2 = 0., 0., 0.
for i in range(N):
xmu += xg[i][2]
sigma1 = sigma1 + xg[i][0]**2
sigma2 = sigma2 + xg[i][1]**2
xmu = old_div(xmu, float(N))
sigma1 = old_div(sigma1, float(N))
sigma2 = old_div(sigma2, float(N))
g = -2.0 * np.log(0.05) / (float(NN) * xmu**2)
if np.sqrt(sigma1 * g) < 1:
eta = np.arcsin(np.sqrt(sigma1 * g))
if np.sqrt(sigma2 * g) < 1:
zeta = np.arcsin(np.sqrt(sigma2 * g))
if np.sqrt(sigma1 * g) >= 1.:
eta = old_div(np.pi, 2.)
if np.sqrt(sigma2 * g) >= 1.:
zeta = old_div(np.pi, 2.)
#
# convert Kent parameters to directions,angles
#
kpars["dec"] = fpars["dec"]
kpars["inc"] = fpars["inc"]
kpars["n"] = NN
ZDir = cart2dir([gam[0][1], gam[1][1], gam[2][1]])
EDir = cart2dir([gam[0][0], gam[1][0], gam[2][0]])
kpars["Zdec"] = ZDir[0]
kpars["Zinc"] = ZDir[1]
kpars["Edec"] = EDir[0]
kpars["Einc"] = EDir[1]
if kpars["Zinc"] < 0:
kpars["Zinc"] = -kpars["Zinc"]
kpars["Zdec"] = (kpars["Zdec"] + 180.) % 360.
if kpars["Einc"] < 0:
kpars["Einc"] = -kpars["Einc"]
kpars["Edec"] = (kpars["Edec"] + 180.) % 360.
kpars["Zeta"] = zeta * 180. / np.pi
kpars["Eta"] = eta * 180. / np.pi
return kpars | gets Kent parameters for data
Parameters
___________________
data : nested pairs of [Dec,Inc]
NN : normalization
NN is the number of data for Kent ellipse
NN is 1 for Kent ellipses of bootstrapped mean directions
Return
kpars dictionary keys
dec : mean declination
inc : mean inclination
n : number of datapoints
Eta : major ellipse
Edec : declination of major ellipse axis
Einc : inclination of major ellipse axis
Zeta : minor ellipse
Zdec : declination of minor ellipse axis
Zinc : inclination of minor ellipse axis | Below is the the instruction that describes the task:
### Input:
gets Kent parameters for data
Parameters
___________________
data : nested pairs of [Dec,Inc]
NN : normalization
NN is the number of data for Kent ellipse
NN is 1 for Kent ellipses of bootstrapped mean directions
Return
kpars dictionary keys
dec : mean declination
inc : mean inclination
n : number of datapoints
Eta : major ellipse
Edec : declination of major ellipse axis
Einc : inclination of major ellipse axis
Zeta : minor ellipse
Zdec : declination of minor ellipse axis
Zinc : inclination of minor ellipse axis
### Response:
def dokent(data, NN):
    """
    gets Kent parameters for data
    Parameters
    ___________________
    data : nested pairs of [Dec,Inc]
    NN : normalization
        NN is the number of data for Kent ellipse
        NN is 1 for Kent ellipses of bootstrapped mean directions
    Return
    kpars dictionary keys
        dec : mean declination
        inc : mean inclination
        n : number of datapoints
        Eta : major ellipse (semi-angle, degrees)
        Edec : declination of major ellipse axis
        Einc : inclination of major ellipse axis
        Zeta : minor ellipse (semi-angle, degrees)
        Zdec : declination of minor ellipse axis
        Zinc : inclination of minor ellipse axis
    Notes
    -----
    Returns an empty dict when fewer than two directions are supplied.
    """
    X, kpars = [], {}
    N = len(data)
    if N < 2:
        # Not enough directions to define an ellipse.
        return kpars
    #
    # get fisher mean and convert to co-inclination (theta)/dec (phi) in radians
    #
    fpars = fisher_mean(data)
    pbar = fpars["dec"] * np.pi / 180.
    tbar = (90. - fpars["inc"]) * np.pi / 180.
    #
    # initialize matrices
    #
    H = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
    w = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
    b = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
    gam = [[0., 0., 0.], [0., 0., 0.], [0., 0., 0.]]
    xg = []
    #
    # set up rotation matrix H (rotates coordinates so the Fisher mean
    # direction lies along the third axis)
    #
    H = [[np.cos(tbar) * np.cos(pbar), -np.sin(pbar), np.sin(tbar) * np.cos(pbar)],
         [np.cos(tbar) * np.sin(pbar), np.cos(pbar), np.sin(pbar) * np.sin(tbar)],
         [-np.sin(tbar), 0., np.cos(tbar)]]
    #
    # get cartesian coordinates of data (unit vectors)
    #
    for rec in data:
        X.append(dir2cart([rec[0], rec[1], 1.]))
    #
    # put in T matrix (orientation matrix), normalized by NN
    #
    T = Tmatrix(X)
    for i in range(3):
        for j in range(3):
            T[i][j] = old_div(T[i][j], float(NN))
    #
    # compute B = H' T H (orientation matrix in mean-direction frame)
    #
    for i in range(3):
        for j in range(3):
            for k in range(3):
                w[i][j] += T[i][k] * H[k][j]
    for i in range(3):
        for j in range(3):
            for k in range(3):
                b[i][j] += H[k][i] * w[k][j]
    #
    # choose a rotation w about North pole to diagonalize upper part of B
    #
    psi = 0.5 * np.arctan(2. * b[0][1] / (b[0][0] - b[1][1]))
    w = [[np.cos(psi), -np.sin(psi), 0],
         [np.sin(psi), np.cos(psi), 0], [0., 0., 1.]]
    # gam = H w: combined rotation from geographic to ellipse frame
    for i in range(3):
        for j in range(3):
            gamtmp = 0.
            for k in range(3):
                gamtmp += H[i][k] * w[k][j]
            gam[i][j] = gamtmp
    # rotate each data vector into the ellipse frame
    for i in range(N):
        xg.append([0., 0., 0.])
        for k in range(3):
            xgtmp = 0.
            for j in range(3):
                xgtmp += gam[j][k] * X[i][j]
            xg[i][k] = xgtmp
    # compute asymptotic ellipse parameters (variances along ellipse axes)
    #
    xmu, sigma1, sigma2 = 0., 0., 0.
    for i in range(N):
        xmu += xg[i][2]
        sigma1 = sigma1 + xg[i][0]**2
        sigma2 = sigma2 + xg[i][1]**2
    xmu = old_div(xmu, float(N))
    sigma1 = old_div(sigma1, float(N))
    sigma2 = old_div(sigma2, float(N))
    # 95% confidence factor (the 0.05 below)
    g = -2.0 * np.log(0.05) / (float(NN) * xmu**2)
    # clamp each semi-angle at 90 degrees when the variance is too large
    # NOTE(review): if sigma*g were NaN neither branch fires and eta/zeta
    # would be unbound -- assumed not to occur for valid input.
    if np.sqrt(sigma1 * g) < 1:
        eta = np.arcsin(np.sqrt(sigma1 * g))
    if np.sqrt(sigma2 * g) < 1:
        zeta = np.arcsin(np.sqrt(sigma2 * g))
    if np.sqrt(sigma1 * g) >= 1.:
        eta = old_div(np.pi, 2.)
    if np.sqrt(sigma2 * g) >= 1.:
        zeta = old_div(np.pi, 2.)
    #
    # convert Kent parameters to directions,angles
    #
    kpars["dec"] = fpars["dec"]
    kpars["inc"] = fpars["inc"]
    kpars["n"] = NN
    ZDir = cart2dir([gam[0][1], gam[1][1], gam[2][1]])
    EDir = cart2dir([gam[0][0], gam[1][0], gam[2][0]])
    kpars["Zdec"] = ZDir[0]
    kpars["Zinc"] = ZDir[1]
    kpars["Edec"] = EDir[0]
    kpars["Einc"] = EDir[1]
    # flip axes into the lower hemisphere for reporting
    if kpars["Zinc"] < 0:
        kpars["Zinc"] = -kpars["Zinc"]
        kpars["Zdec"] = (kpars["Zdec"] + 180.) % 360.
    if kpars["Einc"] < 0:
        kpars["Einc"] = -kpars["Einc"]
        kpars["Edec"] = (kpars["Edec"] + 180.) % 360.
    kpars["Zeta"] = zeta * 180. / np.pi
    kpars["Eta"] = eta * 180. / np.pi
    return kpars
def is_available(self, rtdc_ds, verbose=False):
"""Check whether the feature is available
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to check availability for
Returns
-------
available: bool
`True`, if feature can be computed with `compute`
Notes
-----
This method returns `False` for a feature if there
is a feature defined with the same name but with
higher priority (even if the feature would be
available otherwise).
"""
# Check config keys
for item in self.req_config:
section, keys = item
if section not in rtdc_ds.config:
if verbose:
print("{} not in config".format(section))
return False
else:
for key in keys:
if key not in rtdc_ds.config[section]:
if verbose:
print("{} not in config['{}']".format(key,
section))
return False
# Check features
for col in self.req_features:
if col not in rtdc_ds:
return False
# Check priorities of other features
for of in AncillaryFeature.features:
if of == self:
# nothing to compare
continue
elif of.feature_name == self.feature_name:
# same feature name
if of.priority <= self.priority:
# lower priority, ignore
continue
else:
# higher priority
if of.is_available(rtdc_ds):
# higher priority is available, thus
# this feature is not available
return False
else:
# higher priority not available
continue
else:
# other feature
continue
return True | Check whether the feature is available
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to check availability for
Returns
-------
available: bool
`True`, if feature can be computed with `compute`
Notes
-----
This method returns `False` for a feature if there
is a feature defined with the same name but with
higher priority (even if the feature would be
available otherwise). | Below is the the instruction that describes the task:
### Input:
Check whether the feature is available
Parameters
----------
rtdc_ds: instance of RTDCBase
The dataset to check availability for
Returns
-------
available: bool
`True`, if feature can be computed with `compute`
Notes
-----
This method returns `False` for a feature if there
is a feature defined with the same name but with
higher priority (even if the feature would be
available otherwise).
### Response:
def is_available(self, rtdc_ds, verbose=False):
    """Check whether the feature is available

    Parameters
    ----------
    rtdc_ds: instance of RTDCBase
        The dataset to check availability for
    verbose: bool
        If True, print the reason for unavailability.

    Returns
    -------
    available: bool
        `True`, if feature can be computed with `compute`

    Notes
    -----
    This method returns `False` for a feature if there
    is a feature defined with the same name but with
    higher priority (even if the feature would be
    available otherwise).
    """
    # All required config sections and keys must be present.
    for section, keys in self.req_config:
        if section not in rtdc_ds.config:
            if verbose:
                print("{} not in config".format(section))
            return False
        for key in keys:
            if key not in rtdc_ds.config[section]:
                if verbose:
                    print("{} not in config['{}']".format(key,
                                                          section))
                return False
    # All required features must exist in the dataset.
    for col in self.req_features:
        if col not in rtdc_ds:
            return False
    # A registered feature with the same name but strictly higher
    # priority shadows this one if it is itself available.
    for other in AncillaryFeature.features:
        if other == self:
            continue
        if other.feature_name != self.feature_name:
            continue
        if other.priority > self.priority and other.is_available(rtdc_ds):
            return False
    return True
def html(tag):
"""Return sequence of start and end regex patterns for simple HTML tag"""
return (HTML_START.format(tag=tag), HTML_END.format(tag=tag)) | Return sequence of start and end regex patterns for simple HTML tag | Below is the the instruction that describes the task:
### Input:
Return sequence of start and end regex patterns for simple HTML tag
### Response:
def html(tag):
    """Return the (start, end) regex pattern pair for a simple HTML ``tag``."""
    start = HTML_START.format(tag=tag)
    end = HTML_END.format(tag=tag)
    return (start, end)
def create_CAG_with_indicators(input, output, filename="CAG_with_indicators.pdf"):
""" Create a CAG with mapped indicators """
with open(input, "rb") as f:
G = pickle.load(f)
G.map_concepts_to_indicators(min_temporal_res="month")
G.set_indicator("UN/events/weather/precipitation", "Historical Average Total Daily Rainfall (Maize)", "DSSAT")
G.set_indicator("UN/events/human/agriculture/food_production",
"Historical Production (Maize)", "DSSAT")
G.set_indicator("UN/entities/human/food/food_security", "IPC Phase Classification", "FEWSNET")
G.set_indicator("UN/entities/food_availability", "Production, Meat indigenous, total", "FAO")
G.set_indicator("UN/entities/human/financial/economic/market", "Inflation Rate", "ieconomics.com")
G.set_indicator("UN/events/human/death", "Battle-related deaths", "WDI")
with open(output, "wb") as f:
pickle.dump(G, f) | Create a CAG with mapped indicators | Below is the the instruction that describes the task:
### Input:
Create a CAG with mapped indicators
### Response:
def create_CAG_with_indicators(input, output, filename="CAG_with_indicators.pdf"):
    """Load a pickled CAG, attach indicators to its concepts, and re-pickle it."""
    with open(input, "rb") as f:
        G = pickle.load(f)
    G.map_concepts_to_indicators(min_temporal_res="month")
    # (concept, indicator, source) bindings applied on top of the mapping.
    bindings = (
        ("UN/events/weather/precipitation",
         "Historical Average Total Daily Rainfall (Maize)", "DSSAT"),
        ("UN/events/human/agriculture/food_production",
         "Historical Production (Maize)", "DSSAT"),
        ("UN/entities/human/food/food_security",
         "IPC Phase Classification", "FEWSNET"),
        ("UN/entities/food_availability",
         "Production, Meat indigenous, total", "FAO"),
        ("UN/entities/human/financial/economic/market",
         "Inflation Rate", "ieconomics.com"),
        ("UN/events/human/death", "Battle-related deaths", "WDI"),
    )
    for concept, indicator, source in bindings:
        G.set_indicator(concept, indicator, source)
    with open(output, "wb") as f:
        pickle.dump(G, f)
def dec2dms(x):
"""
Convert decimal degrees into a sexagessimal string in degrees.
Parameters
----------
x : float
Angle in degrees
Returns
-------
dms : string
String of format [+-]DD:MM:SS.SS
or XX:XX:XX.XX if x is not finite.
"""
if not np.isfinite(x):
return 'XX:XX:XX.XX'
if x < 0:
sign = '-'
else:
sign = '+'
x = abs(x)
d = int(math.floor(x))
m = int(math.floor((x - d) * 60))
s = float(( (x - d) * 60 - m) * 60)
return '{0}{1:02d}:{2:02d}:{3:05.2f}'.format(sign, d, m, s) | Convert decimal degrees into a sexagessimal string in degrees.
Parameters
----------
x : float
Angle in degrees
Returns
-------
dms : string
String of format [+-]DD:MM:SS.SS
or XX:XX:XX.XX if x is not finite. | Below is the the instruction that describes the task:
### Input:
Convert decimal degrees into a sexagessimal string in degrees.
Parameters
----------
x : float
Angle in degrees
Returns
-------
dms : string
String of format [+-]DD:MM:SS.SS
or XX:XX:XX.XX if x is not finite.
### Response:
def dec2dms(x):
    """
    Convert decimal degrees into a sexagessimal string in degrees.

    Parameters
    ----------
    x : float
        Angle in degrees

    Returns
    -------
    dms : string
        String of format [+-]DD:MM:SS.SS
        or XX:XX:XX.XX if x is not finite.
    """
    if not np.isfinite(x):
        return 'XX:XX:XX.XX'
    sign = '-' if x < 0 else '+'
    mag = abs(x)
    d = int(math.floor(mag))
    # Fractional degrees expressed in minutes; split into whole
    # minutes and fractional seconds.
    minutes_total = (mag - d) * 60
    m = int(math.floor(minutes_total))
    s = float((minutes_total - m) * 60)
    return '{0}{1:02d}:{2:02d}:{3:05.2f}'.format(sign, d, m, s)
def resolve_pid(fetched_pid):
"""Retrieve the real PID given a fetched PID.
:param pid: fetched PID to resolve.
"""
return PersistentIdentifier.get(
pid_type=fetched_pid.pid_type,
pid_value=fetched_pid.pid_value,
pid_provider=fetched_pid.provider.pid_provider
) | Retrieve the real PID given a fetched PID.
:param pid: fetched PID to resolve. | Below is the the instruction that describes the task:
### Input:
Retrieve the real PID given a fetched PID.
:param pid: fetched PID to resolve.
### Response:
def resolve_pid(fetched_pid):
    """Look up the persistent identifier record matching a fetched PID.

    :param fetched_pid: fetched PID to resolve.
    """
    provider = fetched_pid.provider.pid_provider
    return PersistentIdentifier.get(
        pid_type=fetched_pid.pid_type,
        pid_value=fetched_pid.pid_value,
        pid_provider=provider,
    )
def execute_command(self, command):
"""Execute a command
Args:
command (str)
Returns:
process (object)
"""
self.runner.info_log("Executing command: %s" % command)
process = Popen(
command,
stdout=open(os.devnull, 'w'),
stderr=open('runner.log', 'a'),
)
return process | Execute a command
Args:
command (str)
Returns:
process (object) | Below is the the instruction that describes the task:
### Input:
Execute a command
Args:
command (str)
Returns:
process (object)
### Response:
def execute_command(self, command):
    """Execute a command in a subprocess.

    The child's stdout is discarded and its stderr is appended to
    ``runner.log`` in the current directory.

    Args:
        command (str): command passed directly to ``Popen``
    Returns:
        process (object): the started ``Popen`` handle (not waited on)
    """
    self.runner.info_log("Executing command: %s" % command)
    # The original code opened both files and never closed the parent's
    # handles, leaking two file descriptors per invocation. The child
    # inherits its own copies during Popen, so closing ours afterwards
    # is safe.
    with open(os.devnull, 'w') as devnull, open('runner.log', 'a') as log:
        process = Popen(
            command,
            stdout=devnull,
            stderr=log,
        )
    return process
def now(self, when=None):
"""Set the current value to the correct tuple based on the seconds
since the epoch. If 'when' is not provided, get the current time
from the task manager.
"""
if when is None:
when = _TaskManager().get_time()
tup = time.localtime(when)
self.value = (tup[0]-1900, tup[1], tup[2], tup[6] + 1)
return self | Set the current value to the correct tuple based on the seconds
since the epoch. If 'when' is not provided, get the current time
from the task manager. | Below is the the instruction that describes the task:
### Input:
Set the current value to the correct tuple based on the seconds
since the epoch. If 'when' is not provided, get the current time
from the task manager.
### Response:
def now(self, when=None):
    """Set ``self.value`` from a seconds-since-epoch timestamp.

    If 'when' is not provided, the current time is obtained from the
    task manager. Returns ``self`` so calls can be chained.
    """
    if when is None:
        when = _TaskManager().get_time()
    local = time.localtime(when)
    # (years since 1900, month, day, 1-based weekday)
    self.value = (local.tm_year - 1900, local.tm_mon, local.tm_mday,
                  local.tm_wday + 1)
    return self
def delete_policy_version(policyName, policyVersionId,
region=None, key=None, keyid=None, profile=None):
'''
Given a policy name and version, delete it.
Returns {deleted: true} if the policy version was deleted and returns
{deleted: false} if the policy version was not deleted.
CLI Example:
.. code-block:: bash
salt myminion boto_iot.delete_policy_version mypolicy version
'''
try:
conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
conn.delete_policy_version(policyName=policyName,
policyVersionId=policyVersionId)
return {'deleted': True}
except ClientError as e:
return {'deleted': False, 'error': __utils__['boto3.get_error'](e)} | Given a policy name and version, delete it.
Returns {deleted: true} if the policy version was deleted and returns
{deleted: false} if the policy version was not deleted.
CLI Example:
.. code-block:: bash
salt myminion boto_iot.delete_policy_version mypolicy version | Below is the the instruction that describes the task:
### Input:
Given a policy name and version, delete it.
Returns {deleted: true} if the policy version was deleted and returns
{deleted: false} if the policy version was not deleted.
CLI Example:
.. code-block:: bash
salt myminion boto_iot.delete_policy_version mypolicy version
### Response:
def delete_policy_version(policyName, policyVersionId,
                          region=None, key=None, keyid=None, profile=None):
    '''
    Given a policy name and version, delete it.
    Returns {deleted: true} if the policy version was deleted and returns
    {deleted: false} (with an ``error`` entry) if the deletion failed.
    CLI Example:
    .. code-block:: bash
        salt myminion boto_iot.delete_policy_version mypolicy version
    '''
    try:
        # Connection setup stays inside the try so its ClientErrors are
        # also reported in the returned dict.
        conn = _get_conn(region=region, key=key, keyid=keyid, profile=profile)
        conn.delete_policy_version(policyName=policyName,
                                   policyVersionId=policyVersionId)
    except ClientError as e:
        return {'deleted': False, 'error': __utils__['boto3.get_error'](e)}
    else:
        return {'deleted': True}
def has_wrap_around_links(self, minimum_working=0.9):
"""Test if a machine has wrap-around connections installed.
Since the Machine object does not explicitly define whether a machine
has wrap-around links they must be tested for directly. This test
performs a "fuzzy" test on the number of wrap-around links which are
working to determine if wrap-around links are really present.
Parameters
----------
minimum_working : 0.0 <= float <= 1.0
The minimum proportion of all wrap-around links which must be
working for this function to return True.
Returns
-------
bool
True if the system has wrap-around links, False if not.
"""
working = 0
for x in range(self.width):
if (x, 0, Links.south) in self:
working += 1
if (x, self.height - 1, Links.north) in self:
working += 1
if (x, 0, Links.south_west) in self:
working += 1
if (x, self.height - 1, Links.north_east) in self:
working += 1
for y in range(self.height):
if (0, y, Links.west) in self:
working += 1
if (self.width - 1, y, Links.east) in self:
working += 1
# Don't re-count links counted when scanning the x-axis
if y != 0 and (0, y, Links.south_west) in self:
working += 1
if (y != self.height - 1 and
(self.width - 1, y, Links.north_east) in self):
working += 1
total = (4 * self.width) + (4 * self.height) - 2
return (float(working) / float(total)) >= minimum_working | Test if a machine has wrap-around connections installed.
Since the Machine object does not explicitly define whether a machine
has wrap-around links they must be tested for directly. This test
performs a "fuzzy" test on the number of wrap-around links which are
working to determine if wrap-around links are really present.
Parameters
----------
minimum_working : 0.0 <= float <= 1.0
The minimum proportion of all wrap-around links which must be
working for this function to return True.
Returns
-------
bool
True if the system has wrap-around links, False if not. | Below is the the instruction that describes the task:
### Input:
Test if a machine has wrap-around connections installed.
Since the Machine object does not explicitly define whether a machine
has wrap-around links they must be tested for directly. This test
performs a "fuzzy" test on the number of wrap-around links which are
working to determine if wrap-around links are really present.
Parameters
----------
minimum_working : 0.0 <= float <= 1.0
The minimum proportion of all wrap-around links which must be
working for this function to return True.
Returns
-------
bool
True if the system has wrap-around links, False if not.
### Response:
def has_wrap_around_links(self, minimum_working=0.9):
    """Heuristically test whether wrap-around links are installed.

    Since the Machine object does not explicitly define whether a machine
    has wrap-around links, the edge links are probed directly and a
    "fuzzy" threshold decides the answer.

    Parameters
    ----------
    minimum_working : 0.0 <= float <= 1.0
        The minimum proportion of all wrap-around links which must be
        working for this function to return True.

    Returns
    -------
    bool
        True if the system has wrap-around links, False if not.
    """
    top = self.height - 1
    right = self.width - 1
    working = 0
    # North/south and diagonal wrap links along the top/bottom edges.
    for x in range(self.width):
        working += (x, 0, Links.south) in self
        working += (x, top, Links.north) in self
        working += (x, 0, Links.south_west) in self
        working += (x, top, Links.north_east) in self
    # East/west wrap links along the left/right edges; the corner
    # diagonals were already counted in the x scan above.
    for y in range(self.height):
        working += (0, y, Links.west) in self
        working += (right, y, Links.east) in self
        if y != 0:
            working += (0, y, Links.south_west) in self
        if y != top:
            working += (right, y, Links.north_east) in self
    total = (4 * self.width) + (4 * self.height) - 2
    return (float(working) / float(total)) >= minimum_working
def is_time_older(self):
"""
Check if the current time is older than the one in the database.
"""
if (
self._authorization()
and self.is_in_database()
and int(
PyFunceble.INTERN["whois_db"][PyFunceble.INTERN["file_to_test"]][
PyFunceble.INTERN["to_test"]
]["epoch"]
)
< int(PyFunceble.time())
):
# * We are authorized to work.
# and
# * The element we are testing is in the database.
# and
# * The epoch of the expiration date is less than our current epoch.
# The expiration date is in the past, we return True.
return True
# The expiration date is in the future, we return False.
return False | Check if the current time is older than the one in the database. | Below is the the instruction that describes the task:
### Input:
Check if the current time is older than the one in the database.
### Response:
def is_time_older(self):
    """
    Check if the current time is older than the one in the database.
    """
    # We can only compare when we are authorized to use the database
    # and the tested element is actually recorded in it.
    if not self._authorization() or not self.is_in_database():
        return False
    record = PyFunceble.INTERN["whois_db"][PyFunceble.INTERN["file_to_test"]][
        PyFunceble.INTERN["to_test"]
    ]
    # True when the stored expiration epoch lies in the past.
    return int(record["epoch"]) < int(PyFunceble.time())
def avg_grads(tower_grads):
"""Calculate the average gradient for each shared variable across all
towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been
averaged across all towers.
Modified from this tutorial: https://tinyurl.com/n3jr2vm
"""
if len(tower_grads) == 1:
return tower_grads[0]
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = [g for g, _ in grad_and_vars]
# Average over the 'tower' dimension.
grad = tf.add_n(grads) / len(grads)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
assert all(v is grad_and_var[1] for grad_and_var in grad_and_vars)
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads | Calculate the average gradient for each shared variable across all
towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been
averaged across all towers.
Modified from this tutorial: https://tinyurl.com/n3jr2vm | Below is the the instruction that describes the task:
### Input:
Calculate the average gradient for each shared variable across all
towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been
averaged across all towers.
Modified from this tutorial: https://tinyurl.com/n3jr2vm
### Response:
def avg_grads(tower_grads):
"""Calculate the average gradient for each shared variable across all
towers.
Note that this function provides a synchronization point across all towers.
Args:
tower_grads: List of lists of (gradient, variable) tuples. The outer list
is over individual gradients. The inner list is over the gradient
calculation for each tower.
Returns:
List of pairs of (gradient, variable) where the gradient has been
averaged across all towers.
Modified from this tutorial: https://tinyurl.com/n3jr2vm
"""
if len(tower_grads) == 1:
return tower_grads[0]
average_grads = []
for grad_and_vars in zip(*tower_grads):
# Note that each grad_and_vars looks like the following:
# ((grad0_gpu0, var0_gpu0), ... , (grad0_gpuN, var0_gpuN))
grads = [g for g, _ in grad_and_vars]
# Average over the 'tower' dimension.
grad = tf.add_n(grads) / len(grads)
# Keep in mind that the Variables are redundant because they are shared
# across towers. So .. we will just return the first tower's pointer to
# the Variable.
v = grad_and_vars[0][1]
assert all(v is grad_and_var[1] for grad_and_var in grad_and_vars)
grad_and_var = (grad, v)
average_grads.append(grad_and_var)
return average_grads |
def present(
name,
user=None,
fingerprint=None,
key=None,
port=None,
enc=None,
config=None,
hash_known_hosts=True,
timeout=5,
fingerprint_hash_type=None):
'''
Verifies that the specified host is known by the specified user
On many systems, specifically those running with openssh 4 or older, the
``enc`` option must be set, only openssh 5 and above can detect the key
type.
name
The name of the remote host (e.g. "github.com")
Note that only a single hostname is supported, if foo.example.com and
bar.example.com have the same host you will need two separate Salt
States to represent them.
user
The user who owns the ssh authorized keys file to modify
fingerprint
The fingerprint of the key which must be present in the known_hosts
file (optional if key specified)
key
The public key which must be present in the known_hosts file
(optional if fingerprint specified)
port
optional parameter, port which will be used to when requesting the
public key from the remote host, defaults to port 22.
enc
Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
or ssh-dss
config
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/known_hosts". If no user is specified,
defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
absolute path when a user is not specified.
hash_known_hosts : True
Hash all hostnames and addresses in the known hosts file.
timeout : int
Set the timeout for connection attempts. If ``timeout`` seconds have
elapsed since a connection was initiated to a host or since the last
time anything was read from that host, then the connection is closed
and the host in question considered unavailable. Default is 5 seconds.
.. versionadded:: 2016.3.0
fingerprint_hash_type
The public key fingerprint hash type that the public key fingerprint
was originally hashed with. This defaults to ``sha256`` if not specified.
.. versionadded:: 2016.11.4
.. versionchanged:: 2017.7.0: default changed from ``md5`` to ``sha256``
'''
ret = {'name': name,
'changes': {},
'result': None if __opts__['test'] else True,
'comment': ''}
if not user:
config = config or '/etc/ssh/ssh_known_hosts'
else:
config = config or '.ssh/known_hosts'
if not user and not os.path.isabs(config):
comment = 'If not specifying a "user", specify an absolute "config".'
ret['result'] = False
return dict(ret, comment=comment)
if __opts__['test']:
if key and fingerprint:
comment = 'Specify either "key" or "fingerprint", not both.'
ret['result'] = False
return dict(ret, comment=comment)
elif key and not enc:
comment = 'Required argument "enc" if using "key" argument.'
ret['result'] = False
return dict(ret, comment=comment)
try:
result = __salt__['ssh.check_known_host'](user, name,
key=key,
fingerprint=fingerprint,
config=config,
port=port,
fingerprint_hash_type=fingerprint_hash_type)
except CommandNotFoundError as err:
ret['result'] = False
ret['comment'] = 'ssh.check_known_host error: {0}'.format(err)
return ret
if result == 'exists':
comment = 'Host {0} is already in {1}'.format(name, config)
ret['result'] = True
return dict(ret, comment=comment)
elif result == 'add':
comment = 'Key for {0} is set to be added to {1}'.format(name,
config)
return dict(ret, comment=comment)
else: # 'update'
comment = 'Key for {0} is set to be updated in {1}'.format(name,
config)
return dict(ret, comment=comment)
result = __salt__['ssh.set_known_host'](
user=user,
hostname=name,
fingerprint=fingerprint,
key=key,
port=port,
enc=enc,
config=config,
hash_known_hosts=hash_known_hosts,
timeout=timeout,
fingerprint_hash_type=fingerprint_hash_type)
if result['status'] == 'exists':
return dict(ret,
comment='{0} already exists in {1}'.format(name, config))
elif result['status'] == 'error':
return dict(ret, result=False, comment=result['error'])
else: # 'updated'
if key:
new_key = result['new'][0]['key']
return dict(ret,
changes={'old': result['old'], 'new': result['new']},
comment='{0}\'s key saved to {1} (key: {2})'.format(
name, config, new_key))
else:
fingerprint = result['new'][0]['fingerprint']
return dict(ret,
changes={'old': result['old'], 'new': result['new']},
comment='{0}\'s key saved to {1} (fingerprint: {2})'.format(
name, config, fingerprint)) | Verifies that the specified host is known by the specified user
On many systems, specifically those running with openssh 4 or older, the
``enc`` option must be set, only openssh 5 and above can detect the key
type.
name
The name of the remote host (e.g. "github.com")
Note that only a single hostname is supported, if foo.example.com and
bar.example.com have the same host you will need two separate Salt
States to represent them.
user
The user who owns the ssh authorized keys file to modify
fingerprint
The fingerprint of the key which must be present in the known_hosts
file (optional if key specified)
key
The public key which must be present in the known_hosts file
(optional if fingerprint specified)
port
optional parameter, port which will be used to when requesting the
public key from the remote host, defaults to port 22.
enc
Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
or ssh-dss
config
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/known_hosts". If no user is specified,
defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
absolute path when a user is not specified.
hash_known_hosts : True
Hash all hostnames and addresses in the known hosts file.
timeout : int
Set the timeout for connection attempts. If ``timeout`` seconds have
elapsed since a connection was initiated to a host or since the last
time anything was read from that host, then the connection is closed
and the host in question considered unavailable. Default is 5 seconds.
.. versionadded:: 2016.3.0
fingerprint_hash_type
The public key fingerprint hash type that the public key fingerprint
was originally hashed with. This defaults to ``sha256`` if not specified.
.. versionadded:: 2016.11.4
    .. versionchanged:: 2017.7.0: default changed from ``md5`` to ``sha256`` | Below is the instruction that describes the task:
### Input:
Verifies that the specified host is known by the specified user
On many systems, specifically those running with openssh 4 or older, the
``enc`` option must be set, only openssh 5 and above can detect the key
type.
name
The name of the remote host (e.g. "github.com")
Note that only a single hostname is supported, if foo.example.com and
bar.example.com have the same host you will need two separate Salt
States to represent them.
user
The user who owns the ssh authorized keys file to modify
fingerprint
The fingerprint of the key which must be present in the known_hosts
file (optional if key specified)
key
The public key which must be present in the known_hosts file
(optional if fingerprint specified)
port
optional parameter, port which will be used to when requesting the
public key from the remote host, defaults to port 22.
enc
Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
or ssh-dss
config
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/known_hosts". If no user is specified,
defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
absolute path when a user is not specified.
hash_known_hosts : True
Hash all hostnames and addresses in the known hosts file.
timeout : int
Set the timeout for connection attempts. If ``timeout`` seconds have
elapsed since a connection was initiated to a host or since the last
time anything was read from that host, then the connection is closed
and the host in question considered unavailable. Default is 5 seconds.
.. versionadded:: 2016.3.0
fingerprint_hash_type
The public key fingerprint hash type that the public key fingerprint
was originally hashed with. This defaults to ``sha256`` if not specified.
.. versionadded:: 2016.11.4
.. versionchanged:: 2017.7.0: default changed from ``md5`` to ``sha256``
### Response:
def present(
name,
user=None,
fingerprint=None,
key=None,
port=None,
enc=None,
config=None,
hash_known_hosts=True,
timeout=5,
fingerprint_hash_type=None):
'''
Verifies that the specified host is known by the specified user
On many systems, specifically those running with openssh 4 or older, the
``enc`` option must be set, only openssh 5 and above can detect the key
type.
name
The name of the remote host (e.g. "github.com")
Note that only a single hostname is supported, if foo.example.com and
bar.example.com have the same host you will need two separate Salt
States to represent them.
user
The user who owns the ssh authorized keys file to modify
fingerprint
The fingerprint of the key which must be present in the known_hosts
file (optional if key specified)
key
The public key which must be present in the known_hosts file
(optional if fingerprint specified)
port
optional parameter, port which will be used to when requesting the
public key from the remote host, defaults to port 22.
enc
Defines what type of key is being used, can be ed25519, ecdsa ssh-rsa
or ssh-dss
config
The location of the authorized keys file relative to the user's home
directory, defaults to ".ssh/known_hosts". If no user is specified,
defaults to "/etc/ssh/ssh_known_hosts". If present, must be an
absolute path when a user is not specified.
hash_known_hosts : True
Hash all hostnames and addresses in the known hosts file.
timeout : int
Set the timeout for connection attempts. If ``timeout`` seconds have
elapsed since a connection was initiated to a host or since the last
time anything was read from that host, then the connection is closed
and the host in question considered unavailable. Default is 5 seconds.
.. versionadded:: 2016.3.0
fingerprint_hash_type
The public key fingerprint hash type that the public key fingerprint
was originally hashed with. This defaults to ``sha256`` if not specified.
.. versionadded:: 2016.11.4
.. versionchanged:: 2017.7.0: default changed from ``md5`` to ``sha256``
'''
ret = {'name': name,
'changes': {},
'result': None if __opts__['test'] else True,
'comment': ''}
if not user:
config = config or '/etc/ssh/ssh_known_hosts'
else:
config = config or '.ssh/known_hosts'
if not user and not os.path.isabs(config):
comment = 'If not specifying a "user", specify an absolute "config".'
ret['result'] = False
return dict(ret, comment=comment)
if __opts__['test']:
if key and fingerprint:
comment = 'Specify either "key" or "fingerprint", not both.'
ret['result'] = False
return dict(ret, comment=comment)
elif key and not enc:
comment = 'Required argument "enc" if using "key" argument.'
ret['result'] = False
return dict(ret, comment=comment)
try:
result = __salt__['ssh.check_known_host'](user, name,
key=key,
fingerprint=fingerprint,
config=config,
port=port,
fingerprint_hash_type=fingerprint_hash_type)
except CommandNotFoundError as err:
ret['result'] = False
ret['comment'] = 'ssh.check_known_host error: {0}'.format(err)
return ret
if result == 'exists':
comment = 'Host {0} is already in {1}'.format(name, config)
ret['result'] = True
return dict(ret, comment=comment)
elif result == 'add':
comment = 'Key for {0} is set to be added to {1}'.format(name,
config)
return dict(ret, comment=comment)
else: # 'update'
comment = 'Key for {0} is set to be updated in {1}'.format(name,
config)
return dict(ret, comment=comment)
result = __salt__['ssh.set_known_host'](
user=user,
hostname=name,
fingerprint=fingerprint,
key=key,
port=port,
enc=enc,
config=config,
hash_known_hosts=hash_known_hosts,
timeout=timeout,
fingerprint_hash_type=fingerprint_hash_type)
if result['status'] == 'exists':
return dict(ret,
comment='{0} already exists in {1}'.format(name, config))
elif result['status'] == 'error':
return dict(ret, result=False, comment=result['error'])
else: # 'updated'
if key:
new_key = result['new'][0]['key']
return dict(ret,
changes={'old': result['old'], 'new': result['new']},
comment='{0}\'s key saved to {1} (key: {2})'.format(
name, config, new_key))
else:
fingerprint = result['new'][0]['fingerprint']
return dict(ret,
changes={'old': result['old'], 'new': result['new']},
comment='{0}\'s key saved to {1} (fingerprint: {2})'.format(
name, config, fingerprint)) |
def _process_files(self, record_id, data):
"""Snapshot bucket and add files in record during first publishing."""
if self.files:
assert not self.files.bucket.locked
self.files.bucket.locked = True
snapshot = self.files.bucket.snapshot(lock=True)
data['_files'] = self.files.dumps(bucket=snapshot.id)
yield data
db.session.add(RecordsBuckets(
record_id=record_id, bucket_id=snapshot.id
))
else:
        yield data | Snapshot bucket and add files in record during first publishing. | Below is the instruction that describes the task:
### Input:
Snapshot bucket and add files in record during first publishing.
### Response:
def _process_files(self, record_id, data):
"""Snapshot bucket and add files in record during first publishing."""
if self.files:
assert not self.files.bucket.locked
self.files.bucket.locked = True
snapshot = self.files.bucket.snapshot(lock=True)
data['_files'] = self.files.dumps(bucket=snapshot.id)
yield data
db.session.add(RecordsBuckets(
record_id=record_id, bucket_id=snapshot.id
))
else:
yield data |
def key(self):
"""
Example::
/browse/homes/ca/ -> ca
/browse/homes/ca/los-angeles-county/ -> los-angeles-county
/browse/homes/ca/los-angeles-county/91001/ -> 91001
/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/ -> tola-ave_5038895
:return:
"""
return [part.strip() for part in self.href.split("/") if part.strip()][
-1] | Example::
/browse/homes/ca/ -> ca
/browse/homes/ca/los-angeles-county/ -> los-angeles-county
/browse/homes/ca/los-angeles-county/91001/ -> 91001
/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/ -> tola-ave_5038895
    :return: | Below is the instruction that describes the task:
### Input:
Example::
/browse/homes/ca/ -> ca
/browse/homes/ca/los-angeles-county/ -> los-angeles-county
/browse/homes/ca/los-angeles-county/91001/ -> 91001
/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/ -> tola-ave_5038895
:return:
### Response:
def key(self):
"""
Example::
/browse/homes/ca/ -> ca
/browse/homes/ca/los-angeles-county/ -> los-angeles-county
/browse/homes/ca/los-angeles-county/91001/ -> 91001
/browse/homes/ca/los-angeles-county/91001/tola-ave_5038895/ -> tola-ave_5038895
:return:
"""
return [part.strip() for part in self.href.split("/") if part.strip()][
-1] |
def count(cls, *criterion, **kwargs):
"""Returns a count of the instances meeting the specified
filter criterion and kwargs.
Examples:
>>> User.count()
500
>>> User.count(country="India")
300
>>> User.count(User.age > 50, country="India")
39
"""
if criterion or kwargs:
return cls.filter(
*criterion,
**kwargs).count()
else:
return cls.query.count() | Returns a count of the instances meeting the specified
filter criterion and kwargs.
Examples:
>>> User.count()
500
>>> User.count(country="India")
300
>>> User.count(User.age > 50, country="India")
    39 | Below is the instruction that describes the task:
### Input:
Returns a count of the instances meeting the specified
filter criterion and kwargs.
Examples:
>>> User.count()
500
>>> User.count(country="India")
300
>>> User.count(User.age > 50, country="India")
39
### Response:
def count(cls, *criterion, **kwargs):
"""Returns a count of the instances meeting the specified
filter criterion and kwargs.
Examples:
>>> User.count()
500
>>> User.count(country="India")
300
>>> User.count(User.age > 50, country="India")
39
"""
if criterion or kwargs:
return cls.filter(
*criterion,
**kwargs).count()
else:
return cls.query.count() |
def get_changes(self, checks=None, imports=None, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get the changes needed by this restructuring
`resources` can be a list of `rope.base.resources.File`\s to
apply the restructuring on. If `None`, the restructuring will
be applied to all python files.
`checks` argument has been deprecated. Use the `args` argument
of the constructor. The usage of::
strchecks = {'obj1.type': 'mod.A', 'obj2': 'mod.B',
'obj3.object': 'mod.C'}
checks = restructuring.make_checks(strchecks)
can be replaced with::
args = {'obj1': 'type=mod.A', 'obj2': 'name=mod.B',
'obj3': 'object=mod.C'}
where obj1, obj2 and obj3 are wildcard names that appear
in restructuring pattern.
"""
if checks is not None:
warnings.warn(
'The use of checks parameter is deprecated; '
'use the args parameter of the constructor instead.',
DeprecationWarning, stacklevel=2)
for name, value in checks.items():
self.args[name] = similarfinder._pydefined_to_str(value)
if imports is not None:
warnings.warn(
'The use of imports parameter is deprecated; '
'use imports parameter of the constructor, instead.',
DeprecationWarning, stacklevel=2)
self.imports = imports
changes = change.ChangeSet('Restructuring <%s> to <%s>' %
(self.pattern, self.goal))
if resources is not None:
files = [resource for resource in resources
if libutils.is_python_file(self.project, resource)]
else:
files = self.project.get_python_files()
job_set = task_handle.create_jobset('Collecting Changes', len(files))
for resource in files:
job_set.started_job(resource.path)
pymodule = self.project.get_pymodule(resource)
finder = similarfinder.SimilarFinder(pymodule,
wildcards=self.wildcards)
matches = list(finder.get_matches(self.pattern, self.args))
computer = self._compute_changes(matches, pymodule)
result = computer.get_changed()
if result is not None:
imported_source = self._add_imports(resource, result,
self.imports)
changes.add_change(change.ChangeContents(resource,
imported_source))
job_set.finished_job()
return changes | Get the changes needed by this restructuring
`resources` can be a list of `rope.base.resources.File`\s to
apply the restructuring on. If `None`, the restructuring will
be applied to all python files.
`checks` argument has been deprecated. Use the `args` argument
of the constructor. The usage of::
strchecks = {'obj1.type': 'mod.A', 'obj2': 'mod.B',
'obj3.object': 'mod.C'}
checks = restructuring.make_checks(strchecks)
can be replaced with::
args = {'obj1': 'type=mod.A', 'obj2': 'name=mod.B',
'obj3': 'object=mod.C'}
where obj1, obj2 and obj3 are wildcard names that appear
    in restructuring pattern. | Below is the instruction that describes the task:
### Input:
Get the changes needed by this restructuring
`resources` can be a list of `rope.base.resources.File`\s to
apply the restructuring on. If `None`, the restructuring will
be applied to all python files.
`checks` argument has been deprecated. Use the `args` argument
of the constructor. The usage of::
strchecks = {'obj1.type': 'mod.A', 'obj2': 'mod.B',
'obj3.object': 'mod.C'}
checks = restructuring.make_checks(strchecks)
can be replaced with::
args = {'obj1': 'type=mod.A', 'obj2': 'name=mod.B',
'obj3': 'object=mod.C'}
where obj1, obj2 and obj3 are wildcard names that appear
in restructuring pattern.
### Response:
def get_changes(self, checks=None, imports=None, resources=None,
task_handle=taskhandle.NullTaskHandle()):
"""Get the changes needed by this restructuring
`resources` can be a list of `rope.base.resources.File`\s to
apply the restructuring on. If `None`, the restructuring will
be applied to all python files.
`checks` argument has been deprecated. Use the `args` argument
of the constructor. The usage of::
strchecks = {'obj1.type': 'mod.A', 'obj2': 'mod.B',
'obj3.object': 'mod.C'}
checks = restructuring.make_checks(strchecks)
can be replaced with::
args = {'obj1': 'type=mod.A', 'obj2': 'name=mod.B',
'obj3': 'object=mod.C'}
where obj1, obj2 and obj3 are wildcard names that appear
in restructuring pattern.
"""
if checks is not None:
warnings.warn(
'The use of checks parameter is deprecated; '
'use the args parameter of the constructor instead.',
DeprecationWarning, stacklevel=2)
for name, value in checks.items():
self.args[name] = similarfinder._pydefined_to_str(value)
if imports is not None:
warnings.warn(
'The use of imports parameter is deprecated; '
'use imports parameter of the constructor, instead.',
DeprecationWarning, stacklevel=2)
self.imports = imports
changes = change.ChangeSet('Restructuring <%s> to <%s>' %
(self.pattern, self.goal))
if resources is not None:
files = [resource for resource in resources
if libutils.is_python_file(self.project, resource)]
else:
files = self.project.get_python_files()
job_set = task_handle.create_jobset('Collecting Changes', len(files))
for resource in files:
job_set.started_job(resource.path)
pymodule = self.project.get_pymodule(resource)
finder = similarfinder.SimilarFinder(pymodule,
wildcards=self.wildcards)
matches = list(finder.get_matches(self.pattern, self.args))
computer = self._compute_changes(matches, pymodule)
result = computer.get_changed()
if result is not None:
imported_source = self._add_imports(resource, result,
self.imports)
changes.add_change(change.ChangeContents(resource,
imported_source))
job_set.finished_job()
return changes |
def from_xyz(x, y, z, alpha=1.0, wref=_DEFAULT_WREF):
"""Create a new instance based on the specifed CIE-XYZ values.
Parameters:
:x:
The Red component value [0...1]
:y:
The Green component value [0...1]
:z:
The Blue component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137)
Color(1.0, 0.5, 0.0, 1.0)
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137, 0.5)
Color(1.0, 0.5, 0.0, 0.5)
"""
return Color(xyz_to_rgb(x, y, z), 'rgb', alpha, wref) | Create a new instance based on the specifed CIE-XYZ values.
Parameters:
:x:
The Red component value [0...1]
:y:
The Green component value [0...1]
:z:
The Blue component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137)
Color(1.0, 0.5, 0.0, 1.0)
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137, 0.5)
    Color(1.0, 0.5, 0.0, 0.5) | Below is the instruction that describes the task:
### Input:
Create a new instance based on the specifed CIE-XYZ values.
Parameters:
:x:
The Red component value [0...1]
:y:
The Green component value [0...1]
:z:
The Blue component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137)
Color(1.0, 0.5, 0.0, 1.0)
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137, 0.5)
Color(1.0, 0.5, 0.0, 0.5)
### Response:
def from_xyz(x, y, z, alpha=1.0, wref=_DEFAULT_WREF):
"""Create a new instance based on the specifed CIE-XYZ values.
Parameters:
:x:
The Red component value [0...1]
:y:
The Green component value [0...1]
:z:
The Blue component value [0...1]
:alpha:
The color transparency [0...1], default is opaque
:wref:
The whitepoint reference, default is 2° D65.
Returns:
A grapefruit.Color instance.
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137)
Color(1.0, 0.5, 0.0, 1.0)
>>> Color.from_xyz(0.488941, 0.365682, 0.0448137, 0.5)
Color(1.0, 0.5, 0.0, 0.5)
"""
return Color(xyz_to_rgb(x, y, z), 'rgb', alpha, wref) |
def read(self):
"Connect to the feedback service and read all data."
log.msg('APNSService read (connecting)')
try:
server, port = ((FEEDBACK_SERVER_SANDBOX_HOSTNAME
if self.environment == 'sandbox'
else FEEDBACK_SERVER_HOSTNAME), FEEDBACK_SERVER_PORT)
factory = self.feedbackProtocolFactory()
context = self.getContextFactory()
reactor.connectSSL(server, port, factory, context)
factory.deferred.addErrback(log_errback('apns-feedback-read'))
timeout = reactor.callLater(self.timeout,
lambda: factory.deferred.called or factory.deferred.errback(
Exception('Feedbcak fetch timed out after %i seconds' % self.timeout)))
def cancel_timeout(r):
try: timeout.cancel()
except: pass
return r
factory.deferred.addBoth(cancel_timeout)
except Exception, e:
log.err('APNService feedback error initializing: %s' % str(e))
raise
  return factory.deferred | Connect to the feedback service and read all data. | Below is the instruction that describes the task:
### Input:
Connect to the feedback service and read all data.
### Response:
def read(self):
"Connect to the feedback service and read all data."
log.msg('APNSService read (connecting)')
try:
server, port = ((FEEDBACK_SERVER_SANDBOX_HOSTNAME
if self.environment == 'sandbox'
else FEEDBACK_SERVER_HOSTNAME), FEEDBACK_SERVER_PORT)
factory = self.feedbackProtocolFactory()
context = self.getContextFactory()
reactor.connectSSL(server, port, factory, context)
factory.deferred.addErrback(log_errback('apns-feedback-read'))
timeout = reactor.callLater(self.timeout,
lambda: factory.deferred.called or factory.deferred.errback(
Exception('Feedbcak fetch timed out after %i seconds' % self.timeout)))
def cancel_timeout(r):
try: timeout.cancel()
except: pass
return r
factory.deferred.addBoth(cancel_timeout)
except Exception, e:
log.err('APNService feedback error initializing: %s' % str(e))
raise
return factory.deferred |
def wait_for_tasks(self, tasks):
"""Given the service instance si and tasks, it returns after all the
tasks are complete
"""
property_collector = self.service_instance.RetrieveContent().propertyCollector
task_list = [str(task) for task in tasks]
# Create filter
obj_specs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
property_spec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task,
pathSet=[],
all=True)
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = obj_specs
filter_spec.propSet = [property_spec]
pcfilter = property_collector.CreateFilter(filter_spec, True)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(task_list):
update = property_collector.WaitForUpdates(version)
for filter_set in update.filterSet:
for obj_set in filter_set.objectSet:
task = obj_set.obj
for change in obj_set.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
if not str(task) in task_list:
continue
if state == vim.TaskInfo.State.success:
# Remove task from taskList
task_list.remove(str(task))
elif state == vim.TaskInfo.State.error:
raise task.info.error
# Move to next version
version = update.version
finally:
if pcfilter:
pcfilter.Destroy() | Given the service instance si and tasks, it returns after all the
    tasks are complete | Below is the instruction that describes the task:
### Input:
Given the service instance si and tasks, it returns after all the
tasks are complete
### Response:
def wait_for_tasks(self, tasks):
"""Given the service instance si and tasks, it returns after all the
tasks are complete
"""
property_collector = self.service_instance.RetrieveContent().propertyCollector
task_list = [str(task) for task in tasks]
# Create filter
obj_specs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
property_spec = vmodl.query.PropertyCollector.PropertySpec(type=vim.Task,
pathSet=[],
all=True)
filter_spec = vmodl.query.PropertyCollector.FilterSpec()
filter_spec.objectSet = obj_specs
filter_spec.propSet = [property_spec]
pcfilter = property_collector.CreateFilter(filter_spec, True)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(task_list):
update = property_collector.WaitForUpdates(version)
for filter_set in update.filterSet:
for obj_set in filter_set.objectSet:
task = obj_set.obj
for change in obj_set.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
if not str(task) in task_list:
continue
if state == vim.TaskInfo.State.success:
# Remove task from taskList
task_list.remove(str(task))
elif state == vim.TaskInfo.State.error:
raise task.info.error
# Move to next version
version = update.version
finally:
if pcfilter:
pcfilter.Destroy() |
def explorev2(self, datasource_type, datasource_id):
"""Deprecated endpoint, here for backward compatibility of urls"""
return redirect(url_for(
'Superset.explore',
datasource_type=datasource_type,
datasource_id=datasource_id,
        **request.args)) | Deprecated endpoint, here for backward compatibility of urls | Below is the instruction that describes the task:
### Input:
Deprecated endpoint, here for backward compatibility of urls
### Response:
def explorev2(self, datasource_type, datasource_id):
"""Deprecated endpoint, here for backward compatibility of urls"""
return redirect(url_for(
'Superset.explore',
datasource_type=datasource_type,
datasource_id=datasource_id,
**request.args)) |
def stop_trace(self, frame=None):
"""Stop tracing from here"""
self.tracing = False
self.full = False
frame = frame or sys._getframe().f_back
while frame:
del frame.f_trace
frame = frame.f_back
sys.settrace(None)
        log.info('Stopping trace') | Stop tracing from here | Below is the instruction that describes the task:
### Input:
Stop tracing from here
### Response:
def stop_trace(self, frame=None):
"""Stop tracing from here"""
self.tracing = False
self.full = False
frame = frame or sys._getframe().f_back
while frame:
del frame.f_trace
frame = frame.f_back
sys.settrace(None)
log.info('Stopping trace') |
def _rebuffer(self):
"""
(very internal) refill the repeat buffer
"""
results = []
exceptions = []
for i in xrange(self.stride):
try:
results.append(self.iterable.next())
exceptions.append(False)
except Exception, excp:
results.append(excp)
exceptions.append(True)
        self._repeat_buffer = repeat((results, exceptions), self.n) | (very internal) refill the repeat buffer | Below is the instruction that describes the task:
### Input:
(very internal) refill the repeat buffer
### Response:
def _rebuffer(self):
"""
(very internal) refill the repeat buffer
"""
results = []
exceptions = []
for i in xrange(self.stride):
try:
results.append(self.iterable.next())
exceptions.append(False)
except Exception, excp:
results.append(excp)
exceptions.append(True)
self._repeat_buffer = repeat((results, exceptions), self.n) |
def commit(self):
""" commit the injections desired, overwriting any previous injections in the file. """
self.logger.debug("Starting injections...")
self.logger.debug("Injections dict is:")
self.logger.debug(self.inject_dict)
self.logger.debug("Clear list is:")
self.logger.debug(self.clear_set)
for filename, content in self.inject_dict.items():
content = _unicode(content)
self.logger.debug("Injecting values into %s..." % filename)
self.destructive_inject(filename, content)
for filename in self.clear_set:
self.logger.debug("Clearing injection from %s..." % filename)
            self.destructive_clear(filename) | commit the injections desired, overwriting any previous injections in the file. | Below is the instruction that describes the task:
### Input:
commit the injections desired, overwriting any previous injections in the file.
### Response:
def commit(self):
    """Apply every queued injection, replacing any prior injections in each file."""
    self.logger.debug("Starting injections...")
    self.logger.debug("Injections dict is:")
    self.logger.debug(self.inject_dict)
    self.logger.debug("Clear list is:")
    self.logger.debug(self.clear_set)
    for target, raw_content in self.inject_dict.items():
        decoded = _unicode(raw_content)
        self.logger.debug("Injecting values into %s..." % target)
        self.destructive_inject(target, decoded)
    for target in self.clear_set:
        self.logger.debug("Clearing injection from %s..." % target)
        self.destructive_clear(target)
def create(message_type, priority=0):
"""Create a ProtocolMessage."""
message = protobuf.ProtocolMessage()
message.type = message_type
message.priority = priority
return message | Create a ProtocolMessage. | Below is the instruction that describes the task:
### Input:
Create a ProtocolMessage.
### Response:
def create(message_type, priority=0):
    """Create a ProtocolMessage of the given type with an optional priority."""
    msg = protobuf.ProtocolMessage()
    msg.type = message_type
    msg.priority = priority
    return msg
def commitLine(self, line) :
"""Commits a line making it ready to be streamed to a file and saves the current buffer if needed. If no stream is active, raises a ValueError"""
if self.streamBuffer is None :
raise ValueError("Commit lines is only for when you are streaming to a file")
self.streamBuffer.append(line)
if len(self.streamBuffer) % self.writeRate == 0 :
for i in xrange(len(self.streamBuffer)) :
self.streamBuffer[i] = str(self.streamBuffer[i])
self.streamFile.write("%s\n" % ('\n'.join(self.streamBuffer)))
self.streamFile.flush()
self.streamBuffer = [] | Commits a line making it ready to be streamed to a file and saves the current buffer if needed. If no stream is active, raises a ValueError | Below is the instruction that describes the task:
### Input:
Commits a line making it ready to be streamed to a file and saves the current buffer if needed. If no stream is active, raises a ValueError
### Response:
def commitLine(self, line):
    """Queue ``line`` for streaming and flush the buffer when it is full.

    Lines are buffered and written to ``self.streamFile`` in batches of
    ``self.writeRate``; each buffered entry is stringified just before
    writing.

    :raises ValueError: if no stream is active (``streamBuffer`` is None).
    """
    if self.streamBuffer is None:
        raise ValueError("Commit lines is only for when you are streaming to a file")
    self.streamBuffer.append(line)
    if len(self.streamBuffer) % self.writeRate == 0:
        # ``xrange`` was Python-2-only and raises NameError on Python 3;
        # stringify lazily while joining instead of mutating the buffer.
        self.streamFile.write("%s\n" % ('\n'.join(str(entry) for entry in self.streamBuffer)))
        self.streamFile.flush()
        self.streamBuffer = []
def querypath(self, block, path):
"""An XPath-like interface to `query`."""
class BadPath(Exception):
"""Bad path exception thrown when path cannot be found."""
pass
results = self.query(block)
ROOT, SEP, WORD, FINAL = six.moves.range(4) # pylint: disable=C0103
state = ROOT
lexer = RegexLexer(
("dotdot", r"\.\."),
("dot", r"\."),
("slashslash", r"//"),
("slash", r"/"),
("atword", r"@\w+"),
("word", r"\w+"),
("err", r"."),
)
for tokname, toktext in lexer.lex(path):
if state == FINAL:
# Shouldn't be any tokens after a last token.
raise BadPath()
if tokname == "dotdot":
# .. (parent)
if state == WORD:
raise BadPath()
results = results.parent()
state = WORD
elif tokname == "dot":
# . (current node)
if state == WORD:
raise BadPath()
state = WORD
elif tokname == "slashslash":
# // (descendants)
if state == SEP:
raise BadPath()
if state == ROOT:
raise NotImplementedError()
results = results.descendants()
state = SEP
elif tokname == "slash":
# / (here)
if state == SEP:
raise BadPath()
if state == ROOT:
raise NotImplementedError()
state = SEP
elif tokname == "atword":
# @xxx (attribute access)
if state != SEP:
raise BadPath()
results = results.attr(toktext[1:])
state = FINAL
elif tokname == "word":
# xxx (tag selection)
if state != SEP:
raise BadPath()
results = results.children().tagged(toktext)
state = WORD
else:
raise BadPath("Invalid thing: %r" % toktext)
return results | An XPath-like interface to `query`. | Below is the instruction that describes the task:
### Input:
An XPath-like interface to `query`.
### Response:
def querypath(self, block, path):
    """An XPath-like interface to `query`.

    Supported tokens: ``.`` (current node), ``..`` (parent), ``/``
    (children), ``//`` (descendants), ``name`` (select children with
    that tag) and ``@name`` (attribute access; must be the final
    token).  Malformed paths raise the internal ``BadPath`` error.
    """
    class BadPath(Exception):
        """Bad path exception thrown when path cannot be found."""
        pass
    results = self.query(block)
    # Tiny state machine driven by the lexed path tokens.
    ROOT, SEP, WORD, FINAL = six.moves.range(4)  # pylint: disable=C0103
    state = ROOT
    # Order matters: longer tokens ("..", "//") must be tried before
    # their one-character prefixes; "err" catches anything else.
    lexer = RegexLexer(
        ("dotdot", r"\.\."),
        ("dot", r"\."),
        ("slashslash", r"//"),
        ("slash", r"/"),
        ("atword", r"@\w+"),
        ("word", r"\w+"),
        ("err", r"."),
    )
    for tokname, toktext in lexer.lex(path):
        if state == FINAL:
            # Shouldn't be any tokens after a last token.
            raise BadPath()
        if tokname == "dotdot":
            # .. (parent)
            if state == WORD:
                raise BadPath()
            results = results.parent()
            state = WORD
        elif tokname == "dot":
            # . (current node)
            if state == WORD:
                raise BadPath()
            state = WORD
        elif tokname == "slashslash":
            # // (descendants)
            if state == SEP:
                raise BadPath()
            if state == ROOT:
                raise NotImplementedError()
            results = results.descendants()
            state = SEP
        elif tokname == "slash":
            # / (here)
            if state == SEP:
                raise BadPath()
            if state == ROOT:
                raise NotImplementedError()
            state = SEP
        elif tokname == "atword":
            # @xxx (attribute access)
            if state != SEP:
                raise BadPath()
            results = results.attr(toktext[1:])
            state = FINAL
        elif tokname == "word":
            # xxx (tag selection)
            if state != SEP:
                raise BadPath()
            results = results.children().tagged(toktext)
            state = WORD
        else:
            raise BadPath("Invalid thing: %r" % toktext)
    return results
def get_domain_home_from_url(url):
""" parse url for domain and homepage
:param url: the req url
:type url: str
:return: (homepage, domain)
:rtype:
"""
p = parse.urlparse(url)
if p.netloc:
return '{}://{}'.format(p.scheme, p.netloc), p.netloc
else:
return '', '' | parse url for domain and homepage
:param url: the req url
:type url: str
:return: (homepage, domain)
:rtype: | Below is the instruction that describes the task:
### Input:
parse url for domain and homepage
:param url: the req url
:type url: str
:return: (homepage, domain)
:rtype:
### Response:
def get_domain_home_from_url(url):
    """Split a URL into its homepage and domain.

    :param url: the request URL
    :type url: str
    :return: ``(homepage, domain)``; both empty strings when the URL
        carries no network location.
    :rtype: tuple
    """
    parsed = parse.urlparse(url)
    if not parsed.netloc:
        return '', ''
    return '{}://{}'.format(parsed.scheme, parsed.netloc), parsed.netloc
def input_to_fastq(
self, input_file, sample_name,
paired_end, fastq_folder, output_file=None, multiclass=False):
"""
Builds a command to convert input file to fastq, for various inputs.
Takes either .bam, .fastq.gz, or .fastq input and returns
commands that will create the .fastq file, regardless of input type.
This is useful to made your pipeline easily accept any of these input
types seamlessly, standardizing you to the fastq which is still the
most common format for adapter trimmers, etc.
It will place the output fastq file in given `fastq_folder`.
:param str input_file: filename of input you want to convert to fastq
:return str: A command (to be run with PipelineManager) that will ensure
your fastq file exists.
"""
fastq_prefix = os.path.join(fastq_folder, sample_name)
self.make_sure_path_exists(fastq_folder)
# this expects a list; if it gets a string, convert it to a list.
if type(input_file) != list:
input_file = [input_file]
if len(input_file) > 1:
cmd = []
output_file = []
for in_i, in_arg in enumerate(input_file):
output = fastq_prefix + "_R" + str(in_i + 1) + ".fastq"
result_cmd, uf, result_file = \
self.input_to_fastq(in_arg, sample_name, paired_end,
fastq_folder, output, multiclass=True)
cmd.append(result_cmd)
output_file.append(result_file)
else:
# There was only 1 input class.
# Convert back into a string
input_file = input_file[0]
if not output_file:
output_file = fastq_prefix + "_R1.fastq"
input_ext = self.get_input_ext(input_file)
if input_ext == ".bam":
print("Found .bam file")
#cmd = self.bam_to_fastq(input_file, fastq_prefix, paired_end)
cmd, fq1, fq2 = self.bam_to_fastq_awk(input_file, fastq_prefix, paired_end)
# pm.run(cmd, output_file, follow=check_fastq)
elif input_ext == ".fastq.gz":
print("Found .fastq.gz file")
if paired_end and not multiclass:
# For paired-end reads in one fastq file, we must split the file into 2.
script_path = os.path.join(
self.tools.scripts_dir, "fastq_split.py")
cmd = self.tools.python + " -u " + script_path
cmd += " -i " + input_file
cmd += " -o " + fastq_prefix
# Must also return the set of output files
output_file = [fastq_prefix + "_R1.fastq", fastq_prefix + "_R2.fastq"]
else:
# For single-end reads, we just unzip the fastq.gz file.
# or, paired-end reads that were already split.
cmd = self.ziptool + " -d -c " + input_file + " > " + output_file
# a non-shell version
# cmd1 = "gunzip --force " + input_file
# cmd2 = "mv " + os.path.splitext(input_file)[0] + " " + output_file
# cmd = [cmd1, cmd2]
elif input_ext == ".fastq":
cmd = "ln -sf " + input_file + " " + output_file
print("Found .fastq file; no conversion necessary")
return [cmd, fastq_prefix, output_file] | Builds a command to convert input file to fastq, for various inputs.
Takes either .bam, .fastq.gz, or .fastq input and returns
commands that will create the .fastq file, regardless of input type.
This is useful to made your pipeline easily accept any of these input
types seamlessly, standardizing you to the fastq which is still the
most common format for adapter trimmers, etc.
It will place the output fastq file in given `fastq_folder`.
:param str input_file: filename of input you want to convert to fastq
:return str: A command (to be run with PipelineManager) that will ensure
your fastq file exists. | Below is the instruction that describes the task:
### Input:
Builds a command to convert input file to fastq, for various inputs.
Takes either .bam, .fastq.gz, or .fastq input and returns
commands that will create the .fastq file, regardless of input type.
This is useful to made your pipeline easily accept any of these input
types seamlessly, standardizing you to the fastq which is still the
most common format for adapter trimmers, etc.
It will place the output fastq file in given `fastq_folder`.
:param str input_file: filename of input you want to convert to fastq
:return str: A command (to be run with PipelineManager) that will ensure
your fastq file exists.
### Response:
def input_to_fastq(
        self, input_file, sample_name,
        paired_end, fastq_folder, output_file=None, multiclass=False):
    """
    Builds a command to convert input file to fastq, for various inputs.

    Takes either .bam, .fastq.gz, or .fastq input and returns
    commands that will create the .fastq file, regardless of input type.
    This is useful to make your pipeline easily accept any of these input
    types seamlessly, standardizing you to the fastq which is still the
    most common format for adapter trimmers, etc.
    It will place the output fastq file in given `fastq_folder`.

    :param str | list input_file: filename(s) of input you want to
        convert to fastq; a list triggers one recursive call per entry
    :param str sample_name: basename used for the output fastq prefix
    :param bool paired_end: whether the reads are paired-end
    :param str fastq_folder: folder to place the output fastq file(s) in
    :param str output_file: explicit output path; derived from the
        prefix when omitted
    :param bool multiclass: internal flag set on recursive calls so a
        .fastq.gz entry of a multi-input list is not re-split
    :return str: A command (to be run with PipelineManager) that will ensure
        your fastq file exists.
    """
    fastq_prefix = os.path.join(fastq_folder, sample_name)
    self.make_sure_path_exists(fastq_folder)
    # this expects a list; if it gets a string, convert it to a list.
    if type(input_file) != list:
        input_file = [input_file]
    if len(input_file) > 1:
        # Multiple inputs: recurse once per entry, collecting the
        # per-entry commands and output files into parallel lists.
        cmd = []
        output_file = []
        for in_i, in_arg in enumerate(input_file):
            output = fastq_prefix + "_R" + str(in_i + 1) + ".fastq"
            result_cmd, uf, result_file = \
                self.input_to_fastq(in_arg, sample_name, paired_end,
                                    fastq_folder, output, multiclass=True)
            cmd.append(result_cmd)
            output_file.append(result_file)
    else:
        # There was only 1 input class.
        # Convert back into a string
        input_file = input_file[0]
        if not output_file:
            output_file = fastq_prefix + "_R1.fastq"
        input_ext = self.get_input_ext(input_file)
        if input_ext == ".bam":
            print("Found .bam file")
            #cmd = self.bam_to_fastq(input_file, fastq_prefix, paired_end)
            # NOTE(review): fq1/fq2 are unused here — TODO confirm they
            # are intentionally discarded.
            cmd, fq1, fq2 = self.bam_to_fastq_awk(input_file, fastq_prefix, paired_end)
            # pm.run(cmd, output_file, follow=check_fastq)
        elif input_ext == ".fastq.gz":
            print("Found .fastq.gz file")
            if paired_end and not multiclass:
                # For paired-end reads in one fastq file, we must split the file into 2.
                script_path = os.path.join(
                    self.tools.scripts_dir, "fastq_split.py")
                cmd = self.tools.python + " -u " + script_path
                cmd += " -i " + input_file
                cmd += " -o " + fastq_prefix
                # Must also return the set of output files
                output_file = [fastq_prefix + "_R1.fastq", fastq_prefix + "_R2.fastq"]
            else:
                # For single-end reads, we just unzip the fastq.gz file.
                # or, paired-end reads that were already split.
                cmd = self.ziptool + " -d -c " + input_file + " > " + output_file
                # a non-shell version
                # cmd1 = "gunzip --force " + input_file
                # cmd2 = "mv " + os.path.splitext(input_file)[0] + " " + output_file
                # cmd = [cmd1, cmd2]
        elif input_ext == ".fastq":
            cmd = "ln -sf " + input_file + " " + output_file
            print("Found .fastq file; no conversion necessary")
    # NOTE(review): if the extension matched none of the branches above,
    # ``cmd`` is never bound and this return raises NameError — presumably
    # get_input_ext guarantees one of .bam/.fastq.gz/.fastq; confirm.
    return [cmd, fastq_prefix, output_file]
def to_header(self, span_context):
"""Convert a SpanContext object to header string.
:type span_context:
:class:`~opencensus.trace.span_context.SpanContext`
:param span_context: SpanContext object.
:rtype: str
:returns: A trace context header string in google cloud format.
"""
trace_id = span_context.trace_id
span_id = span_context.span_id
trace_options = span_context.trace_options.trace_options_byte
header = '{}/{};o={}'.format(
trace_id,
span_id,
int(trace_options))
return header | Convert a SpanContext object to header string.
:type span_context:
:class:`~opencensus.trace.span_context.SpanContext`
:param span_context: SpanContext object.
:rtype: str
:returns: A trace context header string in google cloud format. | Below is the instruction that describes the task:
### Input:
Convert a SpanContext object to header string.
:type span_context:
:class:`~opencensus.trace.span_context.SpanContext`
:param span_context: SpanContext object.
:rtype: str
:returns: A trace context header string in google cloud format.
### Response:
def to_header(self, span_context):
    """Serialize a SpanContext into a google-cloud-format header string.

    :type span_context:
        :class:`~opencensus.trace.span_context.SpanContext`
    :param span_context: SpanContext object.
    :rtype: str
    :returns: A trace context header string in google cloud format.
    """
    enabled = int(span_context.trace_options.trace_options_byte)
    return '{}/{};o={}'.format(
        span_context.trace_id, span_context.span_id, enabled)
def get_text(revision, strip=True):
"""Extract the text from a revision.
Args:
revision: a string
strip: a boolean
Returns:
a string
"""
# text start tag looks like "<text ..otherstuff>"
start_pos = revision.find("<text")
assert start_pos != -1
end_tag_pos = revision.find(">", start_pos)
assert end_tag_pos != -1
end_tag_pos += len(">")
end_pos = revision.find("</text>")
if end_pos == -1:
ret = ""
else:
ret = revision[end_tag_pos:end_pos]
if strip:
ret = strip_text(ret)
ret = text_encoder.to_unicode_utf8(ret)
return ret | Extract the text from a revision.
Args:
revision: a string
strip: a boolean
Returns:
a string | Below is the instruction that describes the task:
### Input:
Extract the text from a revision.
Args:
revision: a string
strip: a boolean
Returns:
a string
### Response:
def get_text(revision, strip=True):
    """Extract the text from a revision.

    Args:
      revision: a string
      strip: a boolean
    Returns:
      a string
    """
    # The opening tag looks like "<text ...attrs>"; locate it, then skip
    # past its closing ">" to find where the body starts.
    open_tag = revision.find("<text")
    assert open_tag != -1
    body_start = revision.find(">", open_tag)
    assert body_start != -1
    body_start += len(">")
    body_end = revision.find("</text>")
    ret = "" if body_end == -1 else revision[body_start:body_end]
    if strip:
        ret = strip_text(ret)
    return text_encoder.to_unicode_utf8(ret)
def get_work_spec(self, work_spec_name):
'''Get the dictionary defining some work spec.'''
with self.registry.lock(identifier=self.worker_id) as session:
return session.get(WORK_SPECS, work_spec_name) | Get the dictionary defining some work spec. | Below is the instruction that describes the task:
### Input:
Get the dictionary defining some work spec.
### Response:
def get_work_spec(self, work_spec_name):
    '''Get the dictionary defining some work spec.

    Looks the name up in the shared ``WORK_SPECS`` registry table while
    holding the registry lock (identified by this worker), so the read
    is consistent with concurrent updates.

    :param str work_spec_name: name of the work spec to fetch
    :return: the work spec definition dict (presumably ``None`` when
        absent, mirroring ``session.get`` semantics — confirm)
    '''
    with self.registry.lock(identifier=self.worker_id) as session:
        return session.get(WORK_SPECS, work_spec_name)
def setCurrency( self, currency ):
"""
Sets the currency for this widget.
:param currency | <str>
"""
self._currency = currency
self.setValue(self.value()) | Sets the currency for this widget.
:param currency | <str> | Below is the instruction that describes the task:
### Input:
Sets the currency for this widget.
:param currency | <str>
### Response:
def setCurrency( self, currency ):
    """
    Sets the currency for this widget.

    :param currency | <str>
    """
    self._currency = currency
    # Re-assign the current value so the widget re-renders its display
    # using the new currency.
    self.setValue(self.value())
def top_sentences (kernel, path):
"""
determine distance for each sentence
"""
key_sent = {}
i = 0
if isinstance(path, str):
path = json_iter(path)
for meta in path:
graf = meta["graf"]
tagged_sent = [WordNode._make(x) for x in graf]
text = " ".join([w.raw for w in tagged_sent])
m_sent = mh_digest([str(w.word_id) for w in tagged_sent])
dist = sum([m_sent.jaccard(m) * rl.rank for rl, m in kernel])
key_sent[text] = (dist, i)
i += 1
for text, (dist, i) in sorted(key_sent.items(), key=lambda x: x[1][0], reverse=True):
yield SummarySent(dist=dist, idx=i, text=text) | determine distance for each sentence | Below is the instruction that describes the task:
### Input:
determine distance for each sentence
### Response:
def top_sentences(kernel, path):
    """Yield ``SummarySent`` records ranked by distance to the kernel.

    ``path`` may be a filename (iterated with ``json_iter``) or an
    iterable of pre-parsed "graf" metadata dicts.  The distance of a
    sentence is the rank-weighted sum of its MinHash Jaccard similarity
    to each kernel entry; sentences are emitted best-first.
    """
    if isinstance(path, str):
        path = json_iter(path)
    key_sent = {}
    for idx, meta in enumerate(path):
        tagged_sent = [WordNode._make(entry) for entry in meta["graf"]]
        text = " ".join([w.raw for w in tagged_sent])
        m_sent = mh_digest([str(w.word_id) for w in tagged_sent])
        dist = sum([m_sent.jaccard(m) * rl.rank for rl, m in kernel])
        key_sent[text] = (dist, idx)
    ranked = sorted(key_sent.items(), key=lambda item: item[1][0], reverse=True)
    for text, (dist, idx) in ranked:
        yield SummarySent(dist=dist, idx=idx, text=text)
def sjoiner(inbox, s=None, join=""):
"""
String joins input with indices in s.
Arguments:
- s(sequence) [default: ``None``] ``tuple`` or ``list`` of indices of the
elements which will be joined.
- join(``str``) [default: ``""``] String which will join the elements of
the inbox i.e. ``join.join()``.
"""
return join.join([input_ for i, input_ in enumerate(inbox) if i in s]) | String joins input with indices in s.
Arguments:
- s(sequence) [default: ``None``] ``tuple`` or ``list`` of indices of the
elements which will be joined.
- join(``str``) [default: ``""``] String which will join the elements of
the inbox i.e. ``join.join()``. | Below is the instruction that describes the task:
### Input:
String joins input with indices in s.
Arguments:
- s(sequence) [default: ``None``] ``tuple`` or ``list`` of indices of the
elements which will be joined.
- join(``str``) [default: ``""``] String which will join the elements of
the inbox i.e. ``join.join()``.
### Response:
def sjoiner(inbox, s=None, join=""):
    """
    String joins input with indices in s.

    Arguments:

      - s(sequence) [default: ``None``] ``tuple`` or ``list`` of indices of
        the elements which will be joined.  When ``None``, every element is
        joined (previously the declared default raised ``TypeError``).

      - join(``str``) [default: ``""``] String which will join the elements
        of the inbox i.e. ``join.join()``.
    """
    if s is None:
        # Make the documented default usable: no selection means "keep all".
        return join.join(inbox)
    # A set makes the per-index membership test O(1) for large selections.
    wanted = set(s)
    return join.join([item for i, item in enumerate(inbox) if i in wanted])
def ls(url='http://localhost:8080/manager', timeout=180):
'''
list all the deployed webapps
url : http://localhost:8080/manager
the URL of the server manager webapp
timeout : 180
timeout for HTTP request
CLI Examples:
.. code-block:: bash
salt '*' tomcat.ls
salt '*' tomcat.ls http://localhost:8080/manager
'''
ret = {}
data = _wget('list', '', url, timeout=timeout)
if data['res'] is False:
return {}
data['msg'].pop(0)
for line in data['msg']:
tmp = line.split(':')
ret[tmp[0]] = {
'mode': tmp[1],
'sessions': tmp[2],
'fullname': tmp[3],
'version': '',
}
sliced = tmp[3].split('##')
if len(sliced) > 1:
ret[tmp[0]]['version'] = sliced[1]
return ret | list all the deployed webapps
url : http://localhost:8080/manager
the URL of the server manager webapp
timeout : 180
timeout for HTTP request
CLI Examples:
.. code-block:: bash
salt '*' tomcat.ls
salt '*' tomcat.ls http://localhost:8080/manager | Below is the instruction that describes the task:
### Input:
list all the deployed webapps
url : http://localhost:8080/manager
the URL of the server manager webapp
timeout : 180
timeout for HTTP request
CLI Examples:
.. code-block:: bash
salt '*' tomcat.ls
salt '*' tomcat.ls http://localhost:8080/manager
### Response:
def ls(url='http://localhost:8080/manager', timeout=180):
    '''
    list all the deployed webapps

    url : http://localhost:8080/manager
        the URL of the server manager webapp
    timeout : 180
        timeout for HTTP request

    CLI Examples:

    .. code-block:: bash

        salt '*' tomcat.ls
        salt '*' tomcat.ls http://localhost:8080/manager
    '''
    data = _wget('list', '', url, timeout=timeout)
    if data['res'] is False:
        return {}
    # First line of the manager response is a status banner, not an app.
    data['msg'].pop(0)
    ret = {}
    for line in data['msg']:
        fields = line.split(':')
        entry = {
            'mode': fields[1],
            'sessions': fields[2],
            'fullname': fields[3],
            'version': '',
        }
        name_parts = fields[3].split('##')
        if len(name_parts) > 1:
            entry['version'] = name_parts[1]
        ret[fields[0]] = entry
    return ret
def get_zone():
'''
Get current timezone (i.e. America/Denver)
Returns:
str: Timezone in unix format
Raises:
CommandExecutionError: If timezone could not be gathered
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zone
'''
cmd = ['tzutil', '/g']
res = __salt__['cmd.run_all'](cmd, python_shell=False)
if res['retcode'] or not res['stdout']:
raise CommandExecutionError('tzutil encountered an error getting '
'timezone',
info=res)
return mapper.get_unix(res['stdout'].lower(), 'Unknown') | Get current timezone (i.e. America/Denver)
Returns:
str: Timezone in unix format
Raises:
CommandExecutionError: If timezone could not be gathered
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zone | Below is the instruction that describes the task:
### Input:
Get current timezone (i.e. America/Denver)
Returns:
str: Timezone in unix format
Raises:
CommandExecutionError: If timezone could not be gathered
CLI Example:
.. code-block:: bash
salt '*' timezone.get_zone
### Response:
def get_zone():
    '''
    Get current timezone (i.e. America/Denver)

    Returns:
        str: Timezone in unix format

    Raises:
        CommandExecutionError: If timezone could not be gathered

    CLI Example:

    .. code-block:: bash

        salt '*' timezone.get_zone
    '''
    res = __salt__['cmd.run_all'](['tzutil', '/g'], python_shell=False)
    if res['retcode'] or not res['stdout']:
        raise CommandExecutionError('tzutil encountered an error getting '
                                    'timezone',
                                    info=res)
    # tzutil reports a Windows timezone name; map it to the unix name.
    return mapper.get_unix(res['stdout'].lower(), 'Unknown')
def run_symmetrized_readout(self, program: Program, trials: int) -> np.ndarray:
"""
Run a quil program in such a way that the readout error is made collectively symmetric
This means the probability of a bitstring ``b`` being mistaken for a bitstring ``c`` is
the same as the probability of ``not(b)`` being mistaken for ``not(c)``
A more general symmetrization would guarantee that the probability of ``b`` being
mistaken for ``c`` depends only on which bit of ``c`` are different from ``b``. This
would require choosing random subsets of bits to flip.
In a noisy device, the probability of accurately reading the 0 state might be higher
than that of the 1 state. This makes correcting for readout more difficult. This
function runs the program normally ``(trials//2)`` times. The other half of the time,
it will insert an ``X`` gate prior to any ``MEASURE`` instruction and then flip the
measured classical bit back.
See :py:func:`run` for this function's parameter descriptions.
"""
flipped_program = _get_flipped_protoquil_program(program)
if trials % 2 != 0:
raise ValueError("Using symmetrized measurement functionality requires that you "
"take an even number of trials.")
half_trials = trials // 2
flipped_program = flipped_program.wrap_in_numshots_loop(shots=half_trials)
flipped_executable = self.compile(flipped_program)
executable = self.compile(program.wrap_in_numshots_loop(half_trials))
samples = self.run(executable)
flipped_samples = self.run(flipped_executable)
double_flipped_samples = np.logical_not(flipped_samples).astype(int)
results = np.concatenate((samples, double_flipped_samples), axis=0)
np.random.shuffle(results)
return results | Run a quil program in such a way that the readout error is made collectively symmetric
This means the probability of a bitstring ``b`` being mistaken for a bitstring ``c`` is
the same as the probability of ``not(b)`` being mistaken for ``not(c)``
A more general symmetrization would guarantee that the probability of ``b`` being
mistaken for ``c`` depends only on which bit of ``c`` are different from ``b``. This
would require choosing random subsets of bits to flip.
In a noisy device, the probability of accurately reading the 0 state might be higher
than that of the 1 state. This makes correcting for readout more difficult. This
function runs the program normally ``(trials//2)`` times. The other half of the time,
it will insert an ``X`` gate prior to any ``MEASURE`` instruction and then flip the
measured classical bit back.
See :py:func:`run` for this function's parameter descriptions. | Below is the instruction that describes the task:
### Input:
Run a quil program in such a way that the readout error is made collectively symmetric
This means the probability of a bitstring ``b`` being mistaken for a bitstring ``c`` is
the same as the probability of ``not(b)`` being mistaken for ``not(c)``
A more general symmetrization would guarantee that the probability of ``b`` being
mistaken for ``c`` depends only on which bit of ``c`` are different from ``b``. This
would require choosing random subsets of bits to flip.
In a noisy device, the probability of accurately reading the 0 state might be higher
than that of the 1 state. This makes correcting for readout more difficult. This
function runs the program normally ``(trials//2)`` times. The other half of the time,
it will insert an ``X`` gate prior to any ``MEASURE`` instruction and then flip the
measured classical bit back.
See :py:func:`run` for this function's parameter descriptions.
### Response:
def run_symmetrized_readout(self, program: Program, trials: int) -> np.ndarray:
    """Run ``program`` with collectively symmetrized readout error.

    Half the shots execute the program as-is; the other half execute a
    copy with an ``X`` inserted before each ``MEASURE``, whose measured
    bits are then flipped back.  This makes the probability of reading
    bitstring ``b`` as ``c`` equal to that of ``not(b)`` as ``not(c)``,
    which simplifies correcting for readout error.

    See :py:func:`run` for this function's parameter descriptions.
    """
    flipped = _get_flipped_protoquil_program(program)
    if trials % 2 != 0:
        raise ValueError("Using symmetrized measurement functionality requires that you "
                         "take an even number of trials.")
    shots = trials // 2
    flipped_executable = self.compile(flipped.wrap_in_numshots_loop(shots=shots))
    plain_executable = self.compile(program.wrap_in_numshots_loop(shots))
    plain_samples = self.run(plain_executable)
    # Undo the inserted X gates by flipping the measured bits back.
    unflipped_samples = np.logical_not(self.run(flipped_executable)).astype(int)
    combined = np.concatenate((plain_samples, unflipped_samples), axis=0)
    np.random.shuffle(combined)
    return combined
def execute(self, target, source, env, executor=None):
"""Execute a command action.
This will handle lists of commands as well as individual commands,
because construction variable substitution may turn a single
"command" into a list. This means that this class can actually
handle lists of commands, even though that's not how we use it
externally.
"""
escape_list = SCons.Subst.escape_list
flatten_sequence = SCons.Util.flatten_sequence
try:
shell = env['SHELL']
except KeyError:
raise SCons.Errors.UserError('Missing SHELL construction variable.')
try:
spawn = env['SPAWN']
except KeyError:
raise SCons.Errors.UserError('Missing SPAWN construction variable.')
else:
if is_String(spawn):
spawn = env.subst(spawn, raw=1, conv=lambda x: x)
escape = env.get('ESCAPE', lambda x: x)
ENV = get_default_ENV(env)
# Ensure that the ENV values are all strings:
for key, value in ENV.items():
if not is_String(value):
if is_List(value):
# If the value is a list, then we assume it is a
# path list, because that's a pretty common list-like
# value to stick in an environment variable:
value = flatten_sequence(value)
ENV[key] = os.pathsep.join(map(str, value))
else:
# If it isn't a string or a list, then we just coerce
# it to a string, which is the proper way to handle
# Dir and File instances and will produce something
# reasonable for just about everything else:
ENV[key] = str(value)
if executor:
target = executor.get_all_targets()
source = executor.get_all_sources()
cmd_list, ignore, silent = self.process(target, list(map(rfile, source)), env, executor)
# Use len() to filter out any "command" that's zero-length.
for cmd_line in filter(len, cmd_list):
# Escape the command line for the interpreter we are using.
cmd_line = escape_list(cmd_line, escape)
result = spawn(shell, escape, cmd_line[0], cmd_line, ENV)
if not ignore and result:
msg = "Error %s" % result
return SCons.Errors.BuildError(errstr=msg,
status=result,
action=self,
command=cmd_line)
return 0 | Execute a command action.
This will handle lists of commands as well as individual commands,
because construction variable substitution may turn a single
"command" into a list. This means that this class can actually
handle lists of commands, even though that's not how we use it
externally. | Below is the instruction that describes the task:
### Input:
Execute a command action.
This will handle lists of commands as well as individual commands,
because construction variable substitution may turn a single
"command" into a list. This means that this class can actually
handle lists of commands, even though that's not how we use it
externally.
### Response:
def execute(self, target, source, env, executor=None):
    """Execute a command action.

    This will handle lists of commands as well as individual commands,
    because construction variable substitution may turn a single
    "command" into a list. This means that this class can actually
    handle lists of commands, even though that's not how we use it
    externally.

    :param target: target node(s); replaced by the executor's targets
        when an executor is supplied
    :param source: source node(s); likewise replaced by the executor's
    :param env: construction environment providing SHELL/SPAWN/ESCAPE
    :param executor: optional executor whose targets/sources override
        the given ones
    :returns: 0 on success, or an ``SCons.Errors.BuildError`` for the
        first failing, non-ignored command
    :raises SCons.Errors.UserError: if SHELL or SPAWN is missing
    """
    escape_list = SCons.Subst.escape_list
    flatten_sequence = SCons.Util.flatten_sequence
    try:
        shell = env['SHELL']
    except KeyError:
        raise SCons.Errors.UserError('Missing SHELL construction variable.')
    try:
        spawn = env['SPAWN']
    except KeyError:
        raise SCons.Errors.UserError('Missing SPAWN construction variable.')
    else:
        # A string-valued SPAWN is substituted (but not converted) so it
        # can be resolved to a callable by the environment.
        if is_String(spawn):
            spawn = env.subst(spawn, raw=1, conv=lambda x: x)
    escape = env.get('ESCAPE', lambda x: x)
    ENV = get_default_ENV(env)
    # Ensure that the ENV values are all strings:
    for key, value in ENV.items():
        if not is_String(value):
            if is_List(value):
                # If the value is a list, then we assume it is a
                # path list, because that's a pretty common list-like
                # value to stick in an environment variable:
                value = flatten_sequence(value)
                ENV[key] = os.pathsep.join(map(str, value))
            else:
                # If it isn't a string or a list, then we just coerce
                # it to a string, which is the proper way to handle
                # Dir and File instances and will produce something
                # reasonable for just about everything else:
                ENV[key] = str(value)
    if executor:
        target = executor.get_all_targets()
        source = executor.get_all_sources()
    cmd_list, ignore, silent = self.process(target, list(map(rfile, source)), env, executor)
    # Use len() to filter out any "command" that's zero-length.
    for cmd_line in filter(len, cmd_list):
        # Escape the command line for the interpreter we are using.
        cmd_line = escape_list(cmd_line, escape)
        result = spawn(shell, escape, cmd_line[0], cmd_line, ENV)
        if not ignore and result:
            msg = "Error %s" % result
            return SCons.Errors.BuildError(errstr=msg,
                                           status=result,
                                           action=self,
                                           command=cmd_line)
    return 0
def diff_summary(self, old, new, rel_path=None):
"""
Provides a summarized output of a diff between two revisions
(file, change type, file type)
"""
full_url_or_path = self.__url_or_path
if rel_path is not None:
full_url_or_path += '/' + rel_path
result = self.run_command(
'diff',
['--old', '{0}@{1}'.format(full_url_or_path, old),
'--new', '{0}@{1}'.format(full_url_or_path, new),
'--summarize', '--xml'],
do_combine=True)
root = xml.etree.ElementTree.fromstring(result)
diff = []
for element in root.findall('paths/path'):
diff.append({
'path': element.text,
'item': element.attrib['item'],
'kind': element.attrib['kind']})
return diff | Provides a summarized output of a diff between two revisions
(file, change type, file type) | Below is the instruction that describes the task:
### Input:
Provides a summarized output of a diff between two revisions
(file, change type, file type)
### Response:
def diff_summary(self, old, new, rel_path=None):
    """Provide a summarized diff between two revisions.

    Runs ``svn diff --summarize --xml`` between revisions *old* and
    *new* and parses the XML output, so only the affected paths are
    reported (file, change type, file type) — no content hunks.

    Args:
        old: the older revision identifier.
        new: the newer revision identifier.
        rel_path: optional path, relative to this client's URL/path,
            restricting the diff to a sub-tree.

    Returns:
        A list of dicts, one per changed path, with keys ``path``
        (the changed path), ``item`` (the change type, e.g.
        ``modified``) and ``kind`` (the node type, e.g. ``file``).
    """
    full_url_or_path = self.__url_or_path
    if rel_path is not None:
        full_url_or_path += '/' + rel_path
    result = self.run_command(
        'diff',
        ['--old', '{0}@{1}'.format(full_url_or_path, old),
         '--new', '{0}@{1}'.format(full_url_or_path, new),
         '--summarize', '--xml'],
        do_combine=True)
    root = xml.etree.ElementTree.fromstring(result)
    diff = []
    for element in root.findall('paths/path'):
        diff.append({
            'path': element.text,
            'item': element.attrib['item'],
            'kind': element.attrib['kind']})
    return diff
def energy_upperbound(self, spins):
"""A formula for an upper bound on the energy of Theta with spins fixed.
Args:
spins (dict): Spin values for a subset of the variables in Theta.
Returns:
Formula that upper bounds the energy with spins fixed.
"""
subtheta = self.theta.copy()
subtheta.fix_variables(spins)
# ok, let's start eliminating variables
trees = self._trees
if not trees:
# if there are no variables to eliminate, then the offset of
# subtheta is the exact value and we can just return it
assert not subtheta.linear and not subtheta.quadratic
return subtheta.offset
energy = Plus(self.message_upperbound(trees, {}, subtheta), subtheta.offset)
return energy | A formula for an upper bound on the energy of Theta with spins fixed.
Args:
spins (dict): Spin values for a subset of the variables in Theta.
Returns:
Formula that upper bounds the energy with spins fixed. | Below is the instruction that describes the task:
### Input:
A formula for an upper bound on the energy of Theta with spins fixed.
Args:
spins (dict): Spin values for a subset of the variables in Theta.
Returns:
Formula that upper bounds the energy with spins fixed.
### Response:
def energy_upperbound(self, spins):
    """A formula for an upper bound on the energy of Theta with spins fixed.

    Args:
        spins (dict): Spin values for a subset of the variables in Theta.

    Returns:
        Formula that upper bounds the energy with spins fixed.
    """
    subtheta = self.theta.copy()
    subtheta.fix_variables(spins)

    # ok, let's start eliminating variables
    trees = self._trees

    if not trees:
        # if there are no variables to eliminate, then the offset of
        # subtheta is the exact value and we can just return it
        assert not subtheta.linear and not subtheta.quadratic
        return subtheta.offset

    # bound = message passed up from the elimination trees + the
    # constant offset of the reduced theta
    energy = Plus(self.message_upperbound(trees, {}, subtheta), subtheta.offset)

    return energy
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.