code stringlengths 75 104k | docstring stringlengths 1 46.9k | text stringlengths 164 112k |
|---|---|---|
async def first(self):
    """Return the first row of the result set, then close it unconditionally.

    Returns None when the result set is empty.
    """
    # A result set without metadata means there is nothing to fetch from.
    if self._metadata is None:
        self._non_result()
    try:
        row = await self.fetchone()
    finally:
        # Close no matter whether fetchone succeeded or raised.
        await self.close()
    return row
Returns None if no row is present. | Below is the instruction that describes the task:
### Input:
Fetch the first row and then close the result set unconditionally.
Returns None if no row is present.
### Response:
async def first(self):
"""Fetch the first row and then close the result set unconditionally.
Returns None if no row is present.
"""
if self._metadata is None:
self._non_result()
try:
return (await self.fetchone())
finally:
await self.close() |
def json_to_csv(json_input):
    '''
    Convert simple JSON to CSV.

    Accepts either a JSON string or an already-parsed JSON object
    (an iterable of flat dicts). Returns the CSV text: a header row
    built from the union of all row keys (sorted for deterministic
    column order), followed by one line per input row; missing keys
    are emitted as empty fields.
    '''
    # Local import keeps this block self-contained; the Python 2
    # ``StringIO`` module no longer exists under Python 3.
    from io import StringIO

    # Only attempt to parse actual JSON text; previously a bare
    # ``except: pass`` silently swallowed every error here.
    if isinstance(json_input, (str, bytes)):
        json_input = json.loads(json_input)
    # Union of keys across all rows; sorted so output is reproducible
    # (iterating a set gives an arbitrary column order).
    headers = sorted({key for row in json_input for key in row})
    csv_io = StringIO()
    csv_out = csv.DictWriter(csv_io, headers)
    csv_out.writeheader()
    csv_out.writerows(json_input)
    return csv_io.getvalue()
Accepts a JSON string or JSON object | Below is the instruction that describes the task:
### Input:
Convert simple JSON to CSV
Accepts a JSON string or JSON object
### Response:
def json_to_csv(json_input):
'''
Convert simple JSON to CSV
Accepts a JSON string or JSON object
'''
try:
json_input = json.loads(json_input)
except:
pass # If loads fails, it's probably already parsed
headers = set()
for json_row in json_input:
headers.update(json_row.keys())
csv_io = StringIO.StringIO()
csv_out = csv.DictWriter(csv_io,headers)
csv_out.writeheader()
for json_row in json_input:
csv_out.writerow(json_row)
csv_io.seek(0)
return csv_io.read() |
def save(self):
    """
    Delete the selected files from storage.

    Paths removed successfully are collected in ``self.success_files``;
    paths whose deletion raised :class:`OSError` go to ``self.error_files``.
    """
    storage = get_media_storage()
    for name in self.cleaned_data['selected_files']:
        resolved_path = storage.path(name)
        try:
            storage.delete(name)
        except OSError:
            self.error_files.append(resolved_path)
        else:
            self.success_files.append(resolved_path)
### Input:
Deletes the selected files from storage
### Response:
def save(self):
"""
Deletes the selected files from storage
"""
storage = get_media_storage()
for storage_name in self.cleaned_data['selected_files']:
full_path = storage.path(storage_name)
try:
storage.delete(storage_name)
self.success_files.append(full_path)
except OSError:
self.error_files.append(full_path) |
def disable_script(zap_helper, script_name):
    """Disable a script.

    Raises ZAPError when the ZAP API reports anything other than 'OK'.
    """
    with zap_error_handler():
        console.debug('Disabling script "{0}"'.format(script_name))
        status = zap_helper.zap.script.disable(script_name)
        if status != 'OK':
            raise ZAPError('Error disabling script: {0}'.format(status))
    console.info('Script "{0}" disabled'.format(script_name))
### Input:
Disable a script.
### Response:
def disable_script(zap_helper, script_name):
"""Disable a script."""
with zap_error_handler():
console.debug('Disabling script "{0}"'.format(script_name))
result = zap_helper.zap.script.disable(script_name)
if result != 'OK':
raise ZAPError('Error disabling script: {0}'.format(result))
console.info('Script "{0}" disabled'.format(script_name)) |
def turn_on(self):
    """Turn bulb on (full brightness)."""
    # The '100' field is the brightness level sent to the hub.
    cmd = "C {},,,,100,\r\n".format(self._zid)
    reply = self._hub.send_command(cmd)
    _LOGGER.debug("Turn on %s: %s", repr(cmd), reply)
    return reply
### Input:
Turn bulb on (full brightness).
### Response:
def turn_on(self):
"""Turn bulb on (full brightness)."""
command = "C {},,,,100,\r\n".format(self._zid)
response = self._hub.send_command(command)
_LOGGER.debug("Turn on %s: %s", repr(command), response)
return response |
def ashraeiam(self, aoi):
    """
    Determine the incidence angle modifier using
    ``self.module_parameters['b']``, ``aoi``,
    and the :py:func:`ashraeiam` function.

    Uses default arguments if keys not in module_parameters.

    Parameters
    ----------
    aoi : numeric
        The angle of incidence in degrees.

    Returns
    -------
    modifier : numeric
        The AOI modifier.
    """
    # Forward only the parameters the module actually declares; the
    # module-level ashraeiam() supplies defaults for the rest.
    iam_kwargs = _build_kwargs(['b'], self.module_parameters)
    return ashraeiam(aoi, **iam_kwargs)
``self.module_parameters['b']``, ``aoi``,
and the :py:func:`ashraeiam` function.
Uses default arguments if keys not in module_parameters.
Parameters
----------
aoi : numeric
The angle of incidence in degrees.
Returns
-------
modifier : numeric
The AOI modifier. | Below is the instruction that describes the task:
### Input:
Determine the incidence angle modifier using
``self.module_parameters['b']``, ``aoi``,
and the :py:func:`ashraeiam` function.
Uses default arguments if keys not in module_parameters.
Parameters
----------
aoi : numeric
The angle of incidence in degrees.
Returns
-------
modifier : numeric
The AOI modifier.
### Response:
def ashraeiam(self, aoi):
"""
Determine the incidence angle modifier using
``self.module_parameters['b']``, ``aoi``,
and the :py:func:`ashraeiam` function.
Uses default arguments if keys not in module_parameters.
Parameters
----------
aoi : numeric
The angle of incidence in degrees.
Returns
-------
modifier : numeric
The AOI modifier.
"""
kwargs = _build_kwargs(['b'], self.module_parameters)
return ashraeiam(aoi, **kwargs) |
def teetext(table, source=None, encoding=None, errors='strict', template=None,
            prologue=None, epilogue=None):
    """
    Return a table that writes rows to a text file as they are iterated over.

    :param template: row format string; required.
    :raises ValueError: if no ``template`` is given.
    """
    # An ``assert`` was used here before; asserts are stripped under
    # ``python -O`` so the required-argument check must be an explicit raise.
    if template is None:
        raise ValueError('template is required')
    return TeeTextView(table, source=source, encoding=encoding, errors=errors,
                       template=template, prologue=prologue, epilogue=epilogue)
### Input:
Return a table that writes rows to a text file as they are iterated over.
### Response:
def teetext(table, source=None, encoding=None, errors='strict', template=None,
prologue=None, epilogue=None):
"""
Return a table that writes rows to a text file as they are iterated over.
"""
assert template is not None, 'template is required'
return TeeTextView(table, source=source, encoding=encoding, errors=errors,
template=template, prologue=prologue, epilogue=epilogue) |
def plot_periodbase_lsp(lspinfo, outfile=None, plotdpi=100):
    '''Makes a plot of periodograms obtained from `periodbase` functions.

    This takes the output dict produced by any `astrobase.periodbase`
    period-finder function or a pickle filename containing such a dict and makes
    a periodogram plot.

    Parameters
    ----------
    lspinfo : dict or str
        If lspinfo is a dict, it must be a dict produced by an
        `astrobase.periodbase` period-finder function or a dict from your own
        period-finder function or routine that is of the form below with at
        least these keys::
            {'periods': np.array of all periods searched by the period-finder,
             'lspvals': np.array of periodogram power value for each period,
             'bestperiod': a float value that is the period with the highest
                           peak in the periodogram, i.e. the most-likely actual
                           period,
             'method': a three-letter code naming the period-finder used; must
                       be one of the keys in the `METHODLABELS` dict above,
             'nbestperiods': a list of the periods corresponding to periodogram
                             peaks (`nbestlspvals` below) to annotate on the
                             periodogram plot so they can be called out
                             visually,
             'nbestlspvals': a list of the power values associated with
                             periodogram peaks to annotate on the periodogram
                             plot so they can be called out visually; should be
                             the same length as `nbestperiods` above}
        If lspinfo is a str, then it must be a path to a pickle file that
        contains a dict of the form described above.
    outfile : str or None
        If this is a str, will write the periodogram plot to the file specified
        by this string. If this is None, will write to a file called
        'lsp-plot.png' in the current working directory.
    plotdpi : int
        Sets the resolution in DPI of the output periodogram plot PNG file.

    Returns
    -------
    str
        Absolute path to the periodogram plot file created.
    '''
    # get the lspinfo from a pickle file transparently
    if isinstance(lspinfo,str) and os.path.exists(lspinfo):
        LOGINFO('loading LSP info from pickle %s' % lspinfo)
        with open(lspinfo,'rb') as infd:
            lspinfo = pickle.load(infd)
    try:
        # get the things to plot out of the data
        # (KeyError here falls into the broad handler below)
        periods = lspinfo['periods']
        lspvals = lspinfo['lspvals']
        bestperiod = lspinfo['bestperiod']
        lspmethod = lspinfo['method']
        # make the LSP plot on the first subplot
        plt.plot(periods, lspvals)
        # periods span orders of magnitude, so use a log-scaled x axis
        plt.xscale('log',basex=10)
        plt.xlabel('Period [days]')
        plt.ylabel(PLOTYLABELS[lspmethod])
        plottitle = '%s best period: %.6f d' % (METHODSHORTLABELS[lspmethod],
                                                bestperiod)
        plt.title(plottitle)
        # show the best five peaks on the plot
        # NOTE(review): this actually annotates every entry of
        # 'nbestperiods', presumably five by convention -- confirm upstream.
        for bestperiod, bestpeak in zip(lspinfo['nbestperiods'],
                                        lspinfo['nbestlspvals']):
            plt.annotate('%.6f' % bestperiod,
                         xy=(bestperiod, bestpeak), xycoords='data',
                         xytext=(0.0,25.0), textcoords='offset points',
                         arrowprops=dict(arrowstyle="->"),fontsize='x-small')
        # make a grid
        plt.grid(color='#a9a9a9',
                 alpha=0.9,
                 zorder=0,
                 linewidth=1.0,
                 linestyle=':')
        # make the figure
        if outfile and isinstance(outfile, str):
            if outfile.endswith('.png'):
                plt.savefig(outfile,bbox_inches='tight',dpi=plotdpi)
            else:
                plt.savefig(outfile,bbox_inches='tight')
            plt.close()
            return os.path.abspath(outfile)
        elif dispok:
            # NOTE(review): ``dispok`` looks like a module-level flag for
            # whether an interactive display ($DISPLAY) is available --
            # confirm; returns None in this branch.
            plt.show()
            plt.close()
            return
        else:
            LOGWARNING('no output file specified and no $DISPLAY set, '
                       'saving to lsp-plot.png in current directory')
            outfile = 'lsp-plot.png'
            plt.savefig(outfile,bbox_inches='tight',dpi=plotdpi)
            plt.close()
            return os.path.abspath(outfile)
    except Exception as e:
        # broad catch by design: any missing key or plotting failure is
        # logged and swallowed, and the function returns None
        LOGEXCEPTION('could not plot this LSP, appears to be empty')
        return
This takes the output dict produced by any `astrobase.periodbase`
period-finder function or a pickle filename containing such a dict and makes
a periodogram plot.
Parameters
----------
lspinfo : dict or str
If lspinfo is a dict, it must be a dict produced by an
`astrobase.periodbase` period-finder function or a dict from your own
period-finder function or routine that is of the form below with at
least these keys::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the `METHODLABELS` dict above,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
If lspinfo is a str, then it must be a path to a pickle file that
contains a dict of the form described above.
outfile : str or None
If this is a str, will write the periodogram plot to the file specified
by this string. If this is None, will write to a file called
'lsp-plot.png' in the current working directory.
plotdpi : int
Sets the resolution in DPI of the output periodogram plot PNG file.
Returns
-------
str
Absolute path to the periodogram plot file created. | Below is the the instruction that describes the task:
### Input:
Makes a plot of periodograms obtained from `periodbase` functions.
This takes the output dict produced by any `astrobase.periodbase`
period-finder function or a pickle filename containing such a dict and makes
a periodogram plot.
Parameters
----------
lspinfo : dict or str
If lspinfo is a dict, it must be a dict produced by an
`astrobase.periodbase` period-finder function or a dict from your own
period-finder function or routine that is of the form below with at
least these keys::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the `METHODLABELS` dict above,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
If lspinfo is a str, then it must be a path to a pickle file that
contains a dict of the form described above.
outfile : str or None
If this is a str, will write the periodogram plot to the file specified
by this string. If this is None, will write to a file called
'lsp-plot.png' in the current working directory.
plotdpi : int
Sets the resolution in DPI of the output periodogram plot PNG file.
Returns
-------
str
Absolute path to the periodogram plot file created.
### Response:
def plot_periodbase_lsp(lspinfo, outfile=None, plotdpi=100):
'''Makes a plot of periodograms obtained from `periodbase` functions.
This takes the output dict produced by any `astrobase.periodbase`
period-finder function or a pickle filename containing such a dict and makes
a periodogram plot.
Parameters
----------
lspinfo : dict or str
If lspinfo is a dict, it must be a dict produced by an
`astrobase.periodbase` period-finder function or a dict from your own
period-finder function or routine that is of the form below with at
least these keys::
{'periods': np.array of all periods searched by the period-finder,
'lspvals': np.array of periodogram power value for each period,
'bestperiod': a float value that is the period with the highest
peak in the periodogram, i.e. the most-likely actual
period,
'method': a three-letter code naming the period-finder used; must
be one of the keys in the `METHODLABELS` dict above,
'nbestperiods': a list of the periods corresponding to periodogram
peaks (`nbestlspvals` below) to annotate on the
periodogram plot so they can be called out
visually,
'nbestlspvals': a list of the power values associated with
periodogram peaks to annotate on the periodogram
plot so they can be called out visually; should be
the same length as `nbestperiods` above}
If lspinfo is a str, then it must be a path to a pickle file that
contains a dict of the form described above.
outfile : str or None
If this is a str, will write the periodogram plot to the file specified
by this string. If this is None, will write to a file called
'lsp-plot.png' in the current working directory.
plotdpi : int
Sets the resolution in DPI of the output periodogram plot PNG file.
Returns
-------
str
Absolute path to the periodogram plot file created.
'''
# get the lspinfo from a pickle file transparently
if isinstance(lspinfo,str) and os.path.exists(lspinfo):
LOGINFO('loading LSP info from pickle %s' % lspinfo)
with open(lspinfo,'rb') as infd:
lspinfo = pickle.load(infd)
try:
# get the things to plot out of the data
periods = lspinfo['periods']
lspvals = lspinfo['lspvals']
bestperiod = lspinfo['bestperiod']
lspmethod = lspinfo['method']
# make the LSP plot on the first subplot
plt.plot(periods, lspvals)
plt.xscale('log',basex=10)
plt.xlabel('Period [days]')
plt.ylabel(PLOTYLABELS[lspmethod])
plottitle = '%s best period: %.6f d' % (METHODSHORTLABELS[lspmethod],
bestperiod)
plt.title(plottitle)
# show the best five peaks on the plot
for bestperiod, bestpeak in zip(lspinfo['nbestperiods'],
lspinfo['nbestlspvals']):
plt.annotate('%.6f' % bestperiod,
xy=(bestperiod, bestpeak), xycoords='data',
xytext=(0.0,25.0), textcoords='offset points',
arrowprops=dict(arrowstyle="->"),fontsize='x-small')
# make a grid
plt.grid(color='#a9a9a9',
alpha=0.9,
zorder=0,
linewidth=1.0,
linestyle=':')
# make the figure
if outfile and isinstance(outfile, str):
if outfile.endswith('.png'):
plt.savefig(outfile,bbox_inches='tight',dpi=plotdpi)
else:
plt.savefig(outfile,bbox_inches='tight')
plt.close()
return os.path.abspath(outfile)
elif dispok:
plt.show()
plt.close()
return
else:
LOGWARNING('no output file specified and no $DISPLAY set, '
'saving to lsp-plot.png in current directory')
outfile = 'lsp-plot.png'
plt.savefig(outfile,bbox_inches='tight',dpi=plotdpi)
plt.close()
return os.path.abspath(outfile)
except Exception as e:
LOGEXCEPTION('could not plot this LSP, appears to be empty')
return |
def feed_eof(self):
    """Send a potentially "ragged" EOF.

    This method will raise an SSL_ERROR_EOF exception if the EOF is
    unexpected.
    """
    self._incoming.write_eof()
    # Drain any remaining data; feed_ssldata raises on an unexpected EOF.
    _ssldata, appdata = self.feed_ssldata(b'')
    assert appdata in ([], [b''])
This method will raise an SSL_ERROR_EOF exception if the EOF is
unexpected. | Below is the the instruction that describes the task:
### Input:
Send a potentially "ragged" EOF.
This method will raise an SSL_ERROR_EOF exception if the EOF is
unexpected.
### Response:
def feed_eof(self):
"""Send a potentially "ragged" EOF.
This method will raise an SSL_ERROR_EOF exception if the EOF is
unexpected.
"""
self._incoming.write_eof()
ssldata, appdata = self.feed_ssldata(b'')
assert appdata == [] or appdata == [b''] |
def uniform_crossover(parents):
    """Perform uniform crossover on two parent chromosomes.

    At each gene position a parent is chosen at random; the first child
    receives that parent's gene and the second child receives the gene
    from the other parent.
    Ex. p1 = xxxxx, p2 = yyyyy, child = xyxxy
    """
    child_a = []
    child_b = []
    for pos in range(len(parents[0])):
        # One coin flip per position decides which parent donates to child A;
        # child B always gets the complementary gene.
        if random.randint(0, 1) == 0:
            donor_a, donor_b = parents[0], parents[1]
        else:
            donor_a, donor_b = parents[1], parents[0]
        child_a.append(donor_a[pos])
        child_b.append(donor_b[pos])
    return [child_a, child_b]
Randomly take genes from one parent or the other.
Ex. p1 = xxxxx, p2 = yyyyy, child = xyxxy | Below is the the instruction that describes the task:
### Input:
Perform uniform crossover on two parent chromosomes.
Randomly take genes from one parent or the other.
Ex. p1 = xxxxx, p2 = yyyyy, child = xyxxy
### Response:
def uniform_crossover(parents):
"""Perform uniform crossover on two parent chromosomes.
Randomly take genes from one parent or the other.
Ex. p1 = xxxxx, p2 = yyyyy, child = xyxxy
"""
chromosome_length = len(parents[0])
children = [[], []]
for i in range(chromosome_length):
selected_parent = random.randint(0, 1)
# Take from the selected parent, and add it to child 1
# Take from the other parent, and add it to child 2
children[0].append(parents[selected_parent][i])
children[1].append(parents[1 - selected_parent][i])
return children |
def async_(self, cbl, fun):
    """
    return a pair (pipe, process) so that the process writes
    `fun(a)` to the pipe for each element `a` in the iterable returned
    by the callable `cbl`.

    :param cbl: a function returning something iterable
    :type cbl: callable
    :param fun: an unary translation function
    :type fun: callable
    :rtype: (:class:`multiprocessing.Pipe`,
             :class:`multiprocessing.Process`)
    """
    # create two unix pipes to redirect the workers stdout and
    # stderr; index [0] is the read end, [1] the write end
    stdout = os.pipe()
    stderr = os.pipe()
    # create a multiprocessing pipe for the results
    pipe = multiprocessing.Pipe(False)
    receiver, sender = pipe
    # the worker inherits the write ends; this parent keeps the read ends
    process = FillPipeProcess(cbl(), stdout[1], stderr[1], pipe, fun)
    process.start()
    self.processes.append(process)
    logging.debug('Worker process %s spawned', process.pid)
    def threaded_wait():
        # wait(2) for the process to die
        process.join()
        if process.exitcode < 0:
            msg = 'received signal {0}'.format(-process.exitcode)
        elif process.exitcode > 0:
            msg = 'returned error code {0}'.format(process.exitcode)
        else:
            msg = 'exited successfully'
        logging.debug('Worker process %s %s', process.pid, msg)
        self.processes.remove(process)
    # XXX: it would be much nicer to run this as a coroutine than a thread,
    # except that this code is executed before the eventloop is started.
    #
    # spawn a thread to collect the worker process once it dies
    # preventing it from hanging around as zombie
    threading.Thread(target=threaded_wait).start()
    # TODO: avoid this if logging level > debug
    def threaded_reader(prefix, fd):
        with os.fdopen(fd) as handle:
            for line in handle:
                logging.debug('Worker process %s said on %s: %s',
                              process.pid, prefix, line.rstrip())
    # spawn two threads that read from the stdout and stderr pipes
    # and write anything that appears there to the log
    threading.Thread(target=threaded_reader,
                     args=('stdout', stdout[0])).start()
    # close the parent's copy of the write end so the reader thread
    # sees EOF once the worker exits
    os.close(stdout[1])
    threading.Thread(target=threaded_reader,
                     args=('stderr', stderr[0])).start()
    os.close(stderr[1])
    # closing the sending end in this (receiving) process guarantees
    # that here the appropriate EOFError is raised upon .recv in the walker
    sender.close()
    return receiver, process
`fun(a)` to the pipe for each element `a` in the iterable returned
by the callable `cbl`.
:param cbl: a function returning something iterable
:type cbl: callable
:param fun: an unary translation function
:type fun: callable
:rtype: (:class:`multiprocessing.Pipe`,
:class:`multiprocessing.Process`) | Below is the the instruction that describes the task:
### Input:
return a pair (pipe, process) so that the process writes
`fun(a)` to the pipe for each element `a` in the iterable returned
by the callable `cbl`.
:param cbl: a function returning something iterable
:type cbl: callable
:param fun: an unary translation function
:type fun: callable
:rtype: (:class:`multiprocessing.Pipe`,
:class:`multiprocessing.Process`)
### Response:
def async_(self, cbl, fun):
"""
return a pair (pipe, process) so that the process writes
`fun(a)` to the pipe for each element `a` in the iterable returned
by the callable `cbl`.
:param cbl: a function returning something iterable
:type cbl: callable
:param fun: an unary translation function
:type fun: callable
:rtype: (:class:`multiprocessing.Pipe`,
:class:`multiprocessing.Process`)
"""
# create two unix pipes to redirect the workers stdout and
# stderr
stdout = os.pipe()
stderr = os.pipe()
# create a multiprocessing pipe for the results
pipe = multiprocessing.Pipe(False)
receiver, sender = pipe
process = FillPipeProcess(cbl(), stdout[1], stderr[1], pipe, fun)
process.start()
self.processes.append(process)
logging.debug('Worker process %s spawned', process.pid)
def threaded_wait():
# wait(2) for the process to die
process.join()
if process.exitcode < 0:
msg = 'received signal {0}'.format(-process.exitcode)
elif process.exitcode > 0:
msg = 'returned error code {0}'.format(process.exitcode)
else:
msg = 'exited successfully'
logging.debug('Worker process %s %s', process.pid, msg)
self.processes.remove(process)
# XXX: it would be much nicer to run this as a coroutine than a thread,
# except that this code is executed before the eventloop is started.
#
# spawn a thread to collect the worker process once it dies
# preventing it from hanging around as zombie
threading.Thread(target=threaded_wait).start()
# TODO: avoid this if logging level > debug
def threaded_reader(prefix, fd):
with os.fdopen(fd) as handle:
for line in handle:
logging.debug('Worker process %s said on %s: %s',
process.pid, prefix, line.rstrip())
# spawn two threads that read from the stdout and stderr pipes
# and write anything that appears there to the log
threading.Thread(target=threaded_reader,
args=('stdout', stdout[0])).start()
os.close(stdout[1])
threading.Thread(target=threaded_reader,
args=('stderr', stderr[0])).start()
os.close(stderr[1])
# closing the sending end in this (receiving) process guarantees
# that here the appropriate EOFError is raised upon .recv in the walker
sender.close()
return receiver, process |
def round(cls, x: 'TensorFluent') -> 'TensorFluent':
    '''Returns a TensorFluent for the round function.

    Args:
        x: The input fluent.

    Returns:
        A TensorFluent wrapping the round function.
    '''
    # Delegate to the generic unary-op helper with a float32 output dtype.
    operation = tf.round
    return cls._unary_op(x, operation, tf.float32)
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the round function. | Below is the the instruction that describes the task:
### Input:
Returns a TensorFluent for the round function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the round function.
### Response:
def round(cls, x: 'TensorFluent') -> 'TensorFluent':
'''Returns a TensorFluent for the round function.
Args:
x: The input fluent.
Returns:
A TensorFluent wrapping the round function.
'''
return cls._unary_op(x, tf.round, tf.float32) |
def get_date_range(self, periodicity=PERIODICITY_WEEKLY):
    """Returns a date range (date from, date to) that suits with the passed
    in periodicity.

    :param periodicity: string that represents the periodicity
    :type periodicity: str
    :return: A date range
    :rtype: [(DateTime, DateTime)]
    """
    today = datetime.date.today()
    if periodicity == PERIODICITY_DAILY:
        # Daily, load last 30 days
        date_from = DateTime() - 30
        date_to = DateTime() + 1
        return date_from, date_to
    if periodicity == PERIODICITY_MONTHLY:
        # Monthly, load last 2 years
        min_year = today.year - 1 if today.month == 12 else today.year - 2
        min_month = 1 if today.month == 12 else today.month
        date_from = DateTime(min_year, min_month, 1)
        date_to = DateTime(today.year, today.month,
                           monthrange(today.year, today.month)[1],
                           23, 59, 59)
        return date_from, date_to
    if periodicity == PERIODICITY_QUARTERLY:
        # Quarterly, load last 4 years.
        # NOTE: use floor division so the start month stays an int under
        # Python 3; true division ("/") yields a float and would break the
        # DateTime constructor (identical result under Python 2 ints).
        m = (((today.month - 1) // 3) * 3) + 1
        min_year = today.year - 4 if today.month == 12 else today.year - 5
        date_from = DateTime(min_year, m, 1)
        date_to = DateTime(today.year, m + 2,
                           monthrange(today.year, m + 2)[1], 23, 59,
                           59)
        return date_from, date_to
    if periodicity == PERIODICITY_BIANNUAL:
        # Biannual, load last 10 years (same floor-division fix as above)
        m = (((today.month - 1) // 6) * 6) + 1
        min_year = today.year - 10 if today.month == 12 else today.year - 11
        date_from = DateTime(min_year, m, 1)
        date_to = DateTime(today.year, m + 5,
                           monthrange(today.year, m + 5)[1], 23, 59,
                           59)
        return date_from, date_to
    if periodicity in [PERIODICITY_YEARLY, PERIODICITY_ALL]:
        # Yearly or All time, load last 15 years
        min_year = today.year - 15 if today.month == 12 else today.year - 16
        date_from = DateTime(min_year, 1, 1)
        date_to = DateTime(today.year, 12, 31, 23, 59, 59)
        return date_from, date_to
    # Default Weekly, load last six months
    year, weeknum, dow = today.isocalendar()
    min_year = today.year if today.month > 6 else today.year - 1
    min_month = today.month - 6 if today.month > 6 \
        else (today.month - 6) + 12
    date_from = DateTime(min_year, min_month, 1)
    # dow is ISO day-of-week (1=Mon..7=Sun): snap the upper bound to the
    # end of the current ISO week
    date_to = DateTime() - dow + 7
    return date_from, date_to
in periodicity.
:param periodicity: string that represents the periodicity
:type periodicity: str
:return: A date range
:rtype: [(DateTime, DateTime)] | Below is the the instruction that describes the task:
### Input:
Returns a date range (date from, date to) that suits with the passed
in periodicity.
:param periodicity: string that represents the periodicity
:type periodicity: str
:return: A date range
:rtype: [(DateTime, DateTime)]
### Response:
def get_date_range(self, periodicity=PERIODICITY_WEEKLY):
"""Returns a date range (date from, date to) that suits with the passed
in periodicity.
:param periodicity: string that represents the periodicity
:type periodicity: str
:return: A date range
:rtype: [(DateTime, DateTime)]
"""
today = datetime.date.today()
if periodicity == PERIODICITY_DAILY:
# Daily, load last 30 days
date_from = DateTime() - 30
date_to = DateTime() + 1
return date_from, date_to
if periodicity == PERIODICITY_MONTHLY:
# Monthly, load last 2 years
min_year = today.year - 1 if today.month == 12 else today.year - 2
min_month = 1 if today.month == 12 else today.month
date_from = DateTime(min_year, min_month, 1)
date_to = DateTime(today.year, today.month,
monthrange(today.year, today.month)[1],
23, 59, 59)
return date_from, date_to
if periodicity == PERIODICITY_QUARTERLY:
# Quarterly, load last 4 years
m = (((today.month - 1) / 3) * 3) + 1
min_year = today.year - 4 if today.month == 12 else today.year - 5
date_from = DateTime(min_year, m, 1)
date_to = DateTime(today.year, m + 2,
monthrange(today.year, m + 2)[1], 23, 59,
59)
return date_from, date_to
if periodicity == PERIODICITY_BIANNUAL:
# Biannual, load last 10 years
m = (((today.month - 1) / 6) * 6) + 1
min_year = today.year - 10 if today.month == 12 else today.year - 11
date_from = DateTime(min_year, m, 1)
date_to = DateTime(today.year, m + 5,
monthrange(today.year, m + 5)[1], 23, 59,
59)
return date_from, date_to
if periodicity in [PERIODICITY_YEARLY, PERIODICITY_ALL]:
# Yearly or All time, load last 15 years
min_year = today.year - 15 if today.month == 12 else today.year - 16
date_from = DateTime(min_year, 1, 1)
date_to = DateTime(today.year, 12, 31, 23, 59, 59)
return date_from, date_to
# Default Weekly, load last six months
year, weeknum, dow = today.isocalendar()
min_year = today.year if today.month > 6 else today.year - 1
min_month = today.month - 6 if today.month > 6 \
else (today.month - 6) + 12
date_from = DateTime(min_year, min_month, 1)
date_to = DateTime() - dow + 7
return date_from, date_to |
def refactor_froms_to_imports(self, offset):
    """Converting imports of the form "from ..." to "import ..."."""
    organizer = ImportOrganizer(self.project)
    froms_changes = organizer.froms_to_imports(self.resource, offset)
    return translate_changes(froms_changes)
### Input:
Converting imports of the form "from ..." to "import ...".
### Response:
def refactor_froms_to_imports(self, offset):
"""Converting imports of the form "from ..." to "import ..."."""
refactor = ImportOrganizer(self.project)
changes = refactor.froms_to_imports(self.resource, offset)
return translate_changes(changes) |
def _run(self): # pylint: disable=method-hidden
    """ Runnable main method, perform wait on long-running subtasks """
    try:
        self.event_stop.wait()
    except gevent.GreenletExit: # killed without exception
        # set the stop event ourselves so subtasks also observe shutdown
        self.event_stop.set()
        gevent.killall(self.greenlets) # kill children
        raise # re-raise to keep killed status
    except Exception:
        # any other failure: run normal stop/cleanup, then propagate
        self.stop() # ensure cleanup and wait on subtasks
        raise
### Input:
Runnable main method, perform wait on long-running subtasks
### Response:
def _run(self): # pylint: disable=method-hidden
""" Runnable main method, perform wait on long-running subtasks """
try:
self.event_stop.wait()
except gevent.GreenletExit: # killed without exception
self.event_stop.set()
gevent.killall(self.greenlets) # kill children
raise # re-raise to keep killed status
except Exception:
self.stop() # ensure cleanup and wait on subtasks
raise |
def set_basic_auth(self, username, password):
    """Manually set basic auth in the header when normal method does not work."""
    # RFC 7617: the credential token is base64("user:password").
    token = b64encode('{}:{}'.format(username, password).encode('utf-8'))
    self.authorization = 'Basic {}'.format(str(token, 'utf-8'))
### Input:
Manually set basic auth in the header when normal method does not work.
### Response:
def set_basic_auth(self, username, password):
    """Explicitly build and store the HTTP Basic auth header value.

    Encodes ``username:password`` as base64 and assigns the resulting
    ``Basic <credentials>`` string to ``self.authorization``. Intended
    for cases where the normal authentication path does not work.
    """
    token = b64encode('{}:{}'.format(username, password).encode('utf-8')).decode('utf-8')
    self.authorization = 'Basic ' + token
def run(self):
""" Run the command with supplied configuration """
device = self.model_config.torch_device()
learner = api.Learner(device, self.model_factory.instantiate(), self.max_grad_norm)
optimizer = self.optimizer_factory.instantiate(learner.model)
# All callbacks used for learning
callbacks = self.gather_callbacks(optimizer)
# Metrics to track through this training
metrics = learner.metrics()
# Check if training was already started and potentially continue where we left off
training_info = self.resume_training(learner, callbacks, metrics)
training_info.on_train_begin()
if training_info.optimizer_initial_state:
optimizer.load_state_dict(training_info.optimizer_initial_state)
for global_epoch_idx in range(training_info.start_epoch_idx + 1, self.epochs + 1):
epoch_info = api.EpochInfo(
training_info=training_info,
global_epoch_idx=global_epoch_idx,
batches_per_epoch=self.source.train_iterations_per_epoch(),
optimizer=optimizer
)
# Execute learning
learner.run_epoch(epoch_info, self.source)
self.storage.checkpoint(epoch_info, learner.model)
training_info.on_train_end()
return training_info | Run the command with supplied configuration | Below is the the instruction that describes the task:
### Input:
Run the command with supplied configuration
### Response:
def run(self):
    """ Run the command with supplied configuration.

    Builds the learner and optimizer, restores any previous training
    state, then trains for the configured number of epochs, storing a
    checkpoint after every epoch.

    :return: the final ``training_info`` object
    """
    device = self.model_config.torch_device()
    learner = api.Learner(device, self.model_factory.instantiate(), self.max_grad_norm)
    optimizer = self.optimizer_factory.instantiate(learner.model)
    # All callbacks used for learning
    callbacks = self.gather_callbacks(optimizer)
    # Metrics to track through this training
    metrics = learner.metrics()
    # Check if training was already started and potentially continue where we left off
    training_info = self.resume_training(learner, callbacks, metrics)
    training_info.on_train_begin()
    # Restore optimizer state when resuming from a checkpoint.
    if training_info.optimizer_initial_state:
        optimizer.load_state_dict(training_info.optimizer_initial_state)
    # Epoch indices appear to be 1-based; resume after the last finished
    # epoch (start_epoch_idx) — confirm against TrainingInfo's contract.
    for global_epoch_idx in range(training_info.start_epoch_idx + 1, self.epochs + 1):
        epoch_info = api.EpochInfo(
            training_info=training_info,
            global_epoch_idx=global_epoch_idx,
            batches_per_epoch=self.source.train_iterations_per_epoch(),
            optimizer=optimizer
        )
        # Execute learning
        learner.run_epoch(epoch_info, self.source)
        # Persist model state after each epoch.
        self.storage.checkpoint(epoch_info, learner.model)
    training_info.on_train_end()
    return training_info
def close(self):
"""
Close (destroy) this USB context, and all related instances.
When this method has been called, methods on its instance will
become mosty no-ops, returning None until explicitly re-opened
(by calling open() or __enter__()).
Note: "exit" is a deprecated alias of "close".
"""
self.__auto_open = False
self.__context_cond.acquire()
try:
while self.__context_refcount and self.__context_p:
self.__context_cond.wait()
self._exit()
finally:
self.__context_cond.notifyAll()
self.__context_cond.release() | Close (destroy) this USB context, and all related instances.
When this method has been called, methods on its instance will
become mosty no-ops, returning None until explicitly re-opened
(by calling open() or __enter__()).
Note: "exit" is a deprecated alias of "close". | Below is the the instruction that describes the task:
### Input:
Close (destroy) this USB context, and all related instances.
When this method has been called, methods on its instance will
become mosty no-ops, returning None until explicitly re-opened
(by calling open() or __enter__()).
Note: "exit" is a deprecated alias of "close".
### Response:
def close(self):
    """
    Close (destroy) this USB context, and all related instances.

    When this method has been called, methods on its instance will
    become mostly no-ops, returning None until explicitly re-opened
    (by calling open() or __enter__()).

    Note: "exit" is a deprecated alias of "close".
    """
    self.__auto_open = False
    self.__context_cond.acquire()
    try:
        # Wait until all outstanding references to the context have been
        # returned before tearing it down.
        while self.__context_refcount and self.__context_p:
            self.__context_cond.wait()
        self._exit()
    finally:
        # notify_all() replaces the deprecated notifyAll() alias
        # (deprecated since Python 3.10); behavior is identical.
        self.__context_cond.notify_all()
        self.__context_cond.release()
def is_floating(self):
"""Returns whether this is a (non-quantized, real) floating point type."""
return (
self.is_numpy_compatible and np.issubdtype(self.as_numpy_dtype, np.floating)
) or self.base_dtype == bfloat16 | Returns whether this is a (non-quantized, real) floating point type. | Below is the the instruction that describes the task:
### Input:
Returns whether this is a (non-quantized, real) floating point type.
### Response:
def is_floating(self):
    """Whether this is a real (non-quantized) floating point type."""
    numpy_floating = (self.is_numpy_compatible
                      and np.issubdtype(self.as_numpy_dtype, np.floating))
    return numpy_floating or self.base_dtype == bfloat16
def add_comment(self, body, allow_create=False, allow_hashes=False,
summary=None):
"Implement as required by parent to store comment in CSV file."
if allow_hashes:
raise ValueError('allow_hashes not implemented for %s yet' % (
self.__class__.__name__))
if self.thread_id is None:
self.thread_id = self.lookup_thread_id()
if not os.path.exists(self.thread_id):
if not allow_create:
raise KeyError(self.topic)
with open(self.thread_id, 'a', newline='') as fdesc:
csv.writer(fdesc).writerow(self.header)
with open(self.thread_id, 'a', newline='') as fdesc:
writer = csv.writer(fdesc)
writer.writerow([self.user, datetime.datetime.utcnow(), summary,
body, '']) | Implement as required by parent to store comment in CSV file. | Below is the the instruction that describes the task:
### Input:
Implement as required by parent to store comment in CSV file.
### Response:
def add_comment(self, body, allow_create=False, allow_hashes=False,
                summary=None):
    """Implement as required by parent to store comment in CSV file.

    Appends a row of (user, UTC timestamp, summary, body, '') to the CSV
    file named by ``self.thread_id``, first writing a header row when the
    file does not yet exist and ``allow_create`` is true.

    :param body: the comment text to store
    :param allow_create: create the CSV file (with header) if missing;
        otherwise a missing file raises ``KeyError(self.topic)``
    :param allow_hashes: not supported here; raises ``ValueError`` if true
    :param summary: optional one-line summary stored alongside the body
    """
    if allow_hashes:
        raise ValueError('allow_hashes not implemented for %s yet' % (
            self.__class__.__name__))
    # Resolve the backing CSV path lazily on first use.
    if self.thread_id is None:
        self.thread_id = self.lookup_thread_id()
    if not os.path.exists(self.thread_id):
        if not allow_create:
            raise KeyError(self.topic)
        # New thread file: write the CSV header row first.
        with open(self.thread_id, 'a', newline='') as fdesc:
            csv.writer(fdesc).writerow(self.header)
    with open(self.thread_id, 'a', newline='') as fdesc:
        writer = csv.writer(fdesc)
        writer.writerow([self.user, datetime.datetime.utcnow(), summary,
                         body, ''])
def transformer_base_v1():
"""Set of hyperparameters."""
hparams = common_hparams.basic_params1()
hparams.norm_type = "layer"
hparams.hidden_size = 512
hparams.batch_size = 4096
hparams.max_length = 256
hparams.clip_grad_norm = 0. # i.e. no gradient clipping
hparams.optimizer_adam_epsilon = 1e-9
hparams.learning_rate_schedule = "legacy"
hparams.learning_rate_decay_scheme = "noam"
hparams.learning_rate = 0.1
hparams.learning_rate_warmup_steps = 4000
hparams.initializer_gain = 1.0
hparams.num_hidden_layers = 6
hparams.initializer = "uniform_unit_scaling"
hparams.weight_decay = 0.0
hparams.optimizer_adam_beta1 = 0.9
hparams.optimizer_adam_beta2 = 0.98
hparams.num_sampled_classes = 0
hparams.label_smoothing = 0.1
hparams.shared_embedding_and_softmax_weights = True
hparams.symbol_modality_num_shards = 16
# Add new ones like this.
hparams.add_hparam("filter_size", 2048)
# Layer-related flags. If zero, these fall back on hparams.num_hidden_layers.
hparams.add_hparam("num_encoder_layers", 0)
hparams.add_hparam("num_decoder_layers", 0)
# Attention-related flags.
hparams.add_hparam("num_heads", 8)
hparams.add_hparam("attention_key_channels", 0)
hparams.add_hparam("attention_value_channels", 0)
hparams.add_hparam("ffn_layer", "dense_relu_dense")
hparams.add_hparam("parameter_attention_key_channels", 0)
hparams.add_hparam("parameter_attention_value_channels", 0)
# All hyperparameters ending in "dropout" are automatically set to 0.0
# when not in training mode.
hparams.add_hparam("attention_dropout", 0.0)
hparams.add_hparam("attention_dropout_broadcast_dims", "")
hparams.add_hparam("relu_dropout", 0.0)
hparams.add_hparam("relu_dropout_broadcast_dims", "")
hparams.add_hparam("pos", "timing") # timing, none
hparams.add_hparam("nbr_decoder_problems", 1)
hparams.add_hparam("proximity_bias", False)
hparams.add_hparam("causal_decoder_self_attention", True)
hparams.add_hparam("use_pad_remover", True)
hparams.add_hparam("self_attention_type", "dot_product")
hparams.add_hparam("conv_first_kernel", 3)
hparams.add_hparam("attention_variables_3d", False)
hparams.add_hparam("use_target_space_embedding", True)
# These parameters are only used when ffn_layer=="local_moe_tpu"
hparams.add_hparam("moe_overhead_train", 1.0)
hparams.add_hparam("moe_overhead_eval", 2.0)
hparams.moe_num_experts = 16
hparams.moe_loss_coef = 1e-3
# If specified, use this value instead of problem name in metrics.py.
# This is useful for programs that can automatically compare experiments side
# by side based on the same metric names.
hparams.add_hparam("overload_eval_metric_name", "")
# For making a transformer encoder unidirectional by using masked
# attention.
hparams.add_hparam("unidirectional_encoder", False)
# For hard attention.
hparams.add_hparam("hard_attention_k", 0)
return hparams | Set of hyperparameters. | Below is the the instruction that describes the task:
### Input:
Set of hyperparameters.
### Response:
def transformer_base_v1():
    """Set of hyperparameters.

    Base (v1) transformer hyperparameter set: starts from
    ``common_hparams.basic_params1()`` and overrides/extends it with the
    transformer-specific settings below.

    :returns: the populated hparams object
    """
    hparams = common_hparams.basic_params1()
    hparams.norm_type = "layer"
    hparams.hidden_size = 512
    hparams.batch_size = 4096
    hparams.max_length = 256
    hparams.clip_grad_norm = 0. # i.e. no gradient clipping
    hparams.optimizer_adam_epsilon = 1e-9
    hparams.learning_rate_schedule = "legacy"
    hparams.learning_rate_decay_scheme = "noam"
    hparams.learning_rate = 0.1
    hparams.learning_rate_warmup_steps = 4000
    hparams.initializer_gain = 1.0
    hparams.num_hidden_layers = 6
    hparams.initializer = "uniform_unit_scaling"
    hparams.weight_decay = 0.0
    hparams.optimizer_adam_beta1 = 0.9
    hparams.optimizer_adam_beta2 = 0.98
    hparams.num_sampled_classes = 0
    hparams.label_smoothing = 0.1
    hparams.shared_embedding_and_softmax_weights = True
    hparams.symbol_modality_num_shards = 16
    # Add new ones like this.
    hparams.add_hparam("filter_size", 2048)
    # Layer-related flags. If zero, these fall back on hparams.num_hidden_layers.
    hparams.add_hparam("num_encoder_layers", 0)
    hparams.add_hparam("num_decoder_layers", 0)
    # Attention-related flags.
    hparams.add_hparam("num_heads", 8)
    hparams.add_hparam("attention_key_channels", 0)
    hparams.add_hparam("attention_value_channels", 0)
    hparams.add_hparam("ffn_layer", "dense_relu_dense")
    hparams.add_hparam("parameter_attention_key_channels", 0)
    hparams.add_hparam("parameter_attention_value_channels", 0)
    # All hyperparameters ending in "dropout" are automatically set to 0.0
    # when not in training mode.
    hparams.add_hparam("attention_dropout", 0.0)
    hparams.add_hparam("attention_dropout_broadcast_dims", "")
    hparams.add_hparam("relu_dropout", 0.0)
    hparams.add_hparam("relu_dropout_broadcast_dims", "")
    hparams.add_hparam("pos", "timing") # timing, none
    hparams.add_hparam("nbr_decoder_problems", 1)
    hparams.add_hparam("proximity_bias", False)
    hparams.add_hparam("causal_decoder_self_attention", True)
    hparams.add_hparam("use_pad_remover", True)
    hparams.add_hparam("self_attention_type", "dot_product")
    hparams.add_hparam("conv_first_kernel", 3)
    hparams.add_hparam("attention_variables_3d", False)
    hparams.add_hparam("use_target_space_embedding", True)
    # These parameters are only used when ffn_layer=="local_moe_tpu"
    hparams.add_hparam("moe_overhead_train", 1.0)
    hparams.add_hparam("moe_overhead_eval", 2.0)
    hparams.moe_num_experts = 16
    hparams.moe_loss_coef = 1e-3
    # If specified, use this value instead of problem name in metrics.py.
    # This is useful for programs that can automatically compare experiments side
    # by side based on the same metric names.
    hparams.add_hparam("overload_eval_metric_name", "")
    # For making a transformer encoder unidirectional by using masked
    # attention.
    hparams.add_hparam("unidirectional_encoder", False)
    # For hard attention.
    hparams.add_hparam("hard_attention_k", 0)
    return hparams
def deserializeEc(x, compress=True):
"""
Deserialize binary string @x into an EC element.
"""
return _deserialize(x, ec1Element, compress, librelic.ec_read_bin_abi) | Deserialize binary string @x into an EC element. | Below is the instruction that describes the task:
### Input:
Deserialize binary string @x into an EC element.
### Response:
def deserializeEc(x, compress=True):
    """
    Deserialize binary string @x into an EC element.

    :param x: serialized EC element bytes
    :param compress: whether @x uses the compressed point encoding —
        presumably must match how the element was serialized; confirm
        against the matching serializer
    :return: an ``ec1Element`` reconstructed via librelic's
        ``ec_read_bin`` ABI
    """
    return _deserialize(x, ec1Element, compress, librelic.ec_read_bin_abi)
def _init_coord_properties(self):
"""
Generates combinations of named coordinate values, mapping them to the internal array.
For Example: x, xy, xyz, y, yy, zyx, etc
"""
def gen_getter_setter_funs(*args):
indices = [self.coords[coord] for coord in args]
def getter(self):
return tuple(self._array[indices]) if len(args) > 1 else self._array[indices[0]]
def setter(self, value):
setitem(self._array, indices, value)
self.notify_observers()
return getter, setter
for n_repeats in range(1, len(self.coords)+1):
for args in itertools.product(self.coords.keys(), repeat=n_repeats):
getter, setter = gen_getter_setter_funs(*args)
setattr(self.__class__, ''.join(args), property(fget=getter, fset=setter)) | Generates combinations of named coordinate values, mapping them to the internal array.
For Example: x, xy, xyz, y, yy, zyx, etc | Below is the the instruction that describes the task:
### Input:
Generates combinations of named coordinate values, mapping them to the internal array.
For Example: x, xy, xyz, y, yy, zyx, etc
### Response:
def _init_coord_properties(self):
    """
    Generates combinations of named coordinate values, mapping them to the internal array.
    For Example: x, xy, xyz, y, yy, zyx, etc
    """
    def gen_getter_setter_funs(*args):
        # Resolve the array indices once per generated property; the
        # closures below capture this list (bound per call, so there is
        # no late-binding problem across loop iterations).
        indices = [self.coords[coord] for coord in args]
        def getter(self):
            # Single coordinate -> scalar value, multiple -> tuple.
            return tuple(self._array[indices]) if len(args) > 1 else self._array[indices[0]]
        def setter(self, value):
            setitem(self._array, indices, value)
            self.notify_observers()
        return getter, setter
    # Every ordered combination (with repetition) of coordinate names, up
    # to the full coordinate count, becomes a property (e.g. x, xy, zyx).
    for n_repeats in range(1, len(self.coords)+1):
        for args in itertools.product(self.coords.keys(), repeat=n_repeats):
            getter, setter = gen_getter_setter_funs(*args)
            # NOTE(review): properties are installed on the class, so they
            # are shared by all instances — confirm this is intended.
            setattr(self.__class__, ''.join(args), property(fget=getter, fset=setter))
def defgate(self, name, matrix, parameters=None):
"""
Define a new static gate.
.. note::
The matrix elements along each axis are ordered by bitstring. For two qubits the order
is ``00, 01, 10, 11``, where the the bits **are ordered in reverse** by the qubit index,
i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.
:param string name: The name of the gate.
:param array-like matrix: List of lists or Numpy 2d array.
:param list parameters: list of parameters that are used in this gate
:return: The Program instance.
:rtype: Program
"""
return self.inst(DefGate(name, matrix, parameters)) | Define a new static gate.
.. note::
The matrix elements along each axis are ordered by bitstring. For two qubits the order
is ``00, 01, 10, 11``, where the the bits **are ordered in reverse** by the qubit index,
i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.
:param string name: The name of the gate.
:param array-like matrix: List of lists or Numpy 2d array.
:param list parameters: list of parameters that are used in this gate
:return: The Program instance.
:rtype: Program | Below is the the instruction that describes the task:
### Input:
Define a new static gate.
.. note::
The matrix elements along each axis are ordered by bitstring. For two qubits the order
is ``00, 01, 10, 11``, where the the bits **are ordered in reverse** by the qubit index,
i.e., for qubits 0 and 1 the bitstring ``01`` indicates that qubit 0 is in the state 1.
See also :ref:`the related documentation section in the QVM Overview <basis-ordering>`.
:param string name: The name of the gate.
:param array-like matrix: List of lists or Numpy 2d array.
:param list parameters: list of parameters that are used in this gate
:return: The Program instance.
:rtype: Program
### Response:
def defgate(self, name, matrix, parameters=None):
    """
    Define a new static gate.

    .. note::
        The matrix elements along each axis are ordered by bitstring. For two
        qubits the order is ``00, 01, 10, 11``, where the bits are ordered in
        reverse by qubit index, i.e., for qubits 0 and 1 the bitstring ``01``
        indicates that qubit 0 is in the state 1. See also :ref:`the related
        documentation section in the QVM Overview <basis-ordering>`.

    :param string name: The name of the gate.
    :param array-like matrix: List of lists or Numpy 2d array.
    :param list parameters: list of parameters that are used in this gate
    :return: The Program instance.
    :rtype: Program
    """
    gate_definition = DefGate(name, matrix, parameters)
    return self.inst(gate_definition)
def addSiInfo(self, msrunContainer, specfiles=None,
attributes=['obsMz', 'rt', 'charge']):
"""Transfer attributes to :class:`Sii` elements from the corresponding
:class`Si` in :class:`MsrunContainer.sic <MsrunContainer>`. If an
attribute is not present in the ``Si`` the attribute value in the
``Sii``is set to ``None``.
Attribute examples: 'obsMz', 'rt', 'charge', 'tic', 'iit', 'ms1Id'
:param msrunContainer: an instance of :class:`MsrunContainer` which has
imported the corresponding specfiles
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:param attributes: a list of ``Si`` attributes that should be
transfered.
"""
if specfiles is None:
specfiles = [_ for _ in viewkeys(self.info)]
else:
specfiles = aux.toList(specfiles)
for specfile in specfiles:
if specfile not in self.info:
warntext = 'Error while calling "SiiContainer.addSiInfo()": '\
'"%s" is not present in "SiiContainer.info"!'\
% (specfile, )
warnings.warn(warntext)
elif specfile not in msrunContainer.info:
warntext = 'Error while calling "SiiContainer.addSiInfo()": '\
'"%s" is not present in "MsrunContainer.info"'\
% (specfile, )
warnings.warn(warntext)
else:
for identifier in self.container[specfile]:
si = msrunContainer.sic[specfile][identifier]
for sii in self.container[specfile][identifier]:
for attribute in attributes:
setattr(sii, attribute,
getattr(si, attribute, None)
) | Transfer attributes to :class:`Sii` elements from the corresponding
:class`Si` in :class:`MsrunContainer.sic <MsrunContainer>`. If an
attribute is not present in the ``Si`` the attribute value in the
``Sii``is set to ``None``.
Attribute examples: 'obsMz', 'rt', 'charge', 'tic', 'iit', 'ms1Id'
:param msrunContainer: an instance of :class:`MsrunContainer` which has
imported the corresponding specfiles
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:param attributes: a list of ``Si`` attributes that should be
transfered. | Below is the the instruction that describes the task:
### Input:
Transfer attributes to :class:`Sii` elements from the corresponding
:class`Si` in :class:`MsrunContainer.sic <MsrunContainer>`. If an
attribute is not present in the ``Si`` the attribute value in the
``Sii``is set to ``None``.
Attribute examples: 'obsMz', 'rt', 'charge', 'tic', 'iit', 'ms1Id'
:param msrunContainer: an instance of :class:`MsrunContainer` which has
imported the corresponding specfiles
:param specfiles: the name of an ms-run file or a list of names. If None
all specfiles are selected.
:param attributes: a list of ``Si`` attributes that should be
transfered.
### Response:
def addSiInfo(self, msrunContainer, specfiles=None,
              attributes=('obsMz', 'rt', 'charge')):
    """Transfer attributes to :class:`Sii` elements from the corresponding
    :class:`Si` in :class:`MsrunContainer.sic <MsrunContainer>`. If an
    attribute is not present in the ``Si`` the attribute value in the
    ``Sii`` is set to ``None``.

    Attribute examples: 'obsMz', 'rt', 'charge', 'tic', 'iit', 'ms1Id'

    :param msrunContainer: an instance of :class:`MsrunContainer` which has
        imported the corresponding specfiles
    :param specfiles: the name of an ms-run file or a list of names. If None
        all specfiles are selected.
    :param attributes: a sequence of ``Si`` attribute names that should be
        transferred. (A tuple default replaces the previous mutable list
        default to avoid the shared mutable-default pitfall; any iterable
        of names is accepted.)
    """
    if specfiles is None:
        specfiles = [_ for _ in viewkeys(self.info)]
    else:
        specfiles = aux.toList(specfiles)
    for specfile in specfiles:
        # Both containers must know the specfile; otherwise warn and skip.
        if specfile not in self.info:
            warntext = 'Error while calling "SiiContainer.addSiInfo()": '\
                       '"%s" is not present in "SiiContainer.info"!'\
                       % (specfile, )
            warnings.warn(warntext)
        elif specfile not in msrunContainer.info:
            warntext = 'Error while calling "SiiContainer.addSiInfo()": '\
                       '"%s" is not present in "MsrunContainer.info"'\
                       % (specfile, )
            warnings.warn(warntext)
        else:
            for identifier in self.container[specfile]:
                si = msrunContainer.sic[specfile][identifier]
                for sii in self.container[specfile][identifier]:
                    for attribute in attributes:
                        # Missing Si attributes default to None on the Sii.
                        setattr(sii, attribute,
                                getattr(si, attribute, None)
                                )
def fetch(self, ui_version=values.unset):
"""
Fetch a ConfigurationInstance
:param unicode ui_version: Pinned UI version
:returns: Fetched ConfigurationInstance
:rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance
"""
return self._proxy.fetch(ui_version=ui_version, ) | Fetch a ConfigurationInstance
:param unicode ui_version: Pinned UI version
:returns: Fetched ConfigurationInstance
:rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance | Below is the the instruction that describes the task:
### Input:
Fetch a ConfigurationInstance
:param unicode ui_version: Pinned UI version
:returns: Fetched ConfigurationInstance
:rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance
### Response:
def fetch(self, ui_version=values.unset):
    """
    Fetch a ConfigurationInstance.

    :param unicode ui_version: Pinned UI version
    :returns: Fetched ConfigurationInstance
    :rtype: twilio.rest.flex_api.v1.configuration.ConfigurationInstance
    """
    # Delegate to the context proxy, which performs the actual fetch.
    return self._proxy.fetch(ui_version=ui_version, )
def __diff_set(self, level):
"""Difference of sets"""
t1_hashtable = self.__create_hashtable(level.t1, level)
t2_hashtable = self.__create_hashtable(level.t2, level)
t1_hashes = set(t1_hashtable.keys())
t2_hashes = set(t2_hashtable.keys())
hashes_added = t2_hashes - t1_hashes
hashes_removed = t1_hashes - t2_hashes
items_added = [t2_hashtable[i].item for i in hashes_added]
items_removed = [t1_hashtable[i].item for i in hashes_removed]
for item in items_added:
change_level = level.branch_deeper(
notpresent, item, child_relationship_class=SetRelationship)
self.__report_result('set_item_added', change_level)
for item in items_removed:
change_level = level.branch_deeper(
item, notpresent, child_relationship_class=SetRelationship)
self.__report_result('set_item_removed', change_level) | Difference of sets | Below is the the instruction that describes the task:
### Input:
Difference of sets
### Response:
def __diff_set(self, level):
    """Difference of sets"""
    # Hash both sides so items are compared via their computed hashes
    # rather than direct equality.
    t1_hashtable = self.__create_hashtable(level.t1, level)
    t2_hashtable = self.__create_hashtable(level.t2, level)
    t1_hashes = set(t1_hashtable.keys())
    t2_hashes = set(t2_hashtable.keys())
    # Present only in t2 -> added; present only in t1 -> removed.
    hashes_added = t2_hashes - t1_hashes
    hashes_removed = t1_hashes - t2_hashes
    items_added = [t2_hashtable[i].item for i in hashes_added]
    items_removed = [t1_hashtable[i].item for i in hashes_removed]
    # Report each change one level deeper in the diff tree, with
    # `notpresent` standing in for the missing side.
    for item in items_added:
        change_level = level.branch_deeper(
            notpresent, item, child_relationship_class=SetRelationship)
        self.__report_result('set_item_added', change_level)
    for item in items_removed:
        change_level = level.branch_deeper(
            item, notpresent, child_relationship_class=SetRelationship)
        self.__report_result('set_item_removed', change_level)
def join(self):
"""Wait for transfer to exit, raising errors as necessary."""
self.closed = True
while self.expect > 0:
val = self.wait_change.get()
self.expect -= 1
if val is not None:
# Wait a while for all running greenlets to exit, and
# then attempt to force them to exit so join()
# terminates in a reasonable amount of time.
gevent.joinall(list(self.greenlets), timeout=30)
gevent.killall(list(self.greenlets), block=True, timeout=30)
raise val | Wait for transfer to exit, raising errors as necessary. | Below is the the instruction that describes the task:
### Input:
Wait for transfer to exit, raising errors as necessary.
### Response:
def join(self):
    """Wait for transfer to exit, raising errors as necessary."""
    self.closed = True
    # Drain one completion notification per expected worker; a non-None
    # value is an error raised by a worker.
    while self.expect > 0:
        val = self.wait_change.get()
        self.expect -= 1
        if val is not None:
            # Wait a while for all running greenlets to exit, and
            # then attempt to force them to exit so join()
            # terminates in a reasonable amount of time.
            gevent.joinall(list(self.greenlets), timeout=30)
            gevent.killall(list(self.greenlets), block=True, timeout=30)
            raise val
def cena_tau(imt, mag, params):
"""
Returns the inter-event standard deviation, tau, for the CENA case
"""
if imt.name == "PGV":
C = params["PGV"]
else:
C = params["SA"]
if mag > 6.5:
return C["tau3"]
elif (mag > 5.5) and (mag <= 6.5):
return ITPL(mag, C["tau3"], C["tau2"], 5.5, 1.0)
elif (mag > 5.0) and (mag <= 5.5):
return ITPL(mag, C["tau2"], C["tau1"], 5.0, 0.5)
else:
return C["tau1"] | Returns the inter-event standard deviation, tau, for the CENA case | Below is the the instruction that describes the task:
### Input:
Returns the inter-event standard deviation, tau, for the CENA case
### Response:
def cena_tau(imt, mag, params):
    """
    Return the inter-event standard deviation, tau, for the CENA case.

    Coefficients are selected for PGV or SA from ``params``; tau is taken
    directly from the tabulated values outside (5.0, 6.5] and linearly
    interpolated (via ``ITPL``) inside the ranges (5.0, 5.5] and
    (5.5, 6.5].
    """
    coeffs = params["PGV"] if imt.name == "PGV" else params["SA"]
    if mag > 6.5:
        return coeffs["tau3"]
    if 5.5 < mag <= 6.5:
        return ITPL(mag, coeffs["tau3"], coeffs["tau2"], 5.5, 1.0)
    if 5.0 < mag <= 5.5:
        return ITPL(mag, coeffs["tau2"], coeffs["tau1"], 5.0, 0.5)
    return coeffs["tau1"]
def _parse_access_token(self, resp_text):
' parse access token from urlencoded str like access_token=abcxyz&expires_in=123000&other=true '
r = self._qs2dict(resp_text)
access_token = r.pop('access_token')
expires = time.time() + float(r.pop('expires_in'))
return JsonDict(access_token=access_token, expires=expires, **r) | parse access token from urlencoded str like access_token=abcxyz&expires_in=123000&other=true | Below is the the instruction that describes the task:
### Input:
parse access token from urlencoded str like access_token=abcxyz&expires_in=123000&other=true
### Response:
def _parse_access_token(self, resp_text):
    """Parse an access token from a urlencoded string such as
    ``access_token=abcxyz&expires_in=123000&other=true``."""
    fields = self._qs2dict(resp_text)
    token = fields.pop('access_token')
    # Convert the relative expires_in into an absolute timestamp.
    expiry = time.time() + float(fields.pop('expires_in'))
    return JsonDict(access_token=token, expires=expiry, **fields)
def deserialize_transaction(tx_hex):
"""
Given a serialized transaction, return its inputs, outputs,
locktime, and version
Each input will have:
* transaction_hash: string
* output_index: int
* [optional] sequence: int
* [optional] script_sig: string
Each output will have:
* value: int
* script_hex: string
"""
tx = bitcoin.deserialize(str(tx_hex))
inputs = tx["ins"]
outputs = tx["outs"]
ret_inputs = []
ret_outputs = []
for inp in inputs:
ret_inp = {
"transaction_hash": inp["outpoint"]["hash"],
"output_index": int(inp["outpoint"]["index"]),
}
if "sequence" in inp:
ret_inp["sequence"] = int(inp["sequence"])
if "script" in inp:
ret_inp["script_sig"] = inp["script"]
ret_inputs.append(ret_inp)
for out in outputs:
ret_out = {
"value": out["value"],
"script_hex": out["script"]
}
ret_outputs.append(ret_out)
return ret_inputs, ret_outputs, tx["locktime"], tx["version"] | Given a serialized transaction, return its inputs, outputs,
locktime, and version
Each input will have:
* transaction_hash: string
* output_index: int
* [optional] sequence: int
* [optional] script_sig: string
Each output will have:
* value: int
* script_hex: string | Below is the the instruction that describes the task:
### Input:
Given a serialized transaction, return its inputs, outputs,
locktime, and version
Each input will have:
* transaction_hash: string
* output_index: int
* [optional] sequence: int
* [optional] script_sig: string
Each output will have:
* value: int
* script_hex: string
### Response:
def deserialize_transaction(tx_hex):
    """
    Given a serialized transaction, return its inputs, outputs,
    locktime, and version.

    Each input has:
    * transaction_hash: string
    * output_index: int
    * [optional] sequence: int
    * [optional] script_sig: string

    Each output has:
    * value: int
    * script_hex: string
    """
    parsed = bitcoin.deserialize(str(tx_hex))

    def _convert_input(txin):
        # Mandatory fields first; sequence/script are copied only when present.
        converted = {
            "transaction_hash": txin["outpoint"]["hash"],
            "output_index": int(txin["outpoint"]["index"]),
        }
        if "sequence" in txin:
            converted["sequence"] = int(txin["sequence"])
        if "script" in txin:
            converted["script_sig"] = txin["script"]
        return converted

    inputs = [_convert_input(txin) for txin in parsed["ins"]]
    outputs = [{"value": txout["value"], "script_hex": txout["script"]}
               for txout in parsed["outs"]]
    return inputs, outputs, parsed["locktime"], parsed["version"]
def load(cls, sc, path):
"""
Load a model from the given path.
"""
jmodel = sc._jvm.org.apache.spark.mllib.feature \
.Word2VecModel.load(sc._jsc.sc(), path)
model = sc._jvm.org.apache.spark.mllib.api.python.Word2VecModelWrapper(jmodel)
return Word2VecModel(model) | Load a model from the given path. | Below is the instruction that describes the task:
### Input:
Load a model from the given path.
### Response:
def load(cls, sc, path):
    """
    Load a model from the given path.

    :param sc: SparkContext used to reach the JVM-side model loader
    :param path: path the model was previously saved to
    :return: a :class:`Word2VecModel` wrapping the loaded JVM model
    """
    # Load the Scala/JVM model first, then wrap it for Python access.
    jmodel = sc._jvm.org.apache.spark.mllib.feature \
        .Word2VecModel.load(sc._jsc.sc(), path)
    model = sc._jvm.org.apache.spark.mllib.api.python.Word2VecModelWrapper(jmodel)
    return Word2VecModel(model)
def path_distance(points):
"""
Compute the path distance from given set of points
"""
vecs = np.diff(points, axis=0)[:, :3]
d2 = [np.dot(p, p) for p in vecs]
return np.sum(np.sqrt(d2)) | Compute the path distance from given set of points | Below is the instruction that describes the task:
### Input:
Compute the path distance from given set of points
### Response:
def path_distance(points):
    """
    Compute the path distance from given set of points.

    Only the first three columns (x, y, z) of each point are used; any
    extra columns are ignored.

    :param points: array-like of shape (N, >=3)
    :return: sum of Euclidean distances between consecutive points
        (0.0 for fewer than two points)
    """
    vecs = np.diff(points, axis=0)[:, :3]
    # Vectorized per-segment norm instead of a Python-level loop over rows.
    return np.sum(np.linalg.norm(vecs, axis=1))
def load_data(self, pdbid):
"""Loads and parses an XML resource and saves it as a tree if successful"""
f = urlopen("http://projects.biotec.tu-dresden.de/plip-rest/pdb/%s?format=xml" % pdbid.lower())
self.doc = etree.parse(f) | Loads and parses an XML resource and saves it as a tree if successful | Below is the instruction that describes the task:
### Input:
Loads and parses an XML resource and saves it as a tree if successful
### Response:
def load_data(self, pdbid):
    """Loads and parses an XML resource and saves it as a tree if successful.

    Fetches the PLIP REST XML report for ``pdbid`` (lower-cased) over HTTP
    and stores the parsed element tree on ``self.doc``.

    NOTE(review): performs network I/O, does not close the response
    object, and does not handle HTTP errors — confirm callers expect that.
    """
    f = urlopen("http://projects.biotec.tu-dresden.de/plip-rest/pdb/%s?format=xml" % pdbid.lower())
    self.doc = etree.parse(f)
def sample_truncated_gaussian_vector(data, uncertainties, bounds=None):
'''
Samples a Gaussian distribution subject to boundaries on the data
:param numpy.ndarray data:
Vector of N data values
:param numpy.ndarray uncertainties:
Vector of N data uncertainties
:param int number_bootstraps:
Number of bootstrap samples
:param tuple bounds:
(Lower, Upper) bound of data space
'''
nvals = len(data)
if bounds:
# if bounds[0] or (fabs(bounds[0]) < PRECISION):
if bounds[0] is not None:
lower_bound = (bounds[0] - data) / uncertainties
else:
lower_bound = -np.inf * np.ones_like(data)
# if bounds[1] or (fabs(bounds[1]) < PRECISION):
if bounds[1] is not None:
upper_bound = (bounds[1] - data) / uncertainties
else:
upper_bound = np.inf * np.ones_like(data)
sample = hmtk_truncnorm.rvs(lower_bound, upper_bound, size=nvals)
else:
sample = np.random.normal(0., 1., nvals)
return data + uncertainties * sample | Samples a Gaussian distribution subject to boundaries on the data
:param numpy.ndarray data:
Vector of N data values
:param numpy.ndarray uncertainties:
Vector of N data uncertainties
:param int number_bootstraps:
Number of bootstrap samples
:param tuple bounds:
(Lower, Upper) bound of data space | Below is the the instruction that describes the task:
### Input:
Samples a Gaussian distribution subject to boundaries on the data
:param numpy.ndarray data:
Vector of N data values
:param numpy.ndarray uncertainties:
Vector of N data uncertainties
:param int number_bootstraps:
Number of bootstrap samples
:param tuple bounds:
(Lower, Upper) bound of data space
### Response:
def sample_truncated_gaussian_vector(data, uncertainties, bounds=None):
    '''
    Samples a Gaussian distribution subject to boundaries on the data
    :param numpy.ndarray data:
        Vector of N data values
    :param numpy.ndarray uncertainties:
        Vector of N data uncertainties (standard deviations)
    :param tuple bounds:
        (Lower, Upper) bound of data space. Either element may be None,
        meaning that side is unbounded. When ``bounds`` is falsy the
        perturbation is drawn from an untruncated standard normal.
    :returns:
        Vector of N perturbed data values, ``data + uncertainties * sample``
    '''
    nvals = len(data)
    if bounds:
        # Express each bound in standard-normal units of the corresponding
        # datum before drawing from the truncated normal distribution.
        # if bounds[0] or (fabs(bounds[0]) < PRECISION):
        if bounds[0] is not None:
            lower_bound = (bounds[0] - data) / uncertainties
        else:
            lower_bound = -np.inf * np.ones_like(data)
        # if bounds[1] or (fabs(bounds[1]) < PRECISION):
        if bounds[1] is not None:
            upper_bound = (bounds[1] - data) / uncertainties
        else:
            upper_bound = np.inf * np.ones_like(data)
        sample = hmtk_truncnorm.rvs(lower_bound, upper_bound, size=nvals)
    else:
        # No truncation requested: plain standard-normal perturbations.
        sample = np.random.normal(0., 1., nvals)
    return data + uncertainties * sample
def get_effective_member_count(self, group_id):
"""
Returns a count of effective members for the group identified by the
passed group ID.
"""
self._valid_group_id(group_id)
url = "{}/group/{}/effective_member?view=count".format(self.API,
group_id)
data = self._get_resource(url)
count = data.get("data").get("count")
return int(count) | Returns a count of effective members for the group identified by the
    passed group ID. | Below is the instruction that describes the task:
### Input:
Returns a count of effective members for the group identified by the
passed group ID.
### Response:
def get_effective_member_count(self, group_id):
    """
    Return the number of effective members of the group identified by
    ``group_id``.
    """
    # Reject malformed ids before issuing any request.
    self._valid_group_id(group_id)
    resource_url = "{}/group/{}/effective_member?view=count".format(
        self.API, group_id)
    response = self._get_resource(resource_url)
    return int(response.get("data").get("count"))
def save(self, data):
"""Save the config data
Args:
data: any serializable config data
Raises:
ConfigLoaderException: if the ConfigLoader.load not called, so there is no config file name,
or the data is not serializable or the loader is nested
"""
if self.__nested:
raise ConfigLoaderException("Cannot save the config if the 'nested' paramter is True!")
if self.__loaded_config_file is None:
raise ConfigLoaderException("Load not called yet!")
try:
with open(self.__loaded_config_file, 'w') as f:
f.write(self.__formatter.encode(data))
except Exception as e:
raise ConfigLoaderException("Config data is not serializable: %s" % e) | Save the config data
Args:
data: any serializable config data
Raises:
ConfigLoaderException: if the ConfigLoader.load not called, so there is no config file name,
            or the data is not serializable or the loader is nested | Below is the instruction that describes the task:
### Input:
Save the config data
Args:
data: any serializable config data
Raises:
ConfigLoaderException: if the ConfigLoader.load not called, so there is no config file name,
or the data is not serializable or the loader is nested
### Response:
def save(self, data):
    """Save the config data to the previously loaded config file.

    Args:
        data: any serializable config data

    Raises:
        ConfigLoaderException: if ConfigLoader.load was not called yet (so
            there is no config file name), if the loader is nested, or if
            the data is not serializable
    """
    if self.__nested:
        # A nested loader does not own a file of its own, so writing is refused.
        raise ConfigLoaderException("Cannot save the config if the 'nested' parameter is True!")
    if self.__loaded_config_file is None:
        raise ConfigLoaderException("Load not called yet!")
    try:
        with open(self.__loaded_config_file, 'w') as f:
            f.write(self.__formatter.encode(data))
    except Exception as e:
        # Chain the original exception so the underlying encode/IO error
        # is preserved in the traceback.
        raise ConfigLoaderException("Config data is not serializable: %s" % e) from e
def as_view(url=None, methods=None, view_class=ActionsView, name=None, url_rules=None, **kwargs):
"""Decorator to transform a function into a view class. Be warned that this will replace
the function with the view class.
"""
def decorator(f):
if url is not None:
f = expose(url, methods=methods)(f)
clsdict = {"name": name or f.__name__,
"actions": getattr(f, "actions", None),
"url_rules": url_rules or getattr(f, "urls", None)}
if isinstance(f, WithActionsDecorator):
f = f.func
clsdict['func'] = f
def constructor(self, **ctorkwargs):
for k, v in kwargs.items():
if k not in ctorkwargs or ctorkwargs[k] is None:
ctorkwargs[k] = v
view_class.__init__(self, func=f, **ctorkwargs)
clsdict["__init__"] = constructor
return type(f.__name__, (view_class,), clsdict)
return decorator | Decorator to transform a function into a view class. Be warned that this will replace
    the function with the view class. | Below is the instruction that describes the task:
### Input:
Decorator to transform a function into a view class. Be warned that this will replace
the function with the view class.
### Response:
def as_view(url=None, methods=None, view_class=ActionsView, name=None, url_rules=None, **kwargs):
    """Decorator to transform a function into a view class. Be warned that this will replace
    the function with the view class.

    Args:
        url: optional URL rule; when given, the function is first wrapped
            with ``expose(url, methods=methods)``.
        methods: HTTP methods forwarded to ``expose`` (only used with ``url``).
        view_class: base class of the generated view (default ``ActionsView``).
        name: name of the generated view; defaults to the function's name.
        url_rules: explicit URL rules; defaults to any ``urls`` attribute
            carried by the decorated function.
        **kwargs: default keyword arguments for the view constructor;
            arguments passed explicitly to the constructor take precedence.

    Returns:
        The decorator, which produces a new subclass of ``view_class``.
    """
    def decorator(f):
        if url is not None:
            f = expose(url, methods=methods)(f)
        clsdict = {"name": name or f.__name__,
                   "actions": getattr(f, "actions", None),
                   "url_rules": url_rules or getattr(f, "urls", None)}
        if isinstance(f, WithActionsDecorator):
            # Unwrap to the underlying function; its actions were captured above.
            f = f.func
        clsdict['func'] = f
        def constructor(self, **ctorkwargs):
            # Fill in any constructor argument that was omitted (or passed as
            # None) with the defaults captured by the decorator.
            for k, v in kwargs.items():
                if k not in ctorkwargs or ctorkwargs[k] is None:
                    ctorkwargs[k] = v
            view_class.__init__(self, func=f, **ctorkwargs)
        clsdict["__init__"] = constructor
        # Build the view class dynamically, named after the wrapped function.
        return type(f.__name__, (view_class,), clsdict)
    return decorator
def group_by(self, *args):
"""
This method lets you specify the grouping fields explicitly. The `args` must
be names of grouping fields or calculated fields that this queryset was
created with.
"""
for name in args:
assert name in self._fields or name in self._calculated_fields, \
'Cannot group by `%s` since it is not included in the query' % name
qs = copy(self)
qs._grouping_fields = args
return qs | This method lets you specify the grouping fields explicitly. The `args` must
be names of grouping fields or calculated fields that this queryset was
    created with. | Below is the instruction that describes the task:
### Input:
This method lets you specify the grouping fields explicitly. The `args` must
be names of grouping fields or calculated fields that this queryset was
created with.
### Response:
def group_by(self, *args):
    """
    Specify the grouping fields explicitly. Each name in ``args`` must be
    a grouping field or calculated field that this queryset was created
    with. Returns a shallow copy of the queryset with grouping applied.
    """
    for field_name in args:
        known = field_name in self._fields or field_name in self._calculated_fields
        assert known, \
            'Cannot group by `%s` since it is not included in the query' % field_name
    clone = copy(self)
    clone._grouping_fields = args
    return clone
def _get_team_abbreviation(self, team):
"""
Retrieve team's abbreviation.
The team's abbreviation is embedded within the 'school_name' tag and
requires special parsing as it is located in the middle of a URI. The
abbreviation is returned for the requested school.
Parameters
----------
team : PyQuery object
A PyQuery object representing a single row in a table on the
conference page.
Returns
-------
string
Returns a string of the team's abbreviation, such as 'PURDUE'.
"""
name_tag = team('th[data-stat="school_name"] a')
team_abbreviation = re.sub(r'.*/cfb/schools/', '', str(name_tag))
team_abbreviation = re.sub(r'/.*', '', team_abbreviation)
return team_abbreviation | Retrieve team's abbreviation.
The team's abbreviation is embedded within the 'school_name' tag and
requires special parsing as it is located in the middle of a URI. The
abbreviation is returned for the requested school.
Parameters
----------
team : PyQuery object
A PyQuery object representing a single row in a table on the
conference page.
Returns
-------
string
        Returns a string of the team's abbreviation, such as 'PURDUE'. | Below is the instruction that describes the task:
### Input:
Retrieve team's abbreviation.
The team's abbreviation is embedded within the 'school_name' tag and
requires special parsing as it is located in the middle of a URI. The
abbreviation is returned for the requested school.
Parameters
----------
team : PyQuery object
A PyQuery object representing a single row in a table on the
conference page.
Returns
-------
string
Returns a string of the team's abbreviation, such as 'PURDUE'.
### Response:
def _get_team_abbreviation(self, team):
"""
Retrieve team's abbreviation.
The team's abbreviation is embedded within the 'school_name' tag and
requires special parsing as it is located in the middle of a URI. The
abbreviation is returned for the requested school.
Parameters
----------
team : PyQuery object
A PyQuery object representing a single row in a table on the
conference page.
Returns
-------
string
Returns a string of the team's abbreviation, such as 'PURDUE'.
"""
name_tag = team('th[data-stat="school_name"] a')
team_abbreviation = re.sub(r'.*/cfb/schools/', '', str(name_tag))
team_abbreviation = re.sub(r'/.*', '', team_abbreviation)
return team_abbreviation |
def to_xml(self):
'''
Returns a DOM representation of the deliveries method
@returns: Element
'''
for n, v in { "method": self.method, "status": self.status,
"date":self.date}.items():
if is_empty_or_none(v):
raise DeliveryMethodError("'%s' attribute cannot be " \
"empty or None." % n)
doc = Document()
root = doc.createElement("delivery")
super(DeliveryMethod, self).to_xml(root)
self._create_text_node(root, "method", self.method)
self._create_text_node(root, "status", self.status)
self._create_text_node(root, "reference", self.ref, True)
self._create_text_node(root, "date", self.date)
return root | Returns a DOM representation of the deliveries method
    @returns: Element | Below is the instruction that describes the task:
### Input:
Returns a DOM representation of the deliveries method
@returns: Element
### Response:
def to_xml(self):
    '''
    Returns a DOM representation of the deliveries method
    @returns: Element
    @raise DeliveryMethodError: if method, status or date is empty or None
    '''
    # Mandatory attributes must be present before the DOM is built.
    for n, v in { "method": self.method, "status": self.status,
                  "date":self.date}.items():
        if is_empty_or_none(v):
            raise DeliveryMethodError("'%s' attribute cannot be " \
                                      "empty or None." % n)
    doc = Document()
    root = doc.createElement("delivery")
    # Let the parent class append its common child nodes first.
    super(DeliveryMethod, self).to_xml(root)
    self._create_text_node(root, "method", self.method)
    self._create_text_node(root, "status", self.status)
    # NOTE(review): the extra True flag presumably marks 'reference' as
    # optional in _create_text_node -- confirm against its definition.
    self._create_text_node(root, "reference", self.ref, True)
    self._create_text_node(root, "date", self.date)
    return root
def matrix_representation(op):
"""Return a matrix representation of a linear operator.
Parameters
----------
op : `Operator`
The linear operator of which one wants a matrix representation.
If the domain or range is a `ProductSpace`, it must be a power-space.
Returns
-------
matrix : `numpy.ndarray`
The matrix representation of the operator.
The shape will be ``op.domain.shape + op.range.shape`` and the dtype
is the promoted (greatest) dtype of the domain and range.
Examples
--------
Approximate a matrix on its own:
>>> mat = np.array([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9]])
>>> op = odl.MatrixOperator(mat)
>>> matrix_representation(op)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
It also works with `ProductSpace`'s and higher dimensional `TensorSpace`'s.
In this case, the returned "matrix" will also be higher dimensional:
>>> space = odl.uniform_discr([0, 0], [2, 2], (2, 2))
>>> grad = odl.Gradient(space)
>>> tensor = odl.matrix_representation(grad)
>>> tensor.shape == (2, 2, 2, 2, 2)
True
Since the "matrix" is now higher dimensional, we need to use e.g.
`numpy.tensordot` if we want to compute with the matrix representation:
>>> x = space.element(lambda x: x[0] ** 2 + 2 * x[1] ** 2)
>>> grad(x)
ProductSpace(uniform_discr([ 0., 0.], [ 2., 2.], (2, 2)), 2).element([
<BLANKLINE>
[[ 2. , 2. ],
[-2.75, -6.75]],
<BLANKLINE>
[[ 4. , -4.75],
[ 4. , -6.75]]
])
>>> np.tensordot(tensor, x, axes=grad.domain.ndim)
array([[[ 2. , 2. ],
[-2.75, -6.75]],
<BLANKLINE>
[[ 4. , -4.75],
[ 4. , -6.75]]])
Notes
----------
The algorithm works by letting the operator act on all unit vectors, and
stacking the output as a matrix.
"""
if not op.is_linear:
raise ValueError('the operator is not linear')
if not (isinstance(op.domain, TensorSpace) or
(isinstance(op.domain, ProductSpace) and
op.domain.is_power_space and
all(isinstance(spc, TensorSpace) for spc in op.domain))):
raise TypeError('operator domain {!r} is neither `TensorSpace` '
'nor `ProductSpace` with only equal `TensorSpace` '
'components'.format(op.domain))
if not (isinstance(op.range, TensorSpace) or
(isinstance(op.range, ProductSpace) and
op.range.is_power_space and
all(isinstance(spc, TensorSpace) for spc in op.range))):
raise TypeError('operator range {!r} is neither `TensorSpace` '
'nor `ProductSpace` with only equal `TensorSpace` '
'components'.format(op.range))
# Generate the matrix
dtype = np.promote_types(op.domain.dtype, op.range.dtype)
matrix = np.zeros(op.range.shape + op.domain.shape, dtype=dtype)
tmp_ran = op.range.element() # Store for reuse in loop
tmp_dom = op.domain.zero() # Store for reuse in loop
for j in nd_iterator(op.domain.shape):
tmp_dom[j] = 1.0
op(tmp_dom, out=tmp_ran)
matrix[(Ellipsis,) + j] = tmp_ran.asarray()
tmp_dom[j] = 0.0
return matrix | Return a matrix representation of a linear operator.
Parameters
----------
op : `Operator`
The linear operator of which one wants a matrix representation.
If the domain or range is a `ProductSpace`, it must be a power-space.
Returns
-------
matrix : `numpy.ndarray`
The matrix representation of the operator.
The shape will be ``op.domain.shape + op.range.shape`` and the dtype
is the promoted (greatest) dtype of the domain and range.
Examples
--------
Approximate a matrix on its own:
>>> mat = np.array([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9]])
>>> op = odl.MatrixOperator(mat)
>>> matrix_representation(op)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
It also works with `ProductSpace`'s and higher dimensional `TensorSpace`'s.
In this case, the returned "matrix" will also be higher dimensional:
>>> space = odl.uniform_discr([0, 0], [2, 2], (2, 2))
>>> grad = odl.Gradient(space)
>>> tensor = odl.matrix_representation(grad)
>>> tensor.shape == (2, 2, 2, 2, 2)
True
Since the "matrix" is now higher dimensional, we need to use e.g.
`numpy.tensordot` if we want to compute with the matrix representation:
>>> x = space.element(lambda x: x[0] ** 2 + 2 * x[1] ** 2)
>>> grad(x)
ProductSpace(uniform_discr([ 0., 0.], [ 2., 2.], (2, 2)), 2).element([
<BLANKLINE>
[[ 2. , 2. ],
[-2.75, -6.75]],
<BLANKLINE>
[[ 4. , -4.75],
[ 4. , -6.75]]
])
>>> np.tensordot(tensor, x, axes=grad.domain.ndim)
array([[[ 2. , 2. ],
[-2.75, -6.75]],
<BLANKLINE>
[[ 4. , -4.75],
[ 4. , -6.75]]])
Notes
----------
The algorithm works by letting the operator act on all unit vectors, and
    stacking the output as a matrix. | Below is the instruction that describes the task:
### Input:
Return a matrix representation of a linear operator.
Parameters
----------
op : `Operator`
The linear operator of which one wants a matrix representation.
If the domain or range is a `ProductSpace`, it must be a power-space.
Returns
-------
matrix : `numpy.ndarray`
The matrix representation of the operator.
The shape will be ``op.domain.shape + op.range.shape`` and the dtype
is the promoted (greatest) dtype of the domain and range.
Examples
--------
Approximate a matrix on its own:
>>> mat = np.array([[1, 2, 3],
... [4, 5, 6],
... [7, 8, 9]])
>>> op = odl.MatrixOperator(mat)
>>> matrix_representation(op)
array([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
It also works with `ProductSpace`'s and higher dimensional `TensorSpace`'s.
In this case, the returned "matrix" will also be higher dimensional:
>>> space = odl.uniform_discr([0, 0], [2, 2], (2, 2))
>>> grad = odl.Gradient(space)
>>> tensor = odl.matrix_representation(grad)
>>> tensor.shape == (2, 2, 2, 2, 2)
True
Since the "matrix" is now higher dimensional, we need to use e.g.
`numpy.tensordot` if we want to compute with the matrix representation:
>>> x = space.element(lambda x: x[0] ** 2 + 2 * x[1] ** 2)
>>> grad(x)
ProductSpace(uniform_discr([ 0., 0.], [ 2., 2.], (2, 2)), 2).element([
<BLANKLINE>
[[ 2. , 2. ],
[-2.75, -6.75]],
<BLANKLINE>
[[ 4. , -4.75],
[ 4. , -6.75]]
])
>>> np.tensordot(tensor, x, axes=grad.domain.ndim)
array([[[ 2. , 2. ],
[-2.75, -6.75]],
<BLANKLINE>
[[ 4. , -4.75],
[ 4. , -6.75]]])
Notes
----------
The algorithm works by letting the operator act on all unit vectors, and
stacking the output as a matrix.
### Response:
def matrix_representation(op):
    """Return a matrix representation of the linear operator ``op``.

    Parameters
    ----------
    op : `Operator`
        The linear operator of which one wants a matrix representation.
        If the domain or range is a `ProductSpace`, it must be a power-space.

    Returns
    -------
    matrix : `numpy.ndarray`
        The matrix representation of the operator, with shape
        ``op.range.shape + op.domain.shape`` and the promoted (greatest)
        dtype of the domain and range.

    Notes
    -----
    The algorithm works by letting the operator act on all unit vectors
    of the domain and stacking the outputs as a matrix.
    """
    if not op.is_linear:
        raise ValueError('the operator is not linear')

    def _tensor_like(space):
        # A plain TensorSpace, or a power ProductSpace whose components
        # are all TensorSpaces, is acceptable.
        return (isinstance(space, TensorSpace) or
                (isinstance(space, ProductSpace) and
                 space.is_power_space and
                 all(isinstance(part, TensorSpace) for part in space)))

    if not _tensor_like(op.domain):
        raise TypeError('operator domain {!r} is neither `TensorSpace` '
                        'nor `ProductSpace` with only equal `TensorSpace` '
                        'components'.format(op.domain))
    if not _tensor_like(op.range):
        raise TypeError('operator range {!r} is neither `TensorSpace` '
                        'nor `ProductSpace` with only equal `TensorSpace` '
                        'components'.format(op.range))

    # Promote to the "largest" of the two dtypes so no precision is lost.
    result_dtype = np.promote_types(op.domain.dtype, op.range.dtype)
    matrix = np.zeros(op.range.shape + op.domain.shape, dtype=result_dtype)
    out_elem = op.range.element()   # reused output buffer
    unit_vec = op.domain.zero()     # reused unit-vector input
    # Each application to a unit vector fills one trailing-axes slice.
    for index in nd_iterator(op.domain.shape):
        unit_vec[index] = 1.0
        op(unit_vec, out=out_elem)
        matrix[(Ellipsis,) + index] = out_elem.asarray()
        unit_vec[index] = 0.0       # restore the zero vector for next round
    return matrix
def toposort_flatten(data, sort=True):
"""Returns a single list of dependencies. For any set returned by
toposort(), those items are sorted and appended to the result (just to
make the results deterministic)."""
result = []
for d in toposort(data):
try:
result.extend((sorted if sort else list)(d))
except TypeError as e:
result.extend(list(d))
return result | Returns a single list of dependencies. For any set returned by
toposort(), those items are sorted and appended to the result (just to
    make the results deterministic). | Below is the instruction that describes the task:
### Input:
Returns a single list of dependencies. For any set returned by
toposort(), those items are sorted and appended to the result (just to
make the results deterministic).
### Response:
def toposort_flatten(data, sort=True):
    """Returns a single list of dependencies. For any set returned by
    toposort(), those items are sorted and appended to the result (just to
    make the results deterministic).

    Falls back to the set's arbitrary order when the items are not
    mutually orderable (i.e. sorting them raises TypeError).
    """
    result = []
    for d in toposort(data):
        try:
            result.extend(sorted(d) if sort else list(d))
        except TypeError:
            # Mixed/unorderable item types: keep the unsorted order.
            result.extend(list(d))
    return result
def perceptual_weighting(S, frequencies, **kwargs):
'''Perceptual weighting of a power spectrogram:
`S_p[f] = A_weighting(f) + 10*log(S[f] / ref)`
Parameters
----------
S : np.ndarray [shape=(d, t)]
Power spectrogram
frequencies : np.ndarray [shape=(d,)]
Center frequency for each row of `S`
kwargs : additional keyword arguments
Additional keyword arguments to `power_to_db`.
Returns
-------
S_p : np.ndarray [shape=(d, t)]
perceptually weighted version of `S`
See Also
--------
power_to_db
Notes
-----
This function caches at level 30.
Examples
--------
Re-weight a CQT power spectrum, using peak power as reference
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('A1')))
>>> freqs = librosa.cqt_frequencies(C.shape[0],
... fmin=librosa.note_to_hz('A1'))
>>> perceptual_CQT = librosa.perceptual_weighting(C**2,
... freqs,
... ref=np.max)
>>> perceptual_CQT
array([[ -80.076, -80.049, ..., -104.735, -104.735],
[ -78.344, -78.555, ..., -103.725, -103.725],
...,
[ -76.272, -76.272, ..., -76.272, -76.272],
[ -76.485, -76.485, ..., -76.485, -76.485]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... fmin=librosa.note_to_hz('A1'),
... y_axis='cqt_hz')
>>> plt.title('Log CQT power')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(perceptual_CQT, y_axis='cqt_hz',
... fmin=librosa.note_to_hz('A1'),
... x_axis='time')
>>> plt.title('Perceptually weighted log CQT')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.tight_layout()
'''
offset = time_frequency.A_weighting(frequencies).reshape((-1, 1))
return offset + power_to_db(S, **kwargs) | Perceptual weighting of a power spectrogram:
`S_p[f] = A_weighting(f) + 10*log(S[f] / ref)`
Parameters
----------
S : np.ndarray [shape=(d, t)]
Power spectrogram
frequencies : np.ndarray [shape=(d,)]
Center frequency for each row of `S`
kwargs : additional keyword arguments
Additional keyword arguments to `power_to_db`.
Returns
-------
S_p : np.ndarray [shape=(d, t)]
perceptually weighted version of `S`
See Also
--------
power_to_db
Notes
-----
This function caches at level 30.
Examples
--------
Re-weight a CQT power spectrum, using peak power as reference
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('A1')))
>>> freqs = librosa.cqt_frequencies(C.shape[0],
... fmin=librosa.note_to_hz('A1'))
>>> perceptual_CQT = librosa.perceptual_weighting(C**2,
... freqs,
... ref=np.max)
>>> perceptual_CQT
array([[ -80.076, -80.049, ..., -104.735, -104.735],
[ -78.344, -78.555, ..., -103.725, -103.725],
...,
[ -76.272, -76.272, ..., -76.272, -76.272],
[ -76.485, -76.485, ..., -76.485, -76.485]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... fmin=librosa.note_to_hz('A1'),
... y_axis='cqt_hz')
>>> plt.title('Log CQT power')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(perceptual_CQT, y_axis='cqt_hz',
... fmin=librosa.note_to_hz('A1'),
... x_axis='time')
>>> plt.title('Perceptually weighted log CQT')
>>> plt.colorbar(format='%+2.0f dB')
    >>> plt.tight_layout() | Below is the instruction that describes the task:
### Input:
Perceptual weighting of a power spectrogram:
`S_p[f] = A_weighting(f) + 10*log(S[f] / ref)`
Parameters
----------
S : np.ndarray [shape=(d, t)]
Power spectrogram
frequencies : np.ndarray [shape=(d,)]
Center frequency for each row of `S`
kwargs : additional keyword arguments
Additional keyword arguments to `power_to_db`.
Returns
-------
S_p : np.ndarray [shape=(d, t)]
perceptually weighted version of `S`
See Also
--------
power_to_db
Notes
-----
This function caches at level 30.
Examples
--------
Re-weight a CQT power spectrum, using peak power as reference
>>> y, sr = librosa.load(librosa.util.example_audio_file())
>>> C = np.abs(librosa.cqt(y, sr=sr, fmin=librosa.note_to_hz('A1')))
>>> freqs = librosa.cqt_frequencies(C.shape[0],
... fmin=librosa.note_to_hz('A1'))
>>> perceptual_CQT = librosa.perceptual_weighting(C**2,
... freqs,
... ref=np.max)
>>> perceptual_CQT
array([[ -80.076, -80.049, ..., -104.735, -104.735],
[ -78.344, -78.555, ..., -103.725, -103.725],
...,
[ -76.272, -76.272, ..., -76.272, -76.272],
[ -76.485, -76.485, ..., -76.485, -76.485]])
>>> import matplotlib.pyplot as plt
>>> plt.figure()
>>> plt.subplot(2, 1, 1)
>>> librosa.display.specshow(librosa.amplitude_to_db(C,
... ref=np.max),
... fmin=librosa.note_to_hz('A1'),
... y_axis='cqt_hz')
>>> plt.title('Log CQT power')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.subplot(2, 1, 2)
>>> librosa.display.specshow(perceptual_CQT, y_axis='cqt_hz',
... fmin=librosa.note_to_hz('A1'),
... x_axis='time')
>>> plt.title('Perceptually weighted log CQT')
>>> plt.colorbar(format='%+2.0f dB')
>>> plt.tight_layout()
### Response:
def perceptual_weighting(S, frequencies, **kwargs):
    '''Perceptual weighting of a power spectrogram:

    `S_p[f] = A_weighting(f) + 10*log(S[f] / ref)`

    Parameters
    ----------
    S : np.ndarray [shape=(d, t)]
        Power spectrogram
    frequencies : np.ndarray [shape=(d,)]
        Center frequency for each row of `S`
    kwargs : additional keyword arguments
        Additional keyword arguments to `power_to_db`.

    Returns
    -------
    S_p : np.ndarray [shape=(d, t)]
        perceptually weighted version of `S`

    See Also
    --------
    power_to_db

    Notes
    -----
    This function caches at level 30.
    '''
    # Per-frequency A-weighting in dB, reshaped to a column so it
    # broadcasts across the time frames of the dB-scaled spectrogram.
    a_weights = time_frequency.A_weighting(frequencies)
    return a_weights.reshape((-1, 1)) + power_to_db(S, **kwargs)
def replace_blocks(self, blocks):
"""Replace multiple blocks. blocks must be a list of tuples where
each tuple consists of (namespace, offset, key, data)"""
start = 0
bulk_insert = self.bulk_insert
blocks_len = len(blocks)
row = '(%s,%s,%s,%s,%s)'
query = 'REPLACE INTO gauged_data (namespace, offset, `key`, ' \
'data, flags) VALUES '
execute = self.cursor.execute
to_buffer = self.to_buffer
while start < blocks_len:
rows = blocks[start:start+bulk_insert]
params = []
for namespace, offset, key, data, flags in rows:
params.extend((namespace, offset, key, to_buffer(data), flags))
insert = (row + ',') * (len(rows) - 1) + row
execute(query + insert, params)
start += bulk_insert | Replace multiple blocks. blocks must be a list of tuples where
    each tuple consists of (namespace, offset, key, data) | Below is the instruction that describes the task:
### Input:
Replace multiple blocks. blocks must be a list of tuples where
each tuple consists of (namespace, offset, key, data)
### Response:
def replace_blocks(self, blocks):
    """Replace multiple blocks in one or more bulk REPLACE statements.

    ``blocks`` must be a list of 5-tuples, each consisting of
    (namespace, offset, key, data, flags). At most ``self.bulk_insert``
    rows are written per statement.
    """
    row = '(%s,%s,%s,%s,%s)'
    query = 'REPLACE INTO gauged_data (namespace, offset, `key`, ' \
        'data, flags) VALUES '
    chunk_size = self.bulk_insert
    convert = self.to_buffer
    for begin in range(0, len(blocks), chunk_size):
        chunk = blocks[begin:begin + chunk_size]
        params = []
        for namespace, offset, key, data, flags in chunk:
            params.extend((namespace, offset, key, convert(data), flags))
        placeholders = ','.join([row] * len(chunk))
        self.cursor.execute(query + placeholders, params)
def get_appliance_stats_by_location(self, location_id, start, end, granularity=None, per_page=None, page=None,
min_power=None):
"""Get appliance usage data for a given location within a given time range.
Stats are generated by fetching appliance events that match the supplied
criteria and then aggregating them together based on the granularity
specified with the request.
Note:
This endpoint uses the location's time zone when generating time intervals
for the stats, which is relevant if that time zone uses daylight saving
time (some days will be 23 or 25 hours long).
Args:
location_id (string): hexadecimal id of the sensor to query, e.g.
``0x0013A20040B65FAD``
start (string): ISO 8601 start time for getting the events of appliances.
end (string): ISO 8601 stop time for getting the events of appliances.
Cannot be larger than 1 month from start time
granularity (string): granularity of stats. If the granularity is
'unknown', the stats for the appliances between the start and
end time is returned.;
must be one of "minutes", "hours", "days", "weeks", "months", or "unknown"
(default: days)
min_power (string): The minimum average power (in watts) for filtering.
Only events with an average power above this value will be returned.
(default: 400)
per_page (string, optional): the number of returned results per page
(min 1, max 500) (default: 10)
page (string, optional): the page number to return (min 1, max 100000)
(default: 1)
Returns:
list: dictionary objects containing appliance events meeting specified criteria
"""
url = "https://api.neur.io/v1/appliances/stats"
headers = self.__gen_headers()
headers["Content-Type"] = "application/json"
params = {
"locationId": location_id,
"start": start,
"end": end
}
if granularity:
params["granularity"] = granularity
if min_power:
params["minPower"] = min_power
if per_page:
params["perPage"] = per_page
if page:
params["page"] = page
url = self.__append_url_params(url, params)
r = requests.get(url, headers=headers)
return r.json() | Get appliance usage data for a given location within a given time range.
Stats are generated by fetching appliance events that match the supplied
criteria and then aggregating them together based on the granularity
specified with the request.
Note:
This endpoint uses the location's time zone when generating time intervals
for the stats, which is relevant if that time zone uses daylight saving
time (some days will be 23 or 25 hours long).
Args:
location_id (string): hexadecimal id of the sensor to query, e.g.
``0x0013A20040B65FAD``
start (string): ISO 8601 start time for getting the events of appliances.
end (string): ISO 8601 stop time for getting the events of appliances.
Cannot be larger than 1 month from start time
granularity (string): granularity of stats. If the granularity is
'unknown', the stats for the appliances between the start and
end time is returned.;
must be one of "minutes", "hours", "days", "weeks", "months", or "unknown"
(default: days)
min_power (string): The minimum average power (in watts) for filtering.
Only events with an average power above this value will be returned.
(default: 400)
per_page (string, optional): the number of returned results per page
(min 1, max 500) (default: 10)
page (string, optional): the page number to return (min 1, max 100000)
(default: 1)
Returns:
        list: dictionary objects containing appliance events meeting specified criteria | Below is the instruction that describes the task:
### Input:
Get appliance usage data for a given location within a given time range.
Stats are generated by fetching appliance events that match the supplied
criteria and then aggregating them together based on the granularity
specified with the request.
Note:
This endpoint uses the location's time zone when generating time intervals
for the stats, which is relevant if that time zone uses daylight saving
time (some days will be 23 or 25 hours long).
Args:
location_id (string): hexadecimal id of the sensor to query, e.g.
``0x0013A20040B65FAD``
start (string): ISO 8601 start time for getting the events of appliances.
end (string): ISO 8601 stop time for getting the events of appliances.
Cannot be larger than 1 month from start time
granularity (string): granularity of stats. If the granularity is
'unknown', the stats for the appliances between the start and
end time is returned.;
must be one of "minutes", "hours", "days", "weeks", "months", or "unknown"
(default: days)
min_power (string): The minimum average power (in watts) for filtering.
Only events with an average power above this value will be returned.
(default: 400)
per_page (string, optional): the number of returned results per page
(min 1, max 500) (default: 10)
page (string, optional): the page number to return (min 1, max 100000)
(default: 1)
Returns:
list: dictionary objects containing appliance events meeting specified criteria
### Response:
def get_appliance_stats_by_location(self, location_id, start, end, granularity=None, per_page=None, page=None,
                                    min_power=None):
    """Fetch aggregated appliance usage stats for a location and time range.

    Appliance events matching the supplied criteria are fetched and then
    aggregated together based on the requested granularity.

    Note:
        The service applies the location's time zone when generating time
        intervals, which matters when that zone observes daylight saving
        time (some days will be 23 or 25 hours long).

    Args:
        location_id (string): hexadecimal id of the sensor to query, e.g.
            ``0x0013A20040B65FAD``
        start (string): ISO 8601 start time for getting the events of
            appliances.
        end (string): ISO 8601 stop time; cannot be more than 1 month
            after the start time.
        granularity (string): granularity of the stats; one of "minutes",
            "hours", "days", "weeks", "months", or "unknown"
            (default: days). With "unknown" the stats for the whole
            interval are returned.
        min_power (string): minimum average power (in watts) for
            filtering; only events above this value are returned
            (default: 400).
        per_page (string, optional): number of results per page,
            min 1, max 500 (default: 10).
        page (string, optional): page number to return, min 1,
            max 100000 (default: 1).

    Returns:
        list: dictionary objects containing appliance events meeting
        specified criteria
    """
    headers = self.__gen_headers()
    headers["Content-Type"] = "application/json"
    # Required query parameters first; optional ones are appended only
    # when the caller supplied a truthy value (the service applies its
    # own defaults otherwise).
    params = {
        "locationId": location_id,
        "start": start,
        "end": end
    }
    optional = (("granularity", granularity), ("minPower", min_power),
                ("perPage", per_page), ("page", page))
    for key, value in optional:
        if value:
            params[key] = value
    url = self.__append_url_params("https://api.neur.io/v1/appliances/stats", params)
    response = requests.get(url, headers=headers)
    return response.json()
def fht(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
zetaV, xdirect, fhtarg, use_ne_eval, msrc, mrec):
r"""Hankel Transform using the Digital Linear Filter method.
The *Digital Linear Filter* method was introduced to geophysics by
[Ghos70]_, and made popular and wide-spread by [Ande75]_, [Ande79]_,
[Ande82]_. The DLF is sometimes referred to as the *Fast Hankel Transform*
FHT, from which this routine has its name.
This implementation of the DLF follows [Key12]_, equation 6. Without going
into the mathematical details (which can be found in any of the above
papers) and following [Key12]_, the DLF method rewrites the Hankel
transform of the form
.. math:: F(r) = \int^\infty_0 f(\lambda)J_v(\lambda r)\
\mathrm{d}\lambda
as
.. math:: F(r) = \sum^n_{i=1} f(b_i/r)h_i/r \ ,
where :math:`h` is the digital filter.The Filter abscissae b is given by
.. math:: b_i = \lambda_ir = e^{ai}, \qquad i = -l, -l+1, \cdots, l \ ,
with :math:`l=(n-1)/2`, and :math:`a` is the spacing coefficient.
This function is loosely based on ``get_CSEM1D_FD_FHT.m`` from the source
code distributed with [Key12]_.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
fEM : array
Returns frequency-domain EM response.
kcount : int
Kernel count. For DLF, this is 1.
conv : bool
Only relevant for QWE/QUAD.
"""
# 1. Get fhtargs
fhtfilt = fhtarg[0]
pts_per_dec = fhtarg[1]
lambd = fhtarg[2]
int_pts = fhtarg[3]
# 2. Call the kernel
PJ = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH,
zetaV, lambd, ab, xdirect, msrc, mrec, use_ne_eval)
# 3. Carry out the dlf
fEM = dlf(PJ, lambd, off, fhtfilt, pts_per_dec, factAng=factAng, ab=ab,
int_pts=int_pts)
return fEM, 1, True | r"""Hankel Transform using the Digital Linear Filter method.
The *Digital Linear Filter* method was introduced to geophysics by
[Ghos70]_, and made popular and wide-spread by [Ande75]_, [Ande79]_,
[Ande82]_. The DLF is sometimes referred to as the *Fast Hankel Transform*
FHT, from which this routine has its name.
This implementation of the DLF follows [Key12]_, equation 6. Without going
into the mathematical details (which can be found in any of the above
papers) and following [Key12]_, the DLF method rewrites the Hankel
transform of the form
.. math:: F(r) = \int^\infty_0 f(\lambda)J_v(\lambda r)\
\mathrm{d}\lambda
as
.. math:: F(r) = \sum^n_{i=1} f(b_i/r)h_i/r \ ,
where :math:`h` is the digital filter.The Filter abscissae b is given by
.. math:: b_i = \lambda_ir = e^{ai}, \qquad i = -l, -l+1, \cdots, l \ ,
with :math:`l=(n-1)/2`, and :math:`a` is the spacing coefficient.
This function is loosely based on ``get_CSEM1D_FD_FHT.m`` from the source
code distributed with [Key12]_.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
fEM : array
Returns frequency-domain EM response.
kcount : int
Kernel count. For DLF, this is 1.
conv : bool
Only relevant for QWE/QUAD. | Below is the the instruction that describes the task:
### Input:
r"""Hankel Transform using the Digital Linear Filter method.
The *Digital Linear Filter* method was introduced to geophysics by
[Ghos70]_, and made popular and wide-spread by [Ande75]_, [Ande79]_,
[Ande82]_. The DLF is sometimes referred to as the *Fast Hankel Transform*
FHT, from which this routine has its name.
This implementation of the DLF follows [Key12]_, equation 6. Without going
into the mathematical details (which can be found in any of the above
papers) and following [Key12]_, the DLF method rewrites the Hankel
transform of the form
.. math:: F(r) = \int^\infty_0 f(\lambda)J_v(\lambda r)\
\mathrm{d}\lambda
as
.. math:: F(r) = \sum^n_{i=1} f(b_i/r)h_i/r \ ,
where :math:`h` is the digital filter.The Filter abscissae b is given by
.. math:: b_i = \lambda_ir = e^{ai}, \qquad i = -l, -l+1, \cdots, l \ ,
with :math:`l=(n-1)/2`, and :math:`a` is the spacing coefficient.
This function is loosely based on ``get_CSEM1D_FD_FHT.m`` from the source
code distributed with [Key12]_.
The function is called from one of the modelling routines in :mod:`model`.
Consult these modelling routines for a description of the input and output
parameters.
Returns
-------
fEM : array
Returns frequency-domain EM response.
kcount : int
Kernel count. For DLF, this is 1.
conv : bool
Only relevant for QWE/QUAD.
### Response:
def fht(zsrc, zrec, lsrc, lrec, off, factAng, depth, ab, etaH, etaV, zetaH,
        zetaV, xdirect, fhtarg, use_ne_eval, msrc, mrec):
    r"""Hankel transform using the Digital Linear Filter (DLF) method.

    The DLF (often called the *Fast Hankel Transform*, FHT) evaluates

    .. math:: F(r) = \int^\infty_0 f(\lambda)J_v(\lambda r)\,\mathrm{d}\lambda

    as the filter sum :math:`F(r) = \sum^n_{i=1} f(b_i/r)h_i/r`, where
    :math:`h` is the digital filter and :math:`b_i = e^{ai}` its
    abscissae ([Ghos70]_, [Ande75]_, [Ande79]_, [Ande82]_; [Key12]_
    equation 6).  Loosely based on ``get_CSEM1D_FD_FHT.m`` distributed
    with [Key12]_.

    Called from the modelling routines in :mod:`model`; consult those for
    a description of the input parameters.

    Returns
    -------
    fEM : array
        Frequency-domain EM response.
    kcount : int
        Kernel count; always 1 for DLF.
    conv : bool
        Always True here; only relevant for QWE/QUAD.
    """
    # Unpack the DLF arguments: filter, sampling density, wavenumbers and
    # interpolation points.
    fhtfilt, pts_per_dec, lambd, int_pts = (
        fhtarg[0], fhtarg[1], fhtarg[2], fhtarg[3])
    # Evaluate the wavenumber-domain kernel.
    PJ = kernel.wavenumber(zsrc, zrec, lsrc, lrec, depth, etaH, etaV, zetaH,
                           zetaV, lambd, ab, xdirect, msrc, mrec, use_ne_eval)
    # Convolve with the digital filter to obtain the space-frequency
    # domain response.
    fEM = dlf(PJ, lambd, off, fhtfilt, pts_per_dec, factAng=factAng, ab=ab,
              int_pts=int_pts)
    return fEM, 1, True
def _readline(self):
"""Read exactly one line from the device, nonblocking.
Returns:
None on no data
"""
if len(self.lines) > 1:
return self.lines.pop(0)
tail = ''
if len(self.lines):
tail = self.lines.pop()
try:
tail += self._read()
except socket.error:
logging.exception('No new data')
time.sleep(0.1)
self.lines += linesepx.split(tail)
if len(self.lines) > 1:
return self.lines.pop(0) | Read exactly one line from the device, nonblocking.
Returns:
None on no data | Below is the instruction that describes the task:
### Input:
Read exactly one line from the device, nonblocking.
Returns:
None on no data
### Response:
def _readline(self):
    """Return one complete line from the device without blocking.

    Returns:
        None when no full line is available yet; partial data is kept
        buffered in ``self.lines`` until a line terminator arrives.
    """
    # The final element of self.lines is always a (possibly empty)
    # partial tail, so more than one element means a full line is ready.
    if len(self.lines) > 1:
        return self.lines.pop(0)
    # Merge the buffered partial tail with freshly read data.
    pending = self.lines.pop() if self.lines else ''
    try:
        pending += self._read()
    except socket.error:
        logging.exception('No new data')
        time.sleep(0.1)
    self.lines += linesepx.split(pending)
    if len(self.lines) > 1:
        return self.lines.pop(0)
def load_json_object(file_name, default=None):
"""
Deserialized <file_name> into a Python object
:param str|unicode file_name: The name of the file to read
:param default: The value to return if the file is not found
"""
if not os.path.isfile(file_name):
return default
# use utf-8-sig in case the file has a Byte Order Mark
with codecs.open(file_name, 'r', 'utf-8-sig') as in_file:
# read the text from the file
content = in_file.read()
# convert Windows line endings to Linux line endings
content = content.replace('\r\n', '\n')
# return a deserialized object
return json.loads(content) | Deserialized <file_name> into a Python object
:param str|unicode file_name: The name of the file to read
:param default: The value to return if the file is not found | Below is the instruction that describes the task:
### Input:
Deserialized <file_name> into a Python object
:param str|unicode file_name: The name of the file to read
:param default: The value to return if the file is not found
### Response:
def load_json_object(file_name, default=None):
    """
    Deserialize the JSON document in <file_name> into a Python object.

    :param str|unicode file_name: The name of the file to read
    :param default: The value to return if the file is not found
    """
    if not os.path.isfile(file_name):
        return default
    # utf-8-sig transparently strips a leading Byte Order Mark if present.
    with codecs.open(file_name, 'r', 'utf-8-sig') as in_file:
        content = in_file.read()
    # Normalize Windows line endings before parsing.
    return json.loads(content.replace('\r\n', '\n'))
def decode_chain_list(in_bytes):
"""Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN
:param in_bytes: the input bytes
:return the decoded list of strings"""
bstrings = numpy.frombuffer(in_bytes, numpy.dtype('S' + str(mmtf.utils.constants.CHAIN_LEN)))
return [s.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE) for s in bstrings] | Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN
:param in_bytes: the input bytes
:return the decoded list of strings | Below is the instruction that describes the task:
### Input:
Convert a list of bytes to a list of strings. Each string is of length mmtf.CHAIN_LEN
:param in_bytes: the input bytes
:return the decoded list of strings
### Response:
def decode_chain_list(in_bytes):
    """Convert a byte string into a list of chain-id strings.

    The input is interpreted as fixed-width records of
    ``mmtf.utils.constants.CHAIN_LEN`` bytes each; trailing/leading NULL
    padding is stripped from every record.

    :param in_bytes: the input bytes
    :return: the decoded list of strings
    """
    width = numpy.dtype('S' + str(mmtf.utils.constants.CHAIN_LEN))
    records = numpy.frombuffer(in_bytes, width)
    return [record.decode("ascii").strip(mmtf.utils.constants.NULL_BYTE)
            for record in records]
def update_aliases(self):
"""Get aliases information from room state.
Returns:
boolean: True if the aliases changed, False if not
"""
try:
response = self.client.api.get_room_state(self.room_id)
for chunk in response:
if "content" in chunk and "aliases" in chunk["content"]:
if chunk["content"]["aliases"] != self.aliases:
self.aliases = chunk["content"]["aliases"]
return True
else:
return False
except MatrixRequestError:
return False | Get aliases information from room state.
Returns:
boolean: True if the aliases changed, False if not | Below is the instruction that describes the task:
### Input:
Get aliases information from room state.
Returns:
boolean: True if the aliases changed, False if not
### Response:
def update_aliases(self):
    """Refresh ``self.aliases`` from the room state.

    Returns:
        boolean: True if the aliases changed, False if not
    """
    try:
        state = self.client.api.get_room_state(self.room_id)
        for event in state:
            # Only the first state event carrying an "aliases" payload is
            # considered, matching the server-reported canonical state.
            if "content" not in event or "aliases" not in event["content"]:
                continue
            new_aliases = event["content"]["aliases"]
            if new_aliases == self.aliases:
                return False
            self.aliases = new_aliases
            return True
    except MatrixRequestError:
        return False
def insert(self, table_name, record, attr_names=None):
"""
Send an INSERT query to the database.
:param str table_name: Table name of executing the query.
:param record: Record to be inserted.
:type record: |dict|/|namedtuple|/|list|/|tuple|
:raises IOError: |raises_write_permission|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
:ref:`example-insert-records`
"""
self.insert_many(table_name, records=[record], attr_names=attr_names) | Send an INSERT query to the database.
:param str table_name: Table name of executing the query.
:param record: Record to be inserted.
:type record: |dict|/|namedtuple|/|list|/|tuple|
:raises IOError: |raises_write_permission|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
:ref:`example-insert-records` | Below is the instruction that describes the task:
### Input:
Send an INSERT query to the database.
:param str table_name: Table name of executing the query.
:param record: Record to be inserted.
:type record: |dict|/|namedtuple|/|list|/|tuple|
:raises IOError: |raises_write_permission|
:raises simplesqlite.NullDatabaseConnectionError:
|raises_check_connection|
:raises simplesqlite.OperationalError: |raises_operational_error|
:Example:
:ref:`example-insert-records`
### Response:
def insert(self, table_name, record, attr_names=None):
    """
    Send an INSERT query to the database.

    :param str table_name: Table name of executing the query.
    :param record: Record to be inserted.
    :type record: |dict|/|namedtuple|/|list|/|tuple|
    :raises IOError: |raises_write_permission|
    :raises simplesqlite.NullDatabaseConnectionError:
        |raises_check_connection|
    :raises simplesqlite.OperationalError: |raises_operational_error|
    :Example:
        :ref:`example-insert-records`
    """
    # A single record is just the one-element case of the bulk insert.
    self.insert_many(table_name, records=[record], attr_names=attr_names)
def find(self,
text,
magicKey=None,
sourceCountry=None,
bbox=None,
location=None,
distance=3218.69,
outSR=102100,
category=None,
outFields="*",
maxLocations=20,
forStorage=False):
"""
The find operation geocodes one location per request; the input
address is specified in a single parameter.
Inputs:
text - Specifies the location to be geocoded. This can be a
street address, place name, postal code, or POI.
magicKey - The find operation retrieves results quicker when you
pass in valid text and magicKey values than when you don't pass
in magicKey. However, to get these advantages, you need to make
a prior request to suggest, which provides a magicKey. This may
or may not be relevant to your workflow.
sourceCountry - A value representing the country. Providing this
value increases geocoding speed. Acceptable values include the
full country name in English or the official language of the
country, the ISO 3166-1 2-digit country code, or the
ISO 3166-1 3-digit country code.
bbox - A set of bounding box coordinates that limit the search
area to a specific region. This is especially useful for
applications in which a user will search for places and
addresses only within the current map extent.
location - Defines an origin point location that is used with
the distance parameter to sort geocoding candidates based upon
their proximity to the location. The distance parameter
specifies the radial distance from the location in meters. The
priority of candidates within this radius is boosted relative
to those outside the radius.
distance - Specifies the radius of an area around a point
location which is used to boost the rank of geocoding
candidates so that candidates closest to the location are
returned first. The distance value is in meters.
outSR - The spatial reference of the x/y coordinates returned by
a geocode request. This is useful for applications using a map
with a spatial reference different than that of the geocode
service.
category - A place or address type which can be used to filter
find results. The parameter supports input of single category
values or multiple comma-separated values. The category
parameter can be passed in a request with or without the text
parameter.
outFields - The list of fields to be returned in the response.
maxLocation - The maximum number of locations to be returned by
a search, up to the maximum number allowed by the service. If
not specified, then one location will be returned.
forStorage - Specifies whether the results of the operation will
be persisted. The default value is false, which indicates the
results of the operation can't be stored, but they can be
temporarily displayed on a map for instance. If you store the
results, in a database for example, you need to set this
parameter to true.
"""
if isinstance(self._securityHandler, (AGOLTokenSecurityHandler, OAuthSecurityHandler)):
url = self._url + "/find"
params = {
"f" : "json",
"text" : text,
#"token" : self._securityHandler.token
}
if not magicKey is None:
params['magicKey'] = magicKey
if not sourceCountry is None:
params['sourceCountry'] = sourceCountry
if not bbox is None:
params['bbox'] = bbox
if not location is None:
if isinstance(location, Point):
params['location'] = location.asDictionary
if isinstance(location, list):
params['location'] = "%s,%s" % (location[0], location[1])
if not distance is None:
params['distance'] = distance
if not outSR is None:
params['outSR'] = outSR
if not category is None:
params['category'] = category
if outFields is None:
params['outFields'] = "*"
else:
params['outFields'] = outFields
if not maxLocations is None:
params['maxLocations'] = maxLocations
if not forStorage is None:
params['forStorage'] = forStorage
return self._post(url=url,
param_dict=params,
securityHandler=self._securityHandler,
proxy_url=self._proxy_url,
proxy_port=self._proxy_port)
else:
raise Exception("This function works on the ArcGIS Online World Geocoder") | The find operation geocodes one location per request; the input
address is specified in a single parameter.
Inputs:
text - Specifies the location to be geocoded. This can be a
street address, place name, postal code, or POI.
magicKey - The find operation retrieves results quicker when you
pass in valid text and magicKey values than when you don't pass
in magicKey. However, to get these advantages, you need to make
a prior request to suggest, which provides a magicKey. This may
or may not be relevant to your workflow.
sourceCountry - A value representing the country. Providing this
value increases geocoding speed. Acceptable values include the
full country name in English or the official language of the
country, the ISO 3166-1 2-digit country code, or the
ISO 3166-1 3-digit country code.
bbox - A set of bounding box coordinates that limit the search
area to a specific region. This is especially useful for
applications in which a user will search for places and
addresses only within the current map extent.
location - Defines an origin point location that is used with
the distance parameter to sort geocoding candidates based upon
their proximity to the location. The distance parameter
specifies the radial distance from the location in meters. The
priority of candidates within this radius is boosted relative
to those outside the radius.
distance - Specifies the radius of an area around a point
location which is used to boost the rank of geocoding
candidates so that candidates closest to the location are
returned first. The distance value is in meters.
outSR - The spatial reference of the x/y coordinates returned by
a geocode request. This is useful for applications using a map
with a spatial reference different than that of the geocode
service.
category - A place or address type which can be used to filter
find results. The parameter supports input of single category
values or multiple comma-separated values. The category
parameter can be passed in a request with or without the text
parameter.
outFields - The list of fields to be returned in the response.
maxLocation - The maximum number of locations to be returned by
a search, up to the maximum number allowed by the service. If
not specified, then one location will be returned.
forStorage - Specifies whether the results of the operation will
be persisted. The default value is false, which indicates the
results of the operation can't be stored, but they can be
temporarily displayed on a map for instance. If you store the
results, in a database for example, you need to set this
parameter to true. | Below is the instruction that describes the task:
### Input:
The find operation geocodes one location per request; the input
address is specified in a single parameter.
Inputs:
text - Specifies the location to be geocoded. This can be a
street address, place name, postal code, or POI.
magicKey - The find operation retrieves results quicker when you
pass in valid text and magicKey values than when you don't pass
in magicKey. However, to get these advantages, you need to make
a prior request to suggest, which provides a magicKey. This may
or may not be relevant to your workflow.
sourceCountry - A value representing the country. Providing this
value increases geocoding speed. Acceptable values include the
full country name in English or the official language of the
country, the ISO 3166-1 2-digit country code, or the
ISO 3166-1 3-digit country code.
bbox - A set of bounding box coordinates that limit the search
area to a specific region. This is especially useful for
applications in which a user will search for places and
addresses only within the current map extent.
location - Defines an origin point location that is used with
the distance parameter to sort geocoding candidates based upon
their proximity to the location. The distance parameter
specifies the radial distance from the location in meters. The
priority of candidates within this radius is boosted relative
to those outside the radius.
distance - Specifies the radius of an area around a point
location which is used to boost the rank of geocoding
candidates so that candidates closest to the location are
returned first. The distance value is in meters.
outSR - The spatial reference of the x/y coordinates returned by
a geocode request. This is useful for applications using a map
with a spatial reference different than that of the geocode
service.
category - A place or address type which can be used to filter
find results. The parameter supports input of single category
values or multiple comma-separated values. The category
parameter can be passed in a request with or without the text
parameter.
outFields - The list of fields to be returned in the response.
maxLocation - The maximum number of locations to be returned by
a search, up to the maximum number allowed by the service. If
not specified, then one location will be returned.
forStorage - Specifies whether the results of the operation will
be persisted. The default value is false, which indicates the
results of the operation can't be stored, but they can be
temporarily displayed on a map for instance. If you store the
results, in a database for example, you need to set this
parameter to true.
### Response:
def find(self,
         text,
         magicKey=None,
         sourceCountry=None,
         bbox=None,
         location=None,
         distance=3218.69,
         outSR=102100,
         category=None,
         outFields="*",
         maxLocations=20,
         forStorage=False):
    """
    The find operation geocodes one location per request; the input
    address is specified in a single parameter.
    Inputs:
       text - Specifies the location to be geocoded. This can be a
        street address, place name, postal code, or POI.
       magicKey - The find operation retrieves results quicker when you
        pass in valid text and magicKey values than when you don't pass
        in magicKey. However, to get these advantages, you need to make
        a prior request to suggest, which provides a magicKey. This may
        or may not be relevant to your workflow.
       sourceCountry - A value representing the country. Providing this
        value increases geocoding speed. Acceptable values include the
        full country name in English or the official language of the
        country, the ISO 3166-1 2-digit country code, or the
        ISO 3166-1 3-digit country code.
       bbox - A set of bounding box coordinates that limit the search
        area to a specific region. This is especially useful for
        applications in which a user will search for places and
        addresses only within the current map extent.
       location - Defines an origin point location that is used with
        the distance parameter to sort geocoding candidates based upon
        their proximity to the location. The distance parameter
        specifies the radial distance from the location in meters. The
        priority of candidates within this radius is boosted relative
        to those outside the radius.
       distance - Specifies the radius of an area around a point
        location which is used to boost the rank of geocoding
        candidates so that candidates closest to the location are
        returned first. The distance value is in meters.
       outSR - The spatial reference of the x/y coordinates returned by
        a geocode request. This is useful for applications using a map
        with a spatial reference different than that of the geocode
        service.
       category - A place or address type which can be used to filter
        find results. The parameter supports input of single category
        values or multiple comma-separated values. The category
        parameter can be passed in a request with or without the text
        parameter.
       outFields - The list of fields to be returned in the response.
       maxLocation - The maximum number of locations to be returned by
        a search, up to the maximum number allowed by the service. If
        not specified, then one location will be returned.
       forStorage - Specifies whether the results of the operation will
        be persisted. The default value is false, which indicates the
        results of the operation can't be stored, but they can be
        temporarily displayed on a map for instance. If you store the
        results, in a database for example, you need to set this
        parameter to true.
    """
    # Only token-based handlers (ArcGIS Online / OAuth) are supported;
    # the docstring and error below indicate this targets the ArcGIS
    # Online World Geocoder specifically.
    if isinstance(self._securityHandler, (AGOLTokenSecurityHandler, OAuthSecurityHandler)):
        url = self._url + "/find"
        params = {
            "f" : "json",
            "text" : text,
            # NOTE(review): the token is presumably appended by
            # self._post via the security handler, not here — confirm.
            #"token" : self._securityHandler.token
        }
        # Optional parameters are serialized only when explicitly given.
        if not magicKey is None:
            params['magicKey'] = magicKey
        if not sourceCountry is None:
            params['sourceCountry'] = sourceCountry
        if not bbox is None:
            params['bbox'] = bbox
        if not location is None:
            # A Point is serialized to its dictionary form; a two-element
            # [x, y] list becomes the "x,y" string the REST API accepts.
            if isinstance(location, Point):
                params['location'] = location.asDictionary
            if isinstance(location, list):
                params['location'] = "%s,%s" % (location[0], location[1])
        if not distance is None:
            params['distance'] = distance
        if not outSR is None:
            params['outSR'] = outSR
        if not category is None:
            params['category'] = category
        # Default to returning all fields when none are requested.
        if outFields is None:
            params['outFields'] = "*"
        else:
            params['outFields'] = outFields
        if not maxLocations is None:
            params['maxLocations'] = maxLocations
        if not forStorage is None:
            params['forStorage'] = forStorage
        return self._post(url=url,
                          param_dict=params,
                          securityHandler=self._securityHandler,
                          proxy_url=self._proxy_url,
                          proxy_port=self._proxy_port)
    else:
        raise Exception("This function works on the ArcGIS Online World Geocoder")
def savepoint(cr):
"""return a context manager wrapping postgres savepoints"""
if hasattr(cr, 'savepoint'):
with cr.savepoint():
yield
else:
name = uuid.uuid1().hex
cr.execute('SAVEPOINT "%s"' % name)
try:
yield
cr.execute('RELEASE SAVEPOINT "%s"' % name)
except:
cr.execute('ROLLBACK TO SAVEPOINT "%s"' % name) | return a context manager wrapping postgres savepoints | Below is the the instruction that describes the task:
### Input:
return a context manager wrapping postgres savepoints
### Response:
def savepoint(cr):
    """Return a context manager wrapping postgres savepoints.

    Uses the cursor's native ``savepoint()`` context manager when
    available; otherwise emits SAVEPOINT / RELEASE / ROLLBACK statements
    with a unique name.

    On failure the savepoint is rolled back and the original exception is
    re-raised.  (The previous implementation's bare ``except`` silently
    swallowed the exception after rolling back — hiding errors from
    callers and breaking ``GeneratorExit`` handling.)

    :param cr: database cursor exposing at least ``execute``.
    """
    if hasattr(cr, 'savepoint'):
        with cr.savepoint():
            yield
    else:
        # Unique name avoids collisions with nested savepoints.
        name = uuid.uuid1().hex
        cr.execute('SAVEPOINT "%s"' % name)
        try:
            yield
        except BaseException:
            # Roll back to the savepoint, then propagate the error so the
            # caller can react (also preserves GeneratorExit semantics).
            cr.execute('ROLLBACK TO SAVEPOINT "%s"' % name)
            raise
        else:
            cr.execute('RELEASE SAVEPOINT "%s"' % name)
def list_processed_parameter_group_histogram(self, group=None, start=None, stop=None, merge_time=20):
"""
Reads index records related to processed parameter groups between the
specified start and stop time.
Each iteration returns a chunk of chronologically-sorted records.
:param float merge_time: Maximum gap in seconds before two consecutive index records are merged together.
:rtype: ~collections.Iterable[.IndexGroup]
"""
params = {}
if group is not None:
params['group'] = group
if start is not None:
params['start'] = to_isostring(start)
if stop is not None:
params['stop'] = to_isostring(stop)
if merge_time is not None:
params['mergeTime'] = int(merge_time * 1000)
return pagination.Iterator(
client=self._client,
path='/archive/{}/parameter-index'.format(self._instance),
params=params,
response_class=archive_pb2.IndexResponse,
items_key='group',
item_mapper=IndexGroup,
) | Reads index records related to processed parameter groups between the
specified start and stop time.
Each iteration returns a chunk of chronologically-sorted records.
:param float merge_time: Maximum gap in seconds before two consecutive index records are merged together.
:rtype: ~collections.Iterable[.IndexGroup] | Below is the instruction that describes the task:
### Input:
Reads index records related to processed parameter groups between the
specified start and stop time.
Each iteration returns a chunk of chronologically-sorted records.
:param float merge_time: Maximum gap in seconds before two consecutive index records are merged together.
:rtype: ~collections.Iterable[.IndexGroup]
### Response:
def list_processed_parameter_group_histogram(self, group=None, start=None, stop=None, merge_time=20):
    """
    Read index records for processed parameter groups between the given
    start and stop time.

    Each iteration yields a chunk of chronologically-sorted records.

    :param float merge_time: Maximum gap in seconds before two
        consecutive index records are merged together.
    :rtype: ~collections.Iterable[.IndexGroup]
    """
    query = {}
    if group is not None:
        query['group'] = group
    # Timestamps are serialized to ISO strings for the REST API.
    for key, value in (('start', start), ('stop', stop)):
        if value is not None:
            query[key] = to_isostring(value)
    if merge_time is not None:
        # The API expects the merge window in milliseconds.
        query['mergeTime'] = int(merge_time * 1000)
    return pagination.Iterator(
        client=self._client,
        path='/archive/{}/parameter-index'.format(self._instance),
        params=query,
        response_class=archive_pb2.IndexResponse,
        items_key='group',
        item_mapper=IndexGroup,
    )
def hook_fn(self, module:nn.Module, input:Tensors, output:Tensors):
"Applies `hook_func` to `module`, `input`, `output`."
if self.detach:
input = (o.detach() for o in input ) if is_listy(input ) else input.detach()
output = (o.detach() for o in output) if is_listy(output) else output.detach()
self.stored = self.hook_func(module, input, output) | Applies `hook_func` to `module`, `input`, `output`. | Below is the the instruction that describes the task:
### Input:
Applies `hook_func` to `module`, `input`, `output`.
### Response:
def hook_fn(self, module:nn.Module, input:Tensors, output:Tensors):
    "Applies `hook_func` to `module`, `input`, `output`."
    if self.detach:
        # Detach from the autograd graph; list-like containers are detached
        # lazily (generator), single tensors eagerly — matching caller expectations.
        def _detached(tensors):
            return (t.detach() for t in tensors) if is_listy(tensors) else tensors.detach()
        input = _detached(input)
        output = _detached(output)
    self.stored = self.hook_func(module, input, output)
def is_maximal_matching(G, matching):
"""Determines whether the given set of edges is a maximal matching.
A matching is a subset of edges in which no node occurs more than
once. The cardinality of a matching is the number of matched edges.
A maximal matching is one where one cannot add any more edges
without violating the matching rule.
Parameters
----------
G : NetworkX graph
The graph on which to check the maximal matching.
edges : iterable
A iterable of edges.
Returns
-------
is_matching : bool
True if the given edges are a maximal matching.
Example
-------
This example checks two sets of edges, both derived from a
single Chimera unit cell, for a matching. The first set (a matching) is
a subset of the second, which was found using the `min_maximal_matching()`
function.
>>> import dwave_networkx as dnx
>>> G = dnx.chimera_graph(1, 1, 4)
>>> dnx.is_matching({(0, 4), (2, 7)})
True
>>> dnx.is_maximal_matching(G,{(0, 4), (2, 7)})
False
>>> dnx.is_maximal_matching(G,{(0, 4), (1, 5), (2, 7), (3, 6)})
True
"""
touched_nodes = set().union(*matching)
# first check if a matching
if len(touched_nodes) != len(matching) * 2:
return False
# now for each edge, check that at least one of its variables is
# already in the matching
for (u, v) in G.edges:
if u not in touched_nodes and v not in touched_nodes:
return False
return True | Determines whether the given set of edges is a maximal matching.
A matching is a subset of edges in which no node occurs more than
once. The cardinality of a matching is the number of matched edges.
A maximal matching is one where one cannot add any more edges
without violating the matching rule.
Parameters
----------
G : NetworkX graph
The graph on which to check the maximal matching.
edges : iterable
A iterable of edges.
Returns
-------
is_matching : bool
True if the given edges are a maximal matching.
Example
-------
This example checks two sets of edges, both derived from a
single Chimera unit cell, for a matching. The first set (a matching) is
a subset of the second, which was found using the `min_maximal_matching()`
function.
>>> import dwave_networkx as dnx
>>> G = dnx.chimera_graph(1, 1, 4)
>>> dnx.is_matching({(0, 4), (2, 7)})
True
>>> dnx.is_maximal_matching(G,{(0, 4), (2, 7)})
False
>>> dnx.is_maximal_matching(G,{(0, 4), (1, 5), (2, 7), (3, 6)})
True | Below is the the instruction that describes the task:
### Input:
Determines whether the given set of edges is a maximal matching.
A matching is a subset of edges in which no node occurs more than
once. The cardinality of a matching is the number of matched edges.
A maximal matching is one where one cannot add any more edges
without violating the matching rule.
Parameters
----------
G : NetworkX graph
The graph on which to check the maximal matching.
edges : iterable
A iterable of edges.
Returns
-------
is_matching : bool
True if the given edges are a maximal matching.
Example
-------
This example checks two sets of edges, both derived from a
single Chimera unit cell, for a matching. The first set (a matching) is
a subset of the second, which was found using the `min_maximal_matching()`
function.
>>> import dwave_networkx as dnx
>>> G = dnx.chimera_graph(1, 1, 4)
>>> dnx.is_matching({(0, 4), (2, 7)})
True
>>> dnx.is_maximal_matching(G,{(0, 4), (2, 7)})
False
>>> dnx.is_maximal_matching(G,{(0, 4), (1, 5), (2, 7), (3, 6)})
True
### Response:
def is_maximal_matching(G, matching):
    """Determines whether the given set of edges is a maximal matching.

    A matching is a subset of edges in which no node occurs more than
    once. A maximal matching is one to which no further edge of ``G`` can
    be added without violating the matching rule.

    Parameters
    ----------
    G : NetworkX graph
        The graph on which to check the maximal matching.
    matching : iterable
        An iterable of edges.

    Returns
    -------
    is_matching : bool
        True if the given edges are a maximal matching.

    Example
    -------
    >>> import dwave_networkx as dnx
    >>> G = dnx.chimera_graph(1, 1, 4)
    >>> dnx.is_maximal_matching(G,{(0, 4), (2, 7)})
    False
    >>> dnx.is_maximal_matching(G,{(0, 4), (1, 5), (2, 7), (3, 6)})
    True
    """
    # Gather every node covered by the candidate matching.
    covered = set()
    for edge in matching:
        covered.update(edge)

    # A proper matching touches exactly two distinct nodes per edge; any
    # repetition collapses in the set and the count comes up short.
    if len(covered) != 2 * len(matching):
        return False

    # Maximal: no edge of G may have both endpoints unmatched.
    for (u, v) in G.edges:
        if u not in covered and v not in covered:
            return False
    return True
def _set_implicit_commit(self, v, load=False):
"""
Setter method for implicit_commit, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_implicit_commit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_implicit_commit() directly.
"""
if hasattr(v, "_utype"):
v = v._utype(v)
try:
t = YANGDynClass(v,base=implicit_commit.implicit_commit, is_container='container', presence=False, yang_name="implicit-commit", rest_name="implicit-commit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set implicit commit behavior for LSPs'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
except (TypeError, ValueError):
raise ValueError({
'error-string': """implicit_commit must be of a type compatible with container""",
'defined-type': "container",
'generated-type': """YANGDynClass(base=implicit_commit.implicit_commit, is_container='container', presence=False, yang_name="implicit-commit", rest_name="implicit-commit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set implicit commit behavior for LSPs'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
})
self.__implicit_commit = t
if hasattr(self, '_set'):
self._set() | Setter method for implicit_commit, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_implicit_commit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_implicit_commit() directly. | Below is the the instruction that describes the task:
### Input:
Setter method for implicit_commit, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit (container)
If this variable is read-only (config: false) in the
source YANG file, then _set_implicit_commit is considered as a private
method. Backends looking to populate this variable should
do so via calling thisObj._set_implicit_commit() directly.
### Response:
def _set_implicit_commit(self, v, load=False):
    """
    Setter method for implicit_commit, mapped from YANG variable /mpls_config/router/mpls/mpls_cmds_holder/policy/implicit_commit (container)
    If this variable is read-only (config: false) in the
    source YANG file, then _set_implicit_commit is considered as a private
    method. Backends looking to populate this variable should
    do so via calling thisObj._set_implicit_commit() directly.
    """
    # NOTE(review): values carrying a _utype appear to be union wrappers that
    # normalize to their underlying YANG type first — confirm against pyangbind.
    if hasattr(v, "_utype"):
        v = v._utype(v)
    try:
        # Re-wrap the value so it carries the full YANG metadata for this node.
        t = YANGDynClass(v,base=implicit_commit.implicit_commit, is_container='container', presence=False, yang_name="implicit-commit", rest_name="implicit-commit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set implicit commit behavior for LSPs'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)
    except (TypeError, ValueError):
        # Surface a structured error describing the expected generated type.
        raise ValueError({
            'error-string': """implicit_commit must be of a type compatible with container""",
            'defined-type': "container",
            'generated-type': """YANGDynClass(base=implicit_commit.implicit_commit, is_container='container', presence=False, yang_name="implicit-commit", rest_name="implicit-commit", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'info': u'Set implicit commit behavior for LSPs'}}, namespace='urn:brocade.com:mgmt:brocade-mpls', defining_module='brocade-mpls', yang_type='container', is_config=True)""",
        })
    self.__implicit_commit = t
    # Notify the parent tree of the change, if a change hook is installed.
    if hasattr(self, '_set'):
        self._set()
def stitch_images(images, margin=5, cols=5):
"""Utility function to stitch images together with a `margin`.
Args:
images: The array of 2D images to stitch.
margin: The black border margin size between images (Default value = 5)
cols: Max number of image cols. New row is created when number of images exceed the column size.
(Default value = 5)
Returns:
A single numpy image array comprising of input images.
"""
if len(images) == 0:
return None
h, w, c = images[0].shape
n_rows = int(math.ceil(len(images) / cols))
n_cols = min(len(images), cols)
out_w = n_cols * w + (n_cols - 1) * margin
out_h = n_rows * h + (n_rows - 1) * margin
stitched_images = np.zeros((out_h, out_w, c), dtype=images[0].dtype)
for row in range(n_rows):
for col in range(n_cols):
img_idx = row * cols + col
if img_idx >= len(images):
break
stitched_images[(h + margin) * row: (h + margin) * row + h,
(w + margin) * col: (w + margin) * col + w, :] = images[img_idx]
return stitched_images | Utility function to stitch images together with a `margin`.
Args:
images: The array of 2D images to stitch.
margin: The black border margin size between images (Default value = 5)
cols: Max number of image cols. New row is created when number of images exceed the column size.
(Default value = 5)
Returns:
A single numpy image array comprising of input images. | Below is the the instruction that describes the task:
### Input:
Utility function to stitch images together with a `margin`.
Args:
images: The array of 2D images to stitch.
margin: The black border margin size between images (Default value = 5)
cols: Max number of image cols. New row is created when number of images exceed the column size.
(Default value = 5)
Returns:
A single numpy image array comprising of input images.
### Response:
def stitch_images(images, margin=5, cols=5):
    """Utility function to stitch images together with a `margin`.

    Args:
        images: The array of 2D images to stitch.
        margin: The black border margin size between images (Default value = 5)
        cols: Max number of image cols. New row is created when number of images exceed the column size.
            (Default value = 5)

    Returns:
        A single numpy image array comprising of input images.
    """
    if len(images) == 0:
        return None

    h, w, c = images[0].shape
    n_rows = (len(images) + cols - 1) // cols   # ceil division
    n_cols = min(len(images), cols)

    # Output canvas: zeros, so the margins come out black.
    canvas = np.zeros((n_rows * h + (n_rows - 1) * margin,
                       n_cols * w + (n_cols - 1) * margin,
                       c), dtype=images[0].dtype)

    for idx, img in enumerate(images):
        row, col = divmod(idx, cols)
        top = (h + margin) * row
        left = (w + margin) * col
        canvas[top:top + h, left:left + w, :] = img
    return canvas
def link(self, other):
"""
Link (associate) this object with anI{other} properties object
to create a network of properties. Links are bidirectional.
@param other: The object to link.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
"""
p = other.__pts__
return self.properties.link(p) | Link (associate) this object with anI{other} properties object
to create a network of properties. Links are bidirectional.
@param other: The object to link.
@type other: L{Properties}
@return: self
@rtype: L{Properties} | Below is the the instruction that describes the task:
### Input:
Link (associate) this object with anI{other} properties object
to create a network of properties. Links are bidirectional.
@param other: The object to link.
@type other: L{Properties}
@return: self
@rtype: L{Properties}
### Response:
def link(self, other):
    """
    Link (associate) this object with an I{other} properties object
    to create a network of properties. Links are bidirectional.
    @param other: The object to link.
    @type other: L{Properties}
    @return: self
    @rtype: L{Properties}
    """
    # Delegate to the underlying properties, linking against the other
    # object's property set.
    return self.properties.link(other.__pts__)
def _lookup_vpc_count_min_max(session=None, **bfilter):
"""Look up count/min/max Nexus VPC Allocs for given switch.
:param session: db session
:param bfilter: filter for mappings query
:returns: number of VPCs and min value if query gave a result,
else raise NexusVPCAllocNotFound.
"""
if session is None:
session = bc.get_reader_session()
try:
res = session.query(
func.count(nexus_models_v2.NexusVPCAlloc.vpc_id),
func.min(nexus_models_v2.NexusVPCAlloc.vpc_id),
func.max(nexus_models_v2.NexusVPCAlloc.vpc_id),
).filter(nexus_models_v2.NexusVPCAlloc.switch_ip ==
bfilter['switch_ip']).one()
count = res[0]
sw_min = res[1]
sw_max = res[2]
return count, sw_min, sw_max
except sa_exc.NoResultFound:
pass
raise c_exc.NexusVPCAllocNotFound(**bfilter) | Look up count/min/max Nexus VPC Allocs for given switch.
:param session: db session
:param bfilter: filter for mappings query
:returns: number of VPCs and min value if query gave a result,
else raise NexusVPCAllocNotFound. | Below is the the instruction that describes the task:
### Input:
Look up count/min/max Nexus VPC Allocs for given switch.
:param session: db session
:param bfilter: filter for mappings query
:returns: number of VPCs and min value if query gave a result,
else raise NexusVPCAllocNotFound.
### Response:
def _lookup_vpc_count_min_max(session=None, **bfilter):
    """Look up count/min/max Nexus VPC Allocs for given switch.

    :param session: db session
    :param bfilter: filter for mappings query
    :returns: number of VPCs and min value if query gave a result,
              else raise NexusVPCAllocNotFound.
    """
    if session is None:
        session = bc.get_reader_session()
    vpc_id = nexus_models_v2.NexusVPCAlloc.vpc_id
    try:
        # Single aggregate query returning one (count, min, max) row.
        count, sw_min, sw_max = (
            session.query(func.count(vpc_id), func.min(vpc_id), func.max(vpc_id))
            .filter(nexus_models_v2.NexusVPCAlloc.switch_ip == bfilter['switch_ip'])
            .one())
        return count, sw_min, sw_max
    except sa_exc.NoResultFound:
        pass
    raise c_exc.NexusVPCAllocNotFound(**bfilter)
def get_authorization_lookup_session(self):
"""Gets the ``OsidSession`` associated with the authorization lookup service.
return: (osid.authorization.AuthorizationLookupSession) - an
``AuthorizationLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_authorization_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_authorization_lookup()`` is ``true``.*
"""
if not self.supports_authorization_lookup():
raise errors.Unimplemented()
# pylint: disable=no-member
return sessions.AuthorizationLookupSession(runtime=self._runtime) | Gets the ``OsidSession`` associated with the authorization lookup service.
return: (osid.authorization.AuthorizationLookupSession) - an
``AuthorizationLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_authorization_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_authorization_lookup()`` is ``true``.* | Below is the the instruction that describes the task:
### Input:
Gets the ``OsidSession`` associated with the authorization lookup service.
return: (osid.authorization.AuthorizationLookupSession) - an
``AuthorizationLookupSession``
raise: OperationFailed - unable to complete request
raise: Unimplemented - ``supports_authorization_lookup()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_authorization_lookup()`` is ``true``.*
### Response:
def get_authorization_lookup_session(self):
    """Gets the ``OsidSession`` associated with the authorization lookup service.

    return: (osid.authorization.AuthorizationLookupSession) - an
            ``AuthorizationLookupSession``
    raise:  OperationFailed - unable to complete request
    raise:  Unimplemented - ``supports_authorization_lookup()`` is
            ``false``
    *compliance: optional -- This method must be implemented if
    ``supports_authorization_lookup()`` is ``true``.*
    """
    if self.supports_authorization_lookup():
        # pylint: disable=no-member
        return sessions.AuthorizationLookupSession(runtime=self._runtime)
    raise errors.Unimplemented()
async def wait_for_all_empty(self, *queues):
"""
Wait for multiple queues to be empty at the same time.
Require delegate when calling from coroutines running in other containers
"""
matchers = [m for m in (q.waitForEmpty() for q in queues) if m is not None]
while matchers:
await self.wait_for_all(*matchers)
matchers = [m for m in (q.waitForEmpty() for q in queues) if m is not None] | Wait for multiple queues to be empty at the same time.
Require delegate when calling from coroutines running in other containers | Below is the the instruction that describes the task:
### Input:
Wait for multiple queues to be empty at the same time.
Require delegate when calling from coroutines running in other containers
### Response:
async def wait_for_all_empty(self, *queues):
    """
    Wait for multiple queues to be empty at the same time.
    Require delegate when calling from coroutines running in other containers
    """
    def pending_waiters():
        # One fresh waiter per queue that is not yet empty.
        return [w for w in (q.waitForEmpty() for q in queues) if w is not None]

    waiters = pending_waiters()
    while waiters:
        await self.wait_for_all(*waiters)
        # Queues may have refilled while we waited; re-check until all clear.
        waiters = pending_waiters()
def until_state(self, state, timeout=None):
"""Future that resolves when a certain client state is attained
Parameters
----------
state : str
Desired state, one of ("disconnected", "syncing", "synced")
timeout: float
Timeout for operation in seconds.
"""
return self._state.until_state(state, timeout=timeout) | Future that resolves when a certain client state is attained
Parameters
----------
state : str
Desired state, one of ("disconnected", "syncing", "synced")
timeout: float
Timeout for operation in seconds. | Below is the the instruction that describes the task:
### Input:
Future that resolves when a certain client state is attained
Parameters
----------
state : str
Desired state, one of ("disconnected", "syncing", "synced")
timeout: float
Timeout for operation in seconds.
### Response:
def until_state(self, state, timeout=None):
    """Future that resolves when a certain client state is attained

    Parameters
    ----------
    state : str
        Desired state, one of ("disconnected", "syncing", "synced")
    timeout: float
        Timeout for operation in seconds.
    """
    # Pure delegation to the underlying state tracker.
    state_tracker = self._state
    return state_tracker.until_state(state, timeout=timeout)
def chunker(l, n):
"""
Generates n-sized chunks from the list l
"""
for i in ranger(0, len(l), n):
yield l[i:i + n] | Generates n-sized chunks from the list l | Below is the the instruction that describes the task:
### Input:
Generates n-sized chunks from the list l
### Response:
def chunker(l, n):
    """
    Generates n-sized chunks from the list l
    """
    # NOTE(review): `ranger` is presumably a py2/py3 range() compatibility
    # alias defined elsewhere in this module — confirm before relying on it.
    for offset in ranger(0, len(l), n):
        yield l[offset:offset + n]
def base_warfare(name, bases, attributes):
"""
Adds any number of attributes to an existing class.
:param name: Name.
:type name: unicode
:param bases: Bases.
:type bases: list
:param attributes: Attributes.
:type attributes: dict
:return: Base.
:rtype: object
"""
assert len(bases) == 1, "{0} | '{1}' object has multiple bases!".format(__name__, name)
base = foundations.common.get_first_item(bases)
for name, value in attributes.iteritems():
if name != "__metaclass__":
setattr(base, name, value)
return base | Adds any number of attributes to an existing class.
:param name: Name.
:type name: unicode
:param bases: Bases.
:type bases: list
:param attributes: Attributes.
:type attributes: dict
:return: Base.
:rtype: object | Below is the the instruction that describes the task:
### Input:
Adds any number of attributes to an existing class.
:param name: Name.
:type name: unicode
:param bases: Bases.
:type bases: list
:param attributes: Attributes.
:type attributes: dict
:return: Base.
:rtype: object
### Response:
def base_warfare(name, bases, attributes):
    """
    Adds any number of attributes to an existing class.

    :param name: Name.
    :type name: unicode
    :param bases: Bases.
    :type bases: list
    :param attributes: Attributes.
    :type attributes: dict
    :return: Base.
    :rtype: object
    """
    assert len(bases) == 1, "{0} | '{1}' object has multiple bases!".format(__name__, name)

    base = foundations.common.get_first_item(bases)
    # Copy every attribute except the metaclass marker onto the base class.
    # (Py2 code path: `iteritems` — preserved as-is.)
    for attribute_name, value in attributes.iteritems():
        if attribute_name != "__metaclass__":
            setattr(base, attribute_name, value)
    return base
def show_weights(self, **kwargs):
"""
Call :func:`eli5.show_weights` for the locally-fit
classification pipeline. Keyword arguments are passed
to :func:`eli5.show_weights`.
:func:`fit` must be called before using this method.
"""
self._fix_target_names(kwargs)
return eli5.show_weights(self.clf_, vec=self.vec_, **kwargs) | Call :func:`eli5.show_weights` for the locally-fit
classification pipeline. Keyword arguments are passed
to :func:`eli5.show_weights`.
:func:`fit` must be called before using this method. | Below is the the instruction that describes the task:
### Input:
Call :func:`eli5.show_weights` for the locally-fit
classification pipeline. Keyword arguments are passed
to :func:`eli5.show_weights`.
:func:`fit` must be called before using this method.
### Response:
def show_weights(self, **kwargs):
    """
    Call :func:`eli5.show_weights` for the locally-fit
    classification pipeline. Keyword arguments are passed
    to :func:`eli5.show_weights`.

    :func:`fit` must be called before using this method.
    """
    # Normalize target names in-place before delegating to eli5.
    self._fix_target_names(kwargs)
    clf, vec = self.clf_, self.vec_
    return eli5.show_weights(clf, vec=vec, **kwargs)
def build_single_handler_applications(paths, argvs=None):
''' Return a dictionary mapping routes to Bokeh applications built using
single handlers, for specified files or directories.
This function iterates over ``paths`` and ``argvs`` and calls
:func:`~bokeh.command.util.build_single_handler_application` on each
to generate the mapping.
Args:
path (seq[str]) : paths to files or directories for creating Bokeh
applications.
argvs (dict[str, list[str]], optional) : mapping of paths to command
line arguments to pass to the handler for each path
Returns:
dict[str, Application]
Raises:
RuntimeError
'''
applications = {}
argvs = {} or argvs
for path in paths:
application = build_single_handler_application(path, argvs.get(path, []))
route = application.handlers[0].url_path()
if not route:
if '/' in applications:
raise RuntimeError("Don't know the URL path to use for %s" % (path))
route = '/'
applications[route] = application
return applications | Return a dictionary mapping routes to Bokeh applications built using
single handlers, for specified files or directories.
This function iterates over ``paths`` and ``argvs`` and calls
:func:`~bokeh.command.util.build_single_handler_application` on each
to generate the mapping.
Args:
path (seq[str]) : paths to files or directories for creating Bokeh
applications.
argvs (dict[str, list[str]], optional) : mapping of paths to command
line arguments to pass to the handler for each path
Returns:
dict[str, Application]
Raises:
RuntimeError | Below is the the instruction that describes the task:
### Input:
Return a dictionary mapping routes to Bokeh applications built using
single handlers, for specified files or directories.
This function iterates over ``paths`` and ``argvs`` and calls
:func:`~bokeh.command.util.build_single_handler_application` on each
to generate the mapping.
Args:
path (seq[str]) : paths to files or directories for creating Bokeh
applications.
argvs (dict[str, list[str]], optional) : mapping of paths to command
line arguments to pass to the handler for each path
Returns:
dict[str, Application]
Raises:
RuntimeError
### Response:
def build_single_handler_applications(paths, argvs=None):
    ''' Return a dictionary mapping routes to Bokeh applications built using
    single handlers, for specified files or directories.

    This function iterates over ``paths`` and ``argvs`` and calls
    :func:`~bokeh.command.util.build_single_handler_application` on each
    to generate the mapping.

    Args:
        paths (seq[str]) : paths to files or directories for creating Bokeh
            applications.
        argvs (dict[str, list[str]], optional) : mapping of paths to command
            line arguments to pass to the handler for each path

    Returns:
        dict[str, Application]

    Raises:
        RuntimeError : if more than one application resolves to the root
            route ``'/'``

    '''
    applications = {}
    # BUG FIX: the original `argvs = {} or argvs` always evaluated to `argvs`
    # (an empty dict is falsy), so the default None survived and
    # `argvs.get(...)` below raised AttributeError.
    if argvs is None:
        argvs = {}
    for path in paths:
        application = build_single_handler_application(path, argvs.get(path, []))

        route = application.handlers[0].url_path()

        if not route:
            if '/' in applications:
                raise RuntimeError("Don't know the URL path to use for %s" % (path))
            route = '/'
        applications[route] = application

    return applications
def parse(self, string):
'''Parse some string to the Grammar.
Returns a nodeResult with the following attributes:
- is_valid: True when the string is successfully parsed
by the Grammar.
- pos: position in the string where parsing ended.
(this is the end of the string when is_valid is True)
- expecting: a list containing possible elements at position
'pos' in the string.
- tree: the parse_tree containing a structured
result for the given string.
'''
self._string = string
self._expecting = Expecting()
self._cached_kw_match.clear()
self._len_string = len(string)
self._pos = None
tree = Node(self._element, string, 0, self._len_string)
node_res = Result(*self._walk(
self._element,
0,
tree.children,
self._element,
True))
# get rest if anything
rest = self._string[node_res.pos:].lstrip()
# set is_valid to False if we have 'rest' left.
if node_res.is_valid and rest:
node_res.is_valid = False
# add end_of_statement to expecting if this is possible
if not self._expecting.required and rest:
self._expecting.set_mode_required(node_res.pos, True)
self._expecting.update(end_of_statement, node_res.pos)
node_res.expecting = self._expecting.get_expecting()
# add expecting and correct pos to node_res if node_res is not valid
if not node_res.is_valid:
node_res.pos = self._expecting.pos
node_res.tree = tree
return node_res | Parse some string to the Grammar.
Returns a nodeResult with the following attributes:
- is_valid: True when the string is successfully parsed
by the Grammar.
- pos: position in the string where parsing ended.
(this is the end of the string when is_valid is True)
- expecting: a list containing possible elements at position
'pos' in the string.
- tree: the parse_tree containing a structured
result for the given string. | Below is the the instruction that describes the task:
### Input:
Parse some string to the Grammar.
Returns a nodeResult with the following attributes:
- is_valid: True when the string is successfully parsed
by the Grammar.
- pos: position in the string where parsing ended.
(this is the end of the string when is_valid is True)
- expecting: a list containing possible elements at position
'pos' in the string.
- tree: the parse_tree containing a structured
result for the given string.
### Response:
def parse(self, string):
    '''Parse some string to the Grammar.

    Returns a nodeResult with the following attributes:
     - is_valid: True when the string is successfully parsed
       by the Grammar.
     - pos: position in the string where parsing ended.
       (this is the end of the string when is_valid is True)
     - expecting: a list containing possible elements at position
       'pos' in the string.
     - tree: the parse_tree containing a structured
       result for the given string.
    '''
    # Reset per-parse state; _walk() reads these during traversal.
    self._string = string
    self._expecting = Expecting()
    self._cached_kw_match.clear()
    self._len_string = len(string)
    self._pos = None

    # Root node spans the whole input; _walk() populates tree.children.
    tree = Node(self._element, string, 0, self._len_string)
    node_res = Result(*self._walk(
        self._element,
        0,
        tree.children,
        self._element,
        True))

    # get rest if anything
    rest = self._string[node_res.pos:].lstrip()

    # set is_valid to False if we have 'rest' left.
    if node_res.is_valid and rest:
        node_res.is_valid = False

    # add end_of_statement to expecting if this is possible
    if not self._expecting.required and rest:
        self._expecting.set_mode_required(node_res.pos, True)
        self._expecting.update(end_of_statement, node_res.pos)

    node_res.expecting = self._expecting.get_expecting()

    # add expecting and correct pos to node_res if node_res is not valid
    if not node_res.is_valid:
        node_res.pos = self._expecting.pos

    node_res.tree = tree
    return node_res
def set_t(self, value):
'''
setter
Time.
'''
if isinstance(value, int) is False:
raise TypeError("The type of __t must be int.")
self.__t = value | setter
Time. | Below is the the instruction that describes the task:
### Input:
setter
Time.
### Response:
def set_t(self, value):
    '''
    setter

    Time.

    :param value: new time value; must be an ``int``.
    :raises TypeError: if ``value`` is not an ``int``.
    '''
    # Idiomatic type check (was `isinstance(...) is False`). Note that bool
    # still passes, being an int subclass — same as the original behavior.
    if not isinstance(value, int):
        raise TypeError("The type of __t must be int.")
    self.__t = value
def _conf_packages(args):
"""Runs custom configuration steps for the packages that ship with support
in acorn.
"""
from acorn.config import config_dir
from os import path
from acorn.base import testmode
target = config_dir(True)
alternate = path.join(path.abspath(path.expanduser("~")), ".acorn")
if not testmode and target != alternate:# pragma: no cover
msg.err("Could not configure custom ~/.acorn directory.")
exit(0)
from acorn.utility import reporoot
from glob import glob
from os import chdir, getcwd
from shutil import copy
current = getcwd()
source = path.join(reporoot, "acorn", "config")
chdir(source)
count = 0
#For the unit testing, we don't clobber the local directory, so the copies
#are disabled.
for json in glob("*.json"):
if not testmode:# pragma: no cover
copy(json, target)
count += 1
for cfg in glob("*.cfg"):
if not testmode:# pragma: no cover
copy(cfg, target)
count += 1
#Switch the directory back to what it was.
chdir(current)
msg.okay("Copied {0:d} package files to {1}.".format(count, target)) | Runs custom configuration steps for the packages that ship with support
in acorn. | Below is the the instruction that describes the task:
### Input:
Runs custom configuration steps for the packages that ship with support
in acorn.
### Response:
def _conf_packages(args):
    """Runs custom configuration steps for the packages that ship with support
    in acorn.

    :param args: parsed CLI arguments; not read by this step.
    """
    from acorn.config import config_dir
    from os import path
    from acorn.base import testmode
    # config_dir(True) resolves (and may create) the config directory; outside
    # test mode it must be exactly ~/.acorn or we abort.
    target = config_dir(True)
    alternate = path.join(path.abspath(path.expanduser("~")), ".acorn")
    if not testmode and target != alternate:# pragma: no cover
        msg.err("Could not configure custom ~/.acorn directory.")
        exit(0)

    from acorn.utility import reporoot
    from glob import glob
    from os import chdir, getcwd
    from shutil import copy
    # Remember the working directory so we can restore it after globbing the
    # package config files from the repo's acorn/config directory.
    current = getcwd()
    source = path.join(reporoot, "acorn", "config")
    chdir(source)

    count = 0
    #For the unit testing, we don't clobber the local directory, so the copies
    #are disabled.
    for json in glob("*.json"):
        if not testmode:# pragma: no cover
            copy(json, target)
        count += 1
    for cfg in glob("*.cfg"):
        if not testmode:# pragma: no cover
            copy(cfg, target)
        count += 1

    #Switch the directory back to what it was.
    chdir(current)
    msg.okay("Copied {0:d} package files to {1}.".format(count, target))
def str_wrap(arr, width, **kwargs):
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line width.
expand_tabs : bool, optional
If True, tab characters will be expanded to spaces (default: True).
replace_whitespace : bool, optional
If True, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True).
drop_whitespace : bool, optional
If True, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True).
break_long_words : bool, optional
If True, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width (default: True).
break_on_hyphens : bool, optional
If True, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words (default: True).
Returns
-------
Series or Index
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
dtype: object
"""
kwargs['width'] = width
tw = textwrap.TextWrapper(**kwargs)
return _na_map(lambda s: '\n'.join(tw.wrap(s)), arr) | r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line width.
expand_tabs : bool, optional
If True, tab characters will be expanded to spaces (default: True).
replace_whitespace : bool, optional
If True, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True).
drop_whitespace : bool, optional
If True, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True).
break_long_words : bool, optional
If True, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width (default: True).
break_on_hyphens : bool, optional
If True, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words (default: True).
Returns
-------
Series or Index
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
dtype: object | Below is the the instruction that describes the task:
### Input:
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line width.
expand_tabs : bool, optional
If True, tab characters will be expanded to spaces (default: True).
replace_whitespace : bool, optional
If True, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True).
drop_whitespace : bool, optional
If True, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True).
break_long_words : bool, optional
If True, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width (default: True).
break_on_hyphens : bool, optional
If True, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words (default: True).
Returns
-------
Series or Index
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
dtype: object
### Response:
def str_wrap(arr, width, **kwargs):
r"""
Wrap long strings in the Series/Index to be formatted in
paragraphs with length less than a given width.
This method has the same keyword parameters and defaults as
:class:`textwrap.TextWrapper`.
Parameters
----------
width : int
Maximum line width.
expand_tabs : bool, optional
If True, tab characters will be expanded to spaces (default: True).
replace_whitespace : bool, optional
If True, each whitespace character (as defined by string.whitespace)
remaining after tab expansion will be replaced by a single space
(default: True).
drop_whitespace : bool, optional
If True, whitespace that, after wrapping, happens to end up at the
beginning or end of a line is dropped (default: True).
break_long_words : bool, optional
If True, then words longer than width will be broken in order to ensure
that no lines are longer than width. If it is false, long words will
not be broken, and some lines may be longer than width (default: True).
break_on_hyphens : bool, optional
If True, wrapping will occur preferably on whitespace and right after
hyphens in compound words, as it is customary in English. If false,
only whitespaces will be considered as potentially good places for line
breaks, but you need to set break_long_words to false if you want truly
insecable words (default: True).
Returns
-------
Series or Index
Notes
-----
Internally, this method uses a :class:`textwrap.TextWrapper` instance with
default settings. To achieve behavior matching R's stringr library str_wrap
function, use the arguments:
- expand_tabs = False
- replace_whitespace = True
- drop_whitespace = True
- break_long_words = False
- break_on_hyphens = False
Examples
--------
>>> s = pd.Series(['line to be wrapped', 'another line to be wrapped'])
>>> s.str.wrap(12)
0 line to be\nwrapped
1 another line\nto be\nwrapped
dtype: object
"""
kwargs['width'] = width
tw = textwrap.TextWrapper(**kwargs)
return _na_map(lambda s: '\n'.join(tw.wrap(s)), arr) |
def scramble_native_password(password, message):
"""Scramble used for mysql_native_password"""
if not password:
return b''
stage1 = sha1_new(password).digest()
stage2 = sha1_new(stage1).digest()
s = sha1_new()
s.update(message[:SCRAMBLE_LENGTH])
s.update(stage2)
result = s.digest()
return _my_crypt(result, stage1) | Scramble used for mysql_native_password | Below is the the instruction that describes the task:
### Input:
Scramble used for mysql_native_password
### Response:
def scramble_native_password(password, message):
"""Scramble used for mysql_native_password"""
if not password:
return b''
stage1 = sha1_new(password).digest()
stage2 = sha1_new(stage1).digest()
s = sha1_new()
s.update(message[:SCRAMBLE_LENGTH])
s.update(stage2)
result = s.digest()
return _my_crypt(result, stage1) |
def _verify_same_spaces(self):
"""Verifies that all the envs have the same observation and action space."""
# Pre-conditions: self._envs is initialized.
if self._envs is None:
raise ValueError("Environments not initialized.")
if not isinstance(self._envs, list):
tf.logging.warning("Not checking observation and action space "
"compatibility across envs, since there is just one.")
return
# NOTE: We compare string representations of observation_space and
# action_space because compositional classes like space.Tuple don't return
# true on object comparison.
if not all(
str(env.observation_space) == str(self.observation_space)
for env in self._envs):
err_str = ("All environments should have the same observation space, but "
"don't.")
tf.logging.error(err_str)
# Log all observation spaces.
for i, env in enumerate(self._envs):
tf.logging.error("Env[%d] has observation space [%s]", i,
env.observation_space)
raise ValueError(err_str)
if not all(
str(env.action_space) == str(self.action_space) for env in self._envs):
err_str = "All environments should have the same action space, but don't."
tf.logging.error(err_str)
# Log all action spaces.
for i, env in enumerate(self._envs):
tf.logging.error("Env[%d] has action space [%s]", i, env.action_space)
raise ValueError(err_str) | Verifies that all the envs have the same observation and action space. | Below is the the instruction that describes the task:
### Input:
Verifies that all the envs have the same observation and action space.
### Response:
def _verify_same_spaces(self):
"""Verifies that all the envs have the same observation and action space."""
# Pre-conditions: self._envs is initialized.
if self._envs is None:
raise ValueError("Environments not initialized.")
if not isinstance(self._envs, list):
tf.logging.warning("Not checking observation and action space "
"compatibility across envs, since there is just one.")
return
# NOTE: We compare string representations of observation_space and
# action_space because compositional classes like space.Tuple don't return
# true on object comparison.
if not all(
str(env.observation_space) == str(self.observation_space)
for env in self._envs):
err_str = ("All environments should have the same observation space, but "
"don't.")
tf.logging.error(err_str)
# Log all observation spaces.
for i, env in enumerate(self._envs):
tf.logging.error("Env[%d] has observation space [%s]", i,
env.observation_space)
raise ValueError(err_str)
if not all(
str(env.action_space) == str(self.action_space) for env in self._envs):
err_str = "All environments should have the same action space, but don't."
tf.logging.error(err_str)
# Log all action spaces.
for i, env in enumerate(self._envs):
tf.logging.error("Env[%d] has action space [%s]", i, env.action_space)
raise ValueError(err_str) |
def node_hist_fig(
node_color_distribution,
title="Graph Node Distribution",
width=400,
height=300,
top=60,
left=25,
bottom=60,
right=25,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
):
"""Define the plotly plot representing the node histogram
Parameters
----------
node_color_distribution: list of dicts describing the build_histogram
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
bgcolor: rgb of hex color code for the figure background color
y_gridcolor: rgb of hex color code for the yaxis y_gridcolor
Returns
-------
FigureWidget object representing the histogram of the graph nodes
"""
text = [
"{perc}%".format(**locals())
for perc in [d["perc"] for d in node_color_distribution]
]
pl_hist = go.Bar(
y=[d["height"] for d in node_color_distribution],
marker=dict(color=[d["color"] for d in node_color_distribution]),
text=text,
hoverinfo="y+text",
)
hist_layout = dict(
title=title,
width=width,
height=height,
font=dict(size=12),
xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
bargap=0.01,
margin=dict(l=left, r=right, b=bottom, t=top),
hovermode="x",
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=[pl_hist], layout=hist_layout) | Define the plotly plot representing the node histogram
Parameters
----------
node_color_distribution: list of dicts describing the build_histogram
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
bgcolor: rgb of hex color code for the figure background color
y_gridcolor: rgb of hex color code for the yaxis y_gridcolor
Returns
-------
FigureWidget object representing the histogram of the graph nodes | Below is the the instruction that describes the task:
### Input:
Define the plotly plot representing the node histogram
Parameters
----------
node_color_distribution: list of dicts describing the build_histogram
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
bgcolor: rgb of hex color code for the figure background color
y_gridcolor: rgb of hex color code for the yaxis y_gridcolor
Returns
-------
FigureWidget object representing the histogram of the graph nodes
### Response:
def node_hist_fig(
node_color_distribution,
title="Graph Node Distribution",
width=400,
height=300,
top=60,
left=25,
bottom=60,
right=25,
bgcolor="rgb(240,240,240)",
y_gridcolor="white",
):
"""Define the plotly plot representing the node histogram
Parameters
----------
node_color_distribution: list of dicts describing the build_histogram
width, height: integers - width and height of the histogram FigureWidget
left, top, right, bottom: ints; number of pixels around the FigureWidget
bgcolor: rgb of hex color code for the figure background color
y_gridcolor: rgb of hex color code for the yaxis y_gridcolor
Returns
-------
FigureWidget object representing the histogram of the graph nodes
"""
text = [
"{perc}%".format(**locals())
for perc in [d["perc"] for d in node_color_distribution]
]
pl_hist = go.Bar(
y=[d["height"] for d in node_color_distribution],
marker=dict(color=[d["color"] for d in node_color_distribution]),
text=text,
hoverinfo="y+text",
)
hist_layout = dict(
title=title,
width=width,
height=height,
font=dict(size=12),
xaxis=dict(showline=True, zeroline=False, showgrid=False, showticklabels=False),
yaxis=dict(showline=False, gridcolor=y_gridcolor, tickfont=dict(size=10)),
bargap=0.01,
margin=dict(l=left, r=right, b=bottom, t=top),
hovermode="x",
plot_bgcolor=bgcolor,
)
return go.FigureWidget(data=[pl_hist], layout=hist_layout) |
def targetOffset(self, dx, dy):
""" Returns a new Pattern with the given target offset """
pattern = Pattern(self.path)
pattern.similarity = self.similarity
pattern.offset = Location(dx, dy)
return pattern | Returns a new Pattern with the given target offset | Below is the the instruction that describes the task:
### Input:
Returns a new Pattern with the given target offset
### Response:
def targetOffset(self, dx, dy):
""" Returns a new Pattern with the given target offset """
pattern = Pattern(self.path)
pattern.similarity = self.similarity
pattern.offset = Location(dx, dy)
return pattern |
def paragraph(separator='\n\n', wrap_start='', wrap_end='',
html=False, sentences_quantity=3):
"""Return a random paragraph."""
return paragraphs(quantity=1, separator=separator, wrap_start=wrap_start,
wrap_end=wrap_end, html=html,
sentences_quantity=sentences_quantity) | Return a random paragraph. | Below is the the instruction that describes the task:
### Input:
Return a random paragraph.
### Response:
def paragraph(separator='\n\n', wrap_start='', wrap_end='',
html=False, sentences_quantity=3):
"""Return a random paragraph."""
return paragraphs(quantity=1, separator=separator, wrap_start=wrap_start,
wrap_end=wrap_end, html=html,
sentences_quantity=sentences_quantity) |
def OnPasteFormat(self, event):
"""Paste format event handler"""
with undo.group(_("Paste format")):
self.grid.actions.paste_format()
self.grid.ForceRefresh()
self.grid.update_attribute_toolbar()
self.grid.actions.zoom() | Paste format event handler | Below is the the instruction that describes the task:
### Input:
Paste format event handler
### Response:
def OnPasteFormat(self, event):
"""Paste format event handler"""
with undo.group(_("Paste format")):
self.grid.actions.paste_format()
self.grid.ForceRefresh()
self.grid.update_attribute_toolbar()
self.grid.actions.zoom() |
def switch(poi):
"""
Zaps into a specific product specified by switch context to the product of interest(poi)
A poi is:
sdox:dev - for product "dev" located in container "sdox"
If poi does not contain a ":" it is interpreted as product name implying that a product within this
container is already active. So if this task is called with ape zap prod (and the corresponding container is
already zapped in), than only the product is switched.
After the context has been switched to sdox:dev additional commands may be available
that are relevant to sdox:dev
:param poi: product of interest, string: <container_name>:<product_name> or <product_name>.
"""
parts = poi.split(':')
if len(parts) == 2:
container_name, product_name = parts
elif len(parts) == 1 and os.environ.get('CONTAINER_NAME'):
# interpret poi as product name if already zapped into a product in order
# to enable simply switching products by doing ape zap prod.
container_name = os.environ.get('CONTAINER_NAME')
product_name = parts[0]
else:
print('unable to find poi: ', poi)
sys.exit(1)
if container_name not in tasks.get_containers():
raise ContainerNotFound('No such container %s' % container_name)
elif product_name not in tasks.get_products(container_name):
raise ProductNotFound('No such product %s' % product_name)
else:
print(SWITCH_TEMPLATE.format(
source_header=tasks.conf.SOURCE_HEADER,
container_name=container_name,
product_name=product_name
)) | Zaps into a specific product specified by switch context to the product of interest(poi)
A poi is:
sdox:dev - for product "dev" located in container "sdox"
If poi does not contain a ":" it is interpreted as product name implying that a product within this
container is already active. So if this task is called with ape zap prod (and the corresponding container is
already zapped in), than only the product is switched.
After the context has been switched to sdox:dev additional commands may be available
that are relevant to sdox:dev
:param poi: product of interest, string: <container_name>:<product_name> or <product_name>. | Below is the the instruction that describes the task:
### Input:
Zaps into a specific product specified by switch context to the product of interest(poi)
A poi is:
sdox:dev - for product "dev" located in container "sdox"
If poi does not contain a ":" it is interpreted as product name implying that a product within this
container is already active. So if this task is called with ape zap prod (and the corresponding container is
already zapped in), than only the product is switched.
After the context has been switched to sdox:dev additional commands may be available
that are relevant to sdox:dev
:param poi: product of interest, string: <container_name>:<product_name> or <product_name>.
### Response:
def switch(poi):
"""
Zaps into a specific product specified by switch context to the product of interest(poi)
A poi is:
sdox:dev - for product "dev" located in container "sdox"
If poi does not contain a ":" it is interpreted as product name implying that a product within this
container is already active. So if this task is called with ape zap prod (and the corresponding container is
already zapped in), than only the product is switched.
After the context has been switched to sdox:dev additional commands may be available
that are relevant to sdox:dev
:param poi: product of interest, string: <container_name>:<product_name> or <product_name>.
"""
parts = poi.split(':')
if len(parts) == 2:
container_name, product_name = parts
elif len(parts) == 1 and os.environ.get('CONTAINER_NAME'):
# interpret poi as product name if already zapped into a product in order
# to enable simply switching products by doing ape zap prod.
container_name = os.environ.get('CONTAINER_NAME')
product_name = parts[0]
else:
print('unable to find poi: ', poi)
sys.exit(1)
if container_name not in tasks.get_containers():
raise ContainerNotFound('No such container %s' % container_name)
elif product_name not in tasks.get_products(container_name):
raise ProductNotFound('No such product %s' % product_name)
else:
print(SWITCH_TEMPLATE.format(
source_header=tasks.conf.SOURCE_HEADER,
container_name=container_name,
product_name=product_name
)) |
def join_tokens_to_sentences(tokens):
""" Correctly joins tokens to multiple sentences
Instead of always placing white-space between the tokens, it will distinguish
between the next symbol and *not* insert whitespace if it is a sentence
symbol (e.g. '.', or '?')
Args:
tokens: array of string tokens
Returns:
Joint sentences as one string
"""
text = ""
for (entry, next_entry) in zip(tokens, tokens[1:]):
text += entry
if next_entry not in SENTENCE_STOPS:
text += " "
text += tokens[-1]
return text | Correctly joins tokens to multiple sentences
Instead of always placing white-space between the tokens, it will distinguish
between the next symbol and *not* insert whitespace if it is a sentence
symbol (e.g. '.', or '?')
Args:
tokens: array of string tokens
Returns:
Joint sentences as one string | Below is the the instruction that describes the task:
### Input:
Correctly joins tokens to multiple sentences
Instead of always placing white-space between the tokens, it will distinguish
between the next symbol and *not* insert whitespace if it is a sentence
symbol (e.g. '.', or '?')
Args:
tokens: array of string tokens
Returns:
Joint sentences as one string
### Response:
def join_tokens_to_sentences(tokens):
""" Correctly joins tokens to multiple sentences
Instead of always placing white-space between the tokens, it will distinguish
between the next symbol and *not* insert whitespace if it is a sentence
symbol (e.g. '.', or '?')
Args:
tokens: array of string tokens
Returns:
Joint sentences as one string
"""
text = ""
for (entry, next_entry) in zip(tokens, tokens[1:]):
text += entry
if next_entry not in SENTENCE_STOPS:
text += " "
text += tokens[-1]
return text |
def fit1d(samples, e, remove_zeros = False, **kw):
"""Fits a 1D distribution with splines.
Input:
samples: Array
Array of samples from a probability distribution
e: Array
Edges that define the events in the probability
distribution. For example, e[0] < x <= e[1] is
the range of values that are associated with the
first event.
**kw: Arguments that are passed on to spline_bse1d.
Returns:
distribution: Array
An array that gives an estimate of probability for
events defined by e.
knots: Array
Sequence of knots that were used for the spline basis
"""
samples = samples[~np.isnan(samples)]
length = len(e)-1
hist,_ = np.histogramdd(samples, (e,))
hist = hist/sum(hist)
basis, knots = spline_base1d(length, marginal = hist, **kw)
non_zero = hist>0
model = linear_model.BayesianRidge()
if remove_zeros:
model.fit(basis[non_zero, :], hist[:,np.newaxis][non_zero,:])
else:
hist[~non_zero] = np.finfo(float).eps
model.fit(basis, hist[:,np.newaxis])
return model.predict(basis), hist, knots | Fits a 1D distribution with splines.
Input:
samples: Array
Array of samples from a probability distribution
e: Array
Edges that define the events in the probability
distribution. For example, e[0] < x <= e[1] is
the range of values that are associated with the
first event.
**kw: Arguments that are passed on to spline_bse1d.
Returns:
distribution: Array
An array that gives an estimate of probability for
events defined by e.
knots: Array
Sequence of knots that were used for the spline basis | Below is the the instruction that describes the task:
### Input:
Fits a 1D distribution with splines.
Input:
samples: Array
Array of samples from a probability distribution
e: Array
Edges that define the events in the probability
distribution. For example, e[0] < x <= e[1] is
the range of values that are associated with the
first event.
**kw: Arguments that are passed on to spline_bse1d.
Returns:
distribution: Array
An array that gives an estimate of probability for
events defined by e.
knots: Array
Sequence of knots that were used for the spline basis
### Response:
def fit1d(samples, e, remove_zeros = False, **kw):
"""Fits a 1D distribution with splines.
Input:
samples: Array
Array of samples from a probability distribution
e: Array
Edges that define the events in the probability
distribution. For example, e[0] < x <= e[1] is
the range of values that are associated with the
first event.
**kw: Arguments that are passed on to spline_bse1d.
Returns:
distribution: Array
An array that gives an estimate of probability for
events defined by e.
knots: Array
Sequence of knots that were used for the spline basis
"""
samples = samples[~np.isnan(samples)]
length = len(e)-1
hist,_ = np.histogramdd(samples, (e,))
hist = hist/sum(hist)
basis, knots = spline_base1d(length, marginal = hist, **kw)
non_zero = hist>0
model = linear_model.BayesianRidge()
if remove_zeros:
model.fit(basis[non_zero, :], hist[:,np.newaxis][non_zero,:])
else:
hist[~non_zero] = np.finfo(float).eps
model.fit(basis, hist[:,np.newaxis])
return model.predict(basis), hist, knots |
def get_mongo_version(self):
"""
Gets mongo version of the server if it is running. Otherwise return
version configured in mongoVersion property
"""
if self._mongo_version:
return self._mongo_version
mongo_version = self.read_current_mongo_version()
if not mongo_version:
mongo_version = self.get_configured_mongo_version()
self._mongo_version = mongo_version
return self._mongo_version | Gets mongo version of the server if it is running. Otherwise return
version configured in mongoVersion property | Below is the the instruction that describes the task:
### Input:
Gets mongo version of the server if it is running. Otherwise return
version configured in mongoVersion property
### Response:
def get_mongo_version(self):
"""
Gets mongo version of the server if it is running. Otherwise return
version configured in mongoVersion property
"""
if self._mongo_version:
return self._mongo_version
mongo_version = self.read_current_mongo_version()
if not mongo_version:
mongo_version = self.get_configured_mongo_version()
self._mongo_version = mongo_version
return self._mongo_version |
def exclude_types(self, *objs):
'''Exclude the specified object instances and types from sizing.
All instances and types of the given objects are excluded,
even objects specified as positional arguments in subsequent
calls to methods **asizeof** and **asizesof**.
'''
for o in objs:
for t in _keytuple(o):
if t and t not in self._excl_d:
self._excl_d[t] = 0 | Exclude the specified object instances and types from sizing.
All instances and types of the given objects are excluded,
even objects specified as positional arguments in subsequent
calls to methods **asizeof** and **asizesof**. | Below is the the instruction that describes the task:
### Input:
Exclude the specified object instances and types from sizing.
All instances and types of the given objects are excluded,
even objects specified as positional arguments in subsequent
calls to methods **asizeof** and **asizesof**.
### Response:
def exclude_types(self, *objs):
'''Exclude the specified object instances and types from sizing.
All instances and types of the given objects are excluded,
even objects specified as positional arguments in subsequent
calls to methods **asizeof** and **asizesof**.
'''
for o in objs:
for t in _keytuple(o):
if t and t not in self._excl_d:
self._excl_d[t] = 0 |
def _affine_inv_mult(c, m):
"Applies the inverse affine transform described in `m` to `c`."
size = c.flow.size()
h,w = c.size
m[0,1] *= h/w
m[1,0] *= w/h
c.flow = c.flow.view(-1,2)
a = torch.inverse(m[:2,:2].t())
c.flow = torch.mm(c.flow - m[:2,2], a).view(size)
return c | Applies the inverse affine transform described in `m` to `c`. | Below is the the instruction that describes the task:
### Input:
Applies the inverse affine transform described in `m` to `c`.
### Response:
def _affine_inv_mult(c, m):
"Applies the inverse affine transform described in `m` to `c`."
size = c.flow.size()
h,w = c.size
m[0,1] *= h/w
m[1,0] *= w/h
c.flow = c.flow.view(-1,2)
a = torch.inverse(m[:2,:2].t())
c.flow = torch.mm(c.flow - m[:2,2], a).view(size)
return c |
def _process_coref_span_annotations_for_word(label: str,
word_index: int,
clusters: DefaultDict[int, List[Tuple[int, int]]],
coref_stacks: DefaultDict[int, List[int]]) -> None:
"""
For a given coref label, add it to a currently open span(s), complete a span(s) or
ignore it, if it is outside of all spans. This method mutates the clusters and coref_stacks
dictionaries.
Parameters
----------
label : ``str``
The coref label for this word.
word_index : ``int``
The word index into the sentence.
clusters : ``DefaultDict[int, List[Tuple[int, int]]]``
A dictionary mapping cluster ids to lists of inclusive spans into the
sentence.
coref_stacks: ``DefaultDict[int, List[int]]``
Stacks for each cluster id to hold the start indices of active spans (spans
which we are inside of when processing a given word). Spans with the same id
can be nested, which is why we collect these opening spans on a stack, e.g:
[Greg, the baker who referred to [himself]_ID1 as 'the bread man']_ID1
"""
if label != "-":
for segment in label.split("|"):
# The conll representation of coref spans allows spans to
# overlap. If spans end or begin at the same word, they are
# separated by a "|".
if segment[0] == "(":
# The span begins at this word.
if segment[-1] == ")":
# The span begins and ends at this word (single word span).
cluster_id = int(segment[1:-1])
clusters[cluster_id].append((word_index, word_index))
else:
# The span is starting, so we record the index of the word.
cluster_id = int(segment[1:])
coref_stacks[cluster_id].append(word_index)
else:
# The span for this id is ending, but didn't start at this word.
# Retrieve the start index from the document state and
# add the span to the clusters for this id.
cluster_id = int(segment[:-1])
start = coref_stacks[cluster_id].pop()
clusters[cluster_id].append((start, word_index)) | For a given coref label, add it to a currently open span(s), complete a span(s) or
ignore it, if it is outside of all spans. This method mutates the clusters and coref_stacks
dictionaries.
Parameters
----------
label : ``str``
The coref label for this word.
word_index : ``int``
The word index into the sentence.
clusters : ``DefaultDict[int, List[Tuple[int, int]]]``
A dictionary mapping cluster ids to lists of inclusive spans into the
sentence.
coref_stacks: ``DefaultDict[int, List[int]]``
Stacks for each cluster id to hold the start indices of active spans (spans
which we are inside of when processing a given word). Spans with the same id
can be nested, which is why we collect these opening spans on a stack, e.g:
[Greg, the baker who referred to [himself]_ID1 as 'the bread man']_ID1 | Below is the the instruction that describes the task:
### Input:
For a given coref label, add it to a currently open span(s), complete a span(s) or
ignore it, if it is outside of all spans. This method mutates the clusters and coref_stacks
dictionaries.
Parameters
----------
label : ``str``
The coref label for this word.
word_index : ``int``
The word index into the sentence.
clusters : ``DefaultDict[int, List[Tuple[int, int]]]``
A dictionary mapping cluster ids to lists of inclusive spans into the
sentence.
coref_stacks: ``DefaultDict[int, List[int]]``
Stacks for each cluster id to hold the start indices of active spans (spans
which we are inside of when processing a given word). Spans with the same id
can be nested, which is why we collect these opening spans on a stack, e.g:
[Greg, the baker who referred to [himself]_ID1 as 'the bread man']_ID1
### Response:
def _process_coref_span_annotations_for_word(label: str,
word_index: int,
clusters: DefaultDict[int, List[Tuple[int, int]]],
coref_stacks: DefaultDict[int, List[int]]) -> None:
"""
For a given coref label, add it to a currently open span(s), complete a span(s) or
ignore it, if it is outside of all spans. This method mutates the clusters and coref_stacks
dictionaries.
Parameters
----------
label : ``str``
The coref label for this word.
word_index : ``int``
The word index into the sentence.
clusters : ``DefaultDict[int, List[Tuple[int, int]]]``
A dictionary mapping cluster ids to lists of inclusive spans into the
sentence.
coref_stacks: ``DefaultDict[int, List[int]]``
Stacks for each cluster id to hold the start indices of active spans (spans
which we are inside of when processing a given word). Spans with the same id
can be nested, which is why we collect these opening spans on a stack, e.g:
[Greg, the baker who referred to [himself]_ID1 as 'the bread man']_ID1
"""
if label != "-":
for segment in label.split("|"):
# The conll representation of coref spans allows spans to
# overlap. If spans end or begin at the same word, they are
# separated by a "|".
if segment[0] == "(":
# The span begins at this word.
if segment[-1] == ")":
# The span begins and ends at this word (single word span).
cluster_id = int(segment[1:-1])
clusters[cluster_id].append((word_index, word_index))
else:
# The span is starting, so we record the index of the word.
cluster_id = int(segment[1:])
coref_stacks[cluster_id].append(word_index)
else:
# The span for this id is ending, but didn't start at this word.
# Retrieve the start index from the document state and
# add the span to the clusters for this id.
cluster_id = int(segment[:-1])
start = coref_stacks[cluster_id].pop()
clusters[cluster_id].append((start, word_index)) |
def apply_classifier(self, name, samples=None, subset=None):
"""
Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
c = self.classifiers[name]
labs = c.classifier.ulabels_
with self.pbar.set(total=len(samples), desc='Applying ' + name + ' classifier') as prog:
for s in samples:
d = self.data[s]
try:
f = c.predict(d.focus)
except ValueError:
# in case there's no data
f = np.array([-2] * len(d.Time))
for l in labs:
ind = f == l
d.filt.add(name=name + '_{:.0f}'.format(l),
filt=ind,
info=name + ' ' + c.method + ' classifier',
params=(c.analytes, c.method))
prog.update()
return name | Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str | Below is the the instruction that describes the task:
### Input:
Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str
### Response:
def apply_classifier(self, name, samples=None, subset=None):
"""
Apply a clustering classifier based on all samples, or a subset.
Parameters
----------
name : str
The name of the classifier to apply.
subset : str
The subset of samples to apply the classifier to.
Returns
-------
name : str
"""
if samples is not None:
subset = self.make_subset(samples)
samples = self._get_samples(subset)
c = self.classifiers[name]
labs = c.classifier.ulabels_
with self.pbar.set(total=len(samples), desc='Applying ' + name + ' classifier') as prog:
for s in samples:
d = self.data[s]
try:
f = c.predict(d.focus)
except ValueError:
# in case there's no data
f = np.array([-2] * len(d.Time))
for l in labs:
ind = f == l
d.filt.add(name=name + '_{:.0f}'.format(l),
filt=ind,
info=name + ' ' + c.method + ' classifier',
params=(c.analytes, c.method))
prog.update()
return name |
def files(self, path="/", **kwargs):
# type: (Text, **Any) -> Iterator[Text]
"""Walk a filesystem, yielding absolute paths to files.
Arguments:
path (str): A path to a directory.
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: An iterator over file paths (absolute
from the filesystem root).
This method invokes `Walker.files` with the bound `FS` object.
"""
walker = self._make_walker(**kwargs)
return walker.files(self.fs, path=path) | Walk a filesystem, yielding absolute paths to files.
Arguments:
path (str): A path to a directory.
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: An iterator over file paths (absolute
from the filesystem root).
This method invokes `Walker.files` with the bound `FS` object. | Below is the the instruction that describes the task:
### Input:
Walk a filesystem, yielding absolute paths to files.
Arguments:
path (str): A path to a directory.
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: An iterator over file paths (absolute
from the filesystem root).
This method invokes `Walker.files` with the bound `FS` object.
### Response:
def files(self, path="/", **kwargs):
# type: (Text, **Any) -> Iterator[Text]
"""Walk a filesystem, yielding absolute paths to files.
Arguments:
path (str): A path to a directory.
Keyword Arguments:
ignore_errors (bool): If `True`, any errors reading a
directory will be ignored, otherwise exceptions will be
raised.
on_error (callable): If ``ignore_errors`` is `False`, then
this callable will be invoked with a path and the exception
object. It should return `True` to ignore the error, or
`False` to re-raise it.
search (str): If ``'breadth'`` then the directory will be
walked *top down*. Set to ``'depth'`` to walk *bottom up*.
filter (list): If supplied, this parameter should be a list
of file name patterns, e.g. ``['*.py']``. Files will only be
returned if the final component matches one of the
patterns.
exclude (list, optional): If supplied, this parameter should be
a list of filename patterns, e.g. ``['~*', '.*']``. Files matching
any of these patterns will be removed from the walk.
filter_dirs (list, optional): A list of patterns that will be used
to match directories paths. The walk will only open directories
that match at least one of these patterns.
exclude_dirs (list): A list of patterns that will be used
to filter out directories from the walk, e.g. ``['*.svn',
'*.git']``.
max_depth (int, optional): Maximum directory depth to walk.
Returns:
~collections.Iterator: An iterator over file paths (absolute
from the filesystem root).
This method invokes `Walker.files` with the bound `FS` object.
"""
walker = self._make_walker(**kwargs)
return walker.files(self.fs, path=path) |
def lower_coerce_type_blocks(ir_blocks):
"""Lower CoerceType blocks into Filter blocks with a type-check predicate."""
new_ir_blocks = []
for block in ir_blocks:
new_block = block
if isinstance(block, CoerceType):
predicate = BinaryComposition(
u'contains', Literal(list(block.target_class)), LocalField('@class'))
new_block = Filter(predicate)
new_ir_blocks.append(new_block)
return new_ir_blocks | Lower CoerceType blocks into Filter blocks with a type-check predicate. | Below is the the instruction that describes the task:
### Input:
Lower CoerceType blocks into Filter blocks with a type-check predicate.
### Response:
def lower_coerce_type_blocks(ir_blocks):
"""Lower CoerceType blocks into Filter blocks with a type-check predicate."""
new_ir_blocks = []
for block in ir_blocks:
new_block = block
if isinstance(block, CoerceType):
predicate = BinaryComposition(
u'contains', Literal(list(block.target_class)), LocalField('@class'))
new_block = Filter(predicate)
new_ir_blocks.append(new_block)
return new_ir_blocks |
def filter_variance(matrix, top):
"""Filter genes in an expression matrix by variance.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
Returns
-------
ExpMatrix
The filtered expression matrix.
"""
assert isinstance(matrix, ExpMatrix)
assert isinstance(top, (int, np.integer))
if top >= matrix.p:
logger.warning('Variance filter has no effect '
'("top" parameter is >= number of genes).')
return matrix.copy()
var = np.var(matrix.X, axis=1, ddof=1)
total_var = np.sum(var) # total sum of variance
a = np.argsort(var)
a = a[::-1]
sel = np.zeros(matrix.p, dtype=np.bool_)
sel[a[:top]] = True
lost_p = matrix.p - top
lost_var = total_var - np.sum(var[sel])
logger.info('Selected the %d most variable genes '
'(excluded %.1f%% of genes, representing %.1f%% '
'of total variance).',
top, 100 * (lost_p / float(matrix.p)),
100 * (lost_var / total_var))
matrix = matrix.loc[sel]
return matrix | Filter genes in an expression matrix by variance.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
Returns
-------
ExpMatrix
The filtered expression matrix. | Below is the the instruction that describes the task:
### Input:
Filter genes in an expression matrix by variance.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
Returns
-------
ExpMatrix
The filtered expression matrix.
### Response:
def filter_variance(matrix, top):
"""Filter genes in an expression matrix by variance.
Parameters
----------
matrix: ExpMatrix
The expression matrix.
top: int
The number of genes to retain.
Returns
-------
ExpMatrix
The filtered expression matrix.
"""
assert isinstance(matrix, ExpMatrix)
assert isinstance(top, (int, np.integer))
if top >= matrix.p:
logger.warning('Variance filter has no effect '
'("top" parameter is >= number of genes).')
return matrix.copy()
var = np.var(matrix.X, axis=1, ddof=1)
total_var = np.sum(var) # total sum of variance
a = np.argsort(var)
a = a[::-1]
sel = np.zeros(matrix.p, dtype=np.bool_)
sel[a[:top]] = True
lost_p = matrix.p - top
lost_var = total_var - np.sum(var[sel])
logger.info('Selected the %d most variable genes '
'(excluded %.1f%% of genes, representing %.1f%% '
'of total variance).',
top, 100 * (lost_p / float(matrix.p)),
100 * (lost_var / total_var))
matrix = matrix.loc[sel]
return matrix |
def parse_descedant_elements(self, element):
'''parses all descendants of an etree element'''
for descendant in element.iterdescendants():
self.parsers[descendant.tag](descendant) | parses all descendants of an etree element | Below is the the instruction that describes the task:
### Input:
parses all descendants of an etree element
### Response:
def parse_descedant_elements(self, element):
'''parses all descendants of an etree element'''
for descendant in element.iterdescendants():
self.parsers[descendant.tag](descendant) |
def iterclass(cls):
"""Iterates over (valid) attributes of a class.
Args:
cls (object): the class to iterate over
Yields:
(str, obj) tuples: the class-level attributes.
"""
for field in dir(cls):
if hasattr(cls, field):
value = getattr(cls, field)
yield field, value | Iterates over (valid) attributes of a class.
Args:
cls (object): the class to iterate over
Yields:
(str, obj) tuples: the class-level attributes. | Below is the the instruction that describes the task:
### Input:
Iterates over (valid) attributes of a class.
Args:
cls (object): the class to iterate over
Yields:
(str, obj) tuples: the class-level attributes.
### Response:
def iterclass(cls):
"""Iterates over (valid) attributes of a class.
Args:
cls (object): the class to iterate over
Yields:
(str, obj) tuples: the class-level attributes.
"""
for field in dir(cls):
if hasattr(cls, field):
value = getattr(cls, field)
yield field, value |
def sigma_filter(filename, region, step_size, box_size, shape, domask, sid):
"""
Calculate the background and rms for a sub region of an image. The results are
written to shared memory - irms and ibkg.
Parameters
----------
filename : string
Fits file to open
region : list
Region within the fits file that is to be processed. (row_min, row_max).
step_size : (int, int)
The filtering step size
box_size : (int, int)
The size of the box over which the filter is applied (each step).
shape : tuple
The shape of the fits image
domask : bool
If true then copy the data mask to the output.
sid : int
The stripe number
Returns
-------
None
"""
ymin, ymax = region
logging.debug('rows {0}-{1} starting at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
# cut out the region of interest plus 1/2 the box size, but clip to the image size
data_row_min = max(0, ymin - box_size[0]//2)
data_row_max = min(shape[0], ymax + box_size[0]//2)
# Figure out how many axes are in the datafile
NAXIS = fits.getheader(filename)["NAXIS"]
with fits.open(filename, memmap=True) as a:
if NAXIS == 2:
data = a[0].section[data_row_min:data_row_max, 0:shape[1]]
elif NAXIS == 3:
data = a[0].section[0, data_row_min:data_row_max, 0:shape[1]]
elif NAXIS == 4:
data = a[0].section[0, 0, data_row_min:data_row_max, 0:shape[1]]
else:
logging.error("Too many NAXIS for me {0}".format(NAXIS))
logging.error("fix your file to be more sane")
raise Exception("Too many NAXIS")
row_len = shape[1]
logging.debug('data size is {0}'.format(data.shape))
def box(r, c):
"""
calculate the boundaries of the box centered at r,c
with size = box_size
"""
r_min = max(0, r - box_size[0] // 2)
r_max = min(data.shape[0] - 1, r + box_size[0] // 2)
c_min = max(0, c - box_size[1] // 2)
c_max = min(data.shape[1] - 1, c + box_size[1] // 2)
return r_min, r_max, c_min, c_max
# set up a grid of rows/cols at which we will compute the bkg/rms
rows = list(range(ymin-data_row_min, ymax-data_row_min, step_size[0]))
rows.append(ymax-data_row_min)
cols = list(range(0, shape[1], step_size[1]))
cols.append(shape[1])
# store the computed bkg/rms in this smaller array
vals = np.zeros(shape=(len(rows),len(cols)))
for i, row in enumerate(rows):
for j, col in enumerate(cols):
r_min, r_max, c_min, c_max = box(row, col)
new = data[r_min:r_max, c_min:c_max]
new = np.ravel(new)
bkg, _ = sigmaclip(new, 3, 3)
vals[i,j] = bkg
# indices of all the pixels within our region
gr, gc = np.mgrid[ymin-data_row_min:ymax-data_row_min, 0:shape[1]]
logging.debug("Interpolating bkg to sharemem")
ifunc = RegularGridInterpolator((rows, cols), vals)
for i in range(gr.shape[0]):
row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32)
start_idx = np.ravel_multi_index((ymin+i, 0), shape)
end_idx = start_idx + row_len
ibkg[start_idx:end_idx] = row # np.ctypeslib.as_ctypes(row)
del ifunc
logging.debug(" ... done writing bkg")
# signal that the bkg is done for this region, and wait for neighbours
barrier(bkg_events, sid)
logging.debug("{0} background subtraction".format(sid))
for i in range(data_row_max - data_row_min):
start_idx = np.ravel_multi_index((data_row_min + i, 0), shape)
end_idx = start_idx + row_len
data[i, :] = data[i, :] - ibkg[start_idx:end_idx]
# reset/recycle the vals array
vals[:] = 0
for i, row in enumerate(rows):
for j, col in enumerate(cols):
r_min, r_max, c_min, c_max = box(row, col)
new = data[r_min:r_max, c_min:c_max]
new = np.ravel(new)
_ , rms = sigmaclip(new, 3, 3)
vals[i,j] = rms
logging.debug("Interpolating rm to sharemem rms")
ifunc = RegularGridInterpolator((rows, cols), vals)
for i in range(gr.shape[0]):
row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32)
start_idx = np.ravel_multi_index((ymin+i, 0), shape)
end_idx = start_idx + row_len
irms[start_idx:end_idx] = row # np.ctypeslib.as_ctypes(row)
del ifunc
logging.debug(" .. done writing rms")
if domask:
barrier(mask_events, sid)
logging.debug("applying mask")
for i in range(gr.shape[0]):
mask = np.where(np.bitwise_not(np.isfinite(data[i + ymin-data_row_min,:])))[0]
for j in mask:
idx = np.ravel_multi_index((i + ymin,j),shape)
ibkg[idx] = np.nan
irms[idx] = np.nan
logging.debug(" ... done applying mask")
logging.debug('rows {0}-{1} finished at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
return | Calculate the background and rms for a sub region of an image. The results are
written to shared memory - irms and ibkg.
Parameters
----------
filename : string
Fits file to open
region : list
Region within the fits file that is to be processed. (row_min, row_max).
step_size : (int, int)
The filtering step size
box_size : (int, int)
The size of the box over which the filter is applied (each step).
shape : tuple
The shape of the fits image
domask : bool
If true then copy the data mask to the output.
sid : int
The stripe number
Returns
-------
None | Below is the the instruction that describes the task:
### Input:
Calculate the background and rms for a sub region of an image. The results are
written to shared memory - irms and ibkg.
Parameters
----------
filename : string
Fits file to open
region : list
Region within the fits file that is to be processed. (row_min, row_max).
step_size : (int, int)
The filtering step size
box_size : (int, int)
The size of the box over which the filter is applied (each step).
shape : tuple
The shape of the fits image
domask : bool
If true then copy the data mask to the output.
sid : int
The stripe number
Returns
-------
None
### Response:
def sigma_filter(filename, region, step_size, box_size, shape, domask, sid):
"""
Calculate the background and rms for a sub region of an image. The results are
written to shared memory - irms and ibkg.
Parameters
----------
filename : string
Fits file to open
region : list
Region within the fits file that is to be processed. (row_min, row_max).
step_size : (int, int)
The filtering step size
box_size : (int, int)
The size of the box over which the filter is applied (each step).
shape : tuple
The shape of the fits image
domask : bool
If true then copy the data mask to the output.
sid : int
The stripe number
Returns
-------
None
"""
ymin, ymax = region
logging.debug('rows {0}-{1} starting at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
# cut out the region of interest plus 1/2 the box size, but clip to the image size
data_row_min = max(0, ymin - box_size[0]//2)
data_row_max = min(shape[0], ymax + box_size[0]//2)
# Figure out how many axes are in the datafile
NAXIS = fits.getheader(filename)["NAXIS"]
with fits.open(filename, memmap=True) as a:
if NAXIS == 2:
data = a[0].section[data_row_min:data_row_max, 0:shape[1]]
elif NAXIS == 3:
data = a[0].section[0, data_row_min:data_row_max, 0:shape[1]]
elif NAXIS == 4:
data = a[0].section[0, 0, data_row_min:data_row_max, 0:shape[1]]
else:
logging.error("Too many NAXIS for me {0}".format(NAXIS))
logging.error("fix your file to be more sane")
raise Exception("Too many NAXIS")
row_len = shape[1]
logging.debug('data size is {0}'.format(data.shape))
def box(r, c):
"""
calculate the boundaries of the box centered at r,c
with size = box_size
"""
r_min = max(0, r - box_size[0] // 2)
r_max = min(data.shape[0] - 1, r + box_size[0] // 2)
c_min = max(0, c - box_size[1] // 2)
c_max = min(data.shape[1] - 1, c + box_size[1] // 2)
return r_min, r_max, c_min, c_max
# set up a grid of rows/cols at which we will compute the bkg/rms
rows = list(range(ymin-data_row_min, ymax-data_row_min, step_size[0]))
rows.append(ymax-data_row_min)
cols = list(range(0, shape[1], step_size[1]))
cols.append(shape[1])
# store the computed bkg/rms in this smaller array
vals = np.zeros(shape=(len(rows),len(cols)))
for i, row in enumerate(rows):
for j, col in enumerate(cols):
r_min, r_max, c_min, c_max = box(row, col)
new = data[r_min:r_max, c_min:c_max]
new = np.ravel(new)
bkg, _ = sigmaclip(new, 3, 3)
vals[i,j] = bkg
# indices of all the pixels within our region
gr, gc = np.mgrid[ymin-data_row_min:ymax-data_row_min, 0:shape[1]]
logging.debug("Interpolating bkg to sharemem")
ifunc = RegularGridInterpolator((rows, cols), vals)
for i in range(gr.shape[0]):
row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32)
start_idx = np.ravel_multi_index((ymin+i, 0), shape)
end_idx = start_idx + row_len
ibkg[start_idx:end_idx] = row # np.ctypeslib.as_ctypes(row)
del ifunc
logging.debug(" ... done writing bkg")
# signal that the bkg is done for this region, and wait for neighbours
barrier(bkg_events, sid)
logging.debug("{0} background subtraction".format(sid))
for i in range(data_row_max - data_row_min):
start_idx = np.ravel_multi_index((data_row_min + i, 0), shape)
end_idx = start_idx + row_len
data[i, :] = data[i, :] - ibkg[start_idx:end_idx]
# reset/recycle the vals array
vals[:] = 0
for i, row in enumerate(rows):
for j, col in enumerate(cols):
r_min, r_max, c_min, c_max = box(row, col)
new = data[r_min:r_max, c_min:c_max]
new = np.ravel(new)
_ , rms = sigmaclip(new, 3, 3)
vals[i,j] = rms
logging.debug("Interpolating rm to sharemem rms")
ifunc = RegularGridInterpolator((rows, cols), vals)
for i in range(gr.shape[0]):
row = np.array(ifunc((gr[i], gc[i])), dtype=np.float32)
start_idx = np.ravel_multi_index((ymin+i, 0), shape)
end_idx = start_idx + row_len
irms[start_idx:end_idx] = row # np.ctypeslib.as_ctypes(row)
del ifunc
logging.debug(" .. done writing rms")
if domask:
barrier(mask_events, sid)
logging.debug("applying mask")
for i in range(gr.shape[0]):
mask = np.where(np.bitwise_not(np.isfinite(data[i + ymin-data_row_min,:])))[0]
for j in mask:
idx = np.ravel_multi_index((i + ymin,j),shape)
ibkg[idx] = np.nan
irms[idx] = np.nan
logging.debug(" ... done applying mask")
logging.debug('rows {0}-{1} finished at {2}'.format(ymin, ymax, strftime("%Y-%m-%d %H:%M:%S", gmtime())))
return |
def get_pane_index(self, pane):
" Return the index of the given pane. ValueError if not found. "
assert isinstance(pane, Pane)
return self.panes.index(pane) | Return the index of the given pane. ValueError if not found. | Below is the the instruction that describes the task:
### Input:
Return the index of the given pane. ValueError if not found.
### Response:
def get_pane_index(self, pane):
" Return the index of the given pane. ValueError if not found. "
assert isinstance(pane, Pane)
return self.panes.index(pane) |
def forward(self, inputs, states=None): # pylint: disable=arguments-differ
"""Defines the forward computation. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`."""
batch_size = inputs.shape[self._batch_axis]
skip_states = states is None
if skip_states:
states = self.cell.begin_state(batch_size, ctx=inputs.context)
if isinstance(states, ndarray.NDArray):
states = [states]
for state, info in zip(states, self.cell.state_info(batch_size)):
if state.shape != info['shape']:
raise ValueError(
'Invalid recurrent state shape. Expecting %s, got %s.'%(
str(info['shape']), str(state.shape)))
states = sum(zip(*((j for j in i) for i in states)), ())
outputs, states = self.cell.unroll(
inputs.shape[self._axis], inputs, states,
layout=self._layout, merge_outputs=True)
if skip_states:
return outputs
return outputs, states | Defines the forward computation. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`. | Below is the the instruction that describes the task:
### Input:
Defines the forward computation. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`.
### Response:
def forward(self, inputs, states=None): # pylint: disable=arguments-differ
"""Defines the forward computation. Arguments can be either
:py:class:`NDArray` or :py:class:`Symbol`."""
batch_size = inputs.shape[self._batch_axis]
skip_states = states is None
if skip_states:
states = self.cell.begin_state(batch_size, ctx=inputs.context)
if isinstance(states, ndarray.NDArray):
states = [states]
for state, info in zip(states, self.cell.state_info(batch_size)):
if state.shape != info['shape']:
raise ValueError(
'Invalid recurrent state shape. Expecting %s, got %s.'%(
str(info['shape']), str(state.shape)))
states = sum(zip(*((j for j in i) for i in states)), ())
outputs, states = self.cell.unroll(
inputs.shape[self._axis], inputs, states,
layout=self._layout, merge_outputs=True)
if skip_states:
return outputs
return outputs, states |
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. """
self._meta.setdefault(key, {})[metafield] = value | Set the meta field for a key to a new value. | Below is the the instruction that describes the task:
### Input:
Set the meta field for a key to a new value.
### Response:
def meta_set(self, key, metafield, value):
""" Set the meta field for a key to a new value. """
self._meta.setdefault(key, {})[metafield] = value |
def to_string(self, other):
"""String representation with addtional information"""
arg = "%s/%s,%s" % (
self.ttl, self.get_remaining_ttl(current_time_millis()), other)
return DNSEntry.to_string(self, "record", arg) | String representation with addtional information | Below is the the instruction that describes the task:
### Input:
String representation with addtional information
### Response:
def to_string(self, other):
"""String representation with addtional information"""
arg = "%s/%s,%s" % (
self.ttl, self.get_remaining_ttl(current_time_millis()), other)
return DNSEntry.to_string(self, "record", arg) |
def append(self, item: TransItem):
"""
Append an item to the list. If there is not enough sentences in the
list, then the list is extended as needed.
There is no control made to make sure that the key is consistent.
"""
if not (1 <= item.index <= settings.I18N_MAX_SENTENCES_PER_GROUP):
return
if len(self.sentences) < item.index:
for _ in range(len(self.sentences), item.index):
self.sentences.append(Sentence())
self.sentences[item.index - 1].append(item) | Append an item to the list. If there is not enough sentences in the
list, then the list is extended as needed.
There is no control made to make sure that the key is consistent. | Below is the the instruction that describes the task:
### Input:
Append an item to the list. If there is not enough sentences in the
list, then the list is extended as needed.
There is no control made to make sure that the key is consistent.
### Response:
def append(self, item: TransItem):
"""
Append an item to the list. If there is not enough sentences in the
list, then the list is extended as needed.
There is no control made to make sure that the key is consistent.
"""
if not (1 <= item.index <= settings.I18N_MAX_SENTENCES_PER_GROUP):
return
if len(self.sentences) < item.index:
for _ in range(len(self.sentences), item.index):
self.sentences.append(Sentence())
self.sentences[item.index - 1].append(item) |
def compose(self, mapping):
"""Apply the ``compose`` method to all functions.
Returns a new farray.
"""
items = [f.compose(mapping) for f in self._items]
return self.__class__(items, self.shape, self.ftype) | Apply the ``compose`` method to all functions.
Returns a new farray. | Below is the the instruction that describes the task:
### Input:
Apply the ``compose`` method to all functions.
Returns a new farray.
### Response:
def compose(self, mapping):
"""Apply the ``compose`` method to all functions.
Returns a new farray.
"""
items = [f.compose(mapping) for f in self._items]
return self.__class__(items, self.shape, self.ftype) |
def metapolicy(self, permitted):
"""
Sets metapolicy to ``permitted``. (only applicable to master
policy files). Acceptable values correspond to those listed in
Section 3(b)(i) of the crossdomain.xml specification, and are
also available as a set of constants defined in this module.
By default, Flash assumes a value of ``master-only`` for all
policies except socket policies, (which assume a default of
``all``) so if this is desired (and, for security, it
typically is), this method does not need to be called.
Note that a metapolicy of ``none`` forbids **all** access,
even if one or more domains, headers or identities have
previously been specified as allowed. As such, setting the
metapolicy to ``none`` will remove all access previously
granted by ``allow_domain``, ``allow_headers`` or
``allow_identity``. Additionally, attempting to grant access
via ``allow_domain``, ``allow_headers`` or ``allow_identity``
will, when the metapolicy is ``none``, raise ``TypeError``.
"""
if permitted not in VALID_SITE_CONTROL:
raise TypeError(SITE_CONTROL_ERROR.format(permitted))
if permitted == SITE_CONTROL_NONE:
# Metapolicy 'none' means no access is permitted.
self.domains = {}
self.header_domains = {}
self.identities = []
self.site_control = permitted | Sets metapolicy to ``permitted``. (only applicable to master
policy files). Acceptable values correspond to those listed in
Section 3(b)(i) of the crossdomain.xml specification, and are
also available as a set of constants defined in this module.
By default, Flash assumes a value of ``master-only`` for all
policies except socket policies, (which assume a default of
``all``) so if this is desired (and, for security, it
typically is), this method does not need to be called.
Note that a metapolicy of ``none`` forbids **all** access,
even if one or more domains, headers or identities have
previously been specified as allowed. As such, setting the
metapolicy to ``none`` will remove all access previously
granted by ``allow_domain``, ``allow_headers`` or
``allow_identity``. Additionally, attempting to grant access
via ``allow_domain``, ``allow_headers`` or ``allow_identity``
will, when the metapolicy is ``none``, raise ``TypeError``. | Below is the the instruction that describes the task:
### Input:
Sets metapolicy to ``permitted``. (only applicable to master
policy files). Acceptable values correspond to those listed in
Section 3(b)(i) of the crossdomain.xml specification, and are
also available as a set of constants defined in this module.
By default, Flash assumes a value of ``master-only`` for all
policies except socket policies, (which assume a default of
``all``) so if this is desired (and, for security, it
typically is), this method does not need to be called.
Note that a metapolicy of ``none`` forbids **all** access,
even if one or more domains, headers or identities have
previously been specified as allowed. As such, setting the
metapolicy to ``none`` will remove all access previously
granted by ``allow_domain``, ``allow_headers`` or
``allow_identity``. Additionally, attempting to grant access
via ``allow_domain``, ``allow_headers`` or ``allow_identity``
will, when the metapolicy is ``none``, raise ``TypeError``.
### Response:
def metapolicy(self, permitted):
"""
Sets metapolicy to ``permitted``. (only applicable to master
policy files). Acceptable values correspond to those listed in
Section 3(b)(i) of the crossdomain.xml specification, and are
also available as a set of constants defined in this module.
By default, Flash assumes a value of ``master-only`` for all
policies except socket policies, (which assume a default of
``all``) so if this is desired (and, for security, it
typically is), this method does not need to be called.
Note that a metapolicy of ``none`` forbids **all** access,
even if one or more domains, headers or identities have
previously been specified as allowed. As such, setting the
metapolicy to ``none`` will remove all access previously
granted by ``allow_domain``, ``allow_headers`` or
``allow_identity``. Additionally, attempting to grant access
via ``allow_domain``, ``allow_headers`` or ``allow_identity``
will, when the metapolicy is ``none``, raise ``TypeError``.
"""
if permitted not in VALID_SITE_CONTROL:
raise TypeError(SITE_CONTROL_ERROR.format(permitted))
if permitted == SITE_CONTROL_NONE:
# Metapolicy 'none' means no access is permitted.
self.domains = {}
self.header_domains = {}
self.identities = []
self.site_control = permitted |
def log_player_trades_with_port(self, player, to_port, port, to_player):
"""
:param player: catan.game.Player
:param to_port: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
:param port: catan.board.Port
:param to_player: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
"""
self._log('{0} trades '.format(player.color))
# to_port items
self._log('[')
for i, (num, res) in enumerate(to_port):
if i > 0:
self._log(', ')
self._log('{0} {1}'.format(num, res.value))
self._log(']')
self._log(' to port {0} for '.format(port.type.value))
# to_player items
self._log('[')
for i, (num, res) in enumerate(to_player):
if i > 0:
self._log(', ')
self._log('{0} {1}'.format(num, res.value))
self._log(']')
self._log('\n') | :param player: catan.game.Player
:param to_port: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
:param port: catan.board.Port
:param to_player: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)] | Below is the the instruction that describes the task:
### Input:
:param player: catan.game.Player
:param to_port: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
:param port: catan.board.Port
:param to_player: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
### Response:
def log_player_trades_with_port(self, player, to_port, port, to_player):
"""
:param player: catan.game.Player
:param to_port: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
:param port: catan.board.Port
:param to_player: list of tuples, [(int, game.board.Terrain), (int, game.board.Terrain)]
"""
self._log('{0} trades '.format(player.color))
# to_port items
self._log('[')
for i, (num, res) in enumerate(to_port):
if i > 0:
self._log(', ')
self._log('{0} {1}'.format(num, res.value))
self._log(']')
self._log(' to port {0} for '.format(port.type.value))
# to_player items
self._log('[')
for i, (num, res) in enumerate(to_player):
if i > 0:
self._log(', ')
self._log('{0} {1}'.format(num, res.value))
self._log(']')
self._log('\n') |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.