code stringlengths 51 2.38k | docstring stringlengths 4 15.2k |
|---|---|
def save_binary(self, filename):
_safe_call(_LIB.LGBM_DatasetSaveBinary(
self.construct().handle,
c_str(filename)))
return self | Save Dataset to a binary file.
Parameters
----------
filename : string
Name of the output file.
Returns
-------
self : Dataset
Returns self. |
def get_pages(parser, token):
try:
tag_name, args = token.contents.split(None, 1)
except ValueError:
var_name = 'pages'
else:
args = args.split()
if len(args) == 2 and args[0] == 'as':
var_name = args[1]
else:
msg = 'Invalid arguments for %r tag' % tag_name
raise template.TemplateSyntaxError(msg)
return GetPagesNode(var_name) | Add to context the list of page links.
Usage:
.. code-block:: html+django
{% get_pages %}
This is mostly used for Digg-style pagination.
This call inserts in the template context a *pages* variable, as a sequence
of page links. You can use *pages* in different ways:
- just print *pages.get_rendered* and you will get Digg-style pagination displayed:
.. code-block:: html+django
{{ pages.get_rendered }}
- display pages count:
.. code-block:: html+django
{{ pages|length }}
- check if the page list contains more than one page:
.. code-block:: html+django
{{ pages.paginated }}
{# the following is equivalent #}
{{ pages|length > 1 }}
- get a specific page:
.. code-block:: html+django
{# the current selected page #}
{{ pages.current }}
{# the first page #}
{{ pages.first }}
{# the last page #}
{{ pages.last }}
{# the previous page (or nothing if you are on first page) #}
{{ pages.previous }}
{# the next page (or nothing if you are in last page) #}
{{ pages.next }}
{# the third page #}
{{ pages.3 }}
{# this means page.1 is the same as page.first #}
{# the 1-based index of the first item on the current page #}
{{ pages.current_start_index }}
{# the 1-based index of the last item on the current page #}
{{ pages.current_end_index }}
{# the total number of objects, across all pages #}
{{ pages.total_count }}
{# the first page represented as an arrow #}
{{ pages.first_as_arrow }}
{# the last page represented as an arrow #}
{{ pages.last_as_arrow }}
- iterate over *pages* to get all pages:
.. code-block:: html+django
{% for page in pages %}
{# display page link #}
{{ page.render_link}}
{# the page url (beginning with "?") #}
{{ page.url }}
{# the page path #}
{{ page.path }}
{# the page number #}
{{ page.number }}
{# a string representing the page (commonly the page number) #}
{{ page.label }}
{# check if the page is the current one #}
{{ page.is_current }}
{# check if the page is the first one #}
{{ page.is_first }}
{# check if the page is the last one #}
{{ page.is_last }}
{% endfor %}
You can change the variable name, e.g.:
.. code-block:: html+django
{% get_pages as page_links %}
Must be called after ``{% paginate objects %}``. |
def stream(self):
if not hasattr(self, '_stream'):
self._stream = zmq.eventloop.zmqstream.ZMQStream(self._socket, io_loop=self.io_loop)
return self._stream | Return the current zmqstream, creating one if necessary |
def from_mapping(cls, mapping):
out = cls()
for elem, count in mapping.items():
out._set_count(elem, count)
return out | Create a bag from a dict of elem->count.
Each key in the dict is added if the value is > 0.
Raises:
ValueError: If any count is < 0. |
def _descriptor_names(self):
descriptor_names = []
for name in dir(self):
try:
attr = getattr(type(self), name)
if isinstance(attr, DJANGO_RELATED_FIELD_DESCRIPTOR_CLASSES):
descriptor_names.append(name)
except AttributeError:
pass
return descriptor_names | Attributes which are Django descriptors. These represent a field
which is a one-to-many or many-to-many relationship that is
potentially defined in another model, and doesn't otherwise appear
as a field on this model. |
def activate_in_ec(self, ec_index):
with self._mutex:
if ec_index >= len(self.owned_ecs):
ec_index -= len(self.owned_ecs)
if ec_index >= len(self.participating_ecs):
raise exceptions.BadECIndexError(ec_index)
ec = self.participating_ecs[ec_index]
else:
ec = self.owned_ecs[ec_index]
ec.activate_component(self._obj) | Activate this component in an execution context.
@param ec_index The index of the execution context to activate in.
This index is into the total array of contexts, that
is both owned and participating contexts. If the value
of ec_index is greater than the length of
@ref owned_ecs, that length is subtracted from
ec_index and the result used as an index into
@ref participating_ecs. |
def get_callable_name(c):
if hasattr(c, 'name'):
return six.text_type(c.name)
elif hasattr(c, '__name__'):
return six.text_type(c.__name__) + u'()'
else:
return six.text_type(c) | Get a human-friendly name for the given callable.
:param c: The callable to get the name for
:type c: callable
:rtype: unicode |
def free_parameters(self):
self._update_parameters()
free_parameters_dictionary = collections.OrderedDict()
for parameter_name, parameter in self._parameters.iteritems():
if parameter.free:
free_parameters_dictionary[parameter_name] = parameter
return free_parameters_dictionary | Get a dictionary with all the free parameters in this model
:return: dictionary of free parameters |
def verifyExpanded(self, samplerate):
results = self.expandFunction(self.verifyComponents, args=(samplerate,))
msg = [x for x in results if x]
if len(msg) > 0:
return msg[0]
else:
return 0 | Checks the expanded parameters for invalidating conditions
:param samplerate: generation samplerate (Hz), passed on to component verification
:type samplerate: int
:returns: str -- error message, if any, 0 otherwise |
def get_configs(args, command_args, ansible_args=()):
configs = [
config.Config(
molecule_file=util.abs_path(c),
args=args,
command_args=command_args,
ansible_args=ansible_args,
) for c in glob.glob(MOLECULE_GLOB)
]
_verify_configs(configs)
return configs | Glob the current directory for Molecule config files, instantiate config
objects, and returns a list.
:param args: A dict of options, arguments and commands from the CLI.
:param command_args: A dict of options passed to the subcommand from
the CLI.
:param ansible_args: An optional tuple of arguments provided to the
`ansible-playbook` command.
:return: list |
def __run_delta_sql(self, delta):
self.__run_sql_file(delta.get_file())
self.__update_upgrades_table(delta) | Execute the delta sql file on the database |
def option(*args, **kwargs):
def decorate_sub_command(method):
if not hasattr(method, "optparser"):
method.optparser = SubCmdOptionParser()
method.optparser.add_option(*args, **kwargs)
return method
def decorate_class(klass):
assert _forgiving_issubclass(klass, Cmdln)
_inherit_attr(klass, "toplevel_optparser_options", [], cp=lambda l: l[:])
klass.toplevel_optparser_options.append( (args, kwargs) )
return klass
def decorate(obj):
if _forgiving_issubclass(obj, Cmdln):
return decorate_class(obj)
else:
return decorate_sub_command(obj)
return decorate | Decorator to add an option to the optparser argument of a Cmdln
subcommand
To add a toplevel option, apply the decorator on the class itself. (see
p4.py for an example)
Example:
@cmdln.option("-E", dest="environment_path")
class MyShell(cmdln.Cmdln):
@cmdln.option("-f", "--force", help="force removal")
def do_remove(self, subcmd, opts, *args):
#... |
def continuous_frequency(self, data_frame):
tap_timestamps = data_frame.td[data_frame.action_type==1]
cont_freq = 1.0/(np.array(tap_timestamps[1:-1])-np.array(tap_timestamps[0:-2]))
duration = math.ceil(data_frame.td[-1])
return cont_freq, duration | This method returns continuous frequency
:param data_frame: the data frame
:type data_frame: pandas.DataFrame
:return cont_freq: frequency
:rtype cont_freq: float |
def tree_queryset(value):
from django.db.models.query import QuerySet
from copy import deepcopy
if not isinstance(value, QuerySet):
return value
qs = value
qs2 = deepcopy(qs)
is_filtered = bool(qs.query.where.children)
if is_filtered:
include_pages = set()
for p in qs2.order_by('rght').iterator():
if p.parent_id and p.parent_id not in include_pages and p.id not in include_pages:
ancestor_id_list = p.get_ancestors().values_list('id', flat=True)
include_pages.update(ancestor_id_list)
if include_pages:
qs = qs | qs.model._default_manager.filter(id__in=include_pages)
qs = qs.distinct()
return qs | Converts a normal queryset from an MPTT model to include all the ancestors
so a filtered subset of items can be formatted correctly |
def is_newer_b(a, bfiles):
if isinstance(bfiles, basestring):
bfiles = [bfiles]
if not op.exists(a): return False
if not all(op.exists(b) for b in bfiles): return False
atime = os.stat(a).st_mtime
for b in bfiles:
if atime > os.stat(b).st_mtime:
return False
return True | check that all b files have been modified more recently than a |
def add_page(self, title=None, content=None, old_url=None,
tags=None, old_id=None, old_parent_id=None):
if not title:
text = decode_entities(strip_tags(content)).replace("\n", " ")
title = text.split(". ")[0]
if tags is None:
tags = []
self.pages.append({
"title": title,
"content": content,
"tags": tags,
"old_url": old_url,
"old_id": old_id,
"old_parent_id": old_parent_id,
}) | Adds a page to the list of pages to be imported - used by the
Wordpress importer. |
def _sub_nat(self):
result = np.zeros(len(self), dtype=np.int64)
result.fill(iNaT)
return result.view('timedelta64[ns]') | Subtract pd.NaT from self |
def relaxNGNewMemParserCtxt(buffer, size):
ret = libxml2mod.xmlRelaxNGNewMemParserCtxt(buffer, size)
if ret is None:raise parserError('xmlRelaxNGNewMemParserCtxt() failed')
return relaxNgParserCtxt(_obj=ret) | Create an XML RelaxNGs parse context for that memory buffer
expected to contain an XML RelaxNGs file. |
def _unzip_file(self, zip_file, out_folder):
try:
zf = zipfile.ZipFile(zip_file, 'r')
zf.extractall(path=out_folder)
zf.close()
del zf
return True
except:
return False | unzips a file to a given folder |
def make_ui(self, path='hgwebdir.config'):
sections = [
'alias',
'auth',
'decode/encode',
'defaults',
'diff',
'email',
'extensions',
'format',
'merge-patterns',
'merge-tools',
'hooks',
'http_proxy',
'smtp',
'patch',
'paths',
'profiling',
'server',
'trusted',
'ui',
'web',
]
repos = path
baseui = ui.ui()
cfg = config.config()
cfg.read(repos)
self.paths = cfg.items('paths')
self.base_path = self.paths[0][1].replace('*', '')
self.check_repo_dir(self.paths)
self.set_statics(cfg)
for section in sections:
for k, v in cfg.items(section):
baseui.setconfig(section, k, v)
return baseui | A funcion that will read python rc files and make an ui from read options
:param path: path to mercurial config file |
def _conv_shape_tuple(self, lhs_shape, rhs_shape, strides, pads):
if isinstance(pads, str):
pads = padtype_to_pads(lhs_shape[2:], rhs_shape[2:], strides, pads)
if len(pads) != len(lhs_shape) - 2:
msg = 'Wrong number of explicit pads for conv: expected {}, got {}.'
raise TypeError(msg.format(len(lhs_shape) - 2, len(pads)))
lhs_padded = onp.add(lhs_shape[2:], onp.add(*zip(*pads)))
out_space = onp.floor_divide(
onp.subtract(lhs_padded, rhs_shape[2:]), strides) + 1
out_space = onp.maximum(0, out_space)
out_shape = (lhs_shape[0], rhs_shape[0]) + tuple(out_space)
return tuple(out_shape) | Compute the shape of a conv given input shapes in canonical order. |
def _swclock_to_hwclock():
res = __salt__['cmd.run_all'](['hwclock', '--systohc'], python_shell=False)
if res['retcode'] != 0:
msg = 'hwclock failed to set hardware clock from software clock: {0}'.format(res['stderr'])
raise CommandExecutionError(msg)
return True | Set hardware clock to value of software clock. |
def rt_update(self, statement, linenum, mode, xparser):
section = self.find_section(self.module.charindex(linenum, 1))
if section == "body":
xparser.parse_line(statement, self, mode)
elif section == "signature":
if mode == "insert":
xparser.parse_signature(statement, self) | Uses the specified line parser to parse the given line.
:arg statement: a string of lines that are part of a single statement.
:arg linenum: the line number of the first line in the list relative to
the entire module contents.
arg mode: either 'insert', 'replace' or 'delete'
:arg xparser: an instance of the executable parser from the real
time update module's line parser. |
def close(self):
if self.error_log and not self.quiet:
print("\nErrors occured:", file=sys.stderr)
for err in self.error_log:
print(err, file=sys.stderr)
self._session.close() | Print error log and close session |
def contains_opposite_color_piece(self, square, position):
return not position.is_square_empty(square) and \
position.piece_at_square(square).color != self.color | Finds if square on the board is occupied by a ``Piece``
belonging to the opponent.
:type: square: Location
:type: position: Board
:rtype: bool |
def _delete_element(name, element_type, data, server=None):
_api_delete('{0}/{1}'.format(element_type, quote(name, safe='')), data, server)
return name | Delete an element |
def _get_register_size(self, reg_offset):
if reg_offset in self.project.arch.register_names:
reg_name = self.project.arch.register_names[reg_offset]
reg_size = self.project.arch.registers[reg_name][1]
return reg_size
l.warning("_get_register_size(): unsupported register offset %d. Assum size 1. "
"More register name mappings should be implemented in archinfo.", reg_offset)
return 1 | Get the size of a register.
:param int reg_offset: Offset of the register.
:return: Size in bytes.
:rtype: int |
def scaledBy(self, scale):
scaled = deepcopy(self)
for test in scaled.elements[0].tests:
if type(test.value) in (int, float):
if test.property == 'scale-denominator':
test.value /= scale
elif test.property == 'zoom':
test.value += log(scale)/log(2)
return scaled | Return a new Selector with scale denominators scaled by a number. |
def get_child_ids(self, parent_alias):
self._cache_init()
return self._cache_get_entry(self.CACHE_NAME_PARENTS, parent_alias, []) | Returns child IDs of the given parent category
:param str parent_alias: Parent category alias
:rtype: list
:return: a list of child IDs |
def get_instance(self, payload):
return ActivityInstance(self._version, payload, workspace_sid=self._solution['workspace_sid'], ) | Build an instance of ActivityInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.taskrouter.v1.workspace.activity.ActivityInstance
:rtype: twilio.rest.taskrouter.v1.workspace.activity.ActivityInstance |
def get_seqs_fasta(seqs, names, out_fa):
with open(out_fa, 'w') as fa_handle:
for s, n in itertools.izip(seqs, names):
print(">cx{1}-{0}\n{0}".format(s, n), file=fa_handle)
return out_fa | get fasta from sequences |
def python_zip(file_list, gallery_path, extension='.py'):
zipname = os.path.basename(os.path.normpath(gallery_path))
zipname += '_python' if extension == '.py' else '_jupyter'
zipname = os.path.join(gallery_path, zipname + '.zip')
zipname_new = zipname + '.new'
with zipfile.ZipFile(zipname_new, mode='w') as zipf:
for fname in file_list:
file_src = os.path.splitext(fname)[0] + extension
zipf.write(file_src, os.path.relpath(file_src, gallery_path))
_replace_md5(zipname_new)
return zipname | Stores all files in file_list into an zip file
Parameters
----------
file_list : list
Holds all the file names to be included in zip file
gallery_path : str
path to where the zipfile is stored
extension : str
'.py' or '.ipynb' In order to deal with downloads of python
sources and jupyter notebooks the file extension from files in
file_list will be removed and replace with the value of this
variable while generating the zip file
Returns
-------
zipname : str
zip file name, written as `target_dir_{python,jupyter}.zip`
depending on the extension |
def files(self, request, id):
gist = self.send(request, id).json()
return gist['files'] | Returns a list of files in the gist
Arguments:
request: an initial request object
id: the gist identifier
Returns:
A list of the files |
def combine(self, *others):
p = self.clone()
for other in others:
p.hooks.extend(other.hooks)
return p | Combine other Panglers into this Pangler.
Returns a copy of this Pangler with all of the hooks from the provided
Panglers added to it as well. The new Pangler will be bound to the same
instance and have the same `id`, but new hooks will not be shared with
this Pangler or any provided Panglers. |
def check_serializable(cls):
if is_named_tuple(cls):
return
if not hasattr(cls, "__new__"):
print("The class {} does not have a '__new__' attribute and is "
"probably an old-stye class. Please make it a new-style class "
"by inheriting from 'object'.")
raise RayNotDictionarySerializable("The class {} does not have a "
"'__new__' attribute and is "
"probably an old-style class. We "
"do not support this. Please make "
"it a new-style class by "
"inheriting from 'object'."
.format(cls))
try:
obj = cls.__new__(cls)
except Exception:
raise RayNotDictionarySerializable("The class {} has overridden "
"'__new__', so Ray may not be able "
"to serialize it efficiently."
.format(cls))
if not hasattr(obj, "__dict__"):
raise RayNotDictionarySerializable("Objects of the class {} do not "
"have a '__dict__' attribute, so "
"Ray cannot serialize it "
"efficiently.".format(cls))
if hasattr(obj, "__slots__"):
raise RayNotDictionarySerializable("The class {} uses '__slots__', so "
"Ray may not be able to serialize "
"it efficiently.".format(cls)) | Throws an exception if Ray cannot serialize this class efficiently.
Args:
cls (type): The class to be serialized.
Raises:
Exception: An exception is raised if Ray cannot serialize this class
efficiently. |
def rar3_s2k(psw, salt):
if not isinstance(psw, unicode):
psw = psw.decode('utf8')
seed = bytearray(psw.encode('utf-16le') + salt)
h = Rar3Sha1(rarbug=True)
iv = EMPTY
for i in range(16):
for j in range(0x4000):
cnt = S_LONG.pack(i * 0x4000 + j)
h.update(seed)
h.update(cnt[:3])
if j == 0:
iv += h.digest()[19:20]
key_be = h.digest()[:16]
key_le = pack("<LLLL", *unpack(">LLLL", key_be))
return key_le, iv | String-to-key hash for RAR3. |
def numberwang(random=random, *args, **kwargs):
n = random.randint(2, 150)
return inflectify.number_to_words(n) | Return a number that is spelled out.
>>> numberwang(random=mock_random)
'two'
>>> numberwang(random=mock_random, capitalize=True)
'Two'
>>> numberwang(random=mock_random, slugify=True)
'two' |
def badge_label(self, badge):
kind = badge.kind if isinstance(badge, Badge) else badge
return self.__badges__[kind] | Display the badge label for a given kind |
def onSelectRow(self, event):
grid = self.grid
row = event.Row
default = (255, 255, 255, 255)
highlight = (191, 216, 216, 255)
cell_color = grid.GetCellBackgroundColour(row, 0)
attr = wx.grid.GridCellAttr()
if cell_color == default:
attr.SetBackgroundColour(highlight)
self.selected_rows.add(row)
else:
attr.SetBackgroundColour(default)
try:
self.selected_rows.remove(row)
except KeyError:
pass
if self.selected_rows and self.deleteRowButton:
self.deleteRowButton.Enable()
else:
self.deleteRowButton.Disable()
grid.SetRowAttr(row, attr)
grid.Refresh() | Highlight or unhighlight a row for possible deletion. |
def download_data():
with urlopen(_retrieve_download_url()) as f:
with open(os.path.join(CACHE_FOLDER, CACHE_ZIP), "wb") as local_file:
local_file.write(f.read()) | Downloads complete station dataset including catchment descriptors and amax records. And saves it into a cache
folder. |
def make_veto_table(workflow, out_dir, vetodef_file=None, tags=None):
if vetodef_file is None:
vetodef_file = workflow.cp.get_opt_tags("workflow-segments",
"segments-veto-definer-file", [])
file_url = urlparse.urljoin('file:',
urllib.pathname2url(vetodef_file))
vdf_file = File(workflow.ifos, 'VETO_DEFINER',
workflow.analysis_time, file_url=file_url)
vdf_file.PFN(file_url, site='local')
else:
vdf_file = vetodef_file
if tags is None: tags = []
makedir(out_dir)
node = PlotExecutable(workflow.cp, 'page_vetotable', ifos=workflow.ifos,
out_dir=out_dir, tags=tags).create_node()
node.add_input_opt('--veto-definer-file', vdf_file)
node.new_output_file_opt(workflow.analysis_time, '.html', '--output-file')
workflow += node
return node.output_files[0] | Creates a node in the workflow for writing the veto_definer
table. Returns a File instances for the output file. |
def push_doc(self, document):
msg = self._protocol.create('PUSH-DOC', document)
reply = self._send_message_wait_for_reply(msg)
if reply is None:
raise RuntimeError("Connection to server was lost")
elif reply.header['msgtype'] == 'ERROR':
raise RuntimeError("Failed to push document: " + reply.content['text'])
else:
return reply | Push a document to the server, overwriting any existing server-side doc.
Args:
document : (Document)
A Document to push to the server
Returns:
The server reply |
def send_hid_event(use_page, usage, down):
message = create(protobuf.SEND_HID_EVENT_MESSAGE)
event = message.inner()
abstime = binascii.unhexlify(b'438922cf08020000')
data = use_page.to_bytes(2, byteorder='big')
data += usage.to_bytes(2, byteorder='big')
data += (1 if down else 0).to_bytes(2, byteorder='big')
event.hidEventData = abstime + \
binascii.unhexlify(b'00000000000000000100000000000000020' +
b'00000200000000300000001000000000000') + \
data + \
binascii.unhexlify(b'0000000000000001000000')
return message | Create a new SEND_HID_EVENT_MESSAGE. |
def value(self, cell):
if self._value is not None:
return self._value(cell)
else:
return cell.value | Extract the value of ``cell``, ready to be rendered.
If this Column was instantiated with a ``value`` attribute, it
is called here to provide the value. (For example, to provide a
calculated value.) Otherwise, ``cell.value`` is returned. |
def _rel_import(module, tgt):
try:
exec("from ." + module + " import " + tgt, globals(), locals())
except SyntaxError:
exec("from " + module + " import " + tgt, globals(), locals())
except (ValueError, SystemError):
exec("from " + module + " import " + tgt, globals(), locals())
return eval(tgt) | Using relative import in both Python 2 and Python 3 |
def getLiftOps(self, valu, cmpr='='):
if valu is None:
iops = (('pref', b''),)
return (
('indx', ('byprop', self.pref, iops)),
)
if cmpr == '~=':
return (
('form:re', (self.name, valu, {})),
)
lops = self.type.getLiftOps('form', cmpr, (None, self.name, valu))
if lops is not None:
return lops
iops = self.type.getIndxOps(valu, cmpr)
return (
('indx', ('byprop', self.pref, iops)),
) | Get a set of lift operations for use with an Xact. |
def paths_for_shell(paths, separator=' '):
paths = filter(None, paths)
paths = map(shlex.quote, paths)
if separator is None:
return paths
return separator.join(paths) | Converts a list of paths for use in shell commands |
def _apply_options(self, token):
if token.is_punct and self.remove_punct:
return None
if token.is_stop and self.remove_stop_words:
return None
if token.is_digit and self.remove_digits:
return None
if token.is_oov and self.exclude_oov:
return None
if token.pos_ in self.exclude_pos_tags:
return None
if token.ent_type_ in self.exclude_entities:
return None
if self.lemmatize:
return token.lemma_
if self.lower:
return token.lower_
return token.orth_ | Applies various filtering and processing options on token.
Returns:
The processed token. None if filtered. |
def alpha_senders(self):
if self._alpha_senders is None:
self._alpha_senders = AlphaSenderList(self._version, service_sid=self._solution['sid'], )
return self._alpha_senders | Access the alpha_senders
:returns: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderList
:rtype: twilio.rest.messaging.v1.service.alpha_sender.AlphaSenderList |
def execute_job(job, app=Injected, task_router=Injected):
app.logger.info("Job fetched, preparing the task '{0}'.".format(job.path))
task, task_callable = task_router.route(job.path)
jc = JobContext(job, task, task_callable)
app.logger.info("Executing task.")
result = jc.task_callable(jc.task_data)
app.logger.info("Task {0} executed successfully.".format(job.path))
return {'task_name': job.path, 'data': result} | Execute a job.
:param job: job to execute
:type job: Job
:param app: service application instance, injected
:type app: ServiceApplication
:param task_router: task router instance, injected
:type task_router: TaskRouter
:return: task result
:rtype: dict |
def _get_package_status(package):
status = package["status_str"] or "Unknown"
stage = package["stage_str"] or "Unknown"
if stage == "Fully Synchronised":
return status
return "%(status)s / %(stage)s" % {"status": status, "stage": stage} | Get the status for a package. |
def input_variables(self, exclude_specials=True):
def has_write_access(accesses):
return any(acc for acc in accesses if acc.access_type == 'write')
def has_read_access(accesses):
return any(acc for acc in accesses if acc.access_type == 'read')
input_variables = [ ]
for variable, accesses in self._variable_accesses.items():
if not has_write_access(accesses) and has_read_access(accesses):
if not exclude_specials or not variable.category:
input_variables.append(variable)
return input_variables | Get all variables that have never been written to.
:return: A list of variables that are never written to. |
def peek_64(library, session, address):
value_64 = ViUInt64()
ret = library.viPeek64(session, address, byref(value_64))
return value_64.value, ret | Read an 64-bit value from the specified address.
Corresponds to viPeek64 function of the VISA library.
:param library: the visa library wrapped by ctypes.
:param session: Unique logical identifier to a session.
:param address: Source address to read the value.
:return: Data read from bus, return value of the library call.
:rtype: bytes, :class:`pyvisa.constants.StatusCode` |
def register_processor(self, processor):
if not callable(processor):
raise ValueError('Processor %s is not callable.' % processor.__class__.__name__)
else:
self.processors.append(processor) | Register a new processor.
Note that processors are called in the order that they are registered. |
def PWS_stack(streams, weight=2, normalize=True):
Linstack = linstack(streams)
instaphases = []
print("Computing instantaneous phase")
for stream in streams:
instaphase = stream.copy()
for tr in instaphase:
analytic = hilbert(tr.data)
envelope = np.sqrt(np.sum((np.square(analytic),
np.square(tr.data)), axis=0))
tr.data = analytic / envelope
instaphases.append(instaphase)
print("Computing the phase stack")
Phasestack = linstack(instaphases, normalize=normalize)
for tr in Phasestack:
tr.data = Linstack.select(station=tr.stats.station)[0].data *\
np.abs(tr.data ** weight)
return Phasestack | Compute the phase weighted stack of a series of streams.
.. note:: It is recommended to align the traces before stacking.
:type streams: list
:param streams: List of :class:`obspy.core.stream.Stream` to stack.
:type weight: float
:param weight: Exponent to the phase stack used for weighting.
:type normalize: bool
:param normalize: Normalize traces before stacking.
:return: Stacked stream.
:rtype: :class:`obspy.core.stream.Stream` |
def _parse_command_line():
parser = argparse.ArgumentParser()
parser.add_argument(
'--portserver_static_pool',
type=str,
default='15000-24999',
help='Comma separated N-P Range(s) of ports to manage (inclusive).')
parser.add_argument(
'--portserver_unix_socket_address',
type=str,
default='@unittest-portserver',
help='Address of AF_UNIX socket on which to listen (first @ is a NUL).')
parser.add_argument('--verbose',
action='store_true',
default=False,
help='Enable verbose messages.')
parser.add_argument('--debug',
action='store_true',
default=False,
help='Enable full debug messages.')
return parser.parse_args(sys.argv[1:]) | Configure and parse our command line flags. |
def command_line(argv):
arguments = parse_command_line(argv)
if arguments.generate:
generate_fixer_file(arguments.generate)
paths = edit_files(arguments.patterns,
expressions=arguments.expressions,
functions=arguments.functions,
executables=arguments.executables,
start_dirs=arguments.start_dirs,
max_depth=arguments.max_depth,
dry_run=arguments.dry_run,
output=arguments.output,
encoding=arguments.encoding,
newline=arguments.newline)
is_sys = arguments.output in [sys.stdout, sys.stderr]
if not is_sys and isinstance(arguments.output, io.IOBase):
arguments.output.close()
return paths | Instantiate an editor and process arguments.
Optional argument:
- processed_paths: paths processed are appended to the list. |
def get_weights_fn(modality_type, value=None):
if modality_type in (ModalityType.CTC_SYMBOL,
ModalityType.IDENTITY_SYMBOL,
ModalityType.MULTI_LABEL,
ModalityType.SYMBOL,
ModalityType.SYMBOL_ONE_HOT):
return common_layers.weights_nonzero
elif modality_type in ModalityType.get_choices():
return common_layers.weights_all
return value | Gets default weights function; if none available, return value. |
def parse_from_parent(
self,
parent,
state
):
parsed_dict = self._dictionary.parse_from_parent(parent, state)
return self._converter.from_dict(parsed_dict) | Parse the aggregate from the provided parent XML element. |
def pipe(cmd, txt):
return Popen(
cmd2args(cmd),
stdout=subprocess.PIPE,
stdin=subprocess.PIPE,
shell=win32
).communicate(txt)[0] | Pipe `txt` into the command `cmd` and return the output. |
def resolve_nodes(self, nodes):
if not nodes:
return []
resolved = []
for node in nodes:
if node in resolved:
continue
self.resolve_node(node, resolved)
return resolved | Resolve a given set of nodes.
Dependencies of the nodes, even if they are not in the given list will
also be resolved!
:param list nodes: List of nodes to be resolved
:return: A list of resolved nodes |
def validate(bo, error_level: str = "WARNING") -> Tuple[bool, List[Tuple[str, str]]]:
if bo.ast:
bo = validate_functions(bo.ast, bo)
if error_level == "WARNING":
bo = validate_arg_values(bo.ast, bo)
else:
bo.validation_messages.append(("ERROR", "Invalid BEL Statement - cannot parse"))
for msg in bo.validation_messages:
if msg[0] == "ERROR":
bo.parse_valid = False
break
return bo | Semantically validate BEL AST
Add errors and warnings to bel_obj.validation_messages
Error Levels are similar to log levels - selecting WARNING includes both
WARNING and ERROR, selecting ERROR just includes ERROR
Args:
bo: main BEL language object
error_level: return ERRORs only or also WARNINGs
Returns:
Tuple[bool, List[Tuple[str, str]]]: (is_valid, messages) |
def cancel(self):
with self._lock:
if self._state not in (self.S_PENDING, self.S_RUNNING):
return False
self._result = Cancelled('cancelled by Future.cancel()')
self._state = self.S_EXCEPTION
self._done.set()
return True | Cancel the execution of the async function, if possible.
This method marks the future as done and sets the :class:`Cancelled`
exception.
A future that is not running can always be cancelled. However when a
future is running, the ability to cancel it depends on the pool
implementation. For example, a fiber pool can cancel running fibers but
a thread pool cannot.
Return ``True`` if the future could be cancelled, ``False`` otherwise. |
def cli(env, **args):
create_args = _parse_create_args(env.client, args)
create_args['primary_disk'] = args.get('primary_disk')
manager = CapacityManager(env.client)
capacity_id = args.get('capacity_id')
test = args.get('test')
result = manager.create_guest(capacity_id, test, create_args)
env.fout(_build_receipt(result, test)) | Allows for creating a virtual guest in a reserved capacity. |
def attr_category_postprocess(get_attr_category_func):
@functools.wraps(get_attr_category_func)
def wrapped(
name: str, attr: Any, obj: Any
) -> Tuple[AttrCategory, ...]:
category = get_attr_category_func(name, attr, obj)
category = list(category) if isinstance(category, tuple) else [category]
if is_slotted_attr(obj, name):
category.append(AttrCategory.SLOT)
return tuple(category)
return wrapped | Unifies attr_category to a tuple, add AttrCategory.SLOT if needed. |
def generate_id(self, agreement_id, types, values):
values_hash = utils.generate_multi_value_hash(types, values)
return utils.generate_multi_value_hash(
['bytes32', 'address', 'bytes32'],
[agreement_id, self.address, values_hash]
) | Generate id for the condition.
:param agreement_id: id of the agreement, hex str
:param types: list of types
:param values: list of values
:return: id, str |
def index_by(self, column_or_label):
column = self._get_column(column_or_label)
index = {}
for key, row in zip(column, self.rows):
index.setdefault(key, []).append(row)
return index | Return a dict keyed by values in a column that contains lists of
rows corresponding to each value. |
def cmd(send, *_):
a = ["primary", "secondary", "tertiary", "hydraulic", "compressed", "required", "pseudo", "intangible", "flux"]
b = [
"compressor", "engine", "lift", "elevator", "irc bot", "stabilizer", "computer", "fwilson", "csl", "4506", "router", "switch", "thingy",
"capacitor"
]
c = [
"broke", "exploded", "corrupted", "melted", "froze", "died", "reset", "was seen by the godofskies", "burned", "corroded", "reversed polarity",
"was accidentallied", "nuked"
]
send("because %s %s %s" % ((choice(a), choice(b), choice(c)))) | Gives a reason for something.
Syntax: {command} |
def resolve_filenames(all_expr):
files = []
for expr in all_expr.split(','):
expr = expr.strip()
files += fs.get_fs(expr).resolve_filenames(expr)
log.debug('Filenames: {0}'.format(files))
return files | resolve expression for a filename
:param all_expr:
A comma separated list of expressions. The expressions can contain
the wildcard characters ``*`` and ``?``. It also resolves Spark
datasets to the paths of the individual partitions
(i.e. ``my_data`` gets resolved to
``[my_data/part-00000, my_data/part-00001]``).
:returns: A list of file names.
:rtype: list |
def is_outlier(df, item_id, segment_id, price):
if (segment_id, item_id) not in df.index:
return False
mean = df.loc[(segment_id, item_id)]['mean']
std = df.loc[(segment_id, item_id)]['std']
return gaussian_outlier.is_outlier(
x=price, mean=mean, standard_deviation=std
) | Verify if a item is an outlier compared to the
other occurrences of the same item, based on his price.
Args:
item_id: idPlanilhaItens
segment_id: idSegmento
price: VlUnitarioAprovado |
def getDetectorClassConstructors(detectors):
detectorConstructors = {
d : globals()[detectorNameToClass(d)] for d in detectors}
return detectorConstructors | Takes in names of detectors. Collects class names that correspond to those
detectors and returns them in a dict. The dict maps detector name to class
names. Assumes the detectors have been imported. |
def file_contents(file_name):
curr_dir = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(curr_dir, file_name)) as the_file:
contents = the_file.read()
return contents | Given a file name to a valid file returns the file object. |
def run(self, funcs):
funcs = [f if callable(f) else functools.partial(*f) for f in funcs]
if len(funcs) == 1:
return [funcs[0]()]
if len(funcs) > self._workers:
self.shutdown()
self._workers = len(funcs)
self._executor = futures.ThreadPoolExecutor(self._workers)
futs = [self._executor.submit(f) for f in funcs]
done, not_done = futures.wait(futs, self._timeout, futures.FIRST_EXCEPTION)
for f in done:
if not f.cancelled() and f.exception() is not None:
if not_done:
for nd in not_done:
nd.cancel()
self.shutdown(False)
raise f.exception()
return [f.result(timeout=0) for f in futs] | Run a set of functions in parallel, returning their results.
Make sure any function you pass exits with a reasonable timeout. If it
doesn't return within the timeout or the result is ignored due an exception
in a separate thread it will continue to stick around until it finishes,
including blocking process exit.
Args:
funcs: An iterable of functions or iterable of args to functools.partial.
Returns:
A list of return values with the values matching the order in funcs.
Raises:
Propagates the first exception encountered in one of the functions. |
def set_unit_desired_state(self, unit, desired_state):
if desired_state not in self._STATES:
raise ValueError('state must be one of: {0}'.format(
self._STATES
))
if isinstance(unit, Unit):
unit = unit.name
else:
unit = str(unit)
self._single_request('Units.Set', unitName=unit, body={
'desiredState': desired_state
})
return self.get_unit(unit) | Update the desired state of a unit running in the cluster
Args:
unit (str, Unit): The Unit, or name of the unit to update
desired_state: State the user wishes the Unit to be in
("inactive", "loaded", or "launched")
Returns:
Unit: The unit that was updated
Raises:
fleet.v1.errors.APIError: Fleet returned a response code >= 400
ValueError: An invalid value was provided for ``desired_state`` |
def get_view_name(view_cls, suffix=None):
name = view_cls.__name__
name = formatting.remove_trailing_string(name, 'View')
name = formatting.remove_trailing_string(name, 'ViewSet')
name = formatting.camelcase_to_spaces(name)
if suffix:
name += ' ' + suffix
return name | Given a view class, return a textual name to represent the view.
This name is used in the browsable API, and in OPTIONS responses.
This function is the default for the `VIEW_NAME_FUNCTION` setting. |
def list_presets(cfg, out=sys.stdout):
for section in cfg.sections():
if section.startswith("preset:"):
out.write((section.replace("preset:", "")) + os.linesep)
for k, v in cfg.items(section):
out.write("\t%s = %s" % (k, v) + os.linesep) | Write a human readable list of available presets to out.
:param cfg: ConfigParser instance
:param out: file object to write to |
def _normalize_batch(b:Tuple[Tensor,Tensor], mean:FloatTensor, std:FloatTensor, do_x:bool=True, do_y:bool=False)->Tuple[Tensor,Tensor]:
"`b` = `x`,`y` - normalize `x` array of imgs and `do_y` optionally `y`."
x,y = b
mean,std = mean.to(x.device),std.to(x.device)
if do_x: x = normalize(x,mean,std)
if do_y and len(y.shape) == 4: y = normalize(y,mean,std)
return x,y | `b` = `x`,`y` - normalize `x` array of imgs and `do_y` optionally `y`. |
def getKey(self, namespace, ns_key):
namespace = self._fixNS(namespace)
if namespace == BARE_NS:
return ns_key
ns_alias = self.namespaces.getAlias(namespace)
if ns_alias is None:
return None
if ns_alias == NULL_NAMESPACE:
tail = ns_key
else:
tail = '%s.%s' % (ns_alias, ns_key)
return 'openid.' + tail | Get the key for a particular namespaced argument |
def _hierarchy_bounds(intervals_hier):
boundaries = list(itertools.chain(*list(itertools.chain(*intervals_hier))))
return min(boundaries), max(boundaries) | Compute the covered time range of a hierarchical segmentation.
Parameters
----------
intervals_hier : list of ndarray
A hierarchical segmentation, encoded as a list of arrays of segment
intervals.
Returns
-------
t_min : float
t_max : float
The minimum and maximum times spanned by the annotation |
def strip_keys(d, nones=False, depth=0):
r
ans = type(d)((str(k).strip(), v) for (k, v) in viewitems(OrderedDict(d))
if (not nones or (str(k).strip() and str(k).strip() != 'None')))
if int(depth) < 1:
return ans
if int(depth) > strip_keys.MAX_DEPTH:
warnings.warn(RuntimeWarning("Maximum recursion depth allowance (%r) exceeded." % strip_keys.MAX_DEPTH))
for k, v in viewitems(ans):
if isinstance(v, Mapping):
ans[k] = strip_keys(v, nones=nones, depth=int(depth) - 1)
return ans | r"""Strip whitespace from all dictionary keys, to the depth indicated
>>> strip_keys({' a': ' a', ' b\t c ': {'d e ': 'd e '}}) == {'a': ' a', 'b\t c': {'d e ': 'd e '}}
True
>>> strip_keys({' a': ' a', ' b\t c ': {'d e ': 'd e '}}, depth=100) == {'a': ' a', 'b\t c': {'d e': 'd e '}}
True |
def tyn_calus_scaling(target, DABo, To, mu_o, viscosity='pore.viscosity',
temperature='pore.temperature'):
r
Ti = target[temperature]
mu_i = target[viscosity]
value = DABo*(Ti/To)*(mu_o/mu_i)
return value | r"""
Uses Tyn_Calus model to adjust a diffusion coeffciient for liquids from
reference conditions to conditions of interest
Parameters
----------
target : OpenPNM Object
The object for which these values are being calculated. This
controls the length of the calculated array, and also provides
access to other necessary thermofluid properties.
DABo : float, array_like
Diffusion coefficient at reference conditions
mu_o, To : float, array_like
Viscosity & temperature at reference conditions, respectively
pressure : string
The dictionary key containing the pressure values in Pascals (Pa)
temperature : string
The dictionary key containing the temperature values in Kelvin (K) |
def channel_state_until_state_change(
raiden,
canonical_identifier: CanonicalIdentifier,
state_change_identifier: int,
) -> typing.Optional[NettingChannelState]:
wal = restore_to_state_change(
transition_function=node.state_transition,
storage=raiden.wal.storage,
state_change_identifier=state_change_identifier,
)
msg = 'There is a state change, therefore the state must not be None'
assert wal.state_manager.current_state is not None, msg
chain_state = wal.state_manager.current_state
channel_state = views.get_channelstate_by_canonical_identifier(
chain_state=chain_state,
canonical_identifier=canonical_identifier,
)
if not channel_state:
raise RaidenUnrecoverableError(
f"Channel was not found before state_change {state_change_identifier}",
)
return channel_state | Go through WAL state changes until a certain balance hash is found. |
def local_regon(self):
regon_digits = [int(digit) for digit in list(self.regon())]
for _ in range(4):
regon_digits.append(self.random_digit())
regon_digits.append(local_regon_checksum(regon_digits))
return ''.join(str(digit) for digit in regon_digits) | Returns 14 character Polish National Business Registry Number,
local entity number.
https://pl.wikipedia.org/wiki/REGON |
def _bg(self, coro: coroutine) -> asyncio.Task:
async def runner():
try:
await coro
except:
self._log.exception("async: Coroutine raised exception")
return asyncio.ensure_future(runner()) | Run coro in background, log errors |
def center_kernel(kernel, iterations=20):
kernel = kernel_norm(kernel)
nx, ny = np.shape(kernel)
if nx %2 == 0:
raise ValueError("kernel needs odd number of pixels")
x_grid, y_grid = util.make_grid(nx, deltapix=1, left_lower=False)
x_w = np.sum(kernel * util.array2image(x_grid))
y_w = np.sum(kernel * util.array2image(y_grid))
kernel_centered = de_shift_kernel(kernel, shift_x=-x_w, shift_y=-y_w, iterations=iterations)
return kernel_norm(kernel_centered) | given a kernel that might not be perfectly centered, this routine computes its light weighted center and then
moves the center in an iterative process such that it is centered
:param kernel: 2d array (odd numbers)
:param iterations: int, number of iterations
:return: centered kernel |
def inject(function):
try:
bindings = _infer_injected_bindings(function)
except _BindingNotYetAvailable:
bindings = 'deferred'
return method_wrapper(function, bindings) | Decorator declaring parameters to be injected.
eg.
>>> Sizes = Key('sizes')
>>> Names = Key('names')
>>>
>>> class A:
... @inject
... def __init__(self, number: int, name: str, sizes: Sizes):
... print([number, name, sizes])
...
>>> def configure(binder):
... binder.bind(A)
... binder.bind(int, to=123)
... binder.bind(str, to='Bob')
... binder.bind(Sizes, to=[1, 2, 3])
Use the Injector to get a new instance of A:
>>> a = Injector(configure).get(A)
[123, 'Bob', [1, 2, 3]]
.. note::
This decorator is to be used on class constructors. Using it on non-constructor
methods worked in the past but it was an implementation detail rather than
a design decision.
Third party libraries may, however, provide support for injecting dependencies
into non-constructor methods or free functions in one form or another. |
def sendToViewChanger(self, msg, frm):
if (isinstance(msg, InstanceChange) or
self.msgHasAcceptableViewNo(msg, frm)):
logger.debug("{} sending message to view changer: {}".
format(self, (msg, frm)))
self.msgsToViewChanger.append((msg, frm)) | Send the message to the intended view changer.
:param msg: the message to send
:param frm: the name of the node which sent this `msg` |
def callable_validator(instance, attribute, value):
if not callable(value):
raise TypeError('"{name}" value "{value}" must be callable'.format(name=attribute.name, value=value)) | Validate that an attribute value is callable.
:raises TypeError: if ``value`` is not callable |
def create(self, ttl=values.unset):
data = values.of({'Ttl': ttl, })
payload = self._version.create(
'POST',
self._uri,
data=data,
)
return TokenInstance(self._version, payload, account_sid=self._solution['account_sid'], ) | Create a new TokenInstance
:param unicode ttl: The duration in seconds the credentials are valid
:returns: Newly created TokenInstance
:rtype: twilio.rest.api.v2010.account.token.TokenInstance |
def _depth_event(self, msg):
if 'e' in msg and msg['e'] == 'error':
self.close()
if self._callback:
self._callback(None)
if self._last_update_id is None:
self._depth_message_buffer.append(msg)
else:
self._process_depth_message(msg) | Handle a depth event
:param msg:
:return: |
def _dict_raise_on_duplicates(ordered_pairs):
d = {}
for k, v in ordered_pairs:
if k in d:
raise ValueError("duplicate key: %r" % (k,))
else:
d[k] = v
return d | Reject duplicate keys. |
def _sort_dd_skips(configs, dd_indices_all):
config_current_skips = np.abs(configs[:, 1] - configs[:, 0])
if np.all(np.isnan(config_current_skips)):
return {0: []}
available_skips_raw = np.unique(config_current_skips)
available_skips = available_skips_raw[
~np.isnan(available_skips_raw)
].astype(int)
dd_configs_sorted = {}
for skip in available_skips:
indices = np.where(config_current_skips == skip)[0]
dd_configs_sorted[skip - 1] = dd_indices_all[indices]
return dd_configs_sorted | Given a set of dipole-dipole configurations, sort them according to
their current skip.
Parameters
----------
configs: Nx4 numpy.ndarray
Dipole-Dipole configurations
Returns
-------
dd_configs_sorted: dict
dictionary with the skip as keys, and arrays/lists with indices to
these skips. |
def detect_events(self, max_attempts=3):
for _ in xrange(max_attempts):
try:
with KindleCloudReaderAPI\
.get_instance(self.uname, self.pword) as kcr:
self.books = kcr.get_library_metadata()
self.progress = kcr.get_library_progress()
except KindleAPIError:
continue
else:
break
else:
return None
progress_map = {book.asin: self.progress[book.asin].locs[1]
for book in self.books}
new_events = self._snapshot.calc_update_events(progress_map)
update_event = UpdateEvent(datetime.now().replace(microsecond=0))
new_events.append(update_event)
self._event_buf.extend(new_events)
return new_events | Returns a list of `Event`s detected from differences in state
between the current snapshot and the Kindle Library.
`books` and `progress` attributes will be set with the latest API
results upon successful completion of the function.
Returns:
If failed to retrieve progress, None
Else, the list of `Event`s |
def rev_reg_id2cred_def_id(rr_id: str) -> str:
if ok_rev_reg_id(rr_id):
return ':'.join(rr_id.split(':')[2:-2])
raise BadIdentifier('Bad revocation registry identifier {}'.format(rr_id)) | Given a revocation registry identifier, return its corresponding credential definition identifier.
Raise BadIdentifier if input is not a revocation registry identifier.
:param rr_id: revocation registry identifier
:return: credential definition identifier |
def _file_md5(file_):
md5 = hashlib.md5()
chunk_size = 128 * md5.block_size
for chunk in iter(lambda: file_.read(chunk_size), b''):
md5.update(chunk)
file_.seek(0)
byte_digest = md5.digest()
return base64.b64encode(byte_digest).decode() | Compute the md5 digest of a file in base64 encoding. |
def _connect(self):
if self._connParams:
self._conn = psycopg2.connect(**self._connParams)
else:
self._conn = psycopg2.connect('')
try:
ver_str = self._conn.get_parameter_status('server_version')
except AttributeError:
ver_str = self.getParam('server_version')
self._version = util.SoftwareVersion(ver_str) | Establish connection to PostgreSQL Database. |
def status_set(workload_state, message):
valid_states = ['maintenance', 'blocked', 'waiting', 'active']
if workload_state not in valid_states:
raise ValueError(
'{!r} is not a valid workload state'.format(workload_state)
)
cmd = ['status-set', workload_state, message]
try:
ret = subprocess.call(cmd)
if ret == 0:
return
except OSError as e:
if e.errno != errno.ENOENT:
raise
log_message = 'status-set failed: {} {}'.format(workload_state,
message)
log(log_message, level='INFO') | Set the workload state with a message
Use status-set to set the workload state with a message which is visible
to the user via juju status. If the status-set command is not found then
assume this is juju < 1.23 and juju-log the message unstead.
workload_state -- valid juju workload state.
message -- status update message |
def _get_method(self, request):
methodname = request.method.lower()
method = getattr(self, methodname, None)
if not method or not callable(method):
raise errors.MethodNotAllowed()
return method | Figure out the requested method and return the callable. |
def pprint(self, num=10):
def pprint_map(time_, rdd):
print('>>> Time: {}'.format(time_))
data = rdd.take(num + 1)
for d in data[:num]:
py_pprint.pprint(d)
if len(data) > num:
print('...')
print('')
self.foreachRDD(pprint_map) | Print the first ``num`` elements of each RDD.
:param int num: Set number of elements to be printed. |
def extract_stream(source, dest, stream_id):
if not os.path.isfile(source):
raise IOError('No such file: ' + source)
subprocess.check_output([
'ffmpeg',
'-i', source,
'-y',
'-nostats',
'-loglevel', '0',
'-codec', 'copy',
'-map', '0:' + str(stream_id),
'-f', 'rawvideo',
dest,
]) | Get the data out of the file using ffmpeg
@param filename: mp4 filename |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.