docstring stringlengths 52 499 | function stringlengths 67 35.2k | __index_level_0__ int64 52.6k 1.16M |
|---|---|---|
Check status
Args:
Returns:
True: Success
False: Failed | def checkStatus(self):
checkAccount()
data = {'userid': self.user_id,
'useridx': self.useridx
}
r = self.session.post(nurls['checkStatus'], data = data)
p = re.compile(r'\<message\>(?P<message>.+)\</message\>')
message = p.search(r.text).... | 1,010,113 |
getDiskSpace
Args:
file_path: Full path for a file you want to checkUpload
upload_path: Ndrive path where you want to upload file
ex) /Picture/
Returns:
True: Possible to upload a file with a given file_size
False: Impossible to upload a ... | def getDiskSpace(self, file_path, upload_path = '', overwrite = False):
self.checkAccount()
url = nurls['checkUpload']
file_size = os.stat(file_path).st_size
file_name = os.path.basename(file_path)
now = datetime.datetime.now().isoformat()
data = {'userid': ... | 1,010,116 |
PUT
Args:
file_path: Full path for a file you want to upload
upload_path: Ndrive path where you want to upload file
ex) /Picture/
Returns:
True: Upload success
False: Upload failed | def put(self, file_path, upload_path = ''):
f = open(file_path, "r")
c = f.read()
file_name = os.path.basename(file_path)
now = datetime.datetime.now().isoformat()
url = nurls['put'] + upload_path + file_name
headers = {'userid': self.user_id,
... | 1,010,117 |
DELETE
Args:
file_path: Full path for a file you want to delete
upload_path: Ndrive path where you want to delete file
ex) /Picture/
Returns:
True: Delete success
False: Delete failed | def delete(self, file_path):
now = datetime.datetime.now().isoformat()
url = nurls['put'] + upload_path + file_name
headers = {'userid': self.user_id,
'useridx': self.useridx,
'Content-Type': "application/x-www-form-urlencoded; charset=UTF-8",
... | 1,010,118 |
DoMove
Args:
dummy: ???
orgresource: Path for a file which you want to move
dstresource: Destination path
bShareFireCopy: ???
Returns:
True: Move success
False: Move failed | def doMove(self, orgresource, dstresource, dummy = 56184, stresource = 'F', bShareFireCopy = 'false'):
url = nurls['doMove']
data = {'userid': self.user_id,
'useridx': self.useridx,
'dummy': dummy,
'orgresource': orgresource,
... | 1,010,120 |
GetProperty
Args:
dummy: ???
orgresource: File path
Returns:
FileInfo object:
False: Failed to get property | def getProperty(self, orgresource, dummy = 56184):
url = nurls['getProperty']
data = {'userid': self.user_id,
'useridx': self.useridx,
'dummy': dummy,
'orgresource': orgresource,
}
r = self.session.post(url = url, data =... | 1,010,121 |
GetVersionListCount
Args:
orgresource: File path
Returns:
Integer number: # of version list
False: Failed to get property | def getVersionListCount(self, orgresource):
url = nurls['getVersionListCount']
data = {'userid': self.user_id,
'useridx': self.useridx,
'orgresource': orgresource,
}
r = self.session.post(url = url, data = data)
j = json.loads(r... | 1,010,122 |
SetProperty
Args:
orgresource: File path
protect: 'Y' or 'N', mark the file as important
Returns:
Integer number: # of version list
False: Failed to get property | def setProperty(self, orgresource, protect, dummy = 7046):
url = nurls['setProperty']
data = {'userid': self.user_id,
'useridx': self.useridx,
'orgresource': orgresource,
'protect': protect,
'dummy': dummy,
}
... | 1,010,123 |
GetMusicAlbumList
Args:
tagtype = ???
startnum
pagingrow
Returns:
???
False: Failed to get property | def getMusicAlbumList(self, tagtype = 0, startnum = 0, pagingrow = 100):
url = nurls['setProperty']
data = {'userid': self.user_id,
'useridx': self.useridx,
'tagtype': tagtype,
'startnum': startnum,
'pagingrow': pagingrow,
... | 1,010,124 |
Normalize a sequence of values via rank and Normal c.d.f.
Args:
x (array_like): sequence of values.
Returns:
Gaussian-normalized values.
Example:
.. doctest::
>>> from scipy_sugar.stats import quantile_gaussianize
>>> print(quantile_gaussianize([-1, 0, 2]))
def quantile_gaussianize(x):
    """Gaussian-normalize a sequence via ranks and the Normal quantile function.

    Finite values are replaced by the standard-normal quantiles of their
    ranks; non-finite entries (NaN/inf) pass through untouched.

    Args:
        x (array_like): sequence of values.

    Returns:
        numpy.ndarray: Gaussian-normalized values.
    """
    from scipy.stats import norm, rankdata

    x = asarray(x, float).copy()
    finite_mask = isfinite(x)
    # Negate so that rank 1 maps to the largest value; norm.isf then
    # restores ascending order (equivalent to ppf(rank / (n + 1))).
    x[finite_mask] *= -1
    out = empty_like(x)
    out[finite_mask] = rankdata(x[finite_mask])
    out[finite_mask] = norm.isf(out[finite_mask] / (sum(finite_mask) + 1))
    out[~finite_mask] = x[~finite_mask]
    return out
Writes the voevent to the file object.
e.g.::
with open('/tmp/myvoevent.xml','wb') as f:
voeventparse.dump(v, f)
Args:
voevent(:class:`Voevent`): Root node of the VOevent etree.
file (io.IOBase): An open (binary mode) file object for writing.
pretty_print
def dump(voevent, file, pretty_print=True, xml_declaration=True):
    """Write the VOEvent to an open (binary-mode) file object.

    e.g.::

        with open('/tmp/myvoevent.xml', 'wb') as f:
            voeventparse.dump(v, f)

    Args:
        voevent(:class:`Voevent`): Root node of the VOEvent etree.
        file (io.IOBase): An open (binary mode) file object for writing.
        pretty_print (bool): Pretty-print the XML output.
        xml_declaration (bool): Include the XML declaration header.
    """
    serialized = dumps(voevent, pretty_print, xml_declaration)
    file.write(serialized)
Tests if a voevent conforms to the schema.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
Returns:
def valid_as_v2_0(voevent):
    """Test whether a VOEvent conforms to the v2.0 schema.

    The root tag prefix is temporarily restored so the stock schema
    accepts the tree, then stripped again before returning.

    Args:
        voevent(:class:`Voevent`): Root node of a VOEvent etree.

    Returns:
        bool: Whether the VOEvent is valid.
    """
    _return_to_standard_xml(voevent)
    is_valid = voevent_v2_0_schema.validate(voevent)
    _remove_root_tag_prefix(voevent)
    return is_valid
Sets the minimal 'Who' attributes: date of authoring, AuthorIVORN.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
date(datetime.datetime): Date of authoring.
NB Microseconds are ignored, as per the VOEvent spec.
author_ivorn(str): Short author identifier,
def set_who(voevent, date=None, author_ivorn=None):
    """Set the minimal 'Who' attributes: authoring date and AuthorIVORN.

    Args:
        voevent(:class:`Voevent`): Root node of a VOEvent etree.
        date(datetime.datetime): Date of authoring.
            NB Microseconds are dropped, as per the VOEvent spec.
        author_ivorn(str): Short author identifier; the 'ivo://' prefix
            is prepended automatically.
    """
    if date is not None:
        voevent.Who.Date = date.replace(microsecond=0).isoformat()
    if author_ivorn is not None:
        voevent.Who.AuthorIVORN = 'ivo://' + author_ivorn
Add descriptions or references to the How section.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
descriptions(str): Description string, or list of description
strings.
references(:py:class:`voeventparse.misc.Reference`): A reference element
(or list ... | def add_how(voevent, descriptions=None, references=None):
if not voevent.xpath('How'):
etree.SubElement(voevent, 'How')
if descriptions is not None:
for desc in _listify(descriptions):
# d = etree.SubElement(voevent.How, 'Description')
# voevent.How.Description[voeve... | 1,011,013 |
Add citations to other voevents.
The schema mandates that the 'Citations' section must either be entirely
absent, or non-empty - hence we require this wrapper function for its
creation prior to listing the first citation.
Args:
voevent(:class:`Voevent`): Root node of a VOEvent etree.
def add_citations(voevent, event_ivorns):
    """Add citations to other VOEvents.

    The schema mandates that 'Citations' be either entirely absent or
    non-empty, so the section is created lazily here just before the
    first citation is appended.

    Args:
        voevent(:class:`Voevent`): Root node of a VOEvent etree.
        event_ivorns: A single citation element or a list of them.
    """
    has_citations = bool(voevent.xpath('Citations'))
    if not has_citations:
        etree.SubElement(voevent, 'Citations')
    voevent.Citations.extend(_listify(event_ivorns))
Make a share url of directory
>>> nd.makeShareUrl('/Picture/flower.png', PASSWORD)
Args:
full_path:
The full path of directory to get share url.
Should be end with '/'.
ex) /folder/
passwd:
Access password for shared d... | def makeShareUrl(self, full_path, passwd):
if full_path[-1] is not '/':
full_path += '/'
data = {'_callback': 'window.__jindo_callback._347',
'path': full_path,
'passwd': passwd,
'userid': self.user_id,
'useridx': self... | 1,011,257 |
Checks whether the given files have bit for bit solution matches
on the given variable list.
Args:
model_path: absolute path to the model dataset
bench_path: absolute path to the benchmark dataset
config: the configuration of the set of analyses
Returns:
A dictionary create... | def bit_for_bit(model_path, bench_path, config):
fname = model_path.split(os.path.sep)[-1]
# Error handling
if not (os.path.isfile(bench_path) and os.path.isfile(model_path)):
return elements.error("Bit for Bit",
"File named " + fname + " has no suitable match!")
... | 1,011,505 |
Description
Args:
model_config: a dictionary with the model configuration data
bench_config: a dictionary with the benchmark configuration data
model_bundle: a LIVVkit model bundle object
bench_bundle: a LIVVkit model bundle object
Returns:
A dictionary created by the e... | def diff_configurations(model_config, bench_config, model_bundle, bench_bundle):
diff_dict = LIVVDict()
model_data = model_bundle.parse_config(model_config)
bench_data = bench_bundle.parse_config(bench_config)
if model_data == {} and bench_data == {}:
return elements.error("Configuration Co... | 1,011,506 |
Add a object
Args:
Object: Object will be added
Returns:
Object: Object with id
Raises:
TypeError: If add object is not a dict
MultipleInvalid: If input object is invaild | def add(self, obj):
if not isinstance(obj, dict):
raise TypeError("Add object should be a dict object")
obj = self.validation(obj)
obj["id"] = self.maxId + 1
obj = self._cast_model(obj)
self.model.db.append(obj)
if not self._batch.enable.is_set():
... | 1,011,533 |
Get a object by id
Args:
id (int): Object id
Returns:
Object: Object with specified id
def get(self, id):
    """Get an object by id.

    Args:
        id (int): Object id.

    Returns:
        Object: The (model-cast) object with the given id.
        None: If no object matches.
    """
    matches = (self._cast_model(entry) for entry in self.model.db
               if entry["id"] == id)
    return next(matches, None)
Remove a object by id
Args:
id (int): Object's id should be deleted
Returns:
def remove(self, id):
    """Remove an object by id.

    Args:
        id (int): Id of the object to delete.

    Returns:
        int: Number of affected rows (0 if the id was not present).
    """
    kept = [entry for entry in self.model.db if entry["id"] != id]
    removed_count = len(self.model.db) - len(kept)
    self.model.db = kept
    # Persist immediately unless a batch operation is in progress.
    if not self._batch.enable.is_set():
        self.model.save_db()
    return removed_count
Update a object
Args:
id (int): Target Object ID
newObj (object): New object will be merged into original object
Returns:
Object: Updated object
None: If specified object id is not found
MultipleInvalid: If input obj... | def update(self, id, newObj):
newObj = self.validation(newObj)
for obj in self.model.db:
if obj["id"] != id:
continue
newObj.pop("id", None)
obj.update(newObj)
obj = self._cast_model(obj)
if not self._batch.enable.is_s... | 1,011,537 |
Set a object
Args:
id (int): Target Object ID
newObj (object): New object will be set
Returns:
Object: New object
None: If specified object id is not found
MultipleInvalid: If input object is invaild | def set(self, id, newObj):
newObj = self.validation(newObj)
for index in xrange(0, len(self.model.db)):
if self.model.db[index]["id"] != id:
continue
newObj["id"] = id
self.model.db[index] = self._cast_model(newObj)
if not self._b... | 1,011,538 |
Handles the parsing of options for LIVVkit's command line interface
Args:
args: The list of arguments, typically sys.argv[1:] | def parse_args(args=None):
parser = argparse.ArgumentParser(description="Main script to run LIVVkit.",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
fromfile_prefix_chars='@')
parser.add_argument('-o', '--out-dir',
... | 1,011,578 |
Get the configuration directory.
Get the configuration directories, optionally for a specific program.
Args:
program (str) : The name of the program whose configuration directories have to be found.
system_wide (bool): Gets the system-wide configuration directories.
Returns:
list: A list of all matching ... | def get_config_dir(program='', system_wide=False):
config_homes = []
if system_wide:
if os.name == 'nt':
config_homes.append(
winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'))
else:
config_homes.append('/etc')
config_homes.append('/etc/xdg')
if os.name == 'darwin':
config_homes.append('/... | 1,011,786 |
Get the configuration file for a program.
Gets the configuration file for a given program, assuming it stores it in
a standard location. See also :func:`get_config_dir()`.
Args:
program (str): The program for which to get the configuration file.
system_wide (bool):Whether to get the system-wide file for the... | def get_config_file(program, system_wide=False):
program_config_homes = get_config_dir(program, system_wide)
config_homes = get_config_dir(system_wide=system_wide)
config_files = []
for home in config_homes:
for sub in os.listdir(home):
if os.path.isfile(os.path.join(home, sub)):
if sub.startswith(prog... | 1,011,787 |
Returns a dictionary representing a new section. Sections
contain a list of elements that are displayed separately from
the global elements on the page.
Args:
title: The title of the section to be displayed
element_list: The list of elements to display within the section
Returns:
def section(title, element_list):
    """Build a dictionary representing a new display section.

    Sections contain a list of elements displayed separately from the
    global elements on the page.

    Args:
        title: The title of the section to be displayed.
        element_list: A single element or list of elements to display
            within the section.

    Returns:
        dict: The section representation.
    """
    elements = element_list if isinstance(element_list, list) else [element_list]
    return {
        'Type': 'Section',
        'Title': title,
        'Elements': elements,
    }
Render the specified template and return the output.
Args:
tmpl_name (str): file name of the template
request_env (dict): request environment
Returns:
def render(self, tmpl_name, request_env):
    """Render the specified template and return the output.

    Thin pass-through to the parent class's ``render``; kept so this
    class exposes the hook explicitly (and as an override point).

    Args:
        tmpl_name (str): file name of the template.
        request_env (dict): request environment.

    Returns:
        str: the rendered template.
    """
    return super(WebApplication, self).render(tmpl_name, request_env)
Set the volume.
Sets the volume to a given percentage (integer between 0 and 100).
Args:
percentage (int): The percentage (as a 0 to 100 integer) to set the volume to.
Raises:
ValueError: if the percentage is >100 or <0. | def set_volume(percentage):
if percentage > 100 or percentage < 0:
raise ValueError('percentage must be an integer between 0 and 100')
if system.get_name() == 'windows':
# TODO: Implement volume for Windows. Looks like WinAPI is the
# solution...
pass
elif system.get_name() == 'mac':
# OS X uses 0-10 ... | 1,012,157 |
Increase the volume.
Increase the volume by a given percentage.
Args:
percentage (int): The percentage (as an integer between 0 and 100) to increase the volume by.
Raises:
ValueError: if the percentage is >100 or <0. | def increase_volume(percentage):
if percentage > 100 or percentage < 0:
raise ValueError('percentage must be an integer between 0 and 100')
if system.get_name() == 'windows':
# TODO: Implement volume for Windows. Looks like WinAPI is the
# solution...
pass
elif system.get_name() == 'mac':
volume_int =... | 1,012,159 |
Read a GPTL timing file and extract some data.
Args:
file_path: the path to the GPTL timing file
var_list: a list of strings to look for in the file
Returns:
A dict containing key-value pairs of the livvkit
and the times associated with them | def parse_gptl(file_path, var_list):
timing_result = dict()
if os.path.isfile(file_path):
with open(file_path, 'r') as f:
for var in var_list:
for line in f:
if var in line:
timing_result[var] = float(line.split()[4])/int(line.... | 1,012,347 |
Search for a file in a directory, and return the first match.
If the file is not found return an empty string
Args:
search_dir: The root directory to search in
file_pattern: A unix-style wildcard pattern representing
the file to find
Returns:
def find_file(search_dir, file_pattern):
    """Search a directory tree and return the first file matching a pattern.

    Args:
        search_dir: The root directory to search in.
        file_pattern: A unix-style wildcard pattern for the file to find.

    Returns:
        str: Path to the first match, or "" if nothing matches.
    """
    for dirpath, _dirnames, filenames in os.walk(search_dir):
        matches = fnmatch.filter(filenames, file_pattern)
        if matches:
            return os.path.join(dirpath, matches[0])
    return ""
Write out data to a json file.
Args:
data: A dictionary representation of the data to write out
path: The directory to output the file in
def write_json(data, path, file_name):
    """Write data out to a JSON file.

    Args:
        data: A dictionary representation of the data to write out.
        path: The directory to output the file in; created if missing.
            Silently does nothing if *path* exists but is not a directory.
        file_name: The name of the file to write out.
    """
    if not os.path.exists(path):
        mkdir_p(path)
    elif not os.path.isdir(path):
        return
    with open(os.path.join(path, file_name), 'w') as f:
        json_tricks.dump(data, f, indent=4, primitives=True, allow_nan=True)
Get the output of a command.
Gets a nice Unicode no-extra-whitespace string of the ``stdout`` of a given command.
Args:
command (str or list): A string of the command, or a list of the arguments (as would be used in :class:`subprocess.Popen`).
Note:
def get_cmd_out(command):
    """Return the stripped, decoded stdout of a command.

    Args:
        command (str or list): A command string (run through the shell)
            or an argument list (run directly, as in subprocess.Popen).

    Returns:
        str: The command's stdout decoded as UTF-8, trailing whitespace
        removed.
    """
    # A plain string is handed to the shell; an argv list is executed
    # directly, avoiding shell interpolation.
    use_shell = not isinstance(command, list)
    raw = sp.check_output(command, shell=use_shell)
    return raw.decode('utf-8').rstrip()
Check if a program is in the system ``PATH``.
Checks if a given program is in the user's ``PATH`` or not.
Args:
program (str): The program to try to find in ``PATH``.
Returns:
def is_in_path(program):
    """Check if a program is in the system ``PATH``.

    Checks if a given program is in the user's ``PATH`` or not.

    Args:
        program (str): The program to try to find in ``PATH``.

    Returns:
        bool: True if the program is found in a ``PATH`` directory,
        False otherwise.
    """
    if sys.version_info.major == 2:
        path = os.getenv('PATH')
        if os.name == 'nt':
            path = path.split(';')
        else:
            path = path.split(':')
    else:
        path = os.get_exec_path()
    for directory in path:
        if os.path.isdir(directory):
            try:
                entries = os.listdir(directory)
            except OSError:
                # Unreadable/stale PATH entries shouldn't abort the search.
                continue
            if program in entries:
                return True
    # Bug fix: the original fell off the end and implicitly returned None
    # when the program was absent; return an explicit False instead.
    return False
Check if process is running.
Check if the given process name is running or not.
Note:
On a Linux system, kernel threads (like ``kthreadd`` etc.)
are excluded.
Args:
process (str): The name of the process.
Returns:
bool: Is the process running? | def is_running(process):
if os.name == 'nt':
process_list = get_cmd_out(['tasklist', '/v'])
return process in process_list
else:
process_list = get_cmd_out('ps axw | awk \'{print $5}\'')
for i in process_list.split('\n'):
# 'COMMAND' is the column heading
# [*] indicates kernel-level processes like... | 1,012,607 |
Adds a program to startup.
Adds a program to user startup.
Args:
name (str) : The name of the startup entry.
command (str) : The command to run.
system_wide (bool): Add to system-wide startup.
Note:
``system_wide`` requires superuser/admin privileges. | def add_item(name, command, system_wide=False):
desktop_env = system.get_name()
if os.path.isfile(command):
command_is_file = True
if not desktop_env == 'windows':
# Will not exit program if insufficient permissions
sp.Popen(['chmod +x %s' % command], shell=True)
if desktop_env == 'windows':
impor... | 1,012,664 |
List startup programs.
List the programs set to run at startup.
Args:
system_wide (bool): Gets the programs that run at system-wide startup.
Returns:
list: A list of dictionaries in this format:
.. code-block:: python
{
'name': 'The name of the entry.',
'command': 'The command used to run... | def list_items(system_wide=False):
desktop_env = system.get_name()
result = []
if desktop_env == 'windows':
sys_startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup')
user_startup_dir = os.path.join(get_config_dir()[0], 'Roaming\\Mi... | 1,012,665 |
Removes a program from startup.
Removes a program from startup.
Args:
name (str) : The name of the program (as known to the system) to remove. See :func:``list_items``.
system_wide (bool): Remove it from system-wide startup.
Note:
``system_wide`` requires superuser/admin privileges. | def remove_item(name, system_wide=False):
desktop_env = system.get_name()
if desktop_env == 'windows':
import winreg
if system_wide:
startup_dir = os.path.join(winreg.ExpandEnvironmentStrings('%PROGRAMDATA%'), 'Microsoft\\Windows\\Start Menu\\Programs\\Startup')
else:
startup_dir = os.path.join(dire... | 1,012,666 |
Convert BAM file to ES file.
Args:
bam_fn (str): File name of the BAM file.
bam_fo (file): File object of the ES file.
allowed_delta (int): Maximal allowed coordinates difference for correct reads. | def bam2es(
bam_fn,
es_fo,
allowed_delta,
):
es_fo.write("# RN: read name" + os.linesep)
es_fo.write("# Q: is mapped with quality" + os.linesep)
es_fo.write("# Chr: chr id" + os.linesep)
es_fo.write("# D: direction" + os.linesep)
es_... | 1,012,675 |
Convert ES to ET.
Args:
es_fo (file): File object for the ES file.
et_fo (file): File object for the ET file. | def es2et(
es_fo,
et_fo,
):
et_fo.write("# Mapping information for read tuples" + os.linesep)
et_fo.write("#" + os.linesep)
et_fo.write("# RN: read name" + os.linesep)
et_fo.write("# I: intervals with asigned categories" + os.linesep)
et_fo.writ... | 1,012,679 |
ET to ROC conversion.
Args:
et_fo (file): File object for the ET file.
roc_fo (file): File object for the ROC file.
raises: ValueError | def et2roc(et_fo, roc_fo):
stats_dicts = [
{
"q": q,
"M": 0,
"w": 0,
"m": 0,
"P": 0,
"U": 0,
"u": 0,
"T": 0,
"t": 0,
"x": 0
... | 1,012,681 |
Plot accuracy.
Args:
data: Panda dataframe in *the* format. | def plot_accuracy(data, output_dir_path='.', output_filename='accuracy.png',
width=10, height=8):
output_path = os.path.join(output_dir_path, output_filename)
max_val_data = get_epoch_max_val_acc(data)
max_val_label = round(max_val_data['acc'].values[0], 4)
# max_val_epoch = max... | 1,012,720 |
Create two plots: 1) loss 2) accuracy.
Args:
def plot(data, output_dir_path='.', width=10, height=8):
    """Create two plots: 1) loss 2) accuracy.

    Args:
        data: Pandas DataFrame in *the* format (coerced from any
            DataFrame-compatible input).
        output_dir_path: Directory where the plot images are written.
        width: Figure width.
        height: Figure height.
    """
    frame = data if isinstance(data, pd.DataFrame) else pd.DataFrame(data)
    plot_accuracy(frame, output_dir_path=output_dir_path,
                  width=width, height=height)
    plot_loss(frame, output_dir_path, width=width, height=height)
Returns the value inside nested structure of data located
at period delimited path
When traversing a list, as long as that list is containing objects of
type dict, items in that list will have their "name" and "type" values
tested against the current key in the path.
Args:
data (dict or li... | def dict_get_path(data, path, default=None):
keys = path.split(".")
for k in keys:
if type(data) == list:
found = False
for item in data:
name = item.get("name", item.get("type"))
if name == k:
found = True
... | 1,013,029 |
Evaluates calls from call_queue and places the results in result_queue.
This worker is run in a seperate process.
Args:
call_queue: A multiprocessing.Queue of _CallItems that will be read and
evaluated by the worker.
result_queue: A multiprocessing.Queue of _ResultItems that will w... | def _process_worker(call_queue, result_queue, shutdown):
while True:
try:
call_item = call_queue.get(block=True, timeout=0.1)
except queue.Empty:
if shutdown.is_set():
return
else:
try:
r = call_item()
excep... | 1,013,036 |
Initializes a new ProcessPoolExecutor instance.
Args:
max_workers: The maximum number of processes that can be used to
execute the given calls. If None or not given then as many
worker processes will be created as the machine has processors. | def __init__(self, max_workers=None):
_remove_dead_thread_references()
if max_workers is None:
self._max_workers = multiprocessing.cpu_count()
else:
self._max_workers = max_workers
# Make the call queue slightly larger than the number of processes to
... | 1,013,041 |
Check if 'app' is installed (OS X).
Check if the given applications is installed on this OS X system.
Args:
app (str): The application name.
Returns:
bool: Is the app installed or not? | def mac_app_exists(app):
APP_CHECK_APPLESCRIPT =
with open('/tmp/app_check.AppleScript', 'w') as f:
f.write(APP_CHECK_APPLESCRIPT % app)
app_check_proc = sp.Popen(
['osascript', '-e', '/tmp/app_check.AppleScript'])
if app_check_proc.wait() != 0:
return False
else:
return True | 1,013,058 |
Collects the analyses cases to be run and launches processes for each of
them.
Args:
run_type: A string representation of the run type (eg. verification)
module: The module corresponding to the run. Must have a run_suite function
config: The configuration for the module | def run(run_type, module, config):
print(" -----------------------------------------------------------------")
print(" Beginning " + run_type.lower() + " test suite ")
print(" -----------------------------------------------------------------")
print("")
summary = run_quiet(module, config)
... | 1,013,062 |
Check RNF validity of a read tuple.
Args:
read_tuple_name (str): Read tuple name to be checked.s | def validate(self, read_tuple_name):
if reg_lrn.match(read_tuple_name) is None:
self.report_error(
read_tuple_name=read_tuple_name,
error_name="wrong_read_tuple_name_structure",
message="'{}' is not matched".format(reg_lrn),
)
... | 1,013,150 |
Report an error.
Args:
read_tuple_name (): Name of the read tuple.
error_name (): Name of the error.
wrong (str): What is wrong.
message (str): Additional message to be printed.
warning (bool): Warning (not an error). | def report_error(self, read_tuple_name, error_name, wrong="", message="", warning=False):
if (not self.report_only_first) or (error_name not in self.reported_errors):
print("\t".join(["error" if warning == False else "warning", read_tuple_name, error_name, wrong, message]))
self.rep... | 1,013,151 |
Get the coin address associated with a user id.
If the specified user id does not yet have an address for this
coin, then generate one.
Args:
user_id (str): this user's unique identifier
Returns:
def getaccountaddress(self, user_id=""):
    """Get the coin address associated with a user id.

    If the user id does not yet have an address for this coin, the
    daemon generates one.

    Args:
        user_id (str): this user's unique identifier.

    Returns:
        str: Base58Check address for this account.
    """
    addr = self.rpc.call("getaccountaddress", user_id)
    self.logger.debug("Your", self.coin, "address is", addr)
    return addr
Calculate the total balance in all addresses belonging to this user.
Args:
user_id (str): this user's unique identifier
as_decimal (bool): balance is returned as a Decimal if True (default)
or a string if False
Returns:
def getbalance(self, user_id="", as_decimal=True):
    """Calculate the total balance in all addresses belonging to this user.

    Args:
        user_id (str): this user's unique identifier.
        as_decimal (bool): balance is returned as a Decimal if True
            (default) or a string if False.

    Returns:
        str or Decimal: this account's balance.
    """
    # Bug fix: `unicode` only exists on Python 2; fall back to `str`
    # on Python 3 so this no longer raises NameError there.
    try:
        text_type = unicode
    except NameError:
        text_type = str
    balance = text_type(self.rpc.call("getbalance", user_id))
    self.logger.debug("\"" + user_id + "\"", self.coin, "balance:", balance)
    if as_decimal:
        return Decimal(balance)
    else:
        return balance
List all transactions associated with this account.
Args:
user_id (str): this user's unique identifier
count (int): number of transactions to return (default=10)
start_at (int): start the list at this transaction (default=0)
Returns:
def listtransactions(self, user_id="", count=10, start_at=0):
    """List transactions associated with this account.

    Args:
        user_id (str): this user's unique identifier.
        count (int): number of transactions to return (default=10).
        start_at (int): start the list at this transaction (default=0).

    Returns:
        list[dict]: transactions for this account.
    """
    transactions = self.rpc.call("listtransactions", user_id, count, start_at)
    self.logger.debug("Got transaction list for " + str(user_id))
    return transactions
Send coins from user's account.
Args:
user_id (str): this user's unique identifier
dest_address (str): address which is to receive coins
amount (str or Decimal): amount to send (eight decimal points)
minconf (int): ensure the account has a valid balance using this
... | def sendfrom(self, user_id, dest_address, amount, minconf=1):
amount = Decimal(amount).quantize(self.quantum, rounding=ROUND_HALF_EVEN)
txhash = self.rpc.call("sendfrom",
user_id, dest_address, float(str(amount)), minconf
)
self.logger.debug("Send %s %s from %s to %s... | 1,013,268 |
Sign a message with the private key of an address.
Cryptographically signs a message using ECDSA. Since this requires
an address's private key, the wallet must be unlocked first.
Args:
address (str): address used to sign the message
def signmessage(self, address, message):
    """Sign a message with the private key of an address.

    Cryptographically signs a message using ECDSA. Since this requires
    an address's private key, the wallet must be unlocked first.

    Args:
        address (str): address used to sign the message.
        message (str): plaintext message to sign.

    Returns:
        str: the ECDSA signature.
    """
    sig = self.rpc.call("signmessage", address, message)
    self.logger.debug("Signature: %s" % sig)
    return sig
Verifies that a message has been signed by an address.
Args:
address (str): address claiming to have signed the message
signature (str): ECDSA signature
message (str): plaintext message which was signed
Returns:
def verifymessage(self, address, signature, message):
    """Verify that a message has been signed by an address.

    Args:
        address (str): address claiming to have signed the message.
        signature (str): ECDSA signature.
        message (str): plaintext message which was signed.

    Returns:
        bool: True if the address signed the message, False otherwise.
    """
    is_verified = self.rpc.call("verifymessage", address, signature, message)
    self.logger.debug("Signature verified: %s" % str(is_verified))
    return is_verified
Passes an arbitrary command to the coin daemon.
Args:
def call(self, command, *args):
    """Pass an arbitrary command through to the coin daemon.

    Args:
        command (str): command to be sent to the coin daemon.
        *args: positional arguments forwarded to the daemon call.

    Returns:
        The daemon's response for the command.
    """
    method = str(command)
    return self.rpc.call(method, *args)
Convert SAM to RNF-based FASTQ with respect to argparse parameters.
Args:
args (...): Arguments parsed by argparse | def sam2rnf(args):
rnftools.mishmash.Source.recode_sam_reads(
sam_fn=args.sam_fn,
fastq_rnf_fo=args.fq_fo,
fai_fo=args.fai_fo,
genome_id=args.genome_id,
number_of_read_tuples=10**9,
simulator_name=args.simulator_name,
allow_unmapped=args.allow_unmapped,
... | 1,013,593 |
Add another parser for a SAM2RNF-like command.
Args:
subparsers (subparsers): Argparse subparsers object to which the new parser will be added.
simulator_name (str): Name of the simulator used in comments. | def add_sam2rnf_parser(subparsers, subcommand, help, description, simulator_name=None):
parser_sam2rnf = subparsers.add_parser(subcommand, help=help, description=description)
parser_sam2rnf.set_defaults(func=sam2rnf)
parser_sam2rnf.add_argument(
'-s', '--sam', type=str, metavar='file', dest=... | 1,013,594 |
Convert WgSim FASTQ files to RNF FASTQ files.
Args:
rnf_fastq_fo (file): File object of the target RNF file.
fai_fo (file): File object of FAI index of the reference genome.
genome_id (int): RNF genome ID.
wgsim_fastq_1_fn (str): File name of the first WgSim FASTQ file.
wgsim_fastq_2_fn (str): File na... | def recode_wgsim_reads(
rnf_fastq_fo,
fai_fo,
genome_id,
wgsim_fastq_1_fn,
wgsim_fastq_2_fn=None,
number_of_read_tuples=10**9,
):
wgsim_pattern = re.compile(
'@(.*)_([0-9]+)_([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_([0-9]+):([0-9]+):([0-9]+)_(... | 1,013,890 |
Calls jsonrpc service's method and returns its return value in a JSON
string or None if there is none.
Arguments:
def call(self, jsondata):
    """Call a jsonrpc service method and return its result as JSON.

    Yields the deferred result of the underlying Python-level call,
    then returns it serialized to a JSON string, or None when the
    method produced no result.

    Arguments:
        jsondata -- remote method call in jsonrpc format.
    """
    payload = yield self.call_py(jsondata)
    encoded = None if payload is None else json.dumps(payload)
    defer.returnValue(encoded)
Create RNF representation of this read.
Args:
read_tuple_id_width (int): Maximal expected string length of read tuple ID.
genome_id_width (int): Maximal expected string length of genome ID.
chr_id_width (int): Maximal expected string length of chromosome ID.
coor_width (int): Maximal expected string leng... | def stringize(
self,
rnf_profile=RnfProfile(),
):
sorted_segments = sorted(self.segments,
key=lambda x: (
x.genome_id * (10 ** 23) +
x.chr_id * (10 ** 21) +
(x.left + (int(x.left == 0) * x.right - 1)) * (10 ** 11) +
x.right * (10 ** ... | 1,014,000 |
Get RNF values for this read from its textual representation and save them
into this object.
Args:
string(str): Textual representation of a read.
Raises:
ValueError | def destringize(self, string):
# todo: assert -- starting with (, ending with )
# (prefix,read_tuple_id,segments_t,suffix)=(text).split("__")
# segments=segments_t.split("),(")
m = read_tuple_destr_pattern.match(string)
if not m:
smbl.messages.error(
... | 1,014,001 |
Construct a .desktop file and return it as a string.
Create a standards-compliant .desktop file, returning it as a string.
Args:
name (str) : The program's name.
exec\_ (str) : The command.
terminal (bool): Determine if program should be run in a terminal emulator or not. Defaults to ``False``.
addi... | def construct(name, exec_, terminal=False, additional_opts={}):
desktop_file = '[Desktop Entry]\n'
desktop_file_dict = {
'Name': name,
'Exec': exec_,
'Terminal': 'true' if terminal else 'false',
'Comment': additional_opts.get('Comment', name)
}
desktop_file = ('[Desktop Entry]\nName={name}\nExec={exec_... | 1,014,002 |
Execute a .desktop file.
Executes a given .desktop file path properly.
Args:
desktop_file (str) : The path to the .desktop file.
files (list): Any files to be launched by the .desktop. Defaults to empty list.
return_cmd (bool): Return the command (as ``str``) instead of executing. Defaults to ``False``.
... | def execute(desktop_file, files=None, return_cmd=False, background=False):
# Attempt to manually parse and execute
desktop_file_exec = parse(desktop_file)['Exec']
for i in desktop_file_exec.split():
if i.startswith('%'):
desktop_file_exec = desktop_file_exec.replace(i, '')
desktop_file_exec = desktop_fil... | 1,014,003 |
Locate a .desktop from the standard locations.
Find the path to the .desktop file of a given .desktop filename or application name.
Standard locations:
- ``~/.local/share/applications/``
- ``/usr/share/applications``
Args:
desktop_filename_or_name (str): Either the filename of a .desktop file or the name of... | def locate(desktop_filename_or_name):
paths = [
os.path.expanduser('~/.local/share/applications'),
'/usr/share/applications']
result = []
for path in paths:
for file in os.listdir(path):
if desktop_filename_or_name in file.split(
'.') or desktop_filename_or_name == file:
# Example: org.gnome.g... | 1,014,004 |
Parse a .desktop file.
Parse a .desktop file or a string with its contents into an easy-to-use dict, with standard values present even if not defined in file.
Args:
desktop_file_or_string (str): Either the path to a .desktop file or a string with a .desktop file as its contents.
Returns:
dict: A dictionary of ... | def parse(desktop_file_or_string):
if os.path.isfile(desktop_file_or_string):
with open(desktop_file_or_string) as f:
desktop_file = f.read()
else:
desktop_file = desktop_file_or_string
result = {}
for line in desktop_file.split('\n'):
if '=' in line:
result[line.split('=')[0]] = line.split('=')[1... | 1,014,005 |
Naive cycle detector
See help(cycle_detector) for more context.
Args:
sequence: A sequence to detect cycles in.
f, start: Function and starting state for finite state machine
Yields:
Values yielded by sequence_a if it terminates, undefined if a
cycle is found.
Raises:
... | def naive(seqs, f=None, start=None, key=lambda x: x):
history = {}
for step, value in enumerate(seqs[0]):
keyed = key(value)
yield value
if keyed in history:
raise CycleDetected(
first=history[keyed], period=step - history[keyed])
history[keyed] =... | 1,014,084 |
Gosper's cycle detector
See help(cycle_detector) for more context.
Args:
sequence: A sequence to detect cycles in.
f, start: Function and starting state for finite state machine
Yields:
Values yielded by sequence_a if it terminates, undefined if a
cycle is found.
Raises:
... | def gosper(seqs, f=None, start=None, key=lambda x: x):
tab = []
for c, value in enumerate(seqs[0], start=1):
yield value
try:
e = tab.index(key(value))
raise CycleDetected(
period=c - ((((c >> e) - 1) | 1) << e))
except ValueError:
... | 1,014,085 |
instantiate all registered vodka applications
Args:
def instantiate(config):
    """
    Instantiate every registered vodka application that is enabled.

    Args:
        config (dict or MungeConfig): configuration object; its "apps"
            mapping is keyed by application handle.
    """
    app_items = list(config["apps"].items())
    for handle, app_cfg in app_items:
        # An app defaults to enabled when the flag is absent.
        if app_cfg.get("enabled", True):
            app_cls = get_application(handle)
            instances[app_cls.handle] = app_cls(app_cfg)
Parse all of the timing files, and generate some statistics
about the run.
Args:
file_list: A list of timing files to parse
var_list: A list of variables to look for in the timing file
Returns:
A dict containing values that have the form:
[mean, min, max, mean, standard... | def generate_timing_stats(file_list, var_list):
timing_result = dict()
timing_summary = dict()
for file in file_list:
timing_result[file] = functions.parse_gptl(file, var_list)
for var in var_list:
var_time = []
for f, data in timing_result.items():
try:
... | 1,014,259 |
Generate a scaling plot.
Args:
timing_data: data returned from a `*_scaling` method
title: the title of the plot
ylabel: the y-axis label of the plot
description: a description of the plot
plot_file: the file to write out to
Returns:
an image element containing ... | def generate_scaling_plot(timing_data, title, ylabel, description, plot_file):
proc_counts = timing_data['proc_counts']
if len(proc_counts) > 2:
plt.figure(figsize=(10, 8), dpi=150)
plt.title(title)
plt.xlabel("Number of processors")
plt.ylabel(ylabel)
for case, cas... | 1,014,261 |
Description
Args:
timing_stats: a dictionary of the form
{proc_count : {model||bench : { var : { stat : val }}}}
scaling_var: the variable that accounts for the total runtime
title: the title of the plot
description: the description of the plot
plot_file: the fil... | def generate_timing_breakdown_plot(timing_stats, scaling_var, title, description, plot_file):
# noinspection PyProtectedMember
cmap_data = colormaps._viridis_data
n_subplots = len(six.viewkeys(timing_stats))
fig, ax = plt.subplots(1, n_subplots+1, figsize=(3*(n_subplots+2), 5))
for plot_num, p_... | 1,014,264 |
Add a new read to the current buffer. If it is a new read tuple (detected from ID), the buffer will be flushed.
Args:
read_tuple_id (int): ID of the read tuple.
bases (str): Sequence of bases.
qualities (str): Sequence of FASTQ qualities.
segments (list of rnftools.rnfformat.segment): List of segments co... | def add_read(
self,
read_tuple_id,
bases,
qualities,
segments,
):
assert type(bases) is str, "Wrong type of bases: '{}'".format(bases)
assert type(qualities) is str, "Wrong type of qualities: '{}'".format(qualities)
assert type(segments) is t... | 1,014,431 |
Combine more profiles and set their maximal values.
Args:
*rnf_profiles (rnftools.rnfformat.RnfProfile): RNF profile. | def combine(*rnf_profiles):
for rnf_profile in rnf_profiles:
self.prefix_width = max(self.prefix_width, rnf_profile.prefix_width)
self.read_tuple_id_width = max(self.read_tuple_id_width, rnf_profile.read_tuple_id_width)
self.genome_id_width = max(self.genome_id_widt... | 1,014,457 |
Load RNF values from a read tuple name.
Args:
read_tuple_name (str): Read tuple name which the values are taken from. | def load(self, read_tuple_name):
self.prefix_width = 0
self.read_tuple_id_width = 0
self.genome_id_width = 0
self.chr_id_width = 0
self.coor_width = 0
parts = read_tuple_name.split("__")
self.prefix_width = len(parts[0])
self.read_tuple_id_width ... | 1,014,458 |
Apply profile on a read tuple name and update read tuple ID.
Args:
read_tuple_name (str): Read tuple name to be updated.
read_tuple_id (id): New read tuple ID.
synchronize_widths (bool): Update widths (in accordance to this profile). | def apply(self, read_tuple_name, read_tuple_id=None, synchronize_widths=True):
parts = read_tuple_name.split("__")
parts[0] = self._fill_right(parts[0], "-", self.prefix_width)
if read_tuple_id is not None:
parts[1] = "{:x}".format(read_tuple_id)
parts[1] = self._fil... | 1,014,459 |
Check if the given read tuple name satisfies this profile.
Args:
read_tuple_name (str): Read tuple name. | def check(self, read_tuple_name):
parts = read_tuple_name.split("__")
if len(parts[0]) != self.prefix_width or len(parts[1]) != self.read_tuple_id_width:
return False
segments = parts[2][1:-1].split("),(")
for segment in segments:
int_widths = list(map... | 1,014,460 |
Set the desktop wallpaper.
Sets the desktop wallpaper to an image.
Args:
image (str): The path to the image to be set as wallpaper. | def set_wallpaper(image):
desktop_env = system.get_name()
if desktop_env in ['gnome', 'unity', 'cinnamon', 'pantheon', 'mate']:
uri = 'file://%s' % image
SCHEMA = 'org.gnome.desktop.background'
KEY = 'picture-uri'
if desktop_env == 'mate':
uri = image
SCHEMA = 'org.mate.background'
KEY = 'pict... | 1,014,634 |
Add a new graph to the overlap report.
Args:
y (str): Value plotted on y-axis.
x_label (str): Label on x-axis.
y_label (str): Label on y-axis.
title (str): Title of the plot.
x_run ((float,float)): x-range.
y_run ((int,int)): y-range.
svg_size_px ((int,int): Size of SVG image in pixels.
key_po... | def add_graph(
self,
y,
x_label=None,
y_label="",
title="",
x_run=None,
y_run=None,
svg_size_px=None,
key_position="bottom right",
):
if x_run is None:
x_run = self.default_x_run
if y_run is None:
... | 1,014,808 |
Convert DwgSim FASTQ file to RNF FASTQ file.
Args:
dwgsim_prefix (str): DwgSim prefix of the simulation (see its commandline parameters).
fastq_rnf_fo (file): File object of RNF FASTQ.
fai_fo (file): File object for FAI file of the reference genome.
genome_id (int): RNF genome ID to be used.
estimate_... | def recode_dwgsim_reads(
dwgsim_prefix,
fastq_rnf_fo,
fai_fo,
genome_id,
estimate_unknown_values,
number_of_read_tuples=10**9,
):
dwgsim_pattern = re.compile(
'@(.*)_([0-9]+)_([0-9]+)_([01])_([01])_([01])_([01])_([0-9]+):([0-9]+):([0-9]+)... | 1,014,835 |
Parse a CISM output log and extract some information.
Args:
file_path: absolute path to the log file
Return:
A dictionary created by the elements object corresponding to
the results of the bit for bit testing | def parse_log(file_path):
if not os.path.isfile(file_path):
return elements.error("Output Log", "Could not open file: " + file_path.split(os.sep)[-1])
headers = ["Converged Iterations",
"Avg. Iterations to Converge",
"Processor Count",
"Dycore Type"]
... | 1,014,905 |
Convert the CISM configuration file to a python dictionary
Args:
file_path: absolute path to the configuration file
Returns:
A dictionary representation of the given file | def parse_config(file_path):
if not os.path.isfile(file_path):
return {}
parser = ConfigParser()
parser.read(file_path)
# Strip out inline comments
for s in parser._sections:
for v in six.iterkeys(parser._sections[s]):
parser._sections[s][v] = parser._sections[s][v].... | 1,014,906 |
Create RNF representation of this segment.
Args:
rnf_profile (rnftools.rnfformat.RnfProfile): RNF profile (with widths). | def stringize(
self,
rnf_profile,
):
coor_width = max(rnf_profile.coor_width, len(str(self.left)), len(str(self.right)))
return "({},{},{},{},{})".format(
str(self.genome_id).zfill(rnf_profile.genome_id_width),
str(self.chr_id).zfill(rnf_profile.chr_... | 1,014,966 |
Get RNF values for this segment from its textual representation and
save them into this object.
Args:
def destringize(self, string):
    """
    Extract RNF values from the textual representation of a segment
    and store them on this object.

    Args:
        string (str): Textual representation of a segment.
    """
    match = segment_destr_pattern.match(string)
    # Groups: genome id, chromosome id, direction, left coord, right coord.
    self.genome_id = int(match.group(1))
    self.chr_id = int(match.group(2))
    self.direction = match.group(3)
    self.left = int(match.group(4))
    self.right = int(match.group(5))
Start configuration process for the provided handler
Args:
cfg (dict): config container
handler (config.Handler class): config handler to use
path (str): current path in the configuration progress | def configure(self, cfg, handler, path=""):
# configure simple value attributes (str, int etc.)
for name, attr in handler.attributes():
if cfg.get(name) is not None:
continue
if attr.expected_type not in [list, dict]:
cfg[name] = self.se... | 1,015,019 |
Plot accuracy and loss from a panda's dataframe.
Args:
data: Panda dataframe in the format of the Keras CSV log.
output_dir_path: The path to the directory where the resulting plots
should end up. | def _from_keras_log_format(data, **kwargs):
data_val = pd.DataFrame(data[['epoch']])
data_val['acc'] = data['val_acc']
data_val['loss'] = data['val_loss']
data_val['data'] = 'validation'
data_training = pd.DataFrame(data[['acc', 'loss', 'epoch']])
data_training['data'] = 'training'
r... | 1,015,023 |
Plot accuracy and loss from a Keras CSV log.
Args:
csv_path: The path to the CSV log with the actual data.
output_dir_path: The path to the directory where the resulting plots
def from_keras_log(csv_path, output_dir_path, **kwargs):
    """
    Plot accuracy and loss from a Keras CSV log.

    Args:
        csv_path: The path to the CSV log with the actual data.
        output_dir_path: The path to the directory where the resulting
            plots should end up.
    """
    # sep=None lets Python's CSV sniffer detect the separator automatically;
    # that sniffing requires the slower pure-Python parsing engine.
    log_data = pd.read_csv(csv_path, sep=None, engine='python')
    _from_keras_log_format(log_data, output_dir_path=output_dir_path, **kwargs)
Return configuration value
Args:
key_name (str): configuration key
Returns:
The value for the specified configuration key, or if not found
in the config the default value specified in the Configuration Handler
def get_config(self, key_name):
    """
    Return a configuration value.

    Args:
        key_name (str): configuration key

    Returns:
        The value stored in this component's config for ``key_name``, or,
        when the key is absent, the default declared by the component's
        Configuration handler class.
    """
    _absent = object()
    value = self.config.get(key_name, _absent)
    if value is _absent:
        return self.Configuration.default(key_name, inst=self)
    return value
Get resource or collection of resources.
---
parameters:
- name: resource
in: path
async def get(self, request, resource=None, **kwargs):
    """Get resource or collection of resources.

    ---
    parameters:
        - name: resource
          in: path
          type: string
    """
    # An absent or empty resource name means "serialize the whole collection".
    wants_collection = resource is None or resource == ''
    if wants_collection:
        return self.to_simple(request, self.collection, many=True, **kwargs)
    return self.to_simple(request, resource, **kwargs)
Update a resource.
---
parameters:
- name: resource
in: path
async def put(self, request, resource=None, **kwargs):
    """Update a resource.

    ---
    parameters:
        - name: resource
          in: path
          type: string
    """
    if resource is None:
        raise RESTNotFound(reason='Resource not found')
    # Updates share their implementation with POST for an existing resource.
    response = await self.post(request, resource=resource, **kwargs)
    return response
Initialize preprocessor.
Args:
column_metadata(dict): Meta information of the column.
def __init__(self, column_metadata):
    """
    Initialize preprocessor.

    Args:
        column_metadata (dict): Meta information of the column; must
            contain a 'name' entry.
    """
    self.col_name = column_metadata['name']
    self.column_metadata = column_metadata
    # Fail fast when the metadata type is not handled by this transformer.
    self.check_data_type()
Check the type of the transformer and column match.
Args:
column_metadata(dict): Metadata of the column.
def check_data_type(self):
    """
    Validate that the column metadata type matches this transformer.

    Raises:
        ValueError: If the metadata type neither equals ``self.type`` nor
            is contained in it.
    """
    metadata_type = self.column_metadata.get('type')
    # Accept an exact match, or membership when self.type is a collection.
    matches = (self.type == metadata_type) or (metadata_type in self.type)
    if not matches:
        raise ValueError('Types of transformer don\'t match')
Prepare the transformer to convert data.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
def fit(self, col):
    """
    Prepare the transformer to convert data.

    Args:
        col (pandas.DataFrame): Data to transform.

    Returns:
        None
    """
    dates = self.safe_datetime_cast(col)
    grouped = dates.groupby(dates).count()
    # Default value: first grouped date (groupby sorts its keys),
    # converted from epoch seconds to nanoseconds.
    self.default_val = grouped.index[0].timestamp() * 1e9
Prepare the transformer to convert data and return the processed table.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
def transform(self, col):
    """
    Cast the column to datetimes and return it as epoch timestamps.

    Args:
        col (pandas.DataFrame): Data to transform.

    Returns:
        pandas.DataFrame: Single-column frame of linux-epoch values.
    """
    transformed = pd.DataFrame()
    transformed[self.col_name] = self.safe_datetime_cast(col)
    transformed[self.col_name] = self.to_timestamp(transformed)
    return transformed
Converts data back into original format.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
def reverse_transform(self, col):
    """
    Convert data back into its original date-string format.

    Args:
        col (pandas.DataFrame or pandas.Series): Data to transform.

    Returns:
        pandas.DataFrame
    """
    frame = col.to_frame() if isinstance(col, pd.Series) else col
    result = pd.DataFrame(index=frame.index)
    # safe_date formats each row's timestamp back into a date string.
    result[self.col_name] = frame.apply(self.safe_date, axis=1)
    return result
Parses string values into datetime.
Args:
col(pandas.DataFrame): Data to transform.
Returns:
pandas.Series | def safe_datetime_cast(self, col):
casted_dates = pd.to_datetime(col[self.col_name], format=self.date_format, errors='coerce')
if len(casted_dates[casted_dates.isnull()]):
# This will raise an error for bad formatted data
# but not for out of bonds or missing dates.
... | 1,015,427 |
Transform a datetime series into linux epoch.
Args:
data(pandas.DataFrame): DataFrame containing a column named as `self.col_name`.
Returns:
def to_timestamp(self, data):
    """
    Transform a datetime column into linux-epoch values.

    Args:
        data (pandas.DataFrame): DataFrame containing a column named
            ``self.col_name``.

    Returns:
        pandas.Series: Epoch values; rows that were null stay null.
    """
    timestamps = pd.Series(index=data.index)
    not_null = data[self.col_name].notnull()
    # Only cast the non-null entries; nulls keep the series default value.
    timestamps[not_null] = data.loc[not_null, self.col_name].astype('int64')
    return timestamps
Transform x[self.col_name] into a date string.
Args:
x(dict like / pandas.Series): Row containing data to cast safely.
Returns:
str | def safe_date(self, x):
t = x[self.col_name]
if np.isnan(t):
return t
elif np.isposinf(t):
t = sys.maxsize
elif np.isneginf(t):
t = -sys.maxsize
tmp = time.localtime(float(t) / 1e9)
return time.strftime(self.date_format, tm... | 1,015,429 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.