text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def full_width_svg(url, width, height, alt_text=None):
    '''Helper to render an SVG that will size to fill
    its element while keeping its dimensions.

    Args:
        url: Location of the SVG asset.
        width: Intrinsic width used to derive the aspect ratio.
        height: Intrinsic height used to derive the aspect ratio.
        alt_text: Optional alternative text for accessibility.

    Returns:
        A dict with ``ratio`` (height/width as a percentage string, for
        the CSS padding-bottom aspect-ratio trick), ``url`` and ``alt_text``.
    '''
    # Bug fix: the old code truncated the percentage string to its first two
    # characters, which mangled ratios >= 100% (a square image became "10")
    # and single-digit ratios ("5." for 5%). Round to two decimals instead.
    ratio = (float(height) / float(width)) * 100
    return {
        'ratio': '%g' % round(ratio, 2),
        'url': url,
        'alt_text': alt_text,
    }
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def open(self):
    """Opens a SSH connection with a Pluribus machine.

    On success enables TCP keepalive probes, flags the device as connected
    and instantiates the PluribusConfig proxy.

    :raises pyPluribus.exceptions.ConnectionError: on invalid credentials,
        socket errors (wrong port) or name-resolution failures.
    """
    self._connection = paramiko.SSHClient()
    # Auto-accept unknown host keys. NOTE(review): convenient, but this
    # skips known_hosts verification -- confirm acceptable for deployment.
    self._connection.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    try:
        self._connection.connect(hostname=self._hostname,
                                 username=self._username,
                                 password=self._password,
                                 timeout=self._timeout,
                                 port=self._port)
        # Periodic keepalive packets so idle sessions are not dropped.
        self._connection.get_transport().set_keepalive(self._keepalive)
        self.connected = True
        self.config = PluribusConfig(self)
    except paramiko.ssh_exception.AuthenticationException:
        raise pyPluribus.exceptions.ConnectionError("Unable to open connection with {hostname}: \
            invalid credentials!".format(hostname=self._hostname))
    except socket_error as sockerr:
        # NOTE: exception .message attribute is Python 2 only.
        raise pyPluribus.exceptions.ConnectionError("Cannot open connection: {skterr}. \
            Wrong port?".format(skterr=sockerr.message))
    except socket_gaierror as sockgai:
        raise pyPluribus.exceptions.ConnectionError("Cannot open connection: {gaierr}. \
            Wrong hostname?".format(gaierr=sockgai.message))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close(self):
    """Closes the SSH connection if the connection is UP.

    Any changed-but-uncommitted configuration is discarded (rolled back)
    before the session is torn down; connection state is then reset.

    :raises pyPluribus.exceptions.ConnectionError: if the pending
        configuration cannot be discarded.
    """
    if not self.connected:
        return None
    if self.config is not None:
        if self.config.changed() and not self.config.committed():
            try:
                self.config.discard()  # if configuration changed and not committed, will rollback
            except pyPluribus.exceptions.ConfigurationDiscardError as discarderr:  # bad luck.
                raise pyPluribus.exceptions.ConnectionError("Could not discard the configuration: \
                    {err}".format(err=discarderr))
    self._connection.close()  # close SSH connection
    self.config = None  # reset config object
    self._connection = None  # drop the dead client reference
    self.connected = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cli(self, command):
    """Executes a command and returns raw output from the CLI.

    :param command: Command to be executed on the CLI.
    :raise pyPluribus.exceptions.TimeoutError: when execution of the command exceeds the timeout
    :raise pyPluribus.exceptions.CommandExecutionError: when not able to retrieve the output
    :return: Raw output of the command

    CLI Example::

        device.cli('switch-poweroff')
    """
    if not self.connected:
        raise pyPluribus.exceptions.ConnectionError("Not connected to the deivce.")
    cli_output = ''
    ssh_session = self._connection.get_transport().open_session()  # opens a new SSH session
    ssh_session.settimeout(self._timeout)
    ssh_session.exec_command(command)
    ssh_output = ''
    err_output = ''
    # Drain stdout and stderr fully before deciding success vs failure.
    ssh_output_makefile = ssh_session.makefile()
    ssh_error_makefile = ssh_session.makefile_stderr()
    for byte_output in ssh_output_makefile:
        ssh_output += byte_output
    for byte_error in ssh_error_makefile:
        err_output += byte_error
    # Treat stderr as fatal only when stdout produced nothing at all.
    if not ssh_output:
        if err_output:
            raise pyPluribus.exceptions.CommandExecutionError(err_output)
    # Strip everything up to the last banner, plus the echoed command line.
    cli_output = '\n'.join(ssh_output.split(self._ssh_banner)[-1].splitlines()[1:])
    if cli_output == 'Please enter username and password:':  # rare cases when connection is lost :(
        self.open()  # retry to open connection
        return self.cli(command)
    return cli_output
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data_xlsx(file_name, file_contents=None, on_demand=False):
    '''
    Loads the new excel format files. Old format files will automatically get loaded as well.
    Args:
        file_name: The name of the local file, or the holder for the
            extension type when the file_contents are supplied.
        file_contents: The file-like object holding contents of file_name.
            If left as None, then file_name is directly loaded.
        on_demand: Requests that a yielder be used in place of a full data
            copy.
    '''
    # xlrd transparently handles both .xls and .xlsx, so simply delegate.
    return get_data_xls(file_name, file_contents=file_contents, on_demand=on_demand)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data_xls(file_name, file_contents=None, on_demand=False):
    '''
    Loads the old excel format files. New format files will automatically
    get loaded as well.
    Args:
        file_name: The name of the local file, or the holder for the
            extension type when the file_contents are supplied.
        file_contents: The file-like object holding contents of file_name.
            If left as None, then file_name is directly loaded.
        on_demand: Requests that a yielder be used in place of a full data
            copy.
    '''
    def tuple_to_iso_date(tuple_date):
        '''
        Turns a gregorian (year, month, day, hour, minute, nearest_second) into a
        standard YYYY-MM-DDTHH:MM:SS ISO date. If the date part is all zeros, it's
        assumed to be a time; if the time part is all zeros it's assumed to be a date;
        if all of it is zeros it's taken to be a time, specifically 00:00:00 (midnight).
        Note that datetimes of midnight will come back as date-only strings. A date
        of month=0 and day=0 is meaningless, so that part of the coercion is safe.
        For more on the hairy nature of Excel date/times see
        http://www.lexicon.net/sjmachin/xlrd.html
        '''
        (y,m,d, hh,mm,ss) = tuple_date
        non_zero = lambda n: n!=0
        date = "%04d-%02d-%02d" % (y,m,d) if list(filter(non_zero, (y,m,d))) else ''
        time = "T%02d:%02d:%02d" % (hh,mm,ss) if list(filter(non_zero, (hh,mm,ss))) or not date else ''
        return date+time
    def format_excel_val(book, val_type, value, want_tuple_date):
        '''Cleans up the incoming excel data'''
        # Data val_type Codes:
        #   EMPTY   0
        #   TEXT    1 a Unicode string
        #   NUMBER  2 float
        #   DATE    3 float
        #   BOOLEAN 4 int; 1 means TRUE, 0 means FALSE
        #   ERROR   5
        if val_type == 2:  # NUMBER (old inline label said TEXT, contradicting the table above)
            # floats holding whole numbers come back as ints
            if value == int(value): value = int(value)
        elif val_type == 3:  # DATE (old inline label said NUMBER)
            datetuple = xlrd.xldate_as_tuple(value, book.datemode)
            value = datetuple if want_tuple_date else tuple_to_iso_date(datetuple)
        elif val_type == 5:  # ERROR
            value = xlrd.error_text_from_code[value]
        return value
    def xlrd_xsl_to_array(file_name, file_contents=None):
        '''
        Returns:
            A list of 2-D tables holding the converted cells of each sheet
        '''
        book = xlrd.open_workbook(file_name, file_contents=file_contents, on_demand=on_demand)
        formatter = lambda t_v: format_excel_val(book, t_v[0], t_v[1], False)
        row_builder = lambda s, r: list(map(formatter, zip(s.row_types(r), s.row_values(r))))
        data = [SheetYielder(book, index, row_builder) for index in range(book.nsheets)]
        if not on_demand:
            # eager load: realise every sheet, then free xlrd's resources
            for sheet in data:
                sheet.load()
            book.release_resources()
        return data
    return xlrd_xsl_to_array(file_name, file_contents)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data_excel_xml(file_name, file_contents=None, on_demand=False):
    '''
    Loads xml excel format files.
    Args:
        file_name: The name of the local file, or the holder for the
            extension type when the file_contents are supplied.
        file_contents: The file-like object holding contents of file_name.
            If left as None, then file_name is directly loaded.
        on_demand: Requests that a yielder be used in place of a full data
            copy (will be ignored).
    '''
    # NOTE this method is inefficient and uses code that's not of the highest quality
    if file_contents:
        xml_file = BytesIO(file_contents)
    else:
        xml_file = file_name
    book = xmlparse.ParseExcelXMLFile(xml_file)
    row_builder = lambda s, r: list(s.row_values(r))
    # One lazy sheet wrapper per sheet in the parsed workbook.
    return [XMLSheetYielder(book, index, row_builder) for index in range(len(book))]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_data_csv(file_name, encoding='utf-8', file_contents=None, on_demand=False):
    '''
    Gets good old csv data from a file.
    Args:
        file_name: The name of the local file, or the holder for the
            extension type when the file_contents are supplied.
        encoding: Loads the file with the specified cell encoding.
        file_contents: The file-like object holding contents of file_name.
            If left as None, then file_name is directly loaded.
        on_demand: Requests that a yielder be used in place of a full data
            copy.
    '''
    def yield_csv(csv_contents, csv_file):
        # Generator over the csv rows; guarantees the underlying file is
        # closed once iteration finishes (or the generator is discarded).
        try:
            for line in csv_contents:
                yield line
        finally:
            try:
                csv_file.close()
            except:
                # best-effort close; the handle may already be closed
                pass
    def process_csv(csv_contents, csv_file):
        # Eagerly materialise all rows (the non-on_demand path).
        return [line for line in yield_csv(csv_contents, csv_file)]
    if file_contents:
        csv_file = BytesIO(file_contents)
    else:
        # Don't use 'open as' format, as on_demand loads shouldn't close the file early
        csv_file = open(file_name, 'rb')
    # NOTE(review): csv.reader accepting an `encoding` kwarg implies this is
    # the unicodecsv package, not the stdlib csv module -- confirm import.
    reader = csv.reader(csv_file, dialect=csv.excel, encoding=encoding)
    if on_demand:
        table = yield_csv(reader, csv_file)
    else:
        table = process_csv(reader, csv_file)
    # Single sheet: wrap in a list for a consistent multi-table return shape.
    return [table]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(data, file_name, worksheet_names=None):
    '''
    Writes 2D tables to file.

    The output format is chosen from the file extension; any extension not
    recognised as xml/xlsx/xls falls back to csv.

    Args:
        data: 2D list of tables/worksheets.
        file_name: Name of the output file (determines type).
        worksheet_names: A list of worksheet names (optional).
    '''
    if re.search(XML_EXT_REGEX, file_name):
        return write_xml(data, file_name, worksheet_names=worksheet_names)
    if re.search(XLSX_EXT_REGEX, file_name):
        return write_xlsx(data, file_name, worksheet_names=worksheet_names)
    if re.search(XLS_EXT_REGEX, file_name):
        return write_xls(data, file_name, worksheet_names=worksheet_names)
    # The explicit CSV_EXT_REGEX branch was identical to the default
    # fallback, so the two are collapsed into one path.
    return write_csv(data, file_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_xls(data, file_name, worksheet_names=None):
    '''
    Writes out to old excel format.
    Args:
        data: 2D list of tables/worksheets.
        file_name: Name of the output file.
        worksheet_names: A list of worksheet names (optional).
    '''
    workbook = xlwt.Workbook()
    for sheet_index, sheet_data in enumerate(data):
        # Use the caller-supplied name when present and non-empty,
        # otherwise fall back to a numbered default.
        if worksheet_names and sheet_index < len(worksheet_names) and worksheet_names[sheet_index]:
            name = worksheet_names[sheet_index]
        else:
            name = 'Worksheet {}'.format(sheet_index)
        sheet = workbook.add_sheet(name)
        # Cell-by-cell copy of the 2D table into the sheet.
        for row_index, row in enumerate(sheet_data):
            for col_index, value in enumerate(row):
                sheet.write(row_index, col_index, value)
    workbook.save(file_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_csv(data, file_name, encoding='utf-8'):
    '''
    Writes out to csv format.
    Args:
        data: 2D list of tables/worksheets.
        file_name: Name of the output file.
        encoding: Cell encoding handed to the csv writer.
    '''
    # With more than one sheet, emit one numbered file per sheet
    # (e.g. out.csv -> out_0.csv, out_1.csv, ...).
    name_extension = len(data) > 1
    root, ext = os.path.splitext(file_name)
    for i, sheet in enumerate(data):
        fname = file_name if not name_extension else root+"_"+str(i)+ext
        with open(fname, 'wb') as date_file:
            # NOTE(review): csv.writer taking `encoding` implies the
            # unicodecsv package rather than stdlib csv -- confirm.
            csv_file = csv.writer(date_file, encoding=encoding)
            for line in sheet:
                csv_file.writerow(line)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_group(name):
    """ Return group with given name, if it exists. Check cache first. """
    cache_key = 'bits.general.group_%s' % name
    cached = cache.get(cache_key)
    if cached:
        return cached
    # Cache miss: hit the database and remember the result for one year.
    group = Group.objects.get(name=name)
    cache.set(cache_key, group, 365 * 24 * 60 * 60)
    return group
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def uniqify(list_):
    """Return a new list with duplicates removed, preserving first-seen order.

    Quadratic (`in` over a list), so intended for short lists only.
    """
    unique = []
    for item in list_:
        if item in unique:
            continue
        unique.append(item)
    return unique
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eliminate_sequential_children(paths):
    """helper for infer_columns. removes paths that are direct children of the n-1 or n-2 path"""
    kept = []
    for pos, path in enumerate(paths):
        parent = path[:-1]
        # drop this path when either of the two preceding paths is its parent
        if pos > 0 and paths[pos - 1] == parent:
            continue
        if pos > 1 and paths[pos - 2] == parent:
            continue
        kept.append(path)
    return kept
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collapse_group_expr(groupx, cols, ret_row):
    "collapses columns matching the group expression. I'm sure this is buggy; look at a real DB's imp of this."
    # For each selected column equal to the GROUP BY expression, replace the
    # grouped list of values with its first element (within a group they are
    # all equal). Mutates ret_row in place and also returns it.
    for i, col in enumerate(cols.children):
        if col == groupx: ret_row[i] = ret_row[i][0]
    return ret_row
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_aliases(aliases, aonly, x):
    "helper for ctor. takes AliasX or string as second arg"
    # aliases maps alias -> underlying table name (identity for plain names);
    # aonly collects alias -> subquery (SelectX) for later materialisation.
    if isinstance(x, basestring): aliases[x] = x
    elif isinstance(x, sqparse2.AliasX):
        if not isinstance(x.alias, basestring): raise TypeError('alias not string', type(x.alias))
        if isinstance(x.name, sqparse2.NameX):
            # simple "table AS alias": both names resolve to the real table
            aliases.update({x.alias: x.name.name, x.name.name: x.name.name})
        elif isinstance(x.name, sqparse2.SelectX):
            # aliased subquery: record the alias, defer resolution to aonly
            aliases.update({x.alias: x.alias})
            aonly[x.alias] = x.name
        else: raise TypeError('aliasx_unk_thing', type(x.name))  # pragma: no cover
    else: raise TypeError(type(x))  # pragma: no cover
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def resolve_aonly(self, tables_dict, table_ctor):
    "circular depends on pgmock.Table. refactor."
    # Materialise each aliased subquery into a concrete table object so that
    # later lookups can treat aliases like ordinary tables.
    for alias, selectx in self.aonly.items():
        table = table_ctor(alias, infer_columns(selectx, tables_dict), None)
        table.rows = run_select(selectx, tables_dict, table_ctor)
        self.aonly[alias] = table
    self.aonly_resolved = True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rowget(self, tables_dict, row_list, index):
    "row_list in self.row_order"
    # Walk the nested row structure, one level per element of the
    # resolved index tuple, and return the addressed value.
    tmp = row_list
    for i in self.index_tuple(tables_dict, index, False): tmp = tmp[i]
    return tmp
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eval_agg_call(self, exp):
    "helper for eval_callx; evaluator for CallX that consume multiple rows"
    if not isinstance(self.c_row, list): raise TypeError('aggregate function expected a list of rows')
    if len(exp.args.children) != 1: raise ValueError('aggregate function expected a single value', exp.args)
    arg, = exp.args.children  # intentional: error if len!=1
    # Evaluate the argument once per grouped row, then reduce below.
    vals = [Evaluator(c_r, self.nix, self.tables).eval(arg) for c_r in self.c_row]
    if not vals: return None  # aggregate over zero rows yields NULL
    if exp.f == 'min': return min(vals)
    elif exp.f == 'max': return max(vals)
    elif exp.f == 'count': return len(vals)
    else: raise NotImplementedError('unk_func', exp.f)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eval_nonagg_call(self, exp):
    "helper for eval_callx; evaluator for CallX that consume a single value"
    # todo: get more concrete about argument counts
    args = self.eval(exp.args)
    if exp.f == 'coalesce':
        a, b = args  # todo: does coalesce take more than 2 args?
        return b if a is None else a
    elif exp.f == 'unnest': return self.eval(exp.args)[0]  # note: run_select does some work in this case too
    elif exp.f in ('to_tsquery', 'to_tsvector'):
        # crude full-text stand-in: both sides become word sets
        return set(self.eval(exp.args.children[0]).split())
    else: raise NotImplementedError('unk_function', exp.f)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def eval_callx(self, exp):
    "dispatch for CallX"
    # Route to the aggregate or scalar evaluator.
    # below: this isn't contains(exp,consumes_row) -- it's just checking the current expression
    return (self.eval_agg_call if consumes_rows(exp) else self.eval_nonagg_call)(exp)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def redirect_to_handler(error, location):
    """Cause a request with an error to internally redirect to a URI path.

    This is generally for internal use, but can be called from within a
    Pecan controller to trigger a validation failure from *within* the
    controller itself, e.g.::

        @expose()
        @validate(some_schema, '/some/handler')
        def some_controller(self, **kw):
            if some_bad_condition():
                redirect_to_handler(error_exception, '/some/handler')
    """
    # NOTE(review): `error` is accepted but unused here -- presumably kept
    # for API symmetry with callers; confirm before removing.
    if callable(location):
        # location may be a callable returning the target path
        location = location()
    # The handler is always re-dispatched as a GET, whatever the original
    # request method was.
    request.environ['REQUEST_METHOD'] = 'GET'
    redirect(location, internal=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _transfer_str(self, conn, tmp, name, data):
    '''transfer string to remote file

    Serialises dicts to JSON, writes the payload UTF-8 encoded to a local
    temp file, uploads it to ``tmp/name`` on the remote host and returns
    the remote path. The local temp file is always removed after upload.
    '''
    if isinstance(data, dict):  # was: type(data) == dict
        data = utils.jsonify(data)
    afd, afile = tempfile.mkstemp()
    afo = os.fdopen(afd, 'w')
    try:
        try:
            afo.write(data.encode('utf8'))
        except:
            raise errors.AnsibleError("failure encoding into utf-8")
        afo.flush()
    finally:
        # previously the handle leaked when the write/encode failed
        afo.close()
    remote = os.path.join(tmp, name)
    try:
        conn.put_file(afile, remote)
    finally:
        os.unlink(afile)
    return remote
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _execute_module(self, conn, tmp, module_name, args,
                    async_jid=None, async_module=None, async_limit=None, inject=None):
    ''' runs a module that has already been transferred '''
    # hack to support fireball mode
    if module_name == 'fireball':
        args = "%s password=%s" % (args, base64.b64encode(str(utils.key_for_hostname(conn.host))))
        if 'port' not in args:
            args += " port=%s" % C.ZEROMQ_PORT
    (remote_module_path, is_new_style, shebang) = self._copy_module(conn, tmp, module_name, args, inject)
    cmd_mod = ""
    if self.sudo and self.sudo_user != 'root':
        # deal with possible umask issues once sudo'ed to other user
        cmd_chmod = "chmod a+r %s" % remote_module_path
        self._low_level_exec_command(conn, cmd_chmod, tmp, sudoable=False)
    cmd = ""
    if not is_new_style:
        # old-style modules read their (templated) args from a separate file
        args = utils.template(self.basedir, args, inject)
        argsfile = self._transfer_str(conn, tmp, 'arguments', args)
        if async_jid is None:
            cmd = "%s %s" % (remote_module_path, argsfile)
        else:
            cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module, argsfile]])
    else:
        # new-style modules embed args; async mode adds the wrapper params
        if async_jid is None:
            cmd = "%s" % (remote_module_path)
        else:
            cmd = " ".join([str(x) for x in [remote_module_path, async_jid, async_limit, async_module]])
    if not shebang:
        raise errors.AnsibleError("module is missing interpreter line")
    # run the module explicitly under its declared interpreter
    cmd = shebang.replace("#!","") + " " + cmd
    if tmp.find("tmp") != -1 and C.DEFAULT_KEEP_REMOTE_FILES != '1':
        # clean up the remote tmp dir unless the user asked to keep files
        cmd = cmd + "; rm -rf %s >/dev/null 2>&1" % tmp
    res = self._low_level_exec_command(conn, cmd, tmp, sudoable=True)
    return ReturnData(conn=conn, result=res['stdout'])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _executor(self, host):
    ''' handler for multiprocessing library

    Wraps _executor_internal so that any failure is reported through the
    callbacks and converted to a ReturnData instead of escaping the worker.
    (Python 2 except syntax preserved.)
    '''
    try:
        exec_rc = self._executor_internal(host)
        #if type(exec_rc) != ReturnData and type(exec_rc) != ansible.runner.return_data.ReturnData:
        #    raise Exception("unexpected return type: %s" % type(exec_rc))
        # redundant, right?
        if not exec_rc.comm_ok:
            self.callbacks.on_unreachable(host, exec_rc.result)
        return exec_rc
    except errors.AnsibleError, ae:
        msg = str(ae)
        self.callbacks.on_unreachable(host, msg)
        return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
    except Exception:
        # unexpected failure: report the full traceback as unreachable
        msg = traceback.format_exc()
        self.callbacks.on_unreachable(host, msg)
        return ReturnData(host=host, comm_ok=False, result=dict(failed=True, msg=msg))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _executor_internal(self, host):
    ''' executes any module one or more times

    Runs the module once normally, or once per item when a with_* lookup
    plugin supplies an item list; aggregates per-item results.
    '''
    host_variables = self.inventory.get_variables(host)
    # per-host port override only applies to ssh-like transports
    if self.transport in [ 'paramiko', 'ssh' ]:
        port = host_variables.get('ansible_ssh_port', self.remote_port)
        if port is None:
            port = C.DEFAULT_REMOTE_PORT
    else:
        # fireball, local, etc
        port = self.remote_port
    # build the injection context; later updates win:
    # host vars < module vars < setup (fact) cache
    inject = {}
    inject.update(host_variables)
    inject.update(self.module_vars)
    inject.update(self.setup_cache[host])
    inject['hostvars'] = HostVars(self.setup_cache, self.inventory)
    inject['group_names'] = host_variables.get('group_names', [])
    inject['groups'] = self.inventory.groups_list()
    # allow with_foo to work in playbooks...
    items = None
    items_plugin = self.module_vars.get('items_lookup_plugin', None)
    if items_plugin is not None and items_plugin in utils.plugins.lookup_loader:
        items_terms = self.module_vars.get('items_lookup_terms', '')
        items_terms = utils.template_ds(self.basedir, items_terms, inject)
        items = utils.plugins.lookup_loader.get(items_plugin, runner=self, basedir=self.basedir).run(items_terms, inject=inject)
        if type(items) != list:
            raise errors.AnsibleError("lookup plugins have to return a list: %r" % items)
        if len(items) and self.module_name in [ 'apt', 'yum' ]:
            # hack for apt and soon yum, with_items maps back into a single module call
            inject['item'] = ",".join(items)
            items = None
    # logic to decide how to run things depends on whether with_items is used
    if items is None:
        return self._executor_internal_inner(host, self.module_name, self.module_args, inject, port)
    elif len(items) > 0:
        # executing using with_items, so make multiple calls
        # TODO: refactor
        aggregrate = {}
        all_comm_ok = True
        all_changed = False
        all_failed = False
        results = []
        for x in items:
            inject['item'] = x
            result = self._executor_internal_inner(host, self.module_name, self.module_args, inject, port)
            results.append(result.result)
            if result.comm_ok == False:
                # connectivity loss aborts the remaining items
                all_comm_ok = False
                all_failed = True
                break
        for x in results:
            if x.get('changed') == True:
                all_changed = True
            if (x.get('failed') == True) or (('rc' in x) and (x['rc'] != 0)):
                all_failed = True
                break
        msg = 'All items completed'
        if all_failed:
            msg = "One or more items failed."
        rd_result = dict(failed=all_failed, changed=all_changed, results=results, msg=msg)
        if not all_failed:
            # only report 'failed' when it is actually true
            del rd_result['failed']
        return ReturnData(host=host, comm_ok=all_comm_ok, result=rd_result)
    else:
        # with_items resolved to an empty list: nothing to do, mark skipped
        self.callbacks.on_skipped(host, None)
        return ReturnData(host=host, comm_ok=True, result=dict(skipped=True))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _low_level_exec_command(self, conn, cmd, tmp, sudoable=False, executable=None):
    ''' execute a command string over SSH, return the output

    Returns a dict with 'stdout' and 'stderr', plus 'rc' when the
    transport reported a return code.
    '''
    if executable is None:
        executable = '/bin/sh'
    sudo_user = self.sudo_user
    rc, stdin, stdout, stderr = conn.exec_command(cmd, tmp, sudo_user, sudoable=sudoable, executable=executable)
    # stdout/stderr may be file-like objects or plain strings depending on
    # the connection plugin; normalise both to strings.
    if type(stdout) not in [ str, unicode ]:
        out = ''.join(stdout.readlines())
    else:
        out = stdout
    if type(stderr) not in [ str, unicode ]:
        err = ''.join(stderr.readlines())
    else:
        err = stderr
    # only include 'rc' when the transport actually reported one
    if rc != None:
        return dict(rc=rc, stdout=out, stderr=err )
    else:
        return dict(stdout=out, stderr=err )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _remote_md5(self, conn, tmp, path):
    ''' takes a remote md5sum without requiring python, and returns 0 if no file '''
    # rc=1 -> not a regular file, rc=2 -> unreadable; echoed as "<rc> <path>"
    # only when every md5 candidate below fails.
    test = "rc=0; [ -r \"%s\" ] || rc=2; [ -f \"%s\" ] || rc=1" % (path,path)
    # candidate md5 commands, one per platform, chained with || so the
    # first available tool wins
    md5s = [
        "(/usr/bin/md5sum %s 2>/dev/null)" % path,          # Linux
        "(/sbin/md5sum -q %s 2>/dev/null)" % path,          # ?
        "(/usr/bin/digest -a md5 %s 2>/dev/null)" % path,   # Solaris 10+
        "(/sbin/md5 -q %s 2>/dev/null)" % path,             # Freebsd
        "(/usr/bin/md5 -n %s 2>/dev/null)" % path,          # Netbsd
        "(/bin/md5 -q %s 2>/dev/null)" % path               # Openbsd
    ]
    cmd = " || ".join(md5s)
    cmd = "%s; %s || (echo \"${rc} %s\")" % (test, cmd, path)
    data = self._low_level_exec_command(conn, cmd, tmp, sudoable=False)
    data2 = utils.last_non_blank_line(data['stdout'])
    try:
        # first whitespace-separated token is the checksum (or the rc code)
        return data2.split()[0]
    except IndexError:
        sys.stderr.write("warning: md5sum command failed unusually, please report this to the list so it can be fixed\n")
        sys.stderr.write("command: %s\n" % md5s)
        sys.stderr.write("----\n")
        sys.stderr.write("output: %s\n" % data)
        sys.stderr.write("----\n")
        # this will signal that it changed and allow things to keep going
        return "INVALIDMD5SUM"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _make_tmp_path(self, conn):
    ''' make and return a temporary path on a remote box '''
    # unique-ish name: wall clock plus a large random suffix
    basefile = 'ansible-%s-%s' % (time.time(), random.randint(0, 2**48))
    basetmp = os.path.join(C.DEFAULT_REMOTE_TMP, basefile)
    if self.sudo and self.sudo_user != 'root':
        # use a world-accessible location when we will sudo to another user
        basetmp = os.path.join('/tmp', basefile)
    cmd = 'mkdir -p %s' % basetmp
    if self.remote_user != 'root':
        cmd += ' && chmod a+rx %s' % basetmp
    # echo the path back so we read exactly what was created
    cmd += ' && echo %s' % basetmp
    result = self._low_level_exec_command(conn, cmd, None, sudoable=False)
    # returned path always carries a trailing slash
    rc = utils.last_non_blank_line(result['stdout']).strip() + '/'
    return rc
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _copy_module(self, conn, tmp, module_name, module_args, inject):
    ''' transfer a module over SFTP, does not run it

    Returns (remote_path, is_new_style, shebang).
    '''
    if module_name.startswith("/"):
        raise errors.AnsibleFileNotFound("%s is not a module" % module_name)
    # Search module path(s) for named module.
    in_path = utils.plugins.module_finder.find_plugin(module_name)
    if in_path is None:
        raise errors.AnsibleFileNotFound("module %s not found in %s" % (module_name, utils.plugins.module_finder.print_paths()))
    out_path = os.path.join(tmp, module_name)
    module_data = ""
    is_new_style=False
    with open(in_path) as f:
        module_data = f.read()
        # new-style modules contain the REPLACER marker and get the common
        # boilerplate, args and language substituted in-line
        if module_common.REPLACER in module_data:
            is_new_style=True
        module_data = module_data.replace(module_common.REPLACER, module_common.MODULE_COMMON)
        encoded_args = "\"\"\"%s\"\"\"" % module_args.replace("\"","\\\"")
        module_data = module_data.replace(module_common.REPLACER_ARGS, encoded_args)
        encoded_lang = "\"\"\"%s\"\"\"" % C.DEFAULT_MODULE_LANG
        module_data = module_data.replace(module_common.REPLACER_LANG, encoded_lang)
        if is_new_style:
            # honour a per-host syslog facility override
            facility = C.DEFAULT_SYSLOG_FACILITY
            if 'ansible_syslog_facility' in inject:
                facility = inject['ansible_syslog_facility']
            module_data = module_data.replace('syslog.LOG_USER', "syslog.%s" % facility)
    lines = module_data.split("\n")
    shebang = None
    if lines[0].startswith("#!"):
        shebang = lines[0]
        args = shlex.split(str(shebang[2:]))
        interpreter = args[0]
        # allow per-host interpreter override, e.g. ansible_python_interpreter
        interpreter_config = 'ansible_%s_interpreter' % os.path.basename(interpreter)
        if interpreter_config in inject:
            lines[0] = shebang = "#!%s %s" % (inject[interpreter_config], " ".join(args[1:]))
            module_data = "\n".join(lines)
    self._transfer_str(conn, tmp, module_name, module_data)
    return (out_path, is_new_style, shebang)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parallel_exec(self, hosts):
    ''' handles mulitprocessing when more than 1 fork is required '''
    if not hosts:
        return
    p = multiprocessing.Pool(self.forks)
    results = []
    #results = p.map(multiprocessing_runner, hosts) # can't handle keyboard interrupt
    # map_async + get(huge timeout) keeps Ctrl+C deliverable to the parent
    results = p.map_async(multiprocessing_runner, hosts).get(9999999)
    p.close()
    p.join()
    return results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _partition_results(self, results):
    ''' seperate results by ones we contacted & ones we didn't

    Returns {'contacted': {host: result}, 'dark': {host: result}}; hosts
    that never produced a result are added to 'dark' with an empty dict.
    '''
    if results is None:
        return None
    results2 = dict(contacted={}, dark={})
    for result in results:
        host = result.host
        if host is None:
            raise Exception("internal error, host not set")
        if result.communicated_ok():
            results2["contacted"][host] = result.result
        else:
            results2["dark"][host] = result.result
    # hosts which were contacted but never got a chance to return
    for host in self.inventory.list_hosts(self.pattern):
        if not (host in results2['dark'] or host in results2['contacted']):
            results2["dark"][host] = {}
    return results2
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    ''' xfer & run module on all matched hosts

    Returns the partitioned {'contacted': ..., 'dark': ...} result dict.
    (Python 2 print/except syntax preserved.)
    '''
    # find hosts that match the pattern
    hosts = self.inventory.list_hosts(self.pattern)
    if len(hosts) == 0:
        self.callbacks.on_no_hosts()
        return dict(contacted={}, dark={})
    # workers are forked, so publish self through a module-level global
    global multiprocessing_runner
    multiprocessing_runner = self
    results = None
    # Check if this is an action plugin. Some of them are designed
    # to be ran once per group of hosts. Example module: pause,
    # run once per hostgroup, rather than pausing once per each
    # host.
    p = utils.plugins.action_loader.get(self.module_name, self)
    if p and getattr(p, 'BYPASS_HOST_LOOP', None):
        # Expose the current hostgroup to the bypassing plugins
        self.host_set = hosts
        # We aren't iterating over all the hosts in this
        # group. So, just pick the first host in our group to
        # construct the conn object with.
        result_data = self._executor(hosts[0]).result
        # Create a ResultData item for each host in this group
        # using the returned result. If we didn't do this we would
        # get false reports of dark hosts.
        results = [ ReturnData(host=h, result=result_data, comm_ok=True) \
                    for h in hosts ]
        del self.host_set
    elif self.forks > 1:
        try:
            results = self._parallel_exec(hosts)
        except IOError, ie:
            print ie.errno
            if ie.errno == 32:
                # broken pipe from Ctrl+C
                raise errors.AnsibleError("interupted")
            raise
    else:
        # serial fallback when only one fork is configured
        results = [ self._executor(h) for h in hosts ]
    return self._partition_results(results)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run_async(self, time_limit):
    ''' Run this module asynchronously and return a poller.

    :param time_limit: seconds the async wrapper allows the module to run
    :return: (results, AsyncPoller) tuple
    '''
    # self.background is read by the executor to launch the async wrapper
    self.background = time_limit
    results = self.run()
    return results, poller.AsyncPoller(results, self)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def image_by_id(self, id):
    """ Return image with given Id """
    if not id:
        return None
    # linear scan over the known images; None when nothing matches
    for image in self.images():
        if image['Id'] == id:
            return image
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def image_by_tag(self, tag):
    """ Return image with given tag """
    if not tag:
        return None
    # an image may carry several repo tags; match any of them
    for image in self.images():
        if tag in image['RepoTags']:
            return image
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def image_exists(self, id=None, tag=None):
    """Return True if an image with the given id or tag is known, else False."""
    if id and self.image_by_id(id):
        return True
    if tag and self.image_by_tag(tag):
        return True
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def container_by_id(self, id):
    """Return the container dict (including stopped ones) with 'Id' == *id*, or None."""
    if not id:
        return None
    for cont in self.containers(all=True):
        if cont['Id'] == id:
            return cont
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def container_by_name(self, name):
    """Return the container whose 'Names' list contains *name*, or None."""
    if not name:
        return None
    # docker prepends a '/' to container names in the container dict
    wanted = '/' + name
    for cont in self.containers(all=True):
        if wanted in cont['Names']:
            return cont
    return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def container_exists(self, id=None, name=None):
    """Return True if a container with the given id or name already exists."""
    if id and self.container_by_id(id):
        return True
    if name and self.container_by_name(name):
        return True
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def container_running(self, id=None, name=None):
    """Return the container's State.Running flag; False when neither id nor name is given."""
    target = id if id else name
    if not target:
        return False
    return self.inspect_container(target)['State']['Running']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_container_ip(self, container):
    """Return the container's internal IP address, or None if unavailable."""
    info = self.inspect_container(container)
    if not info:
        return None
    net_settings = info['NetworkSettings']
    if not net_settings:
        return None
    address = net_settings['IPAddress']
    return address if address else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse(fileobj):
    """Parse *fileobj* for a shebang and return it as an argv-style list.

    Returns [] when the file does not start with '#!'. On Windows a
    leading 'env' interpreter is dropped so the real interpreter comes
    first (Windows has no /usr/bin/env).
    """
    fileobj.seek(0)
    try:
        part = fileobj.read(2)
    except UnicodeDecodeError:
        # Binary content cannot carry a text shebang; treat as "no shebang".
        part = ""
    if part == "#!":
        # shlex honors quoting in the interpreter line.
        shebang = shlex.split(fileobj.readline().strip())
        if (platform.system() == "Windows" and
                len(shebang) and
                os.path.basename(shebang[0]) == "env"):
            return shebang[1:]
        return shebang
    return []
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, port=5000, background=False):
    """Run this application with the builtin server, for testing only.

    Do not use in production.

    :param port: port number to listen on
    :type port: int
    :param background: when True, run the server in the background
    :type background: bool
    """
    script_dir = os.path.dirname(os.path.abspath(sys.argv[0]))
    server = Driver(self, port, script_dir, 1)
    if background:
        server.run_background()
    else:
        server.run()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def unbind(self, handler, argspec):
    """Detach *handler* so it is no longer called for args matching *argspec*.

    :param argspec: instance of ArgSpec - args to be matched
    """
    bucket = self.handlers[argspec.key]
    bucket.remove((handler, argspec))
    # Drop the key entirely once its last handler is gone.
    if not bucket:
        del self.handlers[argspec.key]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dispatch(self, *args, **kwargs):
    """Call every handler whose argspec matches *args*/*kwargs*.

    :return: set of handlers that were called
    """
    called_handlers = set()
    for handler_list in self.handlers.values():
        for handler, argspec in handler_list:
            accept_args, accept_kwargs = argspec.accepts
            # NOTE(review): the original guarded `handler in called_handlers
            # and False`, which never skipped anything (dead code, removed
            # here). A handler bound under several argspecs is therefore
            # invoked once per matching argspec -- confirm whether de-dup
            # was intended before enabling it.
            if args_match(accept_args, accept_kwargs, self.default, *args, **kwargs):
                called_handlers.add(handler)
                handler(*args, **kwargs)
    return called_handlers
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iterencode(self, o, _one_shot=False):
    """Encode *o*, yielding each JSON string chunk as it becomes available.

    For example::

        for chunk in JSONEncoder().iterencode(bigobject):
            mysocket.write(chunk)
    """
    # Identity markers detect circular references (only when enabled).
    if self.check_circular:
        markers = {}
    else:
        markers = None
    if self.ensure_ascii:
        _encoder = encode_basestring_ascii
    else:
        _encoder = encode_basestring
    if self.encoding != 'utf-8':
        # Wrap the escaper so byte strings are first decoded from the
        # configured encoding (Python 2 `str` is bytes here).
        def _encoder(o, _orig_encoder=_encoder, _encoding=self.encoding):
            if isinstance(o, str):
                o = o.decode(_encoding)
            return _orig_encoder(o)
    def floatstr(o, allow_nan=self.allow_nan,
                 _repr=FLOAT_REPR, _inf=INFINITY, _neginf=-INFINITY):
        # Check for specials. Note that this type of test is processor
        # and/or platform-specific, so do tests which don't depend on the
        # internals.
        if o != o:
            # NaN is the only float value not equal to itself.
            text = 'NaN'
        elif o == _inf:
            text = 'Infinity'
        elif o == _neginf:
            text = '-Infinity'
        else:
            return _repr(o)
        if not allow_nan:
            raise ValueError(
                "Out of range float values are not JSON compliant: " +
                repr(o))
        return text
    _iterencode = _make_iterencode(
        markers, self.default, _encoder, self.indent, floatstr,
        self.key_separator, self.item_separator, self.sort_keys,
        self.skipkeys, _one_shot, self._order)
    return _iterencode(o, 0)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self):
    """Refresh the intension, then record the member count and clear the dirty flag."""
    self.update_intension()
    # Size must be sampled after the intension refresh above.
    self._size_known_intension = len(self.members)
    self._update_members = False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_entailed_by(self, other):
    """Return True if merging *other* with self would add no new information.

    True when other's include keys cover self's, self's excludes cover
    other's, and self's prototype is entailed by other's.
    """
    if not set(self.include.keys()).issubset(set(other.include.keys())):
        return False
    # Fix: `set` has no `isuperset` method -- the original raised
    # AttributeError whenever this check was reached; `issuperset` is
    # the intended call.
    if not self.exclude.issuperset(other.exclude):
        return False
    if not self.prototype.is_entailed_by(other.prototype):
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_instances(self):
    """Return an iterator over the (key, member) pairs, sorted by key.

    Refreshes the members first when they are marked stale.
    """
    if self._update_members:
        self.update()
    sorted_pairs = sorted(self.members.iteritems())
    return iter(sorted_pairs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bots_list(self):
    """List all of the user's bots.

    :rtype: list of Bot
    :return: user's bots
    """
    return [Bot(entry) for entry in self.client.bots()]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bots_create(self, bot):
    """Persist a new bot via the API.

    :param bot: bot object to save
    :type bot: Bot
    """
    payload = bot.to_json()
    self.client.bots(_method="POST", _json=payload, _params=dict(userToken=self.token))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bots_get(self, bot):
    """Fetch the bot named ``bot.name`` from the API and return a filled Bot.

    :param bot: empty bot object carrying the name to search
    :type bot: Bot
    :rtype: Bot
    """
    endpoint = self.client.bots.__getattr__(bot.name)
    return Bot(endpoint())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bots_delete(self, bot):
    """Delete an existing bot on the server.

    :param bot: bot to delete
    :type bot: Bot
    """
    endpoint = self.client.bots.__getattr__(bot.name)
    endpoint(_method="DELETE", _params=dict(botName=bot.name))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def talk(self, bot, message):
    """Send *message* to *bot* and return the bot's response.

    Useful for integrating the platform with your own channels.

    :type bot: Bot
    :type message: Message
    :rtype: ActionResponse
    """
    raw = self.client.talk(_method="POST", _params=dict(botName=bot.name), _json=message.to_json())
    return ActionResponse(raw)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def push(self, bot, channel_type, ar, user_id):
    """Push a packed message (*ar*) to one user of *bot* on a channel.

    Supports text, buttons and images, and can force the user's state.

    :type bot: Bot
    :param channel_type: one of [telegram, facebook, slack]
    :type channel_type: str
    :type ar: ActionResponse
    :param user_id: user id in the chosen channel
    :type user_id: str
    """
    endpoint = self.client.push.__getattr__(bot.name)
    endpoint(_method="POST",
             _params=dict(id=user_id, channel=channel_type),
             _json=ar.to_json())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def broadcast(self, bot, channel_type, text):
    """Broadcast a text message to every user of *bot* on a channel.

    :type bot: Bot
    :param channel_type: one of [telegram, facebook, slack]
    :type channel_type: str
    :param text: text message
    :type text: str
    """
    endpoint = self.client.broadcast.__getattr__(bot.name)
    endpoint(_method="POST",
             _params=dict(channel=channel_type),
             _json=dict(message=text))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __parse(self, lines):
    """Parse a list of lines and create the internal property dictionary.

    Every line must be either a comment or a key-value pair. The
    separator between key and value is '=', ':' or a whitespace
    character (not a newline); an unescaped '=' or ':' also allows
    whitespace inside the key. To include '=' or ':' literally in a
    key or value, escape it with a backslash. A line holding only a
    key gets the empty string as value. Examples of valid pairs::

        key value
        key=value
        key:value
        key value1,value2,value3
        key value1,value2,value3 \\
            value4, value5
        key
        This key= this value
        key = value1 value2 value3

    Any line that starts with '#' or '!' is considered a comment and
    skipped. Leading/trailing whitespace is stripped from keys/values.
    """
    # Line-by-line parser over the provided content.
    lineno = 0
    i = iter(lines)
    for line in i:
        lineno += 1
        line = line.strip()
        # Skip null lines
        if not line: continue
        # Skip lines which are comments
        if line[0] in ('#','!'): continue
        # NOTE(review): `escaped` is assigned but never read.
        escaped = False
        # Position of the first separator char; -1 means "no separator".
        sepidx = -1
        # Flag recording that an unescaped '='/':' was found.
        flag = 0
        # First obtain the max index up to which a whitespace separator
        # may be searched for.
        m = self.othercharre.search(line)
        if m:
            # Unescaped '=' or ':' found: only whitespace *before* it can
            # act as a separator, and it must not be escaped/preceded by
            # '=' or ':'.
            first, last = m.span()
            start, end = 0, first
            flag = 1
            wspacere = re.compile(r'(?<![\\\=\:])(\s)')
        else:
            if self.othercharre2.search(line):
                # '=' or ':' present but backslash-escaped, so the
                # whitespace pattern must not exclude those characters.
                wspacere = re.compile(r'(?<![\\])(\s)')
            # NOTE(review): when neither regex matches, `wspacere` is not
            # (re)assigned on this iteration -- it relies on a previous
            # iteration's value or raises NameError; confirm intended.
            start, end = 0, len(line)
        m2 = wspacere.search(line, start, end)
        if m2:
            # Split at the first unescaped whitespace.
            first, last = m2.span()
            sepidx = first
        elif m:
            # No whitespace separator; split at the '=' or ':' itself.
            first, last = m.span()
            sepidx = last - 1
        # A trailing backslash continues the pair on the next line.
        while line[-1] == '\\':
            # Read next line
            nextline = next(i)
            nextline = nextline.strip()
            lineno += 1
            # This line will become part of the value
            line = line[:-1] + nextline
        # Now split to key,value according to separation char
        if sepidx != -1:
            key, value = line[:sepidx], line[sepidx+1:]
        else:
            key, value = line, ''
        self._keyorder.append(key)
        self.process_pair(key, value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load(self, stream):
    """Load properties from an open file stream.

    :param stream: file object opened in read-only ('r') mode
    :raises TypeError: when *stream* is not a file object
    :raises ValueError: when *stream* is not opened for reading
    """
    # For the time being only accept file input streams.
    if not _is_file(stream):
        raise TypeError('Argument should be a file object!')
    # Check for the opened mode.
    if stream.mode != 'r':
        raise ValueError('Stream should be opened in read-only mode!')
    # Fix: the original wrapped this in `try/except IOError: raise`,
    # a no-op that only obscured the call; IOError still propagates.
    lines = stream.readlines()
    self.__parse(lines)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_property(self, key, value):
    """Store *value* under *key*; both must be exactly `str`.

    :raises TypeError: when either argument is not a str
    """
    # Exact-type check (subclasses intentionally rejected).
    if not (type(key) is str and type(value) is str):
        raise TypeError('Both key and value should be strings!')
    self.process_pair(key, value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def list(self, out=sys.stdout):
    """Print a listing of the properties to *out* (default: standard output)."""
    out.write('-- listing properties --\n')
    for name, val in self._properties.items():
        out.write(''.join((name, '=', val, '\n')))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def store(self, out, header=""):
    """Write the properties list to the stream *out*, preceded by the
    optional *header* comment and a timestamp comment.

    The stream is closed after a successful write.

    :raises ValueError: when *out* is not opened in write mode
    """
    if out.mode[0] != 'w':
        # Fix: error message said "Steam" instead of "Stream".
        raise ValueError('Stream should be opened in write mode!')
    # Header comment line.
    out.write(''.join(('#', header, '\n')))
    # Timestamp comment line.
    tstamp = time.strftime('%a %b %d %H:%M:%S %Z %Y', time.localtime())
    out.write(''.join(('#', tstamp, '\n')))
    # Write properties from the pristine dictionary, in insertion order.
    # (The original wrapped this in a no-op `except IOError: raise`,
    # removed here; IOError still propagates.)
    for prop in self._keyorder:
        if prop in self._origprops:
            val = self._origprops[prop]
            out.write(''.join((prop, '=', self.escape(val), '\n')))
    out.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_random(self):
    """Return a kitten: either a random stored one, or a freshly created one.

    The more kittens are stored, the likelier an existing one is returned.
    """
    stored = self.count()
    reuse_cutoff = (stored / (stored + constants.KITTEN_FRESHNESS))
    if random.random() < reuse_cutoff:
        return self._rand_inst()
    return self.create_new()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def boot():
    """Read configuration files, initialize glin and run the main loop."""
    argparser = argparse.ArgumentParser(
        description="Controller for LED stripes (WS2801, WS2811 an similar)")
    argparser.add_argument("-c", "--config", metavar="CONFIGFILE", dest="configfiles", action='append',
                           help='Configuration File. May be repeated multiple times. Later configuration files override previous ones.')
    argparser.add_argument("-d", "--debug", dest="log_debug", action='store_const', const=True, help='Set log level to debug. Overrides -i/--info')
    argparser.add_argument("-i", "--info", dest="log_info", action='store_const', const=True, help='Set log level to info.')
    args = argparser.parse_args()
    # -d wins over -i; with neither flag, logging keeps its default level.
    if args.log_debug:
        logging.basicConfig(level=logging.DEBUG)
    elif args.log_info:
        logging.basicConfig(level=logging.INFO)
    # Built-in defaults first; user-supplied files override them in order.
    cfg = configparser.ConfigParser()
    cfgpath = os.path.join(os.path.dirname(__file__), "default.conf")
    cfg.read(cfgpath)
    if args.configfiles is not None:
        cfg.read(args.configfiles)
    # Validate the mandatory [core] settings before starting.
    if "core" not in cfg:
        logging.critical("No [core] section found in configurations files")
        sys.exit()
    if "leds" not in cfg["core"]:
        logging.critical("No leds value found in [core] section in configurations files")
        sys.exit()
    led_count = int(cfg["core"]["leds"])
    if "hwbackend" not in cfg["core"]:
        logging.critical("No hwbackend value found in [core] section in configurations files")
        sys.exit()
    backend_name = cfg["core"]["hwbackend"]
    # Resolve the hardware backend through setuptools entry points.
    hwbackends = list(iter_entry_points(group='glin.hwbackend', name=backend_name))
    # NOTE(review): this branch also fires when *zero* backends are found,
    # although the message only mentions duplicates -- confirm intended.
    if len(hwbackends) != 1:
        logging.critical("Found multiple hwbackend with same name. Cant decide upon one. Quitting.")
        sys.exit()
    backend_class = hwbackends[0].load()
    # Hand the backend its own config section, when one exists.
    backend_configuration = dict(cfg[backend_name]) if backend_name in cfg else {}
    backend = backend_class(led_count=led_count, config=backend_configuration)
    app = glin.app.GlinApp(led_count, hw_backend=backend)
    # Register every advertised animation that subclasses AbstractAnimation.
    for entry_point in iter_entry_points(group='glin.animation', name=None):
        animation_class = entry_point.load()
        try:
            if issubclass(animation_class, glin.animations.AbstractAnimation):
                app.register_animation(animation_class)
            else:
                logging.error("This is not a valid animation class. Has to be subclass of glin.animations:AbstraktAnimation. Ignoring.: {ep}"
                              .format(ep=entry_point))
        except TypeError:
            # issubclass raised: the entry point is not a class at all.
            logging.error("This is not a Class. Ignoring.: {ep}".format(ep=entry_point))
    app.execute()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _play_sound(self, filename):
    """Shell out to the external audio player with the provided file.

    `filename`
        Filename for sound file.

    Does nothing when no player is available; playback runs in the
    background and failures are ignored.
    """
    command = self._get_external_player()
    if not command:
        return  # no player found
    if common.IS_MACOSX:
        command += ' "{0}"'.format(filename)
    else:
        # append quiet flag and filename
        is_play = (command == 'play')
        command += ' -q "{0}"'.format(filename)
        # HACK: play can default to using pulseaudio. here, we
        # check if pulse command exists and delegate to alsa if
        # not
        if is_play and not common.which('pulseaudio'):
            command += ' -t alsa'
    # play sound file in the background, ignore if it fails
    common.shell_process(command, background=True)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_option(self, option, block_name, *values):
    """Parse options for play, end_play, and timer_play.

    Expects exactly one value: the path to an existing sound file. The
    resolved path is stored in self.files under 'start', 'end' or 'timer'.

    :raises TypeError: when not exactly one value is given
    :raises ValueError: when the file is missing, or is not WAV while
        the player is 'aplay'
    """
    if len(values) != 1:
        raise TypeError
    value = os.path.realpath(os.path.expanduser(values[0]))
    if not os.path.isfile(value) and not os.path.islink(value):
        raise ValueError(u'Sound file "{0}" does not exist'
                         .format(value))
    # special extension check for aplay player.
    # Fix: os.path.splitext keeps the leading dot ('.wav'), so the
    # original comparison against 'wav' was always unequal and rejected
    # every file whenever aplay was the player.
    ext = os.path.splitext(value)[1].lower()
    if ext != '.wav' and self._get_external_player() == 'aplay':
        raise ValueError(u"Only WAV sound file "
                         "supported for 'aplay'")
    if option == 'play':
        option = 'start_' + option
    key = option.split('_', 1)[0]
    self.files[key] = value
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def on_taskend(self, task):
    """Play the configured sound at task end.

    Uses the 'timer' sound when task.elapsed is truthy, otherwise 'end';
    silent when no file is configured for that key.
    """
    sound_key = 'timer' if task.elapsed else 'end'
    sound_file = self.files.get(sound_key)
    if sound_file:
        self._play_sound(sound_file)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _fixpath(root, base):
    """Return the absolute, normalized join of *root* and *base*."""
    return os.path.abspath(os.path.normpath(os.path.join(root, base)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _sorter(generated):
    """Return the unique paths in *generated*, sorted by dirname then basename."""
    # Deduplicate, then sort on (directory, filename) pairs so entries
    # group by directory before ordering on the file name.
    pairs = [(os.path.dirname(f), os.path.basename(f))
             for f in set(list(generated))]
    pairs.sort()
    return [os.path.join(pair[0], pair[1]) for pair in pairs]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _walk(recursion):
    """Return a directory-walking callable, recursive or single-level.

    Prefers scandir.walk when the package is available, falling back
    to os.walk.
    """
    try:
        from scandir import walk as walk_function
    except ImportError:
        from os import walk as walk_function
    if recursion:
        # partial() with no extra arguments is a plain pass-through here.
        walk = partial(walk_function)
    else:
        def walk(path):  # pylint: disable=C0111
            # Yield only the first (top-level) triple of the walk.
            try:
                yield next(walk_function(path))
            except NameError:
                # NOTE(review): walk_function is always bound by the
                # import above, so this branch looks unreachable --
                # confirm before removing.
                yield walk_function(path)
    return walk
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def isvalid(path, access=None, extensions=None, filetype=None, minsize=None):
    """Check whether *path* meets the access, extension, type, and size criteria.

    Each criterion is skipped when its argument is None.
    """
    if access is not None and not os.access(path, access):
        return False
    if extensions is not None and not checkext(path, extensions):
        return False
    if filetype is not None:
        # Unknown filetype strings fail the check, matching the original.
        type_checks = {'all': os.path.exists,
                       'dir': os.path.isdir,
                       'file': os.path.isfile}
        probe = type_checks.get(filetype)
        if probe is None or not probe(path):
            return False
    if (minsize is not None and os.path.isfile(path)
            and os.path.getsize(path) <= minsize):
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _generator_file(self):
    """Generator for `self.filetype` of 'file'.

    Yields absolute paths of valid files among self.paths; directory
    paths are walked and their file names rebased via _generator_rebase.
    """
    for path in self.paths:
        if os.path.isfile(path):
            # A direct file path is validated and yielded as-is.
            if isvalid(path, self.access, self.extensions,
                       minsize=self.minsize):
                yield os.path.abspath(path)
        elif os.path.isdir(path):
            for root, _, fnames in self._walker(path):
                yield from self._generator_rebase(fnames, root)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _generator_other(self):
    """Generator for `self.filetype` other than 'file'.

    Walks every path, yielding rebased directory names then file names
    for each visited directory.
    """
    for path in self.paths:
        for root, dnames, fnames in self._walker(path):
            yield from self._generator_rebase(dnames, root)
            yield from self._generator_rebase(fnames, root)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def all(self, paths, access=None):
    """Verify a list of paths; record failures and return True when none fail."""
    invalid = []
    for candidate in paths:
        if not isvalid(candidate, access, filetype='all'):
            invalid.append(candidate)
    self.failures = invalid
    return not invalid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dirs(self, paths, access=None):
    """Verify a list of directories; record failures and return True when none fail."""
    invalid = []
    for candidate in paths:
        if not isvalid(candidate, access, filetype='dir'):
            invalid.append(candidate)
    self.failures = invalid
    return not invalid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def files(self, paths, access=None, extensions=None, minsize=None):
    """Verify a list of files; record failures and return True when none fail."""
    invalid = []
    for candidate in paths:
        if not isvalid(candidate, access, extensions, 'file', minsize):
            invalid.append(candidate)
    self.failures = invalid
    return not invalid
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_manhole_factory(namespace, **passwords):
    """Build a Twisted Conch SSH factory serving a colored manhole.

    :param namespace: dict of names exposed inside the manhole session
    :param passwords: username=password pairs for in-memory auth
        (plaintext checker -- development use only)
    """
    realm = manhole_ssh.TerminalRealm()
    # Every authenticated terminal gets an EnhancedColoredManhole over namespace.
    realm.chainedProtocolFactory.protocolFactory = (
        lambda _: EnhancedColoredManhole(namespace)
    )
    p = portal.Portal(realm)
    p.registerChecker(
        checkers.InMemoryUsernamePasswordDatabaseDontUse(**passwords)
    )
    return manhole_ssh.ConchFactory(p)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_entity(self):
    """Create and register a new entity with a strictly increasing UID.

    :return: the new entity
    :rtype: :class:`essence.Entity`
    """
    self._highest_id_seen += 1
    fresh = Entity(self._highest_id_seen, self)
    self._entities.append(fresh)
    return fresh
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def destroy_entitiy(self, entity):
    """Remove *entity* and every component attached to it from the world.

    Long-hand for :func:`essence.Entity.destroy`.
    """
    # Drop the entity's row from every component relation.
    for relation in self._database.values():
        relation.pop(entity, None)
    # Forget the entity in the per-component-type index.
    for entity_set in self._entities_by_component.values():
        entity_set.discard(entity)
    self._entities.remove(entity)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_component(self, entity, component):
    """Attach *component* to *entity*.

    Long-hand for :func:`essence.Entity.add`.

    :raises DuplicateComponentError: when the entity already carries a
        component of the same type
    """
    kind = type(component)
    relation = self._get_relation(kind)
    if entity in relation:
        # PYTHON2.6: Numbers required in format string.
        msg = "Component {0} can't be added to entity {1} since it already has a component of type {2}.".format(component, entity, kind)
        raise DuplicateComponentError(msg)
    relation[entity] = component
    self._entities_with(kind).add(entity)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_component(self, entity, component_type, missing=MISSING):
    """Return the component of *component_type* attached to *entity*.

    Long-hand for :func:`essence.Entity.get`.

    :param missing: fallback returned when the component is absent;
        when left as the MISSING sentinel, NoSuchComponentError is raised
    """
    relation = self._get_relation(component_type)
    if entity in relation:
        return relation[entity]
    if missing is MISSING:
        raise NoSuchComponentError()
    return missing
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_component(self, entity, component_type):
    """Detach the component of *component_type* from *entity*.

    Long-hand for :func:`essence.Entity.remove`. Raises KeyError when
    no such component is attached.
    """
    self._get_relation(component_type).pop(entity)
    self._entities_with(component_type).remove(entity)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, *args, **kwargs):
    """Run one tick: call ``update(self, ...)`` on every registered system, in order."""
    for registered_system in self.systems:
        registered_system.update(self, *args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def auto_discover():
    """Auto-map urls from the controllers directory.

    Files and classes whose names start with an underscore are ignored,
    as is the Controller base class itself. Controllers from
    controllers/main.py are mounted at the url root; every other module
    is mounted under '<module_name>/'.
    """
    urls = [
        url(r'^admin/', admin.site.urls),
    ]
    # TODO: we can create python package to have a lot of controllers
    # in separate files
    def get_controllers(module_name):
        """Return the list of controller classes exported by a module."""
        module = import_module('app.controllers.{}'.format(module_name))
        controllers = []
        for obj_name in dir(module):
            # we ignore the imported Controller base and hidden names
            if obj_name.startswith('_') or obj_name == 'Controller':
                continue
            obj = getattr(module, obj_name)
            # include only controllers
            # NOTE(review): issubclass raises TypeError when obj is not a
            # class (e.g. a module-level constant) -- confirm modules only
            # export classes here.
            if issubclass(obj, Controller):
                controllers.append(obj)
        return controllers
    def controller_to_path(controller):
        """
        Convert the controller's class name to a url path: CamelCase
        words become lowercase joined by underscores, the 'Controller'
        suffix is dropped, and 'Main' maps to the root path.
        """
        words = re.findall('[A-Z][a-z]*', controller.__name__)
        if words[-1] == 'Controller':
            del words[-1]
        # transform words to a url address
        url_path = '_'.join(words).lower()
        # main controller is a root handler
        # TODO: root address inside the file should always come last
        if url_path == 'main':
            url_path = ''
        return url_path
    # load all controllers (excluding main controllers)
    for file_name in listdir('controllers', get_files=True, hide_ignored=True):
        # remove .py extension from file name
        app_name = file_name.split('.', 1)[0]
        # we will include main controller at the end
        if app_name == 'main':
            continue
        # add url for each controller
        for controller in get_controllers(app_name):
            url_path = controller_to_path(controller)
            urls.append(url(
                r'^{}/{}$'.format(app_name, url_path),
                controller.as_view()
            ))
    # add urls for main controllers
    for controller in get_controllers('main'):
        url_path = controller_to_path(controller)
        # map urls to a root path
        urls.append(url(
            r'^{}$'.format(url_path),
            controller.as_view()
        ))
    return urls
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def download_file(url, suffix=''):
    """Download an attached file into a temporary file.

    Parameters
    ----------
    url : string
        SCO-API download Url
    suffix : string, optional
        If given, the temp file's name ends with this suffix; otherwise
        there is no suffix.

    Returns
    -------
    string, string
        Path to the downloaded temp file, and the suffix.
    """
    # NOTE(review): urllib2 is Python 2 only, and the response object is
    # never closed -- confirm whether a context manager is wanted here.
    r = urllib2.urlopen(url)
    # Save attached file in temp file and return path to temp file
    fd, f_path = tempfile.mkstemp(suffix=suffix)
    os.write(fd, r.read())
    os.close(fd)
    return f_path, suffix
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_resource_listing(url, offset, limit, properties):
    """Generic method to retrieve a resource listing from a SCO-API.

    Takes the resource-specific API listing Url as argument.

    Parameters
    ----------
    url : string
        Resource listing Url for a SCO-API
    offset : int, optional
        Starting offset for returned list items
    limit : int, optional
        Limit the number of items in the result
    properties : List(string)
        List of additional object properties to be included for items in
        the result

    Returns
    -------
    List(ResourceHandle)
        List of resource handle (one per subject in the object listing)
    """
    # Create listing query based on given pagination arguments
    query = [
        QPARA_OFFSET + '=' + str(offset),
        QPARA_LIMIT + '=' + str(limit)
    ]
    # Add properties argument only if the property list is not None and not
    # empty (truthiness covers both cases).
    if properties:
        query.append(QPARA_ATTRIBUTES + '=' + ','.join(properties))
    # Decorate the listing Url with the query, then retrieve the listing
    # from the SCO-API.
    json_obj = JsonResource(url + '?' + '&'.join(query)).json
    # Convert result into a list of resource handles and return the result
    resources = []
    for element in json_obj['items']:
        resource = ResourceHandle(element)
        # Add additional properties to resource if a list is given. Note:
        # a non-None but empty list still attaches an empty properties
        # dict, matching the original behavior.
        if properties is not None:
            resource.properties = {}
            for prop in properties:
                if prop in element:
                    resource.properties[prop] = element[prop]
        resources.append(resource)
    return resources
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def to_local_time(timestamp):
    """Convert a UTC timestamp string into a datetime in the local time zone.

    Adopted from:
    http://stackoverflow.com/questions/4770297/python-convert-utc-datetime-string-to-local-datetime

    Parameters
    ----------
    timestamp : string
        Default string representation of timestamps expected to be in UTC
        time zone

    Returns
    -------
    datetime
        Datetime object in local time zone
    """
    # Parse the timestamp; the result is a naive datetime object.
    parsed = dt.datetime.strptime(timestamp, '%Y-%m-%dT%H:%M:%S.%f')
    # Tag the naive datetime as UTC, then convert it to the local zone.
    as_utc = parsed.replace(tzinfo=tz.gettz('UTC'))
    return as_utc.astimezone(tz.tzlocal())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _load_secret(self, creds_file):
'''read the oauth secrets and account ID from a credentials configuration file''' try: with open(creds_file) as fp: creds = json.load(fp) return creds except Exception as e: sys.stderr.write("Error loading oauth secret from local file called '{0}'\n".format(creds_file)) sys.stderr.write("\tThere should be a local OAuth credentials file \n") sys.stderr.write("\twhich has contents like this:\n") sys.stderr.write(""" { "account_id": "1234567890001", "client_id": "30ff0909-0909-33d3-ae88-c9887777a7b7", "client_secret": "mzKKjZZyeW5YgsdfBD37c5730g397agU35-Dsgeox6-73giehbt0996nQ" } """ | )
sys.stderr.write("\n")
raise e |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_video_count(self, search_q=None):
    '''Return the number of videos in the account.

    If ``search_q`` is given it is forwarded to the CMS API as the ``q``
    query parameter to count only matching videos.
    '''
    # Only build a parameter dict when a search query was supplied.
    params = {'q': search_q} if search_q is not None else None
    result = self._make_request(self.CMS_Server, 'GET', "/counts/videos",
                                params=params)
    return result['count']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post_video(self, videoUrl, name=None, ingestMedia=True):
    '''Create a video object, optionally ingesting media from videoUrl.

    When ``name`` is omitted, the file-name portion of ``videoUrl`` is used
    as the video title. Returns the newly created video object.
    '''
    # Default the title to the file name portion of the source URL.
    title = name if name is not None else os.path.basename(videoUrl)
    created = self._make_request(self.CMS_Server, 'POST', '/videos',
                                 data={'name': title})
    if ingestMedia:
        self.ingest_video(created['id'], videoUrl)
    return created
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect(self):
    """ Connects the client to the server and returns it.

    The server's RSA host key (taken from the application config) is
    registered up-front so the connection is trusted without prompting.
    """
    host = app.config['SSH_HOST']
    # Decode the configured base64 host key into an RSA key object.
    host_key = paramiko.RSAKey(
        data=base64.b64decode(app.config['SSH_HOST_KEY'])
    )
    client = paramiko.SSHClient()
    client.get_host_keys().add(host, 'ssh-rsa', host_key)
    client.connect(
        host,
        username=app.config['SSH_USER'],
        password=app.config['SSH_PASSWORD'],
    )
    return client
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def server_rules(self):
    """ Reads the server rules from the client and returns it.

    Opens an SFTP session on the connected SSH client and reads the rules
    file at ``self.rule_location``. Returns a list of parsed rule objects,
    or an empty list when no rules file exists.
    """
    sftp = self.client.open_sftp()
    try:
        rule_path = self.rule_location
        try:
            stat_entry = sftp.stat(rule_path)
            # A directory at the rules path is treated as "no rules"; it is
            # removed (rmdir only succeeds when it is empty) so a proper
            # rules file can be written there later.
            if stat.S_ISDIR(stat_entry.st_mode):
                sftp.rmdir(rule_path)
                return []
        except IOError:
            # stat failed -> the rules file does not exist yet.
            return []
        with sftp.open(rule_path, 'r') as file_handle:
            data = file_handle.read()
            return self._parse(data)
    finally:
        # Always close the SFTP session, even on early returns or errors.
        sftp.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _parse(data: str) -> list:
    """ Parses the given data string and returns a list of rule objects.

    Blank lines and lines starting with ``#`` (comments) are skipped;
    every remaining line is handed to ``Rule.parse``.
    """
    # Accept raw bytes as well as text.
    if isinstance(data, bytes):
        data = data.decode('utf-8')
    rules = []
    for raw_line in data.split('\n'):
        entry = raw_line.strip()
        # Skip empty lines and comment lines.
        if not entry or entry.startswith('#'):
            continue
        rules.append(Rule.parse(entry))
    return rules
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jaccardIndex(s1, s2, stranded=False):
  """
  Compute the Jaccard index for two collections of genomic intervals.

  :param s1: the first set of genomic intervals
  :param s2: the second set of genomic intervals
  :param stranded: if True, treat regions on different strands as not
                   intersecting each other, even if they occupy the same
                   genomic region.
  :return: Jaccard index -- |intersection| / |union|, measured in bases
           covered by each collapsed set.
  """
  if stranded:
    raise GenomicIntervalError("Sorry, stranded mode for computing Jaccard " +
                               "index hasn't been implemented yet.")

  def _total_size(regions):
    """Sum of the lengths of the regions in the given collection."""
    # idiom fix: replaces a manual accumulation loop with sum()
    return sum(len(r) for r in regions)

  # collapse first so overlapping regions within a single set are not
  # double-counted
  s1 = collapseRegions(s1)
  s2 = collapseRegions(s2)
  c_i = _total_size(regionsIntersection(s1, s2))
  # |A u B| == |A| + |B| - |A n B|
  return c_i / float(_total_size(s1) + _total_size(s2) - c_i)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def intervalTreesFromList(inElements, verbose=False, openEnded=False):
  """
  Build a dictionary, indexed by chrom name, of interval trees for each
  chrom.

  :param inElements: list of genomic intervals. Members of the list must
                     have chrom, start and end fields; no other
                     restrictions.
  :param verbose: output progress messages to sys.stderr if True
  :param openEnded: passed through to each IntervalTree constructor.
  """
  # Group the input elements by chromosome name.
  by_chrom = {}
  if verbose:
    pind = ProgressIndicator(totalToDo=len(inElements),
                             messagePrefix="completed",
                             messageSuffix="of parsing")
  for element in inElements:
    by_chrom.setdefault(element.chrom, []).append(element)
    if verbose:
      pind.done += 1
      pind.showProgress()

  # Build one interval tree per chromosome.
  trees = {}
  if verbose:
    pind = ProgressIndicator(totalToDo=len(by_chrom),
                             messagePrefix="completed",
                             messageSuffix="of making interval trees")
  for chrom in by_chrom:
    trees[chrom] = IntervalTree(by_chrom[chrom], openEnded)
    if verbose:
      pind.done += 1
      pind.showProgress()
  return trees
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def regionsIntersection(s1, s2, collapse=True):
  """
  Given two lists of genomic regions with chromosome, start and end
  coordinates, return a new list of regions which is the intersection of
  those two sets. Lists must be sorted by chromosome and start index.

  :return: new list that represents the intersection of the two input lists.
           output regions will all have name "X", be on strand "+" and have
           score 0
  :param s1: first list of genomic regions
  :param s2: second list of genomic regions
  :param collapse: NOTE(review) -- this parameter is accepted but never
                   used; inputs are always collapsed below. Confirm whether
                   it was meant to make collapsing optional.
  :raise GenomicIntervalError: if the input regions are not sorted correctly
                               (by chromosome and start index)
  :note: O(n) time, O(n) space; informally, might use up to 3x space of input
  """
  debug = False

  # we don't need to explicitly check for sorting because sorted order is
  # a post-condition of the collapsing function
  s1_c = collapseRegions(s1)
  s2_c = collapseRegions(s2)

  if len(s1_c) == 0 or len(s2_c) == 0:
    return []

  res = []
  j = 0
  for i in range(0, len(s1_c)):
    if debug:
      sys.stderr.write("processing from s1_c : " + str(s1_c[i]) + "\n")

    # advance j to the first thing in s2_c with end in or after s1_c[i]
    if debug:
      sys.stderr.write("i = " + str(i) + " and j = " + str(j) + "\n")
    while (j < len(s2_c) and
           (s2_c[j].chrom < s1_c[i].chrom or
           (s2_c[j].chrom == s1_c[i].chrom and s2_c[j].end <= s1_c[i].start))):
      j += 1

    # nothing intersects if we hit the end of s2, or the end of the chrom,
    # or we're still on the same chrom but start after the end of s2_c[i]
    if j >= len(s2_c) or s2_c[j].chrom > s1_c[i].chrom or \
       (s2_c[j].chrom == s1_c[i].chrom and s2_c[j].start >= s1_c[i].end):
      continue

    # now everything at or after j in s2_c that starts before
    # the end of s1_c must overlap with it
    while s2_c[j].start < s1_c[i].end:
      s = max(s1_c[i].start, s2_c[j].start)
      e = min(s1_c[i].end, s2_c[j].end)
      overlap = GenomicInterval(s1_c[i].chrom, s, e, "X", 0, "+")
      if debug:
        sys.stderr.write("\tadding to overlaps: " + str(overlap) + "\n")
      res.append(overlap)
      j += 1
      if j >= len(s2_c) or s2_c[j].chrom != s1_c[i].chrom:
        break

    # it's possible the last intersecting element runs on to the
    # next element from s1_c, so back j up one step before the next
    # iteration re-scans from it
    j -= 1
    if debug:
      sys.stderr.write("\tmoving s2_c index back to " + str(s2_c[j]) + "\n")

  return res
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def bucketIterator(elements, buckets):
  """
  For each bucket in buckets, yield it and any elements that overlap it.

  :param elements: the genomic intervals to place into the buckets. Must be
                   sorted by chromosome and start index. This could be a
                   list, or an iterator.
  :param buckets: the buckets into which genomic intervals should be binned.
                  Must be sorted by chromosome and start index. This could
                  be a list, or an iterator
  :return: iterator that will yield a tuple of 1 bucket and 1 list of
           elements in the bucket for each call to __next__().
  """
  def check_sorted(current, previous):
    # sanity guard applied to each element pulled from the iterator: raise
    # if the element stream is not sorted by chromosome then start index
    if (previous is not None) and \
       ((previous.chrom > current.chrom) or
        ((previous.chrom == current.chrom) and
         (previous.start > current.start))):
      raise GenomicIntervalError("elements not sorted. Saw " +
                                 str(previous) + " before " + str(current))

  def updateOpen(openHeap, elementIterator, bucketChrom,
                 bucketStart, bucketEnd):
    """
    Drop elements from heap which start earlier than current bucket.

    Update the open heap so that it contains only elements that end after
    the start of the current bucket. Note that the heap may already contain
    some elements that start after the end of the current bucket, if a
    previous bucket ended after the end of this one and brought them into
    the set.

    :param openHeap: a min heap of elements; uses the default sorting order
                     for the genomic intervals, which is by end index. This
                     is what we're updating.
    :param elementIterator: an iterator from which we will pull new
                            elements. Elements yielded by this iterator
                            must be sorted by start index. Must be
                            'peekable'.
    :param bucketChrom: the chromosome of the current bucket.
    :param bucketStart: the start index of the current bucket.
    :param bucketEnd: the end index of the current bucket.
    """
    # first, we're going to pop elements from the heap which can no longer
    # overlap this or any future buckets. Buckets are sorted by start, so
    # we'll never see another bucket that starts earlier than this one --
    # hence any elements that end before the start of this bucket will never
    # be used again and can be dropped. Elements in the heap are ordered by
    # end index, so once we reach an element in the heap that does not end
    # before the start of this bucket, we are sure that no others will come
    # after it which do end before the start of this bucket. So we can stop
    # dropping.
    while len(openHeap) > 0 and ((openHeap[0].chrom < bucketChrom) or
                                 ((openHeap[0].chrom == bucketChrom) and
                                  (openHeap[0].end <= bucketStart))):
      heappop(openHeap)

    # now we're going to add new elements from the iterator to the heap. As
    # we know that elements in the iterator are sorted by start index, we
    # know that once we see an element that has a start index greater than
    # the end of this bucket, we can stop -- everything else after it will
    # also start after the end of this bucket.
    while (elementIterator.peek() is not None) and \
          ((elementIterator.peek().chrom < bucketChrom) or
           ((elementIterator.peek().chrom == bucketChrom) and
            (elementIterator.peek().start < bucketEnd))):
      e = elementIterator.__next__()
      # if e falls before this bucket, we can skip it; buckets are sorted by
      # start, so no other buckets start earlier than this one and so it
      # cannot intersect any others.
      if (e.chrom < bucketChrom) or \
         (e.chrom == bucketChrom and e.end <= bucketStart):
        continue
      # now we know e intersects this bucket..
      heappush(openHeap, e)

  # openElems: min-heap (by end index) of elements overlapping the current
  # bucket or a later one; maintained incrementally by updateOpen above.
  openElems = []
  prevBucket = None
  elementIter = AutoApplyIterator(elements, check_sorted)
  for bucket in buckets:
    # make sure the buckets are sorted by start index
    if prevBucket is not None and ((bucket.chrom < prevBucket.chrom) or
                                   (bucket.chrom == prevBucket.chrom and
                                    bucket.start < prevBucket.start)):
      raise GenomicIntervalError("regions-of-interest are not sorted. Saw " +
                                 str(prevBucket) + " before " + str(bucket))
    updateOpen(openElems, elementIter, bucket.chrom, bucket.start, bucket.end)
    # be careful here not to leak a reference to the heap; if the caller
    # decides to mess with that list, it'll screw us up. Anyway, we need a
    # final check here to make sure we trim off any elements that exceed the
    # end of this bucket.
    yield bucket, [x for x in openElems if x.start < bucket.end]
    prevBucket = bucket
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parseWigString(line, scoreType=int):
  """
  Parse a string in simple Wig format and return a GenomicInterval.

  :param line: the string to be parsed
  :param scoreType: treat the score field as having this type.
  :return: GenomicInterval object representing this wig line; the
           interval's name is left unset (None) and its strand takes the
           default value.
  """
  fields = line.split("\t")
  # A wig line needs chrom, start, end and score.
  if len(fields) < 4:
    raise GenomicIntervalError("failed to parse " + line +
                               " as wig format, too few fields")
  return GenomicInterval(fields[0].strip(), int(fields[1]), int(fields[2]),
                         None, scoreType(fields[3]))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parseBEDString(line, scoreType=int, dropAfter=None):
  """
  Parse a string in BED format and return a GenomicInterval object.

  :param line: the string to be parsed
  :param scoreType: treat the score field as having this type.
  :param dropAfter: an int indicating that any fields after and including
                    this field should be ignored as they don't conform to
                    the BED format. By default, None, meaning we use all
                    fields. Index from zero.
  :return: GenomicInterval object built from the BED string representation
  """
  pieces = line.split("\t")
  if dropAfter is not None:
    pieces = pieces[0:dropAfter]
  if len(pieces) < 3:
    raise GenomicIntervalError("BED elements must have at least chrom, " +
                               "start and end; found only " +
                               str(len(pieces)) + " in " + line)
  chrom = pieces[0]
  start = pieces[1]
  end = pieces[2]
  name = None
  score = None
  strand = None
  # bug fix: these tests were written as 'len(pieces) >= 4 is not None',
  # which Python parses as a chained comparison and only worked by accident
  # because '4 is not None' is always True; state the intent directly.
  if len(pieces) >= 4:
    name = pieces[3]
  if len(pieces) >= 5:
    score = pieces[4]
  if len(pieces) >= 6:
    strand = pieces[5]
  return GenomicInterval(chrom, start, end, name, score, strand, scoreType)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sameRegion(self, e):
  """
  Check whether self represents the same DNA region as e.

  :param e: genomic region to compare against
  :return: True if self and e are for the same region (ignores differences
           in non-region related fields, such as name or score -- but does
           consider strand)
  """
  if e is None:
    return False
  # bug fix: the name field is not part of the genomic region and the
  # docstring promises it is ignored, so it is no longer compared here.
  return (self.chrom == e.chrom and self.start == e.start and
          self.end == e.end and self.strand == e.strand)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.