text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def api_headers_tween_factory(handler, registry):
    """Build a Pyramid tween that stamps API headers on every response.

    Invokes the downstream handler, then applies ``set_version`` and
    ``set_req_guid`` to the request/response pair before returning it.
    """
    def api_headers_tween(request):
        # Let the rest of the pipeline produce the response first.
        resp = handler(request)
        # Decorate it with the version and request-GUID headers.
        for stamp in (set_version, set_req_guid):
            stamp(request, resp)
        return resp

    return api_headers_tween
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean_url(url):
    """Normalize a French-parliament-related URL and strip tracking noise.

    Examples of already-clean outputs:
    'http://www.assemblee-nationale.fr/dyn/15/dossiers/deuxieme_partie'
    'https://www.conseil-constitutionnel.fr/decision/2013/2013681DC.htm'

    Relies on several module-level helpers/constants defined elsewhere:
    find_stable_link_for_CC_decision, get_redirected_url, find_jo_link,
    re_clean_ending_digits, AN_OLD_URL_TEMPLATE, AN_NEW_URL_TEMPLATE.
    """
    url = url.strip()
    # fix urls like 'pjl09-518.htmlhttp://www.assemblee-nationale.fr/13/ta/ta051`8.asp'
    # (two URLs fused together by scraping: keep only the trailing one)
    if url.find('https://') > 0:
        url = 'https://' + url.split('https://')[1]
    if url.find('http://') > 0:
        url = 'http://' + url.split('http://')[1]
    scheme, netloc, path, params, query, fragment = urlparse(url)
    path = path.replace('//', '/')
    # 'xtor' fragments are analytics tracking markers; drop them.
    if 'xtor' in fragment:
        fragment = ''
    # fix url like http://www.senat.fr/dossier-legislatif/www.conseil-constitutionnel.fr/decision/2012/2012646dc.htm
    if 'www.conseil-' in url:
        url = urlunparse((scheme, netloc, path, params, query, fragment))
        url = 'http://www.conseil-' + url.split('www.conseil-')[1]
        return find_stable_link_for_CC_decision(url)
    if 'legifrance.gouv.fr' in url:
        params = ''
        url_jo_params = parse_qs(query)
        # Legacy 'WAspad' URLs redirect; follow once then re-clean.
        if 'WAspad' in path:
            newurl = get_redirected_url(url)
            if url != newurl:
                return clean_url(newurl)
        # Keep only the text identifier out of the query string.
        if 'cidTexte' in url_jo_params:
            query = 'cidTexte=' + url_jo_params['cidTexte'][0]
        elif path.endswith('/jo/texte'):
            newurl = find_jo_link(url)
            if url != newurl:
                return clean_url(newurl)
        if netloc == 'legifrance.gouv.fr':
            netloc = 'www.legifrance.gouv.fr'
        # PDF viewer links are rewritten to the HTML text view.
        if 'jo_pdf.do' in path and 'id' in url_jo_params:
            path = 'affichTexte.do'
            query = 'cidTexte=' + url_jo_params['id'][0]
        # ensure to link initial version of the text and not furtherly modified ones
        if query.startswith('cidTexte'):
            query += '&categorieLien=id'
        path = path.replace('./affichTexte.do', 'affichTexte.do')
    if 'senat.fr' in netloc:
        path = path.replace('leg/../', '/')
        path = path.replace('dossierleg/', 'dossier-legislatif/')
        # normalize dosleg url by removing extra url parameters
        if 'dossier-legislatif/' in path:
            query = ''
            fragment = ''
    # 'webdim' looks like an internal hostname alias for the AN site.
    if netloc == 'webdim':
        netloc = 'www.assemblee-nationale.fr'
    # force https (AN and CC sites are left on their original scheme)
    if 'assemblee-nationale.fr' not in netloc and 'conseil-constitutionnel.fr' not in netloc:
        scheme = 'https'
    # url like http://www.assemblee-nationale.fr/13/projets/pl2727.asp2727
    if 'assemblee-nationale.fr' in url:
        path = re_clean_ending_digits.sub(r"\1", path)
        if '/dossiers/' in path:
            url = urlunparse((scheme, netloc, path, params, query, fragment))
            legislature, slug = parse_national_assembly_url(url)
            if legislature and slug:
                # Legislatures > 14 use the newer /dyn/ URL scheme.
                template = AN_OLD_URL_TEMPLATE
                if legislature > 14:
                    template = AN_NEW_URL_TEMPLATE
                return template.format(legislature=legislature, slug=slug)
    return urlunparse((scheme, netloc, path, params, query, fragment))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_national_assembly_url(url_an):
    """Extract (legislature, slug) from an Assemblée nationale URL.

    Example results:
    (14, 'devoir_vigilance_entreprises_donneuses_ordre')
    (15, 'retablissement_confiance_action_publique')
    (14, 'le_dossier')
    (15, 'deuxieme_partie')
    (14, None)
    """
    legislature = None
    found = re.search(r"\.fr/(dyn/)?(\d+)/", url_an)
    if found:
        legislature = int(found.group(2))

    slug = None
    tail = re.search(r"/([\w_\-]*)(?:\.asp)?(?:#([\w_\-]*))?$", url_an)
    if tail:
        # Newer (>14) legislature pages carry the slug in the URL
        # fragment when present; fall back to the last path segment.
        if legislature and legislature > 14:
            slug = tail.group(2) or tail.group(1)
        else:
            slug = tail.group(1)
    return legislature, slug
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(self, given_file):
    """Read ``given_file`` into ``self.contents``.

    Skips duplicate lines when ``self.unique`` is True, and sorts
    ``self.contents`` after reading when ``self.sorted`` is True.

    :param given_file: path of the file to read (surrounding whitespace
        is stripped before opening).
    :return: True on success.
    :raises AttributeError: if ``self.unique`` is not a strict boolean.
    """
    if self.unique is not False and self.unique is not True:
        raise AttributeError("Attribute 'unique' is not True or False.")
    self.filename = str.strip(given_file)
    self.log('Read-only opening {0}'.format(self.filename))
    with open(self.filename, 'r') as handle:
        for line in handle:
            # Strip only the trailing newline; blank lines become ''.
            # (The original had a dead `if line is None` branch here:
            # rstrip() always returns a string, never None.)
            line = line.rstrip('\r\n')
            if self.unique is False or line not in self.contents:
                self.contents.append(line)
    if self.sorted:
        self.sort()
    self.log('Read {0} lines.'.format(len(self.contents)))
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check(self, line):
    """Return ``line`` if it occurs verbatim in the file, else False.

    This is a whole-line match; use .grep() or .egrep() to search for
    substrings inside lines.  Returning the line itself (rather than
    True) keeps method chaining convenient.

    :param line: String; whole line to find.
    :return: the line, or False.
    :raises TypeError: if ``line`` is not a string.
    """
    if not isinstance(line, str):
        raise TypeError("Parameter 'line' not a 'string', is {0}".format(type(line)))
    return line if line in self.contents else False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, line):
    """Append 'line' to contents, where 'line' is a line or list of lines.

    When self.unique is True, lines already present are skipped.
    Multi-line strings are split on newlines into a list first.

    :param line: String or List of Strings to append; False is a no-op.
    :return: Boolean; whether contents were changed by this call.
    :raises AttributeError: if ``self.unique`` is not a strict boolean.
    :raises TypeError: if ``line`` is neither a string nor a list.
    """
    if self.unique is not False and self.unique is not True:
        raise AttributeError("Attribute 'unique' is not True or False.")
    self.log('add({0}); unique={1}'.format(line, self.unique))
    if line is False:
        return False
    if isinstance(line, str):
        line = line.split('\n')
    if not isinstance(line, list):
        raise TypeError("Parameter 'line' not a 'string' or 'list', is {0}".format(type(line)))
    changed_here = False
    for candidate in line:
        # unique is guaranteed True/False by the guard above.
        if self.unique and candidate in self.contents:
            continue
        self.contents.append(candidate)
        self.changed = changed_here = True
    if self.sorted and changed_here:
        self.sort()
    return changed_here
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rm(self, line):
    """Remove every occurrence of the given line(s) from contents.

    Multi-line strings are split on newlines into a list first.

    :param line: String or List of Strings; each string is an entire
        line to remove.  False is a no-op.
    :return: Boolean; whether contents were changed by this call.
    :raises TypeError: if ``line`` is neither a string nor a list.
    """
    self.log('rm({0})'.format(line))
    if line is False:
        return False
    if isinstance(line, str):
        line = line.split('\n')
    if not isinstance(line, list):
        raise TypeError("Parameter 'line' not a 'string' or 'list', is {0}".format(type(line)))
    changed_here = False
    for target in line:
        if target not in self.contents:
            self.log('"{0}" not in {1}'.format(target, self.filename))
            continue
        while target in self.contents:
            self.log('Removed "{0}" from position {1}'.format(target, self.contents.index(target)))
            self.contents.remove(target)
            self.changed = changed_here = True
    if self.sorted and changed_here:
        self.sort()
    return changed_here
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def replace(self, old, new):
    """Replace every line matching 'old' with 'new', duplicates included.

    Multi-line strings for 'old' are split on newlines into a list.

    :param old: String, List of Strings, or False; lines to replace.
    :param new: String; the replacement line.
    :return: Boolean; whether contents changed during this call.
    :raises TypeError: on wrong argument types.

    NOTE(review): as in the original, calling replace(x, x) on a
    present line never terminates — confirm callers avoid this.
    """
    self.log('replace({0}, {1})'.format(old, new))
    if old is False:
        return False
    if isinstance(old, str):
        old = old.split('\n')
    if not isinstance(old, list):
        raise TypeError("Parameter 'old' not a 'string' or 'list', is {0}".format(type(old)))
    if not isinstance(new, str):
        raise TypeError("Parameter 'new' not a 'string', is {0}".format(type(new)))
    changed_here = False
    for target in old:
        if target not in self.contents:
            self.log('"{0}" not in {1}'.format(target, self.filename))
            continue
        while target in self.contents:
            # In-place slot assignment is equivalent to the original
            # remove-then-insert at the same index.
            where = self.contents.index(target)
            self.contents[where] = new
            self.changed = changed_here = True
            self.log('Replaced "{0}" with "{1}" at line {2}'.format(target, new, where))
    return changed_here
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def partition(pred, iterable):
    """Split *iterable* into two lists: (matches, non-matches) of *pred*."""
    matched, unmatched = [], []
    for element in iterable:
        # Pick the destination bucket, then append.
        (matched if pred(element) else unmatched).append(element)
    return matched, unmatched
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def zip_with_output(skip_args=None):
    """Decorator that zips a function's positional inputs with its output.

    Only positional arguments are zipped.  When exactly one positional
    argument survives the skip, it is returned bare rather than as a
    list.

    skip_args : list
        indexes of positional arguments to exclude from the zip.

    @zip_with_output(skip_args=[0])
    def foo(bar, baz):
        return baz

    decorates foo s.t. foo(x, y) == (y, y).
    """
    from functools import wraps
    # None-sentinel avoids the shared-mutable-default pitfall of the
    # original `skip_args=[]`; behavior is unchanged.
    skip = [] if skip_args is None else skip_args

    def decorator(fn):
        @wraps(fn)  # preserve the wrapped function's metadata
        def wrapped(*args, **kwargs):
            kept = [arg for i, arg in enumerate(args) if i not in skip]
            result = fn(*args, **kwargs)
            if len(kept) == 1:
                return (kept[0], result)
            return (kept, result)
        return wrapped
    return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def capture_exception(fn):
    """Decorator that returns, rather than raises, exceptions from *fn*.

    On success the wrapped function's result is returned unchanged; if
    *fn* raises, the exception instance itself is returned.

    Improvements over the original: keyword arguments are forwarded
    (previously a keyword call failed outside the try block), and the
    wrapped function's metadata is preserved with functools.wraps.
    """
    from functools import wraps

    @wraps(fn)
    def wrapped(*args, **kwargs):
        try:
            return fn(*args, **kwargs)
        except Exception as exc:
            return exc
    return wrapped
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compose(*funcs):
    """Compose functions right-to-left: compose(f, g)(x) == f(g(x)).

    With no functions, the returned callable is the identity.
    """
    def composed(value):
        # Apply the rightmost function first, as in the reduce version.
        for fn in reversed(funcs):
            value = fn(value)
        return value
    return composed
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def map_over_glob(fn, path, pattern):
    """Apply *fn* to every filesystem path matching *pattern* under *path*."""
    matches = glob.glob(os.path.join(path, pattern))
    return list(map(fn, matches))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def mkdir_recursive(dirname):
    """Create *dirname* and any missing ancestor directories.

    Tolerant of directories that already exist anywhere along the path
    (similar in spirit to os.makedirs with exist_ok).
    """
    parent = os.path.dirname(dirname)
    # Recurse upward first so ancestors exist before the leaf is made.
    if parent and not os.path.exists(parent):
        mkdir_recursive(parent)
    # Both branches of the original ended with this identical check;
    # hoisting it removes the duplication without changing behavior.
    if not os.path.exists(dirname):
        os.mkdir(dirname)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def indent_text(*strs, **kwargs):
    """Indent text according to an operator string and a global level.

    Returns a tuple of all passed strings, indented per the ``indent``
    keyword (default "+0"), which manipulates the module-global
    ``lasting_indent``:

        ++n : increments the global indentation level by n and indents
        +n  : indents with the global indentation level + n
        --n : decrements the global indentation level by n and indents
        -n  : indents with the global indentation level - n
        ==n : sets the global indentation level to exactly n and indents
        =n  : indents with an indentation level of exactly n

    Optional keywords: ``autobreak`` wraps strings longer than
    ``char_limit`` (default 80) at ``split_char`` (default " ").
    """
    # python 2.7 workaround: no keyword-only arguments available.
    indent = kwargs["indent"] if "indent" in kwargs else"+0"
    autobreak = kwargs.get("autobreak", False)
    char_limit = kwargs.get("char_limit", 80)
    split_char = kwargs.get("split_char", " ")
    strs = list(strs)
    if autobreak:
        # Replace each over-long string with a list of wrapped chunks.
        for index, s in enumerate(strs):
            if len(s) > char_limit:
                strs[index] = []
                spl = s.split(split_char)
                result = []  # NOTE(review): unused; candidate for removal
                collect = ""
                for current_block in spl:
                    if len(current_block) + len(collect) > char_limit:
                        strs[index].append(collect[:-1] + "\n")
                        collect = " "
                    collect += current_block + split_char
                strs[index].append(collect + "\n")
        # flatten_list is a module-level helper defined elsewhere.
        strs = flatten_list(strs)
    global lasting_indent
    if indent.startswith("++"):
        lasting_indent = lasting_indent + int(indent[2:])
        cur_indent = lasting_indent
    elif indent.startswith("+"):
        cur_indent = lasting_indent + int(indent[1:])
    elif indent.startswith("--"):
        lasting_indent = lasting_indent - int(indent[2:])
        cur_indent = lasting_indent
    elif indent.startswith("-"):
        cur_indent = lasting_indent - int(indent[1:])
    elif indent.startswith("=="):
        lasting_indent = int(indent[2:])
        cur_indent = lasting_indent
    elif indent.startswith("="):
        lasting_indent = int(indent[1:])
        cur_indent = int(indent[1:])
    else:
        # NOTE(review): the '%s' placeholder is never filled in — the
        # message prints literally; likely missing `% indent`.
        raise Exception(
            "indent command format '%s' unrecognized (see the docstring)")
    # mutate indentation level if needed
    return tuple([" " * cur_indent] + [elem.replace("\n", "\n" + " " * cur_indent)
                                       for elem in strs])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pdebug(*args, **kwargs):
    """Print debug output, colored cyan, with indentation control.

    Despite the original wording, output goes to **stderr** (see the
    sys.stderr.write calls below), not stdout.  Emits only when the
    'debug' message group (or kwargs['groups']) is enabled per
    should_msg().  Remaining kwargs are forwarded to indent_text().
    """
    if should_msg(kwargs.get("groups", ["debug"])):
        # initialize colorama only if uninitialized
        global colorama_init
        if not colorama_init:
            colorama_init = True
            colorama.init()
        args = indent_text(*args, **kwargs)
        # write to stderr (the original comment claimed stdout)
        sys.stderr.write(colorama.Fore.CYAN)
        sys.stderr.write("".join(args))
        sys.stderr.write(colorama.Fore.RESET)
        sys.stderr.write("\n")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pout(*args, **kwargs):
    """Print a message, maintaining the current indent level.

    Despite the original wording, output goes to **stderr** (see the
    sys.stderr.write calls below), not stdout.  Emits only when the
    'normal' message group (or kwargs['groups']) is enabled.
    """
    if should_msg(kwargs.get("groups", ["normal"])):
        args = indent_text(*args, **kwargs)
        # write to stderr (the original comment claimed stdout)
        sys.stderr.write("".join(args))
        sys.stderr.write("\n")
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def urlretrieve(url, dest, write_mode="w"):
    """Download *url* and save the response body to *dest*.

    Missing parent directories of *dest* are created first via
    mkdir_recursive.

    :param url: URL to fetch (via urllib2).
    :param dest: destination file path.
    :param write_mode: mode for open(); pass "wb" for binary payloads.
    """
    response = urllib2.urlopen(url)
    mkdir_recursive(os.path.dirname(dest))
    # The with-block closes the file on exit; the original's explicit
    # f.close() inside the block was redundant.
    with open(dest, write_mode) as f:
        f.write(response.read())
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_dups(seq):
    """Return *seq* as a list with duplicates removed, order preserved."""
    seen = set()
    unique_items = []
    for item in seq:
        if item in seen:
            continue
        seen.add(item)
        unique_items.append(item)
    return unique_items
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def json_requested():
    """Check if json is the preferred output format for the request.

    True only when the Accept header's best match is application/json
    AND it outranks text/html in quality.
    """
    accepted = request.accept_mimetypes
    best = accepted.best_match(['application/json', 'text/html'])
    if best != 'application/json':
        return False
    return accepted[best] > accepted['text/html']
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_readme():
    'Get the long description from the README file'
    # Resolve README.rst relative to this module's directory.
    here = path.abspath(path.dirname(__file__))
    readme_path = path.join(here, 'README.rst')
    with open(readme_path, encoding='utf-8') as my_fd:
        return my_fd.read()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_nc_attrs(nc):
    """Gets netCDF file metadata attributes.

    Arguments:
        nc (netCDF4.Dataset): an open NetCDF4 Dataset to pull
            attributes from.

    Returns:
        dict: Metadata as extracted from the netCDF file; includes
        'variable_name' only when get_var_name() can guess one.
    """
    ensemble = 'r{}i{}p{}'.format(
        nc.realization, nc.initialization_method, nc.physics_version)
    meta = {
        'experiment': nc.experiment_id,
        'frequency': nc.frequency,
        'institute': nc.institute_id,
        'model': nc.model_id,
        'modeling_realm': nc.modeling_realm,
        'ensemble_member': ensemble,
    }
    variable_name = get_var_name(nc)
    if variable_name:
        meta['variable_name'] = variable_name
    return meta
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_var_name(nc):
    """Guess the data variable name of an open NetCDF file.

    Returns the single variable that is not a well-known coordinate or
    bounds variable, or None when the guess would be ambiguous.
    """
    coordinate_names = {
        'lat', 'lat_bnds', 'lon', 'lon_bnds',
        'time', 'latitude', 'longitude', 'bnds',
    }
    candidates = set(nc.variables.keys()) - coordinate_names
    # Only an unambiguous single leftover counts as "the" variable.
    return candidates.pop() if len(candidates) == 1 else None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _update_known_atts(self, **kwargs):
    """Updates instance attributes with supplied keyword arguments.

    Keys must appear in the module-level ATTR_KEYS whitelist; unknown
    keys raise SyntaxWarning.  A falsey value deletes the attribute
    instead of setting it.
    """
    for key, value in kwargs.items():
        if key not in ATTR_KEYS:
            # Warn if passed in unknown kwargs
            raise SyntaxWarning('Unknown argument: {}'.format(key))
        if not value:
            # Falsey values mean "remove this attribute".
            delattr(self, key)
        else:
            setattr(self, key, value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def savecache(apicache, json_file):
    """Save the *apicache* dictionary as JSON to *json_file*.

    Returns the indented JSON string that was written, or "" when there
    is nothing to save (None or an empty dict).
    """
    # BUGFIX: the original tested `apicache is {}`, which is always
    # False because a fresh dict literal is a new object; compare by
    # truthiness instead so an empty cache is also skipped.
    if not apicache:
        return ""
    apicachestr = json.dumps(apicache, indent=2)
    with open(json_file, 'w') as cache_file:
        cache_file.write(apicachestr)
    return apicachestr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def loadcache(json_file):
    """Load *json_file* as a dictionary for monkeycache processing.

    Returns {} (after printing a diagnostic) when the file does not
    contain valid JSON.
    """
    # Context manager guarantees the handle is closed even if read()
    # raises — the original left the file open on error.
    with open(json_file, 'r') as f:
        data = f.read()
    try:
        apicache = json.loads(data)
    except ValueError as e:
        print("Error processing json:", json_file, e)
        return {}
    return apicache
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def monkeycache(apis):
    """Feed this a dictionary of api bananas, it spits out processed cache.

    Converts a raw API-discovery response into a cache dict indexed by
    verb and subject, with 'count', 'asyncapis' and 'verbs' entries.
    Returns {} when no response is given at all.
    """
    # BUGFIX: the original guard `isinstance(type(apis), type(None))`
    # was always False (type(apis) is a class, never None); a plain
    # None check is what was intended.
    if apis is None:
        return {}
    verbs = set()
    cache = {'count': apis['count'], 'asyncapis': []}
    apilist = apis['api']
    if apilist is None:
        # BUGFIX: the original printed this message and then crashed
        # iterating over None; return the (empty) cache instead.
        print("[monkeycache] Server response issue, no apis found")
        cache['verbs'] = []
        return cache
    for api in apilist:
        name = getvalue(api, 'name')
        verb, subject = splitverbsubject(name)
        apidict = {
            'name': name,
            'description': getvalue(api, 'description'),
            'isasync': getvalue(api, 'isasync'),
        }
        if apidict['isasync']:
            cache['asyncapis'].append(name)
        apidict['related'] = splitcsvstring(getvalue(api, 'related'))
        required = []
        apiparams = []
        for param in getvalue(api, 'params'):
            apiparam = {
                'name': getvalue(param, 'name'),
                'description': getvalue(param, 'description'),
                'required': (getvalue(param, 'required') is True),
                'length': int(getvalue(param, 'length')),
                'type': getvalue(param, 'type'),
                'related': splitcsvstring(getvalue(param, 'related')),
            }
            if apiparam['required']:
                required.append(apiparam['name'])
            apiparams.append(apiparam)
        apidict['requiredparams'] = required
        apidict['params'] = apiparams
        if verb not in cache:
            cache[verb] = {}
        cache[verb][subject] = apidict
        verbs.add(verb)
    cache['verbs'] = list(verbs)
    return cache
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def execute(self, sql, param=(), times=1):
    """Execute *sql*, retrying up to *times* attempts (default 1).

    Python 2 only (`except Exception, e` / xrange).  Returns the
    (ret, res) pair from self._execute on success, or (None, e) with
    the last exception after all retries fail.  Back-off between
    retries grows as i**1.5 seconds.
    """
    # self.log may be None/falsey; the `and` guards every log call.
    self.log and self.log.debug('%s %s' % ('SQL:', sql))
    # NOTE(review): `is not ()` compares identity, not equality; an
    # empty tuple built elsewhere may not be the same object.  Probably
    # meant `param != ()`.
    if param is not ():
        self.log and self.log.debug('%s %s' % ('PARAMs:', param))
    for i in xrange(times):
        try:
            ret, res = self._execute(sql, param)
            return ret, res
        except Exception, e:
            self.log and self.log.warn("The %s time execute, fail" % i)
            self.log and self.log.warn(e)
            # Skip the sleep on the first (i == 0) failure.
            if i: sleep(i**1.5)
    # `e` leaks from the except clause (legal in Python 2 only).
    self.log and self.log.error(e)
    return None, e
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_metadata(source):
    """Extract setup metadata from a module or dict argument.

    Returns a dictionary of keyword arguments for the setuptools
    ``setup`` function (name, version, url, license, author,
    author_email, description, long_description, classifiers).

    :param source: a module object (its __dict__ is used) or a mapping
        holding dunder metadata keys (__version__, __author__, ...).
    """
    if isinstance(source, types.ModuleType):
        metadata = source.__dict__
    else:
        metadata = source
    setuptools_kwargs = {}
    # One-to-one fields.  (This loop already covers __version__; the
    # original repeated the version lookup separately — redundant.)
    for key in "name version url license".split():
        val = metadata.get("__" + key + "__")
        if val is not None:
            setuptools_kwargs[key] = val
    # Search for author email with a <...@...> syntax in the author field.
    author = metadata.get("__author__")
    if author is not None:
        email_pattern = u"<([^>]+@[^>]+)>"
        match = re.search(email_pattern, author)
        if match is not None:
            setuptools_kwargs["author_email"] = email = match.groups()[0]
            setuptools_kwargs["author"] = author.replace(u"<" + email + u">", u"").strip()
        else:
            setuptools_kwargs["author"] = author
    # Get the module summary.
    summary = metadata.get("__summary__")
    if summary is not None:
        setuptools_kwargs["description"] = summary
    # Get and process the module README; first existing file wins.
    README_filenames = ["README.md", "README.txt", "README"]
    for filename in README_filenames:
        if os.path.isfile(filename):
            # Context manager fixes the original's leaked file handle.
            with open(filename) as readme_file:
                README = readme_file.read()
            if hasattr(README, "decode"):  # Python 2 bytes -> unicode
                README = README.decode("utf-8")
            README_rst = to_rst(README)
            setuptools_kwargs["long_description"] = README_rst or README
            break
    # Process keywords that match trove classifiers.
    keywords = metadata.get("__keywords__")
    if keywords is not None:
        classifiers = []
        keywords = [k.strip() for k in keywords.split(",")]
        for keyword in keywords:
            trove_id = trove_search(keyword)
            if trove_id is not None:
                classifiers.append(trove_id)
        setuptools_kwargs["classifiers"] = sorted(set(classifiers))
    return setuptools_kwargs
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def profile(length=25):
    """ Start the application under the code profiler.

    :param length: restriction passed to the profiler middleware
        (number of entries to report).
    """
    from werkzeug.contrib.profiler import ProfilerMiddleware
    profiled_app = ProfilerMiddleware(app.wsgi_app, restrictions=[length])
    app.wsgi_app = profiled_app
    app.run()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _help():
    """ Display both SQLAlchemy and Python help statements """
    # shelp / phelp are module-level help templates; phelp expects a
    # comma-joined list of the available context keys.  Python 2 print
    # statement — this module is Python 2 only.
    statement = '%s%s' % (shelp, phelp % ', '.join(cntx_.keys()))
    print statement.strip()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self):
    """ Sets up live server, and then loops over handling http requests.

    Tries each port in self.possible_ports until one binds; signals
    readiness via self.is_ready and stores any fatal error in
    self.error for the main thread to inspect.
    """
    try:
        # Go through the list of possible ports, hoping we can find
        # one that is free to use for the WSGI server.
        for index, port in enumerate(self.possible_ports):
            try:
                self.httpd = self._create_server(port)
            except socket.error as e:
                # BUGFIX: socket.error carries its code in `errno`,
                # not `error` — the original attribute lookup itself
                # raised AttributeError instead of retrying.
                if (index + 1 < len(self.possible_ports) and
                        e.errno == errno.EADDRINUSE):
                    # This port is already in use, so we go on and try
                    # with the next one in the list.
                    continue
                else:
                    # Either none of the given ports are free or the
                    # error is something else than "Address already in
                    # use". So we let that error bubble up to the main
                    # thread.
                    raise
            else:
                # A free port was found.
                self.port = port
                break
        self.is_ready.set()
        self.httpd.serve_forever()
    except Exception as e:
        self.error = e
        # Still signal readiness so the main thread is not left waiting.
        self.is_ready.set()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_column(table, remove_index):
    '''
    Removes the specified column from the table (mutating each row in
    place in the list) and returns the table.
    '''
    for row_number, row in enumerate(table):
        table[row_number] = [cell for cell_index, cell in enumerate(row)
                             if cell_index != remove_index]
    return table
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def row_content_length(row):
    '''
    Returns the length of non-empty content in a given row.

    Scans from the right for the first non-empty cell (per
    is_empty_cell) and returns its index + 1; an empty/None row gives 0.

    NOTE(review): a row whose cells are ALL empty takes the
    StopIteration branch and returns len(row), not 0 — preserved as-is,
    but worth confirming the intent.
    '''
    if not row:
        return 0
    trailing = ((index + 1) for index, cell in reversed(list(enumerate(row)))
                if not is_empty_cell(cell))
    try:
        # BUGFIX: use the next() builtin — gen.next() exists only on
        # Python 2 and breaks under Python 3.
        return next(trailing)
    except StopIteration:
        return len(row)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def split_block_by_row_length(block, split_row_length):
    '''
    Split *block* wherever a row's non-empty content length (per
    row_content_length) is at most *split_row_length*; each such short
    row becomes its own single-row block, and runs of longer rows stay
    together.
    '''
    pieces = []
    buffered = []
    for row in block:
        if row_content_length(row) > split_row_length:
            buffered.append(row)
            continue
        # Short row: flush the accumulated run, then emit it alone.
        if buffered:
            pieces.append(buffered)
            buffered = []
        pieces.append([row])
    if buffered:
        pieces.append(buffered)
    return pieces
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_need_install():
    """Check if installed package are exactly the same to this one.

    Walks the source tree (_ROOT) and compares each file's md5 against
    its installed counterpart under _DST; True means a (re)install is
    required.  __pycache__ directories are ignored.
    """
    for root, _, basenames in os.walk(_ROOT):
        if os.path.basename(root) == "__pycache__":
            continue
        for basename in basenames:
            src = os.path.join(root, basename)
            dst = os.path.join(root.replace(_ROOT, _DST), basename)
            # Missing or content-changed file => install needed.
            if not os.path.exists(dst):
                return True
            if md5_of_file(src) != md5_of_file(dst):
                return True
    return False
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_permission_to_view(page, user):
    """ Check whether the user has permission to view the page.

    The page is viewable when it declares no permissions at all, or
    when the user holds at least one of the declared permissions.
    """
    if page.permissions.count() == 0:
        return True
    labels = ('%s.%s' % (perm.content_type.app_label, perm.codename)
              for perm in page.permissions.all())
    return any(user.has_perm(label) for label in labels)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_directives(self, line):
    """List all directives supported by the bot.

    NOTE: `line` is required by the cmd.Cmd do_* signature but unused
    here.
    """
    for name, cmd in self.adapter.directives.items():
        with colorize('blue'):
            print('bot %s:' % name)
            if cmd.__doc__:
                # Renamed from `line`, which shadowed the parameter.
                for doc_line in cmd.__doc__.split('\n'):
                    print(' %s' % doc_line)
            else:
                print()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def do_bot(self, line):
    """Call the bot"""
    with colorize('blue'):
        if not line:
            # NOTE(review): there is no `return` here, so an empty line
            # still falls through and is passed to adapter.receive();
            # confirm whether that is intended.
            self.say('what?')
        try:
            res = self.adapter.receive(message=line)
        except UnknownCommand:
            # "known" (sic) is the original message text, left as-is.
            self.say("I do not known what the '%s' directive is" % line)
        else:
            self.say(res)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def timeout(seconds, error_message=None):
    """Timeout checking just for Linux-like platform, not working in Windows platform.

    Decorator factory: aborts the wrapped call with TimeoutError after
    *seconds* via SIGALRM.  On non-UNIX platforms sysx.platform makes
    the decorator a no-op wrapper around *func*.
    """
    def decorated(func):
        # NOTE(review): this local is never used — the `global result`
        # statements below bind a module-level name, not this one.
        # A `nonlocal` (Py3) was probably intended.
        result = ""
        def _handle_timeout(signum, frame):
            errmsg = error_message or 'Timeout: The action <%s> is timeout!' % func.__name__
            global result
            result = None
            import inspect
            # Frame 4 up the stack is assumed to be the interrupted
            # caller — TODO confirm this offset is stable.
            stack_frame = inspect.stack()[4]
            file_name = os.path.basename(stack_frame[1])
            line_no = stack_frame[2]
            method_name = stack_frame[3]
            code_text = ','.join(stack_frame[4])
            stack_info = 'Stack: %s, %s:%s >%s' % (method_name, file_name, line_no, code_text)
            sys.stderr.write(errmsg+'\n')
            sys.stderr.write(stack_info+'\n')
            raise TimeoutError(errmsg)
        # On non-UNIX platforms this decorator substitutes `func`
        # directly (case_false_wraps), skipping the alarm machinery.
        @sysx.platform(sysx.UNIX_LIKE, case_false_wraps=func)
        def wrapper(*args, **kwargs):
            global result
            signal.signal(signal.SIGALRM, _handle_timeout)
            signal.alarm(seconds)
            try:
                result = func(*args, **kwargs)
            finally:
                # Always cancel the pending alarm, even on exception.
                signal.alarm(0)
            return result
        return functools.wraps(func)(wrapper)
    return decorated
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def broadcast(self, command, *args, **kwargs):
    """ Notifies each user with a specified command.

    An optional ``criterion`` keyword (default BROADCAST_FILTER_ALL)
    is consulted per user to decide whether to send the notification.
    """
    criterion = kwargs.pop('criterion', self.BROADCAST_FILTER_ALL)
    for index, user in items(self.users()):
        if not criterion(user, command, *args, **kwargs):
            continue
        self.notify(user, command, *args, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_config(basedir, files):
    """ Returns the config object for the selected docker-compose.yml

    This is an instance of `compose.config.config.Config`.
    """
    env = environment.Environment.from_env_file(basedir)
    config_details = config.find(basedir, files, env)
    return config.load(config_details)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def build(config, services):
    """ Builds images and tags them appropriately.

    Where "appropriately" means with the output of:
        git describe --tags HEAD
    and 'latest' as well (so the "latest" image for each will always be
    the most recently built).  Python 2 only (iteritems/iterkeys).
    """
    # Only services that define a 'build' section can be built locally.
    filtered_services = {name: service for name, service in services.iteritems() if 'build' in service}
    _call_output('docker-compose build {}'.format(' '.join(filtered_services.iterkeys())))
    version = _get_version()
    for service_name, service_dict in filtered_services.iteritems():
        # Tag with proper version, they're already tagged latest from build
        image = service_dict['image']
        _call('docker tag {image}:latest {image}:{version}'.format(
            image=image,
            version=version
        )
        )
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def push(config, services):
    """ Upload the defined services to their respective repositories.

    So's we can then tell the remote docker host to then pull and run
    them.  Pushes both the 'latest' and the version-specific tag.
    """
    version = _get_version()
    for service_name, service_dict in services.iteritems():
        context = {'image': service_dict['image'], 'version': version}
        _call_output('docker push {image}:latest'.format(**context))
        _call_output('docker push {image}:{version}'.format(**context))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def generate_bins(bins, values=None):
"""Compute bin edges for numpy.histogram based on values and a requested bin parameters Unlike `range`, the largest value is included within the range of the last, largest value, so generate_bins(N) with produce a sequence with length N+1 Arguments: bins (int or 2-tuple of floats or sequence of floats) s or the first pair of bin edges [0] [0, 1, 2, 3] [0] [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10] [0.0, 2.0, 4.0, 6.0, 8.0, 10.0, 12.0, 14.0, 16.0, 18.0, 20.0] [0, 3, 6, 9, 12, 15, 18, 21] """ |
# Normalise the `bins` request: a bare int means "N bins", a bare float
# means "bins of this width starting at 0".
if isinstance(bins, int):
    bins = (bins,)
if isinstance(bins, float):
    bins = (0, bins)
# Three or more edges: the caller already supplied explicit edges.
if not len(bins) in (1, 2):
    return bins
# Guard against empty/non-iterable/all-falsy values; fall back to [0].
if values is None or not hasattr(values, '__iter__') or not any(values) or not hasattr(values, '__len__') or len(values) < 1:
    values = [0]
# NOTE(review): pd.np was removed in pandas >= 1.0; presumably this targets
# an older pandas -- confirm before upgrading.
value_min, value_max = pd.np.min(values), pd.np.max(values)
value_range = value_max - value_min
if len(bins) == 1:
    # Single number = requested bin count; degenerate range -> unit bins.
    if not value_range:
        return range(int(bins[0]) + 1)
    bins = (0, value_range / float(bins[0]))
if len(bins) == 2:
    if not value_range:
        return bins
    binwidth = ((bins[1] - bins[0]) or 1)
    bin0 = bins[0] or pd.np.min(values)
    # Snap the first edge to 0 when it is small relative to the data range.
    if (bin0 / value_range) <= .3:
        bin0 = 0
    numbins = int(value_range / float(binwidth))
    bins = list(pd.np.arange(numbins + 1) * binwidth + bin0)
else:
    # Explicit edges: extend with the smallest (or mean) edge spacing until
    # the largest value is covered, so max(values) falls inside the last bin.
    binwidth = pd.np.min(pd.np.diff(bins)) or pd.np.mean(pd.np.diff(bins)) or 1.
    bins = list(bins)
    while bins[-1] < value_max:
        bins.append(bins[-1] + binwidth)
return bins
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def select(target, path, default=None, slient=True):
    """Select the item at a dotted *path* inside *target*.

    Traverses dicts by key, lists by integer index and other objects by
    attribute.  On a failed lookup, returns *default* when *slient* is
    true, otherwise raises ``KeyError``.

    :param target: root container (dict / list / arbitrary object).
    :param path: dot-separated path, e.g. ``"a.b.0.c"``.
    :param default: value returned on lookup failure when *slient*.
    :param slient: (sic -- kept for interface compatibility) suppress
        lookup errors when true.
    :raises KeyError: when the path is missing and *slient* is false.
    """
    def _miss():
        # Centralised failure handling: honour the slient flag.  The
        # original raised KeyError("") -- include the path for debuggability.
        if slient:
            return default
        raise KeyError(path)

    node = target
    for name in path.split("."):
        if isinstance(node, dict):
            # Only a missing key is an expected failure here; the original
            # bare ``except:`` also hid programming errors.
            try:
                node = node[name]
            except KeyError:
                return _miss()
        elif isinstance(node, list) and name.isdigit():
            try:
                node = node[int(name)]
            except IndexError:
                return _miss()
        elif hasattr(node, name):
            node = getattr(node, name)
        else:
            return _miss()
    return node
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(target, path, value):
"""Update item in path of target with given value. """ |
# Walk the dotted path, creating intermediate containers as needed:
# dicts get missing keys, lists are padded (via the module-level listpad
# helper) and arbitrary objects get attributes.  The final segment is
# assigned `value`.
names = path.split(".")
names_length = len(names)
node = target
for index in range(names_length):
    name = names[index]
    # `last` marks the final path segment, which receives the value.
    if index == names_length - 1:
        last = True
    else:
        last = False
    if isinstance(node, dict):
        if last:
            node[name] = value
            return
        else:
            if not name in node:
                node[name] = {}
            node = node[name]
    elif isinstance(node, list):
        # List segments must be integer indices; pad the list so the
        # index exists before assigning.
        name = int(name)
        listpad(node, name+1)
        if last:
            node[name] = value
            return
        else:
            # NOTE(review): this overwrites any existing element with {}
            # even when one is already present -- confirm that is intended.
            node[name] = {}
            node = node[name]
    else:
        if last:
            setattr(node, name, value)
        else:
            # NOTE(review): likewise clobbers an existing attribute with {}.
            setattr(node, name, {})
        node = getattr(node, name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_bam_index_stats(fn):
    """Parse the output from Picard's BamIndexStats into a DataFrame.

    Parameters
    ----------
    fn : str
        Path to the BamIndexStats output file.

    Returns
    -------
    df : pandas.DataFrame
        One row per reference sequence (sorted by name) with columns
        'length', 'aligned' and 'unaligned'.
    """
    with open(fn) as f:
        lines = [x.strip().split() for x in f.readlines()]
    # Trailing line is "NoCoordinateCount= N"; parsed but not returned
    # (kept for parity with the original behaviour).
    no_counts = int(lines[-1][-1])
    lines = lines[:-1]
    # Per-chromosome lines look like:
    #   <chrom> length= <L> Aligned= <A> Unaligned= <U>
    chrom = [x[0] for x in lines]
    length = [int(x[2]) for x in lines]
    aligned = [int(x[4]) for x in lines]
    unaligned = [int(x[6]) for x in lines]
    df = pd.DataFrame([length, aligned, unaligned], columns=chrom,
                      index=['length', 'aligned', 'unaligned']).T
    # .ix was removed from pandas; .loc is the label-based equivalent.
    df = df.loc[sorted(df.index)]
    return df
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_alignment_summary_metrics(fn):
    """Parse the output from Picard's CollectAlignmentSummaryMetrics and
    return it as a pandas DataFrame.

    Parameters
    ----------
    fn : str
        Path to the Picard output file.

    Returns
    -------
    df : pandas.DataFrame
        Metrics transposed so metric names are the index and the CATEGORY
        values are the columns.
    """
    # range(6) is lazy on Python 3; concatenating it to a list with ``+``
    # raises TypeError, so materialize it first.  Lines 0-5 are the Picard
    # comment header; 10-11 are the blank/histogram separator lines.
    df = pd.read_table(fn, index_col=0,
                       skiprows=list(range(6)) + [10, 11]).T
    return df
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_mark_duplicate_metrics(fn):
""" Parse the output from Picard's MarkDuplicates and return as pandas Series. Parameters filename : str of filename or file handle Filename of the Picard output you want to parse. Returns ------- metrics : pandas.Series Duplicate metrics. hist : pandas.Series Duplicate histogram. """ |
# Picard MarkDuplicates output layout: line 6 holds the metric names,
# line 7 the values; lines 11..-1 hold the two-column duplication histogram.
with open(fn) as f:
    lines = [x.strip().split('\t') for x in f.readlines()]
metrics = pd.Series(lines[7], lines[6])
# All metric fields except the first (LIBRARY, a string) are numeric.
m = pd.to_numeric(metrics[metrics.index[1:]])
metrics[m.index] = m.values
vals = np.array(lines[11:-1])
# Histogram: column 0 is the (float-formatted) bin, column 1 the count.
hist = pd.Series(vals[:, 1], index=[int(float(x)) for x in vals[:, 0]])
hist = pd.to_numeric(hist)
return metrics, hist
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_insert_metrics(fn):
""" Parse the output from Picard's CollectInsertSizeMetrics and return as pandas Series. Parameters filename : str of filename or file handle Filename of the Picard output you want to parse. Returns ------- metrics : pandas.Series Insert size metrics. hist : pandas.Series Insert size histogram. """ |
# CollectInsertSizeMetrics layout: line 6 = metric names, line 7 = values,
# lines 11..-1 = insert-size histogram.
with open(fn) as f:
    lines = [x.strip().split('\t') for x in f.readlines()]
index = lines[6]
vals = lines[7]
# Picard may omit trailing empty fields; pad with NaN so lengths match.
for i in range(len(index) - len(vals)):
    vals.append(np.nan)
# Coerce each string field to int, then float; leave non-numeric as-is.
for i, v in enumerate(vals):
    if type(v) == str:
        try:
            vals[i] = int(v)
        except ValueError:
            try:
                vals[i] = float(v)
            except ValueError:
                continue
metrics = pd.Series(vals, index=index)
vals = np.array(lines[11:-1])
# Histogram bins are float-formatted integers in column 0, counts in 1.
hist = pd.Series(vals[:, 1], index=[int(float(x)) for x in vals[:, 0]])
hist = pd.to_numeric(hist)
return metrics, hist
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shell_sqlalchemy(session: SqlalchemySession, backend: ShellBackend):
    """Start an IPython shell whose namespace includes the SQLAlchemy
    DB session plus whatever the backend contributes."""
    # Backend entries take precedence over the 'session' key, exactly as
    # dict.update() did in the original implementation.
    ns = {'session': session, **backend.get_namespace()}
    embed(user_ns=ns, header=backend.header)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def shell_django(session: DjangoSession, backend: ShellBackend):
    """Start an IPython shell whose namespace includes the Django DB
    session plus whatever the backend contributes."""
    # Backend entries take precedence over the 'session' key, exactly as
    # dict.update() did in the original implementation.
    ns = {'session': session, **backend.get_namespace()}
    embed(user_ns=ns, header=backend.header)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize_query(func):
""" Ensure any SQLExpression instances are serialized""" |
# Decorator body: serialize SQLExpression-like objects (anything exposing
# .serialize()) into plain SQL strings before invoking the wrapped method.
@functools.wraps(func)
def wrapper(self, query, *args, **kwargs):
    if hasattr(query, 'serialize'):
        query = query.serialize()
    # NOTE(review): basestring is Python 2 only -- this raises NameError
    # on Python 3; use str there.
    assert isinstance(query, basestring), 'Expected query to be string'
    if self.debug:
        # Echo the final SQL when the owner object runs in debug mode.
        print('SQL:', query)
    return func(self, query, *args, **kwargs)
return wrapper
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def autocomplete_view(self, request):
""" Searches in the fields of the given related model and returns the result as a simple string to be used by the jQuery Autocomplete plugin """ |
# Request parameters: q = search string, app_label/model_name identify the
# related model, search_fields = comma-separated lookup spec, object_pk =
# reverse lookup (pk -> display string).
query = request.GET.get('q', None)
app_label = request.GET.get('app_label', None)
model_name = request.GET.get('model_name', None)
search_fields = request.GET.get('search_fields', None)
object_pk = request.GET.get('object_pk', None)
# Per-model display function; fall back to plain str().
try:
    to_string_function = self.related_string_functions[model_name]
except KeyError:
    to_string_function = lambda x: str(x)
if search_fields and app_label and model_name and (query or object_pk):
    def construct_search(field_name):
        # use different lookup methods depending on the notation
        # (admin-style prefixes: ^ startswith, = exact, @ fulltext).
        if field_name.startswith('^'):
            fmt, name = "{}__istartswith", field_name[1:]
        elif field_name.startswith('='):
            fmt, name = "{}__iexact", field_name[1:]
        elif field_name.startswith('@'):
            fmt, name = "{}__search", field_name[1:]
        else:
            fmt, name = "{}__icontains", field_name
        return fmt.format(name)
    model = apps.get_model(app_label, model_name)
    queryset = model._default_manager.all()
    data = ''
    if query:
        # AND together the per-word filters; each word may match any of
        # the configured search fields (OR).
        for bit in query.split():
            or_queries = [
                models.Q(**{construct_search(smart_str(field_name)): smart_str(bit)})
                for field_name
                in search_fields.split(',')
            ]
            other_qs = QuerySet(model)
            other_qs.query.select_related = queryset.query.select_related
            other_qs = other_qs.filter(reduce(operator.or_, or_queries))
            queryset = queryset & other_qs
        if self.autocomplete_limit:
            queryset = queryset[:self.autocomplete_limit]
        # Response format expected by the jQuery Autocomplete plugin:
        # one "<display>|<pk>" entry per line.
        data = ''.join([
            '{}|{}\n'.format(to_string_function(f), f.pk)
            for f
            in queryset
        ])
    elif object_pk:
        # Reverse lookup: resolve a pk to its display string; a missing
        # object deliberately yields an empty response body.
        try:
            obj = queryset.get(pk=object_pk)
        except:
            pass
        else:
            data = to_string_function(obj)
    return HttpResponse(data)
return HttpResponseNotFound()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, context):
"""We have to overwrite this method because we don't want an implicit context """ |
# Split the step's bound arguments into positional/keyword; explicit ones
# come straight from the feature file.
args = []
kwargs = {}
for arg in self.explicit_arguments:
    if arg.name is not None:
        kwargs[arg.name] = arg.value
    else:
        args.append(arg.value)
# Implicit arguments are resolved from the behave context via their type
# annotations (Table/Context/Text are built-ins; anything else is looked
# up under the CONTEXT_NAMESPACE key for its type name).
for arg in self.implicit_arguments:
    if arg.name is not None:
        annotation = self.signature.parameters[arg.name].annotation
        annotation_name = annotation
        if not isinstance(annotation, str):
            annotation_name = annotation.__name__
        if annotation is Table:
            value = context.table
        elif annotation is Context:
            value = context
        elif annotation is Text:
            value = context.text
        elif annotation is inspect._empty:
            # Unannotated implicit parameters cannot be resolved -- fail
            # loudly with the offending step's qualified name.
            raise RuntimeError(
                "Parameter '{}' of step implementation '{}{}' does not have a type! Please specify it in the correct steps file.".format(
                    arg.name,
                    self.func.__qualname__,
                    self.signature,
                )
            )
        elif CONTEXT_NAMESPACE.format(annotation_name) in context:
            value = context.__getattr__(CONTEXT_NAMESPACE.format(annotation_name))
        else:
            raise RuntimeError(
                "'{}' was not found in context. Is a context parameter missing?".format(arg.name))
        kwargs[arg.name] = value
    else:
        raise RuntimeError("Argument name shouldn't be None")
# Execute the user step in behave's user mode.
with context.user_mode():
    return_value = self.func(*args, **kwargs)
# If the step declares a return annotation, stash its result back into
# the context under that type's namespaced key for later steps.
return_annotation = self.signature.return_annotation
if return_annotation == inspect.Signature.empty:
    return
if not isinstance(return_annotation, str):
    return_annotation = return_annotation.__name__
context.__setattr__(CONTEXT_NAMESPACE.format(return_annotation), return_value)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def liftover_bed( bed, chain, mapped=None, unmapped=None, liftOver_path='liftOver', ):
""" Lift over a bed file using a given chain file. Parameters bed : str or pybedtools.BedTool Coordinates to lift over. chain : str Path to chain file to use for lift over. mapped : str Path for bed file with coordinates that are lifted over correctly. unmapped : str Path for text file to store coordinates that did not lift over correctly. If this is not provided, these are discarded. liftOver_path : str Path to liftOver executable if not in path. Returns ------- new_coords : pandas.DataFrame Pandas data frame with lift over results. Index is old coordinates in the form chrom:start-end and columns are chrom, start, end and loc (chrom:start-end) in new coordinate system. """ |
import subprocess
import pybedtools as pbt
# Use temp files when the caller did not ask to keep the mapped/unmapped
# output on disk.
if mapped == None:
    import tempfile
    mapped = tempfile.NamedTemporaryFile()
    mname = mapped.name
else:
    mname = mapped
if unmapped == None:
    import tempfile
    unmapped = tempfile.NamedTemporaryFile()
    uname = unmapped.name
else:
    uname = unmapped
# Accept either a path or an existing BedTool.
if type(bed) == str:
    bt = pbt.BedTool(bed)
elif type(bed) == pbt.bedtool.BedTool:
    bt = bed
else:
    # Unsupported input type; abort (no message is printed).
    sys.exit(1)
bt = bt.sort()
c = '{} {} {} {} {}'.format(liftOver_path, bt.fn, chain, mname, uname)
subprocess.check_call(c, shell=True)
# liftOver's unmapped file alternates "#reason" comment lines with the
# failing intervals, hence the [1::2] stride.
with open(uname) as f:
    missing = pbt.BedTool(''.join([x for x in f.readlines()[1::2]]),
                          from_string=True)
# Drop intervals that failed to lift so old/new line up positionally.
bt = bt.subtract(missing)
bt_mapped = pbt.BedTool(mname)
old_loc = []
for r in bt:
    old_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
new_loc = []
new_chrom = []
new_start = []
new_end = []
for r in bt_mapped:
    new_loc.append('{}:{}-{}'.format(r.chrom, r.start, r.end))
    new_chrom.append(r.chrom)
    new_start.append(r.start)
    new_end.append(r.end)
# Index = old coordinates, columns = new coordinate components.
new_coords = pd.DataFrame({'loc':new_loc, 'chrom': new_chrom,
                           'start': new_start, 'end': new_end},
                          index=old_loc)
# Close (and thereby delete) any NamedTemporaryFile objects; plain path
# strings have no close() and are left on disk.
for f in [mapped, unmapped]:
    try:
        f.close()
    except AttributeError:
        continue
return new_coords
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def deseq2_size_factors(counts, meta, design):
""" Get size factors for counts using DESeq2. Parameters counts : pandas.DataFrame Counts to pass to DESeq2. meta : pandas.DataFrame Pandas dataframe whose index matches the columns of counts. This is passed to DESeq2's colData. design : str Design like ~subject_id that will be passed to DESeq2. The design variables should match columns in meta. Returns ------- sf : pandas.Series Series whose index matches the columns of counts and whose values are the size factors from DESeq2. Divide each column by its size factor to obtain normalized counts. """ |
import rpy2.robjects as r
from rpy2.robjects import pandas2ri
# Enable automatic pandas <-> R data.frame conversion.
pandas2ri.activate()
r.r('suppressMessages(library(DESeq2))')
# Push the Python data into the R global environment, then drive DESeq2
# entirely through R source strings.
r.globalenv['counts'] = counts
r.globalenv['meta'] = meta
r.r('dds = DESeqDataSetFromMatrix(countData=counts, colData=meta, '
    'design={})'.format(design))
r.r('dds = estimateSizeFactors(dds)')
r.r('sf = sizeFactors(dds)')
sf = r.globalenv['sf']
# Re-attach the sample names so callers can divide counts column-wise.
return pd.Series(sf, index=counts.columns)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def goseq_gene_enrichment(genes, sig, plot_fn=None, length_correct=True):
    """Perform goseq GO enrichment for an Ensembl (hg19) gene set.

    Parameters
    ----------
    genes : list
        All genes as Ensembl IDs.
    sig : list
        Booleans indicating whether each gene is significant.
    plot_fn : str
        Path to save the length-bias plot to; if omitted, the plot file
        produced by R is deleted.
    length_correct : bool
        Correct for length bias (otherwise use the hypergeometric test).

    Returns
    -------
    go_results : pandas.DataFrame
        goseq results plus Benjamini-Hochberg corrected p-values.
    """
    import os
    import readline
    import statsmodels.stats.multitest as smm
    import rpy2.robjects as r
    genes = list(genes)
    sig = [bool(x) for x in sig]
    r.r('suppressMessages(library(goseq))')
    r.globalenv['genes'] = list(genes)
    r.globalenv['group'] = list(sig)
    r.r('group = as.logical(group)')
    r.r('names(group) = genes')
    # nullp writes the length-bias plot to Rplots.pdf as a side effect.
    r.r('pwf = nullp(group, "hg19", "ensGene")')
    if length_correct:
        r.r('wall = goseq(pwf, "hg19", "ensGene")')
    else:
        r.r('wall = goseq(pwf, "hg19", "ensGene", method="Hypergeometric")')
    r.r('t = as.data.frame(wall)')
    t = r.globalenv['t']
    # Rebuild the R data.frame column-by-column as a pandas DataFrame.
    go_results = pd.DataFrame(columns=list(t.colnames))
    for i, c in enumerate(go_results.columns):
        go_results[c] = list(t[i])
    # FIX: 'fdr_i' is not a valid multipletests method (raises ValueError);
    # Benjamini-Hochberg is 'fdr_bh'.  Also avoid clobbering the rpy2
    # alias ``r`` when unpacking the results.
    _rej, corrected, _, _ = smm.multipletests(
        go_results.over_represented_pvalue, alpha=0.05, method='fdr_bh')
    go_results['over_represented_pvalue_bh'] = corrected
    _rej, corrected, _, _ = smm.multipletests(
        go_results.under_represented_pvalue, alpha=0.05, method='fdr_bh')
    go_results['under_represented_pvalue_bh'] = corrected
    go_results.index = go_results.category
    go_results = go_results.drop('category', axis=1)
    # Keep or discard the length-bias plot emitted by nullp.
    if plot_fn and os.path.exists('Rplots.pdf'):
        from os import rename
        rename('Rplots.pdf', plot_fn)
    elif os.path.exists('Rplots.pdf'):
        from os import remove
        remove('Rplots.pdf')
    return go_results
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def categories_to_colors(cats, colormap=None):
    """Map categorical data to colors.

    Parameters
    ----------
    cats : pandas.Series or list
        Categorical data.
    colormap : list
        List of RGB triples; defaults to this module's tableau20 palette.

    Returns
    -------
    legend : pd.Series
        Colors indexed by the original categories.  (A per-observation
        color Series used to be returned as well; that output was removed.)
    """
    palette = tableau20 if colormap is None else colormap
    if type(cats) != pd.Series:
        cats = pd.Series(cats)
    mapping = dict(zip(set(cats), palette))
    return pd.Series(mapping)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def plot_color_legend(legend, horizontal=False, ax=None):
""" Plot a pandas Series with labels and colors. Parameters legend : pandas.Series Pandas Series whose values are RGB triples and whose index contains categorical labels. horizontal : bool If True, plot horizontally. ax : matplotlib.axis Axis to plot on. Returns ------- ax : matplotlib.axis Plot axis. """ |
import matplotlib.pyplot as plt
import numpy as np
# Shape the colors into a 1 x N x 3 image strip for imshow.
t = np.array([np.array([x for x in legend])])
if ax is None:
    fig, ax = plt.subplots(1, 1)
if horizontal:
    # One row of color swatches, labels along the x axis.
    ax.imshow(t, interpolation='none')
    ax.set_yticks([])
    ax.set_xticks(np.arange(0, legend.shape[0]))
    t = ax.set_xticklabels(legend.index)
else:
    # One column of swatches (N x 1 x 3), labels along the y axis.
    t = t.reshape([legend.shape[0], 1, 3])
    ax.imshow(t, interpolation='none')
    ax.set_xticks([])
    ax.set_yticks(np.arange(0, legend.shape[0]))
    t = ax.set_yticklabels(legend.index)
return ax
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def make_color_legend_rects(colors, labels=None):
    """Build zero-size Rectangle patches for legend entries.

    Parameters
    ----------
    colors : pandas.Series or list
        Series of colors indexed by label, or a plain list of colors
        (in which case *labels* supplies the matching labels).
    labels : list
        Labels corresponding to *colors* when it is a list.

    Returns
    -------
    out : pd.Series
        Rectangle patches indexed by legend label.  Add each patch with
        ``ax.add_patch(r)`` and pass ``out.values`` / ``out.index`` to
        ``ax.legend``.
    """
    from matplotlib.pyplot import Rectangle
    if labels:
        colors = pd.Series(dict(zip(labels, colors)))
    patches = [Rectangle((0, 0), 0, 0, fc=colors[label])
               for label in colors.index]
    return pd.Series(patches, index=colors.index)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def pc_correlation(self, covariates, num_pc=5):
""" Calculate the correlation between the first num_pc prinicipal components and known covariates. The size and index of covariates determines whether u or v is used. Parameters covariates : pandas.DataFrame Dataframe of covariates whose index corresponds to the index of either u or v. num_pc : int Number of principal components to correlate with. Returns ------- corr : pandas.Panel Panel with correlation values and p-values. """ |
from scipy.stats import spearmanr
# Decide whether the covariates align with u (row space) or v (column
# space) by comparing sizes and index overlap.
if (covariates.shape[0] == self.u.shape[0] and
    len(set(covariates.index) & set(self.u.index)) == self.u.shape[0]):
    mat = self.u
elif (covariates.shape[0] == self.v.shape[0] and
    len(set(covariates.index) & set(self.v.index)) == self.v.shape[0]):
    mat = self.v
else:
    import sys
    sys.stderr.write('Covariates differ in size from input data.\n')
    sys.exit(1)
# NOTE(review): pd.Panel (and .ix) were removed in pandas >= 0.25/1.0;
# this requires an older pandas -- confirm before upgrading.
corr = pd.Panel(items=['rho', 'pvalue'],
                major_axis=covariates.columns,
                minor_axis=mat.columns[0:num_pc])
# Spearman correlation of each covariate against each of the first
# num_pc principal components.
for i in corr.major_axis:
    for j in corr.minor_axis:
        rho, p = spearmanr(covariates[i], mat[j])
        corr.ix['rho', i, j] = rho
        corr.ix['pvalue', i, j] = p
return corr
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def can_handle(self, text: str) -> bool:
    """Return True if this parser can parse *text*.

    Attempts a full dry-run parse; any exception or empty intermediate
    result means the format is not ours.
    """
    try:
        chunks = self.split_changelogs(text)
        if not chunks:
            return False
        for chunk in chunks:
            raw_header, raw_changes = self.split_changelog(chunk)
            if not any((raw_header, raw_changes)):
                return False
            parsed_header = self.parse_header(raw_header)
            parsed_changes = self.parse_changes(raw_changes)
            if not any((parsed_header, parsed_changes)):
                return False
    except Exception:
        # Deliberately broad: this is a capability probe, not a parse.
        return False
    return True
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remember(empowered, powerupClass, interface):
    """Power up *empowered* so that adapting it to *interface* will
    instantiate *powerupClass* (looked up by fully-qualified name) with
    the empowered's store.

    :param empowered: the Empowered (Store or Item) to power up.
    :param powerupClass: class to instantiate on adaptation.
    :param interface: the powerup's interface.
    :returns: ``None``
    """
    qualified = fullyQualifiedName(powerupClass)
    stored = _StoredByName(store=empowered.store, className=qualified)
    empowered.powerUp(stored, interface)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def forget(empowered, powerupClass, interface):
    """Forget powerups previously stored with ``remember``.

    :param empowered: the Empowered (Store or Item) to power down.
    :param powerupClass: class whose powerups will be forgotten.
    :param interface: interface the powerups were installed for.
    :raises ValueError: the class wasn't previously remembered.
    :returns: ``None``
    """
    qualified = fullyQualifiedName(powerupClass)
    matches = empowered.store.query(
        _StoredByName, _StoredByName.className == qualified)
    if not matches.count():
        raise ValueError(
            "No named powerups for {} (interface: {})".format(
                powerupClass, interface))
    for stored in matches:
        empowered.powerDown(stored, interface)
        stored.deleteFromStore()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def init_event_loop(self):
""" Every cell should have its own event loop for proper containment. The type of event loop is not so important however. """ |
# Each cell owns a private event loop for isolation.
self.loop = asyncio.new_event_loop()
self.loop.set_debug(self.debug)
# _set_coroutine_wrapper is a private CPython API and not present on all
# loop implementations, hence the feature check.
if hasattr(self.loop, '_set_coroutine_wrapper'):
    self.loop._set_coroutine_wrapper(self.debug)
elif self.debug:
    warnings.warn("Cannot set debug on loop: %s" % self.loop)
self.loop_policy = IOCellEventLoopPolicy(self.loop)
# Save the existing exception handler so cleanup_event_loop can restore
# it; again a private attribute, so probe before touching it.
if not hasattr(self.loop, '_exception_handler'):
    warnings.warn("Cannot save exception handler for: %s" % self.loop)
    self.loop_exception_handler_save = None
else:
    self.loop_exception_handler_save = self.loop._exception_handler
self.loop.set_exception_handler(self.loop_exception_handler)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cleanup_event_loop(self):
    """Cancel outstanding tasks, close this cell's event loop forever and
    restore the saved exception handler."""
    # asyncio.Task.all_tasks() was removed in Python 3.9; the module-level
    # asyncio.all_tasks() is the supported replacement (it returns only
    # unfinished tasks, which is exactly what needs cancelling).
    for task in asyncio.all_tasks(loop=self.loop):
        if self.debug:
            warnings.warn('Cancelling task: %s' % task)
        # Silence the "Task was destroyed but it is pending" warning for
        # tasks we are deliberately abandoning.
        task._log_destroy_pending = False
        task.cancel()
    self.loop.close()
    self.loop.set_exception_handler(self.loop_exception_handler_save)
    self.loop_exception_handler_save = None
    self.loop_policy = None
    self.loop = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_tier(self, coro, **kwargs):
""" Add a coroutine to the cell as a task tier. The source can be a single value or a list of either `Tier` types or coroutine functions already added to a `Tier` via `add_tier`. """ |
# Tiers cannot be added once finalize() has wired the data flow.
self.assertNotFinalized()
assert asyncio.iscoroutinefunction(coro)
tier = self.Tier(self, coro, **kwargs)
self.tiers.append(tier)
# Keep a coro -> tier mapping so decorators can look tiers up later.
self.tiers_coro_map[coro] = tier
return tier
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def append_tier(self, coro, **kwargs):
    """Add *coro* as a new tier fed by the current tail tier, pipe-style.

    The first tier added this way has no source.
    """
    if self.tiers:
        return self.add_tier(coro, source=self.tiers[-1], **kwargs)
    return self.add_tier(coro, source=None, **kwargs)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tier(self, *args, append=True, source=None, **kwargs):
""" Function decorator for a tier coroutine. If the function being decorated is not already a coroutine function it will be wrapped. """ |
# Guard against @cell.tier (without parens): the decorated function would
# arrive as the sole positional argument.
if len(args) == 1 and not kwargs and callable(args[0]):
    raise TypeError('Uncalled decorator syntax is invalid')
def decorator(coro):
    # Wrap plain functions so every tier is a coroutine function.
    # NOTE(review): asyncio.coroutine was removed in Python 3.11.
    if not asyncio.iscoroutinefunction(coro):
        coro = asyncio.coroutine(coro)
    # Default behaviour pipes from the tail tier; an explicit source (or
    # append=False) uses add_tier directly.
    if append and source is None:
        self.append_tier(coro, *args, **kwargs)
    else:
        self.add_tier(coro, *args, source=source, **kwargs)
    return coro
return decorator
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cleaner(self, coro):
    """Decorator registering *coro* as a cleanup coroutine.

    Plain functions are wrapped into coroutine functions first; the
    (possibly wrapped) callable is returned so decoration is transparent.
    """
    if asyncio.iscoroutinefunction(coro):
        wrapped = coro
    else:
        wrapped = asyncio.coroutine(coro)
    self.add_cleaner(wrapped)
    return wrapped
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def finalize(self):
""" Look at our tiers and setup the final data flow. Once this is run a cell can not be modified again. """ |
self.assertNotFinalized()
# Tiers with no sources start the flow; tiers with no destinations feed
# the synthetic output tier added below.
starters = []
finishers = []
for x in self.tiers:
    if not x.sources:
        starters.append(x)
    if not x.dests:
        finishers.append(x)
self.add_tier(self.output_feed, source=finishers)
self.coord.setup_wrap(self)
# After this point add_tier()/append_tier() will refuse new tiers.
self.finalized = True
return starters
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def output(self):
""" Produce a classic generator for this cell's final results. """ |
# Freeze the data flow, then expose the final results as a classic
# generator; the cell is always closed when the consumer stops.
starters = self.finalize()
try:
    yield from self._output(starters)
finally:
    self.close()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def event_loop(self):
    """Run a single iteration of this cell's event loop.

    Uses the private CPython ``_run_once`` fast path when available and
    falls back to a stop-after-one-pass ``run_forever`` otherwise.
    """
    # FIX: the original checked hasattr(self.loop, '._run_once') -- the
    # stray leading dot made the check always false, so the fast path
    # was never taken.
    if hasattr(self.loop, '_run_once'):
        # run_forever() normally sets _thread_id; mimic that so callbacks
        # that assert loop ownership behave correctly.
        self.loop._thread_id = threading.get_ident()
        try:
            self.loop._run_once()
        finally:
            self.loop._thread_id = None
    else:
        self.loop.call_soon(self.loop.stop)
        self.loop.run_forever()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def clean(self):
""" Run all of the cleaners added by the user. """ |
# Invoke every registered cleanup coroutine concurrently and wait for
# them all.  NOTE(review): asyncio.wait's ``loop`` argument was removed
# in Python 3.10 -- confirm target interpreter.
if self.cleaners:
    yield from asyncio.wait([x() for x in self.cleaners],
                            loop=self.loop)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_html_values(self, pydict, recovery_name=True):
    """Convert a raw get-response dict to human-readable field names,
    using the html data format.

    Iterates this object's fields; only keys present in *pydict* are
    copied.  When *recovery_name* is true, keys are relabelled with the
    field's display name, otherwise the original key is kept.
    """
    result = {"id": pydict["id"]}
    for field in self:
        if field.key not in pydict:
            continue
        label = field.name if recovery_name else field.key
        result[label] = pydict[field.key]
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_raw_values(self, pydict, recovery_name=True):
    """Convert a raw get-response dict to human-readable field names,
    using the raw ("<key>_raw") data format.

    Iterates this object's fields; only "<key>_raw" entries present in
    *pydict* are copied.  When *recovery_name* is true, keys are
    relabelled with the field's display name, otherwise the original
    key is kept.
    """
    result = {"id": pydict["id"]}
    for field in self:
        raw_key = "%s_raw" % field.key
        if raw_key not in pydict:
            continue
        label = field.name if recovery_name else field.key
        result[label] = pydict[raw_key]
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def convert_values(self, pydict):
    """Convert knackhq data-type instances to JSON-friendly values.

    Values exposing a ``_data`` attribute (BaseDataType instances) are
    replaced by that attribute; everything else passes through unchanged.
    """
    return {key: getattr(value, "_data", value)
            for key, value in pydict.items()}
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def insert(self, data, using_name=True):
    """Insert one record or a list of records.

    :param data: a dict, or a list of dicts.
    :param using_name: pass True (default) when *data* uses field names
        rather than raw keys.

    **中文文档**

    插入多条记录
    """
    records = data if isinstance(data, list) else [data]
    for record in records:
        self.insert_one(record, using_name=using_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, url, params=dict()):
"""Http get method wrapper, to support search. """ |
# Best-effort GET: any failure (network, non-JSON body) is reported to
# stdout and collapsed to the sentinel string "error" -- callers depend
# on that sentinel, so keep the broad except.
try:
    res = requests.get(url, headers=self.headers, params=params)
    return json.loads(res.text)
except Exception as e:
    print(e)
    return "error"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def post(self, url, data):
"""Http post method wrapper, to support insert. """ |
# Best-effort POST with a JSON-encoded body; failures collapse to the
# sentinel string "error" (same contract as get/delete).
try:
    res = requests.post(
        url, headers=self.headers, data=json.dumps(data))
    return json.loads(res.text)
except Exception as e:
    print(e)
    return "error"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, url):
"""Http delete method wrapper, to support delete. """ |
# Best-effort DELETE; failures collapse to the sentinel string "error"
# (same contract as get/post).
try:
    res = requests.delete(url, headers=self.headers)
    return json.loads(res.text)
except Exception as e:
    print(e)
    return "error"
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def combine_express_output(fnL, column='eff_counts', names=None, tg=None,
                           define_sample_name=None, debug=False):
    """Combine eXpress output files into per-transcript (and per-gene) tables.

    Parameters
    ----------
    fnL : list of str
        Paths to results.xprs files (tab-separated, target_id in the
        second column).
    column : str
        Column of the eXpress output to collect (e.g. 'eff_counts').
    names : list of str
        Column names for the output; overrides define_sample_name.
    tg : str
        Path to a tab-separated transcript-to-gene mapping file:
        transcript in the first column, gene in the second.
    define_sample_name : callable
        Maps a filename to a sample (column) name; identity by default.
    debug : bool
        Unused; kept for interface compatibility.

    Returns
    -------
    (transcriptDF, geneDF)
        geneDF is None when no transcript-to-gene mapping is given.
    """
    if names is not None:
        # Kept as an assert to preserve the original failure mode.
        assert len(names) == len(fnL)
    if define_sample_name is None:
        define_sample_name = lambda fn: fn
    series_list = []
    for i, fn in enumerate(fnL):
        sample = names[i] if names is not None else define_sample_name(fn)
        # read_csv(sep='\t') replaces the deprecated pd.read_table.
        tDF = pd.read_csv(fn, sep='\t', index_col=1, header=0)
        se = tDF[column]
        se.name = sample
        series_list.append(se)
    transcriptDF = pd.DataFrame(series_list).T
    transcriptDF.index.name = 'transcript'
    # There should not be any missing values: all files must have been
    # produced against the same reference.
    if transcriptDF.shape != transcriptDF.dropna().shape:
        sys.stderr.write('''Missing values in eXpress output. Check that the
        same reference was used for all output files.\n''')
        sys.exit(1)
    if tg is None:
        return transcriptDF, None
    # Explicit two-column parse of the transcript-to-gene mapping.
    tgDF = pd.read_csv(tg, sep='\t', header=None,
                       names=['transcript', 'gene_id'], index_col=0)
    geneDF = transcriptDF.copy()
    # .reindex replaces the removed DataFrame.ix accessor (pandas >= 1.0)
    # and tolerates transcripts missing from the mapping, like .ix did.
    geneDF['gene'] = tgDF['gene_id'].reindex(geneDF.index)
    geneDF = geneDF.groupby('gene').sum()
    return transcriptDF, geneDF
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(path, encoding="utf-8"):
    """Read *path* and decode it, auto-detecting the codec on failure.

    Tries *encoding* first; when decoding fails, falls back to the
    encoding guessed by chardet.

    :param path: file to read.
    :param encoding: codec tried first (utf-8 by default).
    :returns: the decoded text.
    """
    with open(path, "rb") as f:
        content = f.read()
    try:
        return content.decode(encoding)
    except (UnicodeDecodeError, LookupError):
        # Narrowed from a bare `except:` — only decode problems (wrong
        # bytes or an unknown codec name) should trigger the fallback.
        detected = chardet.detect(content)
        return content.decode(detected["encoding"])
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(text, path, encoding="utf-8"):
    """Write *text* to *path*, encoded with *encoding*.

    :param text: unicode string to write.
    :param path: destination file path; overwritten if it exists.
    :param encoding: codec used to encode *text*. New parameter; the
        default matches the previously hard-coded utf-8, so existing
        callers are unaffected.
    """
    with open(path, "wb") as f:
        f.write(text.encode(encoding))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _delay(self, ms):
    """Wait *ms* milliseconds, or the instance default when *ms* is falsy.

    No delay is issued at all when both *ms* and ``self.default_delay``
    are falsy.
    """
    wait = ms if ms else self.default_delay
    if wait:
        self.Delay(wait)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def AltTab(self, n=1, delay=0):
    """Hold Alt, press Tab *n* times, then release Alt (window switching)."""
    self._delay(delay)
    actions = [("KeyDown", BoardKey.Alt)]
    actions.extend([("KeyPress", BoardKey.Tab)] * n)
    actions.append(("KeyUp", BoardKey.Alt))
    for action, key in actions:
        self.add(Command(action, '%s "%s", %s' % (action, key, 1)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Ctrl_C(self, delay=0):
    """Ctrl + C shortcut."""
    self._delay(delay)
    for action, key in (("KeyDown", BoardKey.Ctrl),
                        ("KeyPress", BoardKey.C),
                        ("KeyUp", BoardKey.Ctrl)):
        self.add(Command(action, '%s "%s", %s' % (action, key, 1)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Ctrl_V(self, delay=0):
    """Ctrl + V shortcut."""
    self._delay(delay)
    for action, key in (("KeyDown", BoardKey.Ctrl),
                        ("KeyPress", BoardKey.V),
                        ("KeyUp", BoardKey.Ctrl)):
        self.add(Command(action, '%s "%s", %s' % (action, key, 1)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def Ctrl_W(self, delay=0):
    """Ctrl + W shortcut."""
    self._delay(delay)
    for action, key in (("KeyDown", BoardKey.Ctrl),
                        ("KeyPress", BoardKey.W),
                        ("KeyUp", BoardKey.Ctrl)):
        self.add(Command(action, '%s "%s", %s' % (action, key, 1)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def randomize(length=6, choices=None):
    """Return a random string of *length* characters drawn from *choices*.

    :param length: number of characters to generate.
    :param choices: pool of characters (string or sequence); falls back
        to ascii lowercase letters when None or empty.
    """
    # Fixed: isinstance instead of `type(choices) == str`, so str
    # subclasses are handled too.
    if isinstance(choices, str):
        choices = list(choices)
    choices = choices or ascii_lowercase
    return "".join(choice(choices) for _ in range(length))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_scores_for_node(G, node, depth_limit=22, number_of_recommendations=None, impact_mode=10):
    """Calculate recommendation scores for records reachable from *node*.

    Walks the graph from *node* via ``dfs_edges``, groups the resulting
    path weights per reachable node, combines them with
    ``calc_weight_of_multiple_paths`` and returns the top rows sorted by
    combined score.

    :param G: graph walked by dfs_edges (networkx 1.x style API).
    :param node: start node id.
    :param depth_limit: maximum DFS depth forwarded to dfs_edges.
    :param number_of_recommendations: row cap on the result (None = all).
    :param impact_mode: selects the divisor that dampens the
        multi-path bonus; see the branch below for the supported values.
    :returns: pandas.DataFrame with columns Node, Score, Score_Highest,
        Paths, sorted by Score descending.
    """ |
    n, w, dep, _ = dfs_edges(G, node, depth_limit, "Record")
    count_total_ways = len(n)
    # print "Number of paths {}".format(len(n))
    # Map impact_mode to the divisor used for the path-count bonus.
    # NOTE(review): any impact_mode outside {0, 1, 2, 10, 11} leaves
    # impact_div unbound and raises UnboundLocalError further down —
    # confirm callers only pass these values.
    if impact_mode == 0:
        impact_div = 12
    elif impact_mode == 1:
        impact_div = 1000
    elif impact_mode == 2:
        impact_div = 100
    elif impact_mode == 10:
        impact_div = count_total_ways
    elif impact_mode == 11:
        # NOTE(review): true division on Python 3 yields a float here;
        # under Python 2 this was integer division — confirm intent.
        impact_div = count_total_ways/2
    d_ = {'Nodes': n, 'Scores': w, 'Depth': dep}
    d = pd.DataFrame(data=d_)
    # Aggressively drop the raw walk arrays and force a GC pass — the
    # walk can produce tens of millions of entries (see dfs_edges' cap).
    del n, w, dep, d_
    n, w, dep = None, None, None
    gc.collect()
    # Compact typed arrays for the aggregated per-node results.
    nodes = array('I')
    weight_high = array('f')
    weight_new = array('f')
    ways = array('I')
    nodes_with_weight = d.groupby('Nodes')
    del d
    gc.collect()
    # print "Number nodes {}".format(len(nodes_with_weight))
    # One group per reachable node; each group holds all path scores
    # that ended on that node.
    for node, end_nodes in nodes_with_weight:
        nodes.append(node)
        new_score, highest_score, number_of_paths = \
            calc_weight_of_multiple_paths(end_nodes, impact_div)
        weight_high.append(highest_score)
        weight_new.append(new_score)
        ways.append(number_of_paths)
    new_weights_d = {'Node': nodes, 'Score_Highest': weight_high,
                     'Score': weight_new, 'Paths': ways}
    new_weights = pd.DataFrame(data=new_weights_d)
    del new_weights_d, nodes, weight_high, weight_new, ways
    gc.collect()
    # Numpy sort by score
    new_weights = new_weights.sort_values(by='Score', ascending=False)
    # [:None] is a no-op slice, so None really does mean "all rows".
    new_weights = new_weights[:number_of_recommendations]
    return new_weights
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def dfs_edges(G, start, depth_limit=1, get_only=True, get_path=False):
    """Weighted depth-first walk from *start* (networkx 1.x style API).

    Multiplies edge weights along each path (default weight 1.0) and
    records every newly visited node together with its accumulated path
    weight and depth. Paths are pruned at *depth_limit* and when the
    accumulated weight drops to <= 0.00001.

    :param G: graph exposing ``G.node`` and ``G.edges_iter`` (nx 1.x).
    :param start: node id to start from; a missing start node yields
        empty results (printed, not raised — see below).
    :param depth_limit: maximum path depth to explore.
    :param get_only: when truthy, nodes in the high id range are skipped
        from the output (see NOTE at the check below); callers pass e.g.
        the string "Record", which is simply truthy here.
    :param get_path: when True, also record the node path per hit.
    :returns: tuple ``(nodes, weights, depths, paths)`` — three typed
        arrays plus a list of paths (empty unless get_path).
    """ |
    depth_limit = depth_limit - 1
    # creates unsigned int array (2 Byte)
    output_nodes = array('L')
    output_depth = array('I')
    # creates float array (4 Byte)
    output_weights = array('f')
    apath = []
    if G.node.get(start) is None:
        # raise KeyError('Start node not found')
        # Deliberately soft-fails with empty output instead of raising.
        print('Start node not found')
        return output_nodes, output_weights, output_depth, apath
    visited = set()
    visited.add(start)
    # Save the start node with its data to the stack
    stack = [(start, G.edges_iter(start, data=True), 1.0)]
    # NOTE(review): this second add is redundant — start is already in
    # `visited` two lines above.
    visited.add(start)
    while stack:
        # Hard cap on result size: reset the arrays and bail out so one
        # over-connected start node cannot exhaust memory.
        if len(output_nodes) > 80100100:
            print("To many nodes for: {}".format(start))
            del output_nodes
            del output_weights
            del output_depth
            output_nodes = array('L')
            output_depth = array('I')
            # creates float array (4 Byte)
            output_weights = array('f')
            gc.collect()
            break
        parent, children, weight = stack[-1]
        try:
            # Advance the iterator of the node on top of the stack.
            parent_, child, child_keys = next(children)
            # print "child: {}, parent_data: {}".format(child, parent_data)
            if child not in visited:
                # Path weight = product of edge weights along the path.
                weight = child_keys.get('weight', 1.0) * weight
                visited.add(child)
                # Prune: too deep or weight negligible — un-visit so the
                # node can still be reached via another path.
                if len(stack) >= depth_limit or weight <= 0.00001:
                    visited.remove(child)
                else:
                    stack.append((child, G.edges_iter(child, data=True),
                                  weight))
                # if its not and user.
                # NOTE(review): id-range shortcut replacing the
                # commented-out Type lookup below — presumably ids above
                # 1e11 are non-record (user) nodes; confirm the id scheme.
                if get_only and child > 100000000000:
                    # if get_only and G.node[child].get('Type') != get_only:
                    continue
                output_nodes.append(child)
                output_weights.append(weight)
                output_depth.append(len(stack))
                if get_path:
                    apath.append([step[0] for step in stack])
        except StopIteration:
            # Node exhausted: backtrack and make it reachable again.
            stack.pop()
            visited.remove(parent)
    # if data.get('Type') == "Node":
    return output_nodes, output_weights, output_depth, apath
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def calc_weight_of_multiple_paths(path_scores, impact_div=12):
    """Combine the scores of all paths that reached one node.

    For a single path the score is returned unchanged. For several
    paths the combined score is the best path score plus the mean
    score, boosted by a bonus that grows with the number of paths
    (damped by *impact_div*).

    :param path_scores: DataFrame-like object with a ``Scores`` column.
    :param impact_div: divisor damping the path-count bonus.
    :returns: (combined_score, highest_score, number_of_paths).
    """
    num_paths = len(path_scores)
    if num_paths <= 1:
        only_score = path_scores.Scores.iloc[0]
        return only_score, only_score, num_paths
    total = 0.0
    best = 0.0
    for value in path_scores.Scores:
        total += value
        if value > best:
            best = value
    mean = total / num_paths
    # More paths -> bigger bonus, asymptotically approaching 1.
    count_bonus = num_paths / float(num_paths + impact_div)
    combined = best + ((1 + count_bonus) * mean)
    return combined, best, num_paths
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def recommend_for_record(self, record_id, depth=4, num_reco=10):
    """Return (record_ids, scores) recommended for *record_id*.

    Delegates the graph walk and scoring to ``calc_scores_for_node``
    and unpacks its result frame into two parallel lists.
    """
    scored = calc_scores_for_node(self._graph, record_id, depth, num_reco)
    return scored.Node.tolist(), scored.Score.tolist()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_profile(self, profile_name):
    """Load stored user views into the graph.

    Each view row is ``(user_id, record_id, weight)``; one weighted
    edge is added per row (networkx 1.x positional attr-dict call) and
    the record's view counter in ``self.all_records`` is incremented.

    :param profile_name: name of the stored profile to load.
    :returns: the populated graph.
    """
    profile = self.storage.get_user_profiles(profile_name)
    for row in profile.get_user_views():
        record = int(row[1])
        self._graph.add_edge(int(row[0]), record, {'weight': float(row[2])})
        self.all_records[record] += 1
    return self._graph
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def del_big_nodes(self, grater_than=215):
    """Delete over-connected nodes from the graph.

    Removes every node with more than *grater_than* neighbours and, for
    removed nodes in the high id range, drops the matching entry from
    ``self.valid_user``.

    :param grater_than: neighbour-count threshold above which a node
        is removed.
    """
    # Fixed: the original also accumulated node_paths/node_names/summe/
    # count, none of which were ever used — dead code removed.
    G = self._graph
    doomed = [node for node in G.nodes_iter() if len(G[node]) > grater_than]
    for node in doomed:
        G.remove_node(node)
        # NOTE(review): ids above 1e9 appear to be user nodes here
        # (dfs_edges uses a similar id-range test) — confirm the scheme.
        if node > 1000000000:
            self.valid_user.pop(node)
    print("Nodes deleted: {}".format(len(doomed)))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def punify_filename(filename):
    """Punycode-encode the filename stem, keeping the extension as-is.

    Small hackish workaround for unicode problems with the picflash API.
    """
    stem, extension = splitext(filename)
    encoded_stem = stem.encode('punycode').decode('utf8')
    return encoded_stem + extension
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def upload(apikey, picture, resize=None, rotation='00', noexif=False, callback=None):
    """Prepare and send a regular picflash upload.

    :param str apikey: API key used to authenticate against picflash.
    :param picture: either a path to the picture (str), or the picture
        data as a (name, bytes) tuple/list with the file name first.
    :param str resize: target resolution such as '80x80' (optional).
    :param str rotation: rotate the picture by this value; allowed
        values are 00, 90, 180, 270 (optional).
    :param bool noexif: True to strip exif data (optional).
    :param callback: callable invoked after every read with a single
        argument, usable for progress reporting (optional).
    """
    if isinstance(picture, str):
        picture_name = picture
        with open(picture, 'rb') as source:
            data = source.read()
    elif isinstance(picture, (tuple, list)):
        picture_name, data = picture[0], picture[1]
    else:
        raise TypeError("The second argument must be str or list/tuple. "
                        "Please refer to the documentation for details.")
    check_rotation(rotation)
    check_resize(resize)
    check_callback(callback)
    fields = compose_post(apikey, resize, rotation, noexif)
    fields['Datei[]'] = (punify_filename(basename(picture_name)), data)
    return do_upload(fields, callback)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.