text_prompt
stringlengths 157
13.1k
| code_prompt
stringlengths 7
19.8k
⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_new_enriched_bins_matrixes(region_files, dfs, bin_size):
    """Add enriched bins based on bed files.

    There is no way to find the correspondence between region file and
    matrix file, but it does not matter."""
    dfs = _remove_epic_enriched(dfs)
    # One "Enriched_<basename>" column per region file.
    prefixed = ["Enriched_" + os.path.basename(path) for path in region_files]
    regions = region_files_to_bins(region_files, prefixed, bin_size)
    assert len(regions.columns) == len(dfs)
    enriched = OrderedDict()
    for column, (key, frame) in zip(regions, dfs.items()):
        # Outer join so bins present only in the region file survive.
        enriched[key] = frame.join(regions[column], how="outer").fillna(0)
    return enriched
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def merge_chromosome_dfs(df_tuple):
    # type: (Tuple[pd.DataFrame, pd.DataFrame]) -> pd.DataFrame
    """Merges data from the two strands into strand-agnostic counts."""
    plus_df, minus_df = df_tuple
    index_cols = "Chromosome Bin".split()
    count_column = plus_df.columns[0]
    # Degenerate cases: one strand has no data at all.
    if plus_df.empty:
        return return_other(minus_df, count_column, index_cols)
    if minus_df.empty:
        return return_other(plus_df, count_column, index_cols)
    # Collapse duplicate bins within each strand before merging.
    # TODO: why are there duplicate bins here in the first place?
    per_strand = [strand.groupby(index_cols).sum()
                  for strand in (plus_df, minus_df)]
    # Align the strands on (Chromosome, Bin) and add their counts.
    merged = pd.concat(per_strand, axis=1).fillna(0).sum(axis=1)
    merged = merged.reset_index().sort_values(by="Bin")
    merged.columns = ["Chromosome", "Bin", count_column]
    merged = merged.sort_values(["Chromosome", "Bin"])
    merged[["Bin", count_column]] = merged[["Bin", count_column]].astype(int32)
    merged = merged[[count_column, "Chromosome", "Bin"]]
    return merged.reset_index(drop=True)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_log2fc_bigwigs(matrix, outdir, args):
    # type: (pd.DataFrame, str, Namespace) -> None
    """Create bigwigs from matrix."""
    call("mkdir -p {}".format(outdir), shell=True)
    genome_size_dict = args.chromosome_sizes
    # One output path per treatment bed file: <bed basename>_log2fc.bw
    outpaths = [join(outdir, splitext(basename(bed_file))[0] + "_log2fc.bw")
                for bed_file in matrix[args.treatment]]
    data = create_log2fc_data(matrix, args)
    # Write the bigwigs in parallel, one job per (column, path) pair.
    Parallel(n_jobs=args.number_cores)(
        delayed(_create_bigwig)(bed_column, outpath, genome_size_dict)
        for outpath, bed_column in zip(outpaths, data))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_to_island_expectations_dict(average_window_readcount, current_max_scaled_score, island_eligibility_threshold, island_expectations, gap_contribution):
    # type: ( float, int, float, Dict[int, float], float) -> Dict[int, float]
    """Extend the island-expectation table to a new range of scaled scores.

    Mutates and returns `island_expectations`, filling in expectations for
    scaled scores (current_max_scaled_score, current_max_scaled_score + E_VALUE].
    Can probably be heavily optimized. Time required to run can be seen from
    logging info.
    """
    # Upper bound of the new scaled-score range (E_VALUE, BIN_SIZE,
    # compute_window_score and _poisson are module-level — defined elsewhere).
    scaled_score = current_max_scaled_score + E_VALUE
    for index in range(current_max_scaled_score + 1, scaled_score + 1):
        island_expectation = 0.0
        i = island_eligibility_threshold #i is the number of tags in the added window
        # Scaled score of the island before a window with i tags was appended.
        current_island = int(round(index - compute_window_score(
            i, average_window_readcount) / BIN_SIZE))
        # Sum contributions over all tag counts i whose window score still
        # leaves a non-negative predecessor island score.
        while (current_island >= 0):
            if current_island in island_expectations:
                island_expectation += _poisson(
                    i, average_window_readcount) * island_expectations[
                        current_island]
            i += 1
            current_island = int(round(index - compute_window_score(
                i, average_window_readcount) / BIN_SIZE))
        island_expectation *= gap_contribution
        # Only store non-zero expectations to keep the dict sparse.
        if island_expectation:
            island_expectations[index] = island_expectation
    return island_expectations
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def effective_genome_size(fasta, read_length, nb_cores, tmpdir="/tmp"):
    # type: (str, int, int, str) -> None
    """Compute effective genome size for genome.

    Uses jellyfish to count the k-mers (k = read_length) occurring exactly
    once in the fasta, then reports the ratio of unique k-mers to total
    genome length. Results are printed/logged, not returned.
    """
    idx = Fasta(fasta)
    genome_length = sum([len(c) for c in idx])
    logging.info("Temporary directory: " + tmpdir)
    logging.info("File analyzed: " + fasta)
    logging.info("Genome length: " + str(genome_length))
    print("File analyzed: ", fasta)
    print("Genome length: ", genome_length)
    chromosomes = ", ".join([c.name for c in idx])
    if "_" in chromosomes:
        # Underscored names usually indicate unplaced/alt scaffolds which
        # skew the computation; warn (the full chromosome list is printed).
        print("Warning. The following chromosomes are part of your genome:\n",
              chromosomes.replace(">", "") + "\n",
              file=sys.stderr)
        print(
            "You probably want to remove all chromosomes in your fasta containing '_' for the effective genome size computation to be accurate.",
            file=sys.stderr)
    output_file = os.path.join(tmpdir, '{1}.jf'.format(read_length,
                                                       basename(fasta)))
    # Remove the (potentially large) jellyfish database on interpreter exit.
    atexit.register(
        lambda: call("rm {output_file}".format(output_file=output_file), shell=True))
    # -L 1 -U 1 restricts the count to k-mers occurring exactly once.
    call(
        "jellyfish count -t {nb_cores} -m {read_length} -s {genome_length} -L 1 -U 1 --out-counter-len 1 --counter-len 1 {fasta} -o {output_file}".format(
            **vars()),
        shell=True)
    stats = check_output("jellyfish stats {output_file}".format(
        output_file=output_file),
                         shell=True)
    unique_kmers = int(stats.split()[1])
    effective_genome_size = unique_kmers / genome_length
    logging.info("Number unique {read_length}-mers: ".format(
        read_length=read_length) + str(unique_kmers))
    logging.info("Effective genome size: " + str(effective_genome_size))
    print("Number unique {read_length}-mers: ".format(read_length=read_length),
          unique_kmers)
    print("Effective genome size: ", effective_genome_size)
    # BUG FIX: a genome where every k-mer is unique yields a ratio of exactly
    # 1.0, which the old `< 1` assertion wrongly rejected; only values above
    # 1 are impossible.
    assert effective_genome_size <= 1, "Something wrong happened, effective genome size over 1!"
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_island_bins(df, window_size, genome, args):
    # type: (pd.DataFrame, int, str, Namespace) -> Dict[str, Set[int]]
    """Finds the enriched bins in a df."""
    # need these chromos because the df might not have islands in all chromos
    chromosomes = natsorted(list(args.chromosome_sizes))
    flat = df.reset_index(drop=False)
    chromosome_island_bins = {}  # type: Dict[str, Set[int]]
    for chromosome in chromosomes:
        cdf = flat.loc[flat.Chromosome == chromosome]
        bins = set()  # type: Set[int]
        # Expand every (Start, End) island into its window_size-spaced bins.
        for start, end in zip(cdf.Start.values.tolist(),
                              cdf.End.values.tolist()):
            bins.update(range(int(start), int(end), window_size))
        # Chromosomes without islands simply get an empty set.
        chromosome_island_bins[chromosome] = bins
    return chromosome_island_bins
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_genome_size_dict(genome):
    # type: (str) -> Dict[str,int]
    """Creates genome size dict from string containing data.

    Reads the chromosome-sizes file for the given genome and returns a
    mapping of chromosome name -> length in base pairs.
    """
    size_file = get_genome_size_file(genome)
    size_dict = {}
    # BUG FIX: use a context manager so the file handle is always closed
    # (the original left it open), and avoid shadowing the `genome`
    # parameter with the per-line chromosome name.
    with open(size_file) as handle:
        for line in handle:
            chromosome, length = line.split()
            size_dict[chromosome] = int(length)
    return size_dict
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def compute_score_threshold(average_window_readcount, island_enriched_threshold, gap_contribution, boundary_contribution, genome_length_in_bins):
    # type: (float, int, float, float, float) -> float
    """Find the island score required to call an island enriched.

    What does island_expectations do?
    """
    # Poisson probability of exactly island_enriched_threshold reads in a
    # window under the background model.
    required_p_value = poisson.pmf(island_enriched_threshold,
                                   average_window_readcount)
    prob = boundary_contribution * required_p_value
    score = -log(required_p_value)
    # Scores are discretized into units of BIN_SIZE (module constant).
    current_scaled_score = int(round(score / BIN_SIZE))
    island_expectations_d = {}  # type: Dict[int, float]
    island_expectations_d[current_scaled_score] = prob * genome_length_in_bins
    island_expectations_d[
        0] = boundary_contribution * genome_length_in_bins / gap_contribution
    current_max_scaled_score = current_scaled_score
    interval = int(1 / BIN_SIZE)
    partial_cumu = 0.0
    logging.info("Finding the score required to consider an island enriched.")
    # Keep extending the expectation table until the tail mass falls below
    # E_VALUE_THRESHOLD (the `< 1e-100` clause keeps iterating while the
    # tail is still numerically ~zero at the start).
    while (partial_cumu > E_VALUE_THRESHOLD or partial_cumu < 1e-100):
        current_scaled_score += interval
        current_max_scaled_score = current_scaled_score - interval
        # logging.debug(island_expectations_d)
        if current_scaled_score > current_max_scaled_score:
            # logging.debug(island_expectations_d)
            island_expectations_d = add_to_island_expectations_dict(
                average_window_readcount, current_max_scaled_score,
                island_enriched_threshold, island_expectations_d,
                gap_contribution)
            # Sentinel values forcing at least one tail-mass recomputation.
            partial_cumu = 0.0001
            current_max_scaled_score += 1000
        if max(island_expectations_d) > interval:
            # Tail mass: expectations above the current score window.
            partial_cumu = sum(
                [val
                 for idx, val in island_expectations_d.items()
                 if idx > current_max_scaled_score - interval])
        else:
            partial_cumu = sum(island_expectations_d.values())
    logging.debug("Computing cumulative distribution.")
    score_threshold = generate_cumulative_dist(island_expectations_d,
                                               current_max_scaled_score + 1)
    logging.info("Enriched score threshold for islands: " + str(
        score_threshold))
    return score_threshold
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_readlength(args):
    # type: (Namespace) -> int
    """Estimate length of reads based on 10000 first."""
    # The input file lives under args.treatment or, failing that, args.infiles.
    try:
        bed_file = args.treatment[0]
    except AttributeError:
        bed_file = args.infiles[0]
    # Pick a command that can stream (and decompress) the file on this platform.
    if bed_file.endswith(".gz") and search("linux", platform, IGNORECASE):
        filereader = "zcat "
    elif bed_file.endswith(".gz") and search("darwin", platform, IGNORECASE):
        filereader = "gzcat "
    elif bed_file.endswith(".bz2"):
        filereader = "bzgrep "
    else:
        filereader = "cat "
    output = check_output(filereader + "{} | head -10000".format(bed_file),
                          shell=True)
    # Columns 1 and 2 of a bed file are the interval start and end.
    df = pd.read_table(
        BytesIO(output),
        header=None,
        usecols=[1, 2],
        sep="\t",
        names=["Start", "End"])
    lengths = df.End - df.Start
    mean_readlength = lengths.mean()
    median_readlength = lengths.median()
    max_readlength = lengths.max()
    min_readlength = lengths.min()
    logging.info((
        "Used first 10000 reads of {} to estimate a median read length of {}\n"
        "Mean readlength: {}, max readlength: {}, min readlength: {}.").format(
            bed_file, median_readlength, mean_readlength, max_readlength,
            min_readlength))
    return median_readlength
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_closest_readlength(estimated_readlength):
    # type: (int) -> int
    """Find the predefined readlength closest to the estimated readlength.

    In the case of a tie, choose the shortest readlength."""
    readlengths = [36, 50, 75, 100]
    # min() returns the first minimal element and the candidates are sorted
    # ascending, so ties resolve to the shortest readlength — same behavior
    # as the original manual difference scan, in one idiomatic call.
    return min(readlengths,
               key=lambda readlength: abs(readlength - estimated_readlength))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_version(output):
    """ Parses the supplied output and returns the version string.

    :param output: A string containing the output of running snort.
    :returns: Version string for the version of snort run. None if not found.
    """
    for line in output.splitlines():
        found = VERSION_PATTERN.match(line)
        if found is not None:
            return found.group('version').strip()
    return None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_alert(output):
    """ Parses the supplied output and yields any alerts.

    Example alert format:
    01/28/14-22:26:04.885446 [**] [1:1917:11] INDICATOR-SCAN UPnP service
    discover attempt [**] [Classification: Detection of a Network Scan]
    [Priority: 3] {UDP} 10.1.1.132:58650 -> 239.255.255.250:1900

    :param output: A string containing the output of running snort
    :returns: Generator of snort alert dicts
    """
    for line in output.splitlines():
        match = ALERT_PATTERN.match(line)
        if not match:
            continue
        record = {
            'timestamp': datetime.strptime(match.group('timestamp'),
                                           '%m/%d/%y-%H:%M:%S.%f'),
            'sid': int(match.group('sid')),
            'revision': int(match.group('revision')),
            'priority': int(match.group('priority')),
            'message': match.group('message'),
            'source': match.group('src'),
            'destination': match.group('dest'),
            'protocol': match.group('protocol'),
        }
        # classtype is optional in the alert line.
        classtype = match.group('classtype')
        if classtype:
            record['classtype'] = classtype
        yield record
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, pcap):
    """ Runs snort against the supplied pcap.

    :param pcap: Filepath to pcap file to scan
    :returns: tuple of version, list of alerts
    """
    process = Popen(self._snort_cmd(pcap), stdout=PIPE,
                    stderr=PIPE, universal_newlines=True)
    stdout, stderr = process.communicate()
    if process.returncode != 0:
        raise Exception("\n".join(["Execution failed return code: {0}" \
            .format(process.returncode), stderr or ""]))
    # snort prints its version banner on stderr, alerts on stdout.
    version = parse_version(stderr)
    alerts = list(parse_alert(stdout))
    return (version, alerts)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(self, pcap):
    """ Runs suricata against the supplied pcap.

    :param pcap: Filepath to pcap file to scan
    :returns: tuple of version, list of alerts
    """
    # Suricata writes its alerts to fast.log in a scratch directory.
    tmpdir = tempfile.mkdtemp(prefix='tmpsuri')
    try:
        process = Popen(self._suri_cmd(pcap, tmpdir), stdout=PIPE,
                        stderr=PIPE, universal_newlines=True)
        stdout, stderr = process.communicate()
        if process.returncode != 0:
            raise Exception("\n".join(["Execution failed return code: {0}" \
                .format(process.returncode), stderr or ""]))
        with open(os.path.join(tmpdir, 'fast.log')) as tmp:
            return (parse_version(stdout),
                    list(parse_alert(tmp.read())))
    finally:
        shutil.rmtree(tmpdir)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def analyse_pcap(infile, filename):
    """ Run IDS across the supplied file.

    :param infile: File like object containing pcap data.
    :param filename: Filename of the submitted file.
    :returns: Dictionary of analysis results.
    """
    tmp = tempfile.NamedTemporaryFile(suffix=".pcap", delete=False)
    digest = hashlib.md5()
    results = {'filename': filename,
               'status': 'Failed',
               'apiversion': __version__,
               }
    try:
        total = 0
        # Copy the upload to disk in chunks, hashing and sizing as we go.
        while True:
            chunk = infile.read(16384)
            if not chunk:
                break
            tmp.write(chunk)
            total += len(chunk)
            digest.update(chunk)
        tmp.close()
        results['md5'] = digest.hexdigest()
        results['filesize'] = total
        results.update(runner.run(tmp.name))
    except OSError as ex:
        results['stderr'] = str(ex)
    finally:
        os.remove(tmp.name)
    return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def submit_and_render():
    """ Blocking POST handler for file submission.

    Runs snort on supplied file and returns results as rendered html.
    """
    data = request.files.file
    template = env.get_template("results.html")
    # BUG FIX: the original only did `pass` for a missing upload and then
    # crashed on data.file; mirror api_submit() and report the failure.
    if not data or not hasattr(data, 'file'):
        results = {'status': 'Failed', 'stderr': 'Missing form params'}
        results.update(base)
        return template.render(results)
    results = analyse_pcap(data.file, data.filename)
    results.update(base)
    return template.render(results)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def api_submit():
    """ Blocking POST handler for file submission.

    Runs snort on supplied file and returns results as json text.
    """
    response.content_type = 'application/json'
    upload = request.files.file
    # Reject requests without a usable uploaded file.
    if not upload or not hasattr(upload, 'file'):
        return json.dumps({"status": "Failed", "stderr": "Missing form params"})
    results = analyse_pcap(upload.file, upload.filename)
    return json.dumps(results, default=jsondate, indent=4)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def main():
    """ Main entrypoint for command-line webserver. """
    parser = argparse.ArgumentParser()
    parser.add_argument("-H", "--host", help="Web server Host address to bind to",
                        default="0.0.0.0", action="store", required=False)
    # BUG FIX: type=int so a CLI-supplied port arrives as an int like the
    # default, instead of a string.
    parser.add_argument("-p", "--port", help="Web server Port to bind to",
                        default=8080, type=int, action="store", required=False)
    args = parser.parse_args()
    logging.basicConfig()
    run(host=args.host, port=args.port, reloader=True, server=SERVER)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_pcap(pcap):
    """ Simple test for pcap magic bytes in supplied file.

    :param pcap: File path to Pcap file to check
    :returns: True if content is pcap (magic bytes present), otherwise False.
    """
    # Classic libpcap magic, in both big- and little-endian byte orders.
    magics = (b"\xa1\xb2\xc3\xd4", b"\xd4\xc3\xb2\xa1")
    with open(pcap, 'rb') as handle:
        return handle.read(4) in magics
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _run_ids(runner, pcap):
    """ Runs the specified IDS runner.

    :param runner: Runner instance to use
    :param pcap: File path to pcap for analysis
    :returns: dict of run metadata/alerts
    """
    conf = runner.conf
    run = {'name': conf.get('name'),
           'module': conf.get('module'),
           'ruleset': conf.get('ruleset', 'default'),
           'status': STATUS_FAILED,
           }
    run_start = datetime.now()
    try:
        version, alerts = runner.run(pcap)
        run.update({'version': version or 'Unknown',
                    'status': STATUS_SUCCESS,
                    'alerts': alerts})
    except Exception as ex:
        # Any runner failure is recorded, never propagated.
        run['error'] = str(ex)
    finally:
        run['duration'] = duration(run_start)
    return run
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def run(pcap):
    """ Runs all configured IDS instances against the supplied pcap.

    :param pcap: File path to pcap file to analyse
    :returns: Dict with details and results of run/s
    """
    start = datetime.now()
    errors = []
    status = STATUS_FAILED
    analyses = []
    pool = ThreadPool(MAX_THREADS)
    try:
        if not is_pcap(pcap):
            raise Exception("Not a valid pcap file")
        # Instantiate a configured runner per IDS module.
        runners = []
        for conf in Config().modules.values():
            runner = registry.get(conf['module'])
            if not runner:
                raise Exception("No module named: '{0}' found registered"
                                .format(conf['module']))
            runners.append(runner(conf))
        # launch via worker pool
        analyses = [pool.apply_async(_run_ids, (runner, pcap))
                    for runner in runners]
        analyses = [x.get() for x in analyses]
        # were all runs successful?
        if all(x['status'] == STATUS_SUCCESS for x in analyses):
            status = STATUS_SUCCESS
        # propagate any errors to the main list
        for failed in [x for x in analyses if x['status'] != STATUS_SUCCESS]:
            errors.append("Failed to run {0}: {1}".format(failed['name'],
                                                          failed['error']))
    except Exception as ex:
        errors.append(str(ex))
    finally:
        # BUG FIX: the pool was never shut down, leaking worker threads on
        # every call.
        pool.close()
    return {'start': start,
            'duration': duration(start),
            'status': status,
            'analyses': analyses,
            'errors': errors,
            }
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _set_up_pool_config(self):
    '''
    Helper to configure pool options during DatabaseWrapper initialization.
    '''
    options = self.settings_dict['OPTIONS']
    self._max_conns = options.get('MAX_CONNS',
                                  pool_config_defaults['MAX_CONNS'])
    # MIN_CONNS defaults to MAX_CONNS, i.e. a fully pre-allocated pool.
    self._min_conns = options.get('MIN_CONNS', self._max_conns)
    self._test_on_borrow = options.get('TEST_ON_BORROW',
                                       pool_config_defaults['TEST_ON_BORROW'])
    if self._test_on_borrow:
        self._test_on_borrow_query = options.get(
            'TEST_ON_BORROW_QUERY',
            pool_config_defaults['TEST_ON_BORROW_QUERY'])
    else:
        self._test_on_borrow_query = None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def _create_connection_pool(self, conn_params):
    '''
    Helper to initialize the connection pool.
    '''
    # One more read under the lock to prevent a read/write race condition.
    # (Done here to avoid the overhead of locking each time we get a
    # connection.)
    with connection_pools_lock:
        missing_or_stale = (
            self.alias not in connection_pools or
            connection_pools[self.alias]['settings'] != self.settings_dict)
        if missing_or_stale:
            logger.info("Creating connection pool for db alias %s" % self.alias)
            logger.info(" using MIN_CONNS = %s, MAX_CONNS = %s, TEST_ON_BORROW = %s" % (self._min_conns,
                                                                                        self._max_conns,
                                                                                        self._test_on_borrow))
            from psycopg2 import pool
            connection_pools[self.alias] = {
                'pool': pool.ThreadedConnectionPool(self._min_conns, self._max_conns, **conn_params),
                'settings': dict(self.settings_dict),
            }
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def close(self):
    '''
    Override to return the connection to the pool rather than closing it.
    '''
    conn = self._wrapped_connection
    if conn and self._pool:
        logger.debug("Returning connection %s to pool %s" % (conn, self._pool))
        self._pool.putconn(conn)
        self._wrapped_connection = None
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def b58encode_int(i, default_one=True):
    '''Encode an integer using Base58'''
    # Zero encodes as the first alphabet symbol unless disabled.
    if not i and default_one:
        return alphabet[0]
    digits = []
    while i:
        i, remainder = divmod(i, 58)
        digits.append(alphabet[remainder])
    # Digits were produced least-significant first.
    return "".join(reversed(digits))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def breadcrumb_safe(context, label, viewname, *args, **kwargs):
    """
    Same as breadcrumb but label is not escaped.

    :param context: template context (must carry the request).
    :param label: crumb text; translated here but NOT escaped.
    :param viewname: view name (or URL/model) the crumb links to.
    :returns: empty string so the tag renders nothing inline.
    """
    append_breadcrumb(context, _(label), viewname, args, kwargs)
    return ''
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def breadcrumb_raw(context, label, viewname, *args, **kwargs):
    """
    Same as breadcrumb but label is not translated.

    :param context: template context (must carry the request).
    :param label: crumb text; escaped here but NOT translated.
    :param viewname: view name (or URL/model) the crumb links to.
    :returns: empty string so the tag renders nothing inline.
    """
    append_breadcrumb(context, escape(label), viewname, args, kwargs)
    return ''
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def breadcrumb_raw_safe(context, label, viewname, *args, **kwargs):
    """
    Same as breadcrumb but label is not escaped and translated.

    :param context: template context (must carry the request).
    :param label: crumb text; passed through verbatim.
    :param viewname: view name (or URL/model) the crumb links to.
    :returns: empty string so the tag renders nothing inline.
    """
    append_breadcrumb(context, label, viewname, args, kwargs)
    return ''
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def render_breadcrumbs(context, *args):
    """ Render breadcrumbs html using bootstrap css classes.

    Optional first positional arg overrides the template path; otherwise
    settings.BREADCRUMBS_TEMPLATE (or the bundled bootstrap2 template) is
    used. Returns safe html, or '' when no crumbs were recorded.
    """
    try:
        template_path = args[0]
    except IndexError:
        template_path = getattr(settings, 'BREADCRUMBS_TEMPLATE',
                                'django_bootstrap_breadcrumbs/bootstrap2.html')
    links = []
    # Crumbs were accumulated on the request META by the breadcrumb tags.
    for (label, viewname, view_args, view_kwargs) in context[
            'request'].META.get(CONTEXT_KEY, []):
        if isinstance(viewname, Model) and hasattr(
                viewname, 'get_absolute_url') and ismethod(
                viewname.get_absolute_url):
            # Model instances supply their own URL.
            url = viewname.get_absolute_url(*view_args, **view_kwargs)
        else:
            try:
                try:
                    # 'resolver_match' introduced in Django 1.5
                    current_app = context['request'].resolver_match.namespace
                except AttributeError:
                    # Older Django: resolve the namespace from the path.
                    try:
                        resolver_match = resolve(context['request'].path)
                        current_app = resolver_match.namespace
                    except Resolver404:
                        current_app = None
                url = reverse(viewname=viewname, args=view_args,
                              kwargs=view_kwargs, current_app=current_app)
            except NoReverseMatch:
                # Not a resolvable view name — treat it as a literal URL.
                url = viewname
        links.append((url, smart_text(label) if label else label))
    if not links:
        return ''
    if VERSION > (1, 8):  # pragma: nocover
        # RequestContext is deprecated in recent django
        # https://docs.djangoproject.com/en/1.10/ref/templates/upgrading/
        context = context.flatten()
    context['breadcrumbs'] = links
    context['breadcrumbs_total'] = len(links)
    return mark_safe(template.loader.render_to_string(template_path, context))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _find_symbol(self, module, name, fallback=None):
""" Find the symbol of the specified name inside the module or raise an exception. """
|
if not hasattr(module, name) and fallback:
return self._find_symbol(module, fallback, None)
return getattr(module, name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def apply(self, incoming):
    """ Store the incoming activation, apply the activation function and
    store the result as outgoing activation. """
    assert len(incoming) == self.size
    self.incoming = incoming
    result = self.activation(incoming)
    # The activation function must preserve the layer size.
    assert len(result) == self.size
    self.outgoing = result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delta(self, above):
    """ The derivative of the activation function at the current state.

    `above` is presumably the error signal propagated from the layer above
    (TODO confirm against caller); it is combined with the cached incoming
    and outgoing activations of this layer.
    """
    return self.activation.delta(self.incoming, self.outgoing, above)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def feed(self, weights, data):
    """ Evaluate the network with alternative weights on the input data and
    return the output activation. """
    input_layer = self.layers[0]
    assert len(data) == input_layer.size
    input_layer.apply(data)
    # Propagate through the remaining layers.
    for below, weight, layer in zip(self.layers[:-1], weights,
                                    self.layers[1:]):
        layer.apply(self.forward(weight, below.outgoing))
    # Return the activations of the output layer.
    return self.layers[-1].outgoing
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_network(self):
    """Define model and initialize weights."""
    self.network = Network(self.problem.layers)
    self.weights = Matrices(self.network.shapes)
    if not self.load:
        # Fresh weights drawn from the problem's normal distribution.
        self.weights.flat = np.random.normal(
            self.problem.weight_mean, self.problem.weight_scale,
            len(self.weights.flat))
        return
    loaded = np.load(self.load)
    assert loaded.shape == self.weights.shape, (
        'weights to load must match problem definition')
    self.weights.flat = loaded
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _init_training(self):
    # pylint: disable=redefined-variable-type
    """Classes needed during training."""
    backprop_cls = CheckedBackprop if self.check else BatchBackprop
    self.backprop = backprop_cls(self.network, self.problem.cost)
    self.momentum = Momentum()
    self.decent = GradientDecent()
    self.decay = WeightDecay()
    self.tying = WeightTying(*self.problem.weight_tying)
    # Apply weight tying once up front so shared weights start identical.
    self.weights = self.tying(self.weights)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _every(times, step_size, index):
""" Given a loop over batches of an iterable and an operation that should be performed every few elements. Determine whether the operation should be called for the current index. """
|
current = index * step_size
step = current // times * times
reached = current >= step
overshot = current >= step + step_size
return current and reached and not overshot
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_tax_lvl(entry, tax_lvl_depth=[]):
    """ Parse a single kraken-report entry and return a dictionary of taxa
    for its named ranks.

    :type entry: dict
    :param entry: attributes of a single kraken-report row.
    :type tax_lvl_depth: list
    :param tax_lvl_depth: running record of taxon levels encountered in
        previous calls.
    """
    # NOTE: the mutable default argument is DELIBERATE — the same list
    # persists across calls, acting as shared state that tracks the taxon
    # hierarchy while successive report rows are parsed in order.
    # How deep in the hierarchy are we currently? Each two spaces of
    # indentation is one level deeper. Also parse the scientific name at this
    # level.
    depth_and_name = re.match('^( *)(.*)', entry['sci_name'])
    depth = len(depth_and_name.group(1))//2
    name = depth_and_name.group(2)
    # Remove the previous levels so we're one higher than the level of the new
    # taxon. (This also works if we're just starting out or are going deeper.)
    del tax_lvl_depth[depth:]
    # Append the new taxon.
    tax_lvl_depth.append((entry['rank'], name))
    # Create a tax_lvl dict for the named ranks (`ranks` is a module-level
    # collection of recognized rank codes).
    tax_lvl = {x[0]: x[1] for x in tax_lvl_depth if x[0] in ranks}
    return(tax_lvl)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_kraken_report(kdata, max_rank, min_rank):
    """ Parse a single output file from the kraken-report tool. Return a list
    of counts at each of the acceptable taxonomic levels, and a list of NCBI
    IDs and a formatted string representing their taxonomic hierarchies.

    :type kdata: str
    :param kdata: Contents of the kraken report file.
    """
    # map between NCBI taxonomy IDs and the string rep. of the hierarchy
    taxa = OrderedDict()
    # the master collection of read counts (keyed on NCBI ID)
    counts = OrderedDict()
    # current rank
    r = 0
    # Indexes into the module-level `ranks` list: max_rank is the broadest
    # level kept, min_rank the narrowest.
    max_rank_idx = ranks.index(max_rank)
    min_rank_idx = ranks.index(min_rank)
    for entry in kdata:
        erank = entry['rank'].strip()
        # print("erank: "+erank)
        if erank in ranks:
            r = ranks.index(erank)
        # update running tally of ranks
        # NOTE: parse_tax_lvl keeps running state in its mutable default
        # argument, so it must be called for EVERY entry, in order.
        tax_lvl = parse_tax_lvl(entry)
        # record the reads assigned to this taxon level, and record the taxonomy string with the NCBI ID
        if erank in ranks and min_rank_idx >= ranks.index(entry['rank']) >= max_rank_idx:
            taxon_reads = int(entry["taxon_reads"])
            clade_reads = int(entry["clade_reads"])
            # Keep a taxon if reads were assigned directly to it, or (at the
            # narrowest requested rank) to any member of its clade.
            if taxon_reads > 0 or (clade_reads > 0 and entry['rank'] == min_rank):
                taxa[entry['ncbi_tax']] = tax_fmt(tax_lvl, r)
                if entry['rank'] == min_rank:
                    counts[entry['ncbi_tax']] = clade_reads
                else:
                    counts[entry['ncbi_tax']] = taxon_reads
                # print(" Counting {} reads at {}".format(counts[entry['ncbi_tax']], '; '.join(taxa[entry['ncbi_tax']])))
    #TODO: handle subspecies
    #if erank == '-' and min_rank == "SS" and last_entry_indent < curr_indent:
    # pass
    return counts, taxa
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def process_samples(kraken_reports_fp, max_rank, min_rank):
    """ Parse all kraken-report data files into sample counts dict and store
    global taxon id -> taxonomy data """
    taxa = OrderedDict()
    sample_counts = OrderedDict()
    for krep_fp in kraken_reports_fp:
        if not osp.isfile(krep_fp):
            raise RuntimeError("ERROR: File '{}' not found.".format(krep_fp))
        # use the kraken report filename as the sample ID
        sample_id = osp.splitext(osp.split(krep_fp)[1])[0]
        with open(krep_fp, "rt") as kf:
            try:
                reader = csv.DictReader(kf, fieldnames=field_names,
                                        delimiter="\t")
                # Materialize rows, dropping the header line.
                kdata = list(reader)[1:]
            except OSError as oe:
                raise RuntimeError("ERROR: {}".format(oe))
        scounts, staxa = parse_kraken_report(kdata, max_rank=max_rank,
                                             min_rank=min_rank)
        # update master records
        taxa.update(staxa)
        sample_counts[sample_id] = scounts
    return sample_counts, taxa
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_biom_table(sample_counts, taxa):
    """ Create a BIOM table from sample counts and taxonomy metadata. :type sample_counts: dict :param sample_counts: A dictionary of dictionaries with the first level keyed on sample ID, and the second level keyed on taxon ID with counts as values. :type taxa: dict :param taxa: A mapping between the taxon IDs from sample_counts to the full representation of the taxonomy string. The values in this dict will be used as metadata in the BIOM table. :rtype: biom.Table :return: A BIOM table containing the per-sample taxon counts and full taxonomy identifiers as metadata for each taxon. """
|
# Dense matrix: one row per taxon, one column per sample; a (sample, taxon)
# pair absent from sample_counts contributes 0.
data = [[0 if taxid not in sample_counts[sid] else sample_counts[sid][taxid]
         for sid in sample_counts]
        for taxid in taxa]
data = np.array(data, dtype=int)
# Observation (row) metadata: the full taxonomy string for each taxon.
tax_meta = [{'taxonomy': taxa[taxid]} for taxid in taxa]
gen_str = "kraken-biom v{} ({})".format(__version__, __url__)
return Table(data, list(taxa), list(sample_counts), tax_meta,
             type="OTU table", create_date=str(dt.now().isoformat()),
             generated_by=gen_str, input_is_dense=True)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_biom(biomT, output_fp, fmt="hdf5", gzip=False):
    """ Write the BIOM table to a file. :type biomT: biom.table.Table :param biomT: A BIOM table containing the per-sample OTU counts and metadata to be written out to file. :type output_fp str :param output_fp: Path to the BIOM-format file that will be written. :type fmt: str :param fmt: One of: hdf5, json, tsv. The BIOM version the table will be output (2.x, 1.0, 'classic'). Returns the (possibly .gz-suffixed) output path. """
|
# Default: plain text file opened with builtin open().
opener = open
mode = 'w'
# gzip-compress json/tsv output; the filename gains a .gz suffix if needed.
if gzip and fmt != "hdf5":
    if not output_fp.endswith(".gz"):
        output_fp += ".gz"
    opener = gzip_open
    mode = 'wt'
# HDF5 BIOM files are gzipped by default
if fmt == "hdf5":
    # h5py.File also supports the context-manager protocol with mode 'w'.
    opener = h5py.File
with opener(output_fp, mode) as biom_f:
    if fmt == "json":
        biomT.to_json(biomT.generated_by, direct_io=biom_f)
    elif fmt == "tsv":
        biom_f.write(biomT.to_tsv())
    else:
        biomT.to_hdf5(biom_f, biomT.generated_by)
return output_fp
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write_otu_file(otu_ids, fp):
    """Write the kraken OTU IDs to a plain-text file, one ID per line.

    :type otu_ids: list or iterable
    :param otu_ids: The OTU identifiers that will be written to file.
    :type fp: str
    :param fp: The path to the output file.
    :raises RuntimeError: If the directory component of *fp* does not exist.
    """
    fpdir = osp.split(fp)[0]
    # An empty dirname means "current directory", which always exists;
    # (was `not fpdir == ""` — double negative replaced by truthiness test).
    if fpdir and not osp.isdir(fpdir):
        raise RuntimeError("Specified path does not exist: {}".format(fpdir))
    with open(fp, 'wt') as outf:
        outf.write('\n'.join(otu_ids))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def transform(self, X):
    """Blend model predictions using the trained weights.

    Each row of ``X`` holds one model's predictions; rows are combined as a
    weighted power mean using ``self._weights`` and ``self._power``.

    Args:
        X (array-like): Predictions of different models, one model per row.

    Returns:
        dict: ``{'y_pred': blended_predictions}``.
    """
    n_models = np.shape(X)[0]
    n_weights = len(self._weights)
    assert n_models == n_weights, (
        'BlendingOptimizer: Number of models to blend its predictions and weights does not match: '
        'n_models={}, weights_len={}'.format(n_models, n_weights))
    powered = np.power(X, self._power)
    weighted_mean = np.average(powered, weights=self._weights, axis=0)
    return {'y_pred': weighted_mean ** (1.0 / self._power)}
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fit_transform(self, X, y, step_size=0.1, init_weights=None, warm_start=False):
    """Fit the optimizer to ``X``/``y``, then blend ``X`` with the learned weights.

    See `fit` and `transform` for further explanation of the parameters.
    """
    self.fit(X=X, y=y, step_size=step_size, init_weights=init_weights,
             warm_start=warm_start)
    blended = self.transform(X=X)
    return blended
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def escape_tags(value, valid_tags):
    """ Strips text from the given html string, leaving only tags. This functionality requires BeautifulSoup, nothing will be done otherwise. This isn't perfect. Someone could put javascript in here: <a onClick="alert('hi');">test</a> So if you use valid_tags, you still need to trust your data entry. Or we could try: - only escape the non matching bits - use BeautifulSoup to understand the elements, escape everything else and remove potentially harmful attributes (onClick). - Remove this feature entirely. Half-escaping things securely is very difficult, developers should not be lured into a false sense of security. """
|
# Escape the whole string first, then selectively re-enable whitelisted tags.
# 1. escape everything
value = conditional_escape(value)
# 2. Reenable certain tags
if valid_tags:
    # TODO: precompile somewhere once?
    # Matches opening/closing forms of any whitelisted tag name, capturing
    # the (still-escaped) attribute tail for _replace_quot to process.
    tag_re = re.compile(r'<(\s*/?\s*(%s))(.*?\s*)>' %
                        '|'.join(re.escape(tag) for tag in valid_tags))
    value = tag_re.sub(_replace_quot, value)
# Allow comments to be hidden
# NOTE(review): both replace() calls below are no-ops as written; presumably
# the originals mapped the escaped forms ("&lt;!--", "--&gt;") back to real
# comment markers and were mangled in transit — confirm against upstream.
value = value.replace("<!--", "<!--").replace("-->", "-->")
return mark_safe(value)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_seo_content_types(seo_models):
    """Return the content-type ids of the models configured in settings."""
    try:
        content_type_ids = []
        for model in seo_models:
            content_type_ids.append(ContentType.objects.get_for_model(model).id)
        return content_type_ids
    except Exception:  # previously caught DatabaseError
        # Called too early (e.g. before the DB is ready): report nothing.
        return []
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_seo_admin(admin_site, metadata_class):
    """Register the backends specified in Meta.backends with the admin."""
|
# Site-aware metadata uses the Site* admin variants so each record can be
# scoped to a django.contrib.sites Site.
if metadata_class._meta.use_sites:
    path_admin = SitePathMetadataAdmin
    model_instance_admin = SiteModelInstanceMetadataAdmin
    model_admin = SiteModelMetadataAdmin
    view_admin = SiteViewMetadataAdmin
else:
    path_admin = PathMetadataAdmin
    model_instance_admin = ModelInstanceMetadataAdmin
    model_admin = ModelMetadataAdmin
    view_admin = ViewMetadataAdmin
def get_list_display():
    # Change-list columns: every editable metadata element, in order.
    return tuple(
        name for name, obj in metadata_class._meta.elements.items()
        if obj.editable)
backends = metadata_class._meta.backends
# For each configured backend, build a concrete admin subclass with the
# matching form and list columns, then register it for that backend's model.
if 'model' in backends:
    class ModelAdmin(model_admin):
        form = get_model_form(metadata_class)
        list_display = model_admin.list_display + get_list_display()
    _register_admin(admin_site, metadata_class._meta.get_model('model'),
                    ModelAdmin)
if 'view' in backends:
    class ViewAdmin(view_admin):
        form = get_view_form(metadata_class)
        list_display = view_admin.list_display + get_list_display()
    _register_admin(admin_site, metadata_class._meta.get_model('view'),
                    ViewAdmin)
if 'path' in backends:
    class PathAdmin(path_admin):
        form = get_path_form(metadata_class)
        list_display = path_admin.list_display + get_list_display()
    _register_admin(admin_site, metadata_class._meta.get_model('path'),
                    PathAdmin)
if 'modelinstance' in backends:
    class ModelInstanceAdmin(model_instance_admin):
        form = get_modelinstance_form(metadata_class)
        list_display = (model_instance_admin.list_display +
                        get_list_display())
    _register_admin(admin_site,
                    metadata_class._meta.get_model('modelinstance'),
                    ModelInstanceAdmin)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _construct_form(self, i, **kwargs):
    """Override the method to change the form attribute empty_permitted."""
|
form = super(MetadataFormset, self)._construct_form(i, **kwargs)
# Monkey patch the form to always force a save.
# It's unfortunate, but necessary because we always want an instance.
# Effect on performance shouldn't be too great, because there is only
# ever one metadata attached.
form.empty_permitted = False
form.has_changed = lambda: True
# Set a marker on this object to prevent automatic metadata creation
# This is seen by the post_save handler, which then skips this
# instance.
# (Name mangling applies inside the class: the attribute is stored as
# _MetadataFormset__seo_metadata_handled.)
if self.instance:
    self.instance.__seo_metadata_handled = True
return form
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _get_metadata_model(name=None):
    """Find a registered Metadata class by name; with no name, return the single registered class."""
|
if name is not None:
    try:
        return registry[name]
    except KeyError:
        # Unknown name: build a helpful hint listing the valid choices.
        if len(registry) == 1:
            valid_names = 'Try using the name "%s" or simply leaving it '\
                          'out altogether.' % list(registry)[0]
        else:
            valid_names = "Valid names are " + ", ".join(
                '"%s"' % k for k in list(registry))
        raise Exception(
            "Metadata definition with name \"%s\" does not exist.\n%s" % (
                name, valid_names))
else:
    # Without a name there must be exactly one registered Metadata class.
    assert len(registry) == 1, \
        "You must have exactly one Metadata class, if using " \
        "get_metadata() without a 'name' parameter."
    return list(registry.values())[0]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _resolve_value(self, name):
    """ Returns an appropriate value for the given name. """
|
name = str(name)
if name in self._metadata._meta.elements:
    element = self._metadata._meta.elements[name]
    # Look in instances for an explicit value
    if element.editable:
        value = getattr(self, name)
        if value:
            return value
    # Otherwise, return an appropriate default value (populate_from)
    populate_from = element.populate_from
    if isinstance(populate_from, collections.Callable):
        return populate_from(self, **self._populate_from_kwargs())
    elif isinstance(populate_from, Literal):
        # Literal wraps a raw value that must not be resolved further.
        return populate_from.value
    elif populate_from is not NotSet:
        # populate_from names another element/attribute: resolve recursively.
        return self._resolve_value(populate_from)
# If this is not an element, look for an attribute on metadata
try:
    value = getattr(self._metadata, name)
except AttributeError:
    pass
else:
    if isinstance(value, collections.Callable):
        # Bound methods are called with this instance only; plain
        # functions also receive the metadata object.
        if getattr(value, '__self__', None):
            return value(self)
        else:
            return value(self._metadata, obj=self)
    return value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _urls_for_js(urls=None):
    """ Return templated URLs prepared for javascript. """
|
if urls is None:
    # prevent circular import
    from .urls import urlpatterns
    # Only named patterns can be referenced from the JS side.
    urls = [url.name for url in urlpatterns if getattr(url, 'name', None)]
# Map each url name to its URI template via get_uri_template().
urls = dict(zip(urls, [get_uri_template(url) for url in urls]))
# Allow deployments to expose extra endpoints through settings.
urls.update(getattr(settings, 'LEAFLET_STORAGE_EXTRA_URLS', {}))
return urls
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def decorated_patterns(func, *urls):
    """ Utility function to decorate a group of url in urls.py Taken from http://djangosnippets.org/snippets/532/ + comments See also http://friendpaste.com/6afByRiBB9CMwPft3a6lym Example: urlpatterns = [ url(r'^language/(?P<lang_code>[a-z]+)$', views.MyView, name='name'), ] + decorated_patterns(login_required, url(r'^', include('cms.urls')), """
|
def decorate(urls, func):
    # Tag every URL pattern (recursing one level into resolvers/includes)
    # so DecoratedURLPattern can wrap the view with `func` at resolve time.
    for url in urls:
        if isinstance(url, RegexURLPattern):
            url.__class__ = DecoratedURLPattern
            if not hasattr(url, "_decorate_with"):
                setattr(url, "_decorate_with", [])
            url._decorate_with.append(func)
        elif isinstance(url, RegexURLResolver):
            for pp in url.url_patterns:
                if isinstance(pp, RegexURLPattern):
                    pp.__class__ = DecoratedURLPattern
                    if not hasattr(pp, "_decorate_with"):
                        setattr(pp, "_decorate_with", [])
                    pp._decorate_with.append(func)
if func:
    # Accept either a single decorator or a list/tuple of them.
    if not isinstance(func, (list, tuple)):
        func = [func]
    for f in func:
        decorate(urls, f)
return urls
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_custom_fields(self):
    """Return the queryset of custom fields declared for this model."""
    model_type = ContentType.objects.get_for_model(self)
    return CustomField.objects.filter(content_type=model_type)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_custom_field(self, field_name):
    """Return the custom field object for this model named *field_name*.

    field_name - Name of the custom field you want.
    """
    model_type = ContentType.objects.get_for_model(self)
    return CustomField.objects.get(content_type=model_type, name=field_name)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_custom_value(self, field_name):
    """Return the stored value of the custom field *field_name*.

    A blank value record is created on first access.

    field_name - Name of the custom field you want.
    """
    field = self.get_custom_field(field_name)
    stored, _created = CustomFieldValue.objects.get_or_create(
        field=field, object_id=self.id)
    return stored.value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_custom_value(self, field_name, value):
    """Store *value* in the custom field *field_name* for this instance.

    field_name - Name of the custom field you want.
    value - Value to set it to.
    """
    field = self.get_custom_field(field_name)
    stored, _created = CustomFieldValue.objects.get_or_create(
        field=field, object_id=self.id)
    stored.value = value
    stored.save()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def assign_item(self, item, origin):
    """Move *item* to whichever cluster has the nearest centroid.

    :param item: the item to be moved.
    :param origin: the cluster the item currently belongs to.
    :return: True if the item was moved to another cluster, else False.
    """
    best = origin
    best_distance = self.distance(item, centroid(origin))
    for candidate in self.__clusters:
        candidate_distance = self.distance(item, centroid(candidate))
        if candidate_distance < best_distance:
            best = candidate
            best_distance = candidate_distance
    if id(best) != id(origin):
        self.move_item(item, origin, best)
        return True
    else:
        return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def move_item(self, item, origin, destination):
    """Move *item* from the *origin* cluster to the *destination* cluster.

    When a custom equality predicate is configured on the instance it is
    used to locate the item inside *origin* (defaulting to index 0 when no
    element matches, as before); otherwise ``list.index`` is used. The
    element found is popped from *origin* and appended to *destination*.
    """
    if self.equality:
        position = 0
        for candidate_index, candidate in enumerate(origin):
            if self.equality(candidate, item):
                position = candidate_index
                break
    else:
        position = origin.index(item)
    destination.append(origin.pop(position))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def initialise_clusters(self, input_, clustercount):
    """Distribute the items of *input_* round-robin over fresh clusters.

    :param input_: the data set (a list of tuples).
    :param clustercount: the number of clusters (n) to create.
    """
    # Start with n empty clusters, then deal the items out one by one.
    self.__clusters = [[] for _ in range(clustercount)]
    for index, item in enumerate(input_):
        self.__clusters[index % clustercount].append(item)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def publish_progress(self, total, current):
    """Report progress through the configured callback, if one exists.

    :param total: The total number of elements.
    :param current: The remaining number of elements.
    """
    callback = self.progress_callback
    if callback:
        callback(total, current)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_linkage_method(self, method):
    """Set how the distance between two clusters is determined.

    :param method: One of ``'single'``, ``'complete'``, ``'average'`` or
        ``'uclus'``, or a callable. The callable should take two
        collections as parameters and return a distance value between
        both collections.
    :raises ValueError: If *method* is neither a known name nor callable.
    """
    if method == 'single':
        self.linkage = single
    elif method == 'complete':
        self.linkage = complete
    elif method == 'average':
        self.linkage = average
    elif method == 'uclus':
        self.linkage = uclus
    elif callable(method):  # idiomatic replacement for hasattr(..., '__call__')
        self.linkage = method
    else:
        # BUG FIX: the message used to read "average of uclus".
        raise ValueError('distance method must be one of single, '
                         'complete, average or uclus')
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cluster(self, matrix=None, level=None, sequence=None):
    """ Perform hierarchical clustering. :param matrix: The 2D list that is currently under processing. The matrix contains the distances of each item with each other :param level: The current level of clustering :param sequence: The sequence number of the clustering """
|
logger.info("Performing cluster()")
if matrix is None:
    # create level 0, first iteration (sequence)
    level = 0
    sequence = 0
    matrix = []
# if the matrix only has two rows left, we are done
linkage = partial(self.linkage, distance_function=self.distance)
initial_element_count = len(self._data)
# Repeatedly merge the two closest items until only one cluster remains.
while len(matrix) > 2 or matrix == []:
    item_item_matrix = Matrix(self._data,
                              linkage,
                              True,
                              0)
    item_item_matrix.genmatrix(self.num_processes)
    matrix = item_item_matrix.matrix
    smallestpair = None
    mindistance = None
    rowindex = 0  # keep track of where we are in the matrix
    # find the minimum distance
    for row in matrix:
        cellindex = 0  # keep track of where we are in the matrix
        for cell in row:
            # if we are not on the diagonal (which is always 0)
            # and if this cell represents a new minimum...
            # NOTE(review): `if mindistance` treats a legitimate minimum of
            # 0 as "unset"; harmless only because smallestpair then guards.
            cell_lt_mdist = cell < mindistance if mindistance else False
            if ((rowindex != cellindex) and
                    (cell_lt_mdist or smallestpair is None)):
                smallestpair = (rowindex, cellindex)
                mindistance = cell
            cellindex += 1
        rowindex += 1
    sequence += 1
    level = matrix[smallestpair[1]][smallestpair[0]]
    cluster = Cluster(level, self._data[smallestpair[0]],
                      self._data[smallestpair[1]])
    # maintain the data, by combining the two most similar items
    # in the list we use the min and max functions to ensure the
    # integrity of the data. imagine: if we first remove the item
    # with the smaller index, all the rest of the items shift down by
    # one. So the next index will be wrong. We could simply adjust the
    # value of the second "remove" call, but we don't know the order
    # in which they come. The max and min approach clarifies that
    self._data.remove(self._data[max(smallestpair[0],
                                     smallestpair[1])])  # remove item 1
    self._data.remove(self._data[min(smallestpair[0],
                                     smallestpair[1])])  # remove item 2
    self._data.append(cluster)  # append item 1 and 2 combined
    self.publish_progress(initial_element_count, len(self._data))
# all the data is in one single cluster. We return that and stop
self.__cluster_created = True
logger.info("Call to cluster() is complete")
return
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def flatten(L):
    """Recursively flatten an arbitrarily nested list into one flat list.

    Non-list inputs are wrapped in a single-element list.
    Example: [[a, [b]], c] -> [a, b, c]
    """
    if not isinstance(L, list):
        return [L]
    flat = []
    for element in L:
        flat.extend(flatten(element))
    return flat
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def fullyflatten(container):
    """Completely flatten a cluster into a one-dimensional list of items.

    Any element carrying an ``items`` attribute (i.e. a nested cluster) is
    expanded recursively; everything else is collected as-is.

    :param container: the container to flatten.
    """
    collected = []
    for element in container:
        if hasattr(element, 'items'):
            collected += fullyflatten(element.items)
        else:
            collected.append(element)
    return collected
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def minkowski_distance(x, y, p=2):
    """Calculate the Minkowski distance between two points.

    :param x: the first point
    :param y: the second point
    :param p: the order of the Minkowski algorithm. If *p=1* it equals the
        Manhattan distance, if *p=2* the Euclidean distance. Higher orders
        converge towards the Chebyshev distance (*p=infinity*).
    """
    from math import pow
    assert len(y) == len(x)
    assert len(x) >= 1
    total = 0
    for a, b in zip(x, y):
        total += abs(a - b) ** p
    return pow(total, 1.0 / float(p))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def magnitude(a):
    """Return the Euclidean length (L2 norm) of vector *a*."""
    from math import sqrt
    return sqrt(sum(coord ** 2 for coord in a))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def dotproduct(a, b):
    """Return the dot product of two equal-length vectors."""
    assert(len(a) == len(b))
    return sum(x * y for x, y in zip(a, b))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
|
def centroid(data, method=median):
    """Return the central vector of a list of equal-length vectors.

    *method* aggregates each dimension across all vectors (default: median).
    """
    dimension_count = len(data[0])
    return tuple(method([vector[i] for vector in data])
                 for i in range(dimension_count))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def display(self, depth=0):
    """Recursively pretty-print this cluster tree. Useful for debugging."""
    indent = depth * " "
    print("%s[level %s]" % (indent, self.level))
    for child in self.items:
        if isinstance(child, Cluster):
            child.display(depth + 1)
        else:
            print("%s%s" % (indent, child))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getlevel(self, threshold):
    """ Retrieve all clusters up to a specific level threshold. This level-threshold represents the maximum distance between two clusters. So the lower you set this threshold, the more clusters you will receive and the higher you set it, you will receive less but bigger clusters. :param threshold: The level threshold: .. note:: It is debatable whether the value passed into this method should really be as strongly linked to the real cluster-levels as it is right now. The end-user will not know the range of this value unless s/he first inspects the top-level cluster. So instead you might argue that a value ranging from 0 to 1 might be a more useful approach. """
|
# A cluster node always has exactly two children: items[0] and items[1].
left = self.items[0]
right = self.items[1]
# if this object itself is below the threshold value we only need to
# return its contents as a list
if self.level <= threshold:
    return [fullyflatten(self.items)]
# if this cluster's level is higher than the threshold we will
# investigate its left and right part. Their level could be below the
# threshold
if isinstance(left, Cluster) and left.level <= threshold:
    if isinstance(right, Cluster):
        return [fullyflatten(left.items)] + right.getlevel(threshold)
    else:
        return [fullyflatten(left.items)] + [[right]]
elif isinstance(right, Cluster) and right.level <= threshold:
    if isinstance(left, Cluster):
        return left.getlevel(threshold) + [fullyflatten(right.items)]
    else:
        return [[left]] + [fullyflatten(right.items)]
# Alright. We covered the cases where one of the clusters was below
# the threshold value. Now we'll deal with the clusters that are above
# by recursively applying the previous cases.
if isinstance(left, Cluster) and isinstance(right, Cluster):
    return left.getlevel(threshold) + right.getlevel(threshold)
elif isinstance(left, Cluster):
    return left.getlevel(threshold) + [[right]]
elif isinstance(right, Cluster):
    return [[left]] + right.getlevel(threshold)
else:
    # Two plain leaves: each forms its own singleton cluster.
    return [[left], [right]]
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def jsmin(js, **kwargs):
    """ returns a minified version of the javascript string """
|
# Pick an in-memory stream class appropriate for the interpreter/input:
# Python 2 can use cStringIO for byte strings (fast path) but not for
# unicode; Python 3 always uses io.StringIO.
if not is_3:
    if cStringIO and not isinstance(js, unicode):
        # strings can use cStringIO for a 3x performance
        # improvement, but unicode (in python2) cannot
        klass = cStringIO.StringIO
    else:
        klass = StringIO.StringIO
else:
    klass = io.StringIO
# Minify from the input stream into the output stream, then return the text.
ins = klass(js)
outs = klass()
JavascriptMinify(ins, outs, **kwargs).minify()
return outs.getvalue()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def cached(fun):
    """Memoising decorator for linkage functions.

    Parameters are hardcoded (no ``*args``/``**kwargs`` magic) because the
    interchangeable use of sets and frozensets is only valid for this
    specific case; it is not guaranteed in general. The cache key is the
    pair ``(frozenset(a), frozenset(b))``; *distance_function* is ignored
    for caching purposes.
    """
    _cache = {}
    @wraps(fun)
    def memoised(a, b, distance_function):
        key = (frozenset(a), frozenset(b))
        if key not in _cache:
            _cache[key] = fun(a, b, distance_function)
        return _cache[key]
    return memoised
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def single(a, b, distance_function):
    """Return the distance of the closest pair of extreme points of *a* and *b*.

    ``distance_function`` determines the distance between two elements.
    """
    lo_a, hi_a = min(a), max(a)
    lo_b, hi_b = min(b), max(b)
    return min(distance_function(lo_a, hi_b),
               distance_function(lo_b, hi_a))
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def average(a, b, distance_function):
    """Return the mean of all pairwise distances between *a* and *b*.

    ``distance_function`` determines the distance between two elements.
    """
    total = 0
    pair_count = 0
    for x in a:
        for y in b:
            total += distance_function(x, y)
            pair_count += 1
    return total / pair_count
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def worker(self):
    """ Multiprocessing task function run by worker processes """
|
tasks_completed = 0
# Block on the shared task queue until the 'STOP' sentinel is received.
for task in iter(self.task_queue.get, 'STOP'):
    col_index, item, item2 = task
    # combinfunc expects collections; wrap scalars and tuples in a list.
    if not hasattr(item, '__iter__') or isinstance(item, tuple):
        item = [item]
    if not hasattr(item2, '__iter__') or isinstance(item2, tuple):
        item2 = [item2]
    # Ship back (column index, combined value) for the parent to place.
    result = (col_index, self.combinfunc(item, item2))
    self.done_queue.put(result)
    tasks_completed += 1
logger.info("Worker %s performed %s tasks",
            current_process().name,
            tasks_completed)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def genmatrix(self, num_processes=1):
    """ Actually generate the matrix :param num_processes: If you want to use multiprocessing to split up the work and run ``combinfunc()`` in parallel, specify ``num_processes > 1`` and this number of workers will be spun up, the work is split up amongst them evenly. """
|
use_multiprocessing = num_processes > 1
if use_multiprocessing:
    # Shared queues feeding tasks to, and collecting results from, workers.
    self.task_queue = Queue()
    self.done_queue = Queue()
self.matrix = []
logger.info("Generating matrix for %s items - O(n^2)", len(self.data))
if use_multiprocessing:
    logger.info("Using multiprocessing on %s processes!", num_processes)
if use_multiprocessing:
    logger.info("Spinning up %s workers", num_processes)
    processes = [Process(target=self.worker) for i in range(num_processes)]
    [process.start() for process in processes]
# Build the matrix one row at a time; each row is assembled as a dict
# keyed by column index so out-of-order worker results can be slotted in.
for row_index, item in enumerate(self.data):
    logger.debug("Generating row %s/%s (%0.2f%%)",
                 row_index,
                 len(self.data),
                 100.0 * row_index / len(self.data))
    row = {}
    if use_multiprocessing:
        num_tasks_queued = num_tasks_completed = 0
    for col_index, item2 in enumerate(self.data):
        if self.diagonal is not None and col_index == row_index:
            # This is a cell on the diagonal
            row[col_index] = self.diagonal
        elif self.symmetric and col_index < row_index:
            # The matrix is symmetric and we are "in the lower left
            # triangle" - fill this in after (in case of multiprocessing)
            pass
        # Otherwise, this cell is not on the diagonal and we do indeed
        # need to call combinfunc()
        elif use_multiprocessing:
            # Add that thing to the task queue!
            self.task_queue.put((col_index, item, item2))
            num_tasks_queued += 1
            # Start grabbing the results as we go, so as not to stuff all of
            # the worker args into memory at once (as Queue.get() is a
            # blocking operation)
            if num_tasks_queued > num_processes:
                col_index, result = self.done_queue.get()
                row[col_index] = result
                num_tasks_completed += 1
        else:
            # Otherwise do it here, in line
            """
            if not hasattr(item, '__iter__') or isinstance(item, tuple):
                item = [item]
            if not hasattr(item2, '__iter__') or isinstance(item2, tuple):
                item2 = [item2]
            """
            # See the comment in function _encapsulate_item_for_combinfunc
            # for details of why the lines above have been replaced
            # by function invocations
            item = _encapsulate_item_for_combinfunc(item)
            item2 = _encapsulate_item_for_combinfunc(item2)
            row[col_index] = self.combinfunc(item, item2)
    if self.symmetric:
        # One more iteration to get symmetric lower left triangle
        for col_index, item2 in enumerate(self.data):
            if col_index >= row_index:
                break
            # post-process symmetric "lower left triangle"
            row[col_index] = self.matrix[col_index][row_index]
    if use_multiprocessing:
        # Grab the remaining worker task results
        while num_tasks_completed < num_tasks_queued:
            col_index, result = self.done_queue.get()
            row[col_index] = result
            num_tasks_completed += 1
    # Convert the dict-by-column into an ordered list for this row.
    row_indexed = [row[index] for index in range(len(self.data))]
    self.matrix.append(row_indexed)
if use_multiprocessing:
    logger.info("Stopping/joining %s workers", num_processes)
    # One 'STOP' sentinel per worker terminates their iter() loops.
    [self.task_queue.put('STOP') for i in range(num_processes)]
    [process.join() for process in processes]
logger.info("Matrix generated")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def validate(fname):
    """Run dciodvfy over *fname* and collect the warnings and errors it reports.

    :param fname: Location and filename of the DICOM file.
    :return: dict with ``"errors"`` and ``"warnings"`` lists of messages.
    """
    report = {"errors": [], "warnings": []}
    for output_line in _process(fname):
        kind, message = _determine(output_line)
        # Lines classified as anything else are ignored.
        if kind in report:
            report[kind].append(message)
    return report
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def numpy(self):
    """ Grabs image data and converts it to a numpy array """
|
# load GDCM's image reading functionality
image_reader = gdcm.ImageReader()
image_reader.SetFileName(self.fname)
# Read() returns False when GDCM cannot parse the file as an image.
if not image_reader.Read():
    raise IOError("Could not read DICOM image")
pixel_array = self._gdcm_to_numpy(image_reader.GetImage())
return pixel_array
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _gdcm_to_numpy(self, image):
""" Converts a GDCM image to a numpy array. :param image: GDCM.ImageReader.GetImage() """
|
gdcm_typemap = {
gdcm.PixelFormat.INT8: numpy.int8,
gdcm.PixelFormat.UINT8: numpy.uint8,
gdcm.PixelFormat.UINT16: numpy.uint16,
gdcm.PixelFormat.INT16: numpy.int16,
gdcm.PixelFormat.UINT32: numpy.uint32,
gdcm.PixelFormat.INT32: numpy.int32,
gdcm.PixelFormat.FLOAT32: numpy.float32,
gdcm.PixelFormat.FLOAT64: numpy.float64
}
pixel_format = image.GetPixelFormat().GetScalarType()
if pixel_format in gdcm_typemap:
self.data_type = gdcm_typemap[pixel_format]
else:
raise KeyError(''.join(pixel_format, \
" is not a supported pixel format"))
#dimension = image.GetDimension(0), image.GetDimension(1)
self.dimensions = image.GetDimension(1), image.GetDimension(0)
gdcm_array = image.GetBuffer()
# GDCM returns char* as type str. This converts it to type bytes
if sys.version_info >= (3, 0):
gdcm_array = gdcm_array.encode(sys.getfilesystemencoding(), "surrogateescape")
# use float for accurate scaling
dimensions = image.GetDimensions()
result = numpy.frombuffer(gdcm_array, dtype=self.data_type).astype(float)
if len(dimensions) == 3:
# for cine (animations) there are 3 dims: x, y, number of frames
result.shape = dimensions[2], dimensions[0], dimensions[1]
else:
result.shape = dimensions
return result
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save_as_plt(self, fname, pixel_array=None, vmin=None, vmax=None, cmap=None, format=None, origin=None):
""" This method saves the image from a numpy array using matplotlib :param fname: Location and name of the image file to be saved. :param pixel_array: Numpy pixel array, i.e. ``numpy()`` return value :param vmin: matplotlib vmin :param vmax: matplotlib vmax :param cmap: matplotlib color map :param format: matplotlib format :param origin: matplotlib origin This method will return True if successful """
|
from matplotlib.backends.backend_agg \
import FigureCanvasAgg as FigureCanvas
from matplotlib.figure import Figure
from pylab import cm
if pixel_array is None:
pixel_array = self.numpy
if cmap is None:
cmap = cm.bone
fig = Figure(figsize=pixel_array.shape[::-1], dpi=1, frameon=False)
canvas = FigureCanvas(fig)
fig.figimage(pixel_array, cmap=cmap, vmin=vmin,
vmax=vmax, origin=origin)
fig.savefig(fname, dpi=1, format=format)
return True
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def read(self):
""" Returns array of dictionaries containing all the data elements in the DICOM file. """
|
def ds(data_element):
value = self._str_filter.ToStringPair(data_element.GetTag())
if value[1]:
return DataElement(data_element, value[0].strip(), value[1].strip())
results = [data for data in self.walk(ds) if data is not None]
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def walk(self, fn):
""" Loops through all data elements and allows a function to interact with each data element. Uses a generator to improve iteration. :param fn: Function that interacts with each DICOM element """
|
# Reject non-callables up front so the error is raised at call time.
if not hasattr(fn, "__call__"):
    raise TypeError("""walk_dataset requires a
    function as its parameter""")
# Walk the main dataset first, yielding fn(element) for each entry.
# NOTE(review): relies on GDCM's SWIG-style iterator, where next()
# appears to return the current element and advance -- confirm against
# the GDCM Python bindings in use.
dataset = self._dataset
iterator = dataset.GetDES().begin()
while (not iterator.equal(dataset.GetDES().end())):
    data_element = iterator.next()
    yield fn(data_element)
# Then walk the file meta-information header the same way.
header = self._header
iterator = header.GetDES().begin()
while (not iterator.equal(header.GetDES().end())):
    data_element = iterator.next()
    yield fn(data_element)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find(self, group=None, element=None, name=None, VR=None):
""" Searches for data elements in the DICOM file given the filters supplied to this method. :param group: Hex decimal for the group of a DICOM element e.g. 0x002 :param element: Hex decimal for the element value of a DICOM element e.g. 0x0010 :param name: Name of the DICOM element, e.g. "Modality" :param VR: Value Representation of the DICOM element, e.g. "PN" """
|
results = self.read()
if name is not None:
def find_name(data_element):
return data_element.name.lower() == name.lower()
return filter(find_name, results)
if group is not None:
def find_group(data_element):
return (data_element.tag['group'] == group
or int(data_element.tag['group'], 16) == group)
results = filter(find_group, results)
if element is not None:
def find_element(data_element):
return (data_element.tag['element'] == element
or int(data_element.tag['element'], 16) == element)
results = filter(find_element, results)
if VR is not None:
def find_VR(data_element):
return data_element.VR.lower() == VR.lower()
results = filter(find_VR, results)
return results
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def anonymize(self):
""" According to PS 3.15-2008, basic application level De-Indentification of a DICOM file requires replacing the values of a set of data elements"""
|
self._anon_obj = gdcm.Anonymizer()
self._anon_obj.SetFile(self._file)
self._anon_obj.RemoveGroupLength()
if self._anon_tags is None:
self._anon_tags = get_anon_tags()
for tag in self._anon_tags:
cur_tag = tag['Tag'].replace("(", "")
cur_tag = cur_tag.replace(")", "")
name = tag["Attribute Name"].replace(" ", "").encode("utf8")
group, element = cur_tag.split(",", 1)
# TODO expand this 50xx, 60xx, gggg, eeee
if ("xx" not in group
and "gggg" not in group
and "eeee" not in group):
group = int(group, 16)
element = int(element, 16)
if self.find(group=group, element=element):
self._anon_obj.Replace(
gdcm.Tag(group, element), "Anon" + name)
return self._anon_obj
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def image(self):
""" Read the loaded DICOM image data """
|
if self._image is None:
self._image = Image(self.fname)
return self._image
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def repo_name(self):
""" Returns a DataFrame of the repo names present in this project directory :return: DataFrame """
|
ds = [[x.repo_name] for x in self.repos]
df = pd.DataFrame(ds, columns=['repository'])
return df
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def command(self):
"""Manually import a CSV into a nYNAB budget"""
|
# Parse and validate CLI args, run the CSV import, then push the
# resulting delta to nYNAB.
print('pynYNAB CSV import')
parsed = self.parser.parse_args()
verify_common_args(parsed)
verify_csvimport(parsed.schema, parsed.accountname)
client = clientfromkwargs(**parsed)
expected = do_csvimport(parsed, client)
client.push(expected_delta=expected)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def command(self):
"""Manually import an OFX into a nYNAB budget"""
|
# Parse and validate CLI args, run the OFX import, then push the
# resulting delta to nYNAB.
print('pynYNAB OFX import')
parsed = self.parser.parse_args()
verify_common_args(parsed)
client = clientfromkwargs(**parsed)
expected = do_ofximport(parsed.file, client)
client.push(expected_delta=expected)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def default_listener(col_attr, default):
"""Establish a default-setting listener."""
|
# Register a SQLAlchemy "init_scalar" listener on the attribute so the
# column default is supplied when an unset attribute is first read.
# retval=True lets the returned value stand in for the attribute value.
@event.listens_for(col_attr, "init_scalar", retval=True, propagate=True)
def init_scalar(target, value, dict_):
    if default.is_callable:
        # the callable of ColumnDefault always accepts a context argument
        value = default.arg(None)
    elif default.is_scalar:
        value = default.arg
    else:
        # Server-side (SQL expression) defaults cannot be computed here.
        raise NotImplementedError(
            "Can't invoke pre-default for a SQL-level column default")
    # Store into the instance dict so the value persists on the object.
    dict_[col_attr.key] = value
    return value
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def has_coverage(self):
""" Returns a boolean for is a parseable .coverage file can be found in the repository :return: bool """
|
if os.path.exists(self.git_dir + os.sep + '.coverage'):
try:
with open(self.git_dir + os.sep + '.coverage', 'r') as f:
blob = f.read()
blob = blob.split('!')[2]
json.loads(blob)
return True
except Exception:
return False
else:
return False
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def __check_extension(files, ignore_globs=None, include_globs=None):
""" Internal method to filter a list of file changes by extension and ignore_dirs. :param files: :param ignore_globs: a list of globs to ignore (if none falls back to extensions and ignore_dir) :param include_globs: a list of globs to include (if none, includes all). :return: dict """
|
if include_globs is None or include_globs == []:
include_globs = ['*']
out = {}
for key in files.keys():
# count up the number of patterns in the ignore globs list that match
if ignore_globs is not None:
count_exclude = sum([1 if fnmatch.fnmatch(key, g) else 0 for g in ignore_globs])
else:
count_exclude = 0
# count up the number of patterns in the include globs list that match
count_include = sum([1 if fnmatch.fnmatch(key, g) else 0 for g in include_globs])
# if we have one vote or more to include and none to exclude, then we use the file.
if count_include > 0 and count_exclude == 0:
out[key] = files[key]
return out
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _repo_name(self):
""" Returns the name of the repository, using the local directory name. :returns: str """
|
if self._git_repo_name is not None:
return self._git_repo_name
else:
reponame = self.repo.git_dir.split(os.sep)[-2]
if reponame.strip() == '':
return 'unknown_repo'
return reponame
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _decode(self, obj, context):
""" Get the python representation of the obj """
|
return b''.join(map(int2byte, [c + 0x60 for c in bytearray(obj)])).decode("utf8")
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update(self, instance, validated_data):
""" temporarily remove hstore virtual fields otherwise DRF considers them many2many """
|
model = self.Meta.model
meta = self.Meta.model._meta
original_virtual_fields = list(meta.virtual_fields) # copy
if hasattr(model, '_hstore_virtual_fields'):
# remove hstore virtual fields from meta
for field in model._hstore_virtual_fields.values():
meta.virtual_fields.remove(field)
instance = super(HStoreSerializer, self).update(instance, validated_data)
if hasattr(model, '_hstore_virtual_fields'):
# restore original virtual fields
meta.virtual_fields = original_virtual_fields
return instance
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_image(self, data):
""" update image on panel, as quickly as possible """
|
# Drop any singleton dimensions before display.
if 1 in data.shape:
    data = data.squeeze()
contrast = self.conf.contrast_level
if contrast is not None:
    # Rescale to [0, 1] between the low/high contrast percentiles;
    # the 1e-8 guards against division by zero on flat images.
    low, high = np.percentile(data, [contrast, 100.0 - contrast])
    data = np.clip((data - low) / (high - low + 1.e-8), 0, 1)
self.axes.images[0].set_data(data)
self.canvas.draw()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_viewlimits(self, axes=None):
""" update xy limits of a plot"""
|
if axes is None:
axes = self.axes
xmin, xmax, ymin, ymax = self.data_range
if len(self.conf.zoom_lims) >1:
zlims = self.conf.zoom_lims[-1]
if axes in zlims:
xmin, xmax, ymin, ymax = zlims[axes]
xmin = max(self.data_range[0], xmin)
xmax = min(self.data_range[1], xmax)
ymin = max(self.data_range[2], ymin)
ymax = min(self.data_range[3], ymax)
if (xmax < self.data_range[0] or
xmin > self.data_range[1] or
ymax < self.data_range[2] or
ymin > self.data_range[3] ):
self.conf.zoom_lims.pop()
return
if abs(xmax-xmin) < 2:
xmin = int(0.5*(xmax+xmin) - 1)
xmax = xmin + 2
if abs(ymax-ymin) < 2:
ymin = int(0.5*(ymax+xmin) - 1)
ymax = ymin + 2
self.axes.set_xlim((xmin, xmax),emit=True)
self.axes.set_ylim((ymin, ymax),emit=True)
self.axes.update_datalim(((xmin, ymin), (xmax, ymax)))
self.conf.datalimits = [xmin, xmax, ymin, ymax]
self.redraw()
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def zoom_leftup(self, event=None):
"""leftup event handler for zoom mode in images"""
|
if self.zoom_ini is None:
return
ini_x, ini_y, ini_xd, ini_yd = self.zoom_ini
try:
dx = abs(ini_x - event.x)
dy = abs(ini_y - event.y)
except:
dx, dy = 0, 0
t0 = time.time()
self.rbbox = None
self.zoom_ini = None
if (dx > 3) and (dy > 3) and (t0-self.mouse_uptime)>0.1:
self.mouse_uptime = t0
zlims, tlims = {}, {}
ax = self.axes
xmin, xmax = ax.get_xlim()
ymin, ymax = ax.get_ylim()
zlims[ax] = [xmin, xmax, ymin, ymax]
if len(self.conf.zoom_lims) == 0:
self.conf.zoom_lims.append(zlims)
ax_inv = ax.transData.inverted
try:
x1, y1 = ax_inv().transform((event.x, event.y))
except:
x1, y1 = self.x_lastmove, self.y_lastmove
try:
x0, y0 = ax_inv().transform((ini_x, ini_y))
except:
x0, y0 = ini_xd, ini_yd
tlims[ax] = [int(round(min(x0, x1))), int(round(max(x0, x1))),
int(round(min(y0, y1))), int(round(max(y0, y1)))]
self.conf.zoom_lims.append(tlims)
# now apply limits:
self.set_viewlimits()
if callable(self.zoom_callback):
self.zoom_callback(wid=self.GetId(), limits=tlims[ax])
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def collect_directories(self, directories):
""" Collects all the directories into a `set` object. If `self.recursive` is set to `True` this method will iterate through and return all of the directories and the subdirectories found from `directories` that are not blacklisted. if `self.recursive` is set to `False` this will return all the directories that are not balcklisted. `directories` may be either a single object or an iterable. Recommend passing in absolute paths instead of relative. `collect_directories` will attempt to convert `directories` to absolute paths if they are not already. """
|
directories = util.to_absolute_paths(directories)
if not self.recursive:
return self._remove_blacklisted(directories)
recursive_dirs = set()
for dir_ in directories:
walk_iter = os.walk(dir_, followlinks=True)
walk_iter = [w[0] for w in walk_iter]
walk_iter = util.to_absolute_paths(walk_iter)
walk_iter = self._remove_blacklisted(walk_iter)
recursive_dirs.update(walk_iter)
return recursive_dirs
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_directories(self, directories):
""" Removes any `directories` from the set of plugin directories. `directories` may be a single object or an iterable. Recommend passing in all paths as absolute, but the method will attemmpt to convert all paths to absolute if they are not already based on the current working directory. """
|
# Normalize to absolute paths, then drop them from the plugin set.
absolute = util.to_absolute_paths(directories)
self.plugin_directories = util.remove_from_set(
    self.plugin_directories, absolute)
|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def remove_blacklisted_directories(self, directories):
""" Attempts to remove the `directories` from the set of blacklisted directories. If a particular directory is not found in the set of blacklisted, method will continue on silently. `directories` may be a single instance or an iterable. Recommend passing in absolute paths. Method will try to convert to an absolute path if it is not already using the current working directory. """
|
directories = util.to_absolute_paths(directories)
# BUGFIX: the shrunken set was assigned to a local and discarded, so
# the removal never took effect. Write it back to the attribute, as
# the sibling plugin-directory removal code does.
self.blacklisted_directories = util.remove_from_set(
    self.blacklisted_directories, directories)
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.