text_prompt stringlengths 157 13.1k | code_prompt stringlengths 7 19.8k ⌀ |
|---|---|
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_ordered_values(self, keypath, distance_from, open_interval=True):
""" Retrieves the referents's values sorted by their distance from the min, max, or mid value. """ |
values = []
if keypath[0] == 'target':
# instances start with 'target' prefix, but
# don't contain it, so we remove it here.
keypath = keypath[1:]
for _, instance in self.iter_singleton_referents():
value = instance.get_value_from_path(keypath)
if hasattr(value, 'low') and value.low != value.high:
return []
values.append(float(value))
if len(values) == 0:
return []
values = np.array(values)
anchor = values.min()
diffs = values - anchor
if distance_from == 'max':
anchor = values.max()
diffs = anchor - values
if distance_from == 'mean':
anchor = values.mean()
diffs = abs(anchor - values)
sdiffs = np.unique(diffs)
sdiffs.sort()
results = []
for ix, el in enumerate(sdiffs):
mask = diffs <= el
vals = values[mask]
if False:
# when vagueness has been made precise through an ordinal
results.append(IntervalCell(vals.min(), vals.max()))
elif distance_from == 'max':
if open_interval:
results.append(IntervalCell(vals.min(), np.inf))
else:
results.append(IntervalCell(vals.min(), vals.min()))
elif distance_from == 'min':
if open_interval:
results.append(IntervalCell(-np.inf, vals.max()))
else:
results.append(IntervalCell(vals.max(), vals.max()))
elif distance_from == 'mean':
if ix+1 == len(sdiffs): continue # skip last
results.append(IntervalCell(vals.min(), vals.max()))
return results |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_paths_for_attribute(self, attribute_name):
""" Returns a path list to all attributes that have with a particular name. """ |
has_name = lambda name, structure: name == attribute_name
return self.find_path(has_name, on_targets=True) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_cell(self, keypath, cell):
""" Adds a new cell to the end of `keypath` of type `cell`""" |
# Walk/extend the nested DictCell structure along `keypath`, creating
# intermediate DictCells as needed, then attach `cell` at the final key.
keypath = keypath[:] # copy, so pop(0) below doesn't mutate the caller's list
inner = self # the most inner dict where cell is added
cellname = keypath # the name of the cell
assert keypath not in self, "Already exists: %s " % (str(keypath))
if isinstance(keypath, list):
    while len(keypath) > 1:
        cellname = keypath.pop(0)
        if cellname not in inner:
            # create the missing intermediate node
            inner.__dict__['p'][cellname] = DictCell()
        inner = inner[cellname] # move in one
    cellname = keypath[0]
# NOTE(review): when keypath is not a list it is used directly as the key.
# now we can add 'cellname'->(Cell) to inner (DictCell)
inner.__dict__['p'][cellname] = cell
return inner[cellname] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def size(self):
""" Returns the size of the belief state. Initially if there are $n$ consistent members, (the result of `self.number_of_singleton_referents()`) then there are generally $2^{n}-1$ valid belief states. """ |
n = self.number_of_singleton_referents()
targets = list(self.iter_referents_tuples())
n_targets = len(targets)
if n == 0 or n_targets == 0:
return 0
#if len(self.__dict__['deferred_effects']) != 0:
# return -1
size1 = len(list(self.iter_referents_tuples()))
tlow, thigh = self['targetset_arity'].get_tuple()
clow, chigh = self['contrast_arity'].get_tuple()
return size1 |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_referents(self):
""" Generates target sets that are compatible with the current beliefstate. """ |
# Yield every subset of the consistent singleton referents whose size is
# within the targetset arity bounds and whose complement (the contrast
# set) is within the contrast arity bounds.
tlow, thigh = self['targetset_arity'].get_tuple()
clow, chigh = self['contrast_arity'].get_tuple()
referents = list(self.iter_singleton_referents())
t = len(referents)
low = max(1, tlow)      # target sets are never empty
high = min([t, thigh])  # cannot exceed the number of candidates
# Larger target sets are yielded first (reversed range).
# NOTE(review): `xrange` implies this module targets Python 2.
for targets in itertools.chain.from_iterable(itertools.combinations(referents, r) \
        for r in reversed(xrange(low, high+1))):
    if clow <= t-len(targets) <= chigh:
        yield targets |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def number_of_singleton_referents(self):
""" Returns the number of singleton elements of the referential domain that are compatible with the current belief state. This is the size of the union of all referent sets. """ |
if self.__dict__['referential_domain']:
ct = 0
for i in self.iter_singleton_referents():
ct += 1
return ct
else:
raise Exception("self.referential_domain must be defined") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_singleton_referents(self):
""" Iterator of all of the singleton members of the context set. NOTE: this evaluates entities one-at-a-time, and does not handle relational constraints. """ |
try:
    # A member is compatible when it is entailed by the target
    # description and not excluded by a (non-empty) distractor.
    for member in self.__dict__['referential_domain'].iter_entities():
        if self['target'].is_entailed_by(member) and (self['distractor'].empty() or not self['distractor'].is_entailed_by(member)):
            yield member['num'], member
except KeyError:
    # NOTE(review): this also swallows KeyErrors raised inside the loop
    # body (e.g. member['num']), not just a missing referential_domain.
    raise Exception("No referential_domain defined") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def iter_singleton_referents_tuples(self):
""" Iterator of all of the singleton members's id number of the context set. NOTE: this evaluates entities one-at-a-time, and does not handle relational constraints. """ |
try:
    # Same filter as iter_singleton_referents(), but yields only the
    # low end of the member's 'num' interval (its id number).
    for member in self.__dict__['referential_domain'].iter_entities():
        if self['target'].is_entailed_by(member) and (self['distractor'].empty() or not self['distractor'].is_entailed_by(member)):
            yield member['num'].low
except KeyError:
    # NOTE(review): also catches KeyErrors raised inside the loop body.
    raise Exception("No referential_domain defined") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def copy(self):
""" Copies the BeliefState by recursively deep-copying all of its parts. Domains are not copied, as they do not change during the interpretation or generation. """ |
copied = BeliefState(self.__dict__['referential_domain'])
for key in ['environment_variables', 'deferred_effects', 'pos', 'p']:
copied.__dict__[key] = copy.deepcopy(self.__dict__[key])
return copied |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gp_datdir(initial, topN):
"""example for plotting from a text file via numpy.loadtxt 1. prepare input/output directories 2. load the data into an OrderedDict() [adjust axes units] 3. sort countries from highest to lowest population 4. select the <topN> most populated countries 5. call ccsgp.make_plot with data from 4 Below is an output image for country initial T and the 4 most populated countries for this initial (click to enlarge). Also see:: $ python -m ccsgp_get_started.examples.gp_datdir -h for help on the command line options. .. image:: pics/T.png :width: 450 px .. image:: pics/U.png :width: 450 px :param initial: country initial :type initial: str :param topN: number of most populated countries to plot :type topN: int :ivar inDir: input directory according to package structure and initial :ivar outDir: output directory according to package structure :ivar data: OrderedDict with datasets to plot as separate keys :ivar file: data input file for specific country, format: [x y] OR [x y dx dy] :ivar country: country, filename stem of input file :ivar file_url: absolute url to input file :ivar nSets: number of datasets """ |
# prepare input/output directories
inDir, outDir = getWorkDirs()
initial = initial.capitalize()
inDir = os.path.join(inDir, initial)
if not os.path.exists(inDir): # catch missing initial
return "initial %s doesn't exist" % initial
# prepare data
data = OrderedDict()
for file in os.listdir(inDir):
country = os.path.splitext(file)[0]
file_url = os.path.join(inDir, file)
data[country] = np.loadtxt(open(file_url, 'rb')) # load data
# set y-axis unit to 1M
data[country][:, 1] /= 1e6
if data[country].shape[1] > 2: data[country][:, 3:] /= 1e6
logging.debug(data) # shown if --log flag given on command line
# sort countries according to mean population (highest -> lowest)
sorted_data = OrderedDict(sorted(
data.items(), key = lambda t: np.mean(t[1][:,1]), reverse = True
))
# "pop" (select) N most populated countries
top_data = OrderedDict(
sorted_data.popitem(last = False) for i in xrange(topN)
if sorted_data
)
# generate plot using ccsgp.make_plot
nSets = len(top_data)
make_plot(
data = top_data.values(),
properties = [ getOpts(i) for i in xrange(nSets) ],
titles = top_data.keys(), # use data keys as legend titles
name = os.path.join(outDir, initial),
key = [ 'at graph 1., 1.2', 'maxrows 2' ],
ylabel = 'total population ({/Symbol \664} 10^{6})',
xlabel = 'year', rmargin = 0.99, tmargin = 0.85, size='8.5in,8in'
)
return 'done' |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def linreg_ols_pinv(y, X, rcond=1e-15):
"""Linear Regression, OLS, by multiplying with Pseudoinverse""" |
import numpy as np
try: # multiply with inverse to compute coefficients
return np.dot(np.linalg.pinv(
np.dot(X.T, X), rcond=rcond), np.dot(X.T, y))
except np.linalg.LinAlgError:
print("LinAlgError: SVD does not converge")
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def create_error_response(code, message, status=status.BAD_REQUEST):
"""Create a fail response. Args: code (str):
the code of the error. The title should be lowercase and underscore separated. message (dict, list, str):
the message of the error. This can be a list, dictionary or simple string. status (int):
the status code. Defaults to 400. Returns: Response: the response with the error. The format of the error is the following: code and message. The code could be `user_error` or `internal_error`. The message contains either a string, or a list or a dictionary. If not specify, the status will be a 400. """ |
errors = dict(code=code, message=message)
return Response(errors=errors, status=status) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def display_unit(self):
""" Display unit of value. :type: ``str`` """ |
# Lazily resolve (and cache) the display unit for this value's type.
# NOTE(review): returns None implicitly when neither branch applies
# (no cached unit and no quantity factory) -- confirm callers expect that.
if self._display_unit:
    # already resolved on a previous call
    return self._display_unit
elif self._Q:
    config = Configuration.display.unit_systems
    default_system = Configuration.unit_system
    units = config.systems[default_system]
    # fall back to the native unit when the type has no configured display unit
    self._display_unit = units.get(self._type, self._unit)
    if self._type == "temperature":
        # temperature units need the "deg" prefix (e.g. degC, degF)
        from_unit = "deg" + self._unit.upper()
        to_unit = "deg" + self._display_unit.upper()
    else:
        from_unit = self._unit
        to_unit = self._display_unit
    #print("dv", from_unit, to_unit)
    # cache 1-unit quantities used for subsequent conversions
    self._q_unit = self._Q("1 " + from_unit)
    self._q_display = self._Q("1 " + to_unit)
    return self._display_unit |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def value(self, new_value):
""" Updates the value. If the change exceeds the change delta observers and linked values are notified. """ |
datetime_value = None
if new_value:
datetime_value = new_value.strftime("%Y-%M-%dT%H:%M:%SZ")
self._set_value(datetime_value) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_option(self, value, text, selected=False):
""" Add option to select :param value: The value that the option represent. :param text: The text that should be displayer in dropdown :param selected: True if the option should be the default value. """ |
# Build the option record and append it to the dropdown's option list.
option = {"value": value, "text": text, "selected":selected}
self.options += [option]
if selected:
    # default options are also tracked as the current selection/value
    self.selected_options += [option]
    self._value.append(value)
if self._persist_value:
    # persist the selection so it survives restarts
    self.settings.store_value("selected_options", self.selected_options) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_logfile_name(tags):
"""Formulates a log file name that incorporates the provided tags. The log file will be located in ``scgpm_seqresults_dnanexus.LOG_DIR``. Args: tags: `list` of tags to append to the log file name. Each tag will be '_' delimited. Each tag will be added in the same order as provided. """ |
if not os.path.exists(sd.LOG_DIR):
os.mkdir(sd.LOG_DIR)
filename = "log"
for tag in tags:
filename += "_{}".format(tag)
filename += ".txt"
filename = os.path.join(sd.LOG_DIR,filename)
return filename |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def rewrap(s, width=COLS):
""" Join all lines from input string and wrap it at specified width """ |
s = ' '.join([l.strip() for l in s.strip().split('\n')])
return '\n'.join(textwrap.wrap(s, width)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_configuration(options):
""" interactively add a new configuration """ |
if options.username != None:
username = options.username
else:
username = prompt('Username: ')
if options.password != None:
password = options.password
else:
password = prompt('Password: ', hide_input=not options.show_password)
if options.app_url != None:
app_url = options.app_url
else:
app_url = prompt('App URL (default: https://app.jut.io just hit enter): ')
if app_url.strip() == '':
app_url = 'https://app.jut.io'
section = '%s@%s' % (username, app_url)
if config.exists(section):
raise JutException('Configuration for "%s" already exists' % section)
token_manager = auth.TokenManager(username=username,
password=password,
app_url=app_url)
authorization = authorizations.get_authorization(token_manager,
app_url=app_url)
client_id = authorization['client_id']
client_secret = authorization['client_secret']
deployment_name = default_deployment(app_url,
client_id,
client_secret)
config.add(section, **{
'app_url': app_url,
'deployment_name': deployment_name,
'username': username,
'client_id': client_id,
'client_secret': client_secret
})
if options.default:
config.set_default(name=section)
else:
default_configuration(interactive=False) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def get_initial_centroids(self):
'''Randomly choose k data points as initial centroids'''
if self.seed is not None: # useful for obtaining consistent results
np.random.seed(self.seed)
n = self.data.shape[0] # number of data points
# Pick K indices from range [0, N).
rand_indices = np.random.randint(0, n, self.k)
# Keep centroids as dense format, as many entries will be nonzero due to averaging.
# As long as at least one document in a cluster contains a word,
# it will carry a nonzero weight in the TF-IDF vector of the centroid.
centroids = self.data[rand_indices,:].toarray()
self.centroids=centroids
return centroids |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def smart_initialize(self):
'''Use k-means++ to initialize a good set of centroids'''
if self.seed is not None: # useful for obtaining consistent results
    np.random.seed(self.seed)
centroids = np.zeros((self.k, self.data.shape[1]))
# Randomly choose the first centroid.
# Since we have no prior knowledge, choose uniformly at random
idx = np.random.randint(self.data.shape[0])
centroids[0] = self.data[idx,:].toarray()
# Compute distances from the first centroid chosen to all the other data points
squared_distances = pairwise_distances(self.data, centroids[0:1], metric='euclidean').flatten()**2
for i in range(1, self.k):
    # Choose the next centroid randomly, so that the probability for each data point to be chosen
    # is directly proportional to its squared distance from the nearest centroid.
    # Roughly speaking, a new centroid should be as far from the other centroids as possible.
    idx = np.random.choice(self.data.shape[0], 1, p=squared_distances/sum(squared_distances))
    centroids[i] = self.data[idx,:].toarray()
    # Now compute distances from the centroids to all data points
    # (minimum over all centroids chosen so far -> nearest-centroid distance)
    squared_distances = np.min(pairwise_distances(self.data, centroids[0:i+1], metric='euclidean')**2,axis=1)
self.centroids=centroids
return centroids |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def filter_line(line: str, context: RunContext) -> typing.Optional[str]: """ Filters out lines that match a given regex :param line: line to filter :type line: str :param context: run context :type context: _RunContext :return: line if it doesn't match the filter :rtype: optional str """ |
if context.filters is not None:
for filter_ in context.filters:
if re.match(filter_, line):
return None
return line |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def capture_output_from_running_process(context: RunContext) -> None: """ Parses output from a running sub-process Decodes and filters the process output line by line, buffering it If "mute" is False, sends the output back in real time :param context: run context :type context: _RunContext """ |
# Get the raw output one line at a time
_output = context.capture.readline(block=False)
if _output:
    line = decode_and_filter(_output, context)
    if line:
        if not context.mute:
            # Print in real time
            _LOGGER_PROCESS.debug(line)
        # Buffer the line
        context.process_output_chunks.append(line)
    # Get additional output if any
    # NOTE(review): tail-recursion grows the Python stack one frame per
    # buffered line; a very chatty process could hit the recursion limit.
    # Consider a while-loop -- behavior left unchanged here.
    return capture_output_from_running_process(context)
return None |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def autorg(filename, mininterval=None, qminrg=None, qmaxrg=None, noprint=True):
"""Execute autorg. Inputs: filename: either a name of an ascii file, or an instance of Curve. mininterval: the minimum number of points in the Guinier range qminrg: the maximum value of qmin*Rg. Default of autorg is 1.0 qmaxrg: the maximum value of qmax*Rg. Default of autorg is 1.3 noprint: if the output of autorg should be redirected to the null device. Outputs: Rg as an ErrorValue I0 as an ErrorValue qmin: the lower end of the chosen Guinier range qmax: the upper end of the chosen Guinier range quality: the quality parameter, between 0 and 1 aggregation: float, the extent of aggregation """ |
if isinstance(filename, Curve):
    # Accept an in-memory Curve: write it to a temporary file that the
    # autorg binary can read (delete=False keeps the path valid after close).
    curve = filename
    with tempfile.NamedTemporaryFile('w+b',
                                     delete=False) as f:
        curve.save(f)
        filename = f.name
cmdline = ['autorg', filename, '-f', 'ssv']
if mininterval is not None:
    cmdline.extend(['--mininterval', str(mininterval)])
if qminrg is not None:
    cmdline.extend(['--sminrg', str(qminrg)])
if qmaxrg is not None:
    cmdline.extend(['--smaxrg', str(qmaxrg)])
result = execute_command(cmdline, noprint=noprint)
# ssv output columns: Rg dRg I0 dI0 first-point last-point quality aggregation filename
Rg, dRg, I0, dI0, idxfirst, idxlast, quality, aggregation, filename = result[0].split(None, 8)
try:
    curve
except NameError:
    # `filename` was a path: load the curve so we can index curve.q below
    curve = Curve.new_from_file(filename)
else:
    # we created the temporary file above: clean it up
    os.unlink(filename)
# autorg reports 1-based point indices, hence the "- 1"
return ErrorValue(float(Rg), float(dRg)), ErrorValue(float(I0), float(dI0)), curve.q[int(idxfirst) - 1], curve.q[
    int(idxlast) - 1], float(quality), float(aggregation) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def datcmp(*curves, alpha=None, adjust=None, test='CORMAP'):
"""Run datcmp on the scattering curves. Inputs: *curves: scattering curves as positional arguments alpha: confidence parameter adjust: adjustment type (string), see the help of datcmp for details test: test (string), see the help of datcmp for details Outputs: matC: the C matrix matp: the matrix of the p values comparing the i-th and j-th exposure matpadj: adjusted p-matrix of the exposures ok: list of the same length as the number of curves. If True, the given curve does not differ significantly from the others. """ |
# All curves must share the same length for datcmp to compare them.
if len({len(c) for c in curves}) != 1:
    raise ValueError('All curves have to be of the same length.')
datcmpargs = []
if alpha is not None:
    datcmpargs.append('--alpha=%f' % alpha)
if adjust is not None:
    datcmpargs.append('--adjust=%s' % adjust)
if test is not None:
    datcmpargs.append('--test=%s' % test)
with tempfile.TemporaryDirectory(prefix='credolib_datcmp') as td:
    # Write each curve as a 3-column (q, Intensity, Error) text file.
    for i, c in enumerate(curves):
        mat = np.zeros((len(c), 3))
        mat[:, 0] = c.q
        mat[:, 1] = c.Intensity
        mat[:, 2] = c.Error
        np.savetxt(os.path.join(td, 'curve_%d.dat' % i), mat)
    # Result matrices are NaN-filled; entries stay NaN if datcmp fails.
    matC = np.zeros((len(curves), len(curves))) + np.nan
    matp = np.zeros((len(curves), len(curves))) + np.nan
    matpadj = np.zeros((len(curves), len(curves))) + np.nan
    ok = np.zeros(len(curves)) + np.nan
    try:
        results = subprocess.check_output(
            ['datcmp'] + datcmpargs + [os.path.join(td, 'curve_%d.dat' % i) for i in range(len(curves))]).decode(
            'utf-8')
    except subprocess.CalledProcessError:
        # best effort: return the NaN matrices when datcmp fails
        pass
    else:
        for l in results.split('\n'):
            # "i vs. j  C  p  adj-p" rows of the pairwise comparison table.
            # NOTE(review): patterns are non-raw strings -- the escapes work
            # but emit DeprecationWarnings on newer Pythons; prefer r''.
            m = re.match(
                '^\s*(?P<i>\d+)\s*vs\.\s*(?P<j>\d+)\s*(?P<C>\d*\.\d*)\s*(?P<p>\d*\.\d*)\s*(?P<adjp>\d*\.\d*)[\s\*]{1}$',
                l)
            if m is not None:
                # datcmp indices are 1-based; store symmetrically
                i = int(m.group('i')) - 1
                j = int(m.group('j')) - 1
                matC[i, j] = matC[j, i] = float(m.group('C'))
                matp[i, j] = matp[j, i] = float(m.group('p'))
                matpadj[i, j] = matpadj[j, i] = float(m.group('adjp'))
            else:
                # "i *" acknowledgment rows: curve i does not differ
                # significantly from the others
                m = re.match('\s*(?P<i>\d+)(?P<ack>[\*\s]{1})\s*', l)
                if m is not None:
                    ok[int(m.group('i')) - 1] = (m.group('ack') == '*')
return matC, matp, matpadj, ok |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def datporod(gnomoutfile):
"""Run datporod and return the estimated Porod volume. Returns: Radius of gyration found in the input file I0 found in the input file Vporod: the estimated Porod volume """ |
results = subprocess.check_output(['datporod', gnomoutfile]).decode('utf-8').strip().split()
return float(results[0]), float(results[1]), float(results[2]) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def gnom(curve, Rmax, outputfilename=None, Npoints_realspace=None, initial_alpha=None):
"""Run GNOM on the dataset. Inputs: curve: an instance of sastool.classes2.Curve or anything which has a save() method, saving the scattering curve to a given .dat file, in q=4*pi*sin(theta)/lambda [1/nm] units Rmax: the estimated maximum extent of the scattering object, in nm. outputfilename: the preferred name of the output file. If not given, the .out file produced by gnom will be lost. Npoints_realspace: the expected number of points in the real space initial_alpha: the initial value of the regularization parameter. Outputs: the same as of read_gnom_pr() """ |
# Drive GNOM's interactive prompt non-interactively: write the curve to a
# temp dir, feed canned answers on stdin, then parse the resulting p(r).
with tempfile.TemporaryDirectory(prefix='credolib_gnom') as td:
    curve.save(os.path.join(td, 'curve.dat'))
    print('Using curve for GNOM: qrange from {} to {}'.format(curve.q.min(), curve.q.max()))
    # empty string means "accept GNOM's default"
    if Npoints_realspace is None:
        Npoints_realspace = ""
    else:
        Npoints_realspace = str(Npoints_realspace)
    if initial_alpha is None:
        initial_alpha = ""
    else:
        initial_alpha = str(initial_alpha)
    # GNOM questions and our answers:
    # Printer type [ postscr ] : <ENTER>
    # Input data, first file : <curve.dat in the temporary directory><ENTER>
    # Output file [ gnom.out ] : <gnom.out in the temporary directory><ENTER>
    # No of start points to skip [ 0 ] : 0<ENTER>
    # ... (just GNOM output)
    # ... (just GNOM output)
    # Input data, second file [ none ] : <ENTER>
    # No of end points to omit [ 0 ] : 0<ENTER>
    # ... (just GNOM output)
    # ... (just GNOM output)
    # Angular scale (1/2/3/4) [ 1 ] : 2<ENTER>
    # Plot input dataa (Y/N) [ Yes ] : N<ENTER>
    # File containing expert parameters [ none ] : <ENTER>
    # Kernel already calculated (Y/N) [ No ] : N<ENTER>
    # Type of system (0/1/2/3/4/5/6) [ 0 ] : 0<ENTER>
    # Zero condition at r=min (Y/N) [ Yes ] : Y<ENTER>
    # Zero condition at r=max (Y/N) [ Yes ] : Y<ENTER>
    # -- Arbitrary monodisperse system --
    # Rmin=0, Rmax is maximum particle diameter
    # Rmax for evaluating p(r) : <Rmax * 10><ENTER>
    # Number of points in real space [(always different)] : <Npoints_realspace><ENTER>
    # Kernel-storage file name [ kern.bin ] : <ENTER>
    # Experimental setup (0/1/2) [ 0 ] : 0<ENTER>
    # Initial ALPHA [ 0.0 ] : <initial_alpha><ENTER>
    # Plot alpha distribution (Y/N) [ Yes ] : N<ENTER>
    # Plot results (Y/N) [ Yes ] : N<ENTER>
    # ... solution ...
    # Your choice : <ENTER>
    # Evaluate errors (Y/N) [ Yes ] : Y<ENTER>
    # Plot p(r) with errors (Y/N) [ Yes ] : N<ENTER>
    # Next data set (Yes/No/Same) [ No ] : N<ENTER>
    # Rmax is multiplied by 10: the caller works in nm, GNOM in Angstrom.
    gnominput = "\n%s\n%s\n0\n\n0\n2\nN\n\nN\n0\nY\nY\n%f\n%s\n\n0\n%s\nN\nN\n\nY\nN\nN\n" % (
        os.path.join(td, 'curve.dat'), os.path.join(td, 'gnom.out'), 10 * Rmax, Npoints_realspace, initial_alpha)
    result = subprocess.run(['gnom'], stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                            input=gnominput.encode('utf-8'))
    pr, metadata = read_gnom_pr(os.path.join(td, 'gnom.out'), True)
    # Convert GNOM's Angstrom-based results back to nm-based units:
    # lengths (/10) and momentum-transfer values (*10).
    pr[:, 0] /= 10
    metadata['q'] *= 10
    metadata['qj'] *= 10
    metadata['qmin'] *= 10
    metadata['qmax'] *= 10
    metadata['dmax'] /= 10
    metadata['dmin'] /= 10
    metadata['Rg_guinier'] /= 10
    metadata['Rg_gnom'] /= 10
    if outputfilename is not None:
        # preserve the raw gnom.out before the temp dir is removed
        shutil.copy(os.path.join(td, 'gnom.out'), outputfilename)
return pr, metadata |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_account(netid, timestamp=None):
""" The Libraries object has a method for getting information about a user's library account """ |
response = _get_account(netid, timestamp=timestamp)
return _account_from_json(response) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def delete(self, list_uuid, uuid):
""" Delete one list.""" |
res = self.get(list_uuid, uuid)
url = "%(base)s/%(list_uuid)s/contacts/%(uuid)s" % {
'base': self.local_base_url,
'list_uuid': list_uuid,
'uuid': uuid
}
self.core.delete(url)
return res |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def find_plugin(value, key=DEFAULT_LOOKUP_KEY, conn=None):
""" get's the plugin matching the key and value example: find_plugin("plugin1", "ServiceName") => list of 0 or 1 item example: find_plugin("plugin1", "Name") => list of 0-to-many items :param value: :param key: <str> (default "Name") :param conn: :return: """ |
# cast to list to hide rethink internals from caller
result = list(RPC.filter({
key: value
}).run(conn))
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def sendAppliedToOwner(self, context={}):
""" Sent to project owner when user applies to a project """ |
# NOTE(review): re-invoking super().__init__ from a send method (to
# re-target the mail at the project owner, in the owner's locale) is an
# unusual lifecycle -- confirm it matches the base mailer's contract.
# NOTE(review): `self.async` -- `async` is a reserved keyword from
# Python 3.7, so this code must target an older Python.
super(ApplyMail, self).__init__(self.apply.project.owner.email, self.async, self.apply.project.owner.locale)
return self.sendEmail('volunteerApplied-ToOwner', 'New volunteer', context) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add(self, *files):
"""Add one or more files to the index running git-add.""" |
try:
_run_command(("git", "add") + files)
except CalledProcessError:
# Only if the command fails we check if the files
# exist, because git-add most of the time fails when
# the provided files are not found.
for f in files:
if not Path(f).exists():
raise FileNotFoundError(f"No such file or directory: {f}") |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def commit(self, message, files=None):
"""Run git-commit.""" |
if files:
self.add(*files)
return _run_command(["git", "commit", "-m", f'"{message}"']) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def log(self, _from=None, to=None):
"""Run git-log.""" |
command = ["git", "log"]
if _from:
to = "HEAD" if not to else to
revision_range = f"{_from}..{to}"
command.append(revision_range)
git_log_text = _run_command(command)
commit_text_lst = _extract_commit_texts(git_log_text)
return [Commit(commit_text) for commit_text in commit_text_lst] |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def tag(self, name=None):
"""Create and list tag objects running git-tag command""" |
command = ["git", "tag"]
if not name:
    # List mode: git returns tags oldest-first, one per line as
    # "<creation date>\t<tag name>".
    command.extend(
        [
            "-l",
            "--sort=creatordate",
            "--format=%(creatordate:short)%09%(refname:strip=2)",
        ]
    )
    command_output = _run_command(command).strip()
    if command_output == "":
        # repository has no tags
        return []
    tag_text_list = command_output.split("\n")
    tag_list = [Tag(text) for text in tag_text_list]
    # newest tag first
    return list(reversed(tag_list))
# Create mode: annotated tag with an empty message.
command.extend(["-a", name, "-m", '""'])
return _run_command(command) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register(self, prefix, viewset, base_name=None, router_class=None):
""" Append the given viewset to the proper registry. """ |
if base_name is None:
    base_name = self.get_default_base_name(viewset)
if router_class is not None:
    kwargs = {'trailing_slash': bool(self.trailing_slash)}
    single_object_router_classes = (
        AuthenticationRouter, SingleObjectRouter)
    if issubclass(router_class, single_object_router_classes):
        # single-object viewsets get their own dedicated router instance
        router = router_class(**kwargs)
        router.register(prefix, viewset, base_name=base_name)
        self._single_object_registry.append(router)
    # NOTE(review): a router_class that is not a single-object router
    # appears to be silently ignored -- confirm that is intended.
else:
    # plain registration on the default registry
    self.registry.append((prefix, viewset, base_name)) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_nested( self, parent_prefix, prefix, viewset, base_name=None, parent_lookup_name=None, depth_level=1 ):
""" Register a nested viewset wihtout worrying of instantiate a nested router for registry. """ |
kwargs = {
    'trailing_slash': bool(self.trailing_slash)
}
if parent_lookup_name is not None:
    kwargs.update(lookup=parent_lookup_name)
# Section for the depth of the route and add more routes
if depth_level > 1:
    # Find the already-registered nested router one level up whose
    # prefix matches this route's parent.
    routers = filter(
        lambda r: (r._depth_level == (depth_level - 1)) and
        r._nested_prefix == parent_prefix,
        self._nested_object_registry
    )
    try:
        parent_router = next(routers)
    except StopIteration:
        raise RuntimeError('parent registered resource not found')
else:
    # depth 1 nests directly under this router
    parent_router = self
nested_router = NestedSimpleRouter(
    parent_router,
    parent_prefix,
    **kwargs
)
# Bookkeeping consumed by the depth-level lookup above.
nested_router._nested_prefix = prefix
nested_router._depth_level = depth_level
nested_router.register(prefix, viewset, base_name)
self._nested_object_registry.append(nested_router) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_urls(self):
""" Generate the list of URL patterns including the registered single object routers urls. """ |
base_urls = super(SimpleRouter, self).get_urls()
single_urls = sum([r.urls for r in self._single_object_registry], [])
nested_urls = sum([r.urls for r in self._nested_object_registry], [])
return base_urls + single_urls + nested_urls |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_config(path):
"""parse either the config file we found, or use some canned defaults""" |
config = configparser.ConfigParser()
if path: # if user has config with user creds in it, this will grab it
config.read(path)
try:
return {k: v for k, v in config["default"].items()}
except KeyError:
return {} |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def lookup(path, cache=True, scope=None, safe=False):
"""Get element reference from input element. The element can be a builtin/globals/scope object or is resolved from the current execution stack. :limitations: it does not resolve class methods or static values such as True, False, numbers, string and keywords. :param str path: full path to a python element. :param bool cache: if True (default), permits to reduce time complexity for lookup resolution in using cache memory to save resolved elements. :param dict scope: object scrope from where find path. For example, this scope can be locals(). Default is globals(). :param bool safe: use lookup in a safe context. A safe context avoid to reach builtins function with I/O consequences. :return: python object which is accessible through input path or raise an exception if the path is wrong. :rtype: object :raises ImportError: if path is wrong """ |
result = None
# Fast path: resolution cached by a previous call.
found = path and cache and path in __LOOKUP_CACHE
if found:
    result = __LOOKUP_CACHE[path]
elif path:
    _eval = safe_eval if safe else eval
    try:  # search among scope
        result = _eval(path, scope)
    except (NameError, SyntaxError):
        # we generate a result in order to accept the result such as a None
        # (random() acts as a unique sentinel: a legitimate None result
        # must not be confused with "not found")
        generated_result = random()
        result = generated_result
        components = path.split('.')
        index = 0
        components_len = len(components)
        module_name = components[0]
        # try to resolve an absolute path
        try:
            result = import_module(module_name)
        except ImportError:
            # resolve element globals or locals of the from previous frame
            previous_frame = currentframe().f_back
            if module_name in previous_frame.f_locals:
                result = previous_frame.f_locals[module_name]
            elif module_name in previous_frame.f_globals:
                result = previous_frame.f_globals[module_name]
        found = result is not generated_result
        if found:
            if components_len > 1:
                index = 1
                # try to import all sub-modules/packages
                try:  # check if name is defined from an external module
                    # find the right module
                    while index < components_len:
                        module_name = '{0}.{1}'.format(
                            module_name, components[index]
                        )
                        result = import_module(module_name)
                        index += 1
                except ImportError:
                    # path sub-module content: fall back to attribute
                    # traversal from the deepest importable module
                    try:
                        if PY26:  # when __import__ is used
                            index = 1  # restart count of pathing
                        while index < components_len:
                            result = getattr(result, components[index])
                            index += 1
                    except AttributeError:
                        raise ImportError(
                            'Wrong path {0} at {1}'.format(
                                path, components[:index]
                            )
                        )
                else:  # in case of PY26
                    if PY26:
                        index = 1
                        while index < components_len:
                            result = getattr(result, components[index])
                            index += 1
    else:
        # eval succeeded: path resolved directly from scope/builtins
        found = True
    if found:
        if cache:  # save in cache if found
            __LOOKUP_CACHE[path] = result
    else:
        raise ImportError('Wrong path {0}'.format(path))
return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def getpath(element):
"""Get full path of a given element such as the opposite of the resolve_path behaviour. :param element: must be directly defined into a module or a package and has the attribute '__name__'. :return: element absolute path. :rtype: str :raises AttributeError: if element has not the attribute __name__. :Example: b3j0f.utils.path.getpath """ |
if not hasattr(element, '__name__'):
raise AttributeError(
'element {0} must have the attribute __name__'.format(element)
)
result = element.__name__ if ismodule(element) else \
'{0}.{1}'.format(element.__module__, element.__name__)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def register_eventclass(event_id):
"""Decorator for registering event classes for parsing """ |
def register(cls):
if not issubclass(cls, Event):
raise MessageException(('Cannot register a class that'
' is not a subclass of Event'))
EVENT_REGISTRY[event_id] = cls
logger.debug('######### Event registry is now: {0}'.format(
EVENT_REGISTRY))
return cls
return register |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def json_minify(string, strip_space=True):
# pragma: no cover """Removes whitespace from json strings, returning the string """ |
# State machine over TOKENIZER matches: tracks whether we are inside a
# JSON string, a /* */ comment, or a // comment, and copies through
# only the bytes that belong in minified output.
in_string = False
in_multi = False
in_single = False
new_str = []
index = 0  # end of the last processed token in `string`
for match in re.finditer(TOKENIZER, string):
    if not (in_multi or in_single):
        # Text between the previous token and this one.
        tmp = string[index:match.start()]
        if not in_string and strip_space:
            # replace white space as defined in standard
            tmp = re.sub('[ \t\n\r]+', '', tmp)
        new_str.append(tmp)
    index = match.end()
    val = match.group()
    if val == '"' and not (in_multi or in_single):
        # Count trailing backslashes before the quote to decide whether
        # it is escaped (odd count) or really toggles string state.
        escaped = END_SLASHES_RE.search(string, 0, match.start())
        # start of string or unescaped quote character to end string
        if not in_string or (escaped is None or len(escaped.group()) % 2 == 0):
            in_string = not in_string
        index -= 1  # include " character in next catch
    elif not (in_string or in_multi or in_single):
        if val == '/*':
            in_multi = True
        elif val == '//':
            in_single = True
    elif val == '*/' and in_multi and not (in_string or in_single):
        in_multi = False
    elif val in '\r\n' and not (in_multi or in_string) and in_single:
        # A newline terminates a // comment.
        in_single = False
    elif not ((in_multi or in_single) or (val in ' \r\n\t' and strip_space)):
        new_str.append(val)
# Trailing text after the last token.
new_str.append(string[index:])
return ''.join(new_str)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def resolve_template(template):
    "Accepts a template object, path-to-template or list of paths"
    # A list/tuple of candidate paths: let the loader pick the first
    # template that exists.
    if isinstance(template, (list, tuple)):
        return loader.select_template(template)
    # A single path (Python 2 ``basestring`` covers str and unicode).
    elif isinstance(template, basestring):
        try:
            return loader.get_template(template)
        except TemplateDoesNotExist:
            # A missing template is not fatal here; the caller handles None.
            return None
    else:
        # Already a template object -- pass it through unchanged.
        return template
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def value(self, value, *args, **kwargs):
""" Takes a string value and returns the Date based on the format """ |
from datetime import datetime
value = self.obj.value(value, *args, **kwargs)
try:
rv = datetime.strptime(value, self.format)
except ValueError as _: # noqa
rv = None
return rv |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _interpret_framer(self, args, kwargs):
""" Interprets positional and keyword arguments related to framers. :param args: A tuple of positional arguments. The first such argument will be interpreted as a framer object, and the second will be interpreted as a framer state. :param kwargs: A dictionary of keyword arguments. The ``send`` and ``recv`` keyword arguments are interpreted as send and receive framers, respectively, and the ``send_state`` and ``recv_state`` keyword arguments are interpreted as states for those framers. :returns: An instance of ``FramerElement``, which may be pushed onto the framer stack. """ |
# Validate the calling convention: exactly one of positional or
# keyword style must be used.
# Cannot specify both positional and keyword arguments, but
# must provide one or the other
if not args and not kwargs:
    raise exc.InvalidFramerSpecification(
        "No framers specified")
elif args and kwargs:
    raise exc.InvalidFramerSpecification(
        "Cannot mix positional and keyword framer specifications")
# Start with the current send and receive framers
send = self._send_framer
recv = self._recv_framer
send_state = self._send_state
recv_state = self._recv_state
# Now, is it positional style?
if args:
    # Positional style: one framer (and optional state) shared by both
    # directions.
    send = args[0]
    recv = args[0]
    # Do we have a state?
    if len(args) > 1:
        send_state = args[1]
        recv_state = args[1]
    else:
        # Allocate one
        state = framers.FramerState()
        # Initialize it
        send.initialize_state(state)
        send_state = state
        recv_state = state
else:
    # OK, it's keyword style; do we have a send framer?
    if 'send' in kwargs:
        send = kwargs['send']
        # Do we have a send state?
        if 'send_state' in kwargs:
            send_state = kwargs['send_state']
        else:
            # Allocate one and initialize it
            send_state = framers.FramerState()
            send.initialize_state(send_state)
    # How about a receive framer?
    if 'recv' in kwargs:
        recv = kwargs['recv']
        # Do we have a recv state?
        if 'recv_state' in kwargs:
            recv_state = kwargs['recv_state']
        else:
            # Allocate one and initialize it
            recv_state = framers.FramerState()
            recv.initialize_state(recv_state)
# Create and return a FramerElement
return FramerElement(send, recv, send_state, recv_state)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connection_made(self, transport):
""" Called by the underlying transport when a connection is made. :param transport: The transport representing the connection. """ |
# Remember the real transport, then announce ourselves to the client
# protocol as *its* transport so frames flow through this wrapper.
self._transport = transport
self._client.connection_made(self)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def data_received(self, data):
""" Called by the underlying transport when data is received. :param data: The data received on the connection. """ |
# First, add the data to the receive buffer
self._recv_buf += data
# Now, pass all frames we can find to the client protocol
# NOTE(review): this loop assumes to_frame() consumes the parsed bytes
# from self._recv_buf in place (i.e. the buffer is mutable) -- if it
# did not, the loop would never terminate; confirm against the framer
# contract.
while self._recv_buf and not self._recv_paused:
    try:
        # Extract one frame
        frame = self._recv_framer.to_frame(self._recv_buf,
                                           self._recv_state)
    except exc.NoFrames:
        # There's data in the buffer, but no complete frames
        break
    # Now call the client protocol's frame_received() method
    self._client.frame_received(frame)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_extra_info(self, name, default=None):
""" Called by the client protocol to return optional transport information. Information requests not recognized by the ``FramerProtocol`` are passed on to the underlying transport. The values of ``name`` recognized directly by ``FramerProtocol`` are: =============== ============================================ Value Description =============== ============================================ send_framer The active framer for the send direction. send_state The state for the send framer. recv_framer The active framer for the receive direction. recv_state The state for the receive framer. recv_buf The current receive buffer. recv_paused ``True`` if reading is paused. client_protocol The client ``FramedProtocol``. transport The underlying transport. =============== ============================================ :param name: A string representing the piece of transport-specific information to get. :param default: The value to return if the information doesn't exist. :returns: The requested data. """ |
# Handle data we know about
if name in self._handlers:
return self._handlers[name](self)
# Call get_extra_info() on the transport
return self._transport.get_extra_info(name, default=default) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def set_write_buffer_limits(self, high=None, low=None):
""" Called by the client protocol to set the high- and low-water limits for write flow control. These two values control when call the protocol's ``pause_writing()`` and ``resume_writing()`` methods are called. :param high: The high-water limit. Must be a non-negative integer greater than or equal to ``low``, if both are specified. :param low: The low-water limit. Must be a non-negative integer less than or equal to ``high``, if both are specified. If only ``high`` is specified, defaults to an implementation-specific value less than or equal to ``high``. """ |
# Pure delegation: write flow-control thresholds live on the real
# transport.
self._transport.set_write_buffer_limits(high=high, low=low)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def send_frame(self, frame):
""" Called by the client protocol to send a frame to the remote peer. This method does not block; it buffers the data and arranges for it to be sent out asynchronously. :param frame: The frame to send to the peer. Must be in the format expected by the currently active send framer. """ |
# Serialize the frame with the active send framer, then hand the raw
# bytes to the underlying transport (non-blocking, buffered).
encoded = self._send_framer.to_bytes(frame, self._send_state)
self._transport.write(encoded)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def menu():
""" Return global menu composed from all modules menu. This method will compose the global menu by calling menu() function for module, it should be located under module_path.menu module """ |
# Root of the composed menu tree; each installed module grafts its own
# entries onto it.
root = MenuItem('')
for mod in droplet.modules():
    if mod.installed:
        # Derive the module's package path (strip the class's module leaf).
        module_path = mod.__class__.__module__.rsplit('.', 1)[0]
        # NOTE(review): import_module() raises ImportError rather than
        # returning None when <package>.menu is missing, so the guard
        # below never skips -- confirm whether a try/except was intended.
        menu = import_module(module_path + '.menu')
        if menu:
            menu.menu(root)
return root
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def append(self, item):
""" Add the given item as children """ |
if self.url:
raise TypeError('Menu items with URL cannot have childrens')
# Look for already present common node
if not item.is_leaf():
for current_item in self.items:
if item.name == current_item.name:
for children in item.items:
current_item.append(children)
return
# First insertion
self.items.append(item) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def doc_id(self):
"""Returns the couchbase document's id, object property. :returns: The document id (that is created from :attr:'doc_type' and :attr:'__key_field__' value, or auto-hashed document id at first saving). :rtype: unicode """ |
if self.id:
return '%s_%s' % (self.doc_type, self.id.lower())
return self._hashed_key |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def touch(self, expiration):
"""Updates the current document's expiration value. :param expiration: Expiration in seconds for the document to be removed by couchbase server, defaults to 0 - will never expire. :type expiration: int :returns: Response from CouchbaseClient. :rtype: unicode :raises: :exc:'cbwrapper.errors.DoesNotExist' or :exc:'couchbase.exception.TemporaryFailError' """ |
if not self.cas_value or not self.doc_id:
raise self.DoesNotExist(self)
return self.bucket.touch(self.doc_id, expiration) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_description(status_code):
""" Get the description for a status code. """ |
description = _descriptions.get(status_code)
if description is None:
description = 'code = %s (no description)' % str(status_code)
return description |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def metered_meta(metrics, base=type):
"""Creates a metaclass that will add the specified metrics at a path parametrized on the dynamic class name. Prime use case is for base classes if all subclasses need separate metrics and / or the metrics need to be used in base class methods, e.g., Tornado's ``RequestHandler`` like:: import tapes import tornado import abc registry = tapes.Registry() class MyCommonBaseHandler(tornado.web.RequestHandler):
__metaclass__ = metered_meta([ ('latency', 'my.http.endpoints.{}.latency', registry.timer) ], base=abc.ABCMeta) @tornado.gen.coroutine def get(self, *args, **kwargs):
with self.latency.time():
yield self.get_impl(*args, **kwargs) @abc.abstractmethod def get_impl(self, *args, **kwargs):
pass class MyImplHandler(MyCommonBaseHandler):
@tornado.gen.coroutine def get_impl(self, *args, **kwargs):
self.finish({'stuff': 'something'}) class MyOtherImplHandler(MyCommonBaseHandler):
@tornado.gen.coroutine def get_impl(self, *args, **kwargs):
self.finish({'other stuff': 'more of something'}) This would produce two different relevant metrics, - ``my.http.endpoints.MyImplHandler.latency`` - ``my.http.endpoints.MyOtherImplHandler.latency`` and, as an unfortunate side effect of adding it in the base class, a ``my.http.endpoints.MyCommonBaseHandler.latency`` too. :param metrics: list of (attr_name, metrics_path_template, metrics_factory) :param base: optional meta base if other than `type` :return: a metaclass that populates the class with the needed metrics at paths based on the dynamic class name """ |
class _MeteredMeta(base):
    # Metaclass that injects one metric attribute per
    # (attr_name, path_template, factory) triple, with the metric path
    # parametrized on the name of the class being created.
    def __new__(meta, name, bases, dict_):
        # Copy so we never mutate the caller-supplied class dict.
        new_dict = dict(**dict_)
        for attr_name, template, factory in metrics:
            # e.g. latency = registry.timer('my.path.<ClassName>.latency')
            new_dict[attr_name] = factory(template.format(name))
        return super(_MeteredMeta, meta).__new__(meta, name, bases, new_dict)
return _MeteredMeta
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def make_action_list(self, item_list, **kwargs):
    ''' Generates a list of actions for sending to Elasticsearch '''
    action_list = []
    # kwargs may override the instance-level index/doc_type defaults.
    es_index = get2(kwargs, "es_index", self.es_index)
    action_type = kwargs.get("action_type","index")
    action_settings = {'_op_type': action_type,
                       '_index': es_index}
    doc_type = kwargs.get("doc_type", self.doc_type)
    if not doc_type:
        doc_type = "unk"  # "unknown" fallback type
    # Field to pull each item's document id from (optional).
    id_field = kwargs.get("id_field")
    for item in item_list:
        action = get_es_action_item(item,
                                    action_settings,
                                    doc_type,
                                    id_field)
        action_list.append(action)
    return action_list
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def bulk_save(self, action_list, **kwargs):
    ''' sends a passed in action_list to elasticsearch '''
    lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
    lg.setLevel(self.log_level)
    err_log = logging.getLogger("index.errors")
    es = self.es
    # NOTE(review): es_index/reset_index/doc_type are computed but not
    # used below -- confirm whether they were meant to be passed to
    # helpers.bulk().
    es_index = get2(kwargs, "es_index", self.es_index)
    reset_index = kwargs.get("reset_index",self.reset_index)
    doc_type = kwargs.get("doc_type", self.doc_type)
    lg.info("Sending %s items to Elasticsearch",len(action_list))
    # bulk_stream = helpers.streaming_bulk(es,
    result = helpers.bulk(es,
                          action_list,
                          chunk_size=400,
                          raise_on_error=False)
    lg.info("FINISHED sending to Elasticsearch")
    # result is (success_count, error_list); log up to the first five
    # errors paired with the action that produced each of them.
    if result[1]:
        lg.info("Formating Error results")
        # action_keys = {item['_id']:i for i, item in enumerate(action_list)}
        new_result = []
        for item in result[1][:5]:
            for action_item in action_list:
                # Each error item is keyed by its op type; match on _id.
                if action_item['_id'] == item[list(item)[0]]['_id']:
                    new_result.append((item, action_item,))
                    break
        err_log.info("Results for batch '%s'\n(%s,\n%s\n%s)",
                     kwargs.get('batch', "No Batch Number provided"),
                     result[0],
                     json.dumps(new_result, indent=4),
                     json.dumps(result[1]))
        del new_result
        lg.info("Finished Error logging")
    # for success, result in bulk_stream:
    #     lg.debug("\nsuccess: %s \nresult:\n%s", success, pp.pformat(result))
    return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_list(self, method="list", **kwargs):
returns a key value list of items based on the specified criteria
""" |
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
args = inspect.getargvalues(inspect.currentframe())[3]
lg.debug("\n****** Args *****:\n%s",
         pp.pformat(args))
# Resolve per-call overrides against instance defaults.
es = kwargs.get("es",self.es)
doc_type = get2(kwargs, "doc_type", self.doc_type)
id_field = get2(kwargs, "id_field", "_id")
value_fld = kwargs.get("value_fld")
fields = kwargs.get("fields")
sort_dir = get2(kwargs,"sort_dir", "asc")
sort_fields = get2(kwargs,"sort_fields", get2(kwargs, "fields", [value_fld]))
size = get2(kwargs,"size",2000)
# Escape forward slashes for the ES query string syntax.
term = get2(kwargs,"term",'').replace("/","//")
filter_field = kwargs.get('filter_field')
filter_value = kwargs.get('filter_value')
dsl = {}
# set return to only return the fields specified or return the whole
# document if not specified
if fields is not None:
    dsl["_source"] = fields
elif value_fld is not None:
    dsl["_source"] = [value_fld]
    fields = [value_fld]
else:
    fields = []
# set query parameters based on the return method "list" or "search"
if sort_dir != "none" and method == "list":
    dsl["sort"] = []
    for fld in sort_fields:
        if fld is not None:
            dsl["sort"].append({ fld: sort_dir })
if method == "search":
    # query in elasticsearch breaks if the is a single open parenthesis
    # remove a single parenthesis from the search term
    if "(" in term and ")" not in term:
        search_term = term.replace("(", "")
    else:
        search_term = term
    size = 5  # search mode returns only top matches
    dsl['query'] = {
        "bool": {
            "should": [
                {
                    "query_string" : {
                        "analyze_wildcard": {
                            "query": "*%s*" % search_term
                        }
                    }
                },
                {
                    # Boosted variant matching the requested fields.
                    "query_string" : {
                        "query": "*%s*" % search_term,
                        "analyzer": "default",
                        "analyze_wildcard": True,
                        "fields": fields,
                        "boost": 10
                    }
                }
            ]
        }
    }
else:
    pass
# Optional exact-match filter on a single field.
if filter_value:
    dsl['filter'] = {
        "term": { filter_field: filter_value }
    }
lg.info("\n-------- size: %s\ndsl:\n%s", size, json.dumps(dsl,indent=4))
result = es.search(index=self.es_index,
                   size=size,
                   doc_type=doc_type,
                   body=dsl)
# Optional post-processing of the raw ES response.
if kwargs.get("calc"):
    result = self._calc_result(result, kwargs['calc'])
lg.debug(pp.pformat(result))
return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def authorize_role(self, role, protocol, from_port, to_port, cidr_ip):
""" Authorize access to machines in a given role from a given network. """ |
# Only TCP and UDP rules are supported.
if protocol not in ('tcp', 'udp'):
    raise RuntimeError('error: expected protocol to be tcp or udp '
                       'but got %s' % (protocol))
self._check_role_name(role)
group = self._group_name_for_role(role)
# Revoke first to avoid InvalidPermission.Duplicate error
self.ec2.revoke_security_group(group,
                               ip_protocol=protocol,
                               from_port=from_port,
                               to_port=to_port,
                               cidr_ip=cidr_ip)
self.ec2.authorize_security_group(group,
                                  ip_protocol=protocol,
                                  from_port=from_port,
                                  to_port=to_port,
                                  cidr_ip=cidr_ip)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def check_running(self, role, number):
""" Check that a certain number of instances in a role are running. """ |
instances = self.get_instances_in_role(role, "running")
if len(instances) != number:
print "Expected %s instances in role %s, but was %s %s" % \
(number, role, len(instances), instances)
return False
else:
return instances |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_instances_in_role(self, role, state_filter=None):
""" Get all the instances in a role, filtered by state. @param role: the name of the role @param state_filter: the state that the instance should be in (e.g. "running"), or None for all states """ |
self._check_role_name(role)
instances = []
for instance in self._get_instances(self._group_name_for_role(role),
state_filter):
instances.append(Instance(instance.id, instance.dns_name,
instance.private_dns_name,
instance.private_ip_address))
return instances |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get_instances(self, state_filter=None):
""" Get all the instances filtered by state. @param state_filter: the state that the instance should be in (e.g. "running"), or None for all states """ |
instances = []
for instance in self._get_instances(self._get_cluster_group_name(),
state_filter):
instances.append(Instance(instance.id, instance.dns_name,
instance.private_dns_name,
instance.private_ip_address))
return instances |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def print_status(self, roles=None, state_filter="running"):
""" Print the status of instances in the given roles, filtered by state. """ |
# With explicit roles, print per-role; otherwise dump every instance
# in the cluster group with an empty role label.
if roles:
    for role in roles:
        for instance in self._get_instances(
                self._group_name_for_role(role), state_filter):
            self._print_instance(role, instance)
else:
    for instance in self._get_instances(
            self._get_cluster_group_name(), state_filter):
        self._print_instance("", instance)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def CreateBlockDeviceMap(self, image_id, instance_type):
""" If you launch without specifying a manual device block mapping, you may not get all the ephemeral devices available to the given instance type. This will build one that ensures all available ephemeral devices are mapped. """ |
# get the block device mapping stored with the image
image = self.ec2.get_image(image_id)
block_device_map = image.block_device_mapping
assert(block_device_map)
# update it to include the ephemeral devices
# max is 4... is it an error for instances with fewer than 4 ?
# see: http://docs.aws.amazon.com/AWSEC2/latest/UserGuide/
# InstanceStorage.html#StorageOnInstanceTypes
ephemeral_device_names = ['/dev/sdb', '/dev/sdc', '/dev/sdd', '/dev/sde']
for i, device_name in enumerate(ephemeral_device_names):
name = 'ephemeral%d' % (i)
bdt = blockdevicemapping.BlockDeviceType(ephemeral_name = name)
block_device_map[device_name] = bdt
return block_device_map |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _create_security_groups(self, role):
""" Create the security groups for a given role, including a group for the cluster if it doesn't exist. """ |
self._check_role_name(role)
security_group_names = self._get_all_group_names()
cluster_group_name = self._get_cluster_group_name()
# Create the cluster-wide group on first use: members may talk to each
# other freely, and SSH is opened to the world.
if not cluster_group_name in security_group_names:
    self.ec2.create_security_group(cluster_group_name,
                                   "Cluster (%s)" % (self._name))
    # Allow all traffic between members of the cluster group.
    self.ec2.authorize_security_group(cluster_group_name,
                                      cluster_group_name)
    # Allow SSH from anywhere
    self.ec2.authorize_security_group(cluster_group_name,
                                      ip_protocol="tcp",
                                      from_port=22, to_port=22,
                                      cidr_ip="0.0.0.0/0")
# Per-role group is created empty; rules are added via authorize_role().
role_group_name = self._group_name_for_role(role)
if not role_group_name in security_group_names:
    self.ec2.create_security_group(role_group_name,
                                   "Role %s (%s)" % (role, self._name))
return
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _delete_security_groups(self):
""" Delete the security groups for each role in the cluster, and the group for the cluster. """ |
# Remove every security group that belongs to this cluster, the
# per-role groups as well as the cluster-wide one.
for group_name in self._get_all_group_names_for_cluster():
    self.ec2.delete_security_group(group_name)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def serialize(expr):
"""Serialize input expr into a parsable value. :rtype: str""" |
result = None
if isinstance(expr, string_types):
result = expr
elif expr is not None:
result = '=py:{0}'.format(expr)
return result |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse( svalue, conf=None, configurable=None, ptype=None, scope=DEFAULT_SCOPE, safe=DEFAULT_SAFE, besteffort=DEFAULT_BESTEFFORT ):
"""Parser which delegates parsing to expression or format parser.""" |
result = None
if ptype is None:
    ptype = object
# Decide which parser to use: values matching the expression pattern
# (e.g. "=py:...") go through the expression evaluator, everything
# else through the plain string parser.
compilation = REGEX_EXPR.match(svalue)
# Copy the scope so parsers cannot mutate the caller's mapping.
_scope = {} if scope is None else scope.copy()
if compilation:
    lang, expr = compilation.group('lang', 'expr')
    result = _exprparser(
        expr=expr, lang=lang, conf=conf, configurable=configurable,
        scope=_scope, safe=safe, besteffort=besteffort
    )
else:
    result = _strparser(
        svalue=svalue, conf=conf, configurable=configurable,
        scope=_scope, safe=safe, besteffort=besteffort
    )
# try to cast value in ptype
if not isinstance(result, ptype):
    try:
        result = ptype(result)
    except TypeError:
        # Deliberate best-effort: keep the uncast value on failure.
        result = result
return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _exprparser( expr, scope, lang=None, conf=None, configurable=None, safe=DEFAULT_SAFE, besteffort=DEFAULT_BESTEFFORT, tostr=False ):
"""In charge of parsing an expression and return a python object.""" |
if scope is None:
    scope = {}
# Expose the configuration objects to the evaluated expression.
scope.update({
    'configurable': configurable,
    'conf': conf
})
# Substitute parameter references (REGEX_EXPR_R matches) with their
# resolved values before handing the expression to the interpreter.
expr = REGEX_EXPR_R.sub(
    _refrepl(
        configurable=configurable, conf=conf, safe=safe, scope=scope,
        besteffort=besteffort
    ), expr
)
# Evaluate the rewritten expression in the requested language/scope.
result = resolve(
    expr=expr, name=lang, safe=safe, scope=scope, tostr=tostr,
    besteffort=besteffort
)
return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _ref( pname, conf=None, configurable=None, cname=None, path=None, history=0 ):
"""Resolve a parameter value. :param Configuration conf: configuration to use. :param str pname: parameter name. :param Configurable configurable: configurable. :param str cname: category name. :param str path: conf path. :param int history: parameter history research. :return: parameter. :raises: ParserError if conf and configurable are None. """ |
result = None
if configurable is not None:
    kwargs = {}
    # NOTE(review): kwargs['conf'] is only populated when conf is not
    # None, yet getconf() below only runs when conf *is* None -- the
    # 'conf' entry looks unreachable; confirm against getconf's
    # signature.
    if conf is not None:
        kwargs['conf'] = conf
    if path is not None:
        kwargs['paths'] = path
    if conf is None:
        conf = configurable.getconf(**kwargs)
# Neither an explicit conf nor a configurable to derive one from.
if conf is None:
    raise ParserError(
        'Wrong ref parameters. Conf and configurable are both None.'
    )
result = conf.param(pname=pname, cname=cname, history=history)
return result
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def mount(dev, mountpoint, flags='', log=None):
    '''Mount the given dev to the given mountpoint by using the given flags'''
    # mount(8) requires an existing directory, so create it first.
    ensureDirectory(mountpoint)
    systemCall('mount %s %s %s' % (flags, dev, mountpoint),
               log=log)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
| def systemCall(cmd, sh=True, log=None):
    '''Fancy magic version of os.system'''
    if log is None:
        log = logging
    log.debug('System call [sh:%s]: %s' \
        % (sh, cmd))
    out = []       # collected stdout lines, returned to the caller
    proc = None
    poller = None
    # One-element lists so the closure below can mutate the buffers.
    outBuf = ['']
    errBuf = ['']
    def pollOutput():
        '''
        Read, log and store output (if any) from processes pipes.
        '''
        # NOTE(review): removeChars is never used -- dead local.
        removeChars = '\r\n'
        # collect fds with new output
        fds = [entry[0] for entry in poller.poll()]
        if proc.stdout.fileno() in fds:
            while True:
                try:
                    # Non-blocking read; IOError means no data right now.
                    tmp = proc.stdout.read(100)
                except IOError:
                    break
                outBuf[0] += tmp
                # Emit complete lines; keep the partial tail buffered.
                while '\n' in outBuf[0]:
                    line, _, outBuf[0] = outBuf[0].partition('\n')
                    log.debug(line)
                    out.append(line + '\n')
                if not tmp:
                    break
        if proc.stderr.fileno() in fds:
            while True:
                try:
                    tmp = proc.stderr.read(100)
                except IOError:
                    break
                errBuf[0] += tmp
                # stderr is only logged, never returned.
                while '\n' in errBuf[0]:
                    line, _, errBuf[0] = errBuf[0].partition('\n')
                    log.warning(line)
                if not tmp:
                    break
    while True:
        if proc is None:
            # create and start process
            proc = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=sh)
            # create poll select
            poller = select.poll()
            # Switch both pipes to non-blocking so read() never stalls.
            flags = fcntl.fcntl(proc.stdout, fcntl.F_GETFL)
            fcntl.fcntl(proc.stdout, fcntl.F_SETFL, flags| os.O_NONBLOCK)
            flags = fcntl.fcntl(proc.stderr, fcntl.F_GETFL)
            fcntl.fcntl(proc.stderr, fcntl.F_SETFL, flags| os.O_NONBLOCK)
            # register pipes to polling
            poller.register(proc.stdout, select.POLLIN)
            poller.register(proc.stderr, select.POLLIN)
        pollOutput()
        if proc.poll() is not None:  # proc finished
            break
    # poll once after the process ended to collect all the missing output
    pollOutput()
    # check return code
    if proc.returncode != 0:
        raise RuntimeError(
            CalledProcessError(proc.returncode, cmd, ''.join(out))
        )
    return ''.join(out)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def chrootedSystemCall(chrootDir, cmd, sh=True, mountPseudoFs=True, log=None):
    '''Chrooted version of systemCall. Manages necessary pseudo filesystems.

    :param chrootDir: root directory of the chroot environment.
    :param cmd: command line to run inside the chroot.
    :param sh: forwarded to systemCall (run via shell).
    :param mountPseudoFs: mount /proc, /sys and /dev into the chroot before
        running, and always unmount them again afterwards.
    :param log: logger; defaults to the global conduct application log.
    :return: stdout of the chrooted command (see systemCall).
    '''
    if log is None:
        log = conduct.app.log
    # determine mount points for pseudo fs
    proc = path.join(chrootDir, 'proc')
    # NOTE(review): local name 'sys' shadows the stdlib module within this
    # function — confirm nothing below relies on the sys module
    sys = path.join(chrootDir, 'sys')
    dev = path.join(chrootDir, 'dev')
    devpts = path.join(chrootDir, 'dev', 'pts')
    # mount pseudo fs
    if mountPseudoFs:
        mount('proc', proc, '-t proc')
        mount('/sys', sys, '--rbind')
        mount('/dev', dev, '--rbind')
    try:
        # exec chrooted cmd
        log.debug('Execute chrooted command ...')
        cmd = 'chroot %s %s' % (chrootDir, cmd)
        return systemCall(cmd, sh, log)
    finally:
        # umount if pseudo fs was mounted (runs even when systemCall raises)
        if mountPseudoFs:
            # handle devpts
            if path.exists(devpts):
                umount(devpts, '-lf')
            # lazy is ok for pseudo fs
            umount(dev, '-lf')
            umount(sys, '-lf')
            umount(proc, '-lf')
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def extensions(self, component):
    """Return the components declaring to implement this extension point's
    interface, filtered down to the ones that are actually enabled."""
    registered = ComponentMeta._registry.get(self.interface, ())
    instances = (component.compmgr[cls] for cls in registered)
    return [impl for impl in instances if impl]
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def implements(*interfaces):
    """Declare, inside a `Component` subclass body, which extension-point
    interfaces the class extends."""
    import sys
    frame = sys._getframe(1)
    caller_locals = frame.f_locals
    # Guard against use outside a class body: a class body has its own
    # locals (distinct from module globals) and always defines __module__.
    assert caller_locals is not frame.f_globals and '__module__' in caller_locals, \
        'implements() can only be used in a class definition'
    caller_locals.setdefault('_implements', []).extend(interfaces)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def is_enabled(self, cls):
    """Return whether the given component class is enabled, caching the
    answer in ``self.enabled``."""
    try:
        return self.enabled[cls]
    except KeyError:
        flag = self.is_component_enabled(cls)
        self.enabled[cls] = flag
        return flag
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def disable_component(self, component):
    """Force a component to be disabled.

    :param component: a component class or an instance of one.
    """
    cls = component if isinstance(component, type) else component.__class__
    self.enabled[cls] = False
    self.components[cls] = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def size(self):
    """Return the file size in bytes, preserving the current position."""
    original_pos = self.__file.tell()
    try:
        self.__file.seek(0, 2)      # jump to end of file
        return self.__file.tell()   # byte count == EOF offset
    finally:
        self.__file.seek(original_pos)  # restore previous position
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def prev(self, n=1):
    """Return the previous *n* records relative to the current position.

    :param n: number of records to step back (default 1); must be > 0.
    :return: one record when n == 1, otherwise a list of records.
    """
    # index of the record n steps back from the current one
    position = abs(self.tell - n)
    return self.get(position, n)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def get(self, i, n=1):
    """Read *n* records starting at record index *i*.

    Moves the underlying file pointer.

    :param i: record index in the file.
    :param n: how many records to read (default 1).
    :return: one unpacked record when n == 1, else a list of them;
        None when the file is empty, [] when n < 1.
    """
    if self.size == 0:
        # nothing stored yet
        return None
    if n < 1:
        return []
    # seek to the byte offset of the ith record
    self.__file.seek(i * self.__strct.size)
    # unpack each raw chunk; collapse 1-tuples to their single value
    records = [self.unpack(chunk) for chunk in self.raw(n)]
    records = [rec if len(rec) > 1 else rec[0] for rec in records]
    return records[0] if n == 1 else records
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def last(self):
    """Return the final record stored in the file."""
    # position at EOF first (get() seeks to the record offset itself,
    # but the historical behaviour is preserved)
    self.__file.seek(0, 2)
    return self.get(self.length - 1)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def append(self, value):
    """Pack *value* and write it at the end of the file.

    :param value: tuple of field values to pack and store.
    """
    packed = self.pack(value)
    self.__file.seek(0, 2)  # jump to end of file
    self.__file.write(packed)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def write(self, i, value):
    """Pack *value* and write it at record index *i*.

    :param i: record index in the file.
    :param value: tuple of field values to pack.
    """
    # seek to the byte offset of the ith record, then overwrite it
    self.__file.seek(i * self.__strct.size)
    packed = self.pack(value)
    self.__file.write(packed)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _inspectArguments(self, args):
"""inspect the command-line-args and give them to appBase""" |
if args:
self.exec_path = PathStr(args[0])
else:
self.exec_path = None
session_name = None
args = args[1:]
openSession = False
for arg in args:
if arg in ('-h', '--help'):
self._showHelp()
elif arg in ('-d', '--debug'):
print('RUNNGING IN DEBUG-MODE')
self.opts['debugMode'] = True
elif arg in ('-l', '--log'):
print('CREATE LOG')
self.opts['createLog'] = True
elif arg in ('-s', '--server'):
self.opts['server'] = True
elif arg in ('-o', '--open'):
openSession = True
elif openSession:
session_name = arg
else:
print("Argument '%s' not known." % arg)
return self._showHelp()
return session_name |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def save(self):
    """Save the current session.

    Saves in place when a path is already known, otherwise falls back to
    the save-as dialog.
    """
    if not self.path:
        self.saveAs()
    else:
        self._saveState(self.path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def open(self):
    """Ask the user for a session file and open it as a new session."""
    chosen = self.dialogs.getOpenFileName(filter="*.%s" % self.FTYPE)
    if chosen:
        self.new(chosen)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def new(self, filename=None):
    """Launch a new, independent session process.

    :param filename: optional session file to open in the new process.
    """
    launch_args = (self.exec_path,)
    if self.exec_path.filetype() in ('py', 'pyw', 'pyz', self.FTYPE):
        # running from source: prepend the absolute path of the python
        # interpreter
        interpreter = find_executable("python")
        launch_args = (interpreter, 'python') + launch_args
    else:
        # frozen environment (.exe): the first argument is the exec path
        # of the next session
        launch_args = launch_args + (self.exec_path,)
    if filename:
        launch_args = launch_args + ('-o', filename)
    os.spawnl(os.P_NOWAIT, *launch_args)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _saveState(self, path):
"""save current state and add a new state""" |
self.addSession() # next session
self._save(str(self.n_sessions), path) |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _autoSave(self):
    """Save the current state into the 'autoSave' slot."""
    slot = 'autoSave'
    target = self.path
    if not target:
        # no session file yet: autosave next to the session directory and
        # remember that we created the file ourselves
        target = self.dir.join('%s.%s' % (slot, self.FTYPE))
        self._createdAutosaveFile = target
    self.tmp_dir_save_session = self.tmp_dir_session.join(slot).mkdir()
    self._save(slot, target)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def blockingSave(self, path):
    """Save the session to *path* synchronously (returns when finished).

    Currently only used by the interactive tutorial's save.
    """
    self.tmp_dir_save_session = self.tmp_dir_session.join('block').mkdir()
    snapshot = {
        'session': dict(self.opts),
        'dialogs': self.dialogs.saveState(),
    }
    self.saveThread.prepare('0', path, self.tmp_dir_session, snapshot)
    self.sigSave.emit(self)
    # run the save thread in this thread so the call blocks until done
    self.saveThread.run()
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _save(self, stateName, path):
    """Save the current state as *stateName* into the pyz file at *path*."""
    print('saving...')
    snapshot = {
        'session': dict(self.opts),
        'dialogs': self.dialogs.saveState(),
    }
    self.sigSave.emit(snapshot)
    self.saveThread.prepare(stateName, path, self.tmp_dir_session, snapshot)
    self.saveThread.start()
    self.current_session = stateName
    recent = self.opts['recent sessions']
    # move this session's path to the front of the recent list
    if path in recent:
        recent.remove(path)
    recent.insert(0, path)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def _recusiveReplaceArrayWithPlaceholder(self, state):
"""
replace all numpy.array within the state dict
with a placeholder
this allows to save the arrays extra using numpy.save_compressed
""" |
arrays = {}
def recursive(state):
for key, val in state.items():
if isinstance(val, dict):
recursive(val)
else:
if isinstance(val, np.ndarray):
name = 'arr_%i' % recursive.c
arrays[name] = val
state[key] = name
recursive.c += 1
recursive.c = 0
recursive(state)
return arrays |
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def start_indexing(self):
    """Read every file in self.filepaths, tokenize its text and collect
    all resulting words into self.words."""
    for filepath in self.filepaths:
        with open(filepath) as handle:
            text = handle.read()
            self.words.extend(self.tokenize(text))
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def parse_name(name):
    """Split a query field name into (name, inverted, operator).

    Recognizes the operator suffixes OP_NIN/OP_IN/OP_NOT/OP_LIKE and a
    leading '!' marking inversion; the operator defaults to OP_EQ.
    """
    inverted = False
    op = OP_EQ
    if name is not None:
        for candidate in (OP_NIN, OP_IN, OP_NOT, OP_LIKE):
            if name.endswith(candidate):
                op = candidate
                name = name[:len(name) - len(candidate)]
                break
        if name.startswith('!'):
            inverted = True
            name = name[1:]
    return name, inverted, op
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def connect_db(database_name=":memory:"):
    """Point lazy_record at the database located at *database_name*.

    :param database_name: path to the database file (defaults to an
        in-memory database).
    """
    connection = repo.Repo.connect_db(database_name)
    base.Repo.db = connection
    query.Repo.db = connection
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def close_db():
    """Close the connection opened in `connect_db` and reset all handles."""
    connection = repo.Repo.db
    if connection is not None:
        connection.close()
    for module in (repo, base, query):
        module.Repo.db = None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def load_schema(schema):
    """Load the SQL script *schema* into the database.

    Assumes an active connection established by `connect_db`.
    """
    db = repo.Repo.db
    with db:  # run the script inside a transaction
        db.executescript(schema)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def update_values(self, min=None, max=None, conflict='warning'):
    """ Update the boundaries, handling possible conflicts.

    Bounds are only ever tightened: a new minimum must exceed the old
    one, a new maximum must be below the old one.

    :param min: candidate lower bound (encoded version number) or None.
    :param max: candidate upper bound (encoded version number) or None.
    :param conflict: What to do in case of failure: 'silent', 'warning'
        or 'error' (forwarded to version_problem_notify).
    """
    conflict_txt = None
    if min is not None:
        if min > self.min:
            if min > self.max:
                # contradictory bounds: minimum wins; maximum is relaxed to
                # the absolute highest and preference flips away from high
                self.max = self.highest
                self.prefer_highest = False
                conflict_txt = 'Minimum {0:s} conflicts with maximum {1:s}; minimum is higher so it takes precedence, but lower values in range are not preferred.'.format('{0:d}.{1:d}'.format(*self.to_tup(min)), '{0:d}.{1:d}'.format(*self.to_tup(self.max)))
            self.min = min
    if max is not None:
        if max < self.max:
            if max >= self.min:
                self.max = max
            else:
                # new maximum below current minimum: keep the bounds but
                # prefer lower values from now on
                # NOTE(review): the wording 'so takes it precedence' looks
                # like a typo for 'so it takes precedence' — confirm before
                # changing user-facing text
                self.prefer_highest = False
                conflict_txt = 'Maximum {0:s} conflicts with minimum {1:s}; minimum is higher so takes it precedence, but lower values in range are now preferred.'.format('{0:d}.{1:d}'.format(*self.to_tup(max)), '{0:d}.{1:d}'.format(*self.to_tup(self.min)))
    if conflict_txt:
        version_problem_notify(conflict_txt, conflict=conflict)
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def add_selection(self, selection, conflict = 'warning'):
    """ Restrict the range given a selection string.

    :param selection: A single selection (without comma), like '>=1.3'.
    :param conflict: What to do in case of failure: 'silent', 'warning'
        or 'error' (forwarded to update_values).
    :raises VersionFormatError: if the selection string is malformed.
    """
    # normalize: strip spaces and expand the '=.' shorthand to '=0.'
    selection = selection.replace(' ', '').replace('=.', '=0.')
    if not selection:
        return
    if selection.count(',') or selection.count('_'):
        raise VersionFormatError(('Version string "{0:s}" is incorrect. Perhaps you\'re trying to add a combined one; ' +
            'you should use add_selections for that').format(selection))
    if selection.count('.') > 1:
        raise VersionFormatError(('Version string "{0:s}" is incorrect. Perhaps it contains a version longer than 2 numbers ' +
            '(e.g. "3.14)" which is intentionally not supported. Version numbers beyond the second are for bugfixes only.').format(selection))
    # comparison operator (>, >=, <, <=, = or ==) followed by major[.minor];
    # either number may be the wildcard '*'
    regex = r'^([><=]=?)(\d+|\*)(?:\.(\d*|\*))?$'
    found = findall(regex, selection)
    if not found:
        raise VersionFormatError('Version string "{0:s}" not properly formatted according to "{1:s}".'.format(selection, regex))
    operation, majorstr, minorstr = found[0]
    if majorstr == '*':
        # '*' major matches everything: no restriction
        return
    major = int(majorstr)
    if minorstr == '*':
        # e.g. '=1.*': restrict to the whole 1.x range
        # NOTE(review): a '*' minor applies this range regardless of the
        # comparison operator — confirm that is intended
        self.update_values(conflict=conflict,
            min = self.to_nr(major, 0),
            max = self.to_nr(major + 1, 0) - 1,
        )
        return
    exclusive = int(not operation.endswith('='))   # 1 for > and <
    major_only = int(not minorstr)                 # 1 when no minor given
    nr = self.to_nr(major, int(minorstr or 0))
    if operation.startswith('='):
        # '=1' spans the whole 1.x range; '=1.3' pins exactly 1.3
        self.update_values(conflict=conflict,
            min = nr,
            max = nr + major_only * self.limit - major_only,
        )
    elif operation.startswith('<'):
        self.update_values(conflict=conflict,
            max = nr - exclusive + (not exclusive) * (major_only * self.limit - major_only),
        )
    elif operation.startswith('>'):
        self.update_values(conflict=conflict,
            min = nr + exclusive + exclusive * (major_only * self.limit - major_only),
        )
    else:
        # unreachable given the regex above, kept as a guard.
        # BUG FIX: .format() previously applied only to the second half of
        # the concatenated message, leaving '{0:s}' unfilled in the output.
        raise VersionFormatError(('Version (in)equality operator "{0:s}" not recognized. ' +
            'Full operation "{1:s}"').format(operation, selection))
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.