text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
def compute_cusum_ts(self, ts):
    """Compute the Cumulative Sum (CUSUM) at each point 't' of the time series.

    Parameters
    ----------
    ts : array-like
        The time series.

    Returns
    -------
    numpy.ndarray
        CUSUM of deviations from the mean at each index. By construction
        the final value sums all deviations and is therefore ~0.
    """
    mean = np.mean(ts)
    cusums = np.zeros(len(ts))
    # BUG FIX: original wrote to undefined name `cusum` (NameError at runtime).
    cusums[0] = (ts[0] - mean)
    for i in np.arange(1, len(ts)):
        cusums[i] = cusums[i - 1] + (ts[i] - mean)
    # BUG FIX: original asserted on undefined name `cumsum`; sanity-check that
    # the cumulative deviations cancel out.
    assert(np.isclose(cusums[-1], 0.0))
    return cusums
def detect_mean_shift(self, ts, B=1000):
    """Detect mean shift in a time series. B is number of bootstrapped samples to draw."""
    x = np.arange(0, len(ts))
    # Test statistic: the balance-of-means statistic computed per point.
    stat_ts_func = self.compute_balance_mean_ts
    # Null model: a shuffled series, which destroys any temporal structure.
    null_ts_func = self.shuffle_timeseries
    # permute_fast=True lets the significance routine permute all B
    # replicates in one vectorized pass instead of calling null_ts_func B times.
    stats_ts, pvals, nums = self.get_ts_stats_significance(
        x, ts, stat_ts_func, null_ts_func, B=B, permute_fast=True)
    return stats_ts, pvals, nums
def parallelize_func(iterable, func, chunksz=1, n_jobs=16, *args, **kwargs):
    """Parallelize a function over each element of an iterable.

    The iterable is split into chunks of ``chunksz`` elements; each chunk is
    handed to ``func`` in a separate joblib worker, and the per-chunk results
    are flattened back into a single list.
    """
    batches = more_itertools.chunked(iterable, chunksz)
    per_batch_results = Parallel(n_jobs=n_jobs, verbose=50)(
        delayed(func)(batch, *args, **kwargs) for batch in batches)
    return list(more_itertools.flatten(per_batch_results))
def ts_stats_significance(ts, ts_stat_func, null_ts_func, B=1000, permute_fast=False):
    """Compute the statistical significance of a test statistic at each point of the time series.

    ``B`` null replicates of the series are generated (either by one vectorized
    permutation pass or by calling ``null_ts_func`` B times); the per-point
    p-value is the fraction of replicates whose statistic meets or exceeds the
    observed one.
    """
    stats_ts = ts_stat_func(ts)
    if permute_fast:
        # Permute it in 1 shot: stack B copies and permute each row.
        null_series = map(np.random.permutation, np.array([ts, ] * B))
    else:
        null_series = np.vstack([null_ts_func(ts) for _ in np.arange(0, B)])
    null_stats = np.vstack([ts_stat_func(s) for s in null_series])
    pvals, nums = [], []
    for idx in np.arange(0, len(stats_ts)):
        exceed_count = np.sum((null_stats[:, idx] >= stats_ts[idx]))
        nums.append(exceed_count)
        pvals.append(exceed_count / float(B))
    return stats_ts, pvals, nums
def get_ci(theta_star, blockratio=1.0):
    """Get the confidence interval.

    NaN entries in ``theta_star`` are discarded, and the bootstrap 95% CI is
    taken from the empirical 2.5th and 97.5th percentiles of the sorted values.
    """
    # get rid of nans while we sort
    finite_values = theta_star[~np.isnan(theta_star)]
    b_star = np.sort(finite_values)
    # Standard error, scaled for block bootstrap. (Computed but not used in
    # the percentile CI below; kept for behavioral parity.)
    se = np.std(b_star) * np.sqrt(blockratio)
    # bootstrap 95% CI based on empirical percentiles
    lower = b_star[int(len(b_star) * .025)]
    upper = b_star[int(len(b_star) * .975)]
    return [lower, upper]
def get_pvalue(value, ci):
    """Get the p-value from the confidence interval.

    Parameters
    ----------
    value : float
        The observed statistic.
    ci : sequence of two floats
        A bootstrap 95% confidence interval [lower, upper].

    Returns
    -------
    float
        Two-sided p-value in [0, 1].
    """
    from scipy.stats import norm
    # Back out the standard error from the width of the 95% CI (z_0.975 = 1.96).
    se = (ci[1] - ci[0]) / (2.0 * 1.96)
    z = value / se
    # BUG FIX: the original returned -2 * norm.cdf(-|z|), i.e. a *negative*
    # "p-value" (callers had to abs() it). The two-sided p-value is
    # 2 * P(Z <= -|z|) and must be non-negative; abs() of either form agrees,
    # so existing callers are unaffected.
    pvalue = 2 * norm.cdf(-np.abs(z))
    return pvalue
def ts_stats_significance_bootstrap(ts, stats_ts, stats_func, B=1000, b=3):
    """Compute the statistical significance of a test statistic at each point of the time series by using timeseries bootstrap.

    For each time point, ``B`` block-bootstrap replicates (block length ``b``)
    of the statistic are drawn, and the p-value is derived from the empirical
    confidence interval of those replicates.
    """
    pvals = []
    for tp in np.arange(0, len(stats_ts)):
        # Freeze the time point so bootstrap_ts can call the statistic
        # as a one-argument function of the series.
        pf = partial(stats_func, t=tp)
        bs = bootstrap_ts(ts, pf, B=B, b=b)
        # blockratio corrects the standard error for block (vs iid) sampling.
        ci = get_ci(bs, blockratio=b / len(stats_ts))
        # abs() because get_pvalue's sign convention can produce negatives.
        pval = abs(get_pvalue(stats_ts[tp], ci))
        pvals.append(pval)
    return pvals
def format_traceback(extracted_tb,
                     exc_type,
                     exc_value,
                     cwd='',
                     term=None,
                     function_color=12,
                     dim_color=8,
                     editor='vi',
                     template=DEFAULT_EDITOR_SHORTCUT_TEMPLATE):
    """Return an iterable of formatted Unicode traceback frames.

    Also include a pseudo-frame at the end representing the exception itself.

    Format things more compactly than the stock formatter, and make every
    frame an editor shortcut.
    """
    def format_shortcut(editor,
                        path,
                        line_number,
                        function=None):
        """Return a pretty-printed editor shortcut."""
        return template.format(editor=editor,
                               line_number=line_number or 0,
                               path=path,
                               function=function or u'',
                               hash_if_function=u' # ' if function else u'',
                               function_format=term.color(function_color),
                               # Underline is also nice and doesn't make us
                               # worry about appearance on different background
                               # colors.
                               normal=term.normal,
                               dim_format=term.color(dim_color) + term.bold,
                               line_number_max_width=line_number_max_width,
                               term=term)

    template += '\n'  # Newlines are awkward to express on the command line.
    extracted_tb = _unicode_decode_extracted_tb(extracted_tb)
    if not term:
        term = Terminal()

    if extracted_tb:
        # Shorten file paths:
        for i, (file, line_number, function, text) in enumerate(extracted_tb):
            extracted_tb[i] = human_path(src(file), cwd), line_number, function, text

        # Widest line number among all frames, used to align the shortcuts.
        line_number_max_width = len(unicode(max(the_line for _, the_line, _, _ in extracted_tb)))

        # Stack frames:
        for i, (path, line_number, function, text) in enumerate(extracted_tb):
            text = (text and text.strip()) or u''
            yield (format_shortcut(editor, path, line_number, function) +
                   (u' %s\n' % text))

    # Exception:
    if exc_type is SyntaxError:
        # Format a SyntaxError to look like our other traceback lines.
        # SyntaxErrors have a format different from other errors and include a
        # file path which looks out of place in our newly highlit, editor-
        # shortcutted world.
        if hasattr(exc_value, 'filename') and hasattr(exc_value, 'lineno'):
            exc_lines = [format_shortcut(editor, exc_value.filename, exc_value.lineno)]
            # Drop the first line of the stock rendering (the file/line part)
            # since the shortcut above replaces it.
            formatted_exception = format_exception_only(SyntaxError, exc_value)[1:]
        else:
            # The logcapture plugin may format exceptions as strings,
            # stripping them of the full filename and lineno
            exc_lines = []
            formatted_exception = format_exception_only(SyntaxError, exc_value)
            formatted_exception.append(u'(Try --nologcapture for a more detailed traceback)\n')
    else:
        exc_lines = []
        formatted_exception = format_exception_only(exc_type, exc_value)
    exc_lines.extend([_decode(f) for f in formatted_exception])
    yield u''.join(exc_lines)
def extract_relevant_tb(tb, exctype, is_test_failure):
    """Return extracted traceback frame 4-tuples that aren't unittest ones.

    This used to be _exc_info_to_string().

    :arg tb: A traceback object
    :arg exctype: The exception class (NOTE(review): accepted but unused here)
    :arg is_test_failure: Whether the exception is the test's failureException
    """
    # Skip test runner traceback levels:
    while tb and _is_unittest_frame(tb):
        tb = tb.tb_next
    if is_test_failure:
        # Skip assert*() traceback levels:
        length = _count_relevant_tb_levels(tb)
        return extract_tb(tb, length)
    return extract_tb(tb)
def _unicode_decode_extracted_tb(extracted_tb):
    """Return a traceback with the string elements translated into Unicode."""
    # Decode each textual field of the (file, line, function, text) 4-tuples;
    # the line number passes through untouched.
    return [(_decode(filename), line_number, _decode(function), _decode(text))
            for filename, line_number, function, text in extracted_tb]
def _count_relevant_tb_levels(tb):
    """Return the number of frames in ``tb`` before all that's left is unittest frames.

    Unlike its namesake in unittest, this doesn't bail out as soon as it hits a
    unittest frame, which means we don't bail out as soon as somebody uses the
    mock library, which defines ``__unittest``.
    """
    total = 0
    trailing_unittest = 0
    frame = tb
    while frame:
        total += 1
        # Track the length of the *current* run of unittest frames; it resets
        # whenever a non-unittest frame appears, so at the end it holds the
        # count of unittest frames at the tail only.
        trailing_unittest = trailing_unittest + 1 if _is_unittest_frame(frame) else 0
        frame = frame.tb_next
    return total - trailing_unittest
def cmdloop(self, *args, **kwargs):
    """Call pdb's cmdloop, making readline work.

    Patch raw_input so it sees the original stdin and stdout, lest readline
    refuse to work.

    The C implementation of raw_input uses readline functionality only if
    both stdin and stdout are from a terminal AND are FILE*s (not
    PyObject*s): http://bugs.python.org/issue5727 and
    https://bugzilla.redhat.com/show_bug.cgi?id=448864
    """
    def unwrapping_raw_input(*args, **kwargs):
        """Call raw_input(), making sure it finds an unwrapped stdout."""
        wrapped_stdout = sys.stdout
        # Temporarily expose the real stream so readline engages.
        sys.stdout = wrapped_stdout.stream
        ret = orig_raw_input(*args, **kwargs)
        sys.stdout = wrapped_stdout
        return ret

    # Python 2 has raw_input; Python 3 renamed it to input.
    try:
        orig_raw_input = raw_input
    except NameError:
        orig_raw_input = input

    if hasattr(sys.stdout, 'stream'):
        __builtin__.raw_input = unwrapping_raw_input
    # else if capture plugin has replaced it with a StringIO, don't bother.
    try:
        # Interesting things happen when you try to not reference the
        # superclass explicitly.
        ret = cmd.Cmd.cmdloop(self, *args, **kwargs)
    finally:
        # Always restore the builtin, even if cmdloop raises.
        __builtin__.raw_input = orig_raw_input
    return ret
def set_trace(*args, **kwargs):
    """Call pdb.set_trace, making sure it receives the unwrapped stdout.

    This is so we don't keep drawing progress bars over debugger output.
    """
    # There's no stream attr if capture plugin is enabled:
    out = sys.stdout.stream if hasattr(sys.stdout, 'stream') else None
    # Python 2.5 can't put an explicit kwarg and **kwargs in the same function
    # call.
    kwargs['stdout'] = out
    debugger = pdb.Pdb(*args, **kwargs)
    # Ordinarily (and in a silly fashion), pdb refuses to use raw_input() if
    # you pass it a stream on instantiation. Fix that:
    debugger.use_rawinput = True
    # Start tracing in the *caller's* frame, not this wrapper's.
    debugger.set_trace(sys._getframe().f_back)
def begin(self):
    """Make some monkeypatches to dodge progress bar.

    Wrap stderr and stdout to keep other users of them from smearing the
    progress bar. Wrap some pdb routines to stop showing the bar while in
    the debugger.
    """
    # The calls to begin/finalize end up like this: a call to begin() on
    # instance A of the plugin, then a paired begin/finalize for each test
    # on instance B, then a final call to finalize() on instance A.

    # Originals are pushed onto stacks so finalize() can restore them in
    # the nested begin/finalize pattern described above.
    # TODO: Do only if isatty.
    self._stderr.append(sys.stderr)
    sys.stderr = StreamWrapper(sys.stderr, self)  # TODO: Any point?
    self._stdout.append(sys.stdout)
    sys.stdout = StreamWrapper(sys.stdout, self)
    self._set_trace.append(pdb.set_trace)
    pdb.set_trace = set_trace
    self._cmdloop.append(pdb.Pdb.cmdloop)
    pdb.Pdb.cmdloop = cmdloop
    # nosetests changes directories to the tests dir when run from a
    # distribution dir, so save the original cwd for relativizing paths.
    self._cwd = '' if self.conf.options.absolute_paths else getcwd()
def finalize(self, result):
    """Put monkeypatches back as we found them."""
    # Pop in the reverse of begin()'s stacking so nested begin/finalize
    # pairs unwind correctly.
    sys.stderr = self._stderr.pop()
    sys.stdout = self._stdout.pop()
    pdb.set_trace = self._set_trace.pop()
    pdb.Pdb.cmdloop = self._cmdloop.pop()
def configure(self, options, conf):
    """Turn style-forcing on if bar-forcing is on.

    It'd be messy to position the bar but still have the rest of the
    terminal capabilities emit ''.
    """
    super(ProgressivePlugin, self).configure(options, conf)
    # The ID plugin's per-test numbers at verbosity >= 2 collide with the
    # bar's line positioning; warn rather than silently misrender.
    if (getattr(options, 'verbosity', 0) > 1 and
            getattr(options, 'enable_plugin_id', False)):
        # TODO: Can we forcibly disable the ID plugin?
        print ('Using --with-id and --verbosity=2 or higher with '
               'nose-progressive causes visualization errors. Remove one '
               'or the other to avoid a mess.')
    if options.with_bar:
        options.with_styling = True
def update(self, test_path, number):
    """Draw an updated progress bar.

    At the moment, the graph takes a fixed width, and the test identifier
    takes the rest of the row, truncated from the left to fit.

    test_path -- the selector of the test being run
    number -- how many tests have been run so far, including this one
    """
    # TODO: Play nicely with absurdly narrow terminals. (OS X's won't even
    # go small enough to hurt us.)

    # Figure out graph:
    GRAPH_WIDTH = 14
    # min() is in case we somehow get the total test count wrong. It's tricky.
    num_filled = int(round(min(1.0, float(number) / self.max) * GRAPH_WIDTH))
    graph = ''.join([self._fill_cap(' ' * num_filled),
                     self._empty_cap(self._empty_char * (GRAPH_WIDTH - num_filled))])

    # Figure out the test identifier portion:
    cols_for_path = self.cols - GRAPH_WIDTH - 2  # 2 spaces between path & graph
    if len(test_path) > cols_for_path:
        # Truncate from the left: the trailing (test name) part is the
        # informative bit.
        test_path = test_path[len(test_path) - cols_for_path:]
    else:
        test_path += ' ' * (cols_for_path - len(test_path))

    # Put them together, and let simmer:
    self.last = self._term.bold(test_path) + ' ' + graph
    with self._at_last_line():
        self.stream.write(self.last)
        self.stream.flush()
def erase(self):
    """White out the progress bar."""
    # Jump to the bar's line, clear to end of line, and push it out
    # immediately so the screen is clean before other output happens.
    with self._at_last_line():
        self.stream.write(self._term.clear_eol)
        self.stream.flush()
def dodging(bar):
    """Return a context manager which erases the bar, lets you output things, and then redraws the bar.

    It's reentrant.
    """
    class ShyProgressBar(object):
        """Context manager that implements a progress bar that gets out of the way"""

        def __enter__(self):
            """Erase the progress bar so bits of disembodied progress bar don't get scrolled up the terminal."""
            # My terminal has no status line, so we make one manually.
            bar._is_dodging += 1  # Increment before calling erase(), which
                                  # calls dodging() again.
            if bar._is_dodging <= 1:  # It *was* 0.
                # Only the outermost dodge actually erases; nested dodges
                # find the bar already gone.
                bar.erase()

        def __exit__(self, type, value, tb):
            """Redraw the last saved state of the progress bar."""
            if bar._is_dodging == 1:  # Can't decrement yet; write() could
                                      # read it.
                # This is really necessary only because we monkeypatch
                # stderr; the next test is about to start and will redraw
                # the bar.
                with bar._at_last_line():
                    bar.stream.write(bar.last)
                    bar.stream.flush()
            bar._is_dodging -= 1

    return ShyProgressBar()
def _makeResult(self):
    """Return a Result that doesn't print dots.

    Nose's ResultProxy will wrap it, and other plugins can still print
    stuff---but without smashing into our progress bar, care of
    ProgressivePlugin's stderr/out wrapping.
    """
    result = ProgressiveResult(self._cwd,
                               self._totalTests,
                               self.stream,
                               config=self.config)
    return result
def run(self, test):
    "Run the given test case or test suite...quietly."
    # These parts of Nose's pluggability are baked into
    # nose.core.TextTestRunner. Reproduce them:
    wrapper = self.config.plugins.prepareTest(test)
    if wrapper is not None:
        test = wrapper
    wrapped = self.config.plugins.setOutputStream(self.stream)
    if wrapped is not None:
        self.stream = wrapped

    result = self._makeResult()
    startTime = time()
    try:
        test(result)
    except KeyboardInterrupt:
        # we need to ignore these exception to not
        # show traceback when user intentionally
        # interrupted test suite execution, and
        # to output some reasonable results on
        # already passed and failed tests.
        pass
    stopTime = time()

    # We don't care to hear about errors again at the end; we take care of
    # that in result.addError(), while the tests run.
    # result.printErrors()
    #
    # However, we do need to call this one useful line from
    # nose.result.TextTestResult's implementation of printErrors() to make
    # sure other plugins get a chance to report:
    self.config.plugins.report(self.stream)

    result.printSummary(startTime, stopTime)
    self.config.plugins.finalize(result)
    return result
def _printTraceback(self, test, err):
    """Print a nicely formatted traceback.

    :arg err: exc_info()-style traceback triple
    :arg test: the test that precipitated this call
    """
    # Don't bind third item to a local var; that can create
    # circular refs which are expensive to collect. See the
    # sys.exc_info() docs.
    exception_type, exception_value = err[:2]
    # TODO: In Python 3, the traceback is attached to the exception
    # instance through the __traceback__ attribute. If the instance
    # is saved in a local variable that persists outside the except
    # block, the traceback will create a reference cycle with the
    # current frame and its dictionary of local variables. This will
    # delay reclaiming dead resources until the next cyclic garbage
    # collection pass.

    extracted_tb = extract_relevant_tb(
        err[2],
        exception_type,
        exception_type is test.failureException)
    test_frame_index = index_of_test_frame(
        extracted_tb,
        exception_type,
        exception_value,
        test)
    if test_frame_index:
        # We have a good guess at which frame is the test, so
        # trim everything until that. We don't care to see test
        # framework frames.
        extracted_tb = extracted_tb[test_frame_index:]

    # Dodge the progress bar while writing the (multi-line) traceback.
    with self.bar.dodging():
        self.stream.write(''.join(
            format_traceback(
                extracted_tb,
                exception_type,
                exception_value,
                self._cwd,
                self._term,
                self._options.function_color,
                self._options.dim_color,
                self._options.editor,
                self._options.editor_shortcut_template)))
def _printHeadline(self, kind, test, is_failure=True):
    """Output a 1-line error summary to the stream if appropriate.

    The line contains the kind of error and the pathname of the test.

    :arg kind: The (string) type of incident the precipitated this call
    :arg test: The test that precipitated this call
    """
    # Advisories (e.g. skips) print only when the user opted in.
    if is_failure or self._options.show_advisories:
        with self.bar.dodging():
            # Bold only failures; advisories stay unstyled.
            self.stream.writeln(
                '\n' +
                (self._term.bold if is_failure else '') +
                '%s: %s' % (kind, nose_selector(test)) +
                (self._term.normal if is_failure else ''))
def _recordAndPrintHeadline(self, test, error_class, artifact):
    """Record that an error-like thing occurred, and print a summary.

    Store ``artifact`` with the record.

    Return whether the test result is any sort of failure.
    """
    # We duplicate the errorclass handling from super rather than calling
    # it and monkeying around with showAll flags to keep it from printing
    # anything.
    is_error_class = False
    for cls, (storage, label, is_failure) in self.errorClasses.items():
        if isclass(error_class) and issubclass(error_class, cls):
            if is_failure:
                test.passed = False
            storage.append((test, artifact))
            is_error_class = True
    if not is_error_class:
        self.errors.append((test, artifact))
        test.passed = False
    # `is_failure` and `label` are leftovers from the loop above; the
    # short-circuit (`not is_error_class or ...`) and the conditional on
    # is_error_class ensure they are only read when the loop matched and
    # bound them, so there is no NameError in practice.
    is_any_failure = not is_error_class or is_failure
    self._printHeadline(label if is_error_class else 'ERROR',
                        test,
                        is_failure=is_any_failure)
    return is_any_failure
def addSkip(self, test, reason):
    """Catch skipped tests in Python 2.7 and above.

    Though ``addSkip()`` is deprecated in the nose plugin API, it is very
    much not deprecated as a Python 2.7 ``TestResult`` method. In Python
    2.7, this will get called instead of ``addError()`` for skips.

    :arg reason: Text describing why the test was skipped
    """
    self._recordAndPrintHeadline(test, SkipTest, reason)
    # Python 2.7 users get a little bonus: the reason the test was skipped.
    if isinstance(reason, Exception):
        # Unwrap the exception to its message text.
        reason = getattr(reason, 'message', None) or getattr(
            reason, 'args')[0]
    if reason and self._options.show_advisories:
        with self.bar.dodging():
            self.stream.writeln(reason)
def printSummary(self, start, stop):
    """As a final summary, print number of tests, broken down by result."""
    def renderResultType(type, number, is_failure):
        """Return a rendering like '2 failures'.

        :arg type: A singular label, like "failure"
        :arg number: The number of tests with a result of that type
        :arg is_failure: Whether that type counts as a failure
        """
        # I'd rather hope for the best with plurals than totally punt on
        # being Englishlike:
        ret = '%s %s%s' % (number, type, 's' if number != 1 else '')
        if is_failure and number:
            ret = self._term.bold(ret)
        return ret

    # Summarize the special cases:
    counts = [('test', self.testsRun, False),
              ('failure', len(self.failures), True),
              ('error', len(self.errors), True)]
    # Support custom errorclasses as well as normal failures and errors.
    # Lowercase any all-caps labels, but leave the rest alone in case there
    # are hard-to-read camelCaseWordBreaks.
    counts.extend([(label.lower() if label.isupper() else label,
                    len(storage),
                    is_failure)
                   for (storage, label, is_failure) in
                   self.errorClasses.values() if len(storage)])
    summary = (', '.join(renderResultType(*a) for a in counts) +
               ' in %.1fs' % (stop - start))

    # Erase progress bar. Bash doesn't clear the whole line when printing
    # the prompt, leaving a piece of the bar. Also, the prompt may not be
    # at the bottom of the terminal.
    self.bar.erase()
    self.stream.writeln()
    if self.wasSuccessful():
        self.stream.write(self._term.bold_green('OK! '))
    self.stream.writeln(summary)
def nose_selector(test):
    """Return the string you can pass to nose to run `test`, including argument values if the test was made by a test generator.

    Return "Unknown test" if it can't construct a decent path.
    """
    address = test_address(test)
    if address:
        file, module, rest = address

        if module:
            if rest:
                try:
                    # Generator-made tests carry their args on test.test.arg;
                    # fall through to the plain selector if absent.
                    return '%s:%s%s' % (module, rest, test.test.arg or '')
                except AttributeError:
                    return '%s:%s' % (module, rest)
            else:
                return module
    return 'Unknown test'
def human_path(path, cwd):
    """Return the most human-readable representation of the given path.

    If an absolute path is given that's within the current directory,
    convert it to a relative path to shorten it. Otherwise, return the
    absolute path.
    """
    # TODO: Canonicalize the path to remove /kitsune/../kitsune nonsense.
    absolute = abspath(path)
    if cwd and absolute.startswith(cwd):
        # Make path relative. Remove leading slash.
        return absolute[len(cwd) + 1:]
    return absolute
def know(self, what, confidence):
    """Know something with the given confidence, and return self for chaining.

    If confidence is higher than that of what we already know, replace
    what we already know with what you're telling us.
    """
    should_replace = confidence > self.confidence
    if should_replace:
        self.best, self.confidence = what, confidence
    return self
def _generate_arg_types(coordlist_length, shape_name):
    """Find coordinate types based on shape name and coordlist length

    This function returns a list of coordinate types based on which
    coordinates can be repeated for a given type of shap

    Parameters
    ----------
    coordlist_length : int
        The number of coordinates or arguments used to define the shape.
    shape_name : str
        One of the names in `pyregion.ds9_shape_defs`.

    Returns
    -------
    arg_types : list
        A list of objects from `pyregion.region_numbers` with a length equal
        to coordlist_length.
    """
    from .ds9_region_parser import ds9_shape_defs
    from .ds9_attr_parser import ds9_shape_in_comment_defs
    if shape_name in ds9_shape_defs:
        shape_def = ds9_shape_defs[shape_name]
    else:
        shape_def = ds9_shape_in_comment_defs[shape_name]
    initial_arg_types = shape_def.args_list
    arg_repeats = shape_def.args_repeat
    if arg_repeats is None:
        # No repeatable section; the fixed signature is the answer.
        return initial_arg_types

    # repeat args between n1 and n2
    n1, n2 = arg_repeats
    arg_types = list(initial_arg_types[:n1])
    # How many coordinates fall inside the repeatable [n1, n2) window once
    # the fixed head and tail of the signature are accounted for.
    num_of_repeats = coordlist_length - (len(initial_arg_types) - n2)
    # Tile the repeatable slice enough times to cover those coordinates.
    arg_types.extend((num_of_repeats - n1) // (n2 - n1) *
                     initial_arg_types[n1:n2])
    arg_types.extend(initial_arg_types[n2:])
    return arg_types
def convert_to_imagecoord(shape, header):
    """Convert the coordlist of `shape` to image coordinates

    Parameters
    ----------
    shape : `pyregion.parser_helper.Shape`
        The `Shape` to convert coordinates
    header : `~astropy.io.fits.Header`
        Specifies what WCS transformations to use.

    Returns
    -------
    new_coordlist : list
        A list of image coordinates defining the shape.
    """
    arg_types = _generate_arg_types(len(shape.coord_list), shape.name)
    new_coordlist = []
    is_even_distance = True
    coord_list_iter = iter(zip(shape.coord_list, arg_types))
    new_wcs = WCS(header)
    pixel_scales = proj_plane_pixel_scales(new_wcs)
    for coordinate, coordinate_type in coord_list_iter:
        if coordinate_type == CoordOdd:
            # Sky positions come in (odd, even) pairs; consume the partner.
            even_coordinate = next(coord_list_iter)[0]
            old_coordinate = SkyCoord(coordinate, even_coordinate,
                                      frame=shape.coord_format, unit='degree',
                                      obstime='J2000')
            # BUG FIX: np.asscalar() was deprecated in NumPy 1.16 and removed
            # in 1.23; .item() is the documented equivalent and returns the
            # same Python scalar.
            new_coordlist.extend(
                x.item() for x in old_coordinate.to_pixel(new_wcs, origin=1)
            )
        elif coordinate_type == Distance:
            if arg_types[-1] == Angle:
                # Alternate between the two axis scales for width/height pairs.
                degree_per_pixel = pixel_scales[0 if is_even_distance else 1]
                is_even_distance = not is_even_distance
            else:
                # Isotropic scale from the pixel area.
                degree_per_pixel = np.sqrt(proj_plane_pixel_area(new_wcs))
            new_coordlist.append(coordinate / degree_per_pixel)
        elif coordinate_type == Angle:
            new_angle = _estimate_angle(coordinate,
                                        shape.coord_format,
                                        header)
            new_coordlist.append(new_angle)
        else:
            # Pass through anything with no geometric meaning (e.g. counts).
            new_coordlist.append(coordinate)
    return new_coordlist
def get_auth_info():
    """Get authentication details to jottacloud.

    Will first check environment variables, then the .netrc file.
    """
    username_from_env = os.environ.get('JOTTACLOUD_USERNAME')
    password_from_env = os.environ.get('JOTTACLOUD_PASSWORD')
    netrc_username = None
    netrc_password = None
    try:
        credentials = netrc.netrc().authenticators('jottacloud.com')
    except IOError:
        # .netrc file doesn't exist
        credentials = None
    if credentials:
        netrc_username, _, netrc_password = credentials
    # Environment variables take precedence over .netrc entries.
    username = username_from_env or netrc_username
    password = password_from_env or netrc_password
    if not (username and password):
        raise JFSError('Could not find username and password in either env or ~/.netrc, '
                       'you need to add one of these to use these tools')
    return (username, password)
def calculate_md5(fileobject, size=2**16):
    """Utility function to calculate md5 hashes while being light on memory usage.

    By reading the fileobject piece by piece, we are able to process content
    that is larger than available memory.
    """
    fileobject.seek(0)
    digest = hashlib.md5()
    while True:
        chunk = fileobject.read(size)
        if not chunk:
            break
        if isinstance(chunk, six.text_type):
            chunk = chunk.encode('utf-8')  # md5 needs a byte string
        digest.update(chunk)
    fileobject.seek(0)  # rewind read head
    return digest.hexdigest()
def deleted(self):
    'Return datetime.datetime or None if the file isnt deleted'
    # The server marks deleted entries with a 'deleted' XML attribute.
    raw_timestamp = self.folder.attrib.get('deleted', None)
    return None if raw_timestamp is None else dateutil.parser.parse(str(raw_timestamp))
def sync(self):
    'Update state of folder from Jottacloud server'
    log.info("syncing %r" % self.path)
    # Refetch the folder element so local state reflects server-side changes.
    self.folder = self.jfs.get(self.path)
    self.synced = True
def mkdir(self, foldername):
    'Create a new subfolder and return the new JFSFolder'
    #url = '%s?mkDir=true' % posixpath.join(self.path, foldername)
    url = posixpath.join(self.path, foldername)
    params = {'mkDir':'true'}
    r = self.jfs.post(url, params)
    # Refresh local state so the new subfolder is visible on this object.
    self.sync()
    return r
def delete(self):
    'Delete this folder and return a deleted JFSFolder'
    #url = '%s?dlDir=true' % self.path
    params = {'dlDir':'true'}
    r = self.jfs.post(self.path, params)
    # Refresh local state to reflect the deletion server-side.
    self.sync()
    return r
def hard_delete(self):
    'Deletes without possibility to restore'
    # Uses the web REST endpoint (not the jfs API) since jfs has no
    # permanent-delete operation.
    url = 'https://www.jottacloud.com/rest/webrest/%s/action/delete' % self.jfs.username
    data = {'paths[]': self.path.replace(JFS_ROOT, ''),  # web API wants paths relative to the JFS root
            'web': 'true',
            'ts': int(time.time()),
            'authToken': 0}
    r = self.jfs.post(url, content=data)
    return r
def rename(self, newpath):
    "Move folder to a new name, possibly a whole new path"
    # POST https://www.jottacloud.com/jfs/**USERNAME**/Jotta/Sync/Ny%20mappe?mvDir=/**USERNAME**/Jotta/Sync/testFolder
    #url = '%s?mvDir=/%s%s' % (self.path, self.jfs.username, newpath)
    # The mvDir target must be prefixed with /<username>.
    params = {'mvDir':'/%s%s' % (self.jfs.username, newpath)}
    r = self.jfs.post(self.path,
                      extra_headers={'Content-Type':'application/octet-stream'},
                      params=params)
    return r
def up(self, fileobj_or_path, filename=None, upload_callback=None):
    'Upload a file to current folder and return the new JFSFile'
    close_on_done = False
    if isinstance(fileobj_or_path, six.string_types):
        # Given a path: open it ourselves, so remember to close it too.
        filename = filename or os.path.basename(fileobj_or_path)
        fileobj_or_path = open(fileobj_or_path, 'rb')
        close_on_done = True
    elif hasattr(fileobj_or_path, 'read'):
        # file like
        pass
    else:
        # TODO: handle generators here?
        raise JFSError("Need filename or file-like object")
    if filename is None:
        # File objects opened from a path carry their name; use its basename.
        if hasattr(fileobj_or_path, 'name'):
            filename = os.path.basename(fileobj_or_path.name)
        else:
            raise JFSError("Unable to guess filename")
    log.debug('.up %s -> %s %s', repr(fileobj_or_path), repr(self.path), repr(filename))
    r = self.jfs.up(posixpath.join(self.path, filename), fileobj_or_path,
                    upload_callback=upload_callback)
    if close_on_done:
        fileobj_or_path.close()
    # Refresh local state so the new file is visible on this folder object.
    self.sync()
    return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def factory(fileobject, jfs, parentpath): # fileobject from lxml.objectify 'Class method to get the correct file class instatiated' if hasattr(fileobject, 'currentRevision'): # a normal file return JFSFile(fileobject, jfs, parentpath) elif str(fileobject.latestRevision.state) == ProtoFile.STATE_INCOMPLETE: return JFSIncompleteFile(fileobject, jfs, parentpath) elif str(fileobject.latestRevision.state) == ProtoFile.STATE_CORRUPT: return JFSCorruptFile(fileobject, jfs, parentpath) else: raise NotImplementedError('No JFS*File support for state %r. Please file a bug!' % fileobject.latestRevision.state)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def resume(self, data): 'Resume uploading an incomplete file, after a previous upload was interrupted. Returns new file object' if not hasattr(data, 'read'): data = six.BytesIO(data)#StringIO(data) #Check that we actually know from what byte to resume. #If self.size === -1, it means we never got the value from the server. #This is perfectly normal if the file was instatiated via e.g. a file listing, #and not directly via JFS.getObject() if self.size == -1: log.debug('%r is an incomplete file, but .size is unknown. Refreshing the file object from server', self.path) self.f = self.jfs.get(self.path) #check if what we're asked to upload is actually the right file md5 = calculate_md5(data) if md5 != self.md5: raise JFSError('''MD5 hashes don't match! Are you trying to resume with the wrong file?''') log.debug('Resuming %s from offset %s', self.path, self.size) return self.jfs.up(self.path, data, resume_offset=self.size)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def size(self): """Bytes uploaded of the file so far. Note that we only have the file size if the file was requested directly, not if it's part of a folder listing. """
if hasattr(self.f.latestRevision, 'size'): return int(self.f.latestRevision.size) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def stream(self, chunk_size=64*1024): 'Returns a generator to iterate over the file contents' #return self.jfs.stream(url='%s?mode=bin' % self.path, chunk_size=chunk_size) return self.jfs.stream(url=self.path, params={'mode':'bin'}, chunk_size=chunk_size)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def restore(self): 'Restore the file' # # # As of 2016-06-15, Jottacloud.com has changed their restore api # To restore, this is what's done # # HTTP POST to https://www.jottacloud.com/web/restore/trash/list # Data: # hash:undefined # files:@0025d37be5329a18eece18dd93f793509e8_dGVzdF9kZWxldGUudHh0 # # where `files` is a comma separated list, and each item is constructed thus: # @<uuid of path>_<base64 encoded file name> # if not self.deleted: raise JFSError('Tried to restore a not deleted file') raise NotImplementedError('Jottacloud has changed the restore API. Please use jottacloud.com in a browser, for now.') # TODO: figure out how to solve this url = 'https://www.jottacloud.com/rest/webrest/%s/action/restore' % self.jfs.username data = {'paths[]': self.path.replace(JFS_ROOT, ''), 'web': 'true', 'ts': int(time.time()), 'authToken': 0} r = self.jfs.post(url, content=data) return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def delete(self): 'Delete this file and return the new, deleted JFSFile' #url = '%s?dl=true' % self.path r = self.jfs.post(url=self.path, params={'dl':'true'}) return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def thumb(self, size=BIGTHUMB): '''Get a thumbnail as string or None if the file isnt an image size would be one of JFSFile.BIGTHUMB, .MEDIUMTHUMB, .SMALLTHUMB or .XLTHUMB''' if not self.is_image(): return None if not size in (self.BIGTHUMB, self.MEDIUMTHUMB, self.SMALLTHUMB, self.XLTHUMB): raise JFSError('Invalid thumbnail size: %s for image %s' % (size, self.path)) #return self.jfs.raw('%s?mode=thumb&ts=%s' % (self.path, size)) return self.jfs.raw(url=self.path, params={'mode':'thumb', 'ts':size})
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def new_mountpoint(self, name): """Create a new mountpoint"""
url = posixpath.join(self.path, name) r = self._jfs.post(url, extra_headers={'content-type': 'application/x-www-form-urlencoded'}) return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def sharedFiles(self): 'iterate over shared files and get their public URI' for f in self.sharing.files.iterchildren(): yield (f.attrib['name'], f.attrib['uuid'], 'https://www.jottacloud.com/p/%s/%s' % (self.jfs.username, f.publicURI.text))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def files(self): 'iterate over found files' for _f in self.searchresult.files.iterchildren(): yield ProtoFile.factory(_f, jfs=self.jfs, parentpath=unicode(_f.abspath))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def request(self, url, extra_headers=None, params=None): 'Make a GET request for url, with or without caching' if not url.startswith('http'): # relative url url = self.rootpath + url log.debug("getting url: %r, extra_headers=%r, params=%r", url, extra_headers, params) if extra_headers is None: extra_headers={} r = self.session.get(url, headers=extra_headers, params=params, timeout=1800) #max retries is set in __init__ if r.status_code in ( 500, ): raise JFSError(r.reason) return r
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def raw(self, url, extra_headers=None, params=None): 'Make a GET request for url and return whatever content we get' r = self.request(url, extra_headers=extra_headers, params=params) # uncomment to dump raw xml # with open('/tmp/%s.xml' % time.time(), 'wb') as f: # f.write(r.content) if not r.ok: o = lxml.objectify.fromstring(r.content) JFSError.raiseError(o, url) return r.content
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def get(self, url, params=None): 'Make a GET request for url and return the response content as a generic lxml.objectify object' url = self.escapeUrl(url) content = six.BytesIO(self.raw(url, params=params)) # We need to make sure that the xml fits in available memory before we parse # with lxml.objectify.fromstring(), or else it will bomb out. # If it is too big, we need to buffer it to disk before we run it through objectify. see #87 # # get length of buffer content.seek(0,2) contentlen = content.tell() content.seek(0) MAX_BUFFER_SIZE=1024*1024*200 # 200MB. TODO: find a way to compute this if contentlen > MAX_BUFFER_SIZE: # xml is too big to parse with lxml.objectify.fromstring() contentfile = tempfile.NamedTemporaryFile() contentfile.write(content.read()) o = lxml.objectify.parse(contentfile) else: o = lxml.objectify.fromstring(content.getvalue()) if o.tag == 'error': JFSError.raiseError(o, url) return o
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def devices(self): 'return generator of configured devices' return self.fs is not None and [JFSDevice(d, self, parentpath=self.rootpath) for d in self.fs.devices.iterchildren()] or [x for x in []]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(region_string): """Parse DS9 region string into a ShapeList. Parameters region_string : str Region string Returns ------- shapes : `ShapeList` List of `~pyregion.Shape` """
rp = RegionParser() ss = rp.parse(region_string) sss1 = rp.convert_attr(ss) sss2 = _check_wcs(sss1) shape_list, comment_list = rp.filter_shape2(sss2) return ShapeList(shape_list, comment_list=comment_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def open(fname): """Open, read and parse DS9 region file. Parameters fname : str Filename Returns ------- shapes : `ShapeList` List of `~pyregion.Shape` """
with _builtin_open(fname) as fh: region_string = fh.read() return parse(region_string)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_region(s): """Read region. Parameters s : str Region string Returns ------- shapes : `ShapeList` List of `~pyregion.Shape` """
rp = RegionParser() ss = rp.parse(s) sss1 = rp.convert_attr(ss) sss2 = _check_wcs(sss1) shape_list = rp.filter_shape(sss2) return ShapeList(shape_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_region_as_imagecoord(s, header): """Read region as image coordinates. Parameters s : str Region string header : `~astropy.io.fits.Header` FITS header Returns ------- shapes : `~pyregion.ShapeList` List of `~pyregion.Shape` """
rp = RegionParser() ss = rp.parse(s) sss1 = rp.convert_attr(ss) sss2 = _check_wcs(sss1) sss3 = rp.sky_to_image(sss2, header) shape_list = rp.filter_shape(sss3) return ShapeList(shape_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_mask(region, hdu, origin=1): """Get mask. Parameters region : `~pyregion.ShapeList` List of `~pyregion.Shape` hdu : `~astropy.io.fits.ImageHDU` FITS image HDU origin : float TODO: document me Returns ------- mask : `~numpy.array` Boolean mask Examples -------- """
from pyregion.region_to_filter import as_region_filter data = hdu.data region_filter = as_region_filter(region, origin=origin) mask = region_filter.mask(data) return mask
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def as_imagecoord(self, header): """New shape list in image coordinates. Parameters header : `~astropy.io.fits.Header` FITS header Returns ------- shape_list : `ShapeList` New shape list, with coordinates of the each shape converted to the image coordinate using the given header information. """
comment_list = self._comment_list if comment_list is None: comment_list = cycle([None]) r = RegionParser.sky_to_image(zip(self, comment_list), header) shape_list, comment_list = zip(*list(r)) return ShapeList(shape_list, comment_list=comment_list)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_mask(self, hdu=None, header=None, shape=None): """Create a 2-d mask. Parameters hdu : `astropy.io.fits.ImageHDU` FITS image HDU header : `~astropy.io.fits.Header` FITS header shape : tuple Image shape Returns ------- mask : `numpy.array` Boolean mask Examples -------- get_mask(hdu=f[0]) get_mask(shape=(10,10)) get_mask(header=f[0].header, shape=(10,10)) """
if hdu and header is None: header = hdu.header if hdu and shape is None: shape = hdu.data.shape region_filter = self.get_filter(header=header) mask = region_filter.mask(shape) return mask
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(self, outfile): """Write this shape list to a region file. Parameters outfile : str File name """
if len(self) < 1: print("WARNING: The region list is empty. The region file " "'{:s}' will be empty.".format(outfile)) try: outf = _builtin_open(outfile, 'w') outf.close() return except IOError as e: cmsg = "Unable to create region file '{:s}'.".format(outfile) if e.args: e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:] else: e.args = (cmsg,) raise e prev_cs = self[0].coord_format outf = None try: outf = _builtin_open(outfile, 'w') attr0 = self[0].attr[1] defaultline = " ".join(["{:s}={:s}".format(a, attr0[a]) for a in attr0 if a != 'text']) # first line is globals outf.write("global {0}\n".format(defaultline)) # second line must be a coordinate format outf.write("{0}\n".format(prev_cs)) for shape in self: shape_attr = '' if prev_cs == shape.coord_format \ else shape.coord_format + "; " shape_excl = '-' if shape.exclude else '' text_coordlist = ["{:f}".format(f) for f in shape.coord_list] shape_coords = "(" + ",".join(text_coordlist) + ")" shape_comment = " # " + shape.comment if shape.comment else '' shape_str = (shape_attr + shape_excl + shape.name + shape_coords + shape_comment) outf.write("{0}\n".format(shape_str)) except IOError as e: cmsg = "Unable to create region file \'{:s}\'.".format(outfile) if e.args: e.args = (e.args[0] + '\n' + cmsg,) + e.args[1:] else: e.args = (cmsg,) raise e finally: if outf: outf.close()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def AppConfigFlagHandler(feature=None): """ This is the default handler. It checks for feature flags in the current app's configuration. For example, to have 'unfinished_feature' hidden in production but active in development: config.py class ProductionConfig(Config): FEATURE_FLAGS = { 'unfinished_feature' : False, } class DevelopmentConfig(Config): FEATURE_FLAGS = { 'unfinished_feature' : True, } """
if not current_app: log.warn(u"Got a request to check for {feature} but we're outside the request context. Returning False".format(feature=feature)) return False try: return current_app.config[FEATURE_FLAGS_CONFIG][feature] except (AttributeError, KeyError): raise NoFeatureFlagFound()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_active(feature): """ Check if a feature is active """
if current_app: feature_flagger = current_app.extensions.get(EXTENSION_NAME) if feature_flagger: return feature_flagger.check(feature) else: raise AssertionError("Oops. This application doesn't have the Flask-FeatureFlag extention installed.") else: log.warn(u"Got a request to check for {feature} but we're running outside the request context. Check your setup. Returning False".format(feature=feature)) return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_active_feature(feature, redirect_to=None, redirect=None): """ Decorator for Flask views. If a feature is off, it can either return a 404 or redirect to a URL if you'd rather. """
def _is_active_feature(func): @wraps(func) def wrapped(*args, **kwargs): if not is_active(feature): url = redirect_to if redirect: url = url_for(redirect) if url: log.debug(u'Feature {feature} is off, redirecting to {url}'.format(feature=feature, url=url)) return _redirect(url, code=302) else: log.debug(u'Feature {feature} is off, aborting request'.format(feature=feature)) abort(404) return func(*args, **kwargs) return wrapped return _is_active_feature
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def init_app(self, app): """ Add ourselves into the app config and setup, and add a jinja function test """
app.config.setdefault(FEATURE_FLAGS_CONFIG, {}) app.config.setdefault(RAISE_ERROR_ON_MISSING_FEATURES, False) if hasattr(app, "add_template_test"): # flask 0.10 and higher has a proper hook app.add_template_test(self.check, name=self.JINJA_TEST_NAME) else: app.jinja_env.tests[self.JINJA_TEST_NAME] = self.check if not hasattr(app, 'extensions'): app.extensions = {} app.extensions[EXTENSION_NAME] = self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check(self, feature): """ Loop through all our feature flag checkers and return true if any of them are true. The order of handlers matters - we will immediately return True if any handler returns true. If you want to a handler to return False and stop the chain, raise the StopCheckingFeatureFlags exception."""
found = False for handler in self.handlers: try: if handler(feature): return True except StopCheckingFeatureFlags: return False except NoFeatureFlagFound: pass else: found = True if not found: message = u"No feature flag defined for {feature}".format(feature=feature) if current_app.debug and current_app.config.get(RAISE_ERROR_ON_MISSING_FEATURES, False): raise KeyError(message) else: log.info(message) missing_feature.send(self, feature=feature) return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def yank_path(self, path): """Clear cache of results from a specific path"""
for func in self._caches: cache = {} for key in self._caches[func].keys(): log.debug("cache key %s for func %s", key, func) if path in key[0]: log.debug("del cache key %s", key) del self._caches[func][key]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def release(self, path, fh): "Run after a read or write operation has finished. This is where we upload on writes" #print "release! inpath:", path in self.__newfiles.keys() # if the path exists in self.__newfiles.keys(), we have a new version to upload try: f = self.__newfiles[path] # make a local shortcut to Stringio object f.seek(0, os.SEEK_END) if f.tell() > 0: # file has length self.client.up(path, f) # upload to jottacloud del self.__newfiles[path] del f self._dirty(path) except KeyError: pass return ESUCCESS
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def truncate(self, path, length, fh=None): "Download existing path, truncate and reupload" try: f = self._getpath(path) except JFS.JFSError: raise OSError(errno.ENOENT, '') if isinstance(f, (JFS.JFSFile, JFS.JFSFolder)) and f.is_deleted(): raise OSError(errno.ENOENT) data = StringIO(f.read()) data.truncate(length) try: self.client.up(path, data) # replace file contents self._dirty(path) return ESUCCESS except: raise OSError(errno.ENOENT, '')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def commandline_text(bytestring): 'Convert bytestring from command line to unicode, using default file system encoding' if six.PY3: return bytestring unicode_string = bytestring.decode(sys.getfilesystemencoding()) return unicode_string
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sky_to_image(shape_list, header): """Converts a `ShapeList` into shapes with coordinates in image coordinates Parameters shape_list : `pyregion.ShapeList` The ShapeList to convert header : `~astropy.io.fits.Header` Specifies what WCS transformations to use. Yields ------- shape, comment : Shape, str Shape with image coordinates and the associated comment Note ---- The comments in the original `ShapeList` are unaltered """
for shape, comment in shape_list: if isinstance(shape, Shape) and \ (shape.coord_format not in image_like_coordformats): new_coords = convert_to_imagecoord(shape, header) l1n = copy.copy(shape) l1n.coord_list = new_coords l1n.coord_format = "image" yield l1n, comment elif isinstance(shape, Shape) and shape.coord_format == "physical": if header is None: raise RuntimeError("Physical coordinate is not known.") new_coordlist = convert_physical_to_imagecoord(shape, header) l1n = copy.copy(shape) l1n.coord_list = new_coordlist l1n.coord_format = "image" yield l1n, comment else: yield shape, comment
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _new(self, src_path, dry_run=False, remove_uploaded=False): 'Code to upload' # are we getting a symbolic link? if os.path.islink(src_path): sourcefile = os.path.normpath(os.path.join(self.topdir, os.readlink(src_path))) if not os.path.exists(sourcefile): # broken symlink log.error("broken symlink %s->%s", src_path, sourcefile) raise IOError("broken symliknk %s->%s", src_path, sourcefile) jottapath = self.get_jottapath(src_path, filename=os.path.basename(sourcefile)) elif os.path.splitext(src_path)[1].lower() == '.lnk': # windows .lnk sourcefile = os.path.normpath(readlnk(src_path)) if not os.path.exists(sourcefile): # broken symlink log.error("broken fat32lnk %s->%s", src_path, sourcefile) raise IOError("broken fat32lnk %s->%s", src_path, sourcefile) jottapath = self.get_jottapath(src_path, filename=os.path.basename(sourcefile)) else: sourcefile = src_path if not os.path.exists(sourcefile): # file not exis log.error("file does not exist: %s", sourcefile) raise IOError("file does not exist: %s", sourcefile) jottapath = self.get_jottapath(src_path) log.info('Uploading file %s to %s', sourcefile, jottapath) if not dry_run: if not jottacloud.new(sourcefile, jottapath, self.jfs): log.error('Uploading file %s failed', sourcefile) raise if remove_uploaded: log.info('Removing file after upload: %s', src_path) if not dry_run: os.remove(src_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _estimate_angle(angle, reg_coordinate_frame, header): """Transform an angle into a different frame Parameters angle : float, int The number of degrees, measured from the Y axis in origin's frame reg_coordinate_frame : str Coordinate frame in which ``angle`` is defined header : `~astropy.io.fits.Header` instance Header describing the image Returns ------- angle : float The angle, measured from the Y axis in the WCS defined by ``header'` """
y_axis_rot = _calculate_rotation_angle(reg_coordinate_frame, header) return angle - y_axis_rot
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _calculate_rotation_angle(reg_coordinate_frame, header): """Calculates the rotation angle from the region to the header's frame This attempts to be compatible with the implementation used by SAOImage DS9. In particular, this measures the rotation of the north axis as measured at the center of the image, and therefore requires a `~astropy.io.fits.Header` object with defined 'NAXIS1' and 'NAXIS2' keywords. Parameters reg_coordinate_frame : str Coordinate frame used by the region file header : `~astropy.io.fits.Header` instance Header describing the image Returns ------- y_axis_rot : float Degrees by which the north axis in the region's frame is rotated when transformed to pixel coordinates """
new_wcs = WCS(header) region_frame = SkyCoord( '0d 0d', frame=reg_coordinate_frame, obstime='J2000') region_frame = SkyCoord( '0d 0d', frame=reg_coordinate_frame, obstime='J2000', equinox=region_frame.equinox) origin = SkyCoord.from_pixel( header['NAXIS1'] / 2, header['NAXIS2'] / 2, wcs=new_wcs, origin=1).transform_to(region_frame) offset = proj_plane_pixel_scales(new_wcs)[1] origin_x, origin_y = origin.to_pixel(new_wcs, origin=1) origin_lon = origin.data.lon.degree origin_lat = origin.data.lat.degree offset_point = SkyCoord( origin_lon, origin_lat + offset, unit='degree', frame=origin.frame.name, obstime='J2000') offset_x, offset_y = offset_point.to_pixel(new_wcs, origin=1) north_rot = np.arctan2( offset_y - origin_y, offset_x - origin_x) / np.pi * 180. cdelt = new_wcs.wcs.get_cdelt() if (cdelt > 0).all() or (cdelt < 0).all(): return north_rot - 90 else: return -(north_rot - 90)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sf(f, dirpath, jottapath): """Create and return a SyncFile tuple from filename. localpath will be a byte string with utf8 code points jottapath will be a unicode string"""
log.debug('Create SyncFile from %s', repr(f)) log.debug('Got encoded filename %r, joining with dirpath %r', _encode_filename_to_filesystem(f), dirpath) return SyncFile(localpath=os.path.join(dirpath, _encode_filename_to_filesystem(f)), jottapath=posixpath.join(_decode_filename_to_unicode(jottapath), _decode_filename_to_unicode(f)))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_jottapath(localtopdir, dirpath, jottamountpoint): """Translate localtopdir to jottapath. Returns unicode string"""
log.debug("get_jottapath %r %r %r", localtopdir, dirpath, jottamountpoint) normpath = posixpath.normpath(posixpath.join(jottamountpoint, posixpath.basename(localtopdir), posixpath.relpath(dirpath, localtopdir))) return _decode_filename_to_unicode(normpath)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_file(jottapath, JFS): """Check if a file exists on jottacloud"""
log.debug("is_file %r", jottapath) try: jf = JFS.getObject(jottapath) except JFSNotFoundError: return False return isinstance(jf, JFSFile)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def compare(localtopdir, jottamountpoint, JFS, followlinks=False, exclude_patterns=None): """Make a tree of local files and folders and compare it with what's currently on JottaCloud. For each folder, yields: dirpath, # byte string, full path onlylocal, # set(), files that only exist locally, i.e. newly added files that don't exist online, onlyremote, # set(), files that only exist in the JottaCloud, i.e. deleted locally bothplaces # set(), files that exist both locally and remotely onlyremotefolders, # set(), folders that only exist in the JottaCloud, i.e. deleted locally """
def excluded(unicodepath, fname):
    # Decide whether a single file/folder name should be skipped.
    fpath = os.path.join(unicodepath, _decode_filename_to_unicode(fname))
    # skip FIFOs, block devices, character devices and the like, see bug#129
    mode = os.stat(fpath).st_mode
    if not (stat.S_ISREG(mode) or stat.S_ISLNK(mode) or stat.S_ISDIR(mode)):
        # we only like regular files, dirs or symlinks
        return True
    if exclude_patterns is None:
        return False
    for p in exclude_patterns:
        if p.search(fpath):
            log.debug("%r excluded by pattern %r", fpath, p.pattern)
            return True
    return False
bytestring_localtopdir = _encode_filename_to_filesystem(localtopdir)
for dirpath, dirnames, filenames in os.walk(bytestring_localtopdir, followlinks=followlinks):
    # to keep things explicit, and avoid encoding/decoding issues,
    # keep a bytestring AND a unicode variant of dirpath
    dirpath = _encode_filename_to_filesystem(dirpath)
    unicodepath = _decode_filename_to_unicode(dirpath)
    log.debug("compare walk: %r -> %s files ", unicodepath, len(filenames))
    # create set()s of local files and folders
    # paths will be unicode strings
    localfiles = set([f for f in filenames if not excluded(unicodepath, f)])  # these are on local disk
    localfolders = set([f for f in dirnames if not excluded(unicodepath, f)])  # these are on local disk
    jottapath = get_jottapath(localtopdir, unicodepath, jottamountpoint)  # translate to jottapath
    log.debug("compare jottapath: %r", jottapath)
    # create set()s of remote files and folders
    # paths will be unicode strings
    cloudfiles = filelist(jottapath, JFS)  # set(). these are on jottacloud
    cloudfolders = folderlist(jottapath, JFS)
    log.debug("--cloudfiles: %r", cloudfiles)
    log.debug("--localfiles: %r", localfiles)
    log.debug("--cloudfolders: %r", cloudfolders)
    # Partition into: new locally, deleted locally, present in both.
    onlylocal = [ sf(f, dirpath, jottapath) for f in localfiles.difference(cloudfiles)]
    onlyremote = [ sf(f, dirpath, jottapath) for f in cloudfiles.difference(localfiles)]
    bothplaces = [ sf(f, dirpath, jottapath) for f in localfiles.intersection(cloudfiles)]
    onlyremotefolders = [ sf(f, dirpath, jottapath) for f in cloudfolders.difference(localfolders)]
    yield dirpath, onlylocal, onlyremote, bothplaces, onlyremotefolders
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _decode_filename_to_unicode(f): '''Get bytestring filename and return unicode. First, try to decode from default file system encoding If that fails, use ``chardet`` module to guess encoding. As a last resort, try to decode as utf-8. If the argument already is unicode, return as is''' log.debug('_decode_filename_to_unicode(%s)', repr(f)) if isinstance(f, unicode): return f try: return f.decode(sys.getfilesystemencoding()) except UnicodeDecodeError: charguess = chardet.detect(f) log.debug("chardet filename: %r -> %r", f, charguess) if charguess['encoding'] is not None: try: return f.decode(charguess['encoding']) except UnicodeDecodeError: pass log.warning('Cannot understand decoding of this filename: %r (guessed %r, but was wrong)', f, charguess) log.debug('Trying utf-8 to decode %r', f) try: return f.decode('utf-8') except UnicodeDecodeError: pass log.debug('Trying latin1 to decode %r', f) try: return f.decode('latin1') except UnicodeDecodeError: log.warning('Exhausted all options. Decoding %r to safe ascii', f) return f.decode('ascii', errors='ignore')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _encode_filename_to_filesystem(f):
    '''Get a unicode filename and return bytestring, encoded to file
    system default.

    If the argument already is a bytestring, return as is.
    Raises UnicodeEncodeError if the name cannot be represented in the
    file system encoding; there is no safe fallback for writing a file
    under a mangled name.
    '''
    log.debug('_encode_filename_to_filesystem(%s)', repr(f))
    if isinstance(f, str):
        return f
    # The original wrapped this in ``try: ... except UnicodeEncodeError:
    # raise`` which is a no-op; the exception propagates either way.
    return f.encode(sys.getfilesystemencoding())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def resume(localfile, jottafile, JFS): """Continue uploading a new file from a local file (which already exists, incompletely, on JottaCloud)"""
with open(localfile) as lf: _complete = jottafile.resume(lf) return _complete
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def replace_if_changed(localfile, jottapath, JFS): """Compare md5 hash to determine if contents have changed. Upload a file from local disk and replace file on JottaCloud if the md5s differ, or continue uploading if the file is incompletely uploaded. Returns the JottaFile object"""
    # Fetch the current remote object so we can compare checksums.
    jf = JFS.getObject(jottapath)
    lf_hash = getxattrhash(localfile) # try to read previous hash, stored in xattr
    if lf_hash is None:               # no valid hash found in xattr,
        with open(localfile) as lf:
            lf_hash = calculate_md5(lf) # (re)calculate it
    if type(jf) == JFSIncompleteFile:
        # A previous upload was interrupted; continue it instead of
        # re-uploading from scratch.
        log.debug("Local file %s is incompletely uploaded, continue", localfile)
        return resume(localfile, jf, JFS)
    elif jf.md5 == lf_hash: # hashes are the same
        log.debug("hash match (%s), file contents haven't changed", lf_hash)
        # Refresh the cached hash in the xattr even on a no-op, so the
        # next run can skip the md5 recalculation.
        setxattrhash(localfile, lf_hash)
        return jf           # return the version from jottaclouds
    else:
        # Contents differ: remember the new hash and upload a new version.
        setxattrhash(localfile, lf_hash)
        return new(localfile, jottapath, JFS)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def iter_tree(jottapath, JFS): """Get a tree of files and folders. Use it as an iterator; you get something like os.walk"""
filedirlist = JFS.getObject('%s?mode=list' % jottapath) log.debug("got tree: %s", filedirlist) if not isinstance(filedirlist, JFSFileDirList): yield ( '', tuple(), tuple() ) for path in filedirlist.tree: yield path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _query(self, filename): """Get size of filename"""
# - Query metadata of one file # - Return a dict with a 'size' key, and a file size value (-1 for not found) # - Retried if an exception is thrown log.Info('Querying size of %s' % filename) from jottalib.JFS import JFSNotFoundError, JFSIncompleteFile remote_path = posixpath.join(self.folder.path, filename) try: remote_file = self.client.getObject(remote_path) except JFSNotFoundError: return {'size': -1} return { 'size': remote_file.size, }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_drawing(document, container, elem): """Parse drawing element. We don't do much with the drawing element. We can find an embedded image in it, but we don't do more than that. """
_blip = elem.xpath('.//a:blip', namespaces=NAMESPACES) if len(_blip) > 0: blip = _blip[0] _rid = blip.attrib[_name('{{{r}}}embed')] img = doc.Image(_rid) container.elements.append(img)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_footnote(document, container, elem):
    """Parse the footnote element and append a Footnote reference to the container."""
    footnote_id = elem.attrib[_name('{{{w}}}id')]
    container.elements.append(doc.Footnote(footnote_id))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_text(document, container, element):
    """Parse a run-level text element.

    Dispatches on the children actually present in *element* (break,
    text, run properties, nested runs, footnote/endnote references,
    symbols, drawings, comment references) and appends the resulting
    model objects to ``container.elements``. Recurses into nested
    <w:r> runs.
    """

    txt = None

    alternate = element.find(_name('{{{mc}}}AlternateContent'))
    if alternate is not None:
        parse_alternate(document, container, alternate)

    # <w:br> — page/column/line break; the break type is optional.
    br = element.find(_name('{{{w}}}br'))
    if br is not None:
        if _name('{{{w}}}type') in br.attrib:
            _type = br.attrib[_name('{{{w}}}type')]
            brk = doc.Break(_type)
        else:
            brk = doc.Break()

        container.elements.append(brk)

    # <w:t> — the literal text of this run.
    t = element.find(_name('{{{w}}}t'))
    if t is not None:
        txt = doc.Text(t.text)
        txt.parent = container
        container.elements.append(txt)

    rpr = element.find(_name('{{{w}}}rPr'))
    if rpr is not None:
        # Notice it is using txt as container — run properties attach to
        # the Text object created above (txt may still be None here if
        # this run had no <w:t> child).
        parse_previous_properties(document, txt, rpr)

    # Recurse into nested runs, appending to the same container.
    for r in element.findall(_name('{{{w}}}r')):
        parse_text(document, container, r)

    foot = element.find(_name('{{{w}}}footnoteReference'))
    if foot is not None:
        parse_footnote(document, container, foot)

    end = element.find(_name('{{{w}}}endnoteReference'))
    if end is not None:
        parse_endnote(document, container, end)

    # <w:sym> — a character from a specific symbol font.
    sym = element.find(_name('{{{w}}}sym'))
    if sym is not None:
        _font = sym.attrib[_name('{{{w}}}font')]
        _char = sym.attrib[_name('{{{w}}}char')]
        container.elements.append(doc.Symbol(font=_font, character=_char))

    image = element.find(_name('{{{w}}}drawing'))
    if image is not None:
        parse_drawing(document, container, image)

    # <w:commentReference> — marks where a comment is anchored.
    refe = element.find(_name('{{{w}}}commentReference'))
    if refe is not None:
        _m = doc.Comment(refe.attrib[_name('{{{w}}}id')], 'reference')
        container.elements.append(_m)

    return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_paragraph(document, par): """Parse paragraph element. Some other elements could be found inside of paragraph element (math, links). """
paragraph = doc.Paragraph() paragraph.document = document for elem in par: if elem.tag == _name('{{{w}}}pPr'): parse_paragraph_properties(document, paragraph, elem) if elem.tag == _name('{{{w}}}r'): parse_text(document, paragraph, elem) if elem.tag == _name('{{{m}}}oMath'): _m = doc.Math() paragraph.elements.append(_m) if elem.tag == _name('{{{m}}}oMathPara'): _m = doc.Math() paragraph.elements.append(_m) if elem.tag == _name('{{{w}}}commentRangeStart'): _m = doc.Comment(elem.attrib[_name('{{{w}}}id')], 'start') paragraph.elements.append(_m) if elem.tag == _name('{{{w}}}commentRangeEnd'): _m = doc.Comment(elem.attrib[_name('{{{w}}}id')], 'end') paragraph.elements.append(_m) if elem.tag == _name('{{{w}}}hyperlink'): try: t = doc.Link(elem.attrib[_name('{{{r}}}id')]) parse_text(document, t, elem) paragraph.elements.append(t) except: logger.error('Error with with hyperlink [%s].', str(elem.attrib.items())) if elem.tag == _name('{{{w}}}smartTag'): parse_smarttag(document, paragraph, elem) return paragraph
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_table_properties(doc, table, prop):
    """Parse table properties, recording the table style (if any) on the
    table and marking that style as used in the document."""
    if not table:
        return

    style = prop.find(_name('{{{w}}}tblStyle'))
    if style is None:
        return

    table.style_id = style.attrib[_name('{{{w}}}val')]
    doc.add_style_as_used(table.style_id)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_table_column_properties(doc, cell, prop):
    """Parse table cell properties: horizontal grid span and vertical merge.

    A <w:vMerge> without a w:val attribute marks a continuation cell; it
    is recorded as the empty string.
    """
    if not cell:
        return

    grid = prop.find(_name('{{{w}}}gridSpan'))
    if grid is not None:
        cell.grid_span = int(grid.attrib[_name('{{{w}}}val')])

    vmerge = prop.find(_name('{{{w}}}vMerge'))
    if vmerge is not None:
        # attrib.get collapses the original if/else: the default ""
        # stands for "continue the merge from the cell above".
        cell.vmerge = vmerge.attrib.get(_name('{{{w}}}val'), "")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_table(document, tbl):
    """Parse a <w:tbl> element into a doc.Table.

    Walks rows (<w:tr>) and cells (<w:tc>), parsing cell properties and
    paragraph content. Vertically merged continuation cells are not
    appended; instead the row span of the cell above is extended.
    """

    def _change(rows, pos_x):
        # Extend the row_span of the cell at grid position pos_x in the
        # previous row, to absorb a vMerge continuation cell.
        # NOTE(review): the len(rows) == 1 early-out means a merge
        # starting in the first row is not extended — preserved as-is,
        # but worth confirming against real documents.
        if len(rows) == 1:
            return rows

        count_x = 1
        for x in rows[-1]:
            if count_x == pos_x:
                x.row_span += 1
            count_x += x.grid_span

        return rows

    table = doc.Table()

    tbl_pr = tbl.find(_name('{{{w}}}tblPr'))
    if tbl_pr is not None:
        parse_table_properties(document, table, tbl_pr)

    for tr in tbl.xpath('./w:tr', namespaces=NAMESPACES):
        columns = []
        pos_x = 0

        for tc in tr.xpath('./w:tc', namespaces=NAMESPACES):
            cell = doc.TableCell()

            tc_pr = tc.find(_name('{{{w}}}tcPr'))
            if tc_pr is not None:
                # BUG FIX: the original passed the module alias ``doc``
                # instead of the ``document`` being parsed.
                parse_table_column_properties(document, cell, tc_pr)

            # maybe after
            pos_x += cell.grid_span

            if cell.vmerge is not None and cell.vmerge == "":
                # Continuation of a vertical merge: grow the span above
                # instead of adding a new cell.
                table.rows = _change(table.rows, pos_x)
            else:
                for p in tc.xpath('./w:p', namespaces=NAMESPACES):
                    cell.elements.append(parse_paragraph(document, p))

                columns.append(cell)

        table.rows.append(columns)

    return table
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_document(xmlcontent): """Parse document with content. Content is placed in file 'document.xml'. """
document = etree.fromstring(xmlcontent) body = document.xpath('.//w:body', namespaces=NAMESPACES)[0] document = doc.Document() for elem in body: if elem.tag == _name('{{{w}}}p'): document.elements.append(parse_paragraph(document, elem)) if elem.tag == _name('{{{w}}}tbl'): document.elements.append(parse_table(document, elem)) if elem.tag == _name('{{{w}}}sdt'): document.elements.append(doc.TOC()) return document
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_relationship(document, xmlcontent, rel_type): """Parse relationship document. Relationships hold information like external or internal references for links. Relationships are placed in file '_rels/document.xml.rels'. """
doc = etree.fromstring(xmlcontent) for elem in doc: if elem.tag == _name('{{{pr}}}Relationship'): rel = {'target': elem.attrib['Target'], 'type': elem.attrib['Type'], 'target_mode': elem.attrib.get('TargetMode', 'Internal')} document.relationships[rel_type][elem.attrib['Id']] = rel
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_style(document, xmlcontent): """Parse styles document. Styles are defined in file 'styles.xml'. """
    styles = etree.fromstring(xmlcontent)

    # Document-wide run-property defaults come from <w:rPrDefault>.
    _r = styles.xpath('.//w:rPrDefault', namespaces=NAMESPACES)
    if len(_r) > 0:
        rpr = _r[0].find(_name('{{{w}}}rPr'))
        if rpr is not None:
            st = doc.Style()
            parse_previous_properties(document, st, rpr)
            document.default_style = st

    # rest of the styles
    for style in styles.xpath('.//w:style', namespaces=NAMESPACES):
        st = doc.Style()

        st.style_id = style.attrib[_name('{{{w}}}styleId')]

        # w:type distinguishes paragraph/character/table/numbering styles.
        style_type = style.attrib[_name('{{{w}}}type')]
        if style_type is not None:
            st.style_type = style_type

        if _name('{{{w}}}default') in style.attrib:
            is_default = style.attrib[_name('{{{w}}}default')]
            if is_default is not None:
                # Stored as the string '1' when the style is the default
                # for its type.
                st.is_default = is_default == '1'

        name = style.find(_name('{{{w}}}name'))
        if name is not None:
            st.name = name.attrib[_name('{{{w}}}val')]

        based_on = style.find(_name('{{{w}}}basedOn'))
        if based_on is not None:
            st.based_on = based_on.attrib[_name('{{{w}}}val')]

        document.styles.styles[st.style_id] = st

        if st.is_default:
            document.styles.default_styles[st.style_type] = st.style_id

        # Run- and paragraph-level formatting attached to this style.
        rpr = style.find(_name('{{{w}}}rPr'))
        if rpr is not None:
            parse_previous_properties(document, st, rpr)

        ppr = style.find(_name('{{{w}}}pPr'))
        if ppr is not None:
            parse_paragraph_properties(document, st, ppr)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_comments(document, xmlcontent): """Parse comments document. Comments are defined in file 'comments.xml' """
comments = etree.fromstring(xmlcontent) document.comments = {} for comment in comments.xpath('.//w:comment', namespaces=NAMESPACES): # w:author # w:id # w: date comment_id = comment.attrib[_name('{{{w}}}id')] comm = doc.CommentContent(comment_id) comm.author = comment.attrib.get(_name('{{{w}}}author'), None) comm.date = comment.attrib.get(_name('{{{w}}}date'), None) comm.elements = [parse_paragraph(document, para) for para in comment.xpath('.//w:p', namespaces=NAMESPACES)] document.comments[comment_id] = comm
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_footnotes(document, xmlcontent): """Parse footnotes document. Footnotes are defined in file 'footnotes.xml' """
footnotes = etree.fromstring(xmlcontent) document.footnotes = {} for footnote in footnotes.xpath('.//w:footnote', namespaces=NAMESPACES): _type = footnote.attrib.get(_name('{{{w}}}type'), None) # don't know what to do with these now if _type in ['separator', 'continuationSeparator', 'continuationNotice']: continue paragraphs = [parse_paragraph(document, para) for para in footnote.xpath('.//w:p', namespaces=NAMESPACES)] document.footnotes[footnote.attrib[_name('{{{w}}}id')]] = paragraphs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_endnotes(document, xmlcontent): """Parse endnotes document. Endnotes are defined in file 'endnotes.xml' """
endnotes = etree.fromstring(xmlcontent) document.endnotes = {} for note in endnotes.xpath('.//w:endnote', namespaces=NAMESPACES): paragraphs = [parse_paragraph(document, para) for para in note.xpath('.//w:p', namespaces=NAMESPACES)] document.endnotes[note.attrib[_name('{{{w}}}id')]] = paragraphs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_numbering(document, xmlcontent): """Parse numbering document. Numbering is defined in file 'numbering.xml'. """
    numbering = etree.fromstring(xmlcontent)
    # (sic) "abstruct" follows the attribute's existing spelling elsewhere
    # in this codebase.
    document.abstruct_numbering = {}
    document.numbering = {}

    # <w:abstractNum>: the reusable level definitions (indent level ->
    # number format).
    for abstruct_num in numbering.xpath('.//w:abstractNum', namespaces=NAMESPACES):
        numb = {}

        for lvl in abstruct_num.xpath('./w:lvl', namespaces=NAMESPACES):
            ilvl = int(lvl.attrib[_name('{{{w}}}ilvl')])
            fmt = lvl.find(_name('{{{w}}}numFmt'))

            numb[ilvl] = {'numFmt': fmt.attrib[_name('{{{w}}}val')]}

        # Keyed by the raw (string) abstractNumId attribute.
        document.abstruct_numbering[abstruct_num.attrib[_name('{{{w}}}abstractNumId')]] = numb

    # <w:num>: concrete numbering instances pointing at an abstract
    # definition.
    for num in numbering.xpath('.//w:num', namespaces=NAMESPACES):
        num_id = num.attrib[_name('{{{w}}}numId')]

        abs_num = num.find(_name('{{{w}}}abstractNumId'))
        if abs_num is not None:
            number_id = abs_num.attrib[_name('{{{w}}}val')]
            # NOTE(review): keys here are int while abstruct_numbering
            # keys stay strings — lookups elsewhere must match this
            # asymmetry; confirm before changing either.
            document.numbering[int(num_id)] = number_id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_from_file(file_object): """Parses existing OOXML file. :Args: - file_object (:class:`ooxml.docx.DOCXFile`): OOXML file object :Returns: Returns parsed document of type :class:`ooxml.doc.Document` """
logger.info('Parsing %s file.', file_object.file_name) # Read the files doc_content = file_object.read_file('document.xml') # Parse the document document = parse_document(doc_content) try: style_content = file_object.read_file('styles.xml') parse_style(document, style_content) except KeyError: logger.warning('Could not read styles.') try: doc_rel_content = file_object.read_file('_rels/document.xml.rels') parse_relationship(document, doc_rel_content, 'document') except KeyError: logger.warning('Could not read document relationships.') try: doc_rel_content = file_object.read_file('_rels/endnotes.xml.rels') parse_relationship(document, doc_rel_content, 'endnotes') except KeyError: logger.warning('Could not read endnotes relationships.') try: doc_rel_content = file_object.read_file('_rels/footnotes.xml.rels') parse_relationship(document, doc_rel_content, 'footnotes') except KeyError: logger.warning('Could not read footnotes relationships.') try: comments_content = file_object.read_file('comments.xml') parse_comments(document, comments_content) except KeyError: logger.warning('Could not read comments.') try: footnotes_content = file_object.read_file('footnotes.xml') parse_footnotes(document, footnotes_content) except KeyError: logger.warning('Could not read footnotes.') try: endnotes_content = file_object.read_file('endnotes.xml') parse_endnotes(document, endnotes_content) except KeyError: logger.warning('Could not read endnotes.') try: numbering_content = file_object.read_file('numbering.xml') parse_numbering(document, numbering_content) except KeyError: logger.warning('Could not read numbering.') return document