text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_gpg_home(appname, config_dir=None): """ Make GPG keyring dir for a particular application. Return the path. """
def make_gpg_home(appname, config_dir=None):
    """
    Make GPG keyring dir for a particular application.
    Return the path.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    path = os.path.join( config_dir, "gpgkeys", appname )

    if not os.path.exists(path):
        # 0o700: keyring must be readable by the owner only.
        # (0o700 instead of the Python-2-only literal 0700, so the
        # file also parses under Python 3; same value on Python 2.6+)
        os.makedirs( path, 0o700 )
    else:
        # directory already exists; make sure its permissions are correct
        os.chmod( path, 0o700 )

    return path
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_gpg_home( appname, config_dir=None ): """ Get the GPG keyring directory for a particular application. Return the path. """
def get_gpg_home( appname, config_dir=None ):
    """
    Get the GPG keyring directory for a particular application.
    Return the path (the directory is not created here).
    """
    assert is_valid_appname(appname)
    base_dir = get_config_dir( config_dir )
    return os.path.join( base_dir, "gpgkeys", appname )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_gpg_tmphome( prefix=None, config_dir=None ): """ Make a temporary directory to hold GPG keys that are not going to be stored to the application's keyring. """
def make_gpg_tmphome( prefix=None, config_dir=None ):
    """
    Make a temporary directory to hold GPG keys that are
    not going to be stored to the application's keyring.
    Return the path to the new directory.
    """
    if prefix is None:
        prefix = "tmp"

    config_dir = get_config_dir( config_dir )
    tmppath = os.path.join( config_dir, "tmp" )
    if not os.path.exists( tmppath ):
        # 0o700 (was the Python-2-only literal 0700): owner-only access,
        # since this tree will briefly hold private key material
        os.makedirs( tmppath, 0o700 )

    tmpdir = tempfile.mkdtemp( prefix=("%s-" % prefix), dir=tmppath )
    return tmpdir
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ): """ Store a key locally to our app keyring. Does NOT put it into a blockchain ID Return the key ID on success Return None on error """
def gpg_stash_key( appname, key_bin, config_dir=None, gpghome=None ):
    """
    Store a key locally to our app keyring.
    Does NOT put it into a blockchain ID.
    Return the key's fingerprint on success.
    Return None on error.
    """
    assert is_valid_appname(appname)
    key_bin = str(key_bin)
    assert len(key_bin) > 0

    if gpghome is None:
        # no explicit keyring given; use (creating if necessary) the
        # app-specific keyring under the config directory
        config_dir = get_config_dir( config_dir )
        keydir = make_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.import_keys( key_bin )

    try:
        # exactly one key must have been imported
        # NOTE: "except X, e" is Python-2-only syntax, kept as-is
        assert res.count == 1, "Failed to store key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to store key to %s" % keydir)
        log.debug("res: %s" % res.__dict__)
        log.debug("(%s)\n%s" % (len(key_bin), key_bin))
        return None

    return res.fingerprints[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ): """ Remove a public key locally from our local app keyring Return True on success Return False on error """
def gpg_unstash_key( appname, key_id, config_dir=None, gpghome=None ):
    """
    Remove a public key locally from our local app keyring.
    Return True on success.
    Return False on error.
    """
    assert is_valid_appname(appname)

    if gpghome is None:
        config_dir = get_config_dir( config_dir )
        keydir = get_gpg_home( appname, config_dir=config_dir )
    else:
        keydir = gpghome

    gpg = gnupg.GPG( homedir=keydir )
    res = gpg.delete_keys( [key_id] )
    if res.status == 'Must delete secret key first':
        # this is a private key; delete the secret part before the public part
        res = gpg.delete_keys( [key_id], secret=True )

    try:
        assert res.status == 'ok', "Failed to delete key (%s)" % res
    except AssertionError, e:
        log.exception(e)
        log.error("Failed to delete key '%s'" % key_id)
        log.debug("res: %s" % res.__dict__)
        return False

    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_download_key( key_id, key_server, config_dir=None ): """ Download a GPG key from a key server. Do not import it into any keyrings. Return the ASCII-armored key """
def gpg_download_key( key_id, key_server, config_dir=None ):
    """
    Download a GPG key from a key server.
    Do not import it into any keyrings.
    Return the ASCII-armored key on success.
    Return None on error.
    """
    config_dir = get_config_dir( config_dir )
    # use a throwaway GPG home so nothing is imported into a real keyring
    tmpdir = make_gpg_tmphome( prefix="download", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    recvdat = gpg.recv_keys( key_server, key_id )
    fingerprint = None

    try:
        # expect exactly one key back from the server
        assert recvdat.count == 1
        assert len(recvdat.fingerprints) == 1
        fingerprint = recvdat.fingerprints[0]
    except AssertionError, e:
        log.exception(e)
        log.error( "Failed to fetch key '%s' from '%s'" % (key_id, key_server))
        shutil.rmtree( tmpdir )
        return None

    keydat = gpg.export_keys( [fingerprint] )
    shutil.rmtree( tmpdir )
    return str(keydat)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_key_fingerprint( key_data, config_dir=None ): """ Get the key ID of a given serialized key Return the fingerprint on success Return None on error """
def gpg_key_fingerprint( key_data, config_dir=None ):
    """
    Get the key ID of a given serialized key.
    Return the fingerprint on success.
    Return None on error.
    """
    key_data = str(key_data)

    config_dir = get_config_dir( config_dir )
    # import into a throwaway GPG home just to read the fingerprint
    tmpdir = make_gpg_tmphome( prefix="key_id-", config_dir=config_dir )
    gpg = gnupg.GPG( homedir=tmpdir )
    res = gpg.import_keys( key_data )

    try:
        assert res.count == 1, "Failed to import key"
        assert len(res.fingerprints) == 1, "Nonsensical GPG response: wrong number of fingerprints"
        fingerprint = res.fingerprints[0]
        shutil.rmtree(tmpdir)
        return fingerprint
    except AssertionError, e:
        log.exception(e)
        shutil.rmtree(tmpdir)
        return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_verify_key( key_id, key_data, config_dir=None ): """ Verify that a given serialized key, when imported, has the given key ID. Return True on success Return False on error """
def gpg_verify_key( key_id, key_data, config_dir=None ):
    """
    Verify that a given serialized key, when imported, has the given key ID.
    Return True on success.
    Return False on error.
    """
    key_data = str(key_data)
    config_dir = get_config_dir( config_dir )

    # normalize the given ID: uppercase, spaces removed
    sanitized_key_id = "".join( key_id.upper().split(" ") )
    if len(sanitized_key_id) < 16:
        log.debug("Fingerprint is too short to be secure")
        return False

    fingerprint = gpg_key_fingerprint( key_data, config_dir=config_dir )
    if fingerprint is None:
        log.debug("Failed to fingerprint key")
        return False

    # accept an exact fingerprint match, or a match on its suffix
    # (short/long key IDs are suffixes of the full fingerprint)
    if fingerprint == sanitized_key_id or fingerprint.endswith( sanitized_key_id ):
        return True

    log.debug("Imported key does not match the given ID")
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_export_key( appname, key_id, config_dir=None, include_private=False ): """ Get the ASCII-armored key, given the ID """
def gpg_export_key( appname, key_id, config_dir=None, include_private=False ):
    """
    Get the ASCII-armored key from the app's keyring, given its ID.
    Asserts that the export produced data.
    """
    assert is_valid_appname(appname)
    config_dir = get_config_dir( config_dir )
    keydir = get_gpg_home( appname, config_dir=config_dir )

    gpg = gnupg.GPG( homedir=keydir )
    exported = gpg.export_keys( [key_id], secret=include_private )
    if not exported:
        log.debug("Failed to export key %s from '%s'" % (key_id, keydir))
    assert exported
    return exported
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_fetch_key( key_url, key_id=None, config_dir=None ): """ Fetch a GPG public key from the given URL. Supports anything urllib2 supports. If the URL has no scheme, then assume it's a PGP key server, and use GPG to go get it. The key is not accepted into any keyrings. Return the key data on success. If key_id is given, verify the key matches. Return None on error, or on failure to carry out any key verification """
def gpg_fetch_key( key_url, key_id=None, config_dir=None ):
    """
    Fetch a GPG public key from the given URL.
    Supports anything urllib2 supports, plus blockstack:// URLs,
    plus bare key-server names / iks:// URLs (handled via GPG).
    The key is not accepted into any keyrings.
    Return the key data on success.  If key_id is given, verify the key matches.
    Return None on error, or on failure to carry out any key verification.
    """
    dat = None
    from_blockstack = False

    # make sure it's valid
    try:
        urlparse.urlparse(key_url)
    except:
        log.error("Invalid URL")
        return None

    if "://" in key_url and not key_url.lower().startswith("iks://"):
        # fetch over a urllib2-style protocol
        opener = None
        key_data = None

        # handle blockstack:// URLs
        if key_url.startswith("blockstack://"):
            blockstack_opener = BlockstackHandler( config_path=os.path.join(config_dir, blockstack_client.CONFIG_FILENAME) )
            opener = urllib2.build_opener( blockstack_opener )
            from_blockstack = True

        elif key_url.lower().startswith("http://") or key_url.lower().startswith("https://"):
            # fetch, but at least try not to look like a bot
            opener = urllib2.build_opener()
            opener.addheaders = [('User-agent', 'Mozilla/5.0')]

        else:
            # defaults
            opener = urllib2.build_opener()

        try:
            f = opener.open( key_url )
            key_data_str = f.read()
            key_data = None
            if from_blockstack:
                # expect: {'key name': 'PEM string'}
                key_data_dict = json.loads(key_data_str)
                assert len(key_data_dict) == 1, "Got multiple keys"
                # NOTE(review): .keys()[0] is Python-2-only (list-returning keys())
                key_data = str(key_data_dict[key_data_dict.keys()[0]])
            else:
                # expect: PEM string
                key_data = key_data_str
            f.close()

        except Exception, e:
            log.exception(e)
            if key_id is not None:
                log.error("Failed to fetch key '%s' from '%s'" % (key_id, key_url))
            else:
                log.error("Failed to fetch key from '%s'" % key_url)
            return None

        # verify, if we have the ID.
        # if we don't have the key ID, then we must be fetching from blockstack
        # (since then the data will have already been verified by the protocol,
        # using locally-hosted trusted information)
        if not from_blockstack and key_id is None:
            log.error( "No key ID given for key located at %s" % key_url )
            return None

        if key_id is not None:
            rc = gpg_verify_key( key_id, key_data, config_dir=config_dir )
            if not rc:
                log.error("Failed to verify key %s" % key_id)
                return None

        dat = key_data

    else:
        # iks protocol, fetch from keyserver
        key_server = key_url
        if '://' in key_server:
            # strip scheme; gpg wants a bare host name
            key_server = urlparse.urlparse(key_server).netloc

        dat = gpg_download_key( key_id, key_server, config_dir=config_dir )

    assert dat is not None and len(dat) > 0, "BUG: no key data received for '%s' from '%s'" % (key_id, key_url)
    return dat
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ): """ Put an application GPG key. Stash the private key locally to an app-specific keyring. If immutable is True, then store the data as an immutable entry (e.g. update the zonefile with the key hash) This is a time-consuming operation (on the order of an hour), and you will get back the transaction ID on a successful execution. It is up to you to wait until the transaction is confirmed before using the key. Otherwise, the key is stored to mutable storage. """
def gpg_app_put_key( blockchain_id, appname, keyname, key_data, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Put an application GPG key.
    Stash the private key locally to an app-specific keyring.

    If immutable is True, then store the data as an immutable entry
    (e.g. update the zonefile with the key hash).  This is a
    time-consuming operation (on the order of an hour); you will get
    back the transaction ID on success, and it is up to you to wait
    until the transaction is confirmed before using the key.
    Otherwise, the key is stored to mutable storage.

    Return the storage result dict, augmented with 'key_url',
    'key_data' (public part) and 'key_id'.  Return {'error': ...} on failure.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    # stash the (possibly private) key locally first
    try:
        keydir = make_gpg_home( appname, config_dir=config_dir )
        key_id = gpg_stash_key( appname, key_data, config_dir=config_dir, gpghome=keydir )
        assert key_id is not None, "Failed to stash key"
        log.debug("Stashed app key '%s:%s' (%s) under '%s'" % (appname, keyname, key_id, keydir))
    except Exception, e:
        log.exception(e)
        log.error("Failed to store GPG key '%s'" % keyname)
        return {'error': "Failed to store GPG key locally"}

    # get public key...
    assert is_valid_appname(appname)
    try:
        # export only the public part for publication
        pubkey_data = gpg_export_key( appname, key_id, config_dir=config_dir )
    except:
        return {'error': 'Failed to load key'}

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    key_url = None

    if not immutable:
        # publish to mutable storage (fast)
        res = client.put_mutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_mutable_data_url( blockchain_id, fq_key_name, res['version'] )

    else:
        # publish immutably (requires a zonefile update transaction)
        res = client.put_immutable( blockchain_id, fq_key_name, {fq_key_name: pubkey_data}, txid=txid, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in res:
            return res

        key_url = client.make_immutable_data_url( blockchain_id, fq_key_name, res['immutable_data_hash'] )

    res['key_url'] = key_url
    res['key_data'] = pubkey_data
    res['key_id'] = gpg_key_fingerprint( pubkey_data, config_dir=config_dir )

    log.debug("Put key %s:%s (%s) to %s" % (appname, keyname, res['key_id'], key_url))
    return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ): """ Remove an application GPG key. Unstash the local private key. If immutable is True, then remove the data from the user's zonefile, not profile. The delete may take on the order of an hour to complete on the blockchain. A transaction ID will be returned to you on successful deletion, and it will be up to you to wait for the transaction to get confirmed. """
def gpg_app_delete_key( blockchain_id, appname, keyname, txid=None, immutable=False, proxy=None, wallet_keys=None, config_dir=None ):
    """
    Remove an application GPG key.
    Unstash the local private key.

    If immutable is True, then remove the data from the user's zonefile,
    not profile.  The delete may take on the order of an hour to complete
    on the blockchain; a transaction ID will be returned on successful
    deletion, and it is up to you to wait for confirmation.

    Return the deletion result dict (possibly with a 'warning' key).
    Return {'error': ...} on failure.
    """
    assert is_valid_appname(appname)
    assert is_valid_keyname(keyname)

    fq_key_name = "gpg.%s.%s" % (appname, keyname)
    result = {}
    dead_pubkey_dict = None
    dead_pubkey = None
    key_id = None

    if not immutable:
        # find the key first, so we can get the key ID and then remove it locally
        dead_pubkey_dict = client.get_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    else:
        # need the key ID so we can unstash locally
        dead_pubkey_dict = client.get_immutable( blockchain_id, None, data_id=fq_key_name, proxy=proxy )
        if 'error' in dead_pubkey_dict:
            return dead_pubkey_dict

    # stored as a single-entry {fq_key_name: pubkey} mapping
    dead_pubkey_kv = dead_pubkey_dict['data']
    assert len(dead_pubkey_kv.keys()) == 1, "Not a public key we wrote: %s" % dead_pubkey_kv

    # NOTE(review): .keys()[0] is Python-2-only (list-returning keys())
    dead_pubkey = dead_pubkey_kv[ dead_pubkey_kv.keys()[0] ]
    key_id = gpg_key_fingerprint( dead_pubkey, config_dir=config_dir )
    assert key_id is not None, "Failed to load pubkey fingerprint"

    # actually delete
    if not immutable:
        result = client.delete_mutable( blockchain_id, fq_key_name, proxy=proxy, wallet_keys=wallet_keys )
    else:
        result = client.delete_immutable( blockchain_id, None, data_id=fq_key_name, wallet_keys=wallet_keys, proxy=proxy )

    if 'error' in result:
        return result

    # unstash the local copy; a failure here is reported but not fatal
    try:
        rc = gpg_unstash_key( appname, key_id, config_dir=config_dir )
        assert rc, "Failed to unstash key"
    except:
        log.warning("Failed to remove private key for '%s'" % key_id )
        result['warning'] = "Failed to remove private key"
        if os.environ.get('BLOCKSTACK_TEST') is not None:
            # make sure this never happens in testing
            raise

    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_sign( path_to_sign, sender_key_info, config_dir=None, passphrase=None ): """ Sign a file on disk. @sender_key_info should be a dict with { } """
if config_dir is None: config_dir = get_config_dir() # ingest keys tmpdir = make_gpg_tmphome( prefix="sign", config_dir=config_dir ) try: sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir ) except Exception, e: log.exception(e) shutil.rmtree(tmpdir) return {'error': 'No such private key'} res = gpg_stash_key( "sign", sender_privkey, config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to load sender private key'} # do the signature gpg = gnupg.GPG( homedir=tmpdir ) res = None with open(path_to_sign, "r") as fd_in: res = gpg.sign_file( fd_in, keyid=sender_key_info['key_id'], passphrase=passphrase, detach=True ) shutil.rmtree(tmpdir) if not res: log.debug("sign_file error: %s" % res.__dict__) log.debug("signer: %s" % sender_key_info['key_id']) return {'error': 'Failed to sign data'} return {'status': True, 'sig': res.data }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_verify( path_to_verify, sigdata, sender_key_info, config_dir=None ): """ Verify a file on disk was signed by the given sender. @sender_key_info should be a dict with { } Return {'status': True} on success """
if config_dir is None: config_dir = get_config_dir() # ingest keys tmpdir = make_gpg_tmphome( prefix="verify", config_dir=config_dir ) res = gpg_stash_key( "verify", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to stash key %s' % sender_key_info['key_id']} # stash detached signature fd, path = tempfile.mkstemp( prefix=".sig-verify-" ) f = os.fdopen(fd, "w") f.write( sigdata ) f.flush() os.fsync(f.fileno()) f.close() # verify gpg = gnupg.GPG( homedir=tmpdir ) with open(path, "r") as fd_in: res = gpg.verify_file( fd_in, data_filename=path_to_verify ) shutil.rmtree(tmpdir) try: os.unlink(path) except: pass if not res: log.debug("verify_file error: %s" % res.__dict__) return {'error': 'Failed to decrypt data'} log.debug("verification succeeded from keys in %s" % config_dir) return {'status': True}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_encrypt( fd_in, path_out, sender_key_info, recipient_key_infos, passphrase=None, config_dir=None ): """ Encrypt a stream of data for a set of keys. @sender_key_info should be a dict with { } Return {'status': True} on success """
if config_dir is None: config_dir = get_config_dir() # ingest keys tmpdir = make_gpg_tmphome( prefix="encrypt", config_dir=config_dir ) for key_info in recipient_key_infos: res = gpg_stash_key( "encrypt", key_info['key_data'], config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to stash key %s' % key_info['key_id']} # copy over our key try: sender_privkey = gpg_export_key( sender_key_info['app_name'], sender_key_info['key_id'], include_private=True, config_dir=config_dir ) except Exception, e: log.exception(e) shutil.rmtree(tmpdir) return {'error': 'No such private key'} res = gpg_stash_key( "encrypt", sender_privkey, config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to load sender private key'} recipient_key_ids = [r['key_id'] for r in recipient_key_infos] # do the encryption gpg = gnupg.GPG( homedir=tmpdir ) res = gpg.encrypt_file( fd_in, recipient_key_ids, sign=sender_key_info['key_id'], passphrase=passphrase, output=path_out, always_trust=True ) shutil.rmtree(tmpdir) if res.status != 'encryption ok': log.debug("encrypt_file error: %s" % res.__dict__) log.debug("recipients: %s" % recipient_key_ids) log.debug("signer: %s" % sender_key_info['key_id']) return {'error': 'Failed to encrypt data'} return {'status': True}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gpg_decrypt( fd_in, path_out, sender_key_info, my_key_info, passphrase=None, config_dir=None ): """ Decrypt a stream of data using key info for a private key we own. @my_key_info and @sender_key_info should be data returned by gpg_app_get_key { } Return {'status': True} on success """
if config_dir is None: config_dir = get_config_dir() # ingest keys tmpdir = make_gpg_tmphome( prefix="decrypt", config_dir=config_dir ) res = gpg_stash_key( "decrypt", sender_key_info['key_data'], config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to stash key %s' % sender_key_info['key_id']} try: my_privkey = gpg_export_key( my_key_info['app_name'], my_key_info['key_id'], include_private=True, config_dir=config_dir ) except: shutil.rmtree(tmpdir) return {'error': 'Failed to load local private key for %s' % my_key_info['key_id']} res = gpg_stash_key( "decrypt", my_privkey, config_dir=config_dir, gpghome=tmpdir ) if res is None: shutil.rmtree(tmpdir) return {'error': 'Failed to load private key'} # do the decryption gpg = gnupg.GPG( homedir=tmpdir ) res = gpg.decrypt_file( fd_in, passphrase=passphrase, output=path_out, always_trust=True ) shutil.rmtree(tmpdir) if res.status != 'decryption ok': log.debug("decrypt_file: %s" % res.__dict__) return {'error': 'Failed to decrypt data'} log.debug("decryption succeeded from keys in %s" % config_dir) return {'status': True}
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_primary_command_usage(message=''): # type: (str) -> str """Return the usage string for the primary command."""
def get_primary_command_usage(message=''):
    # type: (str) -> str
    """Return the usage string for the primary command."""
    has_default_subcommand = None in settings.subcommands

    # if merging is disabled and a default subcommand exists, its
    # docstring IS the primary usage
    if has_default_subcommand and not settings.merge_primary_command:
        return format_usage(settings.subcommands[None].__doc__)

    if not message:
        if settings.message:
            message = '\n{}\n'.format(settings.message)
        else:
            message = ''

    doc = _DEFAULT_DOC.format(message=message)
    if has_default_subcommand:
        return _merge_doc(doc, settings.subcommands[None].__doc__)
    return format_usage(doc)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_help_usage(command): # type: (str) -> None """Print out a help message and exit the program. Args: command: If a command value is supplied then print the help message for the command module if available. If the command is '-a' or '--all', then print the standard help message but with a full list of available commands. Raises: ValueError: Raised if the help message is requested for an invalid command or an unrecognized option is passed to help. """
def get_help_usage(command):
    # type: (str) -> None
    """Print out a help message and exit the program.

    Args:
        command: If a command value is supplied then print the help message
            for the command module if available. If the command is '-a' or
            '--all', then print the standard help message but with a full
            list of available commands.

    Raises:
        ValueError: Raised if the help message is requested for an
            invalid command or an unrecognized option is passed to help.
    """
    if not command:
        doc = get_primary_command_usage()
    elif command in ('-a', '--all'):
        subcommands = [k for k in settings.subcommands if k is not None]
        available_commands = subcommands + ['help']
        command_doc = '\nAvailable commands:\n{}\n'.format(
            '\n'.join(' {}'.format(c) for c in sorted(available_commands)))
        doc = get_primary_command_usage(command_doc)
    elif command.startswith('-'):
        raise ValueError("Unrecognized option '{}'.".format(command))
    elif command in settings.subcommands:
        subcommand = settings.subcommands[command]
        doc = format_usage(subcommand.__doc__)
    else:
        # BUG FIX: an unknown command previously fell through with `doc`
        # unbound, raising NameError below instead of the documented ValueError
        raise ValueError("Invalid command '{}'.".format(command))
    # docopt prints the usage message and exits when given --help
    docopt.docopt(doc, argv=('--help',))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_usage(doc, width=None): # type: (str, Optional[int]) -> str """Format the docstring for display to the user. Args: doc: The docstring to reformat for display. Returns: The docstring formatted to parse and display to the user. This includes dedenting, rewrapping, and translating the docstring if necessary. """
def format_usage(doc, width=None):
    # type: (str, Optional[int]) -> str
    """Format the docstring for display to the user.

    Args:
        doc: The docstring to reformat for display.
        width: Optional display width; defaults to the terminal width or 80.

    Returns:
        The docstring formatted to parse and display to the user. This
        includes dedenting, rewrapping, and translating the docstring if
        necessary.
    """
    width = width or get_terminal_size().columns or 80
    # sections are separated by blank lines; wrap each independently
    wrapped_sections = [
        _wrap_section(section.strip(), width)
        for section in doc.replace('\r', '').split('\n\n')
    ]
    return '\n\n'.join(wrapped_sections)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_commands(docstring): # type: (str) -> Generator[Tuple[List[str], List[str]], None, None] """Parse a docopt-style string for commands and subcommands. Args: docstring: A docopt-style string to parse. If the string is not a valid docopt-style string, it will not yield any values. Yields: All tuples of commands and subcommands found in the docopt docstring. """
def parse_commands(docstring):
    # type: (str) -> Generator[Tuple[List[str], List[str]], None, None]
    """Parse a docopt-style string for commands and subcommands.

    Args:
        docstring: A docopt-style string to parse. If the string is not a
            valid docopt-style string, it will not yield any values.

    Yields:
        All tuples of commands and subcommands found in the docopt docstring.
    """
    try:
        # validity probe only: an invalid docopt string raises and we bail
        docopt.docopt(docstring, argv=())
    except (TypeError, docopt.DocoptLanguageError):
        return
    except docopt.DocoptExit:
        # expected: an empty argv rarely satisfies the usage pattern
        pass

    for command in _parse_section('usage', docstring):
        args = command.split()
        commands = []
        i = 0
        for i, arg in enumerate(args):
            # lowercase alphabetic words are (sub)commands; anything else
            # (options, <args>, UPPERCASE placeholders) ends the command run
            if arg[0].isalpha() and not arg[0].isupper():
                commands.append(arg)
            else:
                break
        # NOTE(review): if every arg is a command, args[i:] still contains
        # the final command -- confirm this overlap is intended
        yield commands, args[i:]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _merge_doc(original, to_merge): # type: (str, str) -> str """Merge two usage strings together. Args: original: The source of headers and initial section lines. to_merge: The source for the additional section lines to append. Returns: A new usage string that contains information from both usage strings. """
def _merge_doc(original, to_merge):
    # type: (str, str) -> str
    """Merge two usage strings together.

    Args:
        original: The source of headers and initial section lines.
        to_merge: The source for the additional section lines to append.

    Returns:
        A new usage string that contains information from both usage strings.
    """
    if not original:
        return to_merge or ''
    if not to_merge:
        return original or ''
    # merge the three known section types pairwise, in canonical order
    merged = [
        _merge_section(_get_section(name, original), _get_section(name, to_merge))
        for name in ('usage', 'arguments', 'options')
    ]
    return format_usage('\n\n'.join(merged).rstrip())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _merge_section(original, to_merge): # type: (str, str) -> str """Merge two sections together. Args: original: The source of header and initial section lines. to_merge: The source for the additional section lines to append. Returns: A new section string that uses the header of the original argument and the section lines from both. """
if not original: return to_merge or '' if not to_merge: return original or '' try: index = original.index(':') + 1 except ValueError: index = original.index('\n') name = original[:index].strip() section = '\n '.join( (original[index + 1:].lstrip(), to_merge[index + 1:].lstrip()) ).rstrip() return '{name}\n {section}'.format(name=name, section=section)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_section(name, source): # type: (str, str) -> Optional[str] """Extract the named section from the source. Args: name: The name of the section to extract (e.g. "Usage"). source: The usage string to parse. Returns: A string containing only the requested section. If the section appears multiple times, each instance will be merged into a single section. """
def _get_section(name, source):
    # type: (str, str) -> Optional[str]
    """Extract the named section from the source.

    Args:
        name: The name of the section to extract (e.g. "Usage").
        source: The usage string to parse.

    Returns:
        A string containing only the requested section. If the section
        appears multiple times, each instance will be merged into a single
        section.
    """
    # a section is a header line containing `name` plus every following
    # line indented by a space or tab
    pattern = re.compile(
        '^([^\n]*{name}[^\n]*\n?(?:[ \t].*?(?:\n|$))*)'.format(name=name),
        re.IGNORECASE | re.MULTILINE)
    merged = None
    for chunk in pattern.findall(source):
        merged = _merge_section(merged, chunk.strip())
    return merged
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _wrap_section(source, width): # type: (str, int) -> str """Wrap the given section string to the current terminal size. Intelligently wraps the section string to the given width. When wrapping section lines, it auto-adjusts the spacing between terms and definitions. It also adjusts commands the fit the correct length for the arguments. Args: source: The section string to wrap. Returns: The wrapped section string. """
def _wrap_section(source, width):
    # type: (str, int) -> str
    """Wrap the given section string to the current terminal size.

    Intelligently wraps the section string to the given width. When
    wrapping section lines, it auto-adjusts the spacing between terms and
    definitions. It also adjusts commands to fit the correct length for
    the arguments.

    Args:
        source: The section string to wrap.
        width: The width to wrap to.

    Returns:
        The wrapped section string.
    """
    # dispatch on the section type
    if _get_section('usage', source):
        return _wrap_usage_section(source, width)
    if _is_definition_section(source):
        return _wrap_definition_section(source, width)
    # plain prose: wrap each physical line independently
    wrapped = []
    for line in inspect.cleandoc(source).splitlines():
        wrapped.extend(textwrap.wrap(line, width, replace_whitespace=False))
    return '\n'.join(wrapped)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_definition_section(source): """Determine if the source is a definition section. Args: source: The usage string source that may be a section. Returns: True if the source describes a definition section; otherwise, False. """
try: definitions = textwrap.dedent(source).split('\n', 1)[1].splitlines() return all( re.match(r'\s\s+((?!\s\s).+)\s\s+.+', s) for s in definitions) except IndexError: return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _wrap_usage_section(source, width): # type: (str, int) -> str """Wrap the given usage section string to the current terminal size. Note: Commands arguments are wrapped to the column that the arguments began on the first line of the command. Args: source: The section string to wrap. Returns: The wrapped section string. """
def _wrap_usage_section(source, width):
    # type: (str, int) -> str
    """Wrap the given usage section string to the current terminal size.

    Note:
        Command arguments are wrapped to the column that the arguments
        began on the first line of the command.

    Args:
        source: The section string to wrap.
        width: The width to wrap to.

    Returns:
        The wrapped section string.
    """
    # nothing to do when every line already fits
    if all(len(line) <= width for line in source.splitlines()):
        return source
    section_header = source[:source.index(':') + 1].strip()
    lines = [section_header]
    for commands, args in parse_commands(source):
        prefix = ' {} '.format(' '.join(commands))
        # continuation lines are aligned under the first argument column
        continuation = '\n' + ' ' * len(prefix)
        wrapped_args = continuation.join(
            textwrap.wrap(' '.join(args), width - len(prefix)))
        lines.extend((prefix + wrapped_args).splitlines())
    return '\n'.join(lines)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _wrap_definition_section(source, width): # type: (str, int) -> str """Wrap the given definition section string to the current terminal size. Note: Auto-adjusts the spacing between terms and definitions. Args: source: The section string to wrap. Returns: The wrapped section string. """
def _wrap_definition_section(source, width):
    # type: (str, int) -> str
    """Wrap the given definition section string to the current terminal size.

    Note:
        Auto-adjusts the spacing between terms and definitions.

    Args:
        source: The section string to wrap.
        width: The width to wrap to.

    Returns:
        The wrapped section string.
    """
    index = source.index('\n') + 1
    definitions, max_len = _get_definitions(source[index:])
    # continuation lines align under the description column
    sep = '\n' + ' ' * (max_len + 4)
    lines = [source[:index].strip()]
    # .items() behaves the same on Python 2 and 3; this drops the
    # third-party `six` dependency that six.iteritems() required
    for arg, desc in definitions.items():
        wrapped_desc = sep.join(textwrap.wrap(desc, width - max_len - 4))
        lines.append(' {arg:{size}} {desc}'.format(
            arg=arg, size=str(max_len), desc=wrapped_desc
        ))
    return '\n'.join(lines)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_definitions(source): # type: (str) -> Tuple[Dict[str, str], int] """Extract a dictionary of arguments and definitions. Args: source: The source for a section of a usage string that contains definitions. Returns: A two-tuple containing a dictionary of all arguments and definitions as well as the length of the longest argument. """
max_len = 0 descs = collections.OrderedDict() # type: Dict[str, str] lines = (s.strip() for s in source.splitlines()) non_empty_lines = (s for s in lines if s) for line in non_empty_lines: if line: arg, desc = re.split(r'\s\s+', line.strip()) arg_len = len(arg) if arg_len > max_len: max_len = arg_len descs[arg] = desc return descs, max_len
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_section(name, source): # type: (str, str) -> List[str] """Yield each section line. Note: Depending on how it is wrapped, a section line can take up more than one physical line. Args: name: The name of the section to extract (e.g. "Usage"). source: The usage string to parse. Returns: A list containing each line, de-wrapped by whitespace from the source code. If the section is defined multiple times in the source code, all lines from all sections with that name will be returned. """
def _parse_section(name, source):
    # type: (str, str) -> List[str]
    """Return each section line, de-wrapped.

    Note:
        Depending on how it is wrapped, a section line can take up more
        than one physical line.

    Args:
        name: The name of the section to extract (e.g. "Usage").
        source: The usage string to parse.

    Returns:
        A list containing each line, de-wrapped by whitespace from the
        source code. If the section is defined multiple times in the source
        code, all lines from all sections with that name will be returned.
    """
    # NOTE(review): the [7:] slice assumes the header is exactly seven
    # characters (e.g. "usage:\n") -- confirm for other section names
    section = textwrap.dedent(_get_section(name, source)[7:])
    commands = []  # type: List[str]
    for line in section.splitlines():
        if not commands or line[:1].isalpha() and line[:1].islower():
            # a new command entry starts with a lowercase letter at column 0
            commands.append(line)
        else:
            # indented continuation of a wrapped command line: re-join it
            commands[-1] = '{} {}'.format(commands[-1].strip(), line.strip())
    return commands
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def move(self, particle, u, v, w, modelTimestep, **kwargs): """ I'm dead, so no behaviors should act on me """
# Kill the particle if it isn't settled and isn't already dead. if not particle.settled and not particle.dead: particle.die() # Still save the temperature and salinity for the model output temp = kwargs.get('temperature', None) if temp is not None and math.isnan(temp): temp = None particle.temp = temp salt = kwargs.get('salinity', None) if salt is not None and math.isnan(salt): salt = None particle.salt = salt u = 0 v = 0 w = 0 # Do the calculation to determine the new location result = AsaTransport.distance_from_location_using_u_v_w(u=u, v=v, w=w, timestep=modelTimestep, location=particle.location) result['u'] = u result['v'] = v result['w'] = w return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def read_history_file(self, filename=None):
    u'''Load a readline history file.

    Each line of the file becomes one history entry; on I/O failure the
    history is reset to empty.
    '''
    if filename is None:
        filename = self.history_filename
    try:
        # BUG FIX: use a context manager so the file handle is always
        # closed (the original leaked the handle opened in the for-loop)
        with open(filename, u'r') as f:
            for line in f:
                self.add_history(lineobj.ReadLineTextBuffer(ensure_unicode(line.rstrip())))
    except IOError:
        self.history = []
        self.history_cursor = 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def write_history_file(self, filename=None):
    u'''Save a readline history file.

    At most the last ``self.history_length`` entries are written, one
    per line. If *filename* is None, ``self.history_filename`` is used.
    '''
    if filename is None:
        filename = self.history_filename
    # Context manager guarantees the handle is closed even if a write
    # fails; the original only closed on the success path.
    with open(filename, u'wb') as fp:
        for line in self.history[-self.history_length:]:
            fp.write(ensure_str(line.get_line_text()))
            # Native-str newline: the original wrote a unicode literal
            # to a binary-mode stream, relying on implicit coercion.
            fp.write('\n')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def add_history(self, line):
    u'''Append a line to the history buffer, as if it was the last line typed.

    Empty lines and immediate duplicates of the most recent entry are
    silently ignored.
    '''
    if not hasattr(line, "get_line_text"):
        line = lineobj.ReadLineTextBuffer(line)
    text = line.get_line_text()
    duplicate = bool(self.history) and self.history[-1].get_line_text() == text
    if text and not duplicate:
        self.history.append(line)
        self.history_cursor = len(self.history)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def beginning_of_history(self):  # (M-<)
    u'''Move to the first line in the history.'''
    self.history_cursor = 0
    # Only touch the line buffer when there is history to show.
    if self.history:
        self.l_buffer = self.history[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_time_objects_from_model_timesteps(cls, times, start): """ Calculate the datetimes of the model timesteps times should start at 0 and be in seconds """
modelTimestep = [] newtimes = [] for i in xrange(0, len(times)): try: modelTimestep.append(times[i+1] - times[i]) except StandardError: modelTimestep.append(times[i] - times[i-1]) newtimes.append(start + timedelta(seconds=times[i])) return (modelTimestep, newtimes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fill_polygon_with_points(cls, goal=None, polygon=None): """ Fill a shapely polygon with X number of points """
if goal is None: raise ValueError("Must specify the number of points (goal) to fill the polygon with") if polygon is None or (not isinstance(polygon, Polygon) and not isinstance(polygon, MultiPolygon)): raise ValueError("Must specify a polygon to fill points with") minx = polygon.bounds[0] maxx = polygon.bounds[2] miny = polygon.bounds[1] maxy = polygon.bounds[3] points = [] now = time.time() while len(points) < goal: random_x = random.uniform(minx, maxx) random_y = random.uniform(miny, maxy) p = Point(random_x, random_y) if p.within(polygon): points.append(p) logger.info("Filling polygon with points took %f seconds" % (time.time() - now)) return points
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def distance_from_location_using_u_v_w(cls, u=None, v=None, w=None, timestep=None, location=None): """ Calculate the great-circle distance from a location using u, v, and w. u, v, and w must be in the same units as the timestep. Stick with seconds. """
# Move horizontally distance_horiz = 0 azimuth = 0 angle = 0 depth = location.depth if u is not 0 and v is not 0: s_and_d = AsaMath.speed_direction_from_u_v(u=u,v=v) # calculates velocity in m/s from transformed u and v distance_horiz = s_and_d['speed'] * timestep # calculate the horizontal distance in meters using the velocity and model timestep angle = s_and_d['direction'] # Great circle calculation # Calculation takes in azimuth (heading from North, so convert our mathematical angle to azimuth) azimuth = AsaMath.math_angle_to_azimuth(angle=angle) distance_vert = 0. if w is not None: # Move vertically # Depth is positive up, negative down. w wil be negative if moving down, and positive if moving up distance_vert = w * timestep depth += distance_vert # calculate the vertical distance in meters using w (m/s) and model timestep (s) if distance_horiz != 0: vertical_angle = math.degrees(math.atan(distance_vert / distance_horiz)) gc_result = AsaGreatCircle.great_circle(distance=distance_horiz, azimuth=azimuth, start_point=location) else: # Did we go up or down? vertical_angle = 0. if distance_vert < 0: # Down vertical_angle = 270. elif distance_vert > 0: # Up vertical_angle = 90. gc_result = { 'latitude': location.latitude, 'longitude': location.longitude, 'reverse_azimuth': 0 } #logger.info("Particle moving from %fm to %fm from a vertical speed of %f m/s over %s seconds" % (location.depth, depth, w, str(timestep))) gc_result['azimuth'] = azimuth gc_result['depth'] = depth gc_result['distance'] = distance_horiz gc_result['angle'] = angle gc_result['vertical_distance'] = distance_vert gc_result['vertical_angle'] = vertical_angle return gc_result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def shutdown(self): """Wait for all threads to complete"""
# Stop accepting new work, then wait for the workers to drain.
self.started = False
try:
    # Block until every worker thread has finished its queued items.
    for worker in self._threads:
        worker.join()
finally:
    # Emergency brake - if a KeyboardInterrupt is raised, threads
    # finish processing the current task and exit.
    self.stopped = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _unpack_bytes(bytes): """ Unpack a set of bytes into an integer. First pads to 4 bytes. Little endian. """
if bytes == b'': return 0 int_length = 4 len_diff = int_length - len(bytes) bytes = bytes + len_diff * b'\x00' return struct.unpack("<L", bytes)[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_sprints(): """ Returns all sprints, enriched with their assigned tasks. The project should only have one ``sprints.py`` module. We will define its path via the ``RAPID_PROTOTYPING_SPRINTS_MODULE`` setting. The setting should be the fully qualified name of the ``sprints.py`` module (i.e. ``projectname.context.sprints.sprints``). Furthermore the project can have any amount of ``*_costs.py`` modules in any folder (as long as they are on the pythonpath). This function will find all ``*_costs.py`` modules and add those tasks, that have been assigned to a sprint, to the corresponding sprints in the ``sprints.py`` module. """
# Load the project's sprint definitions from the configured module.
sprints = load_member_from_setting(
    'RAPID_PROTOTYPING_SPRINTS_MODULE')
all_tasks = []
# TODO The onerror parameter is basically a workaround to ignore errors
# The reason for that being, that in my case, the GeoDjango package was in
# the path, permanently requesting certain libraries on import. Since they
# were not present, the search was aborted with an OSError.
for importer, package_name, _ in pkgutil.walk_packages(
        onerror=lambda p: p):
    # Only *_costs modules contribute tasks.
    if not package_name.endswith('_costs'):
        continue
    # Skip test-app fixtures outside of test runs.
    if not getattr(settings, 'TEST_RUN', None) and (
            '.test_app.' in package_name):  # pragma: nocover
        continue
    costs = load_member(package_name + '.costs')
    for task in costs:
        all_tasks.append(task)
sorted_tasks = sorted(all_tasks, key=itemgetter('id'))
for sprint in sprints:
    remaining_time = 0
    sprint['tasks'] = []
    for task in sorted_tasks:
        if task.get('sprint') == sprint.get('id'):
            # Only tasks without logged actual time count as remaining;
            # prefer the developer estimate over the generic estimate.
            if not task.get('actual_time'):
                remaining_time += \
                    task.get('developer_time') or task.get('time')
            sprint.get('tasks').append(task)
    # remaining_time is in minutes; remaining_hours is the same value
    # expressed in hours, rounded to two decimals.
    sprint['remaining_time'] = remaining_time
    sprint['remaining_hours'] = round(float(remaining_time) / 60, 2)
return sprints
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def append_overhead_costs(costs, new_id, overhead_percentage=0.15): """ Adds 15% overhead costs to the list of costs. Usage:: from rapid_prototyping.context.utils import append_overhead_costs costs = [ ] costs = append_overhead_costs(costs, MAIN_ID + get_counter(counter)[0]) :param costs: Your final list of costs. :param new_id: The id that this new item should get. """
total_time = 0 for item in costs: total_time += item['time'] costs.append({ 'id': new_id, 'task': 'Overhead, Bufixes & Iterations', 'time': total_time * overhead_percentage, }, ) return costs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def arduino_default_path(): """platform specific default root path."""
if sys.platform == 'darwin': s = path('/Applications/Arduino.app/Contents/Resources/Java') elif sys.platform == 'win32': s = None else: s = path('/usr/share/arduino/') return s
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkForChanges(f, sde, isTable): """ returns False if there are no changes """
# try simple feature count first fCount = int(arcpy.GetCount_management(f).getOutput(0)) sdeCount = int(arcpy.GetCount_management(sde).getOutput(0)) if fCount != sdeCount: return True fields = [fld.name for fld in arcpy.ListFields(f)] # filter out shape fields if not isTable: fields = filter_fields(fields) d = arcpy.Describe(f) shapeType = d.shapeType if shapeType == 'Polygon': shapeToken = 'SHAPE@AREA' elif shapeType == 'Polyline': shapeToken = 'SHAPE@LENGTH' elif shapeType == 'Point': shapeToken = 'SHAPE@XY' else: shapeToken = 'SHAPE@JSON' fields.append(shapeToken) def parseShape(shapeValue): if shapeValue is None: return 0 elif shapeType in ['Polygon', 'Polyline']: return shapeValue elif shapeType == 'Point': if shapeValue[0] is not None and shapeValue[1] is not None: return shapeValue[0] + shapeValue[1] else: return 0 else: return shapeValue outputSR = arcpy.Describe(f).spatialReference else: outputSR = None changed = False with arcpy.da.SearchCursor(f, fields, sql_clause=(None, 'ORDER BY OBJECTID')) as fCursor, \ arcpy.da.SearchCursor(sde, fields, sql_clause=(None, 'ORDER BY OBJECTID'), spatial_reference=outputSR) as sdeCursor: for fRow, sdeRow in izip(fCursor, sdeCursor): if fRow != sdeRow: # check shapes first if fRow[-1] != sdeRow[-1] and not isTable: if shapeType not in ['Polygon', 'Polyline', 'Point']: changed = True break fShape = parseShape(fRow[-1]) sdeShape = parseShape(sdeRow[-1]) try: assert_almost_equal(fShape, sdeShape, -1) # trim off shapes fRow = list(fRow[:-1]) sdeRow = list(sdeRow[:-1]) except AssertionError: changed = True break # trim microseconds since they can be off by one between file and sde databases for i in range(len(fRow)): if type(fRow[i]) is datetime: fRow = list(fRow) sdeRow = list(sdeRow) fRow[i] = fRow[i].replace(microsecond=0) try: sdeRow[i] = sdeRow[i].replace(microsecond=0) except: pass # compare all values except OBJECTID if fRow[1:] != sdeRow[1:]: changed = True break return changed
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def install_metaboard( replace_existing=False, ): """install metaboard. http://metalab.at/wiki/Metaboard """
# Describe the metaboard hardware as an Arduino boards.txt entry.
metaboard = AutoBunch()
metaboard.name = 'Metaboard'

# Upload settings (usbasp programmer).
metaboard.upload.protocol = 'usbasp'
metaboard.upload.maximum_size = '14336'
metaboard.upload.speed = '19200'
metaboard.upload.disable_flushing = 'true'

# Build settings for the ATmega168 at 16 MHz.
metaboard.build.mcu = 'atmega168'
metaboard.build.f_cpu = '16000000L'
metaboard.build.core = 'arduino'

install_board('metaboard', metaboard, replace_existing=replace_existing)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __total_pages(self) -> int: """ Return max pages created by limit """
row_count = self.model.query.count() if isinstance(row_count, int): return int(row_count / self.limit) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def links(self, base_link, current_page) -> dict: """ Return JSON paginate links """
max_pages = self.max_pages - 1 if \ self.max_pages > 0 else self.max_pages base_link = '/%s' % (base_link.strip("/")) self_page = current_page prev = current_page - 1 if current_page is not 0 else None prev_link = '%s/page/%s/%s' % (base_link, prev, self.limit) if \ prev is not None else None next = current_page + 1 if current_page < max_pages else None next_link = '%s/page/%s/%s' % (base_link, next, self.limit) if \ next is not None else None first = 0 last = max_pages return { 'self': '%s/page/%s/%s' % (base_link, self_page, self.limit), 'prev': prev_link, 'next': next_link, 'first': '%s/page/%s/%s' % (base_link, first, self.limit), 'last': '%s/page/%s/%s' % (base_link, last, self.limit), }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def json_paginate(self, base_url, page_number): """ Return a dict for a JSON paginate """
data = self.page(page_number) first_id = None last_id = None if data: first_id = data[0].id last_id = data[-1].id return { 'meta': { 'total_pages': self.max_pages, 'first_id': first_id, 'last_id': last_id, 'current_page': page_number }, 'data': self.page(page_number), 'links': self.links(base_url, page_number) }
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_arguments(parser, default_level=logging.INFO): """ Add arguments to an ArgumentParser or OptionParser for purposes of grabbing a logging level. """
# ArgumentParser exposes add_argument, OptionParser exposes add_option;
# use whichever the supplied parser supports.
adder = getattr(parser, 'add_argument', None) or getattr(parser, 'add_option')
adder(
    '-l', '--log-level',
    default=default_level,
    type=log_level,
    help="Set log level (DEBUG, INFO, WARNING, ERROR)")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup(options, **kwargs): """ Setup logging with options or arguments from an OptionParser or ArgumentParser. Also pass any keyword arguments to the basicConfig call. """
# Merge caller-supplied kwargs with the parsed log level; the level
# from the options object always wins.
params = dict(kwargs, level=options.log_level)
logging.basicConfig(**params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup_requests_logging(level): """ Setup logging for 'requests' such that it logs details about the connection, headers, etc. """
# Route urllib3's internal logging through the root handlers at the
# requested level.
requests_log = logging.getLogger("requests.packages.urllib3")
requests_log.setLevel(level)
requests_log.propagate = True

# Mirror the verbosity at the httplib layer: any level at or below
# DEBUG turns on low-level connection/header debugging.
http_client.HTTPConnection.debuglevel = (level <= logging.DEBUG)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _set_period(self, period): """ Set the period for the timestamp. If period is 0 or None, no period will be used. """
# Remember the raw period; 0 or None disables dated filenames.
self._period = period
if not period:
    self._period_seconds = 0
    self._date_format = ''
else:
    self._period_seconds = tempora.get_period_seconds(self._period)
    self._date_format = tempora.get_date_format_string(
        self._period_seconds)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_filename(self, t): """ Return the appropriate filename for the given time based on the defined period. """
root, ext = os.path.splitext(self.base_filename) # remove seconds not significant to the period if self._period_seconds: t -= t % self._period_seconds # convert it to a datetime object for formatting dt = datetime.datetime.utcfromtimestamp(t) # append the datestring to the filename # workaround for datetime.strftime not handling '' properly appended_date = ( dt.strftime(self._date_format) if self._date_format != '' else '' ) if appended_date: # in the future, it would be nice for this format # to be supplied as a parameter. result = root + ' ' + appended_date + ext else: result = self.base_filename return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def emit(self, record): """ Emit a record. Output the record to the file, ensuring that the currently- opened file has the correct date. """
# Compute which file the current time should go to; if it differs from
# the currently open stream, roll over before delegating the write.
now = time.time()
current_name = self.get_filename(now)
try:
    if not self.stream.name == current_name:
        self._use_file(current_name)
except AttributeError:
    # a stream has not been created, so create one.
    self._use_file(current_name)
logging.StreamHandler.emit(self, record)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register(app, uri, file_or_directory, pattern, use_modified_since, use_content_range): # TODO: Though mach9 is not a file server, I feel like we should at least # make a good effort here. Modified-since is nice, but we could # also look into etags, expires, and caching """ Register a static directory handler with Mach9 by adding a route to the router and registering a handler. :param app: Mach9 :param file_or_directory: File or directory path to serve from :param uri: URL to serve from :param pattern: regular expression used to match files in the URL :param use_modified_since: If true, send file modified time, and return not modified if the browser's matches the server's :param use_content_range: If true, process header for range requests and sends the file part that is requested """
# If we're not trying to match a file directly,
# serve from the folder
if not path.isfile(file_or_directory):
    uri += '<file_uri:' + pattern + '>'

async def _handler(request, file_uri=None):
    # Using this to determine if the URL is trying to break out of the path
    # served. os.path.realpath seems to be very slow
    if file_uri and '../' in file_uri:
        raise InvalidUsage("Invalid URL")
    # Merge served directory and requested file if provided
    # Strip all / that in the beginning of the URL to help prevent python
    # from herping a derp and treating the uri as an absolute path
    root_path = file_path = file_or_directory
    if file_uri:
        file_path = path.join(
            file_or_directory, sub('^[/]*', '', file_uri))

    # URL decode the path sent by the browser otherwise we won't be able to
    # match filenames which got encoded (filenames with spaces etc)
    file_path = path.abspath(unquote(file_path))
    # Final containment check: the resolved path must stay inside the
    # served root, otherwise treat it as not found.
    if not file_path.startswith(path.abspath(unquote(root_path))):
        raise FileNotFound('File not found',
                           path=file_or_directory,
                           relative_url=file_uri)
    try:
        headers = {}
        # Check if the client has been sent this file before
        # and it has not been modified since
        stats = None
        if use_modified_since:
            stats = await stat(file_path)
            modified_since = strftime(
                '%a, %d %b %Y %H:%M:%S GMT', gmtime(stats.st_mtime))
            if request.headers.get('If-Modified-Since') == modified_since:
                return HTTPResponse(status=304)
            headers['Last-Modified'] = modified_since
        _range = None
        if use_content_range:
            _range = None
            if not stats:
                stats = await stat(file_path)
            headers['Accept-Ranges'] = 'bytes'
            headers['Content-Length'] = str(stats.st_size)
            if request.method != 'HEAD':
                try:
                    _range = ContentRangeHandler(request, stats)
                except HeaderNotFound:
                    pass
                else:
                    # A valid Range header was found: the range handler
                    # supplies its own Content-Length/Content-Range.
                    del headers['Content-Length']
                    for key, value in _range.headers.items():
                        headers[key] = value
        if request.method == 'HEAD':
            return HTTPResponse(
                headers=headers,
                content_type=guess_type(file_path)[0] or 'text/plain')
        else:
            return await file(file_path, headers=headers, _range=_range)
    except ContentRangeError:
        # Range errors carry their own status/headers; let them bubble.
        raise
    except Exception:
        raise FileNotFound('File not found',
                           path=file_or_directory,
                           relative_url=file_uri)

app.route(uri, methods=['GET', 'HEAD'])(_handler)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fix_imports(script): """ Replace "from PyQt5 import" by "from pyqode.qt import". :param script: script path """
with open(script, 'r') as f_script:
    original_lines = f_script.read().splitlines()

rewritten = []
for line in original_lines:
    # Plain "import X" becomes a relative import of the local module.
    if line.startswith("import "):
        line = "from . " + line
    # Retarget PyQt5 imports at the pyqode.qt shim.
    if "from PyQt5 import" in line:
        line = line.replace("from PyQt5 import", "from pyqode.qt import")
    rewritten.append(line)

with open(script, 'w') as f_script:
    f_script.write("\n".join(rewritten))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def eval_py(self, _globals, _locals): """ Evaluates a file containing a Python params dictionary. """
try: params = eval(self.script, _globals, _locals) except NameError as e: raise Exception( 'Failed to evaluate parameters: {}' .format(str(e)) ) except ResolutionError as e: raise Exception('GetOutput: {}'.format(str(e))) return params
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def new(cls, arg): """ Creates a new Parameter object from the given ParameterArgument. """
content = None if arg.kind == 'file': if os.path.exists(arg.value): with open(arg.value, 'r') as f: content = f.read() else: raise Exception('File does not exist: {}'.format(arg.value)) elif arg.kind == 'cli': content = arg.value for source_cls in cls.sources: if source_cls.supports_source(arg): return source_cls(content) msg = 'Unsupported Parameter Source "{}"' raise Execption(msg.format(arg.value))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def minimum_pitch(self): """ Returns the minimal pitch between two neighboring nodes of the mesh in each direction. :return: Minimal pitch in each direction. """
pitch = self.pitch minimal_pitch = [] for p in pitch: minimal_pitch.append(min(p)) return min(minimal_pitch)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def surrounding_nodes(self, position): """ Returns nearest node indices and direction of opposite node. :param position: Position inside the mesh to search nearest node for as (x,y,z) :return: Nearest node indices and direction of opposite node. """
n_node_index, n_node_position, n_node_error = self.nearest_node(position)
if n_node_error == 0.0:
    # The query point sits exactly on a node: probe a small positive
    # offset along each axis; whether nearest_node accepts the probe
    # decides the sign of the opposite-node direction.
    # NOTE(review): relies on nearest_node raising ValueError for
    # out-of-mesh points -- confirm against its implementation.
    index_mod = []
    for i in range(len(n_node_index)):
        new_point = np.asarray(n_node_position)
        new_point[i] += 1.e-5*np.abs(new_point[i])
        try:
            self.nearest_node(tuple(new_point))
            index_mod.append(-1)
        except ValueError:
            index_mod.append(1)
else:
    # Check if node_position is larger or smaller in resp. axes than position
    index_mod = []
    for i in range(len(n_node_index)):
        if n_node_position[i] > position[i]:
            index_mod.append(-1)
        else:
            index_mod.append(1)
return tuple(n_node_index), tuple(index_mod)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tokenize(self, string): """Tokenize a string and return an iterator over its tokens."""
# Iterate all pattern matches, skipping empty ones.
it = colorise.compat.ifilter(None, self._pattern.finditer(string))

try:
    t = colorise.compat.next(it)
except StopIteration:
    # No color tokens at all: the entire string is literal text.
    yield string, False
    return

pos, buf, lm, escapeflag = -1, '', -1, False

# Check if we need to yield any starting text
if t.start() > 0:
    yield string[:t.start()], False
    pos = t.start()

# Put the first match back in front of the remaining matches.
it = itertools.chain([t], it)

for m in it:
    start = m.start()
    e, s = m.group(2) or '', m.group(3)
    # An odd number of escape characters means the token itself is
    # escaped and must be treated as literal text.
    escaped = e.count(self._ESCAPE) % 2 != 0

    if escaped:
        buf += string[pos:m.end(2)-1] + s
        escapeflag = True
    else:
        buf += string[pos:m.start(3)]
        if buf:
            yield buf, escapeflag
            buf = ''
            escapeflag = False
        if lm == start:
            # Two adjacent tokens: emit an empty text run between them.
            yield '', False
        yield s, False
        lm = m.end()
    pos = m.end()

# Flush any trailing buffered text and the tail of the string.
if buf:
    yield buf, escapeflag
    escapeflag = False
if pos < len(string):
    yield string[pos:], False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(self, format_string): """Parse color syntax from a formatted string."""
# txt accumulates literal text; state tracks nesting of color groups;
# colorstack holds the (fg, bg) pair active at each nesting level.
txt, state = '', 0
colorstack = [(None, None)]
itokens = self.tokenize(format_string)

for token, escaped in itokens:
    if token == self._START_TOKEN and not escaped:
        # Flush text collected so far under the currently active colors.
        if txt:
            yield txt, colorstack[-1]
            txt = ''
        state += 1
        # The token right after a start marker is the color spec; merge
        # it with the enclosing colors so unset fields are inherited.
        colors = self.extract_syntax(colorise.compat.next(itokens)[0])
        colorstack.append(tuple(b or a
                                for a, b in zip(colorstack[-1], colors)))
    elif token == self._FMT_TOKEN and not escaped:
        # if state == 0:
        #     raise ColorSyntaxError("Missing '{0}'"
        #                            .format(self._START_TOKEN))
        # Only the separator between spec and body advances the state;
        # elsewhere the token is plain text.
        if state % 2 != 0:
            state += 1
        else:
            txt += token
    elif token == self._STOP_TOKEN and not escaped:
        if state < 2:
            raise ColorSyntaxError("Missing '{0}' or '{1}'"
                                   .format(self._STOP_TOKEN,
                                           self._FMT_TOKEN))
        if txt:
            yield txt, colorstack[-1]
            txt = ''
        state -= 2
        colorstack.pop()
    else:
        txt += token

# Unbalanced start/stop markers are a syntax error.
if state != 0:
    raise ColorSyntaxError("Invalid color format")

if txt:
    yield txt, colorstack[-1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_mapping(cls, evidence_mapping): """Create an Evidence instance from the given mapping :param evidence_mapping: a mapping (e.g. dict) of values provided by Watson :return: a new Evidence """
return cls(metadata_map=MetadataMap.from_mapping(evidence_mapping['metadataMap']), copyright=evidence_mapping['copyright'], id=evidence_mapping['id'], terms_of_use=evidence_mapping['termsOfUse'], document=evidence_mapping['document'], title=evidence_mapping['title'], text=evidence_mapping['text'], value=evidence_mapping['value'])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def to_obj(cls, obj_data=None, *fields, **field_map):
    '''Build a new *cls* instance from a mapping or another object.

    Values from obj_data take priority when there are conflicts; if no
    fields are named, all keys of the source are copied.
    '''
    if hasattr(obj_data, '__dict__'):
        source_dict = obj_data.__dict__
    else:
        source_dict = obj_data
    if not fields:
        fields = source_dict.keys()
    new_obj = cls()
    update_obj(source_dict, new_obj, *fields, **field_map)
    return new_obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def with_ctx(func=None):
    '''Decorator that guarantees a usable context.

    When the wrapped callable is invoked without a ``ctx`` keyword
    argument (or with ``ctx=None``), a fresh context is created via
    ``_obj.ctx()`` and passed in; otherwise the call goes straight
    through with the caller's context.
    '''
    if not func:
        # Support parenthesized usage: @with_ctx()
        return functools.partial(with_ctx)

    @functools.wraps(func)
    def func_with_context(_obj, *args, **kwargs):
        if kwargs.get('ctx') is not None:
            # Caller supplied a context: just call through.
            return func(_obj, *args, **kwargs)
        # No usable context: create one for the duration of the call.
        with _obj.ctx() as new_ctx:
            kwargs['ctx'] = new_ctx
            return func(_obj, *args, **kwargs)
    return func_with_context
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def open(self, auto_commit=None, schema=None):
    ''' Create a context to execute queries.

    Explicit arguments override the instance defaults: *schema* falls
    back to self.schema, *auto_commit* falls back to the schema's
    auto_commit flag. If the database file is missing or empty, the
    schema's setup files and scripts are executed first.
    '''
    if schema is None:
        schema = self.schema
    ac = auto_commit if auto_commit is not None else schema.auto_commit
    exe = ExecutionContext(self.path, schema=schema, auto_commit=ac)
    # setup DB if required
    if not os.path.isfile(self.path) or os.path.getsize(self.path) == 0:
        getLogger().warning("DB does not exist at {}. Setup is required.".format(self.path))
        # run setup files
        if schema is not None and schema.setup_files:
            for file_path in schema.setup_files:
                getLogger().debug("Executing script file: {}".format(file_path))
                exe.cur.executescript(self.read_file(file_path))
        # run setup scripts
        if schema.setup_scripts:
            for script in schema.setup_scripts:
                exe.cur.executescript(script)
    return exe
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def build_insert(self, table, values, columns=None):
    '''Build a parameterized INSERT statement for *table*.

    When fewer values than columns are supplied, the values are matched
    against the trailing columns (leading columns such as an
    auto-increment key are assumed to be omitted).
    '''
    if not columns:
        columns = table.columns
    if len(values) < len(columns):
        used_columns = columns[-len(values):]
    else:
        used_columns = columns
    column_names = ','.join(used_columns)
    placeholders = ','.join(['?'] * len(values))
    return "INSERT INTO %s (%s) VALUES (%s) " % (table.name, column_names, placeholders)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def select_record(self, table, where=None, values=None, orderby=None, limit=None, columns=None):
    """Run a SELECT on ``table``; supports where, values, orderby, limit and columns."""
    sql = self.schema.query_builder.build_select(table, where, orderby, limit, columns)
    rows = self.execute(sql, values)
    return table.to_table(rows, columns=columns)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def should_be_excluded(name, exclude_patterns): """Check if a name should be excluded. Returns True if name matches at least one of the exclude patterns in the exclude_patterns list. """
def should_be_excluded(name, exclude_patterns):
    """Return True if ``name`` matches at least one glob in ``exclude_patterns``."""
    return any(fnmatch.fnmatch(name, pattern) for pattern in exclude_patterns)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def filter_visited(curr_dir, subdirs, already_visited, follow_dirlinks, on_error): """Filter subdirs that have already been visited. This is used to avoid loops in the search performed by os.walk() in index_files_by_size. curr_dir is the path of the current directory, as returned by os.walk(). subdirs is the list of subdirectories for the current directory, as returned by os.walk(). already_visited is a set of tuples (st_dev, st_ino) of already visited directories. This set will not be modified. on error is a function f(OSError) -> None, to be called in case of error. Returns a tuple: the new (possibly filtered) subdirs list, and a new set of already visited directories, now including the subdirs. """
# Drop subdirs whose (st_dev, st_ino) was already visited, breaking
# directory loops during the os.walk traversal. Returns the filtered
# subdir list and a new already-visited set; the caller's set is untouched.
filtered = []
to_visit = set()
# work on a copy so the caller's already_visited set is never mutated
_already_visited = already_visited.copy()
try:
    # mark the current directory as visited, so we catch symlinks to it
    # immediately instead of after one iteration of the directory loop
    file_info = os.stat(curr_dir) if follow_dirlinks else os.lstat(curr_dir)
    _already_visited.add((file_info.st_dev, file_info.st_ino))
except OSError as e:
    on_error(e)
for subdir in subdirs:
    full_path = os.path.join(curr_dir, subdir)
    try:
        # stat() follows symlinks, lstat() does not
        file_info = os.stat(full_path) if follow_dirlinks else os.lstat(full_path)
    except OSError as e:
        on_error(e)
        continue
    if not follow_dirlinks and stat.S_ISLNK(file_info.st_mode):
        # following links to dirs is disabled, ignore this one
        continue
    dev_inode = (file_info.st_dev, file_info.st_ino)
    if dev_inode not in _already_visited:
        filtered.append(subdir)
        to_visit.add(dev_inode)
    else:
        # already saw this physical directory: report it as a loop
        on_error(OSError(errno.ELOOP, "directory loop detected", full_path))
return filtered, _already_visited.union(to_visit)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def index_files_by_size(root, files_by_size, exclude_dirs, exclude_files, follow_dirlinks): """Recursively index files under a root directory. Each regular file is added *in-place* to the files_by_size dictionary, according to the file size. This is a (possibly empty) dictionary of lists of filenames, indexed by file size. exclude_dirs is a list of glob patterns to exclude directories. exclude_files is a list of glob patterns to exclude files. follow_dirlinks controls whether to follow symbolic links to subdirectories while crawling. Returns True if there were any I/O errors while listing directories. Returns a list of error messages that occurred. If empty, there were no errors. """
# encapsulate the value in a list, so we can modify it by reference # inside the auxiliary function errors = [] already_visited = set() def _print_error(error): """Print a listing error to stderr. error should be an os.OSError instance. """ # modify the outside errors value; must be encapsulated in a list, # because if we assign to a variable here we just create an # independent local copy msg = "error listing '%s': %s" % (error.filename, error.strerror) sys.stderr.write("%s\n" % msg) errors.append(msg) # XXX: The actual root may be matched by the exclude pattern. Should we # prune it as well? for curr_dir, subdirs, filenames in os.walk(root, topdown=True, onerror=_print_error, followlinks=follow_dirlinks): # modify subdirs in-place to influence os.walk subdirs[:] = prune_names(subdirs, exclude_dirs) filenames = prune_names(filenames, exclude_files) # remove subdirs that have already been visited; loops can happen # if there's a symlink loop and follow_dirlinks==True, or if # there's a hardlink loop (which is usually a corrupted filesystem) subdirs[:], already_visited = filter_visited(curr_dir, subdirs, already_visited, follow_dirlinks, _print_error) for base_filename in filenames: full_path = os.path.join(curr_dir, base_filename) # avoid race condition: file can be deleted between os.walk() # seeing it and us calling os.lstat() try: file_info = os.lstat(full_path) except OSError as e: _print_error(e) continue # only want regular files, not symlinks if stat.S_ISREG(file_info.st_mode): size = file_info.st_size if size in files_by_size: # append to the list of files with the same size files_by_size[size].append(full_path) else: # start a new list for this file size files_by_size[size] = [full_path] return errors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def calculate_md5(filename, length): """Calculate the MD5 hash of a file, up to length bytes. Returns the MD5 in its binary form, as an 8-byte string. Raises IOError or OSError in case of error. """
def calculate_md5(filename, length):
    """Calculate the MD5 hash of the first ``length`` bytes of a file.

    Returns the digest in binary form (a 16-byte ``bytes`` value).
    Raises IOError or OSError in case of error.

    Fixes: the old length==0 shortcut returned a hard-coded *text* string
    of the empty digest, while the normal path returns ``bytes`` from
    ``digest()`` -- inconsistent types on Python 3. The digest of the
    empty input is now computed uniformly, and the file handle is managed
    with a ``with`` block.
    """
    assert length >= 0
    md5_summer = hashlib.md5()
    # shortcut: nothing to read means the digest of the empty string
    if length == 0:
        return md5_summer.digest()
    with open(filename, 'rb') as f:
        bytes_read = 0
        while bytes_read < length:
            chunk_size = min(MD5_CHUNK_SIZE, length - bytes_read)
            chunk = f.read(chunk_size)
            if not chunk:
                # found EOF: length was larger than the file size, or the
                # file was truncated while reading
                break
            md5_summer.update(chunk)
            bytes_read += len(chunk)
    return md5_summer.digest()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_duplicates(filenames, max_size): """Find duplicates in a list of files, comparing up to `max_size` bytes. Returns a 2-tuple of two values: ``(duplicate_groups, errors)``. `duplicate_groups` is a (possibly empty) list of lists: the names of files that have at least two copies, grouped together. `errors` is a list of error messages that occurred. If empty, there were no errors. For example, assuming ``a1`` and ``a2`` are identical, ``c1`` and ``c2`` are identical, and ``b`` is different from all others:: [['a1', 'a2'], ['c1', 'c2']] [] Note that ``b`` is not included in the results, as it has no duplicates. """
errors = [] # shortcut: can't have duplicates if there aren't at least 2 files if len(filenames) < 2: return [], errors # shortcut: if comparing 0 bytes, they're all the same if max_size == 0: return [filenames], errors files_by_md5 = {} for filename in filenames: try: md5 = calculate_md5(filename, max_size) except EnvironmentError as e: msg = "unable to calculate MD5 for '%s': %s" % (filename, e.strerror) sys.stderr.write("%s\n" % msg) errors.append(msg) continue if md5 not in files_by_md5: # unique beginning so far; index it on its own files_by_md5[md5] = [filename] else: # found a potential duplicate (same beginning) files_by_md5[md5].append(filename) # Filter out the unique files (lists of files with the same md5 that # only contain 1 file), and create a list of the lists of duplicates. # Don't use values() because on Python 2 this creates a list of all # values (file lists), and that may be very large. duplicates = [l for l in py3compat.itervalues(files_by_md5) if len(l) >= 2] return duplicates, errors
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_duplicates_in_dirs(directories, exclude_dirs=None, exclude_files=None, follow_dirlinks=False): """Recursively scan a list of directories, looking for duplicate files. `exclude_dirs`, if provided, should be a list of glob patterns. Subdirectories whose names match these patterns are excluded from the scan. `exclude_files`, if provided, should be a list of glob patterns. Files whose names match these patterns are excluded from the scan. ``follow_dirlinks`` controls whether to follow symbolic links to subdirectories while crawling. Returns a 2-tuple of two values: ``(duplicate_groups, errors)``. `duplicate_groups` is a (possibly empty) list of lists: the names of files that have at least two copies, grouped together. `errors` is a list of error messages that occurred. If empty, there were no errors. For example, assuming ``./a1`` and ``/dir1/a2`` are identical, ``/dir1/c1`` and ``/dir2/c2`` are identical, ``/dir2/b`` is different from all others, that any subdirectories called ``tmp`` should not be scanned, and that files ending in ``.bak`` should be ignored: [['./a1', '/dir1/a2'], ['/dir1/c1', '/dir2/c2']] [] """
# Recursively scan `directories` for duplicate files. Returns
# (duplicate_groups, errors). Subdirectories/files matching the glob
# patterns in exclude_dirs/exclude_files are skipped.
if exclude_dirs is None:
    exclude_dirs = []
if exclude_files is None:
    exclude_files = []
errors_in_total = []
files_by_size = {}
# First, group all files by size
for directory in directories:
    sub_errors = index_files_by_size(directory, files_by_size, exclude_dirs, exclude_files, follow_dirlinks)
    errors_in_total += sub_errors
all_duplicates = []
# Now, within each file size, check for duplicates.
#
# We use an iterator over the dict (which gives us the keys), instead
# of explicitly accessing dict.keys(). On Python 2, dict.keys() returns
# a list copy of the keys, which may be very large.
for size in iter(files_by_size):
    # for large file sizes, divide them further into groups by matching
    # initial portion; how much of the file is used to match depends on
    # the file size
    if size >= PARTIAL_MD5_THRESHOLD:
        partial_size = min(round_up_to_mult(size // PARTIAL_MD5_READ_RATIO, PARTIAL_MD5_READ_MULT), PARTIAL_MD5_MAX_READ)
        possible_duplicates_list, sub_errors = find_duplicates(files_by_size[size], partial_size)
        errors_in_total += sub_errors
    else:
        # small file size, group them all together and do full MD5s
        possible_duplicates_list = [files_by_size[size]]
    # Do full MD5 scan on suspected duplicates. calculate_md5 (and
    # therefore find_duplicates) needs to know how many bytes to scan.
    # We're using the file's size, as per stat(); this is a problem if
    # the file is growing. We'll only scan up to the size the file had
    # when we indexed. Would be better to somehow tell calculate_md5 to
    # scan until EOF (e.g. give it a negative size).
    for possible_duplicates in possible_duplicates_list:
        duplicates, sub_errors = find_duplicates(possible_duplicates, size)
        all_duplicates += duplicates
        errors_in_total += sub_errors
return all_duplicates, errors_in_total
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def semimajor(P,M): """P, M can be ``Quantity`` objects; otherwise default to day, M_sun """
def semimajor(P, M):
    """Return the semimajor axis (in AU) for orbital period ``P`` and mass ``M``.

    ``P`` and ``M`` may be ``Quantity`` objects; bare numbers default to
    days and solar masses respectively.

    Fix: use ``isinstance`` instead of ``type(...) != Quantity`` so that
    Quantity subclasses are recognised and not wrongly re-multiplied by a
    default unit.
    """
    if not isinstance(P, Quantity):
        P = P * u.day
    if not isinstance(M, Quantity):
        M = M * u.M_sun
    # Kepler's third law: a^3 = G * M * (P / 2*pi)^2
    a = ((P / 2 / np.pi)**2 * const.G * M)**(1. / 3)
    return a.to(u.AU)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def random_spherepos(n): """returns SkyCoord object with n positions randomly oriented on the unit sphere Parameters n : int number of positions desired Returns ------- c : ``SkyCoord`` object with random positions """
# Draw n positions uniformly distributed on the unit sphere and return
# them as a SkyCoord in the physics-spherical representation (radius 1).
signs = np.sign(rand.uniform(-1,1,size=n))
# arccos of a uniform deviate (with a random sign) yields polar angles
# uniformly distributed over the sphere's surface
thetas = Angle(np.arccos(rand.uniform(size=n)*signs),unit=u.rad) #random b/w 0 and 180
phis = Angle(rand.uniform(0,2*np.pi,size=n),unit=u.rad)
c = SkyCoord(phis,thetas,1,representation='physicsspherical')
return c
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_dict(self): """ Return a dict of all instance variables with truthy values, with key names camelized """
def to_dict(self):
    """Return a dict of all truthy instance attributes, keys lower-camelized."""
    result = {}
    for key, value in self.__dict__.items():
        if value:
            # camelize with a lowercase first letter (e.g. my_attr -> myAttr)
            result[inflection.camelize(key, False)] = value
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def depth(self): """ Returns the number of ancestors of this directory. """
def depth(self):
    """Return the number of path components (ancestor count) of this directory."""
    trimmed = self.path.rstrip(os.sep)
    return len(trimmed.split(os.sep))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def ancestors(self, stop=None): """ Generates the parents until stop or the absolute root directory is reached. """
def ancestors(self, stop=None):
    """Yield parents walking upward until ``stop`` (or the absolute root)."""
    current = self
    parent = current.parent
    while parent != stop:
        if parent == current:
            # the root is its own parent -- nothing above it
            return
        yield parent
        current = parent
        parent = current.parent
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_descendant_of(self, ancestor): """ Checks if this folder is inside the given ancestor. """
# Walk up the ancestor chain; True as soon as `ancestor` is met.
stop = Folder(ancestor)
for folder in self.ancestors():
    if folder == stop:
        return True
    if stop.depth > folder.depth:
        # we climbed above the candidate's depth: it can no longer match
        return False
return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_relative_path(self, root): """ Gets the fragment of the current path starting at root. """
# Empty fragment when this item *is* the root.
if self.path == root:
    return ''
ancestors = self.ancestors(stop=root)
# fold the ancestor chain back into a relative path ending in our name
return functools.reduce(lambda f, p: Folder(p.name).child(f), ancestors, self.name)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_mirror(self, target_root, source_root=None): """ Returns a File or Folder object that reperesents if the entire fragment of this directory starting with `source_root` were copied to `target_root`. source_root='/usr/local/hyde') Folder('/usr/tmp/stuff') """
def get_mirror(self, target_root, source_root=None):
    """Mirror this item's path fragment (relative to ``source_root``,
    defaulting to the parent) under ``target_root``."""
    root = source_root if source_root else self.parent
    fragment = self.get_relative_path(root)
    return Folder(target_root).child(fragment)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def file_or_folder(path): """ Returns a File or Folder object that would represent the given path. """
# Python 2 code: `unicode` coerces whatever path-like object was given.
target = unicode(path)
# directories become Folder objects, everything else a File
return Folder(target) if os.path.isdir(target) else File(target)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_binary(self): """Return true if this is a binary file."""
def is_binary(self):
    """Heuristically detect a binary file: True if any NUL byte is present."""
    chunk_size = 1024
    with open(self.path, 'rb') as stream:
        while True:
            block = stream.read(chunk_size)
            if b'\0' in block:
                return True
            if len(block) < chunk_size:
                # short read means EOF -- no NUL byte seen anywhere
                return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_temp(text): """ Creates a temprorary file and writes the `text` into it """
def make_temp(text):
    """Create a temporary file, write ``text`` into it, and return it as a File."""
    import tempfile
    handle, path = tempfile.mkstemp(text=True)
    # close the low-level descriptor; File.write reopens the path itself
    os.close(handle)
    temp_file = File(path)
    temp_file.write(text)
    return temp_file
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_all(self, encoding='utf-8'): """ Reads from the file and returns the content as a string. """
logger.info("Reading everything from %s" % self) with codecs.open(self.path, 'r', encoding) as fin: read_text = fin.read() return read_text
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def write(self, text, encoding="utf-8"): """ Writes the given text to the file using the given encoding. """
logger.info("Writing to %s" % self) with codecs.open(self.path, 'w', encoding) as fout: fout.write(text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy_to(self, destination): """ Copies the file to the given destination. Returns a File object that represents the target file. `destination` must be a File or Folder object. """
# Resolve the real target (a file *inside* destination when it is a folder).
target = self.__get_destination__(destination)
logger.info("Copying %s to %s" % (self, target))
# Python 2 code: `unicode` coerces the destination object to its path.
shutil.copy(self.path, unicode(destination))
return target
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def etag(self): """ Generates etag from file contents. """
# Hash the file contents in 64 KiB chunks and return the hex MD5 digest.
CHUNKSIZE = 1024 * 64
from hashlib import md5
hash = md5()
# NOTE(review): the file is opened in text mode; binary mode ('rb') would
# avoid newline translation for hashing -- confirm before changing, since
# hash_update may expect text chunks.
with open(self.path) as fin:
    chunk = fin.read(CHUNKSIZE)
    while chunk:
        hash_update(hash, chunk)
        chunk = fin.read(CHUNKSIZE)
return hash.hexdigest()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def child_folder(self, fragment): """ Returns a folder object by combining the fragment to this folder's path """
def child_folder(self, fragment):
    """Return a Folder for ``fragment`` joined under this folder's path."""
    normalized = Folder(fragment).path
    return Folder(os.path.join(self.path, normalized))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def child(self, fragment): """ Returns a path of a child item represented by `fragment`. """
def child(self, fragment):
    """Return the path string of the child item named by ``fragment``."""
    normalized = FS(fragment).path
    return os.path.join(self.path, normalized)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make(self): """ Creates this directory and any of the missing directories in the path. Any errors that may occur are eaten. """
def make(self):
    """Ensure this directory exists, creating missing parents as needed.

    Any OS error (pre-existing dir, races, permissions) is deliberately
    swallowed; returns self to allow chaining.
    """
    try:
        if self.exists:
            return self
        logger.info("Creating %s" % self.path)
        os.makedirs(self.path)
    except os.error:
        pass
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete(self): """ Deletes the directory if it exists. """
def delete(self):
    """Remove this directory tree if present; a missing directory is a no-op."""
    if not self.exists:
        return
    logger.info("Deleting %s" % self.path)
    shutil.rmtree(self.path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _create_target_tree(self, target): """ There is a bug in dir_util that makes `copy_tree` crash if a folder in the tree has been deleted before and readded now. To workaround the bug, we first walk the tree and create directories that are needed. """
# Pre-create every directory of this tree under `target`, working around
# a dir_util bug: copy_tree crashes if a folder in the tree was deleted
# and then re-added. Walking first guarantees all mirrors exist.
source = self
with source.walker as walker:
    @walker.folder_visitor
    def visit_folder(folder):
        """Create the mirror directory for each visited folder."""
        if folder != source:
            Folder(folder.get_mirror(target, source)).make()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def copy_contents_to(self, destination): """ Copies the contents of this directory to the given destination. Returns a Folder object that represents the moved directory. """
logger.info("Copying contents of %s to %s" % (self, destination)) target = Folder(destination) target.make() self._create_target_tree(target) dir_util.copy_tree(self.path, unicode(target)) return target
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __start(self): """ Start a new thread to process Cron """
# Launch the cron loop (self.__loop) on a background thread and mark
# this scheduler as enabled.
thread = Thread(target=self.__loop, args=())
thread.daemon = True  # daemonize so the thread won't block interpreter exit
thread.start()
self.__enabled = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __dict_to_BetterDict(self, attr): """Convert the passed attr to a BetterDict if the value is a dict Returns: The new value of the passed attribute."""
# Wrap self[attr] in a BetterDict when it is a plain dict, then return
# the (possibly wrapped) value.
# NOTE(review): type(...) == dict excludes dict subclasses (including
# BetterDict itself), so already-wrapped values are left untouched --
# presumably intentional; confirm before switching to isinstance.
if type(self[attr]) == dict:
    self[attr] = BetterDict(self[attr])
return self[attr]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _bd_(self): """Property that allows dot lookups of otherwise hidden attributes."""
def _bd_(self):
    """Property that allows dot lookups of otherwise hidden attributes.

    The lookup helper is built once and cached on the instance.

    Fix: the old guard read ``getattr(self, '__bd__', False)`` but the
    assignment ``self.__bd`` is name-mangled inside the class, so the
    literal attribute name '__bd__' was never written and a fresh
    BetterDictLookUp was rebuilt on every access. A single, unmangled
    cache attribute is used for both read and write instead.
    """
    cache = getattr(self, '_bd_lookup_cache', None)
    if cache is None:
        cache = BetterDictLookUp(self)
        self._bd_lookup_cache = cache
    return cache
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_or_update(sender, **kwargs): """ Create or update an Activity Monitor item from some instance. """
# NOTE(review): several bare "except:" clauses below swallow all errors
# (not just the expected ones); kept as-is since narrowing them could
# change behavior callers rely on.
now = datetime.datetime.now()
# I can't explain why this import fails unless it's here.
from activity_monitor.models import Activity
instance = kwargs['instance']
# Find this object's content type and model class.
instance_content_type = ContentType.objects.get_for_model(sender)
instance_model = sender
content_object = instance_model.objects.get(id=instance.id)
# check to see if the activity already exists. Will need later.
try:
    activity = Activity.objects.get(content_type=instance_content_type, object_id=content_object.id)
except:
    activity = None
# We now know the content type, the model (sender), content type and content object.
# We need to loop through ACTIVITY_MONITOR_MODELS in settings for other fields
for activity_setting in settings.ACTIVITY_MONITOR_MODELS:
    this_app_label = activity_setting['model'].split('.')[0]
    this_model_label = activity_setting['model'].split('.')[1]
    this_content_type = ContentType.objects.get(app_label=this_app_label, model=this_model_label)
    if this_content_type == instance_content_type:
        # first, check to see if we even WANT to register this activity:
        # the optional boolean 'check' field. Also, delete if needed.
        if 'check' in activity_setting:
            if getattr(instance, activity_setting['check']) is False:
                if activity:
                    activity.delete()
                return
        # does it use the default manager (objects) or a custom manager?
        try:
            manager = activity_setting['manager']
        except:
            manager = 'objects'
        # what field denotes the activity time? 'created' is the default
        try:
            timestamp = getattr(instance, activity_setting['date_field'])
        except:
            timestamp = getattr(instance, 'created')
        # if the given timestamp is a date rather than datetime type,
        # normalize it out to a datetime
        if type(timestamp) == type(now):
            clean_timestamp = timestamp
        else:
            clean_timestamp = datetime.datetime.combine(timestamp, datetime.time())
        # Find a valid user object
        if 'user_field' in activity_setting:
            # pull the user object from instance using user_field
            user = getattr(instance, activity_setting['user_field'])
        elif this_model_label == 'user' or this_model_label == 'profile':
            # this IS auth.user or a Django 1.5 custom user
            user = instance
        else:
            # we didn't specify a user, so it must be instance.user
            user = instance.user
        # BAIL-OUT CHECKS
        # Determine all the reasons we would want to bail out.
        # Make sure it's not a future item, like a future-published blog entry.
        if clean_timestamp > now:
            return
        # or some really old content that was just re-saved for some reason
        if clean_timestamp < (now - datetime.timedelta(days=3)):
            return
        # or there's not a user object
        if not user:
            return
        # or the user is god or staff, and we're filtering out, don't add to monitor
        if user.is_superuser and 'filter_superuser' in activity_setting:
            return
        if user.is_staff and 'filter_staff' in activity_setting:
            return
        # build a default string representation
        # note that each activity can get back to the object via get_absolute_url()
        verb = activity_setting.get('verb', None)
        override_string = activity_setting.get('override_string', None)
        # MANAGER CHECK
        # Make sure the item "should" be registered, based on the manager argument.
        # If InstanceModel.manager.all() includes this item, then register.
        # Otherwise, delete any stale activity and stop.
        try:
            getattr(instance_model, manager).get(pk=instance.pk)
        except instance_model.DoesNotExist:
            # NOTE(review): if activity is None, activity.delete() raises
            # AttributeError, not Activity.DoesNotExist -- verify intent.
            try:
                activity.delete()
                return
            except Activity.DoesNotExist:
                return
        if user and clean_timestamp and instance:
            if not activity:
                # If the activity didn't already exist, create it.
                activity = Activity(
                    actor = user,
                    content_type = instance_content_type,
                    object_id = content_object.id,
                    content_object = content_object,
                    timestamp = clean_timestamp,
                    verb = verb,
                    override_string = override_string,
                )
                activity.save()
            return activity
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def highlight_differences(s1, s2, color): """Highlight the characters in s2 that differ from those in s1."""
def highlight_differences(s1, s2, color):
    """Print ``s1``, then ``s2`` with characters differing from ``s1``
    highlighted in ``color``; length differences are marked with colored
    underscores."""
    len1 = len(s1)
    len2 = len(s2)
    mismatches = [idx for idx, pair in enumerate(zip(s1, s2)) if pair[0] != pair[1]]
    print(s1)
    if len2 > len1:
        # s2 has extra trailing characters -- mark their positions
        colorise.cprint('_' * (len2 - len1), fg=color)
    else:
        print()
    colorise.highlight(s2, indices=mismatches, fg=color, end='')
    if len1 > len2:
        # s1 was longer -- underline the missing tail of s2
        colorise.cprint('_' * (len1 - len2), fg=color)
    else:
        print()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_jinja_env(): """Create a Jinja2 `~jinja2.Environment`. Returns ------- env : `jinja2.Environment` Jinja2 template rendering environment, configured to use templates in ``templates/``. """
def create_jinja_env():
    """Build the Jinja2 environment used for rendering.

    Templates load from the package's ``templates/`` directory; HTML
    autoescaping is enabled and the ``simple_date`` and ``paragraphify``
    filters are registered.

    Returns
    -------
    env : `jinja2.Environment`
    """
    template_dir = os.path.join(os.path.dirname(__file__), 'templates')
    loader = jinja2.FileSystemLoader(template_dir)
    env = jinja2.Environment(
        loader=loader,
        autoescape=jinja2.select_autoescape(['html']),
    )
    env.filters['simple_date'] = filter_simple_date
    env.filters['paragraphify'] = filter_paragraphify
    return env