text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_profile_form(): """ Returns the profile form defined by ``settings.ACCOUNTS_PROFILE_FORM_CLASS``. """
def get_profile_form():
    """
    Returns the profile form defined by
    ``settings.ACCOUNTS_PROFILE_FORM_CLASS``.
    """
    from yacms.conf import settings
    dotted_path = settings.ACCOUNTS_PROFILE_FORM_CLASS
    try:
        return import_dotted_path(dotted_path)
    except ImportError:
        raise ImproperlyConfigured("Value for ACCOUNTS_PROFILE_FORM_CLASS "
                                   "could not be imported: %s" % dotted_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_profile_user_fieldname(profile_model=None, user_model=None): """ Returns the name of the first field on the profile model that points to the ``auth.User`` model. """
def get_profile_user_fieldname(profile_model=None, user_model=None):
    """
    Returns the name of the first field on the profile model that
    points to the ``auth.User`` model.
    """
    profile_cls = profile_model or get_profile_model()
    user_cls = user_model or get_user_model()
    # Scan the profile's fields for the first relation targeting User.
    for field in profile_cls._meta.fields:
        if field.rel and field.rel.to == user_cls:
            return field.name
    raise ImproperlyConfigured("Value for ACCOUNTS_PROFILE_MODEL does not "
                               "contain a ForeignKey field for auth.User: %s"
                               % profile_cls.__name__)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def gameValue(self): """identify the corresponding internal SC2 game value for self.type's value"""
def gameValue(self):
    """Identify the corresponding internal SC2 game value for self.type's value.

    Returns the mapped game value, or None when no mapping exists
    (non-dict ALLOWED_TYPES, unknown key) or when ``self.type`` is None.
    """
    allowed = type(self).ALLOWED_TYPES
    try:
        # If ALLOWED_TYPES is not a dict, there is no internal game value
        # mapping defined.
        if isinstance(allowed, dict):
            return allowed.get(self.type.name)
    except AttributeError:
        # A None .type value is okay -- it simply has no game value.
        # (The original bare ``except:`` also hid unrelated bugs; only
        # the missing-attribute case is expected here.)
        pass
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def direct2dDistance(self, point): """consider the distance between two mapPoints, ignoring all terrain, pathing issues"""
def direct2dDistance(self, point):
    """Consider the distance between two mapPoints, ignoring all terrain, pathing issues."""
    if not isinstance(point, MapPoint):
        return 0.0
    dx = self.x - point.x
    dy = self.y - point.y
    # Euclidean distance in the x/y plane only.
    return (dx ** 2 + dy ** 2) ** 0.5
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def midPoint(self, point): """identify the midpoint between two mapPoints"""
def midPoint(self, point):
    """Identify the midpoint between two mapPoints."""
    # Average each coordinate pair to land exactly halfway between them.
    coords = [(ours + theirs) / 2.0 for ours, theirs in
              ((self.x, point.x), (self.y, point.y), (self.z, point.z))]
    return MapPoint(*coords)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def merge_conf(to_hash, other_hash, path=None):
    """Merges other_hash into to_hash (recursively for nested dicts).

    :param to_hash: dict updated in place and returned
    :param other_hash: dict whose entries are merged in; non-dict values
        (and dict-vs-non-dict conflicts) overwrite existing entries
    :param path: list of key names leading to the current nesting level,
        threaded through the recursion for bookkeeping
    :returns: ``to_hash``
    """
    # Fix: the original used a mutable default (``path=[]``), which is
    # shared across calls; build a fresh list per call instead.
    if path is None:
        path = []
    for key in other_hash:
        if (key in to_hash and isinstance(to_hash[key], dict)
                and isinstance(other_hash[key], dict)):
            merge_conf(to_hash[key], other_hash[key], path + [str(key)])
        else:
            to_hash[key] = other_hash[key]
    return to_hash
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_inputs(self): """ Check for the existence of input files """
def check_inputs(self):
    """ Check for the existence of input files """
    self.inputs = self.expand_filenames(self.inputs)
    # No declared inputs, or all of them present -> OK to run.
    if not self.inputs or self.files_exist(self.inputs):
        return True
    print("Not executing task. Input file(s) do not exist.")
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def check_outputs(self): """ Check for the existence of output files """
def check_outputs(self):
    """ Check for the existence of output files.

    Returns True when the task should run: outputs are missing, a
    dependency (input) is newer than an output, or ``self.force`` is set.
    """
    self.outputs = self.expand_filenames(self.outputs)
    result = False
    if self.files_exist(self.outputs):
        if self.dependencies_are_newer(self.outputs, self.inputs):
            result = True
            print("Dependencies are newer than outputs.")
            print("Running task.")
        elif self.force:
            print("Dependencies are older than inputs, but 'force' option present.")
            print("Running task.")
            result = True
        else:
            print("Dependencies are older than inputs.")
    else:
        # Typo fixed in the message: "ouput" -> "output".
        print("No output file(s).")
        print("Running task.")
        result = True
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def expand_filenames(self, filenames): """ Expand a list of filenames using environment variables, followed by expansion of shell-style wildcards. """
def expand_filenames(self, filenames):
    """
    Expand a list of filenames using environment variables, followed by
    expansion of shell-style wildcards.
    """
    results = []
    for name in filenames:
        expanded = name
        # Step 1: $VAR substitution from self.environment.
        if "$" in name:
            expanded = Template(name).substitute(**self.environment)
            logging.debug(
                "Expanding {} to {}.".format(name, expanded))
        # Step 2: shell-style wildcard expansion.
        if any(ch in expanded for ch in "*[]?"):
            matches = glob.glob(expanded)
            expanded = matches if matches else "NONEXISTENT"
        if isinstance(expanded, list):
            results.extend(expanded)
        else:
            results.append(expanded)
    # De-duplicate and return a stable, sorted list.
    return sorted(set(results))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def files_exist(self, filenames): """ Check if all files in a given list exist. """
def files_exist(self, filenames):
    """ Check if all files in a given list exist. """
    for filename in filenames:
        path = os.path.abspath(filename)
        # Must exist AND be a regular file (not a directory).
        if not (os.path.exists(path) and os.path.isfile(path)):
            return False
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dependencies_are_newer(self, files, dependencies): """ For two lists of files, check if any file in the second list is newer than any file of the first. """
def dependencies_are_newer(self, files, dependencies):
    """
    For two lists of files, check if any file in the second list is
    newer than any file of the first.

    :param files: filenames whose modification times are compared against
    :param dependencies: filenames that may be newer
    :returns: True if at least one dependency's mtime is strictly greater
        than some file's mtime, False otherwise
    """
    dependency_mtimes = [
        os.path.getmtime(filename) for filename in dependencies]
    file_mtimes = [os.path.getmtime(filename) for filename in files]
    # any() short-circuits on the first newer pair instead of scanning
    # every combination like the original nested loop; result identical.
    return any(dep_mtime > file_mtime
               for file_mtime in file_mtimes
               for dep_mtime in dependency_mtimes)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mktemp_file(self): """ Create a temporary file in the '.faz' directory for the code to feed to the interpreter. """
def mktemp_file(self):
    """
    Create a temporary file in the '.faz' directory for the code to feed
    to the interpreter.

    Ensures the working directory exists (creating it if missing) and
    stores the open file object on ``self.f``.

    :raises TempDirIsFileException: if a regular file occupies the
        directory path.
    """
    # NOTE: ``self.__dirname`` is name-mangled by the enclosing class.
    if not(os.path.exists(self.__dirname)):
        logging.debug("Creating directory {}".format(self.__dirname))
        os.mkdir(self.__dirname)
    elif not(os.path.isdir(self.__dirname)):
        raise TempDirIsFileException(
            "There is a file called %s in this directory!!!" % self.__dirname)
    #self.fdesc, self.fname = tempfile.mkstemp(dir=self.__dirname, text=True)
    # delete=False: the file must outlive this handle so the interpreter
    # can read it afterwards.
    self.f = tempfile.NamedTemporaryFile(dir=self.__dirname, delete=False,
                                         mode="wt")
    logging.debug("Creating file {}".format(self.f.name))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_max_page(dom): """ Try to guess how much pages are in book listing. Args: dom (obj): HTMLElement container of the page with book list. Returns: int: Number of pages for given category. """
div = dom.find("div", {"class": "razeniKnihListovani"}) if not div: return 1 # isolate only page numbers from links links = div[0].find("a") max_page = filter( lambda x: "href" in x.params and "pageindex=" in x.params["href"], links ) max_page = map( lambda x: x.params["href"].split("pageindex=")[-1], max_page ) max_page = filter(lambda x: x.isdigit(), max_page) max_page = map(lambda x: int(x), max_page) if not max_page: return 1 return max(max_page)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_book_links(dom): """ Parse links to the details about publications from page with book list. Args: dom (obj): HTMLElement container of the page with book list. Returns: list: List of strings / absolute links to book details. """
links = [] picker = lambda x: x.params.get("class", "").startswith("boxProKnihy") for el in dom.find(None, fn=picker): book_ref = el.find("a") if not book_ref or "href" not in book_ref[0].params: continue links.append(book_ref[0].params["href"]) return links
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_book_links(links): """ Go thru `links` to categories and return list to all publications in all given categories. Args: links (list): List of strings (absolute links to categories). Returns: list: List of strings / absolute links to book details. """
def get_book_links(links):
    """
    Go thru `links` to categories and return list to all publications in
    all given categories.

    Args:
        links (list): List of strings (absolute links to categories).

    Returns:
        list: List of strings / absolute links to book details.
    """
    book_links = []
    for link in links:
        # First page of the category listing (pages are numbered from 1).
        dom = dhtmlparser.parseString(DOWNER.download(link + "1"))
        book_links.extend(_parse_book_links(dom))
        # Remaining pages, if any.
        for page in range(2, _get_max_page(dom) + 1):
            page_dom = dhtmlparser.parseString(
                DOWNER.download(link + str(page))
            )
            book_links.extend(_parse_book_links(page_dom))
    return book_links
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _parse_authors(authors): """ Parse informations about authors of the book. Args: dom (obj): HTMLElement containing slice of the page with details. Returns: list: List of :class:`.Author` objects. Blank if no author \ found. """
def _parse_authors(authors):
    """
    Parse informations about authors of the book.

    Args:
        authors (obj): HTMLElement containing slice of the page with details.

    Returns:
        list: List of :class:`.Author` objects. Blank if no author found.
    """
    # All parsed authors share the single link found in the element (if any).
    link = authors.find("a")
    link = link[0].params.get("href") if link else None
    author_list = _strip_content(authors)
    # Trailing "(...)" blocks (e.g. roles/notes) are not part of the names.
    if "(" in author_list:
        author_list = author_list.split("(")[0]
    if not author_list.strip():
        return []
    # NOTE(review): map() returns a lazy iterator on Python 3, not the
    # list the docstring promises -- this code appears to rely on
    # Python 2 semantics; confirm before porting.
    return map(
        lambda author: Author(author.strip(), link),
        author_list.strip().split(",")
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _process_book(link): """ Download and parse available informations about book from the publishers webpages. Args: link (str): URL of the book at the publishers webpages. Returns: obj: :class:`.Publication` instance with book details. """
# download and parse book info data = DOWNER.download(link) dom = dhtmlparser.parseString( utils.handle_encodnig(data) ) dhtmlparser.makeDoubleLinked(dom) # some books are without price in expected elements, this will try to get # it from elsewhere price = None try: price = _strip_content(zapi.get_price(dom)) except UserWarning: price = dom.find("p", {"class": "vaseCena"}) if price: price = price[0].getContent().replace("&nbsp;", " ") price = filter(lambda x: x.isdigit(), price.strip()) if price: price = price[0] + "kč" else: price = "-1" else: price = "-1" # required informations pub = Publication( title=_strip_content(zapi.get_title(dom)), authors=_parse_authors(zapi.get_author(dom)), price=price, publisher=_strip_content(zapi.get_publisher(dom)) ) # optional informations pub.optionals.URL = link pub.optionals.pages = _strip_content(zapi.get_pages(dom)) pub.optionals.pub_date = _strip_content(zapi.get_pub_date(dom)) pub.optionals.ISBN = _strip_content(zapi.get_ISBN(dom)) pub.optionals.binding = _strip_content(zapi.get_binding(dom)) # post checks if pub.title.startswith("E-kniha:"): pub.title = pub.title.replace("E-kniha:", "", 1).strip() pub.optionals.is_ebook = True if pub.optionals.ISBN: if " " in pub.optionals.ISBN: pub.optionals.ISBN = pub.optionals.ISBN.split(" ")[0] if "(" in pub.optionals.ISBN: pub.optionals.ISBN = pub.optionals.ISBN.split("(")[0] return pub
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(text) -> Optional['Response']: """Parse response into an instance of the appropriate child class."""
# Trim the start and end markers, and ensure only lowercase is used if text.startswith(MARKER_START) and text.endswith(MARKER_END): text = text[1:len(text)-1].lower() # No-op; can just ignore these if not text: return None if text.startswith(CMD_DATETIME): return DateTimeResponse(text) elif text.startswith(CMD_OPMODE): return OpModeResponse(text) elif text.startswith(CMD_DEVBYIDX_PREFIX): if RESPONSE_ERROR == text[2:] or text[2:4] == '00': return DeviceNotFoundResponse(text) return DeviceInfoResponse(text) elif text.startswith(CMD_DEVICE_PREFIX): action = next((a for a in [ACTION_ADD, ACTION_DEL, ACTION_SET] if a == text[2:3]), ACTION_NONE) args = text[2+len(action):] if RESPONSE_ERROR == args: return DeviceNotFoundResponse(text) elif action == ACTION_ADD: if not args: return DeviceAddingResponse(text) return DeviceAddedResponse(text) elif action == ACTION_SET: return DeviceChangedResponse(text) elif action == ACTION_DEL: return DeviceDeletedResponse(text) else: return DeviceInfoResponse(text) elif text.startswith(CMD_CLEAR_STATUS): return ClearedStatusResponse(text) elif text.startswith(CMD_ROMVER): return ROMVersionResponse(text) elif text.startswith(CMD_EXIT_DELAY): return ExitDelayResponse(text) elif text.startswith(CMD_ENTRY_DELAY): return EntryDelayResponse(text) elif text.startswith(CMD_SWITCH_PREFIX) and is_ascii_hex(text[1:2]): return SwitchResponse(text) elif text.startswith(CMD_EVENT_LOG): if RESPONSE_ERROR == text[2:]: return EventLogNotFoundResponse(text) return EventLogResponse(text) elif text.startswith(CMD_SENSOR_LOG): if RESPONSE_ERROR == text[2:]: return SensorLogNotFoundResponse(text) return SensorLogResponse(text) else: raise ValueError("Response not recognised: " + text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_closed(self) -> Optional[bool]: """For Magnet Sensor; True if Closed, False if Open."""
def is_closed(self) -> Optional[bool]:
    """For Magnet Sensor; True if Closed, False if Open."""
    device_type = self._device_type
    # Only door magnets report open/closed; anything else yields None.
    if device_type is None or device_type != DeviceType.DoorMagnet:
        return None
    return bool(self._current_status & 0x01)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rssi_bars(self) -> int: """Received Signal Strength Indication, from 0 to 4 bars."""
def rssi_bars(self) -> int:
    """Received Signal Strength Indication, from 0 to 4 bars."""
    rssi_db = self.rssi_db
    # Each threshold crossed earns one more bar, capped at 4.
    bars = 0
    for limit in (45, 60, 75, 90):
        if rssi_db < limit:
            return bars
        bars += 1
    return 4
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def zone(self) -> Optional[str]: """Zone the device is assigned to."""
def zone(self) -> Optional[str]:
    """Zone the device is assigned to."""
    # Base units are not assigned to a zone.
    if self._device_category != DC_BASEUNIT:
        return '{:02x}-{:02x}'.format(self._group_number, self._unit_number)
    return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def workspace_state_changed(ob, event): """ when a workspace is made 'open', we need to give all intranet users the 'Guest' role equally, when the workspace is not open, we need to remove the role again """
def workspace_state_changed(ob, event):
    """
    when a workspace is made 'open', we need to give all intranet users
    the 'Guest' role equally, when the workspace is not open, we need to
    remove the role again
    """
    workspace = event.object
    roles = ['Guest', ]
    opening = event.new_state.id == 'open'
    if opening:
        api.group.grant_roles(
            groupname=INTRANET_USERS_GROUP_ID,
            obj=workspace,
            roles=roles,
        )
    elif event.old_state.id == 'open':
        api.group.revoke_roles(
            groupname=INTRANET_USERS_GROUP_ID,
            obj=workspace,
            roles=roles,
        )
    else:
        # Transition neither into nor out of 'open': nothing to do.
        return
    workspace.reindexObjectSecurity()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def workspace_added(ob, event): """ when a workspace is created, we add the creator to the admin group. We then setup our placeful workflow """
# Whoever creates the workspace should be added as an Admin creator = ob.Creator() IWorkspace(ob).add_to_team( user=creator, groups=set(['Admins']), ) # Configure our placeful workflow cmfpw = 'CMFPlacefulWorkflow' ob.manage_addProduct[cmfpw].manage_addWorkflowPolicyConfig() # Set the policy for the config pc = getattr(ob, WorkflowPolicyConfig_id) pc.setPolicyIn('') pc.setPolicyBelow('ploneintranet_policy')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def participation_policy_changed(ob, event): """ Move all the existing users to a new group """
def participation_policy_changed(ob, event):
    """ Move all the existing users to a new group """
    workspace = IWorkspace(ob)
    old_group_name = workspace.group_for_policy(event.old_policy)
    old_group = api.group.get(old_group_name)
    for member in old_group.getAllGroupMembers():
        # ``groups`` appears to be the member's live group set: ``-=``
        # and ``add`` mutate it in place and no write-back call follows.
        # NOTE(review): confirm the workspace persists this mutation.
        groups = workspace.get(member.getId()).groups
        groups -= set([event.old_policy.title()])
        groups.add(event.new_policy.title())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def invitation_accepted(event): """ When an invitation is accepted, add the user to the team """
def invitation_accepted(event):
    """ When an invitation is accepted, add the user to the team """
    request = getRequest()
    storage = get_storage()
    # Unknown token: nothing to do.
    if event.token_id not in storage:
        return
    ws_uid, username = storage[event.token_id]
    # NOTE(review): this bare lookup has no effect -- it looks like a
    # ``del storage[event.token_id]`` was intended to consume the token;
    # confirm before changing.
    storage[event.token_id]
    # Log the invited user in.
    acl_users = api.portal.get_tool('acl_users')
    acl_users.updateCredentials(
        request,
        request.response,
        username,
        None
    )
    # Resolve the workspace by UID regardless of the user's permissions.
    catalog = api.portal.get_tool(name="portal_catalog")
    brain = catalog.unrestrictedSearchResults(UID=ws_uid)[0]
    with api.env.adopt_roles(["Manager"]):
        ws = IWorkspace(brain.getObject())
        for name in ws.members:
            member = api.user.get(username=name)
            if member is not None:
                if member.getUserName() == username:
                    api.portal.show_message(
                        _('Oh boy, oh boy, you are already a member'),
                        request,
                    )
                    break
        else:
            # for/else: only add the user when the loop found no
            # existing membership (no break).
            ws.add_to_team(user=username)
            api.portal.show_message(
                _('Welcome to our family, Stranger'),
                request,
            )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def user_deleted_from_site_event(event): """ Remove deleted user from all the workspaces where he is a member """
def user_deleted_from_site_event(event):
    """ Remove deleted user from all the workspaces where he is a member """
    userid = event.principal
    catalog = api.portal.get_tool('portal_catalog')
    # Find every workspace listing the user as a member, ignoring the
    # current user's permissions.
    query = {
        'object_provides': WORKSPACE_INTERFACE,
        'workspace_members': userid,
    }
    for brain in catalog.unrestrictedSearchResults(query):
        workspace = IWorkspace(brain._unrestrictedGetObject())
        workspace.remove_from_team(userid)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_context_name(fn): """ Return the `fn` in absolute path in `template_data` directory. """
def data_context_name(fn):
    """
    Return the `fn` in absolute path in `template_data` directory.
    """
    base_dir = os.path.dirname(__file__)
    return os.path.join(base_dir, "template_data", fn)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def data_context(fn, mode="r"): """ Return content of the `fn` from the `template_data` directory. """
def data_context(fn, mode="r"):
    """
    Return content of the `fn` from the `template_data` directory.
    """
    path = data_context_name(fn)
    with open(path, mode) as handle:
        return handle.read()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tmp_context(fn, mode="r"): """ Return content of the `fn` from the temporary directory. """
def tmp_context(fn, mode="r"):
    """
    Return content of the `fn` from the temporary directory.
    """
    path = tmp_context_name(fn)
    with open(path, mode) as handle:
        return handle.read()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cleanup_environment(): """ Shutdown the ZEO server process running in another thread and cleanup the temporary directory. """
def cleanup_environment():
    """
    Shutdown the ZEO server process running in another thread and cleanup
    the temporary directory.
    """
    # Fix: the ``global`` declaration must precede any use of TMP_PATH in
    # this function; the original placed it after the name was read,
    # which is a SyntaxError on Python 3.
    global TMP_PATH
    SERV.terminate()
    shutil.rmtree(TMP_PATH)
    if os.path.exists(TMP_PATH):
        os.rmdir(TMP_PATH)
    TMP_PATH = None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stringify(obj): """ Return the string representation of an object. :param obj: object to get the representation of :returns: unicode string representation of `obj` or `obj` unchanged This function returns a string representation for many of the types from the standard library. It does not convert numeric or Boolean values to strings -- it only converts non-primitive instances such as :class:`datetime.datetime`. The following table describes the types that are handled and describes how they are represented. | Class | Behavior | +============================+============================================+ | :class:`uuid.UUID` | ``str(obj)`` | | :class:`datetime.datetime` | ``obj.strftime('%Y-%m-%dT%H:%M:%S.%f%z')`` | | :class:`memoryview` | ``obj.tobytes().decode('utf-8')`` | | :class:`bytearray` | ``bytes(obj).decode('utf-8')`` | | :class:`buffer` | ``bytes(obj).decode('utf-8')`` | | :class:`bytes` | ``obj.decode('utf-8')`` | Other types are returned unharmed. """
def stringify(obj):
    """
    Return the string representation of an object.

    :param obj: object to get the representation of
    :returns: unicode string representation of `obj` or `obj` unchanged

    Converts non-primitive instances -- :class:`uuid.UUID`,
    :class:`datetime.datetime` (anything with ``strftime``),
    :class:`memoryview`, :class:`bytearray`, :class:`buffer`
    (Python 2 only) and :class:`bytes` -- to unicode strings.
    Other types are returned unharmed.
    """
    if isinstance(obj, uuid.UUID):
        return str(obj)
    if hasattr(obj, 'strftime'):
        return obj.strftime('%Y-%m-%dT%H:%M:%S.%f%z')
    # Byte-oriented types are decoded as UTF-8.
    raw = None
    if isinstance(obj, memoryview):
        raw = obj.tobytes()
    elif isinstance(obj, bytearray):
        raw = bytes(obj)
    elif sys.version_info[0] < 3 and isinstance(obj, buffer):
        raw = bytes(obj)
    elif isinstance(obj, bytes):
        raw = obj
    if raw is not None:
        return raw.decode('utf-8')
    return obj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def normalize_collection(coll): """ Normalize all elements in a collection. :param coll: the collection to normalize. This is required to implement one of the following protocols: :class:`collections.Mapping`, :class:`collections.Sequence`, or :class:`collections.Set`. :returns: a new instance of the input class with the keys and values normalized via :func:`.stringify` :raises: :exc:`RuntimeError` if `coll` is not a collection This function transforms the collection by recursively transforming each key and value contained in it. The action is recursive but the implementation is unrolled and iterative. If you are interested in the algorithm used, it is described as comments in the code. """
# # The recursive version of this algorithm is something like: # # if isinstance(coll, dict): # return dict((stringify(k), normalize_collection(v)) # for k, v in coll.items()) # if isinstance(obj, (list, tuple)): # return [normalize_collection(item) for item in obj] # raise RuntimeError('non-container root') # # Since this is NOT simply a tail-recursive function, unrolling # the recursion requires that we store intermediate "frame info" # somewhere while processing. I chose to use two stacks for # this: # # value_stack: contains the produced values. The while loop # appends a new container to this stack when it encounters a # container on the work stack. When the algorithm terminates, # we return the first (oldest) value on the stack. # work_stack: contains the items that need to be processed and # a function to call when the value is completed. Initially, # we place the input collection onto work stack without a # processing function. # # The algorithm starts with the input collection on the work # stack. Each iteration pops the top of the stack which contains # a value and a completion function (inserter). If the value is # a collection, then we push a new container onto the value stack, # iterate over the container, and push each item onto the work # stack with a function that will insert it into the new container. 
# value_stack = [] work_stack = [(coll, None)] def create_container(container_type, inserter): clone = container_type() if inserter: inserter(clone) value_stack.append(clone) return clone while work_stack: value, inserter = work_stack.pop() if isinstance(value, (frozenset, list, set, tuple)): target = create_container(list, inserter) inserter = functools.partial(target.insert, 0) for item in value: work_stack.append((item, inserter)) elif isinstance(value, dict): target = create_container(dict, inserter) for key, item in value.items(): inserter = functools.partial(target.__setitem__, stringify(key)) work_stack.append((item, inserter)) else: if inserter is None: raise RuntimeError( 'non-container root - type %r' % value.__class__) inserter(stringify(value)) return value_stack[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def available(self): """ Returns a set of the available versions. :returns: A set of integers giving the available versions. """
def available(self):
    """
    Returns a set of the available versions.

    :returns: A set of integers giving the available versions.
    """
    schema = self._schema
    # Short-circuit: no schema means no versions at all.
    if not schema:
        return set()
    # Every version we can downgrade to, plus the current one.
    versions = set(schema.__vers_downgraders__.keys())
    versions.add(schema.__version__)
    return versions
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_args_to_dict(values_specs): """It is used to analyze the extra command options to command. Besides known options and arguments, our commands also support user to put more options to the end of command line. For example, list_nets -- --tag x y --key1 value1, where '-- --tag x y --key1 value1' is extra options to our list_nets. This feature can support V2.0 API's fields selection and filters. For example, to list networks which has name 'test4', we can have list_nets -- --name=test4. built-in types. By default, type is string. The key without value is a bool option. Key with two values will be a list option. """
def parse_args_to_dict(values_specs):
    """It is used to analyze the extra command options to command.

    Besides known options and arguments, our commands also support user to
    put more options to the end of command line. For example,
    list_nets -- --tag x y --key1 value1, where '-- --tag x y --key1 value1'
    is extra options to our list_nets. This feature can support V2.0 API's
    fields selection and filters. For example, to list networks which has
    name 'test4', we can have list_nets -- --name=test4.

    By default, option values are strings. A key without value is a bool
    option; a key with two values becomes a list option.
    """
    # values_specs for example: '-- --tag x y --key1 type=int value1'
    # -- is a pseudo argument
    values_specs_copy = values_specs[:]
    if values_specs_copy and values_specs_copy[0] == '--':
        del values_specs_copy[0]
    # converted ArgumentParser arguments for each of the options
    _options = {}
    # the argument part for current option in _options
    current_arg = None
    # the string after remove meta info in values_specs
    # for example, '--tag x y --key1 value1'
    _values_specs = []
    # record the count of values for an option
    # for example: for '--tag x y', it is 2; for '--key1 value1', it is 1
    _value_number = 0
    # list=true
    _list_flag = False
    # action=clear
    _clear_flag = False
    # the current item in values_specs
    current_item = None
    # the str after 'type='
    current_type_str = None
    for _item in values_specs_copy:
        if _item.startswith('--'):
            # Deal with previous argument if any
            _process_previous_argument(
                current_arg, _value_number, current_type_str,
                _list_flag, _values_specs, _clear_flag, values_specs)
            # Init variables for current argument
            current_item = _item
            _list_flag = False
            _clear_flag = False
            current_type_str = None
            if "=" in _item:
                # '--key=value' carries its value inline.
                _value_number = 1
                _item = _item.split('=')[0]
            else:
                _value_number = 0
            if _item in _options:
                raise exceptions.CommandError(
                    _("Duplicated options %s") % ' '.join(values_specs))
            else:
                _options.update({_item: {}})
            current_arg = _options[_item]
            _item = current_item
        elif _item.startswith('type='):
            if current_arg is None:
                raise exceptions.CommandError(
                    _("Invalid values_specs %s") % ' '.join(values_specs))
            if 'type' not in current_arg:
                current_type_str = _item.split('=', 2)[1]
                # SECURITY NOTE(review): eval() on user-supplied command
                # line text -- acceptable only because this is a local CLI
                # spec, but worth replacing with an explicit whitelist.
                current_arg.update({'type': eval(current_type_str)})
                if current_type_str == 'bool':
                    current_arg.update({'type': utils.str2bool})
                elif current_type_str == 'dict':
                    current_arg.update({'type': utils.str2dict})
            continue
        elif _item == 'list=true':
            _list_flag = True
            continue
        elif _item == 'action=clear':
            _clear_flag = True
            continue
        if not _item.startswith('--'):
            # All others are value items
            # Make sure '--' occurs first and allow minus value
            if (not current_item or '=' in current_item or
                    _item.startswith('-') and not is_number(_item)):
                raise exceptions.CommandError(
                    _("Invalid values_specs %s") % ' '.join(values_specs))
            _value_number += 1
            if _item.startswith('---'):
                raise exceptions.CommandError(
                    _("Invalid values_specs %s") % ' '.join(values_specs))
            _values_specs.append(_item)
    # Deal with last one argument
    _process_previous_argument(
        current_arg, _value_number, current_type_str,
        _list_flag, _values_specs, _clear_flag, values_specs)
    # Populate the parser with arguments
    _parser = argparse.ArgumentParser(add_help=False)
    for opt, optspec in six.iteritems(_options):
        _parser.add_argument(opt, **optspec)
    _args = _parser.parse_args(_values_specs)
    result_dict = {}
    for opt in six.iterkeys(_options):
        # '--some-option' -> 'some_option' attribute name.
        _opt = opt.split('--', 2)[1]
        _opt = _opt.replace('-', '_')
        _value = getattr(_args, _opt)
        result_dict.update({_opt: _value})
    return result_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _merge_args(qCmd, parsed_args, _extra_values, value_specs): """Merge arguments from _extra_values into parsed_args. If an argument value are provided in both and it is a list, the values in _extra_values will be merged into parsed_args. @param parsed_args: the parsed args from known options @param _extra_values: the other parsed arguments in unknown parts @param values_specs: the unparsed unknown parts """
def _merge_args(qCmd, parsed_args, _extra_values, value_specs):
    """Merge arguments from _extra_values into parsed_args.

    If an argument value is provided in both and it is a list, the values
    in _extra_values will be merged into parsed_args.

    @param parsed_args: the parsed args from known options
    @param _extra_values: the other parsed arguments in unknown parts
    @param values_specs: the unparsed unknown parts
    """
    # Iterate over a copy so entries can be popped from the original.
    for key, value in six.iteritems(_extra_values.copy()):
        if not hasattr(parsed_args, key):
            continue
        arg_value = getattr(parsed_args, key)
        if arg_value is None or value is None:
            continue
        if isinstance(arg_value, list) and value and isinstance(value, list):
            # Only merge element lists of matching types (or into an
            # empty list); the merged key is consumed from extras.
            if not arg_value or isinstance(arg_value[0], type(value[0])):
                arg_value.extend(value)
                _extra_values.pop(key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update_dict(obj, dict, attributes): """Update dict with fields from obj.attributes. :param obj: the object updated into dict :param dict: the result dictionary :param attributes: a list of attributes belonging to obj """
def update_dict(obj, dict, attributes):
    """Update dict with fields from obj.attributes.

    :param obj: the object updated into dict
    :param dict: the result dictionary
    :param attributes: a list of attributes belonging to obj
    """
    for name in attributes:
        # Missing attributes and None values are both skipped.
        value = getattr(obj, name, None)
        if value is not None:
            dict[name] = value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def retrieve_list(self, parsed_args): """Retrieve a list of resources from Neutron server."""
def retrieve_list(self, parsed_args):
    """Retrieve a list of resources from Neutron server."""
    neutron_client = self.get_client()
    # Fold unknown trailing CLI options into the search filters.
    _extra_values = parse_args_to_dict(self.values_specs)
    _merge_args(self, parsed_args, _extra_values,
                self.values_specs)
    search_opts = self.args2search_opts(parsed_args)
    search_opts.update(_extra_values)
    if self.pagination_support:
        page_size = parsed_args.page_size
        if page_size:
            search_opts.update({'limit': page_size})
    if self.sorting_support:
        keys = parsed_args.sort_key
        if keys:
            search_opts.update({'sort_key': keys})
        dirs = parsed_args.sort_dir
        # Pad missing directions with 'asc', or truncate extras, so the
        # direction list always matches the key list in length.
        len_diff = len(keys) - len(dirs)
        if len_diff > 0:
            dirs += ['asc'] * len_diff
        elif len_diff < 0:
            dirs = dirs[:len(keys)]
        if dirs:
            search_opts.update({'sort_dir': dirs})
    data = self.call_server(neutron_client, search_opts, parsed_args)
    # Responses are keyed by the plural resource name, e.g. 'networks'.
    collection = _get_resource_plural(self.resource, neutron_client)
    return data.get(collection, [])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def lock_key(group_id, item_id, group_width=8): """Creates a lock ID where the lower bits are the group ID and the upper bits are the item ID. This allows the use of a bigint namespace for items, with a limited space for grouping. :group_id: an integer identifying the group. Must be less than 2 ^ :group_width: :item_id: an integer. Must be less than 2 ^ (63 - :group_width:) - 1 :group_width: the number of bits to reserve for the group ID. """
def lock_key(group_id, item_id, group_width=8):
    """Creates a lock ID where the lower bits are the group ID and the
    upper bits are the item ID.

    This allows the use of a bigint namespace for items, with a limited
    space for grouping.

    :group_id: an integer identifying the group. Must be less than
        2 ^ :group_width:
    :item_id: an integer. Must be less than 2 ^ (63 - :group_width:) - 1
    :group_width: the number of bits to reserve for the group ID.
    :raises ValueError: if either ID does not fit in its bit range
        (ValueError replaces the original bare Exception; still caught
        by existing ``except Exception`` handlers).
    """
    if group_id >= (1 << group_width):
        raise ValueError("Group ID is too big")
    if item_id >= (1 << (63 - group_width)) - 1:
        raise ValueError("Item ID is too big")
    return (item_id << group_width) | group_id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def release_lock(dax, key, lock_mode=LockMode.wait): """Manually release a pg advisory lock. :dax: a DataAccess instance :key: either a big int or a 2-tuple of integers :lock_mode: a member of the LockMode enum """
def release_lock(dax, key, lock_mode=LockMode.wait):
    """Manually release a pg advisory lock.

    :dax: a DataAccess instance
    :key: either a big int or a 2-tuple of integers
    :lock_mode: a member of the LockMode enum
    """
    fxn_name = _lock_fxn("unlock", lock_mode, False)
    # A composite key is passed through as-is; a scalar is wrapped.
    call_args = key if isinstance(key, (list, tuple)) else [key]
    row = dax.callproc(fxn_name, call_args)[0]
    return dax.get_scalar(row)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def advisory_lock(dax, key, lock_mode=LockMode.wait, xact=False): """A context manager for obtaining a lock, executing code, and then releasing the lock. A boolean value is passed to the block indicating whether or not the lock was obtained. :dax: a DataAccess instance :key: either a big int or a 2-tuple of integers :lock_mode: a member of the LockMode enum. Determines how this function operates: - wait: the wrapped code will not be executed until the lock is obtained. - skip: an attempt will be made to get the lock, and if unsuccessful, False is passed to the code block - error: an attempt will be made to get the lock, and if unsuccessful, an exception will be raised. :xact: a boolean, if True, the lock will be obtained according to lock_mode, but will not be released after the code is executed, since it will be automatically released at the end of the transaction. """
# Context-manager body: acquire the advisory lock per ``lock_mode``,
# yield whether it was obtained, and release it on exit unless the lock
# is transaction-scoped (``xact``), in which case Postgres releases it.
if lock_mode == LockMode.wait:
        # Blocking acquire; obtain_lock() returns only once held.
        obtain_lock(dax, key, lock_mode, xact)
    else:
        got_lock = obtain_lock(dax, key, lock_mode, xact)
        if not got_lock:
            if lock_mode == LockMode.error:
                raise Exception("Unable to obtain advisory lock {}".format(key))
            else: # lock_mode is skip
                # Tell the caller the lock was not obtained, then bail out
                # without running the finally-release path.
                yield False
                return
    # At this point we have the lock
    try:
        yield True
    finally:
        if not xact:
            release_lock(dax, key, lock_mode)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _lock_fxn(direction, lock_mode, xact): """Builds a pg advisory lock function name based on various options. :direction: one of "lock" or "unlock" :lock_mode: a member of the LockMode enum :xact: a boolean, if True the lock will be automatically released at the end of the transaction and cannot be manually released. """
if direction == "unlock" or lock_mode == LockMode.wait: try_mode = "" else: try_mode = "_try" if direction == "lock" and xact: xact_mode = "_xact" else: xact_mode = "" return "pg{}_advisory{}_{}".format(try_mode, xact_mode, direction)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_hash(fName, readSize, dire=pDir()): """ creates the required hash """
# Compute the SubDB-style hash: the MD5 of the first plus last
# ``readSize`` KiB of the file.  Returns -1 when the file is missing or
# too small to supply both chunks.
if not fileExists(fName, dire):
        return -1
    readSize = readSize * 1024 # bytes to be read
    fName = os.path.join(dire, fName) # name coupled with path
    with open(fName, 'rb') as f:
        size = os.path.getsize(fName)
        # Need two non-overlapping chunks, so the file must be at least
        # twice the read size.
        if size < readSize * 2:
            return -1
        data = f.read(readSize)
        f.seek(-readSize, os.SEEK_END)
        data += f.read(readSize)
    return md5(data).hexdigest()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download_file(fName, time, dire=pDir()): """ download the required subtitle """
# hash gen_hash = get_hash(fName, 64, dire) if gen_hash == -1: return -1 # making request user_agent = {'User-agent': 'SubDB/1.0 (sub/0.1; http://github.com/leosartaj/sub)'} param = {'action': 'download', 'hash': gen_hash, 'language': 'en'} # Specification for the request try: r = requests.get("http://api.thesubdb.com/", headers = user_agent, params = param, timeout=time) # Get Request except (requests.exceptions.Timeout, socket.error): return 'Timeout Error' if r.status_code != 200: return r.status_code # save file fName, fExt = os.path.splitext(fName) fName += '.srt' # replace extension with srt fName = os.path.join(dire, fName) # name coupled with path with open(fName, 'wb') as f: f.write(r.text.encode('ascii', 'ignore')) return r.status_code
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def file_downloaded(dwn, fName, verbose=False): """ print for downloaded file """
if verbose: if dwn == 200: fName, fExt = os.path.splitext(fName) print 'Downloaded ' + fName + '.srt' return True elif dwn != -1: print 'Tried downloading got ' + str(dwn) + ' for ' + fName return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def download(name, options): """ download a file or all files in a directory """
dire = os.path.dirname(name) # returns the directory name fName = os.path.basename(name) # returns the filename fNameOnly, fExt = os.path.splitext(fName) dwn = 0 if fileExists(fName, dire) and not fileExists((fNameOnly + '.srt'), dire): # skip if already downloaded if file_downloaded(download_file(fName, options.timeout, dire), fName, options.verbose): dwn += 1 elif dirExists(name): for filename in os.listdir(name): if options.recursive: dwn += download(os.path.join(name, filename), options) else: if file_downloaded(download_file(filename, options.timeout, name), filename, options.verbose): dwn += 1 return dwn
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_dateobject_to_string(x): """Convert a Pandas Timestamp object or datetime object to 'YYYY-MM-DD' string Parameters x : str, list, tuple, numpy.ndarray, pandas.DataFrame A Pandas Timestamp object or datetime object, or an array of these objects Returns ------- y : str, list, tuple, numpy.ndarray, pandas.DataFrame A string 'YYYY-MM-DD' or array of date strings. Example ------- The function aims to convert a string as follows Timestamp('2014-09-23 00:00:00') => '2014-09-23' datetime.datetime(2014,9,23,0,0) => '2014-09-23' Code Example print(clean_dateobject_to_string(pd.Timestamp('2014-09-23 00:00:00'))) '2014-09-23' print(clean_dateobject_to_string(datetime(2014,9,23,0,0))) '2014-09-23' Behavior -------- - If it is not an object with strftime function the None is return """
import numpy as np
    import pandas as pd

    # Format one date-like object; anything without a working strftime
    # (or a strftime that raises) maps to None, with the error printed.
    def proc_elem(e):
        try:
            return e.strftime("%Y-%m-%d")
        except Exception as e:
            print(e)
            return None

    # Element-wise conversion of a flat sequence.
    def proc_list(x):
        return [proc_elem(e) for e in x]

    # Flatten, convert element-wise, then restore the original shape.
    def proc_ndarray(x):
        tmp = proc_list(list(x.reshape((x.size,))))
        return np.array(tmp).reshape(x.shape)

    # transform string, list/tuple, numpy array, pandas dataframe
    # Duck-typed check: anything exposing strftime is treated as a scalar
    # date object (covers datetime and pandas Timestamp).
    if "strftime" in dir(x):
        return proc_elem(x)
    elif isinstance(x, (list, tuple)):
        return proc_list(x)
    elif isinstance(x, np.ndarray):
        return proc_ndarray(x)
    elif isinstance(x, pd.DataFrame):
        return pd.DataFrame(proc_ndarray(x.values), columns=x.columns, index=x.index)
    else:
        # Unsupported input type.
        return None
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def AND(*args, **kwargs):
    """ ALL args must not raise an exception when called incrementally. If an exception is specified, raise it, otherwise raise the callable's exception. :params iterable[Certifier] args: The certifiers to call :param callable kwargs['exc']: Callable that accepts the unexpectedly raised exception as argument and returns an exception to raise. :raises CertifierError: The first certifier error if at least one raises a certifier error. """
for arg in args: try: arg() except CertifierError as e: exc = kwargs.get('exc', None) if exc is not None: raise exc(e) raise
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
def NAND(*args, **kwargs):
    """ ALL args must raise an exception when called overall. Raise the specified exception on failure OR the first exception. :params iterable[Certifier] args: The certifiers to call :param callable kwargs['exc']: Callable that accepts the unexpectedly raised exception as argument and returns an exception to raise. """
# Call every certifier, collecting CertifierErrors; fail (raise) when
# not all of them raised.
errors = []
    for arg in args:
        try:
            arg()
        except CertifierError as e:
            errors.append(e)
    # NOTE(review): the ``len(args) > 1`` guard means a single succeeding
    # certifier never triggers a raise -- confirm that is intentional.
    if (len(errors) != len(args)) and len(args) > 1:
        # Default exception is built eagerly even when 'exc' is supplied.
        exc = kwargs.get(
            'exc',
            CertifierValueError('Expecting no certified values'),
        )
        if exc is not None:
            raise exc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def XOR(a, b, exc=CertifierValueError('Expected at least one certified value')): """ Only one arg must not raise a Certifier exception when called overall. Raise the specified exception on failure. :params Certifier a: The first certifiers to call :params Certifier b: The second certifiers to call :param Exception exc: Callable that is raised if XOR fails. """
errors = [] for certifier in [a, b]: try: certifier() except CertifierError as e: errors.append(e) if len(errors) != 1: if exc is not None: raise exc
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cli(ctx, verbose, config): """ IPS Vagrant Management Utility """
assert isinstance(ctx, Context) # Set up the logger verbose = verbose if (verbose <= 3) else 3 log_levels = {1: logging.WARN, 2: logging.INFO, 3: logging.DEBUG} log_level = log_levels[verbose] ctx.log = logging.getLogger('ipsv') ctx.log.setLevel(log_level) # Console logger console_format = logging.Formatter("[%(levelname)s] %(name)s: %(message)s") ch = logging.StreamHandler() ch.setLevel(log_level) ch.setFormatter(console_format) ctx.log.addHandler(ch) # File logger file_format = logging.Formatter("[%(asctime)s] [%(levelname)s] %(name)s: %(message)s") file_logger = logging.FileHandler(os.path.join(ctx.config.get('Paths', 'Log'), 'ipsv.log')) file_logger.setLevel(log_level) file_logger.setFormatter(file_format) ctx.log.addHandler(file_logger) # Load the configuration if os.path.isfile(config): ctx.config_path = config ctx.log.debug('Loading configuration: %s', ctx.config_path) ctx.load_config(config) else: ctx.config_path = os.path.join(ctx.basedir, 'config', 'ipsv.conf') ctx.log.debug('Loading default configuration: %s', ctx.config_path) ctx.setup()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def db(self): """ Get a loaded database session """
# Lazily bind the SQLAlchemy Session on first access; NotImplemented
# doubles as the "not yet initialised" sentinel.
if self.database is NotImplemented:
        self.database = Session
    return self.database
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_login(self, use_session=True): """ Get an active login session @param use_session: Use a saved session file if available @type use_session: bool """
# Should we try and return an existing login session? if use_session and self._login.check(): self.cookiejar = self._login.cookiejar return self.cookiejar # Prompt the user for their login credentials username = click.prompt('IPS Username') password = click.prompt('IPS Password', hide_input=True) remember = click.confirm('Save login session?', True) # Process the login cookiejar = self._login.process(username, password, remember) if remember: self.cookiejar = cookiejar return cookiejar
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def list_commands(self, ctx): """ List CLI commands @type ctx: Context @rtype: list """
commands_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), 'commands') command_list = [name for __, name, ispkg in pkgutil.iter_modules([commands_path]) if ispkg] command_list.sort() return command_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_command(self, ctx, name): """ Get a bound command method @type ctx: Context @param name: Command name @type name: str @rtype: object """
# Import the named command module and return its ``cli`` entry point.
try:
        mod = importlib.import_module('ips_vagrant.commands.{name}'.format(name=name))
        return mod.cli
    except (ImportError, AttributeError):
        # Unknown command, or a command module without a ``cli`` attribute:
        # resolve to None (bare return) so click reports it as missing.
        return
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _patched_pep257(): """Monkey-patch pep257 after imports to avoid info logging."""
# Context-manager body: temporarily replace pep257's module logger
# ``info`` with a no-op so it does not emit info logging while linting,
# restoring the original on exit.
import pep257

    if getattr(pep257, "log", None):
        def _dummy(*args, **kwargs):
            del args
            del kwargs

        old_log_info = pep257.log.info
        pep257.log.info = _dummy  # suppress(unused-attribute)

    try:
        yield
    finally:
        # NOTE(review): if pep257.log was falsy on entry but truthy here,
        # ``old_log_info`` is unbound and this raises NameError -- confirm
        # that situation cannot occur.
        if getattr(pep257, "log", None):
            pep257.log.info = old_log_info
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _stamped_deps(stamp_directory, func, dependencies, *args, **kwargs): """Run func, assumed to have dependencies as its first argument."""
if not isinstance(dependencies, list): jobstamps_dependencies = [dependencies] else: jobstamps_dependencies = dependencies kwargs.update({ "jobstamps_cache_output_directory": stamp_directory, "jobstamps_dependencies": jobstamps_dependencies }) return jobstamp.run(func, dependencies, *args, **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _debug_linter_status(linter, filename, show_lint_files): """Indicate that we are running this linter if required."""
# Print which file a linter is about to process, when tracing is enabled.
#
# Bug fix: the format string printed the literal "(unknown)" and silently
# ignored the ``filename`` keyword it was passed; it now prints the
# actual file being linted.
if show_lint_files:
        print("{linter}: {filename}".format(linter=linter, filename=filename))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_flake8(filename, stamp_file_name, show_lint_files): """Run flake8, cached by stamp_file_name."""
# Run flake8 on ``filename``; results are memoised under
# ``stamp_file_name`` so unchanged files are not re-linted.
_debug_linter_status("flake8", filename, show_lint_files)
    return _stamped_deps(stamp_file_name, _run_flake8_internal, filename)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_prospector_on(filenames, tools, disabled_linters, show_lint_files, ignore_codes=None): """Run prospector on filename, using the specified tools. This function enables us to run different tools on different classes of files, which is necessary in the case of tests. """
# Run prospector with the requested (non-disabled) tools over
# ``filenames`` and return a dict of _Key(path, line, code) -> Message,
# skipping any message whose code appears in ``ignore_codes``.
from prospector.run import Prospector, ProspectorConfig

    assert tools
    # Drop tools the caller disabled; may leave nothing to run.
    tools = list(set(tools) - set(disabled_linters))

    return_dict = dict()
    ignore_codes = ignore_codes or list()

    # Early return if all tools were filtered out
    if not tools:
        return return_dict

    # pylint doesn't like absolute paths, so convert to relative.
    all_argv = (["-F", "-D", "-M", "--no-autodetect", "-s", "veryhigh"] +
                ("-t " + " -t ".join(tools)).split(" "))

    for filename in filenames:
        _debug_linter_status("prospector", filename, show_lint_files)

    # Prospector reads its configuration from argv, hence the temporary
    # argv swap.
    with _custom_argv(all_argv + [os.path.relpath(f) for f in filenames]):
        prospector = Prospector(ProspectorConfig())
        prospector.execute()
        messages = prospector.get_messages() or list()
        for message in messages:
            message.to_absolute_path(os.getcwd())
            loc = message.location
            code = message.code
            if code in ignore_codes:
                continue
            key = _Key(loc.path, loc.line, code)
            return_dict[key] = message

    return return_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_prospector(filename, stamp_file_name, disabled_linters, show_lint_files): """Run prospector."""
# Run prospector over a single file, choosing the tool set and ignored
# codes based on whether the file is a test module.  Results are cached
# under ``stamp_file_name``.
linter_tools = [
        "pep257",
        "pep8",
        "pyflakes"
    ]
    if can_run_pylint():
        linter_tools.append("pylint")

    # Run prospector on tests. There are some errors we don't care about:
    # - invalid-name: This is often triggered because test method names
    #                 can be quite long. Descriptive test method names are
    #                 good, so disable this warning.
    # - super-on-old-class: unittest.TestCase is a new style class, but
    #                       pylint detects an old style class.
    # - too-many-public-methods: TestCase subclasses by definition have
    #                            lots of methods.
    test_ignore_codes = [
        "invalid-name",
        "super-on-old-class",
        "too-many-public-methods"
    ]
    kwargs = dict()
    if _file_is_test(filename):
        kwargs["ignore_codes"] = test_ignore_codes
    else:
        # frosted is only useful on non-test code.
        if can_run_frosted():
            linter_tools += ["frosted"]

    return _stamped_deps(stamp_file_name,
                         _run_prospector_on,
                         [filename],
                         linter_tools,
                         disabled_linters,
                         show_lint_files,
                         **kwargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_pyroma(setup_file, show_lint_files): """Run pyroma."""
# Run every pyroma packaging-quality test against the project in the
# current directory, converting each failed test into a prospector
# Message attributed to ``setup_file`` at line 0.
from pyroma import projectdata, ratings
    from prospector.message import Message, Location

    _debug_linter_status("pyroma", setup_file, show_lint_files)

    return_dict = dict()

    data = projectdata.get_data(os.getcwd())
    all_tests = ratings.ALL_TESTS
    # Re-instantiate each test class to get a fresh, stateless instance.
    for test in [mod() for mod in [t.__class__ for t in all_tests]]:
        # ``is False`` deliberately skips tests returning None ("skipped").
        if test.test(data) is False:
            class_name = test.__class__.__name__
            key = _Key(setup_file, 0, class_name)
            loc = Location(setup_file, None, None, 0, 0)
            msg = test.message()
            return_dict[key] = Message("pyroma", class_name, loc, msg)

    return return_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_polysquare_style_linter(matched_filenames, cache_dir, show_lint_files): """Run polysquare-generic-file-linter on matched_filenames."""
# Run polysquare-generic-file-linter over ``matched_filenames`` by
# hijacking its error-reporting hook, collecting results as prospector
# Messages keyed by (path, line, code).
from polysquarelinter import linter as lint
    from prospector.message import Message, Location

    return_dict = dict()

    # Capture each reported error into return_dict instead of letting the
    # linter print it.
    def _custom_reporter(error, file_path):
        key = _Key(file_path, error[1].line, error[0])
        loc = Location(file_path, None, None, error[1].line, 0)
        return_dict[key] = Message("polysquare-generic-file-linter",
                                   error[0],
                                   loc,
                                   error[1].description)

    for filename in matched_filenames:
        _debug_linter_status("style-linter", filename, show_lint_files)

    # suppress(protected-access,unused-attribute)
    lint._report_lint_error = _custom_reporter
    lint.main([
        "--spellcheck-cache=" + os.path.join(cache_dir, "spelling"),
        "--stamp-file-path=" + os.path.join(cache_dir,
                                            "jobstamps",
                                            "polysquarelinter"),
        "--log-technical-terms-to=" + os.path.join(cache_dir,
                                                   "technical-terms"),
    ] + matched_filenames + [
        "--block-regexps"
    ] + _BLOCK_REGEXPS)

    return return_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_spellcheck_linter(matched_filenames, cache_dir, show_lint_files): """Run spellcheck-linter on matched_filenames."""
# Run the spellcheck-only linter over ``matched_filenames`` by hijacking
# its reporting hook; each spelling error becomes a prospector Message
# keyed by (path, line, "file/spelling_error").
from polysquarelinter import lint_spelling_only as lint
    from prospector.message import Message, Location

    for filename in matched_filenames:
        _debug_linter_status("spellcheck-linter", filename, show_lint_files)

    return_dict = dict()

    def _custom_reporter(error, file_path):
        # line_offset is zero-based; Messages use one-based lines.
        line = error.line_offset + 1
        key = _Key(file_path, line, "file/spelling_error")
        loc = Location(file_path, None, None, line, 0)
        # suppress(protected-access)
        desc = lint._SPELLCHECK_MESSAGES[error.error_type].format(error.word)
        return_dict[key] = Message("spellcheck-linter",
                                   "file/spelling_error",
                                   loc,
                                   desc)

    # suppress(protected-access,unused-attribute)
    lint._report_spelling_error = _custom_reporter
    lint.main([
        "--spellcheck-cache=" + os.path.join(cache_dir, "spelling"),
        "--stamp-file-path=" + os.path.join(cache_dir,
                                            "jobstamps",
                                            "polysquarelinter"),
        "--technical-terms=" + os.path.join(cache_dir, "technical-terms"),
    ] + matched_filenames)

    return return_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _run_markdownlint(matched_filenames, show_lint_files): """Run markdownlint on matched_filenames."""
from prospector.message import Message, Location for filename in matched_filenames: _debug_linter_status("mdl", filename, show_lint_files) try: proc = subprocess.Popen(["mdl"] + matched_filenames, stdout=subprocess.PIPE, stderr=subprocess.PIPE) lines = proc.communicate()[0].decode().splitlines() except OSError as error: if error.errno == errno.ENOENT: return [] lines = [ re.match(r"([\w\-.\/\\ ]+)\:([0-9]+)\: (\w+) (.+)", l).groups(1) for l in lines ] return_dict = dict() for filename, lineno, code, msg in lines: key = _Key(filename, int(lineno), code) loc = Location(filename, None, None, int(lineno), 0) return_dict[key] = Message("markdownlint", code, loc, msg) return return_dict
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_cache_dir(candidate): """Get the current cache directory."""
# Return the cache directory to use: the caller's candidate when given,
# otherwise distutils' build_temp directory, created on demand.
if candidate:
        return candidate

    import distutils.dist  # suppress(import-error)
    import distutils.command.build  # suppress(import-error)

    # Ask distutils where a build would put temporary files and reuse
    # that location as our cache.
    build_cmd = distutils.command.build.build(distutils.dist.Distribution())
    build_cmd.finalize_options()
    cache_dir = os.path.abspath(build_cmd.build_temp)

    # Make sure that it is created before anyone tries to use it
    try:
        os.makedirs(cache_dir)
    except OSError as error:
        # Already existing is fine; anything else propagates.
        if error.errno != errno.EEXIST:
            raise error

    return cache_dir
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _is_excluded(filename, exclusions): """Return true if filename matches any of exclusions."""
for exclusion in exclusions: if fnmatch(filename, exclusion): return True return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _file_lines(self, filename): """Get lines for filename, caching opened files."""
# Return (and memoise) the line list of ``filename``; EAFP cache lookup
# avoids a double dict access on the hot path.
try:
        return self._file_lines_cache[filename]
    except KeyError:
        if os.path.isfile(filename):
            with open(filename) as python_file:
                self._file_lines_cache[filename] = python_file.readlines()
        else:
            # Missing files cache an empty *string* (falsy, len 0) rather
            # than an empty list -- callers only truth-test / index it.
            self._file_lines_cache[filename] = ""

        return self._file_lines_cache[filename]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _suppressed(self, filename, line, code): """Return true if linter error code is suppressed inline. The suppression format is suppress(CODE1,CODE2,CODE3) etc. """
# Decide whether ``code`` is suppressed for (filename, line), either
# globally via suppress_codes or inline via a trailing
# "# suppress(CODE,...)" comment on the line (or the line above it).
if code in self.suppress_codes:
        return True

    lines = self._file_lines(filename)

    # File is zero length, cannot be suppressed
    if not lines:
        return False

    # Handle errors which appear after the end of the document.
    while line > len(lines):
        line = line - 1

    relevant_line = lines[line - 1]

    try:
        # Text after the first '#' on the offending line itself.
        suppressions_function = relevant_line.split("#")[1].strip()
        if suppressions_function.startswith("suppress("):
            return code in _parse_suppressions(suppressions_function)
    except IndexError:
        # No inline comment on the line: check the preceding line for a
        # whole-line "# suppress(...)" comment instead.
        above_line = lines[max(0, line - 2)]
        suppressions_function = above_line.strip()[1:].strip()
        if suppressions_function.startswith("suppress("):
            return code in _parse_suppressions(suppressions_function)
    finally:
        # No-op finally; when neither branch matches, the implicit None
        # return is treated as "not suppressed" by callers.
        pass
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_md_files(self): """Get all markdown files."""
all_f = _all_files_matching_ext(os.getcwd(), "md") exclusions = [ "*.egg/*", "*.eggs/*", "*build/*" ] + self.exclusions return sorted([f for f in all_f if not _is_excluded(f, exclusions)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_files_to_lint(self, external_directories): """Get files to lint."""
# Gather every python file to lint: external directories, distribution
# packages, py_modules, and setup.py itself; de-duplicated via realpath
# and filtered through the exclusion globs.
all_f = []
    for external_dir in external_directories:
        all_f.extend(_all_files_matching_ext(external_dir, "py"))

    packages = self.distribution.packages or list()
    for package in packages:
        all_f.extend(_all_files_matching_ext(package, "py"))

    py_modules = self.distribution.py_modules or list()
    for filename in py_modules:
        all_f.append(os.path.realpath(filename + ".py"))

    all_f.append(os.path.join(os.getcwd(), "setup.py"))

    # Remove duplicates which may exist due to symlinks or repeated
    # packages found by /setup.py
    all_f = list(set([os.path.realpath(f) for f in all_f]))

    exclusions = [
        "*.egg/*",
        "*.eggs/*"
    ] + self.exclusions

    return sorted([f for f in all_f if not _is_excluded(f, exclusions)])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def initialize_options(self): # suppress(unused-function) """Set all options to their initial values."""
# Reset every distutils command option to its default value.
self._file_lines_cache = dict()  # memoised file contents, path -> lines
    self.suppress_codes = list()     # codes suppressed for the whole run
    self.exclusions = list()         # glob patterns for files to skip
    self.cache_directory = ""        # override for the lint cache dir
    self.stamp_directory = ""        # override for the jobstamps dir
    self.disable_linters = list()    # linter names to skip entirely
    self.show_lint_files = 0         # truthy -> trace each linted file
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def normalize_pattern(pattern): """Converts backslashes in path patterns to forward slashes. Doesn't normalize regular expressions - they may contain escapes. """
if not (pattern.startswith('RE:') or pattern.startswith('!RE:')): pattern = _slashes.sub('/', pattern) if len(pattern) > 1: pattern = pattern.rstrip('/') return pattern
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(self, pat, fun): r"""Add a pattern and replacement. The pattern must not contain capturing groups. The replacement might be either a string template in which \& will be replaced with the match, or a function that will get the matching text as argument. It does not get match object, because capturing is forbidden anyway. """
# Register a (pattern, replacement) pair on this Replacer.
# Invalidate the combined compiled pattern; it is rebuilt lazily.
self._pat = None
    self._pats.append(pat)
    self._funs.append(fun)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_replacer(self, replacer): r"""Add all patterns from another replacer. All patterns and replacements from replacer are appended to the ones already defined. """
# Merge another Replacer's pattern/replacement pairs into this one.
# Invalidate the combined compiled pattern; it is rebuilt lazily.
self._pat = None
    # Reaches into the other replacer's internals; both are Replacers.
    self._pats.extend(replacer._pats)
    self._funs.extend(replacer._funs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def is_pattern_valid(pattern): """Returns True if pattern is valid. :param pattern: Normalized pattern. is_pattern_valid() assumes pattern to be normalized. see: globbing.normalize_pattern """
result = True translator = Globster.pattern_info[Globster.identify(pattern)]["translator"] tpattern = '(%s)' % translator(pattern) try: re_obj = lazy_regex.lazy_compile(tpattern, re.UNICODE) re_obj.search("") # force compile except Exception as e: result = False return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def put(self, device_id: int) -> Device: """ Updates the Device Resource with the name. """
# Fetch the device (abort with an error response when missing), apply
# the request payload via self.update(), and persist the change.
device = self._get_or_abort(device_id)
    self.update(device)
    session.commit()
    # NOTE(review): committing *before* session.add() is unusual for
    # SQLAlchemy -- this add() only re-attaches ``device`` and its effect
    # would need a later commit.  Confirm the ordering is intentional.
    session.add(device)
    return device
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def xross_listener(http_method=None, **xross_attrs): """Instructs xross to handle AJAX calls right from the moment it is called. This should be placed in a view decorated with `@xross_view()`. :param str http_method: GET or POST. To be used as a source of data for xross. :param dict xross_attrs: xross handler attributes. Those attributes will be available in operation functions in `xross` keyword argument. """
# Reach into the *calling* frame to grab the view's ``request`` local;
# this is why xross_listener() must be called directly from a view
# decorated with @xross_view(), which attaches _xross_handler.
handler = currentframe().f_back.f_locals['request']._xross_handler
    handler.set_attrs(**xross_attrs)
    if http_method is not None:
        handler.http_method = http_method
    handler.dispatch()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def xross_view(*op_functions): """This decorator should be used to decorate application views that require xross functionality. :param list op_functions: operations (functions, methods) responsible for handling xross requests. Function names considered to be operations names. Using them clients will address those functions (e.g. xross-ready HTML elements may be marked with `data-xop` attributes to define the above mentioned operations, or just define `id` which will serve for the same purpose). They can accept `request` as first argument (for methods it'll be second, as the first is `self`), It can also accept `xross` keyword argument, which will contain any additional `xross attrs` as defined by `xross_listener()`. Those functions should return string or dict (handled by client as JSON) or HTTPResponse, e.g. from `render()` result. Examples: def do_something(request, param1_from_html_el, param2_from_html_el, xross=None): return '%s - %s' % (param1_from_html_el, param2_from_html_el) """
# Build the operation registry from the given functions, then return a
# decorator that attaches an xross handler to the request and converts
# handler exceptions / plain return values into HTTP responses.
operations_dict = construct_operations_dict(*op_functions)

    def get_request(src):
        # Only genuine HttpRequest objects qualify; anything else -> None.
        return src if isinstance(src, HttpRequest) else None

    def dec_wrapper(func):

        def func_wrapper(*fargs, **fkwargs):

            # Locate the request argument once and memoise its position on
            # the wrapped function for subsequent calls.
            request_idx = getattr(func, '_req_idx', None)

            if request_idx is None:
                request = get_request(fargs[0])
                request_idx = 0

                if not request:
                    # Possibly a class-based view where 0-attr is `self`.
                    request = get_request(fargs[1])
                    request_idx = 1

                func._req_idx = request_idx

            else:
                request = fargs[request_idx]

            # Attach (or extend) the xross handler on the request.
            if hasattr(request, '_xross_handler'):
                request._xross_handler._op_bindings.update(operations_dict['_op_bindings'])
            else:
                request._xross_handler = build_handler_class(operations_dict)(request, func)

            try:
                response = func(*fargs, **fkwargs)

            except HandlerException as e:
                # Error details are only exposed in DEBUG mode.
                return HttpResponseBadRequest(e if settings.DEBUG else b'')

            except ResponseEmpty as e:
                return HttpResponseNotFound(e if settings.DEBUG else b'')

            except ResponseReady as r:
                # An operation short-circuited with a ready response value.
                response = r.response

                if response is None:
                    response = ''

                if isinstance(response, str):
                    response = HttpResponse(response)

                elif isinstance(response, dict):
                    # Dicts are serialised to JSON for the client.
                    response = HttpResponse(json.dumps(response), content_type='application/json')

            return response

        return func_wrapper

    return dec_wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def intercept(actions: dict={}): """ Decorates a function and handles any exceptions that may rise. Args: actions: A dictionary ``<exception type>: <action>``. Available actions\ are :class:`raises` and :class:`returns`. Returns: Any value declared using a :class:`returns` action. Raises: AnyException: if AnyException is declared together with a :class:`raises` action. InterceptorError: if the decorator is called with something different from a :class:`returns` or :class:`raises` action. Interceptors can be declared inline to return a value or raise an exception when the declared exception is risen: 'ok' 'intercepted!' Traceback (most recent call last): Exception: intercepted! But they can also be declared and then used later on: 'intercepted!' You can declare also an action that captures the risen exception by passing a callable to the action. This is useful to create a custom error message: 'intercepted inner exception' Or to convert captured exceptions into custom errors: Traceback (most recent call last): intercept.CustomError: inner exception """
# Validate the action table up front, then return a decorator whose
# wrapper maps each declared exception class to its returns/raises
# action and re-raises everything undeclared.
for action in actions.values():
        # Exact-type check: subclasses of returns/raises are rejected too.
        if type(action) is not returns and type(action) is not raises:
            raise InterceptorError('Actions must be declared as `returns` or `raises`')

    def decorated(f):
        def wrapped(*args, **kargs):
            try:
                return f(*args, **kargs)
            except Exception as e:
                # NOTE(review): matching uses the exact class
                # (e.__class__ in actions), so *subclasses* of a declared
                # exception are NOT intercepted -- confirm intended.
                if e.__class__ in actions:
                    return actions[e.__class__](e)
                else:
                    raise
        return wrapped
    return decorated
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extend(self, *bindings): """ Append the given bindings to this keymap. Arguments: *bindings (Binding): Bindings to be added. Returns: Keymap: self """
# Normalize the incoming bindings and append them after any
# previously registered ones.
self._bindings.extend(self._preprocess(bindings))
# Return self so calls can be chained fluently.
return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def when(self, key): """ Specify context, i.e. condition that must be met. Arguments: key (str): Name of the context whose value you want to query. Returns: Context: """
# Create a context condition bound to this keymap and remember it so
# subsequent bindings can be scoped to it.
ctx = Context(key, self)
self.context.append(ctx)
return ctx
<SYSTEM_TASK:>
Solve the following problem using Python, implementing the functions described below, one line at a time
<END_TASK>
<USER_TASK:>
Description:
    def _split_source_page(self, path):
        """Split the source file text into metadata and content at the triple-dashed delimiter lines. """
with codecs.open(path, "rb", "utf-8") as fd:
    textlist = fd.readlines()

# The metadata section is delimited by lines containing only "---".
metadata_notation = "---\n"
if textlist[0] != metadata_notation:
    logging.error(
        "{} first line must be triple-dashed!".format(path)
    )
    sys.exit(1)

metadata_textlist = []
metadata_end_flag = False
idx = 1
max_idx = len(textlist)
# TODO(crow): BE PYTHONIC!!!
# Collect lines until the closing "---" appears; abort the whole
# program if the file ends before the delimiter is found.
while not metadata_end_flag:
    metadata_textlist.append(textlist[idx])
    idx += 1
    if idx >= max_idx:
        logging.error(
            "{} doesn't have end triple-dashed!".format(path)
        )
        sys.exit(1)
    if textlist[idx] == metadata_notation:
        metadata_end_flag = True

# Everything after the closing delimiter is the page content.
content = textlist[idx + 1:]
return metadata_textlist, content
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_feed_data(self, file_paths): """ get data to display in feed file """
rv = {}
for i in file_paths:
    # TODO(crow): only support first category
    # Path layout assumed to be .../<category>/<name>.<ext>
    # -- TODO confirm against the caller.
    _ = i.split('/')
    category = _[-2]
    name = _[-1].split('.')[0]

    page_config, md = self._get_config_and_content(i)
    parsed_md = tools.parse_markdown(md, self.site_config)

    rv.setdefault(category, {})
    rv[category].update(
        {
            i: {
                'title': page_config.get('title', ''),
                # NOTE(review): `.decode` implies `name` is bytes here,
                # i.e. Python 2; under Python 3 str.split yields str and
                # this raises AttributeError -- verify target interpreter.
                'name': name.decode('utf-8'),
                'content': parsed_md,
                'date': page_config.get('date', '')
            }
        }
    )
return rv
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_feed(self, feed_data): """ render feed file with data """
# Render the Atom template with the collected feed data and write it
# to public/atom.xml under the current working directory.
atom_feed = self._render_html('atom.xml', feed_data)
feed_path = os.path.join(os.getcwd(), 'public', 'atom.xml')
with codecs.open(feed_path, 'wb', 'utf-8') as f:
    f.write(atom_feed)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _normalize_tz(val): """Normalizes all valid ISO8601 time zone variants to the one python will parse. :val: a timestamp string without a timezone, or with a timezone in one of the ISO8601 accepted formats. """
# _TZ_RE is expected to split the value into a timestamp part and a
# trailing timezone designator -- TODO confirm the pattern's shape.
match = _TZ_RE.match(val)
if match:
    ts, tz = match.groups()
    if len(tz) == 5:
        # If the length of the tz is 5 then it is of the form (+|-)dddd, which is exactly what python
        # wants, so just return it.
        return ts + tz
    if len(tz) == 6:
        # If the length of the tz is 6 then it is of the form (+|-)dd:dd, just remove the colon
        return ts + tz[:3] + tz[4:]
    if tz == "Z" or tz == "z":
        # If the tz is "Z" or 'z', return a timezone of +0000
        return ts + "+0000"
    else:
        # Otherwise, the timzone must be of the format (+|-)dd, in which case we just need to add two
        # "0" to it, and it will be in the proper format.
        return ts + tz + "00"
else:
    # No timezone designator present: return the value unchanged.
    return val
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def node(self, nodeid): """Creates a new node with the specified name, with `MockSocket` instances as incoming and outgoing sockets. Returns the implementation object created for the node from the cls, args and address specified, and the sockets. `cls` must be a callable that takes the insock and outsock, and the specified args and kwargs. """
_assert_valid_nodeid(nodeid) # addr = 'tcp://' + nodeid # insock = MockInSocket(addEndpoints=lambda endpoints: self.bind(addr, insock, endpoints)) # outsock = lambda: MockOutSocket(addr, self) return Node(hub=Hub(nodeid=nodeid))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def do_label(self): """ Create label for x and y axis, title and suptitle """
outputdict = self.outputdict

# X/Y axis labels: template strings formatted with the output values.
xlabel_options = self.kwargs.get("xlabel_options", {})
self.subplot.set_xlabel(
    self.kwargs.get("xlabel", "").format(**outputdict), **xlabel_options)
ylabel_options = self.kwargs.get("ylabel_options", {})
self.subplot.set_ylabel(
    self.kwargs.get("ylabel", "").format(**outputdict), **ylabel_options)

# Figure-level suptitle (optional, default fontsize 15).
suptitle = self.kwargs.get("suptitle", None)
if suptitle is not None:
    suptitle_options = self.kwargs.get("suptitle_options", {})
    self.figure.suptitle(
        suptitle.format(**outputdict),
        fontsize=int(self.kwargs.get("suptitle_fontsize", 15)),
        **suptitle_options)

# Subplot title (optional, default fontsize 12).
title = self.kwargs.get("title", None)
if title is not None:
    title_options = self.kwargs.get("title_options", {})
    self.subplot.set_title(
        title.format(**outputdict),
        fontsize=int(self.kwargs.get("title_fontsize", 12)),
        **title_options)

# Optional explicit axis limits.
xlim = self.kwargs.get("xlim", None)
ylim = self.kwargs.get("ylim", None)
if xlim is not None:
    self.subplot.set_xlim(xlim)
if ylim is not None:
    self.subplot.set_ylim(ylim)

# axis format
# Use scientific notation outside the configured magnitude window.
self.subplot.ticklabel_format(
    style="sci",
    useOffset=False,
    scilimits=self.kwargs.get("scilimits", (-4, 4))
)
# Return self to allow method chaining.
return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extant_item(arg, arg_type): """Determine if parser argument is an existing file or directory. This technique comes from http://stackoverflow.com/a/11541450/95592 and from http://stackoverflow.com/a/11541495/95592 Args: arg: parser argument containing filename to be checked arg_type: string of either "file" or "directory" Returns: If the file exists, return the filename or directory. Raises: If the file does not exist, raise a parser error. """
if arg_type == "file": if not os.path.isfile(arg): raise argparse.ArgumentError( None, "The file {arg} does not exist.".format(arg=arg)) else: # File exists so return the filename return arg elif arg_type == "directory": if not os.path.isdir(arg): raise argparse.ArgumentError( None, "The directory {arg} does not exist.".format(arg=arg)) else: # Directory exists so return the directory name return arg
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_config_input_output(args=sys.argv): """Parse the args using the config_file, input_dir, output_dir pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD """
parser = argparse.ArgumentParser(
    description='Process the input files using the given config')
# Each positional argument is validated at parse time by the
# extant_file/extant_dir type callables, so a missing path fails
# with a parser error rather than later at use time.
parser.add_argument(
    'config_file',
    help='Configuration file.',
    metavar='FILE', type=extant_file)
parser.add_argument(
    'input_dir',
    help='Directory containing the input files.',
    metavar='DIR', type=extant_dir)
parser.add_argument(
    'output_dir',
    help='Directory where the output files should be saved.',
    metavar='DIR', type=extant_dir)
# Skip args[0], the program name.
return parser.parse_args(args[1:])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_config(args=sys.argv): """Parse the args using the config_file pattern Args: args: sys.argv Returns: The populated namespace object from parser.parse_args(). Raises: TBD """
parser = argparse.ArgumentParser(
    description='Read in the config file')
# extant_file validates the path at parse time, so a missing config
# file fails with a parser error.
parser.add_argument(
    'config_file',
    help='Configuration file.',
    metavar='FILE', type=extant_file)
# Skip args[0], the program name.
return parser.parse_args(args[1:])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cache(self, CachableItem): """Updates cache area with latest information """
_cachedItem = self.get(CachableItem)
if not _cachedItem:
    # Not cached yet: map the incoming item and persist it, then
    # notify listeners that a new cache object was created.
    _dirtyCachedItem = self.mapper.get(CachableItem)
    logger.debug("new cachable item added to sql cache area {id: %s, type: %s}", str(_dirtyCachedItem.getId()), str(_dirtyCachedItem.__class__))
    cached_item = self.session.merge(_dirtyCachedItem)
    notify(CacheObjectCreatedEvent(cached_item, self))
    return cached_item
else:
    # Already cached: re-map and persist only if the data changed,
    # notifying listeners of the modification.
    _newCacheItem = self.mapper.get(CachableItem)
    if _cachedItem != _newCacheItem:
        logger.debug("Cachable item modified in sql cache area {id: %s, type: %s}", str(_newCacheItem.getId()), str(_newCacheItem.__class__))
        cached_item = self.session.merge(_newCacheItem)
        notify(CacheObjectModifiedEvent(cached_item, self))
        return cached_item
# Unchanged item: False signals that nothing was updated.
return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_json(item, **kwargs): """ formats a datatype object to a json value """
try:
    # Probe JSON-serializability; if dumps succeeds the raw value is
    # already a valid JSON value and can be returned as-is.
    json.dumps(item.value)
    return item.value
except TypeError:
    # date/datetime/time values are not JSON-serializable directly;
    # fall back to their ISO-8601 string form. Anything else re-raises.
    if 'time' in item.class_type.lower() \
            or 'date' in item.class_type.lower():
        return item.value.isoformat()
    raise
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def format_sparql(item, dt_format='turtle', **kwargs): """ Formats a datatype value to a SPARQL representation args: item: the datatype object dt_format: the return format ['turtle', 'uri'] """
try:
    # NOTE(review): the json.dumps() result is immediately discarded --
    # it appears to serve only as a serializability probe; confirm intent.
    rtn_val = json.dumps(item.value)
    rtn_val = item.value
except:  # NOTE(review): bare except -- consider narrowing to TypeError.
    if 'time' in item.class_type.lower() \
            or 'date' in item.class_type.lower():
        rtn_val = item.value.isoformat()
    else:
        rtn_val = str(item.value)
if hasattr(item, "datatype"):
    if hasattr(item, "lang") and item.lang:
        # Language-tagged literal: "value"@lang
        rtn_val = '%s@%s' % (json.dumps(rtn_val), item.lang)
    else:
        dt = item.datatype
        if dt_format == "uri":
            # Emit the full datatype IRI instead of the prefixed form.
            dt = item.datatype.sparql_uri
        if item.datatype in ["xsd_string", "xsd_dateTime", "xsd_time", "xsd_date"]:
            rtn_val = json.dumps(rtn_val)
        else:
            # NOTE(review): this wraps the json.dumps output in an extra
            # pair of quotes -- verify the expected literal form.
            rtn_val = '"%s"' % json.dumps(rtn_val)
        # Typed literal: "value"^^datatype
        rtn_val = '%s^^%s' % (rtn_val, dt.sparql)
return rtn_val
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _format(self, method="sparql", dt_format="turtle"): """ Rormats the value in various formats args: method: ['sparql', 'json', 'pyuri'] dt_format: ['turtle','uri'] used in conjuction with the 'sparql' method """
try:
    # Dispatch to the formatter registered for *method* in the
    # module-level __FORMAT_OPTIONS__ table.
    return __FORMAT_OPTIONS__[method](self, dt_format=dt_format)
except KeyError:
    raise NotImplementedError("'{}' is not a valid format method"
                              "".format(method))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def bind(self, prefix, namespace, *args, **kwargs): """ Extends the function to add an attribute to the class for each added namespace to allow for use of dot notation. All prefixes are converted to lowercase Args: prefix: string of namespace name namespace: rdflib.namespace instance kwargs: calc: whether or not create the lookup reference dictionaries Example usage: RdfNsManager.rdf.type => http://www.w3.org/1999/02/22-rdf-syntax-ns#type """
# RdfNamespace(prefix, namespace, **kwargs) setattr(self, prefix, RdfNamespace(prefix, namespace, **kwargs)) if kwargs.pop('calc', True): self.__make_dicts__
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def prefix(self, format="sparql"):
    """Generate a string of the RDF namespace declarations in use.

    Args:
        format: output flavor -- "sparql" (default), "turtle"/"ttl",
            or "rdf"/"xml"/"rdf/xml". Matching is case-insensitive.

    Returns:
        str: the namespace declarations rendered in the requested
        format, one declaration per namespace.

    Raises:
        NotImplementedError: if *format* is not a supported value.
    """
    lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
    lg.setLevel(self.log_level)

    # Hoist the case-normalization instead of repeating it per branch;
    # also drop the unused `_return_str` accumulator the old code had.
    fmt = format.lower()
    if fmt == "sparql":
        return "\n".join([ns._sparql_ for ns in self.namespaces])
    elif fmt in ["turtle", "ttl"]:
        return "\n".join([ns._ttl_ for ns in self.namespaces])
    elif fmt in ["rdf", "xml", "rdf/xml"]:
        # RDF/XML declares namespaces as attributes on the root element.
        return "<rdf:RDF %s>" % \
                " ".join([ns._xml_ for ns in self.namespaces])
    else:
        raise NotImplementedError("'%s' is not a valid prefix type."
                                  % format)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, filepath, file_encoding=None): """ Reads the the beginning of a turtle file and sets the prefix's used in that file and sets the prefix attribute Args: filepath: the path to the turtle file file_encoding: specify a specific encoding if necessary """
# Scan only the prefix header of the turtle file: @prefix lines come
# first, so stop at the first substantive (>10 char) non-prefix line.
with open(filepath, encoding=file_encoding) as inf:
    for line in inf:
        current_line = str(line).strip()
        if current_line.startswith("@prefix"):
            self._add_ttl_ns(current_line.replace("\n",""))
        elif len(current_line) > 10:
            break
# Refresh the lookup dictionaries with the newly bound prefixes.
# NOTE(review): accessed without call -- presumably a property.
self.__make_dicts__
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dict_load(self, ns_dict): """ Reads a dictionary of namespaces and binds them to the manager Args: ns_dict: dictionary with the key as the prefix and the value as the uri """
# Bind every prefix/URI pair; defer dictionary recalculation
# (calc=False) until all bindings are registered.
for prefix, uri in ns_dict.items():
    self.bind(prefix, uri, override=False, calc=False)
# NOTE(review): accessed without call -- presumably a property.
self.__make_dicts__
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _add_ttl_ns(self, line): """ takes one prefix line from the turtle file and binds the namespace to the class Args: line: the turtle prefix line string """
lg = logging.getLogger("%s.%s" % (self.ln, inspect.stack()[0][3]))
lg.setLevel(self.log_level)
lg.debug("line:\n%s", line)
line = str(line).strip()
# if the line is not a prefix line exit
# NOTE(review): `line is None` is dead after str().strip() above,
# which always yields a str -- presumably a leftover guard.
if line is None or line == 'none' or line == '' \
        or not line.lower().startswith('@prefix'):
    return
# parse the turtle line
line = line.replace("@prefix","",1).strip()
if line.endswith("."):
    line = line[:-1]
# Split "<prefix>: <uri>" at the first colon.
prefix = line[:line.find(":")].strip()
uri = self.clean_iri(line[line.find(":")+1:].strip())
# add the namespace to the class
lg.debug("\nprefix: %s uri: %s", prefix, uri)
self.bind(prefix, uri, override=False, calc=False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def del_ns(self, namespace): """ will remove a namespace ref from the manager. either Arg is optional. args: namespace: prefix, string or Namespace() to remove """
# remove the item from the namespace dict namespace = str(namespace) attr_name = None if hasattr(self, namespace): delattr(self, namespace)