text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decode_length(self, data, state): """ Extract and decode a frame length from the data buffer. The consumed data should be removed from the buffer. If the length data is incomplete, must raise a ``NoFrames`` exception. :param data: A ``bytearray`` instance containing the data so far read. :param state: An instance of ``FramerState``. If the buffer contains a partial encoded length, this object can be used to store state information to allow the remainder of the length to be read. :returns: The frame length, as an integer. """
def decode_length(self, data, state):
    """Extract and decode a frame length from the data buffer.

    Consumed bytes are removed from ``data``.  Raises ``NoFrames`` when
    the buffer does not yet hold a complete encoded length.

    :param data: A ``bytearray`` of the data read so far.
    :param state: A ``FramerState`` instance for partial-read bookkeeping.

    :returns: The frame length, as an integer.
    """
    size = self.fmt.size
    # Not enough bytes buffered to decode a full length field yet.
    if len(data) < size:
        raise exc.NoFrames()
    # Copy out the fixed-size prefix, drop it from the buffer, decode it.
    raw = six.binary_type(data[:size])
    del data[:size]
    return self.fmt.unpack(raw)[0]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def interpret(self, infile): """ Process a file of rest and return json """
def interpret(self, infile):
    """Process a file of rest and return json."""
    # need row headings
    frame = pandas.read_csv(infile)
    # FIXME find the right foo
    return json.dumps(frame.foo())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def binary_search(data, target, lo=0, hi=None): """ Perform binary search on sorted list data for target. Returns int representing position of target in data, or -1 if target is not present. """
def binary_search(data, target, lo=0, hi=None):
    """Perform binary search on sorted list data for target.

    :param data: sorted list to search
    :param target: value to locate
    :param lo: inclusive lower bound of the search range
    :param hi: exclusive upper bound (defaults to len(data))
    :returns: index of target in data, or -1 if target is absent

    Fixes over the original: an empty list no longer raises IndexError
    (the old guard read data[-1] unconditionally), and single-element
    lists are searched (the old ``hi < 2`` guard rejected them).
    """
    if hi is None:
        hi = len(data)
    # Empty (sub)range, bad bounds, or target outside the value range.
    if lo >= hi or hi > len(data) or target > data[hi - 1] or target < data[lo]:
        return -1
    mid = (lo + hi) // 2
    if data[mid] > target:
        return binary_search(data, target, lo=lo, hi=mid)
    if data[mid] < target:
        return binary_search(data, target, lo=mid + 1, hi=hi)
    return mid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def new(self, user_id, tokens=None, user_data=None, valid_until=None, client_ip=None, encoding='utf-8'): """Creates a new authentication ticket. Args: user_id: User id to store in ticket (stored in plain text) tokens: Optional sequence of token strings to store in the ticket (stored in plain text). user_data: Optional user data to store in the ticket (string like object stored in plain text) valid_until: Expiration time of ticket as a integer (typically time.time() + seconds). client_ip: Optional string or ip_address.IPAddress of the client. encoding: Optional encoding type that is used when hashing the strings passed to the function Returns: A ticket string that can later be used to identify the user """
def new(self, user_id, tokens=None, user_data=None, valid_until=None,
        client_ip=None, encoding='utf-8'):
    """Create a new authentication ticket.

    Args:
        user_id: User id to store in the ticket (plain text).
        tokens: Optional sequence of token strings (plain text).
        user_data: Optional user data string (plain text).
        valid_until: Expiration time as an integer
            (typically time.time() + seconds).
        client_ip: Optional string or ip_address.IPAddress of the client.
        encoding: Encoding used when hashing the supplied strings.

    Returns:
        A ticket string that can later be used to identify the user.
    """
    if valid_until is None:
        valid_until = int(time.time()) + TicketFactory._DEFAULT_TIMEOUT
    else:
        valid_until = int(valid_until)
    # Quote the user id so it cannot contain the '!' field separator.
    user_id = ulp.quote(user_id)
    # Comma-separated list of individually quoted tokens.
    token_str = ','.join(ulp.quote(t) for t in tokens) if tokens else ''
    user_str = ulp.quote(user_data) if user_data else ''
    ip = self._DEFAULT_IP if client_ip is None else ip_address(client_ip)
    # Digest covers the client address, the expiry and all text fields.
    data0 = bytes([ip.version]) + ip.packed + pack(">I", valid_until)
    data1 = '\0'.join((user_id, token_str, user_str)).encode(encoding)
    digest = self._hexdigest(data0, data1)
    # digest + timestamp as eight hexadecimal characters + user id.
    head = '{0}{1:08x}{2}'.format(digest, valid_until, user_id)
    return '!'.join((head, token_str, user_str))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def validate(self, ticket, client_ip=None, now=None, encoding='utf-8'): """Validates the passed ticket, , raises a TicketError on failure Args: ticket: String value (possibly generated by new function) client_ip: Optional IPAddress of client, should be passed if the ip address was passed on ticket creation. now: Optional (defaults to time.time()) time to use when validating ticket date Returns: Ticket a TicketInfo tuple containing the users authentication details on success Raises: TicketParseError: Invalid ticket format TicketDigestError: Digest is incorrect (ticket data was modified) TicketExpired: Ticket has passed expiration date """
def validate(self, ticket, client_ip=None, now=None, encoding='utf-8'):
    """Validate the passed ticket, raising a TicketError on failure.

    Args:
        ticket: Ticket string (as produced by ``new``).
        client_ip: Optional client IP; pass it if one was used at creation.
        now: Optional time (defaults to time.time()) for expiry checking.

    Returns:
        A TicketInfo tuple with the user's authentication details.

    Raises:
        TicketParseError: invalid ticket format.
        TicketDigestError: digest mismatch (ticket data was modified).
        TicketExpired: ticket has passed its expiration date.
    """
    parts = self.parse(ticket)
    # Recompute a ticket from the parsed fields; the digests must match.
    expected = self.new(*parts[1:], client_ip=client_ip, encoding=encoding)
    if expected[:self._hash.digest_size * 2] != parts.digest:
        raise TicketDigestError(ticket)
    now = time.time() if now is None else now
    if parts.valid_until <= now:
        raise TicketExpired(ticket)
    return parts
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse(self, ticket): """Parses the passed ticket, returning a tuple containing the digest, user_id, valid_until, tokens, and user_data fields """
def parse(self, ticket):
    """Parse the passed ticket.

    Returns a TicketInfo tuple containing the digest, user_id, tokens,
    user_data and valid_until fields.

    Raises:
        TicketParseError: when the ticket is malformed.

    Fixes over the original: the bare ``except:`` (which also swallowed
    KeyboardInterrupt/SystemExit) is narrowed to ValueError, and the
    local previously named ``time`` no longer shadows the time module.
    """
    if len(ticket) < self._min_ticket_size():
        raise TicketParseError(ticket, 'Invalid ticket length')
    digest_len = self._hash.digest_size * 2
    digest = ticket[:digest_len]
    time_len = 8
    try:
        # Expiry is an 8-character hexadecimal timestamp after the digest.
        valid_until = int(ticket[digest_len:digest_len + time_len], 16)
    except ValueError:
        raise TicketParseError(ticket, 'Invalid time field')
    parts = ticket[digest_len + time_len:].split('!')
    if len(parts) != 3:
        raise TicketParseError(ticket, 'Missing parts')
    user_id = ulp.unquote(parts[0])
    tokens = ()
    if parts[1]:
        tokens = tuple(ulp.unquote(t) for t in parts[1].split(','))
    user_data = ulp.unquote(parts[2])
    return TicketInfo(digest, user_id, tokens, user_data, valid_until)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def random_word(self, *args, **kwargs): """ Return a random word from this tree. The length of the word depends on the this tree. :return: a random word from this tree. args and kwargs are ignored. """
word = "" current = (">", 0) while current[0] != "<": choices = self[current] choice = random_weighted_choice(choices) current = choice word += current[0][-1] return word[:-1]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def buy(self): """ Attempts to purchase a user shop item, returns result Uses the associated user and buyURL to attempt to purchase the user shop item. Returns whether or not the item was successfully bought. Returns bool - True if successful, false otherwise """
def buy(self):
    """Attempt to purchase this user shop item; return the result.

    Uses the associated user and buyURL to attempt the purchase.

    Returns bool - True if successful, False otherwise.
    """
    referer = 'http://www.neopets.com/browseshop.phtml?owner=' + self.owner
    pg = self.usr.getPage("http://www.neopets.com/" + self.buyURL,
                          vars={'Referer': referer})
    # A successful purchase redirects back to the shop page.
    if "(owned by" in pg.content:
        return True
    if "does not exist in this shop" in pg.content:
        return False
    logging.getLogger("neolib.item").exception(
        "Unknown message when attempting to buy user shop item.", {'pg': pg})
    return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fetch(self, cache=None): """Query the info page to fill in the property cache. Return a dictionary with the fetched properties and values. """
def fetch(self, cache=None):
    """Query the info page to fill in the property cache.

    Return a dictionary with the fetched properties and values.
    """
    self.reset()
    details = get(self.url).soup.find(id="detailsframe")

    def dd_for(label):
        # First <dd> sibling after the <dt> whose text matches label.
        dt = details.find("dt", text=re.compile(label))
        return [el for el in dt.next_siblings if el.name == 'dd'][0]

    def text_for(label):
        return dd_for(label).string.strip()

    info = {
        "title": details.find(id="title").string.strip(),
        "type": text_for("Type:"),
        "files": text_for("Files:"),
        "size": text_for("Size:"),
        "uploaded": text_for("Uploaded:"),
        "submitter": dd_for("By:").parent.find(
            "a", href=re.compile("user")).string.strip(),
        "seeders": text_for("Seeders:"),
        "leechers": text_for("Leechers:"),
        "comments": details.find(id="NumComments").string.strip(),
        "link": details.find("a", href=re.compile("^magnet\:"))['href'].strip(),
    }
    if self._use_cache(cache):
        self._attrs = info
        self._fetched = True
    return info
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, item, cache=None): """Lookup a torrent info property. If cache is True, check the cache first. If the cache is empty, then fetch torrent info before returning it. """
def get(self, item, cache=None):
    """Look up a torrent info property.

    If cache is True, check the cache first; on a miss, fetch the
    torrent info before returning the value.
    """
    if item not in self._keys:
        raise KeyError(item)
    cached = self._use_cache(cache) and (self._fetched or item in self._attrs)
    if cached:
        return self._attrs[item]
    # Cache miss (or caching disabled): fetch fresh info.
    return self.fetch(cache=cache)[item]
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def as_dict(self, cache=None, fetch=True): """Return torrent properties as a dictionary. Set the cache flag to False to disable the cache. On the other hand, set the fetch flag to False to avoid fetching data if it's not cached. """
def as_dict(self, cache=None, fetch=True):
    """Return torrent properties as a dictionary.

    Set cache to False to disable the cache; set fetch to False to
    avoid fetching data when it is not cached.
    """
    if not self._fetched and fetch:
        info = self.fetch(cache)
    else:
        # Serve from cache when allowed, otherwise start empty.
        info = self._attrs.copy() if self._use_cache(cache) else {}
    info.update(url=self.url)
    return info
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def user_filter(config, message, fasnick=None, *args, **kw): """ A particular user Use this rule to include messages that are associated with a specific user. """
def user_filter(config, message, fasnick=None, *args, **kw):
    """A particular user.

    Include messages that are associated with a specific user.

    Returns an explicit False (instead of the original implicit None)
    when no user is configured, matching not_user_filter's behavior.
    """
    fasnick = kw.get('fasnick', fasnick)
    if not fasnick:
        # No user specified: the filter cannot match anything.
        return False
    return fasnick in fmn.rules.utils.msg2usernames(message, **config)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def not_user_filter(config, message, fasnick=None, *args, **kw): """ Everything except a particular user Use this rule to exclude messages that are associated with one or more users. Specify several users by separating them with a comma ','. """
def not_user_filter(config, message, fasnick=None, *args, **kw):
    """Everything except a particular user.

    Exclude messages associated with one or more users.  Specify
    several users by separating them with a comma ','.
    """
    fasnick = kw.get('fasnick', fasnick)
    if not fasnick:
        return False
    # fasnick is truthy here, so the original `(fasnick or []) and`
    # dance reduces to a plain split.
    for nick in fasnick.split(','):
        if nick.strip() in fmn.rules.utils.msg2usernames(message, **config):
            return False
    return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_users_of_group(config, group): """ Utility to query fas for users of a group. """
def _get_users_of_group(config, group):
    """Utility to query fas for users of a group."""
    if not group:
        return set()
    # Delegate to the shared FAS helper, reusing a cached connection.
    fas = fmn.rules.utils.get_fas(config)
    return fmn.rules.utils.get_user_of_group(config, fas, group)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fas_group_member_filter(config, message, group=None, *args, **kw): """ Messages regarding any member of a FAS group Use this rule to include messages that have anything to do with **any user** belonging to a particular fas group. You might want to use this to monitor the activity of a group for which you are responsible. """
def fas_group_member_filter(config, message, group=None, *args, **kw):
    """Messages regarding any member of a FAS group.

    Include messages that involve **any user** belonging to a
    particular fas group, e.g. to monitor a group's activity.
    """
    if not group:
        return False
    members = _get_users_of_group(config, group)
    involved = fmn.rules.utils.msg2usernames(message, **config)
    # Match when any message participant belongs to the group.
    return bool(members.intersection(involved))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def package_filter(config, message, package=None, *args, **kw): """ A particular package Use this rule to include messages that relate to a certain package (*i.e., nethack*). """
def package_filter(config, message, package=None, *args, **kw):
    """A particular package.

    Include messages that relate to a certain package (*i.e., nethack*).

    Returns an explicit False (instead of the original implicit None)
    when no package is configured.
    """
    package = kw.get('package', package)
    if not package:
        return False
    return package in fmn.rules.utils.msg2packages(message, **config)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def package_regex_filter(config, message, pattern=None, *args, **kw): """ All packages matching a regular expression Use this rule to include messages that relate to packages that match particular regular expressions (*i.e., (maven|javapackages-tools|maven-surefire)*). """
def package_regex_filter(config, message, pattern=None, *args, **kw):
    """All packages matching a regular expression.

    Include messages relating to packages that match the given regular
    expression (*i.e., (maven|javapackages-tools|maven-surefire)*).

    Returns an explicit False (instead of the original implicit None)
    when no pattern is configured.
    """
    pattern = kw.get('pattern', pattern)
    if not pattern:
        return False
    packages = fmn.rules.utils.msg2packages(message, **config)
    # Patterns and names are matched as bytes for py2/py3 parity.
    regex = fmn.rules.utils.compile_regex(pattern.encode('utf-8'))
    # Generator (not list) so any() can short-circuit on first match.
    return any(regex.search(p.encode('utf-8')) for p in packages)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def regex_filter(config, message, pattern=None, *args, **kw): """ All messages matching a regular expression Use this rule to include messages that bear a certain pattern. This can be anything that appears anywhere in the message (for instance, you could combine this with rules for wiki updates or Ask Fedora changes to alert yourself of activity in your area of expertise). (*i.e., (beefy miracle)*). """
def regex_filter(config, message, pattern=None, *args, **kw):
    """All messages matching a regular expression.

    Include messages that bear a certain pattern anywhere in the
    message body (*i.e., (beefy miracle)*).

    Returns an explicit False (instead of the original implicit None)
    when no pattern is configured.
    """
    pattern = kw.get('pattern', pattern)
    if not pattern:
        return False
    regex = fmn.rules.utils.compile_regex(pattern.encode('utf-8'))
    # Search the serialized message body for the pattern.
    return bool(regex.search(
        fedmsg.encoding.dumps(message['msg']).encode('utf-8')
    ))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_option(self, opt_name, otype, hidden=False): """ Add an option to the object :param opt_name: option name :type opt_name: str :param otype: option type :type otype: subclass of :class:`.GenericType` :param hidden: if True the option will be hidden :type hidden: bool """
def add_option(self, opt_name, otype, hidden=False):
    """Add an option to the object.

    :param opt_name: option name
    :type opt_name: str
    :param otype: option type
    :type otype: subclass of :class:`.GenericType`
    :param hidden: if True the option will be hidden
    :type hidden: bool
    """
    if self.has_option(opt_name):
        raise ValueError("The option is already present !")
    option = ValueOption.FromType(opt_name, otype)
    option.hidden = hidden
    self._options[opt_name] = option
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def print_options(self): """ print description of the component options """
def print_options(self):
    """Print a description of the component options."""
    # Only the option values are needed; hidden options are skipped.
    lines = [opt.summary() for opt in self.options.values() if not opt.hidden]
    print("\n".join(lines))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def loadtxt(fn, **kwargs): """Study the text data file fn. Call numpys loadtxt with keyword arguments based on the study. Return data returned from numpy `loadtxt <http://docs.scipy.org/doc/numpy/reference/generated/numpy.loadtxt.html#numpy-loadtxt>`. kwargs: keyword arguments accepted by numpys loadtxt. Any keyword arguments provided will take precedence over the ones resulting from the study. Set the module attribute PP to the instance of PatternPull used. """
def loadtxt(fn, **kwargs):
    """Study the text data file fn and delegate to numpy.loadtxt.

    Keyword arguments derived from the study are used, but any
    explicitly provided kwargs take precedence.  Sets the module
    attribute PP to the PatternPull instance used.
    """
    global PP
    PP = PatternPull(fn)
    txtargs = PP.loadtxtargs()
    txtargs.update(kwargs)  # Explicit kwargs dominate the studied ones.
    return np.loadtxt(fn, **txtargs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def loadtxt_asdict(fn, **kwargs): """Return what is returned from loadtxt as a dict. The 'unpack' keyword is enforced to True. The keys in the dict is the column numbers loaded. It is the
def loadtxt_asdict(fn, **kwargs):
    """Return what loadtxt returns, as a dict keyed by column number.

    The 'unpack' keyword is enforced to True.  Keys are the loaded
    column numbers (usecols when given, else positional indices).
    """
    kwargs.update(unpack=True)
    data = loadtxt(fn, **kwargs)
    ndim = len(np.shape(data))
    if ndim == 2:
        keys = kwargs.get('usecols', None) or range(len(data))
        return dict(zip(keys, data))
    if ndim == 1:
        keys = kwargs.get('usecols', None) or [0]
        return {keys[0]: data}
    raise Exception('Unknown dimension of loaded data.')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def file_rows(self, fo): """Return the lines in the file as a list. fo is the open file object."""
def file_rows(self, fo):
    """Return up to NUMROWS lines of the open file fo as a list."""
    rows = []
    while len(rows) < NUMROWS:
        line = fo.readline()
        if not line:
            # EOF reached before NUMROWS lines were read.
            break
        rows.append(line)
    return rows
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def count_matches(self): """Set the matches_p, matches_c and rows attributes."""
def count_matches(self):
    """Set the matches_p, matches_c and rows attributes."""
    try:
        # Prefer an already-open file object when one is available.
        self.fn = self.fo.name
        rows = self.file_rows(self.fo)
        self.fo.seek(0)
    except AttributeError:
        with open(self.fn) as fo:
            rows = self.file_rows(fo)
    # Per-row counts of decimal-point vs decimal-comma number matches.
    self.matches_p = [len(re.findall(DATPRX, row)) for row in rows]
    self.matches_c = [len(re.findall(DATCRX, row)) for row in rows]
    self.rows = rows  # Is newlines in the end a problem?
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def rows2skip(self, decdel): """ Return the number of rows to skip based on the decimal delimiter decdel. When each record start to have the same number of matches, this is where the data starts. This is the idea. And the number of consecutive records to have the same number of matches is to be EQUAL_CNT_REQ. """
def rows2skip(self, decdel):
    """Return the number of rows to skip for decimal delimiter decdel.

    Data is taken to start where EQUAL_CNT_REQ consecutive records have
    the same non-zero number of numeric matches.

    :raises ValueError: decdel is neither '.' nor ',' (the original
        left this branch as a "# else make error..." comment, which
        produced a NameError instead).
    """
    if decdel == '.':
        ms = self.matches_p
    elif decdel == ',':
        ms = self.matches_c
    else:
        raise ValueError('decdel must be "." or ",", got %r' % (decdel,))
    cnt = row = 0
    for val1, val2 in zip(ms, ms[1:]):  # val2 is one element ahead.
        row += 1
        if val2 == val1 != 0:  # 0 is no matches, so it doesn't count.
            cnt += 1
        else:
            cnt = 0
        if cnt == EQUAL_CNT_REQ:
            break
    self.cnt = cnt
    return row - EQUAL_CNT_REQ
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_decdel_rts(self): """Figure out the decimal seperator and rows to skip and set corresponding attributes. """
def set_decdel_rts(self):
    """Figure out the decimal separator and rows to skip.

    Sets the decdel, datrx and rts attributes.

    :raises PatternError: when EQUAL_CNT_REQ equal-pattern data rows
        could not be found.
    """
    lnr = max(self.rows2skip(','), self.rows2skip('.')) + 1
    # If EQUAL_CNT_REQ was not met, raise an error.
    if self.cnt > EQUAL_CNT_REQ:
        raise PatternError('Did not find ' + str(EQUAL_CNT_REQ) +
                           ' data rows with equal data pattern in file: ' +
                           self.fn)
    elif self.cnt < EQUAL_CNT_REQ:  # Too few rows
        # Build one message string; the original passed stray
        # comma-separated arguments, producing a tuple-ish message.
        raise PatternError('Less than ' + str(EQUAL_CNT_REQ) +
                           ' data rows in ' + self.fn + '?' +
                           '\nTry lower the EQUAL_CNT_REQ')
    if self.matches_p[lnr] <= self.matches_c[lnr]:
        self.decdel = '.'  # If equal, assume decimal point is used.
        self.datrx = DATPRX
    else:
        self.decdel = ','  # Assume the lesser count is correct.
        self.datrx = DATCRX
    self.rts = self.rows2skip(self.decdel)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def study_datdel(self): """Figure out the data delimiter."""
def study_datdel(self):
    """Figure out the data delimiter."""
    nodigs = r'(\D+)'
    line = self.rows[self.rts + 1]  # Study second line of data only.
    digs = re.findall(self.datrx, line)
    # Escape any '+' signs inside the numbers before reusing them
    # inside a regular expression pattern.
    digs = [dig.replace('+', r'\+') for dig in digs]
    pat = nodigs.join(digs)
    m = re.search(pat, line)
    groups = m.groups()
    # With a single datum on the row there are no delimiter groups.
    if not groups:
        self.datdelgroups = groups
        return  # self.datdel remain None.
    rpt_cnt = groups.count(groups[0])
    if rpt_cnt != len(groups):
        self.warnings.append('Warning, data seperator not consistent.')
    if groups[0].strip():
        # A delimiter apart from white space: hand it to numpy loadtxt.
        self.datdel = groups[0].strip()
    elif groups[0] == '\t':
        # Specifically a tab as delimiter: use it verbatim.
        self.datdel = groups[0]
    # For other white space delimiters, let datdel remain None.
    # Work-around for the event that numbers clutter the channel names
    # and rts is one number low:
    res = [dat.strip() for dat in self.rows[self.rts].split(self.datdel)
           if dat.strip()]
    if not all(re.match(self.datrx, dat) for dat in res):
        self.rts += 1
    # Keep the groups for debug:
    self.datdelgroups = groups
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def channel_names(self, usecols=None): """Attempt to extract the channel names from the data file. Return a list with names. Return None on failed attempt. usecols: A list with columns to use. If present, the returned list will include only names for columns requested. It will align with the columns returned by numpys loadtxt by using the same keyword (usecols). """
def channel_names(self, usecols=None):
    """Attempt to extract the channel names from the data file.

    Return a list with names, or None on a failed attempt.

    usecols: optional list of columns to use; the returned names align
    with the columns numpy.loadtxt returns for the same keyword.
    """
    # Number of data fields on the first data row, per decimal delimiter.
    if self.decdel == '.':
        datcnt = self.matches_p[self.rts]
    elif self.decdel == ',':
        datcnt = self.matches_c[self.rts]
    if usecols and max(usecols) >= datcnt:
        mess = ' Max column index is '
        raise IndexError(str(usecols) + mess + str(datcnt - 1))
    names = None
    if not self.rts:  # Only data.
        return None
    # Scan upwards from the last row before the data: a row whose split
    # yields datcnt alpha-leading fields is accepted as the names.
    for row in self.rows[self.rts - 1::-1]:
        fields = row.split(self.datdel)  # datdel may be None (whitespace).
        for i, word in enumerate(fields):
            if not word.strip().startswith(ALPHAS):
                break
            elif i + 1 == datcnt:  # Accept
                names = [ch.strip() for ch in fields[:datcnt]]
                break
        if names:
            break
    if usecols:
        names = [names[i] for i in sorted(usecols)]
    return names
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register(self, cls, instance): """ Register the given instance as implementation for a class interface """
def register(self, cls, instance):
    """Register the given instance as implementation for a class interface.

    :raises TypeError: cls is not a DropletInterface subclass, or the
        instance does not implement it.
    :raises ValueError: instance.name is already registered by another
        instance.
    """
    if not issubclass(cls, DropletInterface):
        # Name the interface actually being checked (the original
        # message referred to an unrelated "NAZInterface").
        raise TypeError('Given class is not a DropletInterface '
                        'subclass: %s' % cls)
    if not isinstance(instance, cls):
        raise TypeError('Given instance does not implement '
                        'the class: %s' % instance)
    if instance.name in self.INSTANCES_BY_NAME:
        if self.INSTANCES_BY_NAME[instance.name] != instance:
            raise ValueError('Given name is registered '
                             'by other instance: %s' % instance.name)
    self.INSTANCES_BY_INTERFACE[cls].add(instance)
    self.INSTANCES_BY_NAME[instance.name] = instance
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def stream_tap(callables, stream): """ Calls each callable with each item in the stream. Use with Buckets. Make a Bucket with a callable and then pass a tuple of those buckets in as the callables. After iterating over this generator, get contents from each Spigot. :param callables: collection of callable. :param stream: Iterator if values. """
def stream_tap(callables, stream):
    """Yield every item of stream, first passing it to each callable.

    Use with Buckets: wrap callables in Buckets, pass them here, iterate
    this generator, then read the contents from each Spigot.

    :param callables: collection of callables.
    :param stream: iterator of values.
    """
    for element in stream:
        for tap in callables:
            tap(element)
        yield element
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def selected(self, interrupt=False): """This object has been selected."""
def selected(self, interrupt=False):
    """This object has been selected."""
    # Announce the title through the secondary audio output.
    title = self.get_title()
    self.ao2.output(title, interrupt=interrupt)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def header(self, k, v, replace=True): """ Sets header value. Replaces existing value if `replace` is True. Otherwise create a list of existing values and `v` :param k: Header key :param v: Header value :param replace: flag for setting mode. :type k: str :type v: str :type replace: bool """
def header(self, k, v, replace=True):
    """Set header value.

    Replaces the existing value when `replace` is True; otherwise
    appends `v` to the list of existing values.  Returns self for
    chaining.

    :param k: Header key
    :param v: Header value
    :param replace: flag for setting mode.
    :type k: str
    :type v: str
    :type replace: bool
    """
    if replace:
        self._headers[k] = [v]
        return self
    self._headers.setdefault(k, []).append(v)
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def cookie(self, k, v, expires=None, domain=None, path='/', secure=False): """ Sets cookie value. :param k: Name for cookie value :param v: Cookie value :param expires: Cookie expiration date :param domain: Cookie domain :param path: Cookie path :param secure: Flag for `https only` :type k: str :type v: str :type expires: datetime.datetime :type domain: str :type path: str :type secure: bool """
def cookie(self, k, v, expires=None, domain=None, path='/', secure=False):
    """Set a cookie value via the Set-Cookie header.

    :param k: Name for cookie value
    :param v: Cookie value
    :param expires: Cookie expiration date (datetime.datetime)
    :param domain: Cookie domain
    :param path: Cookie path
    :param secure: Flag for `https only`
    """
    attrs = ['{}={}'.format(k, v)]
    if expires is not None:
        stamp = format_date_time(mktime(expires.timetuple()))
        attrs.append('expires={}'.format(stamp))
    if domain is not None:
        attrs.append('domain={}'.format(domain))
    if path is not None:
        attrs.append('path={}'.format(path))
    if secure:
        attrs.append('secure')
    # Multiple cookies are allowed, so never replace Set-Cookie.
    return self.header('Set-Cookie', '; '.join(attrs), False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def decorate_all_methods(decorator): """ Build and return a decorator that will decorate all class members. This will apply the passed decorator to all of the methods in the decorated class, except the __init__ method, when a class is decorated with it. """
def decorate_all_methods(decorator):
    """Build and return a decorator that decorates all class methods.

    Applies `decorator` to every method of the decorated class except
    __init__.

    Fix: on Python 3, class members are plain functions and
    inspect.ismethod matches none of them, making the original a
    silent no-op; functions are now matched as well (bound/class
    methods still are, preserving Python 2 behavior).
    """
    def decorate_class(cls):
        def _is_method(obj):
            return inspect.isfunction(obj) or inspect.ismethod(obj)
        for name, member in inspect.getmembers(cls, _is_method):
            if name != "__init__":
                setattr(cls, name, decorator(member))
        return cls
    return decorate_class
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def just_in_time_method(func): """ This is a dcorator for methods. It redirect calls to the decorated method to the equivalent method in a class member called 'item'. 'item' is expected to be None when the class is instantiated. The point is to easily allow on-demand construction or loading of large, expensive objects just-in-time. To apply this decorator to a method in a class, the class must have the following instance variables: | Name | Description | +=========+================================================================+ | item | The wrapped object. Calls to this method will be redirected | | | to the method with the same name in item. This should be set | | | to None when the object is created; it will be loaded | | | on-demand the first time this method is called. | | factory | An object from which 'item' can be loaded. Can be a factory, | | | or similar, but must provide the subscript operator, as this | | | is used to pass a key that uniquely identifies 'item' | | key | any object that uniquely identifies 'item'; must be what is | | | expected by 'index' as argument for the subscript operator. | A common pattern is to create a new class B which inherits from A, implement the above requirements in B and then apply this decorator to all the methods inherited from A. If 'item' is an object of type A, then this pattern makes B behave exactly like A, but with just-in-time construction. """
def just_in_time_method(func):
    """Decorator redirecting method calls to self.item, loading it lazily.

    Calls to the decorated method are forwarded to the method of the
    same name on ``self.item``.  On first call, ``self.item`` is loaded
    from ``self.factory[self.key]``; subsequent calls reuse it.  See
    the class docstring for the required instance attributes (item,
    factory, key).
    """
    # The original guard `if not inspect.ismethod:` tested the truth
    # of the function *object* and could never fire; validate that the
    # argument is callable instead.
    if not callable(func):
        raise MetaError("oops")

    def wrapper(self, *args, **kwargs):
        if self.item is None:
            # Load the expensive object on first use.
            self.item = self.factory[self.key]
        return getattr(self.item, func.__name__)(*args, **kwargs)
    return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def AddAccelerator(self, modifiers, key, action): """ Add an accelerator. Modifiers and key follow the same pattern as the list used to create wx.AcceleratorTable objects. """
def AddAccelerator(self, modifiers, key, action):
    """Add an accelerator and return its generated menu id.

    Modifiers and key follow the same pattern as the list used to
    create wx.AcceleratorTable objects.
    """
    accel_id = wx.NewId()
    # Route the accelerator through a menu event to the given action.
    self.Bind(wx.EVT_MENU, action, id=accel_id)
    self.RawAcceleratorTable.append((modifiers, key, accel_id))
    self.SetAcceleratorTable(wx.AcceleratorTable(self.RawAcceleratorTable))
    return accel_id
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def parse_config(lines, module=None): """Parse a config file. Names referenced within the config file are found within the calling scope. For example:: would find the ``bar`` method of the ``foo`` class, because ``foo`` is in the same scope as the call to parse_config. :param lines: An iterable of configuration lines (an open file object will do). :param module: Optional. If provided and not None, look for referenced names within this object instead of the calling module. """
def parse_config(lines, module=None):
    """Parse a config file into a PathRouter.

    Names referenced within the config file are found within the
    calling scope unless `module` is provided.

    :param lines: iterable of configuration lines (an open file works).
    :param module: optional object to resolve referenced names on,
        instead of the calling module.
    :raises SyntaxError: on an unexpected indent.
    """
    if module is None:
        module = _calling_scope(2)
    lines = IndentChecker(lines)
    path_router = PathRouter()
    for depth, line in lines:
        if depth > 0:
            raise SyntaxError('unexpected indent')
        name, path, types = parse_path_spec(line)
        if types:
            # .items() works on both Python 2 and 3; the original
            # iteritems() is Python-2-only.
            template_arg = (path, dict(
                (k, find_object(module, v)) for k, v in types.items()
            ))
        else:
            template_arg = path
        handler = read_handler_block(lines, module)
        path_router.add(name, template_arg, handler)
    return path_router
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_config(name='urls.conf'): """Load a config from a resource file. The resource is found using `pkg_resources.resource_stream()`_, relative to the calling module. See :func:`parse_config` for config file details. :param name: The name of the resource, relative to the calling module. .. _pkg_resources.resource_stream(): http://packages.python.org/distribute/pkg_resources.html#basic-resource-access """
module = _calling_scope(2) config = resource_stream(module.__name__, name) return parse_config(config, module)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_placeholder(self, name=None, db_type=None): """Returns a placeholder for the specified name, by applying the instance's format strings. :name: if None an unamed placeholder is returned, otherwise a named placeholder is returned. :db_type: if not None the placeholder is typecast. """
if name is None: placeholder = self.unnamed_placeholder else: placeholder = self.named_placeholder.format(name) if db_type: return self.typecast(placeholder, db_type) else: return placeholder
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_tuple(self, iterable, surround="()", joiner=", "): """Returns the iterable as a SQL tuple."""
return "{0}{1}{2}".format(surround[0], joiner.join(iterable), surround[1])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def to_expression(self, lhs, rhs, op): """Builds a binary sql expression. At its most basic, returns 'lhs op rhs' such as '5 + 3'. However, it also specially handles the 'in' and 'between' operators. For each of these operators it is expected that rhs will be iterable. If the comparison operator is of the form 'not(op)' where op is the operator, it will result in not (lhs op rhs). This allows for doing the full range of null checks on composite types. For composite types, 'is null' only returns true when all fields are null, and 'is not null' returns true only when all fields are not null. So, for a composite type with some null fields, 'is null' and 'is not null' will both return false, making it difficult to get all rows that have composite columns with some value in them. The solution to this is to use not (composite is null) which is true when all composite fields, or only some composite fields are not null. """
if op == "raw": # TODO: This is not documented return lhs elif op == "between": return "{0} between {1} and {2}".format(lhs, *rhs) elif op == "in": return "{0} in {1}".format(lhs, self.to_tuple(rhs)) elif op.startswith("not(") and op.endswith(")"): return "not ({0} {1} {2})".format(lhs, op[4:-1].strip(), rhs) else: return "{0} {1} {2}".format(lhs, op.strip(), rhs)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def value_comparisons(self, values, comp="=", is_assignment=False): """Builds out a series of value comparisions. :values: can either be a dictionary, in which case the return will compare a name to a named placeholder, using the comp argument. I.E. values = {"first_name": "John", "last_name": "Smith"} will return ["first_name = %(first_name)s", "last_name = %(last_name)s"]. Otherwise values will be assumed to be an iterable of 2- or 3-tuples in the form (column, value[, operator]). When operator is not specified, it will fallback to comp. So for instance values = [("first_name", "John"), ("id", (10, 100), "between")] will return ["first_name = %s", "id between %s and %s "]. :is_assigment: if False, transform_op will be called on each operator. """
if isinstance(values, dict):
    # Dict mode: compare each column to its named placeholder.
    if self.sort_columns:
        keys = sorted(values.keys())
    else:
        keys = list(values.keys())
    params = zip(keys, [self.to_placeholder(k) for k in keys])
    # For filters (not assignments) the operator may be transformed per
    # value, e.g. to turn "= None" into "is null".
    return [
        self.to_expression(
            i[0], i[1],
            comp if is_assignment else self.transform_op(comp, values[i[0]]))
        for i in params]
else:
    # Tuple mode: each entry is (column, value[, operator]).
    if self.sort_columns:
        values = sorted(values, key=operator.itemgetter(0))
    comps = []
    for val in values:
        lhs = val[0]
        op = val[2] if len(val) == 3 else comp
        # Multi-valued operators need one unnamed placeholder per value;
        # "raw" needs none at all.
        if op == "raw":
            rhs = None
        elif op == "between":
            rhs = (self.to_placeholder(), self.to_placeholder())
        elif op == "in":
            rhs = [self.to_placeholder() for i in val[1]]
        else:
            rhs = self.to_placeholder()
        if not is_assignment:
            op = self.transform_op(op, val[1])
        comps.append(self.to_expression(lhs, rhs, op))
    return comps
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def join_comparisons(self, values, joiner, *, is_assignment=False, comp="="): """Generates comparisons with the value_comparisions method, and joins them with joiner. :is_assignment: if false, transform_op will be called on each comparison operator. """
if isinstance(values, str): return values else: return joiner.join(self.value_comparisons(values, comp, is_assignment))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_params(self, values): """Gets params to be passed to execute from values. :values: can either be a dict, in which case it will be returned as is, or can be an enumerable of 2- or 3-tuples. This will return an enumerable of the 2nd values, and in the case of some operators such as 'in' and 'between' the values will be specially handled. """
if values is None: return None elif isinstance(values, dict): return values elif isinstance(values, (list, tuple)): params = [] for val in values: if len(val) == 2: params.append(val[1]) else: if val[2] in ("in", "between"): params.extend(val[1]) else: params.append(val[1]) return params elif isinstance(values, str): return None else: raise TypeError( "values must be None, a dict, list or tuple, is {0}".format(type(values).__name__))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_find_all_query(self, table_name, constraints=None, *, columns=None, order_by=None, limiting=(None, None)): """Builds a find query. :limiting: if present must be a 2-tuple of (limit, offset) either of which can be None. """
where, params = self.parse_constraints(constraints) if columns: if isinstance(columns, str): pass else: columns = ", ".join(columns) else: columns = "*" if order_by: order = " order by {0}".format(order_by) else: order = "" paging = "" if limiting is not None: limit, offset = limiting if limit is not None: paging += " limit {0}".format(limit) if offset is not None: paging += " offset {0}".format(offset) return ("select {0} from {1} where {2}{3}{4}".format( columns, table_name, where or "1 = 1", order, paging ), params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def pretty_print(rows, keyword, domain): """ rows is list when get domains dict when get specific domain """
# Dispatch on the result shape: a list is a collection of zones, a
# dict describes a single domain.
if isinstance(rows, list):
    pretty_print_zones(rows)
elif isinstance(rows, dict):
    pretty_print_domain(rows, keyword, domain)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_data(self, data, datatype="ttl", namespace=None, graph=None, is_file=False, **kwargs): """ Loads data via file stream from python to triplestore Args: ----- data: The data or filepath to load datatype(['ttl', 'xml', 'rdf']): the type of data to load namespace: the namespace to use graph: the graph to load the data to. is_file(False): If true python will read the data argument as a filepath, determine the datatype from the file extension, read the file and send it to blazegraph as a datastream """
log.setLevel(kwargs.get("log_level", self.log_level)) time_start = datetime.datetime.now() datatype_map = { 'ttl': 'text/turtle', 'xml': 'application/rdf+xml', 'rdf': 'application/rdf+xml', 'nt': 'text/plain' } if is_file: datatype = data.split(os.path.extsep)[-1] file_name = data log.debug('starting data load of %s', file_name) data = open(data, 'rb').read() else: try: data = data.encode('utf-8') except AttributeError: # data already encoded pass try: content_type = datatype_map[datatype] except KeyError: raise NotImplementedError("'%s' is not an implemented data format", datatype) context_uri = pick(graph, self.graph) result = requests.post(url=self._make_url(namespace), headers={"Content-Type": content_type}, params={"context-uri": context_uri}, data=data) if result.status_code == 200: if is_file: log.info (" loaded %s into blazegraph - %s", file_name, self.format_response(result.text)) else: log.info(" loaded data - %s", self.format_response(result.text)) log.setLevel(self.log_level) return result else: raise SyntaxError(result.text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_local_file(self, file_path, namespace=None, graph=None, **kwargs): """ Uploads data to the Blazegraph Triplestore that is stored in files in directory that is available locally to blazegraph args: file_path: full path to the file namespace: the Blazegraph namespace to load the data graph: uri of the graph to load the data. Default is None kwargs: container_dir: the directory as seen by blazegraph - defaults to instance attribute if not passed """
time_start = datetime.datetime.now() url = self._make_url(namespace) params = {} if graph: params['context-uri'] = graph new_path = [] container_dir = pick(kwargs.get('container_dir'), self.container_dir) if container_dir: new_path.append(self.container_dir) new_path.append(file_path) params['uri'] = "file:///%s" % os.path.join(*new_path) log.debug(" loading %s into blazegraph", file_path) result = requests.post(url=url, params=params) if result.status_code > 300: raise SyntaxError(result.text) log.info("loaded '%s' in time: %s blazegraph response: %s", file_path, datetime.datetime.now() - time_start, self.format_response(result.text)) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def has_namespace(self, namespace): """ tests to see if the namespace exists args: namespace: the name of the namespace """
result = requests.get(self._make_url(namespace)) if result.status_code == 200: return True elif result.status_code == 404: return False
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def create_namespace(self, namespace=None, params=None): """ Creates a namespace in the triplestore args: namespace: the name of the namspace to create params: Dictionary of Blazegraph paramaters. defaults are: {'axioms': 'com.bigdata.rdf.axioms.NoAxioms', 'geoSpatial': False, 'isolatableIndices': False, 'justify': False, 'quads': False, 'rdr': False, 'textIndex': False, 'truthMaintenance': False} """
namespace = pick(namespace, self.namespace)
params = pick(params, self.namespace_params)
if not namespace:
    raise ReferenceError("No 'namespace' specified")
# Baseline namespace properties; caller-supplied params override these.
# NOTE(review): 'quads' defaults to True here although the docstring
# documents False - confirm which is intended.
_params = {'axioms': 'com.bigdata.rdf.axioms.NoAxioms',
           'geoSpatial': False,
           'isolatableIndices': False,
           'justify': False,
           'namespace': namespace,
           'quads': True,
           'rdr': False,
           'textIndex': False,
           'truthMaintenance': False}
if params:
    _params.update(params)
# Step 1: ask Blazegraph to render the property file for these options.
content_type = "text/plain"
url = self._make_url("prepareProperties").replace("/sparql", "")
# json.dumps lowercases Python booleans for the Java property format;
# the quote-stripping handles string values.
params = ["%s=%s" % (map_val, json.dumps(_params[map_key]).replace("\"", "")) \
    for map_key, map_val in self.ns_property_map.items()]
params = "\n".join(params)
result = requests.post(url=url,
                       headers={"Content-Type": content_type},
                       data=params)
data = result.text
# Step 2: POST the prepared properties as XML to create the namespace.
content_type = "application/xml"
url = self._make_url("x").replace("/x/sparql", "")
result = requests.post(url=url,
                       headers={"Content-Type": content_type},
                       data=data)
if result.status_code == 201:
    log.warning(result.text)
    return result.text
else:
    raise RuntimeError(result.text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_namespace(self, namespace): """ Deletes a namespace from the triplestore args: namespace: the name of the namespace """
# if not self.has_namespace(namespace): # return "Namespace does not exists" # log = logging.getLogger("%s.%s" % (self.log_name, # inspect.stack()[0][3])) # log.setLevel(self.log_level) url = self._make_url(namespace).replace("/sparql", "") result = requests.delete(url=url) if result.status_code == 200: log.critical(result.text) return result.text raise RuntimeError(result.text)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _make_url(self, namespace=None, url=None, **kwargs): """ Creates the REST Url based on the supplied namespace args: namespace: string of the namespace kwargs: check_status_call: True/False, whether the function is called from check_status. Used to avoid recursion error """
if not kwargs.get("check_status_call"):
    if not self.url:
        # NOTE(review): bare attribute access - presumably check_status
        # is a property whose getter refreshes self.url; confirm it is
        # not a method, otherwise this line is a no-op.
        self.check_status
rtn_url = self.url
if url:
    rtn_url = url
if rtn_url is None:
    # Fall back to the externally configured URL.
    rtn_url = self.ext_url
namespace = pick(namespace, self.namespace)
if namespace:
    # Insert the namespace segment: .../namespace/<name>/sparql
    rtn_url = os.path.join(rtn_url.replace("sparql", ""),
                           "namespace",
                           namespace,
                           "sparql").replace("\\", "/")
elif not rtn_url.endswith("sparql"):
    # Normalise Windows separators so the result is a valid URL.
    rtn_url = os.path.join(rtn_url, "sparql").replace("\\", "/")
return rtn_url
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def reset_namespace(self, namespace=None, params=None): """ Will delete and recreate specified namespace args: namespace(str): Namespace to reset params(dict): params used to reset the namespace """
# Name the logger after the current function for traceable output.
logger = logging.getLogger("%s.%s" % (self.log_name, inspect.stack()[0][3]))
logger.setLevel(self.log_level)
namespace = pick(namespace, self.namespace)
params = pick(params, self.namespace_params)
logger.warning(" Reseting namespace '%s' at host: %s", namespace, self.url)
# Deleting a namespace that does not exist raises RuntimeError; ignore
# it since we recreate the namespace immediately afterwards.
try:
    self.delete_namespace(namespace)
except RuntimeError:
    pass
self.create_namespace(namespace, params)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def tree_render(request, upy_context, vars_dictionary): """ It renders template defined in upy_context's page passed in arguments """
page = upy_context['PAGE'] return render_to_response(page.template.file_name, vars_dictionary, context_instance=RequestContext(request))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def view_404(request, url=None): """ It returns a 404 http response """
res = render_to_response("404.html", {"PAGE_URL": request.get_full_path()}, context_instance=RequestContext(request)) res.status_code = 404 return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def view_500(request, url=None): """ it returns a 500 http response """
res = render_to_response("500.html", context_instance=RequestContext(request)) res.status_code = 500 return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def favicon(request): """ It returns favicon's location """
favicon = u"{}tree/images/favicon.ico".format(settings.STATIC_URL) try: from seo.models import MetaSite site = MetaSite.objects.get(default=True) return HttpResponseRedirect(site.favicon.url) except: return HttpResponseRedirect(favicon)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_username(self): """ Ensure the username doesn't exist or contain invalid chars. We limit it to slugifiable chars since it's used as the slug for the user's profile view. """
username = self.cleaned_data.get("username") if username.lower() != slugify(username).lower(): raise forms.ValidationError( ugettext("Username can only contain letters, numbers, dashes " "or underscores.")) lookup = {"username__iexact": username} try: User.objects.exclude(id=self.instance.id).get(**lookup) except User.DoesNotExist: return username raise forms.ValidationError( ugettext("This username is already registered"))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_password2(self): """ Ensure the password fields are equal, and match the minimum length defined by ``ACCOUNTS_MIN_PASSWORD_LENGTH``. """
password1 = self.cleaned_data.get("password1") password2 = self.cleaned_data.get("password2") if password1: errors = [] if password1 != password2: errors.append(ugettext("Passwords do not match")) if len(password1) < settings.ACCOUNTS_MIN_PASSWORD_LENGTH: errors.append( ugettext("Password must be at least %s characters") % settings.ACCOUNTS_MIN_PASSWORD_LENGTH) if errors: self._errors["password1"] = self.error_class(errors) return password2
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def clean_email(self): """ Ensure the email address is not already registered. """
email = self.cleaned_data.get("email") qs = User.objects.exclude(id=self.instance.id).filter(email=email) if len(qs) == 0: return email raise forms.ValidationError( ugettext("This email is already registered"))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_articles(self, issue=''): """ Yields a list of articles from the given issue. """
soup = get_soup() # get soup of all articles issues = soup.find_all('ul') # validating and assigning default value for issue if not type(issue) is int or issue < 0 : issue = 1 if issue > len(issues): issue = len(issues) # considering latest article is last element articles = issues[len(issues)-issue].find_all('a') mArticles = [] for article in articles: mArticle = {} mArticle['link'] = article.get('href')[1:] mArticle['title'] = article.find('li').contents[0].strip() mArticle['author'] = article.find('span').contents[0].encode('utf8') mArticles.append(mArticle) return mArticles
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fromLink(self, link): """ Factory Method. Fetches article data from given link and builds the object """
soup = get_article_soup(link)
# The first classless <article> element is the article header.
head = soup.find_all('article',class_='')[0]
parts = link.split('/')
# Build a stable id from the issue segment and the article slug,
# and pull the issue number off the end of the issue segment.
id = '%s-%s'%(parts[0],parts[-1])
issue = parts[0].split('-')[-1]
#fetching head
title = head.find("h1").contents[0] if head.find("h1") else ''
tagline = head.find("h2").contents[0] if head.find("h2") else ''
body = ''
#fetching body
if len(soup.find_all('article',class_='main-body')) > 0:
    body = soup.find_all('article',class_='main-body')[0].find(class_='inner')
author = ''
#fetching author
if len(soup.find_all('aside')) > 0:
    # NOTE(review): inside this if-block find_all('aside')[0] is always
    # truthy, so the conditional expression always takes its first branch.
    aside = soup.find_all('aside')[0] if soup.find_all('aside')[0] else ''
    author = Author.from_soup(aside)
return Article(id=id,title=title,tagline=tagline,body=body,issue=issue,link='http://thezine.biz/%s'%link,author=author)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_soup(self,soup): """ Factory Pattern. Fetches author data from given soup and builds the object """
if soup is None or soup is '': return None else: author_name = soup.find('em').contents[0].strip() if soup.find('em') else '' author_image = soup.find('img').get('src') if soup.find('img') else '' author_contact = Contact.from_soup(self,soup) return Author(author_name,author_image,author_contact)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def from_soup(self,author,soup): """ Factory Pattern. Fetches contact data from given soup and builds the object """
# Each contact method is advertised via an icon <span> nested inside an
# <a>; probe for the icon first and fall back to '' when it is absent.
# The mailto: prefix is stripped from the email href.
email = soup.find('span',class_='icon icon-mail').findParent('a').get('href').split(':')[-1] if soup.find('span',class_='icon icon-mail') else ''
facebook = soup.find('span',class_='icon icon-facebook').findParent('a').get('href') if soup.find('span',class_='icon icon-facebook') else ''
twitter = soup.find('span',class_='icon icon-twitter-3').findParent('a').get('href') if soup.find('span',class_='icon icon-twitter-3') else ''
link = soup.find('span',class_='icon icon-link').findParent('a').get('href') if soup.find('span',class_='icon icon-link') else ''
return Contact(email,facebook,twitter,link)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find(self, text): """ Return a list of genres found in text. """
genres = []
text = text.lower()
# category_counter scores whole categories; counter scores individual
# genres (and, in the tag pass, categories used as genre keys).
category_counter = Counter()
counter = Counter()
for genre in self.db.genres:
    found = self.contains_entity(genre, text)
    if found:
        counter[genre] += found
        category = self.db.reference[genre]
        points = self.db.points[genre]
        points *= found
        # Add bonus points if additional terms points to category
        if category_counter[category] > 0:
            points += 1
        category_counter[category] += points
for tag in self.db.tags:
    found = self.contains_entity(tag, text)
    if found:
        category = self.db.reference[tag]
        # Tags only add the category itself as a genre candidate when
        # no genre in that category was matched directly.
        if not counter[category]:
            counter[category] += found
        points = self.db.points[tag]
        points *= found
        category_counter[category] += points
if not category_counter:
    # Nothing matched at all.
    return genres
main_category = category_counter.most_common(1)[0][0]
# Convert counter to a flat list of genres, sorted by count
sorted_genres = [ite for ite, it in counter.most_common()]
for genre in sorted_genres:
    insert = True
    # With unique_category set, only keep genres belonging to the
    # highest-scoring category.
    if self.unique_category:
        if not self.db.reference[genre] == main_category:
            insert = False
    if insert:
        genres.append(genre)
return genres
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def contains_entity(entity, text): """ Attempt to try entity, return false if not found. Otherwise the amount of time entitu is occuring. """
try: entity = re.escape(entity) entity = entity.replace("\ ", "([^\w])?") pattern = "(\ |-|\\\|/|\.|,|^)%s(\ |\-|\\\|/|\.|,|$)" % entity found = len(re.findall(pattern, text, re.I | re.M)) except Exception as e: found = False return found
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def is_executable(path):
    '''is the given path executable?'''
    # Stat once instead of three times: the original re-statted the file
    # for every permission bit, which is slower and racy if the file
    # changes between calls. Truthy when any execute bit (user, group,
    # other) is set.
    mode = os.stat(path)[stat.ST_MODE]
    return (stat.S_IXUSR & mode
            or stat.S_IXGRP & mode
            or stat.S_IXOTH & mode)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def prepare_writeable_dir(tree):
    ''' make sure a directory exists and is writeable '''
    # Normalise everything except the filesystem root, which realpath
    # would leave unchanged anyway.
    if tree != '/':
        tree = os.path.realpath(os.path.expanduser(tree))
    if not os.path.exists(tree):
        try:
            os.makedirs(tree)
        except (IOError, OSError), e:  # NOTE(review): Python 2 except syntax
            exit("Could not make dir %s: %s" % (tree, e))
    # A directory that exists but is read-only is also fatal.
    if not os.access(tree, os.W_OK):
        exit("Cannot write to path %s" % tree)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def path_dwim(basedir, given):
    ''' make relative paths work like folks expect. '''
    # Tilde paths expand to the user's home, absolute paths pass through
    # untouched, and anything else is taken relative to basedir.
    if given.startswith("~/"):
        return os.path.expanduser(given)
    if given.startswith("/"):
        return given
    return os.path.join(basedir, given)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def parse_json(raw_data): ''' this version for module return data only ''' orig_data = raw_data # ignore stuff like tcgetattr spewage or other warnings data = filter_leading_non_json_lines(raw_data) try: return json.loads(data) except: # not JSON, but try "Baby JSON" which allows many of our modules to not # require JSON and makes writing modules in bash much simpler results = {} try: tokens = shlex.split(data) except: print "failed to parse json: "+ data raise for t in tokens: if t.find("=") == -1: raise errors.AnsibleError("failed to parse: %s" % orig_data) (key,value) = t.split("=", 1) if key == 'changed' or 'failed': if value.lower() in [ 'true', '1' ]: value = True elif value.lower() in [ 'false', '0' ]: value = False if key == 'rc': value = int(value) results[key] = value if len(results.keys()) == 0: return { "failed" : True, "parsed" : False, "msg" : orig_data } return results
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def md5(filename):
    ''' Return MD5 hex digest of local file, or None if file is not present. '''
    if not os.path.exists(filename):
        return None
    digest = _md5()
    blocksize = 64 * 1024
    infile = open(filename, 'rb')
    # BUG FIX: guarantee the file handle is closed even if reading
    # raises; the original leaked it on error.
    try:
        block = infile.read(blocksize)
        while block:
            digest.update(block)
            block = infile.read(blocksize)
    finally:
        infile.close()
    return digest.hexdigest()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def _gitinfo():
    ''' returns a string containing git branch, commit id and commit date '''
    result = None
    repo_path = os.path.join(os.path.dirname(__file__), '..', '..', '..', '.git')
    if os.path.exists(repo_path):
        # Check if the .git is a file. If it is a file, it means that we are in a submodule structure.
        if os.path.isfile(repo_path):
            try:
                # NOTE(review): yaml.load without an explicit Loader is
                # unsafe on untrusted input; here it only reads the local
                # .git file, but confirm this is acceptable.
                gitdir = yaml.load(open(repo_path)).get('gitdir')
                # There is a possibility the .git file to have an absolute path.
                if os.path.isabs(gitdir):
                    repo_path = gitdir
                else:
                    repo_path = os.path.join(repo_path.split('.git')[0], gitdir)
            except (IOError, AttributeError):
                return ''
        # HEAD holds a ref like "ref: refs/heads/<branch>"; keep the tail.
        f = open(os.path.join(repo_path, "HEAD"))
        branch = f.readline().split('/')[-1].rstrip("\n")
        f.close()
        branch_path = os.path.join(repo_path, "refs", "heads", branch)
        if os.path.exists(branch_path):
            f = open(branch_path)
            commit = f.readline()[:10]
            f.close()
            # Commit date is approximated by the ref file's mtime.
            date = time.localtime(os.stat(branch_path).st_mtime)
            if time.daylight == 0:
                offset = time.timezone
            else:
                offset = time.altzone
            # offset / -36 converts seconds west of UTC into an
            # hours*100 style GMT offset for display.
            result = "({0} {1}) last updated {2} (GMT {3:+04d})".format(branch, commit,
                time.strftime("%Y/%m/%d %H:%M:%S", date), offset / -36)
        else:
            result = ''
    return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def compile_when_to_only_if(expression): ''' when is a shorthand for writing only_if conditionals. It requires less quoting magic. only_if is retained for backwards compatibility. ''' # when: set $variable # when: unset $variable # when: failed $json_result # when: changed $json_result # when: int $x >= $z and $y < 3 # when: int $x in $alist # when: float $x > 2 and $y <= $z # when: str $x != $y if type(expression) not in [ str, unicode ]: raise errors.AnsibleError("invalid usage of when_ operator: %s" % expression) tokens = expression.split() if len(tokens) < 2: raise errors.AnsibleError("invalid usage of when_ operator: %s" % expression) # when_set / when_unset if tokens[0] in [ 'set', 'unset' ]: tcopy = tokens[1:] for (i,t) in enumerate(tokens[1:]): if t.find("$") != -1: tcopy[i] = "is_%s('''%s''')" % (tokens[0], t) else: tcopy[i] = t return " ".join(tcopy) # when_failed / when_changed elif tokens[0] in [ 'failed', 'changed' ]: tcopy = tokens[1:] for (i,t) in enumerate(tokens[1:]): if t.find("$") != -1: tcopy[i] = "is_%s(%s)" % (tokens[0], t) else: tcopy[i] = t return " ".join(tcopy) # when_integer / when_float / when_string elif tokens[0] in [ 'integer', 'float', 'string' ]: cast = None if tokens[0] == 'integer': cast = 'int' elif tokens[0] == 'string': cast = 'str' elif tokens[0] == 'float': cast = 'float' tcopy = tokens[1:] for (i,t) in enumerate(tokens[1:]): if t.find("$") != -1: # final variable substitution will happen in Runner code tcopy[i] = "%s('''%s''')" % (cast, t) else: tcopy[i] = t return " ".join(tcopy) # when_boolean elif tokens[0] in [ 'bool', 'boolean' ]: tcopy = tokens[1:] for (i, t) in enumerate(tcopy): if t.find("$") != -1: tcopy[i] = "(is_set('''%s''') and '''%s'''.lower() not in ('false', 'no', 'n', 'none', '0', ''))" % (t, t) return " ".join(tcopy) else: raise errors.AnsibleError("invalid usage of when_ operator: %s" % expression)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def make_sudo_cmd(sudo_user, executable, cmd): """ helper function for connection plugins to create sudo commands """
# Rather than detect if sudo wants a password this time, -k makes # sudo always ask for a password if one is required. # Passing a quoted compound command to sudo (or sudo -s) # directly doesn't work, so we shellquote it with pipes.quote() # and pass the quoted string to the user's shell. We loop reading # output until we see the randomly-generated sudo prompt set with # the -p option. randbits = ''.join(chr(random.randint(ord('a'), ord('z'))) for x in xrange(32)) prompt = '[sudo via ansible, key=%s] password: ' % randbits sudocmd = '%s -k && %s %s -S -p "%s" -u %s %s -c %s' % ( C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_EXE, C.DEFAULT_SUDO_FLAGS, prompt, sudo_user, executable or '$SHELL', pipes.quote(cmd)) return ('/bin/sh -c ' + pipes.quote(sudocmd), prompt)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def login(self): """ Logs the user in, returns the result Returns bool - Whether or not the user logged in successfully """
# Request index to obtain initial cookies and look more human pg = self.getPage("http://www.neopets.com") form = pg.form(action="/login.phtml") form.update({'username': self.username, 'password': self.password}) pg = form.submit() logging.getLogger("neolib.user").info("Login check", {'pg': pg}) return self.username in pg.content
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sync(self, browser): """ Enables cookie synchronization with specified browser, returns result Returns bool - True if successful, false otherwise """
BrowserCookies.loadBrowsers() if not browser in BrowserCookies.browsers: return False self.browserSync = True self.browser = browser return True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save(self): """ Exports all user attributes to the user's configuration and writes configuration Saves the values for each attribute stored in User.configVars into the user's configuration. The password is automatically encoded and salted to prevent saving it as plaintext. The session is pickled, encoded, and compressed to take up less space in the configuration file. All other attributes are saved in plain text. Writes the changes to the configuration file. """
# Code to load all attributes for prop in dir(self): if getattr(self, prop) == None: continue if not prop in self.configVars: continue # Special handling for some attributes if prop == "session": pic = pickle.dumps(getattr(self, prop).cookies) comp = zlib.compress(pic) enc = base64.b64encode(comp) self.config[prop] = enc.decode() continue if prop == "password" and not self.savePassword: continue if prop == "password": s = hashlib.md5(self.username.encode()).hexdigest() p = base64.b64encode(getattr(self, prop).encode()) + s.encode() self.config[prop] = p.decode() continue self.config[prop] = str(getattr(self, prop)) if 'password' in self.config and not self.savePassword: del self.config.password self.config.write() self.__loadConfig()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mod_git_ignore(directory, ignore_item, action): """ Adds the item to the directory's .gitignore file if it is not already present (action="add"), or removes it from the file (action="remove"). """
if not os.path.isdir(directory): return ignore_filepath = os.path.join(directory,".gitignore") if not os.path.exists(ignore_filepath): items = [] else: with open(ignore_filepath) as ig_file: items = ig_file.readlines() # strip and clean the lines clean_items = [line.strip("\n").strip() for line in items] clean_items = make_list(clean_items) if action == "add": if ignore_item not in clean_items: with open(ignore_filepath, "w") as ig_file: clean_items.append(ignore_item) ig_file.write("\n".join(clean_items) + "\n") elif action == "remove": with open(ignore_filepath, "w") as ig_file: for i, value in enumerate(clean_items): if value != ignore_item.lower(): ig_file.write(items[i])
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def monitor_running_process(context: RunContext): """ Runs an infinite loop that waits for the process to either exit on its or time out Captures all output from the running process :param context: run context :type context: RunContext """
# Poll the process until it exits or exceeds its timeout, draining its
# output on every pass so pipes never fill up and block the child.
finished = False
while not finished:
    capture_output_from_running_process(context)

    finished = context.process_finished()
    if finished:
        # normal exit: record the child's return code
        context.return_code = context.command.returncode
    elif context.process_timed_out():
        # timed out: mark failure and abort with a descriptive error
        context.return_code = -1
        raise ProcessTimeoutError(
            exe_name=context.exe_short_name,
            timeout=context.timeout,
        )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def base_elts(elt, cls=None, depth=None): """Get bases elements of the input elt. - If elt is an instance, get class and all base classes. - If elt is a method, get all base methods. - If elt is a class, get all base classes. - In other case, get an empty list. :param elt: supposed inherited elt. :param cls: cls from where find attributes equal to elt. If None, it is found as much as possible. Required in python3 for function classes. :type cls: type or list :param int depth: search depth. If None (default), depth is maximal. :return: elt bases elements. if elt has not base elements, result is empty. :rtype: list """
# Strategy: resolve the starting class list (cls), expand it breadth-first
# through __bases__ up to `depth`, then collect matching base attributes.
result = []

elt_name = getattr(elt, '__name__', None)

# anonymous elements (no __name__) cannot be looked up on base classes
if elt_name is not None:

    cls = [] if cls is None else ensureiterable(cls)

    elt_is_class = False

    # if cls is None and elt is routine, it is possible to find the cls
    if not cls and isroutine(elt):

        if hasattr(elt, '__self__'):  # from the instance

            instance = get_method_self(elt)  # get instance

            if instance is None and PY2:  # get base im_class if PY2
                cls = list(elt.im_class.__bases__)

            else:  # use instance class
                cls = [instance.__class__]

    # cls is elt if elt is a class
    elif isclass(elt):
        elt_is_class = True
        cls = list(elt.__bases__)

    if cls:  # if cls is not empty, find all base classes

        index_of_found_classes = 0  # get last visited class index
        visited_classes = set(cls)  # cache for visited classes
        len_classes = len(cls)

        if depth is None:  # if depth is None, get maximal value
            depth = -1  # set negative value

        # breadth-first expansion: each pass appends the bases of classes
        # discovered in the previous pass; `depth` bounds the number of passes
        while depth != 0 and index_of_found_classes != len_classes:
            len_classes = len(cls)

            for index in range(index_of_found_classes, len_classes):
                _cls = cls[index]

                for base_cls in _cls.__bases__:
                    if base_cls in visited_classes:
                        continue
                    else:
                        visited_classes.add(base_cls)
                        cls.append(base_cls)

            index_of_found_classes = len_classes
            depth -= 1

        if elt_is_class:
            # if cls is elt, result is classes minus first class
            result = cls

        elif isroutine(elt):

            # get an elt to compare with found element
            if ismethod(elt):
                elt_to_compare = get_method_function(elt)
            else:
                elt_to_compare = elt

            for _cls in cls:  # for all classes
                # get possible base elt
                b_elt = getattr(_cls, elt_name, None)

                if b_elt is not None:
                    # compare funcs (unwrap bound/unbound methods first)
                    if ismethod(b_elt):
                        bec = get_method_function(b_elt)
                    else:
                        bec = b_elt
                    # if matching, add to result
                    if bec is elt_to_compare:
                        result.append(b_elt)

return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def find_embedding(elt, embedding=None): """Try to get elt embedding elements. :param embedding: embedding element. Must have a module. :return: a list of [module [,class]*] embedding elements which define elt. :rtype: list """
result = [] # result is empty in the worst case # start to get module module = getmodule(elt) if module is not None: # if module exists visited = set() # cache to avoid to visit twice same element if embedding is None: embedding = module # list of compounds elements which construct the path to elt compounds = [embedding] while compounds: # while compounds elements exist # get last compound last_embedding = compounds[-1] # stop to iterate on compounds when last embedding is elt if last_embedding == elt: result = compounds # result is compounds break else: # search among embedded elements for name in dir(last_embedding): # get embedded element embedded = getattr(last_embedding, name) try: # check if embedded has already been visited if embedded not in visited: visited.add(embedded) # set it as visited else: continue except TypeError: pass else: # get embedded module embedded_module = getmodule(embedded) # and compare it with elt module if embedded_module is module: # add embedded to compounds compounds.append(embedded) # end the second loop break else: # remove last element if no coumpound element is found compounds.pop(-1) return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def projects(accountable): """ List all projects. """
# Render each project's id/key/self fields as a sorted-column table.
header_row = sorted(['id', 'key', 'self'])
table_rows = [header_row]
for project in accountable.metadata()['projects']:
    table_rows.append(
        [value for field, value in sorted(project.items()) if field in header_row]
    )
print_table(SingleTable(table_rows))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def issuetypes(accountable, project_key): """ List all issue types. Optional parameter to list issue types by a given project. """
# Build one row per issue type, prefixed with its owning project key.
by_project = accountable.issue_types(project_key)
columns = sorted(['id', 'name', 'description'])
body = [
    [proj_key] + [value for field, value in sorted(issue_type.items()) if field in columns]
    for proj_key, type_list in sorted(by_project.items())
    for issue_type in type_list
]
print_table(SingleTable([['project_key'] + columns] + body))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def components(accountable, project_key): """ Returns a list of all a project's components. """
# Tabulate each component's id/name/self fields in sorted column order.
column_names = sorted(['id', 'name', 'self'])
table_rows = [column_names]
for component in accountable.project_components(project_key):
    table_rows.append(
        [value for field, value in sorted(component.items()) if field in column_names]
    )
print_table(SingleTable(table_rows))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkoutbranch(accountable, options): """ Create a new issue and checkout a branch named after it. """
# Create the issue (and branch), then show its id/key/self as one table row.
created = accountable.checkout_branch(options)
columns = sorted(['id', 'key', 'self'])
value_row = [created[column] for column in columns]
print_table(SingleTable([columns, value_row]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def checkout(accountable, issue_key): """ Checkout a new branch or checkout to a branch for a given issue. """
# Check out (or create) the branch, then print the returned mapping as a
# two-row table: keys on top, values underneath.
data = accountable.checkout(issue_key)
table = SingleTable([data.keys(), list(data.values())])
print_table(table)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def issue(ctx, accountable, issue_key): """ List metadata for a given issue key. """
accountable.issue_key = issue_key if not ctx.invoked_subcommand: issue = accountable.issue_meta() headers = issue.keys() rows = [headers, [v for k, v in issue.items()]] print_table(SingleTable(rows))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def update(accountable, options): """ Update an existing issue. """
# Apply the update, then print the returned mapping as keys over values.
updated = accountable.issue_update(options)
table = SingleTable([updated.keys(), list(updated.values())])
print_table(table)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def comments(accountable): """ Lists all comments for a given issue key. """
comments = accountable.issue_comments() headers = sorted(['author_name', 'body', 'updated']) if comments: rows = [[v for k, v in sorted(c.items()) if k in headers] for c in comments] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho('No comments found for {}'.format( accountable.issue_key ), fg='red')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def addcomment(accountable, body): """ Add a comment to the given issue key. Accepts a body argument to be used as the comment's body. """
# Post the comment, then echo the created comment back as a one-row table.
response = accountable.issue_add_comment(body)
columns = sorted(['author_name', 'body', 'updated'])
data_row = [value for field, value in sorted(response.items()) if field in columns]
print_table(SingleTable([columns, data_row]))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def worklog(accountable): """ List all worklogs for a given issue key. """
worklog = accountable.issue_worklog() headers = ['author_name', 'comment', 'time_spent'] if worklog: rows = [[v for k, v in sorted(w.items()) if k in headers] for w in worklog] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho( 'No worklogs found for {}'.format(accountable.issue_key), fg='red' )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def transitions(accountable): """ List all possible transitions for a given issue. """
transitions = accountable.issue_transitions().get('transitions') headers = ['id', 'name'] if transitions: rows = [[v for k, v in sorted(t.items()) if k in headers] for t in transitions] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho( 'No transitions found for {}'.format(accountable.issue_key), fg='red' )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dotransition(accountable, transition_id): """ Transition the given issue to the provided ID. The API does not return a JSON response for this call. """
# The transition endpoint returns no JSON body; 204 means it succeeded.
response = accountable.issue_do_transition(transition_id)
succeeded = response.status_code == 204
if succeeded:
    click.secho(
        'Successfully transitioned {}'.format(accountable.issue_key),
        fg='green'
    )
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def users(accountable, query): """ Executes a user search for the given query. """
users = accountable.users(query) headers = ['display_name', 'key'] if users: rows = [[v for k, v in sorted(u.items()) if k in headers] for u in users] rows.insert(0, headers) print_table(SingleTable(rows)) else: click.secho('No users found for query {}'.format( query ), fg='red')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def guess_saves(zone, data): """Return types with guessed DST saves"""
# Map DST type index -> set of candidate save (offset shift) values, inferred
# from consecutive transition pairs; `details` remembers which transition
# produced each candidate, for diagnostics.
saves = {}
details = {}
for (time0, type0), (time1, type1) in pairs(data.times):
    is_dst0 = bool(data.types[type0][1])
    is_dst1 = bool(data.types[type1][1])
    if (is_dst0, is_dst1) == (False, True):
        # standard -> DST: the save is the UTC offset gained
        shift = data.types[type1][0] - data.types[type0][0]
        if shift:
            saves.setdefault(type1, set()).add(shift)
            details[type1, shift] = (time0, time1)
    elif (is_dst0, is_dst1) == (True, False):
        # DST -> standard: the save is the UTC offset lost
        shift = data.types[type0][0] - data.types[type1][0]
        if shift:
            saves.setdefault(type0, set()).add(shift)
            details[type0, shift] = (time0, time1)
types = data.types[:]
for i, (offset, save, abbr) in enumerate(data.types):
    if save:
        guesses = saves.get(i, set())
        if not guesses:
            # no transitions observed for this type; fall back to the
            # conventional one-hour save
            print("No save value guesses for type %d (%r) in zone %s." % (i, types[i][-1], zone))
            guess = timedelta(hours=1)
        elif len(guesses) == 1:
            guess = guesses.pop()
        else:
            # ambiguous: report all candidates, then use the smallest
            # non-zero shift
            print("Multiple save value guesses for type %d in zone %s." % (i, zone))
            for g in guesses:
                d = details[i, g]
                print(" ", g, *d)
            guess = min(g for g in guesses if g)
        types[i] = (offset, guess, abbr)
return types
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_commit_tree(profile, sha): """Get the SHA of a commit's tree. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. sha The SHA of a commit. Returns: The SHA of the commit's tree. """
data = commits.get_commit(profile, sha) tree = data.get("tree") sha = tree.get("sha") return sha
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_file_from_tree(tree, file_path): """Remove a file from a tree. Args: tree A list of dicts containing info about each blob in a tree. file_path The path of a file to remove from a tree. Returns: The provided tree, but with the item matching the specified file_path removed. """
match = None for item in tree: if item.get("path") == file_path: match = item break if match: tree.remove(match) return tree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_file_to_tree(tree, file_path, file_contents, is_executable=False): """Add a file to a tree. Args: tree A list of dicts containing info about each blob in a tree. file_path The path of the new file in the tree. file_contents The (UTF-8 encoded) contents of the new file. is_executable If ``True``, the new file will get executable permissions (0755). Otherwise, it will get 0644 permissions. Returns: The provided tree, but with the new file added. """
record = { "path": file_path, "mode": "100755" if is_executable else "100644", "type": "blob", "content": file_contents, } tree.append(record) return tree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_files_in_branch(profile, branch_sha): """Get all files in a branch's tree. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. branch_sha The SHA a branch's HEAD points to. Returns: A list of dicts containing info about each blob in the tree. """
tree_sha = get_commit_tree(profile, branch_sha) files = get_files_in_tree(profile, tree_sha) tree = [prepare(x) for x in files] return tree
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_file( profile, branch, file_path, file_contents, is_executable=False, commit_message=None): """Add a file to a branch. Args: profile A profile generated from ``simplygithub.authentication.profile``. Such profiles tell this module (i) the ``repo`` to connect to, and (ii) the ``token`` to connect with. branch The name of a branch. file_path The path of the new file in the tree. file_contents The (UTF-8 encoded) contents of the new file. is_executable If ``True``, the new file will get executable permissions (0755). Otherwise, it will get 0644 permissions. commit_message A commit message to give to the commit. Returns: A dict with data about the branch's new ref (it includes the new SHA the branch's HEAD points to, after committing the new file). """
branch_sha = get_branch_sha(profile, branch) tree = get_files_in_branch(profile, branch_sha) new_tree = add_file_to_tree(tree, file_path, file_contents, is_executable) data = trees.create_tree(profile, new_tree) sha = data.get("sha") if not commit_message: commit_message = "Added " + file_path + "." parents = [branch_sha] commit_data = commits.create_commit(profile, commit_message, sha, parents) commit_sha = commit_data.get("sha") ref_data = refs.update_ref(profile, "heads/" + branch, commit_sha) return ref_data