text_prompt
stringlengths
157
13.1k
code_prompt
stringlengths
7
19.8k
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add(self, interval, offset): """ The added interval must be overlapping or beyond the last stored interval ie. added in sorted order. :param interval: interval to add :param offset: full virtual offset to add :return: """
def add(self, interval, offset):
    """Record *interval*/*offset*; entries must arrive in sorted order.

    The added interval must be overlapping or beyond the last stored
    interval, i.e. added in sorted order.

    :param interval: interval to add
    :param offset: full virtual offset to add
    :raises ValueError: if the interval/offset is out of order
    """
    start, stop = self.get_start_stop(interval)
    if not self.starts:
        # First entry: open a new [first_offset, last_offset, count] record.
        self.starts.append(start)
        self.stops.append(stop)
        self.offsets.append([offset, offset, 1])
        return
    if start < self.starts[-1] or offset <= self.offsets[-1][1]:
        raise ValueError('intervals and offsets must be added in-order')
    # Fold into the trailing record: extend its end offset, bump its count.
    self.offsets[-1][1] = offset
    self.offsets[-1][2] += 1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_sum(qs, field): """ get sum for queryset. ``qs``: queryset ``field``: The field name to sum. """
def get_sum(qs, field):
    """Return the sum of ``field`` over queryset ``qs``.

    ``qs``: queryset
    ``field``: the field name to sum
    Returns 0 when the aggregate is empty/None.
    """
    total = qs.aggregate(Sum(field))['%s__sum' % field]
    # aggregate() yields None for an empty queryset; normalise to 0.
    return total or 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_max(qs, field): """ get max for queryset. qs: queryset field: The field name to max. """
def get_max(qs, field):
    """Return the maximum of ``field`` over queryset ``qs``.

    qs: queryset
    field: the field name to take the max of
    Returns 0 when the aggregate is empty/None.
    """
    largest = qs.aggregate(Max(field))['%s__max' % field]
    # aggregate() yields None for an empty queryset; normalise to 0.
    return largest or 0
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def do_filter(qs, qdata, quick_query_fields=[], int_quick_query_fields=[]): """ auto filter queryset by dict. qs: queryset need to filter. qdata: quick_query_fields: int_quick_query_fields: """
def do_filter(qs, qdata, quick_query_fields=None, int_quick_query_fields=None):
    """Auto-filter a queryset from a dict of query data.

    qs: queryset to filter.
    qdata: mapping of query parameters (reads 'q_quick_search_kw').
    quick_query_fields: field names searched by the quick-search box.
    int_quick_query_fields: integer field names for the quick-search box.

    Filtering is best-effort: on any error the traceback is printed and
    the (possibly partially filtered) queryset is returned.
    """
    # None defaults instead of mutable [] defaults shared between calls.
    if quick_query_fields is None:
        quick_query_fields = []
    if int_quick_query_fields is None:
        int_quick_query_fields = []
    try:
        qs = qs.filter(
            __gen_quick_query_params(
                qdata.get('q_quick_search_kw'),
                quick_query_fields,
                int_quick_query_fields)
        )
        q, kw_query_params = __gen_query_params(qdata)
        qs = qs.filter(q, **kw_query_params)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behaviour otherwise.
        import traceback
        traceback.print_exc()
    return qs
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def read_gcvs(filename): """ Reads variable star data in `GCVS format`_. :param filename: path to GCVS data file (usually ``iii.dat``) .. _`GCVS format`: http://www.sai.msu.su/gcvs/gcvs/iii/html/ """
def read_gcvs(filename):
    """Yield variable-star records parsed from a `GCVS format`_ data file.

    :param filename: path to GCVS data file (usually ``iii.dat``)

    .. _`GCVS format`: http://www.sai.msu.su/gcvs/gcvs/iii/html/
    """
    with open(filename, 'r') as handle:
        # GcvsParser is itself iterable over star records.
        yield from GcvsParser(handle)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dict_to_body(star_dict): """ Converts a dictionary of variable star data to a `Body` instance. Requires `PyEphem <http://rhodesmill.org/pyephem/>`_ to be installed. """
def dict_to_body(star_dict):
    """Convert a dict of variable-star data to an ``ephem.FixedBody``.

    Requires `PyEphem <http://rhodesmill.org/pyephem/>`_ to be installed.

    :raises NotImplementedError: when PyEphem is not available
    """
    if ephem is None:  # pragma: no cover
        raise NotImplementedError("Please install PyEphem in order to use dict_to_body.")
    fixed_body = ephem.FixedBody()
    fixed_body.name = star_dict['name']
    # PyEphem parses RA/Dec from their string representations.
    fixed_body._ra = ephem.hours(str(star_dict['ra']))
    fixed_body._dec = ephem.degrees(str(star_dict['dec']))
    fixed_body._epoch = ephem.J2000
    return fixed_body
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def tempfile(self):
    """Copy self.fn to a named temporary file and return the file name.

    The caller is responsible for deleting the returned file.
    """
    # Local aliased import: this method's name shadows the module name.
    import tempfile as _tempfile
    # mkstemp creates the file atomically. The old approach (create a
    # NamedTemporaryFile, close and delete it, then reuse the name) had a
    # window in which another process could claim the same path.
    fd, tfn = _tempfile.mkstemp()
    os.close(fd)
    shutil.copy(self.fn, tfn)
    return tfn
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sheets(self): """return the sheets of data."""
def sheets(self):
    """Return the sheets of data: a Dict of sheet name -> parsed XML."""
    data = Dict()
    for src in self.zipfile.namelist():
        # Only worksheet parts, e.g. 'xl/worksheets/sheet1.xml'.
        if 'xl/worksheets/' not in src:
            continue
        sheet_name = os.path.splitext(os.path.basename(src))[0]
        data[sheet_name] = self.xml(src)
    return data
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def workbook_data(self): """return a readable XML form of the data."""
def workbook_data(self):
    """Return a readable XML form of the workbook data."""
    document = XML(
        fn=os.path.splitext(self.fn)[0] + '.xml',
        root=Element.workbook())
    # Shared strings are referenced by index from worksheet cells.
    strings_root = self.xml('xl/sharedStrings.xml').root
    shared_strings = [
        str(t.text)
        for t in strings_root.xpath(".//xl:t", namespaces=self.NS)]
    for key in self.sheets.keys():
        worksheet = self.sheets[key].transform(XT, shared_strings=shared_strings)
        document.root.append(worksheet.root)
    return document
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def process(self, event): """Put and process tasks in queue. """
def process(self, event):
    """Put the file name of a filesystem *event* on the work queue."""
    src_path = event.src_path
    logger.info(f"{self}: put {src_path}")
    self.queue.put(os.path.basename(src_path))
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def main(): """ Scrapes Apple's iCal feed for Australian public holidays and generates per- state listings. """
def main():
    """
    Scrapes Apple's iCal feed for Australian public holidays and generates per-
    state listings.
    """
    # NOTE(review): modernised from Python-2 `print` statements. The old
    # `if filter(lambda ...)` test is always-true under Python 3 (filter()
    # returns a lazy iterator object), so it is replaced with any().
    print("Downloading Holidays from Apple's server...")
    r = requests.get('http://files.apple.com/calendars/Australian32Holidays.ics')
    cal = Calendar.from_ical(r.text)
    print("Processing calendar data...")
    valid_states = ['ACT', 'NSW', 'NT', 'QLD', 'SA', 'TAS', 'VIC', 'WA']
    state_cal = {}
    all_cal = make_calendar()
    for state in valid_states:
        state_cal[state] = make_calendar()
    for event in cal.walk('VEVENT'):
        # NOTE(review): event.decoded() may return bytes on Python 3 —
        # confirm and .decode() here if needed.
        event_name = event.decoded('SUMMARY').lower()
        if any(ignored in event_name for ignored in IGNORED_EVENTS):
            continue
        # see if there is a state or if it is for all
        if '(' in event_name:  # and not 'day in lieu' in event_name:
            # it is just for certain states.
            # eg:
            #  - Easter Tuesday (TAS)
            #  - Labour Day (ACT, NSW, SA, QLD)
            states = event_name.split('(', 2)[1].split(')')[0].split(',')
            if states == ['day in lieu']:
                # only a day in lieu, switch to all-cal logic
                all_cal.add_component(event)
                continue
            for state in states:
                state = state.strip().upper()
                assert state in valid_states, 'state=%r' % state
                state_cal[state].add_component(event)
        else:
            # for all states
            all_cal.add_component(event)
    print("Writing to disk...")
    # done, write calendars.
    with open('au_holidays.ics', 'wb') as f:
        f.write(all_cal.to_ical())
    for state in state_cal.keys():
        with open('%s_holidays.ics' % state.lower(), 'wb') as f:
            f.write(state_cal[state].to_ical())
    print("All done!")
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def init_editing_mode(self, e): # (M-C-j)
    '''Initialize vi editing mode: reset all vi state and install the key bindings.'''
    self.show_all_if_ambiguous = 'on'
    self.key_dispatch = {}
    # vi-mode state: insert/command mode flag, pending command, last
    # character-search, yank buffer, numeric multipliers, undo stack,
    # and incremental-search text.
    self.__vi_insert_mode = None
    self._vi_command = None
    self._vi_command_edit = None
    self._vi_key_find_char = None
    self._vi_key_find_direction = True
    self._vi_yank_buffer = None
    self._vi_multiplier1 = ''
    self._vi_multiplier2 = ''
    self._vi_undo_stack = []
    self._vi_undo_cursor = -1
    self._vi_current = None
    self._vi_search_text = ''
    self.vi_save_line ()
    self.vi_set_insert_mode (True)
    # make ' ' to ~ self insert
    for c in range(ord(' '), 127):
        self._bind_key('%s' % chr(c), self.vi_key)
    self._bind_key('BackSpace', self.vi_backspace)
    self._bind_key('Escape', self.vi_escape)
    self._bind_key('Return', self.vi_accept_line)
    self._bind_key('Left', self.backward_char)
    self._bind_key('Right', self.forward_char)
    self._bind_key('Home', self.beginning_of_line)
    self._bind_key('End', self.end_of_line)
    self._bind_key('Delete', self.delete_char)
    # Both C-d and C-z signal end-of-file.
    self._bind_key('Control-d', self.vi_eof)
    self._bind_key('Control-z', self.vi_eof)
    self._bind_key('Control-r', self.vi_redo)
    self._bind_key('Up', self.vi_arrow_up)
    self._bind_key('Control-p', self.vi_up)
    self._bind_key('Down', self.vi_arrow_down)
    self._bind_key('Control-n', self.vi_down)
    self._bind_key('Tab', self.vi_complete)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_collection_documents_generator(client, database_name, collection_name, spec, latest_n, sort_key): """ This is a python generator that yields tweets stored in a mongodb collection. Tweet "created_at" field is assumed to have been stored in the format supported by MongoDB. Inputs: - client: A pymongo MongoClient object. - database_name: The name of a Mongo database as a string. - collection_name: The name of the tweet collection as a string. - spec: A python dictionary that defines higher query arguments. - latest_n: The number of latest results we require from the mongo document collection. - sort_key: A field name according to which we will sort in ascending order. Yields: - document: A document in python dictionary (json) format. """
def get_collection_documents_generator(client, database_name, collection_name, spec, latest_n, sort_key):
    """
    This is a python generator that yields tweets stored in a mongodb collection.
    Tweet "created_at" field is assumed to have been stored in the format supported by MongoDB.

    Inputs: - client: A pymongo MongoClient object.
            - database_name: The name of a Mongo database as a string.
            - collection_name: The name of the tweet collection as a string.
            - spec: A python dictionary that defines higher query arguments.
            - latest_n: The number of latest results we require from the mongo document collection.
            - sort_key: A field name according to which we will sort in ascending order.

    Yields: - document: A document in python dictionary (json) format.
    """
    mongo_database = client[database_name]
    collection = mongo_database[collection_name]
    collection.create_index(sort_key)
    cursor = collection.find(filter=spec).sort([(sort_key, ASCENDING), ])
    if latest_n is not None:
        # count() is a server round-trip: call it once (the old code called
        # it twice), and clamp at zero so requesting more documents than
        # exist skips nothing instead of producing a negative skip.
        skip_n = max(collection.count() - latest_n, 0)
        cursor = cursor[skip_n:]
    yield from cursor
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def extract_connected_components(graph, connectivity_type, node_to_id): """ Extract the largest connected component from a graph. Inputs: - graph: An adjacency matrix in scipy sparse matrix format. - connectivity_type: A string that can be either: "strong" or "weak". - node_to_id: A map from graph node id to Twitter id, in python dictionary format. Outputs: - largest_connected_component: An adjacency matrix in scipy sparse matrix format. - new_node_to_id: A map from graph node id to Twitter id, in python dictionary format. - old_node_list: List of nodes from the possibly disconnected original graph. Raises: - RuntimeError: If there the input graph is empty. """
def extract_connected_components(graph, connectivity_type, node_to_id):
    """
    Extract the largest connected component from a graph.

    Inputs:  - graph: An adjacency matrix in scipy sparse matrix format.
             - connectivity_type: A string that can be either: "strong" or "weak".
             - node_to_id: A map from graph node id to Twitter id, in python dictionary format.

    Outputs: - largest_connected_component: An adjacency matrix in scipy sparse matrix format.
             - new_node_to_id: A map from graph node id to Twitter id, in python dictionary format.
             - old_node_list: List of nodes from the possibly disconnected original graph.

    Raises:  - RuntimeError: If the connectivity type is invalid or the input graph is empty.
    """
    # Get a networkx graph.
    nx_graph = nx.from_scipy_sparse_matrix(graph, create_using=nx.DiGraph())
    # Calculate all connected components in graph.
    if connectivity_type == "weak":
        connected_components = nxalgcom.weakly_connected_component_subgraphs(nx_graph)
    elif connectivity_type == "strong":
        connected_components = nxalgcom.strongly_connected_component_subgraphs(nx_graph)
    else:
        # Carry the reason in the exception instead of print + bare raise.
        raise RuntimeError("Invalid connectivity type input: %r" % (connectivity_type,))
    # Handle empty graph.
    try:
        largest_connected_component = max(connected_components, key=len)
    except ValueError:
        raise RuntimeError("Empty graph.")
    old_node_list = largest_connected_component.nodes()
    node_to_node = dict(zip(np.arange(len(old_node_list)), old_node_list))
    largest_connected_component = nx.to_scipy_sparse_matrix(largest_connected_component,
                                                            dtype=np.float64,
                                                            format="csr")
    # Make node_to_id restricted to the surviving nodes.
    new_node_to_id = {k: node_to_id[v] for k, v in node_to_node.items()}
    return largest_connected_component, new_node_to_id, old_node_list
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def sendEmail(self, subject, body, toAddress=False): """ sends an email using the agrcpythonemailer@gmail.com account """
def sendEmail(self, subject, body, toAddress=False):
    """Send a plain-text email via the configured SMTP server.

    Falls back to self.toAddress when no recipient is given; multiple
    recipients are separated by ';'. In testing mode the message is
    printed instead of being sent.
    """
    recipients = (toAddress or self.toAddress).split(';')
    message = MIMEText(body)
    message['Subject'] = subject
    message['From'] = self.fromAddress
    message['To'] = ','.join(recipients)
    if self.testing:
        print('***Begin Test Email Message***')
        print(message)
        print('***End Test Email Message***')
    else:
        smtp = SMTP(self.server, self.port)
        smtp.sendmail(self.fromAddress, recipients, message.as_string())
        smtp.quit()
        print('email sent')
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_completions(self): """Return a list of possible completions for the string ending at the point. Also set begidx and endidx in the process."""
def _get_completions(self):
    u"""Return a list of possible completions for the string ending at the point.

    Also set begidx and endidx in the process. The registered completer is
    tried first; filesystem globbing is the optional fallback."""
    completions = []
    # Both indices start at the cursor; begidx is walked back below.
    self.begidx = self.l_buffer.point
    self.endidx = self.l_buffer.point
    buf=self.l_buffer.line_buffer
    if self.completer:
        # get the string to complete: scan back to the nearest delimiter
        while self.begidx > 0:
            self.begidx -= 1
            if buf[self.begidx] in self.completer_delims:
                self.begidx += 1
                break
        text = ensure_str(u''.join(buf[self.begidx:self.endidx]))
        log(u'complete text="%s"' % ensure_unicode(text))
        # readline-style protocol: call completer(text, i) for i=0,1,...
        # until it returns None or raises IndexError.
        i = 0
        while 1:
            try:
                r = ensure_unicode(self.completer(text, i))
            except IndexError:
                break
            i += 1
            if r is None:
                break
            elif r and r not in completions:
                completions.append(r)
            else:
                pass
        log(u'text completions=<%s>' % map(ensure_unicode, completions))
    if (self.complete_filesystem == "on") and not completions:
        # get the filename to complete: scan back to whitespace instead
        while self.begidx > 0:
            self.begidx -= 1
            if buf[self.begidx] in u' \t\n':
                self.begidx += 1
                break
        text = ensure_str(u''.join(buf[self.begidx:self.endidx]))
        log(u'file complete text="%s"' % ensure_unicode(text))
        # NOTE(review): map() returns a lazy iterator on Python 3; like the
        # u'' literals, this looks Python-2 era — confirm before porting.
        completions = map(ensure_unicode, glob.glob(os.path.expanduser(text) + '*'))
        if self.mark_directories == u'on':
            # append os.sep to directories, like readline's mark-directories
            mc = []
            for f in completions:
                if os.path.isdir(f):
                    mc.append(f + os.sep)
                else:
                    mc.append(f)
            completions = mc
        log(u'fnames=<%s>' % map(ensure_unicode, completions))
    return completions
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def complete(self, e): # (TAB) u"""Attempt to perform completion on the text before point. The actual completion performed is application-specific. The default is filename completion."""
def complete(self, e): # (TAB)
    u"""Attempt to perform completion on the text before point. The
    actual completion performed is application-specific. The default is
    filename completion."""
    # _get_completions also sets self.begidx/self.endidx for the word at point.
    completions = self._get_completions()
    if completions:
        # Insert the longest common prefix of all candidates at point.
        cprefix = commonprefix(completions)
        if len(cprefix) > 0:
            rep = [ c for c in cprefix ]
            point=self.l_buffer.point
            # Replace the partial word with the common prefix.
            self.l_buffer[self.begidx:self.endidx] = rep
            # Move the cursor by the net change in length.
            self.l_buffer.point = point + len(rep) - (self.endidx - self.begidx)
        if len(completions) > 1:
            # Ambiguous: either list all candidates or just beep.
            if self.show_all_if_ambiguous == u'on':
                self._display_completions(completions)
            else:
                self._bell()
    else:
        # Nothing to complete.
        self._bell()
    self.finalize()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def possible_completions(self, e): # (M-?) u"""List the possible completions of the text before point. """
def possible_completions(self, e): # (M-?)
    u"""List the possible completions of the text before point."""
    self._display_completions(self._get_completions())
    self.finalize()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insert_completions(self, e): # (M-*) u"""Insert all completions of the text before point that would have been generated by possible-completions."""
def insert_completions(self, e): # (M-*)
    u"""Insert all completions of the text before point that would have
    been generated by possible-completions."""
    # Must run first: it also sets self.begidx/self.endidx.
    completions = self._get_completions()
    start = self.begidx
    end = self.endidx
    for completion in completions:
        # Each candidate is inserted followed by a separating space.
        replacement = list(completion) + [' ']
        self.l_buffer[start:end] = replacement
        start += len(replacement)
        end = start
    self.line_cursor = start
    self.finalize()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def insert_text(self, string): u"""Insert text into the command line."""
def insert_text(self, string):
    u"""Insert text into the command line."""
    # Delegate to the line buffer; argument_reset consumes any numeric argument.
    self.l_buffer.insert_text(string, self.argument_reset)
    self.finalize()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def delete_char(self, e): # (C-d) u"""Delete the character at point. If point is at the beginning of the line, there are no characters in the line, and the last character typed was not bound to delete-char, then return EOF."""
def delete_char(self, e): # (C-d)
    u"""Delete the character at point. If point is at the beginning of
    the line, there are no characters in the line, and the last character
    typed was not bound to delete-char, then return EOF."""
    # Delegate to the line buffer; argument_reset consumes any numeric argument.
    self.l_buffer.delete_char(self.argument_reset)
    self.finalize()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: u"""Insert yourself. """
if e.char and ord(e.char)!=0: #don't insert null character in buffer, can happen with dead keys. self.insert_text(e.char) self.finalize()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def paste(self,e): u"""Paste windows clipboard. Assume single line strip other lines and end of line markers and trailing spaces"""
#(Control-v) if self.enable_win32_clipboard: txt=clipboard.get_clipboard_text_and_convert(False) txt=txt.split("\n")[0].strip("\r").strip("\n") log("paste: >%s<"%map(ord,txt)) self.insert_text(txt) self.finalize()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def dump_functions(self, e): # () u"""Print all of the functions and their key bindings to the Readline output stream. If a numeric argument is supplied, the output is formatted in such a way that it can be made part of an inputrc file. This command is unbound by default."""
def dump_functions(self, e): # ()
    u"""Print all of the functions and their key bindings to the Readline
    output stream. If a numeric argument is supplied, the output is
    formatted in such a way that it can be made part of an inputrc
    file. This command is unbound by default."""
    # Modernised from Python-2 `print` statements (syntax errors on Python 3).
    print()
    txt = "\n".join(self.rl_settings_to_string())
    print(txt)
    self._print_prompt()
    self.finalize()
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def fit(self, X, y=None): """Calls the ctmc.ctmc function Parameters X : list of lists (see ctmc function 'data') y not used, present for API consistence purpose. """
def fit(self, X, y=None):
    """Calls the ctmc.ctmc function.

    Parameters
        X : list of lists (see ctmc function 'data')
        y : not used, present for API consistency purposes.
    Returns self, with transmat/genmat/transcount/statetime populated.
    """
    estimates = ctmc(X, self.numstates, self.transintv, self.toltime, self.debug)
    self.transmat, self.genmat, self.transcount, self.statetime = estimates
    return self
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RV_1(self): """Instantaneous RV of star 1 with respect to system center-of-mass """
def RV_1(self):
    """Instantaneous RV of star 1 with respect to the system center-of-mass."""
    long_pop = self.orbpop_long
    # Star 1 moves opposite the pair's mass ratio: weight by M2/(M1+M2).
    mass_fraction = long_pop.M2 / (long_pop.M1 + long_pop.M2)
    return long_pop.RV * mass_fraction
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RV_2(self): """Instantaneous RV of star 2 with respect to system center-of-mass """
def RV_2(self):
    """Instantaneous RV of star 2 with respect to the system center-of-mass."""
    long_pop = self.orbpop_long
    # Long-orbit COM motion of the close pair, plus star 2's motion within it.
    com_rv = -long_pop.RV * (long_pop.M1 / (long_pop.M1 + long_pop.M2))
    return com_rv + self.orbpop_short.RV_com1
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RV_3(self): """Instantaneous RV of star 3 with respect to system center-of-mass """
def RV_3(self):
    """Instantaneous RV of star 3 with respect to the system center-of-mass."""
    long_pop = self.orbpop_long
    # Long-orbit COM motion of the close pair, plus star 3's motion within it.
    com_rv = -long_pop.RV * (long_pop.M1 / (long_pop.M1 + long_pop.M2))
    return com_rv + self.orbpop_short.RV_com2
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_hdf(self,filename,path=''): """Save to .h5 file. """
def save_hdf(self,filename,path=''):
    """Save both orbit populations to *filename*, under <path>/long and <path>/short."""
    long_path = '{}/long'.format(path)
    short_path = '{}/short'.format(path)
    self.orbpop_long.save_hdf(filename, long_path)
    self.orbpop_short.save_hdf(filename, short_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def Rsky(self): """Projected sky separation of stars """
def Rsky(self):
    """Projected sky separation of stars.

    Uses ``np.hypot``, which is numerically safer than
    ``sqrt(x**2 + y**2)``: the squared intermediates cannot
    overflow or underflow.
    """
    return np.hypot(self.position.x, self.position.y)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RV_com1(self): """RVs of star 1 relative to center-of-mass """
def RV_com1(self):
    """RVs of star 1 relative to the center-of-mass."""
    # Star 1's share of the relative RV is weighted by M2/(M1+M2).
    mass_fraction = self.M2 / (self.M1 + self.M2)
    return self.RV * mass_fraction
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def RV_com2(self): """RVs of star 2 relative to center-of-mass """
def RV_com2(self):
    """RVs of star 2 relative to the center-of-mass."""
    # Star 2 moves opposite star 1, weighted by M1/(M1+M2).
    mass_fraction = self.M1 / (self.M1 + self.M2)
    return -self.RV * mass_fraction
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def save_hdf(self,filename,path=''): """Saves all relevant data to .h5 file; so state can be restored. """
def save_hdf(self,filename,path=''):
    """Saves all relevant data to .h5 file; so state can be restored."""
    hdf_key = '{}/df'.format(path)
    self.dataframe.to_hdf(filename, hdf_key)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def add_pii_permissions(self, group, view_only=None): """Adds PII model permissions. """
def add_pii_permissions(self, group, view_only=None):
    """Adds PII (personally identifiable information) model permissions
    to *group*.

    With view_only=True only view/display permissions on the PII models
    are granted; otherwise all permissions on those models are. In both
    cases view access to the historical PII models is added, write access
    to RegisteredSubject is revoked, and view access to its historical
    model is granted.
    """
    # self.pii_models holds 'app_label.modelname' strings.
    pii_model_names = [m.split(".")[1] for m in self.pii_models]
    if view_only:
        permissions = Permission.objects.filter(
            (Q(codename__startswith="view") | Q(codename__startswith="display")),
            content_type__model__in=pii_model_names,
        )
    else:
        permissions = Permission.objects.filter(
            content_type__model__in=pii_model_names
        )
    for permission in permissions:
        group.permissions.add(permission)
    # View access to the historical (audit-trail) PII models.
    for model in self.pii_models:
        permissions = Permission.objects.filter(
            codename__startswith="view",
            content_type__app_label=model.split(".")[0],
            content_type__model=f"historical{model.split('.')[1]}",
        )
        for permission in permissions:
            group.permissions.add(permission)
    # RegisteredSubject must not be writable by this group.
    for permission in Permission.objects.filter(
        content_type__app_label="edc_registration",
        codename__in=[
            "add_registeredsubject",
            "delete_registeredsubject",
            "change_registeredsubject",
        ],
    ):
        group.permissions.remove(permission)
    permission = Permission.objects.get(
        content_type__app_label="edc_registration",
        codename="view_historicalregisteredsubject",
    )
    group.permissions.add(permission)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_attribute_cardinality(attribute): """ Returns the cardinality of the given resource attribute. :returns: One of the constants defined in :class:`everest.constants.CARDINALITY_CONSTANTS`. :raises ValueError: If the given attribute is not a relation attribute (i.e., if it is a terminal attribute). """
def get_attribute_cardinality(attribute):
    """
    Returns the cardinality of the given resource attribute.

    :returns: One of the constants defined in
      :class:`everest.constants.CARDINALITY_CONSTANTS`.
    :raises ValueError: If the given attribute is not a relation attribute
      (i.e., if it is a terminal attribute).
    """
    if attribute.kind == RESOURCE_ATTRIBUTE_KINDS.MEMBER:
        card = CARDINALITY_CONSTANTS.ONE
    elif attribute.kind == RESOURCE_ATTRIBUTE_KINDS.COLLECTION:
        card = CARDINALITY_CONSTANTS.MANY
    else:
        # Fixed message: cardinality is undefined for *terminal* attributes;
        # the old message said "non-terminal", contradicting the docstring.
        raise ValueError('Can not determine cardinality for terminal '
                         'attributes.')
    return card
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def setup(path_config="~/.config/scalar/config.yaml", configuration_name=None): """ Load a configuration from a default or specified configuration file, accessing a default or specified configuration name. """
def setup(path_config="~/.config/scalar/config.yaml", configuration_name=None):
    """
    Load a configuration from a default or specified configuration file,
    accessing a default or specified configuration name, then connect to
    the configured Matrix homeserver and room.
    """
    global config
    global client
    global token
    global room
    # config file
    path_config = Path(path_config).expanduser()
    log.debug("load config {path}".format(path = path_config))
    if not path_config.exists():
        log.error("no config {path} found".format(path = path_config))
        sys.exit()
    with open(str(path_config), "r") as _file:
        # safe_load: never execute arbitrary tags from the config file
        # (yaml.load without a Loader is unsafe and deprecated).
        config = yaml.safe_load(_file)
    if not configuration_name:
        # pick the configuration flagged as default (last one wins)
        for configuration in list(config["configurations"].items()):
            if configuration[1]["default"]:
                config = configuration[1]
    else:
        # bug fix: the looked-up configuration was previously discarded
        # (the expression's result was never assigned).
        config = config["configurations"][configuration_name]
    # connect to homeserver and room
    log.debug("Matrix username: " + config["username"])
    log.debug("connect to homeserver " + config["homeserver"])
    client = MatrixClient(config["homeserver"])
    token = client.login_with_password(username = config["username"], password = config["passcode"])
    log.debug("connect to room " + config["room_alias"])
    room = client.join_room(config["room_alias"])
def worker_wrapper(worker_instance, pid_path):
    """A wrapper to start RQ worker as a new process.

    :param worker_instance: RQ's worker instance
    :param pid_path: A file to check if the worker is running or not
    """
    def exit_handler(*args):
        """ Remove pid file on exit """
        if args:
            print("Exit py signal {signal}".format(signal=args[0]))
        remove(pid_path)

    # Remove the pid file on normal interpreter exit and on SIGINT/SIGTERM.
    atexit.register(exit_handler)
    for signum in (signal.SIGINT, signal.SIGTERM):
        signal.signal(signum, exit_handler)

    worker_instance.work()

    # Remove pid file if the process can not catch signals
    exit_handler(2)
def collection(self):
    """Return the redis-collection instance.

    The instance is created lazily and cached on the current application
    context; returns ``None`` when collections are disabled or when no
    context is active.
    """
    if not self.include_collections:
        return None
    ctx = stack.top
    if ctx is None:
        return None
    if not hasattr(ctx, 'redislite_collection'):
        ctx.redislite_collection = Collection(redis=self.connection)
    return ctx.redislite_collection
def queue(self):
    """The queue property. Return a dict of rq.Queue instances keyed by
    queue name, cached on the current application context; ``None`` when
    RQ support is disabled or no context is active.
    """
    if not self.include_rq:
        return None
    ctx = stack.top
    if ctx is None:
        return None
    if not hasattr(ctx, 'redislite_queue'):
        ctx.redislite_queue = {
            name: Queue(name, connection=self.connection)
            for name in self.queues
        }
    return ctx.redislite_queue
def start_worker(self):
    """Trigger new process as a RQ worker.

    If a pid file already records a running worker, return its PID;
    otherwise fork a worker process, record its PID, and return it.
    Returns ``None`` when RQ support is disabled.
    """
    if not self.include_rq:
        return None
    worker = Worker(queues=self.queues, connection=self.connection)
    worker_pid_path = current_app.config.get(
        "{}_WORKER_PID".format(self.config_prefix), 'rl_worker.pid'
    )
    try:
        # `with` guarantees the handle is closed even if int() raises.
        with open(worker_pid_path, 'r') as worker_pid_file:
            worker_pid = int(worker_pid_file.read())
        print("Worker already started with PID=%d" % worker_pid)
        return worker_pid
    except (IOError, TypeError, ValueError):
        # ValueError added: an empty/corrupt pid file previously escaped
        # this handler and crashed the caller.
        self.worker_process = Process(target=worker_wrapper, kwargs={
            'worker_instance': worker,
            'pid_path': worker_pid_path
        })
        self.worker_process.start()
        with open(worker_pid_path, 'w') as worker_pid_file:
            worker_pid_file.write("%d" % self.worker_process.pid)
        print("Start a worker process with PID=%d" % self.worker_process.pid)
        return self.worker_process.pid
def image_save_buffer_fix(maxblock=1048576):
    """Contextmanager that temporarily changes MAXBLOCK in PIL's ImageFile.

    The previous value is always restored, even if the body raises.
    """
    saved = ImageFile.MAXBLOCK
    ImageFile.MAXBLOCK = maxblock
    try:
        yield
    finally:
        ImageFile.MAXBLOCK = saved
def upgrade_many(upgrade=True, create_examples_all=True):
    """upgrade many libs.

    source: http://arduino.cc/playground/Main/LibraryList
    you can set your arduino path if it is not default

    :param upgrade: passed to ``install_lib``; upgrade already-installed libs.
    :param create_examples_all: rebuild the "all" examples menu afterwards.
    """
    # URLs already attempted this run; inst() asserts against duplicates.
    urls = set()

    def inst(url):
        # Install one library; failures are printed but do not abort the batch.
        print('upgrading %s' % url)
        assert url not in urls
        urls.add(url)
        try:
            lib = install_lib(url, upgrade)
            print(' -> %s' % lib)
        except Exception as e:
            print(e)

    ############################
    # github.com
    ############################
    inst('https://github.com/sensorium/Mozzi/zipball/master')
    inst('https://github.com/madsci1016/Arduino-EasyTransfer/zipball/master')
    inst('https://github.com/sparkfun/SevSeg/zipball/master')
    inst(
        'https://github.com/madsci1016/Arduino-SoftEasyTransfer/zipball/master')
    inst('https://github.com/madsci1016/Arduino-PS2X/zipball/master')
    # inst('http://github.com/wimleers/flexitimer2/zipball/v1.0')# can't install
    inst('https://github.com/kerinin/arduino-splines/zipball/master')
    inst('https://github.com/asynclabs/WiShield/zipball/master')
    inst('https://github.com/asynclabs/dataflash/zipball/master')
    inst('https://github.com/slugmobile/AtTouch/zipball/master')
    inst(
        'https://github.com/carlynorama/Arduino-Library-Button/zipball/master')
    inst(
        'https://github.com/carlynorama/Arduino-Library-FancyLED/zipball/master')
    inst('https://github.com/markfickett/arduinomorse/zipball/master')
    inst('https://github.com/rocketscream/Low-Power/zipball/master')
    inst(
        'https://github.com/arduino-libraries/CapacitiveSensor/zipball/master')
    ############################
    # arduiniana.org
    ############################
    # TODO: how to get latest version??
    inst('http://arduiniana.org/PString/PString2.zip')
    inst('http://arduiniana.org/Flash/Flash3.zip')
    inst('http://arduiniana.org/NewSoftSerial/NewSoftSerial10c.zip')
    inst('http://arduiniana.org/Streaming/Streaming4.zip')
    inst('http://arduiniana.org/PWMServo/PWMServo.zip')
    inst('http://arduiniana.org/TinyGPS/TinyGPS10.zip')
    ############################
    # google
    ############################
    # TODO: how to get latest version??
    # parse http://code.google.com/p/arduino-pinchangeint/downloads/list
    # simplified version in core
    inst('http://rogue-code.googlecode.com/files/Arduino-Library-Tone.zip')
    inst('http://arduino-playground.googlecode.com/files/LedDisplay03.zip')
    inst('http://sserial2mobile.googlecode.com/files/SSerial2Mobile-1.1.0.zip')
    inst('http://webduino.googlecode.com/files/webduino-1.4.1.zip')
    inst('http://arduino-pid-library.googlecode.com/files/PID_v1.0.1.zip')
    inst('http://ideoarduinolibraries.googlecode.com/files/Qtouch1Wire.zip')
    inst('http://arduino-timerone.googlecode.com/files/TimerOne-v8.zip')
    inst('http://arduinounit.googlecode.com/files/arduinounit-1.4.2.zip')
    inst('http://arduinode.googlecode.com/files/arduinode_0.1.zip')
    inst('http://arduino-edb.googlecode.com/files/EDB_r7.zip')
    inst('http://arduino-dblib.googlecode.com/files/DB.zip')
    inst(
        'http://morse-endecoder.googlecode.com/files/Morse_EnDecoder_2010.12.06.tar.gz')
    inst('http://arduino-pinchangeint.googlecode.com/files/PinChangeInt.zip')
    inst('http://arduino-tvout.googlecode.com/files/TVout_R5.91.zip')
    inst('http://narcoleptic.googlecode.com/files/Narcoleptic_v1a.zip')
    ############################
    # teensy
    ############################
    inst('http://www.pjrc.com/teensy/arduino_libraries/OneWire.zip')
    inst('http://www.pjrc.com/teensy/arduino_libraries/VirtualWire.zip')
    inst('http://www.pjrc.com/teensy/arduino_libraries/FrequencyTimer2.zip')
    inst('http://www.pjrc.com/teensy/arduino_libraries/FreqCount.zip')
    inst('http://www.pjrc.com/teensy/arduino_libraries/FreqMeasure.zip')
    ############################
    # others
    ############################
    # too big
    # inst('http://www.state-machine.com/arduino/qp_arduino.zip')
    # The owner of this website (download.milesburton.com) has banned your access based on your browser's signature
    # inst('http://download.milesburton.com/Arduino/MaximTemperature/DallasTemperature_370Beta.zip')
    inst('http://www.shikadi.net/files/arduino/SerialIP-1.0.zip')
    inst(
        'http://siggiorn.com/wp-content/uploads/libraries/ArduinoByteBuffer.zip')
    inst(
        'http://siggiorn.com/wp-content/uploads/libraries/ArduinoSerialManager.zip')
    inst('http://arduino-tweet.appspot.com/Library-Twitter-1.2.2.zip')
    # can't install
    # inst('http://gkaindl.com/php/download.php?key=ArduinoEthernet')
    inst(
        'http://sebastian.setz.name/wp-content/uploads/2011/01/multiCameraIrControl_1-5.zip')
    inst('http://alexandre.quessy.net/static/avr/Tween_01.zip')
    inst(
        'http://www.lpelettronica.it/images/stories/LPM11162_images/Arduino/LPM11162_ArduinoLib_v1.zip')
    # inst('http://nootropicdesign.com/hackvision/downloads/Controllers.zip')
    inst(
        'http://interface.khm.de/wp-content/uploads/2009/01/FreqCounter_1_12.zip')
    inst(
        'http://interface.khm.de/wp-content/uploads/2010/06/FreqPeriod_1_12.zip')
    ############################
    # arduino.cc
    ############################
    inst('http://arduino.cc/playground/uploads/Main/PS2Keyboard002.zip')
    inst('http://arduino.cc/playground/uploads/Code/Metro.zip')
    inst('http://www.arduino.cc/playground/uploads/Main/MsTimer2.zip')
    # can't install
    # inst('http://www.arduino.cc/playground/uploads/Code/Time.zip')
    inst('http://arduino.cc/playground/uploads/Main/LedControl.zip')
    # can't install
    # inst('http://www.arduino.cc/playground/uploads/Code/ks0108GLCD.zip')#
    inst('http://arduino.cc/playground/uploads/Code/Bounce.zip')
    inst('http://arduino.cc/playground/uploads/Main/CapacitiveSense003.zip')
    inst('http://arduino.cc/playground/uploads/Main/PinChangeInt.zip')
    # can't install
    # inst('http://arduino.cc/playground/uploads/Code/TimerThree.zip')
    inst('http://arduino.cc/playground/uploads/Code/TimedAction-1_6.zip')
    # can't install
    # inst('http://www.arduino.cc/playground/uploads/Code/Time.zip')
    inst('http://arduino.cc/playground/uploads/Code/EventFuse.zip')
    inst('http://arduino.cc/playground/uploads/Code/Charlieplex.zip')
    inst('http://arduino.cc/playground/uploads/Code/DigitalToggle.zip')
    inst('http://arduino.cc/playground/uploads/Code/Enerlib.zip')
    inst('http://arduino.cc/playground/uploads/Code/AdvButton_11.zip')
    # old version
    # inst('http://arduino.cc/playground/uploads/Code/AdvButton.zip')
    # can't install
    # inst('http://arduino.cc/playground/uploads/Code/SerialDebugger.zip')
    # inst('http://arduino.cc/playground/uploads/Code/MatrixMath.zip')
    inst('http://arduino.cc/playground/uploads/Code/StackArray.zip')
    inst('http://arduino.cc/playground/uploads/Code/StackList.zip')
    inst('http://arduino.cc/playground/uploads/Code/QueueArray.zip')
    inst('http://arduino.cc/playground/uploads/Code/QueueList.zip')
    inst('http://arduino.cc/playground/uploads/Code/Ping-1_3.zip')
    inst('http://www.arduino.cc/playground/uploads/Code/LED.zip')
    # inst('')
    if create_examples_all:
        print('create "all" menu item')
        exampallcreate.create_examples_all()
    print('install finished')
def confirm(self, batch_id=None, filename=None):
    """Flag the batch as confirmed by updating confirmation_datetime on
    the history model for this batch.

    When ``batch_id``/``filename`` are given, only matching records are
    confirmed; otherwise every sent-but-unconfirmed record is.
    Returns the confirmation code identifier.
    :raises ConfirmationError: if no unconfirmed sent records exist.
    """
    base_qs = self.history_model.objects.using(self.using)
    if batch_id or filename:
        export_history = base_qs.filter(
            Q(batch_id=batch_id) | Q(filename=filename),
            sent=True,
            confirmation_code__isnull=True,
        )
    else:
        export_history = base_qs.filter(
            sent=True, confirmation_code__isnull=True
        )
    if export_history.count() == 0:
        raise ConfirmationError(
            "Nothing to do. No history of sent and unconfirmed files"
        )
    confirmation_code = ConfirmationCode()
    export_history.update(
        confirmation_code=confirmation_code.identifier,
        confirmation_datetime=get_utcnow(),
    )
    return confirmation_code.identifier
def clean_single_word(word, lemmatizing="wordnet"):
    """Perform stemming or lemmatizing on a single word.

    If we are to search for a word in a clean bag-of-words, we need to
    search it after the same kind of preprocessing.

    Inputs:  - word: A string containing the source word.
             - lemmatizing: One of "porter", "snowball" or "wordnet".
    Output:  - lemma: The resulting clean lemma or stem.
    """
    if lemmatizing == "porter":
        return PorterStemmer().stem(word)
    if lemmatizing == "snowball":
        return SnowballStemmer('english').stem(word)
    if lemmatizing == "wordnet":
        return WordNetLemmatizer().lemmatize(word)
    print("Invalid lemmatizer argument.")
    raise RuntimeError
def clean_document(document, sent_tokenize, _treebank_word_tokenize, tagger, lemmatizer, lemmatize, stopset, first_cap_re, all_cap_re, digits_punctuation_whitespace_re, pos_set):
    """Extract a clean bag-of-words from a document.

    Pipeline: tokenize -> split camelCase and lowercase -> POS-filter ->
    strip digits/punctuation/whitespace -> remove stopwords -> lemmatize ->
    remove stopwords again (lemmas may themselves be stopwords).

    Inputs:  - document: A string containing some text.
             - sent_tokenize/_treebank_word_tokenize: tokenizer callables.
             - tagger: POS tagger with a .tag(tokens) method.
             - lemmatizer: unused here; lemmatize is the callable applied.
             - lemmatize: callable mapping a word to its lemma/stem.
             - stopset: set of stopwords.
             - first_cap_re/all_cap_re/digits_punctuation_whitespace_re:
               precompiled regexes used by the helper functions.
             - pos_set: POS tags to keep.
    Output:  - lemma_list: A python list of lemmas or stems.
             - lemma_to_keywordbag: dict mapping stems/lemmas to counts of
               the original words that produced them.
    """
    # Tokenizing text; on missing nltk resources, return empty results.
    try:
        tokenized_document = fast_word_tokenize(document, sent_tokenize, _treebank_word_tokenize)
    except LookupError:
        print("Warning: Could not tokenize document. If these warnings are commonplace, there is a problem with the nltk resources.")
        lemma_list = list()
        lemma_to_keywordbag = defaultdict(lambda: defaultdict(int))
        return lemma_list, lemma_to_keywordbag

    # Separate ["camelCase"] into ["camel", "case"] and lowercase everything.
    tokenized_document = [separate_camel_case(token, first_cap_re, all_cap_re).lower() for token in tokenized_document]

    # Parts-of-speech tagging; keep only tokens whose tag is in pos_set.
    tokenized_document = tagger.tag(tokenized_document)
    tokenized_document = [token[0] for token in tokenized_document if (token[1] in pos_set)]

    # Removing digits, punctuation and whitespace.
    # (bound-method locals are a deliberate hot-loop micro-optimization)
    tokenized_document_no_punctuation = list()
    append_token = tokenized_document_no_punctuation.append
    for token in tokenized_document:
        new_token = remove_digits_punctuation_whitespace(token, digits_punctuation_whitespace_re)
        if not new_token == u'':
            append_token(new_token)

    # Removing stopwords.
    tokenized_document_no_stopwords = list()
    append_word = tokenized_document_no_stopwords.append
    for word in tokenized_document_no_punctuation:
        if word not in stopset:
            append_word(word)

    # Stemming and lemmatizing; also track which original words map to
    # each lemma and how often.
    lemma_to_keywordbag = defaultdict(lambda: defaultdict(int))
    final_doc = list()
    append_lemma = final_doc.append
    for word in tokenized_document_no_stopwords:
        lemma = lemmatize(word)
        append_lemma(lemma)
        lemma_to_keywordbag[lemma][word] += 1

    # One more stopword removal: lemmatization can turn a non-stopword
    # into a stopword.
    lemma_list = list()
    append_word = lemma_list.append
    for word in final_doc:
        if word not in stopset:
            append_word(word)
    return lemma_list, lemma_to_keywordbag
def clean_corpus_serial(corpus, lemmatizing="wordnet"):
    """Extract a bag-of-words from each document in a corpus serially.

    Inputs:  - corpus: A python list of python strings. Each string is a document.
             - lemmatizing: A string containing one of the following: "porter", "snowball" or "wordnet".
    Output:  - list_of_bags_of_words: A list of python dictionaries representing bags-of-words.
             - lemma_to_keywordbag_total: Aggregated python dictionary that maps stems/lemmas to original topic keywords.
    """
    list_of_bags_of_words = list()
    append_bag_of_words = list_of_bags_of_words.append
    lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
    for document in corpus:
        # NOTE(review): clean_document() as defined in this module requires
        # many more positional arguments (tokenizers, tagger, regexes,
        # stopset); this two-argument call looks broken -- verify which
        # clean_document is intended here.
        word_list, lemma_to_keywordbag = clean_document(document=document, lemmatizing=lemmatizing)  # TODO: Alter this.
        bag_of_words = combine_word_list(word_list)
        append_bag_of_words(bag_of_words)
        # Aggregate per-document lemma->keyword counts into the total map.
        for lemma, keywordbag in lemma_to_keywordbag.items():
            for keyword, multiplicity in keywordbag.items():
                lemma_to_keywordbag_total[lemma][keyword] += multiplicity
    return list_of_bags_of_words, lemma_to_keywordbag_total
def extract_bag_of_words_from_corpus_parallel(corpus, lemmatizing="wordnet"):
    """Extract one bag-of-words from a list of strings, mapping document
    chunks to parallel worker processes.

    Inputs:  - corpus: A list of strings.
             - lemmatizing: One of "porter", "snowball" or "wordnet".
    Output:  - bag_of_words: This is a bag-of-words in python dictionary format.
             - lemma_to_keywordbag_total: Aggregated python dictionary that
               maps stems/lemmas to original topic keywords.
    """
    # Build a pool of processes.
    pool = Pool(processes=get_threads_number()*2,)
    # BUG FIX: use integer division (len/threads is a float on Python 3,
    # which breaks chunking) and never allow a chunk size of 0.
    chunk_size = max(1, len(corpus) // get_threads_number())
    partitioned_corpus = chunks(corpus, chunk_size)
    # Map the cleaning of the corpus chunks to a pool of processes.
    try:
        results = pool.map(
            partial(clean_corpus_serial, lemmatizing=lemmatizing),
            partitioned_corpus)
    finally:
        # Release worker processes deterministically.
        pool.close()
        pool.join()
    # BUG FIX: pool.map returns one (bags, lemma_map) tuple per chunk; the
    # old two-target unpacking only worked when there were exactly 2 chunks.
    list_of_bags_of_words = []
    list_of_lemma_to_keywordset_maps = []
    for bags, lemma_map in results:
        list_of_bags_of_words.extend(bags)
        list_of_lemma_to_keywordset_maps.append(lemma_map)
    # Reduce dictionaries to a single dictionary serially.
    bag_of_words = reduce_list_of_bags_of_words(list_of_bags_of_words)
    # Reduce lemma to keyword maps to a single dictionary.
    lemma_to_keywordbag_total = defaultdict(lambda: defaultdict(int))
    for lemma_to_keywordbag in list_of_lemma_to_keywordset_maps:
        for lemma, keywordbag in lemma_to_keywordbag.items():
            for keyword, multiplicity in keywordbag.items():
                lemma_to_keywordbag_total[lemma][keyword] += multiplicity
    return bag_of_words, lemma_to_keywordbag_total
def middleware(func):
    """ Executes routes.py route middleware """
    @wraps(func)
    def parse(*args, **kwargs):
        """ get middleware from route, execute middleware in order """
        # Detach the middleware list from kwargs before calling the view.
        ware_classes = copy.deepcopy(kwargs.pop('middleware'))
        if request.method == "OPTIONS":
            # return 200 json response for CORS
            return JsonResponse(200)
        if ware_classes is None:
            return func(*args, **kwargs)
        # Instantiate each middleware in order; the first one whose status
        # is False short-circuits with its own response.
        for ware_class in ware_classes:
            ware = ware_class()
            if ware.status is False:
                return ware.response
        return func(*args, **kwargs)
    return parse
def progress_bar_media():
    """progress_bar_media simple tag.

    Return the rendered script tag(s) for the javascript used by
    progress_bar, optionally including jQuery from its CDN.
    """
    if PROGRESSBARUPLOAD_INCLUDE_JQUERY:
        scripts = ["http://code.jquery.com/jquery-1.8.3.min.js", "js/progress_bar.js"]
    else:
        scripts = ["js/progress_bar.js"]
    return Media(js=scripts).render()
def send(MESSAGE, SOCKET, MESSAGE_ID=None, CODE_FILE=None, CODE_LINE=None, CODE_FUNC=None, **kwargs):
    r"""Send a message to the journal.

    Value of the MESSAGE argument will be used for the MESSAGE= field.
    MESSAGE must be a string and will be sent as UTF-8 to the journal.

    MESSAGE_ID can be given to uniquely identify the type of message. It
    must be a string or a uuid.UUID object.

    CODE_LINE, CODE_FILE, and CODE_FUNC can be specified to identify the
    caller. Unless at least one of the three is given, values are extracted
    from the stack frame of the caller of send(). CODE_FILE and CODE_FUNC
    must be strings, CODE_LINE must be an integer.

    Additional fields for the journal entry can only be specified as
    keyword arguments. The payload can be either a string or bytes. A
    string will be sent as UTF-8, and bytes will be sent as-is to the
    journal. Other useful fields include PRIORITY, SYSLOG_FACILITY,
    SYSLOG_IDENTIFIER, SYSLOG_PID.
    """
    args = ['MESSAGE=' + MESSAGE]

    if MESSAGE_ID is not None:
        # uuid.UUID exposes .hex; plain strings are used verbatim.
        # (renamed from `id` to avoid shadowing the builtin)
        mid = getattr(MESSAGE_ID, 'hex', MESSAGE_ID)
        args.append('MESSAGE_ID=' + mid)

    # Use identity checks: `== None` invokes __eq__, which arbitrary
    # objects may override.
    if CODE_LINE is None and CODE_FILE is None and CODE_FUNC is None:
        CODE_FILE, CODE_LINE, CODE_FUNC = \
            _traceback.extract_stack(limit=2)[0][:3]
    if CODE_FILE is not None:
        args.append('CODE_FILE=' + CODE_FILE)
    if CODE_LINE is not None:
        args.append('CODE_LINE={:d}'.format(CODE_LINE))
    if CODE_FUNC is not None:
        args.append('CODE_FUNC=' + CODE_FUNC)

    args.extend(_make_line(key.upper(), val) for key, val in kwargs.items())
    return sendv(SOCKET, *args)
def exists(self):
    """Check whether this item already exists in the database.

    Returns True when a row with this instance's id is found.
    """
    return self.query.filter_by(id=self.id).first() is not None
def delete(self):
    """Easy delete for db models.

    Deletes the row for this instance if it exists; errors during the
    delete/commit are deliberately swallowed (best-effort semantics).
    Always returns None.
    """
    try:
        if self.exists() is False:
            return None
        self.db.session.delete(self)
        self.db.session.commit()
    except Exception:
        # BUG FIX: was `except (Exception, BaseException)`, which also
        # swallowed KeyboardInterrupt/SystemExit. Still fail silently for
        # ordinary errors, per the original best-effort intent.
        return None
def row_to_dict(self, row):
    """Convert a raw GCVS record to a dictionary of star data."""
    ra, dec = self.parse_coordinates(row[2])
    max_magnitude, _max_symbol = self.parse_magnitude(row[4])
    min_magnitude, min_symbol = self.parse_magnitude(row[5])
    if min_symbol == '(' and max_magnitude is not None:
        # '(' on the min field means the value is an amplitude relative
        # to maximum brightness, not an absolute magnitude.
        min_magnitude = max_magnitude + min_magnitude
    return {
        'constellation': self.parse_constellation(row[0]),
        'name': self.parse_name(row[1]),
        'ra': ra,
        'dec': dec,
        'variable_type': row[3].strip(),
        'max_magnitude': max_magnitude,
        'min_magnitude': min_magnitude,
        'epoch': self.parse_epoch(row[8]),
        'period': self.parse_period(row[10]),
    }
def parse_magnitude(self, magnitude_str):
    """Convert a magnitude field to a float value, or ``None`` if GCVS
    does not list the magnitude.

    Returns a tuple (magnitude, symbol), where symbol can be either an
    empty string or a single character - one of '<', '>', '('.
    """
    flag = magnitude_str[0].strip()
    digits = magnitude_str[1:6].strip()
    if not digits:
        return None, flag
    return float(digits), flag
def parse_period(self, period_str):
    """Convert a period field to a float value, or ``None`` when the
    GCVS record carries no period.

    The field is first cleaned through TRANSLATION_MAP, then columns
    3..13 of the cleaned value hold the numeric period.
    """
    digits = period_str.translate(TRANSLATION_MAP)[3:14].strip()
    if not digits:
        return None
    return float(digits)
def find_hwpack_dir(root):
    """Search for the hwpack directory under root.

    The hwpack directory is identified as the (unique) parent of a
    boards.txt file; asserts if none or more than one is found.
    """
    root = path(root)
    log.debug('files in dir: %s', root)
    for entry in root.walkfiles():
        log.debug(' %s', entry)
    found = None
    for boards_file in root.walkfiles('boards.txt'):
        assert not found
        found = boards_file.parent
        log.debug('found hwpack: %s', found)
    assert found
    return found
def install_hwpack(url, replace_existing=False):
    """install hwpack library from web or local file system.

    :param url: web address or file path
    :param replace_existing: bool
    :rtype: None
    """
    # NOTE(review): tmpdir(tmpdir()) nests one temp dir inside another --
    # confirm this is intentional given tmpdir's signature.
    d = tmpdir(tmpdir())
    f = download(url)
    Archive(f).extractall(d)
    clean_dir(d)
    src_dhwpack = find_hwpack_dir(d)
    targ_dhwpack = hwpack_dir() / src_dhwpack.name
    # doaction decides whether the extracted hwpack is moved into place.
    doaction = 0
    if targ_dhwpack.exists():
        log.debug('hwpack already exists: %s', targ_dhwpack)
        if replace_existing:
            log.debug('remove %s', targ_dhwpack)
            targ_dhwpack.rmtree()
            doaction = 1
    else:
        doaction = 1
    if doaction:
        log.debug('move %s -> %s', src_dhwpack, targ_dhwpack)
        src_dhwpack.move(targ_dhwpack)
        # Propagate the hwpack root's permission bits onto the new tree.
        hwpack_dir().copymode(targ_dhwpack)
        for x in targ_dhwpack.walk():
            hwpack_dir().copymode(x)
def create(self, volume_id, vtype, size, affinity):
    """Create a volume.

    When ``volume_id`` is falsy, a fresh UUID4 string is used instead.
    """
    vid = volume_id or str(uuid.uuid4())
    payload = {
        'volume_type_name': vtype,
        'size': size,
        'affinity': affinity,
    }
    return self.http_put('/volumes/%s' % vid, params=self.unused(payload))
def restore(self, volume_id, **kwargs):
    """Restore a volume from a backup.

    Required kwargs: 'backup', 'size'. Optional: 'volume_type_name'
    (defaults to 'vtype'); 'size' defaults to 1 when falsy.
    """
    # These arguments are required
    self.required('create', kwargs, ['backup', 'size'])
    # Optional Arguments
    volume_id = volume_id or str(uuid.uuid4())
    # BUG FIX: use .get() -- 'volume_type_name' is not a required key, so
    # indexing kwargs directly raised KeyError when it was absent.
    kwargs['volume_type_name'] = kwargs.get('volume_type_name') or 'vtype'
    kwargs['size'] = kwargs.get('size') or 1
    # Make the request
    return self.http_put('/volumes/%s' % volume_id,
                         params=self.unused(kwargs))
def create(self, volume_id, backup_id):
    """Create a backup of the given volume.

    When ``backup_id`` is falsy, a fresh UUID4 string is used instead.
    """
    bid = backup_id or str(uuid.uuid4())
    return self.http_put('/backups/%s' % bid, params={'volume': volume_id})
def delete(self, volume_id, force=False):
    """Delete an export for the given volume."""
    url = '/volumes/%s/export' % volume_id
    return self.http_delete(url, params={'force': force})
def update(self, volume_id, **kwargs):
    """Update an export.

    Accepted kwargs: status, instance_id, mountpoint, ip, initiator,
    session_ip, session_initiator. ``None`` values are dropped before
    the request is made.
    """
    allowed_fields = ['status', 'instance_id', 'mountpoint', 'ip',
                      'initiator', 'session_ip', 'session_initiator']
    # These arguments are allowed
    self.allowed('update', kwargs, allowed_fields)
    # Remove parameters that are None
    return self.http_post('/volumes/%s/export' % volume_id,
                          params=self.unused(kwargs))
def proto_refactor(proto_filename, namespace, namespace_path):
    """Refactor a Protobuf file to import from a namespace that maps to
    the desired python package structure, and ensure the syntax is set to
    "proto2" (protoc complains without it).

    Args:
        proto_filename (str): the protobuf filename to be refactored
        namespace (str): the desired package name (i.e. "dropsonde.py2")
        namespace_path (str): the desired path corresponding to the
            package name (i.e. "dropsonde/py2")

    Returns:
        str: the refactored file contents.
    """
    # Context manager closes the handle even on read errors.
    with open(proto_filename) as f:
        data = f.read()
    if not re.search(r'syntax = "proto2"', data):
        data = 'syntax = "proto2";\n' + data
    # Rewrite e.g. `import "foo.proto";` -> `import "<ns_path>/foo.proto";`
    # Raw strings fix the invalid `\s` escape in the old pattern
    # (a SyntaxWarning on modern Python).
    substitution = 'import "{}/\\1";'.format(namespace_path)
    return re.sub(r'import\s+"([^"]+\.proto)"\s*;', substitution, data)
def proto_refactor_files(dest_dir, namespace, namespace_path):
    """Run the import refactoring on every Protobuf file under dest_dir.

    Args:
        dest_dir (str): directory where the Protobuf files live.
        namespace (str): the desired package name (i.e. "dropsonde.py2")
        namespace_path (str): the desired path corresponding to the
            package name (i.e. "dropsonde/py2")
    """
    for dirpath, _dirnames, filenames in os.walk(dest_dir):
        for filename in filenames:
            full_path = os.path.join(dirpath, filename)
            if not fnmatch.fnmatch(full_path, '*.proto'):
                continue
            data = proto_refactor(full_path, namespace, namespace_path)
            with open(full_path, 'w') as f:
                f.write(data)
def clone_source_dir(source_dir, dest_dir):
    """Copy the source Protobuf files into a build directory.

    Any existing destination directory is removed first so the copy
    starts from a clean slate.

    Args:
        source_dir (str): source directory of the Protobuf files
        dest_dir (str): destination directory of the Protobuf files
    """
    dest_exists = os.path.isdir(dest_dir)
    if dest_exists:
        print('removing', dest_dir)
        shutil.rmtree(dest_dir)
    shutil.copytree(source_dir, dest_dir)
def are_budget_data_package_fields_filled_in(self, resource):
    """Check that all budget data package fields are filled in; if any is
    missing, the resource cannot be a budget data package.
    """
    required = ('country', 'currency', 'year', 'status')
    return all(self.in_resource(field, resource) for field in required)
def generate_budget_data_package(self, resource):
    """Try to grab a budget data package schema from the resource.

    The schema only allows fields which are defined in the budget data
    package specification. If a field is found that is not in the
    specification this will raise NotABudgetDataPackageException and in
    that case we can just return and ignore the resource.
    """
    # Return if the budget data package fields have not been filled in
    if not self.are_budget_data_package_fields_filled_in(resource):
        return
    try:
        resource['schema'] = self.data.schema
    except exceptions.NotABudgetDataPackageException:
        # Not conforming to the spec: clear the schema and bail out.
        log.debug('Resource is not a Budget Data Package')
        resource['schema'] = []
        return
    # If the schema fits, this can be exported as a budget data package
    # so we add the missing metadata fields to the resource.
    resource['BudgetDataPackage'] = True
    resource['standard'] = self.data.version
    resource['granularity'] = self.data.granularity
    resource['type'] = self.data.budget_type
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def before_update(self, context, current, resource): """ If the resource has changed we try to generate a budget data package, but if it hasn't then we don't do anything """
# Return if the budget data package fields have not been filled in if not self.are_budget_data_package_fields_filled_in(resource): return if resource.get('upload', '') == '': # If it isn't an upload we check if it's the same url if current['url'] == resource['url']: # Return if it's the same return else: self.data.load(resource['url']) else: self.data.load(resource['upload'].file) self.generate_budget_data_package(resource)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def upload_directory_contents(input_dict, environment_dict): """This function serves to upload every file in a user-supplied source directory to all of the vessels in the current target group. It essentially calls seash's `upload` function repeatedly, each time with a file name taken from the source directory. A note on the input_dict argument: `input_dict` contains our own `command_dict` (see below), with the `"[ARGUMENT]"` sub-key of `children` renamed to what argument the user provided. In our case, this will be the source dir to read from. (If not, this is an error!) """
# Check user input and seash state: # 1, Make sure there is an active user key. if environment_dict["currentkeyname"] is None: raise seash_exceptions.UserError("""Error: Please set an identity before using 'uploaddir'! Example: !> loadkeys your_user_name !> as your_user_name your_user_name@ !> """) # 2, Make sure there is a target to work on. if environment_dict["currenttarget"] is None: raise seash_exceptions.UserError("""Error: Please set a target to work on before using 'uploaddir'! Example your_user_name@ !> on browsegood your_user_name@browsegood !> """) # 3, Complain if we don't have a source dir argument try: source_directory = input_dict["uploaddir"]["children"].keys()[0] except IndexError: raise seash_exceptions.UserError("""Error: Missing operand to 'uploaddir' Please specify which source directory's contents you want uploaded, e.g. your_user_name@browsegood !> uploaddir a_local_directory """) # Sanity check: Does the source dir exist? if not os.path.exists(source_directory): raise seash_exceptions.UserError("Error: Source directory '" + source_directory + "' does not exist.") # Sanity check: Is the source dir a directory? if not os.path.isdir(source_directory): raise seash_exceptions.UserError("Error: Source directory '" + source_directory + "' is not a directory.\nDid you mean to use the 'upload' command instead?") # Alright --- user input and seash state seem sane, let's do the work! # These are the files we will need to upload: file_list = os.listdir(source_directory) for filename in file_list: # We construct the filename-to-be uploaded from the source dir, # the OS-specific path separator, and the actual file name. # This is enough for `upload_target` to find the file. path_and_filename = source_directory + os.sep + filename if not os.path.isdir(path_and_filename): print "Uploading '" + path_and_filename + "'..." # Construct an input_dict containing command args for seash's # `upload FILENAME` function. # XXX There might be a cleaner way to do this. 
faked_input_dict = {"upload": {"name": "upload", "children": {path_and_filename: {"name": "filename"}}}} command_callbacks.upload_filename(faked_input_dict, environment_dict) else: print "Skipping sub-directory '" + filename + "'. You may upload it separately."
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def __load_file(self, key_list) -> str: """ Load a translator file """
file = str(key_list[0]) + self.extension key_list.pop(0) file_path = os.path.join(self.path, file) if os.path.exists(file_path): return Json.from_file(file_path) else: raise FileNotFoundError(file_path)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def remove_programmer(programmer_id): """remove programmer. :param programmer_id: programmer id (e.g. 'avrisp') :rtype: None """
log.debug('remove %s', programmer_id) lines = programmers_txt().lines() lines = filter( lambda x: not x.strip().startswith(programmer_id + '.'), lines) programmers_txt().write_lines(lines)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load(self, entity_class, entity): """ Load the given repository entity into the session and return a clone. If it was already loaded before, look up the loaded entity and return it. All entities referenced by the loaded entity will also be loaded (and cloned) recursively. :raises ValueError: When an attempt is made to load an entity that has no ID """
if self.__needs_flushing: self.flush() if entity.id is None: raise ValueError('Can not load entity without an ID.') cache = self.__get_cache(entity_class) sess_ent = cache.get_by_id(entity.id) if sess_ent is None: if self.__clone_on_load: sess_ent = self.__clone(entity, cache) else: # Only needed by the nosql backend pragma: no cover cache.add(entity) sess_ent = entity self.__unit_of_work.register_clean(entity_class, sess_ent) return sess_ent
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def onStart(self, event): """ Display the environment of a started container """
c = event.container print '+' * 5, 'started:', c kv = lambda s: s.split('=', 1) env = {k: v for (k, v) in (kv(s) for s in c.attrs['Config']['Env'])} print env
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _identifier_data(self): """Return a unique identifier for the folder data"""
# Use only file names data = [ff.name for ff in self.files] data.sort() # also use the folder name data.append(self.path.name) # add meta data data += self._identifier_meta() return hash_obj(data)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _search_files(path): """Search a folder for data files .. versionchanged:: 0.6.0 `path` is not searched recursively anymore """
path = pathlib.Path(path) fifo = [] for fp in path.glob("*"): if fp.is_dir(): continue for fmt in formats: # series data is not supported in SeriesFolder if not fmt.is_series and fmt.verify(fp): fifo.append((fp, fmt.__name__)) break # ignore qpimage formats if multiple formats were # detected. theformats = [ff[1] for ff in fifo] formset = set(theformats) if len(formset) > 1: fmts_qpimage = ["SingleHdf5Qpimage", "SeriesHdf5Qpimage"] fifo = [ff for ff in fifo if ff[1] not in fmts_qpimage] # ignore raw tif files if single_tif_phasics is detected if len(formset) > 1 and "SingleTifPhasics" in theformats: fmts_badtif = "SingleTifHolo" fifo = [ff for ff in fifo if ff[1] not in fmts_badtif] # otherwise, prevent multiple file formats theformats2 = [ff[1] for ff in fifo] formset2 = set(theformats2) if len(formset2) > 1: msg = "Qpformat does not support multiple different file " \ + "formats within one directory: {}".format(formset2) raise MultipleFormatsNotSupportedError(msg) # sort the lists fifo = sorted(fifo) return fifo
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_identifier(self, idx): """Return an identifier for the data at index `idx` .. versionchanged:: 0.4.2 indexing starts at 1 instead of 0 """
name = self._get_cropped_file_names()[idx] return "{}:{}:{}".format(self.identifier, name, idx + 1)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def verify(path): """Verify folder file format The folder file format is only valid when there is only one file format present. """
valid = True fifo = SeriesFolder._search_files(path) # dataset size if len(fifo) == 0: valid = False # number of different file formats fifmts = [ff[1] for ff in fifo] if len(set(fifmts)) != 1: valid = False return valid
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description:
def load_file(path): '''Load a txt data file''' path = pathlib.Path(path) data = path.open().readlines() # remove comments and empty lines data = [l for l in data if len(l.strip()) and not l.startswith("#")] # determine data shape n = len(data) m = len(data[0].strip().split()) res = np.zeros((n, m), dtype=np.dtype(float)) # write data to array, replacing comma with point decimal separator for ii in range(n): res[ii] = np.array(data[ii].strip().replace(",", ".").split(), dtype=float) return res
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def emit(self, record): """Write record as journal event. MESSAGE is taken from the message provided by the user, and PRIORITY, LOGGER, THREAD_NAME, CODE_{FILE,LINE,FUNC} fields are appended automatically. In addition, record.MESSAGE_ID will be used if present. """
if record.args and isinstance(record.args, collections.Mapping): extra = dict(self._extra, **record.args) # Merge metadata from handler and record else: extra = self._extra try: msg = self.format(record) pri = self.mapPriority(record.levelno) mid = getattr(record, 'MESSAGE_ID', None) send(msg, SOCKET=self.socket, MESSAGE_ID=mid, PRIORITY=format(pri), LOGGER=record.name, THREAD_NAME=record.threadName, CODE_FILE=record.pathname, CODE_LINE=record.lineno, CODE_FUNC=record.funcName, **extra) except Exception: self.handleError(record)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def mapPriority(levelno): """Map logging levels to journald priorities. Since Python log level numbers are "sparse", we have to map numbers in between the standard levels too. """
if levelno <= _logging.DEBUG: return LOG_DEBUG elif levelno <= _logging.INFO: return LOG_INFO elif levelno <= _logging.WARNING: return LOG_WARNING elif levelno <= _logging.ERROR: return LOG_ERR elif levelno <= _logging.CRITICAL: return LOG_CRIT else: return LOG_ALERT
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get_args(self, func): """ Get the arguments of a method and return it as a dictionary with the supplied defaults, method arguments with no default are assigned None """
def reverse(iterable): if iterable: iterable = list(iterable) while len(iterable): yield iterable.pop() args, varargs, varkw, defaults = inspect.getargspec(func) result = {} for default in reverse(defaults): result[args.pop()] = default for arg in reverse(args): if arg == 'self': continue result[arg] = None return result
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def guess_format(path): """Determine the file format of a folder or a file"""
for fmt in formats: if fmt.verify(path): return fmt.__name__ else: msg = "Undefined file format: '{}'".format(path) raise UnknownFileFormatError(msg)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def load_data(path, fmt=None, bg_data=None, bg_fmt=None, meta_data={}, holo_kw={}, as_type="float32"): """Load experimental data Parameters path: str Path to experimental data file or folder fmt: str The file format to use (see `file_formats.formats`). If set to `None`, the file format is guessed. bg_data: str Path to background data file or `qpimage.QPImage` bg_fmt: str The file format to use (see `file_formats.formats`) for the background. If set to `None`, the file format is be guessed. meta_data: dict Meta data (see `qpimage.meta.DATA_KEYS`) as_type: str Defines the data type that the input data is casted to. The default is "float32" which saves memory. If high numerical accuracy is required (does not apply for a simple 2D phase analysis), set this to double precision ("float64"). Returns ------- dataobj: SeriesData or SingleData Object that gives lazy access to the experimental data. """
path = pathlib.Path(path).resolve() # sanity checks for kk in meta_data: if kk not in qpimage.meta.DATA_KEYS: msg = "Meta data key not allowed: {}".format(kk) raise ValueError(msg) # ignore None or nan values in meta_data for kk in list(meta_data.keys()): if meta_data[kk] in [np.nan, None]: meta_data.pop(kk) if fmt is None: fmt = guess_format(path) else: if not formats_dict[fmt].verify(path): msg = "Wrong file format '{}' for '{}'!".format(fmt, path) raise WrongFileFormatError(msg) dataobj = formats_dict[fmt](path=path, meta_data=meta_data, holo_kw=holo_kw, as_type=as_type) if bg_data is not None: if isinstance(bg_data, qpimage.QPImage): # qpimage instance dataobj.set_bg(bg_data) else: # actual data on disk bg_path = pathlib.Path(bg_data).resolve() if bg_fmt is None: bg_fmt = guess_format(bg_path) bgobj = formats_dict[bg_fmt](path=bg_path, meta_data=meta_data, holo_kw=holo_kw, as_type=as_type) dataobj.set_bg(bgobj) return dataobj
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def duration(seconds): """Return a string of the form "1 hr 2 min 3 sec" representing the given number of seconds."""
if seconds < 1: return 'less than 1 sec' seconds = int(round(seconds)) components = [] for magnitude, label in ((3600, 'hr'), (60, 'min'), (1, 'sec')): if seconds >= magnitude: components.append('{} {}'.format(seconds // magnitude, label)) seconds %= magnitude return ' '.join(components)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_shortcut_prefix(self, user_agent, standart_prefix): """ Returns the shortcut prefix of browser. :param user_agent: The user agent of browser. :type user_agent: str :param standart_prefix: The default prefix. :type standart_prefix: str :return: The shortcut prefix of browser. :rtype: str """
# pylint: disable=no-self-use if user_agent is not None: user_agent = user_agent.lower() opera = 'opera' in user_agent mac = 'mac' in user_agent konqueror = 'konqueror' in user_agent spoofer = 'spoofer' in user_agent safari = 'applewebkit' in user_agent windows = 'windows' in user_agent chrome = 'chrome' in user_agent firefox = ( ('firefox' in user_agent) or ('minefield' in user_agent) ) internet_explorer = ( ('msie' in user_agent) or ('trident' in user_agent) ) if opera: return 'SHIFT + ESC' elif chrome and mac and (not spoofer): return 'CTRL + OPTION' elif safari and (not windows) and (not spoofer): return 'CTRL + ALT' elif (not windows) and (safari or mac or konqueror): return 'CTRL' elif firefox: return 'ALT + SHIFT' elif chrome or internet_explorer: return 'ALT' return standart_prefix return standart_prefix
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_role_description(self, role): """ Returns the description of role. :param role: The role. :type role: str :return: The description of role. :rtype: str """
parameter = 'role-' + role.lower() if self.configure.has_parameter(parameter): return self.configure.get_parameter(parameter) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_language_description(self, language_code): """ Returns the description of language. :param language_code: The BCP 47 code language. :type language_code: str :return: The description of language. :rtype: str """
language = language_code.lower() parameter = 'language-' + language if self.configure.has_parameter(parameter): return self.configure.get_parameter(parameter) elif '-' in language: codes = re.split(r'\-', language) parameter = 'language-' + codes[0] if self.configure.has_parameter(parameter): return self.configure.get_parameter(parameter) return None
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _get_description(self, element): """ Returns the description of element. :param element: The element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :return: The description of element. :rtype: str """
description = None if element.has_attribute('title'): description = element.get_attribute('title') elif element.has_attribute('aria-label'): description = element.get_attribute('aria-label') elif element.has_attribute('alt'): description = element.get_attribute('alt') elif element.has_attribute('label'): description = element.get_attribute('label') elif ( (element.has_attribute('aria-labelledby')) or (element.has_attribute('aria-describedby')) ): if element.has_attribute('aria-labelledby'): description_ids = re.split( '[ \n\r\t]+', element.get_attribute('aria-labelledby').strip() ) else: description_ids = re.split( '[ \n\r\t]+', element.get_attribute('aria-describedby').strip() ) for description_id in description_ids: element_description = self.parser.find( '#' + description_id ).first_result() if element_description is not None: description = element_description.get_text_content() break elif ( (element.get_tag_name() == 'INPUT') and (element.has_attribute('type')) ): type_attribute = element.get_attribute('type').lower() if ( ( (type_attribute == 'button') or (type_attribute == 'submit') or (type_attribute == 'reset') ) and (element.has_attribute('value')) ): description = element.get_attribute('value') if not bool(description): description = element.get_text_content() return re.sub('[ \n\r\t]+', ' ', description.strip())
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _generate_list_shortcuts(self): """ Generate the list of shortcuts of page. """
id_container_shortcuts_before = ( AccessibleDisplayImplementation.ID_CONTAINER_SHORTCUTS_BEFORE ) id_container_shortcuts_after = ( AccessibleDisplayImplementation.ID_CONTAINER_SHORTCUTS_AFTER ) local = self.parser.find('body').first_result() if local is not None: container_before = self.parser.find( '#' + id_container_shortcuts_before ).first_result() if ( (container_before is None) and (self.attribute_accesskey_before) ): container_before = self.parser.create_element('div') container_before.set_attribute( 'id', id_container_shortcuts_before ) text_container_before = self.parser.create_element('span') text_container_before.set_attribute( 'class', AccessibleDisplayImplementation.CLASS_TEXT_SHORTCUTS ) text_container_before.append_text( self.attribute_accesskey_before ) container_before.append_element(text_container_before) local.prepend_element(container_before) if container_before is not None: self.list_shortcuts_before = self.parser.find( container_before ).find_children('ul').first_result() if self.list_shortcuts_before is None: self.list_shortcuts_before = self.parser.create_element( 'ul' ) container_before.append_element(self.list_shortcuts_before) container_after = self.parser.find( '#' + id_container_shortcuts_after ).first_result() if ( (container_after is None) and (self.attribute_accesskey_after) ): container_after = self.parser.create_element('div') container_after.set_attribute( 'id', id_container_shortcuts_after ) text_container_after = self.parser.create_element('span') text_container_after.set_attribute( 'class', AccessibleDisplayImplementation.CLASS_TEXT_SHORTCUTS ) text_container_after.append_text( self.attribute_accesskey_after ) container_after.append_element(text_container_after) local.append_element(container_after) if container_after is not None: self.list_shortcuts_after = self.parser.find( container_after ).find_children('ul').first_result() if self.list_shortcuts_after is None: self.list_shortcuts_after = self.parser.create_element( 'ul' ) 
container_after.append_element(self.list_shortcuts_after) self.list_shortcuts_added = True
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _insert(self, element, new_element, before): """ Insert a element before or after other element. :param element: The reference element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param new_element: The element that be inserted. :type new_element: hatemile.util.html.htmldomelement.HTMLDOMElement :param before: To insert the element before the other element. :type before: bool """
tag_name = element.get_tag_name() append_tags = [ 'BODY', 'A', 'FIGCAPTION', 'LI', 'DT', 'DD', 'LABEL', 'OPTION', 'TD', 'TH' ] controls = ['INPUT', 'SELECT', 'TEXTAREA'] if tag_name == 'HTML': body = self.parser.find('body').first_result() if body is not None: self._insert(body, new_element, before) elif tag_name in append_tags: if before: element.prepend_element(new_element) else: element.append_element(new_element) elif tag_name in controls: labels = [] if element.has_attribute('id'): labels = self.parser.find( 'label[for="' + element.get_attribute('id') + '"]' ).list_results() if not labels: labels = self.parser.find(element).find_ancestors( 'label' ).list_results() for label in labels: self._insert(label, new_element, before) elif before: element.insert_before(new_element) else: element.insert_after(new_element)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _force_read_simple(self, element, text_before, text_after, data_of): """ Force the screen reader display an information of element. :param element: The reference element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param text_before: The text content to show before the element. :type text_before: str :param text_after: The text content to show after the element. :type text_after: str :param data_of: The name of attribute that links the content with element. :type data_of: str """
self.id_generator.generate_id(element) identifier = element.get_attribute('id') selector = '[' + data_of + '="' + identifier + '"]' reference_before = self.parser.find( '.' + AccessibleDisplayImplementation.CLASS_FORCE_READ_BEFORE + selector ).first_result() reference_after = self.parser.find( '.' + AccessibleDisplayImplementation.CLASS_FORCE_READ_AFTER + selector ).first_result() references = self.parser.find(selector).list_results() if reference_before in references: references.remove(reference_before) if reference_after in references: references.remove(reference_after) if not references: if text_before: if reference_before is not None: reference_before.remove_node() span = self.parser.create_element('span') span.set_attribute( 'class', AccessibleDisplayImplementation.CLASS_FORCE_READ_BEFORE ) span.set_attribute(data_of, identifier) span.append_text(text_before) self._insert(element, span, True) if text_after: if reference_after is not None: reference_after.remove_node() span = self.parser.create_element('span') span.set_attribute( 'class', AccessibleDisplayImplementation.CLASS_FORCE_READ_AFTER ) span.set_attribute(data_of, identifier) span.append_text(text_after) self._insert(element, span, False)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def _force_read( self, element, value, text_prefix_before, text_suffix_before, text_prefix_after, text_suffix_after, data_of ): """ Force the screen reader display an information of element with prefixes or suffixes. :param element: The reference element. :type element: hatemile.util.html.htmldomelement.HTMLDOMElement :param value: The value to be show. :type value: str :param text_prefix_before: The prefix of value to show before the element. :type text_prefix_before: str :param text_suffix_before: The suffix of value to show before the element. :type text_suffix_before: str :param text_prefix_after: The prefix of value to show after the element. :type text_prefix_after: str :param text_suffix_after: The suffix of value to show after the element. :type text_suffix_after: str :param data_of: The name of attribute that links the content with element. :type data_of: str """
if (text_prefix_before) or (text_suffix_before): text_before = text_prefix_before + value + text_suffix_before else: text_before = '' if (text_prefix_after) or (text_suffix_after): text_after = text_prefix_after + value + text_suffix_after else: text_after = '' self._force_read_simple(element, text_before, text_after, data_of)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def provider(func=None, *, singleton=False, injector=None): """ Decorator to mark a function as a provider. Args: singleton (bool): The returned value should be a singleton or shared instance. If False (the default) the provider function will be invoked again for every time it's needed for injection. injector (Injector): If provided, the function is immediately registered as a provider with the injector instance. Example: @diay.provider(singleton=True) def myfunc() -> MyClass: return MyClass(args) """
def decorator(func): wrapped = _wrap_provider_func(func, {'singleton': singleton}) if injector: injector.register_provider(wrapped) return wrapped if func: return decorator(func) return decorator
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def inject(*args, **kwargs): """ Mark a class or function for injection, meaning that a DI container knows that it should inject dependencies into it. Normally you won't need this as the injector will inject the required arguments anyway, but it can be used to inject properties into a class without having to specify it in the constructor, or to inject arguments that aren't properly type hinted. Example: @diay.inject('foo', MyClass) class MyOtherClass: pass assert isinstance(injector.get(MyOtherClass).foo, MyClass) """
def wrapper(obj): if inspect.isclass(obj) or callable(obj): _inject_object(obj, *args, **kwargs) return obj raise DiayException("Don't know how to inject into %r" % obj) return wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_plugin(self, plugin: Plugin): """ Register a plugin. """
if isinstance(plugin, Plugin): lazy = False elif issubclass(plugin, Plugin): lazy = True else: msg = 'plugin %r must be an object/class of type Plugin' % plugin raise DiayException(msg) predicate = inspect.isfunction if lazy else inspect.ismethod methods = inspect.getmembers(plugin, predicate=predicate) for _, method in methods: if getattr(method, '__di__', {}).get('provides'): if lazy: self.register_lazy_provider_method(plugin, method) else: self.register_provider(method)
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_provider(self, func): """ Register a provider function. """
if 'provides' not in getattr(func, '__di__', {}): raise DiayException('function %r is not a provider' % func) self.factories[func.__di__['provides']] = func
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def register_lazy_provider_method(self, cls, method): """ Register a class method lazily as a provider. """
if 'provides' not in getattr(method, '__di__', {}): raise DiayException('method %r is not a provider' % method) @functools.wraps(method) def wrapper(*args, **kwargs): return getattr(self.get(cls), method.__name__)(*args, **kwargs) self.factories[method.__di__['provides']] = wrapper
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_factory(self, thing: type, value, overwrite=False): """ Set the factory for something. """
if thing in self.factories and not overwrite: raise DiayException('factory for %r already exists' % thing) self.factories[thing] = value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def set_instance(self, thing: type, value, overwrite=False): """ Set an instance of a thing. """
if thing in self.instances and not overwrite: raise DiayException('instance for %r already exists' % thing) self.instances[thing] = value
<SYSTEM_TASK:> Solve the following problem using Python, implementing the functions described below, one line at a time <END_TASK> <USER_TASK:> Description: def get(self, thing: type): """ Get an instance of some type. """
if thing in self.instances: return self.instances[thing] if thing in self.factories: fact = self.factories[thing] ret = self.get(fact) if hasattr(fact, '__di__') and fact.__di__['singleton']: self.instances[thing] = ret return ret if inspect.isclass(thing): return self._call_class_init(thing) elif callable(thing): return self.call(thing) raise DiayException('cannot resolve: %r' % thing)